-rw-r--r--  build/Android.bp | 6
-rw-r--r--  build/Android.common_path.mk | 3
-rw-r--r--  build/Android.gtest.mk | 9
-rw-r--r--  cmdline/cmdline_parser_test.cc | 5
-rw-r--r--  cmdline/cmdline_types.h | 5
-rw-r--r--  compiler/dex/dex_to_dex_decompiler_test.cc | 13
-rw-r--r--  compiler/driver/compiler_driver.cc | 165
-rw-r--r--  compiler/driver/compiler_driver.h | 6
-rw-r--r--  compiler/driver/compiler_driver_test.cc | 17
-rw-r--r--  compiler/image_test.h | 2
-rw-r--r--  compiler/oat_test.cc | 8
-rw-r--r--  compiler/oat_writer.cc | 9
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 12
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 16
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 10
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 11
-rw-r--r--  compiler/verifier_deps_test.cc | 4
-rw-r--r--  dex2oat/dex2oat.cc | 53
-rw-r--r--  dexdump/dexdump.cc | 7
-rw-r--r--  dexdump/dexdump_cfg.cc | 5
-rw-r--r--  dexlayout/dex_ir.cc | 20
-rw-r--r--  dexlayout/dex_ir.h | 40
-rw-r--r--  dexlayout/dex_visualize.cc | 2
-rw-r--r--  dexlayout/dexlayout.cc | 52
-rw-r--r--  dexlayout/dexlayout_test.cc | 21
-rw-r--r--  dexlist/dexlist.cc | 4
-rw-r--r--  imgdiag/imgdiag.cc | 888
-rw-r--r--  oatdump/oatdump.cc | 81
-rw-r--r--  patchoat/patchoat.cc | 6
-rw-r--r--  profman/profile_assistant.cc | 63
-rw-r--r--  profman/profile_assistant_test.cc | 13
-rw-r--r--  profman/profman.cc | 35
-rw-r--r--  runtime/Android.bp | 3
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 48
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 43
-rw-r--r--  runtime/art_method.cc | 8
-rw-r--r--  runtime/base/array_ref.h | 33
-rw-r--r--  runtime/base/array_slice.h | 114
-rw-r--r--  runtime/base/length_prefixed_array.h | 6
-rw-r--r--  runtime/base/scoped_flock.cc | 105
-rw-r--r--  runtime/base/scoped_flock.h | 75
-rw-r--r--  runtime/base/scoped_flock_test.cc | 32
-rw-r--r--  runtime/class_linker.cc | 31
-rw-r--r--  runtime/common_dex_operations.h | 21
-rw-r--r--  runtime/common_runtime_test.h | 3
-rw-r--r--  runtime/debugger.cc | 6
-rw-r--r--  runtime/dex_file.cc | 8
-rw-r--r--  runtime/dex_file.h | 24
-rw-r--r--  runtime/dex_file_tracking_registrar.cc | 15
-rw-r--r--  runtime/dex_method_iterator.h | 8
-rw-r--r--  runtime/dex_reference_collection.h | 3
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc | 2
-rw-r--r--  runtime/gc/space/image_space.cc | 19
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 25
-rw-r--r--  runtime/interpreter/lock_count_data.cc | 111
-rw-r--r--  runtime/interpreter/lock_count_data.h | 74
-rw-r--r--  runtime/interpreter/shadow_frame.cc | 46
-rw-r--r--  runtime/interpreter/shadow_frame.h | 431
-rw-r--r--  runtime/java_frame_root_info.cc | 30
-rw-r--r--  runtime/java_frame_root_info.h | 52
-rw-r--r--  runtime/jit/jit_code_cache.cc | 65
-rw-r--r--  runtime/jit/jit_code_cache.h | 7
-rw-r--r--  runtime/jit/profile_compilation_info-inl.h | 69
-rw-r--r--  runtime/jit/profile_compilation_info.cc | 183
-rw-r--r--  runtime/jit/profile_compilation_info.h | 198
-rw-r--r--  runtime/jit/profile_compilation_info_test.cc | 111
-rw-r--r--  runtime/jit/profile_saver.cc | 74
-rw-r--r--  runtime/jit/profile_saver_options.h | 18
-rw-r--r--  runtime/jni_internal.cc | 97
-rw-r--r--  runtime/jvalue-inl.h | 18
-rw-r--r--  runtime/jvalue.h | 2
-rw-r--r--  runtime/managed_stack-inl.h | 2
-rw-r--r--  runtime/mem_map.cc | 188
-rw-r--r--  runtime/mem_map.h | 14
-rw-r--r--  runtime/method_handles.cc | 2
-rw-r--r--  runtime/method_handles.h | 4
-rw-r--r--  runtime/mirror/class-inl.h | 98
-rw-r--r--  runtime/mirror/class.h | 17
-rw-r--r--  runtime/oat_file_assistant.cc | 9
-rw-r--r--  runtime/openjdkjvmti/OpenjdkJvmTi.cc | 36
-rw-r--r--  runtime/openjdkjvmti/art_jvmti.h | 23
-rw-r--r--  runtime/openjdkjvmti/events-inl.h | 66
-rw-r--r--  runtime/openjdkjvmti/events.cc | 110
-rw-r--r--  runtime/openjdkjvmti/ti_class.cc | 68
-rw-r--r--  runtime/openjdkjvmti/ti_class.h | 6
-rw-r--r--  runtime/openjdkjvmti/ti_class_definition.cc | 8
-rw-r--r--  runtime/openjdkjvmti/ti_class_definition.h | 12
-rw-r--r--  runtime/openjdkjvmti/ti_class_loader.cc | 1
-rw-r--r--  runtime/openjdkjvmti/ti_field.cc | 64
-rw-r--r--  runtime/openjdkjvmti/ti_field.h | 5
-rw-r--r--  runtime/openjdkjvmti/ti_heap.cc | 1
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.cc | 14
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.h | 8
-rw-r--r--  runtime/openjdkjvmti/ti_stack.cc | 114
-rw-r--r--  runtime/openjdkjvmti/transform.cc | 5
-rw-r--r--  runtime/stack.cc | 169
-rw-r--r--  runtime/stack.h | 576
-rw-r--r--  runtime/thread.cc | 2
-rw-r--r--  runtime/vdex_file.cc | 16
-rw-r--r--  runtime/verifier/method_verifier.cc | 9
-rwxr-xr-x  test/004-JniTest/build | 35
-rw-r--r--  test/004-JniTest/src/dalvik/annotation/optimization/CriticalNative.java | 31
-rw-r--r--  test/004-JniTest/src/dalvik/annotation/optimization/FastNative.java | 31
-rwxr-xr-x  test/065-mismatched-implements/build (renamed from test/604-hot-static-interface/build) | 18
-rw-r--r--  test/104-growth-limit/src/Main.java | 55
-rw-r--r--  test/551-checker-shifter-operand/build | 212
-rw-r--r--  test/595-profile-saving/profile-saving.cc | 54
-rw-r--r--  test/595-profile-saving/run | 1
-rw-r--r--  test/595-profile-saving/src/Main.java | 32
-rwxr-xr-x  test/636-arm64-veneer-pool/build | 7
-rw-r--r--  test/641-checker-arraycopy/build | 6
-rw-r--r--  test/701-easy-div-rem/build | 2
-rw-r--r--  test/702-LargeBranchOffset/build | 2
-rw-r--r--  test/708-jit-cache-churn/expected.txt | 2
-rw-r--r--  test/708-jit-cache-churn/info.txt | 1
-rw-r--r--  test/708-jit-cache-churn/jit.cc | 56
-rw-r--r--  test/708-jit-cache-churn/src/JitCacheChurnTest.java | 279
-rw-r--r--  test/708-jit-cache-churn/src/Main.java | 31
-rw-r--r--  test/952-invoke-custom/build | 5
-rwxr-xr-x  test/953-invoke-polymorphic-compiler/build | 5
-rwxr-xr-x  test/954-invoke-polymorphic-verifier/build | 5
-rwxr-xr-x  test/955-methodhandles-smali/build | 5
-rwxr-xr-x  test/956-methodhandles/build | 5
-rwxr-xr-x  test/957-methodhandle-transforms/build | 5
-rwxr-xr-x  test/958-methodhandle-stackframe/build | 5
-rw-r--r--  test/959-invoke-polymorphic-accessors/build | 5
-rwxr-xr-x  test/960-default-smali/build | 7
-rwxr-xr-x  test/961-default-iface-resolution-gen/build | 10
-rw-r--r-- [-rwxr-xr-x]  test/962-iface-static/build | 9
-rwxr-xr-x  test/963-default-range-smali/build | 27
-rwxr-xr-x  test/964-default-iface-init-gen/build | 10
-rwxr-xr-x  test/965-default-verify/build | 27
-rwxr-xr-x  test/966-default-conflict/build | 27
-rwxr-xr-x  test/967-default-ame/build | 27
-rwxr-xr-x  test/969-iface-super/build | 7
-rw-r--r--  test/988-method-trace/expected.txt | 37
-rw-r--r--  test/988-method-trace/src/art/Test988.java | 8
-rw-r--r--  test/988-method-trace/src/art/Trace.java | 30
-rw-r--r--  test/989-method-trace-throw/src/art/Test989.java | 8
-rw-r--r--  test/989-method-trace-throw/src/art/Trace.java | 30
-rw-r--r--  test/990-field-trace/expected.txt | 52
-rw-r--r--  test/990-field-trace/info.txt | 1
-rwxr-xr-x [-rw-r--r--]  test/990-field-trace/run (renamed from test/048-reflect-v8/build) | 12
-rw-r--r--  test/990-field-trace/src/Main.java | 21
-rw-r--r--  test/990-field-trace/src/art/Test990.java | 232
-rw-r--r--  test/990-field-trace/src/art/Trace.java | 49
-rw-r--r--  test/991-field-trace-2/expected.txt | 118
-rw-r--r--  test/991-field-trace-2/field_trace.cc | 59
-rw-r--r--  test/991-field-trace-2/info.txt | 5
-rwxr-xr-x  test/991-field-trace-2/run (renamed from test/563-checker-invoke-super/build) | 12
-rw-r--r--  test/991-field-trace-2/src/Main.java | 21
-rw-r--r--  test/991-field-trace-2/src/art/Test991.java | 219
-rw-r--r--  test/991-field-trace-2/src/art/Trace.java | 49
-rw-r--r--  test/992-source-data/expected.txt | 10
-rw-r--r--  test/992-source-data/info.txt | 1
-rwxr-xr-x  test/992-source-data/run (renamed from test/146-bad-interface/build) | 14
-rw-r--r--  test/992-source-data/source_file.cc | 53
-rw-r--r--  test/992-source-data/src/Main.java | 21
-rw-r--r--  test/992-source-data/src/art/Test2.java | 19
-rw-r--r--  test/992-source-data/src/art/Test992.java | 47
-rw-r--r--  test/Android.bp | 3
-rwxr-xr-x  test/etc/default-build | 78
-rwxr-xr-x  test/etc/run-test-jar | 8
-rw-r--r--  test/knownfailures.json | 62
-rwxr-xr-x  test/run-test | 23
-rw-r--r--  test/testrunner/target_config.py | 11
-rwxr-xr-x  test/testrunner/testrunner.py | 9
-rw-r--r--  test/ti-agent/common_helper.cc | 340
-rw-r--r--  test/ti-stress/stress.cc | 599
-rwxr-xr-x  tools/bootjars.sh | 87
-rwxr-xr-x  tools/buildbot-build.sh | 11
-rwxr-xr-x  tools/desugar.sh | 91
-rw-r--r--  tools/libcore_failures.txt | 2
-rw-r--r--  tools/libcore_gcstress_debug_failures.txt | 16
-rwxr-xr-x  tools/run-jdwp-tests.sh | 29
-rwxr-xr-x  tools/run-libcore-tests.sh | 40
176 files changed, 6014 insertions, 3025 deletions
diff --git a/build/Android.bp b/build/Android.bp
index ed6de3546f..289834beb8 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -153,6 +153,12 @@ art_global_defaults {
// No exceptions.
"-misc-noexcept-move-constructor",
],
+
+ tidy_flags: [
+ // The static analyzer treats DCHECK as always enabled; we sometimes get
+ // false positives when we use DCHECKs with code that relies on NDEBUG.
+ "-extra-arg=-UNDEBUG",
+ ],
}
art_debug_defaults {
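Background for the -UNDEBUG tidy flag added above: ART's DCHECK is compiled out when NDEBUG
is defined, while clang-tidy's static analyzer reasons as if every DCHECK is live. A
simplified sketch of the relevant definitions (an approximation; the real ones live in
base/logging.h and may differ in detail):

    #ifdef NDEBUG
    static constexpr bool kEnableDChecks = false;  // release builds: DCHECK bodies vanish
    #else
    static constexpr bool kEnableDChecks = true;   // debug builds: DCHECK behaves like CHECK
    #endif
    #define DCHECK(x) if (kEnableDChecks) CHECK(x)

Undefining NDEBUG for the analyzer run keeps the code it sees consistent with its assumption
that DCHECKs are enabled, which removes the false positives mentioned in the comment.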
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 446611816a..f4f8d49a7a 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -74,9 +74,6 @@ HOST_CORE_IMG_LOCATION := $(HOST_OUT_JAVA_LIBRARIES)/core.art
TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art
# Jar files for core.art.
-TARGET_CORE_JARS := core-oj core-libart conscrypt okhttp bouncycastle apache-xml
-HOST_CORE_JARS := $(addsuffix -hostdex,$(TARGET_CORE_JARS))
-
HOST_CORE_DEX_LOCATIONS := $(foreach jar,$(HOST_CORE_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
ifeq ($(ART_TEST_ANDROID_ROOT),)
TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index bcf48fd891..5b5c10fa5d 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -121,7 +121,7 @@ ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode Xan
ART_GTEST_proxy_test_DEX_DEPS := Interfaces
ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods
ART_GTEST_profile_assistant_test_DEX_DEPS := ProfileTestMultiDex
-ART_GTEST_profile_compilation_info_test_DEX_DEPS := ProfileTestMultiDex
+ART_GTEST_profile_compilation_info_test_DEX_DEPS := ManyMethods ProfileTestMultiDex
ART_GTEST_runtime_callbacks_test_DEX_DEPS := XandY
ART_GTEST_stub_test_DEX_DEPS := AllFields
ART_GTEST_transaction_test_DEX_DEPS := Transaction
@@ -416,10 +416,13 @@ define define-art-gtest-rule-host
ART_TEST_HOST_GTEST_DEPENDENCIES += $$(gtest_deps)
+# Note: envsetup currently exports ASAN_OPTIONS=detect_leaks=0 to suppress leak detection, as some
+# build tools (e.g., ninja) intentionally leak. We want leak checks when we run our tests, so
+# override ASAN_OPTIONS. b/37751350
.PHONY: $$(gtest_rule)
$$(gtest_rule): $$(gtest_exe) $$(gtest_deps)
- $(hide) ($$(call ART_TEST_SKIP,$$@) && $$< && $$(call ART_TEST_PASSED,$$@)) \
- || $$(call ART_TEST_FAILED,$$@)
+ $(hide) ($$(call ART_TEST_SKIP,$$@) && ASAN_OPTIONS=detect_leaks=1 $$< && \
+ $$(call ART_TEST_PASSED,$$@)) || $$(call ART_TEST_FAILED,$$@)
ART_TEST_HOST_GTEST$$($(3)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule)
ART_TEST_HOST_GTEST_RULES += $$(gtest_rule)
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 07639e8a7d..b224ec72de 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -484,7 +484,7 @@ TEST_F(CmdlineParserTest, TestJitOptions) {
* -Xps-*
*/
TEST_F(CmdlineParserTest, ProfileSaverOptions) {
- ProfileSaverOptions opt = ProfileSaverOptions(true, 1, 2, 3, 4, 5, 6, 7, "abc");
+ ProfileSaverOptions opt = ProfileSaverOptions(true, 1, 2, 3, 4, 5, 6, 7, "abc", true);
EXPECT_SINGLE_PARSE_VALUE(opt,
"-Xjitsaveprofilinginfo "
@@ -495,7 +495,8 @@ TEST_F(CmdlineParserTest, ProfileSaverOptions) {
"-Xps-min-classes-to-save:5 "
"-Xps-min-notification-before-wake:6 "
"-Xps-max-notification-before-wake:7 "
- "-Xps-profile-path:abc",
+ "-Xps-profile-path:abc "
+ "-Xps-profile-boot-class-path",
M::ProfileSaverOpts);
} // TEST_F
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 185a0e403e..4de8a48d45 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -712,6 +712,11 @@ struct CmdlineType<ProfileSaverOptions> : CmdlineTypeParser<ProfileSaverOptions>
return Result::SuccessNoValue();
}
+ if (option == "profile-boot-class-path") {
+ existing.profile_boot_class_path_ = true;
+ return Result::SuccessNoValue();
+ }
+
// The rest of these options are always the wildcard from '-Xps-*'
std::string suffix = RemovePrefix(option);
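The parser change above only records the flag. A plausible accessor that the profile saver
would then consult (an assumption based on the runtime/jit/profile_saver_options.h entry in
the diffstat; this code is not shown in the hunk):

    bool GetProfileBootClassPath() const {
      return profile_boot_class_path_;
    }

With -Xps-profile-boot-class-path set, the saver can presumably also record methods and
classes that live in boot class path dex files rather than only those in the app's own dex
files.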
diff --git a/compiler/dex/dex_to_dex_decompiler_test.cc b/compiler/dex/dex_to_dex_decompiler_test.cc
index e486e2e6ec..88426a3b5f 100644
--- a/compiler/dex/dex_to_dex_decompiler_test.cc
+++ b/compiler/dex/dex_to_dex_decompiler_test.cc
@@ -39,10 +39,7 @@ class DexToDexDecompilerTest : public CommonCompilerTest {
TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
compiler_options_->boot_image_ = false;
compiler_options_->SetCompilerFilter(CompilerFilter::kQuicken);
- compiler_driver_->CompileAll(class_loader,
- GetDexFiles(class_loader),
- /* verifier_deps */ nullptr,
- &timings);
+ compiler_driver_->CompileAll(class_loader, GetDexFiles(class_loader), &timings);
}
void RunTest(const char* dex_name) {
@@ -85,13 +82,7 @@ class DexToDexDecompilerTest : public CommonCompilerTest {
continue;
}
ClassDataItemIterator it(*updated_dex_file, class_data);
- // Skip fields
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
// Unquicken each method.
while (it.HasNextDirectMethod()) {
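Several files in this change replace the hand-rolled field-skipping loops with new
ClassDataItemIterator helpers. A sketch of their likely shape (the real declarations are in
runtime/dex_file.h, which this change also touches, and may differ):

    void ClassDataItemIterator::SkipStaticFields() {
      while (HasNextStaticField()) {
        Next();
      }
    }

    void ClassDataItemIterator::SkipInstanceFields() {
      while (HasNextInstanceField()) {
        Next();
      }
    }

    void ClassDataItemIterator::SkipAllFields() {
      SkipStaticFields();
      SkipInstanceFields();
    }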
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 0d0769fe98..622448fc59 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -425,26 +425,6 @@ INTRINSICS_LIST(SETUP_INTRINSICS)
FreeThreadPools();
}
-void CompilerDriver::CompileAll(jobject class_loader,
- const std::vector<const DexFile*>& dex_files,
- VdexFile* vdex_file,
- TimingLogger* timings) {
- if (vdex_file != nullptr) {
- // TODO: we unquicken unconditionnally, as we don't know
- // if the boot image has changed. How exactly we'll know is under
- // experimentation.
- TimingLogger::ScopedTiming t("Unquicken", timings);
- // We do not decompile a RETURN_VOID_NO_BARRIER into a RETURN_VOID, as the quickening
- // optimization does not depend on the boot image (the optimization relies on not
- // having final fields in a class, which does not change for an app).
- VdexFile::Unquicken(dex_files, vdex_file->GetQuickeningInfo());
-
- Runtime::Current()->GetCompilerCallbacks()->SetVerifierDeps(
- new verifier::VerifierDeps(dex_files, vdex_file->GetVerifierDepsData()));
- }
- CompileAll(class_loader, dex_files, timings);
-}
-
static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
Thread* self, const CompilerDriver& driver, Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file, const DexFile::ClassDef& class_def)
@@ -828,13 +808,7 @@ static void ResolveConstStrings(CompilerDriver* driver,
}
ClassDataItemIterator it(*dex_file, class_data);
- // Skip fields
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
bool compilation_enabled = driver->IsClassToCompile(
dex_file->StringByTypeIdx(class_def.class_idx_));
@@ -1002,7 +976,7 @@ bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_r
}
// Compile only hot methods, it is the profile saver's job to decide what startup methods to mark
// as hot.
- bool result = profile_compilation_info_->ContainsHotMethod(method_ref);
+ bool result = profile_compilation_info_->GetMethodHotness(method_ref).IsHot();
if (kDebugProfileGuidedCompilation) {
LOG(INFO) << "[ProfileGuidedCompilation] "
@@ -1681,9 +1655,7 @@ bool CompilerDriver::RequiresConstructorBarrier(const DexFile& dex_file,
return false;
}
ClassDataItemIterator it(dex_file, class_data);
- while (it.HasNextStaticField()) {
- it.Next();
- }
+ it.SkipStaticFields();
// We require a constructor barrier if there are final instance fields.
while (it.HasNextInstanceField()) {
if (it.MemberIsFinal()) {
@@ -1893,13 +1865,7 @@ static void PopulateVerifiedMethods(const DexFile& dex_file,
return;
}
ClassDataItemIterator it(dex_file, class_data);
- // Skip fields
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
while (it.HasNextDirectMethod()) {
verification_results->CreateVerifiedMethodFor(MethodReference(&dex_file, it.GetMemberIndex()));
@@ -2272,6 +2238,8 @@ class InitializeClassVisitor : public CompilationVisitor {
const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
ScopedObjectAccessUnchecked soa(Thread::Current());
StackHandleScope<3> hs(soa.Self());
+ const bool is_boot_image = manager_->GetCompiler()->GetCompilerOptions().IsBootImage();
+ const bool is_app_image = manager_->GetCompiler()->GetCompilerOptions().IsAppImage();
mirror::Class::Status old_status = klass->GetStatus();;
// Only try to initialize classes that were successfully verified.
@@ -2293,23 +2261,28 @@ class InitializeClassVisitor : public CompilationVisitor {
ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
// Attempt to initialize allowing initialization of parent classes but still not static
// fields.
- bool is_superclass_initialized = InitializeDependencies(klass, class_loader, soa.Self());
- if (is_superclass_initialized) {
- manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
- }
+ manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
old_status = klass->GetStatus();
- // If superclass cannot be initialized, no need to proceed.
+ // If the class was not initialized, we can proceed to see if we can initialize static
+ // fields.
if (!klass->IsInitialized() &&
- is_superclass_initialized &&
+ (is_app_image || is_boot_image) &&
manager_->GetCompiler()->IsImageClass(descriptor)) {
bool can_init_static_fields = false;
- if (manager_->GetCompiler()->GetCompilerOptions().IsBootImage()) {
+ if (is_boot_image) {
// We need to initialize static fields, we only do this for image classes that aren't
- // marked with the $NoPreloadHolder (which implies this should not be initialized early).
+ // marked with the $NoPreloadHolder (which implies this should not be initialized
+ // early).
can_init_static_fields = !StringPiece(descriptor).ends_with("$NoPreloadHolder;");
} else {
- can_init_static_fields = manager_->GetCompiler()->GetCompilerOptions().IsAppImage() &&
+ CHECK(is_app_image);
+ // The boot image case doesn't need to recursively initialize the dependencies with
+ // special logic since the class linker already does this.
+ bool is_superclass_initialized =
+ InitializeDependencies(klass, class_loader, soa.Self());
+ can_init_static_fields =
!soa.Self()->IsExceptionPending() &&
+ is_superclass_initialized &&
NoClinitInDependency(klass, soa.Self(), &class_loader);
// TODO The checking for clinit can be removed since it's already
// checked when init superclass. Currently keep it because it contains
@@ -2352,6 +2325,10 @@ class InitializeClassVisitor : public CompilationVisitor {
soa.Self()->ClearException();
transaction.Rollback();
CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
+ } else if (is_boot_image) {
+ // For boot image, we want to put the updated status in the oat class since we can't
+ // reject the image anyways.
+ old_status = klass->GetStatus();
}
}
@@ -2361,6 +2338,8 @@ class InitializeClassVisitor : public CompilationVisitor {
// above as we will allocate strings, so must be allowed to suspend.
if (&klass->GetDexFile() == manager_->GetDexFile()) {
InternStrings(klass, class_loader);
+ } else {
+ DCHECK(!is_boot_image) << "Boot image must have equal dex files";
}
}
}
@@ -2453,63 +2432,63 @@ class InitializeClassVisitor : public CompilationVisitor {
bool ResolveTypesOfMethods(Thread* self, ArtMethod* m)
REQUIRES_SHARED(Locks::mutator_lock_) {
- auto rtn_type = m->GetReturnType(true); // return value is discarded because resolve will be done internally.
- if (rtn_type == nullptr) {
- self->ClearException();
- return false;
- }
- const DexFile::TypeList* types = m->GetParameterTypeList();
- if (types != nullptr) {
- for (uint32_t i = 0; i < types->Size(); ++i) {
- dex::TypeIndex param_type_idx = types->GetTypeItem(i).type_idx_;
- auto param_type = m->GetClassFromTypeIndex(param_type_idx, true);
- if (param_type == nullptr) {
- self->ClearException();
- return false;
- }
+ auto rtn_type = m->GetReturnType(true); // return value is discarded because resolve will be done internally.
+ if (rtn_type == nullptr) {
+ self->ClearException();
+ return false;
+ }
+ const DexFile::TypeList* types = m->GetParameterTypeList();
+ if (types != nullptr) {
+ for (uint32_t i = 0; i < types->Size(); ++i) {
+ dex::TypeIndex param_type_idx = types->GetTypeItem(i).type_idx_;
+ auto param_type = m->GetClassFromTypeIndex(param_type_idx, true);
+ if (param_type == nullptr) {
+ self->ClearException();
+ return false;
}
}
- return true;
+ }
+ return true;
}
// Pre resolve types mentioned in all method signatures before start a transaction
// since ResolveType doesn't work in transaction mode.
bool PreResolveTypes(Thread* self, const Handle<mirror::Class>& klass)
REQUIRES_SHARED(Locks::mutator_lock_) {
- PointerSize pointer_size = manager_->GetClassLinker()->GetImagePointerSize();
- for (ArtMethod& m : klass->GetMethods(pointer_size)) {
- if (!ResolveTypesOfMethods(self, &m)) {
+ PointerSize pointer_size = manager_->GetClassLinker()->GetImagePointerSize();
+ for (ArtMethod& m : klass->GetMethods(pointer_size)) {
+ if (!ResolveTypesOfMethods(self, &m)) {
+ return false;
+ }
+ }
+ if (klass->IsInterface()) {
+ return true;
+ } else if (klass->HasSuperClass()) {
+ StackHandleScope<1> hs(self);
+ MutableHandle<mirror::Class> super_klass(hs.NewHandle<mirror::Class>(klass->GetSuperClass()));
+ for (int i = super_klass->GetVTableLength() - 1; i >= 0; --i) {
+ ArtMethod* m = klass->GetVTableEntry(i, pointer_size);
+ ArtMethod* super_m = super_klass->GetVTableEntry(i, pointer_size);
+ if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
return false;
}
}
- if (klass->IsInterface()) {
- return true;
- } else if (klass->HasSuperClass()) {
- StackHandleScope<1> hs(self);
- MutableHandle<mirror::Class> super_klass(hs.NewHandle<mirror::Class>(klass->GetSuperClass()));
- for (int i = super_klass->GetVTableLength() - 1; i >= 0; --i) {
- ArtMethod* m = klass->GetVTableEntry(i, pointer_size);
- ArtMethod* super_m = super_klass->GetVTableEntry(i, pointer_size);
- if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
- return false;
- }
- }
- for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
- super_klass.Assign(klass->GetIfTable()->GetInterface(i));
- if (klass->GetClassLoader() != super_klass->GetClassLoader()) {
- uint32_t num_methods = super_klass->NumVirtualMethods();
- for (uint32_t j = 0; j < num_methods; ++j) {
- ArtMethod* m = klass->GetIfTable()->GetMethodArray(i)->GetElementPtrSize<ArtMethod*>(
- j, pointer_size);
- ArtMethod* super_m = super_klass->GetVirtualMethod(j, pointer_size);
- if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
- return false;
- }
+ for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+ super_klass.Assign(klass->GetIfTable()->GetInterface(i));
+ if (klass->GetClassLoader() != super_klass->GetClassLoader()) {
+ uint32_t num_methods = super_klass->NumVirtualMethods();
+ for (uint32_t j = 0; j < num_methods; ++j) {
+ ArtMethod* m = klass->GetIfTable()->GetMethodArray(i)->GetElementPtrSize<ArtMethod*>(
+ j, pointer_size);
+ ArtMethod* super_m = super_klass->GetVirtualMethod(j, pointer_size);
+ if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
+ return false;
}
}
}
}
- return true;
+ }
+ return true;
}
// Initialize the klass's dependencies recursively before initializing itself.
@@ -2785,13 +2764,7 @@ class CompileClassVisitor : public CompilationVisitor {
GetDexToDexCompilationLevel(soa.Self(), *driver, jclass_loader, dex_file, class_def);
ClassDataItemIterator it(dex_file, class_data);
- // Skip fields
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
bool compilation_enabled = driver->IsClassToCompile(
dex_file.StringByTypeIdx(class_def.class_idx_));
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 38e7d2c686..69f7b1bd3c 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -119,12 +119,6 @@ class CompilerDriver {
TimingLogger* timings)
REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_, !dex_to_dex_references_lock_);
- void CompileAll(jobject class_loader,
- const std::vector<const DexFile*>& dex_files,
- VdexFile* vdex_file,
- TimingLogger* timings)
- REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_, !dex_to_dex_references_lock_);
-
// Compile a single Method.
void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
REQUIRES_SHARED(Locks::mutator_lock_)
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 4b979d8125..b4ad325822 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -42,10 +42,7 @@ class CompilerDriverTest : public CommonCompilerTest {
void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
- compiler_driver_->CompileAll(class_loader,
- GetDexFiles(class_loader),
- /* verifier_deps */ nullptr,
- &timings);
+ compiler_driver_->CompileAll(class_loader, GetDexFiles(class_loader), &timings);
t.NewTiming("MakeAllExecutable");
MakeAllExecutable(class_loader);
}
@@ -239,14 +236,10 @@ class CompilerDriverProfileTest : public CompilerDriverTest {
ProfileCompilationInfo info;
for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
- profile_info_.AddMethodIndex(dex_file->GetLocation(),
- dex_file->GetLocationChecksum(),
- 1,
- dex_file->NumMethodIds());
- profile_info_.AddMethodIndex(dex_file->GetLocation(),
- dex_file->GetLocationChecksum(),
- 2,
- dex_file->NumMethodIds());
+ profile_info_.AddMethodIndex(ProfileCompilationInfo::MethodHotness::kFlagHot,
+ MethodReference(dex_file.get(), 1));
+ profile_info_.AddMethodIndex(ProfileCompilationInfo::MethodHotness::kFlagHot,
+ MethodReference(dex_file.get(), 2));
}
return &profile_info_;
}
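The profile queries in this change move from ContainsHotMethod/IsStartupOrHotMethod to a
MethodHotness value. A usage sketch consistent with the call sites in this diff (signatures
inferred from those call sites, not a definitive listing of the API):

    using Hotness = ProfileCompilationInfo::MethodHotness;

    void MarkAndQuery(ProfileCompilationInfo* info, const DexFile* dex_file, uint32_t idx) {
      MethodReference ref(dex_file, idx);
      info->AddMethodIndex(Hotness::kFlagHot, ref);   // record the method as hot
      Hotness hotness = info->GetMethodHotness(ref);
      if (hotness.IsHot()) {
        // Compile it ahead of time.
      } else if (hotness.HasAnyFlags()) {
        // Startup/post-startup but not hot: lay it out with other executed code.
      }
    }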
diff --git a/compiler/image_test.h b/compiler/image_test.h
index 3d89757d51..fa714ada6c 100644
--- a/compiler/image_test.h
+++ b/compiler/image_test.h
@@ -220,7 +220,7 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
TimingLogger timings("ImageTest::WriteRead", false, false);
TimingLogger::ScopedTiming t("CompileAll", &timings);
driver->SetDexFilesForOatFile(class_path);
- driver->CompileAll(class_loader, class_path, /* verifier_deps */ nullptr, &timings);
+ driver->CompileAll(class_loader, class_path, &timings);
t.NewTiming("WriteElf");
SafeMap<std::string, std::string> key_value_store;
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 55d0bd95d7..910d7a7c54 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -400,8 +400,7 @@ TEST_F(OatTest, WriteRead) {
if (kCompile) {
TimingLogger timings2("OatTest::WriteRead", false, false);
compiler_driver_->SetDexFilesForOatFile(class_linker->GetBootClassPath());
- compiler_driver_->CompileAll(
- class_loader, class_linker->GetBootClassPath(), /* verifier_deps */ nullptr, &timings2);
+ compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings2);
}
ScratchFile tmp_oat, tmp_vdex(tmp_oat, ".vdex");
@@ -415,8 +414,7 @@ TEST_F(OatTest, WriteRead) {
ASSERT_TRUE(success);
if (kCompile) { // OatWriter strips the code, regenerate to compare
- compiler_driver_->CompileAll(
- class_loader, class_linker->GetBootClassPath(), /* verifier_deps */ nullptr, &timings);
+ compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
}
std::unique_ptr<OatFile> oat_file(OatFile::Open(tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
@@ -540,7 +538,7 @@ TEST_F(OatTest, EmptyTextSection) {
soa.Decode<mirror::ClassLoader>(class_loader).Ptr());
}
compiler_driver_->SetDexFilesForOatFile(dex_files);
- compiler_driver_->CompileAll(class_loader, dex_files, /* verifier_deps */ nullptr, &timings);
+ compiler_driver_->CompileAll(class_loader, dex_files, &timings);
ScratchFile tmp_oat, tmp_vdex(tmp_oat, ".vdex");
SafeMap<std::string, std::string> key_value_store;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 59daf5a09e..f7465c0d5f 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1676,12 +1676,7 @@ bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) {
const uint8_t* class_data = dex_file->GetClassData(class_def);
if (class_data != nullptr) { // ie not an empty class, such as a marker interface
ClassDataItemIterator it(*dex_file, class_data);
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
size_t class_def_method_index = 0u;
while (it.HasNextDirectMethod()) {
if (!visitor->VisitMethod(class_def_method_index, it)) {
@@ -2544,7 +2539,7 @@ bool OatWriter::WriteDexFile(OutputStream* out,
return false;
}
if (profile_compilation_info_ != nullptr) {
- DCHECK(!update_input_vdex);
+ CHECK(!update_input_vdex) << "We should never update the input vdex when doing dexlayout";
if (!LayoutAndWriteDexFile(out, oat_dex_file)) {
return false;
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e4efbef394..0b3ac204ff 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2521,12 +2521,6 @@ void CodeGeneratorARM::GenerateFrameEntry() {
__ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
}
- if (GetGraph()->HasShouldDeoptimizeFlag()) {
- // Initialize should_deoptimize flag to 0.
- __ mov(IP, ShifterOperand(0));
- __ StoreToOffset(kStoreWord, IP, SP, -kShouldDeoptimizeFlagSize);
- }
-
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ AddConstant(SP, -adjust);
__ cfi().AdjustCFAOffset(adjust);
@@ -2537,6 +2531,12 @@ void CodeGeneratorARM::GenerateFrameEntry() {
if (RequiresCurrentMethod()) {
__ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ mov(IP, ShifterOperand(0));
+ __ StoreToOffset(kStoreWord, IP, SP, GetStackOffsetOfShouldDeoptimizeFlag());
+ }
}
void CodeGeneratorARM::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index c6bd871bc5..a8b00c358b 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2659,14 +2659,6 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
GetAssembler()->cfi().RelOffsetForMany(DWARFReg(s0), 0, fpu_spill_mask_, kArmWordSize);
}
- if (GetGraph()->HasShouldDeoptimizeFlag()) {
- UseScratchRegisterScope temps(GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- // Initialize should_deoptimize flag to 0.
- __ Mov(temp, 0);
- GetAssembler()->StoreToOffset(kStoreWord, temp, sp, -kShouldDeoptimizeFlagSize);
- }
-
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ Sub(sp, sp, adjust);
GetAssembler()->cfi().AdjustCFAOffset(adjust);
@@ -2677,6 +2669,14 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
if (RequiresCurrentMethod()) {
GetAssembler()->StoreToOffset(kStoreWord, kMethodRegister, sp, 0);
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ // Initialize should_deoptimize flag to 0.
+ __ Mov(temp, 0);
+ GetAssembler()->StoreToOffset(kStoreWord, temp, sp, GetStackOffsetOfShouldDeoptimizeFlag());
+ }
}
void CodeGeneratorARMVIXL::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 83a261d334..79fccfeaef 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1086,11 +1086,6 @@ void CodeGeneratorX86::GenerateFrameEntry() {
}
}
- if (GetGraph()->HasShouldDeoptimizeFlag()) {
- // Initialize should_deoptimize flag to 0.
- __ movl(Address(ESP, -kShouldDeoptimizeFlagSize), Immediate(0));
- }
-
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ subl(ESP, Immediate(adjust));
__ cfi().AdjustCFAOffset(adjust);
@@ -1100,6 +1095,11 @@ void CodeGeneratorX86::GenerateFrameEntry() {
if (RequiresCurrentMethod()) {
__ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ movl(Address(ESP, GetStackOffsetOfShouldDeoptimizeFlag()), Immediate(0));
+ }
}
void CodeGeneratorX86::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 7331a9e98e..57319ce735 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1298,12 +1298,6 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
}
}
- if (GetGraph()->HasShouldDeoptimizeFlag()) {
- // Initialize should_deoptimize flag to 0.
- __ movl(Address(CpuRegister(RSP), xmm_spill_location - kShouldDeoptimizeFlagSize),
- Immediate(0));
- }
-
// Save the current method if we need it. Note that we do not
// do this in HCurrentMethod, as the instruction might have been removed
// in the SSA graph.
@@ -1311,6 +1305,11 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
__ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
CpuRegister(kMethodRegisterArgument));
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ movl(Address(CpuRegister(RSP), GetStackOffsetOfShouldDeoptimizeFlag()), Immediate(0));
+ }
}
void CodeGeneratorX86_64::GenerateFrameExit() {
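All four backends above now write the should_deoptimize slot after the frame adjustment,
addressing it through GetStackOffsetOfShouldDeoptimizeFlag() instead of a negative offset
from the pre-adjustment SP. A plausible definition of that helper (an assumption; the real
one lives in the shared CodeGenerator code):

    size_t CodeGenerator::GetStackOffsetOfShouldDeoptimizeFlag() const {
      DCHECK(GetGraph()->HasShouldDeoptimizeFlag());
      // The flag sits just below the callee-save spill area, so from the adjusted SP
      // it is at frame_size - spill_size - flag_size.
      return GetFrameSize() - FrameEntrySpillSize() - kShouldDeoptimizeFlagSize;
    }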
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index dd09fed06e..7e616a7af0 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -151,9 +151,7 @@ class VerifierDepsTest : public CommonCompilerTest {
CHECK(class_data != nullptr);
ClassDataItemIterator it(*primary_dex_file_, class_data);
- while (it.HasNextStaticField() || it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
ArtMethod* method = nullptr;
while (it.HasNextDirectMethod()) {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index b88fe09359..f9267e2eb3 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1292,8 +1292,6 @@ class Dex2Oat FINAL {
}
// OAT and VDEX file handling
- bool eagerly_unquicken_vdex = DoDexLayoutOptimizations();
-
if (oat_fd_ == -1) {
DCHECK(!oat_filenames_.empty());
for (const char* oat_filename : oat_filenames_) {
@@ -1314,7 +1312,7 @@ class Dex2Oat FINAL {
input_vdex_file_ = VdexFile::Open(input_vdex_,
/* writable */ false,
/* low_4gb */ false,
- eagerly_unquicken_vdex,
+ DoEagerUnquickeningOfVdex(),
&error_msg);
}
@@ -1364,7 +1362,7 @@ class Dex2Oat FINAL {
"vdex",
/* writable */ false,
/* low_4gb */ false,
- eagerly_unquicken_vdex,
+ DoEagerUnquickeningOfVdex(),
&error_msg);
// If there's any problem with the passed vdex, just warn and proceed
// without it.
@@ -1770,7 +1768,19 @@ class Dex2Oat FINAL {
swap_fd_,
profile_compilation_info_.get()));
driver_->SetDexFilesForOatFile(dex_files_);
- driver_->CompileAll(class_loader_, dex_files_, input_vdex_file_.get(), timings_);
+
+ // Setup vdex for compilation.
+ if (!DoEagerUnquickeningOfVdex() && input_vdex_file_ != nullptr) {
+ callbacks_->SetVerifierDeps(
+ new verifier::VerifierDeps(dex_files_, input_vdex_file_->GetVerifierDepsData()));
+
+ // TODO: we unquicken unconditionally, as we don't know
+ // if the boot image has changed. How exactly we'll know is under
+ // experimentation.
+ TimingLogger::ScopedTiming time_unquicken("Unquicken", timings_);
+ VdexFile::Unquicken(dex_files_, input_vdex_file_->GetQuickeningInfo());
+ }
+ driver_->CompileAll(class_loader_, dex_files_, timings_);
}
// Notes on the interleaving of creating the images and oat files to
@@ -2146,6 +2156,12 @@ class Dex2Oat FINAL {
return DoProfileGuidedOptimizations();
}
+ bool DoEagerUnquickeningOfVdex() const {
+ // DexLayout can invalidate the vdex metadata, so we need to unquicken
+ // the vdex file eagerly, before passing it to dexlayout.
+ return DoDexLayoutOptimizations();
+ }
+
bool LoadProfile() {
DCHECK(UseProfile());
// TODO(calin): We should be using the runtime arena pool (instead of the
@@ -2153,30 +2169,27 @@ class Dex2Oat FINAL {
// cleaning up before that (e.g. the oat writers are created before the
// runtime).
profile_compilation_info_.reset(new ProfileCompilationInfo());
- ScopedFlock flock;
- bool success = true;
+ ScopedFlock profile_file;
std::string error;
if (profile_file_fd_ != -1) {
- // The file doesn't need to be flushed so don't check the usage.
- // Pass a bogus path so that we can easily attribute any reported error.
- File file(profile_file_fd_, "profile", /*check_usage*/ false, /*read_only_mode*/ true);
- if (flock.Init(&file, &error)) {
- success = profile_compilation_info_->Load(profile_file_fd_);
- }
+ profile_file = LockedFile::DupOf(profile_file_fd_, "profile",
+ true /* read_only_mode */, &error);
} else if (profile_file_ != "") {
- if (flock.Init(profile_file_.c_str(), O_RDONLY, /* block */ true, &error)) {
- success = profile_compilation_info_->Load(flock.GetFile()->Fd());
- }
+ profile_file = LockedFile::Open(profile_file_.c_str(), O_RDONLY, true, &error);
}
- if (!error.empty()) {
- LOG(WARNING) << "Cannot lock profiles: " << error;
+
+ // Return early if we're unable to obtain a lock on the profile.
+ if (profile_file.get() == nullptr) {
+ LOG(ERROR) << "Cannot lock profiles: " << error;
+ return false;
}
- if (!success) {
+ if (!profile_compilation_info_->Load(profile_file->Fd())) {
profile_compilation_info_.reset(nullptr);
+ return false;
}
- return success;
+ return true;
}
private:
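LoadProfile above now goes through the reworked ScopedFlock/LockedFile API (see
runtime/base/scoped_flock.h and .cc in the diffstat). A usage sketch inferred from these
call sites, with a made-up path; details of the real API may differ:

    std::string error;
    ScopedFlock profile = LockedFile::Open("/tmp/example.prof", O_RDONLY,
                                           /* block */ true, &error);
    if (profile.get() == nullptr) {
      LOG(ERROR) << "Cannot lock profile: " << error;
      return false;
    }
    // The file stays locked for as long as `profile` is alive.
    bool ok = profile_compilation_info_->Load(profile->Fd());

LockedFile::DupOf(fd, "name", /* read_only_mode */ true, &error) covers callers that only
have a file descriptor, as in the profile_file_fd_ branch above.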
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 1541d7b39e..df0169f7d0 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -1376,12 +1376,7 @@ static void dumpCfg(const DexFile* dex_file, int idx) {
return;
}
ClassDataItemIterator it(*dex_file, class_data);
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
while (it.HasNextDirectMethod()) {
dumpCfg(dex_file,
it.GetMemberIndex(),
diff --git a/dexdump/dexdump_cfg.cc b/dexdump/dexdump_cfg.cc
index 9e581280da..9c0429ff2b 100644
--- a/dexdump/dexdump_cfg.cc
+++ b/dexdump/dexdump_cfg.cc
@@ -373,10 +373,7 @@ void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostrea
}
ClassDataItemIterator it(*dex_file, class_data);
- // Skip fields
- while (it.HasNextStaticField() || it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
// Find method, and dump it.
while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 62ee445085..a200d8d9c7 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -690,10 +690,10 @@ CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
}
MethodItem* Collections::GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii) {
- MethodId* method_item = GetMethodId(cdii.GetMemberIndex());
+ MethodId* method_id = GetMethodId(cdii.GetMemberIndex());
uint32_t access_flags = cdii.GetRawMemberAccessFlags();
const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem();
- CodeItem* code_item = code_items_.GetExistingObject(cdii.GetMethodCodeItemOffset());;
+ CodeItem* code_item = code_items_.GetExistingObject(cdii.GetMethodCodeItemOffset());
DebugInfoItem* debug_info = nullptr;
if (disk_code_item != nullptr) {
if (code_item == nullptr) {
@@ -707,7 +707,7 @@ MethodItem* Collections::GenerateMethodItem(const DexFile& dex_file, ClassDataIt
disk_code_item, is_static, cdii.GetMemberIndex(), GetLocalsCb, debug_info);
dex_file.DecodeDebugPositionInfo(disk_code_item, GetPositionsCb, debug_info);
}
- return new MethodItem(access_flags, method_item, code_item);
+ return new MethodItem(access_flags, method_id, code_item);
}
ClassData* Collections::CreateClassData(
@@ -719,14 +719,14 @@ ClassData* Collections::CreateClassData(
ClassDataItemIterator cdii(dex_file, encoded_data);
// Static fields.
FieldItemVector* static_fields = new FieldItemVector();
- for (uint32_t i = 0; cdii.HasNextStaticField(); i++, cdii.Next()) {
+ for (; cdii.HasNextStaticField(); cdii.Next()) {
FieldId* field_item = GetFieldId(cdii.GetMemberIndex());
uint32_t access_flags = cdii.GetRawMemberAccessFlags();
static_fields->push_back(std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item)));
}
// Instance fields.
FieldItemVector* instance_fields = new FieldItemVector();
- for (uint32_t i = 0; cdii.HasNextInstanceField(); i++, cdii.Next()) {
+ for (; cdii.HasNextInstanceField(); cdii.Next()) {
FieldId* field_item = GetFieldId(cdii.GetMemberIndex());
uint32_t access_flags = cdii.GetRawMemberAccessFlags();
instance_fields->push_back(
@@ -734,15 +734,13 @@ ClassData* Collections::CreateClassData(
}
// Direct methods.
MethodItemVector* direct_methods = new MethodItemVector();
- for (uint32_t i = 0; cdii.HasNextDirectMethod(); i++, cdii.Next()) {
- direct_methods->push_back(
- std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, cdii)));
+ for (; cdii.HasNextDirectMethod(); cdii.Next()) {
+ direct_methods->push_back(std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, cdii)));
}
// Virtual methods.
MethodItemVector* virtual_methods = new MethodItemVector();
- for (uint32_t i = 0; cdii.HasNextVirtualMethod(); i++, cdii.Next()) {
- virtual_methods->push_back(
- std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, cdii)));
+ for (; cdii.HasNextVirtualMethod(); cdii.Next()) {
+ virtual_methods->push_back(std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, cdii)));
}
class_data = new ClassData(static_fields, instance_fields, direct_methods, virtual_methods);
class_data->SetSize(cdii.EndDataPointer() - encoded_data);
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 95e64bf3e7..fe7457231a 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -215,14 +215,38 @@ class Collections {
const DexFile& dex_file, const DexFile::CodeItem& disk_code_item, uint32_t offset);
ClassData* CreateClassData(const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset);
- StringId* GetStringId(uint32_t index) { return StringIds()[index].get(); }
- TypeId* GetTypeId(uint32_t index) { return TypeIds()[index].get(); }
- ProtoId* GetProtoId(uint32_t index) { return ProtoIds()[index].get(); }
- FieldId* GetFieldId(uint32_t index) { return FieldIds()[index].get(); }
- MethodId* GetMethodId(uint32_t index) { return MethodIds()[index].get(); }
- ClassDef* GetClassDef(uint32_t index) { return ClassDefs()[index].get(); }
- CallSiteId* GetCallSiteId(uint32_t index) { return CallSiteIds()[index].get(); }
- MethodHandleItem* GetMethodHandle(uint32_t index) { return MethodHandleItems()[index].get(); }
+ StringId* GetStringId(uint32_t index) {
+ CHECK_LT(index, StringIdsSize());
+ return StringIds()[index].get();
+ }
+ TypeId* GetTypeId(uint32_t index) {
+ CHECK_LT(index, TypeIdsSize());
+ return TypeIds()[index].get();
+ }
+ ProtoId* GetProtoId(uint32_t index) {
+ CHECK_LT(index, ProtoIdsSize());
+ return ProtoIds()[index].get();
+ }
+ FieldId* GetFieldId(uint32_t index) {
+ CHECK_LT(index, FieldIdsSize());
+ return FieldIds()[index].get();
+ }
+ MethodId* GetMethodId(uint32_t index) {
+ CHECK_LT(index, MethodIdsSize());
+ return MethodIds()[index].get();
+ }
+ ClassDef* GetClassDef(uint32_t index) {
+ CHECK_LT(index, ClassDefsSize());
+ return ClassDefs()[index].get();
+ }
+ CallSiteId* GetCallSiteId(uint32_t index) {
+ CHECK_LT(index, CallSiteIdsSize());
+ return CallSiteIds()[index].get();
+ }
+ MethodHandleItem* GetMethodHandle(uint32_t index) {
+ CHECK_LT(index, MethodHandleItemsSize());
+ return MethodHandleItems()[index].get();
+ }
StringId* GetStringIdOrNullPtr(uint32_t index) {
return index == DexFile::kDexNoIndex ? nullptr : GetStringId(index);
diff --git a/dexlayout/dex_visualize.cc b/dexlayout/dex_visualize.cc
index d279bcb65c..4b46341ada 100644
--- a/dexlayout/dex_visualize.cc
+++ b/dexlayout/dex_visualize.cc
@@ -174,7 +174,7 @@ class Dumper {
ProfileCompilationInfo* profile_info) {
if (profile_info != nullptr) {
uint32_t method_idx = method->GetMethodId()->GetIndex();
- if (!profile_info->ContainsHotMethod(MethodReference(dex_file, method_idx))) {
+ if (!profile_info->GetMethodHotness(MethodReference(dex_file, method_idx)).IsHot()) {
return;
}
}
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 50dda88c55..22f0cb042e 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -1557,7 +1557,7 @@ void DexLayout::LayoutStringData(const DexFile* dex_file) {
(method->GetAccessFlags() & kAccConstructor) != 0 &&
(method->GetAccessFlags() & kAccStatic) != 0;
const bool method_executed = is_clinit ||
- info_->IsStartupOrHotMethod(MethodReference(dex_file, method_id->GetIndex()));
+ info_->GetMethodHotness(MethodReference(dex_file, method_id->GetIndex())).HasAnyFlags();
if (!method_executed) {
continue;
}
@@ -1663,19 +1663,21 @@ int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
}
}
- enum CodeItemKind {
- kMethodNotExecuted = 0,
- kMethodClinit = 1,
- kMethodExecuted = 2,
- kSize = 3,
+ enum CodeItemState {
+ kCodeItemStateExecStartupOnly = 0,
+ kCodeItemStateHot,
+ kCodeItemStateClinit,
+ kCodeItemStateExec,
+ kCodeItemStateNotExecuted,
+ kCodeItemStateSize,
};
static constexpr InvokeType invoke_types[] = {
- kDirect,
- kVirtual
+ kDirect,
+ kVirtual
};
- std::unordered_set<dex_ir::CodeItem*> code_items[CodeItemKind::kSize];
+ std::unordered_set<dex_ir::CodeItem*> code_items[kCodeItemStateSize];
for (InvokeType invoke_type : invoke_types) {
for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
const bool is_profile_class =
@@ -1695,18 +1697,25 @@ int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
continue;
}
// Separate executed methods (clinits and profiled methods) from unexecuted methods.
- const bool is_clinit = is_profile_class &&
- (method->GetAccessFlags() & kAccConstructor) != 0 &&
+ const bool is_clinit = (method->GetAccessFlags() & kAccConstructor) != 0 &&
(method->GetAccessFlags() & kAccStatic) != 0;
- const bool is_method_executed =
- info_->IsStartupOrHotMethod(MethodReference(dex_file, method_id->GetIndex()));
- CodeItemKind code_item_kind = CodeItemKind::kMethodNotExecuted;
- if (is_clinit) {
- code_item_kind = CodeItemKind::kMethodClinit;
- } else if (is_method_executed) {
- code_item_kind = CodeItemKind::kMethodExecuted;
+ const bool is_startup_clinit = is_profile_class && is_clinit;
+ using Hotness = ProfileCompilationInfo::MethodHotness;
+ Hotness hotness = info_->GetMethodHotness(MethodReference(dex_file, method_id->GetIndex()));
+ CodeItemState state = kCodeItemStateNotExecuted;
+ if (hotness.IsHot()) {
+ // Hot code is compiled, maybe one day it won't be accessed. So lay it out together for
+ // now.
+ state = kCodeItemStateHot;
+ } else if (is_startup_clinit || hotness.GetFlags() == Hotness::kFlagStartup) {
+ // Startup clinit or a method that only has the startup flag.
+ state = kCodeItemStateExecStartupOnly;
+ } else if (is_clinit) {
+ state = kCodeItemStateClinit;
+ } else if (hotness.HasAnyFlags()) {
+ state = kCodeItemStateExec;
}
- code_items[code_item_kind].insert(code_item);
+ code_items[state].insert(code_item);
}
}
}
@@ -1718,6 +1727,7 @@ int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
for (std::unordered_set<dex_ir::CodeItem*>& code_items_set : code_items) {
// diff is reset for each class of code items.
int32_t diff = 0;
+ uint32_t start_offset = code_item_offset;
for (dex_ir::ClassData* data : new_class_data_order) {
data->SetOffset(data->GetOffset() + diff);
for (InvokeType invoke_type : invoke_types) {
@@ -1736,6 +1746,10 @@ int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
}
}
}
+ for (size_t i = 0; i < kCodeItemStateSize; ++i) {
+ VLOG(dex) << "Code item layout bucket " << i << " count=" << code_items[i].size()
+ << " bytes=" << code_item_offset - start_offset;
+ }
total_diff += diff;
}
// Adjust diff to be 4-byte aligned.
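The buckets are visited in enum order by the loop over code_items, so the resulting on-disk
order of code items after layout is (a reading of the code above, not behavior beyond what
the diff shows):

    // 1. kCodeItemStateExecStartupOnly: code touched only during startup
    // 2. kCodeItemStateHot:             hot (AOT-compiled) code
    // 3. kCodeItemStateClinit:          class initializers
    // 4. kCodeItemStateExec:            other executed code
    // 5. kCodeItemStateNotExecuted:     code never seen in the profile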
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 6fe8eeb66e..43c531deb7 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -334,24 +334,23 @@ class DexLayoutTest : public CommonRuntimeTest {
size_t profile_methods = 0;
size_t profile_classes = 0;
ProfileCompilationInfo pfi;
- std::vector<ProfileMethodInfo> pmis;
std::set<DexCacheResolvedClasses> classes;
for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
for (uint32_t i = 0; i < dex_file->NumMethodIds(); i += 2) {
+ uint8_t flags = 0u;
+
if ((i & 3) != 0) {
- pfi.AddMethodIndex(dex_location,
- dex_file->GetLocationChecksum(),
- i,
- dex_file->NumMethodIds());
+ flags |= ProfileCompilationInfo::MethodHotness::kFlagHot;
++profile_methods;
} else if ((i & 2) != 0) {
- pfi.AddSampledMethod(/*startup*/true,
- dex_location,
- dex_file->GetLocationChecksum(),
- i,
- dex_file->NumMethodIds());
+ flags |= ProfileCompilationInfo::MethodHotness::kFlagStartup;
++profile_methods;
}
+ pfi.AddMethodIndex(static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags),
+ dex_location,
+ dex_file->GetLocationChecksum(),
+ /*dex_method_idx*/i,
+ dex_file->NumMethodIds());
}
DexCacheResolvedClasses cur_classes(dex_location,
dex_location,
@@ -366,7 +365,7 @@ class DexLayoutTest : public CommonRuntimeTest {
}
classes.insert(cur_classes);
}
- pfi.AddMethodsAndClasses(pmis, classes);
+ pfi.AddClasses(classes);
// Write to provided file.
std::unique_ptr<File> file(OS::CreateEmptyFile(out_profile.c_str()));
ASSERT_TRUE(file != nullptr);
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index efe1aad7c6..29707af704 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -149,9 +149,7 @@ void dumpClass(const DexFile* pDexFile, u4 idx) {
const u1* pEncodedData = pDexFile->GetClassData(pClassDef);
if (pEncodedData != nullptr) {
ClassDataItemIterator pClassData(*pDexFile, pEncodedData);
- // Skip the fields.
- for (; pClassData.HasNextStaticField(); pClassData.Next()) {}
- for (; pClassData.HasNextInstanceField(); pClassData.Next()) {}
+ pClassData.SkipAllFields();
// Direct methods.
for (; pClassData.HasNextDirectMethod(); pClassData.Next()) {
dumpMethod(pDexFile, fileName,
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 06a0f231ba..99a438ed54 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -61,54 +61,16 @@ class ImgDiagDumper {
image_header_(image_header),
image_location_(image_location),
image_diff_pid_(image_diff_pid),
- zygote_diff_pid_(zygote_diff_pid) {}
+ zygote_diff_pid_(zygote_diff_pid),
+ zygote_pid_only_(false) {}
- bool Dump() REQUIRES_SHARED(Locks::mutator_lock_) {
- std::ostream& os = *os_;
- os << "IMAGE LOCATION: " << image_location_ << "\n\n";
-
- os << "MAGIC: " << image_header_.GetMagic() << "\n\n";
-
- os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n";
-
- bool ret = true;
- if (image_diff_pid_ >= 0) {
- os << "IMAGE DIFF PID (" << image_diff_pid_ << "): ";
- ret = DumpImageDiff(image_diff_pid_, zygote_diff_pid_);
- os << "\n\n";
- } else {
- os << "IMAGE DIFF PID: disabled\n\n";
- }
-
- os << std::flush;
-
- return ret;
- }
-
- private:
- static bool EndsWith(const std::string& str, const std::string& suffix) {
- return str.size() >= suffix.size() &&
- str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
- }
-
- // Return suffix of the file path after the last /. (e.g. /foo/bar -> bar, bar -> bar)
- static std::string BaseName(const std::string& str) {
- size_t idx = str.rfind('/');
- if (idx == std::string::npos) {
- return str;
- }
-
- return str.substr(idx + 1);
- }
-
- bool DumpImageDiff(pid_t image_diff_pid, pid_t zygote_diff_pid)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool Init() {
std::ostream& os = *os_;
{
struct stat sts;
std::string proc_pid_str =
- StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid_)); // NOLINT [runtime/int]
if (stat(proc_pid_str.c_str(), &sts) == -1) {
os << "Process does not exist";
return false;
@@ -116,19 +78,18 @@ class ImgDiagDumper {
}
// Open /proc/$pid/maps to view memory maps
- auto proc_maps = std::unique_ptr<BacktraceMap>(BacktraceMap::Create(image_diff_pid));
- if (proc_maps == nullptr) {
+ auto tmp_proc_maps = std::unique_ptr<BacktraceMap>(BacktraceMap::Create(image_diff_pid_));
+ if (tmp_proc_maps == nullptr) {
os << "Could not read backtrace maps";
return false;
}
bool found_boot_map = false;
- backtrace_map_t boot_map = backtrace_map_t();
// Find the memory map only for boot.art
- for (const backtrace_map_t& map : *proc_maps) {
+ for (const backtrace_map_t& map : *tmp_proc_maps) {
if (EndsWith(map.name, GetImageLocationBaseName())) {
if ((map.flags & PROT_WRITE) != 0) {
- boot_map = map;
+ boot_map_ = map;
found_boot_map = true;
break;
}
@@ -143,9 +104,237 @@ class ImgDiagDumper {
os << "Could not find map for " << GetImageLocationBaseName();
return false;
}
+ // Sanity check boot_map_.
+ CHECK(boot_map_.end >= boot_map_.start);
+ boot_map_size_ = boot_map_.end - boot_map_.start;
+
+ pointer_size_ = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
+
+ // Open /proc/<image_diff_pid_>/mem and read as remote_contents_.
+ std::string image_file_name =
+ StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid_)); // NOLINT [runtime/int]
+ auto image_map_file = std::unique_ptr<File>(OS::OpenFileForReading(image_file_name.c_str()));
+ if (image_map_file == nullptr) {
+ os << "Failed to open " << image_file_name << " for reading";
+ return false;
+ }
+ std::vector<uint8_t> tmp_remote_contents(boot_map_size_);
+ if (!image_map_file->PreadFully(&tmp_remote_contents[0], boot_map_size_, boot_map_.start)) {
+ os << "Could not fully read file " << image_file_name;
+ return false;
+ }
+
+ // If zygote_diff_pid_ != -1, open /proc/<zygote_diff_pid_>/mem and read as zygote_contents_.
+ std::vector<uint8_t> tmp_zygote_contents;
+ if (zygote_diff_pid_ != -1) {
+ std::string zygote_file_name =
+ StringPrintf("/proc/%ld/mem", static_cast<long>(zygote_diff_pid_)); // NOLINT [runtime/int]
+ std::unique_ptr<File> zygote_map_file(OS::OpenFileForReading(zygote_file_name.c_str()));
+ if (zygote_map_file == nullptr) {
+ os << "Failed to open " << zygote_file_name << " for reading";
+ return false;
+ }
+ // The boot map should be at the same address.
+ tmp_zygote_contents.reserve(boot_map_size_);
+ if (!zygote_map_file->PreadFully(&tmp_zygote_contents[0], boot_map_size_, boot_map_.start)) {
+ LOG(WARNING) << "Could not fully read zygote file " << zygote_file_name;
+ return false;
+ }
+ }
+
+ // Open /proc/<image_diff_pid_>/pagemap.
+ std::string pagemap_file_name = StringPrintf(
+ "/proc/%ld/pagemap", static_cast<long>(image_diff_pid_)); // NOLINT [runtime/int]
+ auto tmp_pagemap_file =
+ std::unique_ptr<File>(OS::OpenFileForReading(pagemap_file_name.c_str()));
+ if (tmp_pagemap_file == nullptr) {
+ os << "Failed to open " << pagemap_file_name << " for reading: " << strerror(errno);
+ return false;
+ }
+
+ // Not truly clean, mmap-ing boot.art again would be more pristine, but close enough
+ const char* clean_pagemap_file_name = "/proc/self/pagemap";
+ auto tmp_clean_pagemap_file = std::unique_ptr<File>(
+ OS::OpenFileForReading(clean_pagemap_file_name));
+ if (tmp_clean_pagemap_file == nullptr) {
+ os << "Failed to open " << clean_pagemap_file_name << " for reading: " << strerror(errno);
+ return false;
+ }
+
+ auto tmp_kpageflags_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpageflags"));
+ if (tmp_kpageflags_file == nullptr) {
+ os << "Failed to open /proc/kpageflags for reading: " << strerror(errno);
+ return false;
+ }
+
+ auto tmp_kpagecount_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpagecount"));
+ if (tmp_kpagecount_file == nullptr) {
+ os << "Failed to open /proc/kpagecount for reading:" << strerror(errno);
+ return false;
+ }
+
+ // Commit the mappings, etc., to the object state.
+ proc_maps_ = std::move(tmp_proc_maps);
+ remote_contents_ = std::move(tmp_remote_contents);
+ zygote_contents_ = std::move(tmp_zygote_contents);
+ pagemap_file_ = std::move(*tmp_pagemap_file.release());
+ clean_pagemap_file_ = std::move(*tmp_clean_pagemap_file.release());
+ kpageflags_file_ = std::move(*tmp_kpageflags_file.release());
+ kpagecount_file_ = std::move(*tmp_kpagecount_file.release());
+
+ return true;
+ }
+
+ bool Dump() REQUIRES_SHARED(Locks::mutator_lock_) {
+ std::ostream& os = *os_;
+ os << "IMAGE LOCATION: " << image_location_ << "\n\n";
+
+ os << "MAGIC: " << image_header_.GetMagic() << "\n\n";
+
+ os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n";
+
+ PrintPidLine("IMAGE", image_diff_pid_);
+ os << "\n\n";
+ PrintPidLine("ZYGOTE", zygote_diff_pid_);
+ bool ret = true;
+ if (image_diff_pid_ >= 0 || zygote_diff_pid_ >= 0) {
+ if (image_diff_pid_ < 0) {
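+        // Only a zygote pid was specified: diff the zygote itself against the clean image.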
+ image_diff_pid_ = zygote_diff_pid_;
+ zygote_diff_pid_ = -1;
+ zygote_pid_only_ = true;
+ }
+ ret = DumpImageDiff();
+ os << "\n\n";
+ }
+
+ os << std::flush;
+
+ return ret;
+ }
+
+ private:
+ bool DumpImageDiff()
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return DumpImageDiffMap();
+ }
+
+ bool ComputeDirtyBytes(const uint8_t* image_begin,
+ size_t* dirty_pages /*out*/,
+ size_t* different_pages /*out*/,
+ size_t* different_bytes /*out*/,
+ size_t* different_int32s /*out*/,
+ size_t* private_pages /*out*/,
+ size_t* private_dirty_pages /*out*/,
+ std::set<size_t>* dirty_page_set_local) {
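+    // Compare the local image against the remote mapping one page and then one byte at a time,
+    // and consult the pagemap/kpageflags/kpagecount files to classify remote pages as dirty
+    // and/or private. All counts are accumulated into the out-parameters.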
+ std::ostream& os = *os_;
+
+ size_t virtual_page_idx = 0; // Virtual page number (for an absolute memory address)
+ size_t page_idx = 0; // Page index relative to 0
+ size_t previous_page_idx = 0; // Previous page index relative to 0
+
+ // Iterate through one page at a time. Boot map begin/end already implicitly aligned.
+ for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; begin += kPageSize) {
+ ptrdiff_t offset = begin - boot_map_.start;
+
+ // We treat the image header as part of the memory map for now
+ // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
+ // But it might still be interesting to see if any of the ImageHeader data mutated
+ const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
+ uint8_t* remote_ptr = &remote_contents_[offset];
+
+ if (memcmp(local_ptr, remote_ptr, kPageSize) != 0) {
+        (*different_pages)++;
+
+ // Count the number of 32-bit integers that are different.
+ for (size_t i = 0; i < kPageSize / sizeof(uint32_t); ++i) {
+ uint32_t* remote_ptr_int32 = reinterpret_cast<uint32_t*>(remote_ptr);
+ const uint32_t* local_ptr_int32 = reinterpret_cast<const uint32_t*>(local_ptr);
+
+ if (remote_ptr_int32[i] != local_ptr_int32[i]) {
+            (*different_int32s)++;
+ }
+ }
+ }
+ }
- // Future idea: diff against zygote so we can ignore the shared dirty pages.
- return DumpImageDiffMap(image_diff_pid, zygote_diff_pid, boot_map);
+ // Iterate through one byte at a time.
+ ptrdiff_t page_off_begin = image_header_.GetImageBegin() - image_begin;
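+    // page_off_begin is the offset of the image begin within its aligned-down page, so the
+    // page_idx computed below is relative to the page on which the image starts.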
+ for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; ++begin) {
+ previous_page_idx = page_idx;
+ ptrdiff_t offset = begin - boot_map_.start;
+
+ // We treat the image header as part of the memory map for now
+ // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
+ // But it might still be interesting to see if any of the ImageHeader data mutated
+ const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
+ uint8_t* remote_ptr = &remote_contents_[offset];
+
+ virtual_page_idx = reinterpret_cast<uintptr_t>(local_ptr) / kPageSize;
+
+ // Calculate the page index, relative to the 0th page where the image begins
+ page_idx = (offset + page_off_begin) / kPageSize;
+ if (*local_ptr != *remote_ptr) {
+ // Track number of bytes that are different
+        (*different_bytes)++;
+ }
+
+ // Independently count the # of dirty pages on the remote side
+ size_t remote_virtual_page_idx = begin / kPageSize;
+ if (previous_page_idx != page_idx) {
+ uint64_t page_count = 0xC0FFEE;
+ // TODO: virtual_page_idx needs to be from the same process
+ std::string error_msg;
+        int dirtiness = (IsPageDirty(&pagemap_file_,  // Image-diff-pid pagemap
+                                     &clean_pagemap_file_,  // Self pagemap
+ &kpageflags_file_,
+ &kpagecount_file_,
+ remote_virtual_page_idx, // potentially "dirty" page
+ virtual_page_idx, // true "clean" page
+ &page_count,
+ &error_msg));
+ if (dirtiness < 0) {
+ os << error_msg;
+ return false;
+ } else if (dirtiness > 0) {
+ (*dirty_pages)++;
+ dirty_page_set_local->insert(dirty_page_set_local->end(), virtual_page_idx);
+ }
+
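+        // page_count comes from /proc/kpagecount: the number of times the physical page is
+        // mapped. A count of 1 means the remote process holds the only mapping (private page).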
+ bool is_dirty = dirtiness > 0;
+ bool is_private = page_count == 1;
+
+ if (page_count == 1) {
+ (*private_pages)++;
+ }
+
+ if (is_dirty && is_private) {
+ (*private_dirty_pages)++;
+ }
+ }
+ }
+ return true;
+ }
+
+ bool ObjectIsOnDirtyPage(const uint8_t* item,
+ size_t size,
+ const std::set<size_t>& dirty_page_set_local) {
+ size_t page_off = 0;
+ size_t current_page_idx;
+ uintptr_t object_address = reinterpret_cast<uintptr_t>(item);
+ // Iterate every page this object belongs to
+ do {
+ current_page_idx = object_address / kPageSize + page_off;
+
+ if (dirty_page_set_local.find(current_page_idx) != dirty_page_set_local.end()) {
+ // This object is on a dirty page
+ return true;
+ }
+
+ page_off++;
+ } while ((current_page_idx * kPageSize) < RoundUp(object_address + size, kObjectAlignment));
+
+ return false;
}
static std::string PrettyFieldValue(ArtField* field, mirror::Object* obj)
@@ -199,24 +388,24 @@ class ImgDiagDumper {
// Aggregate and detail class data from an image diff.
struct ClassData {
- int dirty_object_count = 0;
+ size_t dirty_object_count = 0;
// Track only the byte-per-byte dirtiness (in bytes)
- int dirty_object_byte_count = 0;
+ size_t dirty_object_byte_count = 0;
// Track the object-by-object dirtiness (in bytes)
- int dirty_object_size_in_bytes = 0;
+ size_t dirty_object_size_in_bytes = 0;
- int clean_object_count = 0;
+ size_t clean_object_count = 0;
std::string descriptor;
- int false_dirty_byte_count = 0;
- int false_dirty_object_count = 0;
- std::vector<mirror::Object*> false_dirty_objects;
+ size_t false_dirty_byte_count = 0;
+ size_t false_dirty_object_count = 0;
+ std::vector<const uint8_t*> false_dirty_objects;
// Remote pointers to dirty objects
- std::vector<mirror::Object*> dirty_objects;
+ std::vector<const uint8_t*> dirty_objects;
};
void DiffObjectContents(mirror::Object* obj,
@@ -283,236 +472,185 @@ class ImgDiagDumper {
os << "\n";
}
- // Look at /proc/$pid/mem and only diff the things from there
- bool DumpImageDiffMap(pid_t image_diff_pid,
- pid_t zygote_diff_pid,
- const backtrace_map_t& boot_map)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- std::ostream& os = *os_;
- const PointerSize pointer_size = InstructionSetPointerSize(
- Runtime::Current()->GetInstructionSet());
+ struct ObjectRegionData {
+ // Count of objects that are different.
+ size_t different_objects = 0;
- std::string file_name =
- StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ // Local objects that are dirty (differ in at least one byte).
+ size_t dirty_object_bytes = 0;
+    std::vector<const uint8_t*>* dirty_objects = nullptr;
- size_t boot_map_size = boot_map.end - boot_map.start;
+ // Local objects that are clean, but located on dirty pages.
+ size_t false_dirty_object_bytes = 0;
+ std::vector<const uint8_t*> false_dirty_objects;
- // Open /proc/$pid/mem as a file
- auto map_file = std::unique_ptr<File>(OS::OpenFileForReading(file_name.c_str()));
- if (map_file == nullptr) {
- os << "Failed to open " << file_name << " for reading";
- return false;
+ // Image dirty objects
+ // If zygote_pid_only_ == true, these are shared dirty objects in the zygote.
+ // If zygote_pid_only_ == false, these are private dirty objects in the application.
+ std::set<const uint8_t*> image_dirty_objects;
+
+ // Zygote dirty objects (probably private dirty).
+ // We only add objects here if they differed in both the image and the zygote, so
+ // they are probably private dirty.
+ std::set<const uint8_t*> zygote_dirty_objects;
+
+ std::map<off_t /* field offset */, size_t /* count */> field_dirty_count;
+ };
+
+ void ComputeObjectDirty(const uint8_t* current,
+ const uint8_t* current_remote,
+ const uint8_t* current_zygote,
+ ClassData* obj_class_data,
+ size_t obj_size,
+ const std::set<size_t>& dirty_page_set_local,
+ ObjectRegionData* region_data /*out*/) {
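+    // Classify this object as zygote-dirty, image-dirty, clean, or false-dirty (clean bytes
+    // that happen to sit on a dirty page), updating per-class and region-wide counters.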
+ bool different_image_object = memcmp(current, current_remote, obj_size) != 0;
+ if (different_image_object) {
+ bool different_zygote_object = false;
+ if (!zygote_contents_.empty()) {
+ different_zygote_object = memcmp(current, current_zygote, obj_size) != 0;
+ }
+ if (different_zygote_object) {
+ // Different from zygote.
+ region_data->zygote_dirty_objects.insert(current);
+ } else {
+ // Just different from image.
+ region_data->image_dirty_objects.insert(current);
+ }
+
+ ++region_data->different_objects;
+ region_data->dirty_object_bytes += obj_size;
+
+ ++obj_class_data->dirty_object_count;
+
+ // Go byte-by-byte and figure out what exactly got dirtied
+ size_t dirty_byte_count_per_object = 0;
+ for (size_t i = 0; i < obj_size; ++i) {
+ if (current[i] != current_remote[i]) {
+ dirty_byte_count_per_object++;
+ }
+ }
+ obj_class_data->dirty_object_byte_count += dirty_byte_count_per_object;
+ obj_class_data->dirty_object_size_in_bytes += obj_size;
+ obj_class_data->dirty_objects.push_back(current_remote);
+ } else {
+ ++obj_class_data->clean_object_count;
}
- // Memory-map /proc/$pid/mem subset from the boot map
- CHECK(boot_map.end >= boot_map.start);
+ if (different_image_object) {
+ if (region_data->dirty_objects != nullptr) {
+ // print the fields that are dirty
+ for (size_t i = 0; i < obj_size; ++i) {
+ if (current[i] != current_remote[i]) {
+ region_data->field_dirty_count[i]++;
+ }
+ }
+
+ region_data->dirty_objects->push_back(current);
+ }
+ /*
+ * TODO: Resurrect this stuff in the client when we add ArtMethod iterator.
+ } else {
+ std::string descriptor = GetClassDescriptor(klass);
+ if (strcmp(descriptor.c_str(), "Ljava/lang/reflect/ArtMethod;") == 0) {
+ // this is an ArtMethod
+ ArtMethod* art_method = reinterpret_cast<ArtMethod*>(remote_obj);
+
+ // print the fields that are dirty
+ for (size_t i = 0; i < obj_size; ++i) {
+ if (current[i] != current_remote[i]) {
+ art_method_field_dirty_count[i]++;
+ }
+ }
+
+ art_method_dirty_objects.push_back(art_method);
+ }
+ }
+ */
+ } else if (ObjectIsOnDirtyPage(current, obj_size, dirty_page_set_local)) {
+ // This object was either never mutated or got mutated back to the same value.
+ // TODO: Do I want to distinguish a "different" vs a "dirty" page here?
+ region_data->false_dirty_objects.push_back(current);
+ obj_class_data->false_dirty_objects.push_back(current);
+ region_data->false_dirty_object_bytes += obj_size;
+ obj_class_data->false_dirty_byte_count += obj_size;
+ obj_class_data->false_dirty_object_count += 1;
+ }
+ }
+ // Look at /proc/$pid/mem and only diff the things from there
+ bool DumpImageDiffMap()
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ std::ostream& os = *os_;
std::string error_msg;
// Walk the bytes and diff against our boot image
- const ImageHeader& boot_image_header = image_header_;
-
os << "\nObserving boot image header at address "
- << reinterpret_cast<const void*>(&boot_image_header)
+ << reinterpret_cast<const void*>(&image_header_)
<< "\n\n";
- const uint8_t* image_begin_unaligned = boot_image_header.GetImageBegin();
+ const uint8_t* image_begin_unaligned = image_header_.GetImageBegin();
const uint8_t* image_mirror_end_unaligned = image_begin_unaligned +
- boot_image_header.GetImageSection(ImageHeader::kSectionObjects).Size();
- const uint8_t* image_end_unaligned = image_begin_unaligned + boot_image_header.GetImageSize();
+ image_header_.GetImageSection(ImageHeader::kSectionObjects).Size();
+ const uint8_t* image_end_unaligned = image_begin_unaligned + image_header_.GetImageSize();
// Adjust range to nearest page
const uint8_t* image_begin = AlignDown(image_begin_unaligned, kPageSize);
const uint8_t* image_end = AlignUp(image_end_unaligned, kPageSize);
- ptrdiff_t page_off_begin = boot_image_header.GetImageBegin() - image_begin;
-
- if (reinterpret_cast<uintptr_t>(image_begin) > boot_map.start ||
- reinterpret_cast<uintptr_t>(image_end) < boot_map.end) {
+ if (reinterpret_cast<uintptr_t>(image_begin) > boot_map_.start ||
+ reinterpret_cast<uintptr_t>(image_end) < boot_map_.end) {
// Sanity check that we aren't trying to read a completely different boot image
os << "Remote boot map is out of range of local boot map: " <<
"local begin " << reinterpret_cast<const void*>(image_begin) <<
", local end " << reinterpret_cast<const void*>(image_end) <<
- ", remote begin " << reinterpret_cast<const void*>(boot_map.start) <<
- ", remote end " << reinterpret_cast<const void*>(boot_map.end);
+ ", remote begin " << reinterpret_cast<const void*>(boot_map_.start) <<
+ ", remote end " << reinterpret_cast<const void*>(boot_map_.end);
return false;
// If we wanted even more validation we could map the ImageHeader from the file
}
- std::vector<uint8_t> remote_contents(boot_map_size);
- if (!map_file->PreadFully(&remote_contents[0], boot_map_size, boot_map.start)) {
- os << "Could not fully read file " << file_name;
- return false;
- }
-
- std::vector<uint8_t> zygote_contents;
- std::unique_ptr<File> zygote_map_file;
- if (zygote_diff_pid != -1) {
- std::string zygote_file_name =
- StringPrintf("/proc/%ld/mem", static_cast<long>(zygote_diff_pid)); // NOLINT [runtime/int]
- zygote_map_file.reset(OS::OpenFileForReading(zygote_file_name.c_str()));
- // The boot map should be at the same address.
- zygote_contents.resize(boot_map_size);
- if (!zygote_map_file->PreadFully(&zygote_contents[0], boot_map_size, boot_map.start)) {
- LOG(WARNING) << "Could not fully read zygote file " << zygote_file_name;
- zygote_contents.clear();
- }
- }
-
- std::string page_map_file_name = StringPrintf(
- "/proc/%ld/pagemap", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
- auto page_map_file = std::unique_ptr<File>(OS::OpenFileForReading(page_map_file_name.c_str()));
- if (page_map_file == nullptr) {
- os << "Failed to open " << page_map_file_name << " for reading: " << strerror(errno);
- return false;
- }
-
- // Not truly clean, mmap-ing boot.art again would be more pristine, but close enough
- const char* clean_page_map_file_name = "/proc/self/pagemap";
- auto clean_page_map_file = std::unique_ptr<File>(
- OS::OpenFileForReading(clean_page_map_file_name));
- if (clean_page_map_file == nullptr) {
- os << "Failed to open " << clean_page_map_file_name << " for reading: " << strerror(errno);
- return false;
- }
-
- auto kpage_flags_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpageflags"));
- if (kpage_flags_file == nullptr) {
- os << "Failed to open /proc/kpageflags for reading: " << strerror(errno);
- return false;
- }
-
- auto kpage_count_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpagecount"));
- if (kpage_count_file == nullptr) {
- os << "Failed to open /proc/kpagecount for reading:" << strerror(errno);
- return false;
- }
-
- // Set of the remote virtual page indices that are dirty
- std::set<size_t> dirty_page_set_remote;
- // Set of the local virtual page indices that are dirty
- std::set<size_t> dirty_page_set_local;
-
- size_t different_int32s = 0;
- size_t different_bytes = 0;
- size_t different_pages = 0;
- size_t virtual_page_idx = 0; // Virtual page number (for an absolute memory address)
- size_t page_idx = 0; // Page index relative to 0
- size_t previous_page_idx = 0; // Previous page index relative to 0
size_t dirty_pages = 0;
+ size_t different_pages = 0;
+ size_t different_bytes = 0;
+ size_t different_int32s = 0;
size_t private_pages = 0;
size_t private_dirty_pages = 0;
- // Iterate through one page at a time. Boot map begin/end already implicitly aligned.
- for (uintptr_t begin = boot_map.start; begin != boot_map.end; begin += kPageSize) {
- ptrdiff_t offset = begin - boot_map.start;
-
- // We treat the image header as part of the memory map for now
- // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
- // But it might still be interesting to see if any of the ImageHeader data mutated
- const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&boot_image_header) + offset;
- uint8_t* remote_ptr = &remote_contents[offset];
-
- if (memcmp(local_ptr, remote_ptr, kPageSize) != 0) {
- different_pages++;
-
- // Count the number of 32-bit integers that are different.
- for (size_t i = 0; i < kPageSize / sizeof(uint32_t); ++i) {
- uint32_t* remote_ptr_int32 = reinterpret_cast<uint32_t*>(remote_ptr);
- const uint32_t* local_ptr_int32 = reinterpret_cast<const uint32_t*>(local_ptr);
-
- if (remote_ptr_int32[i] != local_ptr_int32[i]) {
- different_int32s++;
- }
- }
- }
- }
-
- // Iterate through one byte at a time.
- for (uintptr_t begin = boot_map.start; begin != boot_map.end; ++begin) {
- previous_page_idx = page_idx;
- ptrdiff_t offset = begin - boot_map.start;
-
- // We treat the image header as part of the memory map for now
- // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
- // But it might still be interesting to see if any of the ImageHeader data mutated
- const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&boot_image_header) + offset;
- uint8_t* remote_ptr = &remote_contents[offset];
-
- virtual_page_idx = reinterpret_cast<uintptr_t>(local_ptr) / kPageSize;
-
- // Calculate the page index, relative to the 0th page where the image begins
- page_idx = (offset + page_off_begin) / kPageSize;
- if (*local_ptr != *remote_ptr) {
- // Track number of bytes that are different
- different_bytes++;
- }
-
- // Independently count the # of dirty pages on the remote side
- size_t remote_virtual_page_idx = begin / kPageSize;
- if (previous_page_idx != page_idx) {
- uint64_t page_count = 0xC0FFEE;
- // TODO: virtual_page_idx needs to be from the same process
- int dirtiness = (IsPageDirty(page_map_file.get(), // Image-diff-pid procmap
- clean_page_map_file.get(), // Self procmap
- kpage_flags_file.get(),
- kpage_count_file.get(),
- remote_virtual_page_idx, // potentially "dirty" page
- virtual_page_idx, // true "clean" page
- &page_count,
- &error_msg));
- if (dirtiness < 0) {
- os << error_msg;
- return false;
- } else if (dirtiness > 0) {
- dirty_pages++;
- dirty_page_set_remote.insert(dirty_page_set_remote.end(), remote_virtual_page_idx);
- dirty_page_set_local.insert(dirty_page_set_local.end(), virtual_page_idx);
- }
-
- bool is_dirty = dirtiness > 0;
- bool is_private = page_count == 1;
-
- if (page_count == 1) {
- private_pages++;
- }
+ // Set of the local virtual page indices that are dirty
+ std::set<size_t> dirty_page_set_local;
- if (is_dirty && is_private) {
- private_dirty_pages++;
- }
- }
+ if (!ComputeDirtyBytes(image_begin,
+ &dirty_pages,
+ &different_pages,
+ &different_bytes,
+ &different_int32s,
+ &private_pages,
+ &private_dirty_pages,
+ &dirty_page_set_local)) {
+ return false;
}
std::map<mirror::Class*, ClassData> class_data;
// Walk each object in the remote image space and compare it against ours
- size_t different_objects = 0;
-
std::map<off_t /* field offset */, int /* count */> art_method_field_dirty_count;
std::vector<ArtMethod*> art_method_dirty_objects;
- std::map<off_t /* field offset */, int /* count */> class_field_dirty_count;
- std::vector<mirror::Class*> class_dirty_objects;
+ std::map<off_t /* field offset */, size_t /* count */> class_field_dirty_count;
+ std::vector<const uint8_t*> class_dirty_objects;
- // List of local objects that are clean, but located on dirty pages.
- std::vector<mirror::Object*> false_dirty_objects;
- size_t false_dirty_object_bytes = 0;
// Look up remote classes by their descriptor
std::map<std::string, mirror::Class*> remote_class_map;
// Look up local classes by their descriptor
std::map<std::string, mirror::Class*> local_class_map;
- // Objects that are dirty against the image (possibly shared or private dirty).
- std::set<mirror::Object*> image_dirty_objects;
-
- // Objects that are dirty against the zygote (probably private dirty).
- std::set<mirror::Object*> zygote_dirty_objects;
-
- size_t dirty_object_bytes = 0;
const uint8_t* begin_image_ptr = image_begin_unaligned;
const uint8_t* end_image_ptr = image_mirror_end_unaligned;
+ ObjectRegionData region_data;
+
const uint8_t* current = begin_image_ptr + RoundUp(sizeof(ImageHeader), kObjectAlignment);
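+    // Objects start at the first object-aligned offset past the ImageHeader and are walked in
+    // address order until the end of the objects section.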
while (reinterpret_cast<uintptr_t>(current) < reinterpret_cast<uintptr_t>(end_image_ptr)) {
CHECK_ALIGNED(current, kObjectAlignment);
@@ -524,125 +662,56 @@ class ImgDiagDumper {
obj->AssertReadBarrierState();
}
- // Iterate every page this object belongs to
- bool on_dirty_page = false;
- size_t page_off = 0;
- size_t current_page_idx;
- uintptr_t object_address;
- do {
- object_address = reinterpret_cast<uintptr_t>(current);
- current_page_idx = object_address / kPageSize + page_off;
-
- if (dirty_page_set_local.find(current_page_idx) != dirty_page_set_local.end()) {
- // This object is on a dirty page
- on_dirty_page = true;
- }
-
- page_off++;
- } while ((current_page_idx * kPageSize) <
- RoundUp(object_address + obj->SizeOf(), kObjectAlignment));
-
mirror::Class* klass = obj->GetClass();
+ size_t obj_size = obj->SizeOf();
+ ClassData& obj_class_data = class_data[klass];
// Check against the other object and see if they are different
ptrdiff_t offset = current - begin_image_ptr;
- const uint8_t* current_remote = &remote_contents[offset];
- mirror::Object* remote_obj = reinterpret_cast<mirror::Object*>(
- const_cast<uint8_t*>(current_remote));
-
- bool different_image_object = memcmp(current, current_remote, obj->SizeOf()) != 0;
- if (different_image_object) {
- bool different_zygote_object = false;
- if (!zygote_contents.empty()) {
- const uint8_t* zygote_ptr = &zygote_contents[offset];
- different_zygote_object = memcmp(current, zygote_ptr, obj->SizeOf()) != 0;
- }
- if (different_zygote_object) {
- // Different from zygote.
- zygote_dirty_objects.insert(obj);
- } else {
- // Just different from iamge.
- image_dirty_objects.insert(obj);
- }
-
- different_objects++;
- dirty_object_bytes += obj->SizeOf();
-
- ++class_data[klass].dirty_object_count;
+ const uint8_t* current_remote = &remote_contents_[offset];
+ const uint8_t* current_zygote =
+ zygote_contents_.empty() ? nullptr : &zygote_contents_[offset];
- // Go byte-by-byte and figure out what exactly got dirtied
- size_t dirty_byte_count_per_object = 0;
- for (size_t i = 0; i < obj->SizeOf(); ++i) {
- if (current[i] != current_remote[i]) {
- dirty_byte_count_per_object++;
- }
- }
- class_data[klass].dirty_object_byte_count += dirty_byte_count_per_object;
- class_data[klass].dirty_object_size_in_bytes += obj->SizeOf();
- class_data[klass].dirty_objects.push_back(remote_obj);
- } else {
- ++class_data[klass].clean_object_count;
+ std::map<off_t /* field offset */, size_t /* count */>* field_dirty_count = nullptr;
+ if (klass->IsClassClass()) {
+ field_dirty_count = &class_field_dirty_count;
}
- std::string descriptor = GetClassDescriptor(klass);
- if (different_image_object) {
- if (klass->IsClassClass()) {
- // this is a "Class"
- mirror::Class* obj_as_class = reinterpret_cast<mirror::Class*>(remote_obj);
-
- // print the fields that are dirty
- for (size_t i = 0; i < obj->SizeOf(); ++i) {
- if (current[i] != current_remote[i]) {
- class_field_dirty_count[i]++;
- }
- }
-
- class_dirty_objects.push_back(obj_as_class);
- } else if (strcmp(descriptor.c_str(), "Ljava/lang/reflect/ArtMethod;") == 0) {
- // this is an ArtMethod
- ArtMethod* art_method = reinterpret_cast<ArtMethod*>(remote_obj);
-
- // print the fields that are dirty
- for (size_t i = 0; i < obj->SizeOf(); ++i) {
- if (current[i] != current_remote[i]) {
- art_method_field_dirty_count[i]++;
- }
- }
-
- art_method_dirty_objects.push_back(art_method);
- }
- } else if (on_dirty_page) {
- // This object was either never mutated or got mutated back to the same value.
- // TODO: Do I want to distinguish a "different" vs a "dirty" page here?
- false_dirty_objects.push_back(obj);
- class_data[klass].false_dirty_objects.push_back(obj);
- false_dirty_object_bytes += obj->SizeOf();
- class_data[obj->GetClass()].false_dirty_byte_count += obj->SizeOf();
- class_data[obj->GetClass()].false_dirty_object_count += 1;
- }
+ ComputeObjectDirty(current,
+ current_remote,
+ current_zygote,
+ &obj_class_data,
+ obj_size,
+ dirty_page_set_local,
+ &region_data);
+ // Object specific stuff.
+ std::string descriptor = GetClassDescriptor(klass);
if (strcmp(descriptor.c_str(), "Ljava/lang/Class;") == 0) {
local_class_map[descriptor] = reinterpret_cast<mirror::Class*>(obj);
+ mirror::Object* remote_obj = reinterpret_cast<mirror::Object*>(
+ const_cast<uint8_t*>(current_remote));
remote_class_map[descriptor] = reinterpret_cast<mirror::Class*>(remote_obj);
}
// Unconditionally store the class descriptor in case we need it later
- class_data[klass].descriptor = descriptor;
- current += RoundUp(obj->SizeOf(), kObjectAlignment);
+ obj_class_data.descriptor = descriptor;
+
+ current += RoundUp(obj_size, kObjectAlignment);
}
// Looking at only dirty pages, figure out how many of those bytes belong to dirty objects.
- float true_dirtied_percent = dirty_object_bytes * 1.0f / (dirty_pages * kPageSize);
+ float true_dirtied_percent = region_data.dirty_object_bytes * 1.0f / (dirty_pages * kPageSize);
size_t false_dirty_pages = dirty_pages - different_pages;
- os << "Mapping at [" << reinterpret_cast<void*>(boot_map.start) << ", "
- << reinterpret_cast<void*>(boot_map.end) << ") had: \n "
+ os << "Mapping at [" << reinterpret_cast<void*>(boot_map_.start) << ", "
+ << reinterpret_cast<void*>(boot_map_.end) << ") had: \n "
<< different_bytes << " differing bytes, \n "
<< different_int32s << " differing int32s, \n "
- << different_objects << " different objects, \n "
- << dirty_object_bytes << " different object [bytes], \n "
- << false_dirty_objects.size() << " false dirty objects,\n "
- << false_dirty_object_bytes << " false dirty object [bytes], \n "
+ << region_data.different_objects << " different objects, \n "
+ << region_data.dirty_object_bytes << " different object [bytes], \n "
+ << region_data.false_dirty_objects.size() << " false dirty objects,\n "
+ << region_data.false_dirty_object_bytes << " false dirty object [bytes], \n "
<< true_dirtied_percent << " different objects-vs-total in a dirty page;\n "
<< different_pages << " different pages; \n "
<< dirty_pages << " pages are dirty; \n "
@@ -657,22 +726,37 @@ class ImgDiagDumper {
auto clean_object_class_values = SortByValueDesc<mirror::Class*, int, ClassData>(
class_data, [](const ClassData& d) { return d.clean_object_count; });
- if (!zygote_dirty_objects.empty()) {
- os << "\n" << " Dirty objects compared to zygote (probably private dirty): "
- << zygote_dirty_objects.size() << "\n";
- for (mirror::Object* obj : zygote_dirty_objects) {
- const uint8_t* obj_bytes = reinterpret_cast<const uint8_t*>(obj);
+ if (!region_data.zygote_dirty_objects.empty()) {
+ // We only reach this point if both pids were specified. Furthermore,
+ // objects are only displayed here if they differed in both the image
+ // and the zygote, so they are probably private dirty.
+ CHECK(image_diff_pid_ > 0 && zygote_diff_pid_ > 0);
+      os << "\n" << " Zygote dirty objects (probably private dirty): "
+ << region_data.zygote_dirty_objects.size() << "\n";
+ for (const uint8_t* obj_bytes : region_data.zygote_dirty_objects) {
+ auto obj = const_cast<mirror::Object*>(reinterpret_cast<const mirror::Object*>(obj_bytes));
ptrdiff_t offset = obj_bytes - begin_image_ptr;
- uint8_t* remote_bytes = &zygote_contents[offset];
+ uint8_t* remote_bytes = &zygote_contents_[offset];
DiffObjectContents(obj, remote_bytes, os);
}
}
- os << "\n" << " Dirty objects compared to image (private or shared dirty): "
- << image_dirty_objects.size() << "\n";
- for (mirror::Object* obj : image_dirty_objects) {
- const uint8_t* obj_bytes = reinterpret_cast<const uint8_t*>(obj);
+ os << "\n";
+ if (zygote_pid_only_) {
+ // image_diff_pid_ is the zygote process.
+ os << " Zygote shared dirty objects: ";
+ } else {
+ // image_diff_pid_ is actually the image (application) process.
+ if (zygote_diff_pid_ > 0) {
+ os << " Application dirty objects (private dirty): ";
+ } else {
+ os << " Application dirty objects (unknown whether private or shared dirty): ";
+ }
+ }
+ os << region_data.image_dirty_objects.size() << "\n";
+ for (const uint8_t* obj_bytes : region_data.image_dirty_objects) {
+ auto obj = const_cast<mirror::Object*>(reinterpret_cast<const mirror::Object*>(obj_bytes));
ptrdiff_t offset = obj_bytes - begin_image_ptr;
- uint8_t* remote_bytes = &remote_contents[offset];
+ uint8_t* remote_bytes = &remote_contents_[offset];
DiffObjectContents(obj, remote_bytes, os);
}
@@ -716,27 +800,26 @@ class ImgDiagDumper {
os << " field contents:\n";
const auto& dirty_objects_list = class_data[klass].dirty_objects;
- for (mirror::Object* obj : dirty_objects_list) {
+ for (const uint8_t* uobj : dirty_objects_list) {
+ auto obj = const_cast<mirror::Object*>(reinterpret_cast<const mirror::Object*>(uobj));
// remote method
auto art_method = reinterpret_cast<ArtMethod*>(obj);
// remote class
mirror::Class* remote_declaring_class =
- FixUpRemotePointer(art_method->GetDeclaringClass(), remote_contents, boot_map);
+ FixUpRemotePointer(art_method->GetDeclaringClass(), remote_contents_, boot_map_);
// local class
mirror::Class* declaring_class =
- RemoteContentsPointerToLocal(remote_declaring_class,
- remote_contents,
- boot_image_header);
+ RemoteContentsPointerToLocal(remote_declaring_class, remote_contents_, image_header_);
os << " " << reinterpret_cast<void*>(obj) << " ";
os << " entryPointFromJni: "
<< reinterpret_cast<const void*>(
- art_method->GetDataPtrSize(pointer_size)) << ", ";
+ art_method->GetDataPtrSize(pointer_size_)) << ", ";
os << " entryPointFromQuickCompiledCode: "
<< reinterpret_cast<const void*>(
- art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size))
+ art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_))
<< ", ";
os << " isNative? " << (art_method->IsNative() ? "yes" : "no") << ", ";
os << " class_status (local): " << declaring_class->GetStatus();
@@ -749,13 +832,13 @@ class ImgDiagDumper {
for (size_t i = 0; i < class_dirty_objects.size() && i < kMaxAddressPrint; ++i) {
auto class_ptr = class_dirty_objects[i];
- os << reinterpret_cast<void*>(class_ptr) << ", ";
+ os << reinterpret_cast<const void*>(class_ptr) << ", ";
}
os << "\n";
os << " dirty byte +offset:count list = ";
auto class_field_dirty_count_sorted =
- SortByValueDesc<off_t, int, int>(class_field_dirty_count);
+ SortByValueDesc<off_t, int, size_t>(class_field_dirty_count);
for (auto pair : class_field_dirty_count_sorted) {
off_t offset = pair.second;
int count = pair.first;
@@ -765,17 +848,19 @@ class ImgDiagDumper {
os << "\n";
os << " field contents:\n";
+ // TODO: templatize this to avoid the awful casts down to uint8_t* and back.
const auto& dirty_objects_list = class_data[klass].dirty_objects;
- for (mirror::Object* obj : dirty_objects_list) {
+ for (const uint8_t* uobj : dirty_objects_list) {
+ auto obj = const_cast<mirror::Object*>(reinterpret_cast<const mirror::Object*>(uobj));
// remote class object
auto remote_klass = reinterpret_cast<mirror::Class*>(obj);
// local class object
auto local_klass = RemoteContentsPointerToLocal(remote_klass,
- remote_contents,
- boot_image_header);
+ remote_contents_,
+ image_header_);
- os << " " << reinterpret_cast<void*>(obj) << " ";
+ os << " " << reinterpret_cast<const void*>(obj) << " ";
os << " class_status (remote): " << remote_klass->GetStatus() << ", ";
os << " class_status (local): " << local_klass->GetStatus();
os << "\n";
@@ -801,23 +886,25 @@ class ImgDiagDumper {
<< ")\n";
if (strcmp(descriptor.c_str(), "Ljava/lang/reflect/ArtMethod;") == 0) {
+ // TODO: templatize this to avoid the awful casts down to uint8_t* and back.
auto& art_method_false_dirty_objects = class_data[klass].false_dirty_objects;
os << " field contents:\n";
- for (mirror::Object* obj : art_method_false_dirty_objects) {
+ for (const uint8_t* uobj : art_method_false_dirty_objects) {
+ auto obj = const_cast<mirror::Object*>(reinterpret_cast<const mirror::Object*>(uobj));
// local method
auto art_method = reinterpret_cast<ArtMethod*>(obj);
// local class
mirror::Class* declaring_class = art_method->GetDeclaringClass();
- os << " " << reinterpret_cast<void*>(obj) << " ";
+ os << " " << reinterpret_cast<const void*>(obj) << " ";
os << " entryPointFromJni: "
<< reinterpret_cast<const void*>(
- art_method->GetDataPtrSize(pointer_size)) << ", ";
+ art_method->GetDataPtrSize(pointer_size_)) << ", ";
os << " entryPointFromQuickCompiledCode: "
<< reinterpret_cast<const void*>(
- art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size))
+ art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_))
<< ", ";
os << " isNative? " << (art_method->IsNative() ? "yes" : "no") << ", ";
os << " class_status (local): " << declaring_class->GetStatus();
@@ -932,18 +1019,18 @@ class ImgDiagDumper {
}
static int IsPageDirty(File* page_map_file,
- File* clean_page_map_file,
- File* kpage_flags_file,
- File* kpage_count_file,
+ File* clean_pagemap_file,
+ File* kpageflags_file,
+ File* kpagecount_file,
size_t virtual_page_idx,
size_t clean_virtual_page_idx,
// Out parameters:
uint64_t* page_count, std::string* error_msg) {
CHECK(page_map_file != nullptr);
- CHECK(clean_page_map_file != nullptr);
- CHECK_NE(page_map_file, clean_page_map_file);
- CHECK(kpage_flags_file != nullptr);
- CHECK(kpage_count_file != nullptr);
+ CHECK(clean_pagemap_file != nullptr);
+ CHECK_NE(page_map_file, clean_pagemap_file);
+ CHECK(kpageflags_file != nullptr);
+ CHECK(kpagecount_file != nullptr);
CHECK(page_count != nullptr);
CHECK(error_msg != nullptr);
@@ -961,27 +1048,27 @@ class ImgDiagDumper {
}
uint64_t page_frame_number_clean = 0;
- if (!GetPageFrameNumber(clean_page_map_file, clean_virtual_page_idx, &page_frame_number_clean,
+ if (!GetPageFrameNumber(clean_pagemap_file, clean_virtual_page_idx, &page_frame_number_clean,
error_msg)) {
return -1;
}
// Read 64-bit entry from /proc/kpageflags to get the dirty bit for a page
uint64_t kpage_flags_entry = 0;
- if (!kpage_flags_file->PreadFully(&kpage_flags_entry,
+ if (!kpageflags_file->PreadFully(&kpage_flags_entry,
kPageFlagsEntrySize,
page_frame_number * kPageFlagsEntrySize)) {
*error_msg = StringPrintf("Failed to read the page flags from %s",
- kpage_flags_file->GetPath().c_str());
+ kpageflags_file->GetPath().c_str());
return -1;
}
    // Read 64-bit entry from /proc/kpagecount to get mapping counts for a page
- if (!kpage_count_file->PreadFully(page_count /*out*/,
+ if (!kpagecount_file->PreadFully(page_count /*out*/,
kPageCountEntrySize,
page_frame_number * kPageCountEntrySize)) {
*error_msg = StringPrintf("Failed to read the page count from %s",
- kpage_count_file->GetPath().c_str());
+ kpagecount_file->GetPath().c_str());
return -1;
}
@@ -1002,7 +1089,29 @@ class ImgDiagDumper {
return page_frame_number != page_frame_number_clean;
}
- private:
+ void PrintPidLine(const std::string& kind, pid_t pid) {
+ if (pid < 0) {
+ *os_ << kind << " DIFF PID: disabled\n\n";
+ } else {
+ *os_ << kind << " DIFF PID (" << pid << "): ";
+ }
+ }
+
+ static bool EndsWith(const std::string& str, const std::string& suffix) {
+ return str.size() >= suffix.size() &&
+ str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
+ }
+
+ // Return suffix of the file path after the last /. (e.g. /foo/bar -> bar, bar -> bar)
+ static std::string BaseName(const std::string& str) {
+ size_t idx = str.rfind('/');
+ if (idx == std::string::npos) {
+ return str;
+ }
+
+ return str.substr(idx + 1);
+ }
+
// Return the image location, stripped of any directories, e.g. "boot.art" or "core.art"
std::string GetImageLocationBaseName() const {
return BaseName(std::string(image_location_));
@@ -1013,6 +1122,28 @@ class ImgDiagDumper {
const std::string image_location_;
pid_t image_diff_pid_; // Dump image diff against boot.art if pid is non-negative
pid_t zygote_diff_pid_; // Dump image diff against zygote boot.art if pid is non-negative
+ bool zygote_pid_only_; // The user only specified a pid for the zygote.
+
+ // Pointer size constant for object fields, etc.
+ PointerSize pointer_size_;
+ // BacktraceMap used for finding the memory mapping of the image file.
+ std::unique_ptr<BacktraceMap> proc_maps_;
+ // Boot image mapping.
+ backtrace_map_t boot_map_{}; // NOLINT
+ // The size of the boot image mapping.
+ size_t boot_map_size_;
+  // The contents of the boot image mapping, read from /proc/<image_diff_pid_>/mem.
+ std::vector<uint8_t> remote_contents_;
+  // The contents of the boot image mapping, read from /proc/<zygote_diff_pid_>/mem.
+ std::vector<uint8_t> zygote_contents_;
+  // A File for reading /proc/<image_diff_pid_>/pagemap.
+ File pagemap_file_;
+ // A File for reading /proc/self/pagemap.
+ File clean_pagemap_file_;
+ // A File for reading /proc/kpageflags.
+ File kpageflags_file_;
+ // A File for reading /proc/kpagecount.
+ File kpagecount_file_;
DISALLOW_COPY_AND_ASSIGN(ImgDiagDumper);
};
@@ -1037,6 +1168,9 @@ static int DumpImage(Runtime* runtime,
image_space->GetImageLocation(),
image_diff_pid,
zygote_diff_pid);
+ if (!img_diag_dumper.Init()) {
+ return EXIT_FAILURE;
+ }
if (!img_diag_dumper.Dump()) {
return EXIT_FAILURE;
}
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 9b95de2fb0..d8bafc011a 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -246,8 +246,7 @@ class OatSymbolizer FINAL {
// might be a static initializer.
ClassDataItemIterator it(dex_file, class_data);
uint32_t class_method_idx = 0;
- for (; it.HasNextStaticField(); it.Next()) { /* skip */ }
- for (; it.HasNextInstanceField(); it.Next()) { /* skip */ }
+ it.SkipAllFields();
for (; it.HasNextDirectMethod() || it.HasNextVirtualMethod(); it.Next()) {
WalkOatMethod(oat_class.GetOatMethod(class_method_idx++),
dex_file,
@@ -769,7 +768,7 @@ class OatDumper {
const uint8_t* class_data = dex_file->GetClassData(class_def);
if (class_data != nullptr) {
ClassDataItemIterator it(*dex_file, class_data);
- SkipAllFields(it);
+ it.SkipAllFields();
uint32_t class_method_index = 0;
while (it.HasNextDirectMethod()) {
AddOffsets(oat_class.GetOatMethod(class_method_index++));
@@ -856,7 +855,7 @@ class OatDumper {
return;
}
ClassDataItemIterator it(dex_file, class_data);
- SkipAllFields(it);
+ it.SkipAllFields();
while (it.HasNextDirectMethod()) {
WalkCodeItem(dex_file, it.GetMethodCodeItem());
it.Next();
@@ -1076,15 +1075,6 @@ class OatDumper {
return true;
}
- static void SkipAllFields(ClassDataItemIterator& it) {
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
- }
-
bool DumpOatClass(VariableIndentationOutputStream* vios,
const OatFile::OatClass& oat_class, const DexFile& dex_file,
const DexFile::ClassDef& class_def, bool* stop_analysis) {
@@ -1096,7 +1086,7 @@ class OatDumper {
return success;
}
ClassDataItemIterator it(dex_file, class_data);
- SkipAllFields(it);
+ it.SkipAllFields();
uint32_t class_method_index = 0;
while (it.HasNextDirectMethod()) {
if (!DumpOatMethod(vios, class_def, class_method_index, oat_class, dex_file,
@@ -1405,6 +1395,54 @@ class OatDumper {
method_info);
}
+ static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
+    // According to the stack model, the first out is above the Method reference.
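+    // E.g. on a 32-bit ISA (4-byte method pointer) out 0 is at SP + 4 and out 1 at SP + 8.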
+ return static_cast<size_t>(InstructionSetPointerSize(isa)) + out_num * sizeof(uint32_t);
+ }
+
+ static uint32_t GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
+ uint32_t core_spills,
+ uint32_t fp_spills,
+ size_t frame_size,
+ int reg,
+ InstructionSet isa) {
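+    // The offsets below follow the quick frame layout implied by this arithmetic, from low to
+    // high addresses: the ArtMethod* slot at SP, the outs, the non-special temps, the locals at
+    // frame_size - spill_size - num_regs * sizeof(uint32_t), the spills (plus filler), and
+    // finally the ins, which live in the caller's frame above frame_size, past another
+    // ArtMethod* slot.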
+ PointerSize pointer_size = InstructionSetPointerSize(isa);
+ if (kIsDebugBuild) {
+ auto* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size);
+ }
+ }
+ DCHECK_ALIGNED(frame_size, kStackAlignment);
+ DCHECK_NE(reg, -1);
+ int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
+ + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
+ + sizeof(uint32_t); // Filler.
+ int num_regs = code_item->registers_size_ - code_item->ins_size_;
+ int temp_threshold = code_item->registers_size_;
+ const int max_num_special_temps = 1;
+ if (reg == temp_threshold) {
+ // The current method pointer corresponds to special location on stack.
+ return 0;
+ } else if (reg >= temp_threshold + max_num_special_temps) {
+ /*
+ * Special temporaries may have custom locations and the logic above deals with that.
+ * However, non-special temporaries are placed relative to the outs.
+ */
+ int temps_start = code_item->outs_size_ * sizeof(uint32_t)
+ + static_cast<size_t>(pointer_size) /* art method */;
+ int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
+ return temps_start + relative_offset;
+ } else if (reg < num_regs) {
+ int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
+ return locals_start + (reg * sizeof(uint32_t));
+ } else {
+ // Handle ins.
+ return frame_size + ((reg - num_regs) * sizeof(uint32_t))
+ + static_cast<size_t>(pointer_size) /* art method */;
+ }
+ }
+
void DumpVregLocations(std::ostream& os, const OatFile::OatMethod& oat_method,
const DexFile::CodeItem* code_item) {
if (code_item != nullptr) {
@@ -1424,13 +1462,12 @@ class OatDumper {
os << "\n\tlocals:";
}
- uint32_t offset = StackVisitor::GetVRegOffsetFromQuickCode(
- code_item,
- oat_method.GetCoreSpillMask(),
- oat_method.GetFpSpillMask(),
- oat_method.GetFrameSizeInBytes(),
- reg,
- GetInstructionSet());
+ uint32_t offset = GetVRegOffsetFromQuickCode(code_item,
+ oat_method.GetCoreSpillMask(),
+ oat_method.GetFpSpillMask(),
+ oat_method.GetFrameSizeInBytes(),
+ reg,
+ GetInstructionSet());
os << " v" << reg << "[sp + #" << offset << "]";
}
@@ -1439,7 +1476,7 @@ class OatDumper {
os << "\n\touts:";
}
- uint32_t offset = StackVisitor::GetOutVROffset(out_reg, GetInstructionSet());
+ uint32_t offset = GetOutVROffset(out_reg, GetInstructionSet());
os << " v" << out_reg << "[sp + #" << offset << "]";
}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index ec3481b622..848eb8d329 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -304,8 +304,10 @@ bool PatchOat::WriteImage(File* out) {
TimingLogger::ScopedTiming t("Writing image File", timings_);
std::string error_msg;
- ScopedFlock img_flock;
- img_flock.Init(out, &error_msg);
+ // No error checking here, this is best effort. The locking may or may not
+ // succeed and we don't really care either way.
+ ScopedFlock img_flock = LockedFile::DupOf(out->Fd(), out->GetPath(),
+ true /* read_only_mode */, &error_msg);
CHECK(image_ != nullptr);
CHECK(out != nullptr);
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index b9a85bc9af..c238f0d665 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -33,7 +33,7 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
ProfileCompilationInfo info;
// Load the reference profile.
- if (!info.Load(reference_profile_file.GetFile()->Fd())) {
+ if (!info.Load(reference_profile_file->Fd())) {
LOG(WARNING) << "Could not load reference profile file";
return kErrorBadProfiles;
}
@@ -45,7 +45,7 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
// Merge all current profiles.
for (size_t i = 0; i < profile_files.size(); i++) {
ProfileCompilationInfo cur_info;
- if (!cur_info.Load(profile_files[i].GetFile()->Fd())) {
+ if (!cur_info.Load(profile_files[i]->Fd())) {
LOG(WARNING) << "Could not load profile file at index " << i;
return kErrorBadProfiles;
}
@@ -62,11 +62,11 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
}
// We were successful in merging all profile information. Update the reference profile.
- if (!reference_profile_file.GetFile()->ClearContent()) {
+ if (!reference_profile_file->ClearContent()) {
PLOG(WARNING) << "Could not clear reference profile file";
return kErrorIO;
}
- if (!info.Save(reference_profile_file.GetFile()->Fd())) {
+ if (!info.Save(reference_profile_file->Fd())) {
LOG(WARNING) << "Could not save reference profile file";
return kErrorIO;
}
@@ -74,26 +74,15 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
return kCompile;
}
-static bool InitFlock(const std::string& filename, ScopedFlock& flock, std::string* error) {
- return flock.Init(filename.c_str(), O_RDWR, /* block */ true, error);
-}
-
-static bool InitFlock(int fd, ScopedFlock& flock, std::string* error) {
- DCHECK_GE(fd, 0);
- // We do not own the descriptor, so disable auto-close and don't check usage.
- File file(fd, false);
- file.DisableAutoClose();
- return flock.Init(&file, error);
-}
-
-class ScopedCollectionFlock {
+class ScopedFlockList {
public:
- explicit ScopedCollectionFlock(size_t size) : flocks_(size) {}
+ explicit ScopedFlockList(size_t size) : flocks_(size) {}
// Will block until all the locks are acquired.
bool Init(const std::vector<std::string>& filenames, /* out */ std::string* error) {
for (size_t i = 0; i < filenames.size(); i++) {
- if (!InitFlock(filenames[i], flocks_[i], error)) {
+ flocks_[i] = LockedFile::Open(filenames[i].c_str(), O_RDWR, /* block */ true, error);
+ if (flocks_[i].get() == nullptr) {
*error += " (index=" + std::to_string(i) + ")";
return false;
}
@@ -105,7 +94,9 @@ class ScopedCollectionFlock {
bool Init(const std::vector<int>& fds, /* out */ std::string* error) {
for (size_t i = 0; i < fds.size(); i++) {
DCHECK_GE(fds[i], 0);
- if (!InitFlock(fds[i], flocks_[i], error)) {
+ flocks_[i] = LockedFile::DupOf(fds[i], "profile-file",
+ true /* read_only_mode */, error);
+ if (flocks_[i].get() == nullptr) {
*error += " (index=" + std::to_string(i) + ")";
return false;
}
@@ -123,39 +114,47 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<int>& profile_files_fd,
int reference_profile_file_fd) {
DCHECK_GE(reference_profile_file_fd, 0);
+
std::string error;
- ScopedCollectionFlock profile_files_flocks(profile_files_fd.size());
- if (!profile_files_flocks.Init(profile_files_fd, &error)) {
+ ScopedFlockList profile_files(profile_files_fd.size());
+ if (!profile_files.Init(profile_files_fd, &error)) {
LOG(WARNING) << "Could not lock profile files: " << error;
return kErrorCannotLock;
}
- ScopedFlock reference_profile_file_flock;
- if (!InitFlock(reference_profile_file_fd, reference_profile_file_flock, &error)) {
+
+ // The reference_profile_file is opened in read/write mode because it's
+ // cleared after processing.
+ ScopedFlock reference_profile_file = LockedFile::DupOf(reference_profile_file_fd,
+ "reference-profile",
+ false /* read_only_mode */,
+ &error);
+ if (reference_profile_file.get() == nullptr) {
LOG(WARNING) << "Could not lock reference profiled files: " << error;
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files_flocks.Get(),
- reference_profile_file_flock);
+ return ProcessProfilesInternal(profile_files.Get(), reference_profile_file);
}
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<std::string>& profile_files,
const std::string& reference_profile_file) {
std::string error;
- ScopedCollectionFlock profile_files_flocks(profile_files.size());
- if (!profile_files_flocks.Init(profile_files, &error)) {
+
+ ScopedFlockList profile_files_list(profile_files.size());
+ if (!profile_files_list.Init(profile_files, &error)) {
LOG(WARNING) << "Could not lock profile files: " << error;
return kErrorCannotLock;
}
- ScopedFlock reference_profile_file_flock;
- if (!InitFlock(reference_profile_file, reference_profile_file_flock, &error)) {
+
+ ScopedFlock locked_reference_profile_file = LockedFile::Open(
+ reference_profile_file.c_str(), O_RDWR, /* block */ true, &error);
+ if (locked_reference_profile_file.get() == nullptr) {
LOG(WARNING) << "Could not lock reference profile files: " << error;
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files_flocks.Get(),
- reference_profile_file_flock);
+ return ProcessProfilesInternal(profile_files_list.Get(), locked_reference_profile_file);
}
} // namespace art
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index ccf9ac6ad5..9e2ab39a48 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -86,14 +86,19 @@ class ProfileAssistantTest : public CommonRuntimeTest {
const ScratchFile& profile,
ProfileCompilationInfo* info) {
std::string dex_location = "location1" + id;
+ using Hotness = ProfileCompilationInfo::MethodHotness;
for (uint32_t idx : hot_methods) {
- info->AddMethodIndex(dex_location, checksum, idx, number_of_methods);
+ info->AddMethodIndex(Hotness::kFlagHot, dex_location, checksum, idx, number_of_methods);
}
for (uint32_t idx : startup_methods) {
- info->AddSampledMethod(/*startup*/true, dex_location, checksum, idx, number_of_methods);
+ info->AddMethodIndex(Hotness::kFlagStartup, dex_location, checksum, idx, number_of_methods);
}
for (uint32_t idx : post_startup_methods) {
- info->AddSampledMethod(/*startup*/false, dex_location, checksum, idx, number_of_methods);
+ info->AddMethodIndex(Hotness::kFlagPostStartup,
+ dex_location,
+ checksum,
+ idx,
+ number_of_methods);
}
ASSERT_TRUE(info->Save(GetFd(profile)));
ASSERT_EQ(0, profile.GetFile()->Flush());
@@ -609,7 +614,7 @@ TEST_F(ProfileAssistantTest, TestProfileCreationGenerateMethods) {
info.GetMethod(method.GetDexFile()->GetLocation(),
method.GetDexFile()->GetLocationChecksum(),
method.GetDexMethodIndex());
- ASSERT_TRUE(pmi != nullptr);
+ ASSERT_TRUE(pmi != nullptr) << method.PrettyMethod();
}
}
EXPECT_GT(method_count, 0u);
diff --git a/profman/profman.cc b/profman/profman.cc
index adef0d0332..d8b5dafffe 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -772,20 +772,20 @@ class ProfMan FINAL {
const uint8_t* class_data = dex_file->GetClassData(*class_def);
if (class_data != nullptr) {
ClassDataItemIterator it(*dex_file, class_data);
- while (it.HasNextStaticField() || it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
if (it.GetMethodCodeItemOffset() != 0) {
// Add all of the methods that have code to the profile.
const uint32_t method_idx = it.GetMemberIndex();
- methods.push_back(ProfileMethodInfo(dex_file, method_idx));
+ methods.push_back(ProfileMethodInfo(MethodReference(dex_file, method_idx)));
}
it.Next();
}
}
}
- profile->AddMethodsAndClasses(methods, resolved_class_set);
+ // TODO: Check return values?
+ profile->AddMethods(methods);
+ profile->AddClasses(resolved_class_set);
return true;
}
@@ -833,28 +833,23 @@ class ProfMan FINAL {
}
inline_caches.emplace_back(dex_pc, is_missing_types, classes);
}
- ProfileMethodInfo pmi(class_ref.dex_file, method_index, inline_caches);
+ MethodReference ref(class_ref.dex_file, method_index);
if (is_hot) {
- profile->AddMethod(pmi);
+ profile->AddMethod(ProfileMethodInfo(ref, inline_caches));
}
+ uint32_t flags = 0;
+ using Hotness = ProfileCompilationInfo::MethodHotness;
if (is_startup) {
- if (!profile->AddSampledMethod(/*is_startup*/ true,
- pmi.dex_file->GetLocation(),
- pmi.dex_file->GetLocationChecksum(),
- method_index,
- pmi.dex_file->NumMethodIds())) {
- return false;
- }
- DCHECK(profile->IsStartupOrHotMethod(MethodReference(pmi.dex_file, method_index)));
+ flags |= Hotness::kFlagStartup;
}
if (is_post_startup) {
- if (!profile->AddSampledMethod(/*is_startup*/ false,
- pmi.dex_file->GetLocation(),
- pmi.dex_file->GetLocationChecksum(),
- method_index,
- pmi.dex_file->NumMethodIds())) {
+ flags |= Hotness::kFlagPostStartup;
+ }
+ if (flags != 0) {
+ if (!profile->AddMethodIndex(static_cast<Hotness::Flag>(flags), ref)) {
return false;
}
+ DCHECK(profile->GetMethodHotness(ref).HasAnyFlags());
}
return true;
}
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 26e52e012e..20f95c0c74 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -105,7 +105,10 @@ cc_defaults {
"interpreter/interpreter_common.cc",
"interpreter/interpreter_intrinsics.cc",
"interpreter/interpreter_switch_impl.cc",
+ "interpreter/lock_count_data.cc",
+ "interpreter/shadow_frame.cc",
"interpreter/unstarted_runtime.cc",
+ "java_frame_root_info.cc",
"java_vm_ext.cc",
"jdwp/jdwp_event.cc",
"jdwp/jdwp_expand_buf.cc",
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index d1da67f26e..a5a65e6843 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -2003,10 +2003,11 @@ END art_invoke_obsolete_method_stub
ENTRY art_quick_instrumentation_entry
SETUP_SAVE_REFS_AND_ARGS_FRAME
sw $a0, 28($sp) # save arg0 in free arg slot
- move $a3, $ra # pass $ra
+ addiu $a3, $sp, ARG_SLOT_SIZE # Pass $sp.
la $t9, artInstrumentationMethodEntryFromCode
- jalr $t9 # (Method*, Object*, Thread*, LR)
+ jalr $t9 # (Method*, Object*, Thread*, SP)
move $a2, rSELF # pass Thread::Current
+ beqz $v0, .Ldeliver_instrumentation_entry_exception
move $t9, $v0 # $t9 holds reference to code
lw $a0, 28($sp) # restore arg0 from free arg slot
RESTORE_SAVE_REFS_AND_ARGS_FRAME
@@ -2024,26 +2025,35 @@ art_quick_instrumentation_exit:
SETUP_SAVE_REFS_ONLY_FRAME
addiu $sp, $sp, -16 # allocate temp storage on the stack
.cfi_adjust_cfa_offset 16
- sw $v0, ARG_SLOT_SIZE+12($sp)
- .cfi_rel_offset 2, ARG_SLOT_SIZE+12
- sw $v1, ARG_SLOT_SIZE+8($sp)
- .cfi_rel_offset 3, ARG_SLOT_SIZE+8
+ sw $v0, ARG_SLOT_SIZE+8($sp)
+ .cfi_rel_offset 2, ARG_SLOT_SIZE+8
+ sw $v1, ARG_SLOT_SIZE+12($sp)
+ .cfi_rel_offset 3, ARG_SLOT_SIZE+12
s.d $f0, ARG_SLOT_SIZE($sp)
- s.d $f0, 16($sp) # pass fpr result
- move $a2, $v0 # pass gpr result
- move $a3, $v1
- addiu $a1, $sp, ARG_SLOT_SIZE+16 # pass $sp (remove arg slots and temp storage)
+ addiu $a3, $sp, ARG_SLOT_SIZE # Pass fpr_res pointer.
+ addiu $a2, $sp, ARG_SLOT_SIZE+8 # Pass gpr_res pointer.
+ addiu $a1, $sp, ARG_SLOT_SIZE+16 # Pass $sp (remove arg slots and temp storage).
la $t9, artInstrumentationMethodExitFromCode
- jalr $t9 # (Thread*, SP, gpr_res, fpr_res)
- move $a0, rSELF # pass Thread::Current
- move $t9, $v0 # set aside returned link register
- move $ra, $v1 # set link register for deoptimization
- lw $v0, ARG_SLOT_SIZE+12($sp) # restore return values
- lw $v1, ARG_SLOT_SIZE+8($sp)
+ jalr $t9 # (Thread*, SP, gpr_res*, fpr_res*)
+ move $a0, rSELF # Pass Thread::Current.
+ move $t9, $v0 # Set aside returned link register.
+ move $ra, $v1 # Set link register for deoptimization.
+ lw $v0, ARG_SLOT_SIZE+8($sp) # Restore return values.
+ lw $v1, ARG_SLOT_SIZE+12($sp)
l.d $f0, ARG_SLOT_SIZE($sp)
- jalr $zero, $t9 # return
- addiu $sp, $sp, ARG_SLOT_SIZE+FRAME_SIZE_SAVE_REFS_ONLY+16 # restore stack
- .cfi_adjust_cfa_offset -(ARG_SLOT_SIZE+FRAME_SIZE_SAVE_REFS_ONLY+16)
+ addiu $sp, $sp, 16
+ .cfi_adjust_cfa_offset -16
+ RESTORE_SAVE_REFS_ONLY_FRAME
+ beqz $t9, .Ldo_deliver_instrumentation_exception
+ nop # Deliver exception if we got nullptr as function.
+ jalr $zero, $t9 # Otherwise, return.
+ nop
+.Ldeliver_instrumentation_entry_exception:
+ # Deliver exception for art_quick_instrumentation_entry placed after
+ # art_quick_instrumentation_exit so that the fallthrough works.
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+.Ldo_deliver_instrumentation_exception:
+ DELIVER_PENDING_EXCEPTION
END art_quick_instrumentation_exit
/*
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index c9eeb7ca65..10074fd43b 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1930,16 +1930,15 @@ END art_invoke_obsolete_method_stub
.extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_entry
SETUP_SAVE_REFS_AND_ARGS_FRAME
- daddiu $sp, $sp, -16 # space for saving arg0
- .cfi_adjust_cfa_offset 16
- sd $a0, 0($sp) # save arg0
- move $a3, $ra # pass $ra
- jal artInstrumentationMethodEntryFromCode # (Method*, Object*, Thread*, RA)
+ # Preserve $a0 knowing there is a spare slot in kSaveRefsAndArgs.
+ sd $a0, 8($sp) # Save arg0.
+ move $a3, $sp # Pass $sp.
+ jal artInstrumentationMethodEntryFromCode # (Method*, Object*, Thread*, SP)
move $a2, rSELF # pass Thread::Current
+ beqzc $v0, .Ldeliver_instrumentation_entry_exception
+ # Deliver exception if we got nullptr as function.
move $t9, $v0 # $t9 holds reference to code
- ld $a0, 0($sp) # restore arg0
- daddiu $sp, $sp, 16 # remove args
- .cfi_adjust_cfa_offset -16
+ ld $a0, 8($sp) # Restore arg0.
RESTORE_SAVE_REFS_AND_ARGS_FRAME
jalr $t9 # call method
nop
@@ -1951,28 +1950,34 @@ art_quick_instrumentation_exit:
SETUP_GP
move $ra, $zero # link register is to here, so clobber with 0 for later checks
SETUP_SAVE_REFS_ONLY_FRAME
- move $t0, $sp # remember bottom of caller's frame
daddiu $sp, $sp, -16 # save return values and set up args
.cfi_adjust_cfa_offset 16
sd $v0, 0($sp)
.cfi_rel_offset 2, 0
s.d $f0, 8($sp)
- mov.d $f15, $f0 # pass fpr result
- move $a2, $v0 # pass gpr result
- move $a1, $t0 # pass $sp
+ daddiu $a3, $sp, 8 # Pass fpr_res pointer.
+ move $a2, $sp # Pass gpr_res pointer.
+ daddiu $a1, $sp, 16 # Pass $sp.
+ jal artInstrumentationMethodExitFromCode # (Thread*, SP, gpr_res*, fpr_res*)
move $a0, rSELF # pass Thread::Current
- jal artInstrumentationMethodExitFromCode # (Thread*, SP, gpr_res, fpr_res)
- .cpreturn # Restore gp from t8 in branch delay slot. gp is not used anymore,
- # and t8 may be clobbered in artInstrumentationMethodExitFromCode.
move $t9, $v0 # set aside returned link register
move $ra, $v1 # set link register for deoptimization
ld $v0, 0($sp) # restore return values
l.d $f0, 8($sp)
- jalr $zero, $t9 # return
- # restore stack, 16 bytes of saved values + ref_only callee save frame
- daddiu $sp, $sp, 16+FRAME_SIZE_SAVE_REFS_ONLY
- .cfi_adjust_cfa_offset -(16+FRAME_SIZE_SAVE_REFS_ONLY)
+ daddiu $sp, $sp, 16
+ .cfi_adjust_cfa_offset -16
+ RESTORE_SAVE_REFS_ONLY_FRAME
+ beqz $t9, .Ldo_deliver_instrumentation_exception
+ nop # Deliver exception if we got nullptr as function.
+ jalr $zero, $t9 # Otherwise, return.
+ nop
+.Ldeliver_instrumentation_entry_exception:
+ # Deliver exception for art_quick_instrumentation_entry placed after
+ # art_quick_instrumentation_exit so that the fallthrough works.
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+.Ldo_deliver_instrumentation_exception:
+ DELIVER_PENDING_EXCEPTION
END art_quick_instrumentation_exit
/*
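On both MIPS and MIPS64 the exit stub now stores the return registers to the stack and passes their addresses to artInstrumentationMethodExitFromCode, and a null result makes the stub deliver the pending exception instead of returning. The exact C prototype is not part of these hunks, so the sketch below is only an assumption that mirrors the "(Thread*, SP, gpr_res*, fpr_res*)" register comments above:

#include <cstdint>

// Hypothetical sketch only -- names and types are placeholders, not ART's real signature.
extern "C" uint64_t InstrumentationExitHookSketch(void* self,            // Thread::Current()
                                                  void** sp,             // frame pointer from $a1
                                                  uint64_t* gpr_result,  // address of saved $v0/$v1
                                                  uint64_t* fpr_result)  // address of saved $f0
{
  (void)self;
  (void)sp;
  (void)fpr_result;
  // Because the results are passed by pointer, the hook can rewrite the method's return value
  // in place; the stub reloads *gpr_result / *fpr_result into $v0/$v1 and $f0 before returning.
  uint64_t unchanged = *gpr_result;
  *gpr_result = unchanged;
  // A zero return value makes the stub take the beqz branch to
  // .Ldo_deliver_instrumentation_exception instead of jumping back through $t9.
  return 0;
}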
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index d591e0992c..32946ef0b4 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -433,13 +433,7 @@ static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file,
const uint8_t* class_data = dex_file.GetClassData(class_def);
CHECK(class_data != nullptr);
ClassDataItemIterator it(dex_file, class_data);
- // Skip fields
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
// Process methods
size_t class_def_method_index = 0;
while (it.HasNextDirectMethod()) {
diff --git a/runtime/base/array_ref.h b/runtime/base/array_ref.h
index 00b9bad6bf..630a036f3d 100644
--- a/runtime/base/array_ref.h
+++ b/runtime/base/array_ref.h
@@ -40,17 +40,17 @@ namespace art {
template <typename T>
class ArrayRef {
public:
- typedef T value_type;
- typedef T& reference;
- typedef const T& const_reference;
- typedef T* pointer;
- typedef const T* const_pointer;
- typedef T* iterator;
- typedef const T* const_iterator;
- typedef std::reverse_iterator<iterator> reverse_iterator;
- typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
- typedef ptrdiff_t difference_type;
- typedef size_t size_type;
+ using value_type = T;
+ using reference = T&;
+ using const_reference = const T&;
+ using pointer = T*;
+ using const_pointer = const T*;
+ using iterator = T*;
+ using const_iterator = const T*;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using difference_type = ptrdiff_t;
+ using size_type = size_t;
// Constructors.
@@ -140,22 +140,22 @@ class ArrayRef {
}
reference front() {
- DCHECK_NE(size_, 0u);
+ DCHECK(!empty());
return array_[0];
}
const_reference front() const {
- DCHECK_NE(size_, 0u);
+ DCHECK(!empty());
return array_[0];
}
reference back() {
- DCHECK_NE(size_, 0u);
+ DCHECK(!empty());
return array_[size_ - 1u];
}
const_reference back() const {
- DCHECK_NE(size_, 0u);
+ DCHECK(!empty());
return array_[size_ - 1u];
}
@@ -165,14 +165,17 @@ class ArrayRef {
ArrayRef SubArray(size_type pos) {
return SubArray(pos, size() - pos);
}
+
ArrayRef<const T> SubArray(size_type pos) const {
return SubArray(pos, size() - pos);
}
+
ArrayRef SubArray(size_type pos, size_type length) {
DCHECK_LE(pos, size());
DCHECK_LE(length, size() - pos);
return ArrayRef(data() + pos, length);
}
+
ArrayRef<const T> SubArray(size_type pos, size_type length) const {
DCHECK_LE(pos, size());
DCHECK_LE(length, size() - pos);
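A short usage sketch of the ArrayRef slicing interface shown in this hunk; it assumes the usual pointer-and-size constructor, which is outside the lines above:

#include <cstddef>
#include <cstdint>
// Assumes #include "base/array_ref.h".

void SliceSketch(const uint8_t* buffer, size_t size) {
  art::ArrayRef<const uint8_t> all(buffer, size);
  if (all.empty()) {
    return;  // front()/back() now DCHECK(!empty()) instead of comparing size_ to 0 directly.
  }
  art::ArrayRef<const uint8_t> head = all.SubArray(/* pos */ 0u, /* length */ 1u);
  art::ArrayRef<const uint8_t> tail = all.SubArray(/* pos */ 1u);  // Everything after element 0.
  uint8_t first = head.front();
  uint8_t last = tail.empty() ? first : tail.back();
  (void)first;
  (void)last;
}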
diff --git a/runtime/base/array_slice.h b/runtime/base/array_slice.h
index 32283d0a0a..0da977d97d 100644
--- a/runtime/base/array_slice.h
+++ b/runtime/base/array_slice.h
@@ -17,7 +17,6 @@
#ifndef ART_RUNTIME_BASE_ARRAY_SLICE_H_
#define ART_RUNTIME_BASE_ARRAY_SLICE_H_
-#include "length_prefixed_array.h"
#include "stride_iterator.h"
#include "base/bit_utils.h"
#include "base/casts.h"
@@ -27,9 +26,21 @@ namespace art {
// An ArraySlice is an abstraction over an array or a part of an array of a particular type. It does
// bounds checking and can be made from several common array-like structures in Art.
-template<typename T>
+template <typename T>
class ArraySlice {
public:
+ using value_type = T;
+ using reference = T&;
+ using const_reference = const T&;
+ using pointer = T*;
+ using const_pointer = const T*;
+ using iterator = StrideIterator<T>;
+ using const_iterator = StrideIterator<const T>;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using difference_type = ptrdiff_t;
+ using size_type = size_t;
+
// Create an empty array slice.
ArraySlice() : array_(nullptr), size_(0), element_size_(0) {}
@@ -44,85 +55,74 @@ class ArraySlice {
DCHECK(array_ != nullptr || length == 0);
}
- // Create an array slice of the elements between start_offset and end_offset of the array with
- // each element being element_size bytes long. Both start_offset and end_offset are in
- // element_size units.
- ArraySlice(T* array,
- uint32_t start_offset,
- uint32_t end_offset,
- size_t element_size = sizeof(T))
- : array_(nullptr),
- size_(end_offset - start_offset),
- element_size_(element_size) {
- DCHECK(array_ != nullptr || size_ == 0);
- DCHECK_LE(start_offset, end_offset);
- if (size_ != 0) {
- uintptr_t offset = start_offset * element_size_;
- array_ = *reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(array) + offset);
- }
- }
+ // Iterators.
+ iterator begin() { return iterator(&AtUnchecked(0), element_size_); }
+ const_iterator begin() const { return const_iterator(&AtUnchecked(0), element_size_); }
+ const_iterator cbegin() const { return const_iterator(&AtUnchecked(0), element_size_); }
+ StrideIterator<T> end() { return StrideIterator<T>(&AtUnchecked(size_), element_size_); }
+ const_iterator end() const { return const_iterator(&AtUnchecked(size_), element_size_); }
+ const_iterator cend() const { return const_iterator(&AtUnchecked(size_), element_size_); }
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
+ const_reverse_iterator crbegin() const { return const_reverse_iterator(cend()); }
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
+ const_reverse_iterator crend() const { return const_reverse_iterator(cbegin()); }
- // Create an array slice of the elements between start_offset and end_offset of the array with
- // each element being element_size bytes long and having the given alignment. Both start_offset
- // and end_offset are in element_size units.
- ArraySlice(LengthPrefixedArray<T>* array,
- uint32_t start_offset,
- uint32_t end_offset,
- size_t element_size = sizeof(T),
- size_t alignment = alignof(T))
- : array_(nullptr),
- size_(end_offset - start_offset),
- element_size_(element_size) {
- DCHECK(array != nullptr || size_ == 0);
- if (size_ != 0) {
- DCHECK_LE(start_offset, end_offset);
- DCHECK_LE(start_offset, array->size());
- DCHECK_LE(end_offset, array->size());
- array_ = &array->At(start_offset, element_size_, alignment);
- }
- }
+ // Size.
+ size_type size() const { return size_; }
+ bool empty() const { return size() == 0u; }
+
+ // Element access. NOTE: Not providing at() and data().
- T& At(size_t index) {
+ reference operator[](size_t index) {
DCHECK_LT(index, size_);
return AtUnchecked(index);
}
- const T& At(size_t index) const {
+ const_reference operator[](size_t index) const {
DCHECK_LT(index, size_);
return AtUnchecked(index);
}
- T& operator[](size_t index) {
- return At(index);
+ reference front() {
+ DCHECK(!empty());
+ return (*this)[0];
}
- const T& operator[](size_t index) const {
- return At(index);
+ const_reference front() const {
+ DCHECK(!empty());
+ return (*this)[0];
}
- StrideIterator<T> begin() {
- return StrideIterator<T>(&AtUnchecked(0), element_size_);
+ reference back() {
+ DCHECK(!empty());
+ return (*this)[size_ - 1u];
}
- StrideIterator<const T> begin() const {
- return StrideIterator<const T>(&AtUnchecked(0), element_size_);
+ const_reference back() const {
+ DCHECK(!empty());
+ return (*this)[size_ - 1u];
}
- StrideIterator<T> end() {
- return StrideIterator<T>(&AtUnchecked(size_), element_size_);
+ ArraySlice<T> SubArray(size_type pos) {
+ return SubArray(pos, size() - pos);
}
- StrideIterator<const T> end() const {
- return StrideIterator<const T>(&AtUnchecked(size_), element_size_);
+ ArraySlice<const T> SubArray(size_type pos) const {
+ return SubArray(pos, size() - pos);
}
- IterationRange<StrideIterator<T>> AsRange() {
- return size() != 0 ? MakeIterationRange(begin(), end())
- : MakeEmptyIterationRange(StrideIterator<T>(nullptr, 0));
+ ArraySlice<T> SubArray(size_type pos, size_type length) {
+ DCHECK_LE(pos, size());
+ DCHECK_LE(length, size() - pos);
+ return ArraySlice<T>(&AtUnchecked(pos), length, element_size_);
}
- size_t size() const {
- return size_;
+ ArraySlice<const T> SubArray(size_type pos, size_type length) const {
+ DCHECK_LE(pos, size());
+ DCHECK_LE(length, size() - pos);
+ return ArraySlice<const T>(&AtUnchecked(pos), length, element_size_);
}
size_t ElementSize() const {
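A sketch of how the container-style ArraySlice interface above is meant to be used; the element type is illustrative, the point being that iteration strides by the runtime element size rather than sizeof(T):

#include <cstddef>
#include <cstdint>
// Assumes #include "base/array_slice.h"; PackedEntry stands in for types like ArtMethod whose
// true element size is only known at runtime.
struct PackedEntry {
  uint32_t value;
};

size_t SumSlice(art::ArraySlice<PackedEntry> slice) {
  size_t sum = 0;
  for (PackedEntry& entry : slice) {  // StrideIterator advances by ElementSize() bytes.
    sum += entry.value;
  }
  if (!slice.empty()) {
    // SubArray keeps the element size and is DCHECK-bounded, matching ArrayRef::SubArray.
    art::ArraySlice<PackedEntry> rest = slice.SubArray(/* pos */ 1u);
    sum += rest.size();
  }
  return sum;
}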
diff --git a/runtime/base/length_prefixed_array.h b/runtime/base/length_prefixed_array.h
index 8060263863..a570b819ba 100644
--- a/runtime/base/length_prefixed_array.h
+++ b/runtime/base/length_prefixed_array.h
@@ -65,7 +65,7 @@ class LengthPrefixedArray {
size_t element_size = sizeof(T),
size_t alignment = alignof(T)) {
DCHECK_ALIGNED_PARAM(element_size, alignment);
- return RoundUp(offsetof(LengthPrefixedArray<T>, data), alignment) + index * element_size;
+ return RoundUp(offsetof(LengthPrefixedArray<T>, data_), alignment) + index * element_size;
}
static size_t ComputeSize(size_t num_elements,
@@ -87,7 +87,7 @@ class LengthPrefixedArray {
// Clear the potentially uninitialized padding between the size_ and actual data.
void ClearPadding(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
- size_t gap_offset = offsetof(LengthPrefixedArray<T>, data);
+ size_t gap_offset = offsetof(LengthPrefixedArray<T>, data_);
size_t gap_size = OffsetOfElement(0, element_size, alignment) - gap_offset;
memset(reinterpret_cast<uint8_t*>(this) + gap_offset, 0, gap_size);
}
@@ -104,7 +104,7 @@ class LengthPrefixedArray {
}
uint32_t size_;
- uint8_t data[0];
+ uint8_t data_[0];
};
// Returns empty iteration range if the array is null.
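The data -> data_ rename leaves the layout math unchanged; a worked example of what OffsetOfElement() and ClearPadding() compute, with illustrative numbers:

#include <cstddef>
// Illustrative only: a LengthPrefixedArray<T> whose uint32_t size_ ends at offset 4,
// with element_size = 8 and alignment = 8.
constexpr size_t kDataOffset = 4;    // Stands in for offsetof(LengthPrefixedArray<T>, data_).
constexpr size_t kAlignment = 8;
constexpr size_t kElementSize = 8;
constexpr size_t kFirstElement =
    ((kDataOffset + kAlignment - 1) / kAlignment) * kAlignment;  // RoundUp(4, 8) == 8.
static_assert(kFirstElement == 8, "element 0 starts after the alignment padding");
// OffsetOfElement(i) == kFirstElement + i * kElementSize, and ClearPadding() zeroes exactly
// the bytes in [kDataOffset, kFirstElement), i.e. the padding between size_ and data_.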
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index 862f0d0b00..b8df6897e4 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -28,46 +28,39 @@ namespace art {
using android::base::StringPrintf;
-bool ScopedFlock::Init(const char* filename, std::string* error_msg) {
- return Init(filename, O_CREAT | O_RDWR, true, error_msg);
+/* static */ ScopedFlock LockedFile::Open(const char* filename, std::string* error_msg) {
+ return Open(filename, O_CREAT | O_RDWR, true, error_msg);
}
-bool ScopedFlock::Init(const char* filename, int flags, bool block, std::string* error_msg) {
- return Init(filename, flags, block, true, error_msg);
-}
-
-bool ScopedFlock::Init(const char* filename,
- int flags,
- bool block,
- bool flush_on_close,
- std::string* error_msg) {
- flush_on_close_ = flush_on_close;
+/* static */ ScopedFlock LockedFile::Open(const char* filename, int flags, bool block,
+ std::string* error_msg) {
while (true) {
- if (file_.get() != nullptr) {
- UNUSED(file_->FlushCloseOrErase()); // Ignore result.
- }
-
- bool check_usage = flush_on_close; // Check usage only if we need to flush on close.
- file_.reset(OS::OpenFileWithFlags(filename, flags, check_usage));
- if (file_.get() == nullptr) {
+    // NOTE: We don't check usage here because the ScopedFlock should *never* be
+    // responsible for flushing its underlying FD. Its only purpose should be to
+    // acquire a lock, and to unlock / close the file in the corresponding
+    // destructor. Callers should explicitly flush files they're writing to if
+    // that is the desired behaviour.

+ std::unique_ptr<File> file(OS::OpenFileWithFlags(filename, flags, false /* check_usage */));
+ if (file.get() == nullptr) {
*error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
- return false;
+ return nullptr;
}
+
int operation = block ? LOCK_EX : (LOCK_EX | LOCK_NB);
- int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), operation));
+ int flock_result = TEMP_FAILURE_RETRY(flock(file->Fd(), operation));
if (flock_result == EWOULDBLOCK) {
// File is locked by someone else and we are required not to block;
- return false;
+ return nullptr;
}
if (flock_result != 0) {
*error_msg = StringPrintf("Failed to lock file '%s': %s", filename, strerror(errno));
- return false;
+ return nullptr;
}
struct stat fstat_stat;
- int fstat_result = TEMP_FAILURE_RETRY(fstat(file_->Fd(), &fstat_stat));
+ int fstat_result = TEMP_FAILURE_RETRY(fstat(file->Fd(), &fstat_stat));
if (fstat_result != 0) {
*error_msg = StringPrintf("Failed to fstat file '%s': %s", filename, strerror(errno));
- return false;
+ return nullptr;
}
struct stat stat_stat;
int stat_result = TEMP_FAILURE_RETRY(stat(filename, &stat_stat));
@@ -80,7 +73,7 @@ bool ScopedFlock::Init(const char* filename,
// Note that in theory we could race with someone here for a long time and end up retrying
// over and over again. This potential behavior does not fit well in the non-blocking
// semantics. Thus, if we are not require to block return failure when racing.
- return false;
+ return nullptr;
}
}
if (fstat_stat.st_dev != stat_stat.st_dev || fstat_stat.st_ino != stat_stat.st_ino) {
@@ -89,61 +82,47 @@ bool ScopedFlock::Init(const char* filename,
continue;
} else {
// See comment above.
- return false;
+ return nullptr;
}
}
- return true;
+
+ return ScopedFlock(new LockedFile(std::move((*file.get()))));
}
}
-bool ScopedFlock::Init(File* file, std::string* error_msg) {
- flush_on_close_ = true;
- file_.reset(new File(dup(file->Fd()), file->GetPath(), file->CheckUsage(), file->ReadOnlyMode()));
- if (file_->Fd() == -1) {
- file_.reset();
+ScopedFlock LockedFile::DupOf(const int fd, const std::string& path,
+ const bool read_only_mode, std::string* error_msg) {
+  // NOTE: We don't check usage here because the ScopedFlock should *never* be
+  // responsible for flushing its underlying FD. Its only purpose should be to
+  // acquire a lock, and to unlock / close the file in the corresponding
+  // destructor. Callers should explicitly flush files they're writing to if
+  // that is the desired behaviour.
+ ScopedFlock locked_file(
+ new LockedFile(dup(fd), path, false /* check_usage */, read_only_mode));
+ if (locked_file->Fd() == -1) {
*error_msg = StringPrintf("Failed to duplicate open file '%s': %s",
- file->GetPath().c_str(), strerror(errno));
- return false;
+ locked_file->GetPath().c_str(), strerror(errno));
+ return nullptr;
}
- if (0 != TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_EX))) {
- file_.reset();
+ if (0 != TEMP_FAILURE_RETRY(flock(locked_file->Fd(), LOCK_EX))) {
*error_msg = StringPrintf(
- "Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
- return false;
+ "Failed to lock file '%s': %s", locked_file->GetPath().c_str(), strerror(errno));
+ return nullptr;
}
- return true;
-}
-
-File* ScopedFlock::GetFile() const {
- CHECK(file_.get() != nullptr);
- return file_.get();
-}
-bool ScopedFlock::HasFile() {
- return file_.get() != nullptr;
+ return locked_file;
}
-ScopedFlock::ScopedFlock() : flush_on_close_(true) { }
-
-ScopedFlock::~ScopedFlock() {
- if (file_.get() != nullptr) {
- int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_UN));
+void LockedFile::ReleaseLock() {
+ if (this->Fd() != -1) {
+ int flock_result = TEMP_FAILURE_RETRY(flock(this->Fd(), LOCK_UN));
if (flock_result != 0) {
// Only printing a warning is okay since this is only used with either:
// 1) a non-blocking Init call, or
// 2) as a part of a seperate binary (eg dex2oat) which has it's own timeout logic to prevent
// deadlocks.
// This means we can be sure that the warning won't cause a deadlock.
- PLOG(WARNING) << "Unable to unlock file " << file_->GetPath();
- }
- int close_result = -1;
- if (file_->ReadOnlyMode() || !flush_on_close_) {
- close_result = file_->Close();
- } else {
- close_result = file_->FlushCloseOrErase();
- }
- if (close_result != 0) {
- PLOG(WARNING) << "Could not close scoped file lock file.";
+ PLOG(WARNING) << "Unable to unlock file " << this->GetPath();
}
}
}
diff --git a/runtime/base/scoped_flock.h b/runtime/base/scoped_flock.h
index a3a320f4cc..1b933c07f3 100644
--- a/runtime/base/scoped_flock.h
+++ b/runtime/base/scoped_flock.h
@@ -20,63 +20,68 @@
#include <memory>
#include <string>
+#include "android-base/unique_fd.h"
+
+#include "base/logging.h"
#include "base/macros.h"
+#include "base/unix_file/fd_file.h"
#include "os.h"
namespace art {
-// A scoped file-lock implemented using flock. The file is locked by calling the Init function and
-// is released during destruction. Note that failing to unlock the file only causes a warning to be
-// printed. Users should take care that this does not cause potential deadlocks.
-//
-// Only printing a warning on unlock failure is okay since this is only used with either:
-// 1) a non-blocking Init call, or
-// 2) as a part of a seperate binary (eg dex2oat) which has it's own timeout logic to prevent
-// deadlocks.
-// This means we can be sure that the warning won't cause a deadlock.
-class ScopedFlock {
- public:
- ScopedFlock();
+class LockedFile;
+class LockedFileCloseNoFlush;
+
+// A scoped File object that calls Close without flushing.
+typedef std::unique_ptr<LockedFile, LockedFileCloseNoFlush> ScopedFlock;
+class LockedFile : public unix_file::FdFile {
+ public:
// Attempts to acquire an exclusive file lock (see flock(2)) on the file
// at filename, and blocks until it can do so.
//
- // Returns true if the lock could be acquired, or false if an error occurred.
// It is an error if its inode changed (usually due to a new file being
// created at the same path) between attempts to lock it. In blocking mode,
// locking will be retried if the file changed. In non-blocking mode, false
// is returned and no attempt is made to re-acquire the lock.
//
- // The argument `flush_on_close` controls whether or not the file
- // will be explicitly flushed before close.
- //
// The file is opened with the provided flags.
- bool Init(const char* filename,
- int flags,
- bool block,
- bool flush_on_close,
- std::string* error_msg);
- // Calls Init(filename, flags, block, true, error_msg);
- bool Init(const char* filename, int flags, bool block, std::string* error_msg);
- // Calls Init(filename, O_CREAT | O_RDWR, true, errror_msg)
- bool Init(const char* filename, std::string* error_msg);
+ static ScopedFlock Open(const char* filename, int flags, bool block,
+ std::string* error_msg);
+
+  // Calls Open(filename, O_CREAT | O_RDWR, true, error_msg)
+ static ScopedFlock Open(const char* filename, std::string* error_msg);
+
// Attempt to acquire an exclusive file lock (see flock(2)) on 'file'.
// Returns true if the lock could be acquired or false if an error
// occured.
- bool Init(File* file, std::string* error_msg);
+ static ScopedFlock DupOf(const int fd, const std::string& path,
+ const bool read_only_mode, std::string* error_message);
+
+ // Release a lock held on this file, if any.
+ void ReleaseLock();
- // Returns the (locked) file associated with this instance.
- File* GetFile() const;
+ private:
+ // Constructors should not be invoked directly, use one of the factory
+ // methods instead.
+ explicit LockedFile(FdFile&& other) : FdFile(std::move(other)) {
+ }
- // Returns whether a file is held.
- bool HasFile();
+ // Constructors should not be invoked directly, use one of the factory
+ // methods instead.
+ LockedFile(int fd, const std::string& path, bool check_usage, bool read_only_mode)
+ : FdFile(fd, path, check_usage, read_only_mode) {
+ }
+};
- ~ScopedFlock();
+class LockedFileCloseNoFlush {
+ public:
+ void operator()(LockedFile* ptr) {
+ ptr->ReleaseLock();
+ UNUSED(ptr->Close());
- private:
- std::unique_ptr<File> file_;
- bool flush_on_close_;
- DISALLOW_COPY_AND_ASSIGN(ScopedFlock);
+ delete ptr;
+ }
};
} // namespace art
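The factory methods replace the old Init() calls; a minimal usage sketch matching the updated test below, assuming scoped_flock.h is included:

#include <string>

bool WithFileLock(const char* path) {
  std::string error_msg;
  // Open() creates/opens the file, flock()s it, and returns nullptr (with error_msg set) on failure.
  art::ScopedFlock lock = art::LockedFile::Open(path, &error_msg);
  if (lock == nullptr) {
    return false;  // The non-blocking Open(path, flags, /* block */ false, ...) variant also
                   // returns nullptr when another process already holds the lock.
  }
  // ... use lock->Fd() / lock->GetPath() while the exclusive lock is held ...
  // LockedFileCloseNoFlush releases the flock and closes the fd when `lock` goes out of scope;
  // per the NOTE in scoped_flock.cc, writers must flush explicitly.
  return true;
}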
diff --git a/runtime/base/scoped_flock_test.cc b/runtime/base/scoped_flock_test.cc
index 1fa7a12d17..1b6caaf747 100644
--- a/runtime/base/scoped_flock_test.cc
+++ b/runtime/base/scoped_flock_test.cc
@@ -30,11 +30,33 @@ TEST_F(ScopedFlockTest, TestLocking) {
// to each other, so attempting to query locks set by flock using
// using fcntl(,F_GETLK,) will not work. see kernel doc at
// Documentation/filesystems/locks.txt.
- ScopedFlock file_lock;
- ASSERT_TRUE(file_lock.Init(scratch_file.GetFilename().c_str(),
- &error_msg));
-
- ASSERT_FALSE(file_lock.Init("/guaranteed/not/to/exist", &error_msg));
+ {
+ ScopedFlock file_lock = LockedFile::Open(scratch_file.GetFilename().c_str(),
+ &error_msg);
+ ASSERT_TRUE(file_lock.get() != nullptr);
+
+ // Attempt to acquire a second lock on the same file. This must fail.
+ ScopedFlock second_lock = LockedFile::Open(scratch_file.GetFilename().c_str(),
+ O_RDONLY,
+ /* block */ false,
+ &error_msg);
+ ASSERT_TRUE(second_lock.get() == nullptr);
+ ASSERT_TRUE(!error_msg.empty());
+ }
+
+ {
+    // Attempt to reacquire the lock once the first lock has been released; this
+    // must succeed.
+ ScopedFlock file_lock = LockedFile::Open(scratch_file.GetFilename().c_str(),
+ &error_msg);
+ ASSERT_TRUE(file_lock.get() != nullptr);
+ }
+
+ {
+ ScopedFlock file_lock = LockedFile::Open("/will/not/exist",
+ &error_msg);
+ ASSERT_TRUE(file_lock.get() == nullptr);
+ }
}
} // namespace art
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 0fa25d15d2..71558e1820 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3019,13 +3019,7 @@ void ClassLinker::FixupStaticTrampolines(ObjPtr<mirror::Class> klass) {
// There should always be class data if there were direct methods.
CHECK(class_data != nullptr) << klass->PrettyDescriptor();
ClassDataItemIterator it(dex_file, class_data);
- // Skip fields
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
bool has_oat_class;
OatFile::OatClass oat_class = OatFile::FindOatClass(dex_file,
klass->GetDexClassDefIndex(),
@@ -5451,25 +5445,10 @@ static void CountMethodsAndFields(ClassDataItemIterator& dex_data,
size_t* direct_methods,
size_t* static_fields,
size_t* instance_fields) {
- *virtual_methods = *direct_methods = *static_fields = *instance_fields = 0;
-
- while (dex_data.HasNextStaticField()) {
- dex_data.Next();
- (*static_fields)++;
- }
- while (dex_data.HasNextInstanceField()) {
- dex_data.Next();
- (*instance_fields)++;
- }
- while (dex_data.HasNextDirectMethod()) {
- (*direct_methods)++;
- dex_data.Next();
- }
- while (dex_data.HasNextVirtualMethod()) {
- (*virtual_methods)++;
- dex_data.Next();
- }
- DCHECK(!dex_data.HasNext());
+ *static_fields = dex_data.NumStaticFields();
+ *instance_fields = dex_data.NumInstanceFields();
+ *direct_methods = dex_data.NumDirectMethods();
+ *virtual_methods = dex_data.NumVirtualMethods();
}
static void DumpClass(std::ostream& os,
diff --git a/runtime/common_dex_operations.h b/runtime/common_dex_operations.h
index 133ddb0721..528db96dd5 100644
--- a/runtime/common_dex_operations.h
+++ b/runtime/common_dex_operations.h
@@ -62,7 +62,7 @@ inline void PerformCall(Thread* self,
}
template<Primitive::Type field_type>
-static ALWAYS_INLINE void DoFieldGetCommon(Thread* self,
+static ALWAYS_INLINE bool DoFieldGetCommon(Thread* self,
const ShadowFrame& shadow_frame,
ObjPtr<mirror::Object> obj,
ArtField* field,
@@ -85,6 +85,9 @@ static ALWAYS_INLINE void DoFieldGetCommon(Thread* self,
shadow_frame.GetMethod(),
shadow_frame.GetDexPC(),
field);
+ if (UNLIKELY(self->IsExceptionPending())) {
+ return false;
+ }
}
switch (field_type) {
@@ -113,6 +116,7 @@ static ALWAYS_INLINE void DoFieldGetCommon(Thread* self,
LOG(FATAL) << "Unreachable " << field_type;
break;
}
+ return true;
}
template<Primitive::Type field_type, bool do_assignability_check, bool transaction_active>
@@ -120,7 +124,7 @@ ALWAYS_INLINE bool DoFieldPutCommon(Thread* self,
const ShadowFrame& shadow_frame,
ObjPtr<mirror::Object> obj,
ArtField* field,
- const JValue& value)
+ JValue& value)
REQUIRES_SHARED(Locks::mutator_lock_) {
field->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
@@ -128,15 +132,22 @@ ALWAYS_INLINE bool DoFieldPutCommon(Thread* self,
// the field from the base of the object, we need to look for it first.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
- StackHandleScope<1> hs(self);
- // Wrap in handle wrapper in case the listener does thread suspension.
+ StackHandleScope<2> hs(self);
+ // Save this and return value (if needed) in case the instrumentation causes a suspend.
HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(&obj));
ObjPtr<mirror::Object> this_object = field->IsStatic() ? nullptr : obj;
- instrumentation->FieldWriteEvent(self, this_object.Ptr(),
+ mirror::Object* fake_root = nullptr;
+ HandleWrapper<mirror::Object> ret(hs.NewHandleWrapper<mirror::Object>(
+ field_type == Primitive::kPrimNot ? value.GetGCRoot() : &fake_root));
+ instrumentation->FieldWriteEvent(self,
+ this_object.Ptr(),
shadow_frame.GetMethod(),
shadow_frame.GetDexPC(),
field,
value);
+ if (UNLIKELY(self->IsExceptionPending())) {
+ return false;
+ }
}
switch (field_type) {
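The common field helpers now report instrumentation failures to their callers. Reduced to a sketch (the wrapper name is made up; the callable stands in for instrumentation->FieldReadEvent() / FieldWriteEvent(), which may leave an exception pending):

template <typename EventFn>
bool SendEventAndCheck(art::Thread* self, EventFn&& notify) {
  notify();
  if (self->IsExceptionPending()) {
    return false;  // The caller aborts the dex instruction; the exception stays pending.
  }
  return true;
}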
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 1274a3623b..019770302d 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -132,8 +132,7 @@ class CommonRuntimeTestImpl {
std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name);
- std::unique_ptr<const DexFile> OpenTestDexFile(const char* name)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ std::unique_ptr<const DexFile> OpenTestDexFile(const char* name);
jobject LoadDex(const char* dex_name) REQUIRES_SHARED(Locks::mutator_lock_);
jobject LoadMultiDex(const char* first_dex_name, const char* second_dex_name)
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 7e70b7564c..12bdb32fec 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2927,7 +2927,8 @@ void Dbg::PostLocationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_objec
void Dbg::PostFieldAccessEvent(ArtMethod* m, int dex_pc,
mirror::Object* this_object, ArtField* f) {
- if (!IsDebuggerActive()) {
+ // TODO We should send events for native methods.
+ if (!IsDebuggerActive() || m->IsNative()) {
return;
}
DCHECK(m != nullptr);
@@ -2941,7 +2942,8 @@ void Dbg::PostFieldAccessEvent(ArtMethod* m, int dex_pc,
void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc,
mirror::Object* this_object, ArtField* f,
const JValue* field_value) {
- if (!IsDebuggerActive()) {
+ // TODO We should send events for native methods.
+ if (!IsDebuggerActive() || m->IsNative()) {
return;
}
DCHECK(m != nullptr);
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index b267e5f22a..6d1158260a 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -656,13 +656,7 @@ uint32_t DexFile::FindCodeItemOffset(const DexFile::ClassDef& class_def,
const uint8_t* class_data = GetClassData(class_def);
CHECK(class_data != nullptr);
ClassDataItemIterator it(*this, class_data);
- // Skip fields
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
while (it.HasNextDirectMethod()) {
if (it.GetMemberIndex() == method_idx) {
return it.GetMethodCodeItemOffset();
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 3249edbe83..81a39afbee 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -1343,6 +1343,30 @@ class ClassDataItemIterator {
bool HasNextVirtualMethod() const {
return pos_ >= EndOfDirectMethodsPos() && pos_ < EndOfVirtualMethodsPos();
}
+ void SkipStaticFields() {
+ while (HasNextStaticField()) {
+ Next();
+ }
+ }
+ void SkipInstanceFields() {
+ while (HasNextInstanceField()) {
+ Next();
+ }
+ }
+ void SkipAllFields() {
+ SkipStaticFields();
+ SkipInstanceFields();
+ }
+ void SkipDirectMethods() {
+ while (HasNextDirectMethod()) {
+ Next();
+ }
+ }
+ void SkipVirtualMethods() {
+ while (HasNextVirtualMethod()) {
+ Next();
+ }
+ }
bool HasNext() const {
return pos_ < EndOfVirtualMethodsPos();
}
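The new Skip* helpers replace the hand-rolled field loops in art_method.cc, class_linker.cc, dex_file.cc, dex_file_tracking_registrar.cc and dex_method_iterator.h above. A sketch mirroring those call sites:

void ForEachDirectMethodCodeItem(const art::DexFile& dex_file,
                                 const art::DexFile::ClassDef& class_def) {
  const uint8_t* class_data = dex_file.GetClassData(class_def);
  if (class_data == nullptr) {
    return;  // Class without fields or methods.
  }
  art::ClassDataItemIterator it(dex_file, class_data);
  it.SkipAllFields();  // Equivalent to SkipStaticFields() followed by SkipInstanceFields().
  while (it.HasNextDirectMethod()) {
    const art::DexFile::CodeItem* code_item = it.GetMethodCodeItem();
    (void)code_item;  // ... process the direct method ...
    it.Next();
  }
}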
diff --git a/runtime/dex_file_tracking_registrar.cc b/runtime/dex_file_tracking_registrar.cc
index 848e2f3cd8..d958568e55 100644
--- a/runtime/dex_file_tracking_registrar.cc
+++ b/runtime/dex_file_tracking_registrar.cc
@@ -137,10 +137,7 @@ void DexFileTrackingRegistrar::SetAllCodeItemRegistration(bool should_poison) {
const uint8_t* class_data = dex_file_->GetClassData(cd);
if (class_data != nullptr) {
ClassDataItemIterator cdit(*dex_file_, class_data);
- // Skipping Fields
- while (cdit.HasNextStaticField() || cdit.HasNextInstanceField()) {
- cdit.Next();
- }
+ cdit.SkipAllFields();
while (cdit.HasNextDirectMethod()) {
const DexFile::CodeItem* code_item = cdit.GetMethodCodeItem();
if (code_item != nullptr) {
@@ -160,10 +157,7 @@ void DexFileTrackingRegistrar::SetAllInsnsRegistration(bool should_poison) {
const uint8_t* class_data = dex_file_->GetClassData(cd);
if (class_data != nullptr) {
ClassDataItemIterator cdit(*dex_file_, class_data);
- // Skipping Fields
- while (cdit.HasNextStaticField() || cdit.HasNextInstanceField()) {
- cdit.Next();
- }
+ cdit.SkipAllFields();
while (cdit.HasNextDirectMethod()) {
const DexFile::CodeItem* code_item = cdit.GetMethodCodeItem();
if (code_item != nullptr) {
@@ -184,10 +178,7 @@ void DexFileTrackingRegistrar::SetCodeItemRegistration(const char* class_name, b
const uint8_t* class_data = dex_file_->GetClassData(cd);
if (class_data != nullptr) {
ClassDataItemIterator cdit(*dex_file_, class_data);
- // Skipping Fields
- while (cdit.HasNextStaticField() || cdit.HasNextInstanceField()) {
- cdit.Next();
- }
+ cdit.SkipAllFields();
while (cdit.HasNextDirectMethod()) {
const DexFile::MethodId& methodid_item = dex_file_->GetMethodId(cdit.GetMemberIndex());
const char * methodid_name = dex_file_->GetMethodName(methodid_item);
diff --git a/runtime/dex_method_iterator.h b/runtime/dex_method_iterator.h
index 7fae277c14..8a4bed31b1 100644
--- a/runtime/dex_method_iterator.h
+++ b/runtime/dex_method_iterator.h
@@ -66,13 +66,7 @@ class DexMethodIterator {
}
if (it_.get() == nullptr) {
it_.reset(new ClassDataItemIterator(GetDexFileInternal(), class_data_));
- // Skip fields
- while (GetIterator().HasNextStaticField()) {
- GetIterator().Next();
- }
- while (GetIterator().HasNextInstanceField()) {
- GetIterator().Next();
- }
+ GetIterator().SkipAllFields();
direct_method_ = true;
}
if (direct_method_ && GetIterator().HasNextDirectMethod()) {
diff --git a/runtime/dex_reference_collection.h b/runtime/dex_reference_collection.h
index 76355d6335..01b9b97786 100644
--- a/runtime/dex_reference_collection.h
+++ b/runtime/dex_reference_collection.h
@@ -63,12 +63,13 @@ class DexReferenceCollection {
private:
DexFileMap map_;
- // Optimize for adding to same vector in succession.
const DexFile* current_dex_file_ = nullptr;
IndexVector* current_vector_ = nullptr;
VectorAllocator vector_allocator_;
ALWAYS_INLINE IndexVector* GetOrInsertVector(const DexFile* dex) {
+    // Optimize for adding to the same vector in succession; the cached dex file and vector
+    // aim to avoid repeated map lookups.
if (UNLIKELY(current_dex_file_ != dex)) {
// There is an assumption that constructing an empty vector wont do any allocations. If this
// incorrect, this might leak for the arena case.
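The relocated comment documents the cache-the-last-key fast path in GetOrInsertVector(); the same idea in a self-contained sketch (generic types, not ART's):

#include <cstdint>
#include <map>
#include <vector>

class CachedVectors {
 public:
  void Add(const void* key, uint32_t index) {
    if (current_key_ != key) {
      current_key_ = key;
      current_vector_ = &map_[key];  // Single lookup per run of identical keys.
    }
    current_vector_->push_back(index);
  }

 private:
  std::map<const void*, std::vector<uint32_t>> map_;
  const void* current_key_ = nullptr;
  std::vector<uint32_t>* current_vector_ = nullptr;
};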
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 88a5a13246..01fc9ce668 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -136,7 +136,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
// Rely on the fact that the methods are contiguous to determine the index of the method in
// the slice.
int throws_index = (reinterpret_cast<uintptr_t>(proxy_method) -
- reinterpret_cast<uintptr_t>(&virtual_methods.At(0))) / method_size;
+ reinterpret_cast<uintptr_t>(&virtual_methods[0])) / method_size;
CHECK_LT(throws_index, static_cast<int>(num_virtuals));
mirror::ObjectArray<mirror::Class>* declared_exceptions =
proxy_class->GetProxyThrows()->Get(throws_index);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 9da2876416..1bf92851af 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -482,21 +482,22 @@ class ImageSpaceLoader {
bool validate_oat_file,
std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // Note that we must not use the file descriptor associated with
- // ScopedFlock::GetFile to Init the image file. We want the file
- // descriptor (and the associated exclusive lock) to be released when
- // we leave Create.
- ScopedFlock image_lock;
// Should this be a RDWR lock? This is only a defensive measure, as at
// this point the image should exist.
// However, only the zygote can write into the global dalvik-cache, so
// restrict to zygote processes, or any process that isn't using
// /data/dalvik-cache (which we assume to be allowed to write there).
const bool rw_lock = is_zygote || !is_global_cache;
- image_lock.Init(image_filename.c_str(),
- rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
- true /* block */,
- error_msg);
+
+ // Note that we must not use the file descriptor associated with
+ // ScopedFlock::GetFile to Init the image file. We want the file
+ // descriptor (and the associated exclusive lock) to be released when
+ // we leave Create.
+ ScopedFlock image = LockedFile::Open(image_filename.c_str(),
+ rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
+ true /* block */,
+ error_msg);
+
VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
<< image_location;
// If we are in /system we can assume the image is good. We can also
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index d06ac23d3c..1b36c3f12b 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -66,7 +66,11 @@ bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst
}
JValue result;
- DoFieldGetCommon<field_type>(self, shadow_frame, obj, f, &result);
+ if (UNLIKELY(!DoFieldGetCommon<field_type>(self, shadow_frame, obj, f, &result))) {
+ // Instrumentation threw an error!
+ CHECK(self->IsExceptionPending());
+ return false;
+ }
uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
switch (field_type) {
case Primitive::kPrimBoolean:
@@ -149,14 +153,18 @@ bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t in
field_offset.Uint32Value());
DCHECK(f != nullptr);
DCHECK(!f->IsStatic());
- StackHandleScope<1> hs(Thread::Current());
+ Thread* self = Thread::Current();
+ StackHandleScope<1> hs(self);
// Save obj in case the instrumentation event has thread suspension.
HandleWrapperObjPtr<mirror::Object> h = hs.NewHandleWrapper(&obj);
- instrumentation->FieldReadEvent(Thread::Current(),
+ instrumentation->FieldReadEvent(self,
obj.Ptr(),
shadow_frame.GetMethod(),
shadow_frame.GetDexPC(),
f);
+ if (UNLIKELY(self->IsExceptionPending())) {
+ return false;
+ }
}
// Note: iget-x-quick instructions are only for non-volatile fields.
const uint32_t vregA = inst->VRegA_22c(inst_data);
@@ -322,15 +330,22 @@ bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint1
DCHECK(f != nullptr);
DCHECK(!f->IsStatic());
JValue field_value = GetFieldValue<field_type>(shadow_frame, vregA);
- StackHandleScope<1> hs(Thread::Current());
+ Thread* self = Thread::Current();
+ StackHandleScope<2> hs(self);
// Save obj in case the instrumentation event has thread suspension.
HandleWrapperObjPtr<mirror::Object> h = hs.NewHandleWrapper(&obj);
- instrumentation->FieldWriteEvent(Thread::Current(),
+ mirror::Object* fake_root = nullptr;
+ HandleWrapper<mirror::Object> ret(hs.NewHandleWrapper<mirror::Object>(
+ field_type == Primitive::kPrimNot ? field_value.GetGCRoot() : &fake_root));
+ instrumentation->FieldWriteEvent(self,
obj.Ptr(),
shadow_frame.GetMethod(),
shadow_frame.GetDexPC(),
f,
field_value);
+ if (UNLIKELY(self->IsExceptionPending())) {
+ return false;
+ }
}
// Note: iput-x-quick instructions are only for non-volatile fields.
switch (field_type) {
diff --git a/runtime/interpreter/lock_count_data.cc b/runtime/interpreter/lock_count_data.cc
new file mode 100644
index 0000000000..64b59cd390
--- /dev/null
+++ b/runtime/interpreter/lock_count_data.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "lock_count_data.h"
+
+#include <algorithm>
+#include <string>
+
+#include "android-base/logging.h"
+#include "mirror/object-inl.h"
+#include "thread.h"
+
+namespace art {
+
+void LockCountData::AddMonitor(Thread* self, mirror::Object* obj) {
+ if (obj == nullptr) {
+ return;
+ }
+
+ // If there's an error during enter, we won't have locked the monitor. So check there's no
+ // exception.
+ if (self->IsExceptionPending()) {
+ return;
+ }
+
+ if (monitors_ == nullptr) {
+ monitors_.reset(new std::vector<mirror::Object*>());
+ }
+ monitors_->push_back(obj);
+}
+
+void LockCountData::RemoveMonitorOrThrow(Thread* self, const mirror::Object* obj) {
+ if (obj == nullptr) {
+ return;
+ }
+ bool found_object = false;
+ if (monitors_ != nullptr) {
+ // We need to remove one pointer to ref, as duplicates are used for counting recursive locks.
+ // We arbitrarily choose the first one.
+ auto it = std::find(monitors_->begin(), monitors_->end(), obj);
+ if (it != monitors_->end()) {
+ monitors_->erase(it);
+ found_object = true;
+ }
+ }
+ if (!found_object) {
+ // The object wasn't found. Time for an IllegalMonitorStateException.
+ // The order here isn't fully clear. Assume that any other pending exception is swallowed.
+ // TODO: Maybe make already pending exception a suppressed exception.
+ self->ClearException();
+ self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
+ "did not lock monitor on object of type '%s' before unlocking",
+ const_cast<mirror::Object*>(obj)->PrettyTypeOf().c_str());
+ }
+}
+
+// Helper to unlock a monitor. Must be NO_THREAD_SAFETY_ANALYSIS, as we can't statically show
+// that the object was locked.
+void MonitorExitHelper(Thread* self, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
+ obj->MonitorExit(self);
+}
+
+bool LockCountData::CheckAllMonitorsReleasedOrThrow(Thread* self) {
+ DCHECK(self != nullptr);
+ if (monitors_ != nullptr) {
+ if (!monitors_->empty()) {
+ // There may be an exception pending, if the method is terminating abruptly. Clear it.
+ // TODO: Should we add this as a suppressed exception?
+ self->ClearException();
+
+ // OK, there are monitors that are still locked. To enforce structured locking (and avoid
+ // deadlocks) we unlock all of them before we raise the IllegalMonitorState exception.
+ for (mirror::Object* obj : *monitors_) {
+ MonitorExitHelper(self, obj);
+ // If this raised an exception, ignore. TODO: Should we add this as suppressed
+ // exceptions?
+ if (self->IsExceptionPending()) {
+ self->ClearException();
+ }
+ }
+ // Raise an exception, just give the first object as the sample.
+ mirror::Object* first = (*monitors_)[0];
+ self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
+ "did not unlock monitor on object of type '%s'",
+ mirror::Object::PrettyTypeOf(first).c_str());
+
+ // To make sure this path is not triggered again, clean out the monitors.
+ monitors_->clear();
+
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace art
diff --git a/runtime/interpreter/lock_count_data.h b/runtime/interpreter/lock_count_data.h
new file mode 100644
index 0000000000..64874a5db7
--- /dev/null
+++ b/runtime/interpreter/lock_count_data.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_LOCK_COUNT_DATA_H_
+#define ART_RUNTIME_INTERPRETER_LOCK_COUNT_DATA_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/mutex.h"
+
+namespace art {
+
+namespace mirror {
+ class Object;
+} // namespace mirror
+
+class Thread;
+
+// Counting locks by storing object pointers into a vector. Duplicate entries mark recursive locks.
+// The vector will be visited with the ShadowFrame during GC (so all the locked-on objects are
+// thread roots).
+// Note: implementation is split so that the call sites may be optimized to no-ops in case no
+// lock counting is necessary. The actual implementation is in the cc file to avoid
+// dependencies.
+class LockCountData {
+ public:
+ // Add the given object to the list of monitors, that is, objects that have been locked. This
+ // will not throw (but be skipped if there is an exception pending on entry).
+ void AddMonitor(Thread* self, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Try to remove the given object from the monitor list, indicating an unlock operation.
+ // This will throw an IllegalMonitorStateException (clearing any already pending exception), in
+ // case that there wasn't a lock recorded for the object.
+ void RemoveMonitorOrThrow(Thread* self,
+ const mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Check whether all acquired monitors have been released. This will potentially throw an
+ // IllegalMonitorStateException, clearing any already pending exception. Returns true if the
+ // check shows that everything is OK wrt/ lock counting, false otherwise.
+ bool CheckAllMonitorsReleasedOrThrow(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ template <typename T, typename... Args>
+ void VisitMonitors(T visitor, Args&&... args) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (monitors_ != nullptr) {
+ // Visitors may change the Object*. Be careful with the foreach loop.
+ for (mirror::Object*& obj : *monitors_) {
+ visitor(/* inout */ &obj, std::forward<Args>(args)...);
+ }
+ }
+ }
+
+ private:
+ // Stores references to the locked-on objects. As noted, this should be visited during thread
+ // marking.
+ std::unique_ptr<std::vector<mirror::Object*>> monitors_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_LOCK_COUNT_DATA_H_
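A sketch of how the interpreter is expected to drive LockCountData around monitor-enter/-exit and method exit. The wrapper names are illustrative; the real call sites hold the mutator lock as the REQUIRES_SHARED annotations demand:

void OnMonitorEnter(art::LockCountData& data, art::Thread* self, art::mirror::Object* obj) {
  data.AddMonitor(self, obj);            // Duplicate entries count recursive locks.
}

void OnMonitorExit(art::LockCountData& data, art::Thread* self, art::mirror::Object* obj) {
  data.RemoveMonitorOrThrow(self, obj);  // Throws IllegalMonitorStateException if unbalanced.
}

bool OnInterpretedMethodExit(art::LockCountData& data, art::Thread* self) {
  // Unlocks anything still held, throws, and returns false if structured locking was violated.
  return data.CheckAllMonitorsReleasedOrThrow(self);
}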
diff --git a/runtime/interpreter/shadow_frame.cc b/runtime/interpreter/shadow_frame.cc
new file mode 100644
index 0000000000..ab154cf767
--- /dev/null
+++ b/runtime/interpreter/shadow_frame.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "shadow_frame.h"
+
+#include "art_method-inl.h"
+
+namespace art {
+
+mirror::Object* ShadowFrame::GetThisObject() const {
+ ArtMethod* m = GetMethod();
+ if (m->IsStatic()) {
+ return nullptr;
+ } else if (m->IsNative()) {
+ return GetVRegReference(0);
+ } else {
+ const DexFile::CodeItem* code_item = m->GetCodeItem();
+ CHECK(code_item != nullptr) << ArtMethod::PrettyMethod(m);
+ uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
+ return GetVRegReference(reg);
+ }
+}
+
+mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
+ ArtMethod* m = GetMethod();
+ if (m->IsStatic()) {
+ return nullptr;
+ } else {
+ return GetVRegReference(NumberOfVRegs() - num_ins);
+ }
+}
+
+} // namespace art
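GetThisObject() relies on the dex calling convention: a method's incoming arguments occupy the last ins_size_ registers of its frame. For a non-static, non-native method with registers_size_ = 5 and ins_size_ = 3 (this plus two arguments), the receiver therefore lives in vreg 5 - 3 = 2 and is read with GetVRegReference(2); the num_ins overload computes the same index as NumberOfVRegs() - num_ins. Native methods have no code item, which is why they are special-cased to vreg 0.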
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
new file mode 100644
index 0000000000..69b2382cbc
--- /dev/null
+++ b/runtime/interpreter/shadow_frame.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_SHADOW_FRAME_H_
+#define ART_RUNTIME_INTERPRETER_SHADOW_FRAME_H_
+
+#include <cstring>
+#include <stdint.h>
+#include <string>
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "dex_file.h"
+#include "lock_count_data.h"
+#include "read_barrier.h"
+#include "stack_reference.h"
+#include "verify_object.h"
+
+namespace art {
+
+namespace mirror {
+ class Object;
+} // namespace mirror
+
+class ArtMethod;
+class ShadowFrame;
+class Thread;
+union JValue;
+
+// Forward declaration. Just calls the destructor.
+struct ShadowFrameDeleter;
+using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>;
+
+// ShadowFrame has 2 possible layouts:
+// - interpreter - separate VRegs and reference arrays. References are in the reference array.
+// - JNI - just VRegs, but where every VReg holds a reference.
+class ShadowFrame {
+ public:
+ // Compute size of ShadowFrame in bytes assuming it has a reference array.
+ static size_t ComputeSize(uint32_t num_vregs) {
+ return sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) +
+ (sizeof(StackReference<mirror::Object>) * num_vregs);
+ }
+
+ // Create ShadowFrame in heap for deoptimization.
+ static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link,
+ ArtMethod* method, uint32_t dex_pc) {
+ uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
+ return CreateShadowFrameImpl(num_vregs, link, method, dex_pc, memory);
+ }
+
+ // Delete a ShadowFrame allocated on the heap for deoptimization.
+ static void DeleteDeoptimizedFrame(ShadowFrame* sf) {
+ sf->~ShadowFrame(); // Explicitly destruct.
+ uint8_t* memory = reinterpret_cast<uint8_t*>(sf);
+ delete[] memory;
+ }
+
+ // Create a shadow frame in a fresh alloca. This needs to be in the context of the caller.
+ // Inlining doesn't work, the compiler will still undo the alloca. So this needs to be a macro.
+#define CREATE_SHADOW_FRAME(num_vregs, link, method, dex_pc) ({ \
+ size_t frame_size = ShadowFrame::ComputeSize(num_vregs); \
+ void* alloca_mem = alloca(frame_size); \
+ ShadowFrameAllocaUniquePtr( \
+ ShadowFrame::CreateShadowFrameImpl((num_vregs), (link), (method), (dex_pc), \
+ (alloca_mem))); \
+ })
+
+ ~ShadowFrame() {}
+
+  // TODO(iam): Clean up the references array since it is always there;
+  // we don't need the conditionals.
+ bool HasReferenceArray() const {
+ return true;
+ }
+
+ uint32_t NumberOfVRegs() const {
+ return number_of_vregs_;
+ }
+
+ uint32_t GetDexPC() const {
+ return (dex_pc_ptr_ == nullptr) ? dex_pc_ : dex_pc_ptr_ - code_item_->insns_;
+ }
+
+ int16_t GetCachedHotnessCountdown() const {
+ return cached_hotness_countdown_;
+ }
+
+ void SetCachedHotnessCountdown(int16_t cached_hotness_countdown) {
+ cached_hotness_countdown_ = cached_hotness_countdown;
+ }
+
+ int16_t GetHotnessCountdown() const {
+ return hotness_countdown_;
+ }
+
+ void SetHotnessCountdown(int16_t hotness_countdown) {
+ hotness_countdown_ = hotness_countdown;
+ }
+
+ void SetDexPC(uint32_t dex_pc) {
+ dex_pc_ = dex_pc;
+ dex_pc_ptr_ = nullptr;
+ }
+
+ ShadowFrame* GetLink() const {
+ return link_;
+ }
+
+ void SetLink(ShadowFrame* frame) {
+ DCHECK_NE(this, frame);
+ link_ = frame;
+ }
+
+ int32_t GetVReg(size_t i) const {
+ DCHECK_LT(i, NumberOfVRegs());
+ const uint32_t* vreg = &vregs_[i];
+ return *reinterpret_cast<const int32_t*>(vreg);
+ }
+
+ // Shorts are extended to Ints in VRegs. Interpreter intrinsics needs them as shorts.
+ int16_t GetVRegShort(size_t i) const {
+ return static_cast<int16_t>(GetVReg(i));
+ }
+
+ uint32_t* GetVRegAddr(size_t i) {
+ return &vregs_[i];
+ }
+
+ uint32_t* GetShadowRefAddr(size_t i) {
+ DCHECK(HasReferenceArray());
+ DCHECK_LT(i, NumberOfVRegs());
+ return &vregs_[i + NumberOfVRegs()];
+ }
+
+ void SetCodeItem(const DexFile::CodeItem* code_item) {
+ code_item_ = code_item;
+ }
+
+ const DexFile::CodeItem* GetCodeItem() const {
+ return code_item_;
+ }
+
+ float GetVRegFloat(size_t i) const {
+ DCHECK_LT(i, NumberOfVRegs());
+ // NOTE: Strict-aliasing?
+ const uint32_t* vreg = &vregs_[i];
+ return *reinterpret_cast<const float*>(vreg);
+ }
+
+ int64_t GetVRegLong(size_t i) const {
+ DCHECK_LT(i, NumberOfVRegs());
+ const uint32_t* vreg = &vregs_[i];
+ typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
+ return *reinterpret_cast<unaligned_int64*>(vreg);
+ }
+
+ double GetVRegDouble(size_t i) const {
+ DCHECK_LT(i, NumberOfVRegs());
+ const uint32_t* vreg = &vregs_[i];
+ typedef const double unaligned_double __attribute__ ((aligned (4)));
+ return *reinterpret_cast<unaligned_double*>(vreg);
+ }
+
+ // Look up the reference given its virtual register number.
+  // A non-null result does not mean the vreg currently holds a reference
+  // on non-moving collectors. Check that the raw value from GetVReg matches if not certain.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ mirror::Object* GetVRegReference(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK_LT(i, NumberOfVRegs());
+ mirror::Object* ref;
+ if (HasReferenceArray()) {
+ ref = References()[i].AsMirrorPtr();
+ } else {
+ const uint32_t* vreg_ptr = &vregs_[i];
+ ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
+ }
+ if (kUseReadBarrier) {
+ ReadBarrier::AssertToSpaceInvariant(ref);
+ }
+ if (kVerifyFlags & kVerifyReads) {
+ VerifyObject(ref);
+ }
+ return ref;
+ }
+
+ // Get view of vregs as range of consecutive arguments starting at i.
+ uint32_t* GetVRegArgs(size_t i) {
+ return &vregs_[i];
+ }
+
+ void SetVReg(size_t i, int32_t val) {
+ DCHECK_LT(i, NumberOfVRegs());
+ uint32_t* vreg = &vregs_[i];
+ *reinterpret_cast<int32_t*>(vreg) = val;
+ // This is needed for moving collectors since these can update the vreg references if they
+ // happen to agree with references in the reference array.
+ if (kMovingCollector && HasReferenceArray()) {
+ References()[i].Clear();
+ }
+ }
+
+ void SetVRegFloat(size_t i, float val) {
+ DCHECK_LT(i, NumberOfVRegs());
+ uint32_t* vreg = &vregs_[i];
+ *reinterpret_cast<float*>(vreg) = val;
+ // This is needed for moving collectors since these can update the vreg references if they
+ // happen to agree with references in the reference array.
+ if (kMovingCollector && HasReferenceArray()) {
+ References()[i].Clear();
+ }
+ }
+
+ void SetVRegLong(size_t i, int64_t val) {
+ DCHECK_LT(i, NumberOfVRegs());
+ uint32_t* vreg = &vregs_[i];
+ typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
+ *reinterpret_cast<unaligned_int64*>(vreg) = val;
+ // This is needed for moving collectors since these can update the vreg references if they
+ // happen to agree with references in the reference array.
+ if (kMovingCollector && HasReferenceArray()) {
+ References()[i].Clear();
+ References()[i + 1].Clear();
+ }
+ }
+
+ void SetVRegDouble(size_t i, double val) {
+ DCHECK_LT(i, NumberOfVRegs());
+ uint32_t* vreg = &vregs_[i];
+ typedef double unaligned_double __attribute__ ((aligned (4)));
+ *reinterpret_cast<unaligned_double*>(vreg) = val;
+ // This is needed for moving collectors since these can update the vreg references if they
+ // happen to agree with references in the reference array.
+ if (kMovingCollector && HasReferenceArray()) {
+ References()[i].Clear();
+ References()[i + 1].Clear();
+ }
+ }
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetVRegReference(size_t i, mirror::Object* val) REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK_LT(i, NumberOfVRegs());
+ if (kVerifyFlags & kVerifyWrites) {
+ VerifyObject(val);
+ }
+ if (kUseReadBarrier) {
+ ReadBarrier::AssertToSpaceInvariant(val);
+ }
+ uint32_t* vreg = &vregs_[i];
+ reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val);
+ if (HasReferenceArray()) {
+ References()[i].Assign(val);
+ }
+ }
+
+ void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_) {
+ DCHECK(method != nullptr);
+ DCHECK(method_ != nullptr);
+ method_ = method;
+ }
+
+ ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(method_ != nullptr);
+ return method_;
+ }
+
+ mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);
+
+ mirror::Object* GetThisObject(uint16_t num_ins) const REQUIRES_SHARED(Locks::mutator_lock_);
+
+ bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
+ if (HasReferenceArray()) {
+ return ((&References()[0] <= shadow_frame_entry_obj) &&
+ (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1])));
+ } else {
+ uint32_t* shadow_frame_entry = reinterpret_cast<uint32_t*>(shadow_frame_entry_obj);
+ return ((&vregs_[0] <= shadow_frame_entry) &&
+ (shadow_frame_entry <= (&vregs_[NumberOfVRegs() - 1])));
+ }
+ }
+
+ LockCountData& GetLockCountData() {
+ return lock_count_data_;
+ }
+
+ static size_t LockCountDataOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_);
+ }
+
+ static size_t LinkOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, link_);
+ }
+
+ static size_t MethodOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, method_);
+ }
+
+ static size_t DexPCOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
+ }
+
+ static size_t NumberOfVRegsOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
+ }
+
+ static size_t VRegsOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, vregs_);
+ }
+
+ static size_t ResultRegisterOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, result_register_);
+ }
+
+ static size_t DexPCPtrOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_);
+ }
+
+ static size_t CodeItemOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, code_item_);
+ }
+
+ static size_t CachedHotnessCountdownOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, cached_hotness_countdown_);
+ }
+
+ static size_t HotnessCountdownOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, hotness_countdown_);
+ }
+
+ // Create ShadowFrame for interpreter using provided memory.
+ static ShadowFrame* CreateShadowFrameImpl(uint32_t num_vregs,
+ ShadowFrame* link,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ void* memory) {
+ return new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
+ }
+
+ const uint16_t* GetDexPCPtr() {
+ return dex_pc_ptr_;
+ }
+
+ void SetDexPCPtr(uint16_t* dex_pc_ptr) {
+ dex_pc_ptr_ = dex_pc_ptr;
+ }
+
+ JValue* GetResultRegister() {
+ return result_register_;
+ }
+
+ private:
+ ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
+ uint32_t dex_pc, bool has_reference_array)
+ : link_(link),
+ method_(method),
+ result_register_(nullptr),
+ dex_pc_ptr_(nullptr),
+ code_item_(nullptr),
+ number_of_vregs_(num_vregs),
+ dex_pc_(dex_pc),
+ cached_hotness_countdown_(0),
+ hotness_countdown_(0) {
+ // TODO(iam): Remove this parameter, it's an artifact of portable removal
+ DCHECK(has_reference_array);
+ if (has_reference_array) {
+ memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
+ } else {
+ memset(vregs_, 0, num_vregs * sizeof(uint32_t));
+ }
+ }
+
+ const StackReference<mirror::Object>* References() const {
+ DCHECK(HasReferenceArray());
+ const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
+ return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end);
+ }
+
+ StackReference<mirror::Object>* References() {
+ return const_cast<StackReference<mirror::Object>*>(
+ const_cast<const ShadowFrame*>(this)->References());
+ }
+
+ // Link to previous shadow frame or null.
+ ShadowFrame* link_;
+ ArtMethod* method_;
+ JValue* result_register_;
+ const uint16_t* dex_pc_ptr_;
+ const DexFile::CodeItem* code_item_;
+ LockCountData lock_count_data_; // This may contain GC roots when lock counting is active.
+ const uint32_t number_of_vregs_;
+ uint32_t dex_pc_;
+ int16_t cached_hotness_countdown_;
+ int16_t hotness_countdown_;
+
+ // This is a two-part array:
+ // - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
+ // bytes.
+ // - [number_of_vregs..number_of_vregs*2) holds only reference registers. Each element here is
+ // ptr-sized.
+ // In other words when a primitive is stored in vX, the second (reference) part of the array will
+ // be null. When a reference is stored in vX, the second (reference) part of the array will be a
+ // copy of vX.
+ uint32_t vregs_[0];
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
+};
+
+struct ShadowFrameDeleter {
+ inline void operator()(ShadowFrame* frame) {
+ if (frame != nullptr) {
+ frame->~ShadowFrame();
+ }
+ }
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_SHADOW_FRAME_H_
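
The vregs_ comment in shadow_frame.h above describes a two-part layout: one 4-byte slot per virtual register plus a parallel reference-only slot, with every primitive write clearing the matching reference slot so a moving collector never patches a stale entry. A minimal standalone sketch of that idea (simplified types, fixed size, not the ART classes):

#include <array>
#include <cstddef>
#include <cstdint>

struct Obj {};  // stand-in for a heap object

template <size_t kNumVRegs>
class TwoPartFrame {
 public:
  void SetVReg(size_t i, uint32_t val) {
    prims_[i] = val;
    refs_[i] = nullptr;  // a primitive write invalidates the parallel reference slot
  }

  void SetVRegReference(size_t i, Obj* ref) {
    // Keep both halves in sync; the GC only scans (and may update) refs_.
    prims_[i] = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ref));  // assumes 32-bit refs
    refs_[i] = ref;
  }

  uint32_t GetVReg(size_t i) const { return prims_[i]; }
  Obj* GetVRegReference(size_t i) const { return refs_[i]; }

 private:
  std::array<uint32_t, kNumVRegs> prims_{};  // raw registers, always 4 bytes each
  std::array<Obj*, kNumVRegs> refs_{};       // reference-only shadow copy
};
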
diff --git a/runtime/java_frame_root_info.cc b/runtime/java_frame_root_info.cc
new file mode 100644
index 0000000000..dd3be5d415
--- /dev/null
+++ b/runtime/java_frame_root_info.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "java_frame_root_info.h"
+
+#include "stack.h"
+
+namespace art {
+
+void JavaFrameRootInfo::Describe(std::ostream& os) const {
+ const StackVisitor* visitor = stack_visitor_;
+ CHECK(visitor != nullptr);
+ os << "Type=" << GetType() << " thread_id=" << GetThreadId() << " location=" <<
+ visitor->DescribeLocation() << " vreg=" << vreg_;
+}
+
+} // namespace art
diff --git a/runtime/java_frame_root_info.h b/runtime/java_frame_root_info.h
new file mode 100644
index 0000000000..25ac6e2a31
--- /dev/null
+++ b/runtime/java_frame_root_info.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JAVA_FRAME_ROOT_INFO_H_
+#define ART_RUNTIME_JAVA_FRAME_ROOT_INFO_H_
+
+#include <iosfwd>
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc_root.h"
+
+namespace art {
+
+class StackVisitor;
+
+class JavaFrameRootInfo FINAL : public RootInfo {
+ public:
+ JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg)
+ : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
+ }
+ void Describe(std::ostream& os) const OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ size_t GetVReg() const {
+ return vreg_;
+ }
+ const StackVisitor* GetVisitor() const {
+ return stack_visitor_;
+ }
+
+ private:
+ const StackVisitor* const stack_visitor_;
+ const size_t vreg_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_JAVA_FRAME_ROOT_INFO_H_
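
JavaFrameRootInfo above only adds the vreg number on top of the generic RootInfo fields, and Describe() composes the two when a GC root is reported. A simplified stand-in for that pattern (not the ART classes):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <sstream>

class SimpleRootInfo {
 public:
  SimpleRootInfo(const char* type, uint32_t thread_id) : type_(type), thread_id_(thread_id) {}
  virtual ~SimpleRootInfo() {}
  virtual void Describe(std::ostream& os) const {
    os << "Type=" << type_ << " thread_id=" << thread_id_;
  }

 private:
  const char* const type_;
  const uint32_t thread_id_;
};

class SimpleJavaFrameRootInfo final : public SimpleRootInfo {
 public:
  SimpleJavaFrameRootInfo(uint32_t thread_id, size_t vreg)
      : SimpleRootInfo("JavaFrameRoot", thread_id), vreg_(vreg) {}
  void Describe(std::ostream& os) const override {
    SimpleRootInfo::Describe(os);  // generic part
    os << " vreg=" << vreg_;       // frame-specific part
  }

 private:
  const size_t vreg_;
};

int main() {
  std::ostringstream oss;
  SimpleJavaFrameRootInfo(/*thread_id*/ 7, /*vreg*/ 3).Describe(oss);
  std::cout << oss.str() << std::endl;  // Type=JavaFrameRoot thread_id=7 vreg=3
  return 0;
}
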
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2744c4f24a..1c36bde6ea 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -90,17 +90,17 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
// We could do PC-relative addressing to avoid this problem, but that
// would require reserving code and data area before submitting, which
// means more windows for the code memory to be RWX.
- MemMap* data_map = MemMap::MapAnonymous(
+ std::unique_ptr<MemMap> data_map(MemMap::MapAnonymous(
"data-code-cache", nullptr,
max_capacity,
- kProtAll,
+ kProtData,
/* low_4gb */ true,
/* reuse */ false,
&error_str,
- use_ashmem);
+ use_ashmem));
if (data_map == nullptr) {
std::ostringstream oss;
- oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
+ oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
*error_msg = oss.str();
return nullptr;
}
@@ -129,7 +129,7 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
code_size = initial_capacity - data_size;
DCHECK_EQ(code_size + data_size, initial_capacity);
return new JitCodeCache(
- code_map, data_map, code_size, data_size, max_capacity, garbage_collect_code);
+ code_map, data_map.release(), code_size, data_size, max_capacity, garbage_collect_code);
}
JitCodeCache::JitCodeCache(MemMap* code_map,
@@ -688,6 +688,57 @@ size_t JitCodeCache::CodeCacheSize() {
return CodeCacheSizeLocked();
}
+bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
+ MutexLock mu(Thread::Current(), lock_);
+ if (method->IsNative()) {
+ return false;
+ }
+
+ bool in_cache = false;
+ {
+ ScopedCodeCacheWrite ccw(code_map_.get());
+ for (auto code_iter = method_code_map_.begin(); code_iter != method_code_map_.end();) {
+ if (code_iter->second == method) {
+ if (release_memory) {
+ FreeCode(code_iter->first);
+ }
+ code_iter = method_code_map_.erase(code_iter);
+ in_cache = true;
+ continue;
+ }
+ ++code_iter;
+ }
+ }
+
+ bool osr = false;
+ auto code_map = osr_code_map_.find(method);
+ if (code_map != osr_code_map_.end()) {
+ osr_code_map_.erase(code_map);
+ osr = true;
+ }
+
+ if (!in_cache) {
+ return false;
+ }
+
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
+ if (info != nullptr) {
+ auto profile = std::find(profiling_infos_.begin(), profiling_infos_.end(), info);
+ DCHECK(profile != profiling_infos_.end());
+ profiling_infos_.erase(profile);
+ }
+ method->SetProfilingInfo(nullptr);
+ method->ClearCounter();
+ Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
+ method, GetQuickToInterpreterBridge());
+ VLOG(jit)
+ << "JIT removed (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
+ << ArtMethod::PrettyMethod(method) << "@" << method
+ << " ccache_size=" << PrettySize(CodeCacheSizeLocked())
+ << " dcache_size=" << PrettySize(DataCacheSizeLocked());
+ return true;
+}
+
// This notifies the code cache that the given method has been redefined and that it should remove
// any cached information it has on the method. All threads must be suspended before calling this
// method. The compiled code for the method (if there is any) must not be in any threads call stack.
@@ -1309,7 +1360,7 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
// If the inline cache is empty the compiler will generate a regular invoke virtual/interface.
if (method->GetCounter() < jit_compile_threshold) {
methods.emplace_back(/*ProfileMethodInfo*/
- dex_file, method->GetDexMethodIndex(), inline_caches);
+ MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
continue;
}
@@ -1366,7 +1417,7 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
}
}
methods.emplace_back(/*ProfileMethodInfo*/
- dex_file, method->GetDexMethodIndex(), inline_caches);
+ MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
}
}
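
RemoveMethod above erases matching entries from method_code_map_ while walking it; the safe idiom is to continue from the iterator that erase() returns instead of advancing a now-invalid one. A condensed sketch of that loop, using a plain std::map in place of the JIT's map type:

#include <cstddef>
#include <map>

struct Method {};  // stand-in for ArtMethod

// Removes every entry mapping to 'method' and returns how many were erased.
size_t RemoveEntriesFor(std::map<const void*, Method*>& code_map, Method* method) {
  size_t removed = 0;
  for (auto it = code_map.begin(); it != code_map.end();) {
    if (it->second == method) {
      it = code_map.erase(it);  // erase() hands back the next valid iterator
      ++removed;
      continue;
    }
    ++it;
  }
  return removed;
}
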
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 9ecc876716..daa1d616a6 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -171,6 +171,13 @@ class JitCodeCache {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Removes the method from the cache for testing purposes. The caller
+ // must ensure that all threads are suspended and that the method is
+ // not in any thread's stack.
+ bool RemoveMethod(ArtMethod* method, bool release_memory)
+ REQUIRES(!lock_)
+ REQUIRES(Locks::mutator_lock_);
+
// Remove all methods in our cache that were allocated by 'alloc'.
void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
REQUIRES(!lock_)
diff --git a/runtime/jit/profile_compilation_info-inl.h b/runtime/jit/profile_compilation_info-inl.h
deleted file mode 100644
index 8a067a5870..0000000000
--- a/runtime/jit/profile_compilation_info-inl.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_INL_H_
-#define ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_INL_H_
-
-#include "profile_compilation_info.h"
-
-namespace art {
-
-template <class Iterator>
-inline bool ProfileCompilationInfo::AddSampledMethodsForDex(bool startup,
- const DexFile* dex_file,
- Iterator index_begin,
- Iterator index_end) {
- DexFileData* data = GetOrAddDexFileData(dex_file);
- if (data == nullptr) {
- return false;
- }
- for (auto it = index_begin; it != index_end; ++it) {
- DCHECK_LT(*it, data->num_method_ids);
- data->AddSampledMethod(startup, *it);
- }
- return true;
-}
-
-template <class Iterator>
-inline bool ProfileCompilationInfo::AddHotMethodsForDex(const DexFile* dex_file,
- Iterator index_begin,
- Iterator index_end) {
- DexFileData* data = GetOrAddDexFileData(dex_file);
- if (data == nullptr) {
- return false;
- }
- for (auto it = index_begin; it != index_end; ++it) {
- DCHECK_LT(*it, data->num_method_ids);
- data->FindOrAddMethod(*it);
- }
- return true;
-}
-
-template <class Iterator>
-inline bool ProfileCompilationInfo::AddClassesForDex(const DexFile* dex_file,
- Iterator index_begin,
- Iterator index_end) {
- DexFileData* data = GetOrAddDexFileData(dex_file);
- if (data == nullptr) {
- return false;
- }
- data->class_set.insert(index_begin, index_end);
- return true;
-}
-
-} // namespace art
-
-#endif // ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_INL_H_
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index ea27d3b300..175563ab9c 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -68,6 +68,10 @@ static_assert(InlineCache::kIndividualCacheSize < kIsMegamorphicEncoding,
static_assert(InlineCache::kIndividualCacheSize < kIsMissingTypesEncoding,
"InlineCache::kIndividualCacheSize is larger than expected");
+static bool ChecksumMatch(uint32_t dex_file_checksum, uint32_t checksum) {
+ return kDebugIgnoreChecksum || dex_file_checksum == checksum;
+}
+
ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool)
: default_arena_pool_(),
arena_(custom_arena_pool),
@@ -132,29 +136,40 @@ std::string ProfileCompilationInfo::GetProfileDexFileKey(const std::string& dex_
}
}
-bool ProfileCompilationInfo::AddSampledMethod(bool startup,
- const std::string& dex_location,
- uint32_t checksum,
- uint16_t method_idx,
- uint32_t num_method_ids) {
+bool ProfileCompilationInfo::AddMethodIndex(MethodHotness::Flag flags, const MethodReference& ref) {
+ DexFileData* data = GetOrAddDexFileData(ref.dex_file);
+ if (data == nullptr) {
+ return false;
+ }
+ data->AddMethod(flags, ref.dex_method_index);
+ return true;
+}
+
+bool ProfileCompilationInfo::AddMethodIndex(MethodHotness::Flag flags,
+ const std::string& dex_location,
+ uint32_t checksum,
+ uint16_t method_idx,
+ uint32_t num_method_ids) {
DexFileData* data = GetOrAddDexFileData(GetProfileDexFileKey(dex_location),
checksum,
num_method_ids);
if (data == nullptr) {
return false;
}
- data->AddSampledMethod(startup, method_idx);
+ data->AddMethod(flags, method_idx);
return true;
}
-bool ProfileCompilationInfo::AddMethodsAndClasses(
- const std::vector<ProfileMethodInfo>& methods,
- const std::set<DexCacheResolvedClasses>& resolved_classes) {
+bool ProfileCompilationInfo::AddMethods(const std::vector<ProfileMethodInfo>& methods) {
for (const ProfileMethodInfo& method : methods) {
if (!AddMethod(method)) {
return false;
}
}
+ return true;
+}
+
+bool ProfileCompilationInfo::AddClasses(const std::set<DexCacheResolvedClasses>& resolved_classes) {
for (const DexCacheResolvedClasses& dex_cache : resolved_classes) {
if (!AddResolvedClasses(dex_cache)) {
return false;
@@ -165,18 +180,20 @@ bool ProfileCompilationInfo::AddMethodsAndClasses(
bool ProfileCompilationInfo::Load(const std::string& filename, bool clear_if_invalid) {
ScopedTrace trace(__PRETTY_FUNCTION__);
- ScopedFlock flock;
std::string error;
int flags = O_RDWR | O_NOFOLLOW | O_CLOEXEC;
// There's no need to fsync profile data right away. We get many chances
// to write it again in case something goes wrong. We can rely on a simple
// close(), no sync, and let to the kernel decide when to write to disk.
- if (!flock.Init(filename.c_str(), flags, /*block*/false, /*flush_on_close*/false, &error)) {
+ ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
+ /*block*/false, &error);
+
+ if (profile_file.get() == nullptr) {
LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
return false;
}
- int fd = flock.GetFile()->Fd();
+ int fd = profile_file->Fd();
ProfileLoadSatus status = LoadInternal(fd, &error);
if (status == kProfileLoadSuccess) {
@@ -187,7 +204,7 @@ bool ProfileCompilationInfo::Load(const std::string& filename, bool clear_if_inv
((status == kProfileLoadVersionMismatch) || (status == kProfileLoadBadData))) {
LOG(WARNING) << "Clearing bad or obsolete profile data from file "
<< filename << ": " << error;
- if (flock.GetFile()->ClearContent()) {
+ if (profile_file->ClearContent()) {
return true;
} else {
PLOG(WARNING) << "Could not clear profile file: " << filename;
@@ -201,21 +218,22 @@ bool ProfileCompilationInfo::Load(const std::string& filename, bool clear_if_inv
bool ProfileCompilationInfo::Save(const std::string& filename, uint64_t* bytes_written) {
ScopedTrace trace(__PRETTY_FUNCTION__);
- ScopedFlock flock;
std::string error;
int flags = O_WRONLY | O_NOFOLLOW | O_CLOEXEC;
// There's no need to fsync profile data right away. We get many chances
// to write it again in case something goes wrong. We can rely on a simple
// close(), no sync, and let to the kernel decide when to write to disk.
- if (!flock.Init(filename.c_str(), flags, /*block*/false, /*flush_on_close*/false, &error)) {
+ ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
+ /*block*/false, &error);
+ if (profile_file.get() == nullptr) {
LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
return false;
}
- int fd = flock.GetFile()->Fd();
+ int fd = profile_file->Fd();
// We need to clear the data because we don't support appending to the profiles yet.
- if (!flock.GetFile()->ClearContent()) {
+ if (!profile_file->ClearContent()) {
PLOG(WARNING) << "Could not clear profile file: " << filename;
return false;
}
@@ -533,8 +551,7 @@ ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData
DCHECK_EQ(num_method_ids, result->num_method_ids);
// Check that the checksum matches.
- // This may different if for example the dex file was updated and
- // we had a record of the old one.
+ // This may differ if, for example, the dex file was updated and we had a record of the old one.
if (result->checksum != checksum) {
LOG(WARNING) << "Checksum mismatch for dex " << profile_key;
return nullptr;
@@ -543,7 +560,9 @@ ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData
}
const ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::FindDexData(
- const std::string& profile_key) const {
+ const std::string& profile_key,
+ uint32_t checksum,
+ bool verify_checksum) const {
const auto profile_index_it = profile_key_map_.find(profile_key);
if (profile_index_it == profile_key_map_.end()) {
return nullptr;
@@ -551,6 +570,9 @@ const ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::FindDexData(
uint8_t profile_index = profile_index_it->second;
const DexFileData* result = info_[profile_index];
+ if (verify_checksum && !ChecksumMatch(result->checksum, checksum)) {
+ return nullptr;
+ }
DCHECK_EQ(profile_key, result->profile_key);
DCHECK_EQ(profile_index, result->profile_index);
return result;
@@ -567,17 +589,6 @@ bool ProfileCompilationInfo::AddResolvedClasses(const DexCacheResolvedClasses& c
return true;
}
-bool ProfileCompilationInfo::AddMethodIndex(const std::string& dex_location,
- uint32_t dex_checksum,
- uint16_t method_index,
- uint32_t num_method_ids) {
- return AddMethod(dex_location,
- dex_checksum,
- method_index,
- num_method_ids,
- OfflineProfileMethodInfo(nullptr));
-}
-
bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
uint32_t dex_checksum,
uint16_t method_index,
@@ -630,11 +641,11 @@ bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
}
bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi) {
- DexFileData* const data = GetOrAddDexFileData(pmi.dex_file);
+ DexFileData* const data = GetOrAddDexFileData(pmi.ref.dex_file);
if (data == nullptr) { // checksum mismatch
return false;
}
- InlineCacheMap* inline_cache = data->FindOrAddMethod(pmi.dex_method_index);
+ InlineCacheMap* inline_cache = data->FindOrAddMethod(pmi.ref.dex_method_index);
for (const ProfileMethodInfo::ProfileInlineCache& cache : pmi.inline_caches) {
if (cache.is_missing_types) {
@@ -1143,7 +1154,9 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other) {
// Note that the number of elements should be very small, so this should not
// be a performance issue.
for (const DexFileData* other_dex_data : other.info_) {
- const DexFileData* dex_data = FindDexData(other_dex_data->profile_key);
+ const DexFileData* dex_data = FindDexData(other_dex_data->profile_key,
+ 0u,
+ /* verify_checksum */ false);
if ((dex_data != nullptr) && (dex_data->checksum != other_dex_data->checksum)) {
LOG(WARNING) << "Checksum mismatch for dex " << other_dex_data->profile_key;
return false;
@@ -1174,7 +1187,8 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other) {
// Merge the actual profile data.
for (const DexFileData* other_dex_data : other.info_) {
- DexFileData* dex_data = const_cast<DexFileData*>(FindDexData(other_dex_data->profile_key));
+ DexFileData* dex_data = const_cast<DexFileData*>(FindDexData(other_dex_data->profile_key,
+ other_dex_data->checksum));
DCHECK(dex_data != nullptr);
// Merge the classes.
@@ -1203,68 +1217,41 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other) {
}
}
- // Merge the bitmaps.
+ // Merge the method bitmaps.
dex_data->MergeBitmap(*other_dex_data);
}
return true;
}
-static bool ChecksumMatch(uint32_t dex_file_checksum, uint32_t checksum) {
- return kDebugIgnoreChecksum || dex_file_checksum == checksum;
-}
-
-static bool ChecksumMatch(const DexFile& dex_file, uint32_t checksum) {
- return ChecksumMatch(dex_file.GetLocationChecksum(), checksum);
-}
-
-bool ProfileCompilationInfo::IsStartupOrHotMethod(const MethodReference& method_ref) const {
- return IsStartupOrHotMethod(method_ref.dex_file->GetLocation(),
- method_ref.dex_file->GetLocationChecksum(),
- method_ref.dex_method_index);
+const ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::FindDexData(
+ const DexFile* dex_file) const {
+ return FindDexData(GetProfileDexFileKey(dex_file->GetLocation()),
+ dex_file->GetLocationChecksum());
}
-bool ProfileCompilationInfo::IsStartupOrHotMethod(const std::string& dex_location,
- uint32_t dex_checksum,
- uint16_t dex_method_index) const {
- const DexFileData* dex_data = FindDexData(GetProfileDexFileKey(dex_location));
- if (dex_data == nullptr || !ChecksumMatch(dex_checksum, dex_data->checksum)) {
- return false;
- }
- if (dex_data->HasSampledMethod(/*startup*/ true, dex_method_index)) {
- return true;
- }
- const MethodMap& methods = dex_data->method_map;
- const auto method_it = methods.find(dex_method_index);
- return method_it != methods.end();
+ProfileCompilationInfo::MethodHotness ProfileCompilationInfo::GetMethodHotness(
+ const MethodReference& method_ref) const {
+ const DexFileData* dex_data = FindDexData(method_ref.dex_file);
+ return dex_data != nullptr
+ ? dex_data->GetHotnessInfo(method_ref.dex_method_index)
+ : MethodHotness();
}
-bool ProfileCompilationInfo::ContainsHotMethod(const MethodReference& method_ref) const {
- return FindMethod(method_ref.dex_file->GetLocation(),
- method_ref.dex_file->GetLocationChecksum(),
- method_ref.dex_method_index) != nullptr;
+ProfileCompilationInfo::MethodHotness ProfileCompilationInfo::GetMethodHotness(
+ const std::string& dex_location,
+ uint32_t dex_checksum,
+ uint16_t dex_method_index) const {
+ const DexFileData* dex_data = FindDexData(GetProfileDexFileKey(dex_location), dex_checksum);
+ return dex_data != nullptr ? dex_data->GetHotnessInfo(dex_method_index) : MethodHotness();
}
-const ProfileCompilationInfo::InlineCacheMap*
-ProfileCompilationInfo::FindMethod(const std::string& dex_location,
- uint32_t dex_checksum,
- uint16_t dex_method_index) const {
- const DexFileData* dex_data = FindDexData(GetProfileDexFileKey(dex_location));
- if (dex_data != nullptr) {
- if (!ChecksumMatch(dex_checksum, dex_data->checksum)) {
- return nullptr;
- }
- const MethodMap& methods = dex_data->method_map;
- const auto method_it = methods.find(dex_method_index);
- return method_it == methods.end() ? nullptr : &(method_it->second);
- }
- return nullptr;
-}
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> ProfileCompilationInfo::GetMethod(
- const std::string& dex_location,
- uint32_t dex_checksum,
- uint16_t dex_method_index) const {
- const InlineCacheMap* inline_caches = FindMethod(dex_location, dex_checksum, dex_method_index);
+ const std::string& dex_location,
+ uint32_t dex_checksum,
+ uint16_t dex_method_index) const {
+ MethodHotness hotness(GetMethodHotness(dex_location, dex_checksum, dex_method_index));
+ const InlineCacheMap* inline_caches = hotness.GetInlineCacheMap();
if (inline_caches == nullptr) {
return nullptr;
}
@@ -1283,11 +1270,8 @@ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> ProfileCompila
bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file, dex::TypeIndex type_idx) const {
- const DexFileData* dex_data = FindDexData(GetProfileDexFileKey(dex_file.GetLocation()));
+ const DexFileData* dex_data = FindDexData(&dex_file);
if (dex_data != nullptr) {
- if (!ChecksumMatch(dex_file, dex_data->checksum)) {
- return false;
- }
const ArenaSet<dex::TypeIndex>& classes = dex_data->class_set;
return classes.find(type_idx) != classes.end();
}
@@ -1386,7 +1370,8 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
while (true) {
os << "\n\t" << (startup ? "startup methods: " : "post startup methods: ");
for (uint32_t method_idx = 0; method_idx < dex_data->num_method_ids; ++method_idx) {
- if (dex_data->HasSampledMethod(startup, method_idx)) {
+ MethodHotness hotness_info(dex_data->GetHotnessInfo(method_idx));
+ if (startup ? hotness_info.IsStartup() : hotness_info.IsPostStartup()) {
os << method_idx << ", ";
}
}
@@ -1414,19 +1399,19 @@ bool ProfileCompilationInfo::GetClassesAndMethods(
/*out*/std::set<uint16_t>* startup_method_set,
/*out*/std::set<uint16_t>* post_startup_method_method_set) const {
std::set<std::string> ret;
- std::string profile_key = GetProfileDexFileKey(dex_file.GetLocation());
- const DexFileData* dex_data = FindDexData(profile_key);
- if (dex_data == nullptr || dex_data->checksum != dex_file.GetLocationChecksum()) {
+ const DexFileData* dex_data = FindDexData(&dex_file);
+ if (dex_data == nullptr) {
return false;
}
for (const auto& it : dex_data->method_map) {
hot_method_set->insert(it.first);
}
for (uint32_t method_idx = 0; method_idx < dex_data->num_method_ids; ++method_idx) {
- if (dex_data->HasSampledMethod(/*startup*/ true, method_idx)) {
+ MethodHotness hotness = dex_data->GetHotnessInfo(method_idx);
+ if (hotness.IsStartup()) {
startup_method_set->insert(method_idx);
}
- if (dex_data->HasSampledMethod(/*startup*/ false, method_idx)) {
+ if (hotness.IsPostStartup()) {
post_startup_method_method_set->insert(method_idx);
}
}
@@ -1511,7 +1496,11 @@ bool ProfileCompilationInfo::GenerateTestProfile(int fd,
if (m < (number_of_methods / kFavorSplit)) {
method_idx %= kFavorFirstN;
}
- info.AddMethodIndex(profile_key, 0, method_idx, max_method);
+ info.AddMethodIndex(MethodHotness::kFlagHot,
+ profile_key,
+ /*checksum*/ 0,
+ method_idx,
+ max_method);
}
for (uint16_t c = 0; c < number_of_classes; c++) {
@@ -1540,14 +1529,14 @@ bool ProfileCompilationInfo::GenerateTestProfile(
if (std::rand() % 2 != 0) {
info.AddClassIndex(location,
checksum,
- dex::TypeIndex(dex_file->GetClassDef(i).class_idx_),
+ dex_file->GetClassDef(i).class_idx_,
dex_file->NumMethodIds());
}
}
for (uint32_t i = 0; i < dex_file->NumMethodIds(); ++i) {
// Randomly add a method from the dex file (with 50% chance).
if (std::rand() % 2 != 0) {
- info.AddMethodIndex(location, checksum, i, dex_file->NumMethodIds());
+ info.AddMethodIndex(MethodHotness::kFlagHot, MethodReference(dex_file.get(), i));
}
}
}
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index bd1b9d651f..b2d541f896 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -49,19 +49,14 @@ struct ProfileMethodInfo {
const std::vector<TypeReference> classes;
};
- ProfileMethodInfo(const DexFile* dex, uint32_t method_index)
- : dex_file(dex), dex_method_index(method_index) {}
-
- ProfileMethodInfo(const DexFile* dex,
- uint32_t method_index,
- const std::vector<ProfileInlineCache>& caches)
- : dex_file(dex),
- dex_method_index(method_index),
+ explicit ProfileMethodInfo(MethodReference reference) : ref(reference) {}
+
+ ProfileMethodInfo(MethodReference reference, const std::vector<ProfileInlineCache>& caches)
+ : ref(reference),
inline_caches(caches) {}
- const DexFile* dex_file;
- const uint32_t dex_method_index;
- const std::vector<ProfileInlineCache> inline_caches;
+ MethodReference ref;
+ std::vector<ProfileInlineCache> inline_caches;
};
/**
@@ -173,6 +168,53 @@ class ProfileCompilationInfo {
// Maps a method dex index to its inline cache.
using MethodMap = ArenaSafeMap<uint16_t, InlineCacheMap>;
+ // Profile method hotness information for a single method. Also includes a pointer to the inline
+ // cache map.
+ class MethodHotness {
+ public:
+ enum Flag {
+ kFlagHot = 0x1,
+ kFlagStartup = 0x2,
+ kFlagPostStartup = 0x4,
+ };
+
+ bool IsHot() const {
+ return (flags_ & kFlagHot) != 0;
+ }
+
+ bool IsStartup() const {
+ return (flags_ & kFlagStartup) != 0;
+ }
+
+ bool IsPostStartup() const {
+ return (flags_ & kFlagPostStartup) != 0;
+ }
+
+ void AddFlag(Flag flag) {
+ flags_ |= flag;
+ }
+
+ uint8_t GetFlags() const {
+ return flags_;
+ }
+
+ bool HasAnyFlags() const {
+ return flags_ != 0;
+ }
+
+ const InlineCacheMap* GetInlineCacheMap() const {
+ return inline_cache_map_;
+ }
+
+ void SetInlineCacheMap(const InlineCacheMap* info) {
+ inline_cache_map_ = info;
+ }
+
+ private:
+ const InlineCacheMap* inline_cache_map_ = nullptr;
+ uint8_t flags_ = 0;
+ };
+
// Encodes the full set of inline caches for a given method.
// The dex_references vector is indexed according to the ClassReference::dex_profile_index.
// i.e. the dex file of any ClassReference present in the inline caches can be found at
@@ -193,42 +235,53 @@ class ProfileCompilationInfo {
~ProfileCompilationInfo();
- // Add the given methods and classes to the current profile object.
- bool AddMethodsAndClasses(const std::vector<ProfileMethodInfo>& methods,
- const std::set<DexCacheResolvedClasses>& resolved_classes);
+ // Add the given methods to the current profile object.
+ bool AddMethods(const std::vector<ProfileMethodInfo>& methods);
- // Iterator is type for ids not class defs.
+ // Add the given classes to the current profile object.
+ bool AddClasses(const std::set<DexCacheResolvedClasses>& resolved_classes);
+
+ // Add multiple type ids for classes in a single dex file. Iterator is for type_ids not
+ // class_defs.
template <class Iterator>
- bool AddClassesForDex(const DexFile* dex_file, Iterator index_begin, Iterator index_end);
+ bool AddClassesForDex(const DexFile* dex_file, Iterator index_begin, Iterator index_end) {
+ DexFileData* data = GetOrAddDexFileData(dex_file);
+ if (data == nullptr) {
+ return false;
+ }
+ data->class_set.insert(index_begin, index_end);
+ return true;
+ }
- // Add a method index to the profile (without inline caches).
- bool AddMethodIndex(const std::string& dex_location,
+ // Add a method index to the profile (without inline caches). The method flags determine whether
+ // it is hot, startup, post startup, or a combination of these.
+ bool AddMethodIndex(MethodHotness::Flag flags,
+ const std::string& dex_location,
uint32_t checksum,
uint16_t method_idx,
uint32_t num_method_ids);
+ bool AddMethodIndex(MethodHotness::Flag flags, const MethodReference& ref);
// Add a method to the profile using its online representation (containing runtime structures).
bool AddMethod(const ProfileMethodInfo& pmi);
- // Add methods that have samples but are are not necessarily hot. These are partitioned into two
- // possibly intersecting sets startup and post startup.
- bool AddSampledMethod(bool startup,
- const std::string& dex_location,
- uint32_t checksum,
- uint16_t method_idx,
- uint32_t num_method_ids);
- // Bulk add sampled methods for a single dex, fast since it only has one GetOrAddDexFileData call.
- template <class Iterator>
- bool AddSampledMethodsForDex(bool startup,
- const DexFile* dex_file,
- Iterator index_begin,
- Iterator index_end);
-
- // Bulk add hot methods for a single dex, fast since it only has one GetOrAddDexFileData call.
+ // Bulk add sampled methods and/or hot methods for a single dex, fast since it only has one
+ // GetOrAddDexFileData call.
template <class Iterator>
- bool AddHotMethodsForDex(const DexFile* dex_file,
- Iterator index_begin,
- Iterator index_end);
+ bool AddMethodsForDex(MethodHotness::Flag flags,
+ const DexFile* dex_file,
+ Iterator index_begin,
+ Iterator index_end) {
+ DexFileData* data = GetOrAddDexFileData(dex_file);
+ if (data == nullptr) {
+ return false;
+ }
+ for (Iterator it = index_begin; it != index_end; ++it) {
+ DCHECK_LT(*it, data->num_method_ids);
+ data->AddMethod(flags, *it);
+ }
+ return true;
+ }
// Load profile information from the given file descriptor.
// If the current profile is non-empty the load will fail.
@@ -255,14 +308,11 @@ class ProfileCompilationInfo {
// Return the number of resolved classes that were profiled.
uint32_t GetNumberOfResolvedClasses() const;
- // Return true if the method reference is a hot or startup method in the profiling info.
- bool IsStartupOrHotMethod(const MethodReference& method_ref) const;
- bool IsStartupOrHotMethod(const std::string& dex_location,
- uint32_t dex_checksum,
- uint16_t dex_method_index) const;
-
- // Return true if the method reference iS present and hot in the profiling info.
- bool ContainsHotMethod(const MethodReference& method_ref) const;
+ // Returns the profile method info for a given method reference.
+ MethodHotness GetMethodHotness(const MethodReference& method_ref) const;
+ MethodHotness GetMethodHotness(const std::string& dex_location,
+ uint32_t dex_checksum,
+ uint16_t dex_method_index) const;
// Return true if the class's type is present in the profiling info.
bool ContainsClass(const DexFile& dex_file, dex::TypeIndex type_idx) const;
@@ -358,7 +408,7 @@ class ProfileCompilationInfo {
class_set(std::less<dex::TypeIndex>(), arena->Adapter(kArenaAllocProfile)),
num_method_ids(num_methods),
bitmap_storage(arena->Adapter(kArenaAllocProfile)) {
- const size_t num_bits = num_method_ids * kBitmapCount;
+ const size_t num_bits = num_method_ids * kBitmapIndexCount;
bitmap_storage.resize(RoundUp(num_bits, kBitsPerByte) / kBitsPerByte);
if (!bitmap_storage.empty()) {
method_bitmap =
@@ -371,12 +421,18 @@ class ProfileCompilationInfo {
}
// Mark a method as executed at least once.
- void AddSampledMethod(bool startup, size_t index) {
- method_bitmap.StoreBit(MethodBitIndex(startup, index), true);
- }
-
- bool HasSampledMethod(bool startup, size_t index) const {
- return method_bitmap.LoadBit(MethodBitIndex(startup, index));
+ void AddMethod(MethodHotness::Flag flags, size_t index) {
+ if ((flags & MethodHotness::kFlagStartup) != 0) {
+ method_bitmap.StoreBit(MethodBitIndex(/*startup*/ true, index), /*value*/ true);
+ }
+ if ((flags & MethodHotness::kFlagPostStartup) != 0) {
+ method_bitmap.StoreBit(MethodBitIndex(/*startup*/ false, index), /*value*/ true);
+ }
+ if ((flags & MethodHotness::kFlagHot) != 0) {
+ method_map.FindOrAdd(
+ index,
+ InlineCacheMap(std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+ }
}
void MergeBitmap(const DexFileData& other) {
@@ -386,6 +442,22 @@ class ProfileCompilationInfo {
}
}
+ MethodHotness GetHotnessInfo(uint32_t dex_method_index) const {
+ MethodHotness ret;
+ if (method_bitmap.LoadBit(MethodBitIndex(/*startup*/ true, dex_method_index))) {
+ ret.AddFlag(MethodHotness::kFlagStartup);
+ }
+ if (method_bitmap.LoadBit(MethodBitIndex(/*startup*/ false, dex_method_index))) {
+ ret.AddFlag(MethodHotness::kFlagPostStartup);
+ }
+ auto it = method_map.find(dex_method_index);
+ if (it != method_map.end()) {
+ ret.SetInlineCacheMap(&it->second);
+ ret.AddFlag(MethodHotness::kFlagHot);
+ }
+ return ret;
+ }
+
// The arena used to allocate new inline cache maps.
ArenaAllocator* arena_;
// The profile key this data belongs to.
@@ -409,9 +481,9 @@ class ProfileCompilationInfo {
private:
enum BitmapIndex {
- kBitmapStartup,
- kBitmapPostStartup,
- kBitmapCount,
+ kBitmapIndexStartup,
+ kBitmapIndexPostStartup,
+ kBitmapIndexCount,
};
size_t MethodBitIndex(bool startup, size_t index) const {
@@ -420,8 +492,8 @@ class ProfileCompilationInfo {
// This compresses better than ([startup bit][post statup bit])*
return index + (startup
- ? kBitmapStartup * num_method_ids
- : kBitmapPostStartup * num_method_ids);
+ ? kBitmapIndexStartup * num_method_ids
+ : kBitmapIndexPostStartup * num_method_ids);
}
};
@@ -454,19 +526,19 @@ class ProfileCompilationInfo {
// Add all classes from the given dex cache to the the profile.
bool AddResolvedClasses(const DexCacheResolvedClasses& classes);
- // Search for the given method in the profile.
- // If found, its inline cache map is returned, otherwise the method returns null.
- const InlineCacheMap* FindMethod(const std::string& dex_location,
- uint32_t dex_checksum,
- uint16_t dex_method_index) const;
-
// Encode the known dex_files into a vector. The index of a dex_reference will
// be the same as the profile index of the dex file (used to encode the ClassReferences).
void DexFileToProfileIndex(/*out*/std::vector<DexReference>* dex_references) const;
// Return the dex data associated with the given profile key or null if the profile
// doesn't contain the key.
- const DexFileData* FindDexData(const std::string& profile_key) const;
+ const DexFileData* FindDexData(const std::string& profile_key,
+ uint32_t checksum,
+ bool verify_checksum = true) const;
+
+ // Return the dex data associated with the given dex file or null if the profile doesn't contain
+ // the key or the checksum mismatches.
+ const DexFileData* FindDexData(const DexFile* dex_file) const;
// Checks if the profile is empty.
bool IsEmpty() const;
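
DexFileData above keeps one startup bit and one post-startup bit per method id, laid out region by region (all startup bits, then all post-startup bits) because that compresses better than interleaving. A small sketch of the same indexing, assuming a plain std::vector<bool> in place of the arena-backed bitmap:

#include <cstddef>
#include <vector>

// Sketch of the per-dex hotness bitmap layout: bit index = method_index + region * num_method_ids.
class MethodFlagBitmap {
 public:
  explicit MethodFlagBitmap(size_t num_method_ids)
      : num_method_ids_(num_method_ids), bits_(2 * num_method_ids, false) {}

  void SetStartup(size_t method_index) { bits_[Index(/*startup*/ true, method_index)] = true; }
  void SetPostStartup(size_t method_index) { bits_[Index(/*startup*/ false, method_index)] = true; }
  bool IsStartup(size_t method_index) const { return bits_[Index(true, method_index)]; }
  bool IsPostStartup(size_t method_index) const { return bits_[Index(false, method_index)]; }

 private:
  size_t Index(bool startup, size_t method_index) const {
    return method_index + (startup ? 0 : num_method_ids_);
  }

  const size_t num_method_ids_;
  std::vector<bool> bits_;
};
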
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index 39670afb0c..c3a34156ad 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -32,6 +32,8 @@
namespace art {
+using Hotness = ProfileCompilationInfo::MethodHotness;
+
static constexpr size_t kMaxMethodIds = 65535;
class ProfileCompilationInfoTest : public CommonRuntimeTest {
@@ -63,7 +65,11 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
uint32_t checksum,
uint16_t method_index,
ProfileCompilationInfo* info) {
- return info->AddMethodIndex(dex_location, checksum, method_index, kMaxMethodIds);
+ return info->AddMethodIndex(Hotness::kFlagHot,
+ dex_location,
+ checksum,
+ method_index,
+ kMaxMethodIds);
}
bool AddMethod(const std::string& dex_location,
@@ -76,9 +82,11 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
bool AddClass(const std::string& dex_location,
uint32_t checksum,
- uint16_t class_index,
+ dex::TypeIndex type_index,
ProfileCompilationInfo* info) {
- return info->AddMethodIndex(dex_location, checksum, class_index, kMaxMethodIds);
+ DexCacheResolvedClasses classes(dex_location, dex_location, checksum, kMaxMethodIds);
+ classes.AddClass(type_index);
+ return info->AddClasses({classes});
}
uint32_t GetFd(const ScratchFile& file) {
@@ -93,9 +101,10 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
std::vector<ProfileMethodInfo> profile_methods;
ScopedObjectAccess soa(Thread::Current());
for (ArtMethod* method : methods) {
- profile_methods.emplace_back(method->GetDexFile(), method->GetDexMethodIndex());
+ profile_methods.emplace_back(
+ MethodReference(method->GetDexFile(), method->GetDexMethodIndex()));
}
- if (!info.AddMethodsAndClasses(profile_methods, resolved_classes)) {
+ if (!info.AddMethods(profile_methods) || !info.AddClasses(resolved_classes)) {
return false;
}
if (info.GetNumberOfMethods() != profile_methods.size()) {
@@ -151,17 +160,14 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
std::vector<TypeReference> classes;
caches.emplace_back(dex_pc, /*is_missing_types*/true, classes);
}
- ProfileMethodInfo pmi(method->GetDexFile(),
- method->GetDexMethodIndex(),
+ ProfileMethodInfo pmi(MethodReference(method->GetDexFile(),
+ method->GetDexMethodIndex()),
caches);
profile_methods.push_back(pmi);
profile_methods_map->Put(method, pmi);
}
- if (!info.AddMethodsAndClasses(profile_methods, std::set<DexCacheResolvedClasses>())) {
- return false;
- }
- if (info.GetNumberOfMethods() != profile_methods.size()) {
+ if (!info.AddMethods(profile_methods) || info.GetNumberOfMethods() != profile_methods.size()) {
return false;
}
return info.Save(filename, nullptr);
@@ -298,8 +304,8 @@ TEST_F(ProfileCompilationInfoTest, SaveArtMethods) {
{
ScopedObjectAccess soa(self);
for (ArtMethod* m : main_methods) {
- ASSERT_TRUE(info1.ContainsHotMethod(
- MethodReference(m->GetDexFile(), m->GetDexMethodIndex())));
+ ASSERT_TRUE(info1.GetMethodHotness(
+ MethodReference(m->GetDexFile(), m->GetDexMethodIndex())).IsHot());
}
}
@@ -316,11 +322,11 @@ TEST_F(ProfileCompilationInfoTest, SaveArtMethods) {
ScopedObjectAccess soa(self);
for (ArtMethod* m : main_methods) {
ASSERT_TRUE(
- info2.ContainsHotMethod(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())));
+ info2.GetMethodHotness(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())).IsHot());
}
for (ArtMethod* m : second_methods) {
ASSERT_TRUE(
- info2.ContainsHotMethod(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())));
+ info2.GetMethodHotness(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())).IsHot());
}
}
}
@@ -392,8 +398,8 @@ TEST_F(ProfileCompilationInfoTest, SaveMaxMethods) {
}
// Save the maximum number of classes
for (uint16_t i = 0; i < std::numeric_limits<uint16_t>::max(); i++) {
- ASSERT_TRUE(AddClass("dex_location1", /* checksum */ 1, /* class_idx */ i, &saved_info));
- ASSERT_TRUE(AddClass("dex_location2", /* checksum */ 2, /* class_idx */ i, &saved_info));
+ ASSERT_TRUE(AddClass("dex_location1", /* checksum */ 1, dex::TypeIndex(i), &saved_info));
+ ASSERT_TRUE(AddClass("dex_location2", /* checksum */ 2, dex::TypeIndex(i), &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -667,7 +673,7 @@ TEST_F(ProfileCompilationInfoTest, SaveArtMethodsWithInlineCaches) {
ScopedObjectAccess soa(self);
for (ArtMethod* m : main_methods) {
ASSERT_TRUE(
- info.ContainsHotMethod(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())));
+ info.GetMethodHotness(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())).IsHot());
const ProfileMethodInfo& pmi = profile_methods_map.find(m)->second;
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> offline_pmi =
info.GetMethod(m->GetDexFile()->GetLocation(),
@@ -857,18 +863,19 @@ TEST_F(ProfileCompilationInfoTest, SampledMethodsTest) {
static constexpr size_t kChecksum2 = 4321;
static const std::string kDex1 = "dex1";
static const std::string kDex2 = "dex2";
- test_info.AddSampledMethod(true, kDex1, kChecksum1, 1, kNumMethods);
- test_info.AddSampledMethod(true, kDex1, kChecksum1, 5, kNumMethods);
- test_info.AddSampledMethod(false, kDex2, kChecksum2, 1, kNumMethods);
- test_info.AddSampledMethod(false, kDex2, kChecksum2, 5, kNumMethods);
+ test_info.AddMethodIndex(Hotness::kFlagStartup, kDex1, kChecksum1, 1, kNumMethods);
+ test_info.AddMethodIndex(Hotness::kFlagPostStartup, kDex1, kChecksum1, 5, kNumMethods);
+ test_info.AddMethodIndex(Hotness::kFlagStartup, kDex2, kChecksum2, 2, kNumMethods);
+ test_info.AddMethodIndex(Hotness::kFlagPostStartup, kDex2, kChecksum2, 4, kNumMethods);
auto run_test = [](const ProfileCompilationInfo& info) {
- EXPECT_FALSE(info.IsStartupOrHotMethod(kDex1, kChecksum1, 0));
- EXPECT_TRUE(info.IsStartupOrHotMethod(kDex1, kChecksum1, 1));
- EXPECT_FALSE(info.IsStartupOrHotMethod(kDex1, kChecksum1, 3));
- EXPECT_TRUE(info.IsStartupOrHotMethod(kDex1, kChecksum1, 5));
- EXPECT_FALSE(info.IsStartupOrHotMethod(kDex1, kChecksum1, 6));
- EXPECT_FALSE(info.IsStartupOrHotMethod(kDex2, kChecksum2, 5));
- EXPECT_FALSE(info.IsStartupOrHotMethod(kDex2, kChecksum2, 5));
+ EXPECT_FALSE(info.GetMethodHotness(kDex1, kChecksum1, 2).HasAnyFlags());
+ EXPECT_FALSE(info.GetMethodHotness(kDex1, kChecksum1, 4).HasAnyFlags());
+ EXPECT_TRUE(info.GetMethodHotness(kDex1, kChecksum1, 1).IsStartup());
+ EXPECT_FALSE(info.GetMethodHotness(kDex1, kChecksum1, 3).IsStartup());
+ EXPECT_TRUE(info.GetMethodHotness(kDex1, kChecksum1, 5).IsPostStartup());
+ EXPECT_FALSE(info.GetMethodHotness(kDex1, kChecksum1, 6).IsStartup());
+ EXPECT_TRUE(info.GetMethodHotness(kDex2, kChecksum2, 2).IsStartup());
+ EXPECT_TRUE(info.GetMethodHotness(kDex2, kChecksum2, 4).IsPostStartup());
};
run_test(test_info);
@@ -884,13 +891,53 @@ TEST_F(ProfileCompilationInfoTest, SampledMethodsTest) {
run_test(loaded_info);
// Test that the bitmap gets merged properly.
- EXPECT_FALSE(test_info.IsStartupOrHotMethod(kDex1, kChecksum1, 11));
+ EXPECT_FALSE(test_info.GetMethodHotness(kDex1, kChecksum1, 11).IsStartup());
{
ProfileCompilationInfo merge_info;
- merge_info.AddSampledMethod(true, kDex1, kChecksum1, 11, kNumMethods);
+ merge_info.AddMethodIndex(Hotness::kFlagStartup, kDex1, kChecksum1, 11, kNumMethods);
test_info.MergeWith(merge_info);
}
- EXPECT_TRUE(test_info.IsStartupOrHotMethod(kDex1, kChecksum1, 11));
+ EXPECT_TRUE(test_info.GetMethodHotness(kDex1, kChecksum1, 11).IsStartup());
+
+ // Test bulk adding.
+ {
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+ ProfileCompilationInfo info;
+ std::vector<uint16_t> hot_methods = {1, 3, 5};
+ std::vector<uint16_t> startup_methods = {1, 2};
+ std::vector<uint16_t> post_methods = {0, 2, 6};
+ ASSERT_GE(dex->NumMethodIds(), 7u);
+ info.AddMethodsForDex(static_cast<Hotness::Flag>(Hotness::kFlagHot | Hotness::kFlagStartup),
+ dex.get(),
+ hot_methods.begin(),
+ hot_methods.end());
+ info.AddMethodsForDex(Hotness::kFlagStartup,
+ dex.get(),
+ startup_methods.begin(),
+ startup_methods.end());
+ info.AddMethodsForDex(Hotness::kFlagPostStartup,
+ dex.get(),
+ post_methods.begin(),
+ post_methods.end());
+ for (uint16_t id : hot_methods) {
+ EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex.get(), id)).IsHot());
+ EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex.get(), id)).IsStartup());
+ }
+ for (uint16_t id : startup_methods) {
+ EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex.get(), id)).IsStartup());
+ }
+ for (uint16_t id : post_methods) {
+ EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex.get(), id)).IsPostStartup());
+ }
+ EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex.get(), 6)).IsPostStartup());
+ // Check that methods that shouldn't have been touched are OK.
+ EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex.get(), 0)).HasAnyFlags());
+ EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex.get(), 4)).HasAnyFlags());
+ EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex.get(), 7)).HasAnyFlags());
+ EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex.get(), 1)).IsPostStartup());
+ EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex.get(), 4)).IsStartup());
+ EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex.get(), 6)).IsStartup());
+ }
}
} // namespace art
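
Both the bulk-add test above and profile_saver.cc below combine hotness flags with a static_cast, because ORing two values of an unscoped enum yields an int rather than a Hotness::Flag. A minimal illustration of that pattern, with a stand-in enum:

#include <cstdint>
#include <iostream>

enum Flag : uint8_t {  // stand-in for MethodHotness::Flag
  kFlagHot = 0x1,
  kFlagStartup = 0x2,
  kFlagPostStartup = 0x4,
};

int main() {
  // kFlagHot | kFlagStartup has type int; cast back to Flag before passing it on.
  Flag combined = static_cast<Flag>(kFlagHot | kFlagStartup);
  std::cout << "combined=0x" << std::hex << static_cast<int>(combined) << std::endl;  // 0x3
  return 0;
}
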
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index edce9cd96c..94363c6f2d 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -34,7 +34,7 @@
#include "gc/collector_type.h"
#include "gc/gc_cause.h"
#include "gc/scoped_gc_critical_section.h"
-#include "jit/profile_compilation_info-inl.h"
+#include "jit/profile_compilation_info.h"
#include "oat_file_manager.h"
#include "scoped_thread_state_change-inl.h"
@@ -194,21 +194,24 @@ class GetClassesAndMethodsVisitor : public ClassVisitor {
GetClassesAndMethodsVisitor(MethodReferenceCollection* hot_methods,
MethodReferenceCollection* sampled_methods,
TypeReferenceCollection* resolved_classes,
- uint32_t hot_method_sample_threshold)
+ uint32_t hot_method_sample_threshold,
+ bool profile_boot_class_path)
: hot_methods_(hot_methods),
sampled_methods_(sampled_methods),
resolved_classes_(resolved_classes),
- hot_method_sample_threshold_(hot_method_sample_threshold) {}
+ hot_method_sample_threshold_(hot_method_sample_threshold),
+ profile_boot_class_path_(profile_boot_class_path) {}
virtual bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
if (klass->IsProxyClass() ||
klass->IsArrayClass() ||
+ klass->IsPrimitive() ||
!klass->IsResolved() ||
klass->IsErroneousResolved() ||
- klass->GetClassLoader() == nullptr) {
+ (!profile_boot_class_path_ && klass->GetClassLoader() == nullptr)) {
return true;
}
- DCHECK(klass->GetDexCache() != nullptr) << klass->PrettyClass();
+ CHECK(klass->GetDexCache() != nullptr) << klass->PrettyClass();
resolved_classes_->AddReference(&klass->GetDexFile(), klass->GetDexTypeIndex());
for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
if (!method.IsNative()) {
@@ -235,6 +238,7 @@ class GetClassesAndMethodsVisitor : public ClassVisitor {
MethodReferenceCollection* const sampled_methods_;
TypeReferenceCollection* const resolved_classes_;
uint32_t hot_method_sample_threshold_;
+ const bool profile_boot_class_path_;
};
void ProfileSaver::FetchAndCacheResolvedClassesAndMethods() {
@@ -263,13 +267,15 @@ void ProfileSaver::FetchAndCacheResolvedClassesAndMethods() {
GetClassesAndMethodsVisitor visitor(&hot_methods,
&startup_methods,
&resolved_classes,
- hot_threshold);
+ hot_threshold,
+ options_.GetProfileBootClassPath());
runtime->GetClassLinker()->VisitClasses(&visitor);
}
}
MutexLock mu(self, *Locks::profiler_lock_);
uint64_t total_number_of_profile_entries_cached = 0;
+ using Hotness = ProfileCompilationInfo::MethodHotness;
for (const auto& it : tracked_dex_base_locations_) {
std::set<DexCacheResolvedClasses> resolved_classes_for_location;
@@ -283,21 +289,22 @@ void ProfileSaver::FetchAndCacheResolvedClassesAndMethods() {
for (const auto& pair : hot_methods.GetMap()) {
const DexFile* const dex_file = pair.first;
if (locations.find(dex_file->GetBaseLocation()) != locations.end()) {
- cached_info->AddSampledMethodsForDex(/*startup*/ true,
- dex_file,
- pair.second.begin(),
- pair.second.end());
- // Adding hot methods is a bit slow, TODO: optimize.
- cached_info->AddHotMethodsForDex(dex_file, pair.second.begin(), pair.second.end());
+ const MethodReferenceCollection::IndexVector& indices = pair.second;
+ cached_info->AddMethodsForDex(
+ static_cast<Hotness::Flag>(Hotness::kFlagHot | Hotness::kFlagStartup),
+ dex_file,
+ indices.begin(),
+ indices.end());
}
}
for (const auto& pair : startup_methods.GetMap()) {
const DexFile* const dex_file = pair.first;
if (locations.find(dex_file->GetBaseLocation()) != locations.end()) {
- cached_info->AddSampledMethodsForDex(/*startup*/ true,
- dex_file,
- pair.second.begin(),
- pair.second.end());
+ const MethodReferenceCollection::IndexVector& indices = pair.second;
+ cached_info->AddMethodsForDex(Hotness::kFlagStartup,
+ dex_file,
+ indices.begin(),
+ indices.end());
}
}
for (const auto& pair : resolved_classes.GetMap()) {
@@ -366,7 +373,7 @@ bool ProfileSaver::ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number
uint64_t last_save_number_of_methods = info.GetNumberOfMethods();
uint64_t last_save_number_of_classes = info.GetNumberOfResolvedClasses();
- info.AddMethodsAndClasses(profile_methods, std::set<DexCacheResolvedClasses>());
+ info.AddMethods(profile_methods);
auto profile_cache_it = profile_cache_.find(filename);
if (profile_cache_it != profile_cache_.end()) {
info.MergeWith(*(profile_cache_it->second));
@@ -469,24 +476,49 @@ void ProfileSaver::Start(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
const std::vector<std::string>& code_paths) {
+ Runtime* const runtime = Runtime::Current();
DCHECK(options.IsEnabled());
- DCHECK(Runtime::Current()->GetJit() != nullptr);
+ DCHECK(runtime->GetJit() != nullptr);
DCHECK(!output_filename.empty());
DCHECK(jit_code_cache != nullptr);
std::vector<std::string> code_paths_to_profile;
-
for (const std::string& location : code_paths) {
if (ShouldProfileLocation(location)) {
code_paths_to_profile.push_back(location);
}
}
+
+ MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
+ // Support getting profile samples for the boot class path. This will be used to generate the boot
+ // image profile. The intention is to use this code to generate the boot image profile, but not to
+ // use it in production. b/37966211
+ if (options.GetProfileBootClassPath()) {
+ std::set<std::string> code_paths_keys;
+ for (const std::string& location : code_paths) {
+ code_paths_keys.insert(ProfileCompilationInfo::GetProfileDexFileKey(location));
+ }
+ for (const DexFile* dex_file : runtime->GetClassLinker()->GetBootClassPath()) {
+ // Don't check ShouldProfileLocation since the boot class path may be speed compiled.
+ const std::string& location = dex_file->GetLocation();
+ const std::string key = ProfileCompilationInfo::GetProfileDexFileKey(location);
+ VLOG(profiler) << "Registering boot dex file " << location;
+ if (code_paths_keys.find(key) != code_paths_keys.end()) {
+ LOG(WARNING) << "Boot class path location key conflicts with code path " << location;
+ } else if (instance_ == nullptr) {
+ // Only add the boot class path once since Start may be called multiple times for secondary
+ // dexes.
+ // We still do the collision check above. This handles any secondary dexes that conflict
+ // with the boot class path dex files.
+ code_paths_to_profile.push_back(location);
+ }
+ }
+ }
if (code_paths_to_profile.empty()) {
VLOG(profiler) << "No code paths should be profiled.";
return;
}
- MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
if (instance_ != nullptr) {
// If we already have an instance, make sure it uses the same jit_code_cache.
// This may be called multiple times via Runtime::registerAppInfo (e.g. for
@@ -649,7 +681,7 @@ bool ProfileSaver::HasSeenMethod(const std::string& profile,
if (!info.Load(profile, /*clear_if_invalid*/false)) {
return false;
}
- return info.ContainsHotMethod(MethodReference(dex_file, method_idx));
+ return info.GetMethodHotness(MethodReference(dex_file, method_idx)).HasAnyFlags();
}
return false;
}
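
The boot-class-path block added to ProfileSaver::Start above derives a profile key per dex location and skips any boot dex file whose key collides with an already tracked code path. A condensed sketch of that filtering; ProfileKeyFor here is a hypothetical stand-in for ProfileCompilationInfo::GetProfileDexFileKey:

#include <cstddef>
#include <set>
#include <string>
#include <vector>

// Hypothetical stand-in for GetProfileDexFileKey(): use the location's base name as the key.
static std::string ProfileKeyFor(const std::string& location) {
  const size_t last_sep = location.find_last_of('/');
  return last_sep == std::string::npos ? location : location.substr(last_sep + 1);
}

// Returns the boot dex locations whose keys do not collide with any tracked code path.
std::vector<std::string> FilterBootLocations(const std::vector<std::string>& code_paths,
                                             const std::vector<std::string>& boot_locations) {
  std::set<std::string> code_path_keys;
  for (const std::string& location : code_paths) {
    code_path_keys.insert(ProfileKeyFor(location));
  }
  std::vector<std::string> kept;
  for (const std::string& location : boot_locations) {
    if (code_path_keys.count(ProfileKeyFor(location)) == 0) {
      kept.push_back(location);
    }
  }
  return kept;
}
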
diff --git a/runtime/jit/profile_saver_options.h b/runtime/jit/profile_saver_options.h
index 44550f4ddb..251227e89c 100644
--- a/runtime/jit/profile_saver_options.h
+++ b/runtime/jit/profile_saver_options.h
@@ -40,7 +40,8 @@ struct ProfileSaverOptions {
min_classes_to_save_(kMinClassesToSave),
min_notification_before_wake_(kMinNotificationBeforeWake),
max_notification_before_wake_(kMaxNotificationBeforeWake),
- profile_path_("") {}
+ profile_path_(""),
+ profile_boot_class_path_(false) {}
ProfileSaverOptions(
bool enabled,
@@ -51,8 +52,9 @@ struct ProfileSaverOptions {
uint32_t min_classes_to_save,
uint32_t min_notification_before_wake,
uint32_t max_notification_before_wake,
- const std::string& profile_path):
- enabled_(enabled),
+ const std::string& profile_path,
+ bool profile_boot_class_path)
+ : enabled_(enabled),
min_save_period_ms_(min_save_period_ms),
save_resolved_classes_delay_ms_(save_resolved_classes_delay_ms),
hot_startup_method_samples_(hot_startup_method_samples),
@@ -60,7 +62,8 @@ struct ProfileSaverOptions {
min_classes_to_save_(min_classes_to_save),
min_notification_before_wake_(min_notification_before_wake),
max_notification_before_wake_(max_notification_before_wake),
- profile_path_(profile_path) {}
+ profile_path_(profile_path),
+ profile_boot_class_path_(profile_boot_class_path) {}
bool IsEnabled() const {
return enabled_;
@@ -97,6 +100,9 @@ struct ProfileSaverOptions {
std::string GetProfilePath() const {
return profile_path_;
}
+ bool GetProfileBootClassPath() const {
+ return profile_boot_class_path_;
+ }
friend std::ostream & operator<<(std::ostream &os, const ProfileSaverOptions& pso) {
os << "enabled_" << pso.enabled_
@@ -106,7 +112,8 @@ struct ProfileSaverOptions {
<< ", min_methods_to_save_" << pso.min_methods_to_save_
<< ", min_classes_to_save_" << pso.min_classes_to_save_
<< ", min_notification_before_wake_" << pso.min_notification_before_wake_
- << ", max_notification_before_wake_" << pso.max_notification_before_wake_;
+ << ", max_notification_before_wake_" << pso.max_notification_before_wake_
+ << ", profile_boot_class_path_" << pso.profile_boot_class_path_;
return os;
}
@@ -121,6 +128,7 @@ struct ProfileSaverOptions {
uint32_t min_notification_before_wake_;
uint32_t max_notification_before_wake_;
std::string profile_path_;
+ bool profile_boot_class_path_;
};
} // namespace art
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6be0953727..0aabbea2c0 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -40,6 +40,7 @@
#include "interpreter/interpreter.h"
#include "jni_env_ext.h"
#include "java_vm_ext.h"
+#include "jvalue-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field-inl.h"
@@ -64,6 +65,84 @@ namespace art {
// things not rendering correctly. E.g. b/16858794
static constexpr bool kWarnJniAbort = false;
+// Helpers to call instrumentation functions for fields. These take jobjects so we don't need to set
+// up handles for the rare case where these actually do something. Once these functions return it is
+// possible there will be a pending exception if the instrumentation happens to throw one.
+static void NotifySetObjectField(ArtField* field, jobject obj, jobject jval)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK_EQ(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
+ Thread* self = Thread::Current();
+ ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
+ /*check_suspended*/ true,
+ /*abort_on_error*/ false);
+
+ if (cur_method == nullptr) {
+ // Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
+ // of these changes.
+ return;
+ }
+ DCHECK(cur_method->IsNative());
+ JValue val;
+ val.SetL(self->DecodeJObject(jval));
+ instrumentation->FieldWriteEvent(self,
+ self->DecodeJObject(obj).Ptr(),
+ cur_method,
+ 0, // dex_pc is always 0 since this is a native method.
+ field,
+ val);
+ }
+}
+
+static void NotifySetPrimitiveField(ArtField* field, jobject obj, JValue val)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK_NE(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
+ Thread* self = Thread::Current();
+ ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
+ /*check_suspended*/ true,
+ /*abort_on_error*/ false);
+
+ if (cur_method == nullptr) {
+ // Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
+ // of these changes.
+ return;
+ }
+ DCHECK(cur_method->IsNative());
+ instrumentation->FieldWriteEvent(self,
+ self->DecodeJObject(obj).Ptr(),
+ cur_method,
+ 0, // dex_pc is always 0 since this is a native method.
+ field,
+ val);
+ }
+}
+
+static void NotifyGetField(ArtField* field, jobject obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
+ Thread* self = Thread::Current();
+ ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
+ /*check_suspended*/ true,
+ /*abort_on_error*/ false);
+
+ if (cur_method == nullptr) {
+ // Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
+ // of these changes.
+ return;
+ }
+ DCHECK(cur_method->IsNative());
+ instrumentation->FieldReadEvent(self,
+ self->DecodeJObject(obj).Ptr(),
+ cur_method,
+ 0, // dex_pc is always 0 since this is a native method.
+ field);
+ }
+}
+
// Section 12.3.2 of the JNI spec describes JNI class descriptors. They're
// separated with slashes but aren't wrapped with "L;" like regular descriptors
// (i.e. "a/b/C" rather than "La/b/C;"). Arrays of reference types are an
@@ -1235,8 +1314,9 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(obj);
CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
- ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(obj);
ArtField* f = jni::DecodeArtField(fid);
+ NotifyGetField(f, obj);
+ ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(obj);
return soa.AddLocalReference<jobject>(f->GetObject(o));
}
@@ -1244,6 +1324,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
ArtField* f = jni::DecodeArtField(fid);
+ NotifyGetField(f, nullptr);
return soa.AddLocalReference<jobject>(f->GetObject(f->GetDeclaringClass()));
}
@@ -1251,17 +1332,19 @@ class JNI {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_object);
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid);
ScopedObjectAccess soa(env);
+ ArtField* f = jni::DecodeArtField(fid);
+ NotifySetObjectField(f, java_object, java_value);
ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(java_object);
ObjPtr<mirror::Object> v = soa.Decode<mirror::Object>(java_value);
- ArtField* f = jni::DecodeArtField(fid);
f->SetObject<false>(o, v);
}
static void SetStaticObjectField(JNIEnv* env, jclass, jfieldID fid, jobject java_value) {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid);
ScopedObjectAccess soa(env);
- ObjPtr<mirror::Object> v = soa.Decode<mirror::Object>(java_value);
ArtField* f = jni::DecodeArtField(fid);
+ NotifySetObjectField(f, nullptr, java_value);
+ ObjPtr<mirror::Object> v = soa.Decode<mirror::Object>(java_value);
f->SetObject<false>(f->GetDeclaringClass(), v);
}
@@ -1269,28 +1352,32 @@ class JNI {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(instance); \
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \
ScopedObjectAccess soa(env); \
- ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \
ArtField* f = jni::DecodeArtField(fid); \
+ NotifyGetField(f, instance); \
+ ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \
return f->Get ##fn (o)
#define GET_STATIC_PRIMITIVE_FIELD(fn) \
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \
ScopedObjectAccess soa(env); \
ArtField* f = jni::DecodeArtField(fid); \
+ NotifyGetField(f, nullptr); \
return f->Get ##fn (f->GetDeclaringClass())
#define SET_PRIMITIVE_FIELD(fn, instance, value) \
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(instance); \
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \
ScopedObjectAccess soa(env); \
- ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \
ArtField* f = jni::DecodeArtField(fid); \
+ NotifySetPrimitiveField(f, instance, JValue::FromPrimitive<decltype(value)>(value)); \
+ ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \
f->Set ##fn <false>(o, value)
#define SET_STATIC_PRIMITIVE_FIELD(fn, value) \
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \
ScopedObjectAccess soa(env); \
ArtField* f = jni::DecodeArtField(fid); \
+ NotifySetPrimitiveField(f, nullptr, JValue::FromPrimitive<decltype(value)>(value)); \
f->Set ##fn <false>(f->GetDeclaringClass(), value)
static jboolean GetBooleanField(JNIEnv* env, jobject obj, jfieldID fid) {
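
The reordering above decodes the ArtField and fires the read/write notification before the receiver and value are decoded and the raw access happens, so listeners observe the event (and may throw) ahead of the actual store. A minimal standalone sketch of that notify-then-write ordering, using hypothetical Listener/Field types rather than the ART instrumentation API:

#include <functional>
#include <vector>

// Hypothetical stand-ins for ArtField and the instrumentation, for illustration only.
struct Field { int value = 0; };

class WriteInstrumentation {
 public:
  void AddListener(std::function<void(const Field&, int)> cb) { listeners_.push_back(std::move(cb)); }
  bool HasListeners() const { return !listeners_.empty(); }
  void NotifyWrite(const Field& f, int new_value) const {
    for (const auto& cb : listeners_) cb(f, new_value);
  }
 private:
  std::vector<std::function<void(const Field&, int)>> listeners_;
};

// Mirrors the ordering used in the hunks above: notify first (listeners see the
// old value and may flag an error), then perform the actual store.
void SetIntField(WriteInstrumentation& instr, Field& f, int new_value) {
  if (instr.HasListeners()) {
    instr.NotifyWrite(f, new_value);
  }
  f.value = new_value;
}
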
diff --git a/runtime/jvalue-inl.h b/runtime/jvalue-inl.h
index b33686c6c5..25e34b2a74 100644
--- a/runtime/jvalue-inl.h
+++ b/runtime/jvalue-inl.h
@@ -27,6 +27,24 @@ inline void JValue::SetL(ObjPtr<mirror::Object> new_l) {
l = new_l.Ptr();
}
+#define DEFINE_FROM(type, chr) \
+ template <> inline JValue JValue::FromPrimitive(type v) { \
+ JValue res; \
+ res.Set ## chr(v); \
+ return res; \
+ }
+
+DEFINE_FROM(uint8_t, Z);
+DEFINE_FROM(int8_t, B);
+DEFINE_FROM(uint16_t, C);
+DEFINE_FROM(int16_t, S);
+DEFINE_FROM(int32_t, I);
+DEFINE_FROM(int64_t, J);
+DEFINE_FROM(float, F);
+DEFINE_FROM(double, D);
+
+#undef DEFINE_FROM
+
} // namespace art
#endif // ART_RUNTIME_JVALUE_INL_H_
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index f61a07c0c0..266abcf399 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -33,6 +33,8 @@ union PACKED(alignof(mirror::Object*)) JValue {
// We default initialize JValue instances to all-zeros.
JValue() : j(0) {}
+ template<typename T> static JValue FromPrimitive(T v);
+
int8_t GetB() const { return b; }
void SetB(int8_t new_b) {
j = ((static_cast<int64_t>(new_b) << 56) >> 56); // Sign-extend to 64 bits.
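
The FromPrimitive<T> specializations above let a macro that only knows the static type of its value argument build a JValue via JValue::FromPrimitive<decltype(value)>(value). A minimal sketch of the same tag-dispatch pattern over a simplified union (names are illustrative, not the ART types):

#include <cstdint>

union SimpleJValue {
  int8_t b; uint16_t c; int32_t i; int64_t j; float f; double d;
};

// Primary template, specialized once per primitive type.
template <typename T> SimpleJValue FromPrimitive(T v);

#define DEFINE_FROM(type, member)                         \
  template <> inline SimpleJValue FromPrimitive(type v) { \
    SimpleJValue res;                                     \
    res.member = v;                                       \
    return res;                                           \
  }

DEFINE_FROM(int8_t, b)
DEFINE_FROM(uint16_t, c)
DEFINE_FROM(int32_t, i)
DEFINE_FROM(int64_t, j)
DEFINE_FROM(float, f)
DEFINE_FROM(double, d)
#undef DEFINE_FROM

// Usage from a macro body that only sees `value`:
//   SimpleJValue v = FromPrimitive<decltype(value)>(value);
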
diff --git a/runtime/managed_stack-inl.h b/runtime/managed_stack-inl.h
index f3f31cf8e8..bdf8100cc0 100644
--- a/runtime/managed_stack-inl.h
+++ b/runtime/managed_stack-inl.h
@@ -23,7 +23,7 @@
#include <stdint.h>
#include <string>
-#include "stack.h"
+#include "interpreter/shadow_frame.h"
namespace art {
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index c847942fd1..7b4160876c 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -806,6 +806,99 @@ void MemMap::SetSize(size_t new_size) {
size_ = new_size;
}
+void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
+ int prot,
+ int flags,
+ int fd,
+ off_t offset) {
+#if USE_ART_LOW_4G_ALLOCATOR
+ void* actual = MAP_FAILED;
+
+ bool first_run = true;
+
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
+ // Use gMaps as an optimization to skip over large maps.
+ // Find the first map which is address > ptr.
+ auto it = gMaps->upper_bound(reinterpret_cast<void*>(ptr));
+ if (it != gMaps->begin()) {
+ auto before_it = it;
+ --before_it;
+ // Start at the end of the map before the upper bound.
+ ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
+ CHECK_ALIGNED(ptr, kPageSize);
+ }
+ while (it != gMaps->end()) {
+ // How much space do we have until the next map?
+ size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
+ // If the space may be sufficient, break out of the loop.
+ if (delta >= length) {
+ break;
+ }
+ // Otherwise, skip to the end of the map.
+ ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
+ CHECK_ALIGNED(ptr, kPageSize);
+ ++it;
+ }
+
+ // Try to see if we get lucky with this address since none of the ART maps overlap.
+ actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
+ if (actual != MAP_FAILED) {
+ next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
+ return actual;
+ }
+
+ if (4U * GB - ptr < length) {
+ // Not enough memory until 4GB.
+ if (first_run) {
+ // Try another time from the bottom;
+ ptr = LOW_MEM_START - kPageSize;
+ first_run = false;
+ continue;
+ } else {
+ // Second try failed.
+ break;
+ }
+ }
+
+ uintptr_t tail_ptr;
+
+ // Check pages are free.
+ bool safe = true;
+ for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += kPageSize) {
+ if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
+ safe = false;
+ break;
+ } else {
+ DCHECK_EQ(errno, ENOMEM);
+ }
+ }
+
+ next_mem_pos_ = tail_ptr; // update early, as we break out when we found and mapped a region
+
+ if (safe == true) {
+ actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
+ if (actual != MAP_FAILED) {
+ return actual;
+ }
+ } else {
+ // Skip over last page.
+ ptr = tail_ptr;
+ }
+ }
+
+ if (actual == MAP_FAILED) {
+ LOG(ERROR) << "Could not find contiguous low-memory space.";
+ errno = ENOMEM;
+ }
+ return actual;
+#else
+ UNUSED(length, prot, flags, fd, offset);
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+#endif
+}
+
void* MemMap::MapInternal(void* addr,
size_t length,
int prot,
@@ -840,87 +933,32 @@ void* MemMap::MapInternal(void* addr,
#if USE_ART_LOW_4G_ALLOCATOR
// MAP_32BIT only available on x86_64.
if (low_4gb && addr == nullptr) {
- bool first_run = true;
-
- std::lock_guard<std::mutex> mu(*mem_maps_lock_);
- for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
- // Use gMaps as an optimization to skip over large maps.
- // Find the first map which is address > ptr.
- auto it = gMaps->upper_bound(reinterpret_cast<void*>(ptr));
- if (it != gMaps->begin()) {
- auto before_it = it;
- --before_it;
- // Start at the end of the map before the upper bound.
- ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
- CHECK_ALIGNED(ptr, kPageSize);
- }
- while (it != gMaps->end()) {
- // How much space do we have until the next map?
- size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
- // If the space may be sufficient, break out of the loop.
- if (delta >= length) {
- break;
- }
- // Otherwise, skip to the end of the map.
- ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
- CHECK_ALIGNED(ptr, kPageSize);
- ++it;
- }
-
- // Try to see if we get lucky with this address since none of the ART maps overlap.
- actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
- if (actual != MAP_FAILED) {
- next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
- return actual;
- }
+ // The linear-scan allocator has an issue when executable pages are denied (e.g., by SELinux
+ // policies in sensitive processes). In that case, the error code will still be ENOMEM, so
+ // the allocator will scan all of the low 4GB twice and still fail. This is *very* slow.
+ //
+ // To avoid the issue, always map non-executable first, and mprotect if necessary.
+ const int orig_prot = prot;
+ const int prot_non_exec = prot & ~PROT_EXEC;
+ actual = MapInternalArtLow4GBAllocator(length, prot_non_exec, flags, fd, offset);
- if (4U * GB - ptr < length) {
- // Not enough memory until 4GB.
- if (first_run) {
- // Try another time from the bottom;
- ptr = LOW_MEM_START - kPageSize;
- first_run = false;
- continue;
- } else {
- // Second try failed.
- break;
- }
- }
-
- uintptr_t tail_ptr;
-
- // Check pages are free.
- bool safe = true;
- for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += kPageSize) {
- if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
- safe = false;
- break;
- } else {
- DCHECK_EQ(errno, ENOMEM);
- }
- }
-
- next_mem_pos_ = tail_ptr; // update early, as we break out when we found and mapped a region
-
- if (safe == true) {
- actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
- if (actual != MAP_FAILED) {
- return actual;
- }
- } else {
- // Skip over last page.
- ptr = tail_ptr;
- }
+ if (actual == MAP_FAILED) {
+ return MAP_FAILED;
}
- if (actual == MAP_FAILED) {
- LOG(ERROR) << "Could not find contiguous low-memory space.";
- errno = ENOMEM;
+ // See if we need to remap with the executable bit now.
+ if (orig_prot != prot_non_exec) {
+ if (mprotect(actual, length, orig_prot) != 0) {
+ PLOG(ERROR) << "Could not protect to requested prot: " << orig_prot;
+ munmap(actual, length);
+ errno = ENOMEM;
+ return MAP_FAILED;
+ }
}
- } else {
- actual = mmap(addr, length, prot, flags, fd, offset);
+ return actual;
}
+ actual = mmap(addr, length, prot, flags, fd, offset);
#else
#if defined(__LP64__)
if (low_4gb && addr == nullptr) {
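
The rewritten low_4gb path above works around SELinux denials of executable anonymous mappings: it always maps with PROT_EXEC stripped (so a denial cannot masquerade as ENOMEM and trigger the slow full scan of the low 4GB) and only adds the executable bit afterwards. The same map-then-mprotect pattern in isolation, as plain POSIX calls:

#include <sys/mman.h>
#include <sys/types.h>
#include <cerrno>
#include <cstdio>

// Map without PROT_EXEC first, then add it with mprotect() if it was requested.
// Returns MAP_FAILED on error, a usable mapping otherwise.
void* MapMaybeExecutable(size_t length, int prot, int flags, int fd, off_t offset) {
  void* mem = mmap(nullptr, length, prot & ~PROT_EXEC, flags, fd, offset);
  if (mem == MAP_FAILED) {
    return MAP_FAILED;
  }
  if ((prot & PROT_EXEC) != 0 && mprotect(mem, length, prot) != 0) {
    perror("mprotect");  // e.g. denied by policy; surface it instead of retrying.
    munmap(mem, length);
    errno = ENOMEM;
    return MAP_FAILED;
  }
  return mem;
}
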
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 140877eab8..5603963eac 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -28,9 +28,12 @@
namespace art {
-#if defined(__LP64__) && (!defined(__x86_64__) || defined(__APPLE__))
+#if defined(__LP64__) && (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__))
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
+#if defined(__LP64__) && !defined(__x86_64__)
+#error "Unrecognized 64-bit architecture."
+#endif
#define USE_ART_LOW_4G_ALLOCATOR 0
#endif
@@ -221,7 +224,14 @@ class MemMap {
int flags,
int fd,
off_t offset,
- bool low_4gb);
+ bool low_4gb)
+ REQUIRES(!MemMap::mem_maps_lock_);
+ static void* MapInternalArtLow4GBAllocator(size_t length,
+ int prot,
+ int flags,
+ int fd,
+ off_t offset)
+ REQUIRES(!MemMap::mem_maps_lock_);
const std::string name_;
uint8_t* begin_; // Start of data. May be changed by AlignBy.
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 090bac1173..f0d3cae4b4 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -822,7 +822,7 @@ inline bool DoFieldPutForInvokePolymorphic(Thread* self,
ObjPtr<mirror::Object>& obj,
ArtField* field,
Primitive::Type field_type,
- const JValue& value)
+ JValue& value)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!Runtime::Current()->IsActiveTransaction());
static const bool kTransaction = false; // Not in a transaction.
diff --git a/runtime/method_handles.h b/runtime/method_handles.h
index e02e62052c..55680f09e7 100644
--- a/runtime/method_handles.h
+++ b/runtime/method_handles.h
@@ -21,9 +21,9 @@
#include "dex_instruction.h"
#include "handle.h"
+#include "interpreter/shadow_frame.h"
#include "jvalue.h"
#include "mirror/class.h"
-#include "stack.h"
namespace art {
@@ -32,8 +32,6 @@ namespace mirror {
class MethodType;
} // namespace mirror
-class ShadowFrame;
-
// Returns true if there is a possible conversion from |from| to |to|
// for a MethodHandle parameter.
bool IsParameterTypeConvertible(ObjPtr<mirror::Class> from,
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index c8d455711d..12baf387d2 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -127,11 +127,9 @@ inline ArraySlice<ArtMethod> Class::GetDirectMethodsSlice(PointerSize pointer_si
}
inline ArraySlice<ArtMethod> Class::GetDirectMethodsSliceUnchecked(PointerSize pointer_size) {
- return ArraySlice<ArtMethod>(GetMethodsPtr(),
- GetDirectMethodsStartOffset(),
- GetVirtualMethodsStartOffset(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size,
+ GetDirectMethodsStartOffset(),
+ GetVirtualMethodsStartOffset());
}
template<VerifyObjectFlags kVerifyFlags>
@@ -141,11 +139,9 @@ inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSlice(PointerSize pointer_
}
inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSliceUnchecked(PointerSize pointer_size) {
- return ArraySlice<ArtMethod>(GetMethodsPtr(),
- GetDirectMethodsStartOffset(),
- GetCopiedMethodsStartOffset(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size,
+ GetDirectMethodsStartOffset(),
+ GetCopiedMethodsStartOffset());
}
template<VerifyObjectFlags kVerifyFlags>
inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSlice(PointerSize pointer_size) {
@@ -155,11 +151,9 @@ inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSlice(PointerSize p
inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSliceUnchecked(
PointerSize pointer_size) {
- return ArraySlice<ArtMethod>(GetMethodsPtr(),
- GetVirtualMethodsStartOffset(),
- GetCopiedMethodsStartOffset(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size,
+ GetVirtualMethodsStartOffset(),
+ GetCopiedMethodsStartOffset());
}
template<VerifyObjectFlags kVerifyFlags>
@@ -169,12 +163,9 @@ inline ArraySlice<ArtMethod> Class::GetVirtualMethodsSlice(PointerSize pointer_s
}
inline ArraySlice<ArtMethod> Class::GetVirtualMethodsSliceUnchecked(PointerSize pointer_size) {
- LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
- return ArraySlice<ArtMethod>(methods,
- GetVirtualMethodsStartOffset(),
- NumMethods(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size,
+ GetVirtualMethodsStartOffset(),
+ NumMethods());
}
template<VerifyObjectFlags kVerifyFlags>
@@ -184,12 +175,7 @@ inline ArraySlice<ArtMethod> Class::GetCopiedMethodsSlice(PointerSize pointer_si
}
inline ArraySlice<ArtMethod> Class::GetCopiedMethodsSliceUnchecked(PointerSize pointer_size) {
- LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
- return ArraySlice<ArtMethod>(methods,
- GetCopiedMethodsStartOffset(),
- NumMethods(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size, GetCopiedMethodsStartOffset(), NumMethods());
}
inline LengthPrefixedArray<ArtMethod>* Class::GetMethodsPtr() {
@@ -200,14 +186,28 @@ inline LengthPrefixedArray<ArtMethod>* Class::GetMethodsPtr() {
template<VerifyObjectFlags kVerifyFlags>
inline ArraySlice<ArtMethod> Class::GetMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
- LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
- return ArraySlice<ArtMethod>(methods,
- 0,
- NumMethods(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size, 0, NumMethods());
}
+inline ArraySlice<ArtMethod> Class::GetMethodsSliceRangeUnchecked(PointerSize pointer_size,
+ uint32_t start_offset,
+ uint32_t end_offset) {
+ DCHECK_LE(start_offset, end_offset);
+ DCHECK_LE(end_offset, NumMethods());
+ uint32_t size = end_offset - start_offset;
+ if (size == 0u) {
+ return ArraySlice<ArtMethod>();
+ }
+ LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
+ DCHECK(methods != nullptr);
+ DCHECK_LE(end_offset, methods->size());
+ size_t method_size = ArtMethod::Size(pointer_size);
+ size_t method_alignment = ArtMethod::Alignment(pointer_size);
+ ArraySlice<ArtMethod> slice(&methods->At(0u, method_size, method_alignment),
+ methods->size(),
+ method_size);
+ return slice.SubArray(start_offset, size);
+}
inline uint32_t Class::NumMethods() {
LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
@@ -216,12 +216,12 @@ inline uint32_t Class::NumMethods() {
inline ArtMethod* Class::GetDirectMethodUnchecked(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return &GetDirectMethodsSliceUnchecked(pointer_size).At(i);
+ return &GetDirectMethodsSliceUnchecked(pointer_size)[i];
}
inline ArtMethod* Class::GetDirectMethod(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return &GetDirectMethodsSlice(pointer_size).At(i);
+ return &GetDirectMethodsSlice(pointer_size)[i];
}
inline void Class::SetMethodsPtr(LengthPrefixedArray<ArtMethod>* new_methods,
@@ -264,7 +264,7 @@ inline ArtMethod* Class::GetVirtualMethodDuringLinking(size_t i, PointerSize poi
inline ArtMethod* Class::GetVirtualMethodUnchecked(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return &GetVirtualMethodsSliceUnchecked(pointer_size).At(i);
+ return &GetVirtualMethodsSliceUnchecked(pointer_size)[i];
}
template<VerifyObjectFlags kVerifyFlags,
@@ -944,38 +944,36 @@ inline uint32_t Class::NumDirectInterfaces() {
}
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(PointerSize pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDirectMethods(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return GetDirectMethodsSliceUnchecked(pointer_size).AsRange();
+ return GetDirectMethodsSliceUnchecked(pointer_size);
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetDeclaredMethods(
+inline ArraySlice<ArtMethod> Class::GetDeclaredMethods(
PointerSize pointer_size) {
- return GetDeclaredMethodsSliceUnchecked(pointer_size).AsRange();
+ return GetDeclaredMethodsSliceUnchecked(pointer_size);
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetDeclaredVirtualMethods(
+inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethods(
PointerSize pointer_size) {
- return GetDeclaredVirtualMethodsSliceUnchecked(pointer_size).AsRange();
+ return GetDeclaredVirtualMethodsSliceUnchecked(pointer_size);
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(
+inline ArraySlice<ArtMethod> Class::GetVirtualMethods(
PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return GetVirtualMethodsSliceUnchecked(pointer_size).AsRange();
+ return GetVirtualMethodsSliceUnchecked(pointer_size);
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetCopiedMethods(PointerSize pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetCopiedMethods(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return GetCopiedMethodsSliceUnchecked(pointer_size).AsRange();
+ return GetCopiedMethodsSliceUnchecked(pointer_size);
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetMethods(PointerSize pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetMethods(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return MakeIterationRangeFromLengthPrefixedArray(GetMethodsPtr(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size, 0u, NumMethods());
}
inline IterationRange<StrideIterator<ArtField>> Class::GetIFields() {
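
The accessors above now build one slice over the whole methods array and carve each range out of it with SubArray(), instead of repeating the size/alignment arithmetic per range. A minimal sketch of that slice-plus-subrange idea over a plain array (not the real ArraySlice, which also handles element stride):

#include <cassert>
#include <cstddef>

template <typename T>
class Slice {
 public:
  Slice() : data_(nullptr), size_(0) {}
  Slice(T* data, size_t size) : data_(data), size_(size) {}
  T& operator[](size_t i) const { assert(i < size_); return data_[i]; }
  size_t size() const { return size_; }
  // Carve [start, start + count) out of this slice.
  Slice SubArray(size_t start, size_t count) const {
    assert(start + count <= size_);
    return Slice(data_ + start, count);
  }
 private:
  T* data_;
  size_t size_;
};

// One full slice, many ranges, e.g.:
//   Slice<Method> all(methods, num_methods);
//   Slice<Method> direct   = all.SubArray(0, virtual_start);
//   Slice<Method> virtuals = all.SubArray(virtual_start, num_methods - virtual_start);
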
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 913ab796a1..61d6e05416 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -716,7 +716,7 @@ class MANAGED Class FINAL : public Object {
// Also updates the dex_cache_strings_ variable from new_dex_cache.
void SetDexCache(ObjPtr<DexCache> new_dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(PointerSize pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethods(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetMethodsPtr()
@@ -726,7 +726,7 @@ class MANAGED Class FINAL : public Object {
return MemberOffset(OFFSETOF_MEMBER(Class, methods_));
}
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetMethods(PointerSize pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetMethods(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
void SetMethodsPtr(LengthPrefixedArray<ArtMethod>* new_methods,
@@ -763,7 +763,7 @@ class MANAGED Class FINAL : public Object {
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSlice(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredMethods(
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethods(
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -784,7 +784,7 @@ class MANAGED Class FINAL : public Object {
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredVirtualMethods(
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethods(
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -792,14 +792,14 @@ class MANAGED Class FINAL : public Object {
ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSlice(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetCopiedMethods(PointerSize pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethods(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSlice(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethods(
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1347,6 +1347,11 @@ class MANAGED Class FINAL : public Object {
ALWAYS_INLINE void SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods)
REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetMethodsSliceRangeUnchecked(PointerSize pointer_size,
+ uint32_t start_offset,
+ uint32_t end_offset)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
template <bool throw_on_failure, bool use_referrers_cache>
bool ResolvedFieldAccessTest(ObjPtr<Class> access_to,
ArtField* field,
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 2e2e8c3ef6..4820feb56c 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -141,8 +141,8 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
OatFileAssistant::~OatFileAssistant() {
// Clean up the lock file.
- if (flock_.HasFile()) {
- unlink(flock_.GetFile()->GetPath().c_str());
+ if (flock_.get() != nullptr) {
+ unlink(flock_->GetPath().c_str());
}
}
@@ -165,7 +165,7 @@ bool OatFileAssistant::IsInBootClassPath() {
bool OatFileAssistant::Lock(std::string* error_msg) {
CHECK(error_msg != nullptr);
- CHECK(!flock_.HasFile()) << "OatFileAssistant::Lock already acquired";
+ CHECK(flock_.get() == nullptr) << "OatFileAssistant::Lock already acquired";
// Note the lock will only succeed for secondary dex files and in test
// environment.
@@ -179,7 +179,8 @@ bool OatFileAssistant::Lock(std::string* error_msg) {
// to generate oat files anyway.
std::string lock_file_name = dex_location_ + "." + GetInstructionSetString(isa_) + ".flock";
- if (!flock_.Init(lock_file_name.c_str(), error_msg)) {
+ flock_ = LockedFile::Open(lock_file_name.c_str(), error_msg);
+ if (flock_.get() == nullptr) {
unlink(lock_file_name.c_str());
return false;
}
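
flock_ now holds a LockedFile obtained from LockedFile::Open instead of a ScopedFlock that is Init()-ed in place, so ownership of the lock is expressed as a smart pointer. A generic sketch of that kind of RAII file lock using POSIX flock(); the names and error handling here are illustrative, not the ART LockedFile implementation:

#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>
#include <memory>
#include <string>

class FileLock {
 public:
  // Returns nullptr (and fills *error_msg) if the file cannot be opened or locked.
  static std::unique_ptr<FileLock> Open(const std::string& path, std::string* error_msg) {
    int fd = open(path.c_str(), O_CREAT | O_RDWR, 0666);
    if (fd < 0 || flock(fd, LOCK_EX) != 0) {
      if (fd >= 0) close(fd);
      *error_msg = "failed to lock " + path;
      return nullptr;
    }
    return std::unique_ptr<FileLock>(new FileLock(fd, path));
  }
  const std::string& GetPath() const { return path_; }
  ~FileLock() {
    flock(fd_, LOCK_UN);  // The lock is also dropped implicitly on close().
    close(fd_);
  }
 private:
  FileLock(int fd, std::string path) : fd_(fd), path_(std::move(path)) {}
  const int fd_;
  const std::string path_;
};
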
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 3ec5b323c8..0896210f1c 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -635,36 +635,28 @@ class JvmtiFunctions {
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError SetFieldAccessWatch(jvmtiEnv* env,
- jclass klass ATTRIBUTE_UNUSED,
- jfieldID field ATTRIBUTE_UNUSED) {
+ static jvmtiError SetFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_access_events);
- return ERR(NOT_IMPLEMENTED);
+ return FieldUtil::SetFieldAccessWatch(env, klass, field);
}
- static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env,
- jclass klass ATTRIBUTE_UNUSED,
- jfieldID field ATTRIBUTE_UNUSED) {
+ static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_access_events);
- return ERR(NOT_IMPLEMENTED);
+ return FieldUtil::ClearFieldAccessWatch(env, klass, field);
}
- static jvmtiError SetFieldModificationWatch(jvmtiEnv* env,
- jclass klass ATTRIBUTE_UNUSED,
- jfieldID field ATTRIBUTE_UNUSED) {
+ static jvmtiError SetFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_modification_events);
- return ERR(NOT_IMPLEMENTED);
+ return FieldUtil::SetFieldModificationWatch(env, klass, field);
}
- static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env,
- jclass klass ATTRIBUTE_UNUSED,
- jfieldID field ATTRIBUTE_UNUSED) {
+ static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_modification_events);
- return ERR(NOT_IMPLEMENTED);
+ return FieldUtil::ClearFieldModificationWatch(env, klass, field);
}
static jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr) {
@@ -694,12 +686,10 @@ class JvmtiFunctions {
return ClassUtil::GetClassStatus(env, klass, status_ptr);
}
- static jvmtiError GetSourceFileName(jvmtiEnv* env,
- jclass klass ATTRIBUTE_UNUSED,
- char** source_name_ptr ATTRIBUTE_UNUSED) {
+ static jvmtiError GetSourceFileName(jvmtiEnv* env, jclass klass, char** source_name_ptr) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_source_file_name);
- return ERR(NOT_IMPLEMENTED);
+ return ClassUtil::GetSourceFileName(env, klass, source_name_ptr);
}
static jvmtiError GetClassModifiers(jvmtiEnv* env, jclass klass, jint* modifiers_ptr) {
@@ -774,11 +764,11 @@ class JvmtiFunctions {
}
static jvmtiError GetSourceDebugExtension(jvmtiEnv* env,
- jclass klass ATTRIBUTE_UNUSED,
- char** source_debug_extension_ptr ATTRIBUTE_UNUSED) {
+ jclass klass,
+ char** source_debug_extension_ptr) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_source_debug_extension);
- return ERR(NOT_IMPLEMENTED);
+ return ClassUtil::GetSourceDebugExtension(env, klass, source_debug_extension_ptr);
}
static jvmtiError RetransformClasses(jvmtiEnv* env, jint class_count, const jclass* classes) {
diff --git a/runtime/openjdkjvmti/art_jvmti.h b/runtime/openjdkjvmti/art_jvmti.h
index af85fb0f4c..b5f12191e6 100644
--- a/runtime/openjdkjvmti/art_jvmti.h
+++ b/runtime/openjdkjvmti/art_jvmti.h
@@ -34,10 +34,10 @@
#include <memory>
#include <type_traits>
+#include <unordered_set>
#include <jni.h>
-#include "base/array_slice.h"
#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
@@ -47,6 +47,10 @@
#include "jni_env_ext.h"
#include "jvmti.h"
+namespace art {
+class ArtField;
+}
+
namespace openjdkjvmti {
class ObjectTagTable;
@@ -63,6 +67,15 @@ struct ArtJvmTiEnv : public jvmtiEnv {
// Tagging is specific to the jvmtiEnv.
std::unique_ptr<ObjectTagTable> object_tag_table;
+ // Set of watched fields is unique to each jvmtiEnv.
+ // TODO It might be good to follow the RI and only let one jvmtiEnv ever have the watch caps so
+ // we can record this on the field directly. We could do this either using free access-flag bits
+ // or by putting a list in the ClassExt of a field's DeclaringClass.
+ // TODO Maybe just add an extension that puts a watch on every field; that would probably be
+ // good enough, since agents typically want either a few fields or almost all of them.
+ std::unordered_set<art::ArtField*> access_watched_fields;
+ std::unordered_set<art::ArtField*> modify_watched_fields;
+
ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler);
static ArtJvmTiEnv* AsArtJvmTiEnv(jvmtiEnv* env) {
@@ -195,8 +208,8 @@ static inline JvmtiUniquePtr<char[]> CopyString(jvmtiEnv* env, const char* src,
const jvmtiCapabilities kPotentialCapabilities = {
.can_tag_objects = 1,
- .can_generate_field_modification_events = 0,
- .can_generate_field_access_events = 0,
+ .can_generate_field_modification_events = 1,
+ .can_generate_field_access_events = 1,
.can_get_bytecodes = 0,
.can_get_synthetic_attribute = 1,
.can_get_owned_monitor_info = 0,
@@ -205,9 +218,9 @@ const jvmtiCapabilities kPotentialCapabilities = {
.can_pop_frame = 0,
.can_redefine_classes = 1,
.can_signal_thread = 0,
- .can_get_source_file_name = 0,
+ .can_get_source_file_name = 1,
.can_get_line_numbers = 1,
- .can_get_source_debug_extension = 0,
+ .can_get_source_debug_extension = 1,
.can_access_local_variables = 0,
.can_maintain_original_method_order = 0,
.can_generate_single_step_events = 0,
diff --git a/runtime/openjdkjvmti/events-inl.h b/runtime/openjdkjvmti/events-inl.h
index cb7e6a9ad0..af99233f90 100644
--- a/runtime/openjdkjvmti/events-inl.h
+++ b/runtime/openjdkjvmti/events-inl.h
@@ -20,6 +20,7 @@
#include <array>
#include "events.h"
+#include "jni_internal.h"
#include "ScopedLocalRef.h"
#include "art_jvmti.h"
@@ -216,6 +217,71 @@ inline void EventHandler::DispatchEvent(ArtJvmTiEnv* env, art::Thread* thread, A
}
}
+// Need to give custom specializations for FieldAccess and FieldModification since they need to
+// filter out which particular fields agents want to get notified on.
+// TODO The spec allows shortcuts, such as only letting one agent ever set these watches. This
+// could make the system more performant.
+template <>
+inline void EventHandler::DispatchEvent<ArtJvmtiEvent::kFieldModification>(art::Thread* thread,
+ JNIEnv* jnienv,
+ jthread jni_thread,
+ jmethodID method,
+ jlocation location,
+ jclass field_klass,
+ jobject object,
+ jfieldID field,
+ char type_char,
+ jvalue val) const {
+ for (ArtJvmTiEnv* env : envs) {
+ if (env != nullptr &&
+ ShouldDispatch<ArtJvmtiEvent::kFieldModification>(env, thread) &&
+ env->modify_watched_fields.find(
+ art::jni::DecodeArtField(field)) != env->modify_watched_fields.end()) {
+ ScopedLocalRef<jthrowable> thr(jnienv, jnienv->ExceptionOccurred());
+ jnienv->ExceptionClear();
+ auto callback = impl::GetCallback<ArtJvmtiEvent::kFieldModification>(env);
+ (*callback)(env,
+ jnienv,
+ jni_thread,
+ method,
+ location,
+ field_klass,
+ object,
+ field,
+ type_char,
+ val);
+ if (thr.get() != nullptr && !jnienv->ExceptionCheck()) {
+ jnienv->Throw(thr.get());
+ }
+ }
+ }
+}
+
+template <>
+inline void EventHandler::DispatchEvent<ArtJvmtiEvent::kFieldAccess>(art::Thread* thread,
+ JNIEnv* jnienv,
+ jthread jni_thread,
+ jmethodID method,
+ jlocation location,
+ jclass field_klass,
+ jobject object,
+ jfieldID field) const {
+ for (ArtJvmTiEnv* env : envs) {
+ if (env != nullptr &&
+ ShouldDispatch<ArtJvmtiEvent::kFieldAccess>(env, thread) &&
+ env->access_watched_fields.find(
+ art::jni::DecodeArtField(field)) != env->access_watched_fields.end()) {
+ ScopedLocalRef<jthrowable> thr(jnienv, jnienv->ExceptionOccurred());
+ jnienv->ExceptionClear();
+ auto callback = impl::GetCallback<ArtJvmtiEvent::kFieldAccess>(env);
+ (*callback)(env, jnienv, jni_thread, method, location, field_klass, object, field);
+ if (thr.get() != nullptr && !jnienv->ExceptionCheck()) {
+ jnienv->Throw(thr.get());
+ }
+ }
+ }
+}
+
// Need to give a custom specialization for NativeMethodBind since it has to deal with an out
// variable.
template <>
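
Both specializations save any exception that was pending when the event fired, clear it for the duration of the agent callback, and re-raise it afterwards unless the callback installed its own. The same save/clear/rethrow pattern as a small standalone JNI helper (a sketch, not the ART code):

#include <jni.h>

// Run `fn(jnienv)` with no exception pending, then restore the original pending
// exception unless `fn` left one of its own.
template <typename Fn>
void CallWithClearedException(JNIEnv* jnienv, Fn fn) {
  jthrowable pending = jnienv->ExceptionOccurred();  // Local reference, may be null.
  jnienv->ExceptionClear();
  fn(jnienv);
  if (pending != nullptr) {
    if (!jnienv->ExceptionCheck()) {
      jnienv->Throw(pending);
    }
    jnienv->DeleteLocalRef(pending);
  }
}
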
diff --git a/runtime/openjdkjvmti/events.cc b/runtime/openjdkjvmti/events.cc
index 90bc122220..989b9af591 100644
--- a/runtime/openjdkjvmti/events.cc
+++ b/runtime/openjdkjvmti/events.cc
@@ -33,6 +33,7 @@
#include "art_jvmti.h"
#include "art_method-inl.h"
+#include "art_field-inl.h"
#include "base/logging.h"
#include "gc/allocation_listener.h"
#include "gc/gc_pause_listener.h"
@@ -433,24 +434,92 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
}
// Call-back for when we read from a field.
- void FieldRead(art::Thread* self ATTRIBUTE_UNUSED,
- art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
- art::ArtMethod* method ATTRIBUTE_UNUSED,
- uint32_t dex_pc ATTRIBUTE_UNUSED,
- art::ArtField* field ATTRIBUTE_UNUSED)
+ void FieldRead(art::Thread* self,
+ art::Handle<art::mirror::Object> this_object,
+ art::ArtMethod* method,
+ uint32_t dex_pc,
+ art::ArtField* field)
REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
- return;
+ if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldAccess)) {
+ art::JNIEnvExt* jnienv = self->GetJniEnv();
+ // DCHECK(!self->IsExceptionPending());
+ ScopedLocalRef<jobject> this_ref(jnienv, AddLocalRef<jobject>(jnienv, this_object.Get()));
+ ScopedLocalRef<jobject> fklass(jnienv,
+ AddLocalRef<jobject>(jnienv,
+ field->GetDeclaringClass().Ptr()));
+ RunEventCallback<ArtJvmtiEvent::kFieldAccess>(self,
+ jnienv,
+ art::jni::EncodeArtMethod(method),
+ static_cast<jlocation>(dex_pc),
+ static_cast<jclass>(fklass.get()),
+ this_ref.get(),
+ art::jni::EncodeArtField(field));
+ }
+ }
+
+ void FieldWritten(art::Thread* self,
+ art::Handle<art::mirror::Object> this_object,
+ art::ArtMethod* method,
+ uint32_t dex_pc,
+ art::ArtField* field,
+ art::Handle<art::mirror::Object> new_val)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
+ art::JNIEnvExt* jnienv = self->GetJniEnv();
+ // DCHECK(!self->IsExceptionPending());
+ ScopedLocalRef<jobject> this_ref(jnienv, AddLocalRef<jobject>(jnienv, this_object.Get()));
+ ScopedLocalRef<jobject> fklass(jnienv,
+ AddLocalRef<jobject>(jnienv,
+ field->GetDeclaringClass().Ptr()));
+ ScopedLocalRef<jobject> fval(jnienv, AddLocalRef<jobject>(jnienv, new_val.Get()));
+ jvalue val;
+ val.l = fval.get();
+ RunEventCallback<ArtJvmtiEvent::kFieldModification>(
+ self,
+ jnienv,
+ art::jni::EncodeArtMethod(method),
+ static_cast<jlocation>(dex_pc),
+ static_cast<jclass>(fklass.get()),
+ field->IsStatic() ? nullptr : this_ref.get(),
+ art::jni::EncodeArtField(field),
+ 'L', // type_char
+ val);
+ }
}
// Call-back for when we write into a field.
- void FieldWritten(art::Thread* self ATTRIBUTE_UNUSED,
- art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
- art::ArtMethod* method ATTRIBUTE_UNUSED,
- uint32_t dex_pc ATTRIBUTE_UNUSED,
- art::ArtField* field ATTRIBUTE_UNUSED,
- const art::JValue& field_value ATTRIBUTE_UNUSED)
+ void FieldWritten(art::Thread* self,
+ art::Handle<art::mirror::Object> this_object,
+ art::ArtMethod* method,
+ uint32_t dex_pc,
+ art::ArtField* field,
+ const art::JValue& field_value)
REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
- return;
+ if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
+ art::JNIEnvExt* jnienv = self->GetJniEnv();
+ DCHECK(!self->IsExceptionPending());
+ ScopedLocalRef<jobject> this_ref(jnienv, AddLocalRef<jobject>(jnienv, this_object.Get()));
+ ScopedLocalRef<jobject> fklass(jnienv,
+ AddLocalRef<jobject>(jnienv,
+ field->GetDeclaringClass().Ptr()));
+ char type_char = art::Primitive::Descriptor(field->GetTypeAsPrimitiveType())[0];
+ jvalue val;
+ // The 64-bit integer is the largest member of the union, so simply copying it into the
+ // union is safe.
+ val.j = field_value.GetJ();
+ RunEventCallback<ArtJvmtiEvent::kFieldModification>(
+ self,
+ jnienv,
+ art::jni::EncodeArtMethod(method),
+ static_cast<jlocation>(dex_pc),
+ static_cast<jclass>(fklass.get()),
+ field->IsStatic() ? nullptr : this_ref.get(), // NB static field modifications are
+ // given the class as this_object
+ // for some reason.
+ art::jni::EncodeArtField(field),
+ type_char,
+ val);
+ }
}
// Call-back when an exception is caught.
@@ -490,15 +559,20 @@ static uint32_t GetInstrumentationEventsFor(ArtJvmtiEvent event) {
case ArtJvmtiEvent::kMethodExit:
return art::instrumentation::Instrumentation::kMethodExited |
art::instrumentation::Instrumentation::kMethodUnwind;
+ case ArtJvmtiEvent::kFieldModification:
+ return art::instrumentation::Instrumentation::kFieldWritten;
+ case ArtJvmtiEvent::kFieldAccess:
+ return art::instrumentation::Instrumentation::kFieldRead;
default:
LOG(FATAL) << "Unknown event ";
return 0;
}
}
-static void SetupMethodTraceListener(JvmtiMethodTraceListener* listener,
- ArtJvmtiEvent event,
- bool enable) {
+static void SetupTraceListener(JvmtiMethodTraceListener* listener,
+ ArtJvmtiEvent event,
+ bool enable) {
+ art::ScopedThreadStateChange stsc(art::Thread::Current(), art::ThreadState::kNative);
uint32_t new_events = GetInstrumentationEventsFor(event);
art::instrumentation::Instrumentation* instr = art::Runtime::Current()->GetInstrumentation();
art::gc::ScopedGCCriticalSection gcs(art::Thread::Current(),
@@ -529,7 +603,9 @@ void EventHandler::HandleEventType(ArtJvmtiEvent event, bool enable) {
case ArtJvmtiEvent::kMethodEntry:
case ArtJvmtiEvent::kMethodExit:
- SetupMethodTraceListener(method_trace_listener_.get(), event, enable);
+ case ArtJvmtiEvent::kFieldAccess:
+ case ArtJvmtiEvent::kFieldModification:
+ SetupTraceListener(method_trace_listener_.get(), event, enable);
return;
default:
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index ed54cd13c3..0ac08d9cb8 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -37,6 +37,7 @@
#include <unordered_set>
#include "art_jvmti.h"
+#include "base/array_ref.h"
#include "base/macros.h"
#include "class_table-inl.h"
#include "class_linker.h"
@@ -83,7 +84,7 @@ static std::unique_ptr<const art::DexFile> MakeSingleDexFile(art::Thread* self,
REQUIRES_SHARED(art::Locks::mutator_lock_) {
// Make the mmap
std::string error_msg;
- art::ArraySlice<const unsigned char> final_data(final_dex_data, final_len);
+ art::ArrayRef<const unsigned char> final_data(final_dex_data, final_len);
std::unique_ptr<art::MemMap> map(Redefiner::MoveDataToMemMap(orig_location,
final_data,
&error_msg));
@@ -312,8 +313,10 @@ struct ClassCallback : public art::ClassLoadCallback {
art::Thread* thread = art::Thread::Current();
ScopedLocalRef<jclass> jklass(thread->GetJniEnv(),
thread->GetJniEnv()->AddLocalReference<jclass>(klass.Get()));
+ art::ObjPtr<art::mirror::Object> peer(thread->GetPeer());
ScopedLocalRef<jthread> thread_jni(
- thread->GetJniEnv(), thread->GetJniEnv()->AddLocalReference<jthread>(thread->GetPeer()));
+ thread->GetJniEnv(),
+ peer.IsNull() ? nullptr : thread->GetJniEnv()->AddLocalReference<jthread>(peer));
{
art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
event_handler->DispatchEvent<ArtJvmtiEvent::kClassLoad>(
@@ -340,8 +343,10 @@ struct ClassCallback : public art::ClassLoadCallback {
}
ScopedLocalRef<jclass> jklass(thread->GetJniEnv(),
thread->GetJniEnv()->AddLocalReference<jclass>(klass.Get()));
+ art::ObjPtr<art::mirror::Object> peer(thread->GetPeer());
ScopedLocalRef<jthread> thread_jni(
- thread->GetJniEnv(), thread->GetJniEnv()->AddLocalReference<jthread>(thread->GetPeer()));
+ thread->GetJniEnv(),
+ peer.IsNull() ? nullptr : thread->GetJniEnv()->AddLocalReference<jthread>(peer));
art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
event_handler->DispatchEvent<ArtJvmtiEvent::kClassPrepare>(
thread,
@@ -1012,4 +1017,61 @@ jvmtiError ClassUtil::GetClassVersionNumbers(jvmtiEnv* env ATTRIBUTE_UNUSED,
return ERR(NONE);
}
+jvmtiError ClassUtil::GetSourceFileName(jvmtiEnv* env, jclass jklass, char** source_name_ptr) {
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ if (jklass == nullptr) {
+ return ERR(INVALID_CLASS);
+ }
+ art::ObjPtr<art::mirror::Object> jklass_obj = soa.Decode<art::mirror::Object>(jklass);
+ if (!jklass_obj->IsClass()) {
+ return ERR(INVALID_CLASS);
+ }
+ art::ObjPtr<art::mirror::Class> klass = jklass_obj->AsClass();
+ if (klass->IsPrimitive() || klass->IsArrayClass()) {
+ return ERR(ABSENT_INFORMATION);
+ }
+ JvmtiUniquePtr<char[]> source_copy;
+ const char* file_name = klass->GetSourceFile();
+ if (file_name == nullptr) {
+ return ERR(ABSENT_INFORMATION);
+ }
+ jvmtiError ret;
+ source_copy = CopyString(env, file_name, &ret);
+ if (source_copy == nullptr) {
+ return ret;
+ }
+ *source_name_ptr = source_copy.release();
+ return OK;
+}
+
+jvmtiError ClassUtil::GetSourceDebugExtension(jvmtiEnv* env,
+ jclass jklass,
+ char** source_debug_extension_ptr) {
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ if (jklass == nullptr) {
+ return ERR(INVALID_CLASS);
+ }
+ art::ObjPtr<art::mirror::Object> jklass_obj = soa.Decode<art::mirror::Object>(jklass);
+ if (!jklass_obj->IsClass()) {
+ return ERR(INVALID_CLASS);
+ }
+ art::StackHandleScope<1> hs(art::Thread::Current());
+ art::Handle<art::mirror::Class> klass(hs.NewHandle(jklass_obj->AsClass()));
+ if (klass->IsPrimitive() || klass->IsArrayClass()) {
+ return ERR(ABSENT_INFORMATION);
+ }
+ JvmtiUniquePtr<char[]> ext_copy;
+ const char* data = art::annotations::GetSourceDebugExtension(klass);
+ if (data == nullptr) {
+ return ERR(ABSENT_INFORMATION);
+ }
+ jvmtiError ret;
+ ext_copy = CopyString(env, data, &ret);
+ if (ext_copy == nullptr) {
+ return ret;
+ }
+ *source_debug_extension_ptr = ext_copy.release();
+ return OK;
+}
+
} // namespace openjdkjvmti
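
From an agent's point of view the two new entry points are reached through the standard JVMTI functions; a usage sketch for GetSourceFileName, assuming can_get_source_file_name was added to the environment's capabilities at load time:

#include <jvmti.h>
#include <cstdio>

// Print the SourceFile attribute of `klass`, if the VM can provide it.
void PrintSourceFile(jvmtiEnv* jvmti, jclass klass) {
  char* source_name = nullptr;
  jvmtiError err = jvmti->GetSourceFileName(klass, &source_name);
  if (err == JVMTI_ERROR_NONE) {
    printf("source file: %s\n", source_name);
    jvmti->Deallocate(reinterpret_cast<unsigned char*>(source_name));
  } else if (err == JVMTI_ERROR_ABSENT_INFORMATION) {
    printf("no SourceFile attribute\n");
  }
}
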
diff --git a/runtime/openjdkjvmti/ti_class.h b/runtime/openjdkjvmti/ti_class.h
index aa2260f035..7bb6b3e5de 100644
--- a/runtime/openjdkjvmti/ti_class.h
+++ b/runtime/openjdkjvmti/ti_class.h
@@ -82,6 +82,12 @@ class ClassUtil {
jclass klass,
jint* minor_version_ptr,
jint* major_version_ptr);
+
+ static jvmtiError GetSourceFileName(jvmtiEnv* env, jclass klass, char** source_name_ptr);
+
+ static jvmtiError GetSourceDebugExtension(jvmtiEnv* env,
+ jclass klass,
+ char** source_debug_extension_ptr);
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_class_definition.cc b/runtime/openjdkjvmti/ti_class_definition.cc
index 180895b597..8e8ab196f6 100644
--- a/runtime/openjdkjvmti/ti_class_definition.cc
+++ b/runtime/openjdkjvmti/ti_class_definition.cc
@@ -56,7 +56,7 @@ bool ArtClassDefinition::IsModified() const {
// be that agents were removed in the mean-time so we still have a different dex file. The dex
// checksum means this is likely to be fairly fast.
return static_cast<jint>(original_dex_file_.size()) != dex_len_ ||
- memcmp(&original_dex_file_.At(0), dex_data_.get(), dex_len_) != 0;
+ memcmp(original_dex_file_.data(), dex_data_.get(), dex_len_) != 0;
}
jvmtiError ArtClassDefinition::InitCommon(ArtJvmTiEnv* env, jclass klass) {
@@ -152,12 +152,12 @@ jvmtiError ArtClassDefinition::Init(ArtJvmTiEnv* env, jclass klass) {
unsigned char* original_data_memory = nullptr;
res = CopyDataIntoJvmtiBuffer(env, dex_data_.get(), dex_len_, &original_data_memory);
original_dex_file_memory_ = MakeJvmtiUniquePtr(env, original_data_memory);
- original_dex_file_ = art::ArraySlice<const unsigned char>(original_data_memory, dex_len_);
+ original_dex_file_ = art::ArrayRef<const unsigned char>(original_data_memory, dex_len_);
} else {
// We know that we have been redefined at least once (there is an original_dex_file set in
// the class) so we can just use the current dex file directly.
const art::DexFile& dex_file = m_klass->GetDexFile();
- original_dex_file_ = art::ArraySlice<const unsigned char>(dex_file.Begin(), dex_file.Size());
+ original_dex_file_ = art::ArrayRef<const unsigned char>(dex_file.Begin(), dex_file.Size());
}
return res;
}
@@ -168,7 +168,7 @@ jvmtiError ArtClassDefinition::Init(ArtJvmTiEnv* env, const jvmtiClassDefinition
return res;
}
unsigned char* new_data = nullptr;
- original_dex_file_ = art::ArraySlice<const unsigned char>(def.class_bytes, def.class_byte_count);
+ original_dex_file_ = art::ArrayRef<const unsigned char>(def.class_bytes, def.class_byte_count);
redefined_ = true;
dex_len_ = def.class_byte_count;
res = CopyDataIntoJvmtiBuffer(env, def.class_bytes, def.class_byte_count, /*out*/ &new_data);
diff --git a/runtime/openjdkjvmti/ti_class_definition.h b/runtime/openjdkjvmti/ti_class_definition.h
index 43d0c3fc62..2c268ddd4c 100644
--- a/runtime/openjdkjvmti/ti_class_definition.h
+++ b/runtime/openjdkjvmti/ti_class_definition.h
@@ -34,6 +34,8 @@
#include "art_jvmti.h"
+#include "base/array_ref.h"
+
namespace openjdkjvmti {
// A struct that stores data needed for redefining/transforming classes. This structure should only
@@ -68,12 +70,12 @@ class ArtClassDefinition {
}
}
- art::ArraySlice<const unsigned char> GetNewOriginalDexFile() const {
+ art::ArrayRef<const unsigned char> GetNewOriginalDexFile() const {
DCHECK(IsInitialized());
if (redefined_) {
return original_dex_file_;
} else {
- return art::ArraySlice<const unsigned char>();
+ return art::ArrayRef<const unsigned char>();
}
}
@@ -103,9 +105,9 @@ class ArtClassDefinition {
return protection_domain_;
}
- art::ArraySlice<const unsigned char> GetDexData() const {
+ art::ArrayRef<const unsigned char> GetDexData() const {
DCHECK(IsInitialized());
- return art::ArraySlice<const unsigned char>(dex_data_.get(), dex_len_);
+ return art::ArrayRef<const unsigned char>(dex_data_.get(), dex_len_);
}
private:
@@ -118,7 +120,7 @@ class ArtClassDefinition {
jint dex_len_;
JvmtiUniquePtr<unsigned char> dex_data_;
JvmtiUniquePtr<unsigned char> original_dex_file_memory_;
- art::ArraySlice<const unsigned char> original_dex_file_;
+ art::ArrayRef<const unsigned char> original_dex_file_;
bool redefined_;
DISALLOW_COPY_AND_ASSIGN(ArtClassDefinition);
diff --git a/runtime/openjdkjvmti/ti_class_loader.cc b/runtime/openjdkjvmti/ti_class_loader.cc
index 5544dde219..205046c894 100644
--- a/runtime/openjdkjvmti/ti_class_loader.cc
+++ b/runtime/openjdkjvmti/ti_class_loader.cc
@@ -37,7 +37,6 @@
#include "art_field-inl.h"
#include "art_jvmti.h"
-#include "base/array_slice.h"
#include "base/logging.h"
#include "dex_file.h"
#include "dex_file_types.h"
diff --git a/runtime/openjdkjvmti/ti_field.cc b/runtime/openjdkjvmti/ti_field.cc
index 342d8be2b0..32c064e89c 100644
--- a/runtime/openjdkjvmti/ti_field.cc
+++ b/runtime/openjdkjvmti/ti_field.cc
@@ -187,4 +187,68 @@ jvmtiError FieldUtil::IsFieldSynthetic(jvmtiEnv* env ATTRIBUTE_UNUSED,
return ERR(NONE);
}
+jvmtiError FieldUtil::SetFieldModificationWatch(jvmtiEnv* jenv, jclass klass, jfieldID field) {
+ ArtJvmTiEnv* env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
+ if (klass == nullptr) {
+ return ERR(INVALID_CLASS);
+ }
+ if (field == nullptr) {
+ return ERR(INVALID_FIELDID);
+ }
+ auto res_pair = env->modify_watched_fields.insert(art::jni::DecodeArtField(field));
+ if (!res_pair.second) {
+ // Didn't get inserted because it's already present!
+ return ERR(DUPLICATE);
+ }
+ return OK;
+}
+
+jvmtiError FieldUtil::ClearFieldModificationWatch(jvmtiEnv* jenv, jclass klass, jfieldID field) {
+ ArtJvmTiEnv* env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
+ if (klass == nullptr) {
+ return ERR(INVALID_CLASS);
+ }
+ if (field == nullptr) {
+ return ERR(INVALID_FIELDID);
+ }
+ auto pos = env->modify_watched_fields.find(art::jni::DecodeArtField(field));
+ if (pos == env->modify_watched_fields.end()) {
+ return ERR(NOT_FOUND);
+ }
+ env->modify_watched_fields.erase(pos);
+ return OK;
+}
+
+jvmtiError FieldUtil::SetFieldAccessWatch(jvmtiEnv* jenv, jclass klass, jfieldID field) {
+ ArtJvmTiEnv* env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
+ if (klass == nullptr) {
+ return ERR(INVALID_CLASS);
+ }
+ if (field == nullptr) {
+ return ERR(INVALID_FIELDID);
+ }
+ auto res_pair = env->access_watched_fields.insert(art::jni::DecodeArtField(field));
+ if (!res_pair.second) {
+ // Didn't get inserted because it's already present!
+ return ERR(DUPLICATE);
+ }
+ return OK;
+}
+
+jvmtiError FieldUtil::ClearFieldAccessWatch(jvmtiEnv* jenv, jclass klass, jfieldID field) {
+ ArtJvmTiEnv* env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
+ if (klass == nullptr) {
+ return ERR(INVALID_CLASS);
+ }
+ if (field == nullptr) {
+ return ERR(INVALID_FIELDID);
+ }
+ auto pos = env->access_watched_fields.find(art::jni::DecodeArtField(field));
+ if (pos == env->access_watched_fields.end()) {
+ return ERR(NOT_FOUND);
+ }
+ env->access_watched_fields.erase(pos);
+ return OK;
+}
+
} // namespace openjdkjvmti
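
On the agent side, the new watch bookkeeping is driven through the usual JVMTI calls; a sketch assuming can_generate_field_modification_events was granted at OnLoad:

#include <jvmti.h>
#include <cstdio>

// Callback invoked whenever a watched field is written.
static void JNICALL OnFieldModification(jvmtiEnv* jvmti, JNIEnv* jni, jthread thread,
                                        jmethodID method, jlocation location,
                                        jclass field_klass, jobject object,
                                        jfieldID field, char signature_type, jvalue new_value) {
  printf("watched field written (type '%c')\n", signature_type);
}

// Register the callback, enable the event, and watch a single field.
jvmtiError WatchField(jvmtiEnv* jvmti, jclass klass, jfieldID field) {
  jvmtiEventCallbacks callbacks = {};
  callbacks.FieldModification = &OnFieldModification;
  jvmtiError err = jvmti->SetEventCallbacks(&callbacks, sizeof(callbacks));
  if (err != JVMTI_ERROR_NONE) return err;
  err = jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_FIELD_MODIFICATION, nullptr);
  if (err != JVMTI_ERROR_NONE) return err;
  return jvmti->SetFieldModificationWatch(klass, field);
}
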
diff --git a/runtime/openjdkjvmti/ti_field.h b/runtime/openjdkjvmti/ti_field.h
index 9a29f81d76..880949eecb 100644
--- a/runtime/openjdkjvmti/ti_field.h
+++ b/runtime/openjdkjvmti/ti_field.h
@@ -60,6 +60,11 @@ class FieldUtil {
jclass klass,
jfieldID field,
jboolean* is_synthetic_ptr);
+
+ static jvmtiError SetFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field);
+ static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field);
+ static jvmtiError SetFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field);
+ static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field);
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index 319b1c2a9c..b3bc6764c9 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -23,6 +23,7 @@
#include "class_linker.h"
#include "gc/heap.h"
#include "gc_root-inl.h"
+#include "java_frame_root_info.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
#include "jvmti_weak_table-inl.h"
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index b382a3e7c3..5422f48664 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -38,7 +38,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "art_jvmti.h"
-#include "base/array_slice.h"
+#include "base/array_ref.h"
#include "base/logging.h"
#include "class_linker-inl.h"
#include "debugger.h"
@@ -265,7 +265,7 @@ jvmtiError Redefiner::GetClassRedefinitionError(art::Handle<art::mirror::Class>
// Moves dex data to an anonymous, read-only mmap'd region.
std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& original_location,
- art::ArraySlice<const unsigned char> data,
+ art::ArrayRef<const unsigned char> data,
std::string* error_msg) {
std::unique_ptr<art::MemMap> map(art::MemMap::MapAnonymous(
StringPrintf("%s-transformed", original_location.c_str()).c_str(),
@@ -278,7 +278,7 @@ std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& orig
if (map == nullptr) {
return map;
}
- memcpy(map->Begin(), &data.At(0), data.size());
+ memcpy(map->Begin(), data.data(), data.size());
// Make the dex files mmap read only. This matches how other DexFiles are mmaped and prevents
// programs from corrupting it.
map->Protect(PROT_READ);
@@ -290,7 +290,7 @@ Redefiner::ClassRedefinition::ClassRedefinition(
jclass klass,
const art::DexFile* redefined_dex_file,
const char* class_sig,
- art::ArraySlice<const unsigned char> orig_dex_file) :
+ art::ArrayRef<const unsigned char> orig_dex_file) :
driver_(driver),
klass_(klass),
dex_file_(redefined_dex_file),
@@ -493,7 +493,7 @@ art::mirror::Object* Redefiner::ClassRedefinition::AllocateOrGetOriginalDexFile(
if (original_dex_file_.size() != 0) {
return art::mirror::ByteArray::AllocateAndFill(
driver_->self_,
- reinterpret_cast<const signed char*>(&original_dex_file_.At(0)),
+ reinterpret_cast<const signed char*>(original_dex_file_.data()),
original_dex_file_.size());
}
@@ -601,9 +601,7 @@ bool Redefiner::ClassRedefinition::CheckSameMethods() {
}
// Skip all of the fields. We should have already checked this.
- while (new_iter.HasNextStaticField() || new_iter.HasNextInstanceField()) {
- new_iter.Next();
- }
+ new_iter.SkipAllFields();
// Check each of the methods. NB we don't need to specifically check for removals since the 2 dex
// files have the same number of methods, which means there must be an equal amount of additions
// and removals.
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 5e31627694..ec4a8b2789 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -38,7 +38,7 @@
#include "art_jvmti.h"
#include "art_method.h"
-#include "base/array_slice.h"
+#include "base/array_ref.h"
#include "class_linker.h"
#include "dex_file.h"
#include "gc_root-inl.h"
@@ -95,7 +95,7 @@ class Redefiner {
static jvmtiError IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable);
static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location,
- art::ArraySlice<const unsigned char> data,
+ art::ArrayRef<const unsigned char> data,
std::string* error_msg);
private:
@@ -105,7 +105,7 @@ class Redefiner {
jclass klass,
const art::DexFile* redefined_dex_file,
const char* class_sig,
- art::ArraySlice<const unsigned char> orig_dex_file)
+ art::ArrayRef<const unsigned char> orig_dex_file)
REQUIRES_SHARED(art::Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS so we can unlock the class in the destructor.
@@ -205,7 +205,7 @@ class Redefiner {
jclass klass_;
std::unique_ptr<const art::DexFile> dex_file_;
std::string class_sig_;
- art::ArraySlice<const unsigned char> original_dex_file_;
+ art::ArrayRef<const unsigned char> original_dex_file_;
};
jvmtiError result_;
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc
index 22da2d2f65..550b97272d 100644
--- a/runtime/openjdkjvmti/ti_stack.cc
+++ b/runtime/openjdkjvmti/ti_stack.cc
@@ -59,13 +59,18 @@
namespace openjdkjvmti {
+template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
GetStackTraceVisitor(art::Thread* thread_in,
size_t start_,
- size_t stop_)
+ size_t stop_,
+ FrameFn fn_)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ fn(fn_),
start(start_),
stop(stop_) {}
+ GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
+ GetStackTraceVisitor(GetStackTraceVisitor&&) = default;
bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::ArtMethod* m = GetMethod();
@@ -81,7 +86,7 @@ struct GetStackTraceVisitor : public art::StackVisitor {
jlong dex_location = (dex_pc == art::DexFile::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);
jvmtiFrameInfo info = { id, dex_location };
- frames.push_back(info);
+ fn(info);
if (stop == 1) {
return false; // We're done.
@@ -95,24 +100,34 @@ struct GetStackTraceVisitor : public art::StackVisitor {
return true;
}
- std::vector<jvmtiFrameInfo> frames;
+ FrameFn fn;
size_t start;
size_t stop;
};
-struct GetStackTraceClosure : public art::Closure {
+template <typename FrameFn>
+GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
+ size_t start,
+ size_t stop,
+ FrameFn fn) {
+ return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
+}
+
+struct GetStackTraceVectorClosure : public art::Closure {
public:
- GetStackTraceClosure(size_t start, size_t stop)
+ GetStackTraceVectorClosure(size_t start, size_t stop)
: start_input(start),
stop_input(stop),
start_result(0),
stop_result(0) {}
void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
- GetStackTraceVisitor visitor(self, start_input, stop_input);
- visitor.WalkStack(false);
+ auto frames_fn = [&](jvmtiFrameInfo info) {
+ frames.push_back(info);
+ };
+ auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
+ visitor.WalkStack(/* include_transitions */ false);
- frames.swap(visitor.frames);
start_result = visitor.start;
stop_result = visitor.stop;
}
@@ -163,6 +178,33 @@ static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames
return ERR(NONE);
}
+struct GetStackTraceDirectClosure : public art::Closure {
+ public:
+ GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
+ : frame_buffer(frame_buffer_),
+ start_input(start),
+ stop_input(stop),
+ index(0) {
+ DCHECK_GE(start_input, 0u);
+ }
+
+ void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ auto frames_fn = [&](jvmtiFrameInfo info) {
+ frame_buffer[index] = info;
+ ++index;
+ };
+ auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
+ visitor.WalkStack(/* include_transitions */ false);
+ }
+
+ jvmtiFrameInfo* frame_buffer;
+
+ const size_t start_input;
+ const size_t stop_input;
+
+ size_t index = 0;
+};
+
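With the visitor templated on FrameFn, any callable can consume frames without a dedicated closure type. A hypothetical frame counter, for illustration only (lock annotations elided; like the closures above it is assumed to run on the target thread at a checkpoint):

    size_t CountJavaFrames(art::Thread* self) {
      size_t count = 0;
      auto visitor = MakeStackTraceVisitor(self, /* start */ 0u, /* stop */ 0u,
                                           [&](jvmtiFrameInfo) { ++count; });
      visitor.WalkStack(/* include_transitions */ false);
      return count;
    }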
static jvmtiError GetThread(JNIEnv* env, jthread java_thread, art::Thread** thread) {
if (java_thread == nullptr) {
*thread = art::Thread::Current();
@@ -220,8 +262,20 @@ jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
return ERR(NONE);
}
- GetStackTraceClosure closure(start_depth >= 0 ? static_cast<size_t>(start_depth) : 0,
- start_depth >= 0 ? static_cast<size_t>(max_frame_count) : 0);
+ if (start_depth >= 0) {
+ // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
+ GetStackTraceDirectClosure closure(frame_buffer,
+ static_cast<size_t>(start_depth),
+ static_cast<size_t>(max_frame_count));
+ thread->RequestSynchronousCheckpoint(&closure);
+ *count_ptr = static_cast<jint>(closure.index);
+ if (closure.index < static_cast<size_t>(start_depth)) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ return ERR(NONE);
+ }
+
+ GetStackTraceVectorClosure closure(0, 0);
thread->RequestSynchronousCheckpoint(&closure);
return TranslateFrameVector(closure.frames,
@@ -232,42 +286,6 @@ jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
count_ptr);
}
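From the agent's point of view nothing changes: a non-negative start_depth now takes the direct-fill path above, while a negative start_depth (counted from the bottom of the stack, per the JVMTI spec) still collects the full trace and trims it in TranslateFrameVector. Typical caller-side usage, with jvmti assumed to be an already-obtained jvmtiEnv* (standard JVMTI, not part of this change):

    // Up to 16 frames of the current thread (nullptr means the current thread), from the top.
    jvmtiFrameInfo frames[16];
    jint count = 0;
    jvmtiError err = jvmti->GetStackTrace(/* thread */ nullptr, /* start_depth */ 0,
                                          /* max_frame_count */ 16, frames, &count);
    if (err == JVMTI_ERROR_NONE) {
      // frames[0..count) holds {jmethodID, dex pc or -1} pairs, as filled in above.
    }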
-struct GetAllStackTraceClosure : public art::Closure {
- public:
- explicit GetAllStackTraceClosure(size_t stop)
- : start_input(0),
- stop_input(stop),
- frames_lock("GetAllStackTraceGuard", art::LockLevel::kAbortLock),
- start_result(0),
- stop_result(0) {}
-
- void Run(art::Thread* self)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) REQUIRES(!frames_lock) {
- // self should be live here (so it could be suspended). No need to filter.
-
- art::Thread* current = art::Thread::Current();
- std::vector<jvmtiFrameInfo> self_frames;
-
- GetStackTraceVisitor visitor(self, start_input, stop_input);
- visitor.WalkStack(false);
-
- self_frames.swap(visitor.frames);
-
- art::MutexLock mu(current, frames_lock);
- frames.emplace(self, self_frames);
- }
-
- const size_t start_input;
- const size_t stop_input;
-
- art::Mutex frames_lock;
- std::unordered_map<art::Thread*, std::vector<jvmtiFrameInfo>> frames GUARDED_BY(frames_lock);
- size_t start_result;
- size_t stop_result;
-};
-
-
-
jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
jint max_frame_count,
jvmtiStackInfo** stack_info_ptr,
@@ -300,7 +318,7 @@ jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
continue;
}
- GetStackTraceClosure closure(0u, static_cast<size_t>(max_frame_count));
+ GetStackTraceVectorClosure closure(0u, static_cast<size_t>(max_frame_count));
thread->RequestSynchronousCheckpoint(&closure);
threads.push_back(thread);
@@ -460,7 +478,7 @@ jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
for (size_t index = 0; index != handles.size(); ++index) {
if (peer == handles[index].Get()) {
// Found the thread.
- GetStackTraceClosure closure(0u, static_cast<size_t>(max_frame_count));
+ GetStackTraceVectorClosure closure(0u, static_cast<size_t>(max_frame_count));
thread->RequestSynchronousCheckpoint(&closure);
threads.push_back(thread);
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
index 15d8dd0fc2..1d7f137f2b 100644
--- a/runtime/openjdkjvmti/transform.cc
+++ b/runtime/openjdkjvmti/transform.cc
@@ -35,6 +35,7 @@
#include "transform.h"
#include "art_method.h"
+#include "base/array_ref.h"
#include "class_linker.h"
#include "dex_file.h"
#include "dex_file_types.h"
@@ -70,7 +71,7 @@ jvmtiError Transformer::RetransformClassesDirect(
for (ArtClassDefinition& def : *definitions) {
jint new_len = -1;
unsigned char* new_data = nullptr;
- art::ArraySlice<const unsigned char> dex_data = def.GetDexData();
+ art::ArrayRef<const unsigned char> dex_data = def.GetDexData();
event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
self,
GetJniEnv(env),
@@ -79,7 +80,7 @@ jvmtiError Transformer::RetransformClassesDirect(
def.GetName().c_str(),
def.GetProtectionDomain(),
static_cast<jint>(dex_data.size()),
- &dex_data.At(0),
+ dex_data.data(),
/*out*/&new_len,
/*out*/&new_data);
def.SetNewDexData(env, new_len, new_data);
diff --git a/runtime/stack.cc b/runtime/stack.cc
index eec0460015..19df0d26a1 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -27,6 +27,7 @@
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
+#include "interpreter/shadow_frame.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "linear_alloc.h"
@@ -39,7 +40,6 @@
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
-#include "verify_object.h"
namespace art {
@@ -47,29 +47,6 @@ using android::base::StringPrintf;
static constexpr bool kDebugStackWalk = false;
-mirror::Object* ShadowFrame::GetThisObject() const {
- ArtMethod* m = GetMethod();
- if (m->IsStatic()) {
- return nullptr;
- } else if (m->IsNative()) {
- return GetVRegReference(0);
- } else {
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- CHECK(code_item != nullptr) << ArtMethod::PrettyMethod(m);
- uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
- return GetVRegReference(reg);
- }
-}
-
-mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
- ArtMethod* m = GetMethod();
- if (m->IsStatic()) {
- return nullptr;
- } else {
- return GetVRegReference(NumberOfVRegs() - num_ins);
- }
-}
-
StackVisitor::StackVisitor(Thread* thread,
Context* context,
StackWalkKind walk_kind,
@@ -97,9 +74,10 @@ StackVisitor::StackVisitor(Thread* thread,
}
}
-InlineInfo StackVisitor::GetCurrentInlineInfo() const {
- const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
+static InlineInfo GetCurrentInlineInfo(const OatQuickMethodHeader* method_header,
+ uintptr_t cur_quick_frame_pc)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc);
CodeInfo code_info = method_header->GetOptimizedCodeInfo();
CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
@@ -113,7 +91,8 @@ ArtMethod* StackVisitor::GetMethod() const {
} else if (cur_quick_frame_ != nullptr) {
if (IsInInlinedFrame()) {
size_t depth_in_stack_map = current_inlining_depth_ - 1;
- InlineInfo inline_info = GetCurrentInlineInfo();
+ InlineInfo inline_info = GetCurrentInlineInfo(GetCurrentOatQuickMethodHeader(),
+ cur_quick_frame_pc_);
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
MethodInfo method_info = method_header->GetOptimizedMethodInfo();
@@ -138,8 +117,8 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
size_t depth_in_stack_map = current_inlining_depth_ - 1;
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
- return GetCurrentInlineInfo().GetDexPcAtDepth(encoding.inline_info.encoding,
- depth_in_stack_map);
+ return GetCurrentInlineInfo(GetCurrentOatQuickMethodHeader(), cur_quick_frame_pc_).
+ GetDexPcAtDepth(encoding.inline_info.encoding, depth_in_stack_map);
} else if (cur_oat_quick_method_header_ == nullptr) {
return DexFile::kDexNoIndex;
} else {
@@ -924,134 +903,4 @@ void StackVisitor::WalkStack(bool include_transitions) {
template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kYes>(bool);
template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kNo>(bool);
-void JavaFrameRootInfo::Describe(std::ostream& os) const {
- const StackVisitor* visitor = stack_visitor_;
- CHECK(visitor != nullptr);
- os << "Type=" << GetType() << " thread_id=" << GetThreadId() << " location=" <<
- visitor->DescribeLocation() << " vreg=" << vreg_;
-}
-
-int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
- uint32_t core_spills, uint32_t fp_spills,
- size_t frame_size, int reg, InstructionSet isa) {
- PointerSize pointer_size = InstructionSetPointerSize(isa);
- if (kIsDebugBuild) {
- auto* runtime = Runtime::Current();
- if (runtime != nullptr) {
- CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size);
- }
- }
- DCHECK_ALIGNED(frame_size, kStackAlignment);
- DCHECK_NE(reg, -1);
- int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
- + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
- + sizeof(uint32_t); // Filler.
- int num_regs = code_item->registers_size_ - code_item->ins_size_;
- int temp_threshold = code_item->registers_size_;
- const int max_num_special_temps = 1;
- if (reg == temp_threshold) {
- // The current method pointer corresponds to special location on stack.
- return 0;
- } else if (reg >= temp_threshold + max_num_special_temps) {
- /*
- * Special temporaries may have custom locations and the logic above deals with that.
- * However, non-special temporaries are placed relative to the outs.
- */
- int temps_start = code_item->outs_size_ * sizeof(uint32_t)
- + static_cast<size_t>(pointer_size) /* art method */;
- int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
- return temps_start + relative_offset;
- } else if (reg < num_regs) {
- int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
- return locals_start + (reg * sizeof(uint32_t));
- } else {
- // Handle ins.
- return frame_size + ((reg - num_regs) * sizeof(uint32_t))
- + static_cast<size_t>(pointer_size) /* art method */;
- }
-}
-
-void LockCountData::AddMonitor(Thread* self, mirror::Object* obj) {
- if (obj == nullptr) {
- return;
- }
-
- // If there's an error during enter, we won't have locked the monitor. So check there's no
- // exception.
- if (self->IsExceptionPending()) {
- return;
- }
-
- if (monitors_ == nullptr) {
- monitors_.reset(new std::vector<mirror::Object*>());
- }
- monitors_->push_back(obj);
-}
-
-void LockCountData::RemoveMonitorOrThrow(Thread* self, const mirror::Object* obj) {
- if (obj == nullptr) {
- return;
- }
- bool found_object = false;
- if (monitors_ != nullptr) {
- // We need to remove one pointer to ref, as duplicates are used for counting recursive locks.
- // We arbitrarily choose the first one.
- auto it = std::find(monitors_->begin(), monitors_->end(), obj);
- if (it != monitors_->end()) {
- monitors_->erase(it);
- found_object = true;
- }
- }
- if (!found_object) {
- // The object wasn't found. Time for an IllegalMonitorStateException.
- // The order here isn't fully clear. Assume that any other pending exception is swallowed.
- // TODO: Maybe make already pending exception a suppressed exception.
- self->ClearException();
- self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
- "did not lock monitor on object of type '%s' before unlocking",
- const_cast<mirror::Object*>(obj)->PrettyTypeOf().c_str());
- }
-}
-
-// Helper to unlock a monitor. Must be NO_THREAD_SAFETY_ANALYSIS, as we can't statically show
-// that the object was locked.
-void MonitorExitHelper(Thread* self, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
- DCHECK(self != nullptr);
- DCHECK(obj != nullptr);
- obj->MonitorExit(self);
-}
-
-bool LockCountData::CheckAllMonitorsReleasedOrThrow(Thread* self) {
- DCHECK(self != nullptr);
- if (monitors_ != nullptr) {
- if (!monitors_->empty()) {
- // There may be an exception pending, if the method is terminating abruptly. Clear it.
- // TODO: Should we add this as a suppressed exception?
- self->ClearException();
-
- // OK, there are monitors that are still locked. To enforce structured locking (and avoid
- // deadlocks) we unlock all of them before we raise the IllegalMonitorState exception.
- for (mirror::Object* obj : *monitors_) {
- MonitorExitHelper(self, obj);
- // If this raised an exception, ignore. TODO: Should we add this as suppressed
- // exceptions?
- if (self->IsExceptionPending()) {
- self->ClearException();
- }
- }
- // Raise an exception, just give the first object as the sample.
- mirror::Object* first = (*monitors_)[0];
- self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
- "did not unlock monitor on object of type '%s'",
- mirror::Object::PrettyTypeOf(first).c_str());
-
- // To make sure this path is not triggered again, clean out the monitors.
- monitors_->clear();
-
- return false;
- }
- }
- return true;
-}
-
} // namespace art
diff --git a/runtime/stack.h b/runtime/stack.h
index 8c74a8c405..4ef9487724 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -20,15 +20,9 @@
#include <stdint.h>
#include <string>
-#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
-#include "dex_file.h"
-#include "gc_root.h"
#include "quick/quick_method_frame_info.h"
-#include "read_barrier.h"
-#include "stack_reference.h"
-#include "verify_object.h"
namespace art {
@@ -39,11 +33,8 @@ namespace mirror {
class ArtMethod;
class Context;
class HandleScope;
-class InlineInfo;
class OatQuickMethodHeader;
-class ScopedObjectAccess;
class ShadowFrame;
-class StackVisitor;
class Thread;
union JValue;
@@ -62,455 +53,60 @@ enum VRegKind {
};
std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);
-// Forward declaration. Just calls the destructor.
-struct ShadowFrameDeleter;
-using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>;
-
// Size in bytes of the should_deoptimize flag on stack.
// We just need 4 bytes for our purpose regardless of the architecture. Frame size
// calculation will automatically do alignment for the final frame size.
static constexpr size_t kShouldDeoptimizeFlagSize = 4;
-// Counting locks by storing object pointers into a vector. Duplicate entries mark recursive locks.
-// The vector will be visited with the ShadowFrame during GC (so all the locked-on objects are
-// thread roots).
-// Note: implementation is split so that the call sites may be optimized to no-ops in case no
-// lock counting is necessary. The actual implementation is in the cc file to avoid
-// dependencies.
-class LockCountData {
- public:
- // Add the given object to the list of monitors, that is, objects that have been locked. This
- // will not throw (but be skipped if there is an exception pending on entry).
- void AddMonitor(Thread* self, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Try to remove the given object from the monitor list, indicating an unlock operation.
- // This will throw an IllegalMonitorStateException (clearing any already pending exception), in
- // case that there wasn't a lock recorded for the object.
- void RemoveMonitorOrThrow(Thread* self,
- const mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Check whether all acquired monitors have been released. This will potentially throw an
- // IllegalMonitorStateException, clearing any already pending exception. Returns true if the
- // check shows that everything is OK wrt/ lock counting, false otherwise.
- bool CheckAllMonitorsReleasedOrThrow(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
-
- template <typename T, typename... Args>
- void VisitMonitors(T visitor, Args&&... args) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (monitors_ != nullptr) {
- // Visitors may change the Object*. Be careful with the foreach loop.
- for (mirror::Object*& obj : *monitors_) {
- visitor(/* inout */ &obj, std::forward<Args>(args)...);
- }
- }
- }
-
- private:
- // Stores references to the locked-on objects. As noted, this should be visited during thread
- // marking.
- std::unique_ptr<std::vector<mirror::Object*>> monitors_;
-};
-
-// ShadowFrame has 2 possible layouts:
-// - interpreter - separate VRegs and reference arrays. References are in the reference array.
-// - JNI - just VRegs, but where every VReg holds a reference.
-class ShadowFrame {
- public:
- // Compute size of ShadowFrame in bytes assuming it has a reference array.
- static size_t ComputeSize(uint32_t num_vregs) {
- return sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) +
- (sizeof(StackReference<mirror::Object>) * num_vregs);
- }
-
- // Create ShadowFrame in heap for deoptimization.
- static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link,
- ArtMethod* method, uint32_t dex_pc) {
- uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
- return CreateShadowFrameImpl(num_vregs, link, method, dex_pc, memory);
- }
-
- // Delete a ShadowFrame allocated on the heap for deoptimization.
- static void DeleteDeoptimizedFrame(ShadowFrame* sf) {
- sf->~ShadowFrame(); // Explicitly destruct.
- uint8_t* memory = reinterpret_cast<uint8_t*>(sf);
- delete[] memory;
- }
-
- // Create a shadow frame in a fresh alloca. This needs to be in the context of the caller.
- // Inlining doesn't work, the compiler will still undo the alloca. So this needs to be a macro.
-#define CREATE_SHADOW_FRAME(num_vregs, link, method, dex_pc) ({ \
- size_t frame_size = ShadowFrame::ComputeSize(num_vregs); \
- void* alloca_mem = alloca(frame_size); \
- ShadowFrameAllocaUniquePtr( \
- ShadowFrame::CreateShadowFrameImpl((num_vregs), (link), (method), (dex_pc), \
- (alloca_mem))); \
- })
-
- ~ShadowFrame() {}
-
- // TODO(iam): Clean references array up since they're always there,
- // we don't need to do conditionals.
- bool HasReferenceArray() const {
- return true;
- }
-
- uint32_t NumberOfVRegs() const {
- return number_of_vregs_;
- }
-
- uint32_t GetDexPC() const {
- return (dex_pc_ptr_ == nullptr) ? dex_pc_ : dex_pc_ptr_ - code_item_->insns_;
- }
-
- int16_t GetCachedHotnessCountdown() const {
- return cached_hotness_countdown_;
- }
-
- void SetCachedHotnessCountdown(int16_t cached_hotness_countdown) {
- cached_hotness_countdown_ = cached_hotness_countdown;
- }
-
- int16_t GetHotnessCountdown() const {
- return hotness_countdown_;
- }
-
- void SetHotnessCountdown(int16_t hotness_countdown) {
- hotness_countdown_ = hotness_countdown;
- }
-
- void SetDexPC(uint32_t dex_pc) {
- dex_pc_ = dex_pc;
- dex_pc_ptr_ = nullptr;
- }
-
- ShadowFrame* GetLink() const {
- return link_;
- }
-
- void SetLink(ShadowFrame* frame) {
- DCHECK_NE(this, frame);
- link_ = frame;
- }
-
- int32_t GetVReg(size_t i) const {
- DCHECK_LT(i, NumberOfVRegs());
- const uint32_t* vreg = &vregs_[i];
- return *reinterpret_cast<const int32_t*>(vreg);
- }
-
- // Shorts are extended to Ints in VRegs. Interpreter intrinsics needs them as shorts.
- int16_t GetVRegShort(size_t i) const {
- return static_cast<int16_t>(GetVReg(i));
- }
-
- uint32_t* GetVRegAddr(size_t i) {
- return &vregs_[i];
- }
-
- uint32_t* GetShadowRefAddr(size_t i) {
- DCHECK(HasReferenceArray());
- DCHECK_LT(i, NumberOfVRegs());
- return &vregs_[i + NumberOfVRegs()];
- }
-
- void SetCodeItem(const DexFile::CodeItem* code_item) {
- code_item_ = code_item;
- }
-
- const DexFile::CodeItem* GetCodeItem() const {
- return code_item_;
- }
-
- float GetVRegFloat(size_t i) const {
- DCHECK_LT(i, NumberOfVRegs());
- // NOTE: Strict-aliasing?
- const uint32_t* vreg = &vregs_[i];
- return *reinterpret_cast<const float*>(vreg);
- }
-
- int64_t GetVRegLong(size_t i) const {
- DCHECK_LT(i, NumberOfVRegs());
- const uint32_t* vreg = &vregs_[i];
- typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
- return *reinterpret_cast<unaligned_int64*>(vreg);
- }
-
- double GetVRegDouble(size_t i) const {
- DCHECK_LT(i, NumberOfVRegs());
- const uint32_t* vreg = &vregs_[i];
- typedef const double unaligned_double __attribute__ ((aligned (4)));
- return *reinterpret_cast<unaligned_double*>(vreg);
- }
-
- // Look up the reference given its virtual register number.
- // If this returns non-null then this does not mean the vreg is currently a reference
- // on non-moving collectors. Check that the raw reg with GetVReg is equal to this if not certain.
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- mirror::Object* GetVRegReference(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK_LT(i, NumberOfVRegs());
- mirror::Object* ref;
- if (HasReferenceArray()) {
- ref = References()[i].AsMirrorPtr();
- } else {
- const uint32_t* vreg_ptr = &vregs_[i];
- ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
- }
- if (kUseReadBarrier) {
- ReadBarrier::AssertToSpaceInvariant(ref);
- }
- if (kVerifyFlags & kVerifyReads) {
- VerifyObject(ref);
- }
- return ref;
- }
-
- // Get view of vregs as range of consecutive arguments starting at i.
- uint32_t* GetVRegArgs(size_t i) {
- return &vregs_[i];
- }
-
- void SetVReg(size_t i, int32_t val) {
- DCHECK_LT(i, NumberOfVRegs());
- uint32_t* vreg = &vregs_[i];
- *reinterpret_cast<int32_t*>(vreg) = val;
- // This is needed for moving collectors since these can update the vreg references if they
- // happen to agree with references in the reference array.
- if (kMovingCollector && HasReferenceArray()) {
- References()[i].Clear();
- }
- }
-
- void SetVRegFloat(size_t i, float val) {
- DCHECK_LT(i, NumberOfVRegs());
- uint32_t* vreg = &vregs_[i];
- *reinterpret_cast<float*>(vreg) = val;
- // This is needed for moving collectors since these can update the vreg references if they
- // happen to agree with references in the reference array.
- if (kMovingCollector && HasReferenceArray()) {
- References()[i].Clear();
- }
- }
-
- void SetVRegLong(size_t i, int64_t val) {
- DCHECK_LT(i, NumberOfVRegs());
- uint32_t* vreg = &vregs_[i];
- typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
- *reinterpret_cast<unaligned_int64*>(vreg) = val;
- // This is needed for moving collectors since these can update the vreg references if they
- // happen to agree with references in the reference array.
- if (kMovingCollector && HasReferenceArray()) {
- References()[i].Clear();
- References()[i + 1].Clear();
- }
- }
-
- void SetVRegDouble(size_t i, double val) {
- DCHECK_LT(i, NumberOfVRegs());
- uint32_t* vreg = &vregs_[i];
- typedef double unaligned_double __attribute__ ((aligned (4)));
- *reinterpret_cast<unaligned_double*>(vreg) = val;
- // This is needed for moving collectors since these can update the vreg references if they
- // happen to agree with references in the reference array.
- if (kMovingCollector && HasReferenceArray()) {
- References()[i].Clear();
- References()[i + 1].Clear();
- }
- }
-
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetVRegReference(size_t i, mirror::Object* val) REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK_LT(i, NumberOfVRegs());
- if (kVerifyFlags & kVerifyWrites) {
- VerifyObject(val);
- }
- if (kUseReadBarrier) {
- ReadBarrier::AssertToSpaceInvariant(val);
- }
- uint32_t* vreg = &vregs_[i];
- reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val);
- if (HasReferenceArray()) {
- References()[i].Assign(val);
- }
- }
-
- void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_) {
- DCHECK(method != nullptr);
- DCHECK(method_ != nullptr);
- method_ = method;
- }
-
- ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(method_ != nullptr);
- return method_;
- }
-
- mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);
-
- mirror::Object* GetThisObject(uint16_t num_ins) const REQUIRES_SHARED(Locks::mutator_lock_);
-
- bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
- if (HasReferenceArray()) {
- return ((&References()[0] <= shadow_frame_entry_obj) &&
- (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1])));
- } else {
- uint32_t* shadow_frame_entry = reinterpret_cast<uint32_t*>(shadow_frame_entry_obj);
- return ((&vregs_[0] <= shadow_frame_entry) &&
- (shadow_frame_entry <= (&vregs_[NumberOfVRegs() - 1])));
- }
- }
-
- LockCountData& GetLockCountData() {
- return lock_count_data_;
- }
-
- static size_t LockCountDataOffset() {
- return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_);
- }
-
- static size_t LinkOffset() {
- return OFFSETOF_MEMBER(ShadowFrame, link_);
- }
-
- static size_t MethodOffset() {
- return OFFSETOF_MEMBER(ShadowFrame, method_);
- }
-
- static size_t DexPCOffset() {
- return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
- }
-
- static size_t NumberOfVRegsOffset() {
- return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
- }
-
- static size_t VRegsOffset() {
- return OFFSETOF_MEMBER(ShadowFrame, vregs_);
- }
-
- static size_t ResultRegisterOffset() {
- return OFFSETOF_MEMBER(ShadowFrame, result_register_);
- }
-
- static size_t DexPCPtrOffset() {
- return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_);
- }
-
- static size_t CodeItemOffset() {
- return OFFSETOF_MEMBER(ShadowFrame, code_item_);
- }
-
- static size_t CachedHotnessCountdownOffset() {
- return OFFSETOF_MEMBER(ShadowFrame, cached_hotness_countdown_);
- }
-
- static size_t HotnessCountdownOffset() {
- return OFFSETOF_MEMBER(ShadowFrame, hotness_countdown_);
- }
-
- // Create ShadowFrame for interpreter using provided memory.
- static ShadowFrame* CreateShadowFrameImpl(uint32_t num_vregs,
- ShadowFrame* link,
- ArtMethod* method,
- uint32_t dex_pc,
- void* memory) {
- return new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
- }
-
- const uint16_t* GetDexPCPtr() {
- return dex_pc_ptr_;
- }
-
- void SetDexPCPtr(uint16_t* dex_pc_ptr) {
- dex_pc_ptr_ = dex_pc_ptr;
- }
-
- JValue* GetResultRegister() {
- return result_register_;
- }
-
- private:
- ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
- uint32_t dex_pc, bool has_reference_array)
- : link_(link),
- method_(method),
- result_register_(nullptr),
- dex_pc_ptr_(nullptr),
- code_item_(nullptr),
- number_of_vregs_(num_vregs),
- dex_pc_(dex_pc),
- cached_hotness_countdown_(0),
- hotness_countdown_(0) {
- // TODO(iam): Remove this parameter, it's an an artifact of portable removal
- DCHECK(has_reference_array);
- if (has_reference_array) {
- memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
- } else {
- memset(vregs_, 0, num_vregs * sizeof(uint32_t));
- }
- }
-
- const StackReference<mirror::Object>* References() const {
- DCHECK(HasReferenceArray());
- const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
- return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end);
- }
-
- StackReference<mirror::Object>* References() {
- return const_cast<StackReference<mirror::Object>*>(
- const_cast<const ShadowFrame*>(this)->References());
- }
-
- // Link to previous shadow frame or null.
- ShadowFrame* link_;
- ArtMethod* method_;
- JValue* result_register_;
- const uint16_t* dex_pc_ptr_;
- const DexFile::CodeItem* code_item_;
- LockCountData lock_count_data_; // This may contain GC roots when lock counting is active.
- const uint32_t number_of_vregs_;
- uint32_t dex_pc_;
- int16_t cached_hotness_countdown_;
- int16_t hotness_countdown_;
-
- // This is a two-part array:
- // - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
- // bytes.
- // - [number_of_vregs..number_of_vregs*2) holds only reference registers. Each element here is
- // ptr-sized.
- // In other words when a primitive is stored in vX, the second (reference) part of the array will
- // be null. When a reference is stored in vX, the second (reference) part of the array will be a
- // copy of vX.
- uint32_t vregs_[0];
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
-};
-
-struct ShadowFrameDeleter {
- inline void operator()(ShadowFrame* frame) {
- if (frame != nullptr) {
- frame->~ShadowFrame();
- }
- }
-};
-
-class JavaFrameRootInfo FINAL : public RootInfo {
- public:
- JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg)
- : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
- }
- void Describe(std::ostream& os) const OVERRIDE
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- size_t GetVReg() const {
- return vreg_;
- }
- const StackVisitor* GetVisitor() const {
- return stack_visitor_;
- }
-
- private:
- const StackVisitor* const stack_visitor_;
- const size_t vreg_;
-};
+/*
+ * Our current stack layout.
+ * The Dalvik registers come first, followed by the
+ * Method*, followed by other special temporaries if any, followed by
+ * regular compiler temporaries. As of now the Method* is the only
+ * special compiler temporary.
+ * A compiler temporary can be thought of as a virtual register that
+ * does not exist in the dex but holds intermediate values to help
+ * optimizations and code generation. A special compiler temporary is
+ * one whose location in the frame is well known, while non-special ones
+ * have no fixed location requirement as long as the code generator
+ * itself knows how to access them.
+ *
+ * TODO: Update this documentation?
+ *
+ * +-------------------------------+
+ * | IN[ins-1] | {Note: resides in caller's frame}
+ * | . |
+ * | IN[0] |
+ * | caller's ArtMethod | ... ArtMethod*
+ * +===============================+ {Note: start of callee's frame}
+ * | core callee-save spill | {variable sized}
+ * +-------------------------------+
+ * | fp callee-save spill |
+ * +-------------------------------+
+ * | filler word | {For compatibility, if V[locals-1] used as wide}
+ * +-------------------------------+
+ * | V[locals-1] |
+ * | V[locals-2] |
+ * | . |
+ * | . | ... (reg == 2)
+ * | V[1] | ... (reg == 1)
+ * | V[0] | ... (reg == 0) <---- "locals_start"
+ * +-------------------------------+
+ * | stack alignment padding | {0 to (kStackAlignWords-1) of padding}
+ * +-------------------------------+
+ * | Compiler temp region | ... (reg >= max_num_special_temps)
+ * | . |
+ * | . |
+ * | V[max_num_special_temps + 1] |
+ * | V[max_num_special_temps + 0] |
+ * +-------------------------------+
+ * | OUT[outs-1] |
+ * | OUT[outs-2] |
+ * | . |
+ * | OUT[0] |
+ * | ArtMethod* | ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
+ * +===============================+
+ */
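A condensed restatement of the offset arithmetic the diagram encodes, taken from the GetVRegOffsetFromQuickCode body removed from stack.cc above and kept here purely to illustrate the layout (the helper names are exactly those the deleted code used):

    static int VRegOffsetForLayout(const DexFile::CodeItem* code_item,
                                   uint32_t core_spills, uint32_t fp_spills,
                                   size_t frame_size, int reg, InstructionSet isa) {
      const size_t ptr_size = static_cast<size_t>(InstructionSetPointerSize(isa));
      const int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa) +
                             POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa) +
                             sizeof(uint32_t);  // Filler word.
      const int num_regs = code_item->registers_size_ - code_item->ins_size_;  // Locals.
      const int temp_threshold = code_item->registers_size_;  // Slot holding the Method*.
      if (reg == temp_threshold) {
        return 0;  // The current ArtMethod* sits at sp.
      } else if (reg >= temp_threshold + 1) {  // max_num_special_temps == 1 in the deleted code.
        // Non-special compiler temps live just above the outs.
        return code_item->outs_size_ * sizeof(uint32_t) + ptr_size +
               (reg - (temp_threshold + 1)) * sizeof(uint32_t);
      } else if (reg < num_regs) {
        // Dalvik locals, between the spills and the outs/temps region.
        return frame_size - spill_size - num_regs * sizeof(uint32_t) + reg * sizeof(uint32_t);
      } else {
        // Ins live in the caller's frame, just above the caller's ArtMethod*.
        return frame_size + (reg - num_regs) * sizeof(uint32_t) + ptr_size;
      }
    }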
class StackVisitor {
public:
@@ -532,6 +128,8 @@ class StackVisitor {
public:
virtual ~StackVisitor() {}
+ StackVisitor(const StackVisitor&) = default;
+ StackVisitor(StackVisitor&&) = default;
// Return 'true' if we should continue to visit more frames, 'false' to stop.
virtual bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
@@ -617,80 +215,10 @@ class StackVisitor {
uintptr_t* GetGPRAddress(uint32_t reg) const;
- // This is a fast-path for getting/setting values in a quick frame.
- uint32_t* GetVRegAddrFromQuickCode(ArtMethod** cur_quick_frame,
- const DexFile::CodeItem* code_item,
- uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
- uint16_t vreg) const {
- int offset = GetVRegOffsetFromQuickCode(
- code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
- DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame());
- uint8_t* vreg_addr = reinterpret_cast<uint8_t*>(cur_quick_frame) + offset;
- return reinterpret_cast<uint32_t*>(vreg_addr);
- }
-
uintptr_t GetReturnPc() const REQUIRES_SHARED(Locks::mutator_lock_);
void SetReturnPc(uintptr_t new_ret_pc) REQUIRES_SHARED(Locks::mutator_lock_);
- /*
- * Return sp-relative offset for a Dalvik virtual register, compiler
- * spill or Method* in bytes using Method*.
- * Note that (reg == -1) denotes an invalid Dalvik register. For the
- * positive values, the Dalvik registers come first, followed by the
- * Method*, followed by other special temporaries if any, followed by
- * regular compiler temporary. As of now we only have the Method* as
- * as a special compiler temporary.
- * A compiler temporary can be thought of as a virtual register that
- * does not exist in the dex but holds intermediate values to help
- * optimizations and code generation. A special compiler temporary is
- * one whose location in frame is well known while non-special ones
- * do not have a requirement on location in frame as long as code
- * generator itself knows how to access them.
- *
- * +-------------------------------+
- * | IN[ins-1] | {Note: resides in caller's frame}
- * | . |
- * | IN[0] |
- * | caller's ArtMethod | ... ArtMethod*
- * +===============================+ {Note: start of callee's frame}
- * | core callee-save spill | {variable sized}
- * +-------------------------------+
- * | fp callee-save spill |
- * +-------------------------------+
- * | filler word | {For compatibility, if V[locals-1] used as wide
- * +-------------------------------+
- * | V[locals-1] |
- * | V[locals-2] |
- * | . |
- * | . | ... (reg == 2)
- * | V[1] | ... (reg == 1)
- * | V[0] | ... (reg == 0) <---- "locals_start"
- * +-------------------------------+
- * | stack alignment padding | {0 to (kStackAlignWords-1) of padding}
- * +-------------------------------+
- * | Compiler temp region | ... (reg >= max_num_special_temps)
- * | . |
- * | . |
- * | V[max_num_special_temps + 1] |
- * | V[max_num_special_temps + 0] |
- * +-------------------------------+
- * | OUT[outs-1] |
- * | OUT[outs-2] |
- * | . |
- * | OUT[0] |
- * | ArtMethod* | ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
- * +===============================+
- */
- static int GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
- uint32_t core_spills, uint32_t fp_spills,
- size_t frame_size, int reg, InstructionSet isa);
-
- static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
- // According to stack model, the first out is above the Method referernce.
- return static_cast<size_t>(InstructionSetPointerSize(isa)) + out_num * sizeof(uint32_t);
- }
-
bool IsInInlinedFrame() const {
return current_inlining_depth_ != 0;
}
@@ -772,8 +300,6 @@ class StackVisitor {
void SanityCheckFrame() const REQUIRES_SHARED(Locks::mutator_lock_);
- InlineInfo GetCurrentInlineInfo() const REQUIRES_SHARED(Locks::mutator_lock_);
-
Thread* const thread_;
const StackWalkKind walk_kind_;
ShadowFrame* cur_shadow_frame_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 789f571253..4ddf217ca1 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -58,6 +58,8 @@
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "indirect_reference_table-inl.h"
+#include "interpreter/shadow_frame.h"
+#include "java_frame_root_info.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 842aa04dfb..464af04cd5 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -171,6 +171,10 @@ void VdexFile::Unquicken(const std::vector<const DexFile*>& dex_files,
// least the size of quickening data for each method that has a code item.
return;
}
+ // We do not decompile a RETURN_VOID_NO_BARRIER into a RETURN_VOID, as the quickening
+ // optimization does not depend on the boot image (the optimization relies on not
+ // having final fields in a class, which does not change for an app).
+ constexpr bool kDecompileReturnInstruction = false;
const uint8_t* quickening_info_ptr = quickening_info.data();
const uint8_t* const quickening_info_end = quickening_info.data() + quickening_info.size();
for (const DexFile* dex_file : dex_files) {
@@ -181,13 +185,7 @@ void VdexFile::Unquicken(const std::vector<const DexFile*>& dex_files,
continue;
}
ClassDataItemIterator it(*dex_file, class_data);
- // Skip fields
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
while (it.HasNextDirectMethod()) {
const DexFile::CodeItem* code_item = it.GetMethodCodeItem();
@@ -196,7 +194,7 @@ void VdexFile::Unquicken(const std::vector<const DexFile*>& dex_files,
quickening_info_ptr += sizeof(uint32_t);
optimizer::ArtDecompileDEX(*code_item,
ArrayRef<const uint8_t>(quickening_info_ptr, quickening_size),
- /* decompile_return_instruction */ false);
+ kDecompileReturnInstruction);
quickening_info_ptr += quickening_size;
}
it.Next();
@@ -209,7 +207,7 @@ void VdexFile::Unquicken(const std::vector<const DexFile*>& dex_files,
quickening_info_ptr += sizeof(uint32_t);
optimizer::ArtDecompileDEX(*code_item,
ArrayRef<const uint8_t>(quickening_info_ptr, quickening_size),
- /* decompile_return_instruction */ false);
+ kDecompileReturnInstruction);
quickening_info_ptr += quickening_size;
}
it.Next();
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 12f791c1f1..9b652553df 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -300,9 +300,7 @@ FailureKind MethodVerifier::VerifyClass(Thread* self,
return FailureKind::kNoFailure;
}
ClassDataItemIterator it(*dex_file, class_data);
- while (it.HasNextStaticField() || it.HasNextInstanceField()) {
- it.Next();
- }
+ it.SkipAllFields();
ClassLinker* linker = Runtime::Current()->GetClassLinker();
// Direct methods.
MethodVerifier::FailureData data1 = VerifyMethods<true>(self,
@@ -1986,10 +1984,7 @@ static uint32_t GetFirstFinalInstanceFieldIndex(const DexFile& dex_file, dex::Ty
const uint8_t* class_data = dex_file.GetClassData(*class_def);
DCHECK(class_data != nullptr);
ClassDataItemIterator it(dex_file, class_data);
- // Skip static fields.
- while (it.HasNextStaticField()) {
- it.Next();
- }
+ it.SkipStaticFields();
while (it.HasNextInstanceField()) {
if ((it.GetFieldAccessFlags() & kAccFinal) != 0) {
return it.GetMemberIndex();
diff --git a/test/004-JniTest/build b/test/004-JniTest/build
index e8e9f31ef4..c8440fcb73 100755
--- a/test/004-JniTest/build
+++ b/test/004-JniTest/build
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2016 The Android Open Source Project
+# Copyright 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,16 +13,29 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-# Make us exit on a failure.
+
+#
+# Perform a mostly normal build.
+# Since this test imports 'dalvik.annotation.optimization.FastNative' (and CriticalNative),
+# we do a bit of trickery to allow the annotations to be used at javac-compile time,
+# but remove them afterwards so they don't end up in the dex file.
#
-set -e
+# This enables the test to compile with vanilla RI javac and work on either ART or RI.
+#
+
+export ORIGINAL_JAVAC="$JAVAC"
+
+# After running javac, strip the compiled CriticalNative/FastNative annotation .class files.
+function javac_wrapper {
+ $ORIGINAL_JAVAC "$@"
+ local stat=$?
+
+ [[ -d classes ]] && (find classes/dalvik -name '*.class' | xargs rm -rf)
+
+ return $stat
+}
-# Hard-wired use of experimental jack.
-# TODO: fix this temporary work-around for lambdas, see b/19467889
-export USE_JACK=true
-# export JACK_SERVER=false
-# export JACK_REPOSITORY="${ANDROID_BUILD_TOP}/prebuilts/sdk/tools/jacks"
+export -f javac_wrapper
+export JAVAC=javac_wrapper
-# e.g. /foo/bar/jack-3.10.ALPHA.jar -> 3.10.ALPHA
-# export JACK_VERSION="$(find "$JACK_REPOSITORY" -name '*ALPHA*' | sed 's/.*jack-//g' | sed 's/[.]jar//g')"
-./default-build "$@" --experimental lambdas
+./default-build "$@"
diff --git a/test/004-JniTest/src/dalvik/annotation/optimization/CriticalNative.java b/test/004-JniTest/src/dalvik/annotation/optimization/CriticalNative.java
new file mode 100644
index 0000000000..9c17337fb8
--- /dev/null
+++ b/test/004-JniTest/src/dalvik/annotation/optimization/CriticalNative.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package dalvik.annotation.optimization;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Placeholder for the real CriticalNative annotation in the Android platform.
+ *
+ * Allows the run-test to compile without an Android bootclasspath.
+ */
+@Retention(RetentionPolicy.CLASS)
+@Target(ElementType.METHOD)
+public @interface CriticalNative {}
diff --git a/test/004-JniTest/src/dalvik/annotation/optimization/FastNative.java b/test/004-JniTest/src/dalvik/annotation/optimization/FastNative.java
new file mode 100644
index 0000000000..3a8fbb0106
--- /dev/null
+++ b/test/004-JniTest/src/dalvik/annotation/optimization/FastNative.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package dalvik.annotation.optimization;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Placeholder for the real FastNative annotation in the Android platform.
+ *
+ * Allows the run-test to compile without an Android bootclasspath.
+ */
+@Retention(RetentionPolicy.CLASS)
+@Target(ElementType.METHOD)
+public @interface FastNative {}
diff --git a/test/604-hot-static-interface/build b/test/065-mismatched-implements/build
index 1ca2daf8b9..41823b5025 100755
--- a/test/604-hot-static-interface/build
+++ b/test/065-mismatched-implements/build
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2016 The Android Open Source Project
+# Copyright 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,15 +13,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+# Make us exit on a failure.
-# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm
- # Hard-wired use of experimental jack.
- # TODO: fix this temporary work-around for default-methods, see b/19467889
- export USE_JACK=true
-fi
+# Don't use desugar because the build fails when it encounters ICCE.
+#
+# Exception in thread "main" java.lang.IllegalArgumentException
+# at com.google.common.base.Preconditions.checkArgument(Preconditions.java:108)
+# at com.google.devtools.build.android.desugar.DefaultMethodClassFixer$DefaultMethodFinder.visit(DefaultMethodClassFixer.java:295)
+export USE_DESUGAR=false
-./default-build "$@" --experimental default-methods
+./default-build "$@"
diff --git a/test/104-growth-limit/src/Main.java b/test/104-growth-limit/src/Main.java
index d31cbf1fe1..24acb91c1d 100644
--- a/test/104-growth-limit/src/Main.java
+++ b/test/104-growth-limit/src/Main.java
@@ -21,7 +21,6 @@ import java.util.List;
public class Main {
public static void main(String[] args) throws Exception {
- int alloc1 = 1;
// Setup reflection stuff before allocating to prevent OOME caused by allocations from
// Class.forName or getDeclaredMethod.
// Reflective equivalent of: dalvik.system.VMRuntime.getRuntime().clearGrowthLimit();
@@ -29,34 +28,38 @@ public class Main {
final Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
final Object runtime = get_runtime.invoke(null);
final Method clear_growth_limit = vm_runtime.getDeclaredMethod("clearGrowthLimit");
- List<byte[]> l = new ArrayList<byte[]>();
- try {
- while (true) {
- // Allocate a MB at a time
- l.add(new byte[1048576]);
- alloc1++;
- }
- } catch (OutOfMemoryError e) {
- l = null;
- }
+
+ int alloc1 = allocateTillOOME();
+
+ // Proactively clean up.
+ Runtime.getRuntime().gc();
+
// Expand the heap to the maximum size.
clear_growth_limit.invoke(runtime);
- int alloc2 = 1;
- l = new ArrayList<byte[]>();
- try {
- while (true) {
- // Allocate a MB at a time
- l.add(new byte[1048576]);
- alloc2++;
- }
- } catch (OutOfMemoryError e2) {
- l = null;
- if (alloc1 > alloc2) {
- System.out.println("ERROR: Allocated less memory after growth" +
+
+ int alloc2 = allocateTillOOME();
+
+ if (alloc1 > alloc2) {
+ System.out.println("ERROR: Allocated less memory after growth " +
"limit cleared (" + alloc1 + " MBs > " + alloc2 + " MBs");
- System.exit(1);
- }
+ } else {
+ System.out.println("Test complete");
}
- System.out.println("Test complete");
+ }
+
+ private static int allocateTillOOME() {
+ int allocations = 0;
+ List<byte[]> l = new ArrayList<byte[]>();
+ try {
+ while (true) {
+ // Allocate a MB at a time
+ l.add(new byte[1048576]);
+ allocations++;
+ }
+ } catch (OutOfMemoryError e) {
+ // Help clean up.
+ l.clear();
+ }
+ return allocations;
}
}
diff --git a/test/551-checker-shifter-operand/build b/test/551-checker-shifter-operand/build
deleted file mode 100644
index 027a0ea5cd..0000000000
--- a/test/551-checker-shifter-operand/build
+++ /dev/null
@@ -1,212 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2008 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# This is an almost exact copy of `art/test/etc/default-build`. Only the parsing
-# of `dx` option has been overriden.
-
-# Stop if something fails.
-set -e
-
-# Set default values for directories.
-if [ -d smali ]; then
- HAS_SMALI=true
-else
- HAS_SMALI=false
-fi
-
-if [ -d src ]; then
- HAS_SRC=true
-else
- HAS_SRC=false
-fi
-
-if [ -d src2 ]; then
- HAS_SRC2=true
-else
- HAS_SRC2=false
-fi
-
-if [ -d src-multidex ]; then
- HAS_SRC_MULTIDEX=true
-else
- HAS_SRC_MULTIDEX=false
-fi
-
-if [ -d src-ex ]; then
- HAS_SRC_EX=true
-else
- HAS_SRC_EX=false
-fi
-
-DX_FLAGS=""
-SKIP_DX_MERGER="false"
-EXPERIMENTAL=""
-
-# Setup experimental flag mappings in a bash associative array.
-declare -A JACK_EXPERIMENTAL_ARGS
-JACK_EXPERIMENTAL_ARGS["default-methods"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
-JACK_EXPERIMENTAL_ARGS["lambdas"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
-
-while true; do
- if [ "x$1" = "x--dx-option" ]; then
- shift
- option="$1"
- # Make sure we run this test *with* `dx` optimizations.
- if [ "x$option" != "x--no-optimize" ]; then
- DX_FLAGS="${DX_FLAGS} $option"
- fi
- shift
- elif [ "x$1" = "x--jvm" ]; then
- shift
- elif [ "x$1" = "x--no-src" ]; then
- HAS_SRC=false
- shift
- elif [ "x$1" = "x--no-src2" ]; then
- HAS_SRC2=false
- shift
- elif [ "x$1" = "x--no-src-multidex" ]; then
- HAS_SRC_MULTIDEX=false
- shift
- elif [ "x$1" = "x--no-src-ex" ]; then
- HAS_SRC_EX=false
- shift
- elif [ "x$1" = "x--no-smali" ]; then
- HAS_SMALI=false
- shift
- elif [ "x$1" = "x--experimental" ]; then
- shift
- EXPERIMENTAL="${EXPERIMENTAL} $1"
- shift
- elif expr "x$1" : "x--" >/dev/null 2>&1; then
- echo "unknown $0 option: $1" 1>&2
- exit 1
- else
- break
- fi
-done
-
-# Add args from the experimental mappings.
-for experiment in ${EXPERIMENTAL}; do
- JACK_ARGS="${JACK_ARGS} ${JACK_EXPERIMENTAL_ARGS[${experiment}]}"
-done
-
-if [ -e classes.dex ]; then
- zip $TEST_NAME.jar classes.dex
- exit 0
-fi
-
-if ! [ "${HAS_SRC}" = "true" ] && ! [ "${HAS_SRC2}" = "true" ]; then
- # No src directory? Then forget about trying to run dx.
- SKIP_DX_MERGER="true"
-fi
-
-if [ "${HAS_SRC_MULTIDEX}" = "true" ]; then
- # Jack does not support this configuration unless we specify how to partition the DEX file
- # with a .jpp file.
- USE_JACK="false"
-fi
-
-if [ ${USE_JACK} = "true" ]; then
- # Jack toolchain
- if [ "${HAS_SRC}" = "true" ]; then
- ${JACK} ${JACK_ARGS} --output-jack src.jack src
- imported_jack_files="--import src.jack"
- fi
-
- if [ "${HAS_SRC2}" = "true" ]; then
- ${JACK} ${JACK_ARGS} --output-jack src2.jack src2
- imported_jack_files="--import src2.jack ${imported_jack_files}"
- fi
-
- # Compile jack files into a DEX file. We set jack.import.type.policy=keep-first to consider
- # class definitions from src2 first.
- if [ "${HAS_SRC}" = "true" ] || [ "${HAS_SRC2}" = "true" ]; then
- ${JACK} ${JACK_ARGS} ${imported_jack_files} -D jack.import.type.policy=keep-first --output-dex .
- fi
-else
- # Legacy toolchain with javac+dx
- if [ "${HAS_SRC}" = "true" ]; then
- mkdir classes
- ${JAVAC} ${JAVAC_ARGS} -implicit:none -classpath src-multidex -d classes `find src -name '*.java'`
- fi
-
- if [ "${HAS_SRC_MULTIDEX}" = "true" ]; then
- mkdir classes2
- ${JAVAC} -implicit:none -classpath src -d classes2 `find src-multidex -name '*.java'`
- if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex \
- --dump-width=1000 ${DX_FLAGS} classes2
- fi
- fi
-
- if [ "${HAS_SRC2}" = "true" ]; then
- mkdir -p classes
- ${JAVAC} ${JAVAC_ARGS} -d classes `find src2 -name '*.java'`
- fi
-
- if [ "${HAS_SRC}" = "true" ] || [ "${HAS_SRC2}" = "true" ]; then
- if [ ${NEED_DEX} = "true" -a ${SKIP_DX_MERGER} = "false" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
- --dump-width=1000 ${DX_FLAGS} classes
- fi
- fi
-fi
-
-if [ "${HAS_SMALI}" = "true" ]; then
- # Compile Smali classes
- ${SMALI} -JXmx512m assemble ${SMALI_ARGS} --output smali_classes.dex `find smali -name '*.smali'`
-
- # Don't bother with dexmerger if we provide our own main function in a smali file.
- if [ ${SKIP_DX_MERGER} = "false" ]; then
- ${DXMERGER} classes.dex classes.dex smali_classes.dex
- else
- mv smali_classes.dex classes.dex
- fi
-fi
-
-if [ ${HAS_SRC_EX} = "true" ]; then
- if [ ${USE_JACK} = "true" ]; then
- # Rename previous "classes.dex" so it is not overwritten.
- mv classes.dex classes-1.dex
- #TODO find another way to append src.jack to the jack classpath
- ${JACK}:src.jack ${JACK_ARGS} --output-dex . src-ex
- zip $TEST_NAME-ex.jar classes.dex
- # Restore previous "classes.dex" so it can be zipped.
- mv classes-1.dex classes.dex
- else
- mkdir classes-ex
- ${JAVAC} ${JAVAC_ARGS} -d classes-ex -cp classes `find src-ex -name '*.java'`
- if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes-ex.dex \
- --dump-width=1000 ${DX_FLAGS} classes-ex
-
- # quick shuffle so that the stored name is "classes.dex"
- mv classes.dex classes-1.dex
- mv classes-ex.dex classes.dex
- zip $TEST_NAME-ex.jar classes.dex
- mv classes.dex classes-ex.dex
- mv classes-1.dex classes.dex
- fi
- fi
-fi
-
-# Create a single jar with two dex files for multidex.
-if [ ${HAS_SRC_MULTIDEX} = "true" ]; then
- zip $TEST_NAME.jar classes.dex classes2.dex
-elif [ ${NEED_DEX} = "true" ]; then
- zip $TEST_NAME.jar classes.dex
-fi
diff --git a/test/595-profile-saving/profile-saving.cc b/test/595-profile-saving/profile-saving.cc
index 019ddad595..0bdbadef48 100644
--- a/test/595-profile-saving/profile-saving.cc
+++ b/test/595-profile-saving/profile-saving.cc
@@ -22,63 +22,41 @@
#include "jni.h"
#include "method_reference.h"
#include "mirror/class-inl.h"
+#include "mirror/executable.h"
#include "oat_file_assistant.h"
#include "oat_file_manager.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedUtfChars.h"
-#include "stack.h"
#include "thread.h"
namespace art {
namespace {
-class CreateProfilingInfoVisitor : public StackVisitor {
- public:
- explicit CreateProfilingInfoVisitor(Thread* thread, const char* method_name)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- method_name_(method_name) {}
-
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- std::string m_name(m->GetName());
-
- if (m_name.compare(method_name_) == 0) {
- ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true);
- method_index_ = m->GetDexMethodIndex();
- return false;
- }
- return true;
- }
-
- int method_index_ = -1;
- const char* const method_name_;
-};
-
-extern "C" JNIEXPORT jint JNICALL Java_Main_ensureProfilingInfo(JNIEnv* env,
+extern "C" JNIEXPORT void JNICALL Java_Main_ensureProfilingInfo(JNIEnv* env,
jclass,
- jstring method_name) {
- ScopedUtfChars chars(env, method_name);
- CHECK(chars.c_str() != nullptr);
- ScopedObjectAccess soa(Thread::Current());
- CreateProfilingInfoVisitor visitor(soa.Self(), chars.c_str());
- visitor.WalkStack();
- return visitor.method_index_;
+ jobject method) {
+ CHECK(method != nullptr);
+ ScopedObjectAccess soa(env);
+ ObjPtr<mirror::Executable> exec = soa.Decode<mirror::Executable>(method);
+ ProfilingInfo::Create(soa.Self(), exec->GetArtMethod(), /* retry_allocation */ true);
}
extern "C" JNIEXPORT void JNICALL Java_Main_ensureProfileProcessing(JNIEnv*, jclass) {
ProfileSaver::ForceProcessProfiles();
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_presentInProfile(
- JNIEnv* env, jclass cls, jstring filename, jint method_index) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_presentInProfile(JNIEnv* env,
+ jclass,
+ jstring filename,
+ jobject method) {
ScopedUtfChars filename_chars(env, filename);
CHECK(filename_chars.c_str() != nullptr);
- ScopedObjectAccess soa(Thread::Current());
- const DexFile* dex_file = soa.Decode<mirror::Class>(cls)->GetDexCache()->GetDexFile();
+ ScopedObjectAccess soa(env);
+ ObjPtr<mirror::Executable> exec = soa.Decode<mirror::Executable>(method);
+ ArtMethod* art_method = exec->GetArtMethod();
return ProfileSaver::HasSeenMethod(std::string(filename_chars.c_str()),
- dex_file,
- static_cast<uint16_t>(method_index));
+ art_method->GetDexFile(),
+ art_method->GetDexMethodIndex());
}
} // namespace
diff --git a/test/595-profile-saving/run b/test/595-profile-saving/run
index fce6ac15d8..055035b3e0 100644
--- a/test/595-profile-saving/run
+++ b/test/595-profile-saving/run
@@ -24,4 +24,5 @@ exec ${RUN} \
--runtime-option '-Xcompiler-option --compiler-filter=quicken' \
--runtime-option -Xjitsaveprofilinginfo \
--runtime-option -Xusejit:false \
+ --runtime-option -Xps-profile-boot-class-path \
"${@}"
diff --git a/test/595-profile-saving/src/Main.java b/test/595-profile-saving/src/Main.java
index faf94c4fcc..18c0598bef 100644
--- a/test/595-profile-saving/src/Main.java
+++ b/test/595-profile-saving/src/Main.java
@@ -31,11 +31,17 @@ public class Main {
VMRuntime.registerAppInfo(file.getPath(),
new String[] {codePath});
- int methodIdx = $opt$noinline$testProfile();
- ensureProfileProcessing();
- if (!presentInProfile(file.getPath(), methodIdx)) {
- throw new RuntimeException("Method with index " + methodIdx + " not in the profile");
+ // Test that the profile saves an app method with a profiling info.
+ Method appMethod = Main.class.getDeclaredMethod("testAddMethodToProfile",
+ File.class, Method.class);
+ testAddMethodToProfile(file, appMethod);
+
+ // Test that the profile saves a boot class path method with a profiling info.
+ Method bootMethod = File.class.getDeclaredMethod("delete");
+ if (bootMethod.getDeclaringClass().getClassLoader() != Object.class.getClassLoader()) {
+ System.out.println("Class loader does not match boot class");
}
+ testAddMethodToProfile(file, bootMethod);
} finally {
if (file != null) {
file.delete();
@@ -43,20 +49,24 @@ public class Main {
}
}
- public static int $opt$noinline$testProfile() {
- if (doThrow) throw new Error();
+ static void testAddMethodToProfile(File file, Method m) {
// Make sure we have a profile info for this method without the need to loop.
- return ensureProfilingInfo("$opt$noinline$testProfile");
+ ensureProfilingInfo(m);
+ // Make sure the profile gets saved.
+ ensureProfileProcessing();
+ // Verify that the profile was saved and contains the method.
+ if (!presentInProfile(file.getPath(), m)) {
+ throw new RuntimeException("Method with index " + m + " not in the profile");
+ }
}
- // Return the dex method index.
- public static native int ensureProfilingInfo(String methodName);
+ // Ensure a method has a profiling info.
+ public static native void ensureProfilingInfo(Method method);
// Ensures the profile saver does its usual processing.
public static native void ensureProfileProcessing();
// Checks if the profiles saver knows about the method.
- public static native boolean presentInProfile(String profile, int methodIdx);
+ public static native boolean presentInProfile(String profile, Method method);
- public static boolean doThrow = false;
private static final String TEMP_FILE_NAME_PREFIX = "dummy";
private static final String TEMP_FILE_NAME_SUFFIX = "-file";
diff --git a/test/636-arm64-veneer-pool/build b/test/636-arm64-veneer-pool/build
index 27cc4d6d98..eba22fc02d 100755
--- a/test/636-arm64-veneer-pool/build
+++ b/test/636-arm64-veneer-pool/build
@@ -19,4 +19,9 @@ set -e
# Use javac+dx instead of jack.
export USE_JACK=false
-./default-build "$@"
+
+# Don't use desugar because the bootclasspath jars will be missing
+# on a platform build compiled with ANDROID_COMPILE_WITH_JACK=true.
+export USE_DESUGAR=false
+
+./default-build
diff --git a/test/641-checker-arraycopy/build b/test/641-checker-arraycopy/build
index 9abc61897a..12e4423b56 100644
--- a/test/641-checker-arraycopy/build
+++ b/test/641-checker-arraycopy/build
@@ -21,4 +21,8 @@ set -e
# the typed System.arraycopy versions directly.
export USE_JACK=false
-./default-build
+# Don't use desugar because the bootclasspath jars will be missing
+# on a platform build compiled with ANDROID_COMPILE_WITH_JACK=true.
+export USE_DESUGAR=false
+
+./default-build "$@"
diff --git a/test/701-easy-div-rem/build b/test/701-easy-div-rem/build
index d83ee82b47..affb432b41 100644
--- a/test/701-easy-div-rem/build
+++ b/test/701-easy-div-rem/build
@@ -21,4 +21,4 @@ set -e
mkdir src
python ./genMain.py
-./default-build
+./default-build "$@"
diff --git a/test/702-LargeBranchOffset/build b/test/702-LargeBranchOffset/build
index 20030fa466..dab7b0d263 100644
--- a/test/702-LargeBranchOffset/build
+++ b/test/702-LargeBranchOffset/build
@@ -20,4 +20,4 @@ set -e
# Write out the source file.
cpp -P src/Main.java.in src/Main.java
-./default-build
+./default-build "$@"
diff --git a/test/708-jit-cache-churn/expected.txt b/test/708-jit-cache-churn/expected.txt
new file mode 100644
index 0000000000..77a1486479
--- /dev/null
+++ b/test/708-jit-cache-churn/expected.txt
@@ -0,0 +1,2 @@
+JNI_OnLoad called
+Done
diff --git a/test/708-jit-cache-churn/info.txt b/test/708-jit-cache-churn/info.txt
new file mode 100644
index 0000000000..4aaa3d48c9
--- /dev/null
+++ b/test/708-jit-cache-churn/info.txt
@@ -0,0 +1 @@
+Tests the JIT code cache for page permission updates and CPU instruction cache inconsistencies. Only runs when the test runner permits JIT, e.g. with --jit.
diff --git a/test/708-jit-cache-churn/jit.cc b/test/708-jit-cache-churn/jit.cc
new file mode 100644
index 0000000000..1284a8703d
--- /dev/null
+++ b/test/708-jit-cache-churn/jit.cc
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni.h"
+
+#include "art_method.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "jni_internal.h"
+#include "mirror/class.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread_list.h"
+
+namespace art {
+
+extern "C" JNIEXPORT
+jboolean
+Java_JitCacheChurnTest_removeJitCompiledMethod(JNIEnv* env,
+ jclass,
+ jobject javaMethod,
+ jboolean releaseMemory) {
+ if (!Runtime::Current()->UseJitCompilation()) {
+ return JNI_FALSE;
+ }
+
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ jit->WaitForCompilationToFinish(Thread::Current());
+
+ ScopedObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+
+ jit::JitCodeCache* code_cache = jit->GetCodeCache();
+
+ // Drop the shared mutator lock
+ ScopedThreadSuspension selfSuspension(Thread::Current(), art::ThreadState::kNative);
+ // Get exclusive mutator lock with suspend all.
+ ScopedSuspendAll suspend("Removing JIT compiled method", /*long_suspend*/true);
+ bool removed = code_cache->RemoveMethod(method, static_cast<bool>(releaseMemory));
+ return removed ? JNI_TRUE : JNI_FALSE;
+}
+
+} // namespace art
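For readers skimming the patch, a condensed Java-side sketch of how this native hook is driven may help; it is purely illustrative and not part of the patch. The two native declarations mirror the ones the test uses, while the surrounding class and method names are hypothetical (the real driver follows in JitCacheChurnTest.java below).

    // Illustrative caller for Java_JitCacheChurnTest_removeJitCompiledMethod above.
    // Assumes the natives are registered for this class the same way the run-test
    // registers them for JitCacheChurnTest; names here are hypothetical.
    import java.lang.reflect.Method;

    public class RemoveJitSketch {
      static native void ensureJitCompiled(Class<?> klass, String methodName);
      static native boolean removeJitCompiledMethod(Method method, boolean releaseMemory);

      static int $noinline$work() { return 42; }

      public static void main(String[] args) throws Exception {
        System.loadLibrary(args[0]);
        ensureJitCompiled(RemoveJitSketch.class, "$noinline$work");
        Method m = RemoveJitSketch.class.getDeclaredMethod("$noinline$work");
        // removeJitCompiledMethod returns false when the runtime is not using JIT.
        boolean removed = removeJitCompiledMethod(m, /* releaseMemory */ false);
        System.out.println("removed: " + removed);
      }
    }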
diff --git a/test/708-jit-cache-churn/src/JitCacheChurnTest.java b/test/708-jit-cache-churn/src/JitCacheChurnTest.java
new file mode 100644
index 0000000000..abc5f35f70
--- /dev/null
+++ b/test/708-jit-cache-churn/src/JitCacheChurnTest.java
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A test driver for JIT compiling methods and looking for JIT
+ * cache issues.
+ */
+public class JitCacheChurnTest {
+ /* The name of methods to JIT */
+ private static final String JITTED_METHOD = "$noinline$Call";
+
+ /* The number of cores to oversubscribe load by. */
+ private static final int OVERSUBSCRIBED_CORES = 1;
+
+ /* The number of concurrent executions of methods to be JIT compiled. */
+ private static final int CONCURRENCY =
+ Runtime.getRuntime().availableProcessors() + OVERSUBSCRIBED_CORES;
+
+ /* The number of times the methods to be JIT compiled should be executed per thread. */
+ private static final int METHOD_ITERATIONS = 10;
+
+ /* The number of test iterations spent JIT compiling methods and then removing them from the JIT cache. */
+ private static final int TEST_ITERATIONS = 512;
+
+ /* Tasks to run and generate compiled code of various sizes */
+ private static final BaseTask [] TASKS = {
+ new TaskOne(), new TaskTwo(), new TaskThree(), new TaskFour(), new TaskFive(), new TaskSix(),
+ new TaskSeven(), new TaskEight(), new TaskNine(), new TaskTen()
+ };
+ private static final int TASK_BITMASK = (1 << TASKS.length) - 1;
+
+ private final ExecutorService executorService;
+ private int runMask = 0;
+
+ private JitCacheChurnTest() {
+ this.executorService = new ThreadPoolExecutor(CONCURRENCY, CONCURRENCY, 5000,
+ TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>());
+ }
+
+ private void shutdown() {
+ this.executorService.shutdown();
+ }
+
+ private void runTasks(Callable<Integer> task) {
+ // Force JIT compilation of tasks method.
+ ensureJitCompiled(task.getClass(), JITTED_METHOD);
+
+ // Launch worker threads to run JIT compiled method.
+ try {
+ ArrayList<Callable<Integer>> tasks = new ArrayList<>(CONCURRENCY);
+ for (int i = 0; i < CONCURRENCY; ++i) {
+ tasks.add(i, task);
+ }
+
+ List<Future<Integer>> results = executorService.invokeAll(tasks);
+ for (Future<?> result : results) {
+ result.get();
+ }
+ } catch (InterruptedException | ExecutionException e) {
+ System.err.println(e);
+ System.exit(-1);
+ }
+ }
+
+ private static abstract class BaseTask implements Callable<Integer> {
+ private static CyclicBarrier barrier = new CyclicBarrier(CONCURRENCY);
+
+ public Integer call() throws Exception {
+ barrier.await();
+ int iterations = METHOD_ITERATIONS + 1;
+ for (int i = 0; i < iterations; ++i) {
+ $noinline$Call();
+ }
+ return $noinline$Call();
+ }
+
+ protected abstract Integer $noinline$Call();
+ }
+
+ private static class TaskOne extends BaseTask {
+ @Override
+ protected Integer $noinline$Call() {
+ return null;
+ }
+ }
+
+ private static class TaskTwo extends BaseTask {
+ @Override
+ protected Integer $noinline$Call() {
+ return 0;
+ }
+ }
+
+ private static class TaskThree extends BaseTask {
+ @Override
+ protected Integer $noinline$Call() {
+ int sum = 0;
+ for (int i = 0; i < 3; ++i) {
+ sum = i * (i + 1);
+ }
+ return sum;
+ }
+ }
+
+ private static class TaskFour extends BaseTask {
+ @Override
+ protected Integer $noinline$Call() {
+ int sum = 0;
+ for (int i = 0; i < 10; ++i) {
+ int bits = i;
+ bits = ((bits >>> 1) & 0x55555555) | ((bits << 1) & 0x55555555);
+ bits = ((bits >>> 2) & 0x33333333) | ((bits << 2) & 0x33333333);
+ bits = ((bits >>> 4) & 0x0f0f0f0f) | ((bits << 4) & 0x0f0f0f0f);
+ bits = ((bits >>> 8) & 0x00ff00ff) | ((bits << 8) & 0x00ff00ff);
+ bits = (bits >>> 16) | (bits << 16);
+ sum += bits;
+ }
+ return sum;
+ }
+ }
+
+ private static class TaskFive extends BaseTask {
+ static final AtomicInteger instances = new AtomicInteger(0);
+ int instance;
+ TaskFive() {
+ instance = instances.getAndIncrement();
+ }
+ protected Integer $noinline$Call() {
+ return instance;
+ }
+ }
+
+ private static class TaskSix extends TaskFive {
+ protected Integer $noinline$Call() {
+ return instance + 1;
+ }
+ }
+
+ private static class TaskSeven extends TaskFive {
+ protected Integer $noinline$Call() {
+ return 2 * instance + 1;
+ }
+ }
+
+ private static class TaskEight extends TaskFive {
+ protected Integer $noinline$Call() {
+ double a = Math.cosh(2.22 * instance);
+ double b = a / 2;
+ double c = b * 3;
+ double d = a + b + c;
+ if (d > 42) {
+ d *= Math.max(Math.sin(d), Math.sinh(d));
+ d *= Math.max(1.33, 0.17 * Math.sinh(d));
+ d *= Math.max(1.34, 0.21 * Math.sinh(d));
+ d *= Math.max(1.35, 0.32 * Math.sinh(d));
+ d *= Math.max(1.36, 0.41 * Math.sinh(d));
+ d *= Math.max(1.37, 0.57 * Math.sinh(d));
+ d *= Math.max(1.38, 0.61 * Math.sinh(d));
+ d *= Math.max(1.39, 0.79 * Math.sinh(d));
+ d += Double.parseDouble("3.711e23");
+ }
+
+ if (d > 3) {
+ return (int) a;
+ } else {
+ return (int) b;
+ }
+ }
+ }
+
+ private static class TaskNine extends TaskFive {
+ private final String [] numbers = { "One", "Two", "Three", "Four", "Five", "Six" };
+
+ protected Integer $noinline$Call() {
+ String number = numbers[instance % numbers.length];
+ return number.length();
+ }
+ }
+
+ private static class TaskTen extends TaskFive {
+ private final String [] numbers = { "12345", "23451", "34512", "78901", "89012" };
+
+ protected Integer $noinline$Call() {
+ int odd = 0;
+ String number = numbers[instance % numbers.length];
+ for (int i = 0; i < number.length(); i += 2) {
+ odd += Character.getNumericValue(number.charAt(i));
+ }
+ odd *= 3;
+
+ int even = 0;
+ for (int i = 1; i < number.length(); i += 2) {
+ even += Character.getNumericValue(number.charAt(i));
+ }
+ return (odd + even) % 10;
+ }
+ }
+
+ private void runAndJitMethods(int mask) {
+ runMask |= mask;
+ for (int index = 0; mask != 0; mask >>= 1, index++) {
+ if ((mask & 1) == 1) {
+ runTasks(TASKS[index]);
+ }
+ }
+ }
+
+ private static void ensureJitCompiled(Class<?> klass, String name) {
+ Main.ensureJitCompiled(klass, name);
+ }
+
+ private void removeJittedMethod(Class<?> klass, String name) {
+ Method method = null;
+ try {
+ method = klass.getDeclaredMethod(name);
+ } catch (NoSuchMethodException e) {
+ System.err.println(e);
+ System.exit(-1);
+ }
+ removeJitCompiledMethod(method, false);
+ }
+
+ private void removeJittedMethods(int mask) {
+ mask = mask & runMask;
+ runMask ^= mask;
+ for (int index = 0; mask != 0; mask >>= 1, index++) {
+ if ((mask & 1) == 1) {
+ removeJittedMethod(TASKS[index].getClass(), JITTED_METHOD);
+ }
+ }
+ }
+
+ private static int getMethodsAsMask(Random rng) {
+ return rng.nextInt(TASK_BITMASK) + 1;
+ }
+
+ public static void run() {
+ JitCacheChurnTest concurrentExecution = new JitCacheChurnTest();
+ Random invokeMethodGenerator = new Random(5);
+ Random removeMethodGenerator = new Random(7);
+ try {
+ for (int i = 0; i < TEST_ITERATIONS; ++i) {
+ concurrentExecution.runAndJitMethods(getMethodsAsMask(invokeMethodGenerator));
+ concurrentExecution.removeJittedMethods(getMethodsAsMask(removeMethodGenerator));
+ }
+ } finally {
+ concurrentExecution.shutdown();
+ }
+ }
+
+ private static native void removeJitCompiledMethod(Method method, boolean releaseMemory);
+}
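The mask handling in runAndJitMethods() and removeJittedMethods() above is the heart of the churn scheduling. As a standalone aid (not part of the patch), the same iterate-over-set-bits pattern looks like this:

    // Standalone sketch of the set-bit iteration used by runAndJitMethods() and
    // removeJittedMethods(): walk the mask, acting on each bit that is set.
    public class MaskIterationDemo {
      public static void main(String[] args) {
        int mask = 0b0101001;  // selects task indices 0, 3 and 5
        for (int index = 0; mask != 0; mask >>= 1, index++) {
          if ((mask & 1) == 1) {
            System.out.println("selected task index " + index);
          }
        }
      }
    }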
diff --git a/test/708-jit-cache-churn/src/Main.java b/test/708-jit-cache-churn/src/Main.java
new file mode 100644
index 0000000000..0595aae506
--- /dev/null
+++ b/test/708-jit-cache-churn/src/Main.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void main(String[] args) throws Exception {
+ // Explicit loadLibrary here to pull JNI exports from arttestd.
+ System.loadLibrary(args[0]);
+ if (hasJit()) {
+ JitCacheChurnTest.run();
+ }
+ System.out.println("Done");
+ }
+
+ static native boolean hasJit();
+
+ static native void ensureJitCompiled(Class<?> klass, String methodName);
+}
diff --git a/test/952-invoke-custom/build b/test/952-invoke-custom/build
index a423ca6b4e..2b0b2c1274 100644
--- a/test/952-invoke-custom/build
+++ b/test/952-invoke-custom/build
@@ -17,9 +17,4 @@
# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm.
- export USE_JACK=true
-fi
-
./default-build "$@" --experimental method-handles
diff --git a/test/953-invoke-polymorphic-compiler/build b/test/953-invoke-polymorphic-compiler/build
index a423ca6b4e..2b0b2c1274 100755
--- a/test/953-invoke-polymorphic-compiler/build
+++ b/test/953-invoke-polymorphic-compiler/build
@@ -17,9 +17,4 @@
# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm.
- export USE_JACK=true
-fi
-
./default-build "$@" --experimental method-handles
diff --git a/test/954-invoke-polymorphic-verifier/build b/test/954-invoke-polymorphic-verifier/build
index a423ca6b4e..2b0b2c1274 100755
--- a/test/954-invoke-polymorphic-verifier/build
+++ b/test/954-invoke-polymorphic-verifier/build
@@ -17,9 +17,4 @@
# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm.
- export USE_JACK=true
-fi
-
./default-build "$@" --experimental method-handles
diff --git a/test/955-methodhandles-smali/build b/test/955-methodhandles-smali/build
index a423ca6b4e..2b0b2c1274 100755
--- a/test/955-methodhandles-smali/build
+++ b/test/955-methodhandles-smali/build
@@ -17,9 +17,4 @@
# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm.
- export USE_JACK=true
-fi
-
./default-build "$@" --experimental method-handles
diff --git a/test/956-methodhandles/build b/test/956-methodhandles/build
index a423ca6b4e..2b0b2c1274 100755
--- a/test/956-methodhandles/build
+++ b/test/956-methodhandles/build
@@ -17,9 +17,4 @@
# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm.
- export USE_JACK=true
-fi
-
./default-build "$@" --experimental method-handles
diff --git a/test/957-methodhandle-transforms/build b/test/957-methodhandle-transforms/build
index a423ca6b4e..2b0b2c1274 100755
--- a/test/957-methodhandle-transforms/build
+++ b/test/957-methodhandle-transforms/build
@@ -17,9 +17,4 @@
# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm.
- export USE_JACK=true
-fi
-
./default-build "$@" --experimental method-handles
diff --git a/test/958-methodhandle-stackframe/build b/test/958-methodhandle-stackframe/build
index a423ca6b4e..2b0b2c1274 100755
--- a/test/958-methodhandle-stackframe/build
+++ b/test/958-methodhandle-stackframe/build
@@ -17,9 +17,4 @@
# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm.
- export USE_JACK=true
-fi
-
./default-build "$@" --experimental method-handles
diff --git a/test/959-invoke-polymorphic-accessors/build b/test/959-invoke-polymorphic-accessors/build
index a423ca6b4e..2b0b2c1274 100644
--- a/test/959-invoke-polymorphic-accessors/build
+++ b/test/959-invoke-polymorphic-accessors/build
@@ -17,9 +17,4 @@
# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm.
- export USE_JACK=true
-fi
-
./default-build "$@" --experimental method-handles
diff --git a/test/960-default-smali/build b/test/960-default-smali/build
index e8f4ed084a..44d6bd2046 100755
--- a/test/960-default-smali/build
+++ b/test/960-default-smali/build
@@ -17,13 +17,6 @@
# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm
- # Hard-wired use of experimental jack.
- # TODO: fix this temporary work-around for default-methods, see b/19467889
- export USE_JACK=true
-fi
-
# Generate the Main.java file or fail
${ANDROID_BUILD_TOP}/art/test/utils/python/generate_java_main.py ./src
diff --git a/test/961-default-iface-resolution-gen/build b/test/961-default-iface-resolution-gen/build
index 2f7e3ba553..f2c222524e 100755
--- a/test/961-default-iface-resolution-gen/build
+++ b/test/961-default-iface-resolution-gen/build
@@ -17,16 +17,10 @@
# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm
- # Hard-wired use of experimental jack.
- # TODO: fix this temporary work-around for default-methods, see b/19467889
- export USE_JACK=true
-fi
-
mkdir -p ./src
# Generate the smali files and expected.txt or fail
./util-src/generate_java.py ./src ./expected.txt
-./default-build "$@" --experimental default-methods
+# dx runs out of memory with default 256M, give it more memory.
+./default-build "$@" --experimental default-methods --dx-vm-option -JXmx512M
diff --git a/test/962-iface-static/build b/test/962-iface-static/build
index 0dd8573f54..82f49312ef 100755..100644
--- a/test/962-iface-static/build
+++ b/test/962-iface-static/build
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2015 The Android Open Source Project
+# Copyright 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,11 +17,4 @@
# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm
- # Hard-wired use of experimental jack.
- # TODO: fix this temporary work-around for default-methods, see b/19467889
- export USE_JACK=true
-fi
-
./default-build "$@" --experimental default-methods
diff --git a/test/963-default-range-smali/build b/test/963-default-range-smali/build
deleted file mode 100755
index 0dd8573f54..0000000000
--- a/test/963-default-range-smali/build
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# make us exit on a failure
-set -e
-
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm
- # Hard-wired use of experimental jack.
- # TODO: fix this temporary work-around for default-methods, see b/19467889
- export USE_JACK=true
-fi
-
-./default-build "$@" --experimental default-methods
diff --git a/test/964-default-iface-init-gen/build b/test/964-default-iface-init-gen/build
index 2f7e3ba553..a800151670 100755
--- a/test/964-default-iface-init-gen/build
+++ b/test/964-default-iface-init-gen/build
@@ -17,16 +17,10 @@
# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm
- # Hard-wired use of experimental jack.
- # TODO: fix this temporary work-around for default-methods, see b/19467889
- export USE_JACK=true
-fi
-
mkdir -p ./src
# Generate the smali files and expected.txt or fail
./util-src/generate_java.py ./src ./expected.txt
-./default-build "$@" --experimental default-methods
+# dx runs out of memory with just 256m, so increase it.
+./default-build "$@" --experimental default-methods --dx-vm-option -JXmx512M
diff --git a/test/965-default-verify/build b/test/965-default-verify/build
deleted file mode 100755
index 0dd8573f54..0000000000
--- a/test/965-default-verify/build
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# make us exit on a failure
-set -e
-
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm
- # Hard-wired use of experimental jack.
- # TODO: fix this temporary work-around for default-methods, see b/19467889
- export USE_JACK=true
-fi
-
-./default-build "$@" --experimental default-methods
diff --git a/test/966-default-conflict/build b/test/966-default-conflict/build
deleted file mode 100755
index 0dd8573f54..0000000000
--- a/test/966-default-conflict/build
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# make us exit on a failure
-set -e
-
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm
- # Hard-wired use of experimental jack.
- # TODO: fix this temporary work-around for default-methods, see b/19467889
- export USE_JACK=true
-fi
-
-./default-build "$@" --experimental default-methods
diff --git a/test/967-default-ame/build b/test/967-default-ame/build
deleted file mode 100755
index 0dd8573f54..0000000000
--- a/test/967-default-ame/build
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# make us exit on a failure
-set -e
-
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm
- # Hard-wired use of experimental jack.
- # TODO: fix this temporary work-around for default-methods, see b/19467889
- export USE_JACK=true
-fi
-
-./default-build "$@" --experimental default-methods
diff --git a/test/969-iface-super/build b/test/969-iface-super/build
index e8f4ed084a..44d6bd2046 100755
--- a/test/969-iface-super/build
+++ b/test/969-iface-super/build
@@ -17,13 +17,6 @@
# make us exit on a failure
set -e
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm
- # Hard-wired use of experimental jack.
- # TODO: fix this temporary work-around for default-methods, see b/19467889
- export USE_JACK=true
-fi
-
# Generate the Main.java file or fail
${ANDROID_BUILD_TOP}/art/test/utils/python/generate_java_main.py ./src
diff --git a/test/988-method-trace/expected.txt b/test/988-method-trace/expected.txt
index d3d9249b1f..30ad532f6c 100644
--- a/test/988-method-trace/expected.txt
+++ b/test/988-method-trace/expected.txt
@@ -1,4 +1,5 @@
-<= public static native void art.Trace.enableMethodTracing(java.lang.Class,java.lang.reflect.Method,java.lang.reflect.Method,java.lang.Thread) -> <null: null>
+.<= public static native void art.Trace.enableTracing(java.lang.Class,java.lang.reflect.Method,java.lang.reflect.Method,java.lang.reflect.Method,java.lang.reflect.Method,java.lang.Thread) -> <null: null>
+<= public static void art.Trace.enableMethodTracing(java.lang.Class,java.lang.reflect.Method,java.lang.reflect.Method,java.lang.Thread) -> <null: null>
=> art.Test988$IterOp()
.=> public java.lang.Object()
.<= public java.lang.Object() -> <null: null>
@@ -142,10 +143,10 @@ fibonacci(5)=5
......=> private static native java.lang.Object java.lang.Throwable.nativeFillInStackTrace()
......<= private static native java.lang.Object java.lang.Throwable.nativeFillInStackTrace() -> <class [Ljava.lang.Object;: <non-deterministic>>
.....<= public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace() -> <class java.lang.Error: java.lang.Error: Bad argument: -19 < 0
- at art.Test988.iter_fibonacci(Test988.java:207)
- at art.Test988$IterOp.applyAsInt(Test988.java:202)
- at art.Test988.doFibTest(Test988.java:295)
- at art.Test988.run(Test988.java:265)
+ at art.Test988.iter_fibonacci(Test988.java:209)
+ at art.Test988$IterOp.applyAsInt(Test988.java:204)
+ at art.Test988.doFibTest(Test988.java:297)
+ at art.Test988.run(Test988.java:267)
at Main.main(Main.java:19)
>
....<= public java.lang.Throwable(java.lang.String) -> <null: null>
@@ -162,10 +163,10 @@ fibonacci(5)=5
...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
- at art.Test988.iter_fibonacci(Test988.java:207)
- at art.Test988$IterOp.applyAsInt(Test988.java:202)
- at art.Test988.doFibTest(Test988.java:295)
- at art.Test988.run(Test988.java:265)
+ at art.Test988.iter_fibonacci(Test988.java:209)
+ at art.Test988$IterOp.applyAsInt(Test988.java:204)
+ at art.Test988.doFibTest(Test988.java:297)
+ at art.Test988.run(Test988.java:267)
at Main.main(Main.java:19)
.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
@@ -243,10 +244,10 @@ fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
......=> private static native java.lang.Object java.lang.Throwable.nativeFillInStackTrace()
......<= private static native java.lang.Object java.lang.Throwable.nativeFillInStackTrace() -> <class [Ljava.lang.Object;: <non-deterministic>>
.....<= public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace() -> <class java.lang.Error: java.lang.Error: Bad argument: -19 < 0
- at art.Test988.fibonacci(Test988.java:229)
- at art.Test988$RecurOp.applyAsInt(Test988.java:224)
- at art.Test988.doFibTest(Test988.java:295)
- at art.Test988.run(Test988.java:266)
+ at art.Test988.fibonacci(Test988.java:231)
+ at art.Test988$RecurOp.applyAsInt(Test988.java:226)
+ at art.Test988.doFibTest(Test988.java:297)
+ at art.Test988.run(Test988.java:268)
at Main.main(Main.java:19)
>
....<= public java.lang.Throwable(java.lang.String) -> <null: null>
@@ -263,14 +264,14 @@ fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
- at art.Test988.fibonacci(Test988.java:229)
- at art.Test988$RecurOp.applyAsInt(Test988.java:224)
- at art.Test988.doFibTest(Test988.java:295)
- at art.Test988.run(Test988.java:266)
+ at art.Test988.fibonacci(Test988.java:231)
+ at art.Test988$RecurOp.applyAsInt(Test988.java:226)
+ at art.Test988.doFibTest(Test988.java:297)
+ at art.Test988.run(Test988.java:268)
at Main.main(Main.java:19)
.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
=> public static native java.lang.Thread java.lang.Thread.currentThread()
<= public static native java.lang.Thread java.lang.Thread.currentThread() -> <class java.lang.Thread: <non-deterministic>>
-=> public static native void art.Trace.disableMethodTracing(java.lang.Thread)
+=> public static native void art.Trace.disableTracing(java.lang.Thread)
diff --git a/test/988-method-trace/src/art/Test988.java b/test/988-method-trace/src/art/Test988.java
index 37ff136b6c..6a45c0eaa2 100644
--- a/test/988-method-trace/src/art/Test988.java
+++ b/test/988-method-trace/src/art/Test988.java
@@ -194,7 +194,9 @@ public class Test988 {
}
private static List<Printable> results = new ArrayList<>();
- private static int cnt = 1;
+ // Starts with => enableMethodTracing
+ // .=> enableTracing
+ private static int cnt = 2;
// Iterative version
static final class IterOp implements IntUnaryOperator {
@@ -253,7 +255,7 @@ public class Test988 {
public static void run() throws Exception {
// call this here so it is linked. It doesn't actually do anything here.
loadAllClasses();
- Trace.disableMethodTracing(Thread.currentThread());
+ Trace.disableTracing(Thread.currentThread());
Trace.enableMethodTracing(
Test988.class,
Test988.class.getDeclaredMethod("notifyMethodEntry", Object.class),
@@ -265,7 +267,7 @@ public class Test988 {
doFibTest(-19, new IterOp());
doFibTest(-19, new RecurOp());
// Turn off method tracing so we don't have to deal with print internals.
- Trace.disableMethodTracing(Thread.currentThread());
+ Trace.disableTracing(Thread.currentThread());
printResults();
}
diff --git a/test/988-method-trace/src/art/Trace.java b/test/988-method-trace/src/art/Trace.java
index 3370996df3..9c27c9f69e 100644
--- a/test/988-method-trace/src/art/Trace.java
+++ b/test/988-method-trace/src/art/Trace.java
@@ -16,10 +16,34 @@
package art;
+import java.lang.reflect.Field;
import java.lang.reflect.Method;
public class Trace {
- public static native void enableMethodTracing(
- Class<?> methodClass, Method entryMethod, Method exitMethod, Thread thr);
- public static native void disableMethodTracing(Thread thr);
+ public static native void enableTracing(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Thread thr);
+ public static native void disableTracing(Thread thr);
+
+ public static void enableFieldTracing(Class<?> methodClass,
+ Method fieldAccess,
+ Method fieldModify,
+ Thread thr) {
+ enableTracing(methodClass, null, null, fieldAccess, fieldModify, thr);
+ }
+
+ public static void enableMethodTracing(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Thread thr) {
+ enableTracing(methodClass, entryMethod, exitMethod, null, null, thr);
+ }
+
+ public static native void watchFieldAccess(Field f);
+ public static native void watchFieldModification(Field f);
+ public static native void watchAllFieldAccesses();
+ public static native void watchAllFieldModifications();
}
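The widened Trace API above also supports watching individual fields rather than all of them. A minimal, hypothetical caller (not part of the patch, and assuming the JVMTI test agent that backs art.Trace is loaded as in these run-tests) could look like:

    // Hypothetical sketch: watch reads of a single field and log them.
    // Assumes the art.Trace natives are provided by the test agent.
    import java.lang.reflect.Executable;
    import java.lang.reflect.Field;
    import java.lang.reflect.Method;

    public class FieldWatchSketch {
      public static int counter;

      public static void onAccess(Executable m, long location,
                                  Class<?> declaring, Object target, Field f) {
        System.out.println("read " + f + " in " + m);
      }

      public static void main(String[] args) throws Exception {
        Method access = FieldWatchSketch.class.getDeclaredMethod(
            "onAccess", Executable.class, long.class, Class.class, Object.class, Field.class);
        art.Trace.enableFieldTracing(
            FieldWatchSketch.class, access, /* fieldModify */ null, Thread.currentThread());
        art.Trace.watchFieldAccess(FieldWatchSketch.class.getDeclaredField("counter"));
        int unused = counter;  // triggers the access callback
        art.Trace.disableTracing(Thread.currentThread());
      }
    }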
diff --git a/test/989-method-trace-throw/src/art/Test989.java b/test/989-method-trace-throw/src/art/Test989.java
index 18421bd08b..4feb29cf9d 100644
--- a/test/989-method-trace-throw/src/art/Test989.java
+++ b/test/989-method-trace-throw/src/art/Test989.java
@@ -56,7 +56,7 @@ public class Test989 {
// to an infinite loop on the RI.
private static void disableTraceForRI() {
if (!System.getProperty("java.vm.name").equals("Dalvik")) {
- Trace.disableMethodTracing(Thread.currentThread());
+ Trace.disableTracing(Thread.currentThread());
}
}
@@ -158,7 +158,7 @@ public class Test989 {
private static void maybeDisableTracing() throws Exception {
if (DISABLE_TRACING) {
- Trace.disableMethodTracing(Thread.currentThread());
+ Trace.disableTracing(Thread.currentThread());
}
}
@@ -179,7 +179,7 @@ public class Test989 {
}
private static void setEntry(MethodTracer type) throws Exception {
if (DISABLE_TRACING || !System.getProperty("java.vm.name").equals("Dalvik")) {
- Trace.disableMethodTracing(Thread.currentThread());
+ Trace.disableTracing(Thread.currentThread());
setupTracing();
}
currentTracer = type;
@@ -274,7 +274,7 @@ public class Test989 {
maybeDisableTracing();
System.out.println("Finished!");
- Trace.disableMethodTracing(Thread.currentThread());
+ Trace.disableTracing(Thread.currentThread());
}
private static final class throwAClass implements MyRunnable {
diff --git a/test/989-method-trace-throw/src/art/Trace.java b/test/989-method-trace-throw/src/art/Trace.java
index 3370996df3..9c27c9f69e 100644
--- a/test/989-method-trace-throw/src/art/Trace.java
+++ b/test/989-method-trace-throw/src/art/Trace.java
@@ -16,10 +16,34 @@
package art;
+import java.lang.reflect.Field;
import java.lang.reflect.Method;
public class Trace {
- public static native void enableMethodTracing(
- Class<?> methodClass, Method entryMethod, Method exitMethod, Thread thr);
- public static native void disableMethodTracing(Thread thr);
+ public static native void enableTracing(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Thread thr);
+ public static native void disableTracing(Thread thr);
+
+ public static void enableFieldTracing(Class<?> methodClass,
+ Method fieldAccess,
+ Method fieldModify,
+ Thread thr) {
+ enableTracing(methodClass, null, null, fieldAccess, fieldModify, thr);
+ }
+
+ public static void enableMethodTracing(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Thread thr) {
+ enableTracing(methodClass, entryMethod, exitMethod, null, null, thr);
+ }
+
+ public static native void watchFieldAccess(Field f);
+ public static native void watchFieldModification(Field f);
+ public static native void watchAllFieldAccesses();
+ public static native void watchAllFieldModifications();
}
diff --git a/test/990-field-trace/expected.txt b/test/990-field-trace/expected.txt
new file mode 100644
index 0000000000..cceb008383
--- /dev/null
+++ b/test/990-field-trace/expected.txt
@@ -0,0 +1,52 @@
+MODIFY of int art.Test990$TestClass1.xyz on object of type: class art.Test990$TestClass1 in method public art.Test990$TestClass1(int,java.lang.Object). New value: 1 (type: class java.lang.Integer)
+MODIFY of java.lang.Object art.Test990$TestClass1.abc on object of type: class art.Test990$TestClass1 in method public art.Test990$TestClass1(int,java.lang.Object). New value: tc1 (type: class java.lang.String)
+MODIFY of static long art.Test990$TestClass2.TOTAL on object of type: null in method art.Test990$TestClass2(). New value: 0 (type: class java.lang.Long)
+MODIFY of int art.Test990$TestClass1.xyz on object of type: class art.Test990$TestClass2 in method public art.Test990$TestClass1(int,java.lang.Object). New value: 1337 (type: class java.lang.Integer)
+MODIFY of java.lang.Object art.Test990$TestClass1.abc on object of type: class art.Test990$TestClass2 in method public art.Test990$TestClass1(int,java.lang.Object). New value: TESTING (type: class java.lang.String)
+MODIFY of long art.Test990$TestClass2.baz on object of type: class art.Test990$TestClass2 in method public art.Test990$TestClass2(long). New value: 2 (type: class java.lang.Long)
+MODIFY of int art.Test990$TestClass1.xyz on object of type: class art.Test990$TestClass1 in method public art.Test990$TestClass1(int,java.lang.Object). New value: 3 (type: class java.lang.Integer)
+MODIFY of java.lang.Object art.Test990$TestClass1.abc on object of type: class art.Test990$TestClass1 in method public art.Test990$TestClass1(int,java.lang.Object). New value: TestClass1 { abc: "tc1", xyz: 1, foobar: 0 } (type: class art.Test990$TestClass1)
+MODIFY of int art.Test990$TestClass1.xyz on object of type: class art.Test990$TestClass1 in method public art.Test990$TestClass1(int,java.lang.Object). New value: 4 (type: class java.lang.Integer)
+MODIFY of java.lang.Object art.Test990$TestClass1.abc on object of type: class art.Test990$TestClass1 in method public art.Test990$TestClass1(int,java.lang.Object). New value: TestClass1 { abc: "TESTING", xyz: 1337, foobar: 0 } (type: class art.Test990$TestClass2)
+ACCESS of int art.Test990$TestClass1.xyz on object of type class art.Test990$TestClass1 in method public void art.Test990$TestClass1.tweak(int)
+ACCESS of int art.Test990$TestClass1.foobar on object of type class art.Test990$TestClass1 in method public void art.Test990$TestClass1.tweak(int)
+MODIFY of int art.Test990$TestClass1.foobar on object of type: class art.Test990$TestClass1 in method public void art.Test990$TestClass1.tweak(int). New value: 1 (type: class java.lang.Integer)
+ACCESS of int art.Test990$TestClass1.xyz on object of type class art.Test990$TestClass1 in method public void art.Test990$TestClass1.tweak(int)
+ACCESS of int art.Test990$TestClass1.foobar on object of type class art.Test990$TestClass1 in method public void art.Test990$TestClass1.tweak(int)
+MODIFY of int art.Test990$TestClass1.foobar on object of type: class art.Test990$TestClass1 in method public void art.Test990$TestClass1.tweak(int). New value: 2 (type: class java.lang.Integer)
+ACCESS of static long art.Test990$TestClass2.TOTAL on object of type null in method public void art.Test990$TestClass2.tweak(int)
+MODIFY of static long art.Test990$TestClass2.TOTAL on object of type: null in method public void art.Test990$TestClass2.tweak(int). New value: 1 (type: class java.lang.Long)
+ACCESS of int art.Test990$TestClass1.xyz on object of type class art.Test990$TestClass2 in method public void art.Test990$TestClass1.tweak(int)
+ACCESS of long art.Test990$TestClass2.baz on object of type class art.Test990$TestClass2 in method public void art.Test990$TestClass2.tweak(int)
+MODIFY of long art.Test990$TestClass2.baz on object of type: class art.Test990$TestClass2 in method public void art.Test990$TestClass2.tweak(int). New value: 3 (type: class java.lang.Long)
+ACCESS of static long art.Test990$TestClass2.TOTAL on object of type null in method public void art.Test990$TestClass2.tweak(int)
+MODIFY of static long art.Test990$TestClass2.TOTAL on object of type: null in method public void art.Test990$TestClass2.tweak(int). New value: 2 (type: class java.lang.Long)
+ACCESS of int art.Test990$TestClass1.xyz on object of type class art.Test990$TestClass2 in method public void art.Test990$TestClass1.tweak(int)
+ACCESS of int art.Test990$TestClass1.foobar on object of type class art.Test990$TestClass2 in method public void art.Test990$TestClass1.tweak(int)
+MODIFY of int art.Test990$TestClass1.foobar on object of type: class art.Test990$TestClass2 in method public void art.Test990$TestClass1.tweak(int). New value: 1 (type: class java.lang.Integer)
+ACCESS of long art.Test990$TestClass2.baz on object of type class art.Test990$TestClass2 in method public void art.Test990$TestClass2.tweak(int)
+MODIFY of long art.Test990$TestClass2.baz on object of type: class art.Test990$TestClass2 in method public void art.Test990$TestClass2.tweak(int). New value: 4 (type: class java.lang.Long)
+ACCESS of static long art.Test990$TestClass2.TOTAL on object of type null in method public void art.Test990$TestClass2.tweak(int)
+MODIFY of static long art.Test990$TestClass2.TOTAL on object of type: null in method public void art.Test990$TestClass2.tweak(int). New value: 3 (type: class java.lang.Long)
+ACCESS of int art.Test990$TestClass1.xyz on object of type class art.Test990$TestClass2 in method public void art.Test990$TestClass1.tweak(int)
+ACCESS of long art.Test990$TestClass2.baz on object of type class art.Test990$TestClass2 in method public void art.Test990$TestClass2.tweak(int)
+MODIFY of long art.Test990$TestClass2.baz on object of type: class art.Test990$TestClass2 in method public void art.Test990$TestClass2.tweak(int). New value: 5 (type: class java.lang.Long)
+ACCESS of static long art.Test990$TestClass2.TOTAL on object of type null in method public void art.Test990$TestClass2.tweak(int)
+MODIFY of static long art.Test990$TestClass2.TOTAL on object of type: null in method public void art.Test990$TestClass2.tweak(int). New value: 4 (type: class java.lang.Long)
+ACCESS of int art.Test990$TestClass1.xyz on object of type class art.Test990$TestClass2 in method public void art.Test990$TestClass1.tweak(int)
+ACCESS of long art.Test990$TestClass2.baz on object of type class art.Test990$TestClass2 in method public void art.Test990$TestClass2.tweak(int)
+MODIFY of long art.Test990$TestClass2.baz on object of type: class art.Test990$TestClass2 in method public void art.Test990$TestClass2.tweak(int). New value: 6 (type: class java.lang.Long)
+ACCESS of int art.Test990$TestClass1.foobar on object of type class art.Test990$TestClass1 in method public static void art.Test990.run() throws java.lang.Exception
+ACCESS of int art.Test990$TestClass1.xyz on object of type class art.Test990$TestClass1 in method public void art.Test990$TestClass1.tweak(int)
+ACCESS of long art.Test990$TestClass2.baz on object of type class art.Test990$TestClass2 in method public static void art.Test990.run() throws java.lang.Exception
+ACCESS of int art.Test990$TestClass1.xyz on object of type class art.Test990$TestClass1 in method public void art.Test990$TestClass1.tweak(int)
+ACCESS of static long art.Test990$TestClass2.TOTAL on object of type null in method public static void art.Test990.run() throws java.lang.Exception
+ACCESS of int art.Test990$TestClass1.xyz on object of type class art.Test990$TestClass1 in method public void art.Test990$TestClass1.tweak(int)
+ACCESS of int art.Test990$TestClass1.foobar on object of type class art.Test990$TestClass1 in method public void art.Test990$TestClass1.tweak(int)
+MODIFY of int art.Test990$TestClass1.foobar on object of type: class art.Test990$TestClass1 in method public void art.Test990$TestClass1.tweak(int). New value: 1 (type: class java.lang.Integer)
+ACCESS of static long art.Test990$TestClass2.TOTAL on object of type null in method public static void art.Test990.run() throws java.lang.Exception
+ACCESS of static long art.Test990$TestClass2.TOTAL on object of type null in method public void art.Test990$TestClass2.tweak(int)
+MODIFY of static long art.Test990$TestClass2.TOTAL on object of type: null in method public void art.Test990$TestClass2.tweak(int). New value: 5 (type: class java.lang.Long)
+ACCESS of int art.Test990$TestClass1.xyz on object of type class art.Test990$TestClass2 in method public void art.Test990$TestClass1.tweak(int)
+ACCESS of long art.Test990$TestClass2.baz on object of type class art.Test990$TestClass2 in method public void art.Test990$TestClass2.tweak(int)
+MODIFY of long art.Test990$TestClass2.baz on object of type: class art.Test990$TestClass2 in method public void art.Test990$TestClass2.tweak(int). New value: 7 (type: class java.lang.Long)
diff --git a/test/990-field-trace/info.txt b/test/990-field-trace/info.txt
new file mode 100644
index 0000000000..67d164e3bf
--- /dev/null
+++ b/test/990-field-trace/info.txt
@@ -0,0 +1 @@
+Tests field access and modification watches in JVMTI
diff --git a/test/048-reflect-v8/build b/test/990-field-trace/run
index 3552b5c46c..51875a7e86 100644..100755
--- a/test/048-reflect-v8/build
+++ b/test/990-field-trace/run
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2016 The Android Open Source Project
+# Copyright 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,11 +14,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Make us exit on a failure.
-set -e
-
-# Hard-wired use of experimental jack.
-# TODO: fix this temporary work-around for lambdas, see b/19467889
-export USE_JACK=true
-
-./default-build "$@" --experimental default-methods
+# Run the test with the jvmti agent enabled.
+./default-run "$@" --jvmti
diff --git a/test/990-field-trace/src/Main.java b/test/990-field-trace/src/Main.java
new file mode 100644
index 0000000000..cb14f5d511
--- /dev/null
+++ b/test/990-field-trace/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test990.run();
+ }
+}
diff --git a/test/990-field-trace/src/art/Test990.java b/test/990-field-trace/src/art/Test990.java
new file mode 100644
index 0000000000..d766876412
--- /dev/null
+++ b/test/990-field-trace/src/art/Test990.java
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.lang.reflect.Executable;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Vector;
+import java.util.function.Function;
+
+public class Test990 {
+
+ // Fields of these classes are accessed/modified differently in the RI and ART so we ignore them.
+ static Collection<Class<?>> IGNORED_CLASSES = Arrays.asList(new Class<?>[] {
+ ClassLoader.class,
+ Vector.class,
+ });
+
+ static interface Printable { public void Print(); }
+
+ static final class FieldWrite implements Printable {
+ private Executable method;
+ private Object target;
+ private Field f;
+ private String initialValue;
+ private Class<?> initialValueType;
+
+ public FieldWrite(Executable method, Object target, Field f, Object v) {
+ this.method = method;
+ this.target = target;
+ this.f = f;
+ this.initialValue = genericToString(v);
+ this.initialValueType = v != null ? v.getClass() : null;
+ }
+
+ @Override
+ public void Print() {
+ System.out.println("MODIFY of " + f + " on object of" +
+ " type: " + (target == null ? null : target.getClass()) +
+ " in method " + method +
+ ". New value: " + initialValue + " (type: " + initialValueType + ")");
+ }
+ }
+
+ static final class FieldRead implements Printable {
+ private Executable method;
+ private Object target;
+ private Field f;
+
+ public FieldRead(Executable method, Object target, Field f) {
+ this.method = method;
+ this.target = target;
+ this.f = f;
+ }
+
+ @Override
+ public void Print() {
+ System.out.println("ACCESS of " + f + " on object of" +
+ " type " + (target == null ? null : target.getClass()) +
+ " in method " + method);
+ }
+ }
+
+ private static String genericToString(Object val) {
+ if (val == null) {
+ return "null";
+ } else if (val.getClass().isArray()) {
+ return arrayToString(val);
+ } else if (val instanceof Throwable) {
+ StringWriter w = new StringWriter();
+ ((Throwable) val).printStackTrace(new PrintWriter(w));
+ return w.toString();
+ } else {
+ return val.toString();
+ }
+ }
+
+ private static String charArrayToString(char[] src) {
+ String[] res = new String[src.length];
+ for (int i = 0; i < src.length; i++) {
+ if (Character.isISOControl(src[i])) {
+ res[i] = Character.getName(src[i]);
+ } else {
+ res[i] = Character.toString(src[i]);
+ }
+ }
+ return Arrays.toString(res);
+ }
+
+ private static String arrayToString(Object val) {
+ Class<?> klass = val.getClass();
+ if ((new Object[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString(
+ Arrays.stream((Object[])val).map(new Function<Object, String>() {
+ public String apply(Object o) {
+ return genericToString(o);
+ }
+ }).toArray());
+ } else if ((new byte[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString((byte[])val);
+ } else if ((new char[0]).getClass().isAssignableFrom(klass)) {
+ return charArrayToString((char[])val);
+ } else if ((new short[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString((short[])val);
+ } else if ((new int[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString((int[])val);
+ } else if ((new long[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString((long[])val);
+ } else if ((new float[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString((float[])val);
+ } else if ((new double[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString((double[])val);
+ } else {
+ throw new Error("Unknown type " + klass);
+ }
+ }
+
+ private static List<Printable> results = new ArrayList<>();
+
+ public static void notifyFieldModify(
+ Executable m, long location, Class<?> f_klass, Object target, Field f, Object value) {
+ if (IGNORED_CLASSES.contains(f_klass)) {
+ return;
+ }
+ results.add(new FieldWrite(m, target, f, value));
+ }
+
+ public static void notifyFieldAccess(
+ Executable m, long location, Class<?> f_klass, Object target, Field f) {
+ if (IGNORED_CLASSES.contains(f_klass)) {
+ return;
+ }
+ results.add(new FieldRead(m, target, f));
+ }
+
+ static class TestClass1 {
+ Object abc;
+ int xyz;
+ int foobar;
+ public TestClass1(int xyz, Object abc) {
+ this.xyz = xyz;
+ this.abc = abc;
+ }
+
+ public void tweak(int def) {
+ if (def == xyz) {
+ foobar++;
+ }
+ }
+ public String toString() {
+ return "TestClass1 { abc: \"" + genericToString(abc) + "\", xyz: " + xyz
+ + ", foobar: " + foobar + " }";
+ }
+ }
+
+ static class TestClass2 extends TestClass1 {
+ static long TOTAL = 0;
+ long baz;
+ public TestClass2(long baz) {
+ super(1337, "TESTING");
+ this.baz = baz;
+ }
+
+ public void tweak(int def) {
+ TOTAL++;
+ super.tweak(def);
+ baz++;
+ }
+
+ public String toString() {
+ return "TestClass2 { super: \"%s\", TOTAL: %d, baz: %d }".format(
+ super.toString(), TOTAL, baz);
+ }
+ }
+
+
+ public static void run() throws Exception {
+ Trace.disableTracing(Thread.currentThread());
+ Trace.enableFieldTracing(
+ Test990.class,
+ Test990.class.getDeclaredMethod("notifyFieldAccess",
+ Executable.class, Long.TYPE, Class.class, Object.class, Field.class),
+ Test990.class.getDeclaredMethod("notifyFieldModify",
+ Executable.class, Long.TYPE, Class.class, Object.class, Field.class, Object.class),
+ Thread.currentThread());
+ Trace.watchAllFieldAccesses();
+ Trace.watchAllFieldModifications();
+ TestClass1 t1 = new TestClass1(1, "tc1");
+ TestClass1 t2 = new TestClass2(2);
+ TestClass1 t3 = new TestClass1(3, t1);
+ TestClass1 t4 = new TestClass1(4, t2);
+ t1.tweak(1);
+ t1.tweak(1);
+ t2.tweak(12);
+ t2.tweak(1337);
+ t2.tweak(12);
+ t2.tweak(1338);
+ t1.tweak(t3.foobar);
+ t4.tweak((int)((TestClass2)t2).baz);
+ t4.tweak((int)TestClass2.TOTAL);
+ t2.tweak((int)TestClass2.TOTAL);
+
+ // Turn off tracing so we don't have to deal with print internals.
+ Trace.disableTracing(Thread.currentThread());
+ printResults();
+ }
+
+ public static void printResults() {
+ for (Printable p : results) {
+ p.Print();
+ }
+ }
+}
diff --git a/test/990-field-trace/src/art/Trace.java b/test/990-field-trace/src/art/Trace.java
new file mode 100644
index 0000000000..9c27c9f69e
--- /dev/null
+++ b/test/990-field-trace/src/art/Trace.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+
+public class Trace {
+ public static native void enableTracing(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Thread thr);
+ public static native void disableTracing(Thread thr);
+
+ public static void enableFieldTracing(Class<?> methodClass,
+ Method fieldAccess,
+ Method fieldModify,
+ Thread thr) {
+ enableTracing(methodClass, null, null, fieldAccess, fieldModify, thr);
+ }
+
+ public static void enableMethodTracing(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Thread thr) {
+ enableTracing(methodClass, entryMethod, exitMethod, null, null, thr);
+ }
+
+ public static native void watchFieldAccess(Field f);
+ public static native void watchFieldModification(Field f);
+ public static native void watchAllFieldAccesses();
+ public static native void watchAllFieldModifications();
+}
diff --git a/test/991-field-trace-2/expected.txt b/test/991-field-trace-2/expected.txt
new file mode 100644
index 0000000000..8da8ffdae6
--- /dev/null
+++ b/test/991-field-trace-2/expected.txt
@@ -0,0 +1,118 @@
+Test is class art.Test991$DoNothingFieldTracer & class art.Test991$JavaReadWrite
+Initial state: xyz = 0
+FieldTracer: class art.Test991$DoNothingFieldTracer
+ ACCESS of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public void art.Test991$JavaReadWrite.accept(art.Test991$TestClass1)
+normal read: xyz = 0
+FieldTracer: class art.Test991$DoNothingFieldTracer
+ MODIFY of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public void art.Test991$JavaReadWrite.accept(art.Test991$TestClass1). New value: 1 (type: class java.lang.Integer)
+Final state: xyz = 1
+Test is class art.Test991$ThrowReadFieldTracer & class art.Test991$JavaReadWrite
+Initial state: xyz = 0
+FieldTracer: class art.Test991$ThrowReadFieldTracer
+ ACCESS of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public void art.Test991$JavaReadWrite.accept(art.Test991$TestClass1)
+Caught error. art.Test991$TestError: Throwing error during access
+Final state: xyz = 0
+Test is class art.Test991$ThrowWriteFieldTracer & class art.Test991$JavaReadWrite
+Initial state: xyz = 0
+FieldTracer: class art.Test991$ThrowWriteFieldTracer
+ ACCESS of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public void art.Test991$JavaReadWrite.accept(art.Test991$TestClass1)
+normal read: xyz = 0
+FieldTracer: class art.Test991$ThrowWriteFieldTracer
+ MODIFY of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public void art.Test991$JavaReadWrite.accept(art.Test991$TestClass1). New value: 1 (type: class java.lang.Integer)
+Caught error. art.Test991$TestError: Throwing error during modify
+Final state: xyz = 0
+Test is class art.Test991$ModifyDuringReadFieldTracer & class art.Test991$JavaReadWrite
+Initial state: xyz = 0
+FieldTracer: class art.Test991$ModifyDuringReadFieldTracer
+ ACCESS of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public void art.Test991$JavaReadWrite.accept(art.Test991$TestClass1)
+normal read: xyz = 20
+FieldTracer: class art.Test991$ModifyDuringReadFieldTracer
+ MODIFY of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public void art.Test991$JavaReadWrite.accept(art.Test991$TestClass1). New value: 21 (type: class java.lang.Integer)
+Final state: xyz = 21
+Test is class art.Test991$ModifyDuringWriteFieldTracer & class art.Test991$JavaReadWrite
+Initial state: xyz = 0
+FieldTracer: class art.Test991$ModifyDuringWriteFieldTracer
+ ACCESS of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public void art.Test991$JavaReadWrite.accept(art.Test991$TestClass1)
+normal read: xyz = 0
+FieldTracer: class art.Test991$ModifyDuringWriteFieldTracer
+ MODIFY of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public void art.Test991$JavaReadWrite.accept(art.Test991$TestClass1). New value: 1 (type: class java.lang.Integer)
+Final state: xyz = 1
+Test is class art.Test991$ModifyDuringReadAndWriteFieldTracer & class art.Test991$JavaReadWrite
+Initial state: xyz = 0
+FieldTracer: class art.Test991$ModifyDuringReadAndWriteFieldTracer
+ ACCESS of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public void art.Test991$JavaReadWrite.accept(art.Test991$TestClass1)
+normal read: xyz = 10
+FieldTracer: class art.Test991$ModifyDuringReadAndWriteFieldTracer
+ MODIFY of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public void art.Test991$JavaReadWrite.accept(art.Test991$TestClass1). New value: 11 (type: class java.lang.Integer)
+Final state: xyz = 11
+Test is class art.Test991$DoNothingFieldTracer & class art.Test991$ReflectiveReadWrite
+Initial state: xyz = 0
+reflective read: xyz = 0
+Final state: xyz = 1
+Test is class art.Test991$ThrowReadFieldTracer & class art.Test991$ReflectiveReadWrite
+Initial state: xyz = 0
+reflective read: xyz = 0
+Final state: xyz = 1
+Test is class art.Test991$ThrowWriteFieldTracer & class art.Test991$ReflectiveReadWrite
+Initial state: xyz = 0
+reflective read: xyz = 0
+Final state: xyz = 1
+Test is class art.Test991$ModifyDuringReadFieldTracer & class art.Test991$ReflectiveReadWrite
+Initial state: xyz = 0
+reflective read: xyz = 0
+Final state: xyz = 1
+Test is class art.Test991$ModifyDuringWriteFieldTracer & class art.Test991$ReflectiveReadWrite
+Initial state: xyz = 0
+reflective read: xyz = 0
+Final state: xyz = 1
+Test is class art.Test991$ModifyDuringReadAndWriteFieldTracer & class art.Test991$ReflectiveReadWrite
+Initial state: xyz = 0
+reflective read: xyz = 0
+Final state: xyz = 1
+Test is class art.Test991$DoNothingFieldTracer & class art.Test991$NativeReadWrite
+Initial state: xyz = 0
+FieldTracer: class art.Test991$DoNothingFieldTracer
+ ACCESS of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public static native void art.Test991.doNativeReadWrite(art.Test991$TestClass1)
+native read: xyz = 0
+FieldTracer: class art.Test991$DoNothingFieldTracer
+ MODIFY of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public static native void art.Test991.doNativeReadWrite(art.Test991$TestClass1). New value: 1 (type: class java.lang.Integer)
+Final state: xyz = 1
+Test is class art.Test991$ThrowReadFieldTracer & class art.Test991$NativeReadWrite
+Initial state: xyz = 0
+FieldTracer: class art.Test991$ThrowReadFieldTracer
+ ACCESS of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public static native void art.Test991.doNativeReadWrite(art.Test991$TestClass1)
+Caught error. art.Test991$TestError: Throwing error during access
+Final state: xyz = 0
+Test is class art.Test991$ThrowWriteFieldTracer & class art.Test991$NativeReadWrite
+Initial state: xyz = 0
+FieldTracer: class art.Test991$ThrowWriteFieldTracer
+ ACCESS of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public static native void art.Test991.doNativeReadWrite(art.Test991$TestClass1)
+native read: xyz = 0
+FieldTracer: class art.Test991$ThrowWriteFieldTracer
+ MODIFY of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public static native void art.Test991.doNativeReadWrite(art.Test991$TestClass1). New value: 1 (type: class java.lang.Integer)
+Caught error. art.Test991$TestError: Throwing error during modify
+Final state: xyz = 1
+Test is class art.Test991$ModifyDuringReadFieldTracer & class art.Test991$NativeReadWrite
+Initial state: xyz = 0
+FieldTracer: class art.Test991$ModifyDuringReadFieldTracer
+ ACCESS of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public static native void art.Test991.doNativeReadWrite(art.Test991$TestClass1)
+native read: xyz = 20
+FieldTracer: class art.Test991$ModifyDuringReadFieldTracer
+ MODIFY of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public static native void art.Test991.doNativeReadWrite(art.Test991$TestClass1). New value: 21 (type: class java.lang.Integer)
+Final state: xyz = 21
+Test is class art.Test991$ModifyDuringWriteFieldTracer & class art.Test991$NativeReadWrite
+Initial state: xyz = 0
+FieldTracer: class art.Test991$ModifyDuringWriteFieldTracer
+ ACCESS of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public static native void art.Test991.doNativeReadWrite(art.Test991$TestClass1)
+native read: xyz = 0
+FieldTracer: class art.Test991$ModifyDuringWriteFieldTracer
+ MODIFY of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public static native void art.Test991.doNativeReadWrite(art.Test991$TestClass1). New value: 1 (type: class java.lang.Integer)
+Final state: xyz = 1
+Test is class art.Test991$ModifyDuringReadAndWriteFieldTracer & class art.Test991$NativeReadWrite
+Initial state: xyz = 0
+FieldTracer: class art.Test991$ModifyDuringReadAndWriteFieldTracer
+ ACCESS of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public static native void art.Test991.doNativeReadWrite(art.Test991$TestClass1)
+native read: xyz = 10
+FieldTracer: class art.Test991$ModifyDuringReadAndWriteFieldTracer
+ MODIFY of public int art.Test991$TestClass1.xyz on object of type: class art.Test991$TestClass1 in method public static native void art.Test991.doNativeReadWrite(art.Test991$TestClass1). New value: 11 (type: class java.lang.Integer)
+Final state: xyz = 11
diff --git a/test/991-field-trace-2/field_trace.cc b/test/991-field-trace-2/field_trace.cc
new file mode 100644
index 0000000000..823f9fd9c8
--- /dev/null
+++ b/test/991-field-trace-2/field_trace.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include "android-base/macros.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+
+namespace art {
+namespace Test991FieldTrace {
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test991_doNativeReadWrite(
+ JNIEnv* env, jclass klass, jobject testclass) {
+ CHECK(testclass != nullptr);
+ ScopedLocalRef<jclass> testclass_klass(env, env->GetObjectClass(testclass));
+ jmethodID notifyMethod = env->GetStaticMethodID(klass, "doPrintNativeNotification", "(I)V");
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ jfieldID xyz_field = env->GetFieldID(testclass_klass.get(), "xyz", "I");
+ if (env->ExceptionCheck()) {
+ return;
+ }
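+  // The plain JNI Get/SetIntField calls below should still produce JVMTI field access and
+  // modification events.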
+ jint val = env->GetIntField(testclass, xyz_field);
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ env->CallStaticVoidMethod(klass, notifyMethod, val);
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ val += 1;
+ env->SetIntField(testclass, xyz_field, val);
+}
+
+} // namespace Test991FieldTrace
+} // namespace art
+
diff --git a/test/991-field-trace-2/info.txt b/test/991-field-trace-2/info.txt
new file mode 100644
index 0000000000..c2a1b68a56
--- /dev/null
+++ b/test/991-field-trace-2/info.txt
@@ -0,0 +1,5 @@
+Tests field access and modification watches in JVMTI.
+
+This test specifically examines how the runtime responds to exceptions occurring
+while handling these events. It also verifies the situations in which these
+events are sent.
diff --git a/test/563-checker-invoke-super/build b/test/991-field-trace-2/run
index 32f84ef5ab..51875a7e86 100755
--- a/test/563-checker-invoke-super/build
+++ b/test/991-field-trace-2/run
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2016 The Android Open Source Project
+# Copyright 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,12 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-# Make us exit on a failure.
-#
-set -e
-
-# Hard-wired use of experimental jack.
-# TODO: fix this temporary work-around for lambdas, see b/19467889
-export USE_JACK=true
-./default-build "$@" --experimental default-methods
+# Run the test with the jvmti plugin enabled.
+./default-run "$@" --jvmti
diff --git a/test/991-field-trace-2/src/Main.java b/test/991-field-trace-2/src/Main.java
new file mode 100644
index 0000000000..d945a5c183
--- /dev/null
+++ b/test/991-field-trace-2/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test991.run();
+ }
+}
diff --git a/test/991-field-trace-2/src/art/Test991.java b/test/991-field-trace-2/src/art/Test991.java
new file mode 100644
index 0000000000..644f4e10ed
--- /dev/null
+++ b/test/991-field-trace-2/src/art/Test991.java
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.lang.reflect.Field;
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Consumer;
+
+public class Test991 {
+ static List<Field> WATCH_FIELDS = Arrays.asList(TestClass1.class.getDeclaredFields());
+
+ static FieldTracer TRACE = null;
+
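+  // Base tracer: prints every access/modify notification and then delegates to the overridable
+  // handle* hooks that the concrete tracers below use to throw or to mutate the watched field.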
+ static abstract class FieldTracer {
+ public final void notifyFieldAccess(
+ Executable method, long location, Class<?> f_klass, Object target, Field f) {
+ System.out.println("FieldTracer: " + this.getClass());
+ System.out.println("\tACCESS of " + f + " on object of" +
+ " type: " + (target == null ? null : target.getClass()) +
+ " in method " + method);
+ handleFieldAccess(method, location, f_klass, target, f);
+ }
+
+ public final void notifyFieldModify(
+ Executable method, long location, Class<?> f_klass, Object target, Field f, Object value) {
+ System.out.println("FieldTracer: " + this.getClass());
+ System.out.println("\tMODIFY of " + f + " on object of" +
+ " type: " + (target == null ? null : target.getClass()) +
+ " in method " + method +
+ ". New value: " + value + " (type: " + value.getClass() + ")");
+ handleFieldModify(method, location, f_klass, target, f, value);
+ }
+
+ public void handleFieldAccess(Executable m, long l, Class<?> fk, Object t, Field f) {}
+ public void handleFieldModify(Executable m, long l, Class<?> fk, Object t, Field f, Object v) {}
+ }
+
+ private static class TestError extends Error {
+ private static final long serialVersionUID = 0;
+ public TestError(String s) { super(s); }
+ }
+ static class DoNothingFieldTracer extends FieldTracer {}
+ static class ThrowReadFieldTracer extends FieldTracer {
+ @Override
+ public void handleFieldAccess(Executable m, long l, Class<?> fk, Object t, Field f) {
+ throw new TestError("Throwing error during access");
+ }
+ }
+ static class ThrowWriteFieldTracer extends FieldTracer {
+ @Override
+ public void handleFieldModify(Executable m, long l, Class<?> fk, Object t, Field f, Object v) {
+ throw new TestError("Throwing error during modify");
+ }
+ }
+ static class ModifyDuringReadAndWriteFieldTracer extends FieldTracer {
+ @Override
+ public void handleFieldModify(Executable m, long l, Class<?> fk, Object t, Field f, Object v) {
+ // NB This is only safe because the agent doesn't send recursive access/modification events up
+ // to the java layer here.
+ ((TestClass1)t).xyz += 100;
+ }
+ @Override
+ public void handleFieldAccess(Executable m, long l, Class<?> fk, Object t, Field f) {
+ // NB This is only safe because the agent doesn't send recursive access/modification events up
+ // to the java layer here.
+ ((TestClass1)t).xyz += 10;
+ }
+ }
+
+ static class ModifyDuringWriteFieldTracer extends FieldTracer {
+ @Override
+ public void handleFieldModify(Executable m, long l, Class<?> fk, Object t, Field f, Object v) {
+ // NB This is only safe because the agent doesn't send recursive access/modification events up
+ // to the java layer here.
+ ((TestClass1)t).xyz += 200;
+ }
+ }
+
+ static class ModifyDuringReadFieldTracer extends FieldTracer {
+ @Override
+ public void handleFieldAccess(Executable m, long l, Class<?> fk, Object t, Field f) {
+ // NB This is only safe because the agent doesn't send recursive access/modification events up
+ // to the java layer here.
+ ((TestClass1)t).xyz += 20;
+ }
+ }
+
+ public static void notifyFieldModify(
+ Executable m, long location, Class<?> f_klass, Object target, Field f, Object value) {
+ if (TRACE != null) {
+ TRACE.notifyFieldModify(m, location, f_klass, target, f, value);
+ }
+ }
+
+ public static void notifyFieldAccess(
+ Executable m, long location, Class<?> f_klass, Object target, Field f) {
+ if (TRACE != null) {
+ TRACE.notifyFieldAccess(m, location, f_klass, target, f);
+ }
+ }
+
+ public static class TestClass1 {
+ public int xyz;
+ public TestClass1(int xyz) {
+ this.xyz = xyz;
+ }
+ }
+
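+  // Read xyz with the tracer temporarily cleared so the test's own bookkeeping reads do not
+  // produce any output.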
+ public static int readFieldUntraced(TestClass1 target) {
+ FieldTracer tmp = TRACE;
+ TRACE = null;
+ int res = target.xyz;
+ TRACE = tmp;
+ return res;
+ }
+
+ public static class JavaReadWrite implements Consumer<TestClass1> {
+ public void accept(TestClass1 t1) {
+ int val = t1.xyz;
+ System.out.println("normal read: xyz = " + val);
+ t1.xyz = val + 1;
+ }
+ }
+
+ public static class ReflectiveReadWrite implements Consumer<TestClass1> {
+ public void accept(TestClass1 t1) {
+ try {
+ Field f = t1.getClass().getDeclaredField("xyz");
+ int val = f.getInt(t1);
+ System.out.println("reflective read: xyz = " + val);
+ f.setInt(t1, val + 1);
+ } catch (IllegalAccessException iae) {
+ throw new InternalError("Could not set field xyz", iae);
+ } catch (NoSuchFieldException nsfe) {
+ throw new InternalError("Could not find field xyz", nsfe);
+ }
+ }
+ }
+
+ public static class NativeReadWrite implements Consumer<TestClass1> {
+ public void accept(TestClass1 t1) {
+ doNativeReadWrite(t1);
+ }
+ }
+
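+  // Build the test object with the tracer cleared so the constructor's field store is not
+  // reported.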
+ public static TestClass1 createTestClassNonTraced() {
+ FieldTracer tmp = TRACE;
+ TRACE = null;
+ TestClass1 n = new TestClass1(0);
+ TRACE = tmp;
+ return n;
+ }
+
+ public static void run() throws Exception {
+ Trace.disableTracing(Thread.currentThread());
+ Trace.enableFieldTracing(
+ Test991.class,
+ Test991.class.getDeclaredMethod("notifyFieldAccess",
+ Executable.class, Long.TYPE, Class.class, Object.class, Field.class),
+ Test991.class.getDeclaredMethod("notifyFieldModify",
+ Executable.class, Long.TYPE, Class.class, Object.class, Field.class, Object.class),
+ Thread.currentThread());
+ for (Field f : WATCH_FIELDS) {
+ Trace.watchFieldAccess(f);
+ Trace.watchFieldModification(f);
+ }
+ FieldTracer[] tracers = new FieldTracer[] {
+ new DoNothingFieldTracer(),
+ new ThrowReadFieldTracer(),
+ new ThrowWriteFieldTracer(),
+ new ModifyDuringReadFieldTracer(),
+ new ModifyDuringWriteFieldTracer(),
+ new ModifyDuringReadAndWriteFieldTracer(),
+ };
+ Consumer<TestClass1>[] field_modification = new Consumer[] {
+ new JavaReadWrite(),
+ new ReflectiveReadWrite(),
+ new NativeReadWrite(),
+ };
+ for (Consumer<TestClass1> c : field_modification) {
+ for (FieldTracer trace : tracers) {
+ System.out.println("Test is " + trace.getClass() + " & " + c.getClass());
+ TestClass1 t1 = createTestClassNonTraced();
+ TRACE = trace;
+ System.out.println("Initial state: xyz = " + readFieldUntraced(t1));
+ try {
+ c.accept(t1);
+ } catch (TestError e) {
+ System.out.println("Caught error. " + e);
+ } finally {
+ System.out.println("Final state: xyz = " + readFieldUntraced(t1));
+ }
+ }
+ }
+ Trace.disableTracing(Thread.currentThread());
+ }
+
+ public static native void doNativeReadWrite(TestClass1 t1);
+
+ public static void doPrintNativeNotification(int val) {
+ System.out.println("native read: xyz = " + val);
+ }
+}
diff --git a/test/991-field-trace-2/src/art/Trace.java b/test/991-field-trace-2/src/art/Trace.java
new file mode 100644
index 0000000000..9c27c9f69e
--- /dev/null
+++ b/test/991-field-trace-2/src/art/Trace.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+
+public class Trace {
+ public static native void enableTracing(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Thread thr);
+ public static native void disableTracing(Thread thr);
+
+ public static void enableFieldTracing(Class<?> methodClass,
+ Method fieldAccess,
+ Method fieldModify,
+ Thread thr) {
+ enableTracing(methodClass, null, null, fieldAccess, fieldModify, thr);
+ }
+
+ public static void enableMethodTracing(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Thread thr) {
+ enableTracing(methodClass, entryMethod, exitMethod, null, null, thr);
+ }
+
+ public static native void watchFieldAccess(Field f);
+ public static native void watchFieldModification(Field f);
+ public static native void watchAllFieldAccesses();
+ public static native void watchAllFieldModifications();
+}
diff --git a/test/992-source-data/expected.txt b/test/992-source-data/expected.txt
new file mode 100644
index 0000000000..480d8a4fe7
--- /dev/null
+++ b/test/992-source-data/expected.txt
@@ -0,0 +1,10 @@
+class art.Test992 is defined in file "Test992.java"
+class art.Test992$Target1 is defined in file "Test992.java"
+class art.Test2 is defined in file "Test2.java"
+int does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
+class java.lang.Integer is defined in file "Integer.java"
+class java.lang.Object is defined in file "Object.java"
+interface java.lang.Runnable is defined in file "Runnable.java"
+class [Ljava.lang.Object; does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
+class [I does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
+null does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_INVALID_CLASS
diff --git a/test/992-source-data/info.txt b/test/992-source-data/info.txt
new file mode 100644
index 0000000000..5d487a4bde
--- /dev/null
+++ b/test/992-source-data/info.txt
@@ -0,0 +1 @@
+Tests that we can get the source file of a class from JVMTI.
diff --git a/test/146-bad-interface/build b/test/992-source-data/run
index 0dd8573f54..e92b873956 100755
--- a/test/146-bad-interface/build
+++ b/test/992-source-data/run
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2015 The Android Open Source Project
+# Copyright 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,14 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# make us exit on a failure
-set -e
-
-if [[ $@ != *"--jvm"* ]]; then
- # Don't do anything with jvm
- # Hard-wired use of experimental jack.
- # TODO: fix this temporary work-around for default-methods, see b/19467889
- export USE_JACK=true
-fi
-
-./default-build "$@" --experimental default-methods
+./default-run "$@" --jvmti
diff --git a/test/992-source-data/source_file.cc b/test/992-source-data/source_file.cc
new file mode 100644
index 0000000000..3e8989e403
--- /dev/null
+++ b/test/992-source-data/source_file.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+#include <memory>
+#include <stdio.h>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+
+// Test infrastructure
+#include "jni_binder.h"
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+namespace art {
+namespace Test992SourceFile {
+
+extern "C" JNIEXPORT
+jstring JNICALL Java_art_Test992_getSourceFileName(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jclass target) {
+ char* file = nullptr;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetSourceFileName(target, &file))) {
+ return nullptr;
+ }
+ jstring ret = env->NewStringUTF(file);
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(file));
+ return ret;
+}
+
+} // namespace Test992SourceFile
+} // namespace art
+
diff --git a/test/992-source-data/src/Main.java b/test/992-source-data/src/Main.java
new file mode 100644
index 0000000000..31106f41fb
--- /dev/null
+++ b/test/992-source-data/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test992.run();
+ }
+}
diff --git a/test/992-source-data/src/art/Test2.java b/test/992-source-data/src/art/Test2.java
new file mode 100644
index 0000000000..dbb1089c5e
--- /dev/null
+++ b/test/992-source-data/src/art/Test2.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Test2 {}
diff --git a/test/992-source-data/src/art/Test992.java b/test/992-source-data/src/art/Test992.java
new file mode 100644
index 0000000000..db6ea73856
--- /dev/null
+++ b/test/992-source-data/src/art/Test992.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+
+public class Test992 {
+
+ static class Target1 { }
+
+ public static void run() {
+ doTest(Test992.class);
+ doTest(Target1.class);
+ doTest(Test2.class);
+ doTest(Integer.TYPE);
+ doTest(Integer.class);
+ doTest(Object.class);
+ doTest(Runnable.class);
+ doTest(new Object[0].getClass());
+ doTest(new int[0].getClass());
+ doTest(null);
+ }
+
+ public static void doTest(Class<?> k) {
+ try {
+ System.out.println(k + " is defined in file \"" + getSourceFileName(k) + "\"");
+ } catch (Exception e) {
+ System.out.println(k + " does not have a known source file because " + e);
+ }
+ }
+
+ public static native String getSourceFileName(Class<?> k) throws Exception;
+}
diff --git a/test/Android.bp b/test/Android.bp
index 35c3d9c332..9e6ecffe79 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -279,6 +279,8 @@ art_cc_defaults {
"986-native-method-bind/native_bind.cc",
"987-agent-bind/agent_bind.cc",
"989-method-trace-throw/method_trace.cc",
+ "991-field-trace-2/field_trace.cc",
+ "992-source-data/source_file.cc",
],
shared_libs: [
"libbase",
@@ -394,6 +396,7 @@ cc_defaults {
"626-const-class-linking/clear_dex_cache_types.cc",
"642-fp-callees/fp_callees.cc",
"647-jni-get-field-id/get_field_id.cc",
+        "708-jit-cache-churn/jit.cc",
],
shared_libs: [
"libbacktrace",
diff --git a/test/etc/default-build b/test/etc/default-build
index 0508b85529..a88ef924a6 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -60,16 +60,25 @@ else
HAS_SRC_DEX2OAT_UNRESOLVED=false
fi
+# DESUGAR=false run-test... will disable desugar.
+if [[ "$DESUGAR" == false ]]; then
+ USE_DESUGAR=false
+fi
+
# Allow overriding ZIP_COMPRESSION_METHOD with e.g. 'store'
ZIP_COMPRESSION_METHOD="deflate"
# Align every ZIP file made by calling $ZIPALIGN command?
WITH_ZIP_ALIGN=false
ZIP_ALIGN_BYTES="-1"
-DX_FLAGS=""
+DX_FLAGS="--min-sdk-version=24"
+DX_VM_FLAGS=""
SKIP_DX_MERGER="false"
EXPERIMENTAL=""
+BUILD_MODE="target"
+DEV_MODE="no"
+
# The key for default arguments if no experimental things are enabled.
DEFAULT_EXPERIMENT="no-experiment"
@@ -94,12 +103,20 @@ JAVAC_EXPERIMENTAL_ARGS["method-handles"]="-source 1.8 -target 1.8"
JAVAC_EXPERIMENTAL_ARGS[${DEFAULT_EXPERIMENT}]="-source 1.8 -target 1.8"
JAVAC_EXPERIMENTAL_ARGS["agents"]="-source 1.8 -target 1.8"
+declare -A DX_EXPERIMENTAL_ARGS
+DX_EXPERIMENTAL_ARGS["method-handles"]="--min-sdk-version=26"
+
while true; do
if [ "x$1" = "x--dx-option" ]; then
shift
option="$1"
DX_FLAGS="${DX_FLAGS} $option"
shift
+ elif [ "x$1" = "x--dx-vm-option" ]; then
+ shift
+ option="$1"
+ DX_VM_FLAGS="${DX_VM_FLAGS} $option"
+ shift
elif [ "x$1" = "x--jvm" ]; then
shift
elif [ "x$1" = "x--no-src" ]; then
@@ -137,6 +154,15 @@ while true; do
WITH_ZIP_ALIGN=true
ZIP_ALIGN_BYTES="$1"
shift
+ elif [ "x$1" = "x--host" ]; then
+ BUILD_MODE="host"
+ shift
+ elif [ "x$1" = "x--target" ]; then
+ BUILD_MODE="target"
+ shift
+ elif [ "x$1" = "x--dev" ]; then
+ DEV_MODE="yes"
+ shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
exit 1
@@ -163,6 +189,7 @@ for experiment in ${EXPERIMENTAL}; do
JACK_ARGS="${JACK_ARGS} ${JACK_EXPERIMENTAL_ARGS[${experiment}]}"
SMALI_ARGS="${SMALI_ARGS} ${SMALI_EXPERIMENTAL_ARGS[${experiment}]}"
JAVAC_ARGS="${JAVAC_ARGS} ${JAVAC_EXPERIMENTAL_ARGS[${experiment}]}"
+ DX_FLAGS="${DX_FLAGS} ${DX_EXPERIMENTAL_ARGS[${experiment}]}"
done
#########################################
@@ -185,6 +212,41 @@ function zip() {
fi
}
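+# Run the desugar tool over a jar, passing the current build mode and, in dev mode, asking it to
+# echo the commands it runs.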
+function desugar() {
+ local desugar_args=--mode=host
+ if [[ $BUILD_MODE == target ]]; then
+ desugar_args=--mode=target
+ fi
+
+ if [[ $DEV_MODE == yes ]]; then
+ desugar_args="$desugar_args --show-commands"
+ fi
+
+ "$DESUGAR" --core-only $desugar_args "$@"
+}
+
+# Make a "dex" file given a directory of classes in $1.
+# Also calls desugar on the classes first to convert lambdas.
+function make_dex() {
+ local name="$1"
+
+ local dx_input
+ if [[ "$USE_DESUGAR" == "true" ]]; then
+ # Make a jar first so desugar doesn't need every .class file individually.
+ jar cf "$name.before-desugar.jar" -C "$name" .
+
+ dx_input="${name}.desugar.jar"
+
+ # Make desugared JAR.
+ desugar --input "$name.before-desugar.jar" --output "$dx_input"
+ else
+ dx_input="${name}"
+ fi
+
+  # Make the dex file from the desugared JAR (or directly from the classes when desugar is off).
+ ${DX} -JXmx256m ${DX_VM_FLAGS} --debug --dex --dump-to=${name}.lst --output=${name}.dex --dump-width=1000 ${DX_FLAGS} "${dx_input}"
+}
+
if [ -e classes.dex ]; then
zip $TEST_NAME.jar classes.dex
exit 0
@@ -209,9 +271,10 @@ if [ ${HAS_SRC_DEX2OAT_UNRESOLVED} = "true" ]; then
${JACK} --import classes.jill.jar --output-dex .
else
if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 ${DX_FLAGS} classes-ex
+ make_dex classes-ex
+ mv classes-ex.dex classes.dex # rename it so it shows up as "classes.dex" in the zip file.
zip ${TEST_NAME}-ex.jar classes.dex
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 ${DX_FLAGS} classes
+ make_dex classes
fi
fi
else
@@ -254,8 +317,7 @@ else
mkdir classes2
${JAVAC} -implicit:none -classpath src -d classes2 `find src-multidex -name '*.java'`
if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex \
- --dump-width=1000 ${DX_FLAGS} classes2
+ make_dex classes2
fi
fi
@@ -266,8 +328,7 @@ else
if [ "${HAS_SRC}" = "true" ] || [ "${HAS_SRC2}" = "true" ]; then
if [ ${NEED_DEX} = "true" -a ${SKIP_DX_MERGER} = "false" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
- --dump-width=1000 ${DX_FLAGS} classes
+ make_dex classes
fi
fi
fi
@@ -311,8 +372,7 @@ if [ ${HAS_SRC_EX} = "true" ]; then
mkdir classes-ex
${JAVAC} ${JAVAC_ARGS} -d classes-ex -cp classes `find src-ex -name '*.java'`
if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes-ex.dex \
- --dump-width=1000 ${DX_FLAGS} classes-ex
+ make_dex classes-ex
# quick shuffle so that the stored name is "classes.dex"
mv classes.dex classes-1.dex
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 8aacc8c9b7..c44fb97c50 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -63,6 +63,7 @@ TEST_VDEX="n"
TEST_IS_NDEBUG="n"
APP_IMAGE="y"
JVMTI_STRESS="n"
+JVMTI_FIELD_STRESS="n"
JVMTI_TRACE_STRESS="n"
JVMTI_REDEFINE_STRESS="n"
VDEX_FILTER=""
@@ -159,6 +160,10 @@ while true; do
JVMTI_STRESS="y"
JVMTI_REDEFINE_STRESS="y"
shift
+ elif [ "x$1" = "x--jvmti-field-stress" ]; then
+ JVMTI_STRESS="y"
+ JVMTI_FIELD_STRESS="y"
+ shift
elif [ "x$1" = "x--jvmti-trace-stress" ]; then
JVMTI_STRESS="y"
JVMTI_TRACE_STRESS="y"
@@ -415,6 +420,9 @@ if [[ "$JVMTI_STRESS" = "y" ]]; then
agent_args="${agent_args},redefine,${DEXTER_BINARY},${file_1},${file_2}"
fi
fi
+ if [[ "$JVMTI_FIELD_STRESS" = "y" ]]; then
+ agent_args="${agent_args},field"
+ fi
if [[ "$JVMTI_TRACE_STRESS" = "y" ]]; then
agent_args="${agent_args},trace"
fi
diff --git a/test/knownfailures.json b/test/knownfailures.json
index f51522669c..073df61144 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -511,7 +511,7 @@
"645-checker-abs-simd",
"706-checker-scheduler"],
"description": ["Checker tests are not compatible with jvmti."],
- "variant": "jvmti-stress | redefine-stress | trace-stress"
+ "variant": "jvmti-stress | redefine-stress | trace-stress | field-stress"
},
{
"tests": [
@@ -550,7 +550,7 @@
"981-dedup-original-dex"
],
"description": ["Tests that require exact knowledge of the number of plugins and agents."],
- "variant": "jvmti-stress | redefine-stress | trace-stress"
+ "variant": "jvmti-stress | redefine-stress | trace-stress | field-stress"
},
{
"tests": [
@@ -585,6 +585,13 @@
},
{
"tests": [
+ "004-ThreadStress"
+ ],
+ "description": "The thread stress test just takes too long with field-stress",
+ "variant": "jvmti-stress | field-stress"
+ },
+ {
+ "tests": [
"031-class-attributes",
"911-get-stack-trace"
],
@@ -596,19 +603,15 @@
},
{
"tests": [
- "004-JniTest",
"004-NativeAllocations",
"004-ReferenceMap",
"004-StackWalk",
- "048-reflect-v8",
"089-many-methods",
"138-duplicate-classes-check",
"146-bad-interface",
"157-void-class",
- "563-checker-invoke-super",
"580-checker-string-fact-intrinsics",
"596-monitor-inflation",
- "604-hot-static-interface",
"612-jit-dex-cache",
"613-inlining-dex-cache",
"616-cha-interface-default",
@@ -618,39 +621,12 @@
"911-get-stack-trace",
"912-classes",
"913-heaps",
- "914-hello-obsolescence",
- "915-obsolete-2",
- "916-obsolete-jit",
- "919-obsolete-fields",
- "921-hello-failure",
- "926-multi-obsolescence",
- "940-recursive-obsolete",
- "941-recurive-obsolete-jit",
- "942-private-recursive",
- "943-private-recursive-jit",
- "945-obsolete-native",
- "946-obsolete-throw",
"948-change-annotations",
- "950-redefine-intrinsic",
- "951-threaded-obsolete",
"952-invoke-custom",
"953-invoke-polymorphic-compiler",
- "956-methodhandles",
- "957-methodhandle-transforms",
"958-methodhandle-stackframe",
- "959-invoke-polymorphic-accessors",
"960-default-smali",
- "961-default-iface-resolution-gen",
- "962-iface-static",
- "963-default-range-smali",
- "964-default-iface-init-gen",
- "965-default-verify",
- "966-default-conflict",
- "967-default-ame",
- "969-iface-super",
- "981-dedup-original-dex",
- "984-obsolete-invoke",
- "985-re-obsolete"
+ "981-dedup-original-dex"
],
"description": "The tests above fail with --build-with-javac-dx.",
"env_vars": {"ANDROID_COMPILE_WITH_JACK": "false"},
@@ -706,16 +682,20 @@
"description": "Test disabled due to redefine-stress disabling intrinsics which changes the trace output slightly."
},
{
- "tests": "137-cfi",
- "description": [ "ASan is reporting out-of-bounds reads in libunwind."],
- "variant": "host",
- "env_vars": {"SANITIZE_HOST": "address"},
- "bug": "b/62350406"
- },
- {
"tests": ["137-cfi", "629-vdex-speed"],
"description": [ "Tests require speed compilation which is no longer the default for",
"no-prebuild or no-image configs."],
"variant": "no-prebuild | no-image"
+ },
+ {
+ "tests": ["059-finalizer-throw", "063-process-manager"],
+ "description": [ "Tests that take too long on target with gcstress and debug" ],
+ "variant": "gcstress & target & debug"
+ },
+ {
+ "tests": ["905-object-free"],
+ "description": [ "Flake on gcstress" ],
+ "bug": "b/62562923",
+ "variant": "gcstress & jit & target"
}
]
diff --git a/test/run-test b/test/run-test
index 41a0dc2a84..044f63fa1e 100755
--- a/test/run-test
+++ b/test/run-test
@@ -46,6 +46,7 @@ export RUN="${progdir}/etc/run-test-jar"
export DEX_LOCATION=/data/run-test/${test_dir}
export NEED_DEX="true"
export USE_JACK="true"
+export USE_DESUGAR="true"
export SMALI_ARGS=""
# If dx was not set by the environment variable, assume it is in the path.
@@ -90,6 +91,11 @@ fi
export JACK="$JACK -g -cp $JACK_CLASSPATH"
+# Allow changing DESUGAR script to something else, or to disable it with DESUGAR=false.
+if [ -z "$DESUGAR" ]; then
+ export DESUGAR="$ANDROID_BUILD_TOP/art/tools/desugar.sh"
+fi
+
# Zipalign is not on the PATH in some configs, auto-detect it.
if [ -z "$ZIPALIGN" ]; then
if which zipalign >/dev/null; then
@@ -138,6 +144,7 @@ basic_verify="false"
gc_verify="false"
gc_stress="false"
jvmti_trace_stress="false"
+jvmti_field_stress="false"
jvmti_redefine_stress="false"
strace="false"
always_clean="no"
@@ -238,6 +245,9 @@ while true; do
elif [ "x$1" = "x--jvmti-redefine-stress" ]; then
jvmti_redefine_stress="true"
shift
+ elif [ "x$1" = "x--jvmti-field-stress" ]; then
+ jvmti_field_stress="true"
+ shift
elif [ "x$1" = "x--jvmti-trace-stress" ]; then
jvmti_trace_stress="true"
shift
@@ -454,6 +464,9 @@ fi
if [ "$jvmti_redefine_stress" = "true" ]; then
run_args="${run_args} --no-app-image --jvmti-redefine-stress"
fi
+if [ "$jvmti_field_stress" = "true" ]; then
+ run_args="${run_args} --no-app-image --jvmti-field-stress"
+fi
if [ "$jvmti_trace_stress" = "true" ]; then
run_args="${run_args} --no-app-image --jvmti-trace-stress"
fi
@@ -771,6 +784,16 @@ if ! ulimit -f 128000; then
err_echo "ulimit file size setting failed"
fi
+if [[ "$target_mode" == "yes" ]]; then
+ build_args="$build_args --target"
+else
+ build_args="$build_args --host"
+fi
+
+if [[ "$dev_mode" == "yes" ]]; then
+ build_args="$build_args --dev"
+fi
+
good="no"
good_build="yes"
good_run="yes"
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 474c25947c..baf7600349 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -324,18 +324,23 @@ target_config = {
# ASAN (host) configurations.
+ # These configurations need detect_leaks=0 to work in non-setup environments like build bots,
+ # as our build tools leak. b/37751350
+
'art-gtest-asan': {
'make' : 'test-art-host-gtest',
'env': {
- 'SANITIZE_HOST' : 'address'
+ 'SANITIZE_HOST' : 'address',
+ 'ASAN_OPTIONS' : 'detect_leaks=0'
}
},
- 'art-run-test-asan': {
+ 'art-asan': {
'run-test' : ['--interpreter',
'--optimizing',
'--jit'],
'env': {
- 'SANITIZE_HOST' : 'address'
+ 'SANITIZE_HOST' : 'address',
+ 'ASAN_OPTIONS' : 'detect_leaks=0'
}
},
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 344507115b..b6a5963cf4 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -147,7 +147,8 @@ def gather_test_info():
VARIANT_TYPE_DICT['relocate'] = {'relocate-npatchoat', 'relocate', 'no-relocate'}
VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
- VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress'}
+ VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress',
+ 'field-stress'}
VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'optimizing',
'regalloc_gc', 'speed-profile'}
@@ -437,7 +438,9 @@ def run_tests(tests):
options_test += ' --debuggable'
if jvmti == 'jvmti-stress':
- options_test += ' --jvmti-trace-stress --jvmti-redefine-stress'
+ options_test += ' --jvmti-trace-stress --jvmti-redefine-stress --jvmti-field-stress'
+ elif jvmti == 'field-stress':
+ options_test += ' --jvmti-field-stress'
elif jvmti == 'trace-stress':
options_test += ' --jvmti-trace-stress'
elif jvmti == 'redefine-stress':
@@ -960,6 +963,8 @@ def parse_option():
JVMTI_TYPES.add('jvmti-stress')
if options['redefine_stress']:
JVMTI_TYPES.add('redefine-stress')
+ if options['field_stress']:
+ JVMTI_TYPES.add('field-stress')
if options['trace_stress']:
JVMTI_TYPES.add('trace-stress')
if options['no_jvmti']:
diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc
index 6eaa5c37df..4fe58db169 100644
--- a/test/ti-agent/common_helper.cc
+++ b/test/ti-agent/common_helper.cc
@@ -78,9 +78,23 @@ struct TraceData {
jclass test_klass;
jmethodID enter_method;
jmethodID exit_method;
+ jmethodID field_access;
+ jmethodID field_modify;
bool in_callback;
+ bool access_watch_on_load;
+ bool modify_watch_on_load;
};
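+// Box a jfieldID into a java.lang.reflect.Field object so it can be handed to the Java-side
+// handler.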
+static jobject GetJavaField(jvmtiEnv* jvmti, JNIEnv* env, jclass field_klass, jfieldID f) {
+ jint mods = 0;
+ if (JvmtiErrorToException(env, jvmti, jvmti->GetFieldModifiers(field_klass, f, &mods))) {
+ return nullptr;
+ }
+
+ bool is_static = (mods & kAccStatic) != 0;
+ return env->ToReflectedField(field_klass, f, is_static);
+}
+
static jobject GetJavaMethod(jvmtiEnv* jvmti, JNIEnv* env, jmethodID m) {
jint mods = 0;
if (JvmtiErrorToException(env, jvmti, jvmti->GetMethodModifiers(m, &mods))) {
@@ -97,21 +111,9 @@ static jobject GetJavaMethod(jvmtiEnv* jvmti, JNIEnv* env, jmethodID m) {
return res;
}
-static jobject GetJavaValue(jvmtiEnv* jvmtienv,
- JNIEnv* env,
- jmethodID m,
- jvalue value) {
- char *fname, *fsig, *fgen;
- if (JvmtiErrorToException(env, jvmtienv, jvmtienv->GetMethodName(m, &fname, &fsig, &fgen))) {
- return nullptr;
- }
- std::string type(fsig);
- type = type.substr(type.find(")") + 1);
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fsig));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fname));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fgen));
+static jobject GetJavaValueByType(JNIEnv* env, char type, jvalue value) {
std::string name;
- switch (type[0]) {
+ switch (type) {
case 'V':
return nullptr;
case '[':
@@ -146,7 +148,7 @@ static jobject GetJavaValue(jvmtiEnv* jvmtienv,
return nullptr;
}
std::ostringstream oss;
- oss << "(" << type[0] << ")L" << name << ";";
+ oss << "(" << type << ")L" << name << ";";
std::string args = oss.str();
jclass target = env->FindClass(name.c_str());
jmethodID valueOfMethod = env->GetStaticMethodID(target, "valueOf", args.c_str());
@@ -157,6 +159,98 @@ static jobject GetJavaValue(jvmtiEnv* jvmtienv,
return res;
}
+static jobject GetJavaValue(jvmtiEnv* jvmtienv,
+ JNIEnv* env,
+ jmethodID m,
+ jvalue value) {
+ char *fname, *fsig, *fgen;
+ if (JvmtiErrorToException(env, jvmtienv, jvmtienv->GetMethodName(m, &fname, &fsig, &fgen))) {
+ return nullptr;
+ }
+ std::string type(fsig);
+ type = type.substr(type.find(")") + 1);
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fsig));
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fname));
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fgen));
+ return GetJavaValueByType(env, type[0], value);
+}
+
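+// JVMTI FieldAccess callback: boxes the raw method and field IDs into reflection objects and
+// forwards them to the registered Java handler, guarding against re-entrancy.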
+static void fieldAccessCB(jvmtiEnv* jvmti,
+ JNIEnv* jnienv,
+ jthread thr ATTRIBUTE_UNUSED,
+ jmethodID method,
+ jlocation location,
+ jclass field_klass,
+ jobject object,
+ jfieldID field) {
+ TraceData* data = nullptr;
+ if (JvmtiErrorToException(jnienv, jvmti,
+ jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ if (data->in_callback) {
+    // Don't do callback recursively to prevent an infinite loop.
+ return;
+ }
+ CHECK(data->field_access != nullptr);
+ data->in_callback = true;
+ jobject method_arg = GetJavaMethod(jvmti, jnienv, method);
+ jobject field_arg = GetJavaField(jvmti, jnienv, field_klass, field);
+ jnienv->CallStaticVoidMethod(data->test_klass,
+ data->field_access,
+ method_arg,
+ static_cast<jlong>(location),
+ field_klass,
+ object,
+ field_arg);
+ jnienv->DeleteLocalRef(method_arg);
+ jnienv->DeleteLocalRef(field_arg);
+ data->in_callback = false;
+}
+
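+// JVMTI FieldModification callback: as above, but additionally boxes the new value using the
+// signature character passed in 'type_char'.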
+static void fieldModificationCB(jvmtiEnv* jvmti,
+ JNIEnv* jnienv,
+ jthread thr ATTRIBUTE_UNUSED,
+ jmethodID method,
+ jlocation location,
+ jclass field_klass,
+ jobject object,
+ jfieldID field,
+ char type_char,
+ jvalue new_value) {
+ TraceData* data = nullptr;
+ if (JvmtiErrorToException(jnienv, jvmti,
+ jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ if (data->in_callback) {
+ // Don't do callback recursively to prevent an infinite loop.
+ return;
+ }
+ CHECK(data->field_modify != nullptr);
+ data->in_callback = true;
+ jobject method_arg = GetJavaMethod(jvmti, jnienv, method);
+ jobject field_arg = GetJavaField(jvmti, jnienv, field_klass, field);
+ jobject value = GetJavaValueByType(jnienv, type_char, new_value);
+ if (jnienv->ExceptionCheck()) {
+ data->in_callback = false;
+ jnienv->DeleteLocalRef(method_arg);
+ jnienv->DeleteLocalRef(field_arg);
+ return;
+ }
+ jnienv->CallStaticVoidMethod(data->test_klass,
+ data->field_modify,
+ method_arg,
+ static_cast<jlong>(location),
+ field_klass,
+ object,
+ field_arg,
+ value);
+ jnienv->DeleteLocalRef(method_arg);
+ jnienv->DeleteLocalRef(field_arg);
+ data->in_callback = false;
+}
+
static void methodExitCB(jvmtiEnv* jvmti,
JNIEnv* jnienv,
jthread thr ATTRIBUTE_UNUSED,
@@ -172,6 +266,7 @@ static void methodExitCB(jvmtiEnv* jvmti,
// Don't do callback for either of these to prevent an infinite loop.
return;
}
+ CHECK(data->exit_method != nullptr);
data->in_callback = true;
jobject method_arg = GetJavaMethod(jvmti, jnienv, method);
jobject result =
@@ -198,6 +293,7 @@ static void methodEntryCB(jvmtiEnv* jvmti,
jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&data)))) {
return;
}
+ CHECK(data->enter_method != nullptr);
if (method == data->exit_method || method == data->enter_method || data->in_callback) {
// Don't do callback for either of these to prevent an infinite loop.
return;
@@ -212,12 +308,179 @@ static void methodEntryCB(jvmtiEnv* jvmti,
data->in_callback = false;
}
-extern "C" JNIEXPORT void JNICALL Java_art_Trace_enableMethodTracing(
+static void classPrepareCB(jvmtiEnv* jvmti,
+ JNIEnv* jnienv,
+ jthread thr ATTRIBUTE_UNUSED,
+ jclass klass) {
+ TraceData* data = nullptr;
+ if (JvmtiErrorToException(jnienv, jvmti,
+ jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ if (data->access_watch_on_load || data->modify_watch_on_load) {
+ jint nfields;
+ jfieldID* fields;
+ if (JvmtiErrorToException(jnienv, jvmti, jvmti->GetClassFields(klass, &nfields, &fields))) {
+ return;
+ }
+ for (jint i = 0; i < nfields; i++) {
+ jfieldID f = fields[i];
+ // Ignore errors
+ if (data->access_watch_on_load) {
+ jvmti->SetFieldAccessWatch(klass, f);
+ }
+
+ if (data->modify_watch_on_load) {
+ jvmti->SetFieldModificationWatch(klass, f);
+ }
+ }
+ jvmti->Deallocate(reinterpret_cast<unsigned char*>(fields));
+ }
+}
+
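+// Watch every field of every class that is already prepared; classes prepared later are handled
+// by classPrepareCB via the CLASS_PREPARE event enabled here.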
+extern "C" JNIEXPORT void JNICALL Java_art_Trace_watchAllFieldAccesses(JNIEnv* env) {
+ TraceData* data = nullptr;
+ if (JvmtiErrorToException(
+ env, jvmti_env, jvmti_env->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ data->access_watch_on_load = true;
+ // We need the classPrepareCB to watch new fields as the classes are loaded/prepared.
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_CLASS_PREPARE,
+ nullptr))) {
+ return;
+ }
+ jint nklasses;
+ jclass* klasses;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetLoadedClasses(&nklasses, &klasses))) {
+ return;
+ }
+ for (jint i = 0; i < nklasses; i++) {
+ jclass k = klasses[i];
+
+ jint nfields;
+ jfieldID* fields;
+ jvmtiError err = jvmti_env->GetClassFields(k, &nfields, &fields);
+ if (err == JVMTI_ERROR_CLASS_NOT_PREPARED) {
+ continue;
+ } else if (JvmtiErrorToException(env, jvmti_env, err)) {
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(klasses));
+ return;
+ }
+ for (jint j = 0; j < nfields; j++) {
+ jvmti_env->SetFieldAccessWatch(k, fields[j]);
+ }
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(fields));
+ }
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(klasses));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Trace_watchAllFieldModifications(JNIEnv* env) {
+ TraceData* data = nullptr;
+ if (JvmtiErrorToException(
+ env, jvmti_env, jvmti_env->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ data->modify_watch_on_load = true;
+ // We need the classPrepareCB to watch new fields as the classes are loaded/prepared.
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_CLASS_PREPARE,
+ nullptr))) {
+ return;
+ }
+ jint nklasses;
+ jclass* klasses;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetLoadedClasses(&nklasses, &klasses))) {
+ return;
+ }
+ for (jint i = 0; i < nklasses; i++) {
+ jclass k = klasses[i];
+
+ jint nfields;
+ jfieldID* fields;
+ jvmtiError err = jvmti_env->GetClassFields(k, &nfields, &fields);
+ if (err == JVMTI_ERROR_CLASS_NOT_PREPARED) {
+ continue;
+ } else if (JvmtiErrorToException(env, jvmti_env, err)) {
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(klasses));
+ return;
+ }
+ for (jint j = 0; j < nfields; j++) {
+ jvmti_env->SetFieldModificationWatch(k, fields[j]);
+ }
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(fields));
+ }
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(klasses));
+}
+
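+// Resolve a java.lang.reflect.Field object into its jfieldID and declaring class.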
+static bool GetFieldAndClass(JNIEnv* env,
+ jobject ref_field,
+ jclass* out_klass,
+ jfieldID* out_field) {
+ *out_field = env->FromReflectedField(ref_field);
+ if (env->ExceptionCheck()) {
+ return false;
+ }
+ jclass field_klass = env->FindClass("java/lang/reflect/Field");
+ if (env->ExceptionCheck()) {
+ return false;
+ }
+ jmethodID get_declaring_class_method =
+ env->GetMethodID(field_klass, "getDeclaringClass", "()Ljava/lang/Class;");
+ if (env->ExceptionCheck()) {
+ env->DeleteLocalRef(field_klass);
+ return false;
+ }
+ *out_klass = static_cast<jclass>(env->CallObjectMethod(ref_field, get_declaring_class_method));
+ if (env->ExceptionCheck()) {
+ *out_klass = nullptr;
+ env->DeleteLocalRef(field_klass);
+ return false;
+ }
+ env->DeleteLocalRef(field_klass);
+ return true;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Trace_watchFieldModification(
+ JNIEnv* env,
+ jclass trace ATTRIBUTE_UNUSED,
+ jobject field_obj) {
+ jfieldID field;
+ jclass klass;
+ if (!GetFieldAndClass(env, field_obj, &klass, &field)) {
+ return;
+ }
+
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->SetFieldModificationWatch(klass, field));
+ env->DeleteLocalRef(klass);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Trace_watchFieldAccess(
+ JNIEnv* env,
+ jclass trace ATTRIBUTE_UNUSED,
+ jobject field_obj) {
+ jfieldID field;
+ jclass klass;
+ if (!GetFieldAndClass(env, field_obj, &klass, &field)) {
+ return;
+ }
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->SetFieldAccessWatch(klass, field));
+ env->DeleteLocalRef(klass);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Trace_enableTracing(
JNIEnv* env,
jclass trace ATTRIBUTE_UNUSED,
jclass klass,
jobject enter,
jobject exit,
+ jobject field_access,
+ jobject field_modify,
jthread thr) {
TraceData* data = nullptr;
if (JvmtiErrorToException(env,
@@ -228,8 +491,10 @@ extern "C" JNIEXPORT void JNICALL Java_art_Trace_enableMethodTracing(
}
memset(data, 0, sizeof(TraceData));
data->test_klass = reinterpret_cast<jclass>(env->NewGlobalRef(klass));
- data->enter_method = env->FromReflectedMethod(enter);
- data->exit_method = env->FromReflectedMethod(exit);
+ data->enter_method = enter != nullptr ? env->FromReflectedMethod(enter) : nullptr;
+ data->exit_method = exit != nullptr ? env->FromReflectedMethod(exit) : nullptr;
+ data->field_access = field_access != nullptr ? env->FromReflectedMethod(field_access) : nullptr;
+ data->field_modify = field_modify != nullptr ? env->FromReflectedMethod(field_modify) : nullptr;
data->in_callback = false;
if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEnvironmentLocalStorage(data))) {
@@ -240,29 +505,62 @@ extern "C" JNIEXPORT void JNICALL Java_art_Trace_enableMethodTracing(
memset(&cb, 0, sizeof(cb));
cb.MethodEntry = methodEntryCB;
cb.MethodExit = methodExitCB;
+ cb.FieldAccess = fieldAccessCB;
+ cb.FieldModification = fieldModificationCB;
+ cb.ClassPrepare = classPrepareCB;
if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEventCallbacks(&cb, sizeof(cb)))) {
return;
}
- if (JvmtiErrorToException(env,
+ if (enter != nullptr &&
+ JvmtiErrorToException(env,
jvmti_env,
jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
JVMTI_EVENT_METHOD_ENTRY,
thr))) {
return;
}
- if (JvmtiErrorToException(env,
+ if (exit != nullptr &&
+ JvmtiErrorToException(env,
jvmti_env,
jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
JVMTI_EVENT_METHOD_EXIT,
thr))) {
return;
}
+ if (field_access != nullptr &&
+ JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_FIELD_ACCESS,
+ thr))) {
+ return;
+ }
+ if (field_modify != nullptr &&
+ JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_FIELD_MODIFICATION,
+ thr))) {
+ return;
+ }
}
-extern "C" JNIEXPORT void JNICALL Java_art_Trace_disableMethodTracing(
+extern "C" JNIEXPORT void JNICALL Java_art_Trace_disableTracing(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thr) {
if (JvmtiErrorToException(env, jvmti_env,
jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_FIELD_ACCESS,
+ thr))) {
+ return;
+ }
+ if (JvmtiErrorToException(env, jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_FIELD_MODIFICATION,
+ thr))) {
+ return;
+ }
+ if (JvmtiErrorToException(env, jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
JVMTI_EVENT_METHOD_ENTRY,
thr))) {
return;
diff --git a/test/ti-stress/stress.cc b/test/ti-stress/stress.cc
index 497db1cd3e..40fcc4f11d 100644
--- a/test/ti-stress/stress.cc
+++ b/test/ti-stress/stress.cc
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <iostream>
#include <fstream>
+#include <memory>
#include <stdio.h>
#include <sstream>
#include <strstream>
@@ -38,6 +39,7 @@ struct StressData {
bool vm_class_loader_initialized;
bool trace_stress;
bool redefine_stress;
+ bool field_stress;
};
static void WriteToFile(const std::string& fname, jint data_len, const unsigned char* data) {
@@ -87,6 +89,261 @@ static bool DoExtractClassFromData(StressData* data,
return ReadIntoBuffer(data->out_temp_dex, dex);
}
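+// Scoped holder for jvmtiThreadInfo: fetches the thread's info so its name can be
+// logged, then releases the JVMTI-allocated name and the local references on
+// destruction.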
+class ScopedThreadInfo {
+ public:
+ ScopedThreadInfo(jvmtiEnv* jvmtienv, JNIEnv* env, jthread thread)
+ : jvmtienv_(jvmtienv), env_(env), free_name_(false) {
+ memset(&info_, 0, sizeof(info_));
+ if (thread == nullptr) {
+ info_.name = const_cast<char*>("<NULLPTR>");
+ } else if (jvmtienv->GetThreadInfo(thread, &info_) != JVMTI_ERROR_NONE) {
+ info_.name = const_cast<char*>("<UNKNOWN THREAD>");
+ } else {
+ free_name_ = true;
+ }
+ }
+
+ ~ScopedThreadInfo() {
+ if (free_name_) {
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(info_.name));
+ }
+ env_->DeleteLocalRef(info_.thread_group);
+ env_->DeleteLocalRef(info_.context_class_loader);
+ }
+
+ const char* GetName() const {
+ return info_.name;
+ }
+
+ private:
+ jvmtiEnv* jvmtienv_;
+ JNIEnv* env_;
+ bool free_name_;
+ jvmtiThreadInfo info_;
+};
+
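+// Scoped holder for a class's signature, source file name and source debug
+// extension; the JVMTI-allocated strings are deallocated when it goes out of scope.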
+class ScopedClassInfo {
+ public:
+ ScopedClassInfo(jvmtiEnv* jvmtienv, jclass c)
+ : jvmtienv_(jvmtienv),
+ class_(c),
+ name_(nullptr),
+ generic_(nullptr),
+ file_(nullptr),
+ debug_ext_(nullptr) {}
+
+ ~ScopedClassInfo() {
+ if (class_ != nullptr) {
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(file_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(debug_ext_));
+ }
+ }
+
+ bool Init() {
+ if (class_ == nullptr) {
+ name_ = const_cast<char*>("<NONE>");
+ generic_ = const_cast<char*>("<NONE>");
+ return true;
+ } else {
+ jvmtiError ret1 = jvmtienv_->GetSourceFileName(class_, &file_);
+ jvmtiError ret2 = jvmtienv_->GetSourceDebugExtension(class_, &debug_ext_);
+ return jvmtienv_->GetClassSignature(class_, &name_, &generic_) == JVMTI_ERROR_NONE &&
+ ret1 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
+ ret1 != JVMTI_ERROR_INVALID_CLASS &&
+ ret2 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
+ ret2 != JVMTI_ERROR_INVALID_CLASS;
+ }
+ }
+
+ jclass GetClass() const {
+ return class_;
+ }
+ const char* GetName() const {
+ return name_;
+ }
+ const char* GetGeneric() const {
+ return generic_;
+ }
+ const char* GetSourceDebugExtension() const {
+ if (debug_ext_ == nullptr) {
+ return "<UNKNOWN_SOURCE_DEBUG_EXTENSION>";
+ } else {
+ return debug_ext_;
+ }
+ }
+ const char* GetSourceFileName() const {
+ if (file_ == nullptr) {
+ return "<UNKNOWN_FILE>";
+ } else {
+ return file_;
+ }
+ }
+
+ private:
+ jvmtiEnv* jvmtienv_;
+ jclass class_;
+ char* name_;
+ char* generic_;
+ char* file_;
+ char* debug_ext_;
+};
+
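+// Scoped holder for a method's name, signature, declaring-class info and first
+// source line; releases the JVMTI-allocated strings and the declaring-class local
+// reference in the destructor.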
+class ScopedMethodInfo {
+ public:
+ ScopedMethodInfo(jvmtiEnv* jvmtienv, JNIEnv* env, jmethodID m)
+ : jvmtienv_(jvmtienv),
+ env_(env),
+ method_(m),
+ declaring_class_(nullptr),
+ class_info_(nullptr),
+ name_(nullptr),
+ signature_(nullptr),
+ generic_(nullptr),
+ first_line_(-1) {}
+
+ ~ScopedMethodInfo() {
+ env_->DeleteLocalRef(declaring_class_);
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(signature_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+ }
+
+ bool Init() {
+ if (jvmtienv_->GetMethodDeclaringClass(method_, &declaring_class_) != JVMTI_ERROR_NONE) {
+ return false;
+ }
+ class_info_.reset(new ScopedClassInfo(jvmtienv_, declaring_class_));
+ jint nlines;
+ jvmtiLineNumberEntry* lines;
+ jvmtiError err = jvmtienv_->GetLineNumberTable(method_, &nlines, &lines);
+ if (err == JVMTI_ERROR_NONE) {
+ if (nlines > 0) {
+ first_line_ = lines[0].line_number;
+ }
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(lines));
+ } else if (err != JVMTI_ERROR_ABSENT_INFORMATION &&
+ err != JVMTI_ERROR_NATIVE_METHOD) {
+ return false;
+ }
+ return class_info_->Init() &&
+ (jvmtienv_->GetMethodName(method_, &name_, &signature_, &generic_) == JVMTI_ERROR_NONE);
+ }
+
+ const ScopedClassInfo& GetDeclaringClassInfo() const {
+ return *class_info_;
+ }
+
+ jclass GetDeclaringClass() const {
+ return declaring_class_;
+ }
+
+ const char* GetName() const {
+ return name_;
+ }
+
+ const char* GetSignature() const {
+ return signature_;
+ }
+
+ const char* GetGeneric() const {
+ return generic_;
+ }
+
+ jint GetFirstLine() const {
+ return first_line_;
+ }
+
+ private:
+ jvmtiEnv* jvmtienv_;
+ JNIEnv* env_;
+ jmethodID method_;
+ jclass declaring_class_;
+ std::unique_ptr<ScopedClassInfo> class_info_;
+ char* name_;
+ char* signature_;
+ char* generic_;
+ jint first_line_;
+
+ friend std::ostream& operator<<(std::ostream &os, ScopedMethodInfo const& m);
+};
+
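+// Scoped holder for a field's name, type signature and declaring-class info; the
+// JVMTI-allocated strings are deallocated on destruction.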
+class ScopedFieldInfo {
+ public:
+ ScopedFieldInfo(jvmtiEnv* jvmtienv, jclass field_klass, jfieldID field)
+ : jvmtienv_(jvmtienv),
+ declaring_class_(field_klass),
+ field_(field),
+ class_info_(nullptr),
+ name_(nullptr),
+ type_(nullptr),
+ generic_(nullptr) {}
+
+ ~ScopedFieldInfo() {
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(type_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+ }
+
+ bool Init() {
+ class_info_.reset(new ScopedClassInfo(jvmtienv_, declaring_class_));
+ return class_info_->Init() &&
+ (jvmtienv_->GetFieldName(
+ declaring_class_, field_, &name_, &type_, &generic_) == JVMTI_ERROR_NONE);
+ }
+
+ const ScopedClassInfo& GetDeclaringClassInfo() const {
+ return *class_info_;
+ }
+
+ jclass GetDeclaringClass() const {
+ return declaring_class_;
+ }
+
+ const char* GetName() const {
+ return name_;
+ }
+
+ const char* GetType() const {
+ return type_;
+ }
+
+ const char* GetGeneric() const {
+ return generic_;
+ }
+
+ private:
+ jvmtiEnv* jvmtienv_;
+ jclass declaring_class_;
+ jfieldID field_;
+ std::unique_ptr<ScopedClassInfo> class_info_;
+ char* name_;
+ char* type_;
+ char* generic_;
+
+ friend std::ostream& operator<<(std::ostream &os, ScopedFieldInfo const& m);
+};
+
+std::ostream& operator<<(std::ostream &os, const ScopedFieldInfo* m) {
+ return os << *m;
+}
+
+std::ostream& operator<<(std::ostream &os, ScopedFieldInfo const& m) {
+ return os << m.GetDeclaringClassInfo().GetName() << "->" << m.GetName()
+ << ":" << m.GetType();
+}
+
+std::ostream& operator<<(std::ostream &os, const ScopedMethodInfo* m) {
+ return os << *m;
+}
+
+std::ostream& operator<<(std::ostream &os, ScopedMethodInfo const& m) {
+ return os << m.GetDeclaringClassInfo().GetName() << "->" << m.GetName() << m.GetSignature()
+ << " (source: " << m.GetDeclaringClassInfo().GetSourceFileName() << ":"
+ << m.GetFirstLine() << ")";
+}
+
static void doJvmtiMethodBind(jvmtiEnv* jvmtienv,
JNIEnv* env,
jthread thread,
@@ -94,38 +351,14 @@ static void doJvmtiMethodBind(jvmtiEnv* jvmtienv,
void* address,
/*out*/void** out_address) {
*out_address = address;
- jvmtiThreadInfo info;
- if (thread == nullptr) {
- info.name = const_cast<char*>("<NULLPTR>");
- } else if (jvmtienv->GetThreadInfo(thread, &info) != JVMTI_ERROR_NONE) {
- info.name = const_cast<char*>("<UNKNOWN THREAD>");
- }
- char *fname, *fsig, *fgen;
- char *cname, *cgen;
- jclass klass = nullptr;
- if (jvmtienv->GetMethodDeclaringClass(m, &klass) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get method declaring class!";
- return;
- }
- if (jvmtienv->GetMethodName(m, &fname, &fsig, &fgen) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get method name!";
- env->DeleteLocalRef(klass);
+ ScopedThreadInfo thread_info(jvmtienv, env, thread);
+ ScopedMethodInfo method_info(jvmtienv, env, m);
+ if (!method_info.Init()) {
+ LOG(ERROR) << "Unable to get method info!";
return;
}
- if (jvmtienv->GetClassSignature(klass, &cname, &cgen) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get class name!";
- env->DeleteLocalRef(klass);
- return;
- }
- LOG(INFO) << "Loading native method \"" << cname << "->" << fname << fsig << "\". Thread is \""
- << info.name << "\"";
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cname));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cgen));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fname));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fsig));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fgen));
- env->DeleteLocalRef(klass);
- return;
+ LOG(INFO) << "Loading native method \"" << method_info << "\". Thread is "
+ << thread_info.GetName();
}
static std::string GetName(jvmtiEnv* jvmtienv, JNIEnv* jnienv, jobject obj) {
@@ -191,86 +424,166 @@ static std::string GetValOf(jvmtiEnv* env, JNIEnv* jnienv, std::string type, jva
}
}
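+// JVMTI FieldAccess callback: logs the accessed field, the type of the receiver
+// object, the method and bytecode location of the access, and the current thread.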
+void JNICALL FieldAccessHook(jvmtiEnv* jvmtienv,
+ JNIEnv* env,
+ jthread thread,
+ jmethodID m,
+ jlocation location,
+ jclass field_klass,
+ jobject object,
+ jfieldID field) {
+ ScopedThreadInfo info(jvmtienv, env, thread);
+ ScopedMethodInfo method_info(jvmtienv, env, m);
+ ScopedFieldInfo field_info(jvmtienv, field_klass, field);
+ jclass oklass = (object != nullptr) ? env->GetObjectClass(object) : nullptr;
+ ScopedClassInfo obj_class_info(jvmtienv, oklass);
+ if (!method_info.Init() || !field_info.Init() || !obj_class_info.Init()) {
+ LOG(ERROR) << "Unable to get callback info!";
+ return;
+ }
+ LOG(INFO) << "ACCESS field \"" << field_info << "\" on object of "
+ << "type \"" << obj_class_info.GetName() << "\" in method \"" << method_info
+ << "\" at location 0x" << std::hex << location << ". Thread is \""
+ << info.GetName() << "\".";
+ env->DeleteLocalRef(oklass);
+}
+
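+// Renders a jvalue for logging, keyed by its JNI type signature character:
+// 'L' prints the object's class (or "null"), 'Z' prints true/false, and the
+// remaining primitive types are printed numerically.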
+static std::string PrintJValue(jvmtiEnv* jvmtienv, JNIEnv* env, char type, jvalue new_value) {
+ std::ostringstream oss;
+ switch (type) {
+ case 'L': {
+ jobject nv = new_value.l;
+ if (nv == nullptr) {
+ oss << "\"null\"";
+ } else {
+ jclass nv_klass = env->GetObjectClass(nv);
+ ScopedClassInfo nv_class_info(jvmtienv, nv_klass);
+ if (!nv_class_info.Init()) {
+ oss << "with unknown type";
+ } else {
+ oss << "of type \"" << nv_class_info.GetName() << "\"";
+ }
+ env->DeleteLocalRef(nv_klass);
+ }
+ break;
+ }
+ case 'Z': {
+ if (new_value.z) {
+ oss << "true";
+ } else {
+ oss << "false";
+ }
+ break;
+ }
+#define SEND_VALUE(chr, sym, type) \
+ case chr: { \
+ oss << static_cast<type>(new_value.sym); \
+ break; \
+ }
+ SEND_VALUE('B', b, int8_t);
+ SEND_VALUE('C', c, uint16_t);
+ SEND_VALUE('S', s, int16_t);
+ SEND_VALUE('I', i, int32_t);
+ SEND_VALUE('J', j, int64_t);
+ SEND_VALUE('F', f, float);
+ SEND_VALUE('D', d, double);
+#undef SEND_VALUE
+ }
+ return oss.str();
+}
+
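+// JVMTI FieldModification callback: like FieldAccessHook, but also logs the value
+// being written, formatted by PrintJValue.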
+void JNICALL FieldModificationHook(jvmtiEnv* jvmtienv,
+ JNIEnv* env,
+ jthread thread,
+ jmethodID m,
+ jlocation location,
+ jclass field_klass,
+ jobject object,
+ jfieldID field,
+ char type,
+ jvalue new_value) {
+ ScopedThreadInfo info(jvmtienv, env, thread);
+ ScopedMethodInfo method_info(jvmtienv, env, m);
+ ScopedFieldInfo field_info(jvmtienv, field_klass, field);
+ jclass oklass = (object != nullptr) ? env->GetObjectClass(object) : nullptr;
+ ScopedClassInfo obj_class_info(jvmtienv, oklass);
+ if (!method_info.Init() || !field_info.Init() || !obj_class_info.Init()) {
+ LOG(ERROR) << "Unable to get callback info!";
+ return;
+ }
+ LOG(INFO) << "MODIFY field \"" << field_info << "\" on object of "
+ << "type \"" << obj_class_info.GetName() << "\" in method \"" << method_info
+ << "\" at location 0x" << std::hex << location << std::dec << ". New value is "
+ << PrintJValue(jvmtienv, env, type, new_value) << ". Thread is \""
+ << info.GetName() << "\".";
+ env->DeleteLocalRef(oklass);
+}
void JNICALL MethodExitHook(jvmtiEnv* jvmtienv,
JNIEnv* env,
jthread thread,
jmethodID m,
jboolean was_popped_by_exception,
jvalue val) {
- jvmtiThreadInfo info;
- if (thread == nullptr) {
- info.name = const_cast<char*>("<NULLPTR>");
- } else if (jvmtienv->GetThreadInfo(thread, &info) != JVMTI_ERROR_NONE) {
- // LOG(WARNING) << "Unable to get thread info!";
- info.name = const_cast<char*>("<UNKNOWN THREAD>");
- }
- char *fname, *fsig, *fgen;
- char *cname, *cgen;
- jclass klass = nullptr;
- if (jvmtienv->GetMethodDeclaringClass(m, &klass) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get method declaring class!";
+ ScopedThreadInfo info(jvmtienv, env, thread);
+ ScopedMethodInfo method_info(jvmtienv, env, m);
+ if (!method_info.Init()) {
+ LOG(ERROR) << "Unable to get method info!";
return;
}
- if (jvmtienv->GetMethodName(m, &fname, &fsig, &fgen) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get method name!";
- env->DeleteLocalRef(klass);
- return;
- }
- if (jvmtienv->GetClassSignature(klass, &cname, &cgen) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get class name!";
- env->DeleteLocalRef(klass);
- return;
- }
- std::string type(fsig);
+ std::string type(method_info.GetSignature());
type = type.substr(type.find(")") + 1);
std::string out_val(was_popped_by_exception ? "" : GetValOf(jvmtienv, env, type, val));
- LOG(INFO) << "Leaving method \"" << cname << "->" << fname << fsig << "\". Thread is \""
- << info.name << "\"." << std::endl
+ LOG(INFO) << "Leaving method \"" << method_info << "\". Thread is \"" << info.GetName() << "\"."
+ << std::endl
<< " Cause: " << (was_popped_by_exception ? "exception" : "return ")
<< out_val << ".";
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cname));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cgen));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fname));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fsig));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fgen));
- env->DeleteLocalRef(klass);
}
void JNICALL MethodEntryHook(jvmtiEnv* jvmtienv,
JNIEnv* env,
jthread thread,
jmethodID m) {
- jvmtiThreadInfo info;
- if (thread == nullptr) {
- info.name = const_cast<char*>("<NULLPTR>");
- } else if (jvmtienv->GetThreadInfo(thread, &info) != JVMTI_ERROR_NONE) {
- info.name = const_cast<char*>("<UNKNOWN THREAD>");
- }
- char *fname, *fsig, *fgen;
- char *cname, *cgen;
- jclass klass = nullptr;
- if (jvmtienv->GetMethodDeclaringClass(m, &klass) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get method declaring class!";
+ ScopedThreadInfo info(jvmtienv, env, thread);
+ ScopedMethodInfo method_info(jvmtienv, env, m);
+ if (!method_info.Init()) {
+ LOG(ERROR) << "Unable to get method info!";
return;
}
- if (jvmtienv->GetMethodName(m, &fname, &fsig, &fgen) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get method name!";
- env->DeleteLocalRef(klass);
- return;
+ LOG(INFO) << "Entering method \"" << method_info << "\". Thread is \"" << info.GetName() << "\"";
+}
+
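+// JVMTI ClassPrepare callback: with field stressing enabled, puts access and
+// modification watches on every field of the newly prepared class; with trace
+// stressing enabled, logs the prepared class and the preparing thread.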
+void JNICALL ClassPrepareHook(jvmtiEnv* jvmtienv,
+ JNIEnv* env,
+ jthread thread,
+ jclass klass) {
+ StressData* data = nullptr;
+ CHECK_EQ(jvmtienv->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&data)),
+ JVMTI_ERROR_NONE);
+ if (data->field_stress) {
+ jint nfields;
+ jfieldID* fields;
+ if (jvmtienv->GetClassFields(klass, &nfields, &fields) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to get a classes fields!";
+ return;
+ }
+ for (jint i = 0; i < nfields; i++) {
+ jfieldID f = fields[i];
+ // Ignore errors
+ jvmtienv->SetFieldAccessWatch(klass, f);
+ jvmtienv->SetFieldModificationWatch(klass, f);
+ }
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fields));
}
- if (jvmtienv->GetClassSignature(klass, &cname, &cgen) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get class name!";
- env->DeleteLocalRef(klass);
- return;
+ if (data->trace_stress) {
+ ScopedThreadInfo info(jvmtienv, env, thread);
+ ScopedClassInfo class_info(jvmtienv, klass);
+ if (!class_info.Init()) {
+ LOG(ERROR) << "Unable to get class info!";
+ return;
+ }
+ LOG(INFO) << "Prepared class \"" << class_info.GetName() << "\". Thread is \""
+ << info.GetName() << "\"";
}
- LOG(INFO) << "Entering method \"" << cname << "->" << fname << fsig << "\". Thread is \""
- << info.name << "\"";
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cname));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cgen));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fname));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fsig));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fgen));
- env->DeleteLocalRef(klass);
}
// The hook we are using.
@@ -323,7 +636,7 @@ static std::string GetOption(const std::string& in) {
}
// Options are
-// jvmti-stress,[redefine,${DEXTER_BINARY},${TEMP_FILE_1},${TEMP_FILE_2},][trace]
+// jvmti-stress,[redefine,${DEXTER_BINARY},${TEMP_FILE_1},${TEMP_FILE_2},][trace,][field]
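+// For illustration only (the agent library name and file paths depend on the build),
+// the agent might be attached with an options string such as:
+//   -agentpath:libtistress.so=jvmti-stress,trace,field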
static void ReadOptions(StressData* data, char* options) {
std::string ops(options);
CHECK_EQ(GetOption(ops), "jvmti-stress") << "Options should start with jvmti-stress";
@@ -332,6 +645,8 @@ static void ReadOptions(StressData* data, char* options) {
std::string cur = GetOption(ops);
if (cur == "trace") {
data->trace_stress = true;
+ } else if (cur == "field") {
+ data->field_stress = true;
} else if (cur == "redefine") {
data->redefine_stress = true;
ops = AdvanceOption(ops);
@@ -372,18 +687,54 @@ static void JNICALL PerformFinalSetupVMInit(jvmtiEnv *jvmti_env,
jni_env->DeleteLocalRef(klass);
data->vm_class_loader_initialized = true;
}
- if (data->trace_stress) {
- if (jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
- JVMTI_EVENT_METHOD_ENTRY,
- nullptr) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to enable JVMTI_EVENT_METHOD_ENTRY event!";
+}
+
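+// Enables the CLASS_PREPARE event (so classes prepared later are watched via
+// ClassPrepareHook) and then sets access/modification watches on every field of
+// the classes that are already loaded and prepared.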
+static bool WatchAllFields(JavaVM* vm, jvmtiEnv* jvmti) {
+ if (jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_CLASS_PREPARE,
+ nullptr) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Couldn't set prepare event!";
+ return false;
+ }
+ // TODO We really shouldn't need to do this step here.
+ jint nklass;
+ jclass* klasses;
+ if (jvmti->GetLoadedClasses(&nklass, &klasses) != JVMTI_ERROR_NONE) {
+ LOG(WARNING) << "Couldn't get loaded classes! Ignoring.";
+ return true;
+ }
+ JNIEnv* jni = nullptr;
+ if (vm->GetEnv(reinterpret_cast<void**>(&jni), JNI_VERSION_1_6)) {
+ LOG(ERROR) << "Unable to get jni env. Ignoring and potentially leaking jobjects.";
+ return false;
+ }
+ for (jint i = 0; i < nklass; i++) {
+ jclass k = klasses[i];
+ ScopedClassInfo sci(jvmti, k);
+ if (sci.Init()) {
+ LOG(INFO) << "NOTE: class " << sci.GetName() << " already loaded.";
}
- if (jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
- JVMTI_EVENT_METHOD_EXIT,
- nullptr) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to enable JVMTI_EVENT_METHOD_EXIT event!";
+ jint nfields;
+ // Initialized so the unconditional Deallocate below stays safe when GetClassFields
+ // fails with JVMTI_ERROR_CLASS_NOT_PREPARED.
+ jfieldID* fields = nullptr;
+ jvmtiError err = jvmti->GetClassFields(k, &nfields, &fields);
+ if (err == JVMTI_ERROR_NONE) {
+ for (jint j = 0; j < nfields; j++) {
+ jfieldID f = fields[j];
+ if (jvmti->SetFieldModificationWatch(k, f) != JVMTI_ERROR_NONE ||
+ jvmti->SetFieldAccessWatch(k, f) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to set watches on a field.";
+ return false;
+ }
+ }
+ } else if (err != JVMTI_ERROR_CLASS_NOT_PREPARED) {
+ LOG(ERROR) << "Unexpected error getting class fields!";
+ return false;
}
+ jvmti->Deallocate(reinterpret_cast<unsigned char*>(fields));
+ jni->DeleteLocalRef(k);
}
+ jvmti->Deallocate(reinterpret_cast<unsigned char*>(klasses));
+ return true;
}
extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm,
@@ -422,17 +773,14 @@ extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm,
cb.VMInit = PerformFinalSetupVMInit;
cb.MethodEntry = MethodEntryHook;
cb.MethodExit = MethodExitHook;
+ cb.FieldAccess = FieldAccessHook;
+ cb.FieldModification = FieldModificationHook;
+ cb.ClassPrepare = ClassPrepareHook;
if (jvmti->SetEventCallbacks(&cb, sizeof(cb)) != JVMTI_ERROR_NONE) {
LOG(ERROR) << "Unable to set class file load hook cb!";
return 1;
}
if (jvmti->SetEventNotificationMode(JVMTI_ENABLE,
- JVMTI_EVENT_NATIVE_METHOD_BIND,
- nullptr) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to enable JVMTI_EVENT_NATIVE_METHOD_BIND event!";
- return 1;
- }
- if (jvmti->SetEventNotificationMode(JVMTI_ENABLE,
JVMTI_EVENT_VM_INIT,
nullptr) != JVMTI_ERROR_NONE) {
LOG(ERROR) << "Unable to enable JVMTI_EVENT_VM_INIT event!";
@@ -446,6 +794,49 @@ extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm,
return 1;
}
}
+ if (data->trace_stress) {
+ if (jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_CLASS_PREPARE,
+ nullptr) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to enable CLASS_PREPARE event!";
+ return 1;
+ }
+ if (jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_NATIVE_METHOD_BIND,
+ nullptr) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to enable JVMTI_EVENT_NATIVE_METHOD_BIND event!";
+ return 1;
+ }
+ if (jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_METHOD_ENTRY,
+ nullptr) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to enable JVMTI_EVENT_METHOD_ENTRY event!";
+ return 1;
+ }
+ if (jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_METHOD_EXIT,
+ nullptr) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to enable JVMTI_EVENT_METHOD_EXIT event!";
+ return 1;
+ }
+ }
+ if (data->field_stress) {
+ if (jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_FIELD_MODIFICATION,
+ nullptr) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to enable FIELD_MODIFICATION event!";
+ return 1;
+ }
+ if (jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_FIELD_ACCESS,
+ nullptr) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to enable FIELD_ACCESS event!";
+ return 1;
+ }
+ if (!WatchAllFields(vm, jvmti)) {
+ return 1;
+ }
+ }
return 0;
}
diff --git a/tools/bootjars.sh b/tools/bootjars.sh
new file mode 100755
index 0000000000..bb47e55183
--- /dev/null
+++ b/tools/bootjars.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This script lists the boot jars that an ART bootclasspath would need.
+#
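+# Example (hypothetical invocation; output depends on the build configuration):
+#   ./art/tools/bootjars.sh --target --core --path
+#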
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+TOP="$DIR/../.."
+
+source "${TOP}/build/envsetup.sh" >&/dev/null # import get_build_var
+
+selected_env_var=
+core_jars_only=n
+print_file_path=n
+mode=target
+while true; do
+ case $1 in
+ --help)
+ echo "Usage: $0 [--core] [--path] [--host|--target] [--help]"
+ exit 0
+ ;;
+ --core)
+ core_jars_only=y
+ ;;
+ --path)
+ print_file_path=y
+ ;;
+ --host)
+ mode=host
+ ;;
+ --target)
+ mode=target
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+done
+
+if [[ $mode == target ]]; then
+ if [[ $core_jars_only == y ]]; then
+ selected_env_var=TARGET_CORE_JARS
+ else
+ selected_env_var=PRODUCT_BOOT_JARS
+ fi
+ intermediates_env_var=TARGET_OUT_COMMON_INTERMEDIATES
+elif [[ $mode == host ]]; then
+ if [[ $core_jars_only == n ]]; then
+ echo "Error: --host does not have non-core boot jars, --core required" >&2
+ exit 1
+ fi
+ selected_env_var=HOST_CORE_JARS
+ intermediates_env_var=HOST_OUT_COMMON_INTERMEDIATES
+fi
+
+boot_jars_list=$(get_build_var "$selected_env_var")
+
+# Print only the list of boot jars.
+if [[ $print_file_path == n ]]; then
+ echo $boot_jars_list
+ exit 0
+fi
+
+# Print the absolute file path to the classes.jar of each boot jar in the intermediates directory.
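+# e.g. (illustrative only; module names and the out directory depend on the build):
+#   <android root>/out/target/common/obj/JAVA_LIBRARIES/core-oj_intermediates/classes.jar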
+intermediates_dir=$(get_build_var "$intermediates_env_var")
+
+# turn the file path into an absolute path
+intermediates_dir=$(readlink -f $TOP/$intermediates_dir)
+
+for jar in $boot_jars_list; do
+ echo "$intermediates_dir/JAVA_LIBRARIES/${jar}_intermediates/classes.jar"
+done
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 963efa49a5..bf7692ab15 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -30,8 +30,13 @@ else
out_dir=${OUT_DIR}
fi
+using_jack=true
+if [[ $ANDROID_COMPILE_WITH_JACK == false ]]; then
+ using_jack=false
+fi
+
java_libraries_dir=${out_dir}/target/common/obj/JAVA_LIBRARIES
-common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target ${out_dir}/host/linux-x86/bin/jack"
+common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target"
mode="target"
j_arg="-j$(nproc)"
showcommands=
@@ -58,6 +63,10 @@ while true; do
fi
done
+if $using_jack; then
+ common_targets="$common_targets ${out_dir}/host/linux-x86/bin/jack"
+fi
+
if [[ $mode == "host" ]]; then
make_command="make $j_arg $showcommands build-art-host-tests $common_targets"
make_command+=" ${out_dir}/host/linux-x86/lib/libjavacoretests.so "
diff --git a/tools/desugar.sh b/tools/desugar.sh
new file mode 100755
index 0000000000..ae7bf0a48b
--- /dev/null
+++ b/tools/desugar.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Calls desugar.jar with the --bootclasspath_entry values passed in automatically.
+# (This avoids having to manually set a boot class path).
+#
+#
+# Script-specific args:
+# --mode=[host|target]: Select between host or target bootclasspath (default target).
+# --core-only: Use only "core" bootclasspath (e.g. do not include framework).
+# --show-commands: Print the desugar command being executed.
+# --help: Print above list of args.
+#
+# All other args are forwarded to desugar.jar
+#
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+TOP=$DIR/../..
+
+pushd "$TOP" >/dev/null # back to android root.
+
+out=${OUT_DIR:-out}
+desugar_jar=$out/host/linux-x86/framework/desugar.jar
+
+if ! [[ -f $desugar_jar ]]; then
+ echo "Error: Missing $desugar_jar; did you do a build?" >&2
+ exit 1
+fi
+
+desugar_jar=$(readlink -f "$desugar_jar") # absolute path to desugar jar
+popd >/dev/null
+
+bootjars_args=
+mode=target
+showcommands=n
+while true; do
+ case $1 in
+ --help)
+ echo "Usage: $0 [--mode=host|target] [--core-only] [--show-commands] <desugar args>"
+ exit 0
+ ;;
+ --mode=host)
+ bootjars_args="$bootjars_args --host"
+ ;;
+ --mode=target)
+ bootjars_args="$bootjars_args --target"
+ ;;
+ --core-only)
+ bootjars_args="$bootjars_args --core"
+ ;;
+ --show-commands)
+ showcommands=y
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+done
+
+desugar_args=(--min_sdk_version=10000)
+boot_class_path_list=$($TOP/art/tools/bootjars.sh $bootjars_args --path)
+
+for path in $boot_class_path_list; do
+ desugar_args+=(--bootclasspath_entry="$path")
+done
+
+if [[ -z "$boot_class_path_list" ]]; then
+ echo "FATAL: Missing bootjars.sh file path list" >&2
+ exit 1
+fi
+
+if [[ $showcommands == y ]]; then
+ echo java -jar "$desugar_jar" "${desugar_args[@]}" "$@"
+fi
+
+java -jar "$desugar_jar" "${desugar_args[@]}" "$@"
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 0c58585701..f340fa1d25 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -1,6 +1,6 @@
/*
* This file contains expectations for ART's buildbot. The purpose of this file is
- * to temporary and quickly list failing tests and not break the bots, until the
+ * to temporarily list failing tests and not break the bots, until the
* libcore expectation files get properly updated. The script that uses this file
* is art/tools/run-libcore-tests.sh.
*
diff --git a/tools/libcore_gcstress_debug_failures.txt b/tools/libcore_gcstress_debug_failures.txt
new file mode 100644
index 0000000000..b4c6f2bda9
--- /dev/null
+++ b/tools/libcore_gcstress_debug_failures.txt
@@ -0,0 +1,16 @@
+/*
+ * This file contains expectations for ART's buildbot when running gcstress in debug mode.
+ * The script that uses this file is art/tools/run-libcore-tests.sh.
+ */
+
+[
+{
+ description: "Timeouts on target with gcstress and debug.",
+ result: EXEC_FAILED,
+ modes: [device],
+ names: ["libcore.icu.TransliteratorTest#testAll",
+ "libcore.java.lang.ref.ReferenceQueueTest#testRemoveWithDelayedResultAndTimeout",
+ "libcore.java.util.TimeZoneTest#testSetDefaultDeadlock",
+ "org.apache.harmony.tests.java.util.TimerTest#testThrowingTaskKillsTimerThread"]
+}
+]
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index d48d8579be..f7427676eb 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -23,10 +23,24 @@ if [ -z "$ANDROID_HOST_OUT" ] ; then
ANDROID_HOST_OUT=${OUT_DIR-$ANDROID_BUILD_TOP/out}/host/linux-x86
fi
+using_jack=true
+if [[ $ANDROID_COMPILE_WITH_JACK == false ]]; then
+ using_jack=false
+fi
+
+function jlib_suffix {
+ local str=$1
+ local suffix="jar"
+ if $using_jack; then
+ suffix="jack"
+ fi
+ echo "$str.$suffix"
+}
+
# Jar containing all the tests.
-test_jack=${ANDROID_HOST_OUT}/../common/obj/JAVA_LIBRARIES/apache-harmony-jdwp-tests-hostdex_intermediates/classes.jack
+test_jar=$(jlib_suffix "${ANDROID_HOST_OUT}/../common/obj/JAVA_LIBRARIES/apache-harmony-jdwp-tests-hostdex_intermediates/classes")
-if [ ! -f $test_jack ]; then
+if [ ! -f $test_jar ]; then
echo "Before running, you must build jdwp tests and vogar:" \
"make apache-harmony-jdwp-tests-hostdex vogar"
exit 1
@@ -147,6 +161,12 @@ if [[ $verbose == "yes" ]]; then
art_debugee="$art_debugee -verbose:jdwp"
fi
+if $using_jack; then
+ toolchain_args="--toolchain jack --language JN --jack-arg -g"
+else
+ toolchain_args="--toolchain jdk --language CUR"
+fi
+
# Run the tests using vogar.
vogar $vm_command \
$vm_args \
@@ -160,10 +180,9 @@ vogar $vm_command \
--vm-arg -Djpda.settings.waitingTime=$jdwp_test_timeout \
--vm-arg -Djpda.settings.transportAddress=127.0.0.1:55107 \
--vm-arg -Djpda.settings.debuggeeJavaPath="$art_debugee $image $debuggee_args" \
- --classpath $test_jack \
- --toolchain jack --language JN \
+ --classpath "$test_jar" \
+ $toolchain_args \
--vm-arg -Xcompiler-option --vm-arg --debuggable \
- --jack-arg -g \
$test
vogar_exit_status=$?
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index b860a6273f..8b3df3a28c 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -25,10 +25,26 @@ else
JAVA_LIBRARIES=${ANDROID_PRODUCT_OUT}/../../common/obj/JAVA_LIBRARIES
fi
+using_jack=true
+if [[ $ANDROID_COMPILE_WITH_JACK == false ]]; then
+ using_jack=false
+fi
+
+function classes_jar_path {
+ local var="$1"
+ local suffix="jar"
+
+ if $using_jack; then
+ suffix="jack"
+ fi
+
+ echo "${JAVA_LIBRARIES}/${var}_intermediates/classes.${suffix}"
+}
+
function cparg {
for var
do
- printf -- "--classpath ${JAVA_LIBRARIES}/${var}_intermediates/classes.jack ";
+ printf -- "--classpath $(classes_jar_path "$var") ";
done
}
@@ -36,7 +52,7 @@ DEPS="core-tests jsr166-tests mockito-target"
for lib in $DEPS
do
- if [ ! -f "${JAVA_LIBRARIES}/${lib}_intermediates/classes.jack" ]; then
+ if [[ ! -f "$(classes_jar_path "$lib")" ]]; then
echo "${lib} is missing. Before running, you must run art/tools/buildbot-build.sh"
exit 1
fi
@@ -87,6 +103,9 @@ working_packages=("dalvik.system"
# "org.apache.harmony.security"
vogar_args=$@
+gcstress=false
+debug=false
+
while true; do
if [[ "$1" == "--mode=device" ]]; then
vogar_args="$vogar_args --device-dir=/data/local/tmp"
@@ -109,6 +128,10 @@ while true; do
# Remove the --debug from the arguments.
vogar_args=${vogar_args/$1}
vogar_args="$vogar_args --vm-arg -XXlib:libartd.so"
+ debug=true
+ shift
+ elif [[ "$1" == "-Xgc:gcstress" ]]; then
+ gcstress=true
shift
elif [[ "$1" == "" ]]; then
break
@@ -122,8 +145,12 @@ done
# the default timeout.
vogar_args="$vogar_args --timeout 480"
-# Use Jack with "1.8" configuration.
-vogar_args="$vogar_args --toolchain jack --language JO"
+# Switch between using jack or javac+desugar+dx
+if $using_jack; then
+ vogar_args="$vogar_args --toolchain jack --language JO"
+else
+ vogar_args="$vogar_args --toolchain jdk --language CUR"
+fi
# JIT settings.
if $use_jit; then
@@ -131,6 +158,11 @@ if $use_jit; then
fi
vogar_args="$vogar_args --vm-arg -Xusejit:$use_jit"
+# gcstress combined with debug may lead to timeouts, so we need a dedicated expectations file for that combination.
+if $gcstress && $debug; then
+ expectations="$expectations --expectations art/tools/libcore_gcstress_debug_failures.txt"
+fi
+
# Run the tests using vogar.
echo "Running tests for the following test packages:"
echo ${working_packages[@]} | tr " " "\n"