-rw-r--r--  build/Android.gtest.mk  11
-rw-r--r--  compiler/Android.bp  1
-rw-r--r--  compiler/common_compiler_test.cc  4
-rw-r--r--  compiler/compiled_method.h  5
-rw-r--r--  compiler/dex/dex_to_dex_decompiler.cc  2
-rw-r--r--  compiler/driver/compiler_driver.cc  114
-rw-r--r--  compiler/driver/compiler_driver_test.cc  5
-rw-r--r--  compiler/driver/compiler_options.cc  7
-rw-r--r--  compiler/driver/compiler_options.h  7
-rw-r--r--  compiler/elf_writer.h  1
-rw-r--r--  compiler/elf_writer_quick.cc  12
-rw-r--r--  compiler/image_test.cc  1
-rw-r--r--  compiler/image_writer.cc  5
-rw-r--r--  compiler/jit/jit_compiler.cc  1
-rw-r--r--  compiler/oat_test.cc  24
-rw-r--r--  compiler/oat_writer.cc  8
-rw-r--r--  compiler/oat_writer.h  4
-rw-r--r--  compiler/optimizing/bounds_check_elimination_test.cc  2
-rw-r--r--  compiler/optimizing/code_generator_arm.cc  16
-rw-r--r--  compiler/optimizing/code_generator_arm.h  2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  14
-rw-r--r--  compiler/optimizing/code_generator_arm64.h  2
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  16
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h  2
-rw-r--r--  compiler/optimizing/code_generator_mips.cc  384
-rw-r--r--  compiler/optimizing/code_generator_mips.h  3
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  464
-rw-r--r--  compiler/optimizing/code_generator_mips64.h  2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc  15
-rw-r--r--  compiler/optimizing/code_generator_x86.h  3
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  15
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h  3
-rw-r--r--  compiler/optimizing/induction_var_range.cc  108
-rw-r--r--  compiler/optimizing/induction_var_range.h  16
-rw-r--r--  compiler/optimizing/induction_var_range_test.cc  112
-rw-r--r--  compiler/optimizing/inliner.cc  4
-rw-r--r--  compiler/optimizing/instruction_builder.cc  62
-rw-r--r--  compiler/optimizing/instruction_builder.h  4
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc  58
-rw-r--r--  compiler/optimizing/instruction_simplifier.h  8
-rw-r--r--  compiler/optimizing/load_store_elimination.cc  157
-rw-r--r--  compiler/optimizing/loop_optimization.cc  6
-rw-r--r--  compiler/optimizing/loop_optimization.h  9
-rw-r--r--  compiler/optimizing/loop_optimization_test.cc  2
-rw-r--r--  compiler/optimizing/nodes.h  19
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  30
-rw-r--r--  compiler/optimizing/sharpening.cc  34
-rw-r--r--  compiler/optimizing/sharpening.h  12
-rw-r--r--  compiler/optimizing/side_effects_analysis.h  6
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h  2
-rw-r--r--  compiler/utils/mips/assembler_mips.h  4
-rw-r--r--  compiler/utils/mips64/assembler_mips64.cc  148
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h  185
-rw-r--r--  compiler/verifier_deps_test.cc  23
-rw-r--r--  dex2oat/dex2oat.cc  25
-rw-r--r--  dex2oat/dex2oat_test.cc  97
-rw-r--r--  dexlayout/dex_ir.cc  29
-rw-r--r--  dexlayout/dexlayout_test.cc  104
-rw-r--r--  profman/profile_assistant_test.cc  219
-rw-r--r--  profman/profman.cc  242
-rw-r--r--  runtime/Android.bp  1
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S  1
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S  1
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S  1
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S  1
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S  1
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S  1
-rw-r--r--  runtime/base/arena_allocator.h  1
-rw-r--r--  runtime/base/mutex.cc  72
-rw-r--r--  runtime/base/mutex.h  18
-rw-r--r--  runtime/bytecode_utils.h (renamed from compiler/optimizing/bytecode_utils.h)  6
-rw-r--r--  runtime/class_linker.cc  69
-rw-r--r--  runtime/class_linker.h  3
-rw-r--r--  runtime/dex_file.cc  8
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc  55
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc  27
-rw-r--r--  runtime/gc/collector_type.h  2
-rw-r--r--  runtime/gc/gc_cause.cc  1
-rw-r--r--  runtime/gc/gc_cause.h  4
-rw-r--r--  runtime/gc/heap.cc  5
-rw-r--r--  runtime/gc/scoped_gc_critical_section.cc  8
-rw-r--r--  runtime/gc/space/region_space-inl.h  14
-rw-r--r--  runtime/gc/space/region_space.cc  33
-rw-r--r--  runtime/gc/space/region_space.h  2
-rw-r--r--  runtime/interpreter/interpreter_common.h  68
-rw-r--r--  runtime/interpreter/interpreter_intrinsics.cc  121
-rw-r--r--  runtime/interpreter/interpreter_intrinsics.h  37
-rw-r--r--  runtime/interpreter/mterp/mterp.cc  19
-rw-r--r--  runtime/jdwp/jdwp.h  42
-rw-r--r--  runtime/jdwp/jdwp_event.cc  22
-rw-r--r--  runtime/jdwp/jdwp_main.cc  6
-rw-r--r--  runtime/jdwp/object_registry.cc  5
-rw-r--r--  runtime/jdwp/object_registry.h  1
-rw-r--r--  runtime/jit/jit.cc  8
-rw-r--r--  runtime/jit/jit.h  8
-rw-r--r--  runtime/jit/jit_code_cache.cc  6
-rw-r--r--  runtime/jit/profile_compilation_info.cc  137
-rw-r--r--  runtime/jit/profile_compilation_info.h  40
-rw-r--r--  runtime/jit/profile_compilation_info_test.cc  139
-rw-r--r--  runtime/jit/profile_saver.cc  158
-rw-r--r--  runtime/jit/profile_saver.h  25
-rw-r--r--  runtime/mirror/class.h  6
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc  24
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc  46
-rw-r--r--  runtime/non_debuggable_classes.cc  8
-rw-r--r--  runtime/non_debuggable_classes.h  7
-rw-r--r--  runtime/oat_file_assistant.cc  110
-rw-r--r--  runtime/oat_file_assistant.h  9
-rw-r--r--  runtime/oat_file_manager.cc  271
-rw-r--r--  runtime/openjdkjvmti/OpenjdkJvmTi.cc  2
-rw-r--r--  runtime/openjdkjvmti/jvmti_weak_table-inl.h  389
-rw-r--r--  runtime/openjdkjvmti/jvmti_weak_table.h  219
-rw-r--r--  runtime/openjdkjvmti/object_tagging.cc  335
-rw-r--r--  runtime/openjdkjvmti/object_tagging.h  185
-rw-r--r--  runtime/openjdkjvmti/ti_heap.cc  38
-rw-r--r--  runtime/openjdkjvmti/ti_heap.h  3
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.cc  265
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.h  13
-rw-r--r--  runtime/openjdkjvmti/transform.cc  7
-rw-r--r--  runtime/runtime.cc  17
-rw-r--r--  runtime/runtime.h  5
-rw-r--r--  runtime/scoped_thread_state_change.h  2
-rw-r--r--  runtime/stack_map.h  2
-rw-r--r--  runtime/thread-inl.h  4
-rw-r--r--  test/154-gc-loop/src/Main.java  2
-rw-r--r--  test/530-checker-lse/src/Main.java  152
-rw-r--r--  test/532-checker-nonnull-arrayset/src/Main.java  4
-rw-r--r--  test/577-profile-foreign-dex/info.txt  1
-rw-r--r--  test/577-profile-foreign-dex/src-ex/OtherDex.java  17
-rw-r--r--  test/577-profile-foreign-dex/src/Main.java  175
-rw-r--r--  test/595-profile-saving/src/Main.java  12
-rw-r--r--  test/618-checker-induction/src/Main.java  18
-rw-r--r--  test/623-checker-loop-regressions/src/Main.java  45
-rw-r--r--  test/641-checker-arraycopy/build  24
-rw-r--r--  test/641-checker-arraycopy/expected.txt (renamed from test/577-profile-foreign-dex/expected.txt)  0
-rw-r--r--  test/641-checker-arraycopy/info.txt  2
-rw-r--r--  test/641-checker-arraycopy/src/Main.java  83
-rw-r--r--  test/641-iterations/expected.txt  1
-rw-r--r--  test/641-iterations/info.txt  1
-rw-r--r--  test/641-iterations/src/Main.java  73
-rw-r--r--  test/921-hello-failure/expected.txt  3
-rw-r--r--  test/921-hello-failure/src/Main.java  1
-rw-r--r--  test/921-hello-failure/src/Unmodifiable.java  52
-rw-r--r--  test/Android.run-test.mk  16
-rw-r--r--  test/ProfileTestMultiDex/Main.java  42
-rw-r--r--  test/ProfileTestMultiDex/Second.java  5
-rw-r--r--  test/ProfileTestMultiDex/main.jpp  22
-rw-r--r--  test/ProfileTestMultiDex/main.list  6
-rw-r--r--  test/VerifierDeps/MySub1SoftVerificationFailure.smali (renamed from test/577-profile-foreign-dex/run)  10
-rw-r--r--  test/VerifierDeps/MySub2SoftVerificationFailure.smali  16
-rw-r--r--  test/VerifierDepsMulti/MySoftVerificationFailure.smali  24
-rwxr-xr-x  test/etc/run-test-jar  17
-rw-r--r--  test/knownfailures.json  20
-rwxr-xr-x  test/run-test  2
-rwxr-xr-x  test/testrunner/run_build_test_target.py  58
-rw-r--r--  test/testrunner/target_config.py  257
-rwxr-xr-x  test/testrunner/testrunner.py  111
-rwxr-xr-x  tools/setup-buildbot-device.sh  24
158 files changed, 5102 insertions(+), 2218 deletions(-)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index b661e001c8..c27f8dbe4a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -75,8 +75,11 @@ $(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX)
$(call dexpreopt-remove-classes.dex,$@)
ART_TEST_GTEST_VerifierDeps_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifierDeps/*.smali))
+ART_TEST_GTEST_VerifierDepsMulti_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifierDepsMulti/*.smali))
ART_TEST_HOST_GTEST_VerifierDeps_DEX := $(dir $(ART_TEST_HOST_GTEST_Main_DEX))$(subst Main,VerifierDeps,$(basename $(notdir $(ART_TEST_HOST_GTEST_Main_DEX))))$(suffix $(ART_TEST_HOST_GTEST_Main_DEX))
ART_TEST_TARGET_GTEST_VerifierDeps_DEX := $(dir $(ART_TEST_TARGET_GTEST_Main_DEX))$(subst Main,VerifierDeps,$(basename $(notdir $(ART_TEST_TARGET_GTEST_Main_DEX))))$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX))
+ART_TEST_HOST_GTEST_VerifierDepsMulti_DEX := $(dir $(ART_TEST_HOST_GTEST_Main_DEX))$(subst Main,VerifierDepsMulti,$(basename $(notdir $(ART_TEST_HOST_GTEST_Main_DEX))))$(suffix $(ART_TEST_HOST_GTEST_Main_DEX))
+ART_TEST_TARGET_GTEST_VerifierDepsMulti_DEX := $(dir $(ART_TEST_TARGET_GTEST_Main_DEX))$(subst Main,VerifierDepsMulti,$(basename $(notdir $(ART_TEST_TARGET_GTEST_Main_DEX))))$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX))
$(ART_TEST_HOST_GTEST_VerifierDeps_DEX): $(ART_TEST_GTEST_VerifierDeps_SRC) $(HOST_OUT_EXECUTABLES)/smali
$(HOST_OUT_EXECUTABLES)/smali --output=$@ $(filter %.smali,$^)
@@ -84,6 +87,12 @@ $(ART_TEST_HOST_GTEST_VerifierDeps_DEX): $(ART_TEST_GTEST_VerifierDeps_SRC) $(HO
$(ART_TEST_TARGET_GTEST_VerifierDeps_DEX): $(ART_TEST_GTEST_VerifierDeps_SRC) $(HOST_OUT_EXECUTABLES)/smali
$(HOST_OUT_EXECUTABLES)/smali --output=$@ $(filter %.smali,$^)
+$(ART_TEST_HOST_GTEST_VerifierDepsMulti_DEX): $(ART_TEST_GTEST_VerifierDepsMulti_SRC) $(HOST_OUT_EXECUTABLES)/smali
+ $(HOST_OUT_EXECUTABLES)/smali --output=$@ $(filter %.smali,$^)
+
+$(ART_TEST_TARGET_GTEST_VerifierDepsMulti_DEX): $(ART_TEST_GTEST_VerifierDepsMulti_SRC) $(HOST_OUT_EXECUTABLES)/smali
+ $(HOST_OUT_EXECUTABLES)/smali --output=$@ $(filter %.smali,$^)
+
# Dex file dependencies for each gtest.
ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested
@@ -115,7 +124,7 @@ ART_GTEST_stub_test_DEX_DEPS := AllFields
ART_GTEST_transaction_test_DEX_DEPS := Transaction
ART_GTEST_type_lookup_table_test_DEX_DEPS := Lookup
ART_GTEST_unstarted_runtime_test_DEX_DEPS := Nested
-ART_GTEST_verifier_deps_test_DEX_DEPS := VerifierDeps MultiDex
+ART_GTEST_verifier_deps_test_DEX_DEPS := VerifierDeps VerifierDepsMulti MultiDex
ART_GTEST_dex_to_dex_decompiler_test_DEX_DEPS := VerifierDeps DexToDexDecompiler
# The elf writer test has dependencies on core.oat.
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 1ee2a21b18..c59e36b597 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -417,6 +417,7 @@ art_cc_test {
shared_libs: [
"libartd-compiler",
+ "libartd-simulator",
"libvixld-arm",
"libvixld-arm64",
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index d89cdbabf8..9a45379a05 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -52,10 +52,10 @@ void CommonCompilerTest::MakeExecutable(ArtMethod* method) {
compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
method->GetDexMethodIndex()));
}
- if (compiled_method != nullptr) {
+  // If the code size is 0, it means the method was skipped due to profile-guided compilation.
+ if (compiled_method != nullptr && compiled_method->GetQuickCode().size() != 0u) {
ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
uint32_t code_size = code.size();
- CHECK_NE(0u, code_size);
ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable();
uint32_t vmap_table_offset = vmap_table.empty() ? 0u
: sizeof(OatQuickMethodHeader) + vmap_table.size();
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index e2a0942724..00e2d62bff 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -170,7 +170,6 @@ class LinkerPatch {
// choose to squeeze the Type into fewer than 8 bits, we'll have to declare
// patch_type_ as an uintN_t and do explicit static_cast<>s.
enum class Type : uint8_t {
- kRecordPosition, // Just record patch position for patchoat.
kMethod,
kCall,
kCallRelative, // NOTE: Actual patching is instruction_set-dependent.
@@ -183,10 +182,6 @@ class LinkerPatch {
kDexCacheArray, // NOTE: Actual patching is instruction_set-dependent.
};
- static LinkerPatch RecordPosition(size_t literal_offset) {
- return LinkerPatch(literal_offset, Type::kRecordPosition, /* target_dex_file */ nullptr);
- }
-
static LinkerPatch MethodPatch(size_t literal_offset,
const DexFile* target_dex_file,
uint32_t target_method_idx) {
diff --git a/compiler/dex/dex_to_dex_decompiler.cc b/compiler/dex/dex_to_dex_decompiler.cc
index bfd485d126..53601033da 100644
--- a/compiler/dex/dex_to_dex_decompiler.cc
+++ b/compiler/dex/dex_to_dex_decompiler.cc
@@ -20,7 +20,7 @@
#include "base/mutex.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
-#include "optimizing/bytecode_utils.h"
+#include "bytecode_utils.h"
namespace art {
namespace optimizer {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 7e91453741..057e3c9960 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -535,9 +535,8 @@ static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
if (klass->IsVerified()) {
// Class is verified so we can enable DEX-to-DEX compilation for performance.
return max_level;
- } else if (klass->IsCompileTimeVerified()) {
+ } else if (klass->ShouldVerifyAtRuntime()) {
// Class verification has soft-failed. Anyway, ensure at least correctness.
- DCHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime);
return optimizer::DexToDexCompilationLevel::kRequired;
} else {
// Class verification has failed: do not run DEX-to-DEX compilation.
@@ -964,7 +963,7 @@ static void EnsureVerifiedOrVerifyAtRuntime(jobject jclass_loader,
if (cls == nullptr) {
soa.Self()->ClearException();
} else if (&cls->GetDexFile() == dex_file) {
- DCHECK(cls->IsErroneous() || cls->IsVerified() || cls->IsCompileTimeVerified())
+ DCHECK(cls->IsErroneous() || cls->IsVerified() || cls->ShouldVerifyAtRuntime())
<< cls->PrettyClass()
<< " " << cls->GetStatus();
}
@@ -2160,6 +2159,14 @@ class VerifyClassVisitor : public CompilationVisitor {
LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
<< " because: " << error_msg;
manager_->GetCompiler()->SetHadHardVerifierFailure();
+ } else {
+ // Force a soft failure for the VerifierDeps. This is a sanity measure, as
+ // the vdex file already records that the class hasn't been resolved. It avoids
+ // trying to do future verification optimizations when processing the vdex file.
+ DCHECK(failure_kind == verifier::MethodVerifier::kSoftFailure ||
+ failure_kind == verifier::MethodVerifier::kNoFailure)
+ << failure_kind;
+ failure_kind = verifier::MethodVerifier::kSoftFailure;
}
} else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
CHECK(klass->IsResolved()) << klass->PrettyClass();
@@ -2172,7 +2179,7 @@ class VerifyClassVisitor : public CompilationVisitor {
manager_->GetCompiler()->SetHadHardVerifierFailure();
}
- CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
+ CHECK(klass->ShouldVerifyAtRuntime() || klass->IsVerified() || klass->IsErroneous())
<< klass->PrettyDescriptor() << ": state=" << klass->GetStatus();
// It is *very* problematic if there are verification errors in the boot classpath. For example,
@@ -2186,6 +2193,13 @@ class VerifyClassVisitor : public CompilationVisitor {
DCHECK(klass->IsVerified()) << "Boot classpath class " << klass->PrettyClass()
<< " failed to fully verify: state= " << klass->GetStatus();
}
+ if (klass->IsVerified()) {
+ DCHECK_EQ(failure_kind, verifier::MethodVerifier::kNoFailure);
+ } else if (klass->ShouldVerifyAtRuntime()) {
+ DCHECK_EQ(failure_kind, verifier::MethodVerifier::kSoftFailure);
+ } else {
+ DCHECK_EQ(failure_kind, verifier::MethodVerifier::kHardFailure);
+ }
}
} else {
// Make the skip a soft failure, essentially being considered as verify at runtime.
@@ -2283,7 +2297,7 @@ class InitializeClassVisitor : public CompilationVisitor {
public:
explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
- virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
ATRACE_CALL();
jobject jclass_loader = manager_->GetClassLoader();
const DexFile& dex_file = *manager_->GetDexFile();
@@ -2343,23 +2357,32 @@ class InitializeClassVisitor : public CompilationVisitor {
// mode which prevents the GC from visiting objects modified during the transaction.
// Ensure GC is not run so don't access freed objects when aborting transaction.
- ScopedAssertNoThreadSuspension ants("Transaction end");
- runtime->ExitTransactionMode();
+ {
+ ScopedAssertNoThreadSuspension ants("Transaction end");
+ runtime->ExitTransactionMode();
+
+ if (!success) {
+ CHECK(soa.Self()->IsExceptionPending());
+ mirror::Throwable* exception = soa.Self()->GetException();
+ VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
+ << exception->Dump();
+ std::ostream* file_log = manager_->GetCompiler()->
+ GetCompilerOptions().GetInitFailureOutput();
+ if (file_log != nullptr) {
+ *file_log << descriptor << "\n";
+ *file_log << exception->Dump() << "\n";
+ }
+ soa.Self()->ClearException();
+ transaction.Rollback();
+ CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
+ }
+ }
if (!success) {
- CHECK(soa.Self()->IsExceptionPending());
- mirror::Throwable* exception = soa.Self()->GetException();
- VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
- << exception->Dump();
- std::ostream* file_log = manager_->GetCompiler()->
- GetCompilerOptions().GetInitFailureOutput();
- if (file_log != nullptr) {
- *file_log << descriptor << "\n";
- *file_log << exception->Dump() << "\n";
- }
- soa.Self()->ClearException();
- transaction.Rollback();
- CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
+      // On failure, still intern strings of static fields and strings seen in <clinit>, as these
+ // will be created in the zygote. This is separated from the transaction code just
+ // above as we will allocate strings, so must be allowed to suspend.
+ InternStrings(klass, class_loader);
}
}
}
@@ -2375,6 +2398,57 @@ class InitializeClassVisitor : public CompilationVisitor {
}
private:
+ void InternStrings(Handle<mirror::Class> klass, Handle<mirror::ClassLoader> class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(manager_->GetCompiler()->GetCompilerOptions().IsBootImage());
+ DCHECK(klass->IsVerified());
+ DCHECK(!klass->IsInitialized());
+
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::DexCache> h_dex_cache = hs.NewHandle(klass->GetDexCache());
+ const DexFile* dex_file = manager_->GetDexFile();
+ const DexFile::ClassDef* class_def = klass->GetClassDef();
+ ClassLinker* class_linker = manager_->GetClassLinker();
+
+ // Check encoded final field values for strings and intern.
+ annotations::RuntimeEncodedStaticFieldValueIterator value_it(*dex_file,
+ &h_dex_cache,
+ &class_loader,
+ manager_->GetClassLinker(),
+ *class_def);
+ for ( ; value_it.HasNext(); value_it.Next()) {
+ if (value_it.GetValueType() == annotations::RuntimeEncodedStaticFieldValueIterator::kString) {
+ // Resolve the string. This will intern the string.
+ art::ObjPtr<mirror::String> resolved = class_linker->ResolveString(
+ *dex_file, dex::StringIndex(value_it.GetJavaValue().i), h_dex_cache);
+ CHECK(resolved != nullptr);
+ }
+ }
+
+ // Intern strings seen in <clinit>.
+ ArtMethod* clinit = klass->FindClassInitializer(class_linker->GetImagePointerSize());
+ if (clinit != nullptr) {
+ const DexFile::CodeItem* code_item = clinit->GetCodeItem();
+ DCHECK(code_item != nullptr);
+ const Instruction* inst = Instruction::At(code_item->insns_);
+
+ const uint32_t insns_size = code_item->insns_size_in_code_units_;
+ for (uint32_t dex_pc = 0; dex_pc < insns_size;) {
+ if (inst->Opcode() == Instruction::CONST_STRING) {
+ ObjPtr<mirror::String> s = class_linker->ResolveString(
+ *dex_file, dex::StringIndex(inst->VRegB_21c()), h_dex_cache);
+ CHECK(s != nullptr);
+ } else if (inst->Opcode() == Instruction::CONST_STRING_JUMBO) {
+ ObjPtr<mirror::String> s = class_linker->ResolveString(
+ *dex_file, dex::StringIndex(inst->VRegB_31c()), h_dex_cache);
+ CHECK(s != nullptr);
+ }
+ dex_pc += inst->SizeInCodeUnits();
+ inst = inst->Next();
+ }
+ }
+ }
+
const ParallelCompilationManager* const manager_;
};
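
The InternStrings() helper added above walks the raw <clinit> bytecode using ART's cursor-advance idiom for dex code items: instructions are variable-width, so the loop steps by SizeInCodeUnits() instead of a fixed stride, keeping dex_pc and the instruction pointer in lockstep. A minimal sketch of the same walk, assuming only the Instruction API visible in the hunk; CountConstStrings is a hypothetical helper, not an ART function:

static size_t CountConstStrings(const DexFile::CodeItem* code_item) {
  // Dex bytecode is a stream of 16-bit code units; each instruction spans
  // one or more units, so the cursor advances by the decoded width.
  const Instruction* inst = Instruction::At(code_item->insns_);
  const uint32_t insns_size = code_item->insns_size_in_code_units_;
  size_t count = 0;
  for (uint32_t dex_pc = 0; dex_pc < insns_size; ) {
    if (inst->Opcode() == Instruction::CONST_STRING ||
        inst->Opcode() == Instruction::CONST_STRING_JUMBO) {
      ++count;  // 21c and 31c encodings load a string by 16- or 32-bit index
    }
    dex_pc += inst->SizeInCodeUnits();
    inst = inst->Next();
  }
  return count;
}
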
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 97954f3c29..562f97b3ae 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -240,9 +240,8 @@ class CompilerDriverProfileTest : public CompilerDriverTest {
ProfileCompilationInfo info;
for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
- std::string key = ProfileCompilationInfo::GetProfileDexFileKey(dex_file->GetLocation());
- profile_info_.AddMethodIndex(key, dex_file->GetLocationChecksum(), 1);
- profile_info_.AddMethodIndex(key, dex_file->GetLocationChecksum(), 2);
+ profile_info_.AddMethodIndex(dex_file->GetLocation(), dex_file->GetLocationChecksum(), 1);
+ profile_info_.AddMethodIndex(dex_file->GetLocation(), dex_file->GetLocationChecksum(), 2);
}
return &profile_info_;
}
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index c222f90043..34ad1c5c08 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -32,7 +32,6 @@ CompilerOptions::CompilerOptions()
no_inline_from_(nullptr),
boot_image_(false),
app_image_(false),
- include_patch_information_(kDefaultIncludePatchInformation),
top_k_profile_threshold_(kDefaultTopKProfileThreshold),
debuggable_(false),
generate_debug_info_(kDefaultGenerateDebugInfo),
@@ -66,7 +65,6 @@ CompilerOptions::CompilerOptions(CompilerFilter::Filter compiler_filter,
size_t inline_depth_limit,
size_t inline_max_code_units,
const std::vector<const DexFile*>* no_inline_from,
- bool include_patch_information,
double top_k_profile_threshold,
bool debuggable,
bool generate_debug_info,
@@ -93,7 +91,6 @@ CompilerOptions::CompilerOptions(CompilerFilter::Filter compiler_filter,
no_inline_from_(no_inline_from),
boot_image_(false),
app_image_(false),
- include_patch_information_(include_patch_information),
top_k_profile_threshold_(top_k_profile_threshold),
debuggable_(debuggable),
generate_debug_info_(generate_debug_info),
@@ -206,10 +203,6 @@ bool CompilerOptions::ParseCompilerOption(const StringPiece& option, UsageFn Usa
debuggable_ = true;
} else if (option.starts_with("--top-k-profile-threshold=")) {
ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold_, Usage);
- } else if (option == "--include-patch-information") {
- include_patch_information_ = true;
- } else if (option == "--no-include-patch-information") {
- include_patch_information_ = false;
} else if (option == "--abort-on-hard-verifier-error") {
abort_on_hard_verifier_failure_ = true;
} else if (option.starts_with("--dump-init-failures=")) {
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 6894cd5028..2e3e55f6c6 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -46,7 +46,6 @@ class CompilerOptions FINAL {
static constexpr double kDefaultTopKProfileThreshold = 90.0;
static const bool kDefaultGenerateDebugInfo = false;
static const bool kDefaultGenerateMiniDebugInfo = false;
- static const bool kDefaultIncludePatchInformation = false;
static const size_t kDefaultInlineDepthLimit = 3;
static const size_t kDefaultInlineMaxCodeUnits = 32;
static constexpr size_t kUnsetInlineDepthLimit = -1;
@@ -68,7 +67,6 @@ class CompilerOptions FINAL {
size_t inline_depth_limit,
size_t inline_max_code_units,
const std::vector<const DexFile*>* no_inline_from,
- bool include_patch_information,
double top_k_profile_threshold,
bool debuggable,
bool generate_debug_info,
@@ -213,10 +211,6 @@ class CompilerOptions FINAL {
return implicit_suspend_checks_;
}
- bool GetIncludePatchInformation() const {
- return include_patch_information_;
- }
-
bool IsBootImage() const {
return boot_image_;
}
@@ -305,7 +299,6 @@ class CompilerOptions FINAL {
bool boot_image_;
bool app_image_;
- bool include_patch_information_;
// When using a profile file only the top K% of the profiled samples will be compiled.
double top_k_profile_threshold_;
bool debuggable_;
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index d55f7458b2..7baae527ff 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -63,7 +63,6 @@ class ElfWriter {
virtual void EndText(OutputStream* text) = 0;
virtual void WriteDynamicSection() = 0;
virtual void WriteDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
- virtual void WritePatchLocations(const ArrayRef<const uintptr_t>& patch_locations) = 0;
virtual bool End() = 0;
// Get the ELF writer's stream. This stream can be used for writing data directly
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 0d6575cffd..28c35e96b4 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -105,7 +105,6 @@ class ElfWriterQuick FINAL : public ElfWriter {
void EndText(OutputStream* text) OVERRIDE;
void WriteDynamicSection() OVERRIDE;
void WriteDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
- void WritePatchLocations(const ArrayRef<const uintptr_t>& patch_locations) OVERRIDE;
bool End() OVERRIDE;
virtual OutputStream* GetStream() OVERRIDE;
@@ -268,17 +267,6 @@ void ElfWriterQuick<ElfTypes>::WriteDebugInfo(
}
template <typename ElfTypes>
-void ElfWriterQuick<ElfTypes>::WritePatchLocations(
- const ArrayRef<const uintptr_t>& patch_locations) {
- // Add relocation section for .text.
- if (compiler_options_->GetIncludePatchInformation()) {
- // Note that ElfWriter::Fixup will be called regardless and therefore
- // we need to include oat_patches for debug sections unconditionally.
- builder_->WritePatches(".text.oat_patches", patch_locations);
- }
-}
-
-template <typename ElfTypes>
bool ElfWriterQuick<ElfTypes>::End() {
builder_->End();
if (compiler_options_->GetGenerateBuildId()) {
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index b0225a3625..89e8a678b1 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -318,7 +318,6 @@ void CompilationHelper::Compile(CompilerDriver* driver,
elf_writer->WriteDynamicSection();
elf_writer->WriteDebugInfo(oat_writer->GetMethodDebugInfo());
- elf_writer->WritePatchLocations(oat_writer->GetAbsolutePatchLocations());
bool success = elf_writer->End();
ASSERT_TRUE(success);
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 117d1131b5..65d82ed980 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -238,10 +238,11 @@ bool ImageWriter::Write(int image_fd,
case ImageHeader::kStorageModeLZ4: {
const size_t compressed_max_size = LZ4_compressBound(image_data_size);
compressed_data.reset(new char[compressed_max_size]);
- data_size = LZ4_compress(
+ data_size = LZ4_compress_default(
reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
&compressed_data[0],
- image_data_size);
+ image_data_size,
+ compressed_max_size);
break;
}
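
The hunk above moves from the deprecated LZ4_compress() to LZ4_compress_default(), which takes an explicit destination capacity and returns 0 on failure instead of risking a buffer overrun. A minimal sketch of the pattern against the public liblz4 API, assuming nothing ART-specific (CompressBlock is illustrative):

#include <lz4.h>
#include <memory>

// Compress src[0..src_size) into a freshly sized buffer.
// Returns the compressed size, or 0 if compression failed.
int CompressBlock(const char* src, int src_size, std::unique_ptr<char[]>* dst) {
  // LZ4_compressBound() gives the worst-case output size; sizing the buffer
  // to this bound means LZ4_compress_default() cannot fail for lack of space.
  const int max_size = LZ4_compressBound(src_size);
  dst->reset(new char[max_size]);
  return LZ4_compress_default(src, dst->get(), src_size, max_size);
}
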
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index cbd831a60f..3ae7974038 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -100,7 +100,6 @@ JitCompiler::JitCompiler() {
CompilerOptions::kDefaultInlineDepthLimit,
CompilerOptions::kDefaultInlineMaxCodeUnits,
/* no_inline_from */ nullptr,
- /* include_patch_information */ false,
CompilerOptions::kDefaultTopKProfileThreshold,
Runtime::Current()->IsJavaDebuggable(),
CompilerOptions::kDefaultGenerateDebugInfo,
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 66111f6e23..97b13746fc 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -250,7 +250,6 @@ class OatTest : public CommonCompilerTest {
elf_writer->WriteDynamicSection();
elf_writer->WriteDebugInfo(oat_writer.GetMethodDebugInfo());
- elf_writer->WritePatchLocations(oat_writer.GetAbsolutePatchLocations());
if (!elf_writer->End()) {
return false;
@@ -265,6 +264,7 @@ class OatTest : public CommonCompilerTest {
void TestDexFileInput(bool verify, bool low_4gb, bool use_profile);
void TestZipFileInput(bool verify);
+ void TestZipFileInputWithEmptyDex();
std::unique_ptr<const InstructionSetFeatures> insn_features_;
std::unique_ptr<QuickCompilerCallbacks> callbacks_;
@@ -821,6 +821,28 @@ TEST_F(OatTest, ZipFileInputCheckVerifier) {
TestZipFileInput(true);
}
+void OatTest::TestZipFileInputWithEmptyDex() {
+ ScratchFile zip_file;
+ ZipBuilder zip_builder(zip_file.GetFile());
+ bool success = zip_builder.AddFile("classes.dex", nullptr, 0);
+ ASSERT_TRUE(success);
+ success = zip_builder.Finish();
+ ASSERT_TRUE(success) << strerror(errno);
+
+ SafeMap<std::string, std::string> key_value_store;
+ key_value_store.Put(OatHeader::kImageLocationKey, "test.art");
+ std::vector<const char*> input_filenames { zip_file.GetFilename().c_str() }; // NOLINT [readability/braces] [4]
+ ScratchFile oat_file, vdex_file(oat_file, ".vdex");
+ std::unique_ptr<ProfileCompilationInfo> profile_compilation_info(new ProfileCompilationInfo());
+ success = WriteElf(vdex_file.GetFile(), oat_file.GetFile(), input_filenames,
+ key_value_store, /*verify*/false, profile_compilation_info.get());
+ ASSERT_FALSE(success);
+}
+
+TEST_F(OatTest, ZipFileInputWithEmptyDex) {
+ TestZipFileInputWithEmptyDex();
+}
+
TEST_F(OatTest, UpdateChecksum) {
InstructionSet insn_set = kX86;
std::string error_msg;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 34d2ec9cde..43f606af65 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1225,7 +1225,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
break;
}
default: {
- DCHECK_EQ(patch.GetType(), LinkerPatch::Type::kRecordPosition);
+ DCHECK(false) << "Unexpected linker patch type: " << patch.GetType();
break;
}
}
@@ -2260,6 +2260,10 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
ZipEntry* zip_entry = oat_dex_file->source_.GetZipEntry();
std::unique_ptr<MemMap> mem_map(
zip_entry->ExtractToMemMap(location.c_str(), "classes.dex", &error_msg));
+ if (mem_map == nullptr) {
+ LOG(ERROR) << "Failed to extract dex file to mem map for layout: " << error_msg;
+ return false;
+ }
dex_file = DexFile::Open(location,
zip_entry->GetCrc32(),
std::move(mem_map),
@@ -2283,7 +2287,7 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
dex_file = DexFile::Open(raw_dex_file,
header->file_size_,
location,
- header->checksum_,
+ oat_dex_file->dex_file_location_checksum_,
nullptr,
/* verify */ true,
/* verify_checksum */ false,
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index db84166ad3..511371480a 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -225,10 +225,6 @@ class OatWriter {
return oat_data_offset_;
}
- ArrayRef<const uintptr_t> GetAbsolutePatchLocations() const {
- return ArrayRef<const uintptr_t>(absolute_patch_locations_);
- }
-
~OatWriter();
void AddMethodDebugInfos(const std::vector<debug::MethodDebugInfo>& infos) {
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 5d58207511..cb6e14b2bd 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -43,7 +43,7 @@ class BoundsCheckEliminationTest : public testing::Test {
void RunBCE() {
graph_->BuildDominatorTree();
- InstructionSimplifier(graph_).Run();
+ InstructionSimplifier(graph_, /* codegen */ nullptr).Run();
SideEffectsAnalysis side_effects(graph_);
side_effects.Run();
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 7b84ef83cd..511bd9b7ef 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1437,8 +1437,6 @@ CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_address_patches_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
@@ -7626,9 +7624,7 @@ Literal* CodeGeneratorARM::DeduplicateBootImageTypeLiteral(const DexFile& dex_fi
}
Literal* CodeGeneratorARM::DeduplicateBootImageAddressLiteral(uint32_t address) {
- bool needs_patch = GetCompilerOptions().GetIncludePatchInformation();
- Uint32ToLiteralMap* map = needs_patch ? &boot_image_address_patches_ : &uint32_literals_;
- return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
+ return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
}
Literal* CodeGeneratorARM::DeduplicateJitStringLiteral(const DexFile& dex_file,
@@ -7679,8 +7675,7 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
/* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
boot_image_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
- /* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
- boot_image_address_patches_.size();
+ /* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
@@ -7714,13 +7709,6 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
target_type.dex_file,
target_type.type_index.index_));
}
- for (const auto& entry : boot_image_address_patches_) {
- DCHECK(GetCompilerOptions().GetIncludePatchInformation());
- Literal* literal = entry.second;
- DCHECK(literal->GetLabel()->IsBound());
- uint32_t literal_offset = literal->GetLabel()->Position();
- linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
- }
DCHECK_EQ(size, linker_patches->size());
}
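
With kRecordPosition patches gone, the boot_image_address_patches_ map is redundant and every boot image address literal is deduplicated through uint32_literals_; the same cleanup repeats below for the ARM64, ARM VIXL, and MIPS backends. A rough sketch of the deduplication-map idiom behind DeduplicateUint32Literal(), with Literal as a stand-in for the backend's literal type (the real maps and literals are arena-allocated):

#include <cstdint>
#include <map>

struct Literal { uint32_t value; };  // stand-in for the backend literal type
using Uint32ToLiteralMap = std::map<uint32_t, Literal*>;

Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map) {
  // One literal pool entry per distinct value: reuse the cached entry on
  // repeat lookups, create and record it on first use.
  auto lb = map->lower_bound(value);
  if (lb != map->end() && lb->first == value) {
    return lb->second;
  }
  Literal* literal = new Literal{value};  // arena-allocated in the real code
  map->insert(lb, std::make_pair(value, literal));
  return literal;
}
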
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index df2dbc74ab..e993756b3b 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -642,8 +642,6 @@ class CodeGeneratorARM : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
- // Deduplication map for patchable boot image addresses.
- Uint32ToLiteralMap boot_image_address_patches_;
// Patches for string literals in JIT compiled code.
StringToLiteralMap jit_string_patches_;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 18c95b3c41..f5038fb1c0 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1200,8 +1200,6 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_address_patches_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
@@ -4328,9 +4326,7 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageTypeLi
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddressLiteral(
uint64_t address) {
- bool needs_patch = GetCompilerOptions().GetIncludePatchInformation();
- Uint32ToLiteralMap* map = needs_patch ? &boot_image_address_patches_ : &uint32_literals_;
- return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
+ return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLiteral(
@@ -4398,8 +4394,7 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
pc_relative_string_patches_.size() +
boot_image_type_patches_.size() +
pc_relative_type_patches_.size() +
- type_bss_entry_patches_.size() +
- boot_image_address_patches_.size();
+ type_bss_entry_patches_.size();
linker_patches->reserve(size);
for (const PcRelativePatchInfo& info : pc_relative_dex_cache_patches_) {
linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(info.label.GetLocation(),
@@ -4433,11 +4428,6 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
target_type.dex_file,
target_type.type_index.index_));
}
- for (const auto& entry : boot_image_address_patches_) {
- DCHECK(GetCompilerOptions().GetIncludePatchInformation());
- vixl::aarch64::Literal<uint32_t>* literal = entry.second;
- linker_patches->push_back(LinkerPatch::RecordPosition(literal->GetOffset()));
- }
DCHECK_EQ(size, linker_patches->size());
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 5faf29a90f..24a602400e 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -761,8 +761,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
- // Deduplication map for patchable boot image addresses.
- Uint32ToLiteralMap boot_image_address_patches_;
// Patches for string literals in JIT compiled code.
StringToLiteralMap jit_string_patches_;
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 6bfbe4a9c9..58bf2de70b 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -1481,8 +1481,6 @@ CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_address_patches_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
@@ -7738,9 +7736,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateBootImageTypeLiteral(
}
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateBootImageAddressLiteral(uint32_t address) {
- bool needs_patch = GetCompilerOptions().GetIncludePatchInformation();
- Uint32ToLiteralMap* map = needs_patch ? &boot_image_address_patches_ : &uint32_literals_;
- return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
+ return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
}
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateDexCacheAddressLiteral(uint32_t address) {
@@ -7800,8 +7796,7 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pa
/* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
boot_image_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
- /* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
- boot_image_address_patches_.size();
+ /* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
@@ -7835,13 +7830,6 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pa
target_type.dex_file,
target_type.type_index.index_));
}
- for (const auto& entry : boot_image_address_patches_) {
- DCHECK(GetCompilerOptions().GetIncludePatchInformation());
- VIXLUInt32Literal* literal = entry.second;
- DCHECK(literal->IsBound());
- uint32_t literal_offset = literal->GetLocation();
- linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
- }
DCHECK_EQ(size, linker_patches->size());
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 3f52c72bd4..92e922d8f9 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -738,8 +738,6 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
- // Deduplication map for patchable boot image addresses.
- Uint32ToLiteralMap boot_image_address_patches_;
// Patches for string literals in JIT compiled code.
StringToLiteralMap jit_string_patches_;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 791e63265e..5f02a52417 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -391,7 +391,8 @@ class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
public:
- explicit TypeCheckSlowPathMIPS(HInstruction* instruction) : SlowPathCodeMIPS(instruction) {}
+ explicit TypeCheckSlowPathMIPS(HInstruction* instruction, bool is_fatal)
+ : SlowPathCodeMIPS(instruction), is_fatal_(is_fatal) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
@@ -401,7 +402,9 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations);
+ if (!is_fatal_) {
+ SaveLiveRegisters(codegen, locations);
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
@@ -424,13 +427,19 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
- RestoreLiveRegisters(codegen, locations);
- __ B(GetExitLabel());
+ if (!is_fatal_) {
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
}
const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }
+ bool IsFatal() const OVERRIDE { return is_fatal_; }
+
private:
+ const bool is_fatal_;
+
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS);
};
@@ -482,8 +491,6 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_address_patches_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
clobbered_ra_(false) {
@@ -1026,8 +1033,7 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patch
pc_relative_type_patches_.size() +
type_bss_entry_patches_.size() +
boot_image_string_patches_.size() +
- boot_image_type_patches_.size() +
- boot_image_address_patches_.size();
+ boot_image_type_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
@@ -1061,13 +1067,6 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patch
target_type.dex_file,
target_type.type_index.index_));
}
- for (const auto& entry : boot_image_address_patches_) {
- DCHECK(GetCompilerOptions().GetIncludePatchInformation());
- Literal* literal = entry.second;
- DCHECK(literal->GetLabel()->IsBound());
- uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
- linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
- }
DCHECK_EQ(size, linker_patches->size());
}
@@ -1125,9 +1124,7 @@ Literal* CodeGeneratorMIPS::DeduplicateBootImageTypeLiteral(const DexFile& dex_f
}
Literal* CodeGeneratorMIPS::DeduplicateBootImageAddressLiteral(uint32_t address) {
- bool needs_patch = GetCompilerOptions().GetIncludePatchInformation();
- Uint32ToLiteralMap* map = needs_patch ? &boot_image_address_patches_ : &uint32_literals_;
- return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
+ return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
}
void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info,
@@ -1899,9 +1896,9 @@ void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) {
}
}
-auto InstructionCodeGeneratorMIPS::GetImplicitNullChecker(HInstruction* instruction) {
- auto null_checker = [this, instruction]() {
- this->codegen_->MaybeRecordImplicitNullCheck(instruction);
+static auto GetImplicitNullChecker(HInstruction* instruction, CodeGeneratorMIPS* codegen) {
+ auto null_checker = [codegen, instruction]() {
+ codegen->MaybeRecordImplicitNullCheck(instruction);
};
return null_checker;
}
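
The refactored helper above is now a plain static function with a deduced (auto) return type: a lambda's closure type is unnamed, so auto is the only way to return it, and taking the codegen as an explicit parameter removes the implicit `this` capture of the old member function. A standalone sketch of the idiom, with illustrative names:

#include <cstdio>

// Factory returning a lambda; `auto` is required because the closure type
// cannot be named. The callable captures exactly the state it needs,
// mirroring the GetImplicitNullChecker() helper above.
static auto MakeNullCheckRecorder(int instruction_id) {
  return [instruction_id]() {
    std::printf("implicit null check recorded for instruction %d\n", instruction_id);
  };
}

int main() {
  auto null_checker = MakeNullCheckRecorder(42);
  null_checker();  // called after each load/store that may fault
  return 0;
}
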
@@ -1911,7 +1908,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
Register obj = locations->InAt(0).AsRegister<Register>();
Location index = locations->InAt(1);
uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
- auto null_checker = GetImplicitNullChecker(instruction);
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
Primitive::Type type = instruction->GetType();
const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
@@ -2148,7 +2145,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
bool needs_runtime_call = locations->WillCall();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
- auto null_checker = GetImplicitNullChecker(instruction);
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
Register base_reg = index.IsConstant() ? obj : TMP;
switch (value_type) {
@@ -2331,30 +2328,178 @@ void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
}
void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction,
- LocationSummary::kCallOnSlowPath);
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
+
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ switch (type_check_kind) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kAbstractClassCheck:
+ case TypeCheckKind::kClassHierarchyCheck:
+ case TypeCheckKind::kArrayObjectCheck:
+ call_kind = throws_into_catch
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
+ break;
+ case TypeCheckKind::kArrayCheck:
+ case TypeCheckKind::kUnresolvedCheck:
+ case TypeCheckKind::kInterfaceCheck:
+ call_kind = LocationSummary::kCallOnSlowPath;
+ break;
+ }
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- // Note that TypeCheckSlowPathMIPS uses this register too.
locations->AddTemp(Location::RequiresRegister());
}
void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).AsRegister<Register>();
Register cls = locations->InAt(1).AsRegister<Register>();
- Register obj_cls = locations->GetTemp(0).AsRegister<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+ const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
+ const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
+ const uint32_t object_array_data_offset =
+ mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ MipsLabel done;
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction);
+ // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
+ // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
+ // read barriers is done for performance and code size reasons.
+ bool is_type_check_slow_path_fatal = false;
+ if (!kEmitCompilerReadBarrier) {
+ is_type_check_slow_path_fatal =
+ (type_check_kind == TypeCheckKind::kExactCheck ||
+ type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
+ !instruction->CanThrowIntoCatchBlock();
+ }
+ SlowPathCodeMIPS* slow_path =
+ new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(slow_path);
- // TODO: avoid this check if we know obj is not null.
- __ Beqz(obj, slow_path->GetExitLabel());
- // Compare the class of `obj` with `cls`.
- __ LoadFromOffset(kLoadWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
- __ MaybeUnpoisonHeapReference(obj_cls);
- __ Bne(obj_cls, cls, slow_path->GetEntryLabel());
+ // Avoid this check if we know `obj` is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ Beqz(obj, &done);
+ }
+
+ switch (type_check_kind) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kArrayCheck: {
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // Jump to slow path for throwing the exception or doing a
+ // more involved array check.
+ __ Bne(temp, cls, slow_path->GetEntryLabel());
+ break;
+ }
+
+ case TypeCheckKind::kAbstractClassCheck: {
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // If the class is abstract, we eagerly fetch the super class of the
+ // object to avoid doing a comparison we know will fail.
+ MipsLabel loop;
+ __ Bind(&loop);
+ // /* HeapReference<Class> */ temp = temp->super_class_
+ __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception.
+ __ Beqz(temp, slow_path->GetEntryLabel());
+ // Otherwise, compare the classes.
+ __ Bne(temp, cls, &loop);
+ break;
+ }
+
+ case TypeCheckKind::kClassHierarchyCheck: {
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // Walk over the class hierarchy to find a match.
+ MipsLabel loop;
+ __ Bind(&loop);
+ __ Beq(temp, cls, &done);
+ // /* HeapReference<Class> */ temp = temp->super_class_
+ __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception. Otherwise, jump to the beginning of the loop.
+ __ Bnez(temp, &loop);
+ __ B(slow_path->GetEntryLabel());
+ break;
+ }
+
+ case TypeCheckKind::kArrayObjectCheck: {
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // Do an exact check.
+ __ Beq(temp, cls, &done);
+ // Otherwise, we need to check that the object's class is a non-primitive array.
+ // /* HeapReference<Class> */ temp = temp->component_type_
+ __ LoadFromOffset(kLoadWord, temp, temp, component_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // If the component type is null, jump to the slow path to throw the exception.
+ __ Beqz(temp, slow_path->GetEntryLabel());
+ // Otherwise, the object is indeed an array, further check that this component
+ // type is not a primitive type.
+ __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ Bnez(temp, slow_path->GetEntryLabel());
+ break;
+ }
+
+ case TypeCheckKind::kUnresolvedCheck:
+ // We always go into the type check slow path for the unresolved check case.
+ // We cannot directly call the CheckCast runtime entry point
+ // without resorting to a type checking slow path here (i.e. by
+ // calling InvokeRuntime directly), as it would require to
+ // assign fixed registers for the inputs of this HInstanceOf
+ // instruction (following the runtime calling convention), which
+ // might be cluttered by the potential first read barrier
+ // emission at the beginning of this method.
+ __ B(slow_path->GetEntryLabel());
+ break;
+
+ case TypeCheckKind::kInterfaceCheck: {
+      // Avoid read barriers to improve performance of the fast path. We cannot get false
+ // positives by doing this.
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // /* HeapReference<Class> */ temp = temp->iftable_
+ __ LoadFromOffset(kLoadWord, temp, temp, iftable_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // Iftable is never null.
+ __ Lw(TMP, temp, array_length_offset);
+ // Loop through the iftable and check if any class matches.
+ MipsLabel loop;
+ __ Bind(&loop);
+ __ Addiu(temp, temp, 2 * kHeapReferenceSize); // Possibly in delay slot on R2.
+ __ Beqz(TMP, slow_path->GetEntryLabel());
+ __ Lw(AT, temp, object_array_data_offset - 2 * kHeapReferenceSize);
+ __ MaybeUnpoisonHeapReference(AT);
+ // Go to next interface.
+ __ Addiu(TMP, TMP, -2);
+ // Compare the classes and continue the loop if they do not match.
+ __ Bne(AT, cls, &loop);
+ break;
+ }
+ }
+
+ __ Bind(&done);
__ Bind(slow_path->GetExitLabel());
}
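
In the kInterfaceCheck fast path above, each iftable record occupies two heap references (the interface class, then its method array), which is why the generated loop advances the data pointer by 2 * kHeapReferenceSize and counts the length down by 2. A rough C++-level view of what the emitted loop computes, with illustrative names rather than real ART accessors:

#include <cstddef>
#include <vector>

using ClassRef = const void*;  // stand-in for a heap reference

// iftable layout: [iface0, methods0, iface1, methods1, ...]
bool IfTableContains(const std::vector<ClassRef>& iftable, ClassRef iface) {
  for (size_t i = 0; i < iftable.size(); i += 2) {
    if (iftable[i] == iface) {
      return true;  // match: control falls through to the `done` label
    }
  }
  return false;  // no match: branch to the slow path, which throws
}
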
@@ -4923,7 +5068,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
LoadOperandType load_type = kLoadUnsignedByte;
bool is_volatile = field_info.IsVolatile();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
- auto null_checker = GetImplicitNullChecker(instruction);
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
switch (type) {
case Primitive::kPrimBoolean:
@@ -5052,7 +5197,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
bool is_volatile = field_info.IsVolatile();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1));
- auto null_checker = GetImplicitNullChecker(instruction);
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
switch (type) {
case Primitive::kPrimBoolean:
@@ -5193,8 +5338,22 @@ void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(
}
void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
- LocationSummary::CallKind call_kind =
- instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ switch (type_check_kind) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kAbstractClassCheck:
+ case TypeCheckKind::kClassHierarchyCheck:
+ case TypeCheckKind::kArrayObjectCheck:
+ call_kind = LocationSummary::kNoCall;
+ break;
+ case TypeCheckKind::kArrayCheck:
+ case TypeCheckKind::kUnresolvedCheck:
+ case TypeCheckKind::kInterfaceCheck:
+ call_kind = LocationSummary::kCallOnSlowPath;
+ break;
+ }
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -5204,36 +5363,143 @@ void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).AsRegister<Register>();
Register cls = locations->InAt(1).AsRegister<Register>();
Register out = locations->Out().AsRegister<Register>();
-
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
MipsLabel done;
+ SlowPathCodeMIPS* slow_path = nullptr;
// Return 0 if `obj` is null.
- // TODO: Avoid this check if we know `obj` is not null.
- __ Move(out, ZERO);
- __ Beqz(obj, &done);
-
- // Compare the class of `obj` with `cls`.
- __ LoadFromOffset(kLoadWord, out, obj, mirror::Object::ClassOffset().Int32Value());
- __ MaybeUnpoisonHeapReference(out);
- if (instruction->IsExactCheck()) {
- // Classes must be equal for the instanceof to succeed.
- __ Xor(out, out, cls);
- __ Sltiu(out, out, 1);
- } else {
- // If the classes are not equal, we go into a slow path.
- DCHECK(locations->OnlyCallsOnSlowPath());
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction);
- codegen_->AddSlowPath(slow_path);
- __ Bne(out, cls, slow_path->GetEntryLabel());
- __ LoadConst32(out, 1);
- __ Bind(slow_path->GetExitLabel());
+ // Avoid this check if we know `obj` is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ Move(out, ZERO);
+ __ Beqz(obj, &done);
+ }
+
+ switch (type_check_kind) {
+ case TypeCheckKind::kExactCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ __ LoadFromOffset(kLoadWord, out, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // Classes must be equal for the instanceof to succeed.
+ __ Xor(out, out, cls);
+ __ Sltiu(out, out, 1);
+ break;
+ }
+
+ case TypeCheckKind::kAbstractClassCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ __ LoadFromOffset(kLoadWord, out, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // If the class is abstract, we eagerly fetch the super class of the
+ // object to avoid doing a comparison we know will fail.
+ MipsLabel loop;
+ __ Bind(&loop);
+ // /* HeapReference<Class> */ out = out->super_class_
+ __ LoadFromOffset(kLoadWord, out, out, super_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ Beqz(out, &done);
+ __ Bne(out, cls, &loop);
+ __ LoadConst32(out, 1);
+ break;
+ }
+
+ case TypeCheckKind::kClassHierarchyCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ __ LoadFromOffset(kLoadWord, out, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // Walk over the class hierarchy to find a match.
+ MipsLabel loop, success;
+ __ Bind(&loop);
+ __ Beq(out, cls, &success);
+ // /* HeapReference<Class> */ out = out->super_class_
+ __ LoadFromOffset(kLoadWord, out, out, super_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ __ Bnez(out, &loop);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ B(&done);
+ __ Bind(&success);
+ __ LoadConst32(out, 1);
+ break;
+ }
+
+ case TypeCheckKind::kArrayObjectCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ __ LoadFromOffset(kLoadWord, out, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // Do an exact check.
+ MipsLabel success;
+ __ Beq(out, cls, &success);
+ // Otherwise, we need to check that the object's class is a non-primitive array.
+ // /* HeapReference<Class> */ out = out->component_type_
+ __ LoadFromOffset(kLoadWord, out, out, component_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ Beqz(out, &done);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ Sltiu(out, out, 1);
+ __ B(&done);
+ __ Bind(&success);
+ __ LoadConst32(out, 1);
+ break;
+ }
+
+ case TypeCheckKind::kArrayCheck: {
+ // No read barrier since the slow path will retry upon failure.
+ // /* HeapReference<Class> */ out = obj->klass_
+ __ LoadFromOffset(kLoadWord, out, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
+ /* is_fatal */ false);
+ codegen_->AddSlowPath(slow_path);
+ __ Bne(out, cls, slow_path->GetEntryLabel());
+ __ LoadConst32(out, 1);
+ break;
+ }
+
+ case TypeCheckKind::kUnresolvedCheck:
+ case TypeCheckKind::kInterfaceCheck: {
+ // Note that we indeed only call on slow path, but we always go
+ // into the slow path for the unresolved and interface check
+ // cases.
+ //
+ // We cannot directly call the InstanceofNonTrivial runtime
+ // entry point without resorting to a type checking slow path
+ // here (i.e. by calling InvokeRuntime directly), as it would
+ // require assigning fixed registers for the inputs of this
+ // HInstanceOf instruction (following the runtime calling
+ // convention), which might be cluttered by the potential first
+ // read barrier emission at the beginning of this method.
+ //
+ // TODO: Introduce a new runtime entry point taking the object
+ // to test (instead of its class) as argument, and let it deal
+ // with the read barrier issues. This will let us refactor this
+ // case of the `switch` code as it was previously (with a direct
+ // call to the runtime not using a type checking slow path).
+ // This should also be beneficial for the other cases above.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
+ /* is_fatal */ false);
+ codegen_->AddSlowPath(slow_path);
+ __ B(slow_path->GetEntryLabel());
+ break;
+ }
}
__ Bind(&done);
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
}
void LocationsBuilderMIPS::VisitIntConstant(HIntConstant* constant) {
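
In the kExactCheck case above, the result is materialized without a branch: Xor leaves zero iff the two class pointers are equal, and Sltiu then computes (unsigned) out < 1, i.e. out == 0. A minimal C++ rendering of the idiom:

    #include <cstdint>

    // out = (obj_klass == cls) ? 1 : 0, branch-free, as in the Xor/Sltiu pair.
    static uint32_t ExactCheckResult(uint32_t obj_klass, uint32_t cls) {
      uint32_t out = obj_klass ^ cls;  // Xor(out, out, cls): 0 iff equal.
      out = (out < 1u) ? 1u : 0u;      // Sltiu(out, out, 1): unsigned compare.
      return out;
    }
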
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 47eba50248..98fee24a74 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -297,7 +297,6 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void HandleGoto(HInstruction* got, HBasicBlock* successor);
- auto GetImplicitNullChecker(HInstruction* instruction);
void GenPackedSwitchWithCompares(Register value_reg,
int32_t lower_bound,
uint32_t num_entries,
@@ -536,8 +535,6 @@ class CodeGeneratorMIPS : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
- // Deduplication map for patchable boot image addresses.
- Uint32ToLiteralMap boot_image_address_patches_;
// Patches for string root accesses in JIT compiled code.
ArenaDeque<JitPatchInfo> jit_string_patches_;
// Patches for class root accesses in JIT compiled code.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 817854b507..02c3ad6e39 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -336,7 +336,8 @@ class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
- explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : SlowPathCodeMIPS64(instruction) {}
+ explicit TypeCheckSlowPathMIPS64(HInstruction* instruction, bool is_fatal)
+ : SlowPathCodeMIPS64(instruction), is_fatal_(is_fatal) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
@@ -347,7 +348,9 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations);
+ if (!is_fatal_) {
+ SaveLiveRegisters(codegen, locations);
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
@@ -370,13 +373,19 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
- RestoreLiveRegisters(codegen, locations);
- __ Bc(GetExitLabel());
+ if (!is_fatal_) {
+ RestoreLiveRegisters(codegen, locations);
+ __ Bc(GetExitLabel());
+ }
}
const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }
+ bool IsFatal() const OVERRIDE { return is_fatal_; }
+
private:
+ const bool is_fatal_;
+
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
};
@@ -430,8 +439,6 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_address_patches_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
@@ -937,8 +944,7 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
pc_relative_type_patches_.size() +
type_bss_entry_patches_.size() +
boot_image_string_patches_.size() +
- boot_image_type_patches_.size() +
- boot_image_address_patches_.size();
+ boot_image_type_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
@@ -972,13 +978,6 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
target_type.dex_file,
target_type.type_index.index_));
}
- for (const auto& entry : boot_image_address_patches_) {
- DCHECK(GetCompilerOptions().GetIncludePatchInformation());
- Literal* literal = entry.second;
- DCHECK(literal->GetLabel()->IsBound());
- uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
- linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
- }
DCHECK_EQ(size, linker_patches->size());
}
@@ -1042,9 +1041,7 @@ Literal* CodeGeneratorMIPS64::DeduplicateBootImageTypeLiteral(const DexFile& dex
}
Literal* CodeGeneratorMIPS64::DeduplicateBootImageAddressLiteral(uint64_t address) {
- bool needs_patch = GetCompilerOptions().GetIncludePatchInformation();
- Uint32ToLiteralMap* map = needs_patch ? &boot_image_address_patches_ : &uint32_literals_;
- return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
+ return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
}
void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info,
@@ -1483,11 +1480,19 @@ void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
}
}
+static auto GetImplicitNullChecker(HInstruction* instruction, CodeGeneratorMIPS64* codegen) {
+ auto null_checker = [codegen, instruction]() {
+ codegen->MaybeRecordImplicitNullCheck(instruction);
+ };
+ return null_checker;
+}
+
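
GetImplicitNullChecker() threads a callback into the load/store emitters so the implicit null check is recorded at the PC of the instruction that can actually fault, not after a multi-instruction sequence. A standalone sketch of the pattern with invented toy types (the real assembler accepts the callable generically; std::function just keeps the sketch simple):

    #include <cstddef>
    #include <functional>
    #include <vector>

    struct ToyAssembler {
      std::vector<int> code;
      void LoadFromOffset(int dst, int base, int offset,
                          const std::function<void()>& null_checker = [] {}) {
        code.push_back(offset);  // Stand-in for emitting the load.
        null_checker();          // MaybeRecordImplicitNullCheck happens here.
      }
    };

    inline size_t RecordedPcDemo() {
      ToyAssembler assembler;
      size_t recorded_pc = 0;
      auto null_checker = [&]() { recorded_pc = assembler.code.size() - 1; };
      assembler.LoadFromOffset(/*dst=*/ 1, /*base=*/ 2, /*offset=*/ 8, null_checker);
      return recorded_pc;  // 0: the position of the load itself.
    }
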
void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
Location index = locations->InAt(1);
uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
Primitive::Type type = instruction->GetType();
const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
@@ -1498,10 +1503,10 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
- __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
+ __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset, null_checker);
} else {
__ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
- __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
+ __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1511,10 +1516,10 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
- __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
+ __ LoadFromOffset(kLoadSignedByte, out, obj, offset, null_checker);
} else {
__ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
- __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
+ __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1524,11 +1529,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
+ __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
__ Daddu(TMP, obj, TMP);
- __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
+ __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1537,8 +1542,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
if (maybe_compressed_char_at) {
uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
- __ LoadFromOffset(kLoadWord, TMP, obj, count_offset);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ LoadFromOffset(kLoadWord, TMP, obj, count_offset, null_checker);
__ Dext(TMP, TMP, 0, 1);
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
@@ -1563,7 +1567,8 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
__ LoadFromOffset(kLoadUnsignedHalfword,
out,
obj,
- data_offset + (const_index << TIMES_2));
+ data_offset + (const_index << TIMES_2),
+ null_checker);
}
} else {
GpuRegister index_reg = index.AsRegister<GpuRegister>();
@@ -1581,7 +1586,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
} else {
__ Dsll(TMP, index_reg, TIMES_2);
__ Daddu(TMP, obj, TMP);
- __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
}
}
break;
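
For context on the compressed-string path above: the String count field packs the length together with a compression flag, and Dext(TMP, TMP, 0, 1) extracts the flag. A standalone model, assuming the flag occupies bit 0 (consistent with the Dext width and with kCompressed == 0 from the static_assert):

    #include <cstdint>

    // count = (length << 1) | flag; flag 0 = compressed (8-bit chars),
    // flag 1 = uncompressed (16-bit chars).
    static uint32_t PackCount(uint32_t length, bool compressed) {
      return (length << 1) | (compressed ? 0u : 1u);
    }

    static bool IsCompressed(uint32_t count) {
      return (count & 1u) == 0u;  // Dext(TMP, TMP, 0, 1), then branch on zero.
    }

    static uint32_t LengthFromCount(uint32_t count) {
      return count >> 1;
    }
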
@@ -1595,11 +1600,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ LoadFromOffset(load_type, out, obj, offset);
+ __ LoadFromOffset(load_type, out, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
__ Daddu(TMP, obj, TMP);
- __ LoadFromOffset(load_type, out, TMP, data_offset);
+ __ LoadFromOffset(load_type, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1609,11 +1614,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
+ __ LoadFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
__ Daddu(TMP, obj, TMP);
- __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
+ __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1623,11 +1628,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
+ __ LoadFpuFromOffset(kLoadWord, out, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
__ Daddu(TMP, obj, TMP);
- __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
+ __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1637,11 +1642,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
+ __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
__ Daddu(TMP, obj, TMP);
- __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
+ __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1650,9 +1655,6 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
}
- if (!maybe_compressed_char_at) {
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- }
if (type == Primitive::kPrimNot) {
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
@@ -1708,6 +1710,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
bool needs_runtime_call = locations->WillCall();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
switch (value_type) {
case Primitive::kPrimBoolean:
@@ -1717,10 +1720,10 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
- __ StoreToOffset(kStoreByte, value, obj, offset);
+ __ StoreToOffset(kStoreByte, value, obj, offset, null_checker);
} else {
__ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
- __ StoreToOffset(kStoreByte, value, TMP, data_offset);
+ __ StoreToOffset(kStoreByte, value, TMP, data_offset, null_checker);
}
break;
}
@@ -1732,11 +1735,11 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- __ StoreToOffset(kStoreHalfword, value, obj, offset);
+ __ StoreToOffset(kStoreHalfword, value, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
__ Daddu(TMP, obj, TMP);
- __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
+ __ StoreToOffset(kStoreHalfword, value, TMP, data_offset, null_checker);
}
break;
}
@@ -1786,10 +1789,10 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
}
__ PoisonHeapReference(AT, value);
__ Sw(AT, base_reg, data_offset);
+ null_checker();
} else {
- __ StoreToOffset(kStoreWord, value, base_reg, data_offset);
+ __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
}
- codegen_->MaybeRecordImplicitNullCheck(instruction);
if (needs_write_barrier) {
DCHECK_EQ(value_type, Primitive::kPrimNot);
codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
@@ -1810,11 +1813,11 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ StoreToOffset(kStoreDoubleword, value, obj, offset);
+ __ StoreToOffset(kStoreDoubleword, value, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
__ Daddu(TMP, obj, TMP);
- __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
+ __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset, null_checker);
}
break;
}
@@ -1826,11 +1829,11 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ StoreFpuToOffset(kStoreWord, value, obj, offset);
+ __ StoreFpuToOffset(kStoreWord, value, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
__ Daddu(TMP, obj, TMP);
- __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
+ __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset, null_checker);
}
break;
}
@@ -1842,11 +1845,11 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
+ __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
__ Daddu(TMP, obj, TMP);
- __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
+ __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset, null_checker);
}
break;
}
@@ -1855,11 +1858,6 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
}
-
- // Ints and objects are handled in the switch.
- if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- }
}
void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -1888,31 +1886,178 @@ void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction)
}
void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction,
- LocationSummary::kCallOnSlowPath);
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
+
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ switch (type_check_kind) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kAbstractClassCheck:
+ case TypeCheckKind::kClassHierarchyCheck:
+ case TypeCheckKind::kArrayObjectCheck:
+ call_kind = throws_into_catch
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
+ break;
+ case TypeCheckKind::kArrayCheck:
+ case TypeCheckKind::kUnresolvedCheck:
+ case TypeCheckKind::kInterfaceCheck:
+ call_kind = LocationSummary::kCallOnSlowPath;
+ break;
+ }
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- // Note that TypeCheckSlowPathMIPS64 uses this register too.
locations->AddTemp(Location::RequiresRegister());
}
void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
- GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();
+ GpuRegister temp = locations->GetTemp(0).AsRegister<GpuRegister>();
+ const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+ const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
+ const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
+ const uint32_t object_array_data_offset =
+ mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ Mips64Label done;
+ // Always false when read barriers are emitted: the fast paths below avoid read barriers
+ // (for performance and code size), which can produce false negatives that the slow path
+ // must be able to retry via the non-fatal entrypoint.
+ bool is_type_check_slow_path_fatal = false;
+ if (!kEmitCompilerReadBarrier) {
+ is_type_check_slow_path_fatal =
+ (type_check_kind == TypeCheckKind::kExactCheck ||
+ type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
+ !instruction->CanThrowIntoCatchBlock();
+ }
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
+ new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(slow_path);
- // TODO: avoid this check if we know obj is not null.
- __ Beqzc(obj, slow_path->GetExitLabel());
- // Compare the class of `obj` with `cls`.
- __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
- __ MaybeUnpoisonHeapReference(obj_cls);
- __ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
+ // Avoid this check if we know `obj` is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ Beqzc(obj, &done);
+ }
+
+ switch (type_check_kind) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kArrayCheck: {
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ LoadFromOffset(kLoadUnsignedWord, temp, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // Jump to slow path for throwing the exception or doing a
+ // more involved array check.
+ __ Bnec(temp, cls, slow_path->GetEntryLabel());
+ break;
+ }
+
+ case TypeCheckKind::kAbstractClassCheck: {
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ LoadFromOffset(kLoadUnsignedWord, temp, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // If the class is abstract, we eagerly fetch the super class of the
+ // object to avoid doing a comparison we know will fail.
+ Mips64Label loop;
+ __ Bind(&loop);
+ // /* HeapReference<Class> */ temp = temp->super_class_
+ __ LoadFromOffset(kLoadUnsignedWord, temp, temp, super_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception.
+ __ Beqzc(temp, slow_path->GetEntryLabel());
+ // Otherwise, compare the classes.
+ __ Bnec(temp, cls, &loop);
+ break;
+ }
+
+ case TypeCheckKind::kClassHierarchyCheck: {
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ LoadFromOffset(kLoadUnsignedWord, temp, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // Walk over the class hierarchy to find a match.
+ Mips64Label loop;
+ __ Bind(&loop);
+ __ Beqc(temp, cls, &done);
+ // /* HeapReference<Class> */ temp = temp->super_class_
+ __ LoadFromOffset(kLoadUnsignedWord, temp, temp, super_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception. Otherwise, jump to the beginning of the loop.
+ __ Bnezc(temp, &loop);
+ __ Bc(slow_path->GetEntryLabel());
+ break;
+ }
+
+ case TypeCheckKind::kArrayObjectCheck: {
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ LoadFromOffset(kLoadUnsignedWord, temp, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // Do an exact check.
+ __ Beqc(temp, cls, &done);
+ // Otherwise, we need to check that the object's class is a non-primitive array.
+ // /* HeapReference<Class> */ temp = temp->component_type_
+ __ LoadFromOffset(kLoadUnsignedWord, temp, temp, component_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // If the component type is null, jump to the slow path to throw the exception.
+ __ Beqzc(temp, slow_path->GetEntryLabel());
+ // Otherwise, the object is indeed an array, further check that this component
+ // type is not a primitive type.
+ __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ Bnezc(temp, slow_path->GetEntryLabel());
+ break;
+ }
+
+ case TypeCheckKind::kUnresolvedCheck:
+ // We always go into the type check slow path for the unresolved check case.
+ // We cannot directly call the CheckCast runtime entry point
+ // without resorting to a type checking slow path here (i.e. by
+ // calling InvokeRuntime directly), as it would require
+ // assigning fixed registers for the inputs of this HCheckCast
+ // instruction (following the runtime calling convention), which
+ // might be cluttered by the potential first read barrier
+ // emission at the beginning of this method.
+ __ Bc(slow_path->GetEntryLabel());
+ break;
+
+ case TypeCheckKind::kInterfaceCheck: {
+ // Avoid read barriers to improve performance of the fast path. We cannot get false
+ // positives by doing this.
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ LoadFromOffset(kLoadUnsignedWord, temp, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // /* HeapReference<Class> */ temp = temp->iftable_
+ __ LoadFromOffset(kLoadUnsignedWord, temp, temp, iftable_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // Iftable is never null.
+ __ Lw(TMP, temp, array_length_offset);
+ // Loop through the iftable and check if any class matches.
+ Mips64Label loop;
+ __ Bind(&loop);
+ __ Beqzc(TMP, slow_path->GetEntryLabel());
+ __ Lwu(AT, temp, object_array_data_offset);
+ __ MaybeUnpoisonHeapReference(AT);
+ // Go to next interface.
+ __ Daddiu(temp, temp, 2 * kHeapReferenceSize);
+ __ Addiu(TMP, TMP, -2);
+ // Compare the classes and continue the loop if they do not match.
+ __ Bnec(AT, cls, &loop);
+ break;
+ }
+ }
+
+ __ Bind(&done);
__ Bind(slow_path->GetExitLabel());
}
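
The fatality decision at the top of this method reduces to a small predicate: only the four simple check kinds can be fatal, and only without read barriers (whose fast-path false negatives need a returning retry) and without a catch handler in the method. A standalone restatement:

    enum class TypeCheckKind {
      kExactCheck, kAbstractClassCheck, kClassHierarchyCheck, kArrayObjectCheck,
      kArrayCheck, kUnresolvedCheck, kInterfaceCheck,
    };

    // Mirrors the is_type_check_slow_path_fatal computation above.
    static bool IsTypeCheckSlowPathFatal(TypeCheckKind kind,
                                         bool emit_read_barrier,
                                         bool throws_into_catch) {
      if (emit_read_barrier) {
        return false;  // Fast paths may report false negatives; must be able to return.
      }
      switch (kind) {
        case TypeCheckKind::kExactCheck:
        case TypeCheckKind::kAbstractClassCheck:
        case TypeCheckKind::kClassHierarchyCheck:
        case TypeCheckKind::kArrayObjectCheck:
          return !throws_into_catch;
        default:
          return false;  // kArrayCheck, kUnresolvedCheck, kInterfaceCheck may return.
      }
    }
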
@@ -3128,6 +3273,8 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
LoadOperandType load_type = kLoadUnsignedByte;
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
+
switch (type) {
case Primitive::kPrimBoolean:
load_type = kLoadUnsignedByte;
@@ -3159,14 +3306,12 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
if (!Primitive::IsFloatingPointType(type)) {
DCHECK(locations->Out().IsRegister());
GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
- __ LoadFromOffset(load_type, dst, obj, offset);
+ __ LoadFromOffset(load_type, dst, obj, offset, null_checker);
} else {
DCHECK(locations->Out().IsFpuRegister());
FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
- __ LoadFpuFromOffset(load_type, dst, obj, offset);
+ __ LoadFpuFromOffset(load_type, dst, obj, offset, null_checker);
}
-
- codegen_->MaybeRecordImplicitNullCheck(instruction);
// TODO: memory barrier?
if (type == Primitive::kPrimNot) {
@@ -3196,6 +3341,8 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
StoreOperandType store_type = kStoreByte;
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1));
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
+
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
@@ -3227,17 +3374,16 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
// need poisoning.
DCHECK_EQ(type, Primitive::kPrimNot);
__ PoisonHeapReference(TMP, src);
- __ StoreToOffset(store_type, TMP, obj, offset);
+ __ StoreToOffset(store_type, TMP, obj, offset, null_checker);
} else {
- __ StoreToOffset(store_type, src, obj, offset);
+ __ StoreToOffset(store_type, src, obj, offset, null_checker);
}
} else {
DCHECK(locations->InAt(1).IsFpuRegister());
FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
- __ StoreFpuToOffset(store_type, src, obj, offset);
+ __ StoreFpuToOffset(store_type, src, obj, offset, null_checker);
}
- codegen_->MaybeRecordImplicitNullCheck(instruction);
// TODO: memory barriers?
if (needs_write_barrier) {
DCHECK(locations->InAt(1).IsRegister());
@@ -3280,8 +3426,22 @@ void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(
}
void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
- LocationSummary::CallKind call_kind =
- instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ switch (type_check_kind) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kAbstractClassCheck:
+ case TypeCheckKind::kClassHierarchyCheck:
+ case TypeCheckKind::kArrayObjectCheck:
+ call_kind = LocationSummary::kNoCall;
+ break;
+ case TypeCheckKind::kArrayCheck:
+ case TypeCheckKind::kUnresolvedCheck:
+ case TypeCheckKind::kInterfaceCheck:
+ call_kind = LocationSummary::kCallOnSlowPath;
+ break;
+ }
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -3291,37 +3451,143 @@ void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
Mips64Label done;
+ SlowPathCodeMIPS64* slow_path = nullptr;
// Return 0 if `obj` is null.
- // TODO: Avoid this check if we know `obj` is not null.
- __ Move(out, ZERO);
- __ Beqzc(obj, &done);
-
- // Compare the class of `obj` with `cls`.
- __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
- __ MaybeUnpoisonHeapReference(out);
- if (instruction->IsExactCheck()) {
- // Classes must be equal for the instanceof to succeed.
- __ Xor(out, out, cls);
- __ Sltiu(out, out, 1);
- } else {
- // If the classes are not equal, we go into a slow path.
- DCHECK(locations->OnlyCallsOnSlowPath());
- SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
- codegen_->AddSlowPath(slow_path);
- __ Bnec(out, cls, slow_path->GetEntryLabel());
- __ LoadConst32(out, 1);
- __ Bind(slow_path->GetExitLabel());
+ // Avoid this check if we know `obj` is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ Move(out, ZERO);
+ __ Beqzc(obj, &done);
+ }
+
+ switch (type_check_kind) {
+ case TypeCheckKind::kExactCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ __ LoadFromOffset(kLoadUnsignedWord, out, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // Classes must be equal for the instanceof to succeed.
+ __ Xor(out, out, cls);
+ __ Sltiu(out, out, 1);
+ break;
+ }
+
+ case TypeCheckKind::kAbstractClassCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ __ LoadFromOffset(kLoadUnsignedWord, out, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // If the class is abstract, we eagerly fetch the super class of the
+ // object to avoid doing a comparison we know will fail.
+ Mips64Label loop;
+ __ Bind(&loop);
+ // /* HeapReference<Class> */ out = out->super_class_
+ __ LoadFromOffset(kLoadUnsignedWord, out, out, super_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ Beqzc(out, &done);
+ __ Bnec(out, cls, &loop);
+ __ LoadConst32(out, 1);
+ break;
+ }
+
+ case TypeCheckKind::kClassHierarchyCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ __ LoadFromOffset(kLoadUnsignedWord, out, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // Walk over the class hierarchy to find a match.
+ Mips64Label loop, success;
+ __ Bind(&loop);
+ __ Beqc(out, cls, &success);
+ // /* HeapReference<Class> */ out = out->super_class_
+ __ LoadFromOffset(kLoadUnsignedWord, out, out, super_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ __ Bnezc(out, &loop);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ Bc(&done);
+ __ Bind(&success);
+ __ LoadConst32(out, 1);
+ break;
+ }
+
+ case TypeCheckKind::kArrayObjectCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ __ LoadFromOffset(kLoadUnsignedWord, out, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // Do an exact check.
+ Mips64Label success;
+ __ Beqc(out, cls, &success);
+ // Otherwise, we need to check that the object's class is a non-primitive array.
+ // /* HeapReference<Class> */ out = out->component_type_
+ __ LoadFromOffset(kLoadUnsignedWord, out, out, component_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ Beqzc(out, &done);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ Sltiu(out, out, 1);
+ __ Bc(&done);
+ __ Bind(&success);
+ __ LoadConst32(out, 1);
+ break;
+ }
+
+ case TypeCheckKind::kArrayCheck: {
+ // No read barrier since the slow path will retry upon failure.
+ // /* HeapReference<Class> */ out = obj->klass_
+ __ LoadFromOffset(kLoadUnsignedWord, out, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
+ /* is_fatal */ false);
+ codegen_->AddSlowPath(slow_path);
+ __ Bnec(out, cls, slow_path->GetEntryLabel());
+ __ LoadConst32(out, 1);
+ break;
+ }
+
+ case TypeCheckKind::kUnresolvedCheck:
+ case TypeCheckKind::kInterfaceCheck: {
+ // Note that we indeed only call on slow path, but we always go
+ // into the slow path for the unresolved and interface check
+ // cases.
+ //
+ // We cannot directly call the InstanceofNonTrivial runtime
+ // entry point without resorting to a type checking slow path
+ // here (i.e. by calling InvokeRuntime directly), as it would
+ // require assigning fixed registers for the inputs of this
+ // HInstanceOf instruction (following the runtime calling
+ // convention), which might be cluttered by the potential first
+ // read barrier emission at the beginning of this method.
+ //
+ // TODO: Introduce a new runtime entry point taking the object
+ // to test (instead of its class) as argument, and let it deal
+ // with the read barrier issues. This will let us refactor this
+ // case of the `switch` code as it was previously (with a direct
+ // call to the runtime not using a type checking slow path).
+ // This should also be beneficial for the other cases above.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
+ /* is_fatal */ false);
+ codegen_->AddSlowPath(slow_path);
+ __ Bc(slow_path->GetEntryLabel());
+ break;
+ }
}
__ Bind(&done);
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
}
void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 26cc7dc788..3056f7f464 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -492,8 +492,6 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
- // Deduplication map for patchable boot image addresses.
- Uint32ToLiteralMap boot_image_address_patches_;
// Patches for string root accesses in JIT compiled code.
StringToLiteralMap jit_string_patches_;
// Patches for class root accesses in JIT compiled code.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index b779aed763..0b50619a66 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1015,7 +1015,6 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
assembler_(graph->GetArena()),
isa_features_(isa_features),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -4603,13 +4602,6 @@ void CodeGeneratorX86::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp
temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86PointerSize).Int32Value()));
}
-void CodeGeneratorX86::RecordSimplePatch() {
- if (GetCompilerOptions().GetIncludePatchInformation()) {
- simple_patches_.emplace_back();
- __ Bind(&simple_patches_.back());
- }
-}
-
void CodeGeneratorX86::RecordBootStringPatch(HLoadString* load_string) {
DCHECK(GetCompilerOptions().IsBootImage());
HX86ComputeBaseMethodAddress* address = nullptr;
@@ -4682,17 +4674,12 @@ void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
DCHECK(linker_patches->empty());
size_t size =
pc_relative_dex_cache_patches_.size() +
- simple_patches_.size() +
string_patches_.size() +
boot_image_type_patches_.size() +
type_bss_entry_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
- for (const Label& label : simple_patches_) {
- uint32_t literal_offset = label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
- }
if (!GetCompilerOptions().IsBootImage()) {
DCHECK(boot_image_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
@@ -6154,7 +6141,6 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
DCHECK_NE(address, 0u);
__ movl(out, Immediate(address));
- codegen_->RecordSimplePatch();
break;
}
case HLoadClass::LoadKind::kBssEntry: {
@@ -6311,7 +6297,6 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S
reinterpret_cast<uintptr_t>(load->GetString().Get()));
DCHECK_NE(address, 0u);
__ movl(out, Immediate(address));
- codegen_->RecordSimplePatch();
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBssEntry: {
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 5360dc9209..65ee383b54 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -412,7 +412,6 @@ class CodeGeneratorX86 : public CodeGenerator {
// Generate a call to a virtual method.
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
- void RecordSimplePatch();
void RecordBootStringPatch(HLoadString* load_string);
void RecordBootTypePatch(HLoadClass* load_class);
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
@@ -633,8 +632,6 @@ class CodeGeneratorX86 : public CodeGenerator {
// PC-relative DexCache access info.
ArenaDeque<X86PcRelativePatchInfo> pc_relative_dex_cache_patches_;
- // Patch locations for patchoat where the linker doesn't do any other work.
- ArenaDeque<Label> simple_patches_;
// String patch locations; type depends on configuration (app .bss or boot image PIC/non-PIC).
ArenaDeque<X86PcRelativePatchInfo> string_patches_;
// Type patch locations for boot image; type depends on configuration (boot image PIC/non-PIC).
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 179bf6d3d1..644fceebe4 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1070,13 +1070,6 @@ void CodeGeneratorX86_64::GenerateVirtualCall(HInvokeVirtual* invoke, Location t
kX86_64PointerSize).SizeValue()));
}
-void CodeGeneratorX86_64::RecordSimplePatch() {
- if (GetCompilerOptions().GetIncludePatchInformation()) {
- simple_patches_.emplace_back();
- __ Bind(&simple_patches_.back());
- }
-}
-
void CodeGeneratorX86_64::RecordBootStringPatch(HLoadString* load_string) {
DCHECK(GetCompilerOptions().IsBootImage());
string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex().index_);
@@ -1126,17 +1119,12 @@ void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
DCHECK(linker_patches->empty());
size_t size =
pc_relative_dex_cache_patches_.size() +
- simple_patches_.size() +
string_patches_.size() +
boot_image_type_patches_.size() +
type_bss_entry_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
- for (const Label& label : simple_patches_) {
- uint32_t literal_offset = label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
- }
if (!GetCompilerOptions().IsBootImage()) {
DCHECK(boot_image_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
@@ -1227,7 +1215,6 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
isa_features_(isa_features),
constant_area_start_(0),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -5545,7 +5532,6 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
DCHECK_NE(address, 0u);
__ movl(out, Immediate(address)); // Zero-extended.
- codegen_->RecordSimplePatch();
break;
}
case HLoadClass::LoadKind::kBssEntry: {
@@ -5681,7 +5667,6 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
reinterpret_cast<uintptr_t>(load->GetString().Get()));
DCHECK_NE(address, 0u);
__ movl(out, Immediate(address)); // Zero-extended.
- codegen_->RecordSimplePatch();
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBssEntry: {
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 3a83731b3f..376c3ce381 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -406,7 +406,6 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
- void RecordSimplePatch();
void RecordBootStringPatch(HLoadString* load_string);
void RecordBootTypePatch(HLoadClass* load_class);
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
@@ -602,8 +601,6 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// PC-relative DexCache access info.
ArenaDeque<PatchInfo<Label>> pc_relative_dex_cache_patches_;
- // Patch locations for patchoat where the linker doesn't do any other work.
- ArenaDeque<Label> simple_patches_;
// String patch locations; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PatchInfo<Label>> string_patches_;
// Type patch locations for boot image (always PIC).
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 5539413aad..d6513c8e34 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -57,21 +57,27 @@ static bool IsIntAndGet(HInstruction* instruction, int64_t* value) {
return false;
}
-/** Returns b^e for b,e >= 1. Sets overflow if arithmetic wrap-around occurred. */
+/** Computes a * b for a,b > 0. Sets overflow if arithmetic wrap-around occurred. */
+static int64_t SafeMul(int64_t a, int64_t b, /*out*/ bool* overflow) {
+ if (a > 0 && b > 0 && a > (std::numeric_limits<int64_t>::max() / b)) {
+ *overflow = true;
+ }
+ return a * b;
+}
+
+/** Returns b^e for b,e > 0. Sets overflow if arithmetic wrap-around occurred. */
static int64_t IntPow(int64_t b, int64_t e, /*out*/ bool* overflow) {
- DCHECK_GE(b, 1);
- DCHECK_GE(e, 1);
+ DCHECK_LT(0, b);
+ DCHECK_LT(0, e);
int64_t pow = 1;
while (e) {
if (e & 1) {
- int64_t oldpow = pow;
- pow *= b;
- if (pow < oldpow) {
- *overflow = true;
- }
+ pow = SafeMul(pow, b, overflow);
}
e >>= 1;
- b *= b;
+ if (e) {
+ b = SafeMul(b, b, overflow);
+ }
}
return pow;
}
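
The rewritten IntPow is square-and-multiply with an overflow flag. SafeMul also checks the squaring of b, which the old pow-only wrap test missed, while the new if (e) guard avoids flagging the final squaring whose result is never used. A self-contained copy for experimentation (as in the original, the product itself may still wrap, relying on two's-complement behavior in practice):

    #include <cstdint>
    #include <iostream>
    #include <limits>

    static int64_t SafeMul(int64_t a, int64_t b, /*out*/ bool* overflow) {
      if (a > 0 && b > 0 && a > std::numeric_limits<int64_t>::max() / b) {
        *overflow = true;
      }
      return a * b;
    }

    static int64_t IntPow(int64_t b, int64_t e, /*out*/ bool* overflow) {
      int64_t pow = 1;
      while (e) {
        if (e & 1) {
          pow = SafeMul(pow, b, overflow);
        }
        e >>= 1;
        if (e) {
          b = SafeMul(b, b, overflow);  // Skip the last, unused squaring.
        }
      }
      return pow;
    }

    int main() {
      bool overflow = false;
      std::cout << IntPow(3, 4, &overflow) << " " << overflow << "\n";  // 81 0
      overflow = false;
      IntPow(10, 19, &overflow);       // 10^19 exceeds INT64_MAX.
      std::cout << overflow << "\n";   // 1
    }
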
@@ -377,6 +383,54 @@ bool InductionVarRange::IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) co
return false;
}
+bool InductionVarRange::IsUnitStride(HInstruction* instruction,
+ /*out*/ HInstruction** offset) const {
+ HLoopInformation* loop = nullptr;
+ HInductionVarAnalysis::InductionInfo* info = nullptr;
+ HInductionVarAnalysis::InductionInfo* trip = nullptr;
+ if (HasInductionInfo(instruction, instruction, &loop, &info, &trip)) {
+ if (info->induction_class == HInductionVarAnalysis::kLinear &&
+ info->op_b->operation == HInductionVarAnalysis::kFetch &&
+ !HInductionVarAnalysis::IsNarrowingLinear(info)) {
+ int64_t stride_value = 0;
+ if (IsConstant(info->op_a, kExact, &stride_value) && stride_value == 1) {
+ int64_t off_value = 0;
+ if (IsConstant(info->op_b, kExact, &off_value) && off_value == 0) {
+ *offset = nullptr;
+ } else {
+ *offset = info->op_b->fetch;
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
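
Informally, IsUnitStride() accepts an index that is a non-narrowing linear induction 1 * i + offset and reports the invariant offset (null when the offset is zero). A hypothetical source-level illustration of the accepted shape:

    #include <iostream>

    int main() {
      const int off = 3;
      const int n = 5;
      for (int i = 0; i < n; ++i) {
        int index = i + off;        // Linear induction 1 * i + off: unit stride;
        std::cout << index << " ";  // the reported invariant offset is `off`.
      }
      std::cout << "\n";            // Prints: 3 4 5 6 7
    }
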
+HInstruction* InductionVarRange::GenerateTripCount(HLoopInformation* loop,
+ HGraph* graph,
+ HBasicBlock* block) {
+ HInductionVarAnalysis::InductionInfo* trip =
+ induction_analysis_->LookupInfo(loop, GetLoopControl(loop));
+ if (trip != nullptr && !IsUnsafeTripCount(trip)) {
+ HInstruction* taken_test = nullptr;
+ HInstruction* trip_expr = nullptr;
+ if (IsBodyTripCount(trip)) {
+ if (!GenerateCode(trip->op_b, nullptr, graph, block, &taken_test, false, false)) {
+ return nullptr;
+ }
+ }
+ if (GenerateCode(trip->op_a, nullptr, graph, block, &trip_expr, false, false)) {
+ if (taken_test != nullptr) {
+ HInstruction* zero = graph->GetConstant(trip->type, 0);
+ trip_expr = Insert(block, new (graph->GetArena()) HSelect(taken_test, trip_expr, zero, kNoDexPc));
+ }
+ return trip_expr;
+ }
+ }
+ return nullptr;
+}
+
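
GenerateTripCount() guards a body trip count with its taken test via HSelect, since the raw expression is only meaningful when the loop is entered at least once. A standalone model for a loop of the assumed form for (i = lo; i < hi; ++i):

    #include <cstdint>
    #include <iostream>

    static int64_t GuardedTripCount(int64_t lo, int64_t hi) {
      int64_t trip_expr = hi - lo;        // GenerateCode(trip->op_a, ...)
      bool taken_test = lo < hi;          // GenerateCode(trip->op_b, ...)
      return taken_test ? trip_expr : 0;  // HSelect(taken_test, trip_expr, zero)
    }

    int main() {
      std::cout << GuardedTripCount(0, 1000) << "\n";  // 1000
      std::cout << GuardedTripCount(10, 3) << "\n";    // 0, not -7
    }
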
//
// Private class methods.
//
@@ -1157,12 +1211,15 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
HInstruction* opb = nullptr;
switch (info->induction_class) {
case HInductionVarAnalysis::kInvariant:
- // Invariants (note that even though is_min does not impact code generation for
- // invariants, some effort is made to keep this parameter consistent).
+ // Invariants (note that since invariants only have other invariants as
+ // sub-expressions, i.e. no induction, there is no need to adjust is_min).
switch (info->operation) {
case HInductionVarAnalysis::kAdd:
- case HInductionVarAnalysis::kRem: // no proper is_min for second arg
- case HInductionVarAnalysis::kXor: // no proper is_min for second arg
+ case HInductionVarAnalysis::kSub:
+ case HInductionVarAnalysis::kMul:
+ case HInductionVarAnalysis::kDiv:
+ case HInductionVarAnalysis::kRem:
+ case HInductionVarAnalysis::kXor:
case HInductionVarAnalysis::kLT:
case HInductionVarAnalysis::kLE:
case HInductionVarAnalysis::kGT:
@@ -1174,6 +1231,12 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
switch (info->operation) {
case HInductionVarAnalysis::kAdd:
operation = new (graph->GetArena()) HAdd(type, opa, opb); break;
+ case HInductionVarAnalysis::kSub:
+ operation = new (graph->GetArena()) HSub(type, opa, opb); break;
+ case HInductionVarAnalysis::kMul:
+ operation = new (graph->GetArena()) HMul(type, opa, opb, kNoDexPc); break;
+ case HInductionVarAnalysis::kDiv:
+ operation = new (graph->GetArena()) HDiv(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kRem:
operation = new (graph->GetArena()) HRem(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kXor:
@@ -1194,16 +1257,7 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
return true;
}
break;
- case HInductionVarAnalysis::kSub: // second reversed!
- if (GenerateCode(info->op_a, trip, graph, block, &opa, in_body, is_min) &&
- GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) {
- if (graph != nullptr) {
- *result = Insert(block, new (graph->GetArena()) HSub(type, opa, opb));
- }
- return true;
- }
- break;
- case HInductionVarAnalysis::kNeg: // reversed!
+ case HInductionVarAnalysis::kNeg:
if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) {
if (graph != nullptr) {
*result = Insert(block, new (graph->GetArena()) HNeg(type, opb));
@@ -1240,9 +1294,9 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
}
}
break;
- default:
- break;
- }
+ case HInductionVarAnalysis::kNop:
+ LOG(FATAL) << "unexpected invariant nop";
+ } // switch invariant operation
break;
case HInductionVarAnalysis::kLinear: {
// Linear induction a * i + b, for normalized 0 <= i < TC. For ranges, this should
@@ -1293,7 +1347,7 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
}
break;
}
- }
+ } // switch induction class
}
return false;
}
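
For reference, the removed "second reversed!" handling of kSub and kNeg came from interval arithmetic: bounding a difference needs the opposite bound of the subtrahend, min(a - b) = min(a) - max(b) and max(a - b) = max(a) - min(b). Since invariant sub-expressions carry no bounds to select, the operands can now be generated directly. A quick check of the identity:

    #include <iostream>

    int main() {
      // a in [2, 5], b in [1, 4]: a - b ranges over [2 - 4, 5 - 1] = [-2, 4].
      int a_min = 2, a_max = 5, b_min = 1, b_max = 4;
      std::cout << (a_min - b_max) << " " << (a_max - b_min) << "\n";  // -2 4
    }
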
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 6c424b78b9..0858d73982 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -24,7 +24,8 @@ namespace art {
/**
* This class implements range analysis on expressions within loops. It takes the results
* of induction variable analysis in the constructor and provides a public API to obtain
- * a conservative lower and upper bound value on each instruction in the HIR.
+ * a conservative lower and upper bound value or last value on each instruction in the HIR.
+ * The public API also provides a few general-purpose utility methods related to induction.
*
* The range analysis is done with a combination of symbolic and partial integral evaluation
* of expressions. The analysis avoids complications with wrap-around arithmetic on the integral
@@ -154,6 +155,19 @@ class InductionVarRange {
*/
bool IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) const;
+ /**
+ * Checks if instruction is a unit stride induction inside the closest enveloping loop.
+ * Returns invariant offset on success.
+ */
+ bool IsUnitStride(HInstruction* instruction, /*out*/ HInstruction** offset) const;
+
+ /**
+ * Generates the trip count expression for the given loop. Code is generated in the given
+ * block and graph. The expression is guarded by a taken test if needed. Returns the trip
+ * count expression on success or null otherwise.
+ */
+ HInstruction* GenerateTripCount(HLoopInformation* loop, HGraph* graph, HBasicBlock* block);
+
private:
/*
* Enum used in IsConstant() request.
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index d81817fb09..fcdf8eb7dc 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -48,6 +48,11 @@ class InductionVarRangeTest : public CommonCompilerTest {
EXPECT_EQ(v1.is_known, v2.is_known);
}
+ void ExpectInt(int32_t value, HInstruction* i) {
+ ASSERT_TRUE(i->IsIntConstant());
+ EXPECT_EQ(value, i->AsIntConstant()->GetValue());
+ }
+
//
// Construction methods.
//
@@ -757,10 +762,20 @@ TEST_F(InductionVarRangeTest, ConstantTripCountUp) {
// Last value (unsimplified).
HInstruction* last = range_.GenerateLastValue(phi, graph_, loop_preheader_);
ASSERT_TRUE(last->IsAdd());
- ASSERT_TRUE(last->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, last->InputAt(0)->AsIntConstant()->GetValue());
- ASSERT_TRUE(last->InputAt(1)->IsIntConstant());
- EXPECT_EQ(0, last->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(1000, last->InputAt(0));
+ ExpectInt(0, last->InputAt(1));
+
+ // Loop logic.
+ int64_t tc = 0;
+ EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
+ EXPECT_EQ(1000, tc);
+ HInstruction* offset = nullptr;
+ EXPECT_TRUE(range_.IsUnitStride(phi, &offset));
+ EXPECT_TRUE(offset == nullptr);
+ HInstruction* tce = range_.GenerateTripCount(
+ loop_header_->GetLoopInformation(), graph_, loop_preheader_);
+ ASSERT_TRUE(tce != nullptr);
+ ExpectInt(1000, tce);
}
TEST_F(InductionVarRangeTest, ConstantTripCountDown) {
@@ -799,15 +814,27 @@ TEST_F(InductionVarRangeTest, ConstantTripCountDown) {
// Last value (unsimplified).
HInstruction* last = range_.GenerateLastValue(phi, graph_, loop_preheader_);
ASSERT_TRUE(last->IsSub());
- ASSERT_TRUE(last->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, last->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(1000, last->InputAt(0));
ASSERT_TRUE(last->InputAt(1)->IsNeg());
last = last->InputAt(1)->InputAt(0);
ASSERT_TRUE(last->IsSub());
- ASSERT_TRUE(last->InputAt(0)->IsIntConstant());
- EXPECT_EQ(0, last->InputAt(0)->AsIntConstant()->GetValue());
- ASSERT_TRUE(last->InputAt(1)->IsIntConstant());
- EXPECT_EQ(1000, last->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(0, last->InputAt(0));
+ ExpectInt(1000, last->InputAt(1));
+
+ // Loop logic.
+ int64_t tc = 0;
+ EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
+ EXPECT_EQ(1000, tc);
+ HInstruction* offset = nullptr;
+ EXPECT_FALSE(range_.IsUnitStride(phi, &offset));
+ HInstruction* tce = range_.GenerateTripCount(
+ loop_header_->GetLoopInformation(), graph_, loop_preheader_);
+ ASSERT_TRUE(tce != nullptr);
+ ASSERT_TRUE(tce->IsNeg());
+ last = tce->InputAt(0);
+ EXPECT_TRUE(last->IsSub());
+ ExpectInt(0, last->InputAt(0));
+ ExpectInt(1000, last->InputAt(1));
}
TEST_F(InductionVarRangeTest, SymbolicTripCountUp) {
@@ -851,27 +878,22 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountUp) {
// Verify lower is 0+0.
ASSERT_TRUE(lower != nullptr);
ASSERT_TRUE(lower->IsAdd());
- ASSERT_TRUE(lower->InputAt(0)->IsIntConstant());
- EXPECT_EQ(0, lower->InputAt(0)->AsIntConstant()->GetValue());
- ASSERT_TRUE(lower->InputAt(1)->IsIntConstant());
- EXPECT_EQ(0, lower->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(0, lower->InputAt(0));
+ ExpectInt(0, lower->InputAt(1));
// Verify upper is (V-1)+0.
ASSERT_TRUE(upper != nullptr);
ASSERT_TRUE(upper->IsAdd());
ASSERT_TRUE(upper->InputAt(0)->IsSub());
EXPECT_TRUE(upper->InputAt(0)->InputAt(0)->IsParameterValue());
- ASSERT_TRUE(upper->InputAt(0)->InputAt(1)->IsIntConstant());
- EXPECT_EQ(1, upper->InputAt(0)->InputAt(1)->AsIntConstant()->GetValue());
- ASSERT_TRUE(upper->InputAt(1)->IsIntConstant());
- EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(1, upper->InputAt(0)->InputAt(1));
+ ExpectInt(0, upper->InputAt(1));
// Verify taken-test is 0<V.
HInstruction* taken = range_.GenerateTakenTest(increment_, graph_, loop_preheader_);
ASSERT_TRUE(taken != nullptr);
ASSERT_TRUE(taken->IsLessThan());
- ASSERT_TRUE(taken->InputAt(0)->IsIntConstant());
- EXPECT_EQ(0, taken->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(0, taken->InputAt(0));
EXPECT_TRUE(taken->InputAt(1)->IsParameterValue());
// Replacement.
@@ -880,6 +902,21 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountUp) {
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(1), v1);
ExpectEqual(Value(y_, 1, 0), v2);
+
+ // Loop logic.
+ int64_t tc = 0;
+ EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
+ EXPECT_EQ(0, tc); // unknown
+ HInstruction* offset = nullptr;
+ EXPECT_TRUE(range_.IsUnitStride(phi, &offset));
+ EXPECT_TRUE(offset == nullptr);
+ HInstruction* tce = range_.GenerateTripCount(
+ loop_header_->GetLoopInformation(), graph_, loop_preheader_);
+ ASSERT_TRUE(tce != nullptr);
+ EXPECT_TRUE(tce->IsSelect()); // guarded by taken-test
+ ExpectInt(0, tce->InputAt(0));
+ EXPECT_TRUE(tce->InputAt(1)->IsParameterValue());
+ EXPECT_TRUE(tce->InputAt(2)->IsLessThan());
}
TEST_F(InductionVarRangeTest, SymbolicTripCountDown) {
@@ -923,32 +960,26 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountDown) {
// Verify lower is 1000-((1000-V)-1).
ASSERT_TRUE(lower != nullptr);
ASSERT_TRUE(lower->IsSub());
- ASSERT_TRUE(lower->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, lower->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(1000, lower->InputAt(0));
lower = lower->InputAt(1);
ASSERT_TRUE(lower->IsSub());
- ASSERT_TRUE(lower->InputAt(1)->IsIntConstant());
- EXPECT_EQ(1, lower->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(1, lower->InputAt(1));
lower = lower->InputAt(0);
ASSERT_TRUE(lower->IsSub());
- ASSERT_TRUE(lower->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, lower->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(1000, lower->InputAt(0));
EXPECT_TRUE(lower->InputAt(1)->IsParameterValue());
// Verify upper is 1000-0.
ASSERT_TRUE(upper != nullptr);
ASSERT_TRUE(upper->IsSub());
- ASSERT_TRUE(upper->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, upper->InputAt(0)->AsIntConstant()->GetValue());
- ASSERT_TRUE(upper->InputAt(1)->IsIntConstant());
- EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(1000, upper->InputAt(0));
+ ExpectInt(0, upper->InputAt(1));
// Verify taken-test is 1000>V.
HInstruction* taken = range_.GenerateTakenTest(increment_, graph_, loop_preheader_);
ASSERT_TRUE(taken != nullptr);
ASSERT_TRUE(taken->IsGreaterThan());
- ASSERT_TRUE(taken->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, taken->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(1000, taken->InputAt(0));
EXPECT_TRUE(taken->InputAt(1)->IsParameterValue());
// Replacement.
@@ -957,6 +988,23 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountDown) {
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(y_, 1, 0), v1);
ExpectEqual(Value(999), v2);
+
+ // Loop logic.
+ int64_t tc = 0;
+ EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
+ EXPECT_EQ(0, tc); // unknown
+ HInstruction* offset = nullptr;
+ EXPECT_FALSE(range_.IsUnitStride(phi, &offset));
+ HInstruction* tce = range_.GenerateTripCount(
+ loop_header_->GetLoopInformation(), graph_, loop_preheader_);
+ ASSERT_TRUE(tce != nullptr);
+ EXPECT_TRUE(tce->IsSelect()); // guarded by taken-test
+ ExpectInt(0, tce->InputAt(0));
+ EXPECT_TRUE(tce->InputAt(1)->IsSub());
+ EXPECT_TRUE(tce->InputAt(2)->IsGreaterThan());
+ tce = tce->InputAt(1);
+  ExpectInt(1000, tce->InputAt(0));
+  EXPECT_TRUE(tce->InputAt(1)->IsParameterValue());
}
} // namespace art
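
A note for reading the Select checks in the tests above: HSelect stores its operands as (false value, true value, condition), so InputAt(0)/InputAt(1)/InputAt(2) are the not-taken value, the taken value, and the taken-test. For SymbolicTripCountUp the generated guard therefore reads as tce = (0 < V) ? V : 0.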
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 14437dea39..0b96005a17 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -549,7 +549,7 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
is_referrer,
invoke_instruction->GetDexPc(),
/* needs_access_check */ false);
- HLoadClass::LoadKind kind = HSharpening::SharpenClass(
+ HLoadClass::LoadKind kind = HSharpening::ComputeLoadClassKind(
load_class, codegen_, compiler_driver_, caller_compilation_unit_);
DCHECK(kind != HLoadClass::LoadKind::kInvalid)
<< "We should always be able to reference a class for inline caches";
@@ -1498,7 +1498,7 @@ size_t HInliner::RunOptimizations(HGraph* callee_graph,
HDeadCodeElimination dce(callee_graph, inline_stats_, "dead_code_elimination$inliner");
HConstantFolding fold(callee_graph, "constant_folding$inliner");
HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_, handles_);
- InstructionSimplifier simplify(callee_graph, inline_stats_);
+ InstructionSimplifier simplify(callee_graph, codegen_, inline_stats_);
IntrinsicsRecognizer intrinsics(callee_graph, inline_stats_);
HOptimization* optimizations[] = {
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index c60f6e5393..88f67fae04 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -37,37 +37,45 @@ HBasicBlock* HInstructionBuilder::FindBlockStartingAt(uint32_t dex_pc) const {
return block_builder_->GetBlockAt(dex_pc);
}
-ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsFor(HBasicBlock* block) {
+inline ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsFor(HBasicBlock* block) {
ArenaVector<HInstruction*>* locals = &locals_for_[block->GetBlockId()];
const size_t vregs = graph_->GetNumberOfVRegs();
- if (locals->size() != vregs) {
- locals->resize(vregs, nullptr);
-
- if (block->IsCatchBlock()) {
- // We record incoming inputs of catch phis at throwing instructions and
- // must therefore eagerly create the phis. Phis for undefined vregs will
- // be deleted when the first throwing instruction with the vreg undefined
- // is encountered. Unused phis will be removed by dead phi analysis.
- for (size_t i = 0; i < vregs; ++i) {
- // No point in creating the catch phi if it is already undefined at
- // the first throwing instruction.
- HInstruction* current_local_value = (*current_locals_)[i];
- if (current_local_value != nullptr) {
- HPhi* phi = new (arena_) HPhi(
- arena_,
- i,
- 0,
- current_local_value->GetType());
- block->AddPhi(phi);
- (*locals)[i] = phi;
- }
+ if (locals->size() == vregs) {
+ return locals;
+ }
+ return GetLocalsForWithAllocation(block, locals, vregs);
+}
+
+ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsForWithAllocation(
+ HBasicBlock* block,
+ ArenaVector<HInstruction*>* locals,
+ const size_t vregs) {
+ DCHECK_NE(locals->size(), vregs);
+ locals->resize(vregs, nullptr);
+ if (block->IsCatchBlock()) {
+ // We record incoming inputs of catch phis at throwing instructions and
+ // must therefore eagerly create the phis. Phis for undefined vregs will
+ // be deleted when the first throwing instruction with the vreg undefined
+ // is encountered. Unused phis will be removed by dead phi analysis.
+ for (size_t i = 0; i < vregs; ++i) {
+ // No point in creating the catch phi if it is already undefined at
+ // the first throwing instruction.
+ HInstruction* current_local_value = (*current_locals_)[i];
+ if (current_local_value != nullptr) {
+ HPhi* phi = new (arena_) HPhi(
+ arena_,
+ i,
+ 0,
+ current_local_value->GetType());
+ block->AddPhi(phi);
+ (*locals)[i] = phi;
}
}
}
return locals;
}
-HInstruction* HInstructionBuilder::ValueOfLocalAt(HBasicBlock* block, size_t local) {
+inline HInstruction* HInstructionBuilder::ValueOfLocalAt(HBasicBlock* block, size_t local) {
ArenaVector<HInstruction*>* locals = GetLocalsFor(block);
return (*locals)[local];
}
@@ -1676,10 +1684,10 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
dex_pc,
needs_access_check);
- HLoadClass::LoadKind load_kind = HSharpening::SharpenClass(load_class,
- code_generator_,
- compiler_driver_,
- *dex_compilation_unit_);
+ HLoadClass::LoadKind load_kind = HSharpening::ComputeLoadClassKind(load_class,
+ code_generator_,
+ compiler_driver_,
+ *dex_compilation_unit_);
if (load_kind == HLoadClass::LoadKind::kInvalid) {
// We actually cannot reference this class, we're forced to bail.
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index e735a0c46d..7fdc1883ca 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -93,6 +93,10 @@ class HInstructionBuilder : public ValueObject {
HBasicBlock* FindBlockStartingAt(uint32_t dex_pc) const;
ArenaVector<HInstruction*>* GetLocalsFor(HBasicBlock* block);
+  // Out-of-line slow path of GetLocalsFor(). Keeping the allocation here leaves
+  // GetLocalsFor() with a small fast path that callers benefit from inlining.
+ ArenaVector<HInstruction*>* GetLocalsForWithAllocation(
+ HBasicBlock* block, ArenaVector<HInstruction*>* locals, const size_t vregs);
HInstruction* ValueOfLocalAt(HBasicBlock* block, size_t local);
HInstruction* LoadLocal(uint32_t register_index, Primitive::Type type) const;
HInstruction* LoadNullCheckedLocal(uint32_t register_index, uint32_t dex_pc);
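
The GetLocalsFor()/GetLocalsForWithAllocation() split above is the usual fast-path/slow-path idiom: keep the hot check tiny and inlinable, and move the rare allocation work out of line. A self-contained sketch of the pattern (hypothetical names, not ART code):

  #include <cstddef>

  struct LazyTable {
    int slots[16] = {};
    bool initialized = false;

    // Fast path: a single flag test, small enough to inline at every call site.
    int* Get(size_t i) {
      if (initialized) {
        return &slots[i];
      }
      return GetSlow(i);  // cold path; kept out of line in real code
    }

    // Slow path: does the one-time work, then serves the request.
    int* GetSlow(size_t i) {
      initialized = true;
      return &slots[i];
    }
  };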
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 35f59cb4a4..17421fc364 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -19,14 +19,18 @@
#include "escape.h"
#include "intrinsics.h"
#include "mirror/class-inl.h"
+#include "sharpening.h"
#include "scoped_thread_state_change-inl.h"
namespace art {
class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
public:
- InstructionSimplifierVisitor(HGraph* graph, OptimizingCompilerStats* stats)
+ InstructionSimplifierVisitor(HGraph* graph,
+ CodeGenerator* codegen,
+ OptimizingCompilerStats* stats)
: HGraphDelegateVisitor(graph),
+ codegen_(codegen),
stats_(stats) {}
void Run();
@@ -112,6 +116,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
void SimplifyAllocationIntrinsic(HInvoke* invoke);
void SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind);
+ CodeGenerator* codegen_;
OptimizingCompilerStats* stats_;
bool simplification_occurred_ = false;
int simplifications_at_current_position_ = 0;
@@ -123,7 +128,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
};
void InstructionSimplifier::Run() {
- InstructionSimplifierVisitor visitor(graph_, stats_);
+ InstructionSimplifierVisitor visitor(graph_, codegen_, stats_);
visitor.Run();
}
@@ -1805,6 +1810,8 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction)
{
ScopedObjectAccess soa(Thread::Current());
+ Primitive::Type source_component_type = Primitive::kPrimVoid;
+ Primitive::Type destination_component_type = Primitive::kPrimVoid;
ReferenceTypeInfo destination_rti = destination->GetReferenceTypeInfo();
if (destination_rti.IsValid()) {
if (destination_rti.IsObjectArray()) {
@@ -1814,6 +1821,8 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction)
optimizations.SetDestinationIsTypedObjectArray();
}
if (destination_rti.IsPrimitiveArrayClass()) {
+ destination_component_type =
+ destination_rti.GetTypeHandle()->GetComponentType()->GetPrimitiveType();
optimizations.SetDestinationIsPrimitiveArray();
} else if (destination_rti.IsNonPrimitiveArrayClass()) {
optimizations.SetDestinationIsNonPrimitiveArray();
@@ -1826,10 +1835,55 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction)
}
if (source_rti.IsPrimitiveArrayClass()) {
optimizations.SetSourceIsPrimitiveArray();
+ source_component_type = source_rti.GetTypeHandle()->GetComponentType()->GetPrimitiveType();
} else if (source_rti.IsNonPrimitiveArrayClass()) {
optimizations.SetSourceIsNonPrimitiveArray();
}
}
+ // For primitive arrays, use their optimized ArtMethod implementations.
+ if ((source_component_type != Primitive::kPrimVoid) &&
+ (source_component_type == destination_component_type)) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ PointerSize image_size = class_linker->GetImagePointerSize();
+ HInvokeStaticOrDirect* invoke = instruction->AsInvokeStaticOrDirect();
+ mirror::Class* system = invoke->GetResolvedMethod()->GetDeclaringClass();
+ ArtMethod* method = nullptr;
+ switch (source_component_type) {
+ case Primitive::kPrimBoolean:
+ method = system->FindDeclaredDirectMethod("arraycopy", "([ZI[ZII)V", image_size);
+ break;
+ case Primitive::kPrimByte:
+ method = system->FindDeclaredDirectMethod("arraycopy", "([BI[BII)V", image_size);
+ break;
+ case Primitive::kPrimChar:
+ method = system->FindDeclaredDirectMethod("arraycopy", "([CI[CII)V", image_size);
+ break;
+ case Primitive::kPrimShort:
+ method = system->FindDeclaredDirectMethod("arraycopy", "([SI[SII)V", image_size);
+ break;
+ case Primitive::kPrimInt:
+ method = system->FindDeclaredDirectMethod("arraycopy", "([II[III)V", image_size);
+ break;
+ case Primitive::kPrimFloat:
+ method = system->FindDeclaredDirectMethod("arraycopy", "([FI[FII)V", image_size);
+ break;
+ case Primitive::kPrimLong:
+ method = system->FindDeclaredDirectMethod("arraycopy", "([JI[JII)V", image_size);
+ break;
+ case Primitive::kPrimDouble:
+ method = system->FindDeclaredDirectMethod("arraycopy", "([DI[DII)V", image_size);
+ break;
+ default:
+ LOG(FATAL) << "Unreachable";
+ }
+ DCHECK(method != nullptr);
+ invoke->SetResolvedMethod(method);
+      // Sharpen the new invoke. Note that we do not update the dex method index of
+      // the invoke, as we would need to look it up in the current dex file, and it
+      // is unlikely to exist there. The usual outcome for such typed arraycopy
+      // methods is a direct pointer into the boot image.
+ HSharpening::SharpenInvokeStaticOrDirect(invoke, codegen_);
+ }
}
}
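
The signatures in the switch above follow the JNI descriptor scheme ([XI[XII)V — source array, source position, destination array, destination position, length — where X is the primitive's shorty character (Z, B, C, S, I, F, J, D). A hedged sketch of building such a descriptor programmatically (helper name is hypothetical):

  #include <string>

  // Builds "([II[III)V" for shorty 'I', "([DI[DII)V" for 'D', and so on.
  std::string ArraycopyDescriptor(char shorty) {
    std::string s = "([";
    s += shorty;
    s += "I[";
    s += shorty;
    s += "II)V";
    return s;
  }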
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index 7fe1067aa9..f7329a4a1f 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -23,6 +23,8 @@
namespace art {
+class CodeGenerator;
+
/**
* Implements optimizations specific to each instruction.
*
@@ -36,15 +38,19 @@ namespace art {
class InstructionSimplifier : public HOptimization {
public:
explicit InstructionSimplifier(HGraph* graph,
+ CodeGenerator* codegen,
OptimizingCompilerStats* stats = nullptr,
const char* name = kInstructionSimplifierPassName)
- : HOptimization(graph, name, stats) {}
+ : HOptimization(graph, name, stats),
+ codegen_(codegen) {}
static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
void Run() OVERRIDE;
private:
+ CodeGenerator* codegen_;
+
DISALLOW_COPY_AND_ASSIGN(InstructionSimplifier);
};
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 2d3c00fb97..48699b33ae 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -38,7 +38,8 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
position_(pos),
is_singleton_(true),
is_singleton_and_not_returned_(true),
- is_singleton_and_not_deopt_visible_(true) {
+ is_singleton_and_not_deopt_visible_(true),
+ has_index_aliasing_(false) {
CalculateEscape(reference_,
nullptr,
&is_singleton_,
@@ -68,13 +69,36 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
return is_singleton_and_not_returned_ && is_singleton_and_not_deopt_visible_;
}
+  // Returns true if reference_ is a singleton that is returned to the caller or
+  // used as an environment local of an HDeoptimize instruction.
+ bool IsSingletonAndNonRemovable() const {
+ return is_singleton_ &&
+ (!is_singleton_and_not_returned_ || !is_singleton_and_not_deopt_visible_);
+ }
+
+ bool HasIndexAliasing() {
+ return has_index_aliasing_;
+ }
+
+ void SetHasIndexAliasing(bool has_index_aliasing) {
+ // Only allow setting to true.
+ DCHECK(has_index_aliasing);
+ has_index_aliasing_ = has_index_aliasing;
+ }
+
private:
HInstruction* const reference_;
const size_t position_; // position in HeapLocationCollector's ref_info_array_.
- bool is_singleton_; // can only be referred to by a single name in the method,
- bool is_singleton_and_not_returned_; // and not returned to caller,
- bool is_singleton_and_not_deopt_visible_; // and not used as an environment local of HDeoptimize.
+ // Can only be referred to by a single name in the method.
+ bool is_singleton_;
+ // Is singleton and not returned to caller.
+ bool is_singleton_and_not_returned_;
+ // Is singleton and not used as an environment local of HDeoptimize.
+ bool is_singleton_and_not_deopt_visible_;
+ // Some heap locations with reference_ have array index aliasing,
+ // e.g. arr[i] and arr[j] may be the same location.
+ bool has_index_aliasing_;
DISALLOW_COPY_AND_ASSIGN(ReferenceInfo);
};
@@ -321,6 +345,8 @@ class HeapLocationCollector : public HGraphVisitor {
// Different constant indices do not alias.
return false;
}
+ ReferenceInfo* ref_info = loc1->GetReferenceInfo();
+ ref_info->SetHasIndexAliasing(true);
}
return true;
}
@@ -497,7 +523,8 @@ class LSEVisitor : public HGraphVisitor {
removed_loads_(graph->GetArena()->Adapter(kArenaAllocLSE)),
substitute_instructions_for_loads_(graph->GetArena()->Adapter(kArenaAllocLSE)),
possibly_removed_stores_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- singleton_new_instances_(graph->GetArena()->Adapter(kArenaAllocLSE)) {
+ singleton_new_instances_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+ singleton_new_arrays_(graph->GetArena()->Adapter(kArenaAllocLSE)) {
}
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
@@ -534,20 +561,24 @@ class LSEVisitor : public HGraphVisitor {
}
// At this point, stores in possibly_removed_stores_ can be safely removed.
- for (size_t i = 0, e = possibly_removed_stores_.size(); i < e; i++) {
- HInstruction* store = possibly_removed_stores_[i];
+ for (HInstruction* store : possibly_removed_stores_) {
DCHECK(store->IsInstanceFieldSet() || store->IsStaticFieldSet() || store->IsArraySet());
store->GetBlock()->RemoveInstruction(store);
}
// Eliminate allocations that are not used.
- for (size_t i = 0, e = singleton_new_instances_.size(); i < e; i++) {
- HInstruction* new_instance = singleton_new_instances_[i];
+ for (HInstruction* new_instance : singleton_new_instances_) {
if (!new_instance->HasNonEnvironmentUses()) {
new_instance->RemoveEnvironmentUsers();
new_instance->GetBlock()->RemoveInstruction(new_instance);
}
}
+ for (HInstruction* new_array : singleton_new_arrays_) {
+ if (!new_array->HasNonEnvironmentUses()) {
+ new_array->RemoveEnvironmentUsers();
+ new_array->GetBlock()->RemoveInstruction(new_array);
+ }
+ }
}
private:
@@ -558,7 +589,7 @@ class LSEVisitor : public HGraphVisitor {
void KeepIfIsStore(HInstruction* heap_value) {
if (heap_value == kDefaultHeapValue ||
heap_value == kUnknownHeapValue ||
- !heap_value->IsInstanceFieldSet()) {
+ !(heap_value->IsInstanceFieldSet() || heap_value->IsArraySet())) {
return;
}
auto idx = std::find(possibly_removed_stores_.begin(),
@@ -600,14 +631,17 @@ class LSEVisitor : public HGraphVisitor {
for (size_t i = 0; i < heap_values.size(); i++) {
HeapLocation* location = heap_location_collector_.GetHeapLocation(i);
ReferenceInfo* ref_info = location->GetReferenceInfo();
- if (!ref_info->IsSingleton() || location->IsValueKilledByLoopSideEffects()) {
- // heap value is killed by loop side effects (stored into directly, or due to
- // aliasing).
+ if (ref_info->IsSingletonAndRemovable() &&
+ !location->IsValueKilledByLoopSideEffects()) {
+ // A removable singleton's field that's not stored into inside a loop is
+ // invariant throughout the loop. Nothing to do.
+ } else {
+        // The heap value is killed by loop side effects (stored into directly or
+        // due to aliasing), or it may be needed after method return or
+        // deoptimization.
KeepIfIsStore(pre_header_heap_values[i]);
heap_values[i] = kUnknownHeapValue;
- } else {
- // A singleton's field that's not stored into inside a loop is invariant throughout
- // the loop.
}
}
}
@@ -626,7 +660,7 @@ class LSEVisitor : public HGraphVisitor {
bool from_all_predecessors = true;
ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
HInstruction* singleton_ref = nullptr;
- if (ref_info->IsSingletonAndRemovable()) {
+ if (ref_info->IsSingleton()) {
// We do more analysis of liveness when merging heap values for such
// cases since stores into such references may potentially be eliminated.
singleton_ref = ref_info->GetReference();
@@ -652,8 +686,9 @@ class LSEVisitor : public HGraphVisitor {
}
}
- if (merged_value == kUnknownHeapValue) {
- // There are conflicting heap values from different predecessors.
+ if (merged_value == kUnknownHeapValue || ref_info->IsSingletonAndNonRemovable()) {
+ // There are conflicting heap values from different predecessors,
+ // or the heap value may be needed after method return or deoptimization.
// Keep the last store in each predecessor since future loads cannot be eliminated.
for (HBasicBlock* predecessor : predecessors) {
ArenaVector<HInstruction*>& pred_values = heap_values_for_[predecessor->GetBlockId()];
@@ -734,13 +769,16 @@ class LSEVisitor : public HGraphVisitor {
heap_values[idx] = constant;
return;
}
- if (heap_value != kUnknownHeapValue && heap_value->IsInstanceFieldSet()) {
- HInstruction* store = heap_value;
- // This load must be from a singleton since it's from the same field
- // that a "removed" store puts the value. That store must be to a singleton's field.
- DCHECK(ref_info->IsSingleton());
- // Get the real heap value of the store.
- heap_value = store->InputAt(1);
+ if (heap_value != kUnknownHeapValue) {
+ if (heap_value->IsInstanceFieldSet() || heap_value->IsArraySet()) {
+ HInstruction* store = heap_value;
+        // This load must be from a singleton since it's from the same
+        // field/element that a "removed" store put the value into. That store
+        // must be to a singleton's field/element.
+ DCHECK(ref_info->IsSingleton());
+ // Get the real heap value of the store.
+ heap_value = heap_value->IsInstanceFieldSet() ? store->InputAt(1) : store->InputAt(2);
+ }
}
if (heap_value == kUnknownHeapValue) {
// Load isn't eliminated. Put the load as the value into the HeapLocation.
@@ -796,19 +834,19 @@ class LSEVisitor : public HGraphVisitor {
if (Equal(heap_value, value)) {
// Store into the heap location with the same value.
same_value = true;
- } else if (index != nullptr) {
- // For array element, don't eliminate stores since it can be easily aliased
- // with non-constant index.
- } else if (ref_info->IsSingletonAndRemovable()) {
- // Store into a field of a singleton that's not returned. The value cannot be
- // killed due to aliasing/invocation. It can be redundant since future loads can
+ } else if (index != nullptr && ref_info->HasIndexAliasing()) {
+      // For an array element, don't eliminate the store if the index may alias
+      // another access to the same array.
+ } else if (ref_info->IsSingleton()) {
+ // Store into a field of a singleton. The value cannot be killed due to
+ // aliasing/invocation. It can be redundant since future loads can
// directly get the value set by this instruction. The value can still be killed due to
// merging or loop side effects. Stores whose values are killed due to merging/loop side
// effects later will be removed from possibly_removed_stores_ when that is detected.
+ // Stores whose values may be needed after method return or deoptimization
+ // are also removed from possibly_removed_stores_ when that is detected.
possibly_redundant = true;
HNewInstance* new_instance = ref_info->GetReference()->AsNewInstance();
- DCHECK(new_instance != nullptr);
- if (new_instance->IsFinalizable()) {
+ if (new_instance != nullptr && new_instance->IsFinalizable()) {
// Finalizable objects escape globally. Need to keep the store.
possibly_redundant = false;
} else {
@@ -834,7 +872,7 @@ class LSEVisitor : public HGraphVisitor {
if (!same_value) {
if (possibly_redundant) {
- DCHECK(instruction->IsInstanceFieldSet());
+ DCHECK(instruction->IsInstanceFieldSet() || instruction->IsArraySet());
// Put the store as the heap value. If the value is loaded from heap
// by a load later, this store isn't really redundant.
heap_values[idx] = instruction;
@@ -914,6 +952,33 @@ class LSEVisitor : public HGraphVisitor {
value);
}
+ void VisitDeoptimize(HDeoptimize* instruction) {
+ const ArenaVector<HInstruction*>& heap_values =
+ heap_values_for_[instruction->GetBlock()->GetBlockId()];
+ for (HInstruction* heap_value : heap_values) {
+ // Filter out fake instructions before checking instruction kind below.
+ if (heap_value == kUnknownHeapValue || heap_value == kDefaultHeapValue) {
+ continue;
+ }
+ // A store is kept as the heap value for possibly removed stores.
+ if (heap_value->IsInstanceFieldSet() || heap_value->IsArraySet()) {
+ // Check whether the reference for a store is used by an environment local of
+ // HDeoptimize.
+ HInstruction* reference = heap_value->InputAt(0);
+ DCHECK(heap_location_collector_.FindReferenceInfoOf(reference)->IsSingleton());
+ for (const HUseListNode<HEnvironment*>& use : reference->GetEnvUses()) {
+ HEnvironment* user = use.GetUser();
+ if (user->GetHolder() == instruction) {
+ // The singleton for the store is visible at this deoptimization
+ // point. Need to keep the store so that the heap value is
+ // seen by the interpreter.
+ KeepIfIsStore(heap_value);
+ }
+ }
+ }
+ }
+ }
+
void HandleInvoke(HInstruction* invoke) {
ArenaVector<HInstruction*>& heap_values =
heap_values_for_[invoke->GetBlock()->GetBlockId()];
@@ -995,6 +1060,27 @@ class LSEVisitor : public HGraphVisitor {
}
}
+ void VisitNewArray(HNewArray* new_array) OVERRIDE {
+ ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_array);
+ if (ref_info == nullptr) {
+ // new_array isn't used for array accesses. No need to process it.
+ return;
+ }
+ if (ref_info->IsSingletonAndRemovable()) {
+ singleton_new_arrays_.push_back(new_array);
+ }
+ ArenaVector<HInstruction*>& heap_values =
+ heap_values_for_[new_array->GetBlock()->GetBlockId()];
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ HeapLocation* location = heap_location_collector_.GetHeapLocation(i);
+ HInstruction* ref = location->GetReferenceInfo()->GetReference();
+ if (ref == new_array && location->GetIndex() != nullptr) {
+ // Array elements are set to default heap values.
+ heap_values[i] = kDefaultHeapValue;
+ }
+ }
+ }
+
// Find an instruction's substitute if it should be removed.
// Return the same instruction if it should not be removed.
HInstruction* FindSubstitute(HInstruction* instruction) {
@@ -1023,6 +1109,7 @@ class LSEVisitor : public HGraphVisitor {
ArenaVector<HInstruction*> possibly_removed_stores_;
ArenaVector<HInstruction*> singleton_new_instances_;
+ ArenaVector<HInstruction*> singleton_new_arrays_;
DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
};
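
Why HasIndexAliasing() blocks store elimination: with non-constant indices, two syntactically distinct accesses may touch the same element, so an earlier store is not provably dead. A C++ analogue of the pattern LSE must respect (illustrative only):

  // If i == j at runtime, removing the first store would change the result:
  void Example(int* a, int i, int j) {
    a[i] = 1;  // not dead: a[j] below may alias a[i]
    a[j] = 2;
    // a[i] is now 1 or 2 depending on whether i == j.
  }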
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 26c9ab83c2..8df513f410 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -16,6 +16,7 @@
#include "loop_optimization.h"
+#include "driver/compiler_driver.h"
#include "linear_order.h"
namespace art {
@@ -57,8 +58,10 @@ static bool IsEarlyExit(HLoopInformation* loop_info) {
//
HLoopOptimization::HLoopOptimization(HGraph* graph,
+ CompilerDriver* compiler_driver,
HInductionVarAnalysis* induction_analysis)
: HOptimization(graph, kLoopOptimizationPassName),
+ compiler_driver_(compiler_driver),
induction_range_(induction_analysis),
loop_allocator_(nullptr),
top_loop_(nullptr),
@@ -69,7 +72,7 @@ HLoopOptimization::HLoopOptimization(HGraph* graph,
}
void HLoopOptimization::Run() {
- // Well-behaved loops only.
+ // Skip if there is no loop or the graph has try-catch/irreducible loops.
// TODO: make this less of a sledgehammer.
if (!graph_->HasLoops() || graph_->HasTryCatch() || graph_->HasIrreducibleLoops()) {
return;
@@ -85,6 +88,7 @@ void HLoopOptimization::Run() {
LocalRun();
if (top_loop_ == nullptr) {
+ // All loops have been eliminated.
graph_->SetHasLoops(false);
}
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 9ddab4150c..0b798fc7a9 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -23,13 +23,17 @@
namespace art {
+class CompilerDriver;
+
/**
* Loop optimizations. Builds a loop hierarchy and applies optimizations to
* the detected nested loops, such as removal of dead induction and empty loops.
*/
class HLoopOptimization : public HOptimization {
public:
- HLoopOptimization(HGraph* graph, HInductionVarAnalysis* induction_analysis);
+ HLoopOptimization(HGraph* graph,
+ CompilerDriver* compiler_driver,
+ HInductionVarAnalysis* induction_analysis);
void Run() OVERRIDE;
@@ -76,6 +80,9 @@ class HLoopOptimization : public HOptimization {
bool TryReplaceWithLastValue(HInstruction* instruction, HBasicBlock* block);
void RemoveDeadInstructions(const HInstructionList& list);
+ // Compiler driver (to query ISA features).
+ const CompilerDriver* compiler_driver_;
+
// Range information based on prior induction variable analysis.
InductionVarRange induction_range_;
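
A sketch of the kind of query the stored driver enables; treat the accessor name as an assumption about CompilerDriver's API at this revision:

  // Consult ISA features before attempting SIMD-style rewrites.
  if (compiler_driver_ != nullptr) {
    const InstructionSetFeatures* features =
        compiler_driver_->GetInstructionSetFeatures();
    // ... gate vectorization decisions on `features` ...
  }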
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index 9a6b4935b2..5b9350689e 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -31,7 +31,7 @@ class LoopOptimizationTest : public CommonCompilerTest {
allocator_(&pool_),
graph_(CreateGraph(&allocator_)),
iva_(new (&allocator_) HInductionVarAnalysis(graph_)),
- loop_opt_(new (&allocator_) HLoopOptimization(graph_, iva_)) {
+ loop_opt_(new (&allocator_) HLoopOptimization(graph_, nullptr, iva_)) {
BuildGraph();
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index c39aed2c6a..542b218cf8 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1734,11 +1734,11 @@ class SideEffects : public ValueObject {
// A HEnvironment object contains the values of virtual registers at a given location.
class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
public:
- HEnvironment(ArenaAllocator* arena,
- size_t number_of_vregs,
- ArtMethod* method,
- uint32_t dex_pc,
- HInstruction* holder)
+ ALWAYS_INLINE HEnvironment(ArenaAllocator* arena,
+ size_t number_of_vregs,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ HInstruction* holder)
: vregs_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentVRegs)),
locations_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentLocations)),
parent_(nullptr),
@@ -1747,7 +1747,7 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
holder_(holder) {
}
- HEnvironment(ArenaAllocator* arena, const HEnvironment& to_copy, HInstruction* holder)
+ ALWAYS_INLINE HEnvironment(ArenaAllocator* arena, const HEnvironment& to_copy, HInstruction* holder)
: HEnvironment(arena,
to_copy.Size(),
to_copy.GetMethod(),
@@ -3915,6 +3915,7 @@ class HInvoke : public HVariableInputSizeInstruction {
bool IsIntrinsic() const { return intrinsic_ != Intrinsics::kNone; }
ArtMethod* GetResolvedMethod() const { return resolved_method_; }
+ void SetResolvedMethod(ArtMethod* method) { resolved_method_ = method; }
DECLARE_ABSTRACT_INSTRUCTION(Invoke);
@@ -3957,7 +3958,7 @@ class HInvoke : public HVariableInputSizeInstruction {
}
uint32_t number_of_arguments_;
- ArtMethod* const resolved_method_;
+ ArtMethod* resolved_method_;
const uint32_t dex_method_index_;
Intrinsics intrinsic_;
@@ -5544,8 +5545,6 @@ class HLoadClass FINAL : public HInstruction {
// Use a known boot image Class* address, embedded in the code by the codegen.
// Used for boot image classes referenced by apps in AOT- and JIT-compiled code.
- // Note: codegen needs to emit a linker patch if indicated by compiler options'
- // GetIncludePatchInformation().
kBootImageAddress,
// Load from an entry in the .bss section using a PC-relative load.
@@ -5749,8 +5748,6 @@ class HLoadString FINAL : public HInstruction {
// Use a known boot image String* address, embedded in the code by the codegen.
// Used for boot image strings referenced by apps in AOT- and JIT-compiled code.
- // Note: codegen needs to emit a linker patch if indicated by compiler options'
- // GetIncludePatchInformation().
kBootImageAddress,
// Load from an entry in the .bss section using a PC-relative load.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 3842ef98da..d6153b091c 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -507,7 +507,7 @@ static HOptimization* BuildOptimization(
} else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
return new (arena) HInductionVarAnalysis(graph);
} else if (opt_name == InstructionSimplifier::kInstructionSimplifierPassName) {
- return new (arena) InstructionSimplifier(graph, stats, pass_name.c_str());
+ return new (arena) InstructionSimplifier(graph, codegen, stats, pass_name.c_str());
} else if (opt_name == IntrinsicsRecognizer::kIntrinsicsRecognizerPassName) {
return new (arena) IntrinsicsRecognizer(graph, stats);
} else if (opt_name == LICM::kLoopInvariantCodeMotionPassName) {
@@ -519,7 +519,7 @@ static HOptimization* BuildOptimization(
} else if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
return new (arena) SideEffectsAnalysis(graph);
} else if (opt_name == HLoopOptimization::kLoopOptimizationPassName) {
- return new (arena) HLoopOptimization(graph, most_recent_induction);
+ return new (arena) HLoopOptimization(graph, driver, most_recent_induction);
} else if (opt_name == CHAGuardOptimization::kCHAGuardOptimizationPassName) {
return new (arena) CHAGuardOptimization(graph);
} else if (opt_name == CodeSinking::kCodeSinkingPassName) {
@@ -768,26 +768,29 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
HDeadCodeElimination* dce3 = new (arena) HDeadCodeElimination(
graph, stats, "dead_code_elimination$final");
HConstantFolding* fold1 = new (arena) HConstantFolding(graph, "constant_folding");
- InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
+ InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, codegen, stats);
HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, stats);
HConstantFolding* fold2 = new (arena) HConstantFolding(
graph, "constant_folding$after_inlining");
HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding$after_bce");
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
- LICM* licm = new (arena) LICM(graph, *side_effects, stats);
- LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects);
+ SideEffectsAnalysis* side_effects1 = new (arena) SideEffectsAnalysis(
+ graph, "side_effects$before_gvn");
+ SideEffectsAnalysis* side_effects2 = new (arena) SideEffectsAnalysis(
+ graph, "side_effects$before_lse");
+ GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects1);
+ LICM* licm = new (arena) LICM(graph, *side_effects1, stats);
HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
- BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects, induction);
- HLoopOptimization* loop = new (arena) HLoopOptimization(graph, induction);
+ BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects1, induction);
+ HLoopOptimization* loop = new (arena) HLoopOptimization(graph, driver, induction);
+ LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects2);
HSharpening* sharpening = new (arena) HSharpening(
graph, codegen, dex_compilation_unit, driver, handles);
InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
- graph, stats, "instruction_simplifier$after_inlining");
+ graph, codegen, stats, "instruction_simplifier$after_inlining");
InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
- graph, stats, "instruction_simplifier$after_bce");
+ graph, codegen, stats, "instruction_simplifier$after_bce");
InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier(
- graph, stats, "instruction_simplifier$before_codegen");
+ graph, codegen, stats, "instruction_simplifier$before_codegen");
IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, stats);
CHAGuardOptimization* cha_guard = new (arena) CHAGuardOptimization(graph);
CodeSinking* code_sinking = new (arena) CodeSinking(graph, stats);
@@ -810,7 +813,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
fold2, // TODO: if we don't inline we can also skip fold2.
simplify2,
dce2,
- side_effects,
+ side_effects1,
gvn,
licm,
induction,
@@ -818,6 +821,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
loop,
fold3, // evaluates code generated by dynamic bce
simplify3,
+ side_effects2,
lse,
cha_guard,
dce3,
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index be400925d5..7bd38c7a8c 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -41,7 +41,7 @@ void HSharpening::Run() {
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
if (instruction->IsInvokeStaticOrDirect()) {
- ProcessInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect());
+ SharpenInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect(), codegen_);
} else if (instruction->IsLoadString()) {
ProcessLoadString(instruction->AsLoadString());
}
@@ -65,12 +65,12 @@ static bool IsInBootImage(ArtMethod* method) {
}
static bool AOTCanEmbedMethod(ArtMethod* method, const CompilerOptions& options) {
- // Including patch information means the AOT code will be patched, which we don't
- // support in the compiler, and is anyways moving away b/33192586.
- return IsInBootImage(method) && !options.GetCompilePic() && !options.GetIncludePatchInformation();
+ return IsInBootImage(method) && !options.GetCompilePic();
}
-void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+
+void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
+ CodeGenerator* codegen) {
if (invoke->IsStringInit()) {
// Not using the dex cache arrays. But we could still try to use a better dispatch...
// TODO: Use direct_method and direct_code for the appropriate StringFactory method.
@@ -97,12 +97,12 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
// We don't optimize for debuggable as it would prevent us from obsoleting the method in some
// situations.
- if (callee == codegen_->GetGraph()->GetArtMethod() && !codegen_->GetGraph()->IsDebuggable()) {
+ if (callee == codegen->GetGraph()->GetArtMethod() && !codegen->GetGraph()->IsDebuggable()) {
// Recursive call.
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive;
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf;
} else if (Runtime::Current()->UseJitCompilation() ||
- AOTCanEmbedMethod(callee, codegen_->GetCompilerOptions())) {
+ AOTCanEmbedMethod(callee, codegen->GetCompilerOptions())) {
// JIT or on-device AOT compilation referencing a boot image method.
// Use the method address directly.
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
@@ -111,13 +111,17 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
} else {
// Use PC-relative access to the dex cache arrays.
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
- DexCacheArraysLayout layout(GetInstructionSetPointerSize(codegen_->GetInstructionSet()),
- &graph_->GetDexFile());
+    // Note: we use the invoke's graph instead of the codegen graph, which can
+    // differ when inlining (the codegen graph is the outermost graph). The
+    // invoke's dex method index is relative to the dex file from which the
+    // invoke's graph was built.
+ DexCacheArraysLayout layout(GetInstructionSetPointerSize(codegen->GetInstructionSet()),
+ &invoke->GetBlock()->GetGraph()->GetDexFile());
method_load_data = layout.MethodOffset(invoke->GetDexMethodIndex());
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
}
- if (graph_->IsDebuggable()) {
+ if (codegen->GetGraph()->IsDebuggable()) {
// For debuggable apps always use the code pointer from ArtMethod
// so that we don't circumvent instrumentation stubs if installed.
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
@@ -127,14 +131,14 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
method_load_kind, code_ptr_location, method_load_data
};
HInvokeStaticOrDirect::DispatchInfo dispatch_info =
- codegen_->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, invoke);
+ codegen->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, invoke);
invoke->SetDispatchInfo(dispatch_info);
}
-HLoadClass::LoadKind HSharpening::SharpenClass(HLoadClass* load_class,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver,
- const DexCompilationUnit& dex_compilation_unit) {
+HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(HLoadClass* load_class,
+ CodeGenerator* codegen,
+ CompilerDriver* compiler_driver,
+ const DexCompilationUnit& dex_compilation_unit) {
Handle<mirror::Class> klass = load_class->GetClass();
DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod ||
load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index 4240b2f339..10707c796f 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -48,14 +48,16 @@ class HSharpening : public HOptimization {
static constexpr const char* kSharpeningPassName = "sharpening";
// Used by the builder and the inliner.
- static HLoadClass::LoadKind SharpenClass(HLoadClass* load_class,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver,
- const DexCompilationUnit& dex_compilation_unit)
+ static HLoadClass::LoadKind ComputeLoadClassKind(HLoadClass* load_class,
+ CodeGenerator* codegen,
+ CompilerDriver* compiler_driver,
+ const DexCompilationUnit& dex_compilation_unit)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Used by Sharpening and InstructionSimplifier.
+ static void SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke, CodeGenerator* codegen);
+
private:
- void ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke);
void ProcessLoadString(HLoadString* load_string);
CodeGenerator* codegen_;
diff --git a/compiler/optimizing/side_effects_analysis.h b/compiler/optimizing/side_effects_analysis.h
index bac6088bf7..fea47e66d9 100644
--- a/compiler/optimizing/side_effects_analysis.h
+++ b/compiler/optimizing/side_effects_analysis.h
@@ -25,8 +25,8 @@ namespace art {
class SideEffectsAnalysis : public HOptimization {
public:
- explicit SideEffectsAnalysis(HGraph* graph)
- : HOptimization(graph, kSideEffectsAnalysisPassName),
+  explicit SideEffectsAnalysis(HGraph* graph,
+                               const char* pass_name = kSideEffectsAnalysisPassName)
+ : HOptimization(graph, pass_name),
graph_(graph),
block_effects_(graph->GetBlocks().size(),
graph->GetArena()->Adapter(kArenaAllocSideEffectsAnalysis)),
@@ -41,7 +41,7 @@ class SideEffectsAnalysis : public HOptimization {
bool HasRun() const { return has_run_; }
- static constexpr const char* kSideEffectsAnalysisPassName = "SideEffects";
+ static constexpr const char* kSideEffectsAnalysisPassName = "side_effects";
private:
void UpdateLoopEffects(HLoopInformation* info, SideEffects effects);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index b62bf4e5f9..a239bd50c2 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -331,7 +331,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
instruction, /* environment */ nullptr, input_index, block->GetLifetimeEnd(), first_use_);
}
- void AddRange(size_t start, size_t end) {
+ ALWAYS_INLINE void AddRange(size_t start, size_t end) {
if (first_range_ == nullptr) {
first_range_ = last_range_ = range_search_start_ =
new (allocator_) LiveRange(start, end, first_range_);
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 47ddf2547a..1a5a23d10b 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -501,8 +501,10 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
bool is_float = false);
private:
+ // This will be used as an argument for loads/stores
+ // when there is no need for implicit null checks.
struct NoImplicitNullChecker {
- void operator()() {}
+ void operator()() const {}
};
public:
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 0f86f8843d..39eb5893d8 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -2020,80 +2020,18 @@ void Mips64Assembler::Bc1nez(FpuRegister ft, Mips64Label* label) {
Bcond(label, kCondT, static_cast<GpuRegister>(ft), ZERO);
}
-void Mips64Assembler::LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base,
+void Mips64Assembler::LoadFromOffset(LoadOperandType type,
+ GpuRegister reg,
+ GpuRegister base,
int32_t offset) {
- if (!IsInt<16>(offset) ||
- (type == kLoadDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
- !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
- LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
- Daddu(AT, AT, base);
- base = AT;
- offset &= (kMips64DoublewordSize - 1);
- }
-
- switch (type) {
- case kLoadSignedByte:
- Lb(reg, base, offset);
- break;
- case kLoadUnsignedByte:
- Lbu(reg, base, offset);
- break;
- case kLoadSignedHalfword:
- Lh(reg, base, offset);
- break;
- case kLoadUnsignedHalfword:
- Lhu(reg, base, offset);
- break;
- case kLoadWord:
- CHECK_ALIGNED(offset, kMips64WordSize);
- Lw(reg, base, offset);
- break;
- case kLoadUnsignedWord:
- CHECK_ALIGNED(offset, kMips64WordSize);
- Lwu(reg, base, offset);
- break;
- case kLoadDoubleword:
- if (!IsAligned<kMips64DoublewordSize>(offset)) {
- CHECK_ALIGNED(offset, kMips64WordSize);
- Lwu(reg, base, offset);
- Lwu(TMP2, base, offset + kMips64WordSize);
- Dinsu(reg, TMP2, 32, 32);
- } else {
- Ld(reg, base, offset);
- }
- break;
- }
+ LoadFromOffset<>(type, reg, base, offset);
}
-void Mips64Assembler::LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base,
+void Mips64Assembler::LoadFpuFromOffset(LoadOperandType type,
+ FpuRegister reg,
+ GpuRegister base,
int32_t offset) {
- if (!IsInt<16>(offset) ||
- (type == kLoadDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
- !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
- LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
- Daddu(AT, AT, base);
- base = AT;
- offset &= (kMips64DoublewordSize - 1);
- }
-
- switch (type) {
- case kLoadWord:
- CHECK_ALIGNED(offset, kMips64WordSize);
- Lwc1(reg, base, offset);
- break;
- case kLoadDoubleword:
- if (!IsAligned<kMips64DoublewordSize>(offset)) {
- CHECK_ALIGNED(offset, kMips64WordSize);
- Lwc1(reg, base, offset);
- Lw(TMP2, base, offset + kMips64WordSize);
- Mthc1(TMP2, reg);
- } else {
- Ldc1(reg, base, offset);
- }
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
+ LoadFpuFromOffset<>(type, reg, base, offset);
}
void Mips64Assembler::EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset,
@@ -2123,72 +2061,18 @@ void Mips64Assembler::EmitLoad(ManagedRegister m_dst, GpuRegister src_register,
}
}
-void Mips64Assembler::StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base,
+void Mips64Assembler::StoreToOffset(StoreOperandType type,
+ GpuRegister reg,
+ GpuRegister base,
int32_t offset) {
- if (!IsInt<16>(offset) ||
- (type == kStoreDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
- !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
- LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
- Daddu(AT, AT, base);
- base = AT;
- offset &= (kMips64DoublewordSize - 1);
- }
-
- switch (type) {
- case kStoreByte:
- Sb(reg, base, offset);
- break;
- case kStoreHalfword:
- Sh(reg, base, offset);
- break;
- case kStoreWord:
- CHECK_ALIGNED(offset, kMips64WordSize);
- Sw(reg, base, offset);
- break;
- case kStoreDoubleword:
- if (!IsAligned<kMips64DoublewordSize>(offset)) {
- CHECK_ALIGNED(offset, kMips64WordSize);
- Sw(reg, base, offset);
- Dsrl32(TMP2, reg, 0);
- Sw(TMP2, base, offset + kMips64WordSize);
- } else {
- Sd(reg, base, offset);
- }
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
+ StoreToOffset<>(type, reg, base, offset);
}
-void Mips64Assembler::StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base,
+void Mips64Assembler::StoreFpuToOffset(StoreOperandType type,
+ FpuRegister reg,
+ GpuRegister base,
int32_t offset) {
- if (!IsInt<16>(offset) ||
- (type == kStoreDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
- !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
- LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
- Daddu(AT, AT, base);
- base = AT;
- offset &= (kMips64DoublewordSize - 1);
- }
-
- switch (type) {
- case kStoreWord:
- CHECK_ALIGNED(offset, kMips64WordSize);
- Swc1(reg, base, offset);
- break;
- case kStoreDoubleword:
- if (!IsAligned<kMips64DoublewordSize>(offset)) {
- CHECK_ALIGNED(offset, kMips64WordSize);
- Mfhc1(TMP2, reg);
- Swc1(reg, base, offset);
- Sw(TMP2, base, offset + kMips64WordSize);
- } else {
- Sdc1(reg, base, offset);
- }
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
+ StoreFpuToOffset<>(type, reg, base, offset);
}
static dwarf::Reg DWARFReg(GpuRegister reg) {
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index ee15c6da80..8bbe862d19 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -772,6 +772,191 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void Bc1nez(FpuRegister ft, Mips64Label* label);
void EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset, size_t size);
+
+ private:
+ // This will be used as an argument for loads/stores
+ // when there is no need for implicit null checks.
+ struct NoImplicitNullChecker {
+ void operator()() const {}
+ };
+
+ public:
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void LoadFromOffset(LoadOperandType type,
+ GpuRegister reg,
+ GpuRegister base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ if (!IsInt<16>(offset) ||
+ (type == kLoadDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
+ !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
+ LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
+ Daddu(AT, AT, base);
+ base = AT;
+ offset &= (kMips64DoublewordSize - 1);
+ }
+
+ switch (type) {
+ case kLoadSignedByte:
+ Lb(reg, base, offset);
+ break;
+ case kLoadUnsignedByte:
+ Lbu(reg, base, offset);
+ break;
+ case kLoadSignedHalfword:
+ Lh(reg, base, offset);
+ break;
+ case kLoadUnsignedHalfword:
+ Lhu(reg, base, offset);
+ break;
+ case kLoadWord:
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Lw(reg, base, offset);
+ break;
+ case kLoadUnsignedWord:
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Lwu(reg, base, offset);
+ break;
+ case kLoadDoubleword:
+ if (!IsAligned<kMips64DoublewordSize>(offset)) {
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Lwu(reg, base, offset);
+ null_checker();
+ Lwu(TMP2, base, offset + kMips64WordSize);
+ Dinsu(reg, TMP2, 32, 32);
+ } else {
+ Ld(reg, base, offset);
+ null_checker();
+ }
+ break;
+ }
+ if (type != kLoadDoubleword) {
+ null_checker();
+ }
+ }
+
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void LoadFpuFromOffset(LoadOperandType type,
+ FpuRegister reg,
+ GpuRegister base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ if (!IsInt<16>(offset) ||
+ (type == kLoadDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
+ !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
+ LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
+ Daddu(AT, AT, base);
+ base = AT;
+ offset &= (kMips64DoublewordSize - 1);
+ }
+
+ switch (type) {
+ case kLoadWord:
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Lwc1(reg, base, offset);
+ null_checker();
+ break;
+ case kLoadDoubleword:
+ if (!IsAligned<kMips64DoublewordSize>(offset)) {
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Lwc1(reg, base, offset);
+ null_checker();
+ Lw(TMP2, base, offset + kMips64WordSize);
+ Mthc1(TMP2, reg);
+ } else {
+ Ldc1(reg, base, offset);
+ null_checker();
+ }
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+ }
+
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void StoreToOffset(StoreOperandType type,
+ GpuRegister reg,
+ GpuRegister base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ if (!IsInt<16>(offset) ||
+ (type == kStoreDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
+ !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
+ LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
+ Daddu(AT, AT, base);
+ base = AT;
+ offset &= (kMips64DoublewordSize - 1);
+ }
+
+ switch (type) {
+ case kStoreByte:
+ Sb(reg, base, offset);
+ break;
+ case kStoreHalfword:
+ Sh(reg, base, offset);
+ break;
+ case kStoreWord:
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Sw(reg, base, offset);
+ break;
+ case kStoreDoubleword:
+ if (!IsAligned<kMips64DoublewordSize>(offset)) {
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Sw(reg, base, offset);
+ null_checker();
+ Dsrl32(TMP2, reg, 0);
+ Sw(TMP2, base, offset + kMips64WordSize);
+ } else {
+ Sd(reg, base, offset);
+ null_checker();
+ }
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+ if (type != kStoreDoubleword) {
+ null_checker();
+ }
+ }
+
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void StoreFpuToOffset(StoreOperandType type,
+ FpuRegister reg,
+ GpuRegister base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
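+    // Same offset normalization as in LoadFromOffset() above.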
+ if (!IsInt<16>(offset) ||
+ (type == kStoreDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
+ !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
+ LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
+ Daddu(AT, AT, base);
+ base = AT;
+ offset &= (kMips64DoublewordSize - 1);
+ }
+
+ switch (type) {
+ case kStoreWord:
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Swc1(reg, base, offset);
+ null_checker();
+ break;
+ case kStoreDoubleword:
+ if (!IsAligned<kMips64DoublewordSize>(offset)) {
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Mfhc1(TMP2, reg);
+ Swc1(reg, base, offset);
+ null_checker();
+ Sw(TMP2, base, offset + kMips64WordSize);
+ } else {
+ Sdc1(reg, base, offset);
+ null_checker();
+ }
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+ }
+
void LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
void LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base, int32_t offset);
void StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index c892b25ed3..01c33591e5 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -246,9 +246,13 @@ class VerifierDepsTest : public CommonCompilerTest {
}
bool HasUnverifiedClass(const std::string& cls) {
- const DexFile::TypeId* type_id = primary_dex_file_->FindTypeId(cls.c_str());
+ return HasUnverifiedClass(cls, *primary_dex_file_);
+ }
+
+ bool HasUnverifiedClass(const std::string& cls, const DexFile& dex_file) {
+ const DexFile::TypeId* type_id = dex_file.FindTypeId(cls.c_str());
DCHECK(type_id != nullptr);
- dex::TypeIndex index = primary_dex_file_->GetIndexForTypeId(*type_id);
+ dex::TypeIndex index = dex_file.GetIndexForTypeId(*type_id);
for (const auto& dex_dep : verifier_deps_->dex_deps_) {
for (dex::TypeIndex entry : dex_dep.second->unverified_classes_) {
if (index == entry) {
@@ -1141,7 +1145,7 @@ TEST_F(VerifierDepsTest, UnverifiedClasses) {
// Test that a class with hard failure is recorded.
ASSERT_TRUE(HasUnverifiedClass("LMyVerificationFailure;"));
// Test that a class with unresolved super is recorded.
- ASSERT_FALSE(HasUnverifiedClass("LMyClassWithNoSuper;"));
+ ASSERT_TRUE(HasUnverifiedClass("LMyClassWithNoSuper;"));
// Test that a class with unresolved super and hard failure is recorded.
ASSERT_TRUE(HasUnverifiedClass("LMyClassWithNoSuperButFailures;"));
}
@@ -1511,5 +1515,18 @@ TEST_F(VerifierDepsTest, CompilerDriver) {
}
}
+TEST_F(VerifierDepsTest, MultiDexVerification) {
+ VerifyDexFile("VerifierDepsMulti");
+ ASSERT_EQ(NumberOfCompiledDexFiles(), 2u);
+
+ ASSERT_TRUE(HasUnverifiedClass("LMySoftVerificationFailure;", *dex_files_[1]));
+ ASSERT_TRUE(HasUnverifiedClass("LMySub1SoftVerificationFailure;", *dex_files_[0]));
+ ASSERT_TRUE(HasUnverifiedClass("LMySub2SoftVerificationFailure;", *dex_files_[0]));
+
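+  // Encoding must succeed even when the dependency data spans multiple dex files.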
+ std::vector<uint8_t> buffer;
+ verifier_deps_->Encode(dex_files_, &buffer);
+ ASSERT_FALSE(buffer.empty());
+}
+
} // namespace verifier
} // namespace art
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index be756286fc..2684f3d8e2 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -328,11 +328,6 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError("");
UsageError(" --dump-timing: display a breakdown of where time was spent");
UsageError("");
- UsageError(" --include-patch-information: Include patching information so the generated code");
- UsageError(" can have its base address moved without full recompilation.");
- UsageError("");
- UsageError(" --no-include-patch-information: Do not include patching information.");
- UsageError("");
UsageError(" -g");
UsageError(" --generate-debug-info: Generate debug information for native debugging,");
UsageError(" such as stack unwinding information, ELF symbols and DWARF sections.");
@@ -1426,25 +1421,15 @@ class Dex2Oat FINAL {
if (profile_compilation_info_ != nullptr && IsAppImage()) {
Runtime* runtime = Runtime::Current();
CHECK(runtime != nullptr);
- std::set<DexCacheResolvedClasses> resolved_classes(
- profile_compilation_info_->GetResolvedClasses());
-
// Filter out class path classes since we don't want to include these in the image.
std::unordered_set<std::string> dex_files_locations;
for (const DexFile* dex_file : dex_files_) {
dex_files_locations.insert(dex_file->GetLocation());
}
- for (auto it = resolved_classes.begin(); it != resolved_classes.end(); ) {
- if (dex_files_locations.find(it->GetDexLocation()) == dex_files_locations.end()) {
- VLOG(compiler) << "Removed profile samples for non-app dex file " << it->GetDexLocation();
- it = resolved_classes.erase(it);
- } else {
- ++it;
- }
- }
-
+ std::set<DexCacheResolvedClasses> resolved_classes(
+ profile_compilation_info_->GetResolvedClasses(dex_files_locations));
image_classes_.reset(new std::unordered_set<std::string>(
- runtime->GetClassLinker()->GetClassDescriptorsForProfileKeys(resolved_classes)));
+ runtime->GetClassLinker()->GetClassDescriptorsForResolvedClasses(resolved_classes)));
VLOG(compiler) << "Loaded " << image_classes_->size()
<< " image class descriptors from profile";
if (VLOG_IS_ON(compiler)) {
@@ -1968,7 +1953,6 @@ class Dex2Oat FINAL {
elf_writer->WriteDynamicSection();
elf_writer->WriteDebugInfo(oat_writer->GetMethodDebugInfo());
- elf_writer->WritePatchLocations(oat_writer->GetAbsolutePatchLocations());
if (!elf_writer->End()) {
LOG(ERROR) << "Failed to write ELF file " << oat_file->GetPath();
@@ -2821,6 +2805,9 @@ static int CompileImage(Dex2Oat& dex2oat) {
// When given --host, finish early without stripping.
if (dex2oat.IsHost()) {
+ if (!dex2oat.FlushCloseOutputFiles()) {
+ return EXIT_FAILURE;
+ }
dex2oat.DumpTiming();
return EXIT_SUCCESS;
}
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index b79050e9d0..e7277bceae 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -63,6 +63,7 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
oat_file.reset(OS::CreateEmptyFile(odex_location.c_str()));
CHECK(oat_file != nullptr) << odex_location;
args.push_back("--oat-fd=" + std::to_string(oat_file->Fd()));
+ args.push_back("--oat-location=" + odex_location);
} else {
args.push_back("--oat-file=" + odex_location);
}
@@ -583,13 +584,16 @@ class Dex2oatLayoutTest : public Dex2oatTest {
-  // Emits a profile with a single dex file with the given location and a single class index of 1.
+  // Emits a profile with a single dex file with the given location and the given
+  // number of classes, with type indices starting at 1.
void GenerateProfile(const std::string& test_profile,
const std::string& dex_location,
+ size_t num_classes,
uint32_t checksum) {
int profile_test_fd = open(test_profile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
CHECK_GE(profile_test_fd, 0);
ProfileCompilationInfo info;
std::string profile_key = ProfileCompilationInfo::GetProfileDexFileKey(dex_location);
- info.AddClassIndex(profile_key, checksum, dex::TypeIndex(1));
+ for (size_t i = 0; i < num_classes; ++i) {
+ info.AddClassIndex(profile_key, checksum, dex::TypeIndex(1 + i));
+ }
bool result = info.Save(profile_test_fd);
close(profile_test_fd);
ASSERT_TRUE(result);
@@ -597,7 +601,9 @@ class Dex2oatLayoutTest : public Dex2oatTest {
void CompileProfileOdex(const std::string& dex_location,
const std::string& odex_location,
+ const std::string& app_image_file_name,
bool use_fd,
+ size_t num_profile_classes,
const std::vector<std::string>& extra_args = {}) {
const std::string profile_location = GetScratchDir() + "/primary.prof";
const char* location = dex_location.c_str();
@@ -606,33 +612,86 @@ class Dex2oatLayoutTest : public Dex2oatTest {
ASSERT_TRUE(DexFile::Open(location, location, true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& dex_file = dex_files[0];
- GenerateProfile(profile_location, dex_location, dex_file->GetLocationChecksum());
+ GenerateProfile(profile_location,
+ dex_location,
+ num_profile_classes,
+ dex_file->GetLocationChecksum());
std::vector<std::string> copy(extra_args);
copy.push_back("--profile-file=" + profile_location);
+ std::unique_ptr<File> app_image_file;
+ if (!app_image_file_name.empty()) {
+ if (use_fd) {
+ app_image_file.reset(OS::CreateEmptyFile(app_image_file_name.c_str()));
+ copy.push_back("--app-image-fd=" + std::to_string(app_image_file->Fd()));
+ } else {
+ copy.push_back("--app-image-file=" + app_image_file_name);
+ }
+ }
GenerateOdexForTest(dex_location,
odex_location,
CompilerFilter::kSpeedProfile,
copy,
/* expect_success */ true,
use_fd);
+ if (app_image_file != nullptr) {
+ ASSERT_EQ(app_image_file->FlushCloseOrErase(), 0) << "Could not flush and close art file";
+ }
}
- void RunTest() {
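+  // Reads the image header of the given app image and returns the image size.
+  // Used below to check that adding classes to the profile grows the image.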
+ uint64_t GetImageSize(const std::string& image_file_name) {
+ EXPECT_FALSE(image_file_name.empty());
+ std::unique_ptr<File> file(OS::OpenFileForReading(image_file_name.c_str()));
+ CHECK(file != nullptr);
+ ImageHeader image_header;
+ const bool success = file->ReadFully(&image_header, sizeof(image_header));
+ CHECK(success);
+ CHECK(image_header.IsValid());
+ ReaderMutexLock mu(Thread::Current(), *Locks::mutator_lock_);
+ return image_header.GetImageSize();
+ }
+
+ void RunTest(bool app_image) {
std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex";
+    std::string app_image_file = app_image ? (GetOdexDir() + "/DexOdexNoOat.art") : "";
Copy(GetDexSrc2(), dex_location);
- CompileProfileOdex(dex_location, odex_location, /* use_fd */ false);
-
+ uint64_t image_file_empty_profile = 0;
+ if (app_image) {
+ CompileProfileOdex(dex_location,
+ odex_location,
+ app_image_file,
+ /* use_fd */ false,
+ /* num_profile_classes */ 0);
+ CheckValidity();
+ ASSERT_TRUE(success_);
+ // Don't check the result since CheckResult relies on the class being in the profile.
+ image_file_empty_profile = GetImageSize(app_image_file);
+ EXPECT_GT(image_file_empty_profile, 0u);
+ }
+
+ // Small profile.
+ CompileProfileOdex(dex_location,
+ odex_location,
+ app_image_file,
+ /* use_fd */ false,
+ /* num_profile_classes */ 1);
CheckValidity();
ASSERT_TRUE(success_);
- CheckResult(dex_location, odex_location);
+ CheckResult(dex_location, odex_location, app_image_file);
+
+ if (app_image) {
+ // Test that the profile made a difference by adding more classes.
+ const uint64_t image_file_small_profile = GetImageSize(app_image_file);
+ CHECK_LT(image_file_empty_profile, image_file_small_profile);
+ }
}
void RunTestVDex() {
std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex";
std::string vdex_location = GetOdexDir() + "/DexOdexNoOat.vdex";
+ std::string app_image_file_name = GetOdexDir() + "/DexOdexNoOat.art";
Copy(GetDexSrc2(), dex_location);
std::unique_ptr<File> vdex_file1(OS::CreateEmptyFile(vdex_location.c_str()));
@@ -643,7 +702,9 @@ class Dex2oatLayoutTest : public Dex2oatTest {
std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file1->Fd());
CompileProfileOdex(dex_location,
odex_location,
+ app_image_file_name,
/* use_fd */ true,
+ /* num_profile_classes */ 1,
{ input_vdex, output_vdex });
EXPECT_GT(vdex_file1->GetLength(), 0u);
}
@@ -652,17 +713,21 @@ class Dex2oatLayoutTest : public Dex2oatTest {
std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file2.GetFd());
CompileProfileOdex(dex_location,
odex_location,
+ app_image_file_name,
/* use_fd */ true,
+ /* num_profile_classes */ 1,
{ input_vdex, output_vdex });
EXPECT_GT(vdex_file2.GetFile()->GetLength(), 0u);
}
ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
CheckValidity();
ASSERT_TRUE(success_);
- CheckResult(dex_location, odex_location);
+ CheckResult(dex_location, odex_location, app_image_file_name);
}
- void CheckResult(const std::string& dex_location, const std::string& odex_location) {
+ void CheckResult(const std::string& dex_location,
+ const std::string& odex_location,
+ const std::string& app_image_file_name) {
// Host/target independent checks.
std::string error_msg;
std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
@@ -698,6 +763,16 @@ class Dex2oatLayoutTest : public Dex2oatTest {
}
EXPECT_EQ(odex_file->GetCompilerFilter(), CompilerFilter::kSpeedProfile);
+
+ if (!app_image_file_name.empty()) {
+ // Go peek at the image header to make sure it was large enough to contain the class.
+ std::unique_ptr<File> file(OS::OpenFileForReading(app_image_file_name.c_str()));
+ ImageHeader image_header;
+ bool success = file->ReadFully(&image_header, sizeof(image_header));
+ ASSERT_TRUE(success);
+ ASSERT_TRUE(image_header.IsValid());
+ EXPECT_GT(image_header.GetImageSection(ImageHeader::kSectionObjects).Size(), 0u);
+ }
}
// Check whether the dex2oat run was really successful.
@@ -720,7 +795,11 @@ class Dex2oatLayoutTest : public Dex2oatTest {
};
TEST_F(Dex2oatLayoutTest, TestLayout) {
- RunTest();
+  RunTest(/* app_image */ false);
+}
+
+TEST_F(Dex2oatLayoutTest, TestLayoutAppImage) {
+  RunTest(/* app_image */ true);
}
TEST_F(Dex2oatLayoutTest, TestVdexLayout) {
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 131f4b9f63..34983cf5fb 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -64,23 +64,18 @@ static uint32_t GetCodeItemSize(const DexFile& dex_file, const DexFile::CodeItem
uintptr_t insns_end = reinterpret_cast<uintptr_t>(&disk_code_item.insns_[insns_size]);
return insns_end - code_item_start;
} else {
- uint32_t last_handler_off = 0;
- for (uint32_t i = 0; i < tries_size; ++i) {
- // Iterate over the try items to find the last catch handler.
- const DexFile::TryItem* disk_try_item = dex_file.GetTryItems(disk_code_item, i);
- uint16_t handler_off = disk_try_item->handler_off_;
- if (handler_off > last_handler_off) {
- last_handler_off = handler_off;
+ // Get the start of the handler data.
+ const uint8_t* handler_data = DexFile::GetCatchHandlerData(disk_code_item, 0);
+ uint32_t handlers_size = DecodeUnsignedLeb128(&handler_data);
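+    // Each encoded_catch_handler begins with a signed LEB128 size: a positive
+    // value means that many type/address pairs, while a non-positive value means
+    // |size| pairs followed by a single catch-all address.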
+ // Manually read each handler.
+ for (uint32_t i = 0; i < handlers_size; ++i) {
+ int32_t uleb128_count = DecodeSignedLeb128(&handler_data) * 2;
+ if (uleb128_count <= 0) {
+ uleb128_count = -uleb128_count + 1;
+ }
+ for (int32_t j = 0; j < uleb128_count; ++j) {
+ DecodeUnsignedLeb128(&handler_data);
}
- }
- // Decode the final handler to see where it ends.
- const uint8_t* handler_data = DexFile::GetCatchHandlerData(disk_code_item, last_handler_off);
- int32_t uleb128_count = DecodeSignedLeb128(&handler_data) * 2;
- if (uleb128_count <= 0) {
- uleb128_count = -uleb128_count + 1;
- }
- for (int32_t i = 0; i < uleb128_count; ++i) {
- DecodeUnsignedLeb128(&handler_data);
}
return reinterpret_cast<uintptr_t>(handler_data) - code_item_start;
}
@@ -649,7 +644,7 @@ CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
}
}
int32_t size = DecodeSignedLeb128(&handlers_data);
- bool has_catch_all = size < 0;
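+  // A non-positive size means the handler ends with a catch-all; zero means it
+  // contains nothing but the catch-all.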
+ bool has_catch_all = size <= 0;
if (has_catch_all) {
size = -size;
}
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 9f0593a5cd..bd6548e65b 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -41,7 +41,7 @@ static const char kDexFileLayoutInputDex[] =
"AAAAdQEAAAAQAAABAAAAjAEAAA==";
static const char kDexFileLayoutInputProfile[] =
- "cHJvADAwMwABCwABAAAAAAD1KW3+Y2xhc3Nlcy5kZXgBAA==";
+ "cHJvADAwNAABCwABAAAAAAD1KW3+Y2xhc3Nlcy5kZXgBAA==";
static const char kDexFileLayoutExpectedOutputDex[] =
"ZGV4CjAzNQD1KW3+B8NAB0f2A/ZVIBJ0aHrGIqcpVTAUAgAAcAAAAHhWNBIAAAAAAAAAAIwBAAAH"
@@ -75,6 +75,49 @@ static const char kUnreferencedCatchHandlerInputDex[] =
"AAAEAQAABgAAAAEAAAAkAQAAASAAAAIAAABEAQAAARAAAAIAAADIAQAAAiAAABIAAADWAQAAAyAA"
"AAIAAAC1AgAAACAAAAEAAADIAgAAABAAAAEAAADYAgAA";
+// Dex file with 0-size (catch all only) catch handler unreferenced by try blocks.
+// Constructed by building a dex file with try/catch blocks and hex editing.
+static const char kUnreferenced0SizeCatchHandlerInputDex[] =
+ "ZGV4CjAzNQCEbEEvMstSNpQpjPdfMEfUBS48cis2QRJoAwAAcAAAAHhWNBIAAAAAAAAAAMgCAAAR"
+ "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAQAAAD8AAAAAQAAABwBAAAsAgAAPAEAAOoB"
+ "AADyAQAABAIAABMCAAAqAgAAPgIAAFICAABmAgAAaQIAAG0CAACCAgAAhgIAAIoCAACQAgAAlQIA"
+ "AJ4CAACiAgAAAgAAAAMAAAAEAAAABQAAAAYAAAAHAAAACQAAAAcAAAAFAAAAAAAAAAgAAAAFAAAA"
+ "3AEAAAgAAAAFAAAA5AEAAAQAAQANAAAAAAAAAAAAAAAAAAIADAAAAAEAAQAOAAAAAgAAAAAAAAAA"
+ "AAAAAQAAAAIAAAAAAAAAAQAAAAAAAAC5AgAAAAAAAAEAAQABAAAApgIAAAQAAABwEAMAAAAOAAQA"
+ "AQACAAIAqwIAAC8AAABiAAAAGgEPAG4gAgAQAGIAAAAaAQoAbiACABAAYgAAABoBEABuIAIAEABi"
+ "AAAAGgELAG4gAgAQAA4ADQBiAQAAGgIKAG4gAgAhACcADQBiAQAAGgILAG4gAgAhACcAAAAAAAAA"
+ "BwABAA4AAAAHAAEAAgAdACYAAAABAAAAAwAAAAEAAAAGAAY8aW5pdD4AEEhhbmRsZXJUZXN0Lmph"
+ "dmEADUxIYW5kbGVyVGVzdDsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEvbGFuZy9PYmpl"
+ "Y3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xhbmcvU3lzdGVtOwABVgACVkwAE1tMamF2"
+ "YS9sYW5nL1N0cmluZzsAAmYxAAJmMgAEbWFpbgADb3V0AAdwcmludGxuAAJ0MQACdDIAAQAHDgAE"
+ "AQAHDnl7eXkCeB2bAAAAAgAAgYAEvAIBCdQCAA0AAAAAAAAAAQAAAAAAAAABAAAAEQAAAHAAAAAC"
+ "AAAABwAAALQAAAADAAAAAwAAANAAAAAEAAAAAQAAAPQAAAAFAAAABAAAAPwAAAAGAAAAAQAAABwB"
+ "AAABIAAAAgAAADwBAAABEAAAAgAAANwBAAACIAAAEQAAAOoBAAADIAAAAgAAAKYCAAAAIAAAAQAA"
+ "ALkCAAAAEAAAAQAAAMgCAAA=";
+
+// Dex file with an unreferenced catch handler at end of code item.
+// Constructed by building a dex file with try/catch blocks and hex editing.
+static const char kUnreferencedEndingCatchHandlerInputDex[] =
+ "ZGV4CjAzNQCEflufI6xGTDDRmLpbfYi6ujPrDLIwvYcEBAAAcAAAAHhWNBIAAAAAAAAAAGQDAAAT"
+ "AAAAcAAAAAgAAAC8AAAAAwAAANwAAAABAAAAAAEAAAUAAAAIAQAAAQAAADABAAC0AgAAUAEAAE4C"
+ "AABWAgAAXgIAAGYCAAB4AgAAhwIAAJ4CAAC1AgAAyQIAAN0CAADxAgAA9wIAAP0CAAAAAwAABAMA"
+ "ABkDAAAcAwAAIgMAACcDAAAEAAAABQAAAAYAAAAHAAAACAAAAAkAAAAMAAAADgAAAAwAAAAGAAAA"
+ "AAAAAA0AAAAGAAAAQAIAAA0AAAAGAAAASAIAAAUAAQARAAAAAAAAAAAAAAAAAAAADwAAAAAAAgAQ"
+ "AAAAAQABABIAAAADAAAAAAAAAAAAAAABAAAAAwAAAAAAAAADAAAAAAAAAFADAAAAAAAAAQABAAEA"
+ "AAAwAwAABAAAAHAQBAAAAA4AAgAAAAIAAgA1AwAAIQAAAGIAAAAaAQoAbiADABAAYgAAABoBCwBu"
+ "IAMAEAAOAA0AYgAAABoBAQBuIAMAEAAo8A0AYgAAABoBAgBuIAMAEAAo7gAAAAAAAAcAAQAHAAAA"
+ "BwABAAIBAg8BAhgAAwABAAIAAgBCAwAAIQAAAGIAAAAaAQoAbiADABAAYgAAABoBCwBuIAMAEAAO"
+ "AA0AYgAAABoBAQBuIAMAEAAo8A0AYgAAABoBAgBuIAMAEAAo7gAAAAAAAAcAAQAHAAAABwABAAIB"
+ "Ag8BAhgAAQAAAAQAAAABAAAABwAGPGluaXQ+AAZDYXRjaDEABkNhdGNoMgAQSGFuZGxlclRlc3Qu"
+ "amF2YQANTEhhbmRsZXJUZXN0OwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABVMamF2YS9sYW5nL0V4"
+ "Y2VwdGlvbjsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABJMamF2YS9s"
+ "YW5nL1N5c3RlbTsABFRyeTEABFRyeTIAAVYAAlZMABNbTGphdmEvbGFuZy9TdHJpbmc7AAFhAARt"
+ "YWluAANvdXQAB3ByaW50bG4AAQAHDgAEAAcOfHsCeB0eih4AEQEABw59ewJ3HR6LHgAAAAMAAIGA"
+ "BNACAQnoAgEJ1AMAAA0AAAAAAAAAAQAAAAAAAAABAAAAEwAAAHAAAAACAAAACAAAALwAAAADAAAA"
+ "AwAAANwAAAAEAAAAAQAAAAABAAAFAAAABQAAAAgBAAAGAAAAAQAAADABAAABIAAAAwAAAFABAAAB"
+ "EAAAAgAAAEACAAACIAAAEwAAAE4CAAADIAAAAwAAADADAAAAIAAAAQAAAFADAAAAEAAAAQAAAGQD"
+ "AAA=";
+
// Dex file with multiple code items that have the same debug_info_off_. Constructed by a modified
// dexlayout on XandY.
static const char kDexFileDuplicateOffset[] =
@@ -145,6 +188,21 @@ static const char kUnalignedCodeInfoInputDex[] =
"AAEAAAC4AAAAASAAAAIAAADYAAAAAiAAAAYAAAACAQAAAyAAAAIAAAAxAQAAACAAAAEAAAA7AQAA"
"ABAAAAEAAABMAQAA";
+// Dex file with class data section preceding code items.
+// Constructed by passing dex file through dexmerger tool and hex editing.
+static const char kClassDataBeforeCodeInputDex[] =
+ "ZGV4CjAzNQCZKmCu3XXn4zvxCh5VH0gZNNobEAcsc49EAgAAcAAAAHhWNBIAAAAAAAAAAAQBAAAJ"
+ "AAAAcAAAAAQAAACUAAAAAgAAAKQAAAAAAAAAAAAAAAUAAAC8AAAAAQAAAOQAAABAAQAABAEAAPgB"
+ "AAAAAgAACAIAAAsCAAAQAgAAJAIAACcCAAAqAgAALQIAAAIAAAADAAAABAAAAAUAAAACAAAAAAAA"
+ "AAAAAAAFAAAAAwAAAAAAAAABAAEAAAAAAAEAAAAGAAAAAQAAAAcAAAABAAAACAAAAAIAAQAAAAAA"
+ "AQAAAAEAAAACAAAAAAAAAAEAAAAAAAAAjAEAAAAAAAALAAAAAAAAAAEAAAAAAAAAAQAAAAkAAABw"
+ "AAAAAgAAAAQAAACUAAAAAwAAAAIAAACkAAAABQAAAAUAAAC8AAAABgAAAAEAAADkAAAAABAAAAEA"
+ "AAAEAQAAACAAAAEAAACMAQAAASAAAAQAAACkAQAAAiAAAAkAAAD4AQAAAyAAAAQAAAAwAgAAAAAB"
+ "AwCBgASkAwEBvAMBAdADAQHkAwAAAQABAAEAAAAwAgAABAAAAHAQBAAAAA4AAgABAAAAAAA1AgAA"
+ "AgAAABIQDwACAAEAAAAAADoCAAACAAAAEiAPAAIAAQAAAAAAPwIAAAIAAAASMA8ABjxpbml0PgAG"
+ "QS5qYXZhAAFJAANMQTsAEkxqYXZhL2xhbmcvT2JqZWN0OwABVgABYQABYgABYwABAAcOAAMABw4A"
+ "BgAHDgAJAAcOAA==";
+
static void WriteBase64ToFile(const char* base64, File* file) {
// Decode base64.
CHECK(base64 != nullptr);
@@ -282,8 +340,8 @@ class DexLayoutTest : public CommonRuntimeTest {
return true;
}
- // Runs UnreferencedCatchHandlerTest.
- bool UnreferencedCatchHandlerExec(std::string* error_msg) {
+  // Runs dexlayout on the given base64-encoded dex file. Shared by the
+  // Unreferenced*CatchHandler tests.
+  bool UnreferencedCatchHandlerExec(std::string* error_msg, const char* dex_file_base64) {
ScratchFile tmp_file;
std::string tmp_name = tmp_file.GetFilename();
size_t tmp_last_slash = tmp_name.rfind("/");
@@ -291,7 +349,7 @@ class DexLayoutTest : public CommonRuntimeTest {
// Write inputs and expected outputs.
std::string input_dex = tmp_dir + "classes.dex";
- WriteFileBase64(kUnreferencedCatchHandlerInputDex, input_dex.c_str());
+    WriteFileBase64(dex_file_base64, input_dex.c_str());
std::string output_dex = tmp_dir + "classes.dex.new";
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
@@ -343,8 +401,26 @@ TEST_F(DexLayoutTest, UnreferencedCatchHandler) {
// Disable test on target.
TEST_DISABLED_FOR_TARGET();
std::string error_msg;
- ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg)) << error_msg;
+ ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg,
+ kUnreferencedCatchHandlerInputDex)) << error_msg;
+}
+
+TEST_F(DexLayoutTest, Unreferenced0SizeCatchHandler) {
+ // Disable test on target.
+ TEST_DISABLED_FOR_TARGET();
+ std::string error_msg;
+ ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg,
+ kUnreferenced0SizeCatchHandlerInputDex)) << error_msg;
+}
+
+TEST_F(DexLayoutTest, UnreferencedEndingCatchHandler) {
+ // Disable test on target.
+ TEST_DISABLED_FOR_TARGET();
+ std::string error_msg;
+ ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg,
+ kUnreferencedEndingCatchHandlerInputDex)) << error_msg;
}
+
TEST_F(DexLayoutTest, DuplicateOffset) {
ScratchFile temp;
WriteBase64ToFile(kDexFileDuplicateOffset, temp.GetFile());
@@ -418,4 +494,22 @@ TEST_F(DexLayoutTest, UnalignedCodeInfo) {
}
}
+TEST_F(DexLayoutTest, ClassDataBeforeCode) {
+ ScratchFile temp;
+ WriteBase64ToFile(kClassDataBeforeCodeInputDex, temp.GetFile());
+ ScratchFile temp2;
+ WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile());
+ EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() };
+ std::string error_msg;
+ const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
+ EXPECT_TRUE(result);
+ if (!result) {
+ LOG(ERROR) << "Error " << error_msg;
+ }
+}
+
} // namespace art
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index d395c170bf..52f3b52ee2 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -16,11 +16,14 @@
#include <gtest/gtest.h>
+#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
#include "exec_utils.h"
-#include "profile_assistant.h"
#include "jit/profile_compilation_info.h"
+#include "mirror/class-inl.h"
+#include "profile_assistant.h"
+#include "scoped_thread_state_change-inl.h"
#include "utils.h"
namespace art {
@@ -95,10 +98,12 @@ class ProfileAssistantTest : public CommonRuntimeTest {
return ExecAndReturnCode(argv_str, &error);
}
- bool CreateProfile(std::string class_file_contents, const std::string& filename) {
+ bool CreateProfile(std::string profile_file_contents,
+ const std::string& filename,
+ const std::string& dex_location) {
ScratchFile class_names_file;
File* file = class_names_file.GetFile();
- EXPECT_TRUE(file->WriteFully(class_file_contents.c_str(), class_file_contents.length()));
+ EXPECT_TRUE(file->WriteFully(profile_file_contents.c_str(), profile_file_contents.length()));
EXPECT_EQ(0, file->Flush());
EXPECT_TRUE(file->ResetOffset());
std::string profman_cmd = GetProfmanCmd();
@@ -106,8 +111,8 @@ class ProfileAssistantTest : public CommonRuntimeTest {
argv_str.push_back(profman_cmd);
argv_str.push_back("--create-profile-from=" + class_names_file.GetFilename());
argv_str.push_back("--reference-profile-file=" + filename);
- argv_str.push_back("--apk=" + GetLibCoreDexFileNames()[0]);
- argv_str.push_back("--dex-location=classes.dex");
+ argv_str.push_back("--apk=" + dex_location);
+ argv_str.push_back("--dex-location=" + dex_location);
std::string error;
EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0);
return true;
@@ -121,7 +126,7 @@ class ProfileAssistantTest : public CommonRuntimeTest {
argv_str.push_back("--dump-classes");
argv_str.push_back("--profile-file=" + filename);
argv_str.push_back("--apk=" + GetLibCoreDexFileNames()[0]);
- argv_str.push_back("--dex-location=classes.dex");
+ argv_str.push_back("--dex-location=" + GetLibCoreDexFileNames()[0]);
argv_str.push_back("--dump-output-to-fd=" + std::to_string(GetFd(class_names_file)));
std::string error;
EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0);
@@ -137,11 +142,74 @@ class ProfileAssistantTest : public CommonRuntimeTest {
bool CreateAndDump(const std::string& input_file_contents, std::string* output_file_contents) {
ScratchFile profile_file;
- EXPECT_TRUE(CreateProfile(input_file_contents, profile_file.GetFilename()));
+ EXPECT_TRUE(CreateProfile(input_file_contents,
+ profile_file.GetFilename(),
+ GetLibCoreDexFileNames()[0]));
profile_file.GetFile()->ResetOffset();
EXPECT_TRUE(DumpClasses(profile_file.GetFilename(), output_file_contents));
return true;
}
+
+ mirror::Class* GetClass(jobject class_loader, const std::string& clazz) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> h_loader(
+ hs.NewHandle(self->DecodeJObject(class_loader)->AsClassLoader()));
+ return class_linker->FindClass(self, clazz.c_str(), h_loader);
+ }
+
+ ArtMethod* GetVirtualMethod(jobject class_loader,
+ const std::string& clazz,
+ const std::string& name) {
+ mirror::Class* klass = GetClass(class_loader, clazz);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ const auto pointer_size = class_linker->GetImagePointerSize();
+ ArtMethod* method = nullptr;
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ for (auto& m : klass->GetVirtualMethods(pointer_size)) {
+ if (name == m.GetName()) {
+ EXPECT_TRUE(method == nullptr);
+ method = &m;
+ }
+ }
+ return method;
+ }
+
+  // Verify that the given method has the expected inline caches and nothing else.
+  void AssertInlineCaches(ArtMethod* method,
+                          const std::set<mirror::Class*>& expected_classes,
+ const ProfileCompilationInfo& info,
+ bool is_megamorphic,
+ bool is_missing_types)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
+ ASSERT_TRUE(info.GetMethod(method->GetDexFile()->GetLocation(),
+ method->GetDexFile()->GetLocationChecksum(),
+ method->GetDexMethodIndex(),
+ &pmi));
+ ASSERT_EQ(pmi.inline_caches.size(), 1u);
+ ProfileCompilationInfo::DexPcData dex_pc_data = pmi.inline_caches.begin()->second;
+
+ ASSERT_EQ(dex_pc_data.is_megamorphic, is_megamorphic);
+ ASSERT_EQ(dex_pc_data.is_missing_types, is_missing_types);
+    ASSERT_EQ(expected_classes.size(), dex_pc_data.classes.size());
+    size_t found = 0;
+    for (mirror::Class* it : expected_classes) {
+ for (const auto& class_ref : dex_pc_data.classes) {
+ ProfileCompilationInfo::DexReference dex_ref =
+ pmi.dex_references[class_ref.dex_profile_index];
+ if (dex_ref.MatchesDex(&(it->GetDexFile())) &&
+ class_ref.type_index == it->GetDexTypeIndex()) {
+ found++;
+ }
+ }
+ }
+
+    ASSERT_EQ(expected_classes.size(), found);
+ }
};
TEST_F(ProfileAssistantTest, AdviseCompilationEmptyReferences) {
@@ -358,25 +426,28 @@ TEST_F(ProfileAssistantTest, TestProfileGeneration) {
TEST_F(ProfileAssistantTest, TestProfileCreationAllMatch) {
// Class names put here need to be in sorted order.
std::vector<std::string> class_names = {
- "java.lang.Comparable",
- "java.lang.Math",
- "java.lang.Object"
+ "Ljava/lang/Comparable;",
+ "Ljava/lang/Math;",
+ "Ljava/lang/Object;"
};
std::string input_file_contents;
+ std::string expected_contents;
for (std::string& class_name : class_names) {
input_file_contents += class_name + std::string("\n");
+ expected_contents += DescriptorToDot(class_name.c_str()) +
+ std::string("\n");
}
std::string output_file_contents;
ASSERT_TRUE(CreateAndDump(input_file_contents, &output_file_contents));
- ASSERT_EQ(output_file_contents, input_file_contents);
+ ASSERT_EQ(output_file_contents, expected_contents);
}
TEST_F(ProfileAssistantTest, TestProfileCreationOneNotMatched) {
// Class names put here need to be in sorted order.
std::vector<std::string> class_names = {
- "doesnt.match.this.one",
- "java.lang.Comparable",
- "java.lang.Object"
+ "Ldoesnt/match/this/one;",
+ "Ljava/lang/Comparable;",
+ "Ljava/lang/Object;"
};
std::string input_file_contents;
for (std::string& class_name : class_names) {
@@ -385,16 +456,17 @@ TEST_F(ProfileAssistantTest, TestProfileCreationOneNotMatched) {
std::string output_file_contents;
ASSERT_TRUE(CreateAndDump(input_file_contents, &output_file_contents));
std::string expected_contents =
- class_names[1] + std::string("\n") + class_names[2] + std::string("\n");
+ DescriptorToDot(class_names[1].c_str()) + std::string("\n") +
+ DescriptorToDot(class_names[2].c_str()) + std::string("\n");
ASSERT_EQ(output_file_contents, expected_contents);
}
TEST_F(ProfileAssistantTest, TestProfileCreationNoneMatched) {
// Class names put here need to be in sorted order.
std::vector<std::string> class_names = {
- "doesnt.match.this.one",
- "doesnt.match.this.one.either",
- "nor.this.one"
+ "Ldoesnt/match/this/one;",
+ "Ldoesnt/match/this/one/either;",
+ "Lnor/this/one;"
};
std::string input_file_contents;
for (std::string& class_name : class_names) {
@@ -406,4 +478,115 @@ TEST_F(ProfileAssistantTest, TestProfileCreationNoneMatched) {
ASSERT_EQ(output_file_contents, expected_contents);
}
+TEST_F(ProfileAssistantTest, TestProfileCreateInlineCache) {
+ // Create the profile content.
+ std::vector<std::string> methods = {
+ "LTestInline;->inlineMonomorphic(LSuper;)I+LSubA;",
+ "LTestInline;->inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;",
+ "LTestInline;->inlineMegamorphic(LSuper;)I+LSubA;,LSubB;,LSubC;,LSubD;,LSubE;",
+ "LTestInline;->inlineMissingTypes(LSuper;)I+missing_types",
+ "LTestInline;->noInlineCache(LSuper;)I"
+ };
+ std::string input_file_contents;
+ for (std::string& m : methods) {
+ input_file_contents += m + std::string("\n");
+ }
+
+ // Create the profile and save it to disk.
+ ScratchFile profile_file;
+ ASSERT_TRUE(CreateProfile(input_file_contents,
+ profile_file.GetFilename(),
+ GetTestDexFileName("ProfileTestMultiDex")));
+
+ // Load the profile from disk.
+ ProfileCompilationInfo info;
+ profile_file.GetFile()->ResetOffset();
+ ASSERT_TRUE(info.Load(GetFd(profile_file)));
+
+ // Load the dex files and verify that the profile contains the expected methods info.
+ ScopedObjectAccess soa(Thread::Current());
+ jobject class_loader = LoadDex("ProfileTestMultiDex");
+ ASSERT_NE(class_loader, nullptr);
+
+ mirror::Class* sub_a = GetClass(class_loader, "LSubA;");
+ mirror::Class* sub_b = GetClass(class_loader, "LSubB;");
+ mirror::Class* sub_c = GetClass(class_loader, "LSubC;");
+
+ ASSERT_TRUE(sub_a != nullptr);
+ ASSERT_TRUE(sub_b != nullptr);
+ ASSERT_TRUE(sub_c != nullptr);
+
+ {
+ // Verify that method inlineMonomorphic has the expected inline caches and nothing else.
+ ArtMethod* inline_monomorphic = GetVirtualMethod(class_loader,
+ "LTestInline;",
+ "inlineMonomorphic");
+ ASSERT_TRUE(inline_monomorphic != nullptr);
+ std::set<mirror::Class*> expected_monomorphic;
+ expected_monomorphic.insert(sub_a);
+ AssertInlineCaches(inline_monomorphic,
+ expected_monomorphic,
+ info,
+ /*megamorphic*/false,
+ /*missing_types*/false);
+ }
+
+ {
+ // Verify that method inlinePolymorphic has the expected inline caches and nothing else.
+    ArtMethod* inline_polymorphic = GetVirtualMethod(class_loader,
+                                                     "LTestInline;",
+                                                     "inlinePolymorphic");
+    ASSERT_TRUE(inline_polymorphic != nullptr);
+ std::set<mirror::Class*> expected_polymorphic;
+ expected_polymorphic.insert(sub_a);
+ expected_polymorphic.insert(sub_b);
+ expected_polymorphic.insert(sub_c);
+    AssertInlineCaches(inline_polymorphic,
+ expected_polymorphic,
+ info,
+ /*megamorphic*/false,
+ /*missing_types*/false);
+ }
+
+ {
+ // Verify that method inlineMegamorphic has the expected inline caches and nothing else.
+ ArtMethod* inline_megamorphic = GetVirtualMethod(class_loader,
+ "LTestInline;",
+ "inlineMegamorphic");
+ ASSERT_TRUE(inline_megamorphic != nullptr);
+ std::set<mirror::Class*> expected_megamorphic;
+ AssertInlineCaches(inline_megamorphic,
+ expected_megamorphic,
+ info,
+ /*megamorphic*/true,
+ /*missing_types*/false);
+ }
+
+ {
+    // Verify that method inlineMissingTypes has the expected inline caches and nothing else.
+ ArtMethod* inline_missing_types = GetVirtualMethod(class_loader,
+ "LTestInline;",
+ "inlineMissingTypes");
+ ASSERT_TRUE(inline_missing_types != nullptr);
+    std::set<mirror::Class*> expected_missing_types;
+    AssertInlineCaches(inline_missing_types,
+                       expected_missing_types,
+ info,
+ /*megamorphic*/false,
+ /*missing_types*/true);
+ }
+
+ {
+ // Verify that method noInlineCache has no inline caches in the profile.
+ ArtMethod* no_inline_cache = GetVirtualMethod(class_loader, "LTestInline;", "noInlineCache");
+ ASSERT_TRUE(no_inline_cache != nullptr);
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi_no_inline_cache;
+ ASSERT_TRUE(info.GetMethod(no_inline_cache->GetDexFile()->GetLocation(),
+ no_inline_cache->GetDexFile()->GetLocationChecksum(),
+ no_inline_cache->GetDexMethodIndex(),
+ &pmi_no_inline_cache));
+ ASSERT_TRUE(pmi_no_inline_cache.inline_caches.empty());
+ }
+}
+
} // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index a42e4f1db1..f7316cc129 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -36,6 +36,7 @@
#include "base/stringpiece.h"
#include "base/time_utils.h"
#include "base/unix_file/fd_file.h"
+#include "bytecode_utils.h"
#include "dex_file.h"
#include "jit/profile_compilation_info.h"
#include "runtime.h"
@@ -136,6 +137,15 @@ static constexpr uint16_t kDefaultTestProfileNumDex = 20;
static constexpr uint16_t kDefaultTestProfileMethodRatio = 5;
static constexpr uint16_t kDefaultTestProfileClassRatio = 5;
+// Separators used when parsing the human-friendly representation of profiles.
+static const std::string kMethodSep = "->";
+static const std::string kMissingTypesMarker = "missing_types";
+static constexpr char kProfileParsingInlineCacheSep = '+';
+static constexpr char kProfileParsingTypeSep = ',';
+static constexpr char kProfileParsingFirstCharInSignature = '(';
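+// For example, "LTestInline;->inlineMonomorphic(LSuper;)I+LSubA;" names the
+// class "LTestInline;", the method "inlineMonomorphic(LSuper;)I" and a single
+// inline cache type "LSubA;".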
+
+// TODO(calin): This class has grown too much from its initial design. Split the functionality
+// into smaller, more contained pieces.
class ProfMan FINAL {
public:
ProfMan() :
@@ -522,6 +532,187 @@ class ProfMan FINAL {
return output.release();
}
+ // Find class klass_descriptor in the given dex_files and store its reference
+ // in the out parameter class_ref.
+ // Return true if the definition of the class was found in any of the dex_files.
+ bool FindClass(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
+ const std::string& klass_descriptor,
+ /*out*/ProfileMethodInfo::ProfileClassReference* class_ref) {
+ for (const std::unique_ptr<const DexFile>& dex_file_ptr : dex_files) {
+ const DexFile* dex_file = dex_file_ptr.get();
+ const DexFile::TypeId* type_id = dex_file->FindTypeId(klass_descriptor.c_str());
+ if (type_id == nullptr) {
+ continue;
+ }
+ dex::TypeIndex type_index = dex_file->GetIndexForTypeId(*type_id);
+ if (dex_file->FindClassDef(type_index) == nullptr) {
+ // Class is only referenced in the current dex file but not defined in it.
+ continue;
+ }
+ class_ref->dex_file = dex_file;
+ class_ref->type_index = type_index;
+ return true;
+ }
+ return false;
+ }
+
+ // Find the method specified by method_spec in the class class_ref. The method
+ // must have a single INVOKE_VIRTUAL in its byte code.
+ // Upon success it returns true and stores the method index and the invoke dex pc
+ // in the output parameters.
+  // The format of the method spec here is just "name(signature)", e.g.
+  // "inlinePolymorphic(LSuper;)I"; any inline cache suffix has already been
+  // stripped off by ProcessLine.
+ //
+ // TODO(calin): support INVOKE_INTERFACE and the range variants.
+ bool FindMethodWithSingleInvoke(const ProfileMethodInfo::ProfileClassReference& class_ref,
+ const std::string& method_spec,
+ /*out*/uint16_t* method_index,
+ /*out*/uint32_t* dex_pc) {
+ std::vector<std::string> name_and_signature;
+ Split(method_spec, kProfileParsingFirstCharInSignature, &name_and_signature);
+    if (name_and_signature.size() != 2) {
+      LOG(ERROR) << "Invalid method name and signature " << method_spec;
+      return false;
+    }
+ const std::string& name = name_and_signature[0];
+ const std::string& signature = kProfileParsingFirstCharInSignature + name_and_signature[1];
+ const DexFile* dex_file = class_ref.dex_file;
+
+ const DexFile::StringId* name_id = dex_file->FindStringId(name.c_str());
+ if (name_id == nullptr) {
+ LOG(ERROR) << "Could not find name: " << name;
+ return false;
+ }
+ dex::TypeIndex return_type_idx;
+ std::vector<dex::TypeIndex> param_type_idxs;
+ if (!dex_file->CreateTypeList(signature, &return_type_idx, &param_type_idxs)) {
+ LOG(ERROR) << "Could not create type list" << signature;
+ return false;
+ }
+ const DexFile::ProtoId* proto_id = dex_file->FindProtoId(return_type_idx, param_type_idxs);
+ if (proto_id == nullptr) {
+ LOG(ERROR) << "Could not find proto_id: " << name;
+ return false;
+ }
+ const DexFile::MethodId* method_id = dex_file->FindMethodId(
+ dex_file->GetTypeId(class_ref.type_index), *name_id, *proto_id);
+ if (method_id == nullptr) {
+ LOG(ERROR) << "Could not find method_id: " << name;
+ return false;
+ }
+
+ *method_index = dex_file->GetIndexForMethodId(*method_id);
+
+ uint32_t offset = dex_file->FindCodeItemOffset(
+ *dex_file->FindClassDef(class_ref.type_index),
+ *method_index);
+ const DexFile::CodeItem* code_item = dex_file->GetCodeItem(offset);
+
+ bool found_invoke = false;
+ for (CodeItemIterator it(*code_item); !it.Done(); it.Advance()) {
+ if (it.CurrentInstruction().Opcode() == Instruction::INVOKE_VIRTUAL) {
+ if (found_invoke) {
+ LOG(ERROR) << "Multiple invoke INVOKE_VIRTUAL found: " << name;
+ return false;
+ }
+ found_invoke = true;
+ *dex_pc = it.CurrentDexPc();
+ }
+ }
+ if (!found_invoke) {
+ LOG(ERROR) << "Could not find any INVOKE_VIRTUAL: " << name;
+ }
+ return found_invoke;
+ }
+
+ // Process a line defining a class or a method and its inline caches.
+  // Upon success, returns true and adds the class or the method info to the profile.
+  // The possible line formats are:
+  // "LJustTheClass;".
+ // "LTestInline;->inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;".
+ // "LTestInline;->inlineMissingTypes(LSuper;)I+missing_types".
+ // "LTestInline;->inlineNoInlineCaches(LSuper;)I".
+ // The method and classes are searched only in the given dex files.
+ bool ProcessLine(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
+ const std::string& line,
+ /*out*/ProfileCompilationInfo* profile) {
+ std::string klass;
+ std::string method_str;
+ size_t method_sep_index = line.find(kMethodSep);
+ if (method_sep_index == std::string::npos) {
+ klass = line;
+ } else {
+ klass = line.substr(0, method_sep_index);
+ method_str = line.substr(method_sep_index + kMethodSep.size());
+ }
+
+ ProfileMethodInfo::ProfileClassReference class_ref;
+ if (!FindClass(dex_files, klass, &class_ref)) {
+ LOG(WARNING) << "Could not find class: " << klass;
+ return false;
+ }
+
+ if (method_str.empty()) {
+ // No method to add. Just add the class.
+ std::set<DexCacheResolvedClasses> resolved_class_set;
+ const DexFile* dex_file = class_ref.dex_file;
+ const auto& dex_resolved_classes = resolved_class_set.emplace(
+ dex_file->GetLocation(),
+ dex_file->GetBaseLocation(),
+ dex_file->GetLocationChecksum());
+ dex_resolved_classes.first->AddClass(class_ref.type_index);
+ profile->AddMethodsAndClasses(std::vector<ProfileMethodInfo>(), resolved_class_set);
+ return true;
+ }
+
+ // Process the method.
+ std::string method_spec;
+ std::vector<std::string> inline_cache_elems;
+
+ std::vector<std::string> method_elems;
+ bool is_missing_types = false;
+    Split(method_str, kProfileParsingInlineCacheSep, &method_elems);
+ if (method_elems.size() == 2) {
+ method_spec = method_elems[0];
+ is_missing_types = method_elems[1] == kMissingTypesMarker;
+ if (!is_missing_types) {
+ Split(method_elems[1], kProfileParsingTypeSep, &inline_cache_elems);
+ }
+ } else if (method_elems.size() == 1) {
+ method_spec = method_elems[0];
+ } else {
+ LOG(ERROR) << "Invalid method line: " << line;
+ return false;
+ }
+
+ uint16_t method_index;
+ uint32_t dex_pc;
+ if (!FindMethodWithSingleInvoke(class_ref, method_spec, &method_index, &dex_pc)) {
+ return false;
+ }
+ std::vector<ProfileMethodInfo::ProfileClassReference> classes(inline_cache_elems.size());
+ size_t class_it = 0;
+    for (const std::string& ic_class : inline_cache_elems) {
+ if (!FindClass(dex_files, ic_class, &(classes[class_it++]))) {
+ LOG(ERROR) << "Could not find class: " << ic_class;
+ return false;
+ }
+ }
+ std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;
+ inline_caches.emplace_back(dex_pc, is_missing_types, classes);
+ std::vector<ProfileMethodInfo> pmi;
+ pmi.emplace_back(class_ref.dex_file, method_index, inline_caches);
+
+ profile->AddMethodsAndClasses(pmi, std::set<DexCacheResolvedClasses>());
+ return true;
+ }
+
+  // Creates a profile from a human-friendly textual representation.
+ // The expected input format is:
+ // # Classes
+ // Ljava/lang/Comparable;
+ // Ljava/lang/Math;
+ // # Methods with inline caches
+ // LTestInline;->inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;
+ // LTestInline;->noInlineCache(LSuper;)I
int CreateProfile() {
// Validate parameters for this command.
if (apk_files_.empty() && apks_fd_.empty()) {
@@ -550,51 +741,22 @@ class ProfMan FINAL {
return -1;
}
}
- // Read the user-specified list of classes (dot notation rather than descriptors).
+ // Read the user-specified list of classes and methods.
std::unique_ptr<std::unordered_set<std::string>>
- user_class_list(ReadCommentedInputFromFile<std::unordered_set<std::string>>(
+ user_lines(ReadCommentedInputFromFile<std::unordered_set<std::string>>(
create_profile_from_file_.c_str(), nullptr)); // No post-processing.
- std::unordered_set<std::string> matched_user_classes;
- // Open the dex files to look up class names.
+
+ // Open the dex files to look up classes and methods.
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
- // Iterate over the dex files looking for class names in the input stream.
- std::set<DexCacheResolvedClasses> resolved_class_set;
- for (auto& dex_file : dex_files) {
- // Compute the set of classes to be added for this dex file first. This
- // avoids creating an entry in the profile information for dex files that
- // contribute no classes.
- std::unordered_set<dex::TypeIndex> classes_to_be_added;
- for (const auto& klass : *user_class_list) {
- std::string descriptor = DotToDescriptor(klass.c_str());
- const DexFile::TypeId* type_id = dex_file->FindTypeId(descriptor.c_str());
- if (type_id == nullptr) {
- continue;
- }
- classes_to_be_added.insert(dex_file->GetIndexForTypeId(*type_id));
- matched_user_classes.insert(klass);
- }
- if (classes_to_be_added.empty()) {
- continue;
- }
- // Insert the DexCacheResolved Classes into the set expected for
- // AddMethodsAndClasses.
- std::set<DexCacheResolvedClasses>::iterator dex_resolved_classes =
- resolved_class_set.emplace(dex_file->GetLocation(),
- dex_file->GetBaseLocation(),
- dex_file->GetLocationChecksum()).first;
- dex_resolved_classes->AddClasses(classes_to_be_added.begin(), classes_to_be_added.end());
- }
- // Warn the user if we didn't find matches for every class.
- for (const auto& klass : *user_class_list) {
- if (matched_user_classes.find(klass) == matched_user_classes.end()) {
- LOG(WARNING) << "requested class '" << klass << "' was not matched in any dex file";
- }
- }
- // Generate the profile data structure.
+
+ // Process the lines one by one and add the successful ones to the profile.
ProfileCompilationInfo info;
- std::vector<ProfileMethodInfo> methods; // No methods for now.
- info.AddMethodsAndClasses(methods, resolved_class_set);
+
+ for (const auto& line : *user_lines) {
+ ProcessLine(dex_files, line, &info);
+ }
+
// Write the profile file.
CHECK(info.Save(fd));
if (close(fd) < 0) {
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 9958814f58..d075c58d27 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -99,6 +99,7 @@ cc_defaults {
"intern_table.cc",
"interpreter/interpreter.cc",
"interpreter/interpreter_common.cc",
+ "interpreter/interpreter_intrinsics.cc",
"interpreter/interpreter_switch_impl.cc",
"interpreter/unstarted_runtime.cc",
"java_vm_ext.cc",
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 85310911be..72aa785973 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1487,6 +1487,7 @@ ENTRY art_quick_imt_conflict_trampoline
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
+ mov r0, r12 // Load interface method
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 7cb50b7118..5b5d2ef0dc 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1966,6 +1966,7 @@ ENTRY art_quick_imt_conflict_trampoline
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
+ mov x0, xIP0 // Load interface method
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 4f7b4957b6..5d6153949a 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1763,6 +1763,7 @@ ENTRY art_quick_imt_conflict_trampoline
.Lconflict_trampoline:
# Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
+ move $a0, $t7 # Load interface method.
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 28d7c77938..3ee9c4a9c8 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1715,6 +1715,7 @@ ENTRY art_quick_imt_conflict_trampoline
.Lconflict_trampoline:
# Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
+ move $a0, $t0 # Load interface method.
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 8c907e0790..ff7ba92f93 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1806,6 +1806,7 @@ DEFINE_FUNCTION art_quick_imt_conflict_trampoline
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
+ movl %edi, %eax // Load interface method
POP EDI
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END_FUNCTION art_quick_imt_conflict_trampoline
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index f1be52eeb6..8a663d15c5 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1662,6 +1662,7 @@ DEFINE_FUNCTION art_quick_imt_conflict_trampoline
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
+ movq %r10, %rdi // Load interface method
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
#endif // __APPLE__
END_FUNCTION art_quick_imt_conflict_trampoline
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 245ab3b24f..f92fbea15d 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -89,6 +89,7 @@ enum ArenaAllocKind {
kArenaAllocRegisterAllocator,
kArenaAllocRegisterAllocatorValidate,
kArenaAllocStackMapStream,
+ kArenaAllocVectorNode,
kArenaAllocCodeGenerator,
kArenaAllocAssembler,
kArenaAllocParallelMoveResolver,
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 24846e5ceb..b0394a5255 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -46,7 +46,6 @@ Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;
-Mutex* Locks::jdwp_event_list_lock_ = nullptr;
Mutex* Locks::jni_function_table_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
@@ -74,6 +73,7 @@ ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
Mutex* Locks::jni_weak_globals_lock_ = nullptr;
ReaderWriterMutex* Locks::dex_lock_ = nullptr;
std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
+Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
struct AllMutexData {
// A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
@@ -118,6 +118,26 @@ class ScopedAllMutexesLock FINAL {
const BaseMutex* const mutex_;
};
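+
+// A guard for expected_mutexes_on_weak_ref_access_ that's not a mutex
+// (CAS to acquire and busy wait), mirroring ScopedAllMutexesLock above.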
+class Locks::ScopedExpectedMutexesOnWeakRefAccessLock FINAL {
+ public:
+ explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
+ while (!Locks::expected_mutexes_on_weak_ref_access_guard_.CompareExchangeWeakAcquire(0,
+ mutex)) {
+ NanoSleep(100);
+ }
+ }
+
+ ~ScopedExpectedMutexesOnWeakRefAccessLock() {
+ while (!Locks::expected_mutexes_on_weak_ref_access_guard_.CompareExchangeWeakRelease(mutex_,
+ 0)) {
+ NanoSleep(100);
+ }
+ }
+
+ private:
+ const BaseMutex* const mutex_;
+};
+
// Scoped class that generates events at the beginning and end of lock contention.
class ScopedContentionRecorder FINAL : public ValueObject {
public:
@@ -999,7 +1019,6 @@ void Locks::Init() {
DCHECK(verifier_deps_lock_ != nullptr);
DCHECK(host_dlopen_handles_lock_ != nullptr);
DCHECK(intern_table_lock_ != nullptr);
- DCHECK(jdwp_event_list_lock_ != nullptr);
DCHECK(jni_function_table_lock_ != nullptr);
DCHECK(jni_libraries_lock_ != nullptr);
DCHECK(logging_lock_ != nullptr);
@@ -1042,10 +1061,6 @@ void Locks::Init() {
DCHECK(runtime_shutdown_lock_ == nullptr);
runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
- UPDATE_CURRENT_LOCK_LEVEL(kJdwpEventListLock);
- DCHECK(jdwp_event_list_lock_ == nullptr);
- jdwp_event_list_lock_ = new Mutex("JDWP event list lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
DCHECK(profiler_lock_ == nullptr);
profiler_lock_ = new Mutex("profiler lock", current_lock_level);
@@ -1169,14 +1184,9 @@ void Locks::Init() {
#undef UPDATE_CURRENT_LOCK_LEVEL
// List of mutexes that we may hold when accessing a weak ref.
- dex_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
- expected_mutexes_on_weak_ref_access_.push_back(dex_lock_);
- classlinker_classes_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
- expected_mutexes_on_weak_ref_access_.push_back(classlinker_classes_lock_);
- jdwp_event_list_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
- expected_mutexes_on_weak_ref_access_.push_back(jdwp_event_list_lock_);
- jni_libraries_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
- expected_mutexes_on_weak_ref_access_.push_back(jni_libraries_lock_);
+ AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock*/ false);
+ AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock*/ false);
+ AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock*/ false);
InitConditions();
}
@@ -1196,4 +1206,38 @@ bool Locks::IsSafeToCallAbortRacy() {
return safe_to_call_abort_cb != nullptr && safe_to_call_abort_cb();
}
+void Locks::AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
+ if (need_lock) {
+ ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+ mutex->SetShouldRespondToEmptyCheckpointRequest(true);
+ expected_mutexes_on_weak_ref_access_.push_back(mutex);
+ } else {
+ mutex->SetShouldRespondToEmptyCheckpointRequest(true);
+ expected_mutexes_on_weak_ref_access_.push_back(mutex);
+ }
+}
+
+void Locks::RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
+ if (need_lock) {
+ ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+ mutex->SetShouldRespondToEmptyCheckpointRequest(false);
+ std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+ auto it = std::find(list.begin(), list.end(), mutex);
+ DCHECK(it != list.end());
+ list.erase(it);
+ } else {
+ mutex->SetShouldRespondToEmptyCheckpointRequest(false);
+ std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+ auto it = std::find(list.begin(), list.end(), mutex);
+ DCHECK(it != list.end());
+ list.erase(it);
+ }
+}
+
+bool Locks::IsExpectedOnWeakRefAccess(BaseMutex* mutex) {
+ ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+ std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+ return std::find(list.begin(), list.end(), mutex) != list.end();
+}
+
} // namespace art
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index c59664b9cd..038aeb33e7 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -516,12 +516,12 @@ class SCOPED_CAPABILITY MutexLock {
// construction and releases it upon destruction.
class SCOPED_CAPABILITY ReaderMutexLock {
public:
- ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) :
+ ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) ALWAYS_INLINE :
self_(self), mu_(mu) {
mu_.SharedLock(self_);
}
- ~ReaderMutexLock() RELEASE() {
+ ~ReaderMutexLock() RELEASE() ALWAYS_INLINE {
mu_.SharedUnlock(self_);
}
@@ -583,6 +583,12 @@ class Locks {
// Checks for whether it is safe to call Abort() without using locks.
static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;
+ // Add a mutex to expected_mutexes_on_weak_ref_access_.
+ static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
+ // Remove a mutex from expected_mutexes_on_weak_ref_access_.
+ static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
+ // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
+ static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);
// Guards allocation entrypoint instrumenting.
static Mutex* instrument_entrypoints_lock_;
@@ -630,12 +636,8 @@ class Locks {
// Guards shutdown of the runtime.
static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
- static Mutex* jdwp_event_list_lock_
- ACQUIRED_AFTER(runtime_shutdown_lock_)
- ACQUIRED_BEFORE(breakpoint_lock_);
-
// Guards background profiler global state.
- static Mutex* profiler_lock_ ACQUIRED_AFTER(jdwp_event_list_lock_);
+ static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
// Guards trace (ie traceview) requests.
static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
@@ -738,6 +740,8 @@ class Locks {
// encounter an unexpected mutex on accessing weak refs,
// Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
+ static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
+ class ScopedExpectedMutexesOnWeakRefAccessLock;
};
class Roles {
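
Only the declaration of ScopedExpectedMutexesOnWeakRefAccessLock appears in this hunk; its body lives in mutex.cc. A self-contained sketch of the underlying idea, written against plain std::atomic rather than ART's Atomic<> wrapper (all names here are illustrative):

  #include <atomic>

  // Sketch: a scoped spin lock over a single guard word. Acquiring installs a
  // non-null key; releasing clears it, publishing list mutations made while held.
  class ScopedGuardWordLock {
   public:
    ScopedGuardWordLock(std::atomic<const void*>& guard, const void* key)
        : guard_(guard), key_(key) {
      const void* expected = nullptr;
      // Spin until we swap nullptr -> key_, i.e. until no other thread holds it.
      while (!guard_.compare_exchange_weak(expected, key_, std::memory_order_acquire)) {
        expected = nullptr;  // compare_exchange_weak rewrote it with the holder's key
      }
    }
    ~ScopedGuardWordLock() { guard_.store(nullptr, std::memory_order_release); }
   private:
    std::atomic<const void*>& guard_;
    const void* const key_;
  };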
diff --git a/compiler/optimizing/bytecode_utils.h b/runtime/bytecode_utils.h
index 133afa47fe..fa87b1d6da 100644
--- a/compiler/optimizing/bytecode_utils.h
+++ b/runtime/bytecode_utils.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_OPTIMIZING_BYTECODE_UTILS_H_
-#define ART_COMPILER_OPTIMIZING_BYTECODE_UTILS_H_
+#ifndef ART_RUNTIME_BYTECODE_UTILS_H_
+#define ART_RUNTIME_BYTECODE_UTILS_H_
#include "base/arena_object.h"
#include "dex_file.h"
@@ -177,4 +177,4 @@ inline bool IsThrowingDexInstruction(const Instruction& instruction) {
} // namespace art
-#endif // ART_COMPILER_OPTIMIZING_BYTECODE_UTILS_H_
+#endif // ART_RUNTIME_BYTECODE_UTILS_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index eaa35fe12d..b611aa2132 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1025,7 +1025,8 @@ bool ClassLinker::IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
class_loader->GetClass();
}
-static mirror::String* GetDexPathListElementName(ObjPtr<mirror::Object> element)
+static bool GetDexPathListElementName(ObjPtr<mirror::Object> element,
+ ObjPtr<mirror::String>* out_name)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* const dex_file_field =
jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
@@ -1037,17 +1038,20 @@ static mirror::String* GetDexPathListElementName(ObjPtr<mirror::Object> element)
CHECK_EQ(dex_file_field->GetDeclaringClass(), element->GetClass()) << element->PrettyTypeOf();
ObjPtr<mirror::Object> dex_file = dex_file_field->GetObject(element);
if (dex_file == nullptr) {
- return nullptr;
+ // A null dex file means the element was probably a jar with no dex files; return a null name.
+ *out_name = nullptr;
+ return true;
}
ObjPtr<mirror::Object> name_object = dex_file_name_field->GetObject(dex_file);
if (name_object != nullptr) {
- return name_object->AsString();
+ *out_name = name_object->AsString();
+ return true;
}
- return nullptr;
+ return false;
}
static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader,
- std::list<mirror::String*>* out_dex_file_names,
+ std::list<ObjPtr<mirror::String>>* out_dex_file_names,
std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(out_dex_file_names != nullptr);
@@ -1083,12 +1087,14 @@ static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader,
*error_msg = StringPrintf("Null dex element at index %d", i);
return false;
}
- ObjPtr<mirror::String> const name = GetDexPathListElementName(element);
- if (name == nullptr) {
- *error_msg = StringPrintf("Null name for dex element at index %d", i);
+ ObjPtr<mirror::String> name;
+ if (!GetDexPathListElementName(element, &name)) {
+ *error_msg = StringPrintf("Invalid dex path list element at index %d", i);
return false;
}
- out_dex_file_names->push_front(name.Ptr());
+ if (name != nullptr) {
+ out_dex_file_names->push_front(name.Ptr());
+ }
}
}
}
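
The new out-parameter contract distinguishes three outcomes where the old String* return value collapsed two of them into a single null. A caller sketch:

  // Sketch of the three outcomes now expressible:
  ObjPtr<mirror::String> name;
  if (!GetDexPathListElementName(element, &name)) {
    // Failure: the element has a dex file but its name could not be read.
  } else if (name == nullptr) {
    // Benign: the element has no dex file at all (e.g. a jar without classes.dex).
  } else {
    // Success: use name.
  }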
@@ -1769,14 +1775,14 @@ bool ClassLinker::AddImageSpace(
*error_msg = "Unexpected BootClassLoader in app image";
return false;
}
- std::list<mirror::String*> image_dex_file_names;
+ std::list<ObjPtr<mirror::String>> image_dex_file_names;
std::string temp_error_msg;
if (!FlattenPathClassLoader(image_class_loader.Get(), &image_dex_file_names, &temp_error_msg)) {
*error_msg = StringPrintf("Failed to flatten image class loader hierarchy '%s'",
temp_error_msg.c_str());
return false;
}
- std::list<mirror::String*> loader_dex_file_names;
+ std::list<ObjPtr<mirror::String>> loader_dex_file_names;
if (!FlattenPathClassLoader(class_loader.Get(), &loader_dex_file_names, &temp_error_msg)) {
*error_msg = StringPrintf("Failed to flatten class loader hierarchy '%s'",
temp_error_msg.c_str());
@@ -1788,7 +1794,10 @@ bool ClassLinker::AddImageSpace(
ObjPtr<mirror::Object> element = elements->GetWithoutChecks(i);
if (element != nullptr) {
// If we are somewhere in the middle of the array, there may be nulls at the end.
- loader_dex_file_names.push_back(GetDexPathListElementName(element));
+ ObjPtr<mirror::String> name;
+ if (GetDexPathListElementName(element, &name) && name != nullptr) {
+ loader_dex_file_names.push_back(name);
+ }
}
}
// Ignore the number of image dex files since we are adding those to the class loader anyways.
@@ -3891,8 +3900,10 @@ bool ClassLinker::AttemptSupertypeVerification(Thread* self,
if (!supertype->IsVerified() && !supertype->IsErroneous()) {
VerifyClass(self, supertype);
}
- if (supertype->IsCompileTimeVerified()) {
- // Either we are verified or we soft failed and need to retry at runtime.
+
+ if (supertype->IsVerified() || supertype->ShouldVerifyAtRuntime()) {
+ // The supertype is either verified, or we soft failed at AOT time.
+ DCHECK(supertype->IsVerified() || Runtime::Current()->IsAotCompiler());
return true;
}
// If we got this far then we have a hard failure.
@@ -3958,13 +3969,16 @@ verifier::MethodVerifier::FailureKind ClassLinker::VerifyClass(
return verifier::MethodVerifier::kHardFailure;
}
- // Don't attempt to re-verify if already sufficiently verified.
+ // Don't attempt to re-verify if already verified.
if (klass->IsVerified()) {
EnsureSkipAccessChecksMethods(klass, image_pointer_size_);
return verifier::MethodVerifier::kNoFailure;
}
- if (klass->IsCompileTimeVerified() && Runtime::Current()->IsAotCompiler()) {
- return verifier::MethodVerifier::kNoFailure;
+
+ // For AOT, don't attempt to re-verify if we have already found we should
+ // verify at runtime.
+ if (Runtime::Current()->IsAotCompiler() && klass->ShouldVerifyAtRuntime()) {
+ return verifier::MethodVerifier::kSoftFailure;
}
if (klass->GetStatus() == mirror::Class::kStatusResolved) {
@@ -4917,7 +4931,15 @@ bool ClassLinker::InitializeDefaultInterfaceRecursive(Thread* self,
// First we initialize all of iface's super-interfaces recursively.
for (size_t i = 0; i < num_direct_ifaces; i++) {
ObjPtr<mirror::Class> super_iface = mirror::Class::GetDirectInterface(self, iface.Get(), i);
- DCHECK(super_iface != nullptr);
+ if (UNLIKELY(super_iface == nullptr)) {
+ const char* iface_descriptor =
+ iface->GetDexFile().StringByTypeIdx(iface->GetDirectInterfaceTypeIdx(i));
+ LOG(FATAL) << "Check failed: super_iface != nullptr "
+ << "Debug data for bug 34839984: "
+ << iface->PrettyDescriptor() << " iface #" << i << " " << iface_descriptor
+ << " space: " << DescribeSpace(iface.Get())
+ << " loaders: " << DescribeLoaders(iface.Get(), iface_descriptor);
+ }
if (!super_iface->HasBeenRecursivelyInitialized()) {
// Recursive step
handle_super_iface.Assign(super_iface);
@@ -8924,7 +8946,7 @@ std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_bo
return ret;
}
-std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys(
+std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForResolvedClasses(
const std::set<DexCacheResolvedClasses>& classes) {
ScopedTrace trace(__PRETTY_FUNCTION__);
std::unordered_set<std::string> ret;
@@ -8939,14 +8961,13 @@ std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys(
if (dex_cache != nullptr) {
const DexFile* dex_file = dex_cache->GetDexFile();
// There could be duplicates if two dex files with the same location are mapped.
- location_to_dex_file.emplace(
- ProfileCompilationInfo::GetProfileDexFileKey(dex_file->GetLocation()), dex_file);
+ location_to_dex_file.emplace(dex_file->GetLocation(), dex_file);
}
}
}
for (const DexCacheResolvedClasses& info : classes) {
- const std::string& profile_key = info.GetDexLocation();
- auto found = location_to_dex_file.find(profile_key);
+ const std::string& location = info.GetDexLocation();
+ auto found = location_to_dex_file.find(location);
if (found != location_to_dex_file.end()) {
const DexFile* dex_file = found->second;
VLOG(profiler) << "Found opened dex file for " << dex_file->GetLocation() << " with "
@@ -8958,7 +8979,7 @@ std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys(
ret.insert(descriptor);
}
} else {
- VLOG(class_linker) << "Failed to find opened dex file for profile key " << profile_key;
+ VLOG(class_linker) << "Failed to find opened dex file for location " << location;
}
}
return ret;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 33eed3c8e3..a5d26c7a88 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -617,7 +617,8 @@ class ClassLinker {
std::set<DexCacheResolvedClasses> GetResolvedClasses(bool ignore_boot_classes)
REQUIRES(!Locks::dex_lock_);
- std::unordered_set<std::string> GetClassDescriptorsForProfileKeys(
+ // Returns the class descriptors for loaded dex files.
+ std::unordered_set<std::string> GetClassDescriptorsForResolvedClasses(
const std::set<DexCacheResolvedClasses>& classes)
REQUIRES(!Locks::dex_lock_);
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index b6a2e09719..35e9d5db29 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -179,6 +179,14 @@ std::unique_ptr<const DexFile> DexFile::Open(const std::string& location,
std::string* error_msg) {
ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
CHECK(map.get() != nullptr);
+
+ if (map->Size() < sizeof(DexFile::Header)) {
+ *error_msg = StringPrintf(
+ "DexFile: failed to open dex file '%s' that is too short to have a header",
+ location.c_str());
+ return nullptr;
+ }
+
std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
map->Size(),
location,
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 4c3990aad6..3fd20a66c2 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2323,48 +2323,26 @@ extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
}
-// Determine target of interface dispatch. This object is known non-null. First argument
-// is there for consistency but should not be used, as some architectures overwrite it
-// in the assembly trampoline.
-extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUTE_UNUSED,
+// Determine target of interface dispatch. The interface method and this object are known non-null.
+// The interface method is the method returned by the dex cache in the conflict trampoline.
+extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method,
mirror::Object* raw_this_object,
Thread* self,
ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ CHECK(interface_method != nullptr);
ObjPtr<mirror::Object> this_object(raw_this_object);
ScopedQuickEntrypointChecks sqec(self);
StackHandleScope<1> hs(self);
Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass()));
ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
-
- // Fetch the dex_method_idx of the target interface method from the caller.
- uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
-
- const DexFile::CodeItem* code_item = caller_method->GetCodeItem();
- CHECK_LT(dex_pc, code_item->insns_size_in_code_units_);
- const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]);
- Instruction::Code instr_code = instr->Opcode();
- CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
- instr_code == Instruction::INVOKE_INTERFACE_RANGE)
- << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
- uint32_t dex_method_idx;
- if (instr_code == Instruction::INVOKE_INTERFACE) {
- dex_method_idx = instr->VRegB_35c();
- } else {
- CHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
- dex_method_idx = instr->VRegB_3rc();
- }
-
- ArtMethod* interface_method = caller_method->GetDexCacheResolvedMethod(
- dex_method_idx, kRuntimePointerSize);
- DCHECK(interface_method != nullptr) << dex_method_idx << " " << caller_method->PrettyMethod();
ArtMethod* method = nullptr;
ImTable* imt = cls->GetImt(kRuntimePointerSize);
if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
- // If the dex cache already resolved the interface method, look whether we have
- // a match in the ImtConflictTable.
+ // If the interface method is already resolved, look whether we have a match in the
+ // ImtConflictTable.
ArtMethod* conflict_method = imt->Get(ImTable::GetImtIndex(interface_method),
kRuntimePointerSize);
if (LIKELY(conflict_method->IsRuntimeMethod())) {
@@ -2389,9 +2367,26 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
return GetTwoWordFailureValue(); // Failure.
}
} else {
- // The dex cache did not resolve the method, look it up in the dex file
- // of the caller,
+ // The interface method is unresolved, so look it up in the dex file of the caller.
DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod());
+
+ // Fetch the dex_method_idx of the target interface method from the caller.
+ uint32_t dex_method_idx;
+ uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
+ const DexFile::CodeItem* code_item = caller_method->GetCodeItem();
+ DCHECK_LT(dex_pc, code_item->insns_size_in_code_units_);
+ const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]);
+ Instruction::Code instr_code = instr->Opcode();
+ DCHECK(instr_code == Instruction::INVOKE_INTERFACE ||
+ instr_code == Instruction::INVOKE_INTERFACE_RANGE)
+ << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
+ if (instr_code == Instruction::INVOKE_INTERFACE) {
+ dex_method_idx = instr->VRegB_35c();
+ } else {
+ DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
+ dex_method_idx = instr->VRegB_3rc();
+ }
+
const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()
->GetDexFile();
uint32_t shorty_len;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8f9c187e1d..aea9708ddc 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1644,10 +1644,10 @@ void ConcurrentCopying::ReclaimPhase() {
// Record freed objects.
TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
// Don't include thread-locals that are in the to-space.
- uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
- uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
- uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
- uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
+ const uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
+ const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
+ const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
+ const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
@@ -1658,8 +1658,18 @@ void ConcurrentCopying::ReclaimPhase() {
}
CHECK_LE(to_objects, from_objects);
CHECK_LE(to_bytes, from_bytes);
- int64_t freed_bytes = from_bytes - to_bytes;
- int64_t freed_objects = from_objects - to_objects;
+ // cleared_bytes and cleared_objects may be greater than the from space equivalents since
+ // ClearFromSpace may clear empty unevac regions.
+ uint64_t cleared_bytes;
+ uint64_t cleared_objects;
+ {
+ TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
+ region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects);
+ CHECK_GE(cleared_bytes, from_bytes);
+ CHECK_GE(cleared_objects, from_objects);
+ }
+ int64_t freed_bytes = cleared_bytes - to_bytes;
+ int64_t freed_objects = cleared_objects - to_objects;
if (kVerboseMode) {
LOG(INFO) << "RecordFree:"
<< " from_bytes=" << from_bytes << " from_objects=" << from_objects
@@ -1678,11 +1688,6 @@ void ConcurrentCopying::ReclaimPhase() {
}
{
- TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
- region_space_->ClearFromSpace();
- }
-
- {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
Sweep(false);
SwapBitmaps();
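
With the early reclaim folded into the accounting, the bytes reported as freed can now exceed what the from-space alone held. A worked example with invented numbers:

  #include <cstdint>

  int main() {
    constexpr uint64_t kMB = 1024 * 1024;
    const uint64_t from_bytes = 8 * kMB;      // stand-in for GetBytesAllocatedInFromSpace()
    const uint64_t to_bytes = 1 * kMB;        // survivors copied into to-space
    const uint64_t cleared_bytes = 10 * kMB;  // from-space plus 2 MB of fully-dead
                                              // unevac regions reclaimed early
    const int64_t freed_bytes = cleared_bytes - to_bytes;  // 9 MB reported as freed
    return freed_bytes == static_cast<int64_t>(9 * kMB) ? 0 : 1;
  }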
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index eef4fba20d..f0e1029f85 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -59,6 +59,8 @@ enum CollectorType {
kCollectorTypeHprof,
// Fake collector for installing/removing a system-weak holder.
kCollectorTypeAddRemoveSystemWeakHolder,
+ // Fake collector type for GetObjectsAllocated.
+ kCollectorTypeGetObjectsAllocated,
};
std::ostream& operator<<(std::ostream& os, const CollectorType& collector_type);
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index 9e34346686..c1c1cad861 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -40,6 +40,7 @@ const char* PrettyCause(GcCause cause) {
case kGcCauseJitCodeCache: return "JitCodeCache";
case kGcCauseAddRemoveSystemWeakHolder: return "SystemWeakHolder";
case kGcCauseHprof: return "Hprof";
+ case kGcCauseGetObjectsAllocated: return "ObjectsAllocated";
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 9b285b12a4..eb27547768 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -53,8 +53,10 @@ enum GcCause {
kGcCauseJitCodeCache,
// Not a real GC cause, used to add or remove system-weak holders.
kGcCauseAddRemoveSystemWeakHolder,
- // Not a real GC cause, used to hprof running in the middle of GC.
+ // Not a real GC cause, used to prevent hprof running in the middle of GC.
kGcCauseHprof,
+ // Not a real GC cause, used to prevent GetObjectsAllocated running in the middle of GC.
+ kGcCauseGetObjectsAllocated,
};
const char* PrettyCause(GcCause cause);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index b857ea3eef..a7697484e2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1835,6 +1835,11 @@ void Heap::SetTargetHeapUtilization(float target) {
size_t Heap::GetObjectsAllocated() const {
Thread* const self = Thread::Current();
ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
+ // Prevent GC from running during GetObjectsAllocated, since we may get a checkpoint request that tells
+ // us to suspend while we are doing SuspendAll. b/35232978
+ gc::ScopedGCCriticalSection gcs(Thread::Current(),
+ gc::kGcCauseGetObjectsAllocated,
+ gc::kCollectorTypeGetObjectsAllocated);
// Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
ScopedSuspendAll ssa(__FUNCTION__);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/scoped_gc_critical_section.cc b/runtime/gc/scoped_gc_critical_section.cc
index b5eb9795de..f937d2c778 100644
--- a/runtime/gc/scoped_gc_critical_section.cc
+++ b/runtime/gc/scoped_gc_critical_section.cc
@@ -29,10 +29,14 @@ ScopedGCCriticalSection::ScopedGCCriticalSection(Thread* self,
CollectorType collector_type)
: self_(self) {
Runtime::Current()->GetHeap()->StartGC(self, cause, collector_type);
- old_cause_ = self->StartAssertNoThreadSuspension("ScopedGCCriticalSection");
+ if (self != nullptr) {
+ old_cause_ = self->StartAssertNoThreadSuspension("ScopedGCCriticalSection");
+ }
}
ScopedGCCriticalSection::~ScopedGCCriticalSection() {
- self_->EndAssertNoThreadSuspension(old_cause_);
+ if (self_ != nullptr) {
+ self_->EndAssertNoThreadSuspension(old_cause_);
+ }
Runtime::Current()->GetHeap()->FinishGC(self_, collector::kGcTypeNone);
}
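
Both the constructor and the destructor now tolerate a null Thread*, which suggests callers without an attached thread may enter the critical section; the usage below is a sketch of that assumption, not taken from the patch:

  // Sketch: Thread::Current() may be nullptr on a thread not yet attached to
  // the runtime; the critical section now skips the no-suspension assertion
  // instead of dereferencing null.
  gc::ScopedGCCriticalSection gcs(Thread::Current(),  // possibly nullptr
                                  gc::kGcCauseGetObjectsAllocated,
                                  gc::kCollectorTypeGetObjectsAllocated);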
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index fffcee64ad..5809027235 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -233,14 +233,12 @@ void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
continue;
}
if (r->IsLarge()) {
- if (r->LiveBytes() > 0) {
- // Avoid visiting dead large objects since they may contain dangling pointers to the
- // from-space.
- DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
- DCHECK(obj->GetClass() != nullptr);
- callback(obj, arg);
- }
+ // Avoid visiting dead large objects since they may contain dangling pointers to the
+ // from-space.
+ DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
+ DCHECK(obj->GetClass() != nullptr);
+ callback(obj, arg);
} else if (r->IsLargeTail()) {
// Do nothing.
} else {
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 560abe121a..1ad48438ba 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -246,16 +246,45 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
evac_region_ = &full_region_;
}
-void RegionSpace::ClearFromSpace() {
+void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) {
+ DCHECK(cleared_bytes != nullptr);
+ DCHECK(cleared_objects != nullptr);
+ *cleared_bytes = 0;
+ *cleared_objects = 0;
MutexLock mu(Thread::Current(), region_lock_);
VerifyNonFreeRegionLimit();
size_t new_non_free_region_index_limit = 0;
for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
Region* r = &regions_[i];
if (r->IsInFromSpace()) {
- r->Clear();
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
--num_non_free_regions_;
+ r->Clear();
} else if (r->IsInUnevacFromSpace()) {
+ if (r->LiveBytes() == 0) {
+ // Special case for 0 live bytes: this means all of the objects in the region are dead and
+ // we can clear it. This is important for large objects since we must not visit dead ones in
+ // RegionSpace::Walk because they may contain dangling references to invalid objects.
+ // It is also better to clear these regions now instead of at the end of the next GC to
+ // save RAM. If we don't clear the regions here, they will be cleared next GC by the normal
+ // live percent evacuation logic.
+ size_t free_regions = 1;
+ // Also release RAM for large tails.
+ while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
+ DCHECK(r->IsLarge());
+ regions_[i + free_regions].Clear();
+ ++free_regions;
+ }
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
+ num_non_free_regions_ -= free_regions;
+ r->Clear();
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(r->Begin()),
+ reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+ continue;
+ }
size_t full_count = 0;
while (r->IsInUnevacFromSpace()) {
Region* const cur = &regions_[i + full_count];
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 95f293bc12..253792993b 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -215,7 +215,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
size_t FromSpaceSize() REQUIRES(!region_lock_);
size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
size_t ToSpaceSize() REQUIRES(!region_lock_);
- void ClearFromSpace() REQUIRES(!region_lock_);
+ void ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) REQUIRES(!region_lock_);
void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
Region* reg = RefToRegionUnlocked(ref);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 6b22af9829..2589ad046b 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
#include "interpreter.h"
+#include "interpreter_intrinsics.h"
#include <math.h>
@@ -104,13 +105,58 @@ void AbortTransactionV(Thread* self, const char* fmt, va_list args)
void RecordArrayElementsInTransaction(ObjPtr<mirror::Array> array, int32_t count)
REQUIRES_SHARED(Locks::mutator_lock_);
-// Invokes the given method. This is part of the invocation support and is used by DoInvoke and
-// DoInvokeVirtualQuick functions.
+// Invokes the given method. This is part of the invocation support and is used by DoInvoke,
+// DoFastInvoke and DoInvokeVirtualQuick functions.
// Returns true on success, otherwise throws an exception and returns false.
template<bool is_range, bool do_assignability_check>
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result);
+// Handles streamlined non-range invoke-static, invoke-direct and invoke-virtual instructions
+// originating in mterp. Access checks and instrumentation other than JIT profiling are not
+// supported, but interpreter intrinsics are dispatched where applicable.
+// Returns true on success, otherwise throws an exception and returns false.
+template<InvokeType type>
+static inline bool DoFastInvoke(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) {
+ const uint32_t method_idx = inst->VRegB_35c();
+ const uint32_t vregC = inst->VRegC_35c();
+ ObjPtr<mirror::Object> receiver = (type == kStatic)
+ ? nullptr
+ : shadow_frame.GetVRegReference(vregC);
+ ArtMethod* sf_method = shadow_frame.GetMethod();
+ ArtMethod* const called_method = FindMethodFromCode<type, false>(
+ method_idx, &receiver, sf_method, self);
+ // The shadow frame should already be pushed, so we don't need to update it.
+ if (UNLIKELY(called_method == nullptr)) {
+ CHECK(self->IsExceptionPending());
+ result->SetJ(0);
+ return false;
+ } else if (UNLIKELY(!called_method->IsInvokable())) {
+ called_method->ThrowInvocationTimeError();
+ result->SetJ(0);
+ return false;
+ } else {
+ if (called_method->IsIntrinsic()) {
+ if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
+ shadow_frame.GetResultRegister())) {
+ return !self->IsExceptionPending();
+ }
+ }
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ if (type == kVirtual) {
+ jit->InvokeVirtualOrInterface(receiver, sf_method, shadow_frame.GetDexPC(), called_method);
+ }
+ jit->AddSamples(self, sf_method, 1, /*with_backedges*/false);
+ }
+ return DoCall<false, false>(called_method, self, shadow_frame, inst, inst_data, result);
+ }
+}
+
// Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
// Returns true on success, otherwise throws an exception and returns false.
template<InvokeType type, bool is_range, bool do_access_check>
@@ -495,8 +541,9 @@ void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
// Explicitly instantiate all DoInvoke functions.
#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check) \
- template REQUIRES_SHARED(Locks::mutator_lock_) \
- bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
+ bool DoInvoke<_type, _is_range, _do_check>(Thread* self, \
+ ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
JValue* result)
@@ -514,6 +561,19 @@ EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kInterface) // invoke-interface/range.
#undef EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL
#undef EXPLICIT_DO_INVOKE_TEMPLATE_DECL
+// Explicitly instantiate all DoFastInvoke functions.
+#define EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(_type) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
+ bool DoFastInvoke<_type>(Thread* self, \
+ ShadowFrame& shadow_frame, \
+ const Instruction* inst, uint16_t inst_data, \
+ JValue* result)
+
+EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(kStatic); // invoke-static
+EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(kDirect); // invoke-direct
+EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(kVirtual); // invoke-virtual
+#undef EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL
+
// Explicitly instantiate all DoInvokeVirtualQuick functions.
#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
new file mode 100644
index 0000000000..5e901cdfd8
--- /dev/null
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "interpreter/interpreter_common.h"
+#include "interpreter/interpreter_intrinsics.h"
+
+namespace art {
+namespace interpreter {
+
+#define BINARY_SIMPLE_INTRINSIC(name, op, get, set, offset) \
+static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ JValue* result_register) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
+ inst->GetVarArgs(arg, inst_data); \
+ result_register->set(op(shadow_frame->get(arg[0]), shadow_frame->get(arg[offset]))); \
+ return true; \
+}
+
+#define UNARY_SIMPLE_INTRINSIC(name, op, get, set) \
+static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ JValue* result_register) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
+ inst->GetVarArgs(arg, inst_data); \
+ result_register->set(op(shadow_frame->get(arg[0]))); \
+ return true; \
+}
+
+// java.lang.Math.min(II)I
+BINARY_SIMPLE_INTRINSIC(MterpMathMinIntInt, std::min, GetVReg, SetI, 1);
+// java.lang.Math.min(JJ)J
+BINARY_SIMPLE_INTRINSIC(MterpMathMinLongLong, std::min, GetVRegLong, SetJ, 2);
+// java.lang.Math.max(II)I
+BINARY_SIMPLE_INTRINSIC(MterpMathMaxIntInt, std::max, GetVReg, SetI, 1);
+// java.lang.Math.max(JJ)J
+BINARY_SIMPLE_INTRINSIC(MterpMathMaxLongLong, std::max, GetVRegLong, SetJ, 2);
+// java.lang.Math.abs(I)I
+UNARY_SIMPLE_INTRINSIC(MterpMathAbsInt, std::abs, GetVReg, SetI);
+// java.lang.Math.abs(J)J
+UNARY_SIMPLE_INTRINSIC(MterpMathAbsLong, std::abs, GetVRegLong, SetJ);
+// java.lang.Math.abs(F)F
+UNARY_SIMPLE_INTRINSIC(MterpMathAbsFloat, 0x7fffffff&, GetVReg, SetI);
+// java.lang.Math.abs(D)D
+UNARY_SIMPLE_INTRINSIC(MterpMathAbsDouble, INT64_C(0x7fffffffffffffff)&, GetVRegLong, SetJ);
+// java.lang.Math.sqrt(D)D
+UNARY_SIMPLE_INTRINSIC(MterpMathSqrt, std::sqrt, GetVRegDouble, SetD);
+// java.lang.Math.ceil(D)D
+UNARY_SIMPLE_INTRINSIC(MterpMathCeil, std::ceil, GetVRegDouble, SetD);
+// java.lang.Math.floor(D)D
+UNARY_SIMPLE_INTRINSIC(MterpMathFloor, std::floor, GetVRegDouble, SetD);
+// java.lang.Math.sin(D)D
+UNARY_SIMPLE_INTRINSIC(MterpMathSin, std::sin, GetVRegDouble, SetD);
+// java.lang.Math.cos(D)D
+UNARY_SIMPLE_INTRINSIC(MterpMathCos, std::cos, GetVRegDouble, SetD);
+// java.lang.Math.tan(D)D
+UNARY_SIMPLE_INTRINSIC(MterpMathTan, std::tan, GetVRegDouble, SetD);
+// java.lang.Math.asin(D)D
+UNARY_SIMPLE_INTRINSIC(MterpMathAsin, std::asin, GetVRegDouble, SetD);
+// java.lang.Math.acos(D)D
+UNARY_SIMPLE_INTRINSIC(MterpMathAcos, std::acos, GetVRegDouble, SetD);
+// java.lang.Math.atan(D)D
+UNARY_SIMPLE_INTRINSIC(MterpMathAtan, std::atan, GetVRegDouble, SetD);
+
+#define INTRINSIC_CASE(name) \
+ case Intrinsics::k##name: \
+ res = Mterp##name(shadow_frame, inst, inst_data, result_register); \
+ break;
+
+bool MterpHandleIntrinsic(ShadowFrame* shadow_frame,
+ ArtMethod* const called_method,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result_register)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ Intrinsics intrinsic = static_cast<Intrinsics>(called_method->GetIntrinsic());
+ bool res = false; // Assume failure
+ switch (intrinsic) {
+ INTRINSIC_CASE(MathMinIntInt)
+ INTRINSIC_CASE(MathMinLongLong)
+ INTRINSIC_CASE(MathMaxIntInt)
+ INTRINSIC_CASE(MathMaxLongLong)
+ INTRINSIC_CASE(MathAbsInt)
+ INTRINSIC_CASE(MathAbsLong)
+ INTRINSIC_CASE(MathAbsFloat)
+ INTRINSIC_CASE(MathAbsDouble)
+ INTRINSIC_CASE(MathSqrt)
+ INTRINSIC_CASE(MathCeil)
+ INTRINSIC_CASE(MathFloor)
+ INTRINSIC_CASE(MathSin)
+ INTRINSIC_CASE(MathCos)
+ INTRINSIC_CASE(MathTan)
+ INTRINSIC_CASE(MathAsin)
+ INTRINSIC_CASE(MathAcos)
+ INTRINSIC_CASE(MathAtan)
+ default:
+ res = false; // Punt
+ break;
+ }
+ return res;
+}
+
+} // namespace interpreter
+} // namespace art
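
To see through the token pasting above: with name=MterpMathMinIntInt, op=std::min, get=GetVReg, set=SetI and offset=1, the first BINARY_SIMPLE_INTRINSIC invocation expands to roughly the following (whitespace adjusted):

  static ALWAYS_INLINE bool MterpMathMinIntInt(ShadowFrame* shadow_frame,
                                               const Instruction* inst,
                                               uint16_t inst_data,
                                               JValue* result_register)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    uint32_t arg[Instruction::kMaxVarArgRegs] = {};
    inst->GetVarArgs(arg, inst_data);
    // Operands come from vregs arg[0] and arg[offset]; the result is stored
    // into the result register as a 32-bit int.
    result_register->SetI(std::min(shadow_frame->GetVReg(arg[0]),
                                   shadow_frame->GetVReg(arg[1])));
    return true;
  }

The long variants pass offset 2 because a wide value occupies two vregs, so the second operand's register range starts at arg[2].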
diff --git a/runtime/interpreter/interpreter_intrinsics.h b/runtime/interpreter/interpreter_intrinsics.h
new file mode 100644
index 0000000000..ae45679b8a
--- /dev/null
+++ b/runtime/interpreter/interpreter_intrinsics.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
+#define ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
+
+#include "compiler/intrinsics_enum.h"
+#include "dex_instruction.h"
+
+namespace art {
+namespace interpreter {
+
+// Invocations of methods identified as intrinsics are routed here. If there is
+// no interpreter implementation, this returns false and a normal invoke proceeds.
+bool MterpHandleIntrinsic(ShadowFrame* shadow_frame,
+ ArtMethod* const called_method,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result_register);
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 8bf094e1b8..a53040c2df 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -18,6 +18,7 @@
* Mterp entry point and support functions.
*/
#include "interpreter/interpreter_common.h"
+#include "interpreter/interpreter_intrinsics.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "mterp.h"
#include "debugger.h"
@@ -157,7 +158,7 @@ extern "C" size_t MterpInvokeVirtual(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kVirtual, false, false>(
+ return DoFastInvoke<kVirtual>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -190,7 +191,7 @@ extern "C" size_t MterpInvokeDirect(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kDirect, false, false>(
+ return DoFastInvoke<kDirect>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -201,7 +202,7 @@ extern "C" size_t MterpInvokeStatic(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kStatic, false, false>(
+ return DoFastInvoke<kStatic>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -267,6 +268,18 @@ extern "C" size_t MterpInvokeVirtualQuick(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
+ const uint32_t vregC = inst->VRegC_35c();
+ const uint32_t vtable_idx = inst->VRegB_35c();
+ ObjPtr<mirror::Object> const receiver = shadow_frame->GetVRegReference(vregC);
+ if (receiver != nullptr) {
+ ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
+ vtable_idx, kRuntimePointerSize);
+ if ((called_method != nullptr) && called_method->IsIntrinsic()) {
+ if (MterpHandleIntrinsic(shadow_frame, called_method, inst, inst_data, result_register)) {
+ return !self->IsExceptionPending();
+ }
+ }
+ }
return DoInvokeVirtualQuick<false>(
self, *shadow_frame, inst, inst_data, result_register);
}
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index af29468062..86af6d44db 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -203,8 +203,7 @@ struct JdwpState {
*/
void PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr, int eventFlags,
const JValue* returnValue)
- REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* A field of interest has been accessed or modified. This is used for field access and field
@@ -215,8 +214,7 @@ struct JdwpState {
*/
void PostFieldEvent(const EventLocation* pLoc, ArtField* field, mirror::Object* thisPtr,
const JValue* fieldValue, bool is_modification)
- REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* An exception has been thrown.
@@ -225,22 +223,19 @@ struct JdwpState {
*/
void PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object,
const EventLocation* pCatchLoc, mirror::Object* thisPtr)
- REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* A thread has started or stopped.
*/
void PostThreadChange(Thread* thread, bool start)
- REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Class has been prepared.
*/
void PostClassPrepare(mirror::Class* klass)
- REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* The VM is about to stop.
@@ -264,7 +259,7 @@ struct JdwpState {
void SendRequest(ExpandBuf* pReq);
void ResetState()
- REQUIRES(!Locks::jdwp_event_list_lock_)
+ REQUIRES(!event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
/* atomic ops to get next serial number */
@@ -273,7 +268,7 @@ struct JdwpState {
void Run()
REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_, !thread_start_lock_,
- !attach_lock_, !Locks::jdwp_event_list_lock_);
+ !attach_lock_, !event_list_lock_);
/*
* Register an event by adding it to the event list.
@@ -282,25 +277,25 @@ struct JdwpState {
* may discard its pointer after calling this.
*/
JdwpError RegisterEvent(JdwpEvent* pEvent)
- REQUIRES(!Locks::jdwp_event_list_lock_)
+ REQUIRES(!event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Unregister an event, given the requestId.
*/
void UnregisterEventById(uint32_t requestId)
- REQUIRES(!Locks::jdwp_event_list_lock_)
+ REQUIRES(!event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
void UnregisterLocationEventsOnClass(ObjPtr<mirror::Class> klass)
- REQUIRES(!Locks::jdwp_event_list_lock_)
+ REQUIRES(!event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Unregister all events.
*/
void UnregisterAll()
- REQUIRES(!Locks::jdwp_event_list_lock_)
+ REQUIRES(!event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -315,16 +310,16 @@ struct JdwpState {
ObjectId threadId)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
void CleanupMatchList(const std::vector<JdwpEvent*>& match_list)
- REQUIRES(Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void EventFinish(ExpandBuf* pReq);
bool FindMatchingEvents(JdwpEventKind eventKind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list)
- REQUIRES(!Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void FindMatchingEventsLocked(JdwpEventKind eventKind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list)
- REQUIRES(Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void UnregisterEvent(JdwpEvent* pEvent)
- REQUIRES(Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void SendBufferedRequest(uint32_t type, const std::vector<iovec>& iov);
/*
@@ -392,8 +387,9 @@ struct JdwpState {
AtomicInteger event_serial_;
// Linked list of events requested by the debugger (breakpoints, class prep, etc).
- JdwpEvent* event_list_ GUARDED_BY(Locks::jdwp_event_list_lock_);
- size_t event_list_size_ GUARDED_BY(Locks::jdwp_event_list_lock_); // Number of elements in event_list_.
+ Mutex event_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_BEFORE(Locks::breakpoint_lock_);
+ JdwpEvent* event_list_ GUARDED_BY(event_list_lock_);
+ size_t event_list_size_ GUARDED_BY(event_list_lock_); // Number of elements in event_list_.
// Used to synchronize JDWP command handler thread and event threads so only one
// thread does JDWP stuff at a time. This prevents interleaving of command handling
@@ -414,7 +410,7 @@ struct JdwpState {
// When the runtime shuts down, it needs to stop JDWP command handler thread by closing the
// JDWP connection. However, if the JDWP thread is processing a command, it needs to wait
// for the command to finish so we can send its reply before closing the connection.
- Mutex shutdown_lock_ ACQUIRED_AFTER(Locks::jdwp_event_list_lock_);
+ Mutex shutdown_lock_ ACQUIRED_AFTER(event_list_lock_);
ConditionVariable shutdown_cond_ GUARDED_BY(shutdown_lock_);
bool processing_request_ GUARDED_BY(shutdown_lock_);
};
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 36d733ea08..96249f9b58 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -237,7 +237,7 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
/*
* Add to list.
*/
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
if (event_list_ != nullptr) {
pEvent->next = event_list_;
event_list_->prev = pEvent;
@@ -256,7 +256,7 @@ void JdwpState::UnregisterLocationEventsOnClass(ObjPtr<mirror::Class> klass) {
StackHandleScope<1> hs(Thread::Current());
Handle<mirror::Class> h_klass(hs.NewHandle(klass));
std::vector<JdwpEvent*> to_remove;
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
for (JdwpEvent* cur_event = event_list_; cur_event != nullptr; cur_event = cur_event->next) {
// Fill in the to_remove list
bool found_event = false;
@@ -356,7 +356,7 @@ void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
void JdwpState::UnregisterEventById(uint32_t requestId) {
bool found = false;
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
for (JdwpEvent* pEvent = event_list_; pEvent != nullptr; pEvent = pEvent->next) {
if (pEvent->requestId == requestId) {
@@ -383,7 +383,7 @@ void JdwpState::UnregisterEventById(uint32_t requestId) {
* Remove all entries from the event list.
*/
void JdwpState::UnregisterAll() {
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
JdwpEvent* pEvent = event_list_;
while (pEvent != nullptr) {
@@ -593,7 +593,7 @@ void JdwpState::FindMatchingEventsLocked(JdwpEventKind event_kind, const ModBask
*/
bool JdwpState::FindMatchingEvents(JdwpEventKind event_kind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list) {
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
match_list->reserve(event_list_size_);
FindMatchingEventsLocked(event_kind, basket, match_list);
return !match_list->empty();
@@ -908,7 +908,7 @@ void JdwpState::PostLocationEvent(const EventLocation* pLoc, mirror::Object* thi
std::vector<JdwpEvent*> match_list;
{
// We use the locked version because we have multiple possible match events.
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
match_list.reserve(event_list_size_);
if ((eventFlags & Dbg::kBreakpoint) != 0) {
FindMatchingEventsLocked(EK_BREAKPOINT, basket, &match_list);
@@ -955,7 +955,7 @@ void JdwpState::PostLocationEvent(const EventLocation* pLoc, mirror::Object* thi
}
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
CleanupMatchList(match_list);
}
@@ -1041,7 +1041,7 @@ void JdwpState::PostFieldEvent(const EventLocation* pLoc, ArtField* field,
}
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
CleanupMatchList(match_list);
}
@@ -1103,7 +1103,7 @@ void JdwpState::PostThreadChange(Thread* thread, bool start) {
}
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
CleanupMatchList(match_list);
}
@@ -1213,7 +1213,7 @@ void JdwpState::PostException(const EventLocation* pThrowLoc, mirror::Throwable*
}
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
CleanupMatchList(match_list);
}
@@ -1295,7 +1295,7 @@ void JdwpState::PostClassPrepare(mirror::Class* klass) {
}
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
CleanupMatchList(match_list);
}
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 64ed724afc..e6c60685cc 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -227,6 +227,7 @@ JdwpState::JdwpState(const JdwpOptions* options)
last_activity_time_ms_(0),
request_serial_(0x10000000),
event_serial_(0x20000000),
+ event_list_lock_("JDWP event list lock", kJdwpEventListLock),
event_list_(nullptr),
event_list_size_(0),
jdwp_token_lock_("JDWP token lock"),
@@ -238,6 +239,7 @@ JdwpState::JdwpState(const JdwpOptions* options)
shutdown_lock_("JDWP shutdown lock", kJdwpShutdownLock),
shutdown_cond_("JDWP shutdown condition variable", shutdown_lock_),
processing_request_(false) {
+ Locks::AddToExpectedMutexesOnWeakRefAccess(&event_list_lock_);
}
/*
@@ -330,7 +332,7 @@ void JdwpState::ResetState() {
UnregisterAll();
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
CHECK(event_list_ == nullptr);
}
@@ -380,6 +382,8 @@ JdwpState::~JdwpState() {
CHECK(netState == nullptr);
ResetState();
+
+ Locks::RemoveFromExpectedMutexesOnWeakRefAccess(&event_list_lock_);
}
/*
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index bd7251baeb..510f5f00a6 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -35,6 +35,11 @@ std::ostream& operator<<(std::ostream& os, const ObjectRegistryEntry& rhs) {
ObjectRegistry::ObjectRegistry()
: lock_("ObjectRegistry lock", kJdwpObjectRegistryLock), next_id_(1) {
+ Locks::AddToExpectedMutexesOnWeakRefAccess(&lock_);
+}
+
+ObjectRegistry::~ObjectRegistry() {
+ Locks::RemoveFromExpectedMutexesOnWeakRefAccess(&lock_);
}
JDWP::RefTypeId ObjectRegistry::AddRefType(ObjPtr<mirror::Class> c) {
diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h
index 9cacc66c32..8754631e1b 100644
--- a/runtime/jdwp/object_registry.h
+++ b/runtime/jdwp/object_registry.h
@@ -62,6 +62,7 @@ std::ostream& operator<<(std::ostream& os, const ObjectRegistryEntry& rhs);
class ObjectRegistry {
public:
ObjectRegistry();
+ ~ObjectRegistry();
JDWP::ObjectId Add(ObjPtr<mirror::Object> o)
REQUIRES_SHARED(Locks::mutator_lock_)
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 1ec4749146..3631a9d467 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -325,16 +325,12 @@ void Jit::DeleteThreadPool() {
}
void Jit::StartProfileSaver(const std::string& filename,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_dir) {
+ const std::vector<std::string>& code_paths) {
if (profile_saver_options_.IsEnabled()) {
ProfileSaver::Start(profile_saver_options_,
filename,
code_cache_.get(),
- code_paths,
- foreign_dex_profile_path,
- app_dir);
+ code_paths);
}
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index d566799340..5da1ea1196 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -136,14 +136,8 @@ class Jit {
// Starts the profile saver if the config options allow profile recording.
// The profile will be stored in the specified `filename` and will contain
// information collected from the given `code_paths` (a set of dex locations).
- // The `foreign_dex_profile_path` is the path where the saver will put the
- // profile markers for loaded dex files which are not owned by the application.
- // The `app_dir` is the application directory and is used to decide which
- // dex files belong to the application.
void StartProfileSaver(const std::string& filename,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_dir);
+ const std::vector<std::string>& code_paths);
void StopProfileSaver();
void DumpForSigQuit(std::ostream& os) REQUIRES(!lock_);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 62acedfb1b..8b2a2b4d13 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1262,6 +1262,7 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
std::vector<ProfileMethodInfo::ProfileClassReference> profile_classes;
const InlineCache& cache = info->cache_[i];
+ bool is_missing_types = false;
for (size_t k = 0; k < InlineCache::kIndividualCacheSize; k++) {
mirror::Class* cls = cache.classes_[k].Read();
if (cls == nullptr) {
@@ -1284,17 +1285,20 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
}
if (!type_index.IsValid()) {
// Could be a proxy class or an array for which we couldn't find the type index.
+ is_missing_types = true;
continue;
}
if (ContainsElement(dex_base_locations, class_dex_file->GetBaseLocation())) {
// Only consider classes from the same apk (including multidex).
profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/
class_dex_file, type_index);
+ } else {
+ is_missing_types = true;
}
}
if (!profile_classes.empty()) {
inline_caches.emplace_back(/*ProfileMethodInfo::ProfileInlineCache*/
- cache.dex_pc_, profile_classes);
+ cache.dex_pc_, is_missing_types, profile_classes);
}
}
methods.emplace_back(/*ProfileMethodInfo*/
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 627cc93f38..b23a86313f 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -37,7 +37,8 @@
namespace art {
const uint8_t ProfileCompilationInfo::kProfileMagic[] = { 'p', 'r', 'o', '\0' };
-const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '3', '\0' }; // inline caches
+// Last profile version: fix the order of dex files in the profile.
+const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '4', '\0' };
static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
@@ -46,16 +47,19 @@ static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
// using the same test profile.
static constexpr bool kDebugIgnoreChecksum = false;
-static constexpr uint8_t kMegamorphicEncoding = 7;
+static constexpr uint8_t kIsMissingTypesEncoding = 6;
+static constexpr uint8_t kIsMegamorphicEncoding = 7;
static_assert(sizeof(InlineCache::kIndividualCacheSize) == sizeof(uint8_t),
"InlineCache::kIndividualCacheSize does not have the expect type size");
-static_assert(InlineCache::kIndividualCacheSize < kMegamorphicEncoding,
+static_assert(InlineCache::kIndividualCacheSize < kIsMegamorphicEncoding,
+ "InlineCache::kIndividualCacheSize is larger than expected");
+static_assert(InlineCache::kIndividualCacheSize < kIsMissingTypesEncoding,
"InlineCache::kIndividualCacheSize is larger than expected");
void ProfileCompilationInfo::DexPcData::AddClass(uint16_t dex_profile_idx,
const dex::TypeIndex& type_idx) {
- if (is_megamorphic) {
+ if (is_megamorphic || is_missing_types) {
return;
}
classes.emplace(dex_profile_idx, type_idx);
@@ -206,7 +210,8 @@ static constexpr size_t kLineHeaderSize =
* Classes are grouped per their dex files and the line
* `dex_profile_index,class_id1,class_id2...,dex_profile_index2,...` encodes the
* mapping from `dex_profile_index` to the set of classes `class_id1,class_id2...`
- * M stands for megamorphic and it's encoded as the byte kMegamorphicEncoding.
+ * M stands for megamorphic or missing types and it's encoded as either
+ * the byte kIsMegamorphicEncoding or kIsMissingTypesEncoding.
* When present, there will be no class ids following.
**/
bool ProfileCompilationInfo::Save(int fd) {
@@ -222,15 +227,23 @@ bool ProfileCompilationInfo::Save(int fd) {
DCHECK_LE(info_.size(), std::numeric_limits<uint8_t>::max());
AddUintToBuffer(&buffer, static_cast<uint8_t>(info_.size()));
+ // Make sure we write the dex files in order of their profile index. This
+ // avoids writing the index in the output file and simplifies the parsing logic.
+ std::vector<const std::string*> ordered_info_location(info_.size());
+ std::vector<const DexFileData*> ordered_info_data(info_.size());
for (const auto& it : info_) {
+ ordered_info_location[it.second.profile_index] = &(it.first);
+ ordered_info_data[it.second.profile_index] = &(it.second);
+ }
+ for (size_t i = 0; i < info_.size(); i++) {
if (buffer.size() > kMaxSizeToKeepBeforeWriting) {
if (!WriteBuffer(fd, buffer.data(), buffer.size())) {
return false;
}
buffer.clear();
}
- const std::string& dex_location = it.first;
- const DexFileData& dex_data = it.second;
+ const std::string& dex_location = *ordered_info_location[i];
+ const DexFileData& dex_data = *ordered_info_data[i];
// Note that we allow dex files without any methods or classes, so that
// inline caches can refer to valid dex files.
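
A tiny worked example of the index-ordered write, with invented locations, sketched against std::map to stay self-contained:

  #include <map>
  #include <string>
  #include <vector>

  int main() {
    // Stand-in for info_: location -> profile_index (map iteration order is
    // alphabetical here, unrelated to the index order).
    std::map<std::string, size_t> info{{"b.apk", 1}, {"a.apk", 0}};
    std::vector<const std::string*> ordered(info.size());
    for (const auto& it : info) {
      ordered[it.second] = &it.first;  // place each entry at its profile index
    }
    // ordered is now {"a.apk", "b.apk"}: position encodes the index, so the
    // index itself never needs to be written to the file.
    return *ordered[0] == "a.apk" ? 0 : 1;
  }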
@@ -289,10 +302,19 @@ void ProfileCompilationInfo::AddInlineCacheToBuffer(std::vector<uint8_t>* buffer
// Add the dex pc.
AddUintToBuffer(buffer, dex_pc);
- if (dex_pc_data.is_megamorphic) {
- // Add the megamorphic encoding if needed and continue.
- // If megamorphic, we don't add the rest of the classes.
- AddUintToBuffer(buffer, kMegamorphicEncoding);
+ // Add the megamorphic/missing_types encoding if needed and continue.
+ // In either case we don't add any classes to the profile, so there's
+ // no point in continuing.
+ // TODO(calin): when types are missing there is still value in adding the
+ // rest of the classes. They can be added without bumping the profile version.
+ if (dex_pc_data.is_missing_types) {
+ DCHECK(!dex_pc_data.is_megamorphic); // at this point the megamorphic flag should not be set.
+ DCHECK_EQ(classes.size(), 0u);
+ AddUintToBuffer(buffer, kIsMissingTypesEncoding);
+ continue;
+ } else if (dex_pc_data.is_megamorphic) {
+ DCHECK_EQ(classes.size(), 0u);
+ AddUintToBuffer(buffer, kIsMegamorphicEncoding);
continue;
}
@@ -403,11 +425,21 @@ bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
for (const auto& pmi_inline_cache_it : pmi.inline_caches) {
uint16_t pmi_ic_dex_pc = pmi_inline_cache_it.first;
const DexPcData& pmi_ic_dex_pc_data = pmi_inline_cache_it.second;
- auto dex_pc_data_it = inline_cache_it->second.FindOrAdd(pmi_ic_dex_pc);
+ DexPcData& dex_pc_data = inline_cache_it->second.FindOrAdd(pmi_ic_dex_pc)->second;
+ if (dex_pc_data.is_missing_types || dex_pc_data.is_megamorphic) {
+ // We are already megamorphic or we are missing types; no point in going forward.
+ continue;
+ }
+
+ if (pmi_ic_dex_pc_data.is_missing_types) {
+ dex_pc_data.SetIsMissingTypes();
+ continue;
+ }
if (pmi_ic_dex_pc_data.is_megamorphic) {
- dex_pc_data_it->second.SetMegamorphic();
+ dex_pc_data.SetIsMegamorphic();
continue;
}
+
for (const ClassReference& class_ref : pmi_ic_dex_pc_data.classes) {
const DexReference& dex_ref = pmi.dex_references[class_ref.dex_profile_index];
DexFileData* class_dex_data = GetOrAddDexFileData(
@@ -416,7 +448,7 @@ bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
if (class_dex_data == nullptr) { // checksum mismatch
return false;
}
- dex_pc_data_it->second.AddClass(class_dex_data->profile_index, class_ref.type_index);
+ dex_pc_data.AddClass(class_dex_data->profile_index, class_ref.type_index);
}
}
return true;
@@ -432,6 +464,11 @@ bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi) {
auto inline_cache_it = data->method_map.FindOrAdd(pmi.dex_method_index);
for (const ProfileMethodInfo::ProfileInlineCache& cache : pmi.inline_caches) {
+ if (cache.is_missing_types) {
+ auto dex_pc_data_it = inline_cache_it->second.FindOrAdd(cache.dex_pc);
+ dex_pc_data_it->second.SetIsMissingTypes();
+ continue;
+ }
for (const ProfileMethodInfo::ProfileClassReference& class_ref : cache.classes) {
DexFileData* class_dex_data = GetOrAddDexFileData(
GetProfileDexFileKey(class_ref.dex_file->GetLocation()),
@@ -440,6 +477,10 @@ bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi) {
return false;
}
auto dex_pc_data_it = inline_cache_it->second.FindOrAdd(cache.dex_pc);
+ if (dex_pc_data_it->second.is_missing_types) {
+ // Don't bother adding classes if we are missing types.
+ break;
+ }
dex_pc_data_it->second.AddClass(class_dex_data->profile_index, class_ref.type_index);
}
}
@@ -478,8 +519,12 @@ bool ProfileCompilationInfo::ReadInlineCache(SafeBuffer& buffer,
READ_UINT(uint16_t, buffer, dex_pc, error);
READ_UINT(uint8_t, buffer, dex_to_classes_map_size, error);
auto dex_pc_data_it = inline_cache->FindOrAdd(dex_pc);
- if (dex_to_classes_map_size == kMegamorphicEncoding) {
- dex_pc_data_it->second.SetMegamorphic();
+ if (dex_to_classes_map_size == kIsMissingTypesEncoding) {
+ dex_pc_data_it->second.SetIsMissingTypes();
+ continue;
+ }
+ if (dex_to_classes_map_size == kIsMegamorphicEncoding) {
+ dex_pc_data_it->second.SetIsMegamorphic();
continue;
}
for (; dex_to_classes_map_size > 0; dex_to_classes_map_size--) {
@@ -797,10 +842,13 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other) {
SafeMap<uint8_t, uint8_t> dex_profile_index_remap;
for (const auto& other_it : other.info_) {
const std::string& other_dex_location = other_it.first;
+ uint32_t other_checksum = other_it.second.checksum;
const DexFileData& other_dex_data = other_it.second;
- auto info_it = info_.FindOrAdd(other_dex_location, DexFileData(other_dex_data.checksum, 0));
- const DexFileData& dex_data = info_it->second;
- dex_profile_index_remap.Put(other_dex_data.profile_index, dex_data.profile_index);
+ const DexFileData* dex_data = GetOrAddDexFileData(other_dex_location, other_checksum);
+ if (dex_data == nullptr) {
+ return false; // Could happen if we exceed the number of allowed dex files.
+ }
+ dex_profile_index_remap.Put(other_dex_data.profile_index, dex_data->profile_index);
}
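A toy illustration of the remapping above (the indexes are made up): the same dex file can carry different profile indexes in the two infos, so class references copied from `other` must be translated through dex_profile_index_remap first.

#include <cassert>
#include <cstdint>
#include <map>

int main() {
  // In `other`: b.dex -> 0, a.dex -> 1. In the merged info: a.dex -> 0, b.dex -> 1.
  std::map<uint8_t, uint8_t> remap = {{0, 1}, {1, 0}};
  // A class reference (dex_profile_index = 0, type_idx = 7) from `other` points
  // into b.dex, so the merged profile must store it as (1, 7).
  assert(remap[0] == 1);
  return 0;
}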
// Merge the actual profile data.
@@ -823,8 +871,10 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other) {
uint16_t other_dex_pc = other_ic_it.first;
const ClassSet& other_class_set = other_ic_it.second.classes;
auto class_set = method_it->second.FindOrAdd(other_dex_pc);
- if (other_ic_it.second.is_megamorphic) {
- class_set->second.SetMegamorphic();
+ if (other_ic_it.second.is_missing_types) {
+ class_set->second.SetIsMissingTypes();
+ } else if (other_ic_it.second.is_megamorphic) {
+ class_set->second.SetIsMegamorphic();
} else {
for (const auto& class_it : other_class_set) {
class_set->second.AddClass(dex_profile_index_remap.Get(
@@ -949,10 +999,17 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
os << "ProfileInfo:";
const std::string kFirstDexFileKeySubstitute = ":classes.dex";
+ // Write the entries in profile index order.
+ std::vector<const std::string*> ordered_info_location(info_.size());
+ std::vector<const DexFileData*> ordered_info_data(info_.size());
for (const auto& it : info_) {
+ ordered_info_location[it.second.profile_index] = &(it.first);
+ ordered_info_data[it.second.profile_index] = &(it.second);
+ }
+ for (size_t profile_index = 0; profile_index < info_.size(); profile_index++) {
os << "\n";
- const std::string& location = it.first;
- const DexFileData& dex_data = it.second;
+ const std::string& location = *ordered_info_location[profile_index];
+ const DexFileData& dex_data = *ordered_info_data[profile_index];
if (print_full_dex_location) {
os << location;
} else {
@@ -960,6 +1017,7 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
std::string multidex_suffix = DexFile::GetMultiDexSuffix(location);
os << (multidex_suffix.empty() ? kFirstDexFileKeySubstitute : multidex_suffix);
}
+ os << " [index=" << static_cast<uint32_t>(dex_data.profile_index) << "]";
const DexFile* dex_file = nullptr;
if (dex_files != nullptr) {
for (size_t i = 0; i < dex_files->size(); i++) {
@@ -979,8 +1037,10 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
os << "[";
for (const auto& inline_cache_it : method_it.second) {
os << "{" << std::hex << inline_cache_it.first << std::dec << ":";
- if (inline_cache_it.second.is_megamorphic) {
- os << "M";
+ if (inline_cache_it.second.is_missing_types) {
+ os << "MT";
+ } else if (inline_cache_it.second.is_megamorphic) {
+ os << "MM";
} else {
for (const ClassReference& class_ref : inline_cache_it.second.classes) {
os << "(" << static_cast<uint32_t>(class_ref.dex_profile_index)
@@ -1022,7 +1082,8 @@ void ProfileCompilationInfo::GetClassNames(const std::vector<const DexFile*>* de
const DexFile* dex_file = nullptr;
if (dex_files != nullptr) {
for (size_t i = 0; i < dex_files->size(); i++) {
- if (location == (*dex_files)[i]->GetLocation()) {
+ if (location == GetProfileDexFileKey((*dex_files)[i]->GetLocation()) &&
+ dex_data.checksum == (*dex_files)[i]->GetLocationChecksum()) {
dex_file = (*dex_files)[i];
}
}
@@ -1039,15 +1100,22 @@ bool ProfileCompilationInfo::Equals(const ProfileCompilationInfo& other) {
return info_.Equals(other.info_);
}
-std::set<DexCacheResolvedClasses> ProfileCompilationInfo::GetResolvedClasses() const {
+std::set<DexCacheResolvedClasses> ProfileCompilationInfo::GetResolvedClasses(
+ const std::unordered_set<std::string>& dex_files_locations) const {
+ std::unordered_map<std::string, std::string> key_to_location_map;
+ for (const std::string& location : dex_files_locations) {
+ key_to_location_map.emplace(GetProfileDexFileKey(location), location);
+ }
std::set<DexCacheResolvedClasses> ret;
for (auto&& pair : info_) {
const std::string& profile_key = pair.first;
- const DexFileData& data = pair.second;
- // TODO: Is it OK to use the same location for both base and dex location here?
- DexCacheResolvedClasses classes(profile_key, profile_key, data.checksum);
- classes.AddClasses(data.class_set.begin(), data.class_set.end());
- ret.insert(classes);
+ auto it = key_to_location_map.find(profile_key);
+ if (it != key_to_location_map.end()) {
+ const DexFileData& data = pair.second;
+ DexCacheResolvedClasses classes(it->second, it->second, data.checksum);
+ classes.AddClasses(data.class_set.begin(), data.class_set.end());
+ ret.insert(classes);
+ }
}
return ret;
}
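A minimal sketch of the reverse mapping built above, assuming GetProfileDexFileKey canonicalizes a dex location to the profile key (the file-name reduction below is a hypothetical stand-in, not the real implementation):

#include <string>
#include <unordered_map>
#include <unordered_set>

// Hypothetical stand-in for GetProfileDexFileKey.
std::string ProfileKey(const std::string& location) {
  auto pos = location.find_last_of('/');
  return pos == std::string::npos ? location : location.substr(pos + 1);
}

// Callers pass full locations; the profile only stores keys, so build the
// reverse map once and filter profile entries through it.
std::unordered_map<std::string, std::string> KeyToLocation(
    const std::unordered_set<std::string>& locations) {
  std::unordered_map<std::string, std::string> key_to_location;
  for (const std::string& location : locations) {
    key_to_location.emplace(ProfileKey(location), location);
  }
  return key_to_location;
}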
@@ -1108,7 +1176,7 @@ bool ProfileCompilationInfo::OfflineProfileMethodInfo::operator==(
}
// We can't use a simple equality test because we need to match the dex files
- // of the inline caches which might have different profile indices.
+ // of the inline caches which might have different profile indexes.
for (const auto& inline_cache_it : inline_caches) {
uint16_t dex_pc = inline_cache_it.first;
const DexPcData dex_pc_data = inline_cache_it.second;
@@ -1117,7 +1185,8 @@ bool ProfileCompilationInfo::OfflineProfileMethodInfo::operator==(
return false;
}
const DexPcData& other_dex_pc_data = other_it->second;
- if (dex_pc_data.is_megamorphic != other_dex_pc_data.is_megamorphic) {
+ if (dex_pc_data.is_megamorphic != other_dex_pc_data.is_megamorphic ||
+ dex_pc_data.is_missing_types != other_dex_pc_data.is_missing_types) {
return false;
}
for (const ClassReference& class_ref : dex_pc_data.classes) {
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index 4bfbfcd287..6ad528c805 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -36,18 +36,22 @@ namespace art {
*/
struct ProfileMethodInfo {
struct ProfileClassReference {
+ ProfileClassReference() : dex_file(nullptr) {}
ProfileClassReference(const DexFile* dex, const dex::TypeIndex& index)
: dex_file(dex), type_index(index) {}
const DexFile* dex_file;
- const dex::TypeIndex type_index;
+ dex::TypeIndex type_index;
};
struct ProfileInlineCache {
- ProfileInlineCache(uint32_t pc, const std::vector<ProfileClassReference>& profile_classes)
- : dex_pc(pc), classes(profile_classes) {}
+ ProfileInlineCache(uint32_t pc,
+ bool missing_types,
+ const std::vector<ProfileClassReference>& profile_classes)
+ : dex_pc(pc), is_missing_types(missing_types), classes(profile_classes) {}
const uint32_t dex_pc;
+ const bool is_missing_types;
const std::vector<ProfileClassReference> classes;
};
@@ -91,6 +95,11 @@ class ProfileCompilationInfo {
return dex_checksum == other.dex_checksum && dex_location == other.dex_location;
}
+ bool MatchesDex(const DexFile* dex_file) const {
+ return dex_checksum == dex_file->GetLocationChecksum() &&
+ dex_location == GetProfileDexFileKey(dex_file->GetLocation());
+ }
+
std::string dex_location;
uint32_t dex_checksum;
};
@@ -128,18 +137,30 @@ class ProfileCompilationInfo {
// Encodes the actual inline cache for a given dex pc (whether or not the receiver is
// megamorphic and its possible types).
- // If the receiver is megamorphic the set of classes will be empty.
+ // If the receiver is megamorphic or is missing types, the set of classes will be empty.
struct DexPcData {
- DexPcData() : is_megamorphic(false) {}
+ DexPcData() : is_missing_types(false), is_megamorphic(false) {}
void AddClass(uint16_t dex_profile_idx, const dex::TypeIndex& type_idx);
- void SetMegamorphic() {
+ void SetIsMegamorphic() {
+ if (is_missing_types) return;
is_megamorphic = true;
classes.clear();
}
+ void SetIsMissingTypes() {
+ is_megamorphic = false;
+ is_missing_types = true;
+ classes.clear();
+ }
bool operator==(const DexPcData& other) const {
- return is_megamorphic == other.is_megamorphic && classes == other.classes;
+ return is_megamorphic == other.is_megamorphic &&
+ is_missing_types == other.is_missing_types &&
+ classes == other.classes;
}
+ // Not all runtime types can be encoded in the profile. For example, if the receiver
+ // type is in a dex file which is not tracked for profiling, its type cannot be
+ // encoded. When types are missing, this field will be set to true.
+ bool is_missing_types;
bool is_megamorphic;
ClassSet classes;
};
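Given the definitions above, the flag precedence plays out as in this usage sketch: is_missing_types dominates is_megamorphic, and while either flag is set AddClass is a no-op.

// Usage sketch against the DexPcData API above.
ProfileCompilationInfo::DexPcData data;
data.AddClass(/*dex_profile_idx*/ 0, dex::TypeIndex(1));  // classes = {(0, 1)}
data.SetIsMegamorphic();              // classes cleared, is_megamorphic == true
data.SetIsMissingTypes();             // overrides: is_megamorphic == false, is_missing_types == true
data.SetIsMegamorphic();              // no-op while is_missing_types is set
data.AddClass(0, dex::TypeIndex(2));  // ignored: the entry stays "missing types"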
@@ -218,9 +239,8 @@ class ProfileCompilationInfo {
bool Equals(const ProfileCompilationInfo& other);
// Return the class descriptors for all of the classes in the profiles' class sets.
- // Note the dex location is actually the profile key, the caller needs to call back in to the
- // profile info stuff to generate a map back to the dex location.
- std::set<DexCacheResolvedClasses> GetResolvedClasses() const;
+ std::set<DexCacheResolvedClasses> GetResolvedClasses(
+ const std::unordered_set<std::string>& dex_files_locations) const;
// Clear the resolved classes from the current object.
void ClearResolvedClasses();
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index 332280a0e3..5cd8e8fef5 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -108,26 +108,31 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
for (ArtMethod* method : methods) {
std::vector<ProfileMethodInfo::ProfileInlineCache> caches;
// Monomorphic
- for (uint16_t dex_pc = 0; dex_pc < 1; dex_pc++) {
+ for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
std::vector<ProfileMethodInfo::ProfileClassReference> classes;
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(0));
- caches.emplace_back(dex_pc, classes);
+ caches.emplace_back(dex_pc, /*is_missing_types*/false, classes);
}
// Polymorphic
- for (uint16_t dex_pc = 1; dex_pc < 2; dex_pc++) {
+ for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
std::vector<ProfileMethodInfo::ProfileClassReference> classes;
for (uint16_t k = 0; k < InlineCache::kIndividualCacheSize / 2; k++) {
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k));
}
- caches.emplace_back(dex_pc, classes);
+ caches.emplace_back(dex_pc, /*is_missing_types*/false, classes);
}
// Megamorphic
- for (uint16_t dex_pc = 2; dex_pc < 3; dex_pc++) {
+ for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
std::vector<ProfileMethodInfo::ProfileClassReference> classes;
for (uint16_t k = 0; k < 2 * InlineCache::kIndividualCacheSize; k++) {
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k));
}
- caches.emplace_back(dex_pc, classes);
+ caches.emplace_back(dex_pc, /*is_missing_types*/false, classes);
+ }
+ // Missing types
+ for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
+ std::vector<ProfileMethodInfo::ProfileClassReference> classes;
+ caches.emplace_back(dex_pc, /*is_missing_types*/true, classes);
}
ProfileMethodInfo pmi(method->GetDexFile(), method->GetDexMethodIndex(), caches);
profile_methods.push_back(pmi);
@@ -148,12 +153,15 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
ProfileCompilationInfo::OfflineProfileMethodInfo offline_pmi;
SafeMap<DexFile*, uint8_t> dex_map; // dex files to profile index
for (const auto& inline_cache : pmi.inline_caches) {
+ ProfileCompilationInfo::DexPcData& dex_pc_data =
+ offline_pmi.inline_caches.FindOrAdd(inline_cache.dex_pc)->second;
+ if (inline_cache.is_missing_types) {
+ dex_pc_data.SetIsMissingTypes();
+ }
for (const auto& class_ref : inline_cache.classes) {
uint8_t dex_profile_index = dex_map.FindOrAdd(const_cast<DexFile*>(class_ref.dex_file),
static_cast<uint8_t>(dex_map.size()))->second;
- offline_pmi.inline_caches
- .FindOrAdd(inline_cache.dex_pc)->second
- .AddClass(dex_profile_index, class_ref.type_index);
+ dex_pc_data.AddClass(dex_profile_index, class_ref.type_index);
if (dex_profile_index >= offline_pmi.dex_references.size()) {
// This is a new dex.
const std::string& dex_key = ProfileCompilationInfo::GetProfileDexFileKey(
@@ -170,18 +178,18 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
ProfileCompilationInfo::OfflineProfileMethodInfo GetOfflineProfileMethodInfo() {
ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
- pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
- pmi.dex_references.emplace_back("dex_location2", /* checksum */ 2);
- pmi.dex_references.emplace_back("dex_location3", /* checksum */ 3);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum */1);
+ pmi.dex_references.emplace_back("dex_location2", /* checksum */2);
+ pmi.dex_references.emplace_back("dex_location3", /* checksum */3);
// Monomorphic
- for (uint16_t dex_pc = 0; dex_pc < 10; dex_pc++) {
+ for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data;
dex_pc_data.AddClass(0, dex::TypeIndex(0));
pmi.inline_caches.Put(dex_pc, dex_pc_data);
}
// Polymorphic
- for (uint16_t dex_pc = 10; dex_pc < 20; dex_pc++) {
+ for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data;
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
@@ -190,9 +198,15 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
pmi.inline_caches.Put(dex_pc, dex_pc_data);
}
// Megamorphic
- for (uint16_t dex_pc = 20; dex_pc < 30; dex_pc++) {
+ for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
+ ProfileCompilationInfo::DexPcData dex_pc_data;
+ dex_pc_data.SetIsMegamorphic();
+ pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ }
+ // Missing types
+ for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data;
- dex_pc_data.is_megamorphic = true;
+ dex_pc_data.SetIsMissingTypes();
pmi.inline_caches.Put(dex_pc, dex_pc_data);
}
@@ -207,7 +221,13 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
}
}
- // Cannot sizeof the actual arrays so hardcode the values here.
+ void SetIsMissingTypes(/*out*/ProfileCompilationInfo::OfflineProfileMethodInfo* pmi) {
+ for (auto& it : pmi->inline_caches) {
+ it.second.SetIsMissingTypes();
+ }
+ }
+
+ // Cannot sizeof the actual arrays so hard-code the values here.
// They should not change anyway.
static constexpr int kProfileMagicSize = 4;
static constexpr int kProfileVersionSize = 4;
@@ -530,6 +550,58 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCaches) {
ASSERT_TRUE(loaded_pmi1 == pmi_extra);
}
+TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCaches) {
+ ScratchFile profile;
+
+ ProfileCompilationInfo saved_info;
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi = GetOfflineProfileMethodInfo();
+
+ // Add methods with inline caches.
+ for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ }
+
+ ASSERT_TRUE(saved_info.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+
+ // Make some inline caches megamorphic and add them to the profile again.
+ ProfileCompilationInfo saved_info_extra;
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi_extra = GetOfflineProfileMethodInfo();
+ MakeMegamorphic(&pmi_extra);
+ for (uint16_t method_idx = 5; method_idx < 10; method_idx++) {
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi_extra, &saved_info_extra));
+ }
+
+ // Mark all inline caches as missing types and add them to the profile again.
+ // This verifies that all inline caches (megamorphic or not) end up marked as missing types.
+ ProfileCompilationInfo::OfflineProfileMethodInfo missing_types = GetOfflineProfileMethodInfo();
+ SetIsMissingTypes(&missing_types);
+ for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, missing_types, &saved_info_extra));
+ }
+
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+ ASSERT_TRUE(saved_info_extra.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+
+ // Merge the profiles so that we have the same view as the file.
+ ASSERT_TRUE(saved_info.MergeWith(saved_info_extra));
+
+ // Check that we get back what we saved.
+ ProfileCompilationInfo loaded_info;
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+ ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
+
+ ASSERT_TRUE(loaded_info.Equals(saved_info));
+
+ ProfileCompilationInfo::OfflineProfileMethodInfo loaded_pmi1;
+ ASSERT_TRUE(loaded_info.GetMethod("dex_location1",
+ /* checksum */ 1,
+ /* method_idx */ 3,
+ &loaded_pmi1));
+ ASSERT_TRUE(loaded_pmi1 == missing_types);
+}
+
TEST_F(ProfileCompilationInfoTest, SaveArtMethodsWithInlineCaches) {
ScratchFile profile;
@@ -570,7 +642,7 @@ TEST_F(ProfileCompilationInfoTest, SaveArtMethodsWithInlineCaches) {
}
}
-TEST_F(ProfileCompilationInfoTest, InvalidChecksumInInlineCahce) {
+TEST_F(ProfileCompilationInfoTest, InvalidChecksumInInlineCache) {
ScratchFile profile;
ProfileCompilationInfo info;
@@ -662,7 +734,7 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
ProfileCompilationInfo::DexPcData dex_pc_data;
- dex_pc_data.is_megamorphic = true;
+ dex_pc_data.SetIsMegamorphic();
pmi.inline_caches.Put(/*dex_pc*/ 0, dex_pc_data);
ProfileCompilationInfo info_megamorphic;
@@ -686,4 +758,33 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
ASSERT_TRUE(info_no_inline_cache.Save(GetFd(profile)));
}
+TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCachesMerge) {
+ // Create an inline cache with missing types.
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
+ pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
+ ProfileCompilationInfo::DexPcData dex_pc_data;
+ dex_pc_data.SetIsMissingTypes();
+ pmi.inline_caches.Put(/*dex_pc*/ 0, dex_pc_data);
+
+ ProfileCompilationInfo info_missing_types;
+ ASSERT_TRUE(AddMethod("dex_location1",
+ /*checksum*/ 1,
+ /*method_idx*/ 0,
+ pmi,
+ &info_missing_types));
+
+ // Create a profile with no inline caches (for the same method).
+ ProfileCompilationInfo info_no_inline_cache;
+ ASSERT_TRUE(AddMethod("dex_location1",
+ /*checksum*/ 1,
+ /*method_idx*/ 0,
+ &info_no_inline_cache));
+
+ // Merge the missing-types cache into the empty one.
+ // Everything should be saved without errors.
+ ASSERT_TRUE(info_no_inline_cache.MergeWith(info_missing_types));
+ ScratchFile profile;
+ ASSERT_TRUE(info_no_inline_cache.Save(GetFd(profile)));
+}
+
} // namespace art
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 61e6c4126a..00487c6728 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -39,11 +39,8 @@ pthread_t ProfileSaver::profiler_pthread_ = 0U;
ProfileSaver::ProfileSaver(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_data_dir)
+ const std::vector<std::string>& code_paths)
: jit_code_cache_(jit_code_cache),
- foreign_dex_profile_path_(foreign_dex_profile_path),
shutting_down_(false),
last_save_number_of_methods_(0),
last_save_number_of_classes_(0),
@@ -58,13 +55,12 @@ ProfileSaver::ProfileSaver(const ProfileSaverOptions& options,
total_number_of_failed_writes_(0),
total_ms_of_sleep_(0),
total_ns_of_work_(0),
- total_number_of_foreign_dex_marks_(0),
max_number_of_profile_entries_cached_(0),
total_number_of_hot_spikes_(0),
total_number_of_wake_ups_(0),
options_(options) {
DCHECK(options_.IsEnabled());
- AddTrackedLocations(output_filename, app_data_dir, code_paths);
+ AddTrackedLocations(output_filename, code_paths);
}
void ProfileSaver::Run() {
@@ -382,9 +378,7 @@ static bool ShouldProfileLocation(const std::string& location) {
void ProfileSaver::Start(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_data_dir) {
+ const std::vector<std::string>& code_paths) {
DCHECK(options.IsEnabled());
DCHECK(Runtime::Current()->GetJit() != nullptr);
DCHECK(!output_filename.empty());
@@ -409,7 +403,7 @@ void ProfileSaver::Start(const ProfileSaverOptions& options,
// apps which share the same runtime).
DCHECK_EQ(instance_->jit_code_cache_, jit_code_cache);
// Add the code_paths to the tracked locations.
- instance_->AddTrackedLocations(output_filename, app_data_dir, code_paths_to_profile);
+ instance_->AddTrackedLocations(output_filename, code_paths_to_profile);
return;
}
@@ -419,9 +413,7 @@ void ProfileSaver::Start(const ProfileSaverOptions& options,
instance_ = new ProfileSaver(options,
output_filename,
jit_code_cache,
- code_paths_to_profile,
- foreign_dex_profile_path,
- app_data_dir);
+ code_paths_to_profile);
// Create a new thread which does the saving.
CHECK_PTHREAD_CALL(
@@ -481,154 +473,16 @@ bool ProfileSaver::IsStarted() {
}
void ProfileSaver::AddTrackedLocations(const std::string& output_filename,
- const std::string& app_data_dir,
const std::vector<std::string>& code_paths) {
auto it = tracked_dex_base_locations_.find(output_filename);
if (it == tracked_dex_base_locations_.end()) {
tracked_dex_base_locations_.Put(output_filename,
std::set<std::string>(code_paths.begin(), code_paths.end()));
- if (!app_data_dir.empty()) {
- app_data_dirs_.insert(app_data_dir);
- }
} else {
it->second.insert(code_paths.begin(), code_paths.end());
}
}
-// TODO(calin): This may lead to several calls to realpath.
-// Consider moving the logic to the saver thread (i.e. when notified,
-// only cache the location, and then wake up the saver thread to do the
-// comparisons with the real file paths and to create the markers).
-void ProfileSaver::NotifyDexUse(const std::string& dex_location) {
- if (!ShouldProfileLocation(dex_location)) {
- return;
- }
- std::set<std::string> app_code_paths;
- std::string foreign_dex_profile_path;
- std::set<std::string> app_data_dirs;
- {
- MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
- if (instance_ == nullptr) {
- return;
- }
- // Make a copy so that we don't hold the lock while doing I/O.
- for (const auto& it : instance_->tracked_dex_base_locations_) {
- app_code_paths.insert(it.second.begin(), it.second.end());
- }
- foreign_dex_profile_path = instance_->foreign_dex_profile_path_;
- app_data_dirs.insert(instance_->app_data_dirs_.begin(), instance_->app_data_dirs_.end());
- }
-
- bool mark_created = MaybeRecordDexUseInternal(dex_location,
- app_code_paths,
- foreign_dex_profile_path,
- app_data_dirs);
- if (mark_created) {
- MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
- if (instance_ != nullptr) {
- instance_->total_number_of_foreign_dex_marks_++;
- }
- }
-}
-
-static bool CheckContainsWithRealPath(const std::set<std::string>& paths_set,
- const std::string& path_to_check) {
- for (const auto& path : paths_set) {
- UniqueCPtr<const char[]> real_path(realpath(path.c_str(), nullptr));
- if (real_path == nullptr) {
- PLOG(WARNING) << "Could not get realpath for " << path;
- continue;
- }
- std::string real_path_str(real_path.get());
- if (real_path_str == path_to_check) {
- return true;
- }
- }
- return false;
-}
-
-// After the call, dex_location_real_path will contain the marker's name.
-static bool CreateForeignDexMarker(const std::string& foreign_dex_profile_path,
- /*in-out*/ std::string* dex_location_real_path) {
- // For foreign dex files we record a flag on disk. PackageManager will (potentially) take this
- // into account when deciding how to optimize the loaded dex file.
- // The expected flag name is the canonical path of the apk where '/' is substituted to '@'.
- // (it needs to be kept in sync with
- // frameworks/base/services/core/java/com/android/server/pm/PackageDexOptimizer.java)
- std::replace(dex_location_real_path->begin(), dex_location_real_path->end(), '/', '@');
- std::string flag_path = foreign_dex_profile_path + "/" + *dex_location_real_path;
- // We use O_RDONLY as the access mode because we must supply some access
- // mode, and there is no access mode that means 'create but do not read' the
- // file. We will not not actually read from the file.
- int fd = TEMP_FAILURE_RETRY(open(flag_path.c_str(),
- O_CREAT | O_RDONLY | O_EXCL | O_CLOEXEC | O_NOFOLLOW, 0));
- if (fd != -1) {
- if (close(fd) != 0) {
- PLOG(WARNING) << "Could not close file after flagging foreign dex use " << flag_path;
- }
- return true;
- } else {
- if (errno != EEXIST && errno != EACCES) {
- // Another app could have already created the file, and selinux may not
- // allow the read access to the file implied by the call to open.
- PLOG(WARNING) << "Could not create foreign dex use mark " << flag_path;
- return false;
- }
- return true;
- }
-}
-
-bool ProfileSaver::MaybeRecordDexUseInternal(
- const std::string& dex_location,
- const std::set<std::string>& app_code_paths,
- const std::string& foreign_dex_profile_path,
- const std::set<std::string>& app_data_dirs) {
- if (dex_location.empty()) {
- LOG(WARNING) << "Asked to record foreign dex use with an empty dex location.";
- return false;
- }
- if (foreign_dex_profile_path.empty()) {
- LOG(WARNING) << "Asked to record foreign dex use without a valid profile path ";
- return false;
- }
-
- if (app_code_paths.find(dex_location) != app_code_paths.end()) {
- // The dex location belongs to the application code paths. Nothing to record.
- return false;
- }
-
- if (app_data_dirs.find(dex_location) != app_data_dirs.end()) {
- // The dex location is under the application folder. Nothing to record.
- return false;
- }
-
- // Do another round of checks with the real paths.
- // Application directory could be a symlink (e.g. /data/data instead of /data/user/0), and we
- // don't have control over how the dex files are actually loaded (symlink or canonical path),
-
- // Note that we could cache all the real locations in the saver (since it's an expensive
- // operation). However we expect that app_code_paths is small (usually 1 element), and
- // NotifyDexUse is called just a few times in the app lifetime. So we make the compromise
- // to save some bytes of memory usage.
-
- UniqueCPtr<const char[]> dex_location_real_path(realpath(dex_location.c_str(), nullptr));
- if (dex_location_real_path == nullptr) {
- PLOG(WARNING) << "Could not get realpath for " << dex_location;
- return false;
- }
- std::string dex_location_real_path_str(dex_location_real_path.get());
-
- if (CheckContainsWithRealPath(app_code_paths, dex_location_real_path_str)) {
- return false;
- }
-
- if (CheckContainsWithRealPath(app_data_dirs, dex_location_real_path_str)) {
- return false;
- }
-
- return CreateForeignDexMarker(foreign_dex_profile_path, &dex_location_real_path_str);
-}
-
void ProfileSaver::DumpInstanceInfo(std::ostream& os) {
MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
if (instance_ != nullptr) {
@@ -645,8 +499,6 @@ void ProfileSaver::DumpInfo(std::ostream& os) {
<< "ProfileSaver total_number_of_failed_writes=" << total_number_of_failed_writes_ << '\n'
<< "ProfileSaver total_ms_of_sleep=" << total_ms_of_sleep_ << '\n'
<< "ProfileSaver total_ms_of_work=" << NsToMs(total_ns_of_work_) << '\n'
- << "ProfileSaver total_number_of_foreign_dex_marks="
- << total_number_of_foreign_dex_marks_ << '\n'
<< "ProfileSaver max_number_profile_entries_cached="
<< max_number_of_profile_entries_cached_ << '\n'
<< "ProfileSaver total_number_of_hot_spikes=" << total_number_of_hot_spikes_ << '\n'
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
index 9c5e41fd13..ec8342ad9e 100644
--- a/runtime/jit/profile_saver.h
+++ b/runtime/jit/profile_saver.h
@@ -32,9 +32,7 @@ class ProfileSaver {
static void Start(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_data_dir)
+ const std::vector<std::string>& code_paths)
REQUIRES(!Locks::profiler_lock_, !wait_lock_);
// Stops the profile saver thread.
@@ -46,8 +44,6 @@ class ProfileSaver {
// Returns true if the profile saver is started.
static bool IsStarted() REQUIRES(!Locks::profiler_lock_);
- static void NotifyDexUse(const std::string& dex_location);
-
// If the profile saver is running, dumps statistics to the `os`. Otherwise it does nothing.
static void DumpInstanceInfo(std::ostream& os);
@@ -66,9 +62,7 @@ class ProfileSaver {
ProfileSaver(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_data_dir);
+ const std::vector<std::string>& code_paths);
// NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock.
static void* RunProfileSaverThread(void* arg)
@@ -90,7 +84,6 @@ class ProfileSaver {
bool ShuttingDown(Thread* self) REQUIRES(!Locks::profiler_lock_);
void AddTrackedLocations(const std::string& output_filename,
- const std::string& app_data_dir,
const std::vector<std::string>& code_paths)
REQUIRES(Locks::profiler_lock_);
@@ -102,12 +95,6 @@ class ProfileSaver {
// profile_cache_ for later save.
void FetchAndCacheResolvedClassesAndMethods();
- static bool MaybeRecordDexUseInternal(
- const std::string& dex_location,
- const std::set<std::string>& tracked_locations,
- const std::string& foreign_dex_profile_path,
- const std::set<std::string>& app_data_dirs);
-
void DumpInfo(std::ostream& os);
// The only instance of the saver.
@@ -121,13 +108,6 @@ class ProfileSaver {
// It maps profile locations to code paths (dex base locations).
SafeMap<std::string, std::set<std::string>> tracked_dex_base_locations_
GUARDED_BY(Locks::profiler_lock_);
- // The directory were the we should store the code paths.
- std::string foreign_dex_profile_path_;
-
- // A list of application directories, used to infer if a loaded dex belongs
- // to the application or not. Multiple application data directories are possible when
- // different apps share the same runtime.
- std::set<std::string> app_data_dirs_ GUARDED_BY(Locks::profiler_lock_);
bool shutting_down_ GUARDED_BY(Locks::profiler_lock_);
uint32_t last_save_number_of_methods_;
@@ -152,7 +132,6 @@ class ProfileSaver {
uint64_t total_number_of_failed_writes_;
uint64_t total_ms_of_sleep_;
uint64_t total_ns_of_work_;
- uint64_t total_number_of_foreign_dex_marks_;
// TODO(calin): replace with an actual size.
uint64_t max_number_of_profile_entries_cached_;
uint64_t total_number_of_hot_spikes_;
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index d34f09c721..b68eedcb11 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -206,10 +206,10 @@ class MANAGED Class FINAL : public Object {
return status >= kStatusResolved || status == kStatusErrorResolved;
}
- // Returns true if the class was compile-time verified.
+ // Returns true if the class should be verified at runtime.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsCompileTimeVerified() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetStatus<kVerifyFlags>() >= kStatusRetryVerificationAtRuntime;
+ bool ShouldVerifyAtRuntime() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetStatus<kVerifyFlags>() == kStatusRetryVerificationAtRuntime;
}
// Returns true if the class has been verified.
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index efc42fdac7..9b707f8e1c 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -580,9 +580,7 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
static void VMRuntime_registerAppInfo(JNIEnv* env,
jclass clazz ATTRIBUTE_UNUSED,
jstring profile_file,
- jstring app_dir,
- jobjectArray code_paths,
- jstring foreign_dex_profile_path) {
+ jobjectArray code_paths) {
std::vector<std::string> code_paths_vec;
int code_paths_length = env->GetArrayLength(code_paths);
for (int i = 0; i < code_paths_length; i++) {
@@ -596,22 +594,7 @@ static void VMRuntime_registerAppInfo(JNIEnv* env,
std::string profile_file_str(raw_profile_file);
env->ReleaseStringUTFChars(profile_file, raw_profile_file);
- std::string foreign_dex_profile_path_str = "";
- if (foreign_dex_profile_path != nullptr) {
- const char* raw_foreign_dex_profile_path =
- env->GetStringUTFChars(foreign_dex_profile_path, nullptr);
- foreign_dex_profile_path_str.assign(raw_foreign_dex_profile_path);
- env->ReleaseStringUTFChars(foreign_dex_profile_path, raw_foreign_dex_profile_path);
- }
-
- const char* raw_app_dir = env->GetStringUTFChars(app_dir, nullptr);
- std::string app_dir_str(raw_app_dir);
- env->ReleaseStringUTFChars(app_dir, raw_app_dir);
-
- Runtime::Current()->RegisterAppInfo(code_paths_vec,
- profile_file_str,
- foreign_dex_profile_path_str,
- app_dir_str);
+ Runtime::Current()->RegisterAppInfo(code_paths_vec, profile_file_str);
}
static jboolean VMRuntime_isBootClassPathOnDisk(JNIEnv* env, jclass, jstring java_instruction_set) {
@@ -674,8 +657,7 @@ static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(VMRuntime, is64Bit, "()Z"),
FAST_NATIVE_METHOD(VMRuntime, isCheckJniEnabled, "()Z"),
NATIVE_METHOD(VMRuntime, preloadDexCaches, "()V"),
- NATIVE_METHOD(VMRuntime, registerAppInfo,
- "(Ljava/lang/String;Ljava/lang/String;[Ljava/lang/String;Ljava/lang/String;)V"),
+ NATIVE_METHOD(VMRuntime, registerAppInfo, "(Ljava/lang/String;[Ljava/lang/String;)V"),
NATIVE_METHOD(VMRuntime, isBootClassPathOnDisk, "(Ljava/lang/String;)Z"),
NATIVE_METHOD(VMRuntime, getCurrentInstructionSet, "()Ljava/lang/String;"),
NATIVE_METHOD(VMRuntime, didPruneDalvikCache, "()Z"),
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index bb92ca7ae6..836ba81d8e 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -76,24 +76,29 @@ static void EnableDebugger() {
class ClassSet {
public:
- explicit ClassSet(Thread* const self) : hs_(self) {}
+ // The number of classes we reasonably expect to have to look at. Realistically the number is
+ // more like ~10, but there is little harm in having some extra.
+ static constexpr int kClassSetCapacity = 100;
+
+ explicit ClassSet(Thread* const self) : self_(self) {
+ self_->GetJniEnv()->PushFrame(kClassSetCapacity);
+ }
+
+ ~ClassSet() {
+ self_->GetJniEnv()->PopFrame();
+ }
void AddClass(ObjPtr<mirror::Class> klass) REQUIRES(Locks::mutator_lock_) {
- for (Handle<mirror::Class> k : class_set_) {
- if (k.Get() == klass.Ptr()) {
- return;
- }
- }
- class_set_.push_back(hs_.NewHandle<mirror::Class>(klass));
+ class_set_.insert(self_->GetJniEnv()->AddLocalReference<jclass>(klass.Ptr()));
}
- const std::vector<Handle<mirror::Class>>& GetClasses() const {
+ const std::unordered_set<jclass>& GetClasses() const {
return class_set_;
}
private:
- VariableSizedHandleScope hs_;
- std::vector<Handle<mirror::Class>> class_set_;
+ Thread* const self_;
+ std::unordered_set<jclass> class_set_;
};
static void DoCollectNonDebuggableCallback(Thread* thread, void* data)
@@ -133,20 +138,15 @@ static void CollectNonDebuggableClasses() REQUIRES(!Locks::mutator_lock_) {
ScopedObjectAccess soa(self);
ClassSet classes(self);
{
- // Drop the mutator lock.
- self->TransitionFromRunnableToSuspended(art::ThreadState::kNative);
- {
- // Get it back with a suspend all.
- ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!",
- /*long_suspend*/false);
- MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- runtime->GetThreadList()->ForEach(DoCollectNonDebuggableCallback, &classes);
- }
- // Recover the shared lock before we leave this scope.
- self->TransitionFromSuspendedToRunnable();
+ // Drop the shared mutator lock.
+ ScopedThreadSuspension sts(self, art::ThreadState::kNative);
+ // Get exclusive mutator lock with suspend all.
+ ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!", /*long_suspend*/false);
+ MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ runtime->GetThreadList()->ForEach(DoCollectNonDebuggableCallback, &classes);
}
- for (Handle<mirror::Class> klass : classes.GetClasses()) {
- NonDebuggableClasses::AddNonDebuggableClass(klass.Get());
+ for (jclass klass : classes.GetClasses()) {
+ NonDebuggableClasses::AddNonDebuggableClass(klass);
}
}
diff --git a/runtime/non_debuggable_classes.cc b/runtime/non_debuggable_classes.cc
index db121a90e2..829ea65876 100644
--- a/runtime/non_debuggable_classes.cc
+++ b/runtime/non_debuggable_classes.cc
@@ -27,16 +27,16 @@ namespace art {
std::vector<jclass> NonDebuggableClasses::non_debuggable_classes;
-void NonDebuggableClasses::AddNonDebuggableClass(ObjPtr<mirror::Class> klass) {
+void NonDebuggableClasses::AddNonDebuggableClass(jclass klass) {
Thread* self = Thread::Current();
JNIEnvExt* env = self->GetJniEnv();
+ ObjPtr<mirror::Class> mirror_klass(self->DecodeJObject(klass)->AsClass());
for (jclass c : non_debuggable_classes) {
- if (self->DecodeJObject(c)->AsClass() == klass.Ptr()) {
+ if (self->DecodeJObject(c)->AsClass() == mirror_klass.Ptr()) {
return;
}
}
- ScopedLocalRef<jclass> lr(env, env->AddLocalReference<jclass>(klass));
- non_debuggable_classes.push_back(reinterpret_cast<jclass>(env->NewGlobalRef(lr.get())));
+ non_debuggable_classes.push_back(reinterpret_cast<jclass>(env->NewGlobalRef(klass)));
}
} // namespace art
diff --git a/runtime/non_debuggable_classes.h b/runtime/non_debuggable_classes.h
index 0c94dc03a7..e1b563339d 100644
--- a/runtime/non_debuggable_classes.h
+++ b/runtime/non_debuggable_classes.h
@@ -21,21 +21,16 @@
#include "base/mutex.h"
#include "jni.h"
-#include "obj_ptr.h"
namespace art {
-namespace mirror {
-class Class;
-} // namespace mirror
-
struct NonDebuggableClasses {
public:
static const std::vector<jclass>& GetNonDebuggableClasses() {
return non_debuggable_classes;
}
- static void AddNonDebuggableClass(ObjPtr<mirror::Class> klass)
+ static void AddNonDebuggableClass(jclass klass)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 5ae2fc51b7..48bf1e72a4 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -430,8 +430,7 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile&
// starts up.
LOG(WARNING) << "Dex location " << dex_location_ << " does not seem to include dex file. "
<< "Allow oat file use. This is potentially dangerous.";
- } else if (file.GetOatHeader().GetImageFileLocationOatChecksum()
- != GetCombinedImageChecksum()) {
+ } else if (file.GetOatHeader().GetImageFileLocationOatChecksum() != image_info->oat_checksum) {
VLOG(oat) << "Oat image checksum does not match image checksum.";
return kOatBootImageOutOfDate;
}
@@ -726,68 +725,81 @@ const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() {
return required_dex_checksums_found_ ? &cached_required_dex_checksums_ : nullptr;
}
-const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() {
- if (!image_info_load_attempted_) {
- image_info_load_attempted_ = true;
-
- Runtime* runtime = Runtime::Current();
- std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
- if (!image_spaces.empty()) {
- cached_image_info_.location = image_spaces[0]->GetImageLocation();
-
- if (isa_ == kRuntimeISA) {
- const ImageHeader& image_header = image_spaces[0]->GetImageHeader();
- cached_image_info_.oat_checksum = image_header.GetOatChecksum();
- cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
- image_header.GetOatDataBegin());
- cached_image_info_.patch_delta = image_header.GetPatchDelta();
- } else {
- std::string error_msg;
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(cached_image_info_.location.c_str(),
- isa_,
- &error_msg));
- CHECK(image_header != nullptr) << error_msg;
- cached_image_info_.oat_checksum = image_header->GetOatChecksum();
- cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
- image_header->GetOatDataBegin());
- cached_image_info_.patch_delta = image_header->GetPatchDelta();
- }
- }
- image_info_load_succeeded_ = (!image_spaces.empty());
+// TODO: Use something better than xor for the combined image checksum.
+std::unique_ptr<OatFileAssistant::ImageInfo>
+OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string* error_msg) {
+ CHECK(error_msg != nullptr);
- combined_image_checksum_ = CalculateCombinedImageChecksum(isa_);
+ // Use the currently loaded image to determine the image locations for all
+ // the image spaces, regardless of the isa requested. Otherwise we would
+ // need to read from the boot image's oat file to determine the rest of the
+ // image locations in the case of multi-image.
+ Runtime* runtime = Runtime::Current();
+ std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
+ if (image_spaces.empty()) {
+ *error_msg = "There are no boot image spaces";
+ return nullptr;
}
- return image_info_load_succeeded_ ? &cached_image_info_ : nullptr;
-}
-// TODO: Use something better than xor.
-uint32_t OatFileAssistant::CalculateCombinedImageChecksum(InstructionSet isa) {
- uint32_t checksum = 0;
- std::vector<gc::space::ImageSpace*> image_spaces =
- Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ std::unique_ptr<ImageInfo> info(new ImageInfo());
+ info->location = image_spaces[0]->GetImageLocation();
+
+ // TODO: Special casing on isa == kRuntimeISA is presumably motivated by
+ // performance: 'it's faster to use an already loaded image header than read
+ // the image header from disk'. But the loaded image does not necessarily
+ // match the on-disk image for kRuntimeISA, so this behavior is suspect (b/35659889).
if (isa == kRuntimeISA) {
+ const ImageHeader& image_header = image_spaces[0]->GetImageHeader();
+ info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
+ info->patch_delta = image_header.GetPatchDelta();
+
+ info->oat_checksum = 0;
for (gc::space::ImageSpace* image_space : image_spaces) {
- checksum ^= image_space->GetImageHeader().GetOatChecksum();
+ info->oat_checksum ^= image_space->GetImageHeader().GetOatChecksum();
}
} else {
+ std::unique_ptr<ImageHeader> image_header(
+ gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg));
+ if (image_header == nullptr) {
+ return nullptr;
+ }
+ info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
+ info->patch_delta = image_header->GetPatchDelta();
+
+ info->oat_checksum = 0;
for (gc::space::ImageSpace* image_space : image_spaces) {
std::string location = image_space->GetImageLocation();
- std::string error_msg;
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(location.c_str(), isa, &error_msg));
- CHECK(image_header != nullptr) << error_msg;
- checksum ^= image_header->GetOatChecksum();
+ image_header.reset(
+ gc::space::ImageSpace::ReadImageHeader(location.c_str(), isa, error_msg));
+ if (image_header == nullptr) {
+ return nullptr;
+ }
+ info->oat_checksum ^= image_header->GetOatChecksum();
}
}
- return checksum;
+ return info;
}
-uint32_t OatFileAssistant::GetCombinedImageChecksum() {
+const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() {
if (!image_info_load_attempted_) {
- GetImageInfo();
+ image_info_load_attempted_ = true;
+ std::string error_msg;
+ cached_image_info_ = ImageInfo::GetRuntimeImageInfo(isa_, &error_msg);
+ if (cached_image_info_ == nullptr) {
+ LOG(WARNING) << "Unable to get runtime image info: " << error_msg;
+ }
+ }
+ return cached_image_info_.get();
+}
+
+uint32_t OatFileAssistant::CalculateCombinedImageChecksum(InstructionSet isa) {
+ std::string error_msg;
+ std::unique_ptr<ImageInfo> info = ImageInfo::GetRuntimeImageInfo(isa, &error_msg);
+ if (info == nullptr) {
+ LOG(WARNING) << "Unable to get runtime image info for checksum: " << error_msg;
+ return 0;
}
- return combined_image_checksum_;
+ return info->oat_checksum;
}
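As the TODO above notes, xor is a weak combiner: it is order-insensitive and self-cancelling, so reordered or duplicated image checksums go undetected. For example:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t a = 0x12345678u;
  uint32_t b = 0xCAFEBABEu;
  assert((a ^ b) == (b ^ a));  // Reordering images is invisible to the combined checksum.
  assert((a ^ a) == 0u);       // A pair of identical checksums cancels out entirely.
  return 0;
}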
OatFileAssistant::OatFileInfo& OatFileAssistant::GetBestInfo() {
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 3ede29f5e0..eec87f0768 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -284,6 +284,9 @@ class OatFileAssistant {
uintptr_t oat_data_begin = 0;
int32_t patch_delta = 0;
std::string location;
+
+ static std::unique_ptr<ImageInfo> GetRuntimeImageInfo(InstructionSet isa,
+ std::string* error_msg);
};
class OatFileInfo {
@@ -414,8 +417,6 @@ class OatFileAssistant {
// The caller shouldn't clean up or free the returned pointer.
const ImageInfo* GetImageInfo();
- uint32_t GetCombinedImageChecksum();
-
// To implement Lock(), we lock a dummy file where the oat file would go
// (adding ".flock" to the target file name) and retain the lock for the
// remaining lifetime of the OatFileAssistant object.
@@ -445,9 +446,7 @@ class OatFileAssistant {
// TODO: The image info should probably be moved out of the oat file
// assistant to an image file manager.
bool image_info_load_attempted_ = false;
- bool image_info_load_succeeded_ = false;
- ImageInfo cached_image_info_;
- uint32_t combined_image_checksum_ = 0;
+ std::unique_ptr<ImageInfo> cached_image_info_;
DISALLOW_COPY_AND_ASSIGN(OatFileAssistant);
};
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 70796148a4..d04dbbee04 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -23,6 +23,7 @@
#include "android-base/stringprintf.h"
#include "art_field-inl.h"
+#include "base/bit_vector-inl.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/systrace.h"
@@ -145,13 +146,52 @@ std::vector<const OatFile*> OatFileManager::RegisterImageOatFiles(
return oat_files;
}
+class TypeIndexInfo {
+ public:
+ explicit TypeIndexInfo(const DexFile* dex_file)
+ : type_indexes_(GenerateTypeIndexes(dex_file)),
+ iter_(type_indexes_.Indexes().begin()),
+ end_(type_indexes_.Indexes().end()) { }
+
+ BitVector& GetTypeIndexes() {
+ return type_indexes_;
+ }
+ BitVector::IndexIterator& GetIterator() {
+ return iter_;
+ }
+ BitVector::IndexIterator& GetIteratorEnd() {
+ return end_;
+ }
+ void AdvanceIterator() {
+ iter_++;
+ }
+
+ private:
+ static BitVector GenerateTypeIndexes(const DexFile* dex_file) {
+ BitVector type_indexes(/*start_bits*/0, /*expandable*/true, Allocator::GetMallocAllocator());
+ for (uint16_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ uint16_t type_idx = class_def.class_idx_.index_;
+ type_indexes.SetBit(type_idx);
+ }
+ return type_indexes;
+ }
+
+ // BitVector with bits set for the type indexes of all classes in the input dex file.
+ BitVector type_indexes_;
+ BitVector::IndexIterator iter_;
+ BitVector::IndexIterator end_;
+};
+
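Setting one bit per class_def type index both deduplicates and orders the indexes, which is what lets each dex file be consumed as a sorted descriptor stream below. A standalone analogue of TypeIndexInfo using std::vector<bool>:

#include <cstdint>
#include <vector>

// Collect type indexes into a bit vector, then walk the set bits in ascending
// order, mimicking TypeIndexInfo::GenerateTypeIndexes and its iterator.
std::vector<uint32_t> SortedTypeIndexes(const std::vector<uint16_t>& class_def_type_indexes) {
  std::vector<bool> bits(1 << 16, false);
  for (uint16_t idx : class_def_type_indexes) {
    bits[idx] = true;  // Duplicates collapse into a single bit.
  }
  std::vector<uint32_t> sorted;
  for (uint32_t i = 0; i < bits.size(); ++i) {
    if (bits[i]) {
      sorted.push_back(i);
    }
  }
  return sorted;
}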
class DexFileAndClassPair : ValueObject {
public:
- DexFileAndClassPair(const DexFile* dex_file, size_t current_class_index, bool from_loaded_oat)
- : cached_descriptor_(GetClassDescriptor(dex_file, current_class_index)),
+ DexFileAndClassPair(const DexFile* dex_file, TypeIndexInfo* type_info, bool from_loaded_oat)
+ : type_info_(type_info),
dex_file_(dex_file),
- current_class_index_(current_class_index),
- from_loaded_oat_(from_loaded_oat) {}
+ cached_descriptor_(dex_file_->StringByTypeIdx(dex::TypeIndex(*type_info->GetIterator()))),
+ from_loaded_oat_(from_loaded_oat) {
+ type_info_->AdvanceIterator();
+ }
DexFileAndClassPair(const DexFileAndClassPair& rhs) = default;
@@ -172,16 +212,12 @@ class DexFileAndClassPair : ValueObject {
}
bool DexFileHasMoreClasses() const {
- return current_class_index_ + 1 < dex_file_->NumClassDefs();
+ return type_info_->GetIterator() != type_info_->GetIteratorEnd();
}
void Next() {
- ++current_class_index_;
- cached_descriptor_ = GetClassDescriptor(dex_file_, current_class_index_);
- }
-
- size_t GetCurrentClassIndex() const {
- return current_class_index_;
+ cached_descriptor_ = dex_file_->StringByTypeIdx(dex::TypeIndex(*type_info_->GetIterator()));
+ type_info_->AdvanceIterator();
}
bool FromLoadedOat() const {
@@ -193,42 +229,36 @@ class DexFileAndClassPair : ValueObject {
}
private:
- static const char* GetClassDescriptor(const DexFile* dex_file, size_t index) {
- DCHECK(IsUint<16>(index));
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(static_cast<uint16_t>(index));
- return dex_file->StringByTypeIdx(class_def.class_idx_);
- }
-
- const char* cached_descriptor_;
+ TypeIndexInfo* type_info_;
const DexFile* dex_file_;
- size_t current_class_index_;
+ const char* cached_descriptor_;
bool from_loaded_oat_; // We only need to compare mismatches between what we load now
// and what was loaded before. Any old duplicates must have been
// OK, and any new "internal" duplicates are as well (they must
// be from multidex, which resolves correctly).
};
-static void AddDexFilesFromOat(const OatFile* oat_file,
- bool already_loaded,
- /*out*/std::priority_queue<DexFileAndClassPair>* heap,
- std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
+static void AddDexFilesFromOat(
+ const OatFile* oat_file,
+ /*out*/std::vector<const DexFile*>* dex_files,
+ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
for (const OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
std::string error;
std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error);
if (dex_file == nullptr) {
LOG(WARNING) << "Could not create dex file from oat file: " << error;
} else if (dex_file->NumClassDefs() > 0U) {
- heap->emplace(dex_file.get(), /*current_class_index*/0U, already_loaded);
+ dex_files->push_back(dex_file.get());
opened_dex_files->push_back(std::move(dex_file));
}
}
}
-static void AddNext(/*inout*/DexFileAndClassPair* original,
- /*inout*/std::priority_queue<DexFileAndClassPair>* heap) {
- if (original->DexFileHasMoreClasses()) {
- original->Next();
- heap->push(std::move(*original));
+static void AddNext(/*inout*/DexFileAndClassPair& original,
+ /*inout*/std::priority_queue<DexFileAndClassPair>& heap) {
+ if (original.DexFileHasMoreClasses()) {
+ original.Next();
+ heap.push(std::move(original));
}
}
@@ -297,7 +327,8 @@ static void IterateOverPathClassLoader(
static bool GetDexFilesFromClassLoader(
ScopedObjectAccessAlreadyRunnable& soa,
mirror::ClassLoader* class_loader,
- std::priority_queue<DexFileAndClassPair>* queue) REQUIRES_SHARED(Locks::mutator_lock_) {
+ std::vector<const DexFile*>* dex_files)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (ClassLinker::IsBootClassLoader(soa, class_loader)) {
// The boot class loader. We don't load any of these files, as we know we compiled against
// them correctly.
@@ -312,7 +343,7 @@ static bool GetDexFilesFromClassLoader(
return false;
}
- bool recursive_result = GetDexFilesFromClassLoader(soa, class_loader->GetParent(), queue);
+ bool recursive_result = GetDexFilesFromClassLoader(soa, class_loader->GetParent(), dex_files);
if (!recursive_result) {
// Something wrong up the chain.
return false;
@@ -322,7 +353,7 @@ static bool GetDexFilesFromClassLoader(
auto GetDexFilesFn = [&] (const DexFile* cp_dex_file)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (cp_dex_file->NumClassDefs() > 0) {
- queue->emplace(cp_dex_file, 0U, true);
+ dex_files->push_back(cp_dex_file);
}
return true; // Continue looking.
};
@@ -341,7 +372,8 @@ static bool GetDexFilesFromClassLoader(
static void GetDexFilesFromDexElementsArray(
ScopedObjectAccessAlreadyRunnable& soa,
Handle<mirror::ObjectArray<mirror::Object>> dex_elements,
- std::priority_queue<DexFileAndClassPair>* queue) REQUIRES_SHARED(Locks::mutator_lock_) {
+ std::vector<const DexFile*>* dex_files)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (dex_elements == nullptr) {
// Nothing to do.
return;
@@ -360,7 +392,7 @@ static void GetDexFilesFromDexElementsArray(
auto GetDexFilesFn = [&] (const DexFile* cp_dex_file)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (cp_dex_file != nullptr && cp_dex_file->NumClassDefs() > 0) {
- queue->emplace(cp_dex_file, 0U, true);
+ dex_files->push_back(cp_dex_file);
}
return true; // Continue looking.
};
@@ -389,43 +421,95 @@ static void GetDexFilesFromDexElementsArray(
}
static bool AreSharedLibrariesOk(const std::string& shared_libraries,
- std::priority_queue<DexFileAndClassPair>& queue) {
+ std::vector<const DexFile*>& dex_files) {
+ // If no shared libraries, we expect no dex files.
if (shared_libraries.empty()) {
- if (queue.empty()) {
- // No shared libraries or oat files, as expected.
- return true;
+ return dex_files.empty();
+ }
+ // If we find the special shared library, skip the shared libraries check.
+ if (shared_libraries.compare(OatFile::kSpecialSharedLibrary) == 0) {
+ return true;
+ }
+ // Shared libraries is a series of dex file paths and their checksums, each separated by '*'.
+ std::vector<std::string> shared_libraries_split;
+ Split(shared_libraries, '*', &shared_libraries_split);
+
+ // Sanity check the sizes of the dex files and the split shared libraries: there should be
+ // twice as many entries in the split list, since it holds filename/checksum pairs.
+ if (dex_files.size() * 2 != shared_libraries_split.size()) {
+ return false;
+ }
+
+ for (size_t i = 0; i < dex_files.size(); ++i) {
+ if (dex_files[i]->GetLocation() != shared_libraries_split[i * 2]) {
+ return false;
}
- } else {
- if (shared_libraries.compare(OatFile::kSpecialSharedLibrary) == 0) {
- // If we find the special shared library, skip the shared libraries check.
- return true;
+ char* end;
+ size_t shared_lib_checksum = strtoul(shared_libraries_split[i * 2 + 1].c_str(), &end, 10);
+ uint32_t dex_checksum = dex_files[i]->GetLocationChecksum();
+ if (*end != '\0' || dex_checksum != shared_lib_checksum) {
+ return false;
}
- // Shared libraries is a series of dex file paths and their checksums, each separated by '*'.
- std::vector<std::string> shared_libraries_split;
- Split(shared_libraries, '*', &shared_libraries_split);
-
- size_t index = 0;
- std::priority_queue<DexFileAndClassPair> temp = queue;
- while (!temp.empty() && index < shared_libraries_split.size() - 1) {
- DexFileAndClassPair pair(temp.top());
- const DexFile* dex_file = pair.GetDexFile();
- const std::string& dex_filename = dex_file->GetLocation();
- if (dex_filename != shared_libraries_split[index]) {
- break;
- }
- char* end;
- size_t shared_lib_checksum = strtoul(shared_libraries_split[index + 1].c_str(), &end, 10);
- uint32_t dex_checksum = dex_file->GetLocationChecksum();
- if (*end != '\0' || dex_checksum != shared_lib_checksum) {
+ }
+
+ return true;
+}
+
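
A minimal sketch of the class-path key format that AreSharedLibrariesOk parses: dex file locations and checksums alternate in one '*'-separated string. The names here (ParseClassPathKey, Entry) are illustrative only, not part of the patch.

    #include <cstdint>
    #include <cstdlib>
    #include <string>
    #include <vector>

    struct Entry {
      std::string location;
      uint32_t checksum;
    };

    // Parses "a.dex*123*b.dex*456" into {{"a.dex", 123}, {"b.dex", 456}};
    // rejects an odd token count or a malformed checksum.
    static bool ParseClassPathKey(const std::string& key, std::vector<Entry>* out) {
      std::vector<std::string> tokens;
      size_t start = 0;
      size_t pos;
      while ((pos = key.find('*', start)) != std::string::npos) {
        tokens.push_back(key.substr(start, pos - start));
        start = pos + 1;
      }
      tokens.push_back(key.substr(start));
      if (tokens.size() % 2 != 0) {
        return false;
      }
      for (size_t i = 0; i < tokens.size(); i += 2) {
        char* end;
        unsigned long checksum = strtoul(tokens[i + 1].c_str(), &end, 10);
        if (tokens[i + 1].empty() || *end != '\0') {
          return false;
        }
        out->push_back(Entry{tokens[i], static_cast<uint32_t>(checksum)});
      }
      return true;
    }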
+static bool CollisionCheck(std::vector<const DexFile*>& dex_files_loaded,
+ std::vector<const DexFile*>& dex_files_unloaded,
+ std::string* error_msg /*out*/) {
+ // Generate type index information for each dex file.
+ std::vector<TypeIndexInfo> loaded_types;
+ for (const DexFile* dex_file : dex_files_loaded) {
+ loaded_types.push_back(TypeIndexInfo(dex_file));
+ }
+ std::vector<TypeIndexInfo> unloaded_types;
+ for (const DexFile* dex_file : dex_files_unloaded) {
+ unloaded_types.push_back(TypeIndexInfo(dex_file));
+ }
+
+ // Populate the queue of dex file and class pairs with the loaded and unloaded dex files.
+ std::priority_queue<DexFileAndClassPair> queue;
+ for (size_t i = 0; i < dex_files_loaded.size(); ++i) {
+ if (loaded_types[i].GetIterator() != loaded_types[i].GetIteratorEnd()) {
+ queue.emplace(dex_files_loaded[i], &loaded_types[i], /*from_loaded_oat*/true);
+ }
+ }
+ for (size_t i = 0; i < dex_files_unloaded.size(); ++i) {
+ if (unloaded_types[i].GetIterator() != unloaded_types[i].GetIteratorEnd()) {
+ queue.emplace(dex_files_unloaded[i], &unloaded_types[i], /*from_loaded_oat*/false);
+ }
+ }
+
+ // Now drain the queue.
+ while (!queue.empty()) {
+ // Modifying the top element is only safe if we pop right after.
+ DexFileAndClassPair compare_pop(queue.top());
+ queue.pop();
+
+ // Compare against the following elements.
+ while (!queue.empty()) {
+ DexFileAndClassPair top(queue.top());
+ if (strcmp(compare_pop.GetCachedDescriptor(), top.GetCachedDescriptor()) == 0) {
+ // Same descriptor. Check whether it's crossing old-oat-files to new-oat-files.
+ if (compare_pop.FromLoadedOat() != top.FromLoadedOat()) {
+ *error_msg =
+ StringPrintf("Found duplicated class when checking oat files: '%s' in %s and %s",
+ compare_pop.GetCachedDescriptor(),
+ compare_pop.GetDexFile()->GetLocation().c_str(),
+ top.GetDexFile()->GetLocation().c_str());
+ return true;
+ }
+ queue.pop();
+ AddNext(top, queue);
+ } else {
+ // Something else. Done here.
break;
}
- temp.pop();
- index += 2;
}
-
- // Check is successful if it made it through the queue and all the shared libraries.
- return temp.empty() && index == shared_libraries_split.size();
+ AddNext(compare_pop, queue);
}
+
return false;
}
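
The drain loop in CollisionCheck is a k-way merge: each dex file feeds its class descriptors in sorted order, so equal descriptors surface adjacently at the top of the heap, and a collision is an equal pair that crosses the loaded/unloaded boundary. A toy version of the same idea, with plain strings standing in for cached descriptors (all names illustrative, requires C++14 for std::greater<>):

    #include <functional>
    #include <queue>
    #include <string>
    #include <utility>
    #include <vector>

    using Item = std::pair<std::string, bool>;  // (descriptor, from_loaded_oat)

    // True if any descriptor occurs with both flag values, i.e. appears on both
    // the loaded and the not-yet-loaded side, the analogue of a collision above.
    static bool HasCrossDuplicate(std::vector<Item> items) {
      // greater<> turns priority_queue into a min-heap, so equal descriptors pop adjacently.
      std::priority_queue<Item, std::vector<Item>, std::greater<>> queue;
      for (Item& item : items) {
        queue.push(std::move(item));
      }
      while (!queue.empty()) {
        Item top = queue.top();
        queue.pop();
        while (!queue.empty() && queue.top().first == top.first) {
          if (queue.top().second != top.second) {
            return true;  // Same descriptor on both sides.
          }
          queue.pop();
        }
      }
      return false;
    }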
@@ -450,7 +534,7 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
DCHECK(oat_file != nullptr);
DCHECK(error_msg != nullptr);
- std::priority_queue<DexFileAndClassPair> queue;
+ std::vector<const DexFile*> dex_files_loaded;
// Try to get dex files from the given class loader. If the class loader is null, or we do
// not support one of the class loaders in the chain, conservatively compare against all
@@ -464,12 +548,12 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
Handle<mirror::ObjectArray<mirror::Object>> h_dex_elements =
hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(dex_elements));
if (h_class_loader != nullptr &&
- GetDexFilesFromClassLoader(soa, h_class_loader.Get(), &queue)) {
+ GetDexFilesFromClassLoader(soa, h_class_loader.Get(), &dex_files_loaded)) {
class_loader_ok = true;
// In this case, also take into account the dex_elements array, if given. We don't need to
// read it otherwise, as we'll compare against all open oat files anyways.
- GetDexFilesFromDexElementsArray(soa, h_dex_elements, &queue);
+ GetDexFilesFromDexElementsArray(soa, h_dex_elements, &dex_files_loaded);
} else if (h_class_loader != nullptr) {
VLOG(class_linker) << "Something unsupported with "
<< mirror::Class::PrettyClass(h_class_loader->GetClass());
@@ -486,10 +570,8 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
if (!class_loader_ok) {
// Add dex files from already loaded oat files, but skip boot.
- // Clean up the queue.
- while (!queue.empty()) {
- queue.pop();
- }
+ // Clean up the dex files.
+ dex_files_loaded.clear();
std::vector<const OatFile*> boot_oat_files = GetBootOatFiles();
// The same OatFile can be loaded multiple times at different addresses. In this case, we don't
@@ -503,10 +585,7 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
boot_oat_files.end() && location != oat_file->GetLocation() &&
unique_locations.find(location) == unique_locations.end()) {
unique_locations.insert(location);
- AddDexFilesFromOat(loaded_oat_file.get(),
- /*already_loaded*/true,
- &queue,
- /*out*/&opened_dex_files);
+ AddDexFilesFromOat(loaded_oat_file.get(), &dex_files_loaded, &opened_dex_files);
}
}
}
@@ -514,46 +593,15 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
// Exit if shared libraries are ok. Do a full duplicate classes check otherwise.
const std::string
shared_libraries(oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey));
- if (AreSharedLibrariesOk(shared_libraries, queue)) {
+ if (AreSharedLibrariesOk(shared_libraries, dex_files_loaded)) {
return false;
}
ScopedTrace st("Collision check");
-
// Add dex files from the oat file to check.
- AddDexFilesFromOat(oat_file, /*already_loaded*/false, &queue, &opened_dex_files);
-
- // Now drain the queue.
- while (!queue.empty()) {
- // Modifying the top element is only safe if we pop right after.
- DexFileAndClassPair compare_pop(queue.top());
- queue.pop();
-
- // Compare against the following elements.
- while (!queue.empty()) {
- DexFileAndClassPair top(queue.top());
-
- if (strcmp(compare_pop.GetCachedDescriptor(), top.GetCachedDescriptor()) == 0) {
- // Same descriptor. Check whether it's crossing old-oat-files to new-oat-files.
- if (compare_pop.FromLoadedOat() != top.FromLoadedOat()) {
- *error_msg =
- StringPrintf("Found duplicated class when checking oat files: '%s' in %s and %s",
- compare_pop.GetCachedDescriptor(),
- compare_pop.GetDexFile()->GetLocation().c_str(),
- top.GetDexFile()->GetLocation().c_str());
- return true;
- }
- queue.pop();
- AddNext(&top, &queue);
- } else {
- // Something else. Done here.
- break;
- }
- }
- AddNext(&compare_pop, &queue);
- }
-
- return false;
+ std::vector<const DexFile*> dex_files_unloaded;
+ AddDexFilesFromOat(oat_file, &dex_files_unloaded, &opened_dex_files);
+ return CollisionCheck(dex_files_loaded, dex_files_unloaded, error_msg);
}
std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
@@ -729,9 +777,6 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
}
}
- // TODO(calin): Consider optimizing this knowing that is useless to record the
- // use of fully compiled apks.
- Runtime::Current()->NotifyDexLoaded(dex_location);
return dex_files;
}
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index c016728fa5..5e0d4bdd07 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -1426,6 +1426,7 @@ extern "C" bool ArtPlugin_Initialize() {
ClassUtil::Register(&gEventHandler);
DumpUtil::Register(&gEventHandler);
SearchUtil::Register();
+ HeapUtil::Register();
runtime->GetJavaVM()->AddEnvironmentHook(GetEnvHandler);
@@ -1438,6 +1439,7 @@ extern "C" bool ArtPlugin_Deinitialize() {
ClassUtil::Unregister();
DumpUtil::Unregister();
SearchUtil::Unregister();
+ HeapUtil::Unregister();
return true;
}
diff --git a/runtime/openjdkjvmti/jvmti_weak_table-inl.h b/runtime/openjdkjvmti/jvmti_weak_table-inl.h
new file mode 100644
index 0000000000..f67fffccbb
--- /dev/null
+++ b/runtime/openjdkjvmti/jvmti_weak_table-inl.h
@@ -0,0 +1,389 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_
+#define ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_
+
+#include "jvmti_weak_table.h"
+
+#include <limits>
+
+#include "art_jvmti.h"
+#include "base/logging.h"
+#include "gc/allocation_listener.h"
+#include "instrumentation.h"
+#include "jni_env_ext-inl.h"
+#include "jvmti_allocator.h"
+#include "mirror/class.h"
+#include "mirror/object.h"
+#include "runtime.h"
+#include "ScopedLocalRef.h"
+
+namespace openjdkjvmti {
+
+template <typename T>
+void JvmtiWeakTable<T>::Lock() {
+ allow_disallow_lock_.ExclusiveLock(art::Thread::Current());
+}
+template <typename T>
+void JvmtiWeakTable<T>::Unlock() {
+ allow_disallow_lock_.ExclusiveUnlock(art::Thread::Current());
+}
+template <typename T>
+void JvmtiWeakTable<T>::AssertLocked() {
+ allow_disallow_lock_.AssertHeld(art::Thread::Current());
+}
+
+template <typename T>
+void JvmtiWeakTable<T>::UpdateTableWithReadBarrier() {
+ update_since_last_sweep_ = true;
+
+ auto WithReadBarrierUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root,
+ art::mirror::Object* original_obj ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return original_root.Read<art::kWithReadBarrier>();
+ };
+
+ UpdateTableWith<decltype(WithReadBarrierUpdater), kIgnoreNull>(WithReadBarrierUpdater);
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, T* result) {
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. Explicitly update the table once.
+ // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+ UpdateTableWithReadBarrier();
+ return GetTagLocked(self, obj, result);
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::Remove(art::mirror::Object* obj, /* out */ T* tag) {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+ Wait(self);
+
+ return RemoveLocked(self, obj, tag);
+}
+template <typename T>
+bool JvmtiWeakTable<T>::RemoveLocked(art::mirror::Object* obj, T* tag) {
+ art::Thread* self = art::Thread::Current();
+ allow_disallow_lock_.AssertHeld(self);
+ Wait(self);
+
+ return RemoveLocked(self, obj, tag);
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::RemoveLocked(art::Thread* self, art::mirror::Object* obj, T* tag) {
+ auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
+ if (it != tagged_objects_.end()) {
+ if (tag != nullptr) {
+ *tag = it->second;
+ }
+ tagged_objects_.erase(it);
+ return true;
+ }
+
+ if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. Explicitly update the table once.
+ // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+
+ // Update the table.
+ UpdateTableWithReadBarrier();
+
+ // And try again.
+ return RemoveLocked(self, obj, tag);
+ }
+
+ // Not in here.
+ return false;
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::Set(art::mirror::Object* obj, T new_tag) {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+ Wait(self);
+
+ return SetLocked(self, obj, new_tag);
+}
+template <typename T>
+bool JvmtiWeakTable<T>::SetLocked(art::mirror::Object* obj, T new_tag) {
+ art::Thread* self = art::Thread::Current();
+ allow_disallow_lock_.AssertHeld(self);
+ Wait(self);
+
+ return SetLocked(self, obj, new_tag);
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::SetLocked(art::Thread* self, art::mirror::Object* obj, T new_tag) {
+ auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
+ if (it != tagged_objects_.end()) {
+ it->second = new_tag;
+ return true;
+ }
+
+ if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. Explicitly update the table once.
+ // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+
+ // Update the table.
+ UpdateTableWithReadBarrier();
+
+ // And try again.
+ return SetLocked(self, obj, new_tag);
+ }
+
+ // New element.
+ auto insert_it = tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(obj), new_tag);
+ DCHECK(insert_it.second);
+ return false;
+}
+
+template <typename T>
+void JvmtiWeakTable<T>::Sweep(art::IsMarkedVisitor* visitor) {
+ if (DoesHandleNullOnSweep()) {
+ SweepImpl<true>(visitor);
+ } else {
+ SweepImpl<false>(visitor);
+ }
+
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. We explicitly update the table then
+ // to ensure we compare against to-space pointers. But we want to do this only once. Once
+ // sweeping is done, we know all objects are to-space pointers until the next GC cycle,
+ // so we re-enable the explicit update for the next marking.
+ update_since_last_sweep_ = false;
+}
+
+template <typename T>
+template <bool kHandleNull>
+void JvmtiWeakTable<T>::SweepImpl(art::IsMarkedVisitor* visitor) {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+
+ auto IsMarkedUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root ATTRIBUTE_UNUSED,
+ art::mirror::Object* original_obj) {
+ return visitor->IsMarked(original_obj);
+ };
+
+ UpdateTableWith<decltype(IsMarkedUpdater),
+ kHandleNull ? kCallHandleNull : kRemoveNull>(IsMarkedUpdater);
+}
+
+template <typename T>
+template <typename Updater, typename JvmtiWeakTable<T>::TableUpdateNullTarget kTargetNull>
+ALWAYS_INLINE inline void JvmtiWeakTable<T>::UpdateTableWith(Updater& updater) {
+  // We optimistically hope that elements will still be well-distributed when re-inserting them.
+  // So we play with the map mechanics and postpone rehashing. This avoids the need for a side
+  // vector and two passes.
+ float original_max_load_factor = tagged_objects_.max_load_factor();
+ tagged_objects_.max_load_factor(std::numeric_limits<float>::max());
+ // For checking that a max load-factor actually does what we expect.
+ size_t original_bucket_count = tagged_objects_.bucket_count();
+
+ for (auto it = tagged_objects_.begin(); it != tagged_objects_.end();) {
+ DCHECK(!it->first.IsNull());
+ art::mirror::Object* original_obj = it->first.template Read<art::kWithoutReadBarrier>();
+ art::mirror::Object* target_obj = updater(it->first, original_obj);
+ if (original_obj != target_obj) {
+ if (kTargetNull == kIgnoreNull && target_obj == nullptr) {
+ // Ignore null target, don't do anything.
+ } else {
+ T tag = it->second;
+ it = tagged_objects_.erase(it);
+ if (target_obj != nullptr) {
+ tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(target_obj), tag);
+ DCHECK_EQ(original_bucket_count, tagged_objects_.bucket_count());
+ } else if (kTargetNull == kCallHandleNull) {
+ HandleNullSweep(tag);
+ }
+ continue; // Iterator was implicitly updated by erase.
+ }
+ }
+ it++;
+ }
+
+ tagged_objects_.max_load_factor(original_max_load_factor);
+ // TODO: consider rehash here.
+}
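
A standalone illustration of the rehash-postponing trick UpdateTableWith relies on (toy types, not the patch's code): raising max_load_factor to FLT_MAX guarantees that re-inserting an updated key never grows the table, so the iterator returned by erase() stays valid, and the guard at the top of the loop mirrors the original_obj != target_obj check that keeps re-inserted entries from being rewritten again.

    #include <cassert>
    #include <limits>
    #include <unordered_map>

    // Canonicalizes every negative key to its absolute value without rehashing.
    // Assumes no key equals the negation of another, matching the patch's DCHECK
    // that re-insertion always succeeds.
    static void CanonicalizeKeys(std::unordered_map<int, int>& map) {
      float original_max_load_factor = map.max_load_factor();
      map.max_load_factor(std::numeric_limits<float>::max());  // Inserts can no longer grow the table.
      size_t original_bucket_count = map.bucket_count();
      for (auto it = map.begin(); it != map.end();) {
        if (it->first >= 0) {  // Already canonical; mirrors original_obj == target_obj.
          ++it;
          continue;
        }
        int value = it->second;
        int new_key = -it->first;
        it = map.erase(it);  // erase() hands back the next valid iterator.
        map.emplace(new_key, value);
        assert(map.bucket_count() == original_bucket_count);  // No rehash happened.
      }
      map.max_load_factor(original_max_load_factor);
    }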
+
+template <typename T>
+template <typename Storage, class Allocator>
+struct JvmtiWeakTable<T>::ReleasableContainer {
+ using allocator_type = Allocator;
+
+ explicit ReleasableContainer(const allocator_type& alloc, size_t reserve = 10)
+ : allocator(alloc),
+ data(reserve > 0 ? allocator.allocate(reserve) : nullptr),
+ size(0),
+ capacity(reserve) {
+ }
+
+ ~ReleasableContainer() {
+ if (data != nullptr) {
+ allocator.deallocate(data, capacity);
+ capacity = 0;
+ size = 0;
+ }
+ }
+
+ Storage* Release() {
+ Storage* tmp = data;
+
+ data = nullptr;
+ size = 0;
+ capacity = 0;
+
+ return tmp;
+ }
+
+ void Resize(size_t new_capacity) {
+ CHECK_GT(new_capacity, capacity);
+
+ Storage* tmp = allocator.allocate(new_capacity);
+ DCHECK(tmp != nullptr);
+ if (data != nullptr) {
+ memcpy(tmp, data, sizeof(Storage) * size);
+ }
+ Storage* old = data;
+ data = tmp;
+ allocator.deallocate(old, capacity);
+ capacity = new_capacity;
+ }
+
+ void Pushback(const Storage& elem) {
+ if (size == capacity) {
+ size_t new_capacity = 2 * capacity + 1;
+ Resize(new_capacity);
+ }
+ data[size++] = elem;
+ }
+
+ Allocator allocator;
+ Storage* data;
+ size_t size;
+ size_t capacity;
+};
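
ReleasableContainer exists because JVMTI out-parameters hand the agent ownership of raw, jvmtiEnv-allocated buffers, which a std::vector cannot surrender. A hedged sketch of the same ownership handoff, using std::allocator and illustrative names:

    #include <cstddef>
    #include <memory>

    // Grows like a vector but can give up its buffer: after Release() the
    // destructor has nothing to free, so the caller owns the allocation.
    struct ReleasableIntBuffer {
      std::allocator<int> allocator;
      int* data = nullptr;
      size_t size = 0;
      size_t capacity = 0;

      ~ReleasableIntBuffer() {
        if (data != nullptr) allocator.deallocate(data, capacity);
      }
      void Pushback(int v) {
        if (size == capacity) {
          size_t new_capacity = 2 * capacity + 1;
          int* tmp = allocator.allocate(new_capacity);
          for (size_t i = 0; i < size; ++i) tmp[i] = data[i];
          if (data != nullptr) allocator.deallocate(data, capacity);
          data = tmp;
          capacity = new_capacity;
        }
        data[size++] = v;
      }
      int* Release() {  // Ownership passes to the caller.
        int* tmp = data;
        data = nullptr;
        size = capacity = 0;
        return tmp;
      }
    };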
+
+template <typename T>
+jvmtiError JvmtiWeakTable<T>::GetTaggedObjects(jvmtiEnv* jvmti_env,
+ jint tag_count,
+ const T* tags,
+ jint* count_ptr,
+ jobject** object_result_ptr,
+ T** tag_result_ptr) {
+ if (tag_count < 0) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+  // Check the out-pointers and tags for null before tags is dereferenced below.
+  if (tags == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+  if (count_ptr == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+  if (tag_count > 0) {
+    for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
+      if (tags[i] == 0) {
+        return ERR(ILLEGAL_ARGUMENT);
+      }
+    }
+  }
+
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+ Wait(self);
+
+ art::JNIEnvExt* jni_env = self->GetJniEnv();
+
+ constexpr size_t kDefaultSize = 10;
+ size_t initial_object_size;
+ size_t initial_tag_size;
+ if (tag_count == 0) {
+ initial_object_size = (object_result_ptr != nullptr) ? tagged_objects_.size() : 0;
+ initial_tag_size = (tag_result_ptr != nullptr) ? tagged_objects_.size() : 0;
+ } else {
+ initial_object_size = initial_tag_size = kDefaultSize;
+ }
+ JvmtiAllocator<void> allocator(jvmti_env);
+ ReleasableContainer<jobject, JvmtiAllocator<jobject>> selected_objects(allocator,
+ initial_object_size);
+ ReleasableContainer<T, JvmtiAllocator<T>> selected_tags(allocator, initial_tag_size);
+
+ size_t count = 0;
+ for (auto& pair : tagged_objects_) {
+ bool select;
+ if (tag_count > 0) {
+ select = false;
+ for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
+ if (tags[i] == pair.second) {
+ select = true;
+ break;
+ }
+ }
+ } else {
+ select = true;
+ }
+
+ if (select) {
+ art::mirror::Object* obj = pair.first.template Read<art::kWithReadBarrier>();
+ if (obj != nullptr) {
+ count++;
+ if (object_result_ptr != nullptr) {
+ selected_objects.Pushback(jni_env->AddLocalReference<jobject>(obj));
+ }
+ if (tag_result_ptr != nullptr) {
+ selected_tags.Pushback(pair.second);
+ }
+ }
+ }
+ }
+
+ if (object_result_ptr != nullptr) {
+ *object_result_ptr = selected_objects.Release();
+ }
+ if (tag_result_ptr != nullptr) {
+ *tag_result_ptr = selected_tags.Release();
+ }
+ *count_ptr = static_cast<jint>(count);
+ return ERR(NONE);
+}
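
For orientation, this method backs the standard JVMTI GetObjectsWithTags call. The snippet below is an assumed agent-side caller, where jvmti is a valid jvmtiEnv* and tag 42 was applied earlier with SetTag:

    jlong tags[] = {42};
    jint count = 0;
    jobject* objects = nullptr;
    jlong* result_tags = nullptr;
    jvmtiError err = jvmti->GetObjectsWithTags(1, tags, &count, &objects, &result_tags);
    if (err == JVMTI_ERROR_NONE) {
      // objects[0..count) are the live objects currently tagged 42.
      jvmti->Deallocate(reinterpret_cast<unsigned char*>(objects));
      jvmti->Deallocate(reinterpret_cast<unsigned char*>(result_tags));
    }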
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_
diff --git a/runtime/openjdkjvmti/jvmti_weak_table.h b/runtime/openjdkjvmti/jvmti_weak_table.h
new file mode 100644
index 0000000000..ae3612208c
--- /dev/null
+++ b/runtime/openjdkjvmti/jvmti_weak_table.h
@@ -0,0 +1,219 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_
+#define ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_
+
+#include <unordered_map>
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc/system_weak.h"
+#include "gc_root-inl.h"
+#include "globals.h"
+#include "jvmti.h"
+#include "mirror/object.h"
+#include "thread-inl.h"
+
+namespace openjdkjvmti {
+
+class EventHandler;
+
+// A system-weak container mapping objects to elements of the template type. This corresponds
+// to a weak hash map. For historical reasons the stored value is called "tag."
+template <typename T>
+class JvmtiWeakTable : public art::gc::SystemWeakHolder {
+ public:
+ JvmtiWeakTable()
+ : art::gc::SystemWeakHolder(kTaggingLockLevel),
+ update_since_last_sweep_(false) {
+ }
+
+ // Remove the mapping for the given object, returning whether such a mapping existed (and the old
+ // value).
+ bool Remove(art::mirror::Object* obj, /* out */ T* tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+ bool RemoveLocked(art::mirror::Object* obj, /* out */ T* tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ // Set the mapping for the given object. Returns true if this overwrites an already existing
+ // mapping.
+ virtual bool Set(art::mirror::Object* obj, T tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+ virtual bool SetLocked(art::mirror::Object* obj, T tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ // Return the value associated with the given object. Returns true if the mapping exists, false
+ // otherwise.
+ bool GetTag(art::mirror::Object* obj, /* out */ T* result)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_) {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+ Wait(self);
+
+ return GetTagLocked(self, obj, result);
+ }
+ bool GetTagLocked(art::mirror::Object* obj, /* out */ T* result)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_) {
+ art::Thread* self = art::Thread::Current();
+ allow_disallow_lock_.AssertHeld(self);
+ Wait(self);
+
+ return GetTagLocked(self, obj, result);
+ }
+
+ // Sweep the container. DO NOT CALL MANUALLY.
+ void Sweep(art::IsMarkedVisitor* visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+
+  // Return all objects mapped to one of the given tags (or every tagged object when
+  // tag_count is zero).
+ jvmtiError GetTaggedObjects(jvmtiEnv* jvmti_env,
+ jint tag_count,
+ const T* tags,
+ /* out */ jint* count_ptr,
+ /* out */ jobject** object_result_ptr,
+ /* out */ T** tag_result_ptr)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+
+ // Locking functions, to allow coarse-grained locking and amortization.
+ void Lock() ACQUIRE(allow_disallow_lock_);
+ void Unlock() RELEASE(allow_disallow_lock_);
+ void AssertLocked() ASSERT_CAPABILITY(allow_disallow_lock_);
+
+ protected:
+ // Should HandleNullSweep be called when Sweep detects the release of an object?
+ virtual bool DoesHandleNullOnSweep() {
+ return false;
+ }
+ // If DoesHandleNullOnSweep returns true, this function will be called.
+ virtual void HandleNullSweep(T tag ATTRIBUTE_UNUSED) {}
+
+ private:
+ bool SetLocked(art::Thread* self, art::mirror::Object* obj, T tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ bool RemoveLocked(art::Thread* self, art::mirror::Object* obj, /* out */ T* tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ bool GetTagLocked(art::Thread* self, art::mirror::Object* obj, /* out */ T* result)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_) {
+ auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
+ if (it != tagged_objects_.end()) {
+ *result = it->second;
+ return true;
+ }
+
+ // Performance optimization: To avoid multiple table updates, ensure that during GC we
+ // only update once. See the comment on the implementation of GetTagSlowPath.
+ if (art::kUseReadBarrier &&
+ self != nullptr &&
+ self->GetIsGcMarking() &&
+ !update_since_last_sweep_) {
+ return GetTagSlowPath(self, obj, result);
+ }
+
+ return false;
+ }
+
+ // Slow-path for GetTag. We didn't find the object, but we might be storing from-pointers and
+ // are asked to retrieve with a to-pointer.
+ bool GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, /* out */ T* result)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ // Update the table by doing read barriers on each element, ensuring that to-space pointers
+ // are stored.
+ void UpdateTableWithReadBarrier()
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ template <bool kHandleNull>
+ void SweepImpl(art::IsMarkedVisitor* visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+
+ enum TableUpdateNullTarget {
+ kIgnoreNull,
+ kRemoveNull,
+ kCallHandleNull
+ };
+
+ template <typename Updater, TableUpdateNullTarget kTargetNull>
+ void UpdateTableWith(Updater& updater)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ template <typename Storage, class Allocator = std::allocator<T>>
+ struct ReleasableContainer;
+
+ struct HashGcRoot {
+ size_t operator()(const art::GcRoot<art::mirror::Object>& r) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return reinterpret_cast<uintptr_t>(r.Read<art::kWithoutReadBarrier>());
+ }
+ };
+
+ struct EqGcRoot {
+ bool operator()(const art::GcRoot<art::mirror::Object>& r1,
+ const art::GcRoot<art::mirror::Object>& r2) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return r1.Read<art::kWithoutReadBarrier>() == r2.Read<art::kWithoutReadBarrier>();
+ }
+ };
+
+ // The tag table is used when visiting roots. So it needs to have a low lock level.
+ static constexpr art::LockLevel kTaggingLockLevel =
+ static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1);
+
+ std::unordered_map<art::GcRoot<art::mirror::Object>,
+ T,
+ HashGcRoot,
+ EqGcRoot> tagged_objects_
+ GUARDED_BY(allow_disallow_lock_)
+ GUARDED_BY(art::Locks::mutator_lock_);
+ // To avoid repeatedly scanning the whole table, remember if we did that since the last sweep.
+ bool update_since_last_sweep_;
+};
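
A minimal, hypothetical specialization, to show the intended extension points: store any small POD per object, and optionally get a callback when a tracked object is swept away. It uses the codebase's FINAL/OVERRIDE macros; CacheEntry and CacheTable are made-up names.

    struct CacheEntry {
      size_t cached_value;
    };

    class CacheTable FINAL : public JvmtiWeakTable<CacheEntry> {
     protected:
      bool DoesHandleNullOnSweep() OVERRIDE { return true; }
      void HandleNullSweep(CacheEntry entry) OVERRIDE {
        // The keyed object was collected; entry.cached_value is now orphaned.
      }
    };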
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_
diff --git a/runtime/openjdkjvmti/object_tagging.cc b/runtime/openjdkjvmti/object_tagging.cc
index b27c2a3834..4215588a09 100644
--- a/runtime/openjdkjvmti/object_tagging.cc
+++ b/runtime/openjdkjvmti/object_tagging.cc
@@ -33,355 +33,34 @@
#include <limits>
-#include "art_jvmti.h"
-#include "base/logging.h"
#include "events-inl.h"
-#include "gc/allocation_listener.h"
-#include "instrumentation.h"
-#include "jni_env_ext-inl.h"
-#include "jvmti_allocator.h"
-#include "mirror/class.h"
-#include "mirror/object.h"
-#include "runtime.h"
-#include "ScopedLocalRef.h"
+#include "jvmti_weak_table-inl.h"
namespace openjdkjvmti {
-void ObjectTagTable::Lock() {
- allow_disallow_lock_.ExclusiveLock(art::Thread::Current());
-}
-void ObjectTagTable::Unlock() {
- allow_disallow_lock_.ExclusiveUnlock(art::Thread::Current());
-}
-void ObjectTagTable::AssertLocked() {
- allow_disallow_lock_.AssertHeld(art::Thread::Current());
-}
-
-void ObjectTagTable::UpdateTableWithReadBarrier() {
- update_since_last_sweep_ = true;
-
- auto WithReadBarrierUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root,
- art::mirror::Object* original_obj ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return original_root.Read<art::kWithReadBarrier>();
- };
-
- UpdateTableWith<decltype(WithReadBarrierUpdater), kIgnoreNull>(WithReadBarrierUpdater);
-}
-
-bool ObjectTagTable::GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, jlong* result) {
- // Under concurrent GC, there is a window between moving objects and sweeping of system
- // weaks in which mutators are active. We may receive a to-space object pointer in obj,
- // but still have from-space pointers in the table. Explicitly update the table once.
- // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
- UpdateTableWithReadBarrier();
- return GetTagLocked(self, obj, result);
-}
-
-void ObjectTagTable::Add(art::mirror::Object* obj, jlong tag) {
- // Same as Set(), as we don't have duplicates in an unordered_map.
- Set(obj, tag);
-}
-
-bool ObjectTagTable::Remove(art::mirror::Object* obj, jlong* tag) {
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
- Wait(self);
-
- return RemoveLocked(self, obj, tag);
-}
-bool ObjectTagTable::RemoveLocked(art::mirror::Object* obj, jlong* tag) {
- art::Thread* self = art::Thread::Current();
- allow_disallow_lock_.AssertHeld(self);
- Wait(self);
-
- return RemoveLocked(self, obj, tag);
-}
-
-bool ObjectTagTable::RemoveLocked(art::Thread* self, art::mirror::Object* obj, jlong* tag) {
- auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
- if (it != tagged_objects_.end()) {
- if (tag != nullptr) {
- *tag = it->second;
- }
- tagged_objects_.erase(it);
- return true;
- }
-
- if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
- // Under concurrent GC, there is a window between moving objects and sweeping of system
- // weaks in which mutators are active. We may receive a to-space object pointer in obj,
- // but still have from-space pointers in the table. Explicitly update the table once.
- // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
-
- // Update the table.
- UpdateTableWithReadBarrier();
-
- // And try again.
- return RemoveLocked(self, obj, tag);
- }
-
- // Not in here.
- return false;
-}
+// Instantiate for jlong = JVMTI tags.
+template class JvmtiWeakTable<jlong>;
bool ObjectTagTable::Set(art::mirror::Object* obj, jlong new_tag) {
if (new_tag == 0) {
jlong tmp;
return Remove(obj, &tmp);
}
-
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
- Wait(self);
-
- return SetLocked(self, obj, new_tag);
+ return JvmtiWeakTable<jlong>::Set(obj, new_tag);
}
bool ObjectTagTable::SetLocked(art::mirror::Object* obj, jlong new_tag) {
if (new_tag == 0) {
jlong tmp;
return RemoveLocked(obj, &tmp);
}
-
- art::Thread* self = art::Thread::Current();
- allow_disallow_lock_.AssertHeld(self);
- Wait(self);
-
- return SetLocked(self, obj, new_tag);
-}
-
-bool ObjectTagTable::SetLocked(art::Thread* self, art::mirror::Object* obj, jlong new_tag) {
- auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
- if (it != tagged_objects_.end()) {
- it->second = new_tag;
- return true;
- }
-
- if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
- // Under concurrent GC, there is a window between moving objects and sweeping of system
- // weaks in which mutators are active. We may receive a to-space object pointer in obj,
- // but still have from-space pointers in the table. Explicitly update the table once.
- // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
-
- // Update the table.
- UpdateTableWithReadBarrier();
-
- // And try again.
- return SetLocked(self, obj, new_tag);
- }
-
- // New element.
- auto insert_it = tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(obj), new_tag);
- DCHECK(insert_it.second);
- return false;
+ return JvmtiWeakTable<jlong>::SetLocked(obj, new_tag);
}
-void ObjectTagTable::Sweep(art::IsMarkedVisitor* visitor) {
- if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kObjectFree)) {
- SweepImpl<true>(visitor);
- } else {
- SweepImpl<false>(visitor);
- }
-
- // Under concurrent GC, there is a window between moving objects and sweeping of system
- // weaks in which mutators are active. We may receive a to-space object pointer in obj,
- // but still have from-space pointers in the table. We explicitly update the table then
- // to ensure we compare against to-space pointers. But we want to do this only once. Once
- // sweeping is done, we know all objects are to-space pointers until the next GC cycle,
- // so we re-enable the explicit update for the next marking.
- update_since_last_sweep_ = false;
+bool ObjectTagTable::DoesHandleNullOnSweep() {
+ return event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kObjectFree);
}
-
-template <bool kHandleNull>
-void ObjectTagTable::SweepImpl(art::IsMarkedVisitor* visitor) {
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
-
- auto IsMarkedUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root ATTRIBUTE_UNUSED,
- art::mirror::Object* original_obj) {
- return visitor->IsMarked(original_obj);
- };
-
- UpdateTableWith<decltype(IsMarkedUpdater),
- kHandleNull ? kCallHandleNull : kRemoveNull>(IsMarkedUpdater);
-}
-
void ObjectTagTable::HandleNullSweep(jlong tag) {
event_handler_->DispatchEvent<ArtJvmtiEvent::kObjectFree>(nullptr, tag);
}
-template <typename T, ObjectTagTable::TableUpdateNullTarget kTargetNull>
-ALWAYS_INLINE inline void ObjectTagTable::UpdateTableWith(T& updater) {
- // We optimistically hope that elements will still be well-distributed when re-inserting them.
- // So play with the map mechanics, and postpone rehashing. This avoids the need of a side
- // vector and two passes.
- float original_max_load_factor = tagged_objects_.max_load_factor();
- tagged_objects_.max_load_factor(std::numeric_limits<float>::max());
- // For checking that a max load-factor actually does what we expect.
- size_t original_bucket_count = tagged_objects_.bucket_count();
-
- for (auto it = tagged_objects_.begin(); it != tagged_objects_.end();) {
- DCHECK(!it->first.IsNull());
- art::mirror::Object* original_obj = it->first.Read<art::kWithoutReadBarrier>();
- art::mirror::Object* target_obj = updater(it->first, original_obj);
- if (original_obj != target_obj) {
- if (kTargetNull == kIgnoreNull && target_obj == nullptr) {
- // Ignore null target, don't do anything.
- } else {
- jlong tag = it->second;
- it = tagged_objects_.erase(it);
- if (target_obj != nullptr) {
- tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(target_obj), tag);
- DCHECK_EQ(original_bucket_count, tagged_objects_.bucket_count());
- } else if (kTargetNull == kCallHandleNull) {
- HandleNullSweep(tag);
- }
- continue; // Iterator was implicitly updated by erase.
- }
- }
- it++;
- }
-
- tagged_objects_.max_load_factor(original_max_load_factor);
- // TODO: consider rehash here.
-}
-
-template <typename T, class Allocator = std::allocator<T>>
-struct ReleasableContainer {
- using allocator_type = Allocator;
-
- explicit ReleasableContainer(const allocator_type& alloc, size_t reserve = 10)
- : allocator(alloc),
- data(reserve > 0 ? allocator.allocate(reserve) : nullptr),
- size(0),
- capacity(reserve) {
- }
-
- ~ReleasableContainer() {
- if (data != nullptr) {
- allocator.deallocate(data, capacity);
- capacity = 0;
- size = 0;
- }
- }
-
- T* Release() {
- T* tmp = data;
-
- data = nullptr;
- size = 0;
- capacity = 0;
-
- return tmp;
- }
-
- void Resize(size_t new_capacity) {
- CHECK_GT(new_capacity, capacity);
-
- T* tmp = allocator.allocate(new_capacity);
- DCHECK(tmp != nullptr);
- if (data != nullptr) {
- memcpy(tmp, data, sizeof(T) * size);
- }
- T* old = data;
- data = tmp;
- allocator.deallocate(old, capacity);
- capacity = new_capacity;
- }
-
- void Pushback(const T& elem) {
- if (size == capacity) {
- size_t new_capacity = 2 * capacity + 1;
- Resize(new_capacity);
- }
- data[size++] = elem;
- }
-
- Allocator allocator;
- T* data;
- size_t size;
- size_t capacity;
-};
-
-jvmtiError ObjectTagTable::GetTaggedObjects(jvmtiEnv* jvmti_env,
- jint tag_count,
- const jlong* tags,
- jint* count_ptr,
- jobject** object_result_ptr,
- jlong** tag_result_ptr) {
- if (tag_count < 0) {
- return ERR(ILLEGAL_ARGUMENT);
- }
- if (tag_count > 0) {
- for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
- if (tags[i] == 0) {
- return ERR(ILLEGAL_ARGUMENT);
- }
- }
- }
- if (tags == nullptr) {
- return ERR(NULL_POINTER);
- }
- if (count_ptr == nullptr) {
- return ERR(NULL_POINTER);
- }
-
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
- Wait(self);
-
- art::JNIEnvExt* jni_env = self->GetJniEnv();
-
- constexpr size_t kDefaultSize = 10;
- size_t initial_object_size;
- size_t initial_tag_size;
- if (tag_count == 0) {
- initial_object_size = (object_result_ptr != nullptr) ? tagged_objects_.size() : 0;
- initial_tag_size = (tag_result_ptr != nullptr) ? tagged_objects_.size() : 0;
- } else {
- initial_object_size = initial_tag_size = kDefaultSize;
- }
- JvmtiAllocator<void> allocator(jvmti_env);
- ReleasableContainer<jobject, JvmtiAllocator<jobject>> selected_objects(allocator, initial_object_size);
- ReleasableContainer<jlong, JvmtiAllocator<jlong>> selected_tags(allocator, initial_tag_size);
-
- size_t count = 0;
- for (auto& pair : tagged_objects_) {
- bool select;
- if (tag_count > 0) {
- select = false;
- for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
- if (tags[i] == pair.second) {
- select = true;
- break;
- }
- }
- } else {
- select = true;
- }
-
- if (select) {
- art::mirror::Object* obj = pair.first.Read<art::kWithReadBarrier>();
- if (obj != nullptr) {
- count++;
- if (object_result_ptr != nullptr) {
- selected_objects.Pushback(jni_env->AddLocalReference<jobject>(obj));
- }
- if (tag_result_ptr != nullptr) {
- selected_tags.Pushback(pair.second);
- }
- }
- }
- }
-
- if (object_result_ptr != nullptr) {
- *object_result_ptr = selected_objects.Release();
- }
- if (tag_result_ptr != nullptr) {
- *tag_result_ptr = selected_tags.Release();
- }
- *count_ptr = static_cast<jint>(count);
- return ERR(NONE);
-}
-
} // namespace openjdkjvmti
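
The `template class JvmtiWeakTable<jlong>;` line above is ordinary explicit instantiation: the template's definitions live in jvmti_weak_table-inl.h, and one .cc file emits the jlong instance so translation units that only see the .h still link. The generic shape of the pattern, with hypothetical file names:

    // twice-inl.h: full template definition.
    template <typename T>
    T Twice(T value) {
      return value + value;
    }

    // twice.cc: include the definition once and instantiate what users need.
    #include "twice-inl.h"
    template int Twice<int>(int);  // Emits Twice<int> into this object file.

    // user.cc: declares only; the linker resolves to the instantiation above.
    template <typename T> T Twice(T value);
    int four = Twice(2);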
diff --git a/runtime/openjdkjvmti/object_tagging.h b/runtime/openjdkjvmti/object_tagging.h
index 0296f1ad80..b5a601cac9 100644
--- a/runtime/openjdkjvmti/object_tagging.h
+++ b/runtime/openjdkjvmti/object_tagging.h
@@ -1,17 +1,32 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
*/
#ifndef ART_RUNTIME_OPENJDKJVMTI_OBJECT_TAGGING_H_
@@ -20,62 +35,27 @@
#include <unordered_map>
#include "base/mutex.h"
-#include "gc/system_weak.h"
-#include "gc_root-inl.h"
#include "globals.h"
#include "jvmti.h"
+#include "jvmti_weak_table.h"
#include "mirror/object.h"
-#include "thread-inl.h"
namespace openjdkjvmti {
class EventHandler;
-class ObjectTagTable : public art::gc::SystemWeakHolder {
+class ObjectTagTable FINAL : public JvmtiWeakTable<jlong> {
public:
- explicit ObjectTagTable(EventHandler* event_handler)
- : art::gc::SystemWeakHolder(kTaggingLockLevel),
- update_since_last_sweep_(false),
- event_handler_(event_handler) {
+ explicit ObjectTagTable(EventHandler* event_handler) : event_handler_(event_handler) {
}
- void Add(art::mirror::Object* obj, jlong tag)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_);
-
- bool Remove(art::mirror::Object* obj, jlong* tag)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_);
- bool RemoveLocked(art::mirror::Object* obj, jlong* tag)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_);
-
- bool Set(art::mirror::Object* obj, jlong tag)
+ bool Set(art::mirror::Object* obj, jlong tag) OVERRIDE
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_);
- bool SetLocked(art::mirror::Object* obj, jlong tag)
+ bool SetLocked(art::mirror::Object* obj, jlong tag) OVERRIDE
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
- bool GetTag(art::mirror::Object* obj, jlong* result)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_) {
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
- Wait(self);
-
- return GetTagLocked(self, obj, result);
- }
- bool GetTagLocked(art::mirror::Object* obj, jlong* result)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_) {
- art::Thread* self = art::Thread::Current();
- allow_disallow_lock_.AssertHeld(self);
- Wait(self);
-
- return GetTagLocked(self, obj, result);
- }
-
jlong GetTagOrZero(art::mirror::Object* obj)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
@@ -91,108 +71,11 @@ class ObjectTagTable : public art::gc::SystemWeakHolder {
return tmp;
}
- void Sweep(art::IsMarkedVisitor* visitor)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_);
-
- jvmtiError GetTaggedObjects(jvmtiEnv* jvmti_env,
- jint tag_count,
- const jlong* tags,
- jint* count_ptr,
- jobject** object_result_ptr,
- jlong** tag_result_ptr)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_);
-
- void Lock() ACQUIRE(allow_disallow_lock_);
- void Unlock() RELEASE(allow_disallow_lock_);
- void AssertLocked() ASSERT_CAPABILITY(allow_disallow_lock_);
+ protected:
+ bool DoesHandleNullOnSweep() OVERRIDE;
+ void HandleNullSweep(jlong tag) OVERRIDE;
private:
- bool SetLocked(art::Thread* self, art::mirror::Object* obj, jlong tag)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_);
-
- bool RemoveLocked(art::Thread* self, art::mirror::Object* obj, jlong* tag)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_);
-
- bool GetTagLocked(art::Thread* self, art::mirror::Object* obj, jlong* result)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_) {
- auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
- if (it != tagged_objects_.end()) {
- *result = it->second;
- return true;
- }
-
- if (art::kUseReadBarrier &&
- self != nullptr &&
- self->GetIsGcMarking() &&
- !update_since_last_sweep_) {
- return GetTagSlowPath(self, obj, result);
- }
-
- return false;
- }
-
- // Slow-path for GetTag. We didn't find the object, but we might be storing from-pointers and
- // are asked to retrieve with a to-pointer.
- bool GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, jlong* result)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_);
-
- // Update the table by doing read barriers on each element, ensuring that to-space pointers
- // are stored.
- void UpdateTableWithReadBarrier()
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_);
-
- template <bool kHandleNull>
- void SweepImpl(art::IsMarkedVisitor* visitor)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_);
- void HandleNullSweep(jlong tag);
-
- enum TableUpdateNullTarget {
- kIgnoreNull,
- kRemoveNull,
- kCallHandleNull
- };
-
- template <typename T, TableUpdateNullTarget kTargetNull>
- void UpdateTableWith(T& updater)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_);
-
- struct HashGcRoot {
- size_t operator()(const art::GcRoot<art::mirror::Object>& r) const
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return reinterpret_cast<uintptr_t>(r.Read<art::kWithoutReadBarrier>());
- }
- };
-
- struct EqGcRoot {
- bool operator()(const art::GcRoot<art::mirror::Object>& r1,
- const art::GcRoot<art::mirror::Object>& r2) const
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return r1.Read<art::kWithoutReadBarrier>() == r2.Read<art::kWithoutReadBarrier>();
- }
- };
-
- // The tag table is used when visiting roots. So it needs to have a low lock level.
- static constexpr art::LockLevel kTaggingLockLevel =
- static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1);
-
- std::unordered_map<art::GcRoot<art::mirror::Object>,
- jlong,
- HashGcRoot,
- EqGcRoot> tagged_objects_
- GUARDED_BY(allow_disallow_lock_)
- GUARDED_BY(art::Locks::mutator_lock_);
- // To avoid repeatedly scanning the whole table, remember if we did that since the last sweep.
- bool update_since_last_sweep_;
-
EventHandler* event_handler_;
};
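
Seen from an agent, ObjectTagTable is simply the storage behind the standard JVMTI tagging calls; note the tag-zero-means-remove rule implemented by Set above. In the snippet, jvmti and obj are assumed valid:

    jvmti->SetTag(obj, 42);    // Lands in ObjectTagTable::Set.
    jlong tag = 0;
    jvmti->GetTag(obj, &tag);  // tag is now 42.
    jvmti->SetTag(obj, 0);     // Tag zero removes the mapping entirely.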
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index d52f0ea290..c2495e3a6b 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -25,6 +25,7 @@
#include "gc_root-inl.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
+#include "jvmti_weak_table-inl.h"
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -41,6 +42,21 @@ namespace openjdkjvmti {
namespace {
+struct IndexCache {
+  // The number of interface fields implemented by the class. These fields occupy the first
+  // indices, so the count is a prefix length for all assigned field indices.
+ size_t interface_fields;
+
+  // It would be nice to also cache the following, but it is complicated to wire into the
+  // generic visit:
+ // The number of fields in interfaces and superclasses. This is the first index assigned to
+ // fields of the class.
+ // size_t superclass_fields;
+};
+using IndexCachingTable = JvmtiWeakTable<IndexCache>;
+
+static IndexCachingTable gIndexCachingTable;
+
// Report the contents of a string, if a callback is set.
jint ReportString(art::ObjPtr<art::mirror::Object> obj,
jvmtiEnv* env,
@@ -402,6 +418,12 @@ class FieldVisitor {
// "non-marker" interfaces (= interfaces with methods).
static size_t CountInterfaceFields(art::ObjPtr<art::mirror::Class> klass)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // Do we have a cached value?
+ IndexCache tmp;
+ if (gIndexCachingTable.GetTag(klass.Ptr(), &tmp)) {
+ return tmp.interface_fields;
+ }
+
size_t count = 0;
auto visitor = [&count](art::ObjPtr<art::mirror::Class> inf_klass)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -410,9 +432,12 @@ class FieldVisitor {
count += inf_klass->NumStaticFields();
};
RecursiveInterfaceVisit<decltype(visitor)>::VisitStatic(art::Thread::Current(), klass, visitor);
- return count;
- // TODO: Implement caching.
+ // Store this into the cache.
+ tmp.interface_fields = count;
+ gIndexCachingTable.Set(klass.Ptr(), tmp);
+
+ return count;
}
UserData* user_data_;
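
The hunk above is memoization keyed on the class object itself: probe the weak table, compute on a miss, store, return. Because JvmtiWeakTable holds its keys weakly, cache entries die with their classes, so the cache cannot leak or pin a class loader. Schematically, restating the patch's logic with ComputeByVisitingInterfaces as a stand-in for the recursive visit:

    size_t CountInterfaceFieldsCached(art::ObjPtr<art::mirror::Class> klass) {
      IndexCache tmp;
      if (gIndexCachingTable.GetTag(klass.Ptr(), &tmp)) {
        return tmp.interface_fields;                              // Cache hit.
      }
      tmp.interface_fields = ComputeByVisitingInterfaces(klass);  // Slow path.
      gIndexCachingTable.Set(klass.Ptr(), tmp);                   // Remember for next time.
      return tmp.interface_fields;
    }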
@@ -618,6 +643,14 @@ struct HeapFilter {
} // namespace
+void HeapUtil::Register() {
+ art::Runtime::Current()->AddSystemWeakHolder(&gIndexCachingTable);
+}
+
+void HeapUtil::Unregister() {
+ art::Runtime::Current()->RemoveSystemWeakHolder(&gIndexCachingTable);
+}
+
struct IterateThroughHeapData {
IterateThroughHeapData(HeapUtil* _heap_util,
jvmtiEnv* _env,
@@ -1004,7 +1037,6 @@ class FollowReferencesHelper FINAL {
jvmtiHeapReferenceInfo reference_info;
memset(&reference_info, 0, sizeof(reference_info));
- // TODO: Implement spec-compliant numbering.
reference_info.field.index = field_index;
jvmtiHeapReferenceKind kind =
diff --git a/runtime/openjdkjvmti/ti_heap.h b/runtime/openjdkjvmti/ti_heap.h
index 72ee097566..dccecb4aa3 100644
--- a/runtime/openjdkjvmti/ti_heap.h
+++ b/runtime/openjdkjvmti/ti_heap.h
@@ -49,6 +49,9 @@ class HeapUtil {
return tags_;
}
+ static void Register();
+ static void Unregister();
+
private:
ObjectTagTable* tags_;
};
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index c4d20c007e..a173a4abdd 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -325,12 +325,19 @@ jvmtiError Redefiner::RedefineClasses(ArtJvmTiEnv* env,
std::vector<ArtClassDefinition> def_vector;
def_vector.reserve(class_count);
for (jint i = 0; i < class_count; i++) {
+ jboolean is_modifiable = JNI_FALSE;
+ jvmtiError res = env->IsModifiableClass(definitions[i].klass, &is_modifiable);
+ if (res != OK) {
+ return res;
+ } else if (!is_modifiable) {
+ return ERR(UNMODIFIABLE_CLASS);
+ }
// We make a copy of the class_bytes to pass into the retransformation.
// This makes cleanup easier (since we unambiguously own the bytes) and also is useful since we
// will need to keep the original bytes around unaltered for subsequent RetransformClasses calls
// to get the passed in bytes.
unsigned char* class_bytes_copy = nullptr;
- jvmtiError res = env->Allocate(definitions[i].class_byte_count, &class_bytes_copy);
+ res = env->Allocate(definitions[i].class_byte_count, &class_bytes_copy);
if (res != OK) {
return res;
}
@@ -779,6 +786,8 @@ bool Redefiner::ClassRedefinition::CheckRedefinitionIsValid() {
CheckSameMethods();
}
+class RedefinitionDataIter;
+
// A wrapper that lets us hold onto the arbitrary sized data needed for redefinitions in a
// reasonably sane way. This adds no fields to the normal ObjectArray. By doing this we can avoid
// having to deal with the fact that we need to hold an arbitrary number of references live.
@@ -802,13 +811,15 @@ class RedefinitionDataHolder {
RedefinitionDataHolder(art::StackHandleScope<1>* hs,
art::Runtime* runtime,
art::Thread* self,
- int32_t num_redefinitions) REQUIRES_SHARED(art::Locks::mutator_lock_) :
+ std::vector<Redefiner::ClassRedefinition>* redefinitions)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) :
arr_(
hs->NewHandle(
art::mirror::ObjectArray<art::mirror::Object>::Alloc(
self,
runtime->GetClassLinker()->GetClassRoot(art::ClassLinker::kObjectArrayClass),
- num_redefinitions * kNumSlots))) {}
+ redefinitions->size() * kNumSlots))),
+ redefinitions_(redefinitions) {}
bool IsNull() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
return arr_.IsNull();
@@ -870,8 +881,27 @@ class RedefinitionDataHolder {
return arr_->GetLength() / kNumSlots;
}
+ std::vector<Redefiner::ClassRedefinition>* GetRedefinitions()
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return redefinitions_;
+ }
+
+ bool operator==(const RedefinitionDataHolder& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return arr_.Get() == other.arr_.Get();
+ }
+
+ bool operator!=(const RedefinitionDataHolder& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return !(*this == other);
+ }
+
+ RedefinitionDataIter begin() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ RedefinitionDataIter end() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
private:
mutable art::Handle<art::mirror::ObjectArray<art::mirror::Object>> arr_;
+ std::vector<Redefiner::ClassRedefinition>* redefinitions_;
art::mirror::Object* GetSlot(jint klass_index,
DataSlot slot) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -890,8 +920,115 @@ class RedefinitionDataHolder {
DISALLOW_COPY_AND_ASSIGN(RedefinitionDataHolder);
};
-bool Redefiner::ClassRedefinition::CheckVerification(int32_t klass_index,
- const RedefinitionDataHolder& holder) {
+class RedefinitionDataIter {
+ public:
+ RedefinitionDataIter(int32_t idx, RedefinitionDataHolder& holder) : idx_(idx), holder_(holder) {}
+
+ RedefinitionDataIter(const RedefinitionDataIter&) = default;
+ RedefinitionDataIter(RedefinitionDataIter&&) = default;
+ RedefinitionDataIter& operator=(const RedefinitionDataIter&) = default;
+ RedefinitionDataIter& operator=(RedefinitionDataIter&&) = default;
+
+ bool operator==(const RedefinitionDataIter& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return idx_ == other.idx_ && holder_ == other.holder_;
+ }
+
+ bool operator!=(const RedefinitionDataIter& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return !(*this == other);
+ }
+
+ RedefinitionDataIter operator++() { // Value after modification.
+ idx_++;
+ return *this;
+ }
+
+ RedefinitionDataIter operator++(int) {
+ RedefinitionDataIter temp = *this;
+ idx_++;
+ return temp;
+ }
+
+ RedefinitionDataIter operator+(ssize_t delta) const {
+ RedefinitionDataIter temp = *this;
+ temp += delta;
+ return temp;
+ }
+
+ RedefinitionDataIter& operator+=(ssize_t delta) {
+ idx_ += delta;
+ return *this;
+ }
+
+ Redefiner::ClassRedefinition& GetRedefinition() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return (*holder_.GetRedefinitions())[idx_];
+ }
+
+ RedefinitionDataHolder& GetHolder() {
+ return holder_;
+ }
+
+ art::mirror::ClassLoader* GetSourceClassLoader() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetSourceClassLoader(idx_);
+ }
+ art::mirror::Object* GetJavaDexFile() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetJavaDexFile(idx_);
+ }
+ art::mirror::LongArray* GetNewDexFileCookie() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetNewDexFileCookie(idx_);
+ }
+ art::mirror::DexCache* GetNewDexCache() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetNewDexCache(idx_);
+ }
+ art::mirror::Class* GetMirrorClass() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetMirrorClass(idx_);
+ }
+ art::mirror::ByteArray* GetOriginalDexFileBytes() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetOriginalDexFileBytes(idx_);
+ }
+ int32_t GetIndex() const {
+ return idx_;
+ }
+
+ void SetSourceClassLoader(art::mirror::ClassLoader* loader)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetSourceClassLoader(idx_, loader);
+ }
+ void SetJavaDexFile(art::mirror::Object* dexfile) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetJavaDexFile(idx_, dexfile);
+ }
+ void SetNewDexFileCookie(art::mirror::LongArray* cookie)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetNewDexFileCookie(idx_, cookie);
+ }
+ void SetNewDexCache(art::mirror::DexCache* cache) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetNewDexCache(idx_, cache);
+ }
+ void SetMirrorClass(art::mirror::Class* klass) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetMirrorClass(idx_, klass);
+ }
+ void SetOriginalDexFileBytes(art::mirror::ByteArray* bytes)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetOriginalDexFileBytes(idx_, bytes);
+ }
+
+ private:
+ int32_t idx_;
+ RedefinitionDataHolder& holder_;
+};
+
+RedefinitionDataIter RedefinitionDataHolder::begin() {
+ return RedefinitionDataIter(0, *this);
+}
+
+RedefinitionDataIter RedefinitionDataHolder::end() {
+ return RedefinitionDataIter(Length(), *this);
+}
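
Taken together, begin()/end() let the redefinition driver walk the holder's per-class data slots and the ClassRedefinition list in lockstep instead of threading a separate counter through every helper. A minimal usage sketch, mirroring the rewritten loops further down in this change (locking annotations and error handling elided):

    // Iterate every redefinition together with its per-class data.
    for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
      Redefiner::ClassRedefinition& redef = data.GetRedefinition();
      if (data.GetSourceClassLoader() != nullptr) {
        ClassLoaderHelper::UpdateJavaDexFile(data.GetJavaDexFile(), data.GetNewDexFileCookie());
      }
      redef.UpdateClass(data.GetMirrorClass(), data.GetNewDexCache(), data.GetOriginalDexFileBytes());
    }
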
+
+bool Redefiner::ClassRedefinition::CheckVerification(const RedefinitionDataIter& iter) {
DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
art::StackHandleScope<2> hs(driver_->self_);
std::string error;
@@ -899,7 +1036,7 @@ bool Redefiner::ClassRedefinition::CheckVerification(int32_t klass_index,
art::verifier::MethodVerifier::FailureKind failure =
art::verifier::MethodVerifier::VerifyClass(driver_->self_,
dex_file_.get(),
- hs.NewHandle(holder.GetNewDexCache(klass_index)),
+ hs.NewHandle(iter.GetNewDexCache()),
hs.NewHandle(GetClassLoader()),
dex_file_->GetClassDef(0), /*class_def*/
nullptr, /*compiler_callbacks*/
@@ -918,21 +1055,20 @@ bool Redefiner::ClassRedefinition::CheckVerification(int32_t klass_index,
// dexfile. This is so that even if multiple classes with the same classloader are redefined at
// once they are all added to the classloader.
bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie(
- int32_t klass_index,
art::Handle<art::mirror::ClassLoader> source_class_loader,
art::Handle<art::mirror::Object> dex_file_obj,
- /*out*/RedefinitionDataHolder* holder) {
+ /*out*/RedefinitionDataIter* cur_data) {
art::StackHandleScope<2> hs(driver_->self_);
art::MutableHandle<art::mirror::LongArray> old_cookie(
hs.NewHandle<art::mirror::LongArray>(nullptr));
bool has_older_cookie = false;
// See if we already have a cookie that a previous redefinition got from the same classloader.
- for (int32_t i = 0; i < klass_index; i++) {
- if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) {
+ for (auto old_data = cur_data->GetHolder().begin(); old_data != *cur_data; ++old_data) {
+ if (old_data.GetSourceClassLoader() == source_class_loader.Get()) {
// Since every instance of this classloader should have the same cookie associated with it we
// can stop looking here.
has_older_cookie = true;
- old_cookie.Assign(holder->GetNewDexFileCookie(i));
+ old_cookie.Assign(old_data.GetNewDexFileCookie());
break;
}
}
@@ -953,14 +1089,14 @@ bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie(
}
// Save the cookie.
- holder->SetNewDexFileCookie(klass_index, new_cookie.Get());
+ cur_data->SetNewDexFileCookie(new_cookie.Get());
// If there are other copies of this same classloader we need to make sure that we all have the
// same cookie.
if (has_older_cookie) {
- for (int32_t i = 0; i < klass_index; i++) {
+ for (auto old_data = cur_data->GetHolder().begin(); old_data != *cur_data; ++old_data) {
// We will let the GC take care of the cookie we allocated for this one.
- if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) {
- holder->SetNewDexFileCookie(i, new_cookie.Get());
+ if (old_data.GetSourceClassLoader() == source_class_loader.Get()) {
+ old_data.SetNewDexFileCookie(new_cookie.Get());
}
}
}
@@ -969,32 +1105,32 @@ bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie(
}
bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
- int32_t klass_index, /*out*/RedefinitionDataHolder* holder) {
+ /*out*/RedefinitionDataIter* cur_data) {
art::ScopedObjectAccessUnchecked soa(driver_->self_);
art::StackHandleScope<2> hs(driver_->self_);
- holder->SetMirrorClass(klass_index, GetMirrorClass());
+ cur_data->SetMirrorClass(GetMirrorClass());
// This shouldn't allocate
art::Handle<art::mirror::ClassLoader> loader(hs.NewHandle(GetClassLoader()));
// The bootclasspath is handled specially so it doesn't have a j.l.DexFile.
if (!art::ClassLinker::IsBootClassLoader(soa, loader.Get())) {
- holder->SetSourceClassLoader(klass_index, loader.Get());
+ cur_data->SetSourceClassLoader(loader.Get());
art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle(
ClassLoaderHelper::FindSourceDexFileObject(driver_->self_, loader)));
- holder->SetJavaDexFile(klass_index, dex_file_obj.Get());
+ cur_data->SetJavaDexFile(dex_file_obj.Get());
if (dex_file_obj == nullptr) {
RecordFailure(ERR(INTERNAL), "Unable to find dex file!");
return false;
}
// Allocate the new dex file cookie.
- if (!AllocateAndRememberNewDexFileCookie(klass_index, loader, dex_file_obj, holder)) {
+ if (!AllocateAndRememberNewDexFileCookie(loader, dex_file_obj, cur_data)) {
driver_->self_->AssertPendingOOMException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader");
return false;
}
}
- holder->SetNewDexCache(klass_index, CreateNewDexCache(loader));
- if (holder->GetNewDexCache(klass_index) == nullptr) {
+ cur_data->SetNewDexCache(CreateNewDexCache(loader));
+ if (cur_data->GetNewDexCache() == nullptr) {
driver_->self_->AssertPendingException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate DexCache");
@@ -1002,8 +1138,8 @@ bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
}
// We won't always need to set this field.
- holder->SetOriginalDexFileBytes(klass_index, AllocateOrGetOriginalDexFileBytes());
- if (holder->GetOriginalDexFileBytes(klass_index) == nullptr) {
+ cur_data->SetOriginalDexFileBytes(AllocateOrGetOriginalDexFileBytes());
+ if (cur_data->GetOriginalDexFileBytes() == nullptr) {
driver_->self_->AssertPendingOOMException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate array for original dex file");
@@ -1048,13 +1184,11 @@ bool Redefiner::EnsureAllClassAllocationsFinished() {
}
bool Redefiner::FinishAllRemainingAllocations(RedefinitionDataHolder& holder) {
- int32_t cnt = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
// Allocate the data this redefinition requires.
- if (!redef.FinishRemainingAllocations(cnt, &holder)) {
+ if (!data.GetRedefinition().FinishRemainingAllocations(&data)) {
return false;
}
- cnt++;
}
return true;
}
@@ -1069,22 +1203,39 @@ void Redefiner::ReleaseAllDexFiles() {
}
}
-bool Redefiner::CheckAllClassesAreVerified(const RedefinitionDataHolder& holder) {
- int32_t cnt = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
- if (!redef.CheckVerification(cnt, holder)) {
+bool Redefiner::CheckAllClassesAreVerified(RedefinitionDataHolder& holder) {
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ if (!data.GetRedefinition().CheckVerification(data)) {
return false;
}
- cnt++;
}
return true;
}
+class ScopedDisableConcurrentAndMovingGc {
+ public:
+ ScopedDisableConcurrentAndMovingGc(art::gc::Heap* heap, art::Thread* self)
+ : heap_(heap), self_(self) {
+ if (heap_->IsGcConcurrentAndMoving()) {
+ heap_->IncrementDisableMovingGC(self_);
+ }
+ }
+
+ ~ScopedDisableConcurrentAndMovingGc() {
+ if (heap_->IsGcConcurrentAndMoving()) {
+ heap_->DecrementDisableMovingGC(self_);
+ }
+ }
+ private:
+ art::gc::Heap* heap_;
+ art::Thread* self_;
+};
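
This guard replaces the manual IncrementDisableMovingGC/DecrementDisableMovingGC pair that previously bracketed the installation code, so moving GC is re-enabled on every exit path. A before/after sketch built only from the lines removed below:

    // Before: manual pairing, easy to miss on an early return.
    art::gc::Heap* heap = runtime_->GetHeap();
    heap->IncrementDisableMovingGC(self_);
    // ... install redefined classes ...
    heap->DecrementDisableMovingGC(self_);

    // After: the destructor restores the GC state automatically.
    ScopedDisableConcurrentAndMovingGc sdcamgc(runtime_->GetHeap(), self_);
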
+
jvmtiError Redefiner::Run() {
art::StackHandleScope<1> hs(self_);
// Allocate an array to hold onto all java temporary objects associated with this redefinition.
// We will let this be collected after the end of this function.
- RedefinitionDataHolder holder(&hs, runtime_, self_, redefinitions_.size());
+ RedefinitionDataHolder holder(&hs, runtime_, self_, &redefinitions_);
if (holder.IsNull()) {
self_->AssertPendingOOMException();
self_->ClearException();
@@ -1107,57 +1258,43 @@ jvmtiError Redefiner::Run() {
// cleaned up by the GC eventually.
return result_;
}
+
// At this point we can no longer fail without corrupting the runtime state.
- int32_t counter = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
- if (holder.GetSourceClassLoader(counter) == nullptr) {
- runtime_->GetClassLinker()->AppendToBootClassPath(self_, redef.GetDexFile());
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ if (data.GetSourceClassLoader() == nullptr) {
+ runtime_->GetClassLinker()->AppendToBootClassPath(self_, data.GetRedefinition().GetDexFile());
}
- counter++;
}
UnregisterAllBreakpoints();
+
// Disable GC and wait for it to be done if we are a moving GC. This is fine since we are done
  // allocating, so there is no deadlock risk.
- art::gc::Heap* heap = runtime_->GetHeap();
- if (heap->IsGcConcurrentAndMoving()) {
- // GC moving objects can cause deadlocks as we are deoptimizing the stack.
- heap->IncrementDisableMovingGC(self_);
- }
+ ScopedDisableConcurrentAndMovingGc sdcamgc(runtime_->GetHeap(), self_);
+
// Do transition to final suspension
// TODO We might want to give this its own suspended state!
// TODO This isn't right. We need to change state without any chance of suspend ideally!
- self_->TransitionFromRunnableToSuspended(art::ThreadState::kNative);
- runtime_->GetThreadList()->SuspendAll(
- "Final installation of redefined Classes!", /*long_suspend*/true);
- counter = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ art::ScopedThreadSuspension sts(self_, art::ThreadState::kNative);
+ art::ScopedSuspendAll ssa("Final installation of redefined Classes!", /*long_suspend*/true);
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
art::ScopedAssertNoThreadSuspension nts("Updating runtime objects for redefinition");
- if (holder.GetSourceClassLoader(counter) != nullptr) {
- ClassLoaderHelper::UpdateJavaDexFile(holder.GetJavaDexFile(counter),
- holder.GetNewDexFileCookie(counter));
+ ClassRedefinition& redef = data.GetRedefinition();
+ if (data.GetSourceClassLoader() != nullptr) {
+ ClassLoaderHelper::UpdateJavaDexFile(data.GetJavaDexFile(), data.GetNewDexFileCookie());
}
- art::mirror::Class* klass = holder.GetMirrorClass(counter);
+ art::mirror::Class* klass = data.GetMirrorClass();
// TODO Rewrite so we don't do a stack walk for each and every class.
redef.FindAndAllocateObsoleteMethods(klass);
- redef.UpdateClass(klass, holder.GetNewDexCache(counter),
- holder.GetOriginalDexFileBytes(counter));
- counter++;
+ redef.UpdateClass(klass, data.GetNewDexCache(), data.GetOriginalDexFileBytes());
}
// TODO We should check for if any of the redefined methods are intrinsic methods here and, if any
// are, force a full-world deoptimization before finishing redefinition. If we don't do this then
// methods that have been jitted prior to the current redefinition being applied might continue
// to use the old versions of the intrinsics!
// TODO Shrink the obsolete method maps if possible?
- // TODO Put this into a scoped thing.
- runtime_->GetThreadList()->ResumeAll();
- // Get back shared mutator lock as expected for return.
- self_->TransitionFromSuspendedToRunnable();
// TODO Do the dex_file release at a more reasonable place. This works but it muddles who really
// owns the DexFile and when ownership is transferred.
ReleaseAllDexFiles();
- if (heap->IsGcConcurrentAndMoving()) {
- heap->DecrementDisableMovingGC(self_);
- }
return OK;
}
@@ -1259,8 +1396,6 @@ bool Redefiner::ClassRedefinition::EnsureClassAllocationsFinished() {
art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->EnsureExtDataPresent(driver_->self_)));
if (ext == nullptr) {
// No memory. Clear exception (it's not useful) and return error.
- // TODO This doesn't need to be fatal. We could just not support obsolete methods after hitting
- // this case.
driver_->self_->AssertPendingOOMException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate ClassExt");
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 4e6d05f056..4313a9476e 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -66,6 +66,7 @@
namespace openjdkjvmti {
class RedefinitionDataHolder;
+class RedefinitionDataIter;
// Class that can redefine a single class's methods.
// TODO We should really make this be driven by an outside class so we can do multiple classes at
@@ -143,14 +144,13 @@ class Redefiner {
driver_->RecordFailure(e, class_sig_, err);
}
- bool FinishRemainingAllocations(int32_t klass_index, /*out*/RedefinitionDataHolder* holder)
+ bool FinishRemainingAllocations(/*out*/RedefinitionDataIter* cur_data)
REQUIRES_SHARED(art::Locks::mutator_lock_);
bool AllocateAndRememberNewDexFileCookie(
- int32_t klass_index,
art::Handle<art::mirror::ClassLoader> source_class_loader,
art::Handle<art::mirror::Object> dex_file_obj,
- /*out*/RedefinitionDataHolder* holder)
+ /*out*/RedefinitionDataIter* cur_data)
REQUIRES_SHARED(art::Locks::mutator_lock_);
void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
@@ -161,8 +161,7 @@ class Redefiner {
bool CheckClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
// Checks that the contained class can be successfully verified.
- bool CheckVerification(int32_t klass_index,
- const RedefinitionDataHolder& holder)
+  bool CheckVerification(const RedefinitionDataIter& iter)
REQUIRES_SHARED(art::Locks::mutator_lock_);
// Preallocates all needed allocations in klass so that we can pause execution safely.
@@ -241,7 +240,7 @@ class Redefiner {
jvmtiError Run() REQUIRES_SHARED(art::Locks::mutator_lock_);
bool CheckAllRedefinitionAreValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
- bool CheckAllClassesAreVerified(const RedefinitionDataHolder& holder)
+ bool CheckAllClassesAreVerified(RedefinitionDataHolder& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
bool EnsureAllClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
bool FinishAllRemainingAllocations(RedefinitionDataHolder& holder)
@@ -255,6 +254,8 @@ class Redefiner {
}
friend struct CallbackCtx;
+ friend class RedefinitionDataHolder;
+ friend class RedefinitionDataIter;
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
index 36421b9137..bd52cbb7f9 100644
--- a/runtime/openjdkjvmti/transform.cc
+++ b/runtime/openjdkjvmti/transform.cc
@@ -109,6 +109,13 @@ jvmtiError Transformer::RetransformClasses(ArtJvmTiEnv* env,
std::vector<ArtClassDefinition> definitions;
jvmtiError res = OK;
for (jint i = 0; i < class_count; i++) {
+ jboolean is_modifiable = JNI_FALSE;
+ res = env->IsModifiableClass(classes[i], &is_modifiable);
+ if (res != OK) {
+ return res;
+ } else if (!is_modifiable) {
+ return ERR(UNMODIFIABLE_CLASS);
+ }
ArtClassDefinition def;
res = FillInTransformationData(env, classes[i], &def);
if (res != OK) {
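
From an agent's point of view, retransforming a non-modifiable class (for example an array class, as exercised by test 921 below) now fails up front. A hypothetical agent-side sketch — the helper name TryRetransform is invented for illustration:

    #include <jvmti.h>

    // Array, primitive, and other non-modifiable classes are rejected with
    // JVMTI_ERROR_UNMODIFIABLE_CLASS before any transformation work is done.
    bool TryRetransform(jvmtiEnv* jvmti, jclass klass) {
      jvmtiError err = jvmti->RetransformClasses(1, &klass);
      return err == JVMTI_ERROR_NONE;
    }
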
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 42a0ca9373..9fd2c88c3c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1963,9 +1963,7 @@ void Runtime::SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type) {
}
void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
- const std::string& profile_output_filename,
- const std::string& foreign_dex_profile_path,
- const std::string& app_dir) {
+ const std::string& profile_output_filename) {
if (jit_.get() == nullptr) {
// We are not JITing. Nothing to do.
return;
@@ -1987,18 +1985,7 @@ void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
return;
}
- jit_->StartProfileSaver(profile_output_filename,
- code_paths,
- foreign_dex_profile_path,
- app_dir);
-}
-
-void Runtime::NotifyDexLoaded(const std::string& dex_location) {
- VLOG(profiler) << "Notify dex loaded: " << dex_location;
- // We know that if the ProfileSaver is started then we can record profile information.
- if (ProfileSaver::IsStarted()) {
- ProfileSaver::NotifyDexUse(dex_location);
- }
+ jit_->StartProfileSaver(profile_output_filename, code_paths);
}
// Transaction support.
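
Call sites now pass only the code paths and the profile output file. A hypothetical call, with both paths invented for illustration:

    // Sketch: the foreign-dex profile dir and app dir are gone from the API.
    std::vector<std::string> code_paths = {"/data/app/example/base.apk"};
    runtime->RegisterAppInfo(code_paths, "/data/misc/profiles/cur/0/example/primary.prof");
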
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 4a0169db68..d244a9b618 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -454,10 +454,7 @@ class Runtime {
}
void RegisterAppInfo(const std::vector<std::string>& code_paths,
- const std::string& profile_output_filename,
- const std::string& foreign_dex_profile_path,
- const std::string& app_dir);
- void NotifyDexLoaded(const std::string& dex_location);
+ const std::string& profile_output_filename);
// Transaction support.
bool IsActiveTransaction() const {
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index a3286ac3d4..5f03741d1f 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -141,6 +141,8 @@ class ScopedObjectAccessUnchecked : public ScopedObjectAccessAlreadyRunnable {
ALWAYS_INLINE explicit ScopedObjectAccessUnchecked(Thread* self)
REQUIRES(!Locks::thread_suspend_count_lock_);
+ ALWAYS_INLINE ~ScopedObjectAccessUnchecked() REQUIRES(!Locks::thread_suspend_count_lock_) {}
+
// Used when we want a scoped JNI thread state but have no thread/JNIEnv. Consequently doesn't
// change into Runnable or acquire a share on the mutator_lock_.
explicit ScopedObjectAccessUnchecked(JavaVM* vm) ALWAYS_INLINE
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 67f0b5715d..d936ce938e 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -571,7 +571,7 @@ class DexRegisterMap {
}
}
- bool IsDexRegisterLive(uint16_t dex_register_number) const {
+ ALWAYS_INLINE bool IsDexRegisterLive(uint16_t dex_register_number) const {
size_t live_bit_mask_offset_in_bits = GetLiveBitMaskOffset() * kBitsPerByte;
return region_.LoadBit(live_bit_mask_offset_in_bits + dex_register_number);
}
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 482e0e39a9..02a1e4d8a5 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -94,9 +94,7 @@ inline void Thread::CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mu
if (held_mutex != nullptr &&
held_mutex != Locks::mutator_lock_ &&
held_mutex != cond_var_mutex) {
- std::vector<BaseMutex*>& expected_mutexes = Locks::expected_mutexes_on_weak_ref_access_;
- CHECK(std::find(expected_mutexes.begin(), expected_mutexes.end(), held_mutex) !=
- expected_mutexes.end())
+ CHECK(Locks::IsExpectedOnWeakRefAccess(held_mutex))
<< "Holding unexpected mutex " << held_mutex->GetName()
<< " when accessing weak ref";
}
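
A plausible shape for the new helper, assuming it simply wraps the std::find that the removed lines performed inline — the real definition lives in the runtime/base/mutex.cc changes listed above and presumably also guards the vector against concurrent modification:

    #include <algorithm>

    // Sketch only: checks whether a held mutex is on the expected list.
    bool Locks::IsExpectedOnWeakRefAccess(BaseMutex* mutex) {
      std::vector<BaseMutex*>& expected = expected_mutexes_on_weak_ref_access_;
      return std::find(expected.begin(), expected.end(), mutex) != expected.end();
    }
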
diff --git a/test/154-gc-loop/src/Main.java b/test/154-gc-loop/src/Main.java
index 2228ca2783..69015b65aa 100644
--- a/test/154-gc-loop/src/Main.java
+++ b/test/154-gc-loop/src/Main.java
@@ -38,7 +38,7 @@ public class Main {
}
} catch (Exception e) {}
System.out.println("Finalize count too large: " +
- ((finalizeCounter >= 12) ? Integer.toString(finalizeCounter) : "false"));
+ ((finalizeCounter >= 15) ? Integer.toString(finalizeCounter) : "false"));
}
private static native void backgroundProcessState();
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index 9f4be6c227..663250369d 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -747,6 +747,69 @@ public class Main {
return 1.0f;
}
+ /// CHECK-START: TestClass2 Main.testStoreStore() load_store_elimination (before)
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: TestClass2 Main.testStoreStore() load_store_elimination (after)
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ private static TestClass2 testStoreStore() {
+ TestClass2 obj = new TestClass2();
+ obj.i = 41;
+ obj.j = 42;
+ obj.i = 41;
+ obj.j = 43;
+ return obj;
+ }
+
+ /// CHECK-START: int Main.testStoreStoreWithDeoptimize(int[]) load_store_elimination (before)
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: Deoptimize
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArrayGet
+ /// CHECK: ArrayGet
+ /// CHECK: ArrayGet
+
+ /// CHECK-START: int Main.testStoreStoreWithDeoptimize(int[]) load_store_elimination (after)
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+ /// CHECK: Deoptimize
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK-NOT: ArrayGet
+
+ private static int testStoreStoreWithDeoptimize(int[] arr) {
+ TestClass2 obj = new TestClass2();
+ obj.i = 41;
+ obj.j = 42;
+ obj.i = 41;
+ obj.j = 43;
+ arr[0] = 1; // One HDeoptimize here.
+ arr[1] = 1;
+ arr[2] = 1;
+ arr[3] = 1;
+ return arr[0] + arr[1] + arr[2] + arr[3];
+ }
+
/// CHECK-START: double Main.getCircleArea(double, boolean) load_store_elimination (before)
/// CHECK: NewInstance
@@ -785,6 +848,86 @@ public class Main {
return new Circle(Math.PI).getArea();
}
+ /// CHECK-START: int Main.testAllocationEliminationOfArray1() load_store_elimination (before)
+ /// CHECK: NewArray
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArrayGet
+ /// CHECK: ArrayGet
+ /// CHECK: ArrayGet
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray1() load_store_elimination (after)
+ /// CHECK-NOT: NewArray
+ /// CHECK-NOT: ArraySet
+ /// CHECK-NOT: ArrayGet
+ private static int testAllocationEliminationOfArray1() {
+ int[] array = new int[4];
+ array[2] = 4;
+ array[3] = 7;
+ return array[0] + array[1] + array[2] + array[3];
+ }
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray2() load_store_elimination (before)
+ /// CHECK: NewArray
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray2() load_store_elimination (after)
+ /// CHECK: NewArray
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+ private static int testAllocationEliminationOfArray2() {
+ // Cannot eliminate array allocation since array is accessed with non-constant
+ // index.
+ int[] array = new int[4];
+ array[2] = 4;
+ array[3] = 7;
+ int sum = 0;
+ for (int e : array) {
+ sum += e;
+ }
+ return sum;
+ }
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray3(int) load_store_elimination (before)
+ /// CHECK: NewArray
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray3(int) load_store_elimination (after)
+ /// CHECK-NOT: NewArray
+ /// CHECK-NOT: ArraySet
+ /// CHECK-NOT: ArrayGet
+ private static int testAllocationEliminationOfArray3(int i) {
+ int[] array = new int[4];
+ array[i] = 4;
+ return array[i];
+ }
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray4(int) load_store_elimination (before)
+ /// CHECK: NewArray
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArrayGet
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray4(int) load_store_elimination (after)
+ /// CHECK: NewArray
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK-NOT: ArrayGet
+ private static int testAllocationEliminationOfArray4(int i) {
+ // Cannot eliminate array allocation due to index aliasing between 1 and i.
+ int[] array = new int[4];
+ array[1] = 2;
+ array[i] = 4;
+ return array[1] + array[i];
+ }
+
static void assertIntEquals(int result, int expected) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
@@ -865,6 +1008,15 @@ public class Main {
assertDoubleEquals(darray[0], Math.PI);
assertDoubleEquals(darray[1], Math.PI);
assertDoubleEquals(darray[2], Math.PI);
+
+ assertIntEquals(testAllocationEliminationOfArray1(), 11);
+ assertIntEquals(testAllocationEliminationOfArray2(), 11);
+ assertIntEquals(testAllocationEliminationOfArray3(2), 4);
+ assertIntEquals(testAllocationEliminationOfArray4(2), 6);
+
+ assertIntEquals(testStoreStore().i, 41);
+ assertIntEquals(testStoreStore().j, 43);
+ assertIntEquals(testStoreStoreWithDeoptimize(new int[4]), 4);
}
static boolean sFlag;
diff --git a/test/532-checker-nonnull-arrayset/src/Main.java b/test/532-checker-nonnull-arrayset/src/Main.java
index 2c701bbb94..61c9e88e9e 100644
--- a/test/532-checker-nonnull-arrayset/src/Main.java
+++ b/test/532-checker-nonnull-arrayset/src/Main.java
@@ -30,10 +30,14 @@ public class Main {
/// CHECK: ReturnVoid
public static void test() {
Object[] array = new Object[2];
+    // Store the array into a static field so LSE cannot optimize its accesses away.
+ sArray = array;
Object nonNull = array[0];
nonNull.getClass(); // Ensure nonNull has an implicit null check.
array[1] = nonNull;
}
public static void main(String[] args) {}
+
+ static Object[] sArray;
}
diff --git a/test/577-profile-foreign-dex/info.txt b/test/577-profile-foreign-dex/info.txt
deleted file mode 100644
index 090db3fdc6..0000000000
--- a/test/577-profile-foreign-dex/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Check that we record the use of foreign dex files when profiles are enabled.
diff --git a/test/577-profile-foreign-dex/src-ex/OtherDex.java b/test/577-profile-foreign-dex/src-ex/OtherDex.java
deleted file mode 100644
index cba73b3094..0000000000
--- a/test/577-profile-foreign-dex/src-ex/OtherDex.java
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-public class OtherDex {
-}
diff --git a/test/577-profile-foreign-dex/src/Main.java b/test/577-profile-foreign-dex/src/Main.java
deleted file mode 100644
index ed7a625e75..0000000000
--- a/test/577-profile-foreign-dex/src/Main.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.lang.reflect.Constructor;
-import java.util.HashMap;
-
-public class Main {
-
- private static final String PROFILE_NAME = "primary.prof";
- private static final String APP_DIR_PREFIX = "app_dir_";
- private static final String FOREIGN_DEX_PROFILE_DIR = "foreign-dex";
- private static final String TEMP_FILE_NAME_PREFIX = "dummy";
- private static final String TEMP_FILE_NAME_SUFFIX = "-file";
-
- public static void main(String[] args) throws Exception {
- File tmpFile = null;
- File appDir = null;
- File profileFile = null;
- File foreignDexProfileDir = null;
-
- try {
- // Create the necessary files layout.
- tmpFile = createTempFile();
- appDir = new File(tmpFile.getParent(), APP_DIR_PREFIX + tmpFile.getName());
- appDir.mkdir();
- foreignDexProfileDir = new File(tmpFile.getParent(), FOREIGN_DEX_PROFILE_DIR);
- foreignDexProfileDir.mkdir();
- profileFile = createTempFile();
-
- String codePath = System.getenv("DEX_LOCATION") + "/577-profile-foreign-dex.jar";
-
- // Register the app with the runtime
- VMRuntime.registerAppInfo(profileFile.getPath(), appDir.getPath(),
- new String[] { codePath }, foreignDexProfileDir.getPath());
-
- testMarkerForForeignDex(foreignDexProfileDir);
- testMarkerForCodePath(foreignDexProfileDir);
- testMarkerForApplicationDexFile(foreignDexProfileDir, appDir);
- } finally {
- if (tmpFile != null) {
- tmpFile.delete();
- }
- if (profileFile != null) {
- profileFile.delete();
- }
- if (foreignDexProfileDir != null) {
- foreignDexProfileDir.delete();
- }
- if (appDir != null) {
- appDir.delete();
- }
- }
- }
-
- // Verify we actually create a marker on disk for foreign dex files.
- private static void testMarkerForForeignDex(File foreignDexProfileDir) throws Exception {
- String foreignDex = System.getenv("DEX_LOCATION") + "/577-profile-foreign-dex-ex.jar";
- loadDexFile(foreignDex);
- checkMarker(foreignDexProfileDir, foreignDex, /* exists */ true);
- }
-
- // Verify we do not create a marker on disk for dex files path of the code path.
- private static void testMarkerForCodePath(File foreignDexProfileDir) throws Exception {
- String codePath = System.getenv("DEX_LOCATION") + "/577-profile-foreign-dex.jar";
- loadDexFile(codePath);
- checkMarker(foreignDexProfileDir, codePath, /* exists */ false);
- }
-
- private static void testMarkerForApplicationDexFile(File foreignDexProfileDir, File appDir)
- throws Exception {
- // Copy the -ex jar to the application directory and load it from there.
- // This will record duplicate class conflicts but we don't care for this use case.
- File foreignDex = new File(System.getenv("DEX_LOCATION") + "/577-profile-foreign-dex-ex.jar");
- File appDex = new File(appDir, "appDex.jar");
- try {
- copyFile(foreignDex, appDex);
-
- loadDexFile(appDex.getAbsolutePath());
- checkMarker(foreignDexProfileDir, appDex.getAbsolutePath(), /* exists */ false);
- } finally {
- if (appDex != null) {
- appDex.delete();
- }
- }
- }
-
- private static void checkMarker(File foreignDexProfileDir, String dexFile, boolean exists) {
- File marker = new File(foreignDexProfileDir, dexFile.replace('/', '@'));
- boolean result_ok = exists ? marker.exists() : !marker.exists();
- if (!result_ok) {
- throw new RuntimeException("Marker test failed for:" + marker.getPath());
- }
- }
-
- private static void loadDexFile(String dexFile) throws Exception {
- Class<?> pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
- if (pathClassLoader == null) {
- throw new RuntimeException("Couldn't find path class loader class");
- }
- Constructor<?> constructor =
- pathClassLoader.getDeclaredConstructor(String.class, ClassLoader.class);
- constructor.newInstance(
- dexFile, ClassLoader.getSystemClassLoader());
- }
-
- private static class VMRuntime {
- private static final Method registerAppInfoMethod;
- static {
- try {
- Class<?> c = Class.forName("dalvik.system.VMRuntime");
- registerAppInfoMethod = c.getDeclaredMethod("registerAppInfo",
- String.class, String.class, String[].class, String.class);
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
-
- public static void registerAppInfo(String pkgName, String appDir,
- String[] codePath, String foreignDexProfileDir) throws Exception {
- registerAppInfoMethod.invoke(null, pkgName, appDir, codePath, foreignDexProfileDir);
- }
- }
-
- private static void copyFile(File fromFile, File toFile) throws Exception {
- FileInputStream in = new FileInputStream(fromFile);
- FileOutputStream out = new FileOutputStream(toFile);
- try {
- byte[] buffer = new byte[4096];
- int bytesRead;
- while ((bytesRead = in.read(buffer)) >= 0) {
- out.write(buffer, 0, bytesRead);
- }
- } finally {
- out.flush();
- try {
- out.getFD().sync();
- } catch (IOException e) {
- }
- out.close();
- in.close();
- }
- }
-
- private static File createTempFile() throws Exception {
- try {
- return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
- } catch (IOException e) {
- System.setProperty("java.io.tmpdir", "/data/local/tmp");
- try {
- return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
- } catch (IOException e2) {
- System.setProperty("java.io.tmpdir", "/sdcard");
- return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
- }
- }
- }
-}
diff --git a/test/595-profile-saving/src/Main.java b/test/595-profile-saving/src/Main.java
index 039503f7a4..faf94c4fcc 100644
--- a/test/595-profile-saving/src/Main.java
+++ b/test/595-profile-saving/src/Main.java
@@ -29,9 +29,7 @@ public class Main {
// String codePath = getDexBaseLocation();
String codePath = System.getenv("DEX_LOCATION") + "/595-profile-saving.jar";
VMRuntime.registerAppInfo(file.getPath(),
- System.getenv("DEX_LOCATION"),
- new String[] {codePath},
- /* foreignProfileDir */ null);
+ new String[] {codePath});
int methodIdx = $opt$noinline$testProfile();
ensureProfileProcessing();
@@ -85,15 +83,15 @@ public class Main {
try {
Class<? extends Object> c = Class.forName("dalvik.system.VMRuntime");
registerAppInfoMethod = c.getDeclaredMethod("registerAppInfo",
- String.class, String.class, String[].class, String.class);
+ String.class, String[].class);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
- public static void registerAppInfo(String profile, String appDir,
- String[] codePaths, String foreignDir) throws Exception {
- registerAppInfoMethod.invoke(null, profile, appDir, codePaths, foreignDir);
+ public static void registerAppInfo(String profile, String[] codePaths)
+ throws Exception {
+ registerAppInfoMethod.invoke(null, profile, codePaths);
}
}
}
diff --git a/test/618-checker-induction/src/Main.java b/test/618-checker-induction/src/Main.java
index ad3ff448d0..2d9daf1d43 100644
--- a/test/618-checker-induction/src/Main.java
+++ b/test/618-checker-induction/src/Main.java
@@ -21,6 +21,8 @@ public class Main {
static int[] a = new int[10];
+ static int[] novec = new int[20]; // to prevent vectorization
+
/// CHECK-START: void Main.deadSingleLoop() loop_optimization (before)
/// CHECK-DAG: Phi loop:{{B\d+}} outer_loop:none
//
@@ -132,16 +134,18 @@ public class Main {
/// CHECK-START: void Main.deadInduction() loop_optimization (before)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.deadInduction() loop_optimization (after)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-NOT: Phi loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
static void deadInduction() {
int dead = 0;
for (int i = 0; i < a.length; i++) {
- a[i] = 1;
+ a[i] = novec[2 * i] + 1;
dead += 5;
}
}
@@ -151,17 +155,19 @@ public class Main {
/// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.deadManyInduction() loop_optimization (after)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-NOT: Phi loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
static void deadManyInduction() {
int dead1 = 0, dead2 = 1, dead3 = 3;
for (int i = 0; i < a.length; i++) {
dead1 += 5;
- a[i] = 2;
+ a[i] = novec[2 * i] + 2;
dead2 += 10;
dead3 += 100;
}
@@ -170,16 +176,18 @@ public class Main {
/// CHECK-START: void Main.deadSequence() loop_optimization (before)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.deadSequence() loop_optimization (after)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-NOT: Phi loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
static void deadSequence() {
int dead = 0;
for (int i = 0; i < a.length; i++) {
- a[i] = 3;
+ a[i] = novec[2 * i] + 3;
// Increment value defined inside loop,
// but sequence itself not used anywhere.
dead += i;
@@ -191,17 +199,19 @@ public class Main {
/// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-NOT: BoundsCheck
//
/// CHECK-START: void Main.deadCycleWithException(int) loop_optimization (after)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-NOT: Phi loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-NOT: ArrayGet loop:<<Loop>> outer_loop:none
static void deadCycleWithException(int k) {
int dead = 0;
for (int i = 0; i < a.length; i++) {
- a[i] = 4;
+ a[i] = novec[2 * i] + 4;
// Increment value of dead cycle may throw exception. Dynamic
// BCE takes care of the bounds check though, which enables
// removing the ArrayGet after removing the dead cycle.
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index 7509d9b4f3..eee90ab285 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -213,6 +213,8 @@ public class Main {
/// CHECK-START: long Main.geoLongDivLastValue(long) instruction_simplifier$after_bce (after)
/// CHECK-DAG: <<Long:j\d+>> LongConstant 0 loop:none
/// CHECK-DAG: Return [<<Long>>] loop:none
+ //
+ // Tests overflow in the divisor (while updating intermediate result).
static long geoLongDivLastValue(long x) {
for (int i = 0; i < 10; i++) {
x /= 1081788608;
@@ -220,6 +222,26 @@ public class Main {
return x;
}
+ /// CHECK-START: long Main.geoLongDivLastValue() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: long Main.geoLongDivLastValue() loop_optimization (after)
+ /// CHECK-NOT: Phi
+ //
+ /// CHECK-START: long Main.geoLongDivLastValue() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Long:j\d+>> LongConstant 0 loop:none
+ /// CHECK-DAG: Return [<<Long>>] loop:none
+ //
+ // Tests overflow in the divisor (while updating base).
+ static long geoLongDivLastValue() {
+ long x = -1;
+ for (int i2 = 0; i2 < 2; i2++) {
+ x /= (Long.MAX_VALUE);
+ }
+ return x;
+ }
+
/// CHECK-START: long Main.geoLongMulLastValue(long) loop_optimization (before)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
@@ -239,6 +261,15 @@ public class Main {
return x;
}
+ // If vectorized, the narrowing subscript should not cause
+ // type inconsistencies in the synthesized code.
+ static void narrowingSubscript(float[] a) {
+ float val = 2.0f;
+ for (long i = 0; i < a.length; i++) {
+ a[(int) i] += val;
+ }
+ }
+
public static void main(String[] args) {
expectEquals(10, earlyExitFirst(-1));
for (int i = 0; i <= 10; i++) {
@@ -286,6 +317,8 @@ public class Main {
expectEquals(0L, geoLongDivLastValue(9223372036854775807L));
expectEquals(0L, geoLongDivLastValue(-9223372036854775808L));
+ expectEquals(0L, geoLongDivLastValue());
+
expectEquals( 0L, geoLongMulLastValue(0L));
expectEquals(-8070450532247928832L, geoLongMulLastValue(1L));
expectEquals( 2305843009213693952L, geoLongMulLastValue(2L));
@@ -296,6 +329,12 @@ public class Main {
expectEquals( 8070450532247928832L, geoLongMulLastValue(9223372036854775807L));
expectEquals( 0L, geoLongMulLastValue(-9223372036854775808L));
+ float[] a = new float[16];
+ narrowingSubscript(a);
+ for (int i = 0; i < 16; i++) {
+ expectEquals(2.0f, a[i]);
+ }
+
System.out.println("passed");
}
@@ -310,4 +349,10 @@ public class Main {
throw new Error("Expected: " + expected + ", found: " + result);
}
}
+
+ private static void expectEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
}
diff --git a/test/641-checker-arraycopy/build b/test/641-checker-arraycopy/build
new file mode 100644
index 0000000000..9abc61897a
--- /dev/null
+++ b/test/641-checker-arraycopy/build
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+# Don't use jack for this test, to ensure we don't use
+# the typed System.arraycopy versions directly.
+export USE_JACK=false
+
+./default-build "$@"
diff --git a/test/577-profile-foreign-dex/expected.txt b/test/641-checker-arraycopy/expected.txt
index e69de29bb2..e69de29bb2 100644
--- a/test/577-profile-foreign-dex/expected.txt
+++ b/test/641-checker-arraycopy/expected.txt
diff --git a/test/641-checker-arraycopy/info.txt b/test/641-checker-arraycopy/info.txt
new file mode 100644
index 0000000000..1a1111e145
--- /dev/null
+++ b/test/641-checker-arraycopy/info.txt
@@ -0,0 +1,2 @@
+Checker test for the arraycopy optimization in the
+instruction simplifier.
diff --git a/test/641-checker-arraycopy/src/Main.java b/test/641-checker-arraycopy/src/Main.java
new file mode 100644
index 0000000000..f0fcf28d23
--- /dev/null
+++ b/test/641-checker-arraycopy/src/Main.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  // Note that this is testing that we haven't intrinsified the byte[] arraycopy version.
+ // Once we eventually start doing it, we will need to re-adjust this test.
+
+ /// CHECK-START-X86: void Main.typedCopy(java.lang.Object, byte[]) disassembly (after)
+ /// CHECK: InvokeStaticOrDirect method_name:java.lang.System.arraycopy intrinsic:SystemArrayCopy
+ /// CHECK-NOT: call
+ /// CHECK: InvokeStaticOrDirect method_name:java.lang.System.arraycopy intrinsic:SystemArrayCopy
+ /// CHECK: call
+ /// CHECK: ReturnVoid
+ public static void typedCopy(Object o, byte[] foo) {
+ System.arraycopy(o, 1, o, 0, 1);
+ System.arraycopy(foo, 1, foo, 0, 1);
+ }
+
+ public static void untypedCopy(Object o, Object foo) {
+ System.arraycopy(o, 1, o, 0, 1);
+ System.arraycopy(foo, 1, foo, 0, 1);
+ }
+
+ // Test that we still do the optimization after inlining.
+
+ /// CHECK-START-X86: void Main.untypedCopyCaller(java.lang.Object, byte[]) disassembly (after)
+ /// CHECK: InvokeStaticOrDirect method_name:java.lang.System.arraycopy intrinsic:SystemArrayCopy
+ /// CHECK-NOT: call
+ /// CHECK: InvokeStaticOrDirect method_name:java.lang.System.arraycopy intrinsic:SystemArrayCopy
+ /// CHECK: call
+ /// CHECK: ReturnVoid
+ public static void untypedCopyCaller(Object o, byte[] array) {
+ untypedCopy(o, array);
+ }
+
+ public static void assertEquals(Object one, Object two) {
+ if (one != two) {
+ throw new Error("Expected " + one + ", got " + two);
+ }
+ }
+
+ public static void main(String[] args) {
+ // Simple sanity checks.
+ byte[] a = new byte[2];
+ Object[] o = new Object[2];
+
+ o[0] = a;
+ o[1] = o;
+ a[0] = 1;
+ a[1] = 2;
+
+ untypedCopyCaller(o, a);
+ assertEquals(o[0], o);
+ assertEquals(o[1], o);
+ assertEquals(a[0], (byte)2);
+ assertEquals(a[1], (byte)2);
+
+ o[0] = a;
+ o[1] = o;
+ a[0] = 1;
+ a[1] = 2;
+
+ typedCopy(o, a);
+ assertEquals(o[0], o);
+ assertEquals(o[1], o);
+ assertEquals(a[0], (byte)2);
+ assertEquals(a[1], (byte)2);
+ }
+}
diff --git a/test/641-iterations/expected.txt b/test/641-iterations/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/641-iterations/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/641-iterations/info.txt b/test/641-iterations/info.txt
new file mode 100644
index 0000000000..fd8059561c
--- /dev/null
+++ b/test/641-iterations/info.txt
@@ -0,0 +1 @@
+Tests on varying trip counts (to validate vector/cleanup loops).
diff --git a/test/641-iterations/src/Main.java b/test/641-iterations/src/Main.java
new file mode 100644
index 0000000000..6a27f80286
--- /dev/null
+++ b/test/641-iterations/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests of varying trip counts, focused on exercising the
+ * vector core loop and the cleanup loop after vectorization.
+ */
+public class Main {
+
+ static int[] sA;
+
+ static void init() {
+ for (int i = 0; i < sA.length; i++)
+ sA[i] = 100;
+ }
+
+ static void doitTo(int n) {
+ for (int i = 0; i < n; i++)
+ sA[i] += 1;
+ }
+
+ static void doitFrom(int n) {
+ for (int i = n; i < sA.length; i++)
+ sA[i] += 1;
+ }
+
+ static void verify(int n) {
+ for (int i = 0; i < n; i++)
+ if (sA[i] != 101)
+ throw new Error("failed inside loop");
+ for (int i = n; i < sA.length; i++)
+ if (sA[i] != 100)
+ throw new Error("failed outside loop");
+ }
+
+ static void verify() {
+ for (int i = 0; i < sA.length; i++)
+ if (sA[i] != 101)
+ throw new Error("failed inside loop");
+ }
+
+ static void driver() {
+ for (int n = 0; n <= sA.length; n++) {
+ init();
+ doitTo(n);
+ verify(n);
+ doitFrom(n);
+ verify();
+ }
+ }
+
+ public static void main(String[] args) {
+ sA = new int[17];
+ driver();
+ sA = new int[32];
+ driver();
+ System.out.println("passed");
+ }
+}
+
diff --git a/test/921-hello-failure/expected.txt b/test/921-hello-failure/expected.txt
index a5dc10d59c..fdbfbe2191 100644
--- a/test/921-hello-failure/expected.txt
+++ b/test/921-hello-failure/expected.txt
@@ -50,3 +50,6 @@ hello there - MissingField
hello there again - FieldChange
Transformation error : java.lang.Exception(Failed to redefine class <LTransform4;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED)
hello there again - FieldChange
+hello - Unmodifiable
+Transformation error : java.lang.Exception(Failed to redefine class <[LTransform;> due to JVMTI_ERROR_UNMODIFIABLE_CLASS)
+hello - Unmodifiable
diff --git a/test/921-hello-failure/src/Main.java b/test/921-hello-failure/src/Main.java
index 5bbe2b5479..6779ed862a 100644
--- a/test/921-hello-failure/src/Main.java
+++ b/test/921-hello-failure/src/Main.java
@@ -32,6 +32,7 @@ public class Main {
NewField.doTest(new Transform());
MissingField.doTest(new Transform4("there"));
FieldChange.doTest(new Transform4("there again"));
+ Unmodifiable.doTest(new Transform[] { new Transform(), });
}
// Transforms the class. This throws an exception if something goes wrong.
diff --git a/test/921-hello-failure/src/Unmodifiable.java b/test/921-hello-failure/src/Unmodifiable.java
new file mode 100644
index 0000000000..ad05f51f9f
--- /dev/null
+++ b/test/921-hello-failure/src/Unmodifiable.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class Unmodifiable {
+  // The following are base64 encodings of a valid class file and its dex counterpart.
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFQoABgAPBwAQCAARCgACABIHABMHABQBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" +
+ "TGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApTb3VyY2VG" +
+ "aWxlAQAOVHJhbnNmb3JtLmphdmEMAAcACAEAD2phdmEvbGFuZy9FcnJvcgEAFVNob3VsZCBub3Qg" +
+ "YmUgY2FsbGVkIQwABwAMAQAJVHJhbnNmb3JtAQAQamF2YS9sYW5nL09iamVjdAAgAAUABgAAAAAA" +
+ "AgAAAAcACAABAAkAAAAdAAEAAQAAAAUqtwABsQAAAAEACgAAAAYAAQAAAAIAAAALAAwAAQAJAAAA" +
+ "IgADAAIAAAAKuwACWRIDtwAEvwAAAAEACgAAAAYAAQAAAAQAAQANAAAAAgAO");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCrV81cy4Q+YKMMMqc0bZEO5Y1X5u7irPeQAgAAcAAAAHhWNBIAAAAAAAAAAPwBAAAL" +
+ "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACIAQAACAEAAEoB" +
+ "AABSAQAAXwEAAHIBAACGAQAAmgEAALEBAADBAQAAxAEAAMgBAADcAQAAAQAAAAIAAAADAAAABAAA" +
+ "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAAAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" +
+ "AAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAO4BAAAAAAAAAQABAAEAAADjAQAABAAAAHAQAwAA" +
+ "AA4ABAACAAIAAADoAQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgALTFRy" +
+ "YW5zZm9ybTsAEUxqYXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xh" +
+ "bmcvU3RyaW5nOwAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhAA5UcmFuc2Zvcm0uamF2YQABVgACVkwA" +
+ "EmVtaXR0ZXI6IGphY2stNC4yNAAFc2F5SGkAAgAHDgAEAQAHDgAAAAEBAICABIgCAQCgAgwAAAAA" +
+ "AAAAAQAAAAAAAAABAAAACwAAAHAAAAACAAAABQAAAJwAAAADAAAAAgAAALAAAAAFAAAABAAAAMgA" +
+ "AAAGAAAAAQAAAOgAAAABIAAAAgAAAAgBAAABEAAAAQAAAEQBAAACIAAACwAAAEoBAAADIAAAAgAA" +
+ "AOMBAAAAIAAAAQAAAO4BAAAAEAAAAQAAAPwBAAA=");
+
+ public static void doTest(Transform[] ts) {
+ ts[0].sayHi("Unmodifiable");
+ try {
+ Main.doCommonClassRedefinition(Transform[].class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ ts[0].sayHi("Unmodifiable");
+ }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index bfb04a4fba..01eb14eda2 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -222,6 +222,7 @@ define name-to-var
$(shell echo $(1) | tr '[:lower:]' '[:upper:]' | tr '-' '_')
endef # name-to-var
+# Disable 115-native-bridge, it fails when run through make. b/35984597.
# Disable 153-reference-stress temporarily until a fix arrives. b/33389022.
# Disable 080-oom-fragmentation due to flakes. b/33795328
# Disable 497-inlining-and-class-loader and 542-unresolved-access-check until
@@ -229,6 +230,7 @@ endef # name-to-var
# register a dex file that's already registered with a different loader.
# b/34193123
ART_TEST_RUN_TEST_SKIP += \
+ 115-native-bridge \
153-reference-stress \
080-oom-fragmentation \
497-inlining-and-class-loader \
@@ -240,10 +242,8 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),
# Disable 149-suspend-all-stress, its output is flaky (b/28988206).
-# Disable 577-profile-foreign-dex (b/27454772).
TEST_ART_BROKEN_ALL_TARGET_TESTS := \
149-suspend-all-stress \
- 577-profile-foreign-dex \
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
@@ -527,12 +527,14 @@ TEST_ART_BROKEN_INTERPRETER_RUN_TESTS :=
# Known broken tests for the JIT.
# CFI unwinding expects managed frames, and the test does not iterate enough to even compile. JIT
# also uses Generic JNI instead of the JNI compiler.
+# 154-gc-loop requires more deterministic GC behavior than the JIT provides.
# Test 906 iterates the heap filtering with different options. No instances should be created
# between those runs to be able to have precise checks.
# Test 629 requires compilation.
# 912: b/34655682
TEST_ART_BROKEN_JIT_RUN_TESTS := \
137-cfi \
+ 154-gc-loop \
629-vdex-speed \
904-object-allocation \
906-iterate-heap \
@@ -622,16 +624,18 @@ TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS :=
TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS :=
# Tests failing in non-Baker read barrier configurations with the Optimizing compiler (AOT).
-# 537: Expects an array copy to be intrinsified, but calling-on-slowpath intrinsics are not yet
+# 537 and 641: Expect an array copy to be intrinsified, but calling-on-slowpath intrinsics are not yet
# handled in non-Baker read barrier configurations.
TEST_ART_BROKEN_OPTIMIZING_NON_BAKER_READ_BARRIER_RUN_TESTS := \
- 537-checker-arraycopy
+ 537-checker-arraycopy \
+ 641-checker-arraycopy
# Tests failing in non-Baker read barrier configurations with JIT (Optimizing compiler).
-# 537: Expects an array copy to be intrinsified, but calling-on-slowpath intrinsics are not yet
+# 537 and 641: Expect an array copy to be intrinsified, but calling-on-slowpath intrinsics are not yet
# handled in non-Baker read barrier configurations.
TEST_ART_BROKEN_JIT_NON_BAKER_READ_BARRIER_RUN_TESTS := \
- 537-checker-arraycopy
+ 537-checker-arraycopy \
+ 641-checker-arraycopy
ifeq ($(ART_USE_READ_BARRIER),true)
ifneq (,$(filter interpreter,$(COMPILER_TYPES)))
diff --git a/test/ProfileTestMultiDex/Main.java b/test/ProfileTestMultiDex/Main.java
index 41532ea8f7..a8ced544c9 100644
--- a/test/ProfileTestMultiDex/Main.java
+++ b/test/ProfileTestMultiDex/Main.java
@@ -25,3 +25,45 @@ class Main {
return "C";
}
}
+
+class TestInline {
+ public int inlineMonomorphic(Super s) {
+ return s.getValue();
+ }
+
+ public int inlinePolymorphic(Super s) {
+ return s.getValue();
+ }
+
+ public int inlineMegamorphic(Super s) {
+ return s.getValue();
+ }
+
+ public int inlineMissingTypes(Super s) {
+ return s.getValue();
+ }
+
+ public int noInlineCache(Super s) {
+ return s.getValue();
+ }
+}
+
+abstract class Super {
+ abstract int getValue();
+}
+
+class SubA extends Super {
+ int getValue() { return 42; }
+}
+
+class SubB extends Super {
+  int getValue() { return 38; }
+}
+
+class SubD extends Super {
+  int getValue() { return 20; }
+}
+
+class SubE extends Super {
+  int getValue() { return 16; }
+}
diff --git a/test/ProfileTestMultiDex/Second.java b/test/ProfileTestMultiDex/Second.java
index 4ac5abc300..4b3c7a479b 100644
--- a/test/ProfileTestMultiDex/Second.java
+++ b/test/ProfileTestMultiDex/Second.java
@@ -25,3 +25,8 @@ class Second {
return "Z";
}
}
+
+class SubC extends Super {
+ int getValue() { return 24; }
+}
+
diff --git a/test/ProfileTestMultiDex/main.jpp b/test/ProfileTestMultiDex/main.jpp
index f2e3b4e14c..5e55e96874 100644
--- a/test/ProfileTestMultiDex/main.jpp
+++ b/test/ProfileTestMultiDex/main.jpp
@@ -1,3 +1,21 @@
-main:
+Main:
@@com.android.jack.annotations.ForceInMainDex
- class Second
+ class Main
+TestInline:
+ @@com.android.jack.annotations.ForceInMainDex
+ class TestInline
+Super:
+ @@com.android.jack.annotations.ForceInMainDex
+ class Super
+SubA:
+ @@com.android.jack.annotations.ForceInMainDex
+ class SubA
+SubB:
+ @@com.android.jack.annotations.ForceInMainDex
+ class SubB
+SubD:
+ @@com.android.jack.annotations.ForceInMainDex
+ class SubD
+SubE:
+ @@com.android.jack.annotations.ForceInMainDex
+ class SubE
diff --git a/test/ProfileTestMultiDex/main.list b/test/ProfileTestMultiDex/main.list
index 44ba78ead5..ec131f0f71 100644
--- a/test/ProfileTestMultiDex/main.list
+++ b/test/ProfileTestMultiDex/main.list
@@ -1 +1,7 @@
Main.class
+TestInline.class
+Super.class
+SubA.class
+SubB.class
+SubD.class
+SubE.class
diff --git a/test/577-profile-foreign-dex/run b/test/VerifierDeps/MySub1SoftVerificationFailure.smali
index ad57d14c60..8123394e87 100644
--- a/test/577-profile-foreign-dex/run
+++ b/test/VerifierDeps/MySub1SoftVerificationFailure.smali
@@ -1,6 +1,4 @@
-#!/bin/bash
-#
-# Copyright 2016 The Android Open Source Project
+# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +12,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-exec ${RUN} \
- --runtime-option -Xjitsaveprofilinginfo \
- --runtime-option -Xusejit:true \
- "${@}"
+.class public LMySub1SoftVerificationFailure;
+.super LMySoftVerificationFailure;
diff --git a/test/VerifierDeps/MySub2SoftVerificationFailure.smali b/test/VerifierDeps/MySub2SoftVerificationFailure.smali
new file mode 100644
index 0000000000..8d003236c8
--- /dev/null
+++ b/test/VerifierDeps/MySub2SoftVerificationFailure.smali
@@ -0,0 +1,16 @@
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMySub2SoftVerificationFailure;
+.super LMySoftVerificationFailure;
diff --git a/test/VerifierDepsMulti/MySoftVerificationFailure.smali b/test/VerifierDepsMulti/MySoftVerificationFailure.smali
new file mode 100644
index 0000000000..6b56a3b6dc
--- /dev/null
+++ b/test/VerifierDepsMulti/MySoftVerificationFailure.smali
@@ -0,0 +1,24 @@
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMySoftVerificationFailure;
+.super Ljava/lang/Object;
+
+.method public final foo()V
+ .registers 1
+ sget-object v0, LMySoftVerificationFailure;->error:LUnknownType;
+ throw v0
+.end method
+
+.field public static error:LUnknownType;
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 161aa2340d..9d1f8a2b5f 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -441,8 +441,8 @@ fi
JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
if [ "$RELOCATE" = "y" ]; then
- COMPILE_FLAGS="${COMPILE_FLAGS} --include-patch-information --runtime-arg -Xnorelocate"
- FLAGS="${FLAGS} -Xrelocate -Xcompiler-option --include-patch-information"
+ COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xnorelocate"
+ FLAGS="${FLAGS} -Xrelocate"
if [ "$HOST" = "y" ]; then
# Run test sets a fairly draconian ulimit that we will likely blow right over
# since we are relocating. Get the total size of the /system/framework directory
@@ -455,10 +455,12 @@ if [ "$RELOCATE" = "y" ]; then
else
FLAGS="$FLAGS -Xnorelocate"
COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xnorelocate"
- if [ "$HOST" = "y" ]; then
- # Increase ulimit to 64MB in case we are running hprof test.
- ulimit -S 64000 || exit 1
- fi
+fi
+
+if [ "$HOST" = "y" ]; then
+  # Increase ulimit to 128MB in case we are running the hprof test,
+  # or the string append test with art-debug-gc.
+ ulimit -S 128000 || exit 1
fi
if [ "$HOST" = "n" ]; then
@@ -504,7 +506,8 @@ fi
DEX_LOCATION_STRIPPED="${DEX_LOCATION#/}"
VDEX_NAME="${DEX_LOCATION_STRIPPED//\//@}@$TEST_NAME.jar@classes.vdex"
if [ ${#VDEX_NAME} -gt $max_filename_size ]; then
- echo "Dex location path too long."
+ echo "Dex location path too long:"
+ echo "$VDEX_NAME is ${#VDEX_NAME} character long, and the limit is $max_filename_size."
exit 1
fi
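
For reference, the mangling behind $VDEX_NAME strips the leading '/' from the dex location, replaces each remaining '/' with '@', and appends the jar and vdex suffixes. A small Python sketch of the same computation; the 255-character cap and the sample path are assumptions for illustration (the script reads the real limit into max_filename_size):

# Sketch of the $VDEX_NAME mangling above; 255 is an assumed typical NAME_MAX.
def vdex_name(dex_location, test_name):
    stripped = dex_location[1:] if dex_location.startswith('/') else dex_location
    return stripped.replace('/', '@') + '@' + test_name + '.jar@classes.vdex'

max_filename_size = 255
name = vdex_name('/data/run-test/001-HelloWorld', '001-HelloWorld')  # hypothetical path
if len(name) > max_filename_size:
    print('Dex location path too long:')
    print('%s is %d characters long, and the limit is %d.'
          % (name, len(name), max_filename_size))
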
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 6caf7b0f36..50d70f18dc 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -26,11 +26,6 @@
"bug": "http://b/28988206"
},
{
- "test": "577-profile-foreign-dex",
- "description": "Disable 577-profile-foreign-dex",
- "bug": "http://b/27454772"
- },
- {
"tests": ["002-sleep",
"053-wait-some",
"055-enum-performance",
@@ -129,11 +124,15 @@
"lot."]
},
{
- "tests": ["964-default-iface-init-gen",
- "154-gc-loop"],
+ "test": "964-default-iface-init-gen",
"variant": "gcstress"
},
{
+    "test": "154-gc-loop",
+    "variant": "gcstress | jit",
+    "description": ["154-gc-loop depends on the GC not happening too often"]
+ },
+ {
"test": "115-native-bridge",
"variant": "target",
"description": ["115-native-bridge setup is complicated. Need to",
@@ -298,7 +297,7 @@
"tests": ["000-nop",
"134-nodex2oat-nofallback",
"147-stripped-dex-fallback",
- "595-profile-saving"],
+ "595-profile-saving"],
"description": "The doesn't compile anything",
"env_vars": {"ART_TEST_BISECTION": "true"},
"variant": "optimizing | regalloc_gc"
@@ -323,7 +322,7 @@
},
{
"tests": ["115-native-bridge",
- "088-monitor-verification"],
+ "088-monitor-verification"],
"description": "The test assume they are always compiled.",
"env_vars": {"ART_TEST_BISECTION": "true"},
"variant": "optimizing | regalloc_gc"
@@ -336,7 +335,8 @@
"variant": "optimizing | regalloc_gc"
},
{
- "test": "537-checker-arraycopy",
+ "tests": ["537-checker-arraycopy",
+ "641-checker-arraycopy"],
"env_vars": {"ART_USE_READ_BARRIER": "true"},
"variant": "interpreter | optimizing | regalloc_gc | jit"
}
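
The '|' in a "variant" value expresses alternatives, so "gcstress | jit" disables a test when either variant is active. A hedged sketch of how such an expression could be matched against the active variant set; the '&' handling is my assumption about combined variants, not a guarantee of testrunner.py's exact grammar:

# Sketch only: match a knownfailures.json "variant" expression.
def variant_matches(expression, active_variants):
    active = set(active_variants)
    for alternative in expression.split('|'):      # any alternative suffices
        required = {v.strip() for v in alternative.split('&')}
        if required.issubset(active):              # all '&' parts must hold
            return True
    return False

print(variant_matches('gcstress | jit', ['jit', 'debug']))           # True
print(variant_matches('optimizing | regalloc_gc', ['interpreter']))  # False
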
diff --git a/test/run-test b/test/run-test
index 7d3d813da0..1ac285769d 100755
--- a/test/run-test
+++ b/test/run-test
@@ -80,7 +80,7 @@ fi
# ANDROID_HOST_OUT is not set in a build environment.
if [ -z "$ANDROID_HOST_OUT" ]; then
- export ANDROID_HOST_OUT=${OUT_DIR:-$ANDROID_BUILD_TOP/out/}host/linux-x86
+ export ANDROID_HOST_OUT=${OUT_DIR:-$ANDROID_BUILD_TOP/out}/host/linux-x86
fi
# If JACK_CLASSPATH is not set, assume it only contains core-libart.
diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py
new file mode 100755
index 0000000000..0cd1ddee7b
--- /dev/null
+++ b/test/testrunner/run_build_test_target.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+#
+# Copyright 2017, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import subprocess
+import sys
+
+from target_config import target_config
+import env
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--build-target', required=True, dest='build_target')
+parser.add_argument('-j', default='1', dest='n_threads')
+options = parser.parse_args()
+
+target = target_config[options.build_target]
+n_threads = options.n_threads
+custom_env = target.get('env', {})
+custom_env['SOONG_ALLOW_MISSING_DEPENDENCIES'] = 'true'
+print custom_env
+os.environ.update(custom_env)
+
+
+if target.get('target'):
+ build_command = 'make'
+ build_command += ' -j' + str(n_threads)
+ build_command += ' -C ' + env.ANDROID_BUILD_TOP
+ build_command += ' ' + target.get('target')
+ print build_command.split()
+ if subprocess.call(build_command.split()):
+ sys.exit(1)
+
+else:
+ run_test_command = [os.path.join(env.ANDROID_BUILD_TOP,
+ 'art/test/testrunner/testrunner.py')]
+ run_test_command += target.get('flags', [])
+ run_test_command += ['-j', str(n_threads)]
+ run_test_command += ['-b']
+ run_test_command += ['--verbose']
+
+ print run_test_command
+ if subprocess.call(run_test_command):
+ sys.exit(1)
+
+sys.exit(0)
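
To make the dispatch concrete: each entry in target_config either names a make goal or carries testrunner.py flags, and the script builds the corresponding command line. A self-contained Python sketch of the two branches, with an inlined two-entry excerpt of the config; the build-top path is hypothetical:

# Sketch of the two branches above, with an inlined config excerpt.
config_excerpt = {
    'art-gtest': {'target': 'test-art-gtest', 'env': {'ART_USE_READ_BARRIER': 'true'}},
    'art-jit': {'flags': ['--jit'], 'env': {'ART_USE_READ_BARRIER': 'false'}},
}

def command_for(build_target, n_threads, build_top):
    target = config_excerpt[build_target]
    if target.get('target'):  # 'make' branch
        return ['make', '-j' + str(n_threads), '-C', build_top, target['target']]
    # testrunner branch
    return ([build_top + '/art/test/testrunner/testrunner.py']
            + target.get('flags', []) + ['-j', str(n_threads), '-b', '--verbose'])

print(command_for('art-gtest', 4, '/android'))  # make branch, hypothetical build top
print(command_for('art-jit', 4, '/android'))    # testrunner branch
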
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
new file mode 100644
index 0000000000..5387d6a8e8
--- /dev/null
+++ b/test/testrunner/target_config.py
@@ -0,0 +1,257 @@
+target_config = {
+ 'art-test' : {
+ 'flags' : [],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-interpreter' : {
+ 'flags' : ['--interpreter'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-interpreter-access-checks' : {
+ 'flags' : ['--interp-ac'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-jit' : {
+ 'flags' : ['--jit'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-gcstress-gcverify': {
+ 'flags' : ['--gcstress',
+ '--gcverify'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false',
+ 'ART_DEFAULT_GC_TYPE' : 'SS'
+ }
+ },
+ 'art-interpreter-gcstress' : {
+ 'flags': ['--interpreter',
+ '--gcstress'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false',
+ 'ART_DEFAULT_GC_TYPE' : 'SS'
+ }
+ },
+ 'art-optimizing-gcstress' : {
+ 'flags': ['--gcstress',
+ '--optimizing'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false',
+ 'ART_DEFAULT_GC_TYPE' : 'SS'
+ }
+ },
+ 'art-jit-gcstress' : {
+ 'flags': ['--jit',
+ '--gcstress'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-read-barrier' : {
+ 'flags': ['--interpreter',
+ '--optimizing'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'true',
+ 'ART_HEAP_POISONING' : 'true'
+ }
+ },
+ 'art-read-barrier-gcstress' : {
+ 'flags' : ['--interpreter',
+ '--optimizing',
+ '--gcstress'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'true',
+ 'ART_HEAP_POISONING' : 'true'
+ }
+ },
+ 'art-read-barrier-table-lookup' : {
+ 'flags' : ['--interpreter',
+ '--optimizing'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'true',
+ 'ART_READ_BARRIER_TYPE' : 'TABLELOOKUP',
+ 'ART_HEAP_POISONING' : 'true'
+ }
+ },
+ 'art-debug-gc' : {
+ 'flags' : ['--interpreter',
+ '--optimizing'],
+ 'env' : {
+ 'ART_TEST_DEBUG_GC' : 'true',
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-ss-gc' : {
+ 'flags' : ['--interpreter',
+ '--optimizing',
+ '--jit'],
+ 'env' : {
+ 'ART_DEFAULT_GC_TYPE' : 'SS',
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-gss-gc' : {
+ 'flags' : ['--interpreter',
+ '--optimizing',
+ '--jit'],
+ 'env' : {
+ 'ART_DEFAULT_GC_TYPE' : 'GSS',
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-ss-gc-tlab' : {
+ 'flags' : ['--interpreter',
+ '--optimizing',
+ '--jit'],
+ 'env' : {
+ 'ART_DEFAULT_GC_TYPE' : 'SS',
+ 'ART_USE_TLAB' : 'true',
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-gss-gc-tlab' : {
+ 'flags' : ['--interpreter',
+ '--optimizing',
+ '--jit'],
+ 'env' : {
+ 'ART_DEFAULT_GC_TYPE' : 'GSS',
+ 'ART_USE_TLAB' : 'true',
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-tracing' : {
+ 'flags' : ['--trace'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-interpreter-tracing' : {
+ 'flags' : ['--interpreter',
+ '--trace'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false',
+ }
+ },
+ 'art-forcecopy' : {
+ 'flags' : ['--forcecopy'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false',
+ }
+ },
+ 'art-no-prebuild' : {
+ 'flags' : ['--no-prebuild'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false',
+ }
+ },
+ 'art-no-image' : {
+ 'flags' : ['--no-image'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false',
+ }
+ },
+ 'art-interpreter-no-image' : {
+ 'flags' : ['--interpreter',
+ '--no-image'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false',
+ }
+ },
+ 'art-relocate-no-patchoat' : {
+ 'flags' : ['--relocate-npatchoat'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false',
+ }
+ },
+ 'art-no-dex2oat' : {
+ 'flags' : ['--no-dex2oat'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false',
+ }
+ },
+ 'art-heap-poisoning' : {
+ 'flags' : ['--interpreter',
+ '--optimizing'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'false',
+ 'ART_HEAP_POISONING' : 'true'
+ }
+ },
+ 'art-gtest' : {
+ 'target' : 'test-art-gtest',
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'true'
+ }
+ },
+ 'art-gtest-read-barrier': {
+ 'target' : 'test-art-gtest',
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'true',
+ 'ART_HEAP_POISONING' : 'true'
+ }
+ },
+ 'art-gtest-read-barrier-table-lookup': {
+ 'target' : 'test-art-gtest',
+ 'env': {
+ 'ART_USE_READ_BARRIER' : 'true',
+ 'ART_READ_BARRIER_TYPE' : 'TABLELOOKUP',
+ 'ART_HEAP_POISONING' : 'true'
+ }
+ },
+ 'art-gtest-ss-gc': {
+ 'target' : 'test-art-gtest',
+ 'env': {
+ 'ART_DEFAULT_GC_TYPE' : 'SS',
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-gtest-gss-gc': {
+ 'target' : 'test-art-gtest',
+ 'env' : {
+ 'ART_DEFAULT_GC_TYPE' : 'GSS',
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-gtest-ss-gc-tlab': {
+ 'target' : 'test-art-gtest',
+ 'env': {
+ 'ART_DEFAULT_GC_TYPE' : 'SS',
+ 'ART_USE_TLAB' : 'true',
+ 'ART_USE_READ_BARRIER' : 'false',
+ }
+ },
+ 'art-gtest-gss-gc-tlab': {
+ 'target' : 'test-art-gtest',
+ 'env': {
+ 'ART_DEFAULT_GC_TYPE' : 'GSS',
+ 'ART_USE_TLAB' : 'true',
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-gtest-valgrind32': {
+ 'target' : 'valgrind-test-art-host32',
+ 'env': {
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-gtest-valgrind64': {
+ 'target' : 'valgrind-test-art-host64',
+ 'env': {
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
+ 'art-gtest-heap-poisoning': {
+ 'target' : 'valgrind-test-art-host64',
+ 'env' : {
+ 'ART_HEAP_POISONING' : 'true',
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ }
+}
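
Every entry above follows the same shape: at least one of 'flags' (passed to testrunner.py) or 'target' (a make goal), plus an 'env' dict of string-valued build variables. A small validation sketch of that shape, under my reading of the file rather than any enforced schema:

# Sketch only: sanity-check the target_config shape described above.
def validate(config):
    for name, entry in sorted(config.items()):
        assert ('flags' in entry) or ('target' in entry), name
        for key, value in entry.get('env', {}).items():
            assert isinstance(value, str), '%s: %s should be a string' % (name, key)
    return len(config)

sample = {
    'art-ss-gc': {'flags': ['--interpreter'], 'env': {'ART_DEFAULT_GC_TYPE': 'SS'}},
    'art-gtest': {'target': 'test-art-gtest', 'env': {'ART_USE_READ_BARRIER': 'true'}},
}
print('%d entries look well-formed' % validate(sample))
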
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 9c8d3b870c..f77e9adb10 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -56,6 +56,7 @@ import threading
import time
import env
+from target_config import target_config
TARGET_TYPES = set()
RUN_TYPES = set()
@@ -704,6 +705,25 @@ def parse_test_name(test_name):
return {match.group(12)}
raise ValueError(test_name + " is not a valid test")
+
+def setup_env_for_build_target(build_target, parser, options):
+ """Setup environment for the build target
+
+ The method setup environment for the master-art-host targets.
+ """
+ os.environ.update(build_target['env'])
+ os.environ['SOONG_ALLOW_MISSING_DEPENDENCIES'] = 'true'
+ print_text('%s\n' % (str(os.environ)))
+
+ target_options = vars(parser.parse_args(build_target['flags']))
+ target_options['host'] = True
+ target_options['verbose'] = True
+ target_options['build'] = True
+ target_options['n_thread'] = options['n_thread']
+ target_options['dry_run'] = options['dry_run']
+
+ return target_options
+
def parse_option():
global verbose
global dry_run
@@ -733,90 +753,95 @@ def parse_option():
action='store_true', dest='build',
help="Build dependencies under all circumstances. By default we will " +
"not build dependencies unless ART_TEST_RUN_TEST_BUILD=true.")
+ parser.add_argument('--build-target', dest='build_target', help='master-art-host targets')
parser.set_defaults(build = env.ART_TEST_RUN_TEST_BUILD)
parser.add_argument('--gdb', action='store_true', dest='gdb')
parser.add_argument('--gdb-arg', dest='gdb_arg')
- options = parser.parse_args()
+ options = vars(parser.parse_args())
+ if options['build_target']:
+ options = setup_env_for_build_target(target_config[options['build_target']],
+ parser, options)
+
test = ''
- env.EXTRA_DISABLED_TESTS.update(set(options.skips))
- if options.test:
- test = parse_test_name(options.test)
- if options.pictest:
+ env.EXTRA_DISABLED_TESTS.update(set(options['skips']))
+ if options['test']:
+ test = parse_test_name(options['test'])
+ if options['pictest']:
PICTEST_TYPES.add('pictest')
- if options.ndebug:
+ if options['ndebug']:
RUN_TYPES.add('ndebug')
- if options.interp_ac:
+ if options['interp_ac']:
COMPILER_TYPES.add('interp-ac')
- if options.picimage:
+ if options['picimage']:
IMAGE_TYPES.add('picimage')
- if options.n64:
+ if options['n64']:
ADDRESS_SIZES.add('64')
- if options.interpreter:
+ if options['interpreter']:
COMPILER_TYPES.add('interpreter')
- if options.jni:
+ if options['jni']:
JNI_TYPES.add('jni')
- if options.relocate_npatchoat:
+ if options['relocate_npatchoat']:
RELOCATE_TYPES.add('relocate-npatchoat')
- if options.no_prebuild:
+ if options['no_prebuild']:
PREBUILD_TYPES.add('no-prebuild')
- if options.npictest:
+ if options['npictest']:
PICTEST_TYPES.add('npictest')
- if options.no_dex2oat:
+ if options['no_dex2oat']:
PREBUILD_TYPES.add('no-dex2oat')
- if options.jit:
+ if options['jit']:
COMPILER_TYPES.add('jit')
- if options.relocate:
+ if options['relocate']:
RELOCATE_TYPES.add('relocate')
- if options.ndebuggable:
+ if options['ndebuggable']:
DEBUGGABLE_TYPES.add('ndebuggable')
- if options.no_image:
+ if options['no_image']:
IMAGE_TYPES.add('no-image')
- if options.optimizing:
+ if options['optimizing']:
COMPILER_TYPES.add('optimizing')
- if options.trace:
+ if options['trace']:
TRACE_TYPES.add('trace')
- if options.gcstress:
+ if options['gcstress']:
GC_TYPES.add('gcstress')
- if options.no_relocate:
+ if options['no_relocate']:
RELOCATE_TYPES.add('no-relocate')
- if options.target:
+ if options['target']:
TARGET_TYPES.add('target')
- if options.forcecopy:
+ if options['forcecopy']:
JNI_TYPES.add('forcecopy')
- if options.n32:
+ if options['n32']:
ADDRESS_SIZES.add('32')
- if options.host:
+ if options['host']:
TARGET_TYPES.add('host')
- if options.gcverify:
+ if options['gcverify']:
GC_TYPES.add('gcverify')
- if options.debuggable:
+ if options['debuggable']:
DEBUGGABLE_TYPES.add('debuggable')
- if options.prebuild:
+ if options['prebuild']:
PREBUILD_TYPES.add('prebuild')
- if options.debug:
+ if options['debug']:
RUN_TYPES.add('debug')
- if options.checkjni:
+ if options['checkjni']:
JNI_TYPES.add('checkjni')
- if options.ntrace:
+ if options['ntrace']:
TRACE_TYPES.add('ntrace')
- if options.cms:
+ if options['cms']:
GC_TYPES.add('cms')
- if options.multipicimage:
+ if options['multipicimage']:
IMAGE_TYPES.add('multipicimage')
- if options.verbose:
+ if options['verbose']:
verbose = True
- if options.n_thread:
- n_thread = max(1, options.n_thread)
- if options.dry_run:
+ if options['n_thread']:
+ n_thread = max(1, options['n_thread'])
+ if options['dry_run']:
dry_run = True
verbose = True
- build = options.build
- if options.gdb:
+ build = options['build']
+ if options['gdb']:
n_thread = 1
gdb = True
- if options.gdb_arg:
- gdb_arg = options.gdb_arg
+ if options['gdb_arg']:
+ gdb_arg = options['gdb_arg']
return test
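
The mechanical change above is from attribute access on argparse's Namespace to dict access: vars(parser.parse_args()) exposes the Namespace's underlying dict, which lets setup_env_for_build_target re-parse a target's stored flag list and merge values back in. A short self-contained sketch of the equivalence:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--jit', action='store_true')
parser.add_argument('-j', dest='n_thread', type=int, default=1)

ns = parser.parse_args(['--jit'])
options = vars(ns)            # same data, dict-style access
assert options['jit'] is ns.jit

# Re-parsing a stored flag list yields a fresh dict to merge or override:
target_options = vars(parser.parse_args(['--jit']))
target_options['n_thread'] = options['n_thread']
print(target_options)
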
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
index 1e9c763534..7eaaaf9cbd 100755
--- a/tools/setup-buildbot-device.sh
+++ b/tools/setup-buildbot-device.sh
@@ -17,9 +17,33 @@
green='\033[0;32m'
nc='\033[0m'
+# Restart adb as root, as the next buildbot step (device cleanup) requires it.
+# This is also required to set the date, if needed.
+adb root
+adb wait-for-device
+
+echo -e "${green}Date on host${nc}"
+date
+
echo -e "${green}Date on device${nc}"
adb shell date
+host_seconds_since_epoch=$(date -u +%s)
+device_seconds_since_epoch=$(adb shell date -u +%s)
+
+abs_time_difference_in_seconds=$(expr $host_seconds_since_epoch - $device_seconds_since_epoch)
+if [ $abs_time_difference_in_seconds -lt 0 ]; then
+ abs_time_difference_in_seconds=$(expr 0 - $abs_time_difference_in_seconds)
+fi
+
+seconds_per_hour=3600
+
+# Update date on device if the difference with host is more than one hour.
+if [ $abs_time_difference_in_seconds -gt $seconds_per_hour ]; then
+ echo -e "${green}Update date on device${nc}"
+ adb shell date -u @$host_seconds_since_epoch
+fi
+
echo -e "${green}Turn off selinux${nc}"
adb shell setenforce 0
adb shell getenforce
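
The time-sync block above only resets the device clock when host and device disagree by more than an hour. The same check, sketched in Python for clarity:

# Sketch of the clock-skew check in setup-buildbot-device.sh.
SECONDS_PER_HOUR = 3600

def needs_clock_update(host_epoch, device_epoch):
    return abs(host_epoch - device_epoch) > SECONDS_PER_HOUR

print(needs_clock_update(1500000000, 1500000100))  # False: 100 s of skew
print(needs_clock_update(1500000000, 1500007201))  # True: over an hour
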