summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Android.mk9
-rw-r--r--benchmark/Android.bp2
-rw-r--r--build/Android.bp6
-rw-r--r--build/Android.common_path.mk5
-rw-r--r--build/art.go8
-rw-r--r--compiler/driver/compiler_driver.cc335
-rw-r--r--compiler/linker/arm64/relative_patcher_arm64.cc10
-rw-r--r--compiler/oat_writer.h2
-rw-r--r--compiler/optimizing/code_generator.h2
-rw-r--r--compiler/optimizing/code_generator_arm.h2
-rw-r--r--compiler/optimizing/code_generator_arm64.h2
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc38
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.h5
-rw-r--r--compiler/optimizing/code_generator_mips.h2
-rw-r--r--compiler/optimizing/code_generator_mips64.h2
-rw-r--r--compiler/optimizing/inliner.cc55
-rw-r--r--compiler/optimizing/inliner.h3
-rw-r--r--compiler/optimizing/instruction_builder.cc4
-rw-r--r--compiler/optimizing/intrinsics_arm.cc7
-rw-r--r--compiler/optimizing/intrinsics_arm_vixl.cc9
-rw-r--r--compiler/optimizing/loop_optimization.cc146
-rw-r--r--compiler/optimizing/loop_optimization.h6
-rw-r--r--compiler/optimizing/nodes.cc2
-rw-r--r--dex2oat/dex2oat.cc5
-rw-r--r--dexoptanalyzer/Android.bp2
-rw-r--r--disassembler/Android.bp2
-rw-r--r--imgdiag/Android.bp2
-rw-r--r--patchoat/Android.bp2
-rw-r--r--profman/Android.bp2
-rw-r--r--profman/profile_assistant_test.cc77
-rw-r--r--profman/profman.cc22
-rw-r--r--runtime/Android.bp1
-rw-r--r--runtime/arch/instruction_set.cc24
-rw-r--r--runtime/arch/instruction_set.h72
-rw-r--r--runtime/base/arena_allocator.cc1
-rw-r--r--runtime/base/arena_allocator.h1
-rw-r--r--runtime/base/logging.cc2
-rw-r--r--runtime/base/safe_copy_test.cc60
-rw-r--r--runtime/elf_file.cc4
-rw-r--r--runtime/entrypoints/quick/quick_entrypoints_list.h32
-rw-r--r--runtime/gc/heap.cc3
-rw-r--r--runtime/interpreter/interpreter_common.h12
-rw-r--r--runtime/interpreter/mterp/mips/op_double_to_int.S20
-rw-r--r--runtime/interpreter/mterp/mips/op_double_to_long.S15
-rw-r--r--runtime/interpreter/mterp/mips/op_float_to_int.S19
-rw-r--r--runtime/interpreter/mterp/mips/op_float_to_long.S14
-rw-r--r--runtime/interpreter/mterp/mips64/op_double_to_int.S22
-rw-r--r--runtime/interpreter/mterp/mips64/op_double_to_long.S22
-rw-r--r--runtime/interpreter/mterp/mips64/op_float_to_int.S22
-rw-r--r--runtime/interpreter/mterp/mips64/op_float_to_long.S22
-rw-r--r--runtime/interpreter/mterp/mterp.cc6
-rw-r--r--runtime/interpreter/mterp/out/mterp_mips.S68
-rw-r--r--runtime/interpreter/mterp/out/mterp_mips64.S161
-rw-r--r--runtime/jit/jit_code_cache.cc43
-rw-r--r--runtime/jit/profile_compilation_info.cc170
-rw-r--r--runtime/jit/profile_compilation_info.h106
-rw-r--r--runtime/jit/profile_compilation_info_test.cc176
-rw-r--r--runtime/jit/profile_saver.cc121
-rw-r--r--runtime/jit/profile_saver.h3
-rw-r--r--runtime/jni_internal.cc3
-rw-r--r--runtime/mirror/object_test.cc2
-rw-r--r--runtime/parsed_options.cc6
-rw-r--r--runtime/runtime.cc8
-rw-r--r--runtime/runtime.h16
-rw-r--r--runtime/runtime_options.def2
-rw-r--r--runtime/signal_catcher.cc93
-rw-r--r--runtime/signal_catcher.h26
-rw-r--r--runtime/type_reference.h (renamed from compiler/utils/type_reference.h)10
-rw-r--r--test/596-app-images/app_images.cc6
-rw-r--r--test/596-app-images/src/Main.java42
-rw-r--r--test/623-checker-loop-regressions/src/Main.java40
-rw-r--r--test/638-checker-inline-caches/src/Main.java75
-rw-r--r--test/640-checker-byte-simd/src/Main.java12
-rw-r--r--test/640-checker-char-simd/src/Main.java12
-rw-r--r--test/640-checker-double-simd/src/Main.java4
-rw-r--r--test/640-checker-int-simd/src/Main.java12
-rw-r--r--test/640-checker-long-simd/src/Main.java12
-rw-r--r--test/640-checker-short-simd/src/Main.java12
-rw-r--r--test/644-checker-deopt/expected.txt0
-rw-r--r--test/644-checker-deopt/info.txt2
-rw-r--r--test/644-checker-deopt/profile2
-rw-r--r--test/644-checker-deopt/run17
-rw-r--r--test/644-checker-deopt/src/Main.java74
-rw-r--r--test/645-checker-abs-simd/src/Main.java106
-rw-r--r--test/651-checker-byte-simd-minmax/src/Main.java76
-rw-r--r--test/651-checker-char-simd-minmax/src/Main.java18
-rw-r--r--test/651-checker-double-simd-minmax/src/Main.java2
-rw-r--r--test/651-checker-int-simd-minmax/src/Main.java4
-rw-r--r--test/651-checker-short-simd-minmax/src/Main.java76
-rw-r--r--test/652-deopt-intrinsic/expected.txt1
-rw-r--r--test/652-deopt-intrinsic/info.txt2
-rw-r--r--test/652-deopt-intrinsic/src/Main.java42
-rw-r--r--test/906-iterate-heap/expected.txt5
-rw-r--r--test/906-iterate-heap/iterate_heap.cc10
-rw-r--r--test/906-iterate-heap/src/art/Test906.java29
-rw-r--r--test/913-heaps/expected.txt2
-rw-r--r--test/913-heaps/heaps.cc9
-rw-r--r--test/913-heaps/src/art/Test913.java25
-rw-r--r--test/987-stack-trace-dumping/expected.txt0
-rw-r--r--test/987-stack-trace-dumping/info.txt0
-rwxr-xr-xtest/987-stack-trace-dumping/run18
-rw-r--r--test/987-stack-trace-dumping/src/Main.java60
-rw-r--r--test/Android.bp22
-rw-r--r--test/Android.run-test-jvmti-java-library.mk4
-rw-r--r--test/common/runtime_state.cc4
-rw-r--r--test/knownfailures.json4
-rwxr-xr-xtest/testrunner/testrunner.py3
-rw-r--r--tools/ahat/README.txt6
-rw-r--r--tools/ahat/src/DocString.java17
-rw-r--r--tools/ahat/src/DominatedList.java2
-rw-r--r--tools/ahat/src/HeapTable.java20
-rw-r--r--tools/ahat/src/ObjectHandler.java68
-rw-r--r--tools/ahat/src/ObjectsHandler.java11
-rw-r--r--tools/ahat/src/OverviewHandler.java50
-rw-r--r--tools/ahat/src/SiteHandler.java13
-rw-r--r--tools/ahat/src/SitePrinter.java2
-rw-r--r--tools/ahat/src/SizeTable.java106
-rw-r--r--tools/ahat/src/heapdump/AhatHeap.java8
-rw-r--r--tools/ahat/src/heapdump/AhatInstance.java77
-rw-r--r--tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java12
-rw-r--r--tools/ahat/src/heapdump/AhatPlaceHolderInstance.java12
-rw-r--r--tools/ahat/src/heapdump/AhatSnapshot.java17
-rw-r--r--tools/ahat/src/heapdump/Perflib.java91
-rw-r--r--tools/ahat/src/heapdump/Site.java38
-rw-r--r--tools/ahat/src/heapdump/Size.java89
-rw-r--r--tools/ahat/src/heapdump/Sort.java21
-rw-r--r--tools/ahat/src/manifest.txt2
-rw-r--r--tools/ahat/test-dump/Main.java6
-rw-r--r--tools/ahat/test/InstanceTest.java5
-rw-r--r--tools/ahat/test/NativeAllocationTest.java37
-rw-r--r--tools/ahat/test/Tests.java1
131 files changed, 2361 insertions, 1358 deletions
diff --git a/Android.mk b/Android.mk
index 803ba502cb..c0935a72bc 100644
--- a/Android.mk
+++ b/Android.mk
@@ -87,11 +87,20 @@ ART_HOST_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
$(ART_HOST_DEX_DEPENDENCIES) \
$(ART_HOST_SHARED_LIBRARY_DEPENDENCIES)
+
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
+ART_HOST_DEPENDENCIES += $(ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES)
+endif
+
ART_TARGET_DEPENDENCIES := \
$(ART_TARGET_EXECUTABLES) \
$(ART_TARGET_DEX_DEPENDENCIES) \
$(ART_TARGET_SHARED_LIBRARY_DEPENDENCIES)
+ifeq ($(ART_BUILD_TARGET_DEBUG),true)
+ART_TARGET_DEPENDENCIES += $(ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES)
+endif
+
########################################################################
# test rules
diff --git a/benchmark/Android.bp b/benchmark/Android.bp
index e784508e14..d0dfec91ff 100644
--- a/benchmark/Android.bp
+++ b/benchmark/Android.bp
@@ -49,7 +49,7 @@ art_cc_library {
name: "libartbenchmark-micronative-host",
host_supported: true,
device_supported: false,
- defaults: ["art_defaults", "art_debug_defaults"],
+ defaults: ["art_debug_defaults", "art_defaults" ],
srcs: [
"jni_loader.cc",
"micro-native/micro_native.cc",
diff --git a/build/Android.bp b/build/Android.bp
index 6c9f1d4dd1..c54f436b35 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -64,12 +64,6 @@ art_global_defaults {
"-Wno-constant-conversion",
"-Wno-undefined-var-template",
- "-DART_STACK_OVERFLOW_GAP_arm=8192",
- "-DART_STACK_OVERFLOW_GAP_arm64=8192",
- "-DART_STACK_OVERFLOW_GAP_mips=16384",
- "-DART_STACK_OVERFLOW_GAP_mips64=16384",
- "-DART_STACK_OVERFLOW_GAP_x86=8192",
- "-DART_STACK_OVERFLOW_GAP_x86_64=8192",
// Enable thread annotations for std::mutex, etc.
"-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS",
],
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 6de5aefc0b..446611816a 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -97,14 +97,19 @@ ART_HOST_DEX_DEPENDENCIES := $(foreach jar,$(HOST_CORE_JARS),$(HOST_OUT_JAVA_LIB
ART_TARGET_DEX_DEPENDENCIES := $(foreach jar,$(TARGET_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
ART_CORE_SHARED_LIBRARIES := libjavacore libopenjdk libopenjdkjvm libopenjdkjvmti
+ART_CORE_SHARED_DEBUG_LIBRARIES := libopenjdkd libopenjdkjvmd libopenjdkjvmtid
ART_HOST_SHARED_LIBRARY_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(ART_HOST_OUT_SHARED_LIBRARIES)/$(lib)$(ART_HOST_SHLIB_EXTENSION))
+ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(ART_HOST_OUT_SHARED_LIBRARIES)/$(lib)$(ART_HOST_SHLIB_EXTENSION))
ifdef HOST_2ND_ARCH
ART_HOST_SHARED_LIBRARY_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(2ND_HOST_OUT_SHARED_LIBRARIES)/$(lib).so)
+ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(2ND_HOST_OUT_SHARED_LIBRARIES)/$(lib).so)
endif
ART_TARGET_SHARED_LIBRARY_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
+ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
ifdef TARGET_2ND_ARCH
ART_TARGET_SHARED_LIBRARY_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(2ND_TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
+ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(2ND_TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
endif
ART_CORE_DEBUGGABLE_EXECUTABLES := \
diff --git a/build/art.go b/build/art.go
index 61a97590ea..6dca793f29 100644
--- a/build/art.go
+++ b/build/art.go
@@ -76,6 +76,14 @@ func globalFlags(ctx android.BaseContext) ([]string, []string) {
asflags = append(asflags, "-DART_USE_OLD_ARM_BACKEND=1")
}
+ cflags = append(cflags,
+ "-DART_STACK_OVERFLOW_GAP_arm=8192",
+ "-DART_STACK_OVERFLOW_GAP_arm64=8192",
+ "-DART_STACK_OVERFLOW_GAP_mips=16384",
+ "-DART_STACK_OVERFLOW_GAP_mips64=16384",
+ "-DART_STACK_OVERFLOW_GAP_x86=8192",
+ "-DART_STACK_OVERFLOW_GAP_x86_64=8192")
+
return cflags, asflags
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index c2d792d352..70c3f6098a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2238,7 +2238,7 @@ class InitializeClassVisitor : public CompilationVisitor {
public:
explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
- void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ void Visit(size_t class_def_index) OVERRIDE {
ATRACE_CALL();
jobject jclass_loader = manager_->GetClassLoader();
const DexFile& dex_file = *manager_->GetDexFile();
@@ -2253,89 +2253,123 @@ class InitializeClassVisitor : public CompilationVisitor {
Handle<mirror::Class> klass(
hs.NewHandle(manager_->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
- if (klass != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) {
- // Only try to initialize classes that were successfully verified.
- if (klass->IsVerified()) {
- // Attempt to initialize the class but bail if we either need to initialize the super-class
- // or static fields.
- manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
- if (!klass->IsInitialized()) {
- // We don't want non-trivial class initialization occurring on multiple threads due to
- // deadlock problems. For example, a parent class is initialized (holding its lock) that
- // refers to a sub-class in its static/class initializer causing it to try to acquire the
- // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock)
- // after first initializing its parents, whose locks are acquired. This leads to a
- // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock.
- // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
- // than use a special Object for the purpose we use the Class of java.lang.Class.
- Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass()));
- ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
- // Attempt to initialize allowing initialization of parent classes but still not static
- // fields.
+ if (klass != nullptr && !SkipClass(manager_->GetClassLoader(), dex_file, klass.Get())) {
+ TryInitializeClass(klass, class_loader);
+ }
+ // Clear any class not found or verification exceptions.
+ soa.Self()->ClearException();
+ }
+
+ // A helper function for initializing klass.
+ void TryInitializeClass(Handle<mirror::Class> klass, Handle<mirror::ClassLoader>& class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const DexFile& dex_file = klass->GetDexFile();
+ const DexFile::ClassDef* class_def = klass->GetClassDef();
+ const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def->class_idx_);
+ const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ StackHandleScope<3> hs(soa.Self());
+
+ mirror::Class::Status old_status = klass->GetStatus();;
+ // Only try to initialize classes that were successfully verified.
+ if (klass->IsVerified()) {
+ // Attempt to initialize the class but bail if we either need to initialize the super-class
+ // or static fields.
+ manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
+ old_status = klass->GetStatus();
+ if (!klass->IsInitialized()) {
+ // We don't want non-trivial class initialization occurring on multiple threads due to
+ // deadlock problems. For example, a parent class is initialized (holding its lock) that
+ // refers to a sub-class in its static/class initializer causing it to try to acquire the
+ // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock)
+ // after first initializing its parents, whose locks are acquired. This leads to a
+ // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock.
+ // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
+ // than use a special Object for the purpose we use the Class of java.lang.Class.
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass()));
+ ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
+ // Attempt to initialize allowing initialization of parent classes but still not static
+ // fields.
+ bool is_superclass_initialized = InitializeDependencies(klass, class_loader, soa.Self());
+ if (is_superclass_initialized) {
manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
- if (!klass->IsInitialized()) {
+ }
+ old_status = klass->GetStatus();
+ // If superclass cannot be initialized, no need to proceed.
+ if (!klass->IsInitialized() &&
+ is_superclass_initialized &&
+ manager_->GetCompiler()->IsImageClass(descriptor)) {
+ bool can_init_static_fields = false;
+ if (manager_->GetCompiler()->GetCompilerOptions().IsBootImage()) {
// We need to initialize static fields, we only do this for image classes that aren't
// marked with the $NoPreloadHolder (which implies this should not be initialized early).
- bool can_init_static_fields =
- manager_->GetCompiler()->GetCompilerOptions().IsBootImage() &&
- manager_->GetCompiler()->IsImageClass(descriptor) &&
- !StringPiece(descriptor).ends_with("$NoPreloadHolder;");
- if (can_init_static_fields) {
- VLOG(compiler) << "Initializing: " << descriptor;
- // TODO multithreading support. We should ensure the current compilation thread has
- // exclusive access to the runtime and the transaction. To achieve this, we could use
- // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity
- // checks in Thread::AssertThreadSuspensionIsAllowable.
- Runtime* const runtime = Runtime::Current();
- Transaction transaction;
-
- // Run the class initializer in transaction mode.
- runtime->EnterTransactionMode(&transaction);
- const mirror::Class::Status old_status = klass->GetStatus();
- bool success = manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true,
- true);
- // TODO we detach transaction from runtime to indicate we quit the transactional
- // mode which prevents the GC from visiting objects modified during the transaction.
- // Ensure GC is not run so don't access freed objects when aborting transaction.
-
- {
- ScopedAssertNoThreadSuspension ants("Transaction end");
- runtime->ExitTransactionMode();
-
- if (!success) {
- CHECK(soa.Self()->IsExceptionPending());
- mirror::Throwable* exception = soa.Self()->GetException();
- VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
- << exception->Dump();
- std::ostream* file_log = manager_->GetCompiler()->
- GetCompilerOptions().GetInitFailureOutput();
- if (file_log != nullptr) {
- *file_log << descriptor << "\n";
- *file_log << exception->Dump() << "\n";
- }
- soa.Self()->ClearException();
- transaction.Rollback();
- CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
+ can_init_static_fields = !StringPiece(descriptor).ends_with("$NoPreloadHolder;");
+ } else {
+ can_init_static_fields = manager_->GetCompiler()->GetCompilerOptions().IsAppImage() &&
+ !soa.Self()->IsExceptionPending() &&
+ NoClinitInDependency(klass, soa.Self(), &class_loader);
+ // TODO The checking for clinit can be removed since it's already
+ // checked when init superclass. Currently keep it because it contains
+ // processing of intern strings. Will be removed later when intern strings
+ // and clinit are both initialized.
+ }
+
+ if (can_init_static_fields) {
+ VLOG(compiler) << "Initializing: " << descriptor;
+ // TODO multithreading support. We should ensure the current compilation thread has
+ // exclusive access to the runtime and the transaction. To achieve this, we could use
+ // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity
+ // checks in Thread::AssertThreadSuspensionIsAllowable.
+ Runtime* const runtime = Runtime::Current();
+ Transaction transaction;
+
+ // Run the class initializer in transaction mode.
+ runtime->EnterTransactionMode(&transaction);
+ bool success = manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true,
+ true);
+ // TODO we detach transaction from runtime to indicate we quit the transactional
+ // mode which prevents the GC from visiting objects modified during the transaction.
+ // Ensure GC is not run so don't access freed objects when aborting transaction.
+
+ {
+ ScopedAssertNoThreadSuspension ants("Transaction end");
+ runtime->ExitTransactionMode();
+
+ if (!success) {
+ CHECK(soa.Self()->IsExceptionPending());
+ mirror::Throwable* exception = soa.Self()->GetException();
+ VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
+ << exception->Dump();
+ std::ostream* file_log = manager_->GetCompiler()->
+ GetCompilerOptions().GetInitFailureOutput();
+ if (file_log != nullptr) {
+ *file_log << descriptor << "\n";
+ *file_log << exception->Dump() << "\n";
}
+ soa.Self()->ClearException();
+ transaction.Rollback();
+ CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
}
+ }
- if (!success) {
- // On failure, still intern strings of static fields and seen in <clinit>, as these
- // will be created in the zygote. This is separated from the transaction code just
- // above as we will allocate strings, so must be allowed to suspend.
+ if (!success) {
+ // On failure, still intern strings of static fields and seen in <clinit>, as these
+ // will be created in the zygote. This is separated from the transaction code just
+ // above as we will allocate strings, so must be allowed to suspend.
+ if (&klass->GetDexFile() == manager_->GetDexFile()) {
InternStrings(klass, class_loader);
}
}
}
- soa.Self()->AssertNoPendingException();
}
+ soa.Self()->AssertNoPendingException();
}
- // Record the final class status if necessary.
- ClassReference ref(manager_->GetDexFile(), class_def_index);
- manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
}
- // Clear any class not found or verification exceptions.
- soa.Self()->ClearException();
+ // Record the final class status if necessary.
+ ClassReference ref(&dex_file, klass->GetDexClassDefIndex());
+ // Back up the status before doing initialization for static encoded fields,
+ // because the static encoded branch wants to keep the status as uninitialized.
+ manager_->GetCompiler()->RecordClassStatus(ref, old_status);
}
private:
@@ -2390,6 +2424,160 @@ class InitializeClassVisitor : public CompilationVisitor {
}
}
+ bool NoPotentialInternStrings(Handle<mirror::Class> klass,
+ Handle<mirror::ClassLoader>* class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::DexCache> h_dex_cache = hs.NewHandle(klass->GetDexCache());
+ const DexFile* dex_file = h_dex_cache->GetDexFile();
+ const DexFile::ClassDef* class_def = klass->GetClassDef();
+ annotations::RuntimeEncodedStaticFieldValueIterator value_it(*dex_file,
+ &h_dex_cache,
+ class_loader,
+ manager_->GetClassLinker(),
+ *class_def);
+
+ const auto jString = annotations::RuntimeEncodedStaticFieldValueIterator::kString;
+ for ( ; value_it.HasNext(); value_it.Next()) {
+ if (value_it.GetValueType() == jString) {
+ // We don't want to cache the static encoded strings, which are potential interns.
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ bool ResolveTypesOfMethods(Thread* self, ArtMethod* m)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ auto rtn_type = m->GetReturnType(true); // return value is discarded because resolve will be done internally.
+ if (rtn_type == nullptr) {
+ self->ClearException();
+ return false;
+ }
+ const DexFile::TypeList* types = m->GetParameterTypeList();
+ if (types != nullptr) {
+ for (uint32_t i = 0; i < types->Size(); ++i) {
+ dex::TypeIndex param_type_idx = types->GetTypeItem(i).type_idx_;
+ auto param_type = m->GetClassFromTypeIndex(param_type_idx, true);
+ if (param_type == nullptr) {
+ self->ClearException();
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ // Pre resolve types mentioned in all method signatures before start a transaction
+ // since ResolveType doesn't work in transaction mode.
+ bool PreResolveTypes(Thread* self, const Handle<mirror::Class>& klass)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ PointerSize pointer_size = manager_->GetClassLinker()->GetImagePointerSize();
+ for (ArtMethod& m : klass->GetMethods(pointer_size)) {
+ if (!ResolveTypesOfMethods(self, &m)) {
+ return false;
+ }
+ }
+ if (klass->IsInterface()) {
+ return true;
+ } else if (klass->HasSuperClass()) {
+ StackHandleScope<1> hs(self);
+ MutableHandle<mirror::Class> super_klass(hs.NewHandle<mirror::Class>(klass->GetSuperClass()));
+ for (int i = super_klass->GetVTableLength() - 1; i >= 0; --i) {
+ ArtMethod* m = klass->GetVTableEntry(i, pointer_size);
+ ArtMethod* super_m = super_klass->GetVTableEntry(i, pointer_size);
+ if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
+ return false;
+ }
+ }
+ for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+ super_klass.Assign(klass->GetIfTable()->GetInterface(i));
+ if (klass->GetClassLoader() != super_klass->GetClassLoader()) {
+ uint32_t num_methods = super_klass->NumVirtualMethods();
+ for (uint32_t j = 0; j < num_methods; ++j) {
+ ArtMethod* m = klass->GetIfTable()->GetMethodArray(i)->GetElementPtrSize<ArtMethod*>(
+ j, pointer_size);
+ ArtMethod* super_m = super_klass->GetVirtualMethod(j, pointer_size);
+ if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+ return true;
+ }
+
+ // Initialize the klass's dependencies recursively before initializing itself.
+ // Checking for interfaces is also necessary since interfaces can contain
+ // both default methods and static encoded fields.
+ bool InitializeDependencies(const Handle<mirror::Class>& klass,
+ Handle<mirror::ClassLoader> class_loader,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (klass->HasSuperClass()) {
+ ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> handle_scope_super(hs.NewHandle(super_class));
+ if (!handle_scope_super->IsInitialized()) {
+ this->TryInitializeClass(handle_scope_super, class_loader);
+ if (!handle_scope_super->IsInitialized()) {
+ return false;
+ }
+ }
+ }
+
+ uint32_t num_if = klass->NumDirectInterfaces();
+ for (size_t i = 0; i < num_if; i++) {
+ ObjPtr<mirror::Class>
+ interface = mirror::Class::GetDirectInterface(self, klass.Get(), i);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> handle_interface(hs.NewHandle(interface));
+
+ TryInitializeClass(handle_interface, class_loader);
+
+ if (!handle_interface->IsInitialized()) {
+ return false;
+ }
+ }
+
+ return PreResolveTypes(self, klass);
+ }
+
+ // In this phase the classes containing class initializers are ignored. Make sure no
+ // clinit appears in klass's super class chain and interfaces.
+ bool NoClinitInDependency(const Handle<mirror::Class>& klass,
+ Thread* self,
+ Handle<mirror::ClassLoader>* class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* clinit =
+ klass->FindClassInitializer(manager_->GetClassLinker()->GetImagePointerSize());
+ if (clinit != nullptr) {
+ VLOG(compiler) << klass->PrettyClass() << ' ' << clinit->PrettyMethod(true);
+ return false;
+ }
+ if (klass->HasSuperClass()) {
+ ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> handle_scope_super(hs.NewHandle(super_class));
+ if (!NoClinitInDependency(handle_scope_super, self, class_loader))
+ return false;
+ }
+
+ uint32_t num_if = klass->NumDirectInterfaces();
+ for (size_t i = 0; i < num_if; i++) {
+ ObjPtr<mirror::Class>
+ interface = mirror::Class::GetDirectInterface(self, klass.Get(), i);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> handle_interface(hs.NewHandle(interface));
+ if (!NoClinitInDependency(handle_interface, self, class_loader))
+ return false;
+ }
+
+ return NoPotentialInternStrings(klass, class_loader);
+ }
+
const ParallelCompilationManager* const manager_;
};
@@ -2409,7 +2597,10 @@ void CompilerDriver::InitializeClasses(jobject jni_class_loader,
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, dex_files,
init_thread_pool);
- if (GetCompilerOptions().IsBootImage()) {
+
+ if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsAppImage()) {
+ // Set the concurrency thread to 1 to support initialization for App Images since transaction
+ // doesn't support multithreading now.
// TODO: remove this when transactional mode supports multithreading.
init_thread_count = 1U;
}
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 6d2b243d07..117684a66b 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -54,11 +54,11 @@ constexpr uint32_t kAdrpThunkSize = 8u;
inline bool IsAdrpPatch(const LinkerPatch& patch) {
switch (patch.GetType()) {
- case LinkerPatch::Type::kMethodRelative:
case LinkerPatch::Type::kCall:
case LinkerPatch::Type::kCallRelative:
case LinkerPatch::Type::kBakerReadBarrierBranch:
return false;
+ case LinkerPatch::Type::kMethodRelative:
case LinkerPatch::Type::kTypeRelative:
case LinkerPatch::Type::kTypeBssEntry:
case LinkerPatch::Type::kStringRelative:
@@ -567,10 +567,10 @@ bool Arm64RelativePatcher::NeedsErratum843419Thunk(ArrayRef<const uint8_t> code,
return false;
}
- // And since LinkerPatch::Type::kStringRelative is using the result of the ADRP
- // for an ADD immediate, check for that as well. We generalize a bit to include
- // ADD/ADDS/SUB/SUBS immediate that either uses the ADRP destination or stores
- // the result to a different register.
+ // And since LinkerPatch::Type::k{Method,Type,String}Relative is using the result
+ // of the ADRP for an ADD immediate, check for that as well. We generalize a bit
+ // to include ADD/ADDS/SUB/SUBS immediate that either uses the ADRP destination
+ // or stores the result to a different register.
if ((next_insn & 0x1f000000) == 0x11000000 &&
((((next_insn >> 5) ^ adrp) & 0x1f) == 0 || ((next_insn ^ adrp) & 0x1f) != 0)) {
return false;
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index e778f75551..66b70ade2e 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -31,7 +31,7 @@
#include "os.h"
#include "safe_map.h"
#include "string_reference.h"
-#include "utils/type_reference.h"
+#include "type_reference.h"
namespace art {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 9ef692aaf0..c2b2ebfade 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -33,8 +33,8 @@
#include "read_barrier_option.h"
#include "stack_map_stream.h"
#include "string_reference.h"
+#include "type_reference.h"
#include "utils/label.h"
-#include "utils/type_reference.h"
namespace art {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index fa1c14dcda..2409a4d38d 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -24,8 +24,8 @@
#include "nodes.h"
#include "string_reference.h"
#include "parallel_move_resolver.h"
+#include "type_reference.h"
#include "utils/arm/assembler_thumb2.h"
-#include "utils/type_reference.h"
namespace art {
namespace arm {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 71e221da22..7a4b3d4805 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -25,8 +25,8 @@
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "string_reference.h"
+#include "type_reference.h"
#include "utils/arm64/assembler_arm64.h"
-#include "utils/type_reference.h"
// TODO(VIXL): Make VIXL compile with -Wshadow.
#pragma GCC diagnostic push
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 34821f83cd..1f8e1efd5e 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2139,7 +2139,8 @@ static void GenerateEqualLong(HCondition* cond, CodeGeneratorARMVIXL* codegen) {
static void GenerateLongComparesAndJumps(HCondition* cond,
vixl32::Label* true_label,
vixl32::Label* false_label,
- CodeGeneratorARMVIXL* codegen) {
+ CodeGeneratorARMVIXL* codegen,
+ bool is_far_target = true) {
LocationSummary* locations = cond->GetLocations();
Location left = locations->InAt(0);
Location right = locations->InAt(1);
@@ -2190,12 +2191,12 @@ static void GenerateLongComparesAndJumps(HCondition* cond,
__ Cmp(left_high, val_high);
if (if_cond == kCondNE) {
- __ B(ARMCondition(true_high_cond), true_label);
+ __ B(ARMCondition(true_high_cond), true_label, is_far_target);
} else if (if_cond == kCondEQ) {
- __ B(ARMCondition(false_high_cond), false_label);
+ __ B(ARMCondition(false_high_cond), false_label, is_far_target);
} else {
- __ B(ARMCondition(true_high_cond), true_label);
- __ B(ARMCondition(false_high_cond), false_label);
+ __ B(ARMCondition(true_high_cond), true_label, is_far_target);
+ __ B(ARMCondition(false_high_cond), false_label, is_far_target);
}
// Must be equal high, so compare the lows.
__ Cmp(left_low, val_low);
@@ -2205,19 +2206,19 @@ static void GenerateLongComparesAndJumps(HCondition* cond,
__ Cmp(left_high, right_high);
if (if_cond == kCondNE) {
- __ B(ARMCondition(true_high_cond), true_label);
+ __ B(ARMCondition(true_high_cond), true_label, is_far_target);
} else if (if_cond == kCondEQ) {
- __ B(ARMCondition(false_high_cond), false_label);
+ __ B(ARMCondition(false_high_cond), false_label, is_far_target);
} else {
- __ B(ARMCondition(true_high_cond), true_label);
- __ B(ARMCondition(false_high_cond), false_label);
+ __ B(ARMCondition(true_high_cond), true_label, is_far_target);
+ __ B(ARMCondition(false_high_cond), false_label, is_far_target);
}
// Must be equal high, so compare the lows.
__ Cmp(left_low, right_low);
}
// The last comparison might be unsigned.
// TODO: optimize cases where this is always true/false
- __ B(final_condition, true_label);
+ __ B(final_condition, true_label, is_far_target);
}
static void GenerateConditionLong(HCondition* cond, CodeGeneratorARMVIXL* codegen) {
@@ -2292,7 +2293,7 @@ static void GenerateConditionLong(HCondition* cond, CodeGeneratorARMVIXL* codege
vixl32::Label* const final_label = codegen->GetFinalLabel(cond, &done_label);
vixl32::Label true_label, false_label;
- GenerateLongComparesAndJumps(cond, &true_label, &false_label, codegen);
+ GenerateLongComparesAndJumps(cond, &true_label, &false_label, codegen, /* is_far_target */ false);
// False case: result = 0.
__ Bind(&false_label);
@@ -2957,7 +2958,8 @@ void InstructionCodeGeneratorARMVIXL::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
void InstructionCodeGeneratorARMVIXL::GenerateCompareTestAndBranch(HCondition* condition,
vixl32::Label* true_target_in,
- vixl32::Label* false_target_in) {
+ vixl32::Label* false_target_in,
+ bool is_far_target) {
if (CanGenerateTest(condition, codegen_->GetAssembler())) {
vixl32::Label* non_fallthrough_target;
bool invert;
@@ -2973,7 +2975,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateCompareTestAndBranch(HCondition* c
const auto cond = GenerateTest(condition, invert, codegen_);
- __ B(cond.first, non_fallthrough_target);
+ __ B(cond.first, non_fallthrough_target, is_far_target);
if (false_target_in != nullptr && false_target_in != non_fallthrough_target) {
__ B(false_target_in);
@@ -2989,7 +2991,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateCompareTestAndBranch(HCondition* c
vixl32::Label* false_target = (false_target_in == nullptr) ? &fallthrough : false_target_in;
DCHECK_EQ(condition->InputAt(0)->GetType(), Primitive::kPrimLong);
- GenerateLongComparesAndJumps(condition, true_target, false_target, codegen_);
+ GenerateLongComparesAndJumps(condition, true_target, false_target, codegen_, is_far_target);
if (false_target != &fallthrough) {
__ B(false_target);
@@ -3057,7 +3059,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateTestAndBranch(HInstruction* instru
// the HCondition, generate the comparison directly.
Primitive::Type type = condition->InputAt(0)->GetType();
if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
- GenerateCompareTestAndBranch(condition, true_target, false_target);
+ GenerateCompareTestAndBranch(condition, true_target, false_target, far_target);
return;
}
@@ -3076,14 +3078,14 @@ void InstructionCodeGeneratorARMVIXL::GenerateTestAndBranch(HInstruction* instru
if (right.IsImmediate() && right.GetImmediate() == 0 && (arm_cond.Is(ne) || arm_cond.Is(eq))) {
if (arm_cond.Is(eq)) {
- __ CompareAndBranchIfZero(left, non_fallthrough_target);
+ __ CompareAndBranchIfZero(left, non_fallthrough_target, far_target);
} else {
DCHECK(arm_cond.Is(ne));
- __ CompareAndBranchIfNonZero(left, non_fallthrough_target);
+ __ CompareAndBranchIfNonZero(left, non_fallthrough_target, far_target);
}
} else {
__ Cmp(left, right);
- __ B(arm_cond, non_fallthrough_target);
+ __ B(arm_cond, non_fallthrough_target, far_target);
}
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 91f7524c8e..ef809510ad 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -24,8 +24,8 @@
#include "nodes.h"
#include "string_reference.h"
#include "parallel_move_resolver.h"
+#include "type_reference.h"
#include "utils/arm/assembler_arm_vixl.h"
-#include "utils/type_reference.h"
// TODO(VIXL): make vixl clean wrt -Wshadow.
#pragma GCC diagnostic push
@@ -400,7 +400,8 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
bool far_target = true);
void GenerateCompareTestAndBranch(HCondition* condition,
vixl::aarch32::Label* true_target,
- vixl::aarch32::Label* false_target);
+ vixl::aarch32::Label* false_target,
+ bool is_far_target = true);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivRemByPowerOfTwo(HBinaryOperation* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index ff1fde6489..736b5070d9 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -23,8 +23,8 @@
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "string_reference.h"
+#include "type_reference.h"
#include "utils/mips/assembler_mips.h"
-#include "utils/type_reference.h"
namespace art {
namespace mips {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index f49ad49fce..8405040386 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -21,8 +21,8 @@
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
+#include "type_reference.h"
#include "utils/mips64/assembler_mips64.h"
-#include "utils/type_reference.h"
namespace art {
namespace mips64 {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 0ec6ee2fe2..f203d7f47e 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -470,6 +470,33 @@ static Handle<mirror::ObjectArray<mirror::Class>> AllocateInlineCacheHolder(
return inline_cache;
}
+bool HInliner::UseOnlyPolymorphicInliningWithNoDeopt() {
+ // If we are compiling AOT or OSR, pretend the call using inline caches is polymorphic and
+ // do not generate a deopt.
+ //
+ // For AOT:
+ // Generating a deopt does not ensure that we will actually capture the new types;
+ // and the danger is that we could be stuck in a loop with "forever" deoptimizations.
+ // Take for example the following scenario:
+ // - we capture the inline cache in one run
+ // - the next run, we deoptimize because we miss a type check, but the method
+ // never becomes hot again
+ // In this case, the inline cache will not be updated in the profile and the AOT code
+ // will keep deoptimizing.
+ // Another scenario is if we use profile compilation for a process which is not allowed
+ // to JIT (e.g. system server). If we deoptimize we will run interpreted code for the
+ // rest of the lifetime.
+ // TODO(calin):
+ // This is a compromise because we will most likely never update the inline cache
+ // in the profile (unless there's another reason to deopt). So we might be stuck with
+ // a sub-optimal inline cache.
+ // We could be smarter when capturing inline caches to mitigate this.
+ // (e.g. by having different thresholds for new and old methods).
+ //
+ // For OSR:
+ // We may come from the interpreter and it may have seen different receiver types.
+ return Runtime::Current()->IsAotCompiler() || outermost_graph_->IsCompilingOsr();
+}
bool HInliner::TryInlineFromInlineCache(const DexFile& caller_dex_file,
HInvoke* invoke_instruction,
ArtMethod* resolved_method)
@@ -503,9 +530,7 @@ bool HInliner::TryInlineFromInlineCache(const DexFile& caller_dex_file,
case kInlineCacheMonomorphic: {
MaybeRecordStat(kMonomorphicCall);
- if (outermost_graph_->IsCompilingOsr()) {
- // If we are compiling OSR, we pretend this call is polymorphic, as we may come from the
- // interpreter and it may have seen different receiver types.
+ if (UseOnlyPolymorphicInliningWithNoDeopt()) {
return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
} else {
return TryInlineMonomorphicCall(invoke_instruction, resolved_method, inline_cache);
@@ -578,12 +603,11 @@ HInliner::InlineCacheType HInliner::GetInlineCacheAOT(
return kInlineCacheNoData;
}
- ProfileCompilationInfo::OfflineProfileMethodInfo offline_profile;
- bool found = pci->GetMethod(caller_dex_file.GetLocation(),
- caller_dex_file.GetLocationChecksum(),
- caller_compilation_unit_.GetDexMethodIndex(),
- &offline_profile);
- if (!found) {
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> offline_profile =
+ pci->GetMethod(caller_dex_file.GetLocation(),
+ caller_dex_file.GetLocationChecksum(),
+ caller_compilation_unit_.GetDexMethodIndex());
+ if (offline_profile == nullptr) {
return kInlineCacheNoData; // no profile information for this invocation.
}
@@ -593,7 +617,7 @@ HInliner::InlineCacheType HInliner::GetInlineCacheAOT(
return kInlineCacheNoData;
} else {
return ExtractClassesFromOfflineProfile(invoke_instruction,
- offline_profile,
+ *(offline_profile.get()),
*inline_cache);
}
}
@@ -603,8 +627,8 @@ HInliner::InlineCacheType HInliner::ExtractClassesFromOfflineProfile(
const ProfileCompilationInfo::OfflineProfileMethodInfo& offline_profile,
/*out*/Handle<mirror::ObjectArray<mirror::Class>> inline_cache)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const auto it = offline_profile.inline_caches.find(invoke_instruction->GetDexPc());
- if (it == offline_profile.inline_caches.end()) {
+ const auto it = offline_profile.inline_caches->find(invoke_instruction->GetDexPc());
+ if (it == offline_profile.inline_caches->end()) {
return kInlineCacheUninitialized;
}
@@ -926,14 +950,11 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
// If we have inlined all targets before, and this receiver is the last seen,
// we deoptimize instead of keeping the original invoke instruction.
- bool deoptimize = all_targets_inlined &&
+ bool deoptimize = !UseOnlyPolymorphicInliningWithNoDeopt() &&
+ all_targets_inlined &&
(i != InlineCache::kIndividualCacheSize - 1) &&
(classes->Get(i + 1) == nullptr);
- if (outermost_graph_->IsCompilingOsr()) {
- // We do not support HDeoptimize in OSR methods.
- deoptimize = false;
- }
HInstruction* compare = AddTypeGuard(receiver,
cursor,
bb_cursor,
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 9e4685cbf4..67476b6956 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -180,6 +180,9 @@ class HInliner : public HOptimization {
Handle<mirror::ObjectArray<mirror::Class>> classes)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns whether or not we should use only polymorphic inlining with no deoptimizations.
+ bool UseOnlyPolymorphicInliningWithNoDeopt();
+
// Try CHA-based devirtualization to change virtual method calls into
// direct calls.
// Returns the actual method that resolved_method can be devirtualized to.
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 40fafb0ae5..df9e7164ed 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1000,8 +1000,8 @@ HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, u
void HInstructionBuilder::BuildConstructorFenceForAllocation(HInstruction* allocation) {
DCHECK(allocation != nullptr &&
- allocation->IsNewInstance() ||
- allocation->IsNewArray()); // corresponding to "new" keyword in JLS.
+ (allocation->IsNewInstance() ||
+ allocation->IsNewArray())); // corresponding to "new" keyword in JLS.
if (allocation->IsNewInstance()) {
// STRING SPECIAL HANDLING:
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index e8a62aafae..9803c9a0e9 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -2758,12 +2758,15 @@ void IntrinsicCodeGeneratorARM::VisitThreadInterrupted(HInvoke* invoke) {
int32_t offset = Thread::InterruptedOffset<kArmPointerSize>().Int32Value();
__ LoadFromOffset(kLoadWord, out, TR, offset);
Label done;
- __ CompareAndBranchIfZero(out, &done);
+ Label* const final_label = codegen_->GetFinalLabel(invoke, &done);
+ __ CompareAndBranchIfZero(out, final_label);
__ dmb(ISH);
__ LoadImmediate(IP, 0);
__ StoreToOffset(kStoreWord, IP, TR, offset);
__ dmb(ISH);
- __ Bind(&done);
+ if (done.IsLinked()) {
+ __ Bind(&done);
+ }
}
UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble)
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index ce3ba52b34..1a33b0ee01 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -3127,7 +3127,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
__ Add(out, in, -info.low);
__ Cmp(out, info.high - info.low + 1);
vixl32::Label allocate, done;
- __ B(hs, &allocate);
+ __ B(hs, &allocate, /* is_far_target */ false);
// If the value is within the bounds, load the j.l.Integer directly from the array.
uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
@@ -3164,12 +3164,15 @@ void IntrinsicCodeGeneratorARMVIXL::VisitThreadInterrupted(HInvoke* invoke) {
UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
vixl32::Register temp = temps.Acquire();
vixl32::Label done;
- __ CompareAndBranchIfZero(out, &done, /* far_target */ false);
+ vixl32::Label* const final_label = codegen_->GetFinalLabel(invoke, &done);
+ __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
__ Dmb(vixl32::ISH);
__ Mov(temp, 0);
assembler->StoreToOffset(kStoreWord, temp, tr, offset);
__ Dmb(vixl32::ISH);
- __ Bind(&done);
+ if (done.IsReferenced()) {
+ __ Bind(&done);
+ }
}
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing rounding mode, maybe?
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 963df5a938..94787c99b2 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -173,6 +173,39 @@ static bool IsZeroExtensionAndGet(HInstruction* instruction,
return false;
}
+// Detect situations with same-extension narrower operands.
+// Returns true on success and sets is_unsigned accordingly.
+static bool IsNarrowerOperands(HInstruction* a,
+ HInstruction* b,
+ Primitive::Type type,
+ /*out*/ HInstruction** r,
+ /*out*/ HInstruction** s,
+ /*out*/ bool* is_unsigned) {
+ if (IsSignExtensionAndGet(a, type, r) && IsSignExtensionAndGet(b, type, s)) {
+ *is_unsigned = false;
+ return true;
+ } else if (IsZeroExtensionAndGet(a, type, r) && IsZeroExtensionAndGet(b, type, s)) {
+ *is_unsigned = true;
+ return true;
+ }
+ return false;
+}
+
+// As above, single operand.
+static bool IsNarrowerOperand(HInstruction* a,
+ Primitive::Type type,
+ /*out*/ HInstruction** r,
+ /*out*/ bool* is_unsigned) {
+ if (IsSignExtensionAndGet(a, type, r)) {
+ *is_unsigned = false;
+ return true;
+ } else if (IsZeroExtensionAndGet(a, type, r)) {
+ *is_unsigned = true;
+ return true;
+ }
+ return false;
+}
+
// Detect up to two instructions a and b, and an accumulated constant c.
static bool IsAddConstHelper(HInstruction* instruction,
/*out*/ HInstruction** a,
@@ -756,7 +789,7 @@ bool HLoopOptimization::VectorizeDef(LoopNode* node,
return !IsUsedOutsideLoop(node->loop_info, instruction) && !instruction->DoesAnyWrite();
}
-// TODO: more operations and intrinsics, detect saturation arithmetic, etc.
+// TODO: saturation arithmetic.
bool HLoopOptimization::VectorizeUse(LoopNode* node,
HInstruction* instruction,
bool generate_code,
@@ -867,25 +900,38 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
return true;
}
// Deal with vector restrictions.
+ HInstruction* opa = instruction->InputAt(0);
+ HInstruction* opb = instruction->InputAt(1);
+ HInstruction* r = opa;
+ bool is_unsigned = false;
if ((HasVectorRestrictions(restrictions, kNoShift)) ||
(instruction->IsShr() && HasVectorRestrictions(restrictions, kNoShr))) {
return false; // unsupported instruction
- } else if ((instruction->IsShr() || instruction->IsUShr()) &&
- HasVectorRestrictions(restrictions, kNoHiBits)) {
- return false; // hibits may impact lobits; TODO: we can do better!
+ } else if (HasVectorRestrictions(restrictions, kNoHiBits)) {
+ // Shifts right need extra care to account for higher order bits.
+ // TODO: less likely shr/unsigned and ushr/signed can be handled by flipping signedness.
+ if (instruction->IsShr() &&
+ (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || is_unsigned)) {
+ return false; // reject, unless all operands are sign-extension narrower
+ } else if (instruction->IsUShr() &&
+ (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || !is_unsigned)) {
+ return false; // reject, unless all operands are zero-extension narrower
+ }
}
// Accept shift operator for vectorizable/invariant operands.
// TODO: accept symbolic, albeit loop invariant shift factors.
- HInstruction* opa = instruction->InputAt(0);
- HInstruction* opb = instruction->InputAt(1);
+ DCHECK(r != nullptr);
+ if (generate_code && vector_mode_ != kVector) { // de-idiom
+ r = opa;
+ }
int64_t distance = 0;
- if (VectorizeUse(node, opa, generate_code, type, restrictions) &&
+ if (VectorizeUse(node, r, generate_code, type, restrictions) &&
IsInt64AndGet(opb, /*out*/ &distance)) {
// Restrict shift distance to packed data type width.
int64_t max_distance = Primitive::ComponentSize(type) * 8;
if (0 <= distance && distance < max_distance) {
if (generate_code) {
- GenerateVecOp(instruction, vector_map_->Get(opa), opb, type);
+ GenerateVecOp(instruction, vector_map_->Get(r), opb, type);
}
return true;
}
@@ -899,16 +945,23 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
case Intrinsics::kMathAbsFloat:
case Intrinsics::kMathAbsDouble: {
// Deal with vector restrictions.
- if (HasVectorRestrictions(restrictions, kNoAbs) ||
- HasVectorRestrictions(restrictions, kNoHiBits)) {
- // TODO: we can do better for some hibits cases.
+ HInstruction* opa = instruction->InputAt(0);
+ HInstruction* r = opa;
+ bool is_unsigned = false;
+ if (HasVectorRestrictions(restrictions, kNoAbs)) {
return false;
+ } else if (HasVectorRestrictions(restrictions, kNoHiBits) &&
+ (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || is_unsigned)) {
+ return false; // reject, unless operand is sign-extension narrower
}
// Accept ABS(x) for vectorizable operand.
- HInstruction* opa = instruction->InputAt(0);
- if (VectorizeUse(node, opa, generate_code, type, restrictions)) {
+ DCHECK(r != nullptr);
+ if (generate_code && vector_mode_ != kVector) { // de-idiom
+ r = opa;
+ }
+ if (VectorizeUse(node, r, generate_code, type, restrictions)) {
if (generate_code) {
- GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
+ GenerateVecOp(instruction, vector_map_->Get(r), nullptr, type);
}
return true;
}
@@ -923,18 +976,28 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
case Intrinsics::kMathMaxFloatFloat:
case Intrinsics::kMathMaxDoubleDouble: {
// Deal with vector restrictions.
- if (HasVectorRestrictions(restrictions, kNoMinMax) ||
- HasVectorRestrictions(restrictions, kNoHiBits)) {
- // TODO: we can do better for some hibits cases.
+ HInstruction* opa = instruction->InputAt(0);
+ HInstruction* opb = instruction->InputAt(1);
+ HInstruction* r = opa;
+ HInstruction* s = opb;
+ bool is_unsigned = false;
+ if (HasVectorRestrictions(restrictions, kNoMinMax)) {
return false;
+ } else if (HasVectorRestrictions(restrictions, kNoHiBits) &&
+ !IsNarrowerOperands(opa, opb, type, &r, &s, &is_unsigned)) {
+ return false; // reject, unless all operands are same-extension narrower
}
// Accept MIN/MAX(x, y) for vectorizable operands.
- HInstruction* opa = instruction->InputAt(0);
- HInstruction* opb = instruction->InputAt(1);
- if (VectorizeUse(node, opa, generate_code, type, restrictions) &&
- VectorizeUse(node, opb, generate_code, type, restrictions)) {
+ DCHECK(r != nullptr && s != nullptr);
+ if (generate_code && vector_mode_ != kVector) { // de-idiom
+ r = opa;
+ s = opb;
+ }
+ if (VectorizeUse(node, r, generate_code, type, restrictions) &&
+ VectorizeUse(node, s, generate_code, type, restrictions)) {
if (generate_code) {
- GenerateVecOp(instruction, vector_map_->Get(opa), vector_map_->Get(opb), type);
+ GenerateVecOp(
+ instruction, vector_map_->Get(r), vector_map_->Get(s), type, is_unsigned);
}
return true;
}
@@ -959,11 +1022,11 @@ bool HLoopOptimization::TrySetVectorType(Primitive::Type type, uint64_t* restric
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
- *restrictions |= kNoDiv | kNoAbs;
+ *restrictions |= kNoDiv;
return TrySetVectorLength(16);
case Primitive::kPrimChar:
case Primitive::kPrimShort:
- *restrictions |= kNoDiv | kNoAbs;
+ *restrictions |= kNoDiv;
return TrySetVectorLength(8);
case Primitive::kPrimInt:
*restrictions |= kNoDiv;
@@ -1098,13 +1161,14 @@ void HLoopOptimization::GenerateVecMem(HInstruction* org,
void HLoopOptimization::GenerateVecOp(HInstruction* org,
HInstruction* opa,
HInstruction* opb,
- Primitive::Type type) {
+ Primitive::Type type,
+ bool is_unsigned) {
if (vector_mode_ == kSequential) {
- // Scalar code follows implicit integral promotion.
- if (type == Primitive::kPrimBoolean ||
- type == Primitive::kPrimByte ||
- type == Primitive::kPrimChar ||
- type == Primitive::kPrimShort) {
+ // Non-converting scalar code follows implicit integral promotion.
+ if (!org->IsTypeConversion() && (type == Primitive::kPrimBoolean ||
+ type == Primitive::kPrimByte ||
+ type == Primitive::kPrimChar ||
+ type == Primitive::kPrimShort)) {
type = Primitive::kPrimInt;
}
}
@@ -1185,7 +1249,6 @@ void HLoopOptimization::GenerateVecOp(HInstruction* org,
case Intrinsics::kMathMinLongLong:
case Intrinsics::kMathMinFloatFloat:
case Intrinsics::kMathMinDoubleDouble: {
- bool is_unsigned = false; // TODO: detect unsigned versions
vector = new (global_allocator_)
HVecMin(global_allocator_, opa, opb, type, vector_length_, is_unsigned);
break;
@@ -1194,7 +1257,6 @@ void HLoopOptimization::GenerateVecOp(HInstruction* org,
case Intrinsics::kMathMaxLongLong:
case Intrinsics::kMathMaxFloatFloat:
case Intrinsics::kMathMaxDoubleDouble: {
- bool is_unsigned = false; // TODO: detect unsigned versions
vector = new (global_allocator_)
HVecMax(global_allocator_, opa, opb, type, vector_length_, is_unsigned);
break;
@@ -1258,7 +1320,7 @@ bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
Primitive::Type type,
uint64_t restrictions) {
// Test for top level arithmetic shift right x >> 1 or logical shift right x >>> 1
- // (note whether the sign bit in higher precision is shifted in has no effect
+ // (note whether the sign bit in wider precision is shifted in has no effect
// on the narrow precision computed by the idiom).
int64_t distance = 0;
if ((instruction->IsShr() ||
@@ -1269,6 +1331,7 @@ bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
HInstruction* b = nullptr;
int64_t c = 0;
if (IsAddConst(instruction->InputAt(0), /*out*/ &a, /*out*/ &b, /*out*/ &c)) {
+ DCHECK(a != nullptr && b != nullptr);
// Accept c == 1 (rounded) or c == 0 (not rounded).
bool is_rounded = false;
if (c == 1) {
@@ -1280,11 +1343,7 @@ bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
HInstruction* r = nullptr;
HInstruction* s = nullptr;
bool is_unsigned = false;
- if (IsZeroExtensionAndGet(a, type, &r) && IsZeroExtensionAndGet(b, type, &s)) {
- is_unsigned = true;
- } else if (IsSignExtensionAndGet(a, type, &r) && IsSignExtensionAndGet(b, type, &s)) {
- is_unsigned = false;
- } else {
+ if (!IsNarrowerOperands(a, b, type, &r, &s, &is_unsigned)) {
return false;
}
// Deal with vector restrictions.
@@ -1295,6 +1354,10 @@ bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
// Accept recognized halving add for vectorizable operands. Vectorized code uses the
// shorthand idiomatic operation. Sequential code uses the original scalar expressions.
DCHECK(r != nullptr && s != nullptr);
+ if (generate_code && vector_mode_ != kVector) { // de-idiom
+ r = instruction->InputAt(0);
+ s = instruction->InputAt(1);
+ }
if (VectorizeUse(node, r, generate_code, type, restrictions) &&
VectorizeUse(node, s, generate_code, type, restrictions)) {
if (generate_code) {
@@ -1308,12 +1371,7 @@ bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
is_unsigned,
is_rounded));
} else {
- VectorizeUse(node, instruction->InputAt(0), generate_code, type, restrictions);
- VectorizeUse(node, instruction->InputAt(1), generate_code, type, restrictions);
- GenerateVecOp(instruction,
- vector_map_->Get(instruction->InputAt(0)),
- vector_map_->Get(instruction->InputAt(1)),
- type);
+ GenerateVecOp(instruction, vector_map_->Get(r), vector_map_->Get(s), type);
}
}
return true;
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 6d5978d337..35298d4076 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -137,7 +137,11 @@ class HLoopOptimization : public HOptimization {
HInstruction* opa,
HInstruction* opb,
Primitive::Type type);
- void GenerateVecOp(HInstruction* org, HInstruction* opa, HInstruction* opb, Primitive::Type type);
+ void GenerateVecOp(HInstruction* org,
+ HInstruction* opa,
+ HInstruction* opb,
+ Primitive::Type type,
+ bool is_unsigned = false);
// Vectorization idioms.
bool VectorizeHalvingAddIdiom(LoopNode* node,
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index bde7f2c1e0..689991010e 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2642,7 +2642,7 @@ std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative:
return os << "BootImageLinkTimePcRelative";
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
- return os << "Direct";
+ return os << "DirectAddress";
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
return os << "DexCachePcRelative";
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod:
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 660409f6f9..a35b199346 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -2130,7 +2130,10 @@ class Dex2Oat FINAL {
bool LoadProfile() {
DCHECK(UseProfile());
-
+ // TODO(calin): We should be using the runtime arena pool (instead of the
+ // default profile arena). However the setup logic is messy and needs
+ // cleaning up before that (e.g. the oat writers are created before the
+ // runtime).
profile_compilation_info_.reset(new ProfileCompilationInfo());
ScopedFlock flock;
bool success = true;
diff --git a/dexoptanalyzer/Android.bp b/dexoptanalyzer/Android.bp
index cf4c99ec6d..da6663dd6b 100644
--- a/dexoptanalyzer/Android.bp
+++ b/dexoptanalyzer/Android.bp
@@ -48,8 +48,8 @@ art_cc_binary {
art_cc_binary {
name: "dexoptanalyzerd",
defaults: [
- "dexoptanalyzer-defaults",
"art_debug_defaults",
+ "dexoptanalyzer-defaults",
],
shared_libs: [
"libartd",
diff --git a/disassembler/Android.bp b/disassembler/Android.bp
index 8dfada223b..086b8c7990 100644
--- a/disassembler/Android.bp
+++ b/disassembler/Android.bp
@@ -47,8 +47,8 @@ art_cc_library {
art_cc_library {
name: "libartd-disassembler",
defaults: [
- "libart-disassembler-defaults",
"art_debug_defaults",
+ "libart-disassembler-defaults",
],
shared_libs: [
// For disassembler_arm*.
diff --git a/imgdiag/Android.bp b/imgdiag/Android.bp
index eaeb78efa4..9459bb5504 100644
--- a/imgdiag/Android.bp
+++ b/imgdiag/Android.bp
@@ -64,8 +64,8 @@ art_cc_binary {
art_cc_binary {
name: "imgdiagd",
defaults: [
- "imgdiag-defaults",
"art_debug_defaults",
+ "imgdiag-defaults",
],
shared_libs: [
"libartd",
diff --git a/patchoat/Android.bp b/patchoat/Android.bp
index a78f97d54c..d3bc2a754b 100644
--- a/patchoat/Android.bp
+++ b/patchoat/Android.bp
@@ -40,8 +40,8 @@ art_cc_binary {
art_cc_binary {
name: "patchoatd",
defaults: [
- "patchoat-defaults",
"art_debug_defaults",
+ "patchoat-defaults",
],
shared_libs: [
"libartd",
diff --git a/profman/Android.bp b/profman/Android.bp
index 2dcbaee456..a327ef2c16 100644
--- a/profman/Android.bp
+++ b/profman/Android.bp
@@ -49,8 +49,8 @@ art_cc_binary {
art_cc_binary {
name: "profmand",
defaults: [
- "profman-defaults",
"art_debug_defaults",
+ "profman-defaults",
],
shared_libs: [
"libartd",
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 38254e2436..41b9f99207 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -21,6 +21,7 @@
#include "common_runtime_test.h"
#include "exec_utils.h"
#include "jit/profile_compilation_info.h"
+#include "linear_alloc.h"
#include "mirror/class-inl.h"
#include "obj_ptr-inl.h"
#include "profile_assistant.h"
@@ -30,6 +31,11 @@
namespace art {
class ProfileAssistantTest : public CommonRuntimeTest {
+ public:
+ void PostRuntimeCreate() OVERRIDE {
+ arena_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
+ }
+
protected:
void SetupProfile(const std::string& id,
uint32_t checksum,
@@ -66,38 +72,46 @@ class ProfileAssistantTest : public CommonRuntimeTest {
ASSERT_TRUE(profile.GetFile()->ResetOffset());
}
+ // Creates an inline cache which will be destructed at the end of the test.
+ ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() {
+ used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap(
+ std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+ return used_inline_caches.back().get();
+ }
+
ProfileCompilationInfo::OfflineProfileMethodInfo GetOfflineProfileMethodInfo(
const std::string& dex_location1, uint32_t dex_checksum1,
const std::string& dex_location2, uint32_t dex_checksum2) {
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
+ ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
pmi.dex_references.emplace_back(dex_location1, dex_checksum1);
pmi.dex_references.emplace_back(dex_location2, dex_checksum2);
// Monomorphic
for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data;
+ ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
// Polymorphic
for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data;
+ ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
// Megamorphic
for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data;
+ ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMegamorphic();
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
// Missing types
for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data;
+ ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMissingTypes();
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
return pmi;
@@ -247,13 +261,13 @@ class ProfileAssistantTest : public CommonRuntimeTest {
bool is_megamorphic,
bool is_missing_types)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
- ASSERT_TRUE(info.GetMethod(method->GetDexFile()->GetLocation(),
- method->GetDexFile()->GetLocationChecksum(),
- method->GetDexMethodIndex(),
- &pmi));
- ASSERT_EQ(pmi.inline_caches.size(), 1u);
- ProfileCompilationInfo::DexPcData dex_pc_data = pmi.inline_caches.begin()->second;
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> pmi =
+ info.GetMethod(method->GetDexFile()->GetLocation(),
+ method->GetDexFile()->GetLocationChecksum(),
+ method->GetDexMethodIndex());
+ ASSERT_TRUE(pmi != nullptr);
+ ASSERT_EQ(pmi->inline_caches->size(), 1u);
+ const ProfileCompilationInfo::DexPcData& dex_pc_data = pmi->inline_caches->begin()->second;
ASSERT_EQ(dex_pc_data.is_megamorphic, is_megamorphic);
ASSERT_EQ(dex_pc_data.is_missing_types, is_missing_types);
@@ -262,7 +276,7 @@ class ProfileAssistantTest : public CommonRuntimeTest {
for (mirror::Class* it : expected_clases) {
for (const auto& class_ref : dex_pc_data.classes) {
ProfileCompilationInfo::DexReference dex_ref =
- pmi.dex_references[class_ref.dex_profile_index];
+ pmi->dex_references[class_ref.dex_profile_index];
if (dex_ref.MatchesDex(&(it->GetDexFile())) &&
class_ref.type_index == it->GetDexTypeIndex()) {
found++;
@@ -272,6 +286,13 @@ class ProfileAssistantTest : public CommonRuntimeTest {
ASSERT_EQ(expected_clases.size(), found);
}
+
+ std::unique_ptr<ArenaAllocator> arena_;
+
+ // Cache of inline caches generated during tests.
+ // This makes it easier to pass data between different utilities and ensure that
+ // caches are destructed at the end of the test.
+ std::vector<std::unique_ptr<ProfileCompilationInfo::InlineCacheMap>> used_inline_caches;
};
TEST_F(ProfileAssistantTest, AdviseCompilationEmptyReferences) {
@@ -541,11 +562,11 @@ TEST_F(ProfileAssistantTest, TestProfileCreationGenerateMethods) {
for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
if (!method.IsCopied() && method.GetCodeItem() != nullptr) {
++method_count;
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
- ASSERT_TRUE(info.GetMethod(method.GetDexFile()->GetLocation(),
- method.GetDexFile()->GetLocationChecksum(),
- method.GetDexMethodIndex(),
- &pmi));
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> pmi =
+ info.GetMethod(method.GetDexFile()->GetLocation(),
+ method.GetDexFile()->GetLocationChecksum(),
+ method.GetDexMethodIndex());
+ ASSERT_TRUE(pmi != nullptr);
}
}
EXPECT_GT(method_count, 0u);
@@ -689,12 +710,12 @@ TEST_F(ProfileAssistantTest, TestProfileCreateInlineCache) {
// Verify that method noInlineCache has no inline caches in the profile.
ArtMethod* no_inline_cache = GetVirtualMethod(class_loader, "LTestInline;", "noInlineCache");
ASSERT_TRUE(no_inline_cache != nullptr);
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi_no_inline_cache;
- ASSERT_TRUE(info.GetMethod(no_inline_cache->GetDexFile()->GetLocation(),
- no_inline_cache->GetDexFile()->GetLocationChecksum(),
- no_inline_cache->GetDexMethodIndex(),
- &pmi_no_inline_cache));
- ASSERT_TRUE(pmi_no_inline_cache.inline_caches.empty());
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> pmi_no_inline_cache =
+ info.GetMethod(no_inline_cache->GetDexFile()->GetLocation(),
+ no_inline_cache->GetDexFile()->GetLocationChecksum(),
+ no_inline_cache->GetDexMethodIndex());
+ ASSERT_TRUE(pmi_no_inline_cache != nullptr);
+ ASSERT_TRUE(pmi_no_inline_cache->inline_caches->empty());
}
}
diff --git a/profman/profman.cc b/profman/profman.cc
index 384e129150..e565171265 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -39,10 +39,11 @@
#include "bytecode_utils.h"
#include "dex_file.h"
#include "jit/profile_compilation_info.h"
+#include "profile_assistant.h"
#include "runtime.h"
+#include "type_reference.h"
#include "utils.h"
#include "zip_archive.h"
-#include "profile_assistant.h"
namespace art {
@@ -423,15 +424,13 @@ class ProfMan FINAL {
}
for (const std::unique_ptr<const DexFile>& dex_file : *dex_files) {
std::set<dex::TypeIndex> class_types;
- ProfileCompilationInfo::MethodMap methods;
- if (profile_info.GetClassesAndMethods(dex_file.get(), &class_types, &methods)) {
+ std::set<uint16_t> methods;
+ if (profile_info.GetClassesAndMethods(*dex_file.get(), &class_types, &methods)) {
for (const dex::TypeIndex& type_index : class_types) {
const DexFile::TypeId& type_id = dex_file->GetTypeId(type_index);
out_lines->insert(std::string(dex_file->GetTypeDescriptor(type_id)));
}
- for (const auto& pair : methods) {
- // TODO: Process inline caches.
- const uint16_t dex_method_idx = pair.first;
+ for (uint16_t dex_method_idx : methods) {
const DexFile::MethodId& id = dex_file->GetMethodId(dex_method_idx);
std::string signature_string(dex_file->GetMethodSignature(id).ToString());
std::string type_string(dex_file->GetTypeDescriptor(dex_file->GetTypeId(id.class_idx_)));
@@ -562,7 +561,7 @@ class ProfMan FINAL {
// Return true if the definition of the class was found in any of the dex_files.
bool FindClass(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
const std::string& klass_descriptor,
- /*out*/ProfileMethodInfo::ProfileClassReference* class_ref) {
+ /*out*/TypeReference* class_ref) {
for (const std::unique_ptr<const DexFile>& dex_file_ptr : dex_files) {
const DexFile* dex_file = dex_file_ptr.get();
const DexFile::TypeId* type_id = dex_file->FindTypeId(klass_descriptor.c_str());
@@ -582,8 +581,7 @@ class ProfMan FINAL {
}
// Find the method specified by method_spec in the class class_ref.
- uint32_t FindMethodIndex(const ProfileMethodInfo::ProfileClassReference& class_ref,
- const std::string& method_spec) {
+ uint32_t FindMethodIndex(const TypeReference& class_ref, const std::string& method_spec) {
std::vector<std::string> name_and_signature;
Split(method_spec, kProfileParsingFirstCharInSignature, &name_and_signature);
if (name_and_signature.size() != 2) {
@@ -625,7 +623,7 @@ class ProfMan FINAL {
// The format of the method spec is "inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;".
//
// TODO(calin): support INVOKE_INTERFACE and the range variants.
- bool HasSingleInvoke(const ProfileMethodInfo::ProfileClassReference& class_ref,
+ bool HasSingleInvoke(const TypeReference& class_ref,
uint16_t method_index,
/*out*/uint32_t* dex_pc) {
const DexFile* dex_file = class_ref.dex_file;
@@ -674,7 +672,7 @@ class ProfMan FINAL {
method_str = line.substr(method_sep_index + kMethodSep.size());
}
- ProfileMethodInfo::ProfileClassReference class_ref;
+ TypeReference class_ref;
if (!FindClass(dex_files, klass, &class_ref)) {
LOG(WARNING) << "Could not find class: " << klass;
return false;
@@ -745,7 +743,7 @@ class ProfMan FINAL {
if (!HasSingleInvoke(class_ref, method_index, &dex_pc)) {
return false;
}
- std::vector<ProfileMethodInfo::ProfileClassReference> classes(inline_cache_elems.size());
+ std::vector<TypeReference> classes(inline_cache_elems.size());
size_t class_it = 0;
for (const std::string& ic_class : inline_cache_elems) {
if (!FindClass(dex_files, ic_class, &(classes[class_it++]))) {
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 8ee5498115..aa7dc65871 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -352,6 +352,7 @@ cc_defaults {
"libdl",
// For android::FileMap used by libziparchive.
"libutils",
+ "libtombstoned_client"
],
static_libs: [
// ZipArchive support, the order matters here to get all symbols.
diff --git a/runtime/arch/instruction_set.cc b/runtime/arch/instruction_set.cc
index 8f64dcd306..64af7eccd4 100644
--- a/runtime/arch/instruction_set.cc
+++ b/runtime/arch/instruction_set.cc
@@ -18,8 +18,8 @@
// Explicitly include our own elf.h to avoid Linux and other dependencies.
#include "../elf.h"
+#include "android-base/logging.h"
#include "base/bit_utils.h"
-#include "base/logging.h"
#include "globals.h"
namespace art {
@@ -36,11 +36,9 @@ void InstructionSetAbort(InstructionSet isa) {
case kNone:
LOG(FATAL) << "Unsupported instruction set " << isa;
UNREACHABLE();
-
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
}
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
}
const char* GetInstructionSetString(InstructionSet isa) {
@@ -60,10 +58,9 @@ const char* GetInstructionSetString(InstructionSet isa) {
return "mips64";
case kNone:
return "none";
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
}
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
}
InstructionSet GetInstructionSetFromString(const char* isa_str) {
@@ -128,10 +125,9 @@ size_t GetInstructionSetAlignment(InstructionSet isa) {
case kNone:
LOG(FATAL) << "ISA kNone does not have alignment.";
UNREACHABLE();
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
}
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
}
#if !defined(ART_STACK_OVERFLOW_GAP_arm) || !defined(ART_STACK_OVERFLOW_GAP_arm64) || \
@@ -197,11 +193,9 @@ size_t GetStackOverflowReservedBytes(InstructionSet isa) {
case kNone:
LOG(FATAL) << "kNone has no stack overflow size";
UNREACHABLE();
-
- default:
- LOG(FATAL) << "Unknown instruction set" << isa;
- UNREACHABLE();
}
+ LOG(FATAL) << "Unknown instruction set" << isa;
+ UNREACHABLE();
}
} // namespace art
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index 7ef9a7abb5..7203b188c1 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -93,7 +93,7 @@ InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags);
// Fatal logging out of line to keep the header clean of logging.h.
NO_RETURN void InstructionSetAbort(InstructionSet isa);
-static inline PointerSize GetInstructionSetPointerSize(InstructionSet isa) {
+constexpr PointerSize GetInstructionSetPointerSize(InstructionSet isa) {
switch (isa) {
case kArm:
// Fall-through.
@@ -109,23 +109,37 @@ static inline PointerSize GetInstructionSetPointerSize(InstructionSet isa) {
return kMipsPointerSize;
case kMips64:
return kMips64PointerSize;
- default:
- InstructionSetAbort(isa);
+
+ case kNone:
+ break;
}
+ InstructionSetAbort(isa);
}
-ALWAYS_INLINE static inline constexpr size_t GetInstructionSetInstructionAlignment(
- InstructionSet isa) {
- return (isa == kThumb2 || isa == kArm) ? kThumb2InstructionAlignment :
- (isa == kArm64) ? kArm64InstructionAlignment :
- (isa == kX86) ? kX86InstructionAlignment :
- (isa == kX86_64) ? kX86_64InstructionAlignment :
- (isa == kMips) ? kMipsInstructionAlignment :
- (isa == kMips64) ? kMips64InstructionAlignment :
- 0; // Invalid case, but constexpr doesn't support asserts.
+constexpr size_t GetInstructionSetInstructionAlignment(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ // Fall-through.
+ case kThumb2:
+ return kThumb2InstructionAlignment;
+ case kArm64:
+ return kArm64InstructionAlignment;
+ case kX86:
+ return kX86InstructionAlignment;
+ case kX86_64:
+ return kX86_64InstructionAlignment;
+ case kMips:
+ return kMipsInstructionAlignment;
+ case kMips64:
+ return kMips64InstructionAlignment;
+
+ case kNone:
+ break;
+ }
+ InstructionSetAbort(isa);
}
-static inline bool IsValidInstructionSet(InstructionSet isa) {
+constexpr bool IsValidInstructionSet(InstructionSet isa) {
switch (isa) {
case kArm:
case kThumb2:
@@ -135,15 +149,16 @@ static inline bool IsValidInstructionSet(InstructionSet isa) {
case kMips:
case kMips64:
return true;
+
case kNone:
- default:
return false;
}
+ return false;
}
size_t GetInstructionSetAlignment(InstructionSet isa);
-static inline bool Is64BitInstructionSet(InstructionSet isa) {
+constexpr bool Is64BitInstructionSet(InstructionSet isa) {
switch (isa) {
case kArm:
case kThumb2:
@@ -156,16 +171,17 @@ static inline bool Is64BitInstructionSet(InstructionSet isa) {
case kMips64:
return true;
- default:
- InstructionSetAbort(isa);
+ case kNone:
+ break;
}
+ InstructionSetAbort(isa);
}
-static inline PointerSize InstructionSetPointerSize(InstructionSet isa) {
+constexpr PointerSize InstructionSetPointerSize(InstructionSet isa) {
return Is64BitInstructionSet(isa) ? PointerSize::k64 : PointerSize::k32;
}
-static inline size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
+constexpr size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
switch (isa) {
case kArm:
// Fall-through.
@@ -182,12 +198,13 @@ static inline size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
case kMips64:
return 8;
- default:
- InstructionSetAbort(isa);
+ case kNone:
+ break;
}
+ InstructionSetAbort(isa);
}
-static inline size_t GetBytesPerFprSpillLocation(InstructionSet isa) {
+constexpr size_t GetBytesPerFprSpillLocation(InstructionSet isa) {
switch (isa) {
case kArm:
// Fall-through.
@@ -204,9 +221,10 @@ static inline size_t GetBytesPerFprSpillLocation(InstructionSet isa) {
case kMips64:
return 8;
- default:
- InstructionSetAbort(isa);
+ case kNone:
+ break;
}
+ InstructionSetAbort(isa);
}
size_t GetStackOverflowReservedBytes(InstructionSet isa);
@@ -243,7 +261,7 @@ static inline constexpr TwoWordReturn GetTwoWordFailureValue() {
}
// Use the lower 32b for the method pointer and the upper 32b for the code pointer.
-static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
+static inline constexpr TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
static_assert(sizeof(uint32_t) == sizeof(uintptr_t), "Unexpected size difference");
uint32_t lo32 = lo;
uint64_t hi64 = static_cast<uint64_t>(hi);
@@ -251,6 +269,10 @@ static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
}
#elif defined(__x86_64__) || defined(__aarch64__) || (defined(__mips__) && defined(__LP64__))
+
+// Note: TwoWordReturn can't be constexpr for 64-bit targets. We'd need a constexpr constructor,
+// which would violate C-linkage in the entrypoint functions.
+
struct TwoWordReturn {
uintptr_t lo;
uintptr_t hi;
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index fc5b5b1d20..f672882254 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -88,6 +88,7 @@ const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
"CallingConv ",
"CHA ",
"Scheduler ",
+ "Profile ",
};
template <bool kCount>
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 543045897e..ebde82db55 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -98,6 +98,7 @@ enum ArenaAllocKind {
kArenaAllocCallingConvention,
kArenaAllocCHA,
kArenaAllocScheduler,
+ kArenaAllocProfile,
kNumArenaAllocKinds
};
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 55b4306427..553928d20a 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -112,7 +112,7 @@ void LogHelper::LogLineLowStack(const char* file,
if (priority == ANDROID_LOG_FATAL) {
// Allocate buffer for snprintf(buf, buf_size, "%s:%u] %s", file, line, message) below.
// If allocation fails, fall back to printing only the message.
- buf_size = strlen(file) + 1 /* ':' */ + std::numeric_limits<typeof(line)>::max_digits10 +
+ buf_size = strlen(file) + 1 /* ':' */ + std::numeric_limits<decltype(line)>::max_digits10 +
2 /* "] " */ + strlen(message) + 1 /* terminating 0 */;
buf = reinterpret_cast<char*>(malloc(buf_size));
}
diff --git a/runtime/base/safe_copy_test.cc b/runtime/base/safe_copy_test.cc
index 987895e6b7..a9ec9528a1 100644
--- a/runtime/base/safe_copy_test.cc
+++ b/runtime/base/safe_copy_test.cc
@@ -23,80 +23,86 @@
#include <sys/mman.h>
#include <sys/user.h>
+#include "globals.h"
+
namespace art {
#if defined(__linux__)
TEST(SafeCopyTest, smoke) {
+ DCHECK_EQ(kPageSize, static_cast<decltype(kPageSize)>(PAGE_SIZE));
+
// Map four pages, mark the second one as PROT_NONE, unmap the last one.
- void* map = mmap(nullptr, PAGE_SIZE * 4, PROT_READ | PROT_WRITE,
+ void* map = mmap(nullptr, kPageSize * 4, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(MAP_FAILED, map);
char* page1 = static_cast<char*>(map);
- char* page2 = page1 + PAGE_SIZE;
- char* page3 = page2 + PAGE_SIZE;
- char* page4 = page3 + PAGE_SIZE;
- ASSERT_EQ(0, mprotect(page1 + PAGE_SIZE, PAGE_SIZE, PROT_NONE));
- ASSERT_EQ(0, munmap(page4, PAGE_SIZE));
+ char* page2 = page1 + kPageSize;
+ char* page3 = page2 + kPageSize;
+ char* page4 = page3 + kPageSize;
+ ASSERT_EQ(0, mprotect(page1 + kPageSize, kPageSize, PROT_NONE));
+ ASSERT_EQ(0, munmap(page4, kPageSize));
page1[0] = 'a';
- page1[PAGE_SIZE - 1] = 'z';
+ page1[kPageSize - 1] = 'z';
page3[0] = 'b';
- page3[PAGE_SIZE - 1] = 'y';
+ page3[kPageSize - 1] = 'y';
- char buf[PAGE_SIZE];
+ char buf[kPageSize];
// Completely valid read.
memset(buf, 0xCC, sizeof(buf));
- EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE), SafeCopy(buf, page1, PAGE_SIZE)) << strerror(errno);
- EXPECT_EQ(0, memcmp(buf, page1, PAGE_SIZE));
+ EXPECT_EQ(static_cast<ssize_t>(kPageSize), SafeCopy(buf, page1, kPageSize)) << strerror(errno);
+ EXPECT_EQ(0, memcmp(buf, page1, kPageSize));
// Reading into a guard page.
memset(buf, 0xCC, sizeof(buf));
- EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE - 1), SafeCopy(buf, page1 + 1, PAGE_SIZE));
- EXPECT_EQ(0, memcmp(buf, page1 + 1, PAGE_SIZE - 1));
+ EXPECT_EQ(static_cast<ssize_t>(kPageSize - 1), SafeCopy(buf, page1 + 1, kPageSize));
+ EXPECT_EQ(0, memcmp(buf, page1 + 1, kPageSize - 1));
// Reading from a guard page into a real page.
memset(buf, 0xCC, sizeof(buf));
- EXPECT_EQ(0, SafeCopy(buf, page2 + PAGE_SIZE - 1, PAGE_SIZE));
+ EXPECT_EQ(0, SafeCopy(buf, page2 + kPageSize - 1, kPageSize));
// Reading off of the end of a mapping.
memset(buf, 0xCC, sizeof(buf));
- EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE), SafeCopy(buf, page3, PAGE_SIZE * 2));
- EXPECT_EQ(0, memcmp(buf, page3, PAGE_SIZE));
+ EXPECT_EQ(static_cast<ssize_t>(kPageSize), SafeCopy(buf, page3, kPageSize * 2));
+ EXPECT_EQ(0, memcmp(buf, page3, kPageSize));
// Completely invalid.
- EXPECT_EQ(0, SafeCopy(buf, page1 + PAGE_SIZE, PAGE_SIZE));
+ EXPECT_EQ(0, SafeCopy(buf, page1 + kPageSize, kPageSize));
// Clean up.
- ASSERT_EQ(0, munmap(map, PAGE_SIZE * 3));
+ ASSERT_EQ(0, munmap(map, kPageSize * 3));
}
TEST(SafeCopyTest, alignment) {
+ DCHECK_EQ(kPageSize, static_cast<decltype(kPageSize)>(PAGE_SIZE));
+
// Copy the middle of a mapping to the end of another one.
- void* src_map = mmap(nullptr, PAGE_SIZE * 3, PROT_READ | PROT_WRITE,
+ void* src_map = mmap(nullptr, kPageSize * 3, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(MAP_FAILED, src_map);
// Add a guard page to make sure we don't write past the end of the mapping.
- void* dst_map = mmap(nullptr, PAGE_SIZE * 4, PROT_READ | PROT_WRITE,
+ void* dst_map = mmap(nullptr, kPageSize * 4, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(MAP_FAILED, dst_map);
char* src = static_cast<char*>(src_map);
char* dst = static_cast<char*>(dst_map);
- ASSERT_EQ(0, mprotect(dst + 3 * PAGE_SIZE, PAGE_SIZE, PROT_NONE));
+ ASSERT_EQ(0, mprotect(dst + 3 * kPageSize, kPageSize, PROT_NONE));
src[512] = 'a';
- src[PAGE_SIZE * 3 - 512 - 1] = 'z';
+ src[kPageSize * 3 - 512 - 1] = 'z';
- EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE * 3 - 1024),
- SafeCopy(dst + 1024, src + 512, PAGE_SIZE * 3 - 1024));
- EXPECT_EQ(0, memcmp(dst + 1024, src + 512, PAGE_SIZE * 3 - 1024));
+ EXPECT_EQ(static_cast<ssize_t>(kPageSize * 3 - 1024),
+ SafeCopy(dst + 1024, src + 512, kPageSize * 3 - 1024));
+ EXPECT_EQ(0, memcmp(dst + 1024, src + 512, kPageSize * 3 - 1024));
- ASSERT_EQ(0, munmap(src_map, PAGE_SIZE * 3));
- ASSERT_EQ(0, munmap(dst_map, PAGE_SIZE * 4));
+ ASSERT_EQ(0, munmap(src_map, kPageSize * 3));
+ ASSERT_EQ(0, munmap(dst_map, kPageSize * 4));
}
#endif // defined(__linux__)
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 5fbdc46cb0..afe4eeb059 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1698,7 +1698,7 @@ ElfFile* ElfFile::Open(File* file,
low_4gb,
file->GetPath().c_str(),
error_msg));
- if (map == nullptr && map->Size() != EI_NIDENT) {
+ if (map == nullptr || map->Size() != EI_NIDENT) {
return nullptr;
}
uint8_t* header = map->Begin();
@@ -1749,7 +1749,7 @@ ElfFile* ElfFile::Open(File* file, int mmap_prot, int mmap_flags, std::string* e
low_4gb,
file->GetPath().c_str(),
error_msg));
- if (map == nullptr && map->Size() != EI_NIDENT) {
+ if (map == nullptr || map->Size() != EI_NIDENT) {
return nullptr;
}
uint8_t* header = map->Begin();
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index e2d45acb34..74e7c180b8 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -145,22 +145,22 @@
V(A64Load, int64_t, volatile const int64_t *) \
V(A64Store, void, volatile int64_t *, int64_t) \
\
- V(NewEmptyString, void) \
- V(NewStringFromBytes_B, void) \
- V(NewStringFromBytes_BI, void) \
- V(NewStringFromBytes_BII, void) \
- V(NewStringFromBytes_BIII, void) \
- V(NewStringFromBytes_BIIString, void) \
- V(NewStringFromBytes_BString, void) \
- V(NewStringFromBytes_BIICharset, void) \
- V(NewStringFromBytes_BCharset, void) \
- V(NewStringFromChars_C, void) \
- V(NewStringFromChars_CII, void) \
- V(NewStringFromChars_IIC, void) \
- V(NewStringFromCodePoints, void) \
- V(NewStringFromString, void) \
- V(NewStringFromStringBuffer, void) \
- V(NewStringFromStringBuilder, void) \
+ V(NewEmptyString, void, void) \
+ V(NewStringFromBytes_B, void, void) \
+ V(NewStringFromBytes_BI, void, void) \
+ V(NewStringFromBytes_BII, void, void) \
+ V(NewStringFromBytes_BIII, void, void) \
+ V(NewStringFromBytes_BIIString, void, void) \
+ V(NewStringFromBytes_BString, void, void) \
+ V(NewStringFromBytes_BIICharset, void, void) \
+ V(NewStringFromBytes_BCharset, void, void) \
+ V(NewStringFromChars_C, void, void) \
+ V(NewStringFromChars_CII, void, void) \
+ V(NewStringFromChars_IIC, void, void) \
+ V(NewStringFromCodePoints, void, void) \
+ V(NewStringFromString, void, void) \
+ V(NewStringFromStringBuffer, void, void) \
+ V(NewStringFromStringBuilder, void, void) \
\
V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*) \
V(ReadBarrierMarkReg00, mirror::Object*, mirror::Object*) \
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ef4fa28f23..df097a0e60 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -4004,7 +4004,8 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
native_blocking_gcs_finished_++;
native_blocking_gc_cond_->Broadcast(self);
}
- } else if (new_value > NativeAllocationGcWatermark() && !IsGCRequestPending()) {
+ } else if (new_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
+ !IsGCRequestPending()) {
// Trigger another GC because there have been enough native bytes
// allocated since the last GC.
if (IsGcConcurrent()) {
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 2589ad046b..fdc0505e7f 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -140,12 +140,6 @@ static inline bool DoFastInvoke(Thread* self,
result->SetJ(0);
return false;
} else {
- if (called_method->IsIntrinsic()) {
- if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
- shadow_frame.GetResultRegister())) {
- return !self->IsExceptionPending();
- }
- }
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
if (type == kVirtual) {
@@ -153,6 +147,12 @@ static inline bool DoFastInvoke(Thread* self,
}
jit->AddSamples(self, sf_method, 1, /*with_backedges*/false);
}
+ if (called_method->IsIntrinsic()) {
+ if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
+ shadow_frame.GetResultRegister())) {
+ return !self->IsExceptionPending();
+ }
+ }
return DoCall<false, false>(called_method, self, shadow_frame, inst, inst_data, result);
}
}
diff --git a/runtime/interpreter/mterp/mips/op_double_to_int.S b/runtime/interpreter/mterp/mips/op_double_to_int.S
index 3b44964333..6d7c6cae61 100644
--- a/runtime/interpreter/mterp/mips/op_double_to_int.S
+++ b/runtime/interpreter/mterp/mips/op_double_to_int.S
@@ -3,7 +3,8 @@
*
* We have to clip values to int min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
@@ -11,29 +12,20 @@
EAS2(a3, rFP, a3) # a3 <- &fp[B]
LOAD64_F(fa0, fa0f, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
+#ifndef MIPS32REVGE6
li t0, INT_MIN_AS_DOUBLE_HIGH
mtc1 zero, fa1
MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
-#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- cmp.le.d ft0, fa1, fa0
- GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if INT_MIN <= vB, proceed to truncation
- cmp.eq.d ft0, fa0, fa0
- selnez.d fa0, fa1, ft0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-#else
c.ole.d fcc0, fa1, fa0
+#endif
GET_INST_OPCODE(t1) # extract opcode from rINST
+#ifndef MIPS32REVGE6
bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
c.eq.d fcc0, fa0, fa0
mtc1 zero, fa0
MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
movt.d fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-#endif
1:
+#endif
trunc.w.d fa0, fa0
SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
diff --git a/runtime/interpreter/mterp/mips/op_double_to_long.S b/runtime/interpreter/mterp/mips/op_double_to_long.S
index 78d4a8f5c7..459ab7eed0 100644
--- a/runtime/interpreter/mterp/mips/op_double_to_long.S
+++ b/runtime/interpreter/mterp/mips/op_double_to_long.S
@@ -3,7 +3,8 @@
*
* We have to clip values to long min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -13,19 +14,7 @@
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- li t0, LONG_MIN_AS_DOUBLE_HIGH
- mtc1 zero, fa1
- mthc1 t0, fa1
- cmp.le.d ft0, fa1, fa0
GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if LONG_MIN <= vB, proceed to truncation
- cmp.eq.d ft0, fa0, fa0
- selnez.d fa0, fa1, ft0 # fa0 = ordered(vB) ? LONG_MIN_AS_DOUBLE : 0
-1:
trunc.l.d fa0, fa0
SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
#else
diff --git a/runtime/interpreter/mterp/mips/op_float_to_int.S b/runtime/interpreter/mterp/mips/op_float_to_int.S
index 087e50fe80..26a0988082 100644
--- a/runtime/interpreter/mterp/mips/op_float_to_int.S
+++ b/runtime/interpreter/mterp/mips/op_float_to_int.S
@@ -3,7 +3,8 @@
*
* We have to clip values to int min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
@@ -11,26 +12,18 @@
GET_VREG_F(fa0, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+#ifndef MIPS32REVGE6
li t0, INT_MIN_AS_FLOAT
mtc1 t0, fa1
-#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- cmp.le.s ft0, fa1, fa0
- GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if INT_MIN <= vB, proceed to truncation
- cmp.eq.s ft0, fa0, fa0
- selnez.s fa0, fa1, ft0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-#else
c.ole.s fcc0, fa1, fa0
+#endif
GET_INST_OPCODE(t1) # extract opcode from rINST
+#ifndef MIPS32REVGE6
bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
c.eq.s fcc0, fa0, fa0
mtc1 zero, fa0
movt.s fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-#endif
1:
+#endif
trunc.w.s fa0, fa0
SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
diff --git a/runtime/interpreter/mterp/mips/op_float_to_long.S b/runtime/interpreter/mterp/mips/op_float_to_long.S
index dc88a78e7a..b8f8efbdcb 100644
--- a/runtime/interpreter/mterp/mips/op_float_to_long.S
+++ b/runtime/interpreter/mterp/mips/op_float_to_long.S
@@ -3,7 +3,8 @@
*
* We have to clip values to long min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -12,18 +13,7 @@
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- li t0, LONG_MIN_AS_FLOAT
- mtc1 t0, fa1
- cmp.le.s ft0, fa1, fa0
GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if LONG_MIN <= vB, proceed to truncation
- cmp.eq.s ft0, fa0, fa0
- selnez.s fa0, fa1, ft0 # fa0 = ordered(vB) ? LONG_MIN_AS_FLOAT : 0
-1:
trunc.l.s fa0, fa0
SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
#else
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_int.S b/runtime/interpreter/mterp/mips64/op_double_to_int.S
index aa2cbcad38..d09952233c 100644
--- a/runtime/interpreter/mterp/mips64/op_double_to_int.S
+++ b/runtime/interpreter/mterp/mips64/op_double_to_int.S
@@ -1,23 +1,3 @@
%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- dli t0, INT_MIN_AS_DOUBLE
- dmtc1 t0, f1
- cmp.le.d f1, f1, f0
- bc1nez f1, .L${opcode}_trunc
- cmp.eq.d f1, f0, f0
- li t0, INT_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .L${opcode}_done
-%break
-.L${opcode}_trunc:
trunc.w.d f0, f0
- mfc1 t0, f0
-.L${opcode}_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG t0, a1
- GOTO_OPCODE v0 # jump to next instruction
+%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_long.S b/runtime/interpreter/mterp/mips64/op_double_to_long.S
index 777cfeb6c8..9b65da5602 100644
--- a/runtime/interpreter/mterp/mips64/op_double_to_long.S
+++ b/runtime/interpreter/mterp/mips64/op_double_to_long.S
@@ -1,23 +1,3 @@
%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- dli t0, LONG_MIN_AS_DOUBLE
- dmtc1 t0, f1
- cmp.le.d f1, f1, f0
- bc1nez f1, .L${opcode}_trunc
- cmp.eq.d f1, f0, f0
- dli t0, LONG_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .L${opcode}_done
-%break
-.L${opcode}_trunc:
trunc.l.d f0, f0
- dmfc1 t0, f0
-.L${opcode}_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE t0, a1
- GOTO_OPCODE v0 # jump to next instruction
+%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_int.S b/runtime/interpreter/mterp/mips64/op_float_to_int.S
index d957540a7b..2806973935 100644
--- a/runtime/interpreter/mterp/mips64/op_float_to_int.S
+++ b/runtime/interpreter/mterp/mips64/op_float_to_int.S
@@ -1,23 +1,3 @@
%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- li t0, INT_MIN_AS_FLOAT
- mtc1 t0, f1
- cmp.le.s f1, f1, f0
- bc1nez f1, .L${opcode}_trunc
- cmp.eq.s f1, f0, f0
- li t0, INT_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .L${opcode}_done
-%break
-.L${opcode}_trunc:
trunc.w.s f0, f0
- mfc1 t0, f0
-.L${opcode}_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG t0, a1
- GOTO_OPCODE v0 # jump to next instruction
+%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_long.S b/runtime/interpreter/mterp/mips64/op_float_to_long.S
index 5d036c8455..c40c8a6680 100644
--- a/runtime/interpreter/mterp/mips64/op_float_to_long.S
+++ b/runtime/interpreter/mterp/mips64/op_float_to_long.S
@@ -1,23 +1,3 @@
%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- li t0, LONG_MIN_AS_FLOAT
- mtc1 t0, f1
- cmp.le.s f1, f1, f0
- bc1nez f1, .L${opcode}_trunc
- cmp.eq.s f1, f0, f0
- dli t0, LONG_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .L${opcode}_done
-%break
-.L${opcode}_trunc:
trunc.l.s f0, f0
- dmfc1 t0, f0
-.L${opcode}_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE t0, a1
- GOTO_OPCODE v0 # jump to next instruction
+%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index a53040c2df..5f94d04d13 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -276,6 +276,12 @@ extern "C" size_t MterpInvokeVirtualQuick(Thread* self,
vtable_idx, kRuntimePointerSize);
if ((called_method != nullptr) && called_method->IsIntrinsic()) {
if (MterpHandleIntrinsic(shadow_frame, called_method, inst, inst_data, result_register)) {
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit->InvokeVirtualOrInterface(
+ receiver, shadow_frame->GetMethod(), shadow_frame->GetDexPC(), called_method);
+ jit->AddSamples(self, shadow_frame->GetMethod(), 1, /*with_backedges*/false);
+ }
return !self->IsExceptionPending();
}
}
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index 579afc2387..636289798c 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -3967,7 +3967,8 @@ artMterpAsmInstructionStart = .L_op_nop
*
* We have to clip values to int min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
@@ -3975,27 +3976,19 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG_F(fa0, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+#ifndef MIPS32REVGE6
li t0, INT_MIN_AS_FLOAT
mtc1 t0, fa1
-#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- cmp.le.s ft0, fa1, fa0
- GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if INT_MIN <= vB, proceed to truncation
- cmp.eq.s ft0, fa0, fa0
- selnez.s fa0, fa1, ft0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-#else
c.ole.s fcc0, fa1, fa0
+#endif
GET_INST_OPCODE(t1) # extract opcode from rINST
+#ifndef MIPS32REVGE6
bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
c.eq.s fcc0, fa0, fa0
mtc1 zero, fa0
movt.s fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-#endif
1:
+#endif
trunc.w.s fa0, fa0
SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
@@ -4008,7 +4001,8 @@ artMterpAsmInstructionStart = .L_op_nop
*
* We have to clip values to long min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -4017,18 +4011,7 @@ artMterpAsmInstructionStart = .L_op_nop
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- li t0, LONG_MIN_AS_FLOAT
- mtc1 t0, fa1
- cmp.le.s ft0, fa1, fa0
GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if LONG_MIN <= vB, proceed to truncation
- cmp.eq.s ft0, fa0, fa0
- selnez.s fa0, fa1, ft0 # fa0 = ordered(vB) ? LONG_MIN_AS_FLOAT : 0
-1:
trunc.l.s fa0, fa0
SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
#else
@@ -4084,7 +4067,8 @@ artMterpAsmInstructionStart = .L_op_nop
*
* We have to clip values to int min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
@@ -4092,30 +4076,21 @@ artMterpAsmInstructionStart = .L_op_nop
EAS2(a3, rFP, a3) # a3 <- &fp[B]
LOAD64_F(fa0, fa0f, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
+#ifndef MIPS32REVGE6
li t0, INT_MIN_AS_DOUBLE_HIGH
mtc1 zero, fa1
MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
-#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- cmp.le.d ft0, fa1, fa0
- GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if INT_MIN <= vB, proceed to truncation
- cmp.eq.d ft0, fa0, fa0
- selnez.d fa0, fa1, ft0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-#else
c.ole.d fcc0, fa1, fa0
+#endif
GET_INST_OPCODE(t1) # extract opcode from rINST
+#ifndef MIPS32REVGE6
bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
c.eq.d fcc0, fa0, fa0
mtc1 zero, fa0
MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
movt.d fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-#endif
1:
+#endif
trunc.w.d fa0, fa0
SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
@@ -4128,7 +4103,8 @@ artMterpAsmInstructionStart = .L_op_nop
*
* We have to clip values to long min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -4138,19 +4114,7 @@ artMterpAsmInstructionStart = .L_op_nop
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- li t0, LONG_MIN_AS_DOUBLE_HIGH
- mtc1 zero, fa1
- mthc1 t0, fa1
- cmp.le.d ft0, fa1, fa0
GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if LONG_MIN <= vB, proceed to truncation
- cmp.eq.d ft0, fa0, fa0
- selnez.d fa0, fa1, ft0 # fa0 = ordered(vB) ? LONG_MIN_AS_DOUBLE : 0
-1:
trunc.l.d fa0, fa0
SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
#else
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index 3656df9a8e..bc0d90c7cb 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -3699,19 +3699,27 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG_FLOAT f0, a2
FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ trunc.w.s f0, f0
+/* File: mips64/fcvtFooter.S */
/*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
*/
- li t0, INT_MIN_AS_FLOAT
- mtc1 t0, f1
- cmp.le.s f1, f1, f0
- bc1nez f1, .Lop_float_to_int_trunc
- cmp.eq.s f1, f0, f0
- li t0, INT_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .Lop_float_to_int_done
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
/* ------------------------------ */
.balign 128
@@ -3734,19 +3742,28 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG_FLOAT f0, a2
FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ trunc.l.s f0, f0
+/* File: mips64/fcvtFooter.S */
/*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
*/
- li t0, LONG_MIN_AS_FLOAT
- mtc1 t0, f1
- cmp.le.s f1, f1, f0
- bc1nez f1, .Lop_float_to_long_trunc
- cmp.eq.s f1, f0, f0
- dli t0, LONG_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .Lop_float_to_long_done
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+
/* ------------------------------ */
.balign 128
@@ -3817,19 +3834,27 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG_DOUBLE f0, a2
FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ trunc.w.d f0, f0
+/* File: mips64/fcvtFooter.S */
/*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
*/
- dli t0, INT_MIN_AS_DOUBLE
- dmtc1 t0, f1
- cmp.le.d f1, f1, f0
- bc1nez f1, .Lop_double_to_int_trunc
- cmp.eq.d f1, f0, f0
- li t0, INT_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .Lop_double_to_int_done
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
/* ------------------------------ */
.balign 128
@@ -3852,19 +3877,27 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG_DOUBLE f0, a2
FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ trunc.l.d f0, f0
+/* File: mips64/fcvtFooter.S */
/*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
*/
- dli t0, LONG_MIN_AS_DOUBLE
- dmtc1 t0, f1
- cmp.le.d f1, f1, f0
- bc1nez f1, .Lop_double_to_long_trunc
- cmp.eq.d f1, f0, f0
- dli t0, LONG_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .Lop_double_to_long_done
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
/* ------------------------------ */
.balign 128
@@ -7132,46 +7165,6 @@ artMterpAsmInstructionEnd:
.balign 4
artMterpAsmSisterStart:
-/* continuation for op_float_to_int */
-.Lop_float_to_int_trunc:
- trunc.w.s f0, f0
- mfc1 t0, f0
-.Lop_float_to_int_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG t0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* continuation for op_float_to_long */
-.Lop_float_to_long_trunc:
- trunc.l.s f0, f0
- dmfc1 t0, f0
-.Lop_float_to_long_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE t0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* continuation for op_double_to_int */
-.Lop_double_to_int_trunc:
- trunc.w.d f0, f0
- mfc1 t0, f0
-.Lop_double_to_int_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG t0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* continuation for op_double_to_long */
-.Lop_double_to_long_trunc:
- trunc.l.d f0, f0
- dmfc1 t0, f0
-.Lop_double_to_long_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE t0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
.size artMterpAsmSisterStart, .-artMterpAsmSisterStart
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 52322529c8..2377b5b5aa 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -192,19 +192,26 @@ bool JitCodeCache::ContainsMethod(ArtMethod* method) {
class ScopedCodeCacheWrite : ScopedTrace {
public:
- explicit ScopedCodeCacheWrite(MemMap* code_map)
+ explicit ScopedCodeCacheWrite(MemMap* code_map, bool only_for_tlb_shootdown = false)
: ScopedTrace("ScopedCodeCacheWrite"),
- code_map_(code_map) {
+ code_map_(code_map),
+ only_for_tlb_shootdown_(only_for_tlb_shootdown) {
ScopedTrace trace("mprotect all");
- CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
+ CHECKED_MPROTECT(
+ code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtAll);
}
~ScopedCodeCacheWrite() {
ScopedTrace trace("mprotect code");
- CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
+ CHECKED_MPROTECT(
+ code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtCode);
}
private:
MemMap* const code_map_;
+  // If we're using ScopedCodeCacheWrite only for a TLB shootdown, we limit the scope of the
+  // mprotect to one page.
+ const bool only_for_tlb_shootdown_;
+
DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};
@@ -565,11 +572,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
core_spill_mask,
fp_spill_mask,
code_size);
- DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
- DCHECK_LE(roots_data, stack_map);
- // Flush data cache, as compiled code references literals in it.
- FlushDataCache(reinterpret_cast<char*>(roots_data),
- reinterpret_cast<char*>(roots_data + data_size));
// Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
// trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
// This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
@@ -621,10 +623,18 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
// possible that the compiled code is considered invalidated by some class linking,
// but below we still make the compiled code valid for the method.
MutexLock mu(self, lock_);
- method_code_map_.Put(code_ptr, method);
// Fill the root table before updating the entry point.
DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
+ DCHECK_LE(roots_data, stack_map);
FillRootTable(roots_data, roots);
+ {
+ // Flush data cache, as compiled code references literals in it.
+ // We also need a TLB shootdown to act as memory barrier across cores.
+ ScopedCodeCacheWrite ccw(code_map_.get(), /* only_for_tlb_shootdown */ true);
+ FlushDataCache(reinterpret_cast<char*>(roots_data),
+ reinterpret_cast<char*>(roots_data + data_size));
+ }
+ method_code_map_.Put(code_ptr, method);
if (osr) {
number_of_osr_compilations_++;
osr_code_map_.Put(method, code_ptr);
@@ -1269,6 +1279,7 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
std::vector<ProfileMethodInfo>& methods) {
ScopedTrace trace(__FUNCTION__);
MutexLock mu(Thread::Current(), lock_);
+ uint16_t jit_compile_threshold = Runtime::Current()->GetJITOptions()->GetCompileThreshold();
for (const ProfilingInfo* info : profiling_infos_) {
ArtMethod* method = info->GetMethod();
const DexFile* dex_file = method->GetDexFile();
@@ -1277,8 +1288,18 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
continue;
}
std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;
+
+ // If the method didn't reach the compilation threshold don't save the inline caches.
+ // They might be incomplete and cause unnecessary deoptimizations.
+ // If the inline cache is empty the compiler will generate a regular invoke virtual/interface.
+ if (method->GetCounter() < jit_compile_threshold) {
+ methods.emplace_back(/*ProfileMethodInfo*/
+ dex_file, method->GetDexMethodIndex(), inline_caches);
+ continue;
+ }
+
for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
- std::vector<ProfileMethodInfo::ProfileClassReference> profile_classes;
+ std::vector<TypeReference> profile_classes;
const InlineCache& cache = info->cache_[i];
ArtMethod* caller = info->GetMethod();
bool is_missing_types = false;
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 220f2984d7..1e720c0cf4 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -31,6 +31,8 @@
#include <zlib.h>
#include <base/time_utils.h>
+#include "base/arena_allocator.h"
+#include "base/dumpable.h"
#include "base/mutex.h"
#include "base/scoped_flock.h"
#include "base/stl_util.h"
@@ -66,12 +68,25 @@ static_assert(InlineCache::kIndividualCacheSize < kIsMegamorphicEncoding,
static_assert(InlineCache::kIndividualCacheSize < kIsMissingTypesEncoding,
"InlineCache::kIndividualCacheSize is larger than expected");
-ProfileCompilationInfo::ProfileCompilationInfo(const ProfileCompilationInfo& pci) {
- MergeWith(pci);
+ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool)
+ : default_arena_pool_(),
+ arena_(custom_arena_pool),
+ info_(arena_.Adapter(kArenaAllocProfile)),
+ profile_key_map_(std::less<const std::string>(), arena_.Adapter(kArenaAllocProfile)) {
+}
+
+ProfileCompilationInfo::ProfileCompilationInfo()
+ : default_arena_pool_(/*use_malloc*/true, /*low_4gb*/false, "ProfileCompilationInfo"),
+ arena_(&default_arena_pool_),
+ info_(arena_.Adapter(kArenaAllocProfile)),
+ profile_key_map_(std::less<const std::string>(), arena_.Adapter(kArenaAllocProfile)) {
}
ProfileCompilationInfo::~ProfileCompilationInfo() {
- ClearProfile();
+ VLOG(profiler) << Dumpable<MemStats>(arena_.GetMemStats());
+ for (DexFileData* data : info_) {
+ delete data;
+ }
}
void ProfileCompilationInfo::DexPcData::AddClass(uint16_t dex_profile_idx,
@@ -79,11 +94,27 @@ void ProfileCompilationInfo::DexPcData::AddClass(uint16_t dex_profile_idx,
if (is_megamorphic || is_missing_types) {
return;
}
- classes.emplace(dex_profile_idx, type_idx);
- if (classes.size() >= InlineCache::kIndividualCacheSize) {
+
+ // Perform an explicit lookup for the type instead of directly emplacing the
+ // element. We do this because emplace() allocates the node before doing the
+ // lookup and if it then finds an identical element, it shall deallocate the
+ // node. For Arena allocations, that's essentially a leak.
+ ClassReference ref(dex_profile_idx, type_idx);
+ auto it = classes.find(ref);
+ if (it != classes.end()) {
+ // The type index exists.
+ return;
+ }
+
+  // Check if adding the type will cause the cache to become megamorphic.
+ if (classes.size() + 1 >= InlineCache::kIndividualCacheSize) {
is_megamorphic = true;
classes.clear();
+ return;
}
+
+ // The type does not exist and the inline cache will not be megamorphic.
+ classes.insert(ref);
}
// Transform the actual dex location into relative paths.
@@ -460,7 +491,9 @@ ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData
uint8_t profile_index = profile_index_it->second;
if (info_.size() <= profile_index) {
// This is a new addition. Add it to the info_ array.
- info_.emplace_back(new DexFileData(profile_key, checksum, profile_index));
+ DexFileData* dex_file_data = new (&arena_) DexFileData(
+ &arena_, profile_key, checksum, profile_index);
+ info_.push_back(dex_file_data);
}
DexFileData* result = info_[profile_index];
// DCHECK that profile info map key is consistent with the one stored in the dex file data.
@@ -506,7 +539,7 @@ bool ProfileCompilationInfo::AddResolvedClasses(const DexCacheResolvedClasses& c
bool ProfileCompilationInfo::AddMethodIndex(const std::string& dex_location,
uint32_t dex_checksum,
uint16_t method_index) {
- return AddMethod(dex_location, dex_checksum, method_index, OfflineProfileMethodInfo());
+ return AddMethod(dex_location, dex_checksum, method_index, OfflineProfileMethodInfo(nullptr));
}
bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
@@ -517,22 +550,28 @@ bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
if (data == nullptr) { // checksum mismatch
return false;
}
- auto inline_cache_it = data->method_map.FindOrAdd(method_index);
- for (const auto& pmi_inline_cache_it : pmi.inline_caches) {
+ // Add the method.
+ InlineCacheMap* inline_cache = data->FindOrAddMethod(method_index);
+
+ if (pmi.inline_caches == nullptr) {
+ // If we don't have inline caches return success right away.
+ return true;
+ }
+ for (const auto& pmi_inline_cache_it : *pmi.inline_caches) {
uint16_t pmi_ic_dex_pc = pmi_inline_cache_it.first;
const DexPcData& pmi_ic_dex_pc_data = pmi_inline_cache_it.second;
- DexPcData& dex_pc_data = inline_cache_it->second.FindOrAdd(pmi_ic_dex_pc)->second;
- if (dex_pc_data.is_missing_types || dex_pc_data.is_megamorphic) {
+ DexPcData* dex_pc_data = FindOrAddDexPc(inline_cache, pmi_ic_dex_pc);
+ if (dex_pc_data->is_missing_types || dex_pc_data->is_megamorphic) {
// We are already megamorphic or we are missing types; no point in going forward.
continue;
}
if (pmi_ic_dex_pc_data.is_missing_types) {
- dex_pc_data.SetIsMissingTypes();
+ dex_pc_data->SetIsMissingTypes();
continue;
}
if (pmi_ic_dex_pc_data.is_megamorphic) {
- dex_pc_data.SetIsMegamorphic();
+ dex_pc_data->SetIsMegamorphic();
continue;
}
@@ -544,7 +583,7 @@ bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
if (class_dex_data == nullptr) { // checksum mismatch
return false;
}
- dex_pc_data.AddClass(class_dex_data->profile_index, class_ref.type_index);
+ dex_pc_data->AddClass(class_dex_data->profile_index, class_ref.type_index);
}
}
return true;
@@ -557,27 +596,26 @@ bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi) {
if (data == nullptr) { // checksum mismatch
return false;
}
- auto inline_cache_it = data->method_map.FindOrAdd(pmi.dex_method_index);
+ InlineCacheMap* inline_cache = data->FindOrAddMethod(pmi.dex_method_index);
for (const ProfileMethodInfo::ProfileInlineCache& cache : pmi.inline_caches) {
if (cache.is_missing_types) {
- auto dex_pc_data_it = inline_cache_it->second.FindOrAdd(cache.dex_pc);
- dex_pc_data_it->second.SetIsMissingTypes();
+ FindOrAddDexPc(inline_cache, cache.dex_pc)->SetIsMissingTypes();
continue;
}
- for (const ProfileMethodInfo::ProfileClassReference& class_ref : cache.classes) {
+ for (const TypeReference& class_ref : cache.classes) {
DexFileData* class_dex_data = GetOrAddDexFileData(
GetProfileDexFileKey(class_ref.dex_file->GetLocation()),
class_ref.dex_file->GetLocationChecksum());
if (class_dex_data == nullptr) { // checksum mismatch
return false;
}
- auto dex_pc_data_it = inline_cache_it->second.FindOrAdd(cache.dex_pc);
- if (dex_pc_data_it->second.is_missing_types) {
+ DexPcData* dex_pc_data = FindOrAddDexPc(inline_cache, cache.dex_pc);
+ if (dex_pc_data->is_missing_types) {
// Don't bother adding classes if we are missing types.
break;
}
- dex_pc_data_it->second.AddClass(class_dex_data->profile_index, class_ref.type_index);
+ dex_pc_data->AddClass(class_dex_data->profile_index, class_ref.type_index);
}
}
return true;
@@ -614,13 +652,13 @@ bool ProfileCompilationInfo::ReadInlineCache(SafeBuffer& buffer,
uint8_t dex_to_classes_map_size;
READ_UINT(uint16_t, buffer, dex_pc, error);
READ_UINT(uint8_t, buffer, dex_to_classes_map_size, error);
- auto dex_pc_data_it = inline_cache->FindOrAdd(dex_pc);
+ DexPcData* dex_pc_data = FindOrAddDexPc(inline_cache, dex_pc);
if (dex_to_classes_map_size == kIsMissingTypesEncoding) {
- dex_pc_data_it->second.SetIsMissingTypes();
+ dex_pc_data->SetIsMissingTypes();
continue;
}
if (dex_to_classes_map_size == kIsMegamorphicEncoding) {
- dex_pc_data_it->second.SetIsMegamorphic();
+ dex_pc_data->SetIsMegamorphic();
continue;
}
for (; dex_to_classes_map_size > 0; dex_to_classes_map_size--) {
@@ -636,7 +674,7 @@ bool ProfileCompilationInfo::ReadInlineCache(SafeBuffer& buffer,
for (; dex_classes_size > 0; dex_classes_size--) {
uint16_t type_index;
READ_UINT(uint16_t, buffer, type_index, error);
- dex_pc_data_it->second.AddClass(dex_profile_index, dex::TypeIndex(type_index));
+ dex_pc_data->AddClass(dex_profile_index, dex::TypeIndex(type_index));
}
}
}
@@ -661,8 +699,8 @@ bool ProfileCompilationInfo::ReadMethods(SafeBuffer& buffer,
READ_UINT(uint16_t, buffer, diff_with_last_method_index, error);
uint16_t method_index = last_method_index + diff_with_last_method_index;
last_method_index = method_index;
- auto it = data->method_map.FindOrAdd(method_index);
- if (!ReadInlineCache(buffer, number_of_dex_files, &(it->second), error)) {
+ InlineCacheMap* inline_cache = data->FindOrAddMethod(method_index);
+ if (!ReadInlineCache(buffer, number_of_dex_files, inline_cache, error)) {
return false;
}
}
@@ -1091,19 +1129,19 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other) {
// Merge the methods and the inline caches.
for (const auto& other_method_it : other_dex_data->method_map) {
uint16_t other_method_index = other_method_it.first;
- auto method_it = dex_data->method_map.FindOrAdd(other_method_index);
+ InlineCacheMap* inline_cache = dex_data->FindOrAddMethod(other_method_index);
const auto& other_inline_cache = other_method_it.second;
for (const auto& other_ic_it : other_inline_cache) {
uint16_t other_dex_pc = other_ic_it.first;
const ClassSet& other_class_set = other_ic_it.second.classes;
- auto class_set = method_it->second.FindOrAdd(other_dex_pc);
+ DexPcData* dex_pc_data = FindOrAddDexPc(inline_cache, other_dex_pc);
if (other_ic_it.second.is_missing_types) {
- class_set->second.SetIsMissingTypes();
+ dex_pc_data->SetIsMissingTypes();
} else if (other_ic_it.second.is_megamorphic) {
- class_set->second.SetIsMegamorphic();
+ dex_pc_data->SetIsMegamorphic();
} else {
for (const auto& class_it : other_class_set) {
- class_set->second.AddClass(dex_profile_index_remap.Get(
+ dex_pc_data->AddClass(dex_profile_index_remap.Get(
class_it.dex_profile_index), class_it.type_index);
}
}
@@ -1143,24 +1181,24 @@ ProfileCompilationInfo::FindMethod(const std::string& dex_location,
return nullptr;
}
-bool ProfileCompilationInfo::GetMethod(const std::string& dex_location,
- uint32_t dex_checksum,
- uint16_t dex_method_index,
- /*out*/OfflineProfileMethodInfo* pmi) const {
+std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> ProfileCompilationInfo::GetMethod(
+ const std::string& dex_location,
+ uint32_t dex_checksum,
+ uint16_t dex_method_index) const {
const InlineCacheMap* inline_caches = FindMethod(dex_location, dex_checksum, dex_method_index);
if (inline_caches == nullptr) {
- return false;
+ return nullptr;
}
+ std::unique_ptr<OfflineProfileMethodInfo> pmi(new OfflineProfileMethodInfo(inline_caches));
+
pmi->dex_references.resize(info_.size());
for (const DexFileData* dex_data : info_) {
pmi->dex_references[dex_data->profile_index].dex_location = dex_data->profile_key;
pmi->dex_references[dex_data->profile_index].dex_checksum = dex_data->checksum;
}
- // TODO(calin): maybe expose a direct pointer to avoid copying
- pmi->inline_caches = *inline_caches;
- return true;
+ return pmi;
}
@@ -1170,7 +1208,7 @@ bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file, dex::TypeInd
if (!ChecksumMatch(dex_file, dex_data->checksum)) {
return false;
}
- const std::set<dex::TypeIndex>& classes = dex_data->class_set;
+ const ArenaSet<dex::TypeIndex>& classes = dex_data->class_set;
return classes.find(type_idx) != classes.end();
}
return false;
@@ -1276,17 +1314,21 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
return os.str();
}
-bool ProfileCompilationInfo::GetClassesAndMethods(const DexFile* dex_file,
+bool ProfileCompilationInfo::GetClassesAndMethods(const DexFile& dex_file,
std::set<dex::TypeIndex>* class_set,
- MethodMap* method_map) const {
+ std::set<uint16_t>* method_set) const {
std::set<std::string> ret;
- std::string profile_key = GetProfileDexFileKey(dex_file->GetLocation());
+ std::string profile_key = GetProfileDexFileKey(dex_file.GetLocation());
const DexFileData* dex_data = FindDexData(profile_key);
- if (dex_data == nullptr || dex_data->checksum != dex_file->GetLocationChecksum()) {
+ if (dex_data == nullptr || dex_data->checksum != dex_file.GetLocationChecksum()) {
return false;
}
- *method_map = dex_data->method_map;
- *class_set = dex_data->class_set;
+ for (const auto& it : dex_data->method_map) {
+ method_set->insert(it.first);
+ }
+ for (const dex::TypeIndex& type_index : dex_data->class_set) {
+ class_set->insert(type_index);
+ }
return true;
}
@@ -1324,12 +1366,6 @@ std::set<DexCacheResolvedClasses> ProfileCompilationInfo::GetResolvedClasses(
return ret;
}
-void ProfileCompilationInfo::ClearResolvedClasses() {
- for (DexFileData* dex_data : info_) {
- dex_data->class_set.clear();
- }
-}
-
// Naive implementation to generate a random profile file suitable for testing.
bool ProfileCompilationInfo::GenerateTestProfile(int fd,
uint16_t number_of_dex_files,
@@ -1402,17 +1438,17 @@ bool ProfileCompilationInfo::GenerateTestProfile(
bool ProfileCompilationInfo::OfflineProfileMethodInfo::operator==(
const OfflineProfileMethodInfo& other) const {
- if (inline_caches.size() != other.inline_caches.size()) {
+ if (inline_caches->size() != other.inline_caches->size()) {
return false;
}
// We can't use a simple equality test because we need to match the dex files
// of the inline caches which might have different profile indexes.
- for (const auto& inline_cache_it : inline_caches) {
+ for (const auto& inline_cache_it : *inline_caches) {
uint16_t dex_pc = inline_cache_it.first;
const DexPcData dex_pc_data = inline_cache_it.second;
- const auto other_it = other.inline_caches.find(dex_pc);
- if (other_it == other.inline_caches.end()) {
+ const auto& other_it = other.inline_caches->find(dex_pc);
+ if (other_it == other.inline_caches->end()) {
return false;
}
const DexPcData& other_dex_pc_data = other_it->second;
@@ -1441,17 +1477,21 @@ bool ProfileCompilationInfo::OfflineProfileMethodInfo::operator==(
return true;
}
-void ProfileCompilationInfo::ClearProfile() {
- for (DexFileData* dex_data : info_) {
- delete dex_data;
- }
- info_.clear();
- profile_key_map_.clear();
-}
-
bool ProfileCompilationInfo::IsEmpty() const {
DCHECK_EQ(info_.empty(), profile_key_map_.empty());
return info_.empty();
}
+ProfileCompilationInfo::InlineCacheMap*
+ProfileCompilationInfo::DexFileData::FindOrAddMethod(uint16_t method_index) {
+ return &(method_map.FindOrAdd(
+ method_index,
+ InlineCacheMap(std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)))->second);
+}
+
+ProfileCompilationInfo::DexPcData*
+ProfileCompilationInfo::FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t dex_pc) {
+ return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&arena_))->second);
+}
+
} // namespace art
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index ee1935f926..e903e2d232 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -17,16 +17,18 @@
#ifndef ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_H_
#define ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_H_
-#include <memory>
#include <set>
#include <vector>
#include "atomic.h"
+#include "base/arena_object.h"
+#include "base/arena_containers.h"
#include "dex_cache_resolved_classes.h"
#include "dex_file.h"
#include "dex_file_types.h"
#include "method_reference.h"
#include "safe_map.h"
+#include "type_reference.h"
namespace art {
@@ -35,24 +37,15 @@ namespace art {
* without the need to hold GC-able objects.
*/
struct ProfileMethodInfo {
- struct ProfileClassReference {
- ProfileClassReference() : dex_file(nullptr) {}
- ProfileClassReference(const DexFile* dex, const dex::TypeIndex& index)
- : dex_file(dex), type_index(index) {}
-
- const DexFile* dex_file;
- dex::TypeIndex type_index;
- };
-
struct ProfileInlineCache {
ProfileInlineCache(uint32_t pc,
bool missing_types,
- const std::vector<ProfileClassReference>& profile_classes)
+ const std::vector<TypeReference>& profile_classes)
: dex_pc(pc), is_missing_types(missing_types), classes(profile_classes) {}
const uint32_t dex_pc;
const bool is_missing_types;
- const std::vector<ProfileClassReference> classes;
+ const std::vector<TypeReference> classes;
};
ProfileMethodInfo(const DexFile* dex, uint32_t method_index)
@@ -115,8 +108,8 @@ class ProfileCompilationInfo {
// We cannot rely on the actual multidex index because a single profile may store
// data from multiple splits. This means that a profile may contain a classes2.dex from split-A
// and one from split-B.
- struct ClassReference {
- ClassReference(uint8_t dex_profile_idx, const dex::TypeIndex& type_idx) :
+ struct ClassReference : public ValueObject {
+ ClassReference(uint8_t dex_profile_idx, const dex::TypeIndex type_idx) :
dex_profile_index(dex_profile_idx), type_index(type_idx) {}
bool operator==(const ClassReference& other) const {
@@ -133,13 +126,16 @@ class ProfileCompilationInfo {
};
// The set of classes that can be found at a given dex pc.
- using ClassSet = std::set<ClassReference>;
+ using ClassSet = ArenaSet<ClassReference>;
// Encodes the actual inline cache for a given dex pc (whether or not the receiver is
// megamorphic and its possible types).
// If the receiver is megamorphic or is missing types the set of classes will be empty.
- struct DexPcData {
- DexPcData() : is_missing_types(false), is_megamorphic(false) {}
+ struct DexPcData : public ArenaObject<kArenaAllocProfile> {
+ explicit DexPcData(ArenaAllocator* arena)
+ : is_missing_types(false),
+ is_megamorphic(false),
+ classes(std::less<ClassReference>(), arena->Adapter(kArenaAllocProfile)) {}
void AddClass(uint16_t dex_profile_idx, const dex::TypeIndex& type_idx);
void SetIsMegamorphic() {
if (is_missing_types) return;
@@ -166,26 +162,29 @@ class ProfileCompilationInfo {
};
// The inline cache map: DexPc -> DexPcData.
- using InlineCacheMap = SafeMap<uint16_t, DexPcData>;
+ using InlineCacheMap = ArenaSafeMap<uint16_t, DexPcData>;
// Maps a method dex index to its inline cache.
- using MethodMap = SafeMap<uint16_t, InlineCacheMap>;
+ using MethodMap = ArenaSafeMap<uint16_t, InlineCacheMap>;
// Encodes the full set of inline caches for a given method.
// The dex_references vector is indexed according to the ClassReference::dex_profile_index.
// i.e. the dex file of any ClassReference present in the inline caches can be found at
// dex_references[ClassReference::dex_profile_index].
struct OfflineProfileMethodInfo {
+ explicit OfflineProfileMethodInfo(const InlineCacheMap* inline_cache_map)
+ : inline_caches(inline_cache_map) {}
+
bool operator==(const OfflineProfileMethodInfo& other) const;
+ const InlineCacheMap* const inline_caches;
std::vector<DexReference> dex_references;
- InlineCacheMap inline_caches;
};
// Public methods to create, extend or query the profile.
+ ProfileCompilationInfo();
+ explicit ProfileCompilationInfo(ArenaPool* arena_pool);
- ProfileCompilationInfo() {}
- ProfileCompilationInfo(const ProfileCompilationInfo& pci);
~ProfileCompilationInfo();
// Add the given methods and classes to the current profile object.
@@ -223,12 +222,13 @@ class ProfileCompilationInfo {
// Return true if the class's type is present in the profiling info.
bool ContainsClass(const DexFile& dex_file, dex::TypeIndex type_idx) const;
- // Return true if the method is present in the profiling info.
- // If the method is found, `pmi` is populated with its inline caches.
- bool GetMethod(const std::string& dex_location,
- uint32_t dex_checksum,
- uint16_t dex_method_index,
- /*out*/OfflineProfileMethodInfo* pmi) const;
+ // Return the method data for the given location and index from the profiling info.
+ // If the method index is not found or the checksum doesn't match, null is returned.
+ // Note: the inline cache map is a pointer to the map stored in the profile and
+ // its allocation will go away if the profile goes out of scope.
+ std::unique_ptr<OfflineProfileMethodInfo> GetMethod(const std::string& dex_location,
+ uint32_t dex_checksum,
+ uint16_t dex_method_index) const;
// Dump all the loaded profile info into a string and returns it.
// If dex_files is not null then the method indices will be resolved to their
@@ -239,12 +239,12 @@ class ProfileCompilationInfo {
std::string DumpInfo(const std::vector<const DexFile*>* dex_files,
bool print_full_dex_location = true) const;
- // Return the classes and methods for a given dex file through out args. The otu args are the set
+ // Return the classes and methods for a given dex file through out args. The out args are the set
// of class as well as the methods and their associated inline caches. Returns true if the dex
// file is register and has a matching checksum, false otherwise.
- bool GetClassesAndMethods(const DexFile* dex_file,
- std::set<dex::TypeIndex>* class_set,
- MethodMap* method_map) const;
+ bool GetClassesAndMethods(const DexFile& dex_file,
+ /*out*/std::set<dex::TypeIndex>* class_set,
+ /*out*/std::set<uint16_t>* method_set) const;
// Perform an equality test with the `other` profile information.
bool Equals(const ProfileCompilationInfo& other);
@@ -253,9 +253,6 @@ class ProfileCompilationInfo {
std::set<DexCacheResolvedClasses> GetResolvedClasses(
const std::unordered_set<std::string>& dex_files_locations) const;
- // Clear the resolved classes from the current object.
- void ClearResolvedClasses();
-
// Return the profile key associated with the given dex location.
static std::string GetProfileDexFileKey(const std::string& dex_location);
@@ -277,6 +274,8 @@ class ProfileCompilationInfo {
static bool Equals(const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi1,
const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi2);
+ ArenaAllocator* GetArena() { return &arena_; }
+
private:
enum ProfileLoadSatus {
kProfileLoadWouldOverwiteData,
@@ -295,9 +294,20 @@ class ProfileCompilationInfo {
// profile) fields in this struct because we can infer them from
// profile_key_map_ and info_. However, it makes the profiles logic much
// simpler if we have references here as well.
- struct DexFileData {
- DexFileData(const std::string& key, uint32_t location_checksum, uint16_t index)
- : profile_key(key), profile_index(index), checksum(location_checksum) {}
+ struct DexFileData : public DeletableArenaObject<kArenaAllocProfile> {
+ DexFileData(ArenaAllocator* arena,
+ const std::string& key,
+ uint32_t location_checksum,
+ uint16_t index)
+ : arena_(arena),
+ profile_key(key),
+ profile_index(index),
+ checksum(location_checksum),
+ method_map(std::less<uint16_t>(), arena->Adapter(kArenaAllocProfile)),
+ class_set(std::less<dex::TypeIndex>(), arena->Adapter(kArenaAllocProfile)) {}
+
+ // The arena used to allocate new inline cache maps.
+ ArenaAllocator* arena_;
// The profile key this data belongs to.
std::string profile_key;
// The profile index of this dex file (matches ClassReference#dex_profile_index).
@@ -308,11 +318,15 @@ class ProfileCompilationInfo {
MethodMap method_map;
// The classes which have been profiled. Note that these don't necessarily include
// all the classes that can be found in the inline caches reference.
- std::set<dex::TypeIndex> class_set;
+ ArenaSet<dex::TypeIndex> class_set;
bool operator==(const DexFileData& other) const {
return checksum == other.checksum && method_map == other.method_map;
}
+
+  // Find the inline caches of the given method index. Add an empty entry if
+ // no previous data is found.
+ InlineCacheMap* FindOrAddMethod(uint16_t method_index);
};
// Return the profile data for the given profile key or null if the dex location
@@ -352,9 +366,6 @@ class ProfileCompilationInfo {
// doesn't contain the key.
const DexFileData* FindDexData(const std::string& profile_key) const;
- // Clear all the profile data.
- void ClearProfile();
-
// Checks if the profile is empty.
bool IsEmpty() const;
@@ -485,20 +496,27 @@ class ProfileCompilationInfo {
const ClassSet& classes,
/*out*/SafeMap<uint8_t, std::vector<dex::TypeIndex>>* dex_to_classes_map);
+ // Find the data for the dex_pc in the inline cache. Adds an empty entry
+ // if no previous data exists.
+ DexPcData* FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t dex_pc);
+
friend class ProfileCompilationInfoTest;
friend class CompilerDriverProfileTest;
friend class ProfileAssistantTest;
friend class Dex2oatLayoutTest;
+ ArenaPool default_arena_pool_;
+ ArenaAllocator arena_;
+
// Vector containing the actual profile info.
// The vector index is the profile index of the dex data and
// matched DexFileData::profile_index.
- std::vector<DexFileData*> info_;
+ ArenaVector<DexFileData*> info_;
// Cache mapping profile keys to profile index.
// This is used to speed up searches since it avoids iterating
// over the info_ vector when searching by profile key.
- SafeMap<const std::string, uint8_t> profile_key_map_;
+ ArenaSafeMap<const std::string, uint8_t> profile_key_map_;
};
} // namespace art
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index e8f4ce268a..1cfa3552b9 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -26,11 +26,18 @@
#include "mirror/class_loader.h"
#include "handle_scope-inl.h"
#include "jit/profile_compilation_info.h"
+#include "linear_alloc.h"
#include "scoped_thread_state_change-inl.h"
+#include "type_reference.h"
namespace art {
class ProfileCompilationInfoTest : public CommonRuntimeTest {
+ public:
+ void PostRuntimeCreate() OVERRIDE {
+ arena_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
+ }
+
protected:
std::vector<ArtMethod*> GetVirtualMethods(jobject class_loader,
const std::string& clazz) {
@@ -117,13 +124,13 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
std::vector<ProfileMethodInfo::ProfileInlineCache> caches;
// Monomorphic
for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
- std::vector<ProfileMethodInfo::ProfileClassReference> classes;
+ std::vector<TypeReference> classes;
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(0));
caches.emplace_back(dex_pc, /*is_missing_types*/false, classes);
}
// Polymorphic
for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
- std::vector<ProfileMethodInfo::ProfileClassReference> classes;
+ std::vector<TypeReference> classes;
for (uint16_t k = 0; k < InlineCache::kIndividualCacheSize / 2; k++) {
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k));
}
@@ -131,7 +138,7 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
}
// Megamorphic
for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
- std::vector<ProfileMethodInfo::ProfileClassReference> classes;
+ std::vector<TypeReference> classes;
for (uint16_t k = 0; k < 2 * InlineCache::kIndividualCacheSize; k++) {
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k));
}
@@ -139,7 +146,7 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
}
// Missing types
for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
- std::vector<ProfileMethodInfo::ProfileClassReference> classes;
+ std::vector<TypeReference> classes;
caches.emplace_back(dex_pc, /*is_missing_types*/true, classes);
}
ProfileMethodInfo pmi(method->GetDexFile(), method->GetDexMethodIndex(), caches);
@@ -156,13 +163,22 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
return info.Save(filename, nullptr);
}
+ // Creates an inline cache which will be destructed at the end of the test.
+ ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() {
+ used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap(
+ std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+ return used_inline_caches.back().get();
+ }
+
ProfileCompilationInfo::OfflineProfileMethodInfo ConvertProfileMethodInfo(
const ProfileMethodInfo& pmi) {
- ProfileCompilationInfo::OfflineProfileMethodInfo offline_pmi;
+ ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
+ ProfileCompilationInfo::OfflineProfileMethodInfo offline_pmi(ic_map);
SafeMap<DexFile*, uint8_t> dex_map; // dex files to profile index
for (const auto& inline_cache : pmi.inline_caches) {
ProfileCompilationInfo::DexPcData& dex_pc_data =
- offline_pmi.inline_caches.FindOrAdd(inline_cache.dex_pc)->second;
+ ic_map->FindOrAdd(
+ inline_cache.dex_pc, ProfileCompilationInfo::DexPcData(arena_.get()))->second;
if (inline_cache.is_missing_types) {
dex_pc_data.SetIsMissingTypes();
}
@@ -184,45 +200,48 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
// Creates an offline profile used for testing inline caches.
ProfileCompilationInfo::OfflineProfileMethodInfo GetOfflineProfileMethodInfo() {
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
-
- pmi.dex_references.emplace_back("dex_location1", /* checksum */1);
- pmi.dex_references.emplace_back("dex_location2", /* checksum */2);
- pmi.dex_references.emplace_back("dex_location3", /* checksum */3);
-
+ ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
// Monomorphic
for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data;
+ ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
// Polymorphic
for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data;
+ ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
dex_pc_data.AddClass(2, dex::TypeIndex(2));
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
// Megamorphic
for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data;
+ ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMegamorphic();
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
// Missing types
for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data;
+ ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMissingTypes();
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
+
+ pmi.dex_references.emplace_back("dex_location1", /* checksum */1);
+ pmi.dex_references.emplace_back("dex_location2", /* checksum */2);
+ pmi.dex_references.emplace_back("dex_location3", /* checksum */3);
+
return pmi;
}
void MakeMegamorphic(/*out*/ProfileCompilationInfo::OfflineProfileMethodInfo* pmi) {
- for (auto it : pmi->inline_caches) {
+ ProfileCompilationInfo::InlineCacheMap* ic_map =
+ const_cast<ProfileCompilationInfo::InlineCacheMap*>(pmi->inline_caches);
+ for (auto it : *ic_map) {
for (uint16_t k = 0; k <= 2 * InlineCache::kIndividualCacheSize; k++) {
it.second.AddClass(0, dex::TypeIndex(k));
}
@@ -230,7 +249,9 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
}
void SetIsMissingTypes(/*out*/ProfileCompilationInfo::OfflineProfileMethodInfo* pmi) {
- for (auto it : pmi->inline_caches) {
+ ProfileCompilationInfo::InlineCacheMap* ic_map =
+ const_cast<ProfileCompilationInfo::InlineCacheMap*>(pmi->inline_caches);
+ for (auto it : *ic_map) {
it.second.SetIsMissingTypes();
}
}
@@ -239,6 +260,13 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
// They should not change anyway.
static constexpr int kProfileMagicSize = 4;
static constexpr int kProfileVersionSize = 4;
+
+ std::unique_ptr<ArenaAllocator> arena_;
+
+ // Cache of inline caches generated during tests.
+ // This makes it easier to pass data between different utilities and ensure that
+ // caches are destructed at the end of the test.
+ std::vector<std::unique_ptr<ProfileCompilationInfo::InlineCacheMap>> used_inline_caches;
};
TEST_F(ProfileCompilationInfoTest, SaveArtMethods) {
@@ -500,18 +528,14 @@ TEST_F(ProfileCompilationInfoTest, SaveInlineCaches) {
ASSERT_TRUE(loaded_info.Equals(saved_info));
- ProfileCompilationInfo::OfflineProfileMethodInfo loaded_pmi1;
- ASSERT_TRUE(loaded_info.GetMethod("dex_location1",
- /* checksum */ 1,
- /* method_idx */ 3,
- &loaded_pmi1));
- ASSERT_TRUE(loaded_pmi1 == pmi);
- ProfileCompilationInfo::OfflineProfileMethodInfo loaded_pmi2;
- ASSERT_TRUE(loaded_info.GetMethod("dex_location4",
- /* checksum */ 4,
- /* method_idx */ 3,
- &loaded_pmi2));
- ASSERT_TRUE(loaded_pmi2 == pmi);
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
+ loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ 3);
+ ASSERT_TRUE(loaded_pmi1 != nullptr);
+ ASSERT_TRUE(*loaded_pmi1 == pmi);
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
+ loaded_info.GetMethod("dex_location4", /* checksum */ 4, /* method_idx */ 3);
+ ASSERT_TRUE(loaded_pmi2 != nullptr);
+ ASSERT_TRUE(*loaded_pmi2 == pmi);
}
TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCaches) {
@@ -550,12 +574,11 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCaches) {
ASSERT_TRUE(loaded_info.Equals(saved_info));
- ProfileCompilationInfo::OfflineProfileMethodInfo loaded_pmi1;
- ASSERT_TRUE(loaded_info.GetMethod("dex_location1",
- /* checksum */ 1,
- /* method_idx */ 3,
- &loaded_pmi1));
- ASSERT_TRUE(loaded_pmi1 == pmi_extra);
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
+ loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ 3);
+
+ ASSERT_TRUE(loaded_pmi1 != nullptr);
+ ASSERT_TRUE(*loaded_pmi1 == pmi_extra);
}
TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCaches) {
@@ -602,12 +625,10 @@ TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCaches) {
ASSERT_TRUE(loaded_info.Equals(saved_info));
- ProfileCompilationInfo::OfflineProfileMethodInfo loaded_pmi1;
- ASSERT_TRUE(loaded_info.GetMethod("dex_location1",
- /* checksum */ 1,
- /* method_idx */ 3,
- &loaded_pmi1));
- ASSERT_TRUE(loaded_pmi1 == pmi_extra);
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
+ loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ 3);
+ ASSERT_TRUE(loaded_pmi1 != nullptr);
+ ASSERT_TRUE(*loaded_pmi1 == pmi_extra);
}
TEST_F(ProfileCompilationInfoTest, SaveArtMethodsWithInlineCaches) {
@@ -638,14 +659,14 @@ TEST_F(ProfileCompilationInfoTest, SaveArtMethodsWithInlineCaches) {
for (ArtMethod* m : main_methods) {
ASSERT_TRUE(info.ContainsMethod(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())));
const ProfileMethodInfo& pmi = profile_methods_map.find(m)->second;
- ProfileCompilationInfo::OfflineProfileMethodInfo offline_pmi;
- ASSERT_TRUE(info.GetMethod(m->GetDexFile()->GetLocation(),
- m->GetDexFile()->GetLocationChecksum(),
- m->GetDexMethodIndex(),
- &offline_pmi));
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> offline_pmi =
+ info.GetMethod(m->GetDexFile()->GetLocation(),
+ m->GetDexFile()->GetLocationChecksum(),
+ m->GetDexMethodIndex());
+ ASSERT_TRUE(offline_pmi != nullptr);
ProfileCompilationInfo::OfflineProfileMethodInfo converted_pmi =
ConvertProfileMethodInfo(pmi);
- ASSERT_EQ(converted_pmi, offline_pmi);
+ ASSERT_EQ(converted_pmi, *offline_pmi);
}
}
}
@@ -671,24 +692,26 @@ TEST_F(ProfileCompilationInfoTest, MergeInlineCacheTriggerReindex) {
ProfileCompilationInfo info;
ProfileCompilationInfo info_reindexed;
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
+ ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
pmi.dex_references.emplace_back("dex_location2", /* checksum */ 2);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data;
+ ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi_reindexed;
+ ProfileCompilationInfo::InlineCacheMap* ic_map_reindexed = CreateInlineCacheMap();
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi_reindexed(ic_map_reindexed);
pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum */ 2);
pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum */ 1);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data;
+ ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.AddClass(1, dex::TypeIndex(0));
dex_pc_data.AddClass(0, dex::TypeIndex(1));
- pmi_reindexed.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map_reindexed->Put(dex_pc, dex_pc_data);
}
// Profile 1 and Profile 2 get the same methods but in different order.
@@ -705,23 +728,20 @@ TEST_F(ProfileCompilationInfoTest, MergeInlineCacheTriggerReindex) {
"dex_location1", /* checksum */ 1, method_idx, pmi_reindexed, &info_reindexed));
}
- ProfileCompilationInfo info_backup = info;
+ ProfileCompilationInfo info_backup;
+ info_backup.MergeWith(info);
ASSERT_TRUE(info.MergeWith(info_reindexed));
// Merging should have no effect as we're adding the exact same stuff.
ASSERT_TRUE(info.Equals(info_backup));
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
- ProfileCompilationInfo::OfflineProfileMethodInfo loaded_pmi1;
- ASSERT_TRUE(info.GetMethod("dex_location1",
- /* checksum */ 1,
- /* method_idx */ method_idx,
- &loaded_pmi1));
- ASSERT_TRUE(loaded_pmi1 == pmi);
- ProfileCompilationInfo::OfflineProfileMethodInfo loaded_pmi2;
- ASSERT_TRUE(info.GetMethod("dex_location2",
- /* checksum */ 2,
- /* method_idx */ method_idx,
- &loaded_pmi2));
- ASSERT_TRUE(loaded_pmi2 == pmi);
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
+ info.GetMethod("dex_location1", /* checksum */ 1, method_idx);
+ ASSERT_TRUE(loaded_pmi1 != nullptr);
+ ASSERT_TRUE(*loaded_pmi1 == pmi);
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
+ info.GetMethod("dex_location2", /* checksum */ 2, method_idx);
+ ASSERT_TRUE(loaded_pmi2 != nullptr);
+ ASSERT_TRUE(*loaded_pmi2 == pmi);
}
}
@@ -739,11 +759,12 @@ TEST_F(ProfileCompilationInfoTest, AddMoreDexFileThanLimit) {
TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
// Create a megamorphic inline cache.
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
+ ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
- ProfileCompilationInfo::DexPcData dex_pc_data;
+ ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMegamorphic();
- pmi.inline_caches.Put(/*dex_pc*/ 0, dex_pc_data);
+ ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
ProfileCompilationInfo info_megamorphic;
ASSERT_TRUE(AddMethod("dex_location1",
@@ -768,11 +789,12 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCachesMerge) {
// Create an inline cache with missing types
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
+ ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
- ProfileCompilationInfo::DexPcData dex_pc_data;
+ ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMissingTypes();
- pmi.inline_caches.Put(/*dex_pc*/ 0, dex_pc_data);
+ ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
ProfileCompilationInfo info_megamorphic;
ASSERT_TRUE(AddMethod("dex_location1",
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 2dba9b76a2..166b6f4ba1 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -64,6 +64,12 @@ ProfileSaver::ProfileSaver(const ProfileSaverOptions& options,
AddTrackedLocations(output_filename, code_paths);
}
+ProfileSaver::~ProfileSaver() {
+ for (auto& it : profile_cache_) {
+ delete it.second;
+ }
+}
+
void ProfileSaver::Run() {
Thread* self = Thread::Current();
@@ -253,9 +259,11 @@ void ProfileSaver::FetchAndCacheResolvedClassesAndMethods() {
<< " (" << classes.GetDexLocation() << ")";
}
}
- auto info_it = profile_cache_.Put(filename, ProfileCompilationInfo());
+ auto info_it = profile_cache_.Put(
+ filename,
+ new ProfileCompilationInfo(Runtime::Current()->GetArenaPool()));
- ProfileCompilationInfo* cached_info = &(info_it->second);
+ ProfileCompilationInfo* cached_info = info_it->second;
cached_info->AddMethodsAndClasses(profile_methods_for_location,
resolved_classes_for_location);
total_number_of_profile_entries_cached += resolved_classes_for_location.size();
@@ -279,7 +287,6 @@ bool ProfileSaver::ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number
}
bool profile_file_saved = false;
- uint64_t total_number_of_profile_entries_cached = 0;
if (number_of_new_methods != nullptr) {
*number_of_new_methods = 0;
}
@@ -300,60 +307,70 @@ bool ProfileSaver::ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number
jit_code_cache_->GetProfiledMethods(locations, profile_methods);
total_number_of_code_cache_queries_++;
}
- ProfileCompilationInfo info;
- if (!info.Load(filename, /*clear_if_invalid*/ true)) {
- LOG(WARNING) << "Could not forcefully load profile " << filename;
- continue;
- }
- uint64_t last_save_number_of_methods = info.GetNumberOfMethods();
- uint64_t last_save_number_of_classes = info.GetNumberOfResolvedClasses();
-
- info.AddMethodsAndClasses(profile_methods, std::set<DexCacheResolvedClasses>());
- auto profile_cache_it = profile_cache_.find(filename);
- if (profile_cache_it != profile_cache_.end()) {
- info.MergeWith(profile_cache_it->second);
- }
+ {
+ ProfileCompilationInfo info(Runtime::Current()->GetArenaPool());
+ if (!info.Load(filename, /*clear_if_invalid*/ true)) {
+ LOG(WARNING) << "Could not forcefully load profile " << filename;
+ continue;
+ }
+ uint64_t last_save_number_of_methods = info.GetNumberOfMethods();
+ uint64_t last_save_number_of_classes = info.GetNumberOfResolvedClasses();
- int64_t delta_number_of_methods = info.GetNumberOfMethods() - last_save_number_of_methods;
- int64_t delta_number_of_classes = info.GetNumberOfResolvedClasses() - last_save_number_of_classes;
-
- if (!force_save &&
- delta_number_of_methods < options_.GetMinMethodsToSave() &&
- delta_number_of_classes < options_.GetMinClassesToSave()) {
- VLOG(profiler) << "Not enough information to save to: " << filename
- << " Number of methods: " << delta_number_of_methods
- << " Number of classes: " << delta_number_of_classes;
- total_number_of_skipped_writes_++;
- continue;
- }
- if (number_of_new_methods != nullptr) {
- *number_of_new_methods = std::max(static_cast<uint16_t>(delta_number_of_methods),
- *number_of_new_methods);
- }
- uint64_t bytes_written;
- // Force the save. In case the profile data is corrupted or the the profile
- // has the wrong version this will "fix" the file to the correct format.
- if (info.Save(filename, &bytes_written)) {
- // We managed to save the profile. Clear the cache stored during startup.
+ info.AddMethodsAndClasses(profile_methods,
+ std::set<DexCacheResolvedClasses>());
+ auto profile_cache_it = profile_cache_.find(filename);
if (profile_cache_it != profile_cache_.end()) {
- profile_cache_.erase(profile_cache_it);
- total_number_of_profile_entries_cached = 0;
+ info.MergeWith(*(profile_cache_it->second));
}
- if (bytes_written > 0) {
- total_number_of_writes_++;
- total_bytes_written_ += bytes_written;
- profile_file_saved = true;
- } else {
- // At this point we could still have avoided the write.
- // We load and merge the data from the file lazily at its first ever
- // save attempt. So, whatever we are trying to save could already be
- // in the file.
+
+ int64_t delta_number_of_methods =
+ info.GetNumberOfMethods() - last_save_number_of_methods;
+ int64_t delta_number_of_classes =
+ info.GetNumberOfResolvedClasses() - last_save_number_of_classes;
+
+ if (!force_save &&
+ delta_number_of_methods < options_.GetMinMethodsToSave() &&
+ delta_number_of_classes < options_.GetMinClassesToSave()) {
+ VLOG(profiler) << "Not enough information to save to: " << filename
+ << " Number of methods: " << delta_number_of_methods
+ << " Number of classes: " << delta_number_of_classes;
total_number_of_skipped_writes_++;
+ continue;
+ }
+ if (number_of_new_methods != nullptr) {
+ *number_of_new_methods =
+ std::max(static_cast<uint16_t>(delta_number_of_methods),
+ *number_of_new_methods);
+ }
+ uint64_t bytes_written;
+      // Force the save. In case the profile data is corrupted or the profile
+ // has the wrong version this will "fix" the file to the correct format.
+ if (info.Save(filename, &bytes_written)) {
+ // We managed to save the profile. Clear the cache stored during startup.
+ if (profile_cache_it != profile_cache_.end()) {
+ ProfileCompilationInfo *cached_info = profile_cache_it->second;
+ profile_cache_.erase(profile_cache_it);
+ delete cached_info;
+ }
+ if (bytes_written > 0) {
+ total_number_of_writes_++;
+ total_bytes_written_ += bytes_written;
+ profile_file_saved = true;
+ } else {
+ // At this point we could still have avoided the write.
+ // We load and merge the data from the file lazily at its first ever
+ // save attempt. So, whatever we are trying to save could already be
+ // in the file.
+ total_number_of_skipped_writes_++;
+ }
+ } else {
+ LOG(WARNING) << "Could not save profiling info to " << filename;
+ total_number_of_failed_writes_++;
}
- } else {
- LOG(WARNING) << "Could not save profiling info to " << filename;
- total_number_of_failed_writes_++;
}
+ // Trim the maps to madvise the pages used for profile info.
+    // It is unlikely we will need them again in the near future.
+ Runtime::Current()->GetArenaPool()->TrimMaps();
}
return profile_file_saved;
@@ -579,7 +596,7 @@ bool ProfileSaver::HasSeenMethod(const std::string& profile,
uint16_t method_idx) {
MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
if (instance_ != nullptr) {
- ProfileCompilationInfo info;
+ ProfileCompilationInfo info(Runtime::Current()->GetArenaPool());
if (!info.Load(profile, /*clear_if_invalid*/false)) {
return false;
}
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
index 60c9cc6ed4..01d72fec3d 100644
--- a/runtime/jit/profile_saver.h
+++ b/runtime/jit/profile_saver.h
@@ -65,6 +65,7 @@ class ProfileSaver {
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
const std::vector<std::string>& code_paths);
+ ~ProfileSaver();
// NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock.
static void* RunProfileSaverThread(void* arg)
@@ -131,7 +132,7 @@ class ProfileSaver {
// we don't hammer the disk to save them right away.
// The size of this cache is usually very small and tops
// to just a few hundreds entries in the ProfileCompilationInfo objects.
- SafeMap<std::string, ProfileCompilationInfo> profile_cache_;
+ SafeMap<std::string, ProfileCompilationInfo*> profile_cache_;
// Save period condition support.
Mutex wait_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 2626eefde2..0fde41bd4f 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -3057,7 +3057,8 @@ void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINat
if (c.get() == nullptr) {
LOG(FATAL) << "Couldn't find class: " << jni_class_name;
}
- JNI::RegisterNativeMethods(env, c.get(), methods, method_count, false);
+ jint jni_result = env->RegisterNatives(c.get(), methods, method_count);
+ CHECK_EQ(JNI_OK, jni_result);
}
} // namespace art
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index d7527d59b4..6230ae96e1 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -582,7 +582,7 @@ TEST_F(ObjectTest, IsAssignableFrom) {
// Primitive types are only assignable to themselves
const char* prims = "ZBCSIJFD";
- Class* prim_types[strlen(prims)];
+ std::vector<Class*> prim_types(strlen(prims));
for (size_t i = 0; i < strlen(prims); i++) {
prim_types[i] = class_linker_->FindPrimitiveClass(prims[i]);
}
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index fc91efa6ac..ef4957c0ba 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -238,9 +238,9 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.Define("-Xlockprofthreshold:_")
.WithType<unsigned int>()
.IntoKey(M::LockProfThreshold)
- .Define("-Xstacktracedir:_")
- .WithType<std::string>()
- .IntoKey(M::StackTraceDir)
+ .Define("-Xusetombstonedtraces")
+ .WithValue(true)
+ .IntoKey(M::UseTombstonedTraces)
.Define("-Xstacktracefile:_")
.WithType<std::string>()
.IntoKey(M::StackTraceFile)
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index c46bd8d2b9..968f02a359 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -834,7 +834,7 @@ void Runtime::InitNonZygoteOrPostFork(
void Runtime::StartSignalCatcher() {
if (!is_zygote_) {
- signal_catcher_ = new SignalCatcher(stack_trace_dir_, stack_trace_file_);
+ signal_catcher_ = new SignalCatcher(stack_trace_file_, use_tombstoned_traces_);
}
}
@@ -1069,7 +1069,11 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
abort_ = runtime_options.GetOrDefault(Opt::HookAbort);
default_stack_size_ = runtime_options.GetOrDefault(Opt::StackSize);
- stack_trace_dir_ = runtime_options.ReleaseOrDefault(Opt::StackTraceDir);
+ use_tombstoned_traces_ = runtime_options.GetOrDefault(Opt::UseTombstonedTraces);
+#if !defined(ART_TARGET_ANDROID)
+ CHECK(!use_tombstoned_traces_)
+ << "-Xusetombstonedtraces is only supported in an Android environment";
+#endif
stack_trace_file_ = runtime_options.ReleaseOrDefault(Opt::StackTraceFile);
compiler_executable_ = runtime_options.ReleaseOrDefault(Opt::Compiler);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 2e3b8d7bae..4e143e0f64 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -681,6 +681,14 @@ class Runtime {
deoptimization_counts_[static_cast<size_t>(kind)]++;
}
+ uint32_t GetNumberOfDeoptimizations() const {
+ uint32_t result = 0;
+ for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
+ result += deoptimization_counts_[i];
+ }
+ return result;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -783,7 +791,13 @@ class Runtime {
ClassLinker* class_linker_;
SignalCatcher* signal_catcher_;
- std::string stack_trace_dir_;
+
+ // If true, the runtime will connect to tombstoned via a socket to
+ // request an open file descriptor to write its traces to.
+ bool use_tombstoned_traces_;
+
+ // Location to which traces must be written on SIGQUIT. Only used if
+ // use_tombstoned_traces_ == false.
std::string stack_trace_file_;
std::unique_ptr<JavaVMExt> java_vm_;
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 77132a8bae..cfc681f23f 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -100,7 +100,7 @@ RUNTIME_OPTIONS_KEY (Unit, NoSigChain)
RUNTIME_OPTIONS_KEY (Unit, ForceNativeBridge)
RUNTIME_OPTIONS_KEY (LogVerbosity, Verbose)
RUNTIME_OPTIONS_KEY (unsigned int, LockProfThreshold)
-RUNTIME_OPTIONS_KEY (std::string, StackTraceDir)
+RUNTIME_OPTIONS_KEY (bool, UseTombstonedTraces, false)
RUNTIME_OPTIONS_KEY (std::string, StackTraceFile)
RUNTIME_OPTIONS_KEY (Unit, MethodTrace)
RUNTIME_OPTIONS_KEY (std::string, MethodTraceFile, "/data/misc/trace/method-trace-file.bin")
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index faea7b3821..e3dfc74628 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -42,6 +42,10 @@
#include "thread_list.h"
#include "utils.h"
+#if defined(ART_TARGET_ANDROID)
+#include "tombstoned/tombstoned.h"
+#endif
+
namespace art {
static void DumpCmdLine(std::ostream& os) {
@@ -66,13 +70,19 @@ static void DumpCmdLine(std::ostream& os) {
#endif
}
-SignalCatcher::SignalCatcher(const std::string& stack_trace_dir,
- const std::string& stack_trace_file)
- : stack_trace_dir_(stack_trace_dir),
- stack_trace_file_(stack_trace_file),
+SignalCatcher::SignalCatcher(const std::string& stack_trace_file,
+ bool use_tombstoned_stack_trace_fd)
+ : stack_trace_file_(stack_trace_file),
+ use_tombstoned_stack_trace_fd_(use_tombstoned_stack_trace_fd),
lock_("SignalCatcher lock"),
cond_("SignalCatcher::cond_", lock_),
thread_(nullptr) {
+#if !defined(ART_TARGET_ANDROID)
+ // We're not running on Android, so we can't communicate with tombstoned
+ // to ask for an open file.
+ CHECK(!use_tombstoned_stack_trace_fd_);
+#endif
+
SetHaltFlag(false);
// Create a raw pthread; its start routine will attach to the runtime.
@@ -103,62 +113,65 @@ bool SignalCatcher::ShouldHalt() {
return halt_;
}
-std::string SignalCatcher::GetStackTraceFileName() {
- if (!stack_trace_dir_.empty()) {
- // We'll try a maximum of ten times (arbitrarily selected) to create a file
- // with a unique name, seeding the pseudo random generator each time.
- //
- // If this doesn't work, give up and log to stdout. Note that we could try
- // indefinitely, but that would make problems in this code harder to detect
- // since we'd be spinning in the signal catcher thread.
- static constexpr uint32_t kMaxRetries = 10;
-
- for (uint32_t i = 0; i < kMaxRetries; ++i) {
- std::srand(NanoTime());
- // Sample output for PID 1234 : /data/anr/anr-pid1234-cafeffee.txt
- const std::string file_name = android::base::StringPrintf(
- "%s/anr-pid%" PRId32 "-%08" PRIx32 ".txt",
- stack_trace_dir_.c_str(),
- static_cast<int32_t>(getpid()),
- static_cast<uint32_t>(std::rand()));
-
- if (!OS::FileExists(file_name.c_str())) {
- return file_name;
- }
- }
+bool SignalCatcher::OpenStackTraceFile(android::base::unique_fd* tombstone_fd,
+ android::base::unique_fd* output_fd) {
+ if (use_tombstoned_stack_trace_fd_) {
+#if defined(ART_TARGET_ANDROID)
+ return tombstoned_connect(getpid(), tombstone_fd, output_fd, false /* is_native_crash */);
+#else
+ UNUSED(tombstone_fd);
+ UNUSED(output_fd);
+#endif
+ }
+
+ // The runtime is not configured to dump traces to a file, will LOG(INFO)
+ // instead.
+ if (stack_trace_file_.empty()) {
+ return false;
+ }
- LOG(ERROR) << "Unable to obtain stack trace filename at path : " << stack_trace_dir_;
- return "";
+ int fd = open(stack_trace_file_.c_str(), O_APPEND | O_CREAT | O_WRONLY, 0666);
+ if (fd == -1) {
+ PLOG(ERROR) << "Unable to open stack trace file '" << stack_trace_file_ << "'";
+ return false;
}
- return stack_trace_file_;
+ output_fd->reset(fd);
+ return true;
}
void SignalCatcher::Output(const std::string& s) {
- const std::string output_file = GetStackTraceFileName();
- if (output_file.empty()) {
+ android::base::unique_fd tombstone_fd;
+ android::base::unique_fd output_fd;
+ if (!OpenStackTraceFile(&tombstone_fd, &output_fd)) {
LOG(INFO) << s;
return;
}
ScopedThreadStateChange tsc(Thread::Current(), kWaitingForSignalCatcherOutput);
- int fd = open(output_file.c_str(), O_APPEND | O_CREAT | O_WRONLY, 0666);
- if (fd == -1) {
- PLOG(ERROR) << "Unable to open stack trace file '" << output_file << "'";
- return;
- }
- std::unique_ptr<File> file(new File(fd, output_file, true));
+
+ std::unique_ptr<File> file(new File(output_fd.release(), true /* check_usage */));
bool success = file->WriteFully(s.data(), s.size());
if (success) {
success = file->FlushCloseOrErase() == 0;
} else {
file->Erase();
}
+
+ const std::string output_path_msg = (use_tombstoned_stack_trace_fd_) ?
+ "[tombstoned]" : stack_trace_file_;
+
if (success) {
- LOG(INFO) << "Wrote stack traces to '" << output_file << "'";
+ LOG(INFO) << "Wrote stack traces to '" << output_path_msg << "'";
} else {
- PLOG(ERROR) << "Failed to write stack traces to '" << output_file << "'";
+ PLOG(ERROR) << "Failed to write stack traces to '" << output_path_msg << "'";
}
+
+#if defined(ART_TARGET_ANDROID)
+ if (!tombstoned_notify_completion(tombstone_fd)) {
+ LOG(WARNING) << "Unable to notify tombstoned of dump completion.";
+ }
+#endif
}
void SignalCatcher::HandleSigQuit() {
diff --git a/runtime/signal_catcher.h b/runtime/signal_catcher.h
index 4cd7a98795..8a2a7289de 100644
--- a/runtime/signal_catcher.h
+++ b/runtime/signal_catcher.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_SIGNAL_CATCHER_H_
#define ART_RUNTIME_SIGNAL_CATCHER_H_
+#include "android-base/unique_fd.h"
#include "base/mutex.h"
namespace art {
@@ -32,15 +33,17 @@ class Thread;
*/
class SignalCatcher {
public:
- // If |stack_trace_dir| is non empty, traces will be written to a
- // unique file under that directory.
+ // If |use_tombstoned_stack_trace_fd| is |true|, traces will be
+ // written to a file descriptor provided by tombstoned. The process
+ // will communicate with tombstoned via a unix domain socket. This
+ // mode of stack trace dumping is only supported in an Android
+ // environment.
//
- // If |stack_trace_dir| is empty, and |stack_frace_file| is non-empty,
- // traces will be appended to |stack_trace_file|.
- //
- // If both are empty, all traces will be written to the log buffer.
- explicit SignalCatcher(const std::string& stack_trace_dir,
- const std::string& stack_trace_file);
+ // If false, all traces will be dumped to |stack_trace_file| if it's
+ // non-empty. If |stack_trace_file| is empty, all traces will be written
+ // to the log buffer.
+ SignalCatcher(const std::string& stack_trace_file,
+ const bool use_tombstoned_stack_trace_fd);
~SignalCatcher();
void HandleSigQuit() REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
@@ -51,15 +54,18 @@ class SignalCatcher {
// NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock.
static void* Run(void* arg) NO_THREAD_SAFETY_ANALYSIS;
- std::string GetStackTraceFileName();
+ // NOTE: We're using android::base::unique_fd here for easier
+ // interoperability with tombstoned client APIs.
+ bool OpenStackTraceFile(android::base::unique_fd* tombstone_fd,
+ android::base::unique_fd* output_fd);
void HandleSigUsr1();
void Output(const std::string& s);
void SetHaltFlag(bool new_value) REQUIRES(!lock_);
bool ShouldHalt() REQUIRES(!lock_);
int WaitForSignal(Thread* self, SignalSet& signals) REQUIRES(!lock_);
- std::string stack_trace_dir_;
std::string stack_trace_file_;
+ const bool use_tombstoned_stack_trace_fd_;
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
ConditionVariable cond_ GUARDED_BY(lock_);
diff --git a/compiler/utils/type_reference.h b/runtime/type_reference.h
index a0fa1a4a63..b7e964b3ad 100644
--- a/compiler/utils/type_reference.h
+++ b/runtime/type_reference.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_TYPE_REFERENCE_H_
-#define ART_COMPILER_UTILS_TYPE_REFERENCE_H_
+#ifndef ART_RUNTIME_TYPE_REFERENCE_H_
+#define ART_RUNTIME_TYPE_REFERENCE_H_
#include <stdint.h>
@@ -29,7 +29,9 @@ class DexFile;
// A type is located by its DexFile and the string_ids_ table index into that DexFile.
struct TypeReference {
- TypeReference(const DexFile* file, dex::TypeIndex index) : dex_file(file), type_index(index) { }
+ TypeReference(const DexFile* file = nullptr, dex::TypeIndex index = dex::TypeIndex())
+ : dex_file(file),
+ type_index(index) {}
const DexFile* dex_file;
dex::TypeIndex type_index;
@@ -48,4 +50,4 @@ struct TypeReferenceValueComparator {
} // namespace art
-#endif // ART_COMPILER_UTILS_TYPE_REFERENCE_H_
+#endif // ART_RUNTIME_TYPE_REFERENCE_H_
diff --git a/test/596-app-images/app_images.cc b/test/596-app-images/app_images.cc
index 42211f7548..fa9c902070 100644
--- a/test/596-app-images/app_images.cc
+++ b/test/596-app-images/app_images.cc
@@ -63,6 +63,12 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_checkAppImageContains(JNIEnv*, j
return JNI_FALSE;
}
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_checkInitialized(JNIEnv*, jclass, jclass c) {
+ ScopedObjectAccess soa(Thread::Current());
+ ObjPtr<mirror::Class> klass_ptr = soa.Decode<mirror::Class>(c);
+ return klass_ptr->IsInitialized();
+}
+
} // namespace
} // namespace art
diff --git a/test/596-app-images/src/Main.java b/test/596-app-images/src/Main.java
index 75b31b8061..8ee3c888b0 100644
--- a/test/596-app-images/src/Main.java
+++ b/test/596-app-images/src/Main.java
@@ -16,7 +16,11 @@
class Main {
static class Inner {
- public static int abc = 0;
+ final public static int abc = 10;
+ }
+
+ static class Nested {
+
}
public static void main(String[] args) {
@@ -26,8 +30,44 @@ class Main {
} else if (!checkAppImageContains(Inner.class)) {
System.out.println("App image does not contain Inner!");
}
+
+ if (!checkInitialized(Inner.class))
+ System.out.println("Inner class is not initialized!");
+
+ if (!checkInitialized(Nested.class))
+ System.out.println("Nested class is not initialized!");
+
+ if (!checkInitialized(StaticFields.class))
+ System.out.println("StaticFields class is not initialized!");
+
+ if (!checkInitialized(StaticFieldsInitSub.class))
+ System.out.println("StaticFieldsInitSub class is not initialized!");
+
+ if (!checkInitialized(StaticFieldsInit.class))
+ System.out.println("StaticFieldsInit class is not initialized!");
+
+ if (checkInitialized(StaticInternString.class))
+ System.out.println("StaticInternString class is initialized!");
}
public static native boolean checkAppImageLoaded();
public static native boolean checkAppImageContains(Class<?> klass);
+ public static native boolean checkInitialized(Class<?> klass);
}
+
+class StaticFields{
+ public static int abc;
+}
+
+class StaticFieldsInitSub extends StaticFieldsInit {
+ final public static int def = 10;
+}
+
+class StaticFieldsInit{
+ final public static int abc = 10;
+}
+
+class StaticInternString {
+ final public static String intern = "java.abc.Action";
+}
+
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index 520e7c367c..3a2145bf2b 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -351,6 +351,35 @@ public class Main {
}
}
+ /// CHECK-START: void Main.typeConv(byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.typeConv(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Vadd:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi1>>,<<Vadd>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Add>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi2>>,<<Cnv>>] loop:<<Loop2>> outer_loop:none
+ //
+ // Scalar code in cleanup loop uses correct byte type on array get and type conversion.
+ private static void typeConv(byte[] a, byte[] b) {
+ int len = Math.min(a.length, b.length);
+ for (int i = 0; i < len; i++) {
+ a[i] = (byte) (b[i] + 1);
+ }
+ }
+
public static void main(String[] args) {
expectEquals(10, earlyExitFirst(-1));
for (int i = 0; i <= 10; i++) {
@@ -453,6 +482,17 @@ public class Main {
expectEquals(40, bt[i]);
}
+ byte[] b1 = new byte[259]; // few extra iterations
+ byte[] b2 = new byte[259];
+ for (int i = 0; i < 259; i++) {
+ b1[i] = 0;
+ b2[i] = (byte) i;
+ }
+ typeConv(b1, b2);
+ for (int i = 0; i < 259; i++) {
+ expectEquals((byte)(i + 1), b1[i]);
+ }
+
System.out.println("passed");
}
diff --git a/test/638-checker-inline-caches/src/Main.java b/test/638-checker-inline-caches/src/Main.java
index 680bd14dbc..f104e6aea8 100644
--- a/test/638-checker-inline-caches/src/Main.java
+++ b/test/638-checker-inline-caches/src/Main.java
@@ -36,16 +36,17 @@ public class Main {
/// CHECK: InvokeVirtual method_name:Super.getValue
/// CHECK-START: int Main.inlineMonomorphicSubA(Super) inliner (after)
- /// CHECK-NOT: InvokeVirtual method_name:Super.getValue
-
- /// CHECK-START: int Main.inlineMonomorphicSubA(Super) inliner (after)
/// CHECK: <<SubARet:i\d+>> IntConstant 42
/// CHECK: <<Obj:l\d+>> NullCheck
/// CHECK: <<ObjClass:l\d+>> InstanceFieldGet [<<Obj>>] field_name:java.lang.Object.shadow$_klass_
/// CHECK: <<InlineClass:l\d+>> LoadClass class_name:SubA
/// CHECK: <<Test:z\d+>> NotEqual [<<InlineClass>>,<<ObjClass>>]
- /// CHECK: Deoptimize [<<Test>>,<<Obj>>]
- /// CHECK: Return [<<SubARet>>]
+ /// CHECK: <<DefaultRet:i\d+>> InvokeVirtual [<<Obj>>] method_name:Super.getValue
+
+ /// CHECK: <<Ret:i\d+>> Phi [<<SubARet>>,<<DefaultRet>>]
+ /// CHECK: Return [<<Ret>>]
+
+ /// CHECK-NOT: Deoptimize
public static int inlineMonomorphicSubA(Super a) {
return a.getValue();
}
@@ -53,27 +54,27 @@ public class Main {
/// CHECK-START: int Main.inlinePolymophicSubASubB(Super) inliner (before)
/// CHECK: InvokeVirtual method_name:Super.getValue
- /// CHECK-START: int Main.inlinePolymophicSubASubB(Super) inliner (after)
- /// CHECK-NOT: InvokeVirtual method_name:Super.getValue
-
// Note that the order in which the types are added to the inline cache in the profile matters.
/// CHECK-START: int Main.inlinePolymophicSubASubB(Super) inliner (after)
/// CHECK-DAG: <<SubARet:i\d+>> IntConstant 42
/// CHECK-DAG: <<SubBRet:i\d+>> IntConstant 38
- /// CHECK: <<Obj:l\d+>> NullCheck
- /// CHECK: <<ObjClassSubA:l\d+>> InstanceFieldGet [<<Obj>>] field_name:java.lang.Object.shadow$_klass_
- /// CHECK: <<InlineClassSubA:l\d+>> LoadClass class_name:SubA
- /// CHECK: <<TestSubA:z\d+>> NotEqual [<<InlineClassSubA>>,<<ObjClassSubA>>]
- /// CHECK: If [<<TestSubA>>]
-
- /// CHECK: <<ObjClassSubB:l\d+>> InstanceFieldGet field_name:java.lang.Object.shadow$_klass_
- /// CHECK: <<InlineClassSubB:l\d+>> LoadClass class_name:SubB
- /// CHECK: <<TestSubB:z\d+>> NotEqual [<<InlineClassSubB>>,<<ObjClassSubB>>]
- /// CHECK: Deoptimize [<<TestSubB>>,<<Obj>>]
-
- /// CHECK: <<Ret:i\d+>> Phi [<<SubARet>>,<<SubBRet>>]
- /// CHECK: Return [<<Ret>>]
+ /// CHECK-DAG: <<Obj:l\d+>> NullCheck
+ /// CHECK-DAG: <<ObjClassSubA:l\d+>> InstanceFieldGet [<<Obj>>] field_name:java.lang.Object.shadow$_klass_
+ /// CHECK-DAG: <<InlineClassSubA:l\d+>> LoadClass class_name:SubA
+ /// CHECK-DAG: <<TestSubA:z\d+>> NotEqual [<<InlineClassSubA>>,<<ObjClassSubA>>]
+ /// CHECK-DAG: If [<<TestSubA>>]
+
+ /// CHECK-DAG: <<ObjClassSubB:l\d+>> InstanceFieldGet field_name:java.lang.Object.shadow$_klass_
+ /// CHECK-DAG: <<InlineClassSubB:l\d+>> LoadClass class_name:SubB
+ /// CHECK-DAG: <<TestSubB:z\d+>> NotEqual [<<InlineClassSubB>>,<<ObjClassSubB>>]
+ /// CHECK-DAG: <<DefaultRet:i\d+>> InvokeVirtual [<<Obj>>] method_name:Super.getValue
+
+ /// CHECK-DAG: <<FirstMerge:i\d+>> Phi [<<SubBRet>>,<<DefaultRet>>]
+ /// CHECK-DAG: <<Ret:i\d+>> Phi [<<SubARet>>,<<FirstMerge>>]
+ /// CHECK-DAG: Return [<<Ret>>]
+
+ /// CHECK-NOT: Deoptimize
public static int inlinePolymophicSubASubB(Super a) {
return a.getValue();
}
@@ -81,27 +82,27 @@ public class Main {
/// CHECK-START: int Main.inlinePolymophicCrossDexSubASubC(Super) inliner (before)
/// CHECK: InvokeVirtual method_name:Super.getValue
- /// CHECK-START: int Main.inlinePolymophicCrossDexSubASubC(Super) inliner (after)
- /// CHECK-NOT: InvokeVirtual method_name:Super.getValue
-
// Note that the order in which the types are added to the inline cache in the profile matters.
/// CHECK-START: int Main.inlinePolymophicCrossDexSubASubC(Super) inliner (after)
/// CHECK-DAG: <<SubARet:i\d+>> IntConstant 42
/// CHECK-DAG: <<SubCRet:i\d+>> IntConstant 24
- /// CHECK: <<Obj:l\d+>> NullCheck
- /// CHECK: <<ObjClassSubA:l\d+>> InstanceFieldGet [<<Obj>>] field_name:java.lang.Object.shadow$_klass_
- /// CHECK: <<InlineClassSubA:l\d+>> LoadClass class_name:SubA
- /// CHECK: <<TestSubA:z\d+>> NotEqual [<<InlineClassSubA>>,<<ObjClassSubA>>]
- /// CHECK: If [<<TestSubA>>]
-
- /// CHECK: <<ObjClassSubC:l\d+>> InstanceFieldGet field_name:java.lang.Object.shadow$_klass_
- /// CHECK: <<InlineClassSubC:l\d+>> LoadClass class_name:SubC
- /// CHECK: <<TestSubC:z\d+>> NotEqual [<<InlineClassSubC>>,<<ObjClassSubC>>]
- /// CHECK: Deoptimize [<<TestSubC>>,<<Obj>>]
-
- /// CHECK: <<Ret:i\d+>> Phi [<<SubARet>>,<<SubCRet>>]
- /// CHECK: Return [<<Ret>>]
+ /// CHECK-DAG: <<Obj:l\d+>> NullCheck
+ /// CHECK-DAG: <<ObjClassSubA:l\d+>> InstanceFieldGet [<<Obj>>] field_name:java.lang.Object.shadow$_klass_
+ /// CHECK-DAG: <<InlineClassSubA:l\d+>> LoadClass class_name:SubA
+ /// CHECK-DAG: <<TestSubA:z\d+>> NotEqual [<<InlineClassSubA>>,<<ObjClassSubA>>]
+ /// CHECK-DAG: If [<<TestSubA>>]
+
+ /// CHECK-DAG: <<ObjClassSubC:l\d+>> InstanceFieldGet field_name:java.lang.Object.shadow$_klass_
+ /// CHECK-DAG: <<InlineClassSubC:l\d+>> LoadClass class_name:SubC
+ /// CHECK-DAG: <<TestSubC:z\d+>> NotEqual [<<InlineClassSubC>>,<<ObjClassSubC>>]
+ /// CHECK-DAG: <<DefaultRet:i\d+>> InvokeVirtual [<<Obj>>] method_name:Super.getValue
+
+ /// CHECK-DAG: <<FirstMerge:i\d+>> Phi [<<SubCRet>>,<<DefaultRet>>]
+ /// CHECK-DAG: <<Ret:i\d+>> Phi [<<SubARet>>,<<FirstMerge>>]
+ /// CHECK-DAG: Return [<<Ret>>]
+
+ /// CHECK-NOT: Deoptimize
public static int inlinePolymophicCrossDexSubASubC(Super a) {
return a.getValue();
}
diff --git a/test/640-checker-byte-simd/src/Main.java b/test/640-checker-byte-simd/src/Main.java
index 10b20b83b0..21d71e8a13 100644
--- a/test/640-checker-byte-simd/src/Main.java
+++ b/test/640-checker-byte-simd/src/Main.java
@@ -135,8 +135,10 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- //
- // TODO: fill in when supported
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
@@ -147,9 +149,9 @@ public class Main {
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
- /// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- //
- // TODO: fill in when supported
+ // TODO: would need signedness flip.
+ /// CHECK-START: void Main.shr2() loop_optimization (after)
+ /// CHECK-NOT: VecUShr
static void shr2() {
for (int i = 0; i < 128; i++)
a[i] >>>= 2;
diff --git a/test/640-checker-char-simd/src/Main.java b/test/640-checker-char-simd/src/Main.java
index 0628b36003..89d4b6b84e 100644
--- a/test/640-checker-char-simd/src/Main.java
+++ b/test/640-checker-char-simd/src/Main.java
@@ -134,9 +134,9 @@ public class Main {
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
- /// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- //
- // TODO: fill in when supported
+ // TODO: would need signedness flip.
+ /// CHECK-START: void Main.sar2() loop_optimization (after)
+ /// CHECK-NOT: VecShr
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
@@ -148,8 +148,10 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- //
- // TODO: fill in when supported
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shr2() {
for (int i = 0; i < 128; i++)
a[i] >>>= 2;
diff --git a/test/640-checker-double-simd/src/Main.java b/test/640-checker-double-simd/src/Main.java
index 0d4f87a6cd..5709b5dab8 100644
--- a/test/640-checker-double-simd/src/Main.java
+++ b/test/640-checker-double-simd/src/Main.java
@@ -122,8 +122,10 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.conv(long[]) loop_optimization (after)
+ /// CHECK-NOT: VecLoad
+ /// CHECK-NOT: VecStore
//
- // TODO: fill in when supported
+ // TODO: fill in when long2double is supported
static void conv(long[] b) {
for (int i = 0; i < 128; i++)
a[i] = b[i];
diff --git a/test/640-checker-int-simd/src/Main.java b/test/640-checker-int-simd/src/Main.java
index 97048eb951..9ee553c469 100644
--- a/test/640-checker-int-simd/src/Main.java
+++ b/test/640-checker-int-simd/src/Main.java
@@ -136,8 +136,10 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- //
- // TODO: fill in when supported
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
@@ -149,8 +151,10 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- //
- // TODO: fill in when supported
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shr2() {
for (int i = 0; i < 128; i++)
a[i] >>>= 2;
diff --git a/test/640-checker-long-simd/src/Main.java b/test/640-checker-long-simd/src/Main.java
index e42c716d19..8f6af9d012 100644
--- a/test/640-checker-long-simd/src/Main.java
+++ b/test/640-checker-long-simd/src/Main.java
@@ -134,8 +134,10 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- //
- // TODO: fill in when supported
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
@@ -147,8 +149,10 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- //
- // TODO: fill in when supported
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shr2() {
for (int i = 0; i < 128; i++)
a[i] >>>= 2;
diff --git a/test/640-checker-short-simd/src/Main.java b/test/640-checker-short-simd/src/Main.java
index 241f8e6eea..f62c726c05 100644
--- a/test/640-checker-short-simd/src/Main.java
+++ b/test/640-checker-short-simd/src/Main.java
@@ -135,8 +135,10 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- //
- // TODO: fill in when supported
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
@@ -147,9 +149,9 @@ public class Main {
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
- /// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- //
- // TODO: fill in when supported
+ // TODO: would need signedness flip.
+ /// CHECK-START: void Main.shr2() loop_optimization (after)
+ /// CHECK-NOT: VecUShr
static void shr2() {
for (int i = 0; i < 128; i++)
a[i] >>>= 2;
diff --git a/test/644-checker-deopt/expected.txt b/test/644-checker-deopt/expected.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/644-checker-deopt/expected.txt
+++ /dev/null
diff --git a/test/644-checker-deopt/info.txt b/test/644-checker-deopt/info.txt
deleted file mode 100644
index c5fb12c570..0000000000
--- a/test/644-checker-deopt/info.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Regression test for making sure HDeoptimize is executed before
-the code it should have prevented executing.
diff --git a/test/644-checker-deopt/profile b/test/644-checker-deopt/profile
deleted file mode 100644
index cb261cc694..0000000000
--- a/test/644-checker-deopt/profile
+++ /dev/null
@@ -1,2 +0,0 @@
-LMain;->inlineMonomorphic(LMain;)I+LMain;
-LMain;->inlinePolymorphic(LMain;)I+LMain;,LSubMain;
diff --git a/test/644-checker-deopt/run b/test/644-checker-deopt/run
deleted file mode 100644
index 146e180000..0000000000
--- a/test/644-checker-deopt/run
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-exec ${RUN} $@ --profile -Xcompiler-option --compiler-filter=speed-profile
diff --git a/test/644-checker-deopt/src/Main.java b/test/644-checker-deopt/src/Main.java
deleted file mode 100644
index 17c80a6057..0000000000
--- a/test/644-checker-deopt/src/Main.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
-
- /// CHECK-START: int Main.inlineMonomorphic(Main) inliner (before)
- /// CHECK: InvokeVirtual method_name:Main.getValue
-
- /// CHECK-START: int Main.inlineMonomorphic(Main) inliner (after)
- /// CHECK-NOT: InvokeVirtual method_name:Main.getValue
-
- /// CHECK-START: int Main.inlineMonomorphic(Main) licm (before)
- /// CHECK: <<Deopt:l\d+>> Deoptimize
- /// CHECK: InstanceFieldGet [<<Deopt>>] field_name:Main.value
-
- /// CHECK-START: int Main.inlineMonomorphic(Main) licm (after)
- /// CHECK: <<Deopt:l\d+>> Deoptimize
- /// CHECK: InstanceFieldGet [<<Deopt>>] field_name:Main.value
-
- public static int inlineMonomorphic(Main a) {
- if (a == null) {
- return 42;
- }
- int i = 0;
- while (i < 100) {
- i += a.getValue();
- }
- return i;
- }
-
- /// CHECK-START: int Main.inlinePolymorphic(Main) inliner (before)
- /// CHECK: InvokeVirtual method_name:Main.getValue
-
- /// CHECK-START: int Main.inlinePolymorphic(Main) inliner (after)
- /// CHECK-NOT: InvokeVirtual method_name:Main.getValue
-
- /// CHECK-START: int Main.inlineMonomorphic(Main) licm (before)
- /// CHECK: <<Deopt:l\d+>> Deoptimize
- /// CHECK: InstanceFieldGet [<<Deopt>>] field_name:Main.value
-
- /// CHECK-START: int Main.inlineMonomorphic(Main) licm (after)
- /// CHECK: <<Deopt:l\d+>> Deoptimize
- /// CHECK: InstanceFieldGet [<<Deopt>>] field_name:Main.value
- public static int inlinePolymorphic(Main a) {
- return a.getValue();
- }
-
- public int getValue() {
- return value;
- }
-
- public static void main(String[] args) {
- inlineMonomorphic(new Main());
- }
-
- int value = 1;
-}
-
-// Add a subclass of 'Main' to write the polymorphic inline cache in the profile.
-class SubMain extends Main {
-}
diff --git a/test/645-checker-abs-simd/src/Main.java b/test/645-checker-abs-simd/src/Main.java
index 76850abded..5a63d9f539 100644
--- a/test/645-checker-abs-simd/src/Main.java
+++ b/test/645-checker-abs-simd/src/Main.java
@@ -22,6 +22,67 @@ public class Main {
private static final int SPQUIET = 1 << 22;
private static final long DPQUIET = 1L << 51;
+ /// CHECK-START: void Main.doitByte(byte[]) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitByte(byte[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ private static void doitByte(byte[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = (byte) Math.abs(x[i]);
+ }
+ }
+
+ /// CHECK-START: void Main.doitChar(char[]) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.doitChar(char[]) loop_optimization (after)
+ /// CHECK-NOT: VecAbs
+ private static void doitChar(char[] x) {
+ // Basically a nop due to zero extension.
+ for (int i = 0; i < x.length; i++) {
+ x[i] = (char) Math.abs(x[i]);
+ }
+ }
+
+ /// CHECK-START: void Main.doitShort(short[]) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitShort(short[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ private static void doitShort(short[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = (short) Math.abs(x[i]);
+ }
+ }
+
/// CHECK-START: void Main.doitInt(int[]) loop_optimization (before)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
@@ -52,8 +113,16 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitLong(long[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsLong loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
- // TODO: Not supported yet.
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
private static void doitLong(long[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = Math.abs(x[i]);
@@ -90,8 +159,16 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitDouble(double[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsDouble loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
- // TODO: Not supported yet.
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
private static void doitDouble(double[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = Math.abs(x[i]);
@@ -99,6 +176,31 @@ public class Main {
}
public static void main(String[] args) {
+ // Bytes, chars, shorts.
+ byte[] xb = new byte[256];
+ for (int i = 0; i < 256; i++) {
+ xb[i] = (byte) i;
+ }
+ doitByte(xb);
+ for (int i = 0; i < 256; i++) {
+ expectEquals32((byte) Math.abs((byte) i), xb[i]);
+ }
+ char[] xc = new char[1024 * 64];
+ for (int i = 0; i < 1024 * 64; i++) {
+ xc[i] = (char) i;
+ }
+ doitChar(xc);
+ for (int i = 0; i < 1024 *64; i++) {
+ expectEquals32((char) Math.abs((char) i), xc[i]);
+ }
+ short[] xs = new short[1024 * 64];
+ for (int i = 0; i < 1024 * 64; i++) {
+ xs[i] = (short) i;
+ }
+ doitShort(xs);
+ for (int i = 0; i < 1024 * 64; i++) {
+ expectEquals32((short) Math.abs((short) i), xs[i]);
+ }
// Set up minint32, maxint32 and some others.
int[] xi = new int[8];
xi[0] = 0x80000000;
diff --git a/test/651-checker-byte-simd-minmax/src/Main.java b/test/651-checker-byte-simd-minmax/src/Main.java
index 8211ace741..fe4580784a 100644
--- a/test/651-checker-byte-simd-minmax/src/Main.java
+++ b/test/651-checker-byte-simd-minmax/src/Main.java
@@ -27,9 +27,12 @@ public class Main {
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // TODO: narrow type vectorization.
- /// CHECK-START: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-NOT: VecMin
+ /// CHECK-START-ARM64: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -37,6 +40,30 @@ public class Main {
}
}
+ /// CHECK-START-ARM64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<I255>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:i\d+>> InvokeStaticOrDirect [<<And1>>,<<And2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ private static void doitMinUnsigned(byte[] x, byte[] y, byte[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = (byte) Math.min(y[i] & 0xff, z[i] & 0xff);
+ }
+ }
+
/// CHECK-START: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (before)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -45,9 +72,12 @@ public class Main {
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // TODO: narrow type vectorization.
- /// CHECK-START: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-NOT: VecMax
+ /// CHECK-START-ARM64: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -55,6 +85,30 @@ public class Main {
}
}
+ /// CHECK-START-ARM64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<I255>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:i\d+>> InvokeStaticOrDirect [<<And1>>,<<And2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ private static void doitMaxUnsigned(byte[] x, byte[] y, byte[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = (byte) Math.max(y[i] & 0xff, z[i] & 0xff);
+ }
+ }
+
public static void main(String[] args) {
// Initialize cross-values for all possible values.
int total = 256 * 256;
@@ -77,11 +131,21 @@ public class Main {
byte expected = (byte) Math.min(y[i], z[i]);
expectEquals(expected, x[i]);
}
+ doitMinUnsigned(x, y, z);
+ for (int i = 0; i < total; i++) {
+ byte expected = (byte) Math.min(y[i] & 0xff, z[i] & 0xff);
+ expectEquals(expected, x[i]);
+ }
doitMax(x, y, z);
for (int i = 0; i < total; i++) {
byte expected = (byte) Math.max(y[i], z[i]);
expectEquals(expected, x[i]);
}
+ doitMaxUnsigned(x, y, z);
+ for (int i = 0; i < total; i++) {
+ byte expected = (byte) Math.max(y[i] & 0xff, z[i] & 0xff);
+ expectEquals(expected, x[i]);
+ }
System.out.println("passed");
}
diff --git a/test/651-checker-char-simd-minmax/src/Main.java b/test/651-checker-char-simd-minmax/src/Main.java
index 5ce7b94bf4..e2998dadf6 100644
--- a/test/651-checker-char-simd-minmax/src/Main.java
+++ b/test/651-checker-char-simd-minmax/src/Main.java
@@ -27,9 +27,12 @@ public class Main {
/// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // TODO: narrow type vectorization.
- /// CHECK-START: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
- /// CHECK-NOT: VecMin
+ /// CHECK-START-ARM64: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(char[] x, char[] y, char[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -45,9 +48,12 @@ public class Main {
/// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // TODO: narrow type vectorization.
- /// CHECK-START: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
- /// CHECK-NOT: VecMax
+ /// CHECK-START-ARM64: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(char[] x, char[] y, char[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-double-simd-minmax/src/Main.java b/test/651-checker-double-simd-minmax/src/Main.java
index e1711aef60..cf04f85906 100644
--- a/test/651-checker-double-simd-minmax/src/Main.java
+++ b/test/651-checker-double-simd-minmax/src/Main.java
@@ -48,7 +48,7 @@ public class Main {
/// CHECK-DAG: <<Max:d\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxDoubleDouble loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
- // TODO-x86: 0.0 vs -0.0?
+ // TODO x86: 0.0 vs -0.0?
//
/// CHECK-START-ARM64: void Main.doitMax(double[], double[], double[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
diff --git a/test/651-checker-int-simd-minmax/src/Main.java b/test/651-checker-int-simd-minmax/src/Main.java
index 4e05a9ded3..6cee7b5484 100644
--- a/test/651-checker-int-simd-minmax/src/Main.java
+++ b/test/651-checker-int-simd-minmax/src/Main.java
@@ -30,7 +30,7 @@ public class Main {
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
@@ -50,7 +50,7 @@ public class Main {
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
diff --git a/test/651-checker-short-simd-minmax/src/Main.java b/test/651-checker-short-simd-minmax/src/Main.java
index f34f5264c1..09485a2d8a 100644
--- a/test/651-checker-short-simd-minmax/src/Main.java
+++ b/test/651-checker-short-simd-minmax/src/Main.java
@@ -27,9 +27,12 @@ public class Main {
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // TODO: narrow type vectorization.
- /// CHECK-START: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
- /// CHECK-NOT: VecMin
+ /// CHECK-START-ARM64: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -37,6 +40,30 @@ public class Main {
}
}
+ /// CHECK-START-ARM64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:i\d+>> InvokeStaticOrDirect [<<And1>>,<<And2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ private static void doitMinUnsigned(short[] x, short[] y, short[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = (short) Math.min(y[i] & 0xffff, z[i] & 0xffff);
+ }
+ }
+
/// CHECK-START: void Main.doitMax(short[], short[], short[]) loop_optimization (before)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -45,9 +72,12 @@ public class Main {
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // TODO: narrow type vectorization.
- /// CHECK-START: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
- /// CHECK-NOT: VecMax
+ /// CHECK-START-ARM64: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -55,6 +85,30 @@ public class Main {
}
}
+ /// CHECK-START-ARM64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:i\d+>> InvokeStaticOrDirect [<<And1>>,<<And2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ private static void doitMaxUnsigned(short[] x, short[] y, short[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = (short) Math.max(y[i] & 0xffff, z[i] & 0xffff);
+ }
+ }
+
public static void main(String[] args) {
short[] interesting = {
(short) 0x0000, (short) 0x0001, (short) 0x007f,
@@ -91,11 +145,21 @@ public class Main {
short expected = (short) Math.min(y[i], z[i]);
expectEquals(expected, x[i]);
}
+ doitMinUnsigned(x, y, z);
+ for (int i = 0; i < total; i++) {
+ short expected = (short) Math.min(y[i] & 0xffff, z[i] & 0xffff);
+ expectEquals(expected, x[i]);
+ }
doitMax(x, y, z);
for (int i = 0; i < total; i++) {
short expected = (short) Math.max(y[i], z[i]);
expectEquals(expected, x[i]);
}
+ doitMaxUnsigned(x, y, z);
+ for (int i = 0; i < total; i++) {
+ short expected = (short) Math.max(y[i] & 0xffff, z[i] & 0xffff);
+ expectEquals(expected, x[i]);
+ }
System.out.println("passed");
}
diff --git a/test/652-deopt-intrinsic/expected.txt b/test/652-deopt-intrinsic/expected.txt
new file mode 100644
index 0000000000..6a5618ebc6
--- /dev/null
+++ b/test/652-deopt-intrinsic/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/652-deopt-intrinsic/info.txt b/test/652-deopt-intrinsic/info.txt
new file mode 100644
index 0000000000..58a90fad16
--- /dev/null
+++ b/test/652-deopt-intrinsic/info.txt
@@ -0,0 +1,2 @@
+Regression test for the interpreter/JIT, where the interpreter used to not
+record inline caches when seeing an intrinsic.
diff --git a/test/652-deopt-intrinsic/src/Main.java b/test/652-deopt-intrinsic/src/Main.java
new file mode 100644
index 0000000000..a82580c8a1
--- /dev/null
+++ b/test/652-deopt-intrinsic/src/Main.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ loop();
+ ensureJitCompiled(Main.class, "$noinline$doCall");
+ loop();
+ }
+
+ public static void loop() {
+ Main m = new Main();
+ for (int i = 0; i < 5000; i++) {
+ $noinline$doCall("foo");
+ $noinline$doCall(m);
+ if (numberOfDeoptimizations() != 0) {
+ throw new Error("Unexpected deoptimizations");
+ }
+ }
+ }
+
+ public static boolean $noinline$doCall(Object foo) {
+ return foo.equals(Main.class);
+ }
+
+ public static native int numberOfDeoptimizations();
+ public static native void ensureJitCompiled(Class<?> cls, String methodName);
+}
diff --git a/test/906-iterate-heap/expected.txt b/test/906-iterate-heap/expected.txt
index b6af8435de..73b7129bba 100644
--- a/test/906-iterate-heap/expected.txt
+++ b/test/906-iterate-heap/expected.txt
@@ -18,14 +18,14 @@
2
1@0 (32, 2xD '0000000000000000000000000000f03f')
2
+doTestPrimitiveFieldsClasses
10000@0 (static, int, index=3) 0000000000000000
10001
10000@0 (static, int, index=11) 0000000000000000
10001
-10000@0 (static, int, index=0) 0000000000000000
10001
-10000@0 (static, int, index=1) 0000000000000000
10001
+doTestPrimitiveFieldsIntegral
10000@0 (instance, int, index=2) 0000000000000000
10001@0 (instance, byte, index=4) 0000000000000001
10002@0 (instance, char, index=5) 0000000000000061
@@ -33,6 +33,7 @@
10004@0 (instance, long, index=7) 0000000000000004
10005@0 (instance, short, index=9) 0000000000000002
10006
+doTestPrimitiveFieldsFloat
10000@0 (instance, int, index=3) 0000000000000000
10001@0 (instance, byte, index=5) 0000000000000001
10002@0 (instance, char, index=6) 0000000000000061
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index 6534b4c3db..02ac69942a 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -408,5 +408,15 @@ extern "C" JNIEXPORT jstring JNICALL Java_art_Test906_iterateThroughHeapPrimitiv
return env->NewStringUTF(ffc.data.c_str());
}
+extern "C" JNIEXPORT jboolean JNICALL Java_art_Test906_checkInitialized(
+ JNIEnv* env, jclass, jclass c) {
+ jint status;
+ jvmtiError error = jvmti_env->GetClassStatus(c, &status);
+ if (JvmtiErrorToException(env, jvmti_env, error)) {
+ return false;
+ }
+ return (status & JVMTI_CLASS_STATUS_INITIALIZED) != 0;
+}
+
} // namespace Test906IterateHeap
} // namespace art
diff --git a/test/906-iterate-heap/src/art/Test906.java b/test/906-iterate-heap/src/art/Test906.java
index fe18e38501..65c2c8c560 100644
--- a/test/906-iterate-heap/src/art/Test906.java
+++ b/test/906-iterate-heap/src/art/Test906.java
@@ -142,6 +142,7 @@ public class Test906 {
}
private static void doTestPrimitiveFieldsClasses() {
+ System.out.println("doTestPrimitiveFieldsClasses");
setTag(IntObject.class, 10000);
System.out.println(iterateThroughHeapPrimitiveFields(10000));
System.out.println(getTag(IntObject.class));
@@ -152,18 +153,40 @@ public class Test906 {
System.out.println(getTag(FloatObject.class));
setTag(FloatObject.class, 0);
+ boolean correctHeapValue = false;
setTag(Inf1.class, 10000);
- System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ String heapTrace = iterateThroughHeapPrimitiveFields(10000);
+
+ if (!checkInitialized(Inf1.class)) {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=0) 0000000000000000");
+ } else {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=0) 0000000000000001");
+ }
+
+ if (!correctHeapValue)
+ System.out.println("Heap Trace for Inf1 is not as expected:\n" + heapTrace);
+
System.out.println(getTag(Inf1.class));
setTag(Inf1.class, 0);
setTag(Inf2.class, 10000);
- System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ heapTrace = iterateThroughHeapPrimitiveFields(10000);
+
+ if (!checkInitialized(Inf2.class)) {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=1) 0000000000000000");
+ } else {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=1) 0000000000000001");
+ }
+
+ if (!correctHeapValue)
+ System.out.println("Heap Trace for Inf2 is not as expected:\n" + heapTrace);
System.out.println(getTag(Inf2.class));
+
setTag(Inf2.class, 0);
}
private static void doTestPrimitiveFieldsIntegral() {
+ System.out.println("doTestPrimitiveFieldsIntegral");
IntObject intObject = new IntObject();
setTag(intObject, 10000);
System.out.println(iterateThroughHeapPrimitiveFields(10000));
@@ -171,6 +194,7 @@ public class Test906 {
}
private static void doTestPrimitiveFieldsFloat() {
+ System.out.println("doTestPrimitiveFieldsFloat");
FloatObject floatObject = new FloatObject();
setTag(floatObject, 10000);
System.out.println(iterateThroughHeapPrimitiveFields(10000));
@@ -265,6 +289,7 @@ public class Test906 {
return Main.getTag(o);
}
+ private static native boolean checkInitialized(Class<?> klass);
private static native int iterateThroughHeapCount(int heapFilter,
Class<?> klassFilter, int stopAfter);
private static native int iterateThroughHeapData(int heapFilter,
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
index b128d1cb70..80f8b9e947 100644
--- a/test/913-heaps/expected.txt
+++ b/test/913-heaps/expected.txt
@@ -140,9 +140,7 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
10001
10000@0 (static, int, index=11) 0000000000000000
10001
-10000@0 (static, int, index=0) 0000000000000000
10001
-10000@0 (static, int, index=1) 0000000000000000
10001
10000@0 (instance, int, index=2) 0000000000000000
10001@0 (instance, byte, index=4) 0000000000000001
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index ec36cebd43..bf3f7b66a5 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -1078,5 +1078,14 @@ extern "C" JNIEXPORT void JNICALL Java_art_Test913_iterateThroughHeapExt(
CHECK(gFoundExt);
}
+extern "C" JNIEXPORT jboolean JNICALL Java_art_Test913_checkInitialized(JNIEnv* env, jclass, jclass c) {
+ jint status;
+ jvmtiError error = jvmti_env->GetClassStatus(c, &status);
+ if (JvmtiErrorToException(env, jvmti_env, error)) {
+ return false;
+ }
+ return (status & JVMTI_CLASS_STATUS_INITIALIZED) != 0;
+}
+
} // namespace Test913Heaps
} // namespace art
diff --git a/test/913-heaps/src/art/Test913.java b/test/913-heaps/src/art/Test913.java
index 97f48eea03..b9990010ff 100644
--- a/test/913-heaps/src/art/Test913.java
+++ b/test/913-heaps/src/art/Test913.java
@@ -195,13 +195,33 @@ public class Test913 {
System.out.println(getTag(FloatObject.class));
setTag(FloatObject.class, 0);
+ boolean correctHeapValue = false;
setTag(Inf1.class, 10000);
- System.out.println(followReferencesPrimitiveFields(Inf1.class));
+ String heapTrace = followReferencesPrimitiveFields(Inf1.class);
+
+ if (!checkInitialized(Inf1.class)) {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=0) 0000000000000000");
+ } else {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=0) 0000000000000001");
+ }
+
+ if (!correctHeapValue)
+ System.out.println("Heap Trace for Inf1 is not as expected:\n" + heapTrace);
+
System.out.println(getTag(Inf1.class));
setTag(Inf1.class, 0);
setTag(Inf2.class, 10000);
- System.out.println(followReferencesPrimitiveFields(Inf2.class));
+ heapTrace = followReferencesPrimitiveFields(Inf2.class);
+
+ if (!checkInitialized(Inf2.class)) {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=1) 0000000000000000");
+ } else {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=1) 0000000000000001");
+ }
+
+ if (!correctHeapValue)
+ System.out.println("Heap Trace for Inf2 is not as expected:\n" + heapTrace);
System.out.println(getTag(Inf2.class));
setTag(Inf2.class, 0);
}
@@ -712,6 +732,7 @@ public class Test913 {
return Main.getTag(o);
}
+ private static native boolean checkInitialized(Class<?> klass);
private static native void setupGcCallback();
private static native void enableGcTracking(boolean enable);
private static native int getGcStarts();
diff --git a/test/987-stack-trace-dumping/expected.txt b/test/987-stack-trace-dumping/expected.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/987-stack-trace-dumping/expected.txt
+++ /dev/null
diff --git a/test/987-stack-trace-dumping/info.txt b/test/987-stack-trace-dumping/info.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/987-stack-trace-dumping/info.txt
+++ /dev/null
diff --git a/test/987-stack-trace-dumping/run b/test/987-stack-trace-dumping/run
deleted file mode 100755
index dee3e8ba04..0000000000
--- a/test/987-stack-trace-dumping/run
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Ask for stack traces to be dumped to a file rather than to stdout.
-./default-run "$@" --set-stack-trace-dump-dir
diff --git a/test/987-stack-trace-dumping/src/Main.java b/test/987-stack-trace-dumping/src/Main.java
deleted file mode 100644
index d1e8a1b56b..0000000000
--- a/test/987-stack-trace-dumping/src/Main.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.File;
-
-public class Main {
- public static void main(String[] args) throws Exception {
- if (args.length != 3) {
- throw new AssertionError("Unexpected number of args: " + args.length);
- }
-
- if (!"--stack-trace-dir".equals(args[1])) {
- throw new AssertionError("Unexpected argument in position 1: " + args[1]);
- }
-
- // Send ourselves signal 3, which forces stack traces to be written to disk.
- android.system.Os.kill(android.system.Os.getpid(), 3);
-
- File[] files = null;
- final String stackTraceDir = args[2];
- for (int i = 0; i < 5; ++i) {
- // Give the signal handler some time to run and dump traces - up to a maximum
- // of 5 seconds. This is a kludge, but it's hard to do this without using things
- // like inotify / WatchService and the like.
- Thread.sleep(1000);
-
- files = (new File(stackTraceDir)).listFiles();
- if (files != null && files.length == 1) {
- break;
- }
- }
-
-
- if (files == null) {
- throw new AssertionError("Gave up waiting for traces: " + java.util.Arrays.toString(files));
- }
-
- final String fileName = files[0].getName();
- if (!fileName.startsWith("anr-pid")) {
- throw new AssertionError("Unexpected prefix: " + fileName);
- }
-
- if (!fileName.contains(String.valueOf(android.system.Os.getpid()))) {
- throw new AssertionError("File name does not contain process PID: " + fileName);
- }
- }
-}
diff --git a/test/Android.bp b/test/Android.bp
index 1679669056..599b0115b4 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -51,9 +51,9 @@ art_cc_defaults {
// These really are gtests, but the gtest library comes from libart-gtest.so
gtest: false,
defaults: [
- "art_defaults",
- "art_debug_defaults",
"art_test_defaults",
+ "art_debug_defaults",
+ "art_defaults",
],
shared_libs: [
@@ -128,8 +128,8 @@ art_cc_defaults {
name: "libart-gtest-defaults",
host_supported: true,
defaults: [
- "art_defaults",
"art_debug_defaults",
+ "art_defaults",
],
shared_libs: [
"libartd",
@@ -202,8 +202,8 @@ art_cc_library {
cc_defaults {
name: "libartagent-defaults",
defaults: [
- "art_defaults",
"art_test_defaults",
+ "art_defaults",
],
shared_libs: [
"libbacktrace",
@@ -234,8 +234,8 @@ art_cc_test_library {
name: "libartagentd",
srcs: ["900-hello-plugin/load_unload.cc"],
defaults: [
- "libartagent-defaults",
"art_debug_defaults",
+ "libartagent-defaults",
],
shared_libs: ["libartd"],
}
@@ -313,8 +313,8 @@ art_cc_test_library {
art_cc_test_library {
name: "libtiagentd",
defaults: [
- "libtiagent-defaults",
"art_debug_defaults",
+ "libtiagent-defaults",
],
shared_libs: ["libartd"],
}
@@ -340,8 +340,8 @@ art_cc_test_library {
art_cc_test_library {
name: "libtistressd",
defaults: [
- "libtistress-defaults",
"art_debug_defaults",
+ "libtistress-defaults",
],
shared_libs: ["libartd"],
}
@@ -355,8 +355,8 @@ art_cc_test_library {
cc_defaults {
name: "libarttest-defaults",
defaults: [
- "art_defaults",
"art_test_defaults",
+ "art_defaults",
],
srcs: [
"common/runtime_state.cc",
@@ -421,8 +421,8 @@ art_cc_test_library {
art_cc_test_library {
name: "libarttestd",
defaults: [
- "libarttest-defaults",
"art_debug_defaults",
+ "libarttest-defaults",
],
shared_libs: ["libartd"],
}
@@ -431,9 +431,9 @@ art_cc_test_library {
name: "libnativebridgetest",
shared_libs: ["libart"],
defaults: [
- "art_defaults",
- "art_debug_defaults",
"art_test_defaults",
+ "art_debug_defaults",
+ "art_defaults",
],
srcs: ["115-native-bridge/nativebridge.cc"],
target: {
diff --git a/test/Android.run-test-jvmti-java-library.mk b/test/Android.run-test-jvmti-java-library.mk
index c480be5cb2..da28b4c4bd 100644
--- a/test/Android.run-test-jvmti-java-library.mk
+++ b/test/Android.run-test-jvmti-java-library.mk
@@ -151,4 +151,8 @@ $(foreach NR,$(JVMTI_RUN_TEST_GENERATED_NUMBERS),\
$(eval $(call GEN_JVMTI_RUN_TEST_GENERATED_FILE,$(NR))))
LOCAL_JAVA_RESOURCE_FILES := $(JVMTI_RUN_TEST_GENERATED_FILES)
+# We only want to depend on libcore.
+LOCAL_NO_STANDARD_LIBRARIES := true
+LOCAL_JAVA_LIBRARIES := core-all
+
include $(BUILD_JAVA_LIBRARY)
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index b683a27728..d2cfbffc30 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -238,4 +238,8 @@ extern "C" JNIEXPORT int JNICALL Java_Main_getHotnessCounter(JNIEnv* env,
return method->GetCounter();
}
+extern "C" JNIEXPORT int JNICALL Java_Main_numberOfDeoptimizations(JNIEnv*, jclass) {
+ return Runtime::Current()->GetNumberOfDeoptimizations();
+}
+
} // namespace art
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 4b44df7958..36ac307c5e 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -508,7 +508,6 @@
"640-checker-short-simd",
"641-checker-arraycopy",
"643-checker-bogus-ic",
- "644-checker-deopt",
"645-checker-abs-simd",
"706-checker-scheduler"],
"description": ["Checker tests are not compatible with jvmti."],
@@ -651,8 +650,7 @@
"969-iface-super",
"981-dedup-original-dex",
"984-obsolete-invoke",
- "985-re-obsolete",
- "987-stack-trace-dumping"
+ "985-re-obsolete"
],
"description": "The tests above fail with --build-with-javac-dx.",
"env_vars": {"ANDROID_COMPILE_WITH_JACK": "false"},
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index c99159f1ae..77ef25a75b 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -750,6 +750,9 @@ def print_analysis():
print_text(COLOR_ERROR + 'FAILED: ' + COLOR_NORMAL + '\n')
for test_info in failed_tests:
print_text(('%s\n%s\n' % (test_info[0], test_info[1])))
+ print_text(COLOR_ERROR + '----------' + COLOR_NORMAL + '\n')
+ for failed_test in sorted([test_info[0] for test_info in failed_tests]):
+ print_text(('%s\n' % (failed_test)))
def parse_test_name(test_name):
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index 133426f2f0..38556abff2 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -75,6 +75,12 @@ Things to move to perflib:
* Instance.isRoot and Instance.getRootTypes.
Release History:
+ 1.3 Pending
+
+ 1.2 May 26, 2017
+ Show registered native sizes of objects.
+ Simplify presentation of sample path from gc root.
+
1.1 Feb 21, 2017
Show java.lang.ref.Reference referents as "unreachable" instead of null.
diff --git a/tools/ahat/src/DocString.java b/tools/ahat/src/DocString.java
index c6303c8c35..7970bf8de4 100644
--- a/tools/ahat/src/DocString.java
+++ b/tools/ahat/src/DocString.java
@@ -126,6 +126,23 @@ class DocString {
}
/**
+ * Standard formatted DocString for describing a size.
+ *
+ * Nothing is printed for a size of zero.
+ * Set isPlaceHolder to true to indicate that the size field corresponds
+ * to a place holder object that should be annotated specially.
+ */
+ public static DocString size(long size, boolean isPlaceHolder) {
+ DocString string = new DocString();
+ if (isPlaceHolder) {
+ string.append(DocString.removed("del"));
+ } else if (size != 0) {
+ string.appendFormat("%,14d", size);
+ }
+ return string;
+ }
+
+ /**
* Standard formatted DocString for describing a change in size relative to
* a baseline.
* @param noCurrent - whether no current object exists.
diff --git a/tools/ahat/src/DominatedList.java b/tools/ahat/src/DominatedList.java
index f73e3ca027..75133b2184 100644
--- a/tools/ahat/src/DominatedList.java
+++ b/tools/ahat/src/DominatedList.java
@@ -55,7 +55,7 @@ class DominatedList {
@Override
public long getSize(AhatInstance element, AhatHeap heap) {
- return element.getRetainedSize(heap);
+ return element.getRetainedSize(heap).getSize();
}
@Override
diff --git a/tools/ahat/src/HeapTable.java b/tools/ahat/src/HeapTable.java
index 9abbe4a4ed..b04f2aebf7 100644
--- a/tools/ahat/src/HeapTable.java
+++ b/tools/ahat/src/HeapTable.java
@@ -45,16 +45,6 @@ class HeapTable {
List<ValueConfig<T>> getValueConfigs();
}
- private static DocString sizeString(long size, boolean isPlaceHolder) {
- DocString string = new DocString();
- if (isPlaceHolder) {
- string.append(DocString.removed("del"));
- } else if (size != 0) {
- string.appendFormat("%,14d", size);
- }
- return string;
- }
-
/**
* Render the table to the given document.
* @param query - The page query.
@@ -100,10 +90,10 @@ class HeapTable {
long basesize = config.getSize(base, heap.getBaseline());
total += size;
basetotal += basesize;
- vals.add(sizeString(size, elem.isPlaceHolder()));
+ vals.add(DocString.size(size, elem.isPlaceHolder()));
vals.add(DocString.delta(elem.isPlaceHolder(), base.isPlaceHolder(), size, basesize));
}
- vals.add(sizeString(total, elem.isPlaceHolder()));
+ vals.add(DocString.size(total, elem.isPlaceHolder()));
vals.add(DocString.delta(elem.isPlaceHolder(), base.isPlaceHolder(), total, basetotal));
for (ValueConfig<T> value : values) {
@@ -140,10 +130,10 @@ class HeapTable {
long basesize = basesummary.get(heap);
total += size;
basetotal += basesize;
- vals.add(sizeString(size, false));
+ vals.add(DocString.size(size, false));
vals.add(DocString.delta(false, false, size, basesize));
}
- vals.add(sizeString(total, false));
+ vals.add(DocString.size(total, false));
vals.add(DocString.delta(false, false, total, basetotal));
for (ValueConfig<T> value : values) {
@@ -159,7 +149,7 @@ class HeapTable {
public static <T extends Diffable<T>> boolean hasNonZeroEntry(AhatHeap heap,
TableConfig<T> config, List<T> elements) {
AhatHeap baseheap = heap.getBaseline();
- if (heap.getSize() > 0 || baseheap.getSize() > 0) {
+ if (!heap.getSize().isZero() || !baseheap.getSize().isZero()) {
for (T element : elements) {
if (config.getSize(element, heap) > 0 ||
config.getSize(element.getBaseline(), baseheap) > 0) {
diff --git a/tools/ahat/src/ObjectHandler.java b/tools/ahat/src/ObjectHandler.java
index 2e0ae6ed2d..d6f1faa3c3 100644
--- a/tools/ahat/src/ObjectHandler.java
+++ b/tools/ahat/src/ObjectHandler.java
@@ -19,7 +19,6 @@ package com.android.ahat;
import com.android.ahat.heapdump.AhatArrayInstance;
import com.android.ahat.heapdump.AhatClassInstance;
import com.android.ahat.heapdump.AhatClassObj;
-import com.android.ahat.heapdump.AhatHeap;
import com.android.ahat.heapdump.AhatInstance;
import com.android.ahat.heapdump.AhatSnapshot;
import com.android.ahat.heapdump.Diff;
@@ -29,7 +28,6 @@ import com.android.ahat.heapdump.Site;
import com.android.ahat.heapdump.Value;
import java.io.IOException;
import java.util.Collection;
-import java.util.Collections;
import java.util.List;
import java.util.Objects;
@@ -72,16 +70,6 @@ class ObjectHandler implements AhatHandler {
doc.descriptions();
doc.description(DocString.text("Class"), Summarizer.summarize(cls));
- DocString sizeDescription = DocString.format("%,14d ", inst.getSize());
- sizeDescription.appendDelta(false, base.isPlaceHolder(),
- inst.getSize(), base.getSize());
- doc.description(DocString.text("Size"), sizeDescription);
-
- DocString rsizeDescription = DocString.format("%,14d ", inst.getTotalRetainedSize());
- rsizeDescription.appendDelta(false, base.isPlaceHolder(),
- inst.getTotalRetainedSize(), base.getTotalRetainedSize());
- doc.description(DocString.text("Retained Size"), rsizeDescription);
-
doc.description(DocString.text("Heap"), DocString.text(inst.getHeap().getName()));
Collection<String> rootTypes = inst.getRootTypes();
@@ -98,6 +86,13 @@ class ObjectHandler implements AhatHandler {
doc.end();
+ doc.section("Object Size");
+ SizeTable.table(doc, new Column(""), inst != base && !base.isPlaceHolder());
+ SizeTable.row(doc, DocString.text("Shallow"), inst.getSize(), base.getSize());
+ SizeTable.row(doc, DocString.text("Retained"),
+ inst.getTotalRetainedSize(), base.getTotalRetainedSize());
+ SizeTable.end(doc);
+
printBitmap(doc, inst);
if (inst.isClassInstance()) {
printClassInstanceFields(doc, query, inst.asClassInstance());
@@ -249,47 +244,16 @@ class ObjectHandler implements AhatHandler {
private void printGcRootPath(Doc doc, Query query, AhatInstance inst) {
doc.section("Sample Path from GC Root");
List<PathElement> path = inst.getPathFromGcRoot();
-
- // Add a dummy PathElement as a marker for the root.
- final PathElement root = new PathElement(null, null);
- path.add(0, root);
-
- HeapTable.TableConfig<PathElement> table = new HeapTable.TableConfig<PathElement>() {
- public String getHeapsDescription() {
- return "Bytes Retained by Heap (Dominators Only)";
- }
-
- public long getSize(PathElement element, AhatHeap heap) {
- if (element == root) {
- return heap.getSize();
- }
- if (element.isDominator) {
- return element.instance.getRetainedSize(heap);
- }
- return 0;
- }
-
- public List<HeapTable.ValueConfig<PathElement>> getValueConfigs() {
- HeapTable.ValueConfig<PathElement> value = new HeapTable.ValueConfig<PathElement>() {
- public String getDescription() {
- return "Path Element";
- }
-
- public DocString render(PathElement element) {
- if (element == root) {
- return DocString.link(DocString.uri("rooted"), DocString.text("ROOT"));
- } else {
- DocString label = DocString.text("→ ");
- label.append(Summarizer.summarize(element.instance));
- label.append(element.field);
- return label;
- }
- }
- };
- return Collections.singletonList(value);
- }
+ doc.table(new Column(""), new Column("Path Element"));
+ doc.row(DocString.text("(rooted)"),
+ DocString.link(DocString.uri("root"), DocString.text("ROOT")));
+ for (PathElement element : path) {
+ DocString label = DocString.text("→ ");
+ label.append(Summarizer.summarize(element.instance));
+ label.append(element.field);
+ doc.row(DocString.text(element.isDominator ? "(dominator)" : ""), label);
};
- HeapTable.render(doc, query, DOMINATOR_PATH_ID, table, mSnapshot, path);
+ doc.end();
}
public void printDominatedObjects(Doc doc, Query query, AhatInstance inst) {
diff --git a/tools/ahat/src/ObjectsHandler.java b/tools/ahat/src/ObjectsHandler.java
index 3062d23b53..86d48f1702 100644
--- a/tools/ahat/src/ObjectsHandler.java
+++ b/tools/ahat/src/ObjectsHandler.java
@@ -54,23 +54,18 @@ class ObjectsHandler implements AhatHandler {
doc.title("Objects");
- doc.table(
- new Column("Size", Column.Align.RIGHT),
- new Column("Δ", Column.Align.RIGHT, mSnapshot.isDiffed()),
+ SizeTable.table(doc, mSnapshot.isDiffed(),
new Column("Heap"),
new Column("Object"));
SubsetSelector<AhatInstance> selector = new SubsetSelector(query, OBJECTS_ID, insts);
for (AhatInstance inst : selector.selected()) {
AhatInstance base = inst.getBaseline();
- doc.row(
- DocString.format("%,14d", inst.getSize()),
- DocString.delta(inst.isPlaceHolder(), base.isPlaceHolder(),
- inst.getSize(), base.getSize()),
+ SizeTable.row(doc, inst.getSize(), base.getSize(),
DocString.text(inst.getHeap().getName()),
Summarizer.summarize(inst));
}
- doc.end();
+ SizeTable.end(doc);
selector.render(doc);
}
}
diff --git a/tools/ahat/src/OverviewHandler.java b/tools/ahat/src/OverviewHandler.java
index ea305c4e94..c9f84259a9 100644
--- a/tools/ahat/src/OverviewHandler.java
+++ b/tools/ahat/src/OverviewHandler.java
@@ -18,16 +18,12 @@ package com.android.ahat;
import com.android.ahat.heapdump.AhatHeap;
import com.android.ahat.heapdump.AhatSnapshot;
-import com.android.ahat.heapdump.Diffable;
+import com.android.ahat.heapdump.Size;
import java.io.File;
import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
class OverviewHandler implements AhatHandler {
- private static final String OVERVIEW_ID = "overview";
-
private AhatSnapshot mSnapshot;
private File mHprof;
private File mBaseHprof;
@@ -53,39 +49,27 @@ class OverviewHandler implements AhatHandler {
}
doc.end();
- doc.section("Heap Sizes");
- printHeapSizes(doc, query);
+ doc.section("Bytes Retained by Heap");
+ printHeapSizes(doc);
doc.big(Menu.getMenu());
}
- private static class TableElem implements Diffable<TableElem> {
- @Override public TableElem getBaseline() {
- return this;
- }
-
- @Override public boolean isPlaceHolder() {
- return false;
- }
- }
-
- private void printHeapSizes(Doc doc, Query query) {
- List<TableElem> dummy = Collections.singletonList(new TableElem());
-
- HeapTable.TableConfig<TableElem> table = new HeapTable.TableConfig<TableElem>() {
- public String getHeapsDescription() {
- return "Bytes Retained by Heap";
- }
-
- public long getSize(TableElem element, AhatHeap heap) {
- return heap.getSize();
+ private void printHeapSizes(Doc doc) {
+ SizeTable.table(doc, new Column("Heap"), mSnapshot.isDiffed());
+ Size totalSize = Size.ZERO;
+ Size totalBase = Size.ZERO;
+ for (AhatHeap heap : mSnapshot.getHeaps()) {
+ Size size = heap.getSize();
+ Size base = heap.getBaseline().getSize();
+ if (!size.isZero() || !base.isZero()) {
+ SizeTable.row(doc, DocString.text(heap.getName()), size, base);
+ totalSize = totalSize.plus(size);
+ totalBase = totalBase.plus(base);
}
-
- public List<HeapTable.ValueConfig<TableElem>> getValueConfigs() {
- return Collections.emptyList();
- }
- };
- HeapTable.render(doc, query, OVERVIEW_ID, table, mSnapshot, dummy);
+ }
+ SizeTable.row(doc, DocString.text("Total"), totalSize, totalBase);
+ SizeTable.end(doc);
}
}
diff --git a/tools/ahat/src/SiteHandler.java b/tools/ahat/src/SiteHandler.java
index febf1713fb..7a831d3018 100644
--- a/tools/ahat/src/SiteHandler.java
+++ b/tools/ahat/src/SiteHandler.java
@@ -60,7 +60,7 @@ class SiteHandler implements AhatHandler {
}
public long getSize(Site element, AhatHeap heap) {
- return element.getSize(heap);
+ return element.getSize(heap).getSize();
}
public List<HeapTable.ValueConfig<Site>> getValueConfigs() {
@@ -80,10 +80,7 @@ class SiteHandler implements AhatHandler {
}
doc.section("Objects Allocated");
-
- doc.table(
- new Column("Reachable Bytes Allocated", Column.Align.RIGHT),
- new Column("Δ", Column.Align.RIGHT, mSnapshot.isDiffed()),
+ SizeTable.table(doc, mSnapshot.isDiffed(),
new Column("Instances", Column.Align.RIGHT),
new Column("Δ", Column.Align.RIGHT, mSnapshot.isDiffed()),
new Column("Heap"),
@@ -100,9 +97,7 @@ class SiteHandler implements AhatHandler {
for (Site.ObjectsInfo info : selector.selected()) {
Site.ObjectsInfo baseinfo = info.getBaseline();
String className = info.getClassName();
- doc.row(
- DocString.format("%,14d", info.numBytes),
- DocString.delta(false, false, info.numBytes, baseinfo.numBytes),
+ SizeTable.row(doc, info.numBytes, baseinfo.numBytes,
DocString.link(
DocString.formattedUri("objects?id=%d&depth=%d&heap=%s&class=%s",
site.getId(), site.getDepth(), info.heap.getName(), className),
@@ -111,7 +106,7 @@ class SiteHandler implements AhatHandler {
DocString.text(info.heap.getName()),
Summarizer.summarize(info.classObj));
}
- doc.end();
+ SizeTable.end(doc);
selector.render(doc);
}
}
diff --git a/tools/ahat/src/SitePrinter.java b/tools/ahat/src/SitePrinter.java
index 21ca2deda4..32037f4414 100644
--- a/tools/ahat/src/SitePrinter.java
+++ b/tools/ahat/src/SitePrinter.java
@@ -38,7 +38,7 @@ class SitePrinter {
}
public long getSize(Site element, AhatHeap heap) {
- return element.getSize(heap);
+ return element.getSize(heap).getSize();
}
public List<HeapTable.ValueConfig<Site>> getValueConfigs() {
diff --git a/tools/ahat/src/SizeTable.java b/tools/ahat/src/SizeTable.java
new file mode 100644
index 0000000000..46e395669f
--- /dev/null
+++ b/tools/ahat/src/SizeTable.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat;
+
+import com.android.ahat.heapdump.Size;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Class for rendering a table that includes all categories of Size.
+ * Two table formats are supported, one where a custom left column can be
+ * added before the size columns:
+ * |left column|Java Size|Native Size|...|Total Size|custom columns...|
+ *
+ * The other without the custom left column:
+ * |Java Size|Native Size|...|Total Size|custom columns...|
+ */
+class SizeTable {
+ /**
+ * Start a size table with a custom left column.
+ *
+ * |left column|Java Size|Native Size|...|Total Size|custom columns...|
+ *
+ * This should be followed by calls to the 'row' method to fill in the table
+ * contents and the 'end' method to end the table.
+ *
+ * Set showDiff to true if size diffs should be shown.
+ */
+ static void table(Doc doc, Column left, boolean showDiff, Column... columns) {
+ List<Column> cols = new ArrayList<Column>();
+ cols.add(left);
+ cols.add(new Column("Java Size", Column.Align.RIGHT));
+ cols.add(new Column("Δ", Column.Align.RIGHT, showDiff));
+ cols.add(new Column("Registered Native Size", Column.Align.RIGHT));
+ cols.add(new Column("Δ", Column.Align.RIGHT, showDiff));
+ cols.add(new Column("Total Size", Column.Align.RIGHT));
+ cols.add(new Column("Δ", Column.Align.RIGHT, showDiff));
+ cols.addAll(Arrays.asList(columns));
+ doc.table(cols.toArray(new Column[cols.size()]));
+ }
+
+ /**
+ * Add a row to the currently active size table with custom left column.
+ * The number of values must match the number of columns provided for the
+ * currently active table.
+ */
+ static void row(Doc doc, DocString left, Size size, Size base, DocString... values) {
+ List<DocString> vals = new ArrayList<DocString>();
+ vals.add(left);
+ vals.add(DocString.size(size.getJavaSize(), false));
+ vals.add(DocString.delta(false, false, size.getJavaSize(), base.getJavaSize()));
+ vals.add(DocString.size(size.getRegisteredNativeSize(), false));
+ vals.add(DocString.delta(false, false,
+ size.getRegisteredNativeSize(), base.getRegisteredNativeSize()));
+ vals.add(DocString.size(size.getSize(), false));
+ vals.add(DocString.delta(false, false, size.getSize(), base.getSize()));
+ vals.addAll(Arrays.asList(values));
+ doc.row(vals.toArray(new DocString[vals.size()]));
+ }
+
+ /**
+ * Start a size table without a custom left column.
+ *
+ * |Java Size|Native Size|...|Total Size|custom columns...|
+ * This should be followed by calls to the 'row' method to fill in the table
+ * contents and the 'end' method to end the table.
+ *
+ * Set showDiff to true if size diffs should be shown.
+ */
+ static void table(Doc doc, boolean showDiff, Column... columns) {
+ // Re-use the code for a size table with custom left column by having an
+ // invisible custom left column.
+ table(doc, new Column("", Column.Align.LEFT, false), showDiff, columns);
+ }
+
+ /**
+ * Add a row to the currently active size table without a custom left column.
+ * The number of values must match the number of columns provided for the
+ * currently active table.
+ */
+ static void row(Doc doc, Size size, Size base, DocString... values) {
+ row(doc, new DocString(), size, base, values);
+ }
+
+ /**
+ * End the currently active table.
+ */
+ static void end(Doc doc) {
+ doc.end();
+ }
+}
diff --git a/tools/ahat/src/heapdump/AhatHeap.java b/tools/ahat/src/heapdump/AhatHeap.java
index c39adc4b41..b8897a182c 100644
--- a/tools/ahat/src/heapdump/AhatHeap.java
+++ b/tools/ahat/src/heapdump/AhatHeap.java
@@ -18,7 +18,7 @@ package com.android.ahat.heapdump;
public class AhatHeap implements Diffable<AhatHeap> {
private String mName;
- private long mSize = 0;
+ private Size mSize = Size.ZERO;
private int mIndex;
private AhatHeap mBaseline;
private boolean mIsPlaceHolder = false;
@@ -47,8 +47,8 @@ public class AhatHeap implements Diffable<AhatHeap> {
return new AhatHeap(name, baseline);
}
- void addToSize(long increment) {
- mSize += increment;
+ void addToSize(Size size) {
+ mSize = mSize.plus(size);
}
/**
@@ -69,7 +69,7 @@ public class AhatHeap implements Diffable<AhatHeap> {
/**
* Returns the total number of bytes allocated on this heap.
*/
- public long getSize() {
+ public Size getSize() {
return mSize;
}
diff --git a/tools/ahat/src/heapdump/AhatInstance.java b/tools/ahat/src/heapdump/AhatInstance.java
index e6b9c00384..af369d95d8 100644
--- a/tools/ahat/src/heapdump/AhatInstance.java
+++ b/tools/ahat/src/heapdump/AhatInstance.java
@@ -20,17 +20,18 @@ import com.android.tools.perflib.heap.ClassObj;
import com.android.tools.perflib.heap.Instance;
import com.android.tools.perflib.heap.RootObj;
import java.awt.image.BufferedImage;
+import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
+import java.util.Deque;
import java.util.List;
public abstract class AhatInstance implements Diffable<AhatInstance> {
private long mId;
- private long mSize;
- private long mTotalRetainedSize;
- private long mRetainedSizes[]; // Retained size indexed by heap index
+ private Size mSize;
+ private Size[] mRetainedSizes; // Retained size indexed by heap index
private boolean mIsReachable;
private AhatHeap mHeap;
private AhatInstance mImmediateDominator;
@@ -63,15 +64,10 @@ public abstract class AhatInstance implements Diffable<AhatInstance> {
*/
void initialize(AhatSnapshot snapshot, Instance inst) {
mId = inst.getId();
- mSize = inst.getSize();
- mTotalRetainedSize = inst.getTotalRetainedSize();
+ mSize = new Size(inst.getSize(), 0);
mIsReachable = inst.isReachable();
List<AhatHeap> heaps = snapshot.getHeaps();
- mRetainedSizes = new long[heaps.size()];
- for (AhatHeap heap : heaps) {
- mRetainedSizes[heap.getIndex()] = inst.getRetainedSize(heap.getIndex());
- }
mHeap = snapshot.getHeap(inst.getHeap().getName());
@@ -130,7 +126,7 @@ public abstract class AhatInstance implements Diffable<AhatInstance> {
/**
* Returns the shallow number of bytes this object takes up.
*/
- public long getSize() {
+ public Size getSize() {
return mSize;
}
@@ -138,16 +134,32 @@ public abstract class AhatInstance implements Diffable<AhatInstance> {
* Returns the number of bytes belonging to the given heap that this instance
* retains.
*/
- public long getRetainedSize(AhatHeap heap) {
+ public Size getRetainedSize(AhatHeap heap) {
int index = heap.getIndex();
- return 0 <= index && index < mRetainedSizes.length ? mRetainedSizes[heap.getIndex()] : 0;
+ if (mRetainedSizes != null && 0 <= index && index < mRetainedSizes.length) {
+ return mRetainedSizes[heap.getIndex()];
+ }
+ return Size.ZERO;
}
/**
* Returns the total number of bytes this instance retains.
*/
- public long getTotalRetainedSize() {
- return mTotalRetainedSize;
+ public Size getTotalRetainedSize() {
+ Size size = Size.ZERO;
+ if (mRetainedSizes != null) {
+ for (int i = 0; i < mRetainedSizes.length; i++) {
+ size = size.plus(mRetainedSizes[i]);
+ }
+ }
+ return size;
+ }
+
+ /**
+ * Increment the number of registered native bytes tied to this object.
+ */
+ void addRegisteredNativeSize(long size) {
+ mSize = mSize.plusRegisteredNativeSize(size);
}
/**
@@ -452,4 +464,41 @@ public abstract class AhatInstance implements Diffable<AhatInstance> {
AhatInstance newPlaceHolderInstance() {
return new AhatPlaceHolderInstance(this);
}
+
+ /**
+ * Recursively compute the retained size of the given instance and all
+ * other instances it dominates.
+ */
+ static void computeRetainedSize(AhatInstance inst, int numHeaps) {
+ // Note: We can't use a recursive implementation because it can lead to
+ // stack overflow. Use an iterative implementation instead.
+ //
+ // Objects not yet processed will have mRetainedSizes set to null.
+ // Once prepared, an object will have mRetainedSizes set to an array of 0
+ // sizes.
+ Deque<AhatInstance> deque = new ArrayDeque<AhatInstance>();
+ deque.push(inst);
+
+ while (!deque.isEmpty()) {
+ inst = deque.pop();
+ if (inst.mRetainedSizes == null) {
+ inst.mRetainedSizes = new Size[numHeaps];
+ for (int i = 0; i < numHeaps; i++) {
+ inst.mRetainedSizes[i] = Size.ZERO;
+ }
+ inst.mRetainedSizes[inst.mHeap.getIndex()] =
+ inst.mRetainedSizes[inst.mHeap.getIndex()].plus(inst.mSize);
+ deque.push(inst);
+ for (AhatInstance dominated : inst.mDominated) {
+ deque.push(dominated);
+ }
+ } else {
+ for (AhatInstance dominated : inst.mDominated) {
+ for (int i = 0; i < numHeaps; i++) {
+ inst.mRetainedSizes[i] = inst.mRetainedSizes[i].plus(dominated.mRetainedSizes[i]);
+ }
+ }
+ }
+ }
+ }
}
diff --git a/tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java b/tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java
index c6ad87fda5..2b3e056a1e 100644
--- a/tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java
+++ b/tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java
@@ -29,16 +29,16 @@ public class AhatPlaceHolderClassObj extends AhatClassObj {
baseline.setBaseline(this);
}
- @Override public long getSize() {
- return 0;
+ @Override public Size getSize() {
+ return Size.ZERO;
}
- @Override public long getRetainedSize(AhatHeap heap) {
- return 0;
+ @Override public Size getRetainedSize(AhatHeap heap) {
+ return Size.ZERO;
}
- @Override public long getTotalRetainedSize() {
- return 0;
+ @Override public Size getTotalRetainedSize() {
+ return Size.ZERO;
}
@Override public AhatHeap getHeap() {
diff --git a/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java b/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java
index 9412eae9a1..4aac80484d 100644
--- a/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java
+++ b/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java
@@ -29,16 +29,16 @@ public class AhatPlaceHolderInstance extends AhatInstance {
baseline.setBaseline(this);
}
- @Override public long getSize() {
- return 0;
+ @Override public Size getSize() {
+ return Size.ZERO;
}
- @Override public long getRetainedSize(AhatHeap heap) {
- return 0;
+ @Override public Size getRetainedSize(AhatHeap heap) {
+ return Size.ZERO;
}
- @Override public long getTotalRetainedSize() {
- return 0;
+ @Override public Size getTotalRetainedSize() {
+ return Size.ZERO;
}
@Override public AhatHeap getHeap() {
diff --git a/tools/ahat/src/heapdump/AhatSnapshot.java b/tools/ahat/src/heapdump/AhatSnapshot.java
index 20b85da763..35d6c8a315 100644
--- a/tools/ahat/src/heapdump/AhatSnapshot.java
+++ b/tools/ahat/src/heapdump/AhatSnapshot.java
@@ -82,8 +82,7 @@ public class AhatSnapshot implements Diffable<AhatSnapshot> {
Snapshot snapshot = Snapshot.createSnapshot(buffer, map);
snapshot.computeDominators();
- // Properly label the class of class objects in the perflib snapshot, and
- // count the total number of instances.
+ // Properly label the class of class objects in the perflib snapshot.
final ClassObj javaLangClass = snapshot.findClass("java.lang.Class");
if (javaLangClass != null) {
for (Heap heap : snapshot.getHeaps()) {
@@ -134,12 +133,19 @@ public class AhatSnapshot implements Diffable<AhatSnapshot> {
}
});
+ Map<Instance, Long> registeredNative = Perflib.getRegisteredNativeAllocations(snapshot);
+
// Initialize ahat snapshot and instances based on the perflib snapshot
// and instances.
for (AhatInstance ahat : mInstances) {
Instance inst = snapshot.findInstance(ahat.getId());
ahat.initialize(this, inst);
+ Long registeredNativeSize = registeredNative.get(inst);
+ if (registeredNativeSize != null) {
+ ahat.addRegisteredNativeSize(registeredNativeSize);
+ }
+
if (inst.getImmediateDominator() == Snapshot.SENTINEL_ROOT) {
mRooted.add(ahat);
}
@@ -166,6 +172,13 @@ public class AhatSnapshot implements Diffable<AhatSnapshot> {
}
}
snapshot.dispose();
+
+ // Compute the retained sizes of objects. We do this explicitly now rather
+ // than relying on the retained sizes computed by perflib so that
+ // registered native sizes are included.
+ for (AhatInstance inst : mRooted) {
+ AhatInstance.computeRetainedSize(inst, mHeaps.size());
+ }
}
/**
diff --git a/tools/ahat/src/heapdump/Perflib.java b/tools/ahat/src/heapdump/Perflib.java
new file mode 100644
index 0000000000..d0264a3b39
--- /dev/null
+++ b/tools/ahat/src/heapdump/Perflib.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat.heapdump;
+
+import com.android.tools.perflib.heap.ClassInstance;
+import com.android.tools.perflib.heap.ClassObj;
+import com.android.tools.perflib.heap.Instance;
+import com.android.tools.perflib.heap.Snapshot;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Collection of utilities that may be suitable to have in perflib instead of
+ * ahat.
+ */
+public class Perflib {
+ /**
+ * Return a map from each instance in the given snapshot that is tied to a
+ * registered native allocation to the total registered native size
+ * associated with that instance.
+ */
+ public static Map<Instance, Long> getRegisteredNativeAllocations(Snapshot snapshot) {
+ Map<Instance, Long> allocs = new HashMap<Instance, Long>();
+ ClassObj cleanerClass = snapshot.findClass("sun.misc.Cleaner");
+ if (cleanerClass != null) {
+ for (Instance cleanerInst : cleanerClass.getInstancesList()) {
+ ClassInstance cleaner = (ClassInstance)cleanerInst;
+ Object referent = getField(cleaner, "referent");
+ if (referent instanceof Instance) {
+ Instance inst = (Instance)referent;
+ Object thunkValue = getField(cleaner, "thunk");
+ if (thunkValue instanceof ClassInstance) {
+ ClassInstance thunk = (ClassInstance)thunkValue;
+ ClassObj thunkClass = thunk.getClassObj();
+ String cleanerThunkClassName = "libcore.util.NativeAllocationRegistry$CleanerThunk";
+ if (thunkClass != null && cleanerThunkClassName.equals(thunkClass.getClassName())) {
+ for (ClassInstance.FieldValue thunkField : thunk.getValues()) {
+ if (thunkField.getValue() instanceof ClassInstance) {
+ ClassInstance registry = (ClassInstance)thunkField.getValue();
+ ClassObj registryClass = registry.getClassObj();
+ String registryClassName = "libcore.util.NativeAllocationRegistry";
+ if (registryClass != null
+ && registryClassName.equals(registryClass.getClassName())) {
+ Object sizeValue = getField(registry, "size");
+ if (sizeValue instanceof Long) {
+ long size = (Long)sizeValue;
+ if (size > 0) {
+ Long old = allocs.get(inst);
+ allocs.put(inst, old == null ? size : old + size);
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return allocs;
+ }
+
+ /**
+ * Helper function to read a single field from a perflib class instance.
+ * Returns null if field not found. Note there is no way to distinguish
+ * between a field not found and a field value of null.
+ */
+ private static Object getField(ClassInstance cls, String name) {
+ for (ClassInstance.FieldValue field : cls.getValues()) {
+ if (name.equals(field.getField().getName())) {
+ return field.getValue();
+ }
+ }
+ return null;
+ }
+}
diff --git a/tools/ahat/src/heapdump/Site.java b/tools/ahat/src/heapdump/Site.java
index 738eaf0687..fdd4eea7b3 100644
--- a/tools/ahat/src/heapdump/Site.java
+++ b/tools/ahat/src/heapdump/Site.java
@@ -44,7 +44,7 @@ public class Site implements Diffable<Site> {
// The total size of objects allocated in this site (including child sites),
// organized by heap index. Heap indices outside the range of mSizesByHeap
// implicitly have size 0.
- private long[] mSizesByHeap;
+ private Size[] mSizesByHeap;
// List of child sites.
private List<Site> mChildren;
@@ -60,14 +60,18 @@ public class Site implements Diffable<Site> {
public AhatHeap heap;
public AhatClassObj classObj; // May be null.
public long numInstances;
- public long numBytes;
+ public Size numBytes;
private ObjectsInfo baseline;
- public ObjectsInfo(AhatHeap heap, AhatClassObj classObj, long numInstances, long numBytes) {
+ /**
+ * Construct a new, empty objects info for the given heap and class
+ * combination.
+ */
+ public ObjectsInfo(AhatHeap heap, AhatClassObj classObj) {
this.heap = heap;
this.classObj = classObj;
- this.numInstances = numInstances;
- this.numBytes = numBytes;
+ this.numInstances = 0;
+ this.numBytes = Size.ZERO;
this.baseline = this;
}
@@ -107,7 +111,7 @@ public class Site implements Diffable<Site> {
mLineNumber = line;
mId = id;
mDepth = depth;
- mSizesByHeap = new long[1];
+ mSizesByHeap = new Size[0];
mChildren = new ArrayList<Site>();
mObjects = new ArrayList<AhatInstance>();
mObjectsInfos = new ArrayList<ObjectsInfo>();
@@ -133,16 +137,20 @@ public class Site implements Diffable<Site> {
if (inst.isReachable()) {
AhatHeap heap = inst.getHeap();
if (heap.getIndex() >= site.mSizesByHeap.length) {
- long[] newSizes = new long[heap.getIndex() + 1];
+ Size[] newSizes = new Size[heap.getIndex() + 1];
for (int i = 0; i < site.mSizesByHeap.length; i++) {
newSizes[i] = site.mSizesByHeap[i];
}
+ for (int i = site.mSizesByHeap.length; i < heap.getIndex() + 1; i++) {
+ newSizes[i] = Size.ZERO;
+ }
site.mSizesByHeap = newSizes;
}
- site.mSizesByHeap[heap.getIndex()] += inst.getSize();
+ site.mSizesByHeap[heap.getIndex()]
+ = site.mSizesByHeap[heap.getIndex()].plus(inst.getSize());
info.numInstances++;
- info.numBytes += inst.getSize();
+ info.numBytes = info.numBytes.plus(inst.getSize());
}
if (depth > 0) {
@@ -172,9 +180,9 @@ public class Site implements Diffable<Site> {
}
// Get the size of a site for a specific heap.
- public long getSize(AhatHeap heap) {
+ public Size getSize(AhatHeap heap) {
int index = heap.getIndex();
- return index >= 0 && index < mSizesByHeap.length ? mSizesByHeap[index] : 0;
+ return index >= 0 && index < mSizesByHeap.length ? mSizesByHeap[index] : Size.ZERO;
}
/**
@@ -198,7 +206,7 @@ public class Site implements Diffable<Site> {
ObjectsInfo info = classToObjectsInfo.get(classObj);
if (info == null) {
- info = new ObjectsInfo(heap, classObj, 0, 0);
+ info = new ObjectsInfo(heap, classObj);
mObjectsInfos.add(info);
classToObjectsInfo.put(classObj, info);
}
@@ -210,10 +218,10 @@ public class Site implements Diffable<Site> {
}
// Get the combined size of the site for all heaps.
- public long getTotalSize() {
- long total = 0;
+ public Size getTotalSize() {
+ Size total = Size.ZERO;
for (int i = 0; i < mSizesByHeap.length; i++) {
- total += mSizesByHeap[i];
+ total = total.plus(mSizesByHeap[i]);
}
return total;
}
diff --git a/tools/ahat/src/heapdump/Size.java b/tools/ahat/src/heapdump/Size.java
new file mode 100644
index 0000000000..7c8db900df
--- /dev/null
+++ b/tools/ahat/src/heapdump/Size.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat.heapdump;
+
+/**
+ * The Size class is used to represent how much space an instance takes up.
+ *
+ * An abstraction is introduced rather than using a long directly in order to
+ * more easily keep track of the different components of the size. For
+ * example, some instances may have associated native, code, or graphics
+ * sizes.
+ *
+ * Size objects are immutable.
+ */
+public class Size {
+ private final long mJavaSize;
+ private final long mRegisteredNativeSize;
+
+ public static Size ZERO = new Size(0, 0);
+
+ public Size(long javaSize, long registeredNativeSize) {
+ mJavaSize = javaSize;
+ mRegisteredNativeSize = registeredNativeSize;
+ }
+
+ public long getSize() {
+ return mJavaSize + mRegisteredNativeSize;
+ }
+
+ public long getJavaSize() {
+ return mJavaSize;
+ }
+
+ public long getRegisteredNativeSize() {
+ return mRegisteredNativeSize;
+ }
+
+ /**
+ * Returns true if all the fields of this size object are zero.
+ */
+ public boolean isZero() {
+ return mJavaSize == 0 && mRegisteredNativeSize == 0;
+ }
+
+ /**
+ * Return a new Size object that is the sum of this size and the other.
+ */
+ public Size plus(Size other) {
+ if (isZero()) {
+ return other;
+ } else if (other.isZero()) {
+ return this;
+ } else {
+ return new Size(mJavaSize + other.mJavaSize,
+ mRegisteredNativeSize + other.mRegisteredNativeSize);
+ }
+ }
+
+ /**
+ * Return a new Size object that has 'size' more registered native size than
+ * this Size object.
+ */
+ public Size plusRegisteredNativeSize(long size) {
+ return new Size(mJavaSize, mRegisteredNativeSize + size);
+ }
+
+ @Override public boolean equals(Object other) {
+ if (other instanceof Size) {
+ Size s = (Size)other;
+ return mJavaSize == s.mJavaSize && mRegisteredNativeSize == s.mRegisteredNativeSize;
+ }
+ return false;
+ }
+}
+
diff --git a/tools/ahat/src/heapdump/Sort.java b/tools/ahat/src/heapdump/Sort.java
index 93d147a49e..0745803817 100644
--- a/tools/ahat/src/heapdump/Sort.java
+++ b/tools/ahat/src/heapdump/Sort.java
@@ -32,6 +32,17 @@ import java.util.List;
*/
public class Sort {
/**
+ * Compare sizes by their total size.
+ * This sorts sizes from smaller total size to larger total size.
+ */
+ public static final Comparator<Size> SIZE_BY_SIZE = new Comparator<Size>() {
+ @Override
+ public int compare(Size a, Size b) {
+ return Long.compare(a.getSize(), b.getSize());
+ }
+ };
+
+ /**
* Compare instances by their total retained size.
* Different instances with the same total retained size are considered
* equal for the purposes of comparison.
@@ -41,7 +52,7 @@ public class Sort {
= new Comparator<AhatInstance>() {
@Override
public int compare(AhatInstance a, AhatInstance b) {
- return Long.compare(b.getTotalRetainedSize(), a.getTotalRetainedSize());
+ return SIZE_BY_SIZE.compare(b.getTotalRetainedSize(), a.getTotalRetainedSize());
}
};
@@ -60,7 +71,7 @@ public class Sort {
@Override
public int compare(AhatInstance a, AhatInstance b) {
- return Long.compare(b.getRetainedSize(mHeap), a.getRetainedSize(mHeap));
+ return SIZE_BY_SIZE.compare(b.getRetainedSize(mHeap), a.getRetainedSize(mHeap));
}
}
@@ -119,7 +130,7 @@ public class Sort {
@Override
public int compare(Site a, Site b) {
- return Long.compare(b.getSize(mHeap), a.getSize(mHeap));
+ return SIZE_BY_SIZE.compare(b.getSize(mHeap), a.getSize(mHeap));
}
}
@@ -130,7 +141,7 @@ public class Sort {
public static final Comparator<Site> SITE_BY_TOTAL_SIZE = new Comparator<Site>() {
@Override
public int compare(Site a, Site b) {
- return Long.compare(b.getTotalSize(), a.getTotalSize());
+ return SIZE_BY_SIZE.compare(b.getTotalSize(), a.getTotalSize());
}
};
@@ -158,7 +169,7 @@ public class Sort {
= new Comparator<Site.ObjectsInfo>() {
@Override
public int compare(Site.ObjectsInfo a, Site.ObjectsInfo b) {
- return Long.compare(b.numBytes, a.numBytes);
+ return SIZE_BY_SIZE.compare(b.numBytes, a.numBytes);
}
};
diff --git a/tools/ahat/src/manifest.txt b/tools/ahat/src/manifest.txt
index 20245f312d..c35ccf1cd3 100644
--- a/tools/ahat/src/manifest.txt
+++ b/tools/ahat/src/manifest.txt
@@ -1,4 +1,4 @@
Name: ahat/
Implementation-Title: ahat
-Implementation-Version: 1.1
+Implementation-Version: 1.2
Main-Class: com.android.ahat.Main
diff --git a/tools/ahat/test-dump/Main.java b/tools/ahat/test-dump/Main.java
index 7a05b1cb89..3d3de78255 100644
--- a/tools/ahat/test-dump/Main.java
+++ b/tools/ahat/test-dump/Main.java
@@ -20,6 +20,7 @@ import java.lang.ref.PhantomReference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.lang.ref.WeakReference;
+import libcore.util.NativeAllocationRegistry;
import org.apache.harmony.dalvik.ddmc.DdmVmInternal;
/**
@@ -98,6 +99,11 @@ public class Main {
bigArray[i] = (byte)((i*i) & 0xFF);
}
+ // 0x12345, 50000, and 0xABCDABCD are arbitrary values.
+ NativeAllocationRegistry registry = new NativeAllocationRegistry(
+ Main.class.getClassLoader(), 0x12345, 50000);
+ registry.registerNativeAllocation(anObject, 0xABCDABCD);
+
addedObject = baseline ? null : new AddedObject();
removedObject = baseline ? new RemovedObject() : null;
modifiedObject = new ModifiedObject();
diff --git a/tools/ahat/test/InstanceTest.java b/tools/ahat/test/InstanceTest.java
index 3a50150c0e..71b081c9a4 100644
--- a/tools/ahat/test/InstanceTest.java
+++ b/tools/ahat/test/InstanceTest.java
@@ -21,6 +21,7 @@ import com.android.ahat.heapdump.AhatHeap;
import com.android.ahat.heapdump.AhatInstance;
import com.android.ahat.heapdump.AhatSnapshot;
import com.android.ahat.heapdump.PathElement;
+import com.android.ahat.heapdump.Size;
import com.android.ahat.heapdump.Value;
import com.android.tools.perflib.heap.hprof.HprofClassDump;
import com.android.tools.perflib.heap.hprof.HprofConstant;
@@ -292,13 +293,13 @@ public class InstanceTest {
// allocated on, and should be 0 for all other heaps.
AhatInstance anObject = dump.getDumpedAhatInstance("anObject");
AhatSnapshot snapshot = dump.getAhatSnapshot();
- long size = anObject.getSize();
+ Size size = anObject.getSize();
assertEquals(size, anObject.getTotalRetainedSize());
assertEquals(size, anObject.getRetainedSize(anObject.getHeap()));
for (AhatHeap heap : snapshot.getHeaps()) {
if (!heap.equals(anObject.getHeap())) {
assertEquals(String.format("For heap '%s'", heap.getName()),
- 0, anObject.getRetainedSize(heap));
+ Size.ZERO, anObject.getRetainedSize(heap));
}
}
}
diff --git a/tools/ahat/test/NativeAllocationTest.java b/tools/ahat/test/NativeAllocationTest.java
new file mode 100644
index 0000000000..7436be8311
--- /dev/null
+++ b/tools/ahat/test/NativeAllocationTest.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat;
+
+import com.android.ahat.heapdump.AhatInstance;
+import com.android.ahat.heapdump.AhatSnapshot;
+import java.io.IOException;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class NativeAllocationTest {
+
+ @Test
+ public void nativeAllocation() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+
+ AhatSnapshot snapshot = dump.getAhatSnapshot();
+ AhatInstance referent = dump.getDumpedAhatInstance("anObject");
+ assertEquals(50000, referent.getSize().getRegisteredNativeSize());
+ }
+}
+
diff --git a/tools/ahat/test/Tests.java b/tools/ahat/test/Tests.java
index 2fd3286172..c7e9b1811b 100644
--- a/tools/ahat/test/Tests.java
+++ b/tools/ahat/test/Tests.java
@@ -24,6 +24,7 @@ public class Tests {
args = new String[]{
"com.android.ahat.DiffTest",
"com.android.ahat.InstanceTest",
+ "com.android.ahat.NativeAllocationTest",
"com.android.ahat.ObjectHandlerTest",
"com.android.ahat.OverviewHandlerTest",
"com.android.ahat.PerformanceTest",