-rw-r--r--  Android.mk | 9
-rw-r--r--  build/Android.common_path.mk | 5
-rw-r--r--  compiler/driver/compiler_driver.cc | 335
-rw-r--r--  compiler/oat_writer.h | 2
-rw-r--r--  compiler/optimizing/code_generator.h | 2
-rw-r--r--  compiler/optimizing/code_generator_arm.h | 2
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 2
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h | 2
-rw-r--r--  compiler/optimizing/code_generator_mips.h | 2
-rw-r--r--  compiler/optimizing/code_generator_mips64.h | 2
-rw-r--r--  compiler/optimizing/inliner.cc | 43
-rw-r--r--  compiler/optimizing/inliner.h | 3
-rw-r--r--  dex2oat/dex2oat.cc | 7
-rw-r--r--  profman/profile_assistant_test.cc | 31
-rw-r--r--  profman/profman.cc | 23
-rw-r--r--  runtime/cha.cc | 61
-rw-r--r--  runtime/cha.h | 7
-rw-r--r--  runtime/cha_test.cc | 64
-rw-r--r--  runtime/interpreter/mterp/mips/op_double_to_int.S | 20
-rw-r--r--  runtime/interpreter/mterp/mips/op_double_to_long.S | 15
-rw-r--r--  runtime/interpreter/mterp/mips/op_float_to_int.S | 19
-rw-r--r--  runtime/interpreter/mterp/mips/op_float_to_long.S | 14
-rw-r--r--  runtime/interpreter/mterp/mips64/op_double_to_int.S | 22
-rw-r--r--  runtime/interpreter/mterp/mips64/op_double_to_long.S | 22
-rw-r--r--  runtime/interpreter/mterp/mips64/op_float_to_int.S | 22
-rw-r--r--  runtime/interpreter/mterp/mips64/op_float_to_long.S | 22
-rw-r--r--  runtime/interpreter/mterp/out/mterp_mips.S | 68
-rw-r--r--  runtime/interpreter/mterp/out/mterp_mips64.S | 161
-rw-r--r--  runtime/jit/jit_code_cache.cc | 32
-rw-r--r--  runtime/jit/profile_compilation_info.cc | 81
-rw-r--r--  runtime/jit/profile_compilation_info.h | 39
-rw-r--r--  runtime/jit/profile_compilation_info_test.cc | 79
-rw-r--r--  runtime/jit/profile_saver.cc | 6
-rw-r--r--  runtime/type_reference.h (renamed from compiler/utils/type_reference.h) | 10
-rw-r--r--  test/596-app-images/app_images.cc | 6
-rw-r--r--  test/596-app-images/src/Main.java | 42
-rw-r--r--  test/638-checker-inline-caches/src/Main.java | 75
-rw-r--r--  test/644-checker-deopt/expected.txt | 0
-rw-r--r--  test/644-checker-deopt/info.txt | 2
-rw-r--r--  test/644-checker-deopt/profile | 2
-rw-r--r--  test/644-checker-deopt/run | 17
-rw-r--r--  test/644-checker-deopt/src/Main.java | 74
-rw-r--r--  test/906-iterate-heap/expected.txt | 5
-rw-r--r--  test/906-iterate-heap/iterate_heap.cc | 10
-rw-r--r--  test/906-iterate-heap/src/art/Test906.java | 29
-rw-r--r--  test/913-heaps/expected.txt | 2
-rw-r--r--  test/913-heaps/heaps.cc | 9
-rw-r--r--  test/913-heaps/src/art/Test913.java | 25
-rw-r--r--  test/knownfailures.json | 1
-rw-r--r--  tools/ahat/README.txt | 5
-rw-r--r--  tools/ahat/src/DocString.java | 17
-rw-r--r--  tools/ahat/src/DominatedList.java | 2
-rw-r--r--  tools/ahat/src/HeapTable.java | 20
-rw-r--r--  tools/ahat/src/ObjectHandler.java | 17
-rw-r--r--  tools/ahat/src/ObjectsHandler.java | 11
-rw-r--r--  tools/ahat/src/OverviewHandler.java | 50
-rw-r--r--  tools/ahat/src/SiteHandler.java | 13
-rw-r--r--  tools/ahat/src/SitePrinter.java | 2
-rw-r--r--  tools/ahat/src/SizeTable.java | 106
-rw-r--r--  tools/ahat/src/heapdump/AhatHeap.java | 8
-rw-r--r--  tools/ahat/src/heapdump/AhatInstance.java | 77
-rw-r--r--  tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java | 12
-rw-r--r--  tools/ahat/src/heapdump/AhatPlaceHolderInstance.java | 12
-rw-r--r--  tools/ahat/src/heapdump/AhatSnapshot.java | 17
-rw-r--r--  tools/ahat/src/heapdump/Perflib.java | 91
-rw-r--r--  tools/ahat/src/heapdump/Site.java | 38
-rw-r--r--  tools/ahat/src/heapdump/Size.java | 89
-rw-r--r--  tools/ahat/src/heapdump/Sort.java | 21
-rw-r--r--  tools/ahat/src/manifest.txt | 2
-rw-r--r--  tools/ahat/test-dump/Main.java | 6
-rw-r--r--  tools/ahat/test/InstanceTest.java | 5
-rw-r--r--  tools/ahat/test/NativeAllocationTest.java | 37
-rw-r--r--  tools/ahat/test/Tests.java | 1
73 files changed, 1360 insertions, 832 deletions
diff --git a/Android.mk b/Android.mk
index 803ba502cb..c0935a72bc 100644
--- a/Android.mk
+++ b/Android.mk
@@ -87,11 +87,20 @@ ART_HOST_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
$(ART_HOST_DEX_DEPENDENCIES) \
$(ART_HOST_SHARED_LIBRARY_DEPENDENCIES)
+
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
+ART_HOST_DEPENDENCIES += $(ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES)
+endif
+
ART_TARGET_DEPENDENCIES := \
$(ART_TARGET_EXECUTABLES) \
$(ART_TARGET_DEX_DEPENDENCIES) \
$(ART_TARGET_SHARED_LIBRARY_DEPENDENCIES)
+ifeq ($(ART_BUILD_TARGET_DEBUG),true)
+ART_TARGET_DEPENDENCIES += $(ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES)
+endif
+
########################################################################
# test rules
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 6de5aefc0b..446611816a 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -97,14 +97,19 @@ ART_HOST_DEX_DEPENDENCIES := $(foreach jar,$(HOST_CORE_JARS),$(HOST_OUT_JAVA_LIB
ART_TARGET_DEX_DEPENDENCIES := $(foreach jar,$(TARGET_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
ART_CORE_SHARED_LIBRARIES := libjavacore libopenjdk libopenjdkjvm libopenjdkjvmti
+ART_CORE_SHARED_DEBUG_LIBRARIES := libopenjdkd libopenjdkjvmd libopenjdkjvmtid
ART_HOST_SHARED_LIBRARY_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(ART_HOST_OUT_SHARED_LIBRARIES)/$(lib)$(ART_HOST_SHLIB_EXTENSION))
+ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(ART_HOST_OUT_SHARED_LIBRARIES)/$(lib)$(ART_HOST_SHLIB_EXTENSION))
ifdef HOST_2ND_ARCH
ART_HOST_SHARED_LIBRARY_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(2ND_HOST_OUT_SHARED_LIBRARIES)/$(lib).so)
+ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(2ND_HOST_OUT_SHARED_LIBRARIES)/$(lib).so)
endif
ART_TARGET_SHARED_LIBRARY_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
+ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
ifdef TARGET_2ND_ARCH
ART_TARGET_SHARED_LIBRARY_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(2ND_TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
+ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(2ND_TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
endif
ART_CORE_DEBUGGABLE_EXECUTABLES := \
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index c2d792d352..70c3f6098a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2238,7 +2238,7 @@ class InitializeClassVisitor : public CompilationVisitor {
public:
explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
- void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ void Visit(size_t class_def_index) OVERRIDE {
ATRACE_CALL();
jobject jclass_loader = manager_->GetClassLoader();
const DexFile& dex_file = *manager_->GetDexFile();
@@ -2253,89 +2253,123 @@ class InitializeClassVisitor : public CompilationVisitor {
Handle<mirror::Class> klass(
hs.NewHandle(manager_->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
- if (klass != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) {
- // Only try to initialize classes that were successfully verified.
- if (klass->IsVerified()) {
- // Attempt to initialize the class but bail if we either need to initialize the super-class
- // or static fields.
- manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
- if (!klass->IsInitialized()) {
- // We don't want non-trivial class initialization occurring on multiple threads due to
- // deadlock problems. For example, a parent class is initialized (holding its lock) that
- // refers to a sub-class in its static/class initializer causing it to try to acquire the
- // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock)
- // after first initializing its parents, whose locks are acquired. This leads to a
- // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock.
- // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
- // than use a special Object for the purpose we use the Class of java.lang.Class.
- Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass()));
- ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
- // Attempt to initialize allowing initialization of parent classes but still not static
- // fields.
+ if (klass != nullptr && !SkipClass(manager_->GetClassLoader(), dex_file, klass.Get())) {
+ TryInitializeClass(klass, class_loader);
+ }
+ // Clear any class not found or verification exceptions.
+ soa.Self()->ClearException();
+ }
+
+ // A helper function for initializing klass.
+ void TryInitializeClass(Handle<mirror::Class> klass, Handle<mirror::ClassLoader>& class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const DexFile& dex_file = klass->GetDexFile();
+ const DexFile::ClassDef* class_def = klass->GetClassDef();
+ const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def->class_idx_);
+ const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ StackHandleScope<3> hs(soa.Self());
+
+ mirror::Class::Status old_status = klass->GetStatus();
+ // Only try to initialize classes that were successfully verified.
+ if (klass->IsVerified()) {
+ // Attempt to initialize the class but bail if we either need to initialize the super-class
+ // or static fields.
+ manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
+ old_status = klass->GetStatus();
+ if (!klass->IsInitialized()) {
+ // We don't want non-trivial class initialization occurring on multiple threads due to
+ // deadlock problems. For example, a parent class is initialized (holding its lock) that
+ // refers to a sub-class in its static/class initializer causing it to try to acquire the
+ // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock)
+ // after first initializing its parents, whose locks are acquired. This leads to a
+ // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock.
+ // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
+ // than use a special Object for the purpose we use the Class of java.lang.Class.
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass()));
+ ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
+ // Attempt to initialize allowing initialization of parent classes but still not static
+ // fields.
+ bool is_superclass_initialized = InitializeDependencies(klass, class_loader, soa.Self());
+ if (is_superclass_initialized) {
manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
- if (!klass->IsInitialized()) {
+ }
+ old_status = klass->GetStatus();
+ // If the superclass cannot be initialized, there is no need to proceed.
+ if (!klass->IsInitialized() &&
+ is_superclass_initialized &&
+ manager_->GetCompiler()->IsImageClass(descriptor)) {
+ bool can_init_static_fields = false;
+ if (manager_->GetCompiler()->GetCompilerOptions().IsBootImage()) {
// We need to initialize static fields, we only do this for image classes that aren't
// marked with the $NoPreloadHolder (which implies this should not be initialized early).
- bool can_init_static_fields =
- manager_->GetCompiler()->GetCompilerOptions().IsBootImage() &&
- manager_->GetCompiler()->IsImageClass(descriptor) &&
- !StringPiece(descriptor).ends_with("$NoPreloadHolder;");
- if (can_init_static_fields) {
- VLOG(compiler) << "Initializing: " << descriptor;
- // TODO multithreading support. We should ensure the current compilation thread has
- // exclusive access to the runtime and the transaction. To achieve this, we could use
- // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity
- // checks in Thread::AssertThreadSuspensionIsAllowable.
- Runtime* const runtime = Runtime::Current();
- Transaction transaction;
-
- // Run the class initializer in transaction mode.
- runtime->EnterTransactionMode(&transaction);
- const mirror::Class::Status old_status = klass->GetStatus();
- bool success = manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true,
- true);
- // TODO we detach transaction from runtime to indicate we quit the transactional
- // mode which prevents the GC from visiting objects modified during the transaction.
- // Ensure GC is not run so don't access freed objects when aborting transaction.
-
- {
- ScopedAssertNoThreadSuspension ants("Transaction end");
- runtime->ExitTransactionMode();
-
- if (!success) {
- CHECK(soa.Self()->IsExceptionPending());
- mirror::Throwable* exception = soa.Self()->GetException();
- VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
- << exception->Dump();
- std::ostream* file_log = manager_->GetCompiler()->
- GetCompilerOptions().GetInitFailureOutput();
- if (file_log != nullptr) {
- *file_log << descriptor << "\n";
- *file_log << exception->Dump() << "\n";
- }
- soa.Self()->ClearException();
- transaction.Rollback();
- CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
+ can_init_static_fields = !StringPiece(descriptor).ends_with("$NoPreloadHolder;");
+ } else {
+ can_init_static_fields = manager_->GetCompiler()->GetCompilerOptions().IsAppImage() &&
+ !soa.Self()->IsExceptionPending() &&
+ NoClinitInDependency(klass, soa.Self(), &class_loader);
+ // TODO: The clinit check can be removed since it is already done when
+ // initializing the superclass. Keep it for now because it also covers
+ // the intern-string processing; remove it once intern strings and clinit
+ // are both handled during initialization.
+ }
+
+ if (can_init_static_fields) {
+ VLOG(compiler) << "Initializing: " << descriptor;
+ // TODO multithreading support. We should ensure the current compilation thread has
+ // exclusive access to the runtime and the transaction. To achieve this, we could use
+ // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity
+ // checks in Thread::AssertThreadSuspensionIsAllowable.
+ Runtime* const runtime = Runtime::Current();
+ Transaction transaction;
+
+ // Run the class initializer in transaction mode.
+ runtime->EnterTransactionMode(&transaction);
+ bool success = manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true,
+ true);
+ // TODO we detach transaction from runtime to indicate we quit the transactional
+ // mode which prevents the GC from visiting objects modified during the transaction.
+ // Ensure GC is not run so don't access freed objects when aborting transaction.
+
+ {
+ ScopedAssertNoThreadSuspension ants("Transaction end");
+ runtime->ExitTransactionMode();
+
+ if (!success) {
+ CHECK(soa.Self()->IsExceptionPending());
+ mirror::Throwable* exception = soa.Self()->GetException();
+ VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
+ << exception->Dump();
+ std::ostream* file_log = manager_->GetCompiler()->
+ GetCompilerOptions().GetInitFailureOutput();
+ if (file_log != nullptr) {
+ *file_log << descriptor << "\n";
+ *file_log << exception->Dump() << "\n";
}
+ soa.Self()->ClearException();
+ transaction.Rollback();
+ CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
}
+ }
- if (!success) {
- // On failure, still intern strings of static fields and seen in <clinit>, as these
- // will be created in the zygote. This is separated from the transaction code just
- // above as we will allocate strings, so must be allowed to suspend.
+ if (!success) {
+ // On failure, still intern strings of static fields and seen in <clinit>, as these
+ // will be created in the zygote. This is separated from the transaction code just
+ // above as we will allocate strings, so must be allowed to suspend.
+ if (&klass->GetDexFile() == manager_->GetDexFile()) {
InternStrings(klass, class_loader);
}
}
}
- soa.Self()->AssertNoPendingException();
}
+ soa.Self()->AssertNoPendingException();
}
- // Record the final class status if necessary.
- ClassReference ref(manager_->GetDexFile(), class_def_index);
- manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
}
- // Clear any class not found or verification exceptions.
- soa.Self()->ClearException();
+ // Record the final class status if necessary.
+ ClassReference ref(&dex_file, klass->GetDexClassDefIndex());
+ // Record the status backed up before static encoded fields were initialized,
+ // because the static encoded branch needs the recorded status to remain uninitialized.
+ manager_->GetCompiler()->RecordClassStatus(ref, old_status);
}
private:
@@ -2390,6 +2424,160 @@ class InitializeClassVisitor : public CompilationVisitor {
}
}
+ bool NoPotentialInternStrings(Handle<mirror::Class> klass,
+ Handle<mirror::ClassLoader>* class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::DexCache> h_dex_cache = hs.NewHandle(klass->GetDexCache());
+ const DexFile* dex_file = h_dex_cache->GetDexFile();
+ const DexFile::ClassDef* class_def = klass->GetClassDef();
+ annotations::RuntimeEncodedStaticFieldValueIterator value_it(*dex_file,
+ &h_dex_cache,
+ class_loader,
+ manager_->GetClassLinker(),
+ *class_def);
+
+ const auto jString = annotations::RuntimeEncodedStaticFieldValueIterator::kString;
+ for ( ; value_it.HasNext(); value_it.Next()) {
+ if (value_it.GetValueType() == jString) {
+ // We don't want to cache static encoded strings, since they are potential interns.
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ bool ResolveTypesOfMethods(Thread* self, ArtMethod* m)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ auto rtn_type = m->GetReturnType(true);  // We only care that resolution succeeds; the resolved type itself is not used further.
+ if (rtn_type == nullptr) {
+ self->ClearException();
+ return false;
+ }
+ const DexFile::TypeList* types = m->GetParameterTypeList();
+ if (types != nullptr) {
+ for (uint32_t i = 0; i < types->Size(); ++i) {
+ dex::TypeIndex param_type_idx = types->GetTypeItem(i).type_idx_;
+ auto param_type = m->GetClassFromTypeIndex(param_type_idx, true);
+ if (param_type == nullptr) {
+ self->ClearException();
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ // Pre-resolve the types mentioned in all method signatures before starting a transaction,
+ // since ResolveType doesn't work in transaction mode.
+ bool PreResolveTypes(Thread* self, const Handle<mirror::Class>& klass)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ PointerSize pointer_size = manager_->GetClassLinker()->GetImagePointerSize();
+ for (ArtMethod& m : klass->GetMethods(pointer_size)) {
+ if (!ResolveTypesOfMethods(self, &m)) {
+ return false;
+ }
+ }
+ if (klass->IsInterface()) {
+ return true;
+ } else if (klass->HasSuperClass()) {
+ StackHandleScope<1> hs(self);
+ MutableHandle<mirror::Class> super_klass(hs.NewHandle<mirror::Class>(klass->GetSuperClass()));
+ for (int i = super_klass->GetVTableLength() - 1; i >= 0; --i) {
+ ArtMethod* m = klass->GetVTableEntry(i, pointer_size);
+ ArtMethod* super_m = super_klass->GetVTableEntry(i, pointer_size);
+ if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
+ return false;
+ }
+ }
+ for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+ super_klass.Assign(klass->GetIfTable()->GetInterface(i));
+ if (klass->GetClassLoader() != super_klass->GetClassLoader()) {
+ uint32_t num_methods = super_klass->NumVirtualMethods();
+ for (uint32_t j = 0; j < num_methods; ++j) {
+ ArtMethod* m = klass->GetIfTable()->GetMethodArray(i)->GetElementPtrSize<ArtMethod*>(
+ j, pointer_size);
+ ArtMethod* super_m = super_klass->GetVirtualMethod(j, pointer_size);
+ if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+ return true;
+ }
+
+ // Initialize the klass's dependencies recursively before initializing itself.
+ // Checking for interfaces is also necessary since interfaces can contain
+ // both default methods and static encoded fields.
+ bool InitializeDependencies(const Handle<mirror::Class>& klass,
+ Handle<mirror::ClassLoader> class_loader,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (klass->HasSuperClass()) {
+ ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> handle_scope_super(hs.NewHandle(super_class));
+ if (!handle_scope_super->IsInitialized()) {
+ this->TryInitializeClass(handle_scope_super, class_loader);
+ if (!handle_scope_super->IsInitialized()) {
+ return false;
+ }
+ }
+ }
+
+ uint32_t num_if = klass->NumDirectInterfaces();
+ for (size_t i = 0; i < num_if; i++) {
+ ObjPtr<mirror::Class>
+ interface = mirror::Class::GetDirectInterface(self, klass.Get(), i);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> handle_interface(hs.NewHandle(interface));
+
+ TryInitializeClass(handle_interface, class_loader);
+
+ if (!handle_interface->IsInitialized()) {
+ return false;
+ }
+ }
+
+ return PreResolveTypes(self, klass);
+ }
+
+ // In this phase, classes containing class initializers are ignored. Make sure no
+ // clinit appears in klass's superclass chain or interfaces.
+ bool NoClinitInDependency(const Handle<mirror::Class>& klass,
+ Thread* self,
+ Handle<mirror::ClassLoader>* class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* clinit =
+ klass->FindClassInitializer(manager_->GetClassLinker()->GetImagePointerSize());
+ if (clinit != nullptr) {
+ VLOG(compiler) << klass->PrettyClass() << ' ' << clinit->PrettyMethod(true);
+ return false;
+ }
+ if (klass->HasSuperClass()) {
+ ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> handle_scope_super(hs.NewHandle(super_class));
+ if (!NoClinitInDependency(handle_scope_super, self, class_loader))
+ return false;
+ }
+
+ uint32_t num_if = klass->NumDirectInterfaces();
+ for (size_t i = 0; i < num_if; i++) {
+ ObjPtr<mirror::Class>
+ interface = mirror::Class::GetDirectInterface(self, klass.Get(), i);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> handle_interface(hs.NewHandle(interface));
+ if (!NoClinitInDependency(handle_interface, self, class_loader))
+ return false;
+ }
+
+ return NoPotentialInternStrings(klass, class_loader);
+ }
+
const ParallelCompilationManager* const manager_;
};
@@ -2409,7 +2597,10 @@ void CompilerDriver::InitializeClasses(jobject jni_class_loader,
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, dex_files,
init_thread_pool);
- if (GetCompilerOptions().IsBootImage()) {
+
+ if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsAppImage()) {
+ // Set the thread count to 1 to support initialization for app images, since transactions
+ // don't support multithreading yet.
// TODO: remove this when transactional mode supports multithreading.
init_thread_count = 1U;
}
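
The core of the compiler_driver.cc change above is that a class is now initialized only after its superclass chain and direct interfaces have been initialized, with <clinit> run inside a transaction that can be rolled back on failure. Below is a minimal standalone sketch of that ordering; ToyClass, TryInitialize and InitializeDependencies are illustrative stand-ins, not ART's real types or API.

    #include <string>
    #include <vector>

    struct ToyClass {
      std::string descriptor;
      ToyClass* super = nullptr;          // superclass, if any
      std::vector<ToyClass*> interfaces;  // direct interfaces
      bool verified = true;
      bool initialized = false;
    };

    bool TryInitialize(ToyClass* klass);

    // Mirrors the idea of InitializeDependencies(): a class may only be
    // initialized once its superclass chain and direct interfaces are.
    bool InitializeDependencies(ToyClass* klass) {
      if (klass->super != nullptr && !TryInitialize(klass->super)) {
        return false;
      }
      for (ToyClass* itf : klass->interfaces) {
        // Interfaces matter too: they can carry default methods and
        // static encoded fields.
        if (!TryInitialize(itf)) {
          return false;
        }
      }
      return true;
    }

    bool TryInitialize(ToyClass* klass) {
      if (!klass->verified) {
        return false;  // only verified classes are considered
      }
      if (klass->initialized) {
        return true;
      }
      if (!InitializeDependencies(klass)) {
        return false;  // bail if a dependency cannot be initialized
      }
      // In the real code, <clinit> would now run inside a transaction so any
      // failure can be rolled back and the old class status restored.
      klass->initialized = true;
      return true;
    }
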
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index e778f75551..66b70ade2e 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -31,7 +31,7 @@
#include "os.h"
#include "safe_map.h"
#include "string_reference.h"
-#include "utils/type_reference.h"
+#include "type_reference.h"
namespace art {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 9ef692aaf0..c2b2ebfade 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -33,8 +33,8 @@
#include "read_barrier_option.h"
#include "stack_map_stream.h"
#include "string_reference.h"
+#include "type_reference.h"
#include "utils/label.h"
-#include "utils/type_reference.h"
namespace art {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index fa1c14dcda..2409a4d38d 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -24,8 +24,8 @@
#include "nodes.h"
#include "string_reference.h"
#include "parallel_move_resolver.h"
+#include "type_reference.h"
#include "utils/arm/assembler_thumb2.h"
-#include "utils/type_reference.h"
namespace art {
namespace arm {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 71e221da22..7a4b3d4805 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -25,8 +25,8 @@
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "string_reference.h"
+#include "type_reference.h"
#include "utils/arm64/assembler_arm64.h"
-#include "utils/type_reference.h"
// TODO(VIXL): Make VIXL compile with -Wshadow.
#pragma GCC diagnostic push
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 91e9a3edc4..ef809510ad 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -24,8 +24,8 @@
#include "nodes.h"
#include "string_reference.h"
#include "parallel_move_resolver.h"
+#include "type_reference.h"
#include "utils/arm/assembler_arm_vixl.h"
-#include "utils/type_reference.h"
// TODO(VIXL): make vixl clean wrt -Wshadow.
#pragma GCC diagnostic push
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index ff1fde6489..736b5070d9 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -23,8 +23,8 @@
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "string_reference.h"
+#include "type_reference.h"
#include "utils/mips/assembler_mips.h"
-#include "utils/type_reference.h"
namespace art {
namespace mips {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index f49ad49fce..8405040386 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -21,8 +21,8 @@
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
+#include "type_reference.h"
#include "utils/mips64/assembler_mips64.h"
-#include "utils/type_reference.h"
namespace art {
namespace mips64 {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 4284c689e7..f203d7f47e 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -470,6 +470,33 @@ static Handle<mirror::ObjectArray<mirror::Class>> AllocateInlineCacheHolder(
return inline_cache;
}
+bool HInliner::UseOnlyPolymorphicInliningWithNoDeopt() {
+ // If we are compiling AOT or OSR, pretend the call using inline caches is polymorphic and
+ // do not generate a deopt.
+ //
+ // For AOT:
+ // Generating a deopt does not ensure that we will actually capture the new types;
+ // and the danger is that we could be stuck in a loop with "forever" deoptimizations.
+ // Take for example the following scenario:
+ // - we capture the inline cache in one run
+ // - the next run, we deoptimize because we miss a type check, but the method
+ // never becomes hot again
+ // In this case, the inline cache will not be updated in the profile and the AOT code
+ // will keep deoptimizing.
+ // Another scenario is if we use profile compilation for a process which is not allowed
+ // to JIT (e.g. system server). If we deoptimize we will run interpreted code for the
+ // rest of the lifetime.
+ // TODO(calin):
+ // This is a compromise because we will most likely never update the inline cache
+ // in the profile (unless there's another reason to deopt). So we might be stuck with
+ // a sub-optimal inline cache.
+ // We could be smarter when capturing inline caches to mitigate this.
+ // (e.g. by having different thresholds for new and old methods).
+ //
+ // For OSR:
+ // We may come from the interpreter and it may have seen different receiver types.
+ return Runtime::Current()->IsAotCompiler() || outermost_graph_->IsCompilingOsr();
+}
bool HInliner::TryInlineFromInlineCache(const DexFile& caller_dex_file,
HInvoke* invoke_instruction,
ArtMethod* resolved_method)
@@ -503,9 +530,7 @@ bool HInliner::TryInlineFromInlineCache(const DexFile& caller_dex_file,
case kInlineCacheMonomorphic: {
MaybeRecordStat(kMonomorphicCall);
- if (outermost_graph_->IsCompilingOsr()) {
- // If we are compiling OSR, we pretend this call is polymorphic, as we may come from the
- // interpreter and it may have seen different receiver types.
+ if (UseOnlyPolymorphicInliningWithNoDeopt()) {
return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
} else {
return TryInlineMonomorphicCall(invoke_instruction, resolved_method, inline_cache);
@@ -578,7 +603,6 @@ HInliner::InlineCacheType HInliner::GetInlineCacheAOT(
return kInlineCacheNoData;
}
- // Use the profile arena when extracting the method info.
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> offline_profile =
pci->GetMethod(caller_dex_file.GetLocation(),
caller_dex_file.GetLocationChecksum(),
@@ -603,8 +627,8 @@ HInliner::InlineCacheType HInliner::ExtractClassesFromOfflineProfile(
const ProfileCompilationInfo::OfflineProfileMethodInfo& offline_profile,
/*out*/Handle<mirror::ObjectArray<mirror::Class>> inline_cache)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const auto it = offline_profile.inline_caches.find(invoke_instruction->GetDexPc());
- if (it == offline_profile.inline_caches.end()) {
+ const auto it = offline_profile.inline_caches->find(invoke_instruction->GetDexPc());
+ if (it == offline_profile.inline_caches->end()) {
return kInlineCacheUninitialized;
}
@@ -926,14 +950,11 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
// If we have inlined all targets before, and this receiver is the last seen,
// we deoptimize instead of keeping the original invoke instruction.
- bool deoptimize = all_targets_inlined &&
+ bool deoptimize = !UseOnlyPolymorphicInliningWithNoDeopt() &&
+ all_targets_inlined &&
(i != InlineCache::kIndividualCacheSize - 1) &&
(classes->Get(i + 1) == nullptr);
- if (outermost_graph_->IsCompilingOsr()) {
- // We do not support HDeoptimize in OSR methods.
- deoptimize = false;
- }
HInstruction* compare = AddTypeGuard(receiver,
cursor,
bb_cursor,
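
The inliner change above boils down to a small predicate: AOT and OSR compilations treat inline-cache calls as polymorphic and never emit the deoptimization, while JIT-compiled code deoptimizes only for the last recorded receiver once all targets were inlined. A sketch of that decision, with free functions standing in for the HInliner members (names other than the comments' are illustrative):

    // Equivalent of HInliner::UseOnlyPolymorphicInliningWithNoDeopt().
    bool UseOnlyPolymorphicInliningWithNoDeopt(bool is_aot_compiler,
                                               bool compiling_osr) {
      // AOT: a deopt may never be "repaired" because the inline cache in the
      // profile might never be updated. OSR: the interpreter may have seen
      // receiver types the cache did not capture.
      return is_aot_compiler || compiling_osr;
    }

    // Equivalent of the `deoptimize` computation in TryInlinePolymorphicCall.
    bool ShouldDeoptimizeAtLastCacheEntry(bool is_aot_compiler,
                                          bool compiling_osr,
                                          bool all_targets_inlined,
                                          size_t i,
                                          size_t cache_size,
                                          bool next_entry_is_null) {
      return !UseOnlyPolymorphicInliningWithNoDeopt(is_aot_compiler, compiling_osr) &&
             all_targets_inlined &&
             i != cache_size - 1 &&
             next_entry_is_null;
    }
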
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 9e4685cbf4..67476b6956 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -180,6 +180,9 @@ class HInliner : public HOptimization {
Handle<mirror::ObjectArray<mirror::Class>> classes)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns whether or not we should use only polymorphic inlining with no deoptimizations.
+ bool UseOnlyPolymorphicInliningWithNoDeopt();
+
// Try CHA-based devirtualization to change virtual method calls into
// direct calls.
// Returns the actual method that resolved_method can be devirtualized to.
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 8bdc576c13..a35b199346 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -2130,9 +2130,10 @@ class Dex2Oat FINAL {
bool LoadProfile() {
DCHECK(UseProfile());
- // TODO(calin): We should be using the runtime arena pool (instead of the default profile arean).
- // However the setup logic is messy and needs cleaning up before that (e.g. the oat writers are
- // created before the runtime).
+ // TODO(calin): We should be using the runtime arena pool (instead of the
+ // default profile arena). However the setup logic is messy and needs
+ // cleaning up before that (e.g. the oat writers are created before the
+ // runtime).
profile_compilation_info_.reset(new ProfileCompilationInfo());
ScopedFlock flock;
bool success = true;
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index b8366323d6..41b9f99207 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -32,7 +32,7 @@ namespace art {
class ProfileAssistantTest : public CommonRuntimeTest {
public:
- virtual void PostRuntimeCreate() {
+ void PostRuntimeCreate() OVERRIDE {
arena_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
}
@@ -72,10 +72,18 @@ class ProfileAssistantTest : public CommonRuntimeTest {
ASSERT_TRUE(profile.GetFile()->ResetOffset());
}
+ // Creates an inline cache which will be destructed at the end of the test.
+ ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() {
+ used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap(
+ std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+ return used_inline_caches.back().get();
+ }
+
ProfileCompilationInfo::OfflineProfileMethodInfo GetOfflineProfileMethodInfo(
const std::string& dex_location1, uint32_t dex_checksum1,
const std::string& dex_location2, uint32_t dex_checksum2) {
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi(arena_.get());
+ ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
pmi.dex_references.emplace_back(dex_location1, dex_checksum1);
pmi.dex_references.emplace_back(dex_location2, dex_checksum2);
@@ -83,7 +91,7 @@ class ProfileAssistantTest : public CommonRuntimeTest {
for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
// Polymorphic
for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
@@ -91,19 +99,19 @@ class ProfileAssistantTest : public CommonRuntimeTest {
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
// Megamorphic
for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMegamorphic();
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
// Missing types
for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMissingTypes();
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
return pmi;
@@ -258,8 +266,8 @@ class ProfileAssistantTest : public CommonRuntimeTest {
method->GetDexFile()->GetLocationChecksum(),
method->GetDexMethodIndex());
ASSERT_TRUE(pmi != nullptr);
- ASSERT_EQ(pmi->inline_caches.size(), 1u);
- ProfileCompilationInfo::DexPcData dex_pc_data = pmi->inline_caches.begin()->second;
+ ASSERT_EQ(pmi->inline_caches->size(), 1u);
+ const ProfileCompilationInfo::DexPcData& dex_pc_data = pmi->inline_caches->begin()->second;
ASSERT_EQ(dex_pc_data.is_megamorphic, is_megamorphic);
ASSERT_EQ(dex_pc_data.is_missing_types, is_missing_types);
@@ -280,6 +288,11 @@ class ProfileAssistantTest : public CommonRuntimeTest {
}
std::unique_ptr<ArenaAllocator> arena_;
+
+ // Cache of inline caches generated during tests.
+ // This makes it easier to pass data between different utilities and ensure that
+ // caches are destructed at the end of the test.
+ std::vector<std::unique_ptr<ProfileCompilationInfo::InlineCacheMap>> used_inline_caches;
};
TEST_F(ProfileAssistantTest, AdviseCompilationEmptyReferences) {
@@ -702,7 +715,7 @@ TEST_F(ProfileAssistantTest, TestProfileCreateInlineCache) {
no_inline_cache->GetDexFile()->GetLocationChecksum(),
no_inline_cache->GetDexMethodIndex());
ASSERT_TRUE(pmi_no_inline_cache != nullptr);
- ASSERT_TRUE(pmi_no_inline_cache->inline_caches.empty());
+ ASSERT_TRUE(pmi_no_inline_cache->inline_caches->empty());
}
}
diff --git a/profman/profman.cc b/profman/profman.cc
index 26e7e46033..e565171265 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -39,10 +39,11 @@
#include "bytecode_utils.h"
#include "dex_file.h"
#include "jit/profile_compilation_info.h"
+#include "profile_assistant.h"
#include "runtime.h"
+#include "type_reference.h"
#include "utils.h"
#include "zip_archive.h"
-#include "profile_assistant.h"
namespace art {
@@ -423,16 +424,13 @@ class ProfMan FINAL {
}
for (const std::unique_ptr<const DexFile>& dex_file : *dex_files) {
std::set<dex::TypeIndex> class_types;
- ProfileCompilationInfo::MethodMap methods(std::less<uint16_t>(),
- profile_info.GetArena()->Adapter());
- if (profile_info.GetClassesAndMethods(dex_file.get(), &class_types, &methods)) {
+ std::set<uint16_t> methods;
+ if (profile_info.GetClassesAndMethods(*dex_file.get(), &class_types, &methods)) {
for (const dex::TypeIndex& type_index : class_types) {
const DexFile::TypeId& type_id = dex_file->GetTypeId(type_index);
out_lines->insert(std::string(dex_file->GetTypeDescriptor(type_id)));
}
- for (const auto& pair : methods) {
- // TODO: Process inline caches.
- const uint16_t dex_method_idx = pair.first;
+ for (uint16_t dex_method_idx : methods) {
const DexFile::MethodId& id = dex_file->GetMethodId(dex_method_idx);
std::string signature_string(dex_file->GetMethodSignature(id).ToString());
std::string type_string(dex_file->GetTypeDescriptor(dex_file->GetTypeId(id.class_idx_)));
@@ -563,7 +561,7 @@ class ProfMan FINAL {
// Return true if the definition of the class was found in any of the dex_files.
bool FindClass(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
const std::string& klass_descriptor,
- /*out*/ProfileMethodInfo::ProfileClassReference* class_ref) {
+ /*out*/TypeReference* class_ref) {
for (const std::unique_ptr<const DexFile>& dex_file_ptr : dex_files) {
const DexFile* dex_file = dex_file_ptr.get();
const DexFile::TypeId* type_id = dex_file->FindTypeId(klass_descriptor.c_str());
@@ -583,8 +581,7 @@ class ProfMan FINAL {
}
// Find the method specified by method_spec in the class class_ref.
- uint32_t FindMethodIndex(const ProfileMethodInfo::ProfileClassReference& class_ref,
- const std::string& method_spec) {
+ uint32_t FindMethodIndex(const TypeReference& class_ref, const std::string& method_spec) {
std::vector<std::string> name_and_signature;
Split(method_spec, kProfileParsingFirstCharInSignature, &name_and_signature);
if (name_and_signature.size() != 2) {
@@ -626,7 +623,7 @@ class ProfMan FINAL {
// The format of the method spec is "inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;".
//
// TODO(calin): support INVOKE_INTERFACE and the range variants.
- bool HasSingleInvoke(const ProfileMethodInfo::ProfileClassReference& class_ref,
+ bool HasSingleInvoke(const TypeReference& class_ref,
uint16_t method_index,
/*out*/uint32_t* dex_pc) {
const DexFile* dex_file = class_ref.dex_file;
@@ -675,7 +672,7 @@ class ProfMan FINAL {
method_str = line.substr(method_sep_index + kMethodSep.size());
}
- ProfileMethodInfo::ProfileClassReference class_ref;
+ TypeReference class_ref;
if (!FindClass(dex_files, klass, &class_ref)) {
LOG(WARNING) << "Could not find class: " << klass;
return false;
@@ -746,7 +743,7 @@ class ProfMan FINAL {
if (!HasSingleInvoke(class_ref, method_index, &dex_pc)) {
return false;
}
- std::vector<ProfileMethodInfo::ProfileClassReference> classes(inline_cache_elems.size());
+ std::vector<TypeReference> classes(inline_cache_elems.size());
size_t class_it = 0;
for (const std::string& ic_class : inline_cache_elems) {
if (!FindClass(dex_files, ic_class, &(classes[class_it++]))) {
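
profman now uses TypeReference (moved to runtime/type_reference.h in this change) instead of ProfileMethodInfo::ProfileClassReference when resolving classes from the textual profile. TypeReference is essentially a (dex file, type index) pair; the sketch below only shows the rough shape, with stand-in types, and is not the actual header.

    #include <cstdint>

    class DexFile;  // opaque for this sketch; the real type is art::DexFile

    struct TypeIndex {  // stand-in for art::dex::TypeIndex
      uint16_t index_;
    };

    struct TypeReference {
      TypeReference(const DexFile* file, TypeIndex index)
          : dex_file(file), type_index(index) {}

      const DexFile* dex_file;  // dex file that defines the type
      TypeIndex type_index;     // index into that file's type_ids table
    };
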
diff --git a/runtime/cha.cc b/runtime/cha.cc
index 7948c29e5d..e6bdb84d4c 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -31,34 +31,24 @@ namespace art {
void ClassHierarchyAnalysis::AddDependency(ArtMethod* method,
ArtMethod* dependent_method,
OatQuickMethodHeader* dependent_header) {
- auto it = cha_dependency_map_.find(method);
- if (it == cha_dependency_map_.end()) {
- cha_dependency_map_[method] =
- new std::vector<std::pair<art::ArtMethod*, art::OatQuickMethodHeader*>>();
- it = cha_dependency_map_.find(method);
- } else {
- DCHECK(it->second != nullptr);
- }
- it->second->push_back(std::make_pair(dependent_method, dependent_header));
+ const auto it = cha_dependency_map_.insert(
+ decltype(cha_dependency_map_)::value_type(method, ListOfDependentPairs())).first;
+ it->second.push_back({dependent_method, dependent_header});
}
-std::vector<std::pair<ArtMethod*, OatQuickMethodHeader*>>*
- ClassHierarchyAnalysis::GetDependents(ArtMethod* method) {
+static const ClassHierarchyAnalysis::ListOfDependentPairs s_empty_vector;
+
+const ClassHierarchyAnalysis::ListOfDependentPairs& ClassHierarchyAnalysis::GetDependents(
+ ArtMethod* method) {
auto it = cha_dependency_map_.find(method);
if (it != cha_dependency_map_.end()) {
- DCHECK(it->second != nullptr);
return it->second;
}
- return nullptr;
+ return s_empty_vector;
}
-void ClassHierarchyAnalysis::RemoveDependencyFor(ArtMethod* method) {
- auto it = cha_dependency_map_.find(method);
- if (it != cha_dependency_map_.end()) {
- auto dependents = it->second;
- cha_dependency_map_.erase(it);
- delete dependents;
- }
+void ClassHierarchyAnalysis::RemoveAllDependenciesFor(ArtMethod* method) {
+ cha_dependency_map_.erase(method);
}
void ClassHierarchyAnalysis::RemoveDependentsWithMethodHeaders(
@@ -66,20 +56,19 @@ void ClassHierarchyAnalysis::RemoveDependentsWithMethodHeaders(
// Iterate through all entries in the dependency map and remove any entry that
// contains one of those in method_headers.
for (auto map_it = cha_dependency_map_.begin(); map_it != cha_dependency_map_.end(); ) {
- auto dependents = map_it->second;
- for (auto vec_it = dependents->begin(); vec_it != dependents->end(); ) {
- OatQuickMethodHeader* method_header = vec_it->second;
- auto it = std::find(method_headers.begin(), method_headers.end(), method_header);
- if (it != method_headers.end()) {
- vec_it = dependents->erase(vec_it);
- } else {
- vec_it++;
- }
- }
+ ListOfDependentPairs& dependents = map_it->second;
+ dependents.erase(
+ std::remove_if(
+ dependents.begin(),
+ dependents.end(),
+ [&method_headers](MethodAndMethodHeaderPair& dependent) {
+ return method_headers.find(dependent.second) != method_headers.end();
+ }),
+ dependents.end());
+
// Remove the map entry if there are no more dependents.
- if (dependents->empty()) {
+ if (dependents.empty()) {
map_it = cha_dependency_map_.erase(map_it);
- delete dependents;
} else {
map_it++;
}
@@ -554,11 +543,7 @@ void ClassHierarchyAnalysis::InvalidateSingleImplementationMethods(
}
// Invalidate all dependents.
- auto dependents = GetDependents(invalidated);
- if (dependents == nullptr) {
- continue;
- }
- for (const auto& dependent : *dependents) {
+ for (const auto& dependent : GetDependents(invalidated)) {
ArtMethod* method = dependent.first;
OatQuickMethodHeader* method_header = dependent.second;
VLOG(class_linker) << "CHA invalidated compiled code for " << method->PrettyMethod();
@@ -567,7 +552,7 @@ void ClassHierarchyAnalysis::InvalidateSingleImplementationMethods(
method, method_header);
dependent_method_headers.insert(method_header);
}
- RemoveDependencyFor(invalidated);
+ RemoveAllDependenciesFor(invalidated);
}
}
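
The cha.cc rewrite above replaces heap-allocated dependent vectors with vectors stored by value in the map, which removes the manual new/delete and lets erase-remove do the filtering. A self-contained sketch of the same pattern, with ints standing in for ArtMethod* and OatQuickMethodHeader*:

    #include <algorithm>
    #include <iterator>
    #include <unordered_map>
    #include <unordered_set>
    #include <utility>
    #include <vector>

    using Method = int;   // stand-in for ArtMethod*
    using Header = int;   // stand-in for OatQuickMethodHeader*
    using Pair = std::pair<Method, Header>;

    std::unordered_map<Method, std::vector<Pair>> cha_dependency_map;

    void AddDependency(Method method, Method dependent, Header header) {
      // operator[] default-constructs the vector on first use, so the old
      // find / new / re-find sequence is unnecessary.
      cha_dependency_map[method].push_back({dependent, header});
    }

    void RemoveDependentsWithHeaders(const std::unordered_set<Header>& headers) {
      for (auto it = cha_dependency_map.begin(); it != cha_dependency_map.end(); ) {
        std::vector<Pair>& deps = it->second;
        deps.erase(std::remove_if(deps.begin(), deps.end(),
                                  [&headers](const Pair& p) {
                                    return headers.count(p.second) != 0;
                                  }),
                   deps.end());
        // Drop the map entry once no dependents remain; erase() returns the
        // next iterator, and no manual delete is needed anymore.
        it = deps.empty() ? cha_dependency_map.erase(it) : std::next(it);
      }
    }
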
diff --git a/runtime/cha.h b/runtime/cha.h
index 99c49d2bca..d9692a684e 100644
--- a/runtime/cha.h
+++ b/runtime/cha.h
@@ -94,12 +94,11 @@ class ClassHierarchyAnalysis {
OatQuickMethodHeader* dependent_header) REQUIRES(Locks::cha_lock_);
// Return compiled code that assumes that `method` has single-implementation.
- std::vector<MethodAndMethodHeaderPair>* GetDependents(ArtMethod* method)
- REQUIRES(Locks::cha_lock_);
+ const ListOfDependentPairs& GetDependents(ArtMethod* method) REQUIRES(Locks::cha_lock_);
// Remove dependency tracking for compiled code that assumes that
// `method` has single-implementation.
- void RemoveDependencyFor(ArtMethod* method) REQUIRES(Locks::cha_lock_);
+ void RemoveAllDependenciesFor(ArtMethod* method) REQUIRES(Locks::cha_lock_);
// Remove from cha_dependency_map_ all entries that contain OatQuickMethodHeader from
// the given `method_headers` set.
@@ -158,7 +157,7 @@ class ClassHierarchyAnalysis {
// A map that maps a method to a set of compiled code that assumes that method has a
// single implementation, which is used to do CHA-based devirtualization.
- std::unordered_map<ArtMethod*, ListOfDependentPairs*> cha_dependency_map_
+ std::unordered_map<ArtMethod*, ListOfDependentPairs> cha_dependency_map_
GUARDED_BY(Locks::cha_lock_);
DISALLOW_COPY_AND_ASSIGN(ClassHierarchyAnalysis);
diff --git a/runtime/cha_test.cc b/runtime/cha_test.cc
index d2f335e951..c60720f4d3 100644
--- a/runtime/cha_test.cc
+++ b/runtime/cha_test.cc
@@ -36,58 +36,58 @@ TEST_F(CHATest, CHACheckDependency) {
ClassHierarchyAnalysis cha;
MutexLock cha_mu(Thread::Current(), *Locks::cha_lock_);
- ASSERT_EQ(cha.GetDependents(METHOD1), nullptr);
- ASSERT_EQ(cha.GetDependents(METHOD2), nullptr);
- ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+ ASSERT_TRUE(cha.GetDependents(METHOD1).empty());
+ ASSERT_TRUE(cha.GetDependents(METHOD2).empty());
+ ASSERT_TRUE(cha.GetDependents(METHOD3).empty());
cha.AddDependency(METHOD1, METHOD2, METHOD_HEADER2);
- ASSERT_EQ(cha.GetDependents(METHOD2), nullptr);
- ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+ ASSERT_TRUE(cha.GetDependents(METHOD2).empty());
+ ASSERT_TRUE(cha.GetDependents(METHOD3).empty());
auto dependents = cha.GetDependents(METHOD1);
- ASSERT_EQ(dependents->size(), 1u);
- ASSERT_EQ(dependents->at(0).first, METHOD2);
- ASSERT_EQ(dependents->at(0).second, METHOD_HEADER2);
+ ASSERT_EQ(dependents.size(), 1u);
+ ASSERT_EQ(dependents[0].first, METHOD2);
+ ASSERT_EQ(dependents[0].second, METHOD_HEADER2);
cha.AddDependency(METHOD1, METHOD3, METHOD_HEADER3);
- ASSERT_EQ(cha.GetDependents(METHOD2), nullptr);
- ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+ ASSERT_TRUE(cha.GetDependents(METHOD2).empty());
+ ASSERT_TRUE(cha.GetDependents(METHOD3).empty());
dependents = cha.GetDependents(METHOD1);
- ASSERT_EQ(dependents->size(), 2u);
- ASSERT_EQ(dependents->at(0).first, METHOD2);
- ASSERT_EQ(dependents->at(0).second, METHOD_HEADER2);
- ASSERT_EQ(dependents->at(1).first, METHOD3);
- ASSERT_EQ(dependents->at(1).second, METHOD_HEADER3);
+ ASSERT_EQ(dependents.size(), 2u);
+ ASSERT_EQ(dependents[0].first, METHOD2);
+ ASSERT_EQ(dependents[0].second, METHOD_HEADER2);
+ ASSERT_EQ(dependents[1].first, METHOD3);
+ ASSERT_EQ(dependents[1].second, METHOD_HEADER3);
std::unordered_set<OatQuickMethodHeader*> headers;
headers.insert(METHOD_HEADER2);
cha.RemoveDependentsWithMethodHeaders(headers);
- ASSERT_EQ(cha.GetDependents(METHOD2), nullptr);
- ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+ ASSERT_TRUE(cha.GetDependents(METHOD2).empty());
+ ASSERT_TRUE(cha.GetDependents(METHOD3).empty());
dependents = cha.GetDependents(METHOD1);
- ASSERT_EQ(dependents->size(), 1u);
- ASSERT_EQ(dependents->at(0).first, METHOD3);
- ASSERT_EQ(dependents->at(0).second, METHOD_HEADER3);
+ ASSERT_EQ(dependents.size(), 1u);
+ ASSERT_EQ(dependents[0].first, METHOD3);
+ ASSERT_EQ(dependents[0].second, METHOD_HEADER3);
cha.AddDependency(METHOD2, METHOD1, METHOD_HEADER1);
- ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+ ASSERT_TRUE(cha.GetDependents(METHOD3).empty());
dependents = cha.GetDependents(METHOD1);
- ASSERT_EQ(dependents->size(), 1u);
+ ASSERT_EQ(dependents.size(), 1u);
dependents = cha.GetDependents(METHOD2);
- ASSERT_EQ(dependents->size(), 1u);
+ ASSERT_EQ(dependents.size(), 1u);
headers.insert(METHOD_HEADER3);
cha.RemoveDependentsWithMethodHeaders(headers);
- ASSERT_EQ(cha.GetDependents(METHOD1), nullptr);
- ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+ ASSERT_TRUE(cha.GetDependents(METHOD1).empty());
+ ASSERT_TRUE(cha.GetDependents(METHOD3).empty());
dependents = cha.GetDependents(METHOD2);
- ASSERT_EQ(dependents->size(), 1u);
- ASSERT_EQ(dependents->at(0).first, METHOD1);
- ASSERT_EQ(dependents->at(0).second, METHOD_HEADER1);
+ ASSERT_EQ(dependents.size(), 1u);
+ ASSERT_EQ(dependents[0].first, METHOD1);
+ ASSERT_EQ(dependents[0].second, METHOD_HEADER1);
- cha.RemoveDependencyFor(METHOD2);
- ASSERT_EQ(cha.GetDependents(METHOD1), nullptr);
- ASSERT_EQ(cha.GetDependents(METHOD2), nullptr);
- ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+ cha.RemoveAllDependenciesFor(METHOD2);
+ ASSERT_TRUE(cha.GetDependents(METHOD1).empty());
+ ASSERT_TRUE(cha.GetDependents(METHOD2).empty());
+ ASSERT_TRUE(cha.GetDependents(METHOD3).empty());
}
} // namespace art
diff --git a/runtime/interpreter/mterp/mips/op_double_to_int.S b/runtime/interpreter/mterp/mips/op_double_to_int.S
index 3b44964333..6d7c6cae61 100644
--- a/runtime/interpreter/mterp/mips/op_double_to_int.S
+++ b/runtime/interpreter/mterp/mips/op_double_to_int.S
@@ -3,7 +3,8 @@
*
* We have to clip values to int min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
@@ -11,29 +12,20 @@
EAS2(a3, rFP, a3) # a3 <- &fp[B]
LOAD64_F(fa0, fa0f, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
+#ifndef MIPS32REVGE6
li t0, INT_MIN_AS_DOUBLE_HIGH
mtc1 zero, fa1
MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
-#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- cmp.le.d ft0, fa1, fa0
- GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if INT_MIN <= vB, proceed to truncation
- cmp.eq.d ft0, fa0, fa0
- selnez.d fa0, fa1, ft0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-#else
c.ole.d fcc0, fa1, fa0
+#endif
GET_INST_OPCODE(t1) # extract opcode from rINST
+#ifndef MIPS32REVGE6
bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
c.eq.d fcc0, fa0, fa0
mtc1 zero, fa0
MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
movt.d fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-#endif
1:
+#endif
trunc.w.d fa0, fa0
SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
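
For context, the clipping the pre-R6 path implements by hand follows the dex specification for double-to-int conversion: NaN becomes 0 and out-of-range values saturate at the int bounds. A plain C++ illustration of those semantics (not part of the change itself):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Conversion semantics the op_double_to_int handler must provide:
    // NaN maps to 0, out-of-range values saturate to INT32_MIN / INT32_MAX,
    // everything else is truncated toward zero.
    int32_t DoubleToInt(double value) {
      if (std::isnan(value)) {
        return 0;
      }
      if (value <= static_cast<double>(std::numeric_limits<int32_t>::min())) {
        return std::numeric_limits<int32_t>::min();
      }
      if (value >= static_cast<double>(std::numeric_limits<int32_t>::max())) {
        return std::numeric_limits<int32_t>::max();
      }
      return static_cast<int32_t>(value);
    }
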
diff --git a/runtime/interpreter/mterp/mips/op_double_to_long.S b/runtime/interpreter/mterp/mips/op_double_to_long.S
index 78d4a8f5c7..459ab7eed0 100644
--- a/runtime/interpreter/mterp/mips/op_double_to_long.S
+++ b/runtime/interpreter/mterp/mips/op_double_to_long.S
@@ -3,7 +3,8 @@
*
* We have to clip values to long min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -13,19 +14,7 @@
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- li t0, LONG_MIN_AS_DOUBLE_HIGH
- mtc1 zero, fa1
- mthc1 t0, fa1
- cmp.le.d ft0, fa1, fa0
GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if LONG_MIN <= vB, proceed to truncation
- cmp.eq.d ft0, fa0, fa0
- selnez.d fa0, fa1, ft0 # fa0 = ordered(vB) ? LONG_MIN_AS_DOUBLE : 0
-1:
trunc.l.d fa0, fa0
SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
#else
diff --git a/runtime/interpreter/mterp/mips/op_float_to_int.S b/runtime/interpreter/mterp/mips/op_float_to_int.S
index 087e50fe80..26a0988082 100644
--- a/runtime/interpreter/mterp/mips/op_float_to_int.S
+++ b/runtime/interpreter/mterp/mips/op_float_to_int.S
@@ -3,7 +3,8 @@
*
* We have to clip values to int min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
@@ -11,26 +12,18 @@
GET_VREG_F(fa0, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+#ifndef MIPS32REVGE6
li t0, INT_MIN_AS_FLOAT
mtc1 t0, fa1
-#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- cmp.le.s ft0, fa1, fa0
- GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if INT_MIN <= vB, proceed to truncation
- cmp.eq.s ft0, fa0, fa0
- selnez.s fa0, fa1, ft0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-#else
c.ole.s fcc0, fa1, fa0
+#endif
GET_INST_OPCODE(t1) # extract opcode from rINST
+#ifndef MIPS32REVGE6
bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
c.eq.s fcc0, fa0, fa0
mtc1 zero, fa0
movt.s fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-#endif
1:
+#endif
trunc.w.s fa0, fa0
SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
diff --git a/runtime/interpreter/mterp/mips/op_float_to_long.S b/runtime/interpreter/mterp/mips/op_float_to_long.S
index dc88a78e7a..b8f8efbdcb 100644
--- a/runtime/interpreter/mterp/mips/op_float_to_long.S
+++ b/runtime/interpreter/mterp/mips/op_float_to_long.S
@@ -3,7 +3,8 @@
*
* We have to clip values to long min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -12,18 +13,7 @@
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- li t0, LONG_MIN_AS_FLOAT
- mtc1 t0, fa1
- cmp.le.s ft0, fa1, fa0
GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if LONG_MIN <= vB, proceed to truncation
- cmp.eq.s ft0, fa0, fa0
- selnez.s fa0, fa1, ft0 # fa0 = ordered(vB) ? LONG_MIN_AS_FLOAT : 0
-1:
trunc.l.s fa0, fa0
SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
#else
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_int.S b/runtime/interpreter/mterp/mips64/op_double_to_int.S
index aa2cbcad38..d09952233c 100644
--- a/runtime/interpreter/mterp/mips64/op_double_to_int.S
+++ b/runtime/interpreter/mterp/mips64/op_double_to_int.S
@@ -1,23 +1,3 @@
%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- dli t0, INT_MIN_AS_DOUBLE
- dmtc1 t0, f1
- cmp.le.d f1, f1, f0
- bc1nez f1, .L${opcode}_trunc
- cmp.eq.d f1, f0, f0
- li t0, INT_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .L${opcode}_done
-%break
-.L${opcode}_trunc:
trunc.w.d f0, f0
- mfc1 t0, f0
-.L${opcode}_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG t0, a1
- GOTO_OPCODE v0 # jump to next instruction
+%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_long.S b/runtime/interpreter/mterp/mips64/op_double_to_long.S
index 777cfeb6c8..9b65da5602 100644
--- a/runtime/interpreter/mterp/mips64/op_double_to_long.S
+++ b/runtime/interpreter/mterp/mips64/op_double_to_long.S
@@ -1,23 +1,3 @@
%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- dli t0, LONG_MIN_AS_DOUBLE
- dmtc1 t0, f1
- cmp.le.d f1, f1, f0
- bc1nez f1, .L${opcode}_trunc
- cmp.eq.d f1, f0, f0
- dli t0, LONG_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .L${opcode}_done
-%break
-.L${opcode}_trunc:
trunc.l.d f0, f0
- dmfc1 t0, f0
-.L${opcode}_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE t0, a1
- GOTO_OPCODE v0 # jump to next instruction
+%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_int.S b/runtime/interpreter/mterp/mips64/op_float_to_int.S
index d957540a7b..2806973935 100644
--- a/runtime/interpreter/mterp/mips64/op_float_to_int.S
+++ b/runtime/interpreter/mterp/mips64/op_float_to_int.S
@@ -1,23 +1,3 @@
%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- li t0, INT_MIN_AS_FLOAT
- mtc1 t0, f1
- cmp.le.s f1, f1, f0
- bc1nez f1, .L${opcode}_trunc
- cmp.eq.s f1, f0, f0
- li t0, INT_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .L${opcode}_done
-%break
-.L${opcode}_trunc:
trunc.w.s f0, f0
- mfc1 t0, f0
-.L${opcode}_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG t0, a1
- GOTO_OPCODE v0 # jump to next instruction
+%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_long.S b/runtime/interpreter/mterp/mips64/op_float_to_long.S
index 5d036c8455..c40c8a6680 100644
--- a/runtime/interpreter/mterp/mips64/op_float_to_long.S
+++ b/runtime/interpreter/mterp/mips64/op_float_to_long.S
@@ -1,23 +1,3 @@
%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- li t0, LONG_MIN_AS_FLOAT
- mtc1 t0, f1
- cmp.le.s f1, f1, f0
- bc1nez f1, .L${opcode}_trunc
- cmp.eq.s f1, f0, f0
- dli t0, LONG_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .L${opcode}_done
-%break
-.L${opcode}_trunc:
trunc.l.s f0, f0
- dmfc1 t0, f0
-.L${opcode}_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE t0, a1
- GOTO_OPCODE v0 # jump to next instruction
+%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index 579afc2387..636289798c 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -3967,7 +3967,8 @@ artMterpAsmInstructionStart = .L_op_nop
*
* We have to clip values to int min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
@@ -3975,27 +3976,19 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG_F(fa0, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+#ifndef MIPS32REVGE6
li t0, INT_MIN_AS_FLOAT
mtc1 t0, fa1
-#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- cmp.le.s ft0, fa1, fa0
- GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if INT_MIN <= vB, proceed to truncation
- cmp.eq.s ft0, fa0, fa0
- selnez.s fa0, fa1, ft0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-#else
c.ole.s fcc0, fa1, fa0
+#endif
GET_INST_OPCODE(t1) # extract opcode from rINST
+#ifndef MIPS32REVGE6
bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
c.eq.s fcc0, fa0, fa0
mtc1 zero, fa0
movt.s fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-#endif
1:
+#endif
trunc.w.s fa0, fa0
SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
@@ -4008,7 +4001,8 @@ artMterpAsmInstructionStart = .L_op_nop
*
* We have to clip values to long min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -4017,18 +4011,7 @@ artMterpAsmInstructionStart = .L_op_nop
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- li t0, LONG_MIN_AS_FLOAT
- mtc1 t0, fa1
- cmp.le.s ft0, fa1, fa0
GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if LONG_MIN <= vB, proceed to truncation
- cmp.eq.s ft0, fa0, fa0
- selnez.s fa0, fa1, ft0 # fa0 = ordered(vB) ? LONG_MIN_AS_FLOAT : 0
-1:
trunc.l.s fa0, fa0
SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
#else
@@ -4084,7 +4067,8 @@ artMterpAsmInstructionStart = .L_op_nop
*
* We have to clip values to int min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
@@ -4092,30 +4076,21 @@ artMterpAsmInstructionStart = .L_op_nop
EAS2(a3, rFP, a3) # a3 <- &fp[B]
LOAD64_F(fa0, fa0f, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
+#ifndef MIPS32REVGE6
li t0, INT_MIN_AS_DOUBLE_HIGH
mtc1 zero, fa1
MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
-#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- cmp.le.d ft0, fa1, fa0
- GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if INT_MIN <= vB, proceed to truncation
- cmp.eq.d ft0, fa0, fa0
- selnez.d fa0, fa1, ft0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-#else
c.ole.d fcc0, fa1, fa0
+#endif
GET_INST_OPCODE(t1) # extract opcode from rINST
+#ifndef MIPS32REVGE6
bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
c.eq.d fcc0, fa0, fa0
mtc1 zero, fa0
MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
movt.d fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-#endif
1:
+#endif
trunc.w.d fa0, fa0
SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
@@ -4128,7 +4103,8 @@ artMterpAsmInstructionStart = .L_op_nop
*
* We have to clip values to long min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
+ * to modest integer. The EABI convert function isn't doing this for us
+ * for pre-R6.
*/
/* unop vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -4138,19 +4114,7 @@ artMterpAsmInstructionStart = .L_op_nop
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
#ifdef MIPS32REVGE6
- /*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
- */
- li t0, LONG_MIN_AS_DOUBLE_HIGH
- mtc1 zero, fa1
- mthc1 t0, fa1
- cmp.le.d ft0, fa1, fa0
GET_INST_OPCODE(t1) # extract opcode from rINST
- bc1nez ft0, 1f # if LONG_MIN <= vB, proceed to truncation
- cmp.eq.d ft0, fa0, fa0
- selnez.d fa0, fa1, ft0 # fa0 = ordered(vB) ? LONG_MIN_AS_DOUBLE : 0
-1:
trunc.l.d fa0, fa0
SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
#else
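
The mterp hunks above drop the hand-written clamping that pre-R6 MIPS needed because its trunc instructions did not follow the 2008 NaN rules. As a minimal sketch (not part of the patch, and assuming the R6 NAN2008=1 behavior matches this), here are the Java float-to-int semantics the interpreter has to preserve either way:

#include <cmath>
#include <cstdint>
#include <limits>

// Sketch of the conversion semantics the removed pre-R6 sequences implemented
// by hand and that R6 trunc.w.s is relied on to provide when NAN2008=1:
// NaN converts to 0, out-of-range values saturate to INT_MIN/INT_MAX.
int32_t JavaFloatToInt(float value) {
  if (std::isnan(value)) {
    return 0;  // Unordered input: the old code masked the result to zero.
  }
  if (value <= static_cast<float>(std::numeric_limits<int32_t>::min())) {
    return std::numeric_limits<int32_t>::min();  // Clamp below INT_MIN.
  }
  if (value >= static_cast<float>(std::numeric_limits<int32_t>::max())) {
    return std::numeric_limits<int32_t>::max();  // Clamp above INT_MAX.
  }
  return static_cast<int32_t>(value);  // In range: truncate toward zero.
}
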
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index 3656df9a8e..bc0d90c7cb 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -3699,19 +3699,27 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG_FLOAT f0, a2
FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ trunc.w.s f0, f0
+/* File: mips64/fcvtFooter.S */
/*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
*/
- li t0, INT_MIN_AS_FLOAT
- mtc1 t0, f1
- cmp.le.s f1, f1, f0
- bc1nez f1, .Lop_float_to_int_trunc
- cmp.eq.s f1, f0, f0
- li t0, INT_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .Lop_float_to_int_done
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
/* ------------------------------ */
.balign 128
@@ -3734,19 +3742,28 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG_FLOAT f0, a2
FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ trunc.l.s f0, f0
+/* File: mips64/fcvtFooter.S */
/*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
*/
- li t0, LONG_MIN_AS_FLOAT
- mtc1 t0, f1
- cmp.le.s f1, f1, f0
- bc1nez f1, .Lop_float_to_long_trunc
- cmp.eq.s f1, f0, f0
- dli t0, LONG_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .Lop_float_to_long_done
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+
/* ------------------------------ */
.balign 128
@@ -3817,19 +3834,27 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG_DOUBLE f0, a2
FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ trunc.w.d f0, f0
+/* File: mips64/fcvtFooter.S */
/*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
*/
- dli t0, INT_MIN_AS_DOUBLE
- dmtc1 t0, f1
- cmp.le.d f1, f1, f0
- bc1nez f1, .Lop_double_to_int_trunc
- cmp.eq.d f1, f0, f0
- li t0, INT_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .Lop_double_to_int_done
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
/* ------------------------------ */
.balign 128
@@ -3852,19 +3877,27 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG_DOUBLE f0, a2
FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ trunc.l.d f0, f0
+/* File: mips64/fcvtFooter.S */
/*
- * TODO: simplify this when the MIPS64R6 emulator
- * supports NAN2008=1.
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
*/
- dli t0, LONG_MIN_AS_DOUBLE
- dmtc1 t0, f1
- cmp.le.d f1, f1, f0
- bc1nez f1, .Lop_double_to_long_trunc
- cmp.eq.d f1, f0, f0
- dli t0, LONG_MIN
- mfc1 t1, f1
- and t0, t0, t1
- b .Lop_double_to_long_done
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
/* ------------------------------ */
.balign 128
@@ -7132,46 +7165,6 @@ artMterpAsmInstructionEnd:
.balign 4
artMterpAsmSisterStart:
-/* continuation for op_float_to_int */
-.Lop_float_to_int_trunc:
- trunc.w.s f0, f0
- mfc1 t0, f0
-.Lop_float_to_int_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG t0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* continuation for op_float_to_long */
-.Lop_float_to_long_trunc:
- trunc.l.s f0, f0
- dmfc1 t0, f0
-.Lop_float_to_long_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE t0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* continuation for op_double_to_int */
-.Lop_double_to_int_trunc:
- trunc.w.d f0, f0
- mfc1 t0, f0
-.Lop_double_to_int_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG t0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* continuation for op_double_to_long */
-.Lop_double_to_long_trunc:
- trunc.l.d f0, f0
- dmfc1 t0, f0
-.Lop_double_to_long_done:
- /* Can't include fcvtFooter.S after break */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE t0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
.size artMterpAsmSisterStart, .-artMterpAsmSisterStart
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 5ce54475d1..2377b5b5aa 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -192,19 +192,26 @@ bool JitCodeCache::ContainsMethod(ArtMethod* method) {
class ScopedCodeCacheWrite : ScopedTrace {
public:
- explicit ScopedCodeCacheWrite(MemMap* code_map)
+ explicit ScopedCodeCacheWrite(MemMap* code_map, bool only_for_tlb_shootdown = false)
: ScopedTrace("ScopedCodeCacheWrite"),
- code_map_(code_map) {
+ code_map_(code_map),
+ only_for_tlb_shootdown_(only_for_tlb_shootdown) {
ScopedTrace trace("mprotect all");
- CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
+ CHECKED_MPROTECT(
+ code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtAll);
}
~ScopedCodeCacheWrite() {
ScopedTrace trace("mprotect code");
- CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
+ CHECKED_MPROTECT(
+ code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtCode);
}
private:
MemMap* const code_map_;
+  // If we're using ScopedCodeCacheWrite only for TLB shootdown, we limit the scope of mprotect to
+ // one page.
+ const bool only_for_tlb_shootdown_;
+
DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};
@@ -565,11 +572,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
core_spill_mask,
fp_spill_mask,
code_size);
- DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
- DCHECK_LE(roots_data, stack_map);
- // Flush data cache, as compiled code references literals in it.
- FlushDataCache(reinterpret_cast<char*>(roots_data),
- reinterpret_cast<char*>(roots_data + data_size));
// Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
// trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
// This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
@@ -621,10 +623,18 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
// possible that the compiled code is considered invalidated by some class linking,
// but below we still make the compiled code valid for the method.
MutexLock mu(self, lock_);
- method_code_map_.Put(code_ptr, method);
// Fill the root table before updating the entry point.
DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
+ DCHECK_LE(roots_data, stack_map);
FillRootTable(roots_data, roots);
+ {
+ // Flush data cache, as compiled code references literals in it.
+ // We also need a TLB shootdown to act as memory barrier across cores.
+ ScopedCodeCacheWrite ccw(code_map_.get(), /* only_for_tlb_shootdown */ true);
+ FlushDataCache(reinterpret_cast<char*>(roots_data),
+ reinterpret_cast<char*>(roots_data + data_size));
+ }
+ method_code_map_.Put(code_ptr, method);
if (osr) {
number_of_osr_compilations_++;
osr_code_map_.Put(method, code_ptr);
@@ -1289,7 +1299,7 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
}
for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
- std::vector<ProfileMethodInfo::ProfileClassReference> profile_classes;
+ std::vector<TypeReference> profile_classes;
const InlineCache& cache = info->cache_[i];
ArtMethod* caller = info->GetMethod();
bool is_missing_types = false;
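
For the ScopedCodeCacheWrite change above, a rough stand-alone sketch of the idea (stand-in names, not the real class): when the guard is taken only to force a TLB shootdown after filling the root table, re-protecting a single page is enough to broadcast the permission change, so the whole code map does not need to be touched. kPageSizeGuess and the protection masks below are placeholders for the runtime's real constants.

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

// Illustrative RAII guard in the spirit of ScopedCodeCacheWrite.
class ScopedWritableRegion {
 public:
  ScopedWritableRegion(uint8_t* begin, size_t size, bool only_for_tlb_shootdown)
      : begin_(begin),
        size_(only_for_tlb_shootdown ? kPageSizeGuess : size) {
    mprotect(begin_, size_, PROT_READ | PROT_WRITE | PROT_EXEC);  // open for writes
  }
  ~ScopedWritableRegion() {
    mprotect(begin_, size_, PROT_READ | PROT_EXEC);  // restore execute-only mapping
  }

 private:
  static constexpr size_t kPageSizeGuess = 4096;
  uint8_t* const begin_;
  const size_t size_;
};
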
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 9b80ad71e1..1e720c0cf4 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -69,21 +69,21 @@ static_assert(InlineCache::kIndividualCacheSize < kIsMissingTypesEncoding,
"InlineCache::kIndividualCacheSize is larger than expected");
ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool)
- : default_arena_pool_(nullptr),
- arena_(new ArenaAllocator(custom_arena_pool)),
- info_(arena_->Adapter(kArenaAllocProfile)),
- profile_key_map_(std::less<const std::string>(), arena_->Adapter(kArenaAllocProfile)) {
+ : default_arena_pool_(),
+ arena_(custom_arena_pool),
+ info_(arena_.Adapter(kArenaAllocProfile)),
+ profile_key_map_(std::less<const std::string>(), arena_.Adapter(kArenaAllocProfile)) {
}
ProfileCompilationInfo::ProfileCompilationInfo()
- : default_arena_pool_(new ArenaPool(/*use_malloc*/true, /*low_4gb*/false, "ProfileCompilationInfo")),
- arena_(new ArenaAllocator(default_arena_pool_.get())),
- info_(arena_->Adapter(kArenaAllocProfile)),
- profile_key_map_(std::less<const std::string>(), arena_->Adapter(kArenaAllocProfile)) {
+ : default_arena_pool_(/*use_malloc*/true, /*low_4gb*/false, "ProfileCompilationInfo"),
+ arena_(&default_arena_pool_),
+ info_(arena_.Adapter(kArenaAllocProfile)),
+ profile_key_map_(std::less<const std::string>(), arena_.Adapter(kArenaAllocProfile)) {
}
ProfileCompilationInfo::~ProfileCompilationInfo() {
- VLOG(profiler) << Dumpable<MemStats>(arena_->GetMemStats());
+ VLOG(profiler) << Dumpable<MemStats>(arena_.GetMemStats());
for (DexFileData* data : info_) {
delete data;
}
@@ -94,11 +94,27 @@ void ProfileCompilationInfo::DexPcData::AddClass(uint16_t dex_profile_idx,
if (is_megamorphic || is_missing_types) {
return;
}
- classes.emplace(dex_profile_idx, type_idx);
- if (classes.size() >= InlineCache::kIndividualCacheSize) {
+
+ // Perform an explicit lookup for the type instead of directly emplacing the
+ // element. We do this because emplace() allocates the node before doing the
+ // lookup and if it then finds an identical element, it shall deallocate the
+ // node. For Arena allocations, that's essentially a leak.
+ ClassReference ref(dex_profile_idx, type_idx);
+ auto it = classes.find(ref);
+ if (it != classes.end()) {
+ // The type index exists.
+ return;
+ }
+
+  // Check if adding the type will cause the cache to become megamorphic.
+ if (classes.size() + 1 >= InlineCache::kIndividualCacheSize) {
is_megamorphic = true;
classes.clear();
+ return;
}
+
+ // The type does not exist and the inline cache will not be megamorphic.
+ classes.insert(ref);
}
// Transform the actual dex location into relative paths.
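
The new AddClass body above avoids emplace() because an arena-backed set allocates the node before the duplicate check and never returns that memory. A generic sketch of the same find-then-insert pattern, with hypothetical names ('capacity' stands in for InlineCache::kIndividualCacheSize):

#include <cstddef>

// Works with any set-like container.
template <typename Set, typename Value>
bool AddIfAbsent(Set* classes, const Value& ref, size_t capacity, bool* is_megamorphic) {
  if (classes->find(ref) != classes->end()) {
    return false;  // Already present: no node gets allocated at all.
  }
  if (classes->size() + 1 >= capacity) {
    *is_megamorphic = true;  // One more entry would overflow the inline cache.
    classes->clear();
    return false;
  }
  classes->insert(ref);  // The key is known to be new, so the allocation is kept.
  return true;
}
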
@@ -475,8 +491,8 @@ ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData
uint8_t profile_index = profile_index_it->second;
if (info_.size() <= profile_index) {
// This is a new addition. Add it to the info_ array.
- DexFileData* dex_file_data = new (arena_.get()) DexFileData(
- arena_.get(), profile_key, checksum, profile_index);
+ DexFileData* dex_file_data = new (&arena_) DexFileData(
+ &arena_, profile_key, checksum, profile_index);
info_.push_back(dex_file_data);
}
DexFileData* result = info_[profile_index];
@@ -523,7 +539,7 @@ bool ProfileCompilationInfo::AddResolvedClasses(const DexCacheResolvedClasses& c
bool ProfileCompilationInfo::AddMethodIndex(const std::string& dex_location,
uint32_t dex_checksum,
uint16_t method_index) {
- return AddMethod(dex_location, dex_checksum, method_index, OfflineProfileMethodInfo(arena_.get()));
+ return AddMethod(dex_location, dex_checksum, method_index, OfflineProfileMethodInfo(nullptr));
}
bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
@@ -534,8 +550,14 @@ bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
if (data == nullptr) { // checksum mismatch
return false;
}
+ // Add the method.
InlineCacheMap* inline_cache = data->FindOrAddMethod(method_index);
- for (const auto& pmi_inline_cache_it : pmi.inline_caches) {
+
+ if (pmi.inline_caches == nullptr) {
+  // If we don't have inline caches, return success right away.
+ return true;
+ }
+ for (const auto& pmi_inline_cache_it : *pmi.inline_caches) {
uint16_t pmi_ic_dex_pc = pmi_inline_cache_it.first;
const DexPcData& pmi_ic_dex_pc_data = pmi_inline_cache_it.second;
DexPcData* dex_pc_data = FindOrAddDexPc(inline_cache, pmi_ic_dex_pc);
@@ -581,7 +603,7 @@ bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi) {
FindOrAddDexPc(inline_cache, cache.dex_pc)->SetIsMissingTypes();
continue;
}
- for (const ProfileMethodInfo::ProfileClassReference& class_ref : cache.classes) {
+ for (const TypeReference& class_ref : cache.classes) {
DexFileData* class_dex_data = GetOrAddDexFileData(
GetProfileDexFileKey(class_ref.dex_file->GetLocation()),
class_ref.dex_file->GetLocationChecksum());
@@ -1167,7 +1189,8 @@ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> ProfileCompila
if (inline_caches == nullptr) {
return nullptr;
}
- std::unique_ptr<OfflineProfileMethodInfo> pmi(new OfflineProfileMethodInfo(arena_.get()));
+
+ std::unique_ptr<OfflineProfileMethodInfo> pmi(new OfflineProfileMethodInfo(inline_caches));
pmi->dex_references.resize(info_.size());
for (const DexFileData* dex_data : info_) {
@@ -1175,8 +1198,6 @@ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> ProfileCompila
pmi->dex_references[dex_data->profile_index].dex_checksum = dex_data->checksum;
}
- // TODO(calin): maybe expose a direct pointer to avoid copying
- pmi->inline_caches = *inline_caches;
return pmi;
}
@@ -1293,16 +1314,18 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
return os.str();
}
-bool ProfileCompilationInfo::GetClassesAndMethods(const DexFile* dex_file,
+bool ProfileCompilationInfo::GetClassesAndMethods(const DexFile& dex_file,
std::set<dex::TypeIndex>* class_set,
- MethodMap* method_map) const {
+ std::set<uint16_t>* method_set) const {
std::set<std::string> ret;
- std::string profile_key = GetProfileDexFileKey(dex_file->GetLocation());
+ std::string profile_key = GetProfileDexFileKey(dex_file.GetLocation());
const DexFileData* dex_data = FindDexData(profile_key);
- if (dex_data == nullptr || dex_data->checksum != dex_file->GetLocationChecksum()) {
+ if (dex_data == nullptr || dex_data->checksum != dex_file.GetLocationChecksum()) {
return false;
}
- *method_map = dex_data->method_map;
+ for (const auto& it : dex_data->method_map) {
+ method_set->insert(it.first);
+ }
for (const dex::TypeIndex& type_index : dex_data->class_set) {
class_set->insert(type_index);
}
@@ -1415,17 +1438,17 @@ bool ProfileCompilationInfo::GenerateTestProfile(
bool ProfileCompilationInfo::OfflineProfileMethodInfo::operator==(
const OfflineProfileMethodInfo& other) const {
- if (inline_caches.size() != other.inline_caches.size()) {
+ if (inline_caches->size() != other.inline_caches->size()) {
return false;
}
// We can't use a simple equality test because we need to match the dex files
// of the inline caches which might have different profile indexes.
- for (const auto& inline_cache_it : inline_caches) {
+ for (const auto& inline_cache_it : *inline_caches) {
uint16_t dex_pc = inline_cache_it.first;
const DexPcData dex_pc_data = inline_cache_it.second;
- const auto other_it = other.inline_caches.find(dex_pc);
- if (other_it == other.inline_caches.end()) {
+ const auto& other_it = other.inline_caches->find(dex_pc);
+ if (other_it == other.inline_caches->end()) {
return false;
}
const DexPcData& other_dex_pc_data = other_it->second;
@@ -1468,7 +1491,7 @@ ProfileCompilationInfo::DexFileData::FindOrAddMethod(uint16_t method_index) {
ProfileCompilationInfo::DexPcData*
ProfileCompilationInfo::FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t dex_pc) {
- return &(inline_cache->FindOrAdd(dex_pc, DexPcData(arena_.get()))->second);
+ return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&arena_))->second);
}
} // namespace art
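
Making the pool and allocator plain members (rather than unique_ptrs) relies on C++ initializing members in declaration order. A toy sketch with stand-in types, not the ART ones:

// The pool is declared before the allocator, so it is fully constructed by the
// time the allocator captures its address.
struct Pool {};

struct Allocator {
  explicit Allocator(Pool* pool) : pool_(pool) {}
  Pool* pool_;
};

class ProfileLikeInfo {
 public:
  ProfileLikeInfo() : default_pool_(), allocator_(&default_pool_) {}

 private:
  Pool default_pool_;    // constructed first (declaration order)
  Allocator allocator_;  // safe: default_pool_ already exists
};
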
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index 6756352496..e903e2d232 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -28,6 +28,7 @@
#include "dex_file_types.h"
#include "method_reference.h"
#include "safe_map.h"
+#include "type_reference.h"
namespace art {
@@ -36,24 +37,15 @@ namespace art {
* without the need to hold GC-able objects.
*/
struct ProfileMethodInfo {
- struct ProfileClassReference {
- ProfileClassReference() : dex_file(nullptr) {}
- ProfileClassReference(const DexFile* dex, const dex::TypeIndex& index)
- : dex_file(dex), type_index(index) {}
-
- const DexFile* dex_file;
- dex::TypeIndex type_index;
- };
-
struct ProfileInlineCache {
ProfileInlineCache(uint32_t pc,
bool missing_types,
- const std::vector<ProfileClassReference>& profile_classes)
+ const std::vector<TypeReference>& profile_classes)
: dex_pc(pc), is_missing_types(missing_types), classes(profile_classes) {}
const uint32_t dex_pc;
const bool is_missing_types;
- const std::vector<ProfileClassReference> classes;
+ const std::vector<TypeReference> classes;
};
ProfileMethodInfo(const DexFile* dex, uint32_t method_index)
@@ -117,7 +109,7 @@ class ProfileCompilationInfo {
// data from multiple splits. This means that a profile may contain a classes2.dex from split-A
// and one from split-B.
struct ClassReference : public ValueObject {
- ClassReference(uint8_t dex_profile_idx, const dex::TypeIndex& type_idx) :
+ ClassReference(uint8_t dex_profile_idx, const dex::TypeIndex type_idx) :
dex_profile_index(dex_profile_idx), type_index(type_idx) {}
bool operator==(const ClassReference& other) const {
@@ -180,13 +172,13 @@ class ProfileCompilationInfo {
// i.e. the dex file of any ClassReference present in the inline caches can be found at
// dex_references[ClassReference::dex_profile_index].
struct OfflineProfileMethodInfo {
- explicit OfflineProfileMethodInfo(ArenaAllocator* allocator)
- : inline_caches(std::less<uint16_t>(), allocator->Adapter(kArenaAllocProfile)) {}
+ explicit OfflineProfileMethodInfo(const InlineCacheMap* inline_cache_map)
+ : inline_caches(inline_cache_map) {}
bool operator==(const OfflineProfileMethodInfo& other) const;
+ const InlineCacheMap* const inline_caches;
std::vector<DexReference> dex_references;
- InlineCacheMap inline_caches;
};
// Public methods to create, extend or query the profile.
@@ -232,7 +224,8 @@ class ProfileCompilationInfo {
// Return the method data for the given location and index from the profiling info.
// If the method index is not found or the checksum doesn't match, null is returned.
- // The allocations for the method info are done on the current profile arena.
+ // Note: the inline cache map is a pointer to the map stored in the profile and
+ // its allocation will go away if the profile goes out of scope.
std::unique_ptr<OfflineProfileMethodInfo> GetMethod(const std::string& dex_location,
uint32_t dex_checksum,
uint16_t dex_method_index) const;
@@ -246,12 +239,12 @@ class ProfileCompilationInfo {
std::string DumpInfo(const std::vector<const DexFile*>* dex_files,
bool print_full_dex_location = true) const;
- // Return the classes and methods for a given dex file through out args. The otu args are the set
+ // Return the classes and methods for a given dex file through out args. The out args are the set
  // of classes as well as the methods and their associated inline caches. Returns true if the dex
  // file is registered and has a matching checksum, false otherwise.
- bool GetClassesAndMethods(const DexFile* dex_file,
- std::set<dex::TypeIndex>* class_set,
- MethodMap* method_map) const;
+ bool GetClassesAndMethods(const DexFile& dex_file,
+ /*out*/std::set<dex::TypeIndex>* class_set,
+ /*out*/std::set<uint16_t>* method_set) const;
// Perform an equality test with the `other` profile information.
bool Equals(const ProfileCompilationInfo& other);
@@ -281,7 +274,7 @@ class ProfileCompilationInfo {
static bool Equals(const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi1,
const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi2);
- ArenaAllocator* GetArena() { return arena_.get(); }
+ ArenaAllocator* GetArena() { return &arena_; }
private:
enum ProfileLoadSatus {
@@ -512,8 +505,8 @@ class ProfileCompilationInfo {
friend class ProfileAssistantTest;
friend class Dex2oatLayoutTest;
- std::unique_ptr<ArenaPool> default_arena_pool_;
- std::unique_ptr<ArenaAllocator> arena_;
+ ArenaPool default_arena_pool_;
+ ArenaAllocator arena_;
// Vector containing the actual profile info.
// The vector index is the profile index of the dex data and
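
The header now documents that OfflineProfileMethodInfo merely borrows the inline cache map owned by the profile. A self-contained sketch of that ownership model with stand-in types (not the real ART classes):

#include <cstdint>
#include <map>
#include <memory>

// The returned view holds a pointer into a map owned by the profile, so it is
// only valid while the profile is alive.
struct InlineCacheView {
  explicit InlineCacheView(const std::map<uint16_t, int>* caches) : inline_caches(caches) {}
  const std::map<uint16_t, int>* const inline_caches;  // borrowed, not owned
};

struct ProfileLike {
  std::map<uint32_t, std::map<uint16_t, int>> method_caches;  // owns the maps

  std::unique_ptr<InlineCacheView> GetMethod(uint32_t method_index) const {
    auto it = method_caches.find(method_index);
    if (it == method_caches.end()) {
      return nullptr;
    }
    return std::make_unique<InlineCacheView>(&it->second);
  }
};
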
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index a054199e1f..1cfa3552b9 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -25,15 +25,16 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "handle_scope-inl.h"
-#include "linear_alloc.h"
#include "jit/profile_compilation_info.h"
+#include "linear_alloc.h"
#include "scoped_thread_state_change-inl.h"
+#include "type_reference.h"
namespace art {
class ProfileCompilationInfoTest : public CommonRuntimeTest {
public:
- virtual void PostRuntimeCreate() {
+ void PostRuntimeCreate() OVERRIDE {
arena_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
}
@@ -123,13 +124,13 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
std::vector<ProfileMethodInfo::ProfileInlineCache> caches;
// Monomorphic
for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
- std::vector<ProfileMethodInfo::ProfileClassReference> classes;
+ std::vector<TypeReference> classes;
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(0));
caches.emplace_back(dex_pc, /*is_missing_types*/false, classes);
}
// Polymorphic
for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
- std::vector<ProfileMethodInfo::ProfileClassReference> classes;
+ std::vector<TypeReference> classes;
for (uint16_t k = 0; k < InlineCache::kIndividualCacheSize / 2; k++) {
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k));
}
@@ -137,7 +138,7 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
}
// Megamorphic
for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
- std::vector<ProfileMethodInfo::ProfileClassReference> classes;
+ std::vector<TypeReference> classes;
for (uint16_t k = 0; k < 2 * InlineCache::kIndividualCacheSize; k++) {
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k));
}
@@ -145,7 +146,7 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
}
// Missing types
for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
- std::vector<ProfileMethodInfo::ProfileClassReference> classes;
+ std::vector<TypeReference> classes;
caches.emplace_back(dex_pc, /*is_missing_types*/true, classes);
}
ProfileMethodInfo pmi(method->GetDexFile(), method->GetDexMethodIndex(), caches);
@@ -162,13 +163,21 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
return info.Save(filename, nullptr);
}
+ // Creates an inline cache which will be destructed at the end of the test.
+ ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() {
+ used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap(
+ std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+ return used_inline_caches.back().get();
+ }
+
ProfileCompilationInfo::OfflineProfileMethodInfo ConvertProfileMethodInfo(
const ProfileMethodInfo& pmi) {
- ProfileCompilationInfo::OfflineProfileMethodInfo offline_pmi(arena_.get());
+ ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
+ ProfileCompilationInfo::OfflineProfileMethodInfo offline_pmi(ic_map);
SafeMap<DexFile*, uint8_t> dex_map; // dex files to profile index
for (const auto& inline_cache : pmi.inline_caches) {
ProfileCompilationInfo::DexPcData& dex_pc_data =
- offline_pmi.inline_caches.FindOrAdd(
+ ic_map->FindOrAdd(
inline_cache.dex_pc, ProfileCompilationInfo::DexPcData(arena_.get()))->second;
if (inline_cache.is_missing_types) {
dex_pc_data.SetIsMissingTypes();
@@ -191,17 +200,12 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
// Creates an offline profile used for testing inline caches.
ProfileCompilationInfo::OfflineProfileMethodInfo GetOfflineProfileMethodInfo() {
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi(arena_.get());
-
- pmi.dex_references.emplace_back("dex_location1", /* checksum */1);
- pmi.dex_references.emplace_back("dex_location2", /* checksum */2);
- pmi.dex_references.emplace_back("dex_location3", /* checksum */3);
-
+ ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
// Monomorphic
for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
// Polymorphic
for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
@@ -210,26 +214,34 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
dex_pc_data.AddClass(1, dex::TypeIndex(1));
dex_pc_data.AddClass(2, dex::TypeIndex(2));
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
// Megamorphic
for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMegamorphic();
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
// Missing types
for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMissingTypes();
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
+
+ pmi.dex_references.emplace_back("dex_location1", /* checksum */1);
+ pmi.dex_references.emplace_back("dex_location2", /* checksum */2);
+ pmi.dex_references.emplace_back("dex_location3", /* checksum */3);
+
return pmi;
}
void MakeMegamorphic(/*out*/ProfileCompilationInfo::OfflineProfileMethodInfo* pmi) {
- for (auto it : pmi->inline_caches) {
+ ProfileCompilationInfo::InlineCacheMap* ic_map =
+ const_cast<ProfileCompilationInfo::InlineCacheMap*>(pmi->inline_caches);
+ for (auto it : *ic_map) {
for (uint16_t k = 0; k <= 2 * InlineCache::kIndividualCacheSize; k++) {
it.second.AddClass(0, dex::TypeIndex(k));
}
@@ -237,7 +249,9 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
}
void SetIsMissingTypes(/*out*/ProfileCompilationInfo::OfflineProfileMethodInfo* pmi) {
- for (auto it : pmi->inline_caches) {
+ ProfileCompilationInfo::InlineCacheMap* ic_map =
+ const_cast<ProfileCompilationInfo::InlineCacheMap*>(pmi->inline_caches);
+ for (auto it : *ic_map) {
it.second.SetIsMissingTypes();
}
}
@@ -248,6 +262,11 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
static constexpr int kProfileVersionSize = 4;
std::unique_ptr<ArenaAllocator> arena_;
+
+ // Cache of inline caches generated during tests.
+ // This makes it easier to pass data between different utilities and ensure that
+ // caches are destructed at the end of the test.
+ std::vector<std::unique_ptr<ProfileCompilationInfo::InlineCacheMap>> used_inline_caches;
};
TEST_F(ProfileCompilationInfoTest, SaveArtMethods) {
@@ -673,24 +692,26 @@ TEST_F(ProfileCompilationInfoTest, MergeInlineCacheTriggerReindex) {
ProfileCompilationInfo info;
ProfileCompilationInfo info_reindexed;
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi(arena_.get());
+ ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
pmi.dex_references.emplace_back("dex_location2", /* checksum */ 2);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map->Put(dex_pc, dex_pc_data);
}
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi_reindexed(arena_.get());
+ ProfileCompilationInfo::InlineCacheMap* ic_map_reindexed = CreateInlineCacheMap();
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi_reindexed(ic_map_reindexed);
pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum */ 2);
pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum */ 1);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.AddClass(1, dex::TypeIndex(0));
dex_pc_data.AddClass(0, dex::TypeIndex(1));
- pmi_reindexed.inline_caches.Put(dex_pc, dex_pc_data);
+ ic_map_reindexed->Put(dex_pc, dex_pc_data);
}
// Profile 1 and Profile 2 get the same methods but in different order.
@@ -738,11 +759,12 @@ TEST_F(ProfileCompilationInfoTest, AddMoreDexFileThanLimit) {
TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
// Create a megamorphic inline cache.
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi(arena_.get());
+ ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMegamorphic();
- pmi.inline_caches.Put(/*dex_pc*/ 0, dex_pc_data);
+ ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
ProfileCompilationInfo info_megamorphic;
ASSERT_TRUE(AddMethod("dex_location1",
@@ -767,11 +789,12 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCachesMerge) {
// Create an inline cache with missing types
- ProfileCompilationInfo::OfflineProfileMethodInfo pmi(arena_.get());
+ ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMissingTypes();
- pmi.inline_caches.Put(/*dex_pc*/ 0, dex_pc_data);
+ ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
ProfileCompilationInfo info_megamorphic;
ASSERT_TRUE(AddMethod("dex_location1",
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 0c94a94a6a..166b6f4ba1 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -259,7 +259,9 @@ void ProfileSaver::FetchAndCacheResolvedClassesAndMethods() {
<< " (" << classes.GetDexLocation() << ")";
}
}
- auto info_it = profile_cache_.Put(filename, new ProfileCompilationInfo(Runtime::Current()->GetArenaPool()));
+ auto info_it = profile_cache_.Put(
+ filename,
+ new ProfileCompilationInfo(Runtime::Current()->GetArenaPool()));
ProfileCompilationInfo* cached_info = info_it->second;
cached_info->AddMethodsAndClasses(profile_methods_for_location,
@@ -366,6 +368,8 @@ bool ProfileSaver::ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number
total_number_of_failed_writes_++;
}
}
+ // Trim the maps to madvise the pages used for profile info.
+  // It is unlikely we will need them again in the near future.
Runtime::Current()->GetArenaPool()->TrimMaps();
}
diff --git a/compiler/utils/type_reference.h b/runtime/type_reference.h
index a0fa1a4a63..b7e964b3ad 100644
--- a/compiler/utils/type_reference.h
+++ b/runtime/type_reference.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_TYPE_REFERENCE_H_
-#define ART_COMPILER_UTILS_TYPE_REFERENCE_H_
+#ifndef ART_RUNTIME_TYPE_REFERENCE_H_
+#define ART_RUNTIME_TYPE_REFERENCE_H_
#include <stdint.h>
@@ -29,7 +29,9 @@ class DexFile;
// A type is located by its DexFile and the string_ids_ table index into that DexFile.
struct TypeReference {
- TypeReference(const DexFile* file, dex::TypeIndex index) : dex_file(file), type_index(index) { }
+ TypeReference(const DexFile* file = nullptr, dex::TypeIndex index = dex::TypeIndex())
+ : dex_file(file),
+ type_index(index) {}
const DexFile* dex_file;
dex::TypeIndex type_index;
@@ -48,4 +50,4 @@ struct TypeReferenceValueComparator {
} // namespace art
-#endif // ART_COMPILER_UTILS_TYPE_REFERENCE_H_
+#endif // ART_RUNTIME_TYPE_REFERENCE_H_
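
With the defaulted constructor arguments above, TypeReference becomes value-initializable, which is what lets containers of references be created without supplying a dex file up front. A stand-alone sketch mirroring the shape of the struct (stub types, not the real ones):

#include <cstdint>
#include <vector>

struct TypeIndexStub {
  uint32_t index_ = 0;  // stands in for dex::TypeIndex
};

struct TypeRefSketch {
  TypeRefSketch(const void* file = nullptr, TypeIndexStub index = TypeIndexStub())
      : dex_file(file), type_index(index) {}
  const void* dex_file;
  TypeIndexStub type_index;
};

int main() {
  std::vector<TypeRefSketch> refs(4);  // default construction now works
  TypeIndexStub idx;
  idx.index_ = 7;
  refs[0] = TypeRefSketch(nullptr, idx);
  return refs.size() == 4 ? 0 : 1;
}
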
diff --git a/test/596-app-images/app_images.cc b/test/596-app-images/app_images.cc
index 42211f7548..fa9c902070 100644
--- a/test/596-app-images/app_images.cc
+++ b/test/596-app-images/app_images.cc
@@ -63,6 +63,12 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_checkAppImageContains(JNIEnv*, j
return JNI_FALSE;
}
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_checkInitialized(JNIEnv*, jclass, jclass c) {
+ ScopedObjectAccess soa(Thread::Current());
+ ObjPtr<mirror::Class> klass_ptr = soa.Decode<mirror::Class>(c);
+ return klass_ptr->IsInitialized();
+}
+
} // namespace
} // namespace art
diff --git a/test/596-app-images/src/Main.java b/test/596-app-images/src/Main.java
index 75b31b8061..8ee3c888b0 100644
--- a/test/596-app-images/src/Main.java
+++ b/test/596-app-images/src/Main.java
@@ -16,7 +16,11 @@
class Main {
static class Inner {
- public static int abc = 0;
+ final public static int abc = 10;
+ }
+
+ static class Nested {
+
}
public static void main(String[] args) {
@@ -26,8 +30,44 @@ class Main {
} else if (!checkAppImageContains(Inner.class)) {
System.out.println("App image does not contain Inner!");
}
+
+ if (!checkInitialized(Inner.class))
+ System.out.println("Inner class is not initialized!");
+
+ if (!checkInitialized(Nested.class))
+ System.out.println("Nested class is not initialized!");
+
+ if (!checkInitialized(StaticFields.class))
+ System.out.println("StaticFields class is not initialized!");
+
+ if (!checkInitialized(StaticFieldsInitSub.class))
+ System.out.println("StaticFieldsInitSub class is not initialized!");
+
+ if (!checkInitialized(StaticFieldsInit.class))
+ System.out.println("StaticFieldsInit class is not initialized!");
+
+ if (checkInitialized(StaticInternString.class))
+ System.out.println("StaticInternString class is initialized!");
}
public static native boolean checkAppImageLoaded();
public static native boolean checkAppImageContains(Class<?> klass);
+ public static native boolean checkInitialized(Class<?> klass);
}
+
+class StaticFields{
+ public static int abc;
+}
+
+class StaticFieldsInitSub extends StaticFieldsInit {
+ final public static int def = 10;
+}
+
+class StaticFieldsInit{
+ final public static int abc = 10;
+}
+
+class StaticInternString {
+ final public static String intern = "java.abc.Action";
+}
+
diff --git a/test/638-checker-inline-caches/src/Main.java b/test/638-checker-inline-caches/src/Main.java
index 680bd14dbc..f104e6aea8 100644
--- a/test/638-checker-inline-caches/src/Main.java
+++ b/test/638-checker-inline-caches/src/Main.java
@@ -36,16 +36,17 @@ public class Main {
/// CHECK: InvokeVirtual method_name:Super.getValue
/// CHECK-START: int Main.inlineMonomorphicSubA(Super) inliner (after)
- /// CHECK-NOT: InvokeVirtual method_name:Super.getValue
-
- /// CHECK-START: int Main.inlineMonomorphicSubA(Super) inliner (after)
/// CHECK: <<SubARet:i\d+>> IntConstant 42
/// CHECK: <<Obj:l\d+>> NullCheck
/// CHECK: <<ObjClass:l\d+>> InstanceFieldGet [<<Obj>>] field_name:java.lang.Object.shadow$_klass_
/// CHECK: <<InlineClass:l\d+>> LoadClass class_name:SubA
/// CHECK: <<Test:z\d+>> NotEqual [<<InlineClass>>,<<ObjClass>>]
- /// CHECK: Deoptimize [<<Test>>,<<Obj>>]
- /// CHECK: Return [<<SubARet>>]
+ /// CHECK: <<DefaultRet:i\d+>> InvokeVirtual [<<Obj>>] method_name:Super.getValue
+
+ /// CHECK: <<Ret:i\d+>> Phi [<<SubARet>>,<<DefaultRet>>]
+ /// CHECK: Return [<<Ret>>]
+
+ /// CHECK-NOT: Deoptimize
public static int inlineMonomorphicSubA(Super a) {
return a.getValue();
}
@@ -53,27 +54,27 @@ public class Main {
/// CHECK-START: int Main.inlinePolymophicSubASubB(Super) inliner (before)
/// CHECK: InvokeVirtual method_name:Super.getValue
- /// CHECK-START: int Main.inlinePolymophicSubASubB(Super) inliner (after)
- /// CHECK-NOT: InvokeVirtual method_name:Super.getValue
-
// Note that the order in which the types are added to the inline cache in the profile matters.
/// CHECK-START: int Main.inlinePolymophicSubASubB(Super) inliner (after)
/// CHECK-DAG: <<SubARet:i\d+>> IntConstant 42
/// CHECK-DAG: <<SubBRet:i\d+>> IntConstant 38
- /// CHECK: <<Obj:l\d+>> NullCheck
- /// CHECK: <<ObjClassSubA:l\d+>> InstanceFieldGet [<<Obj>>] field_name:java.lang.Object.shadow$_klass_
- /// CHECK: <<InlineClassSubA:l\d+>> LoadClass class_name:SubA
- /// CHECK: <<TestSubA:z\d+>> NotEqual [<<InlineClassSubA>>,<<ObjClassSubA>>]
- /// CHECK: If [<<TestSubA>>]
-
- /// CHECK: <<ObjClassSubB:l\d+>> InstanceFieldGet field_name:java.lang.Object.shadow$_klass_
- /// CHECK: <<InlineClassSubB:l\d+>> LoadClass class_name:SubB
- /// CHECK: <<TestSubB:z\d+>> NotEqual [<<InlineClassSubB>>,<<ObjClassSubB>>]
- /// CHECK: Deoptimize [<<TestSubB>>,<<Obj>>]
-
- /// CHECK: <<Ret:i\d+>> Phi [<<SubARet>>,<<SubBRet>>]
- /// CHECK: Return [<<Ret>>]
+ /// CHECK-DAG: <<Obj:l\d+>> NullCheck
+ /// CHECK-DAG: <<ObjClassSubA:l\d+>> InstanceFieldGet [<<Obj>>] field_name:java.lang.Object.shadow$_klass_
+ /// CHECK-DAG: <<InlineClassSubA:l\d+>> LoadClass class_name:SubA
+ /// CHECK-DAG: <<TestSubA:z\d+>> NotEqual [<<InlineClassSubA>>,<<ObjClassSubA>>]
+ /// CHECK-DAG: If [<<TestSubA>>]
+
+ /// CHECK-DAG: <<ObjClassSubB:l\d+>> InstanceFieldGet field_name:java.lang.Object.shadow$_klass_
+ /// CHECK-DAG: <<InlineClassSubB:l\d+>> LoadClass class_name:SubB
+ /// CHECK-DAG: <<TestSubB:z\d+>> NotEqual [<<InlineClassSubB>>,<<ObjClassSubB>>]
+ /// CHECK-DAG: <<DefaultRet:i\d+>> InvokeVirtual [<<Obj>>] method_name:Super.getValue
+
+ /// CHECK-DAG: <<FirstMerge:i\d+>> Phi [<<SubBRet>>,<<DefaultRet>>]
+ /// CHECK-DAG: <<Ret:i\d+>> Phi [<<SubARet>>,<<FirstMerge>>]
+ /// CHECK-DAG: Return [<<Ret>>]
+
+ /// CHECK-NOT: Deoptimize
public static int inlinePolymophicSubASubB(Super a) {
return a.getValue();
}
@@ -81,27 +82,27 @@ public class Main {
/// CHECK-START: int Main.inlinePolymophicCrossDexSubASubC(Super) inliner (before)
/// CHECK: InvokeVirtual method_name:Super.getValue
- /// CHECK-START: int Main.inlinePolymophicCrossDexSubASubC(Super) inliner (after)
- /// CHECK-NOT: InvokeVirtual method_name:Super.getValue
-
// Note that the order in which the types are added to the inline cache in the profile matters.
/// CHECK-START: int Main.inlinePolymophicCrossDexSubASubC(Super) inliner (after)
/// CHECK-DAG: <<SubARet:i\d+>> IntConstant 42
/// CHECK-DAG: <<SubCRet:i\d+>> IntConstant 24
- /// CHECK: <<Obj:l\d+>> NullCheck
- /// CHECK: <<ObjClassSubA:l\d+>> InstanceFieldGet [<<Obj>>] field_name:java.lang.Object.shadow$_klass_
- /// CHECK: <<InlineClassSubA:l\d+>> LoadClass class_name:SubA
- /// CHECK: <<TestSubA:z\d+>> NotEqual [<<InlineClassSubA>>,<<ObjClassSubA>>]
- /// CHECK: If [<<TestSubA>>]
-
- /// CHECK: <<ObjClassSubC:l\d+>> InstanceFieldGet field_name:java.lang.Object.shadow$_klass_
- /// CHECK: <<InlineClassSubC:l\d+>> LoadClass class_name:SubC
- /// CHECK: <<TestSubC:z\d+>> NotEqual [<<InlineClassSubC>>,<<ObjClassSubC>>]
- /// CHECK: Deoptimize [<<TestSubC>>,<<Obj>>]
-
- /// CHECK: <<Ret:i\d+>> Phi [<<SubARet>>,<<SubCRet>>]
- /// CHECK: Return [<<Ret>>]
+ /// CHECK-DAG: <<Obj:l\d+>> NullCheck
+ /// CHECK-DAG: <<ObjClassSubA:l\d+>> InstanceFieldGet [<<Obj>>] field_name:java.lang.Object.shadow$_klass_
+ /// CHECK-DAG: <<InlineClassSubA:l\d+>> LoadClass class_name:SubA
+ /// CHECK-DAG: <<TestSubA:z\d+>> NotEqual [<<InlineClassSubA>>,<<ObjClassSubA>>]
+ /// CHECK-DAG: If [<<TestSubA>>]
+
+ /// CHECK-DAG: <<ObjClassSubC:l\d+>> InstanceFieldGet field_name:java.lang.Object.shadow$_klass_
+ /// CHECK-DAG: <<InlineClassSubC:l\d+>> LoadClass class_name:SubC
+ /// CHECK-DAG: <<TestSubC:z\d+>> NotEqual [<<InlineClassSubC>>,<<ObjClassSubC>>]
+ /// CHECK-DAG: <<DefaultRet:i\d+>> InvokeVirtual [<<Obj>>] method_name:Super.getValue
+
+ /// CHECK-DAG: <<FirstMerge:i\d+>> Phi [<<SubCRet>>,<<DefaultRet>>]
+ /// CHECK-DAG: <<Ret:i\d+>> Phi [<<SubARet>>,<<FirstMerge>>]
+ /// CHECK-DAG: Return [<<Ret>>]
+
+ /// CHECK-NOT: Deoptimize
public static int inlinePolymophicCrossDexSubASubC(Super a) {
return a.getValue();
}
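
The rewritten CHECK expectations reflect the inliner now keeping the original virtual call as a fallback branch and merging the results with a Phi instead of emitting a Deoptimize (which is also why the 644-checker-deopt regression test below is deleted). A sketch of the control flow being matched, in C++ with stand-in types purely to show the branch shape:

// Stand-ins for the runtime's class pointers; only the structure matters.
struct ClassStub {};
ClassStub sub_a_class;

struct Super {
  ClassStub* klass = nullptr;
  virtual int getValue() { return 0; }
  virtual ~Super() {}
};

int InlineMonomorphicShape(Super* a) {
  int ret;
  if (a->klass == &sub_a_class) {  // type guard generated from the inline cache
    ret = 42;                      // inlined body of SubA.getValue()
  } else {
    ret = a->getValue();           // fallback: the InvokeVirtual stays, no Deoptimize
  }
  return ret;                      // both paths merge (the Phi in the CHECK lines)
}
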
diff --git a/test/644-checker-deopt/expected.txt b/test/644-checker-deopt/expected.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/644-checker-deopt/expected.txt
+++ /dev/null
diff --git a/test/644-checker-deopt/info.txt b/test/644-checker-deopt/info.txt
deleted file mode 100644
index c5fb12c570..0000000000
--- a/test/644-checker-deopt/info.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Regression test for making sure HDeoptimize is executed before
-the code it should have prevented executing.
diff --git a/test/644-checker-deopt/profile b/test/644-checker-deopt/profile
deleted file mode 100644
index cb261cc694..0000000000
--- a/test/644-checker-deopt/profile
+++ /dev/null
@@ -1,2 +0,0 @@
-LMain;->inlineMonomorphic(LMain;)I+LMain;
-LMain;->inlinePolymorphic(LMain;)I+LMain;,LSubMain;
diff --git a/test/644-checker-deopt/run b/test/644-checker-deopt/run
deleted file mode 100644
index 146e180000..0000000000
--- a/test/644-checker-deopt/run
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-exec ${RUN} $@ --profile -Xcompiler-option --compiler-filter=speed-profile
diff --git a/test/644-checker-deopt/src/Main.java b/test/644-checker-deopt/src/Main.java
deleted file mode 100644
index 17c80a6057..0000000000
--- a/test/644-checker-deopt/src/Main.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
-
- /// CHECK-START: int Main.inlineMonomorphic(Main) inliner (before)
- /// CHECK: InvokeVirtual method_name:Main.getValue
-
- /// CHECK-START: int Main.inlineMonomorphic(Main) inliner (after)
- /// CHECK-NOT: InvokeVirtual method_name:Main.getValue
-
- /// CHECK-START: int Main.inlineMonomorphic(Main) licm (before)
- /// CHECK: <<Deopt:l\d+>> Deoptimize
- /// CHECK: InstanceFieldGet [<<Deopt>>] field_name:Main.value
-
- /// CHECK-START: int Main.inlineMonomorphic(Main) licm (after)
- /// CHECK: <<Deopt:l\d+>> Deoptimize
- /// CHECK: InstanceFieldGet [<<Deopt>>] field_name:Main.value
-
- public static int inlineMonomorphic(Main a) {
- if (a == null) {
- return 42;
- }
- int i = 0;
- while (i < 100) {
- i += a.getValue();
- }
- return i;
- }
-
- /// CHECK-START: int Main.inlinePolymorphic(Main) inliner (before)
- /// CHECK: InvokeVirtual method_name:Main.getValue
-
- /// CHECK-START: int Main.inlinePolymorphic(Main) inliner (after)
- /// CHECK-NOT: InvokeVirtual method_name:Main.getValue
-
- /// CHECK-START: int Main.inlineMonomorphic(Main) licm (before)
- /// CHECK: <<Deopt:l\d+>> Deoptimize
- /// CHECK: InstanceFieldGet [<<Deopt>>] field_name:Main.value
-
- /// CHECK-START: int Main.inlineMonomorphic(Main) licm (after)
- /// CHECK: <<Deopt:l\d+>> Deoptimize
- /// CHECK: InstanceFieldGet [<<Deopt>>] field_name:Main.value
- public static int inlinePolymorphic(Main a) {
- return a.getValue();
- }
-
- public int getValue() {
- return value;
- }
-
- public static void main(String[] args) {
- inlineMonomorphic(new Main());
- }
-
- int value = 1;
-}
-
-// Add a subclass of 'Main' to write the polymorphic inline cache in the profile.
-class SubMain extends Main {
-}
diff --git a/test/906-iterate-heap/expected.txt b/test/906-iterate-heap/expected.txt
index b6af8435de..73b7129bba 100644
--- a/test/906-iterate-heap/expected.txt
+++ b/test/906-iterate-heap/expected.txt
@@ -18,14 +18,14 @@
2
1@0 (32, 2xD '0000000000000000000000000000f03f')
2
+doTestPrimitiveFieldsClasses
10000@0 (static, int, index=3) 0000000000000000
10001
10000@0 (static, int, index=11) 0000000000000000
10001
-10000@0 (static, int, index=0) 0000000000000000
10001
-10000@0 (static, int, index=1) 0000000000000000
10001
+doTestPrimitiveFieldsIntegral
10000@0 (instance, int, index=2) 0000000000000000
10001@0 (instance, byte, index=4) 0000000000000001
10002@0 (instance, char, index=5) 0000000000000061
@@ -33,6 +33,7 @@
10004@0 (instance, long, index=7) 0000000000000004
10005@0 (instance, short, index=9) 0000000000000002
10006
+doTestPrimitiveFieldsFloat
10000@0 (instance, int, index=3) 0000000000000000
10001@0 (instance, byte, index=5) 0000000000000001
10002@0 (instance, char, index=6) 0000000000000061
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index 6534b4c3db..02ac69942a 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -408,5 +408,15 @@ extern "C" JNIEXPORT jstring JNICALL Java_art_Test906_iterateThroughHeapPrimitiv
return env->NewStringUTF(ffc.data.c_str());
}
+extern "C" JNIEXPORT jboolean JNICALL Java_art_Test906_checkInitialized(
+ JNIEnv* env, jclass, jclass c) {
+ jint status;
+ jvmtiError error = jvmti_env->GetClassStatus(c, &status);
+ if (JvmtiErrorToException(env, jvmti_env, error)) {
+ return false;
+ }
+ return (status & JVMTI_CLASS_STATUS_INITIALIZED) != 0;
+}
+
} // namespace Test906IterateHeap
} // namespace art
diff --git a/test/906-iterate-heap/src/art/Test906.java b/test/906-iterate-heap/src/art/Test906.java
index fe18e38501..65c2c8c560 100644
--- a/test/906-iterate-heap/src/art/Test906.java
+++ b/test/906-iterate-heap/src/art/Test906.java
@@ -142,6 +142,7 @@ public class Test906 {
}
private static void doTestPrimitiveFieldsClasses() {
+ System.out.println("doTestPrimitiveFieldsClasses");
setTag(IntObject.class, 10000);
System.out.println(iterateThroughHeapPrimitiveFields(10000));
System.out.println(getTag(IntObject.class));
@@ -152,18 +153,40 @@ public class Test906 {
System.out.println(getTag(FloatObject.class));
setTag(FloatObject.class, 0);
+ boolean correctHeapValue = false;
setTag(Inf1.class, 10000);
- System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ String heapTrace = iterateThroughHeapPrimitiveFields(10000);
+
+ if (!checkInitialized(Inf1.class)) {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=0) 0000000000000000");
+ } else {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=0) 0000000000000001");
+ }
+
+ if (!correctHeapValue)
+ System.out.println("Heap Trace for Inf1 is not as expected:\n" + heapTrace);
+
System.out.println(getTag(Inf1.class));
setTag(Inf1.class, 0);
setTag(Inf2.class, 10000);
- System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ heapTrace = iterateThroughHeapPrimitiveFields(10000);
+
+ if (!checkInitialized(Inf2.class)) {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=1) 0000000000000000");
+ } else {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=1) 0000000000000001");
+ }
+
+ if (!correctHeapValue)
+ System.out.println("Heap Trace for Inf2 is not as expected:\n" + heapTrace);
System.out.println(getTag(Inf2.class));
+
setTag(Inf2.class, 0);
}
private static void doTestPrimitiveFieldsIntegral() {
+ System.out.println("doTestPrimitiveFieldsIntegral");
IntObject intObject = new IntObject();
setTag(intObject, 10000);
System.out.println(iterateThroughHeapPrimitiveFields(10000));
@@ -171,6 +194,7 @@ public class Test906 {
}
private static void doTestPrimitiveFieldsFloat() {
+ System.out.println("doTestPrimitiveFieldsFloat");
FloatObject floatObject = new FloatObject();
setTag(floatObject, 10000);
System.out.println(iterateThroughHeapPrimitiveFields(10000));
@@ -265,6 +289,7 @@ public class Test906 {
return Main.getTag(o);
}
+ private static native boolean checkInitialized(Class<?> klass);
private static native int iterateThroughHeapCount(int heapFilter,
Class<?> klassFilter, int stopAfter);
private static native int iterateThroughHeapData(int heapFilter,
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
index b128d1cb70..80f8b9e947 100644
--- a/test/913-heaps/expected.txt
+++ b/test/913-heaps/expected.txt
@@ -140,9 +140,7 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
10001
10000@0 (static, int, index=11) 0000000000000000
10001
-10000@0 (static, int, index=0) 0000000000000000
10001
-10000@0 (static, int, index=1) 0000000000000000
10001
10000@0 (instance, int, index=2) 0000000000000000
10001@0 (instance, byte, index=4) 0000000000000001
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index ec36cebd43..bf3f7b66a5 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -1078,5 +1078,14 @@ extern "C" JNIEXPORT void JNICALL Java_art_Test913_iterateThroughHeapExt(
CHECK(gFoundExt);
}
+extern "C" JNIEXPORT jboolean JNICALL Java_art_Test913_checkInitialized(JNIEnv* env, jclass, jclass c) {
+ jint status;
+ jvmtiError error = jvmti_env->GetClassStatus(c, &status);
+ if (JvmtiErrorToException(env, jvmti_env, error)) {
+ return false;
+ }
+ return (status & JVMTI_CLASS_STATUS_INITIALIZED) != 0;
+}
+
} // namespace Test913Heaps
} // namespace art
diff --git a/test/913-heaps/src/art/Test913.java b/test/913-heaps/src/art/Test913.java
index 97f48eea03..b9990010ff 100644
--- a/test/913-heaps/src/art/Test913.java
+++ b/test/913-heaps/src/art/Test913.java
@@ -195,13 +195,33 @@ public class Test913 {
System.out.println(getTag(FloatObject.class));
setTag(FloatObject.class, 0);
+ boolean correctHeapValue = false;
setTag(Inf1.class, 10000);
- System.out.println(followReferencesPrimitiveFields(Inf1.class));
+ String heapTrace = followReferencesPrimitiveFields(Inf1.class);
+
+ if (!checkInitialized(Inf1.class)) {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=0) 0000000000000000");
+ } else {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=0) 0000000000000001");
+ }
+
+ if (!correctHeapValue)
+ System.out.println("Heap Trace for Inf1 is not as expected:\n" + heapTrace);
+
System.out.println(getTag(Inf1.class));
setTag(Inf1.class, 0);
setTag(Inf2.class, 10000);
- System.out.println(followReferencesPrimitiveFields(Inf2.class));
+ heapTrace = followReferencesPrimitiveFields(Inf2.class);
+
+ if (!checkInitialized(Inf2.class)) {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=1) 0000000000000000");
+ } else {
+ correctHeapValue = heapTrace.equals("10000@0 (static, int, index=1) 0000000000000001");
+ }
+
+ if (!correctHeapValue)
+ System.out.println("Heap Trace for Inf2 is not as expected:\n" + heapTrace);
System.out.println(getTag(Inf2.class));
setTag(Inf2.class, 0);
}
@@ -712,6 +732,7 @@ public class Test913 {
return Main.getTag(o);
}
+ private static native boolean checkInitialized(Class<?> klass);
private static native void setupGcCallback();
private static native void enableGcTracking(boolean enable);
private static native int getGcStarts();
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 96c2967f86..36ac307c5e 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -508,7 +508,6 @@
"640-checker-short-simd",
"641-checker-arraycopy",
"643-checker-bogus-ic",
- "644-checker-deopt",
"645-checker-abs-simd",
"706-checker-scheduler"],
"description": ["Checker tests are not compatible with jvmti."],
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index 3049871d4c..38556abff2 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -75,7 +75,10 @@ Things to move to perflib:
* Instance.isRoot and Instance.getRootTypes.
Release History:
- 1.2 Pending
+ 1.3 Pending
+
+ 1.2 May 26, 2017
+ Show registered native sizes of objects.
Simplify presentation of sample path from gc root.
1.1 Feb 21, 2017
diff --git a/tools/ahat/src/DocString.java b/tools/ahat/src/DocString.java
index c6303c8c35..7970bf8de4 100644
--- a/tools/ahat/src/DocString.java
+++ b/tools/ahat/src/DocString.java
@@ -126,6 +126,23 @@ class DocString {
}
/**
+ * Standard formatted DocString for describing a size.
+ *
+ * Nothing is printed for a size of zero.
+   * Set isPlaceHolder to true to indicate that the size field corresponds to
+   * a place holder object that should be annotated specially.
+ */
+ public static DocString size(long size, boolean isPlaceHolder) {
+ DocString string = new DocString();
+ if (isPlaceHolder) {
+ string.append(DocString.removed("del"));
+ } else if (size != 0) {
+ string.appendFormat("%,14d", size);
+ }
+ return string;
+ }
+
+ /**
* Standard formatted DocString for describing a change in size relative to
* a baseline.
* @param noCurrent - whether no current object exists.
diff --git a/tools/ahat/src/DominatedList.java b/tools/ahat/src/DominatedList.java
index f73e3ca027..75133b2184 100644
--- a/tools/ahat/src/DominatedList.java
+++ b/tools/ahat/src/DominatedList.java
@@ -55,7 +55,7 @@ class DominatedList {
@Override
public long getSize(AhatInstance element, AhatHeap heap) {
- return element.getRetainedSize(heap);
+ return element.getRetainedSize(heap).getSize();
}
@Override
diff --git a/tools/ahat/src/HeapTable.java b/tools/ahat/src/HeapTable.java
index 9abbe4a4ed..b04f2aebf7 100644
--- a/tools/ahat/src/HeapTable.java
+++ b/tools/ahat/src/HeapTable.java
@@ -45,16 +45,6 @@ class HeapTable {
List<ValueConfig<T>> getValueConfigs();
}
- private static DocString sizeString(long size, boolean isPlaceHolder) {
- DocString string = new DocString();
- if (isPlaceHolder) {
- string.append(DocString.removed("del"));
- } else if (size != 0) {
- string.appendFormat("%,14d", size);
- }
- return string;
- }
-
/**
* Render the table to the given document.
* @param query - The page query.
@@ -100,10 +90,10 @@ class HeapTable {
long basesize = config.getSize(base, heap.getBaseline());
total += size;
basetotal += basesize;
- vals.add(sizeString(size, elem.isPlaceHolder()));
+ vals.add(DocString.size(size, elem.isPlaceHolder()));
vals.add(DocString.delta(elem.isPlaceHolder(), base.isPlaceHolder(), size, basesize));
}
- vals.add(sizeString(total, elem.isPlaceHolder()));
+ vals.add(DocString.size(total, elem.isPlaceHolder()));
vals.add(DocString.delta(elem.isPlaceHolder(), base.isPlaceHolder(), total, basetotal));
for (ValueConfig<T> value : values) {
@@ -140,10 +130,10 @@ class HeapTable {
long basesize = basesummary.get(heap);
total += size;
basetotal += basesize;
- vals.add(sizeString(size, false));
+ vals.add(DocString.size(size, false));
vals.add(DocString.delta(false, false, size, basesize));
}
- vals.add(sizeString(total, false));
+ vals.add(DocString.size(total, false));
vals.add(DocString.delta(false, false, total, basetotal));
for (ValueConfig<T> value : values) {
@@ -159,7 +149,7 @@ class HeapTable {
public static <T extends Diffable<T>> boolean hasNonZeroEntry(AhatHeap heap,
TableConfig<T> config, List<T> elements) {
AhatHeap baseheap = heap.getBaseline();
- if (heap.getSize() > 0 || baseheap.getSize() > 0) {
+ if (!heap.getSize().isZero() || !baseheap.getSize().isZero()) {
for (T element : elements) {
if (config.getSize(element, heap) > 0 ||
config.getSize(element.getBaseline(), baseheap) > 0) {
diff --git a/tools/ahat/src/ObjectHandler.java b/tools/ahat/src/ObjectHandler.java
index b1d7904ef6..d6f1faa3c3 100644
--- a/tools/ahat/src/ObjectHandler.java
+++ b/tools/ahat/src/ObjectHandler.java
@@ -70,16 +70,6 @@ class ObjectHandler implements AhatHandler {
doc.descriptions();
doc.description(DocString.text("Class"), Summarizer.summarize(cls));
- DocString sizeDescription = DocString.format("%,14d ", inst.getSize());
- sizeDescription.appendDelta(false, base.isPlaceHolder(),
- inst.getSize(), base.getSize());
- doc.description(DocString.text("Size"), sizeDescription);
-
- DocString rsizeDescription = DocString.format("%,14d ", inst.getTotalRetainedSize());
- rsizeDescription.appendDelta(false, base.isPlaceHolder(),
- inst.getTotalRetainedSize(), base.getTotalRetainedSize());
- doc.description(DocString.text("Retained Size"), rsizeDescription);
-
doc.description(DocString.text("Heap"), DocString.text(inst.getHeap().getName()));
Collection<String> rootTypes = inst.getRootTypes();
@@ -96,6 +86,13 @@ class ObjectHandler implements AhatHandler {
doc.end();
+ doc.section("Object Size");
+ SizeTable.table(doc, new Column(""), inst != base && !base.isPlaceHolder());
+ SizeTable.row(doc, DocString.text("Shallow"), inst.getSize(), base.getSize());
+ SizeTable.row(doc, DocString.text("Retained"),
+ inst.getTotalRetainedSize(), base.getTotalRetainedSize());
+ SizeTable.end(doc);
+
printBitmap(doc, inst);
if (inst.isClassInstance()) {
printClassInstanceFields(doc, query, inst.asClassInstance());
diff --git a/tools/ahat/src/ObjectsHandler.java b/tools/ahat/src/ObjectsHandler.java
index 3062d23b53..86d48f1702 100644
--- a/tools/ahat/src/ObjectsHandler.java
+++ b/tools/ahat/src/ObjectsHandler.java
@@ -54,23 +54,18 @@ class ObjectsHandler implements AhatHandler {
doc.title("Objects");
- doc.table(
- new Column("Size", Column.Align.RIGHT),
- new Column("Δ", Column.Align.RIGHT, mSnapshot.isDiffed()),
+ SizeTable.table(doc, mSnapshot.isDiffed(),
new Column("Heap"),
new Column("Object"));
SubsetSelector<AhatInstance> selector = new SubsetSelector(query, OBJECTS_ID, insts);
for (AhatInstance inst : selector.selected()) {
AhatInstance base = inst.getBaseline();
- doc.row(
- DocString.format("%,14d", inst.getSize()),
- DocString.delta(inst.isPlaceHolder(), base.isPlaceHolder(),
- inst.getSize(), base.getSize()),
+ SizeTable.row(doc, inst.getSize(), base.getSize(),
DocString.text(inst.getHeap().getName()),
Summarizer.summarize(inst));
}
- doc.end();
+ SizeTable.end(doc);
selector.render(doc);
}
}
diff --git a/tools/ahat/src/OverviewHandler.java b/tools/ahat/src/OverviewHandler.java
index ea305c4e94..c9f84259a9 100644
--- a/tools/ahat/src/OverviewHandler.java
+++ b/tools/ahat/src/OverviewHandler.java
@@ -18,16 +18,12 @@ package com.android.ahat;
import com.android.ahat.heapdump.AhatHeap;
import com.android.ahat.heapdump.AhatSnapshot;
-import com.android.ahat.heapdump.Diffable;
+import com.android.ahat.heapdump.Size;
import java.io.File;
import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
class OverviewHandler implements AhatHandler {
- private static final String OVERVIEW_ID = "overview";
-
private AhatSnapshot mSnapshot;
private File mHprof;
private File mBaseHprof;
@@ -53,39 +49,27 @@ class OverviewHandler implements AhatHandler {
}
doc.end();
- doc.section("Heap Sizes");
- printHeapSizes(doc, query);
+ doc.section("Bytes Retained by Heap");
+ printHeapSizes(doc);
doc.big(Menu.getMenu());
}
- private static class TableElem implements Diffable<TableElem> {
- @Override public TableElem getBaseline() {
- return this;
- }
-
- @Override public boolean isPlaceHolder() {
- return false;
- }
- }
-
- private void printHeapSizes(Doc doc, Query query) {
- List<TableElem> dummy = Collections.singletonList(new TableElem());
-
- HeapTable.TableConfig<TableElem> table = new HeapTable.TableConfig<TableElem>() {
- public String getHeapsDescription() {
- return "Bytes Retained by Heap";
- }
-
- public long getSize(TableElem element, AhatHeap heap) {
- return heap.getSize();
+ private void printHeapSizes(Doc doc) {
+ SizeTable.table(doc, new Column("Heap"), mSnapshot.isDiffed());
+ Size totalSize = Size.ZERO;
+ Size totalBase = Size.ZERO;
+ for (AhatHeap heap : mSnapshot.getHeaps()) {
+ Size size = heap.getSize();
+ Size base = heap.getBaseline().getSize();
+ if (!size.isZero() || !base.isZero()) {
+ SizeTable.row(doc, DocString.text(heap.getName()), size, base);
+ totalSize = totalSize.plus(size);
+ totalBase = totalBase.plus(base);
}
-
- public List<HeapTable.ValueConfig<TableElem>> getValueConfigs() {
- return Collections.emptyList();
- }
- };
- HeapTable.render(doc, query, OVERVIEW_ID, table, mSnapshot, dummy);
+ }
+ SizeTable.row(doc, DocString.text("Total"), totalSize, totalBase);
+ SizeTable.end(doc);
}
}
diff --git a/tools/ahat/src/SiteHandler.java b/tools/ahat/src/SiteHandler.java
index febf1713fb..7a831d3018 100644
--- a/tools/ahat/src/SiteHandler.java
+++ b/tools/ahat/src/SiteHandler.java
@@ -60,7 +60,7 @@ class SiteHandler implements AhatHandler {
}
public long getSize(Site element, AhatHeap heap) {
- return element.getSize(heap);
+ return element.getSize(heap).getSize();
}
public List<HeapTable.ValueConfig<Site>> getValueConfigs() {
@@ -80,10 +80,7 @@ class SiteHandler implements AhatHandler {
}
doc.section("Objects Allocated");
-
- doc.table(
- new Column("Reachable Bytes Allocated", Column.Align.RIGHT),
- new Column("Δ", Column.Align.RIGHT, mSnapshot.isDiffed()),
+ SizeTable.table(doc, mSnapshot.isDiffed(),
new Column("Instances", Column.Align.RIGHT),
new Column("Δ", Column.Align.RIGHT, mSnapshot.isDiffed()),
new Column("Heap"),
@@ -100,9 +97,7 @@ class SiteHandler implements AhatHandler {
for (Site.ObjectsInfo info : selector.selected()) {
Site.ObjectsInfo baseinfo = info.getBaseline();
String className = info.getClassName();
- doc.row(
- DocString.format("%,14d", info.numBytes),
- DocString.delta(false, false, info.numBytes, baseinfo.numBytes),
+ SizeTable.row(doc, info.numBytes, baseinfo.numBytes,
DocString.link(
DocString.formattedUri("objects?id=%d&depth=%d&heap=%s&class=%s",
site.getId(), site.getDepth(), info.heap.getName(), className),
@@ -111,7 +106,7 @@ class SiteHandler implements AhatHandler {
DocString.text(info.heap.getName()),
Summarizer.summarize(info.classObj));
}
- doc.end();
+ SizeTable.end(doc);
selector.render(doc);
}
}
diff --git a/tools/ahat/src/SitePrinter.java b/tools/ahat/src/SitePrinter.java
index 21ca2deda4..32037f4414 100644
--- a/tools/ahat/src/SitePrinter.java
+++ b/tools/ahat/src/SitePrinter.java
@@ -38,7 +38,7 @@ class SitePrinter {
}
public long getSize(Site element, AhatHeap heap) {
- return element.getSize(heap);
+ return element.getSize(heap).getSize();
}
public List<HeapTable.ValueConfig<Site>> getValueConfigs() {
diff --git a/tools/ahat/src/SizeTable.java b/tools/ahat/src/SizeTable.java
new file mode 100644
index 0000000000..46e395669f
--- /dev/null
+++ b/tools/ahat/src/SizeTable.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat;
+
+import com.android.ahat.heapdump.Size;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Class for rendering a table that includes all categories of Size.
+ * Two table formats are supported: one where a custom left column can be
+ * added before the size columns:
+ * |left column|Java Size|Native Size|...|Total Size|custom columns...|
+ *
+ * The other omits the custom left column:
+ * |Java Size|Native Size|...|Total Size|custom columns...|
+ */
+class SizeTable {
+ /**
+ * Start a size table with a custom left column.
+ *
+ * |left column|Java Size|Native Size|...|Total Size|custom columns...|
+ *
+ * This should be followed by calls to the 'row' method to fill in the table
+ * contents and the 'end' method to end the table.
+ *
+ * Set showDiff to true if size diffs should be shown.
+ */
+ static void table(Doc doc, Column left, boolean showDiff, Column... columns) {
+ List<Column> cols = new ArrayList<Column>();
+ cols.add(left);
+ cols.add(new Column("Java Size", Column.Align.RIGHT));
+ cols.add(new Column("Δ", Column.Align.RIGHT, showDiff));
+ cols.add(new Column("Registered Native Size", Column.Align.RIGHT));
+ cols.add(new Column("Δ", Column.Align.RIGHT, showDiff));
+ cols.add(new Column("Total Size", Column.Align.RIGHT));
+ cols.add(new Column("Δ", Column.Align.RIGHT, showDiff));
+ cols.addAll(Arrays.asList(columns));
+ doc.table(cols.toArray(new Column[cols.size()]));
+ }
+
+ /**
+ * Add a row to the currently active size table with custom left column.
+ * The number of values must match the number of columns provided for the
+ * currently active table.
+ */
+ static void row(Doc doc, DocString left, Size size, Size base, DocString... values) {
+ List<DocString> vals = new ArrayList<DocString>();
+ vals.add(left);
+ vals.add(DocString.size(size.getJavaSize(), false));
+ vals.add(DocString.delta(false, false, size.getJavaSize(), base.getJavaSize()));
+ vals.add(DocString.size(size.getRegisteredNativeSize(), false));
+ vals.add(DocString.delta(false, false,
+ size.getRegisteredNativeSize(), base.getRegisteredNativeSize()));
+ vals.add(DocString.size(size.getSize(), false));
+ vals.add(DocString.delta(false, false, size.getSize(), base.getSize()));
+ vals.addAll(Arrays.asList(values));
+ doc.row(vals.toArray(new DocString[vals.size()]));
+ }
+
+ /**
+ * Start a size table without a custom left column.
+ *
+ * |Java Size|Native Size|...|Total Size|custom columns...|
+ * This should be followed by calls to the 'row' method to fill in the table
+ * contents and the 'end' method to end the table.
+ *
+ * Set showDiff to true if size diffs should be shown.
+ */
+ static void table(Doc doc, boolean showDiff, Column... columns) {
+ // Re-use the code for a size table with custom left column by having an
+ // invisible custom left column.
+ table(doc, new Column("", Column.Align.LEFT, false), showDiff, columns);
+ }
+
+ /**
+ * Add a row to the currently active size table without a custom left column.
+ * The number of values must match the number of columns provided for the
+ * currently active table.
+ */
+ static void row(Doc doc, Size size, Size base, DocString... values) {
+ row(doc, new DocString(), size, base, values);
+ }
+
+ /**
+ * End the currently active table.
+ */
+ static void end(Doc doc) {
+ doc.end();
+ }
+}
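For reference, a minimal usage sketch of SizeTable, assuming a hypothetical Doc instance named doc; the Size values below are made up. The "Object Size" section added to ObjectHandler in this change drives the table with the same table/row/end sequence.

    // Render a one-row size table without diffs; 'doc' is a hypothetical Doc.
    Size current = new Size(1024, 256);   // 1024 bytes Java size + 256 bytes registered native size
    Size baseline = Size.ZERO;            // baseline column is hidden when showDiff is false
    SizeTable.table(doc, new Column("Kind"), /* showDiff= */ false);
    SizeTable.row(doc, DocString.text("Example"), current, baseline);
    SizeTable.end(doc);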
diff --git a/tools/ahat/src/heapdump/AhatHeap.java b/tools/ahat/src/heapdump/AhatHeap.java
index c39adc4b41..b8897a182c 100644
--- a/tools/ahat/src/heapdump/AhatHeap.java
+++ b/tools/ahat/src/heapdump/AhatHeap.java
@@ -18,7 +18,7 @@ package com.android.ahat.heapdump;
public class AhatHeap implements Diffable<AhatHeap> {
private String mName;
- private long mSize = 0;
+ private Size mSize = Size.ZERO;
private int mIndex;
private AhatHeap mBaseline;
private boolean mIsPlaceHolder = false;
@@ -47,8 +47,8 @@ public class AhatHeap implements Diffable<AhatHeap> {
return new AhatHeap(name, baseline);
}
- void addToSize(long increment) {
- mSize += increment;
+ void addToSize(Size size) {
+ mSize = mSize.plus(size);
}
/**
@@ -69,7 +69,7 @@ public class AhatHeap implements Diffable<AhatHeap> {
/**
* Returns the total number of bytes allocated on this heap.
*/
- public long getSize() {
+ public Size getSize() {
return mSize;
}
diff --git a/tools/ahat/src/heapdump/AhatInstance.java b/tools/ahat/src/heapdump/AhatInstance.java
index e6b9c00384..af369d95d8 100644
--- a/tools/ahat/src/heapdump/AhatInstance.java
+++ b/tools/ahat/src/heapdump/AhatInstance.java
@@ -20,17 +20,18 @@ import com.android.tools.perflib.heap.ClassObj;
import com.android.tools.perflib.heap.Instance;
import com.android.tools.perflib.heap.RootObj;
import java.awt.image.BufferedImage;
+import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
+import java.util.Deque;
import java.util.List;
public abstract class AhatInstance implements Diffable<AhatInstance> {
private long mId;
- private long mSize;
- private long mTotalRetainedSize;
- private long mRetainedSizes[]; // Retained size indexed by heap index
+ private Size mSize;
+ private Size[] mRetainedSizes; // Retained size indexed by heap index
private boolean mIsReachable;
private AhatHeap mHeap;
private AhatInstance mImmediateDominator;
@@ -63,15 +64,10 @@ public abstract class AhatInstance implements Diffable<AhatInstance> {
*/
void initialize(AhatSnapshot snapshot, Instance inst) {
mId = inst.getId();
- mSize = inst.getSize();
- mTotalRetainedSize = inst.getTotalRetainedSize();
+ mSize = new Size(inst.getSize(), 0);
mIsReachable = inst.isReachable();
List<AhatHeap> heaps = snapshot.getHeaps();
- mRetainedSizes = new long[heaps.size()];
- for (AhatHeap heap : heaps) {
- mRetainedSizes[heap.getIndex()] = inst.getRetainedSize(heap.getIndex());
- }
mHeap = snapshot.getHeap(inst.getHeap().getName());
@@ -130,7 +126,7 @@ public abstract class AhatInstance implements Diffable<AhatInstance> {
/**
* Returns the shallow number of bytes this object takes up.
*/
- public long getSize() {
+ public Size getSize() {
return mSize;
}
@@ -138,16 +134,32 @@ public abstract class AhatInstance implements Diffable<AhatInstance> {
* Returns the number of bytes belonging to the given heap that this instance
* retains.
*/
- public long getRetainedSize(AhatHeap heap) {
+ public Size getRetainedSize(AhatHeap heap) {
int index = heap.getIndex();
- return 0 <= index && index < mRetainedSizes.length ? mRetainedSizes[heap.getIndex()] : 0;
+ if (mRetainedSizes != null && 0 <= index && index < mRetainedSizes.length) {
+ return mRetainedSizes[heap.getIndex()];
+ }
+ return Size.ZERO;
}
/**
* Returns the total number of bytes this instance retains.
*/
- public long getTotalRetainedSize() {
- return mTotalRetainedSize;
+ public Size getTotalRetainedSize() {
+ Size size = Size.ZERO;
+ if (mRetainedSizes != null) {
+ for (int i = 0; i < mRetainedSizes.length; i++) {
+ size = size.plus(mRetainedSizes[i]);
+ }
+ }
+ return size;
+ }
+
+ /**
+ * Increment the number of registered native bytes tied to this object.
+ */
+ void addRegisteredNativeSize(long size) {
+ mSize = mSize.plusRegisteredNativeSize(size);
}
/**
@@ -452,4 +464,41 @@ public abstract class AhatInstance implements Diffable<AhatInstance> {
AhatInstance newPlaceHolderInstance() {
return new AhatPlaceHolderInstance(this);
}
+
+ /**
+   * Compute the retained size of the given instance and all other
+   * instances it dominates.
+ */
+ static void computeRetainedSize(AhatInstance inst, int numHeaps) {
+ // Note: We can't use a recursive implementation because it can lead to
+ // stack overflow. Use an iterative implementation instead.
+ //
+ // Objects not yet processed will have mRetainedSizes set to null.
+    // Once prepared, an object will have mRetainedSizes set to an array of 0
+ // sizes.
+ Deque<AhatInstance> deque = new ArrayDeque<AhatInstance>();
+ deque.push(inst);
+
+ while (!deque.isEmpty()) {
+ inst = deque.pop();
+ if (inst.mRetainedSizes == null) {
+ inst.mRetainedSizes = new Size[numHeaps];
+ for (int i = 0; i < numHeaps; i++) {
+ inst.mRetainedSizes[i] = Size.ZERO;
+ }
+ inst.mRetainedSizes[inst.mHeap.getIndex()] =
+ inst.mRetainedSizes[inst.mHeap.getIndex()].plus(inst.mSize);
+ deque.push(inst);
+ for (AhatInstance dominated : inst.mDominated) {
+ deque.push(dominated);
+ }
+ } else {
+ for (AhatInstance dominated : inst.mDominated) {
+ for (int i = 0; i < numHeaps; i++) {
+ inst.mRetainedSizes[i] = inst.mRetainedSizes[i].plus(dominated.mRetainedSizes[i]);
+ }
+ }
+ }
+ }
+ }
}
diff --git a/tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java b/tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java
index c6ad87fda5..2b3e056a1e 100644
--- a/tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java
+++ b/tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java
@@ -29,16 +29,16 @@ public class AhatPlaceHolderClassObj extends AhatClassObj {
baseline.setBaseline(this);
}
- @Override public long getSize() {
- return 0;
+ @Override public Size getSize() {
+ return Size.ZERO;
}
- @Override public long getRetainedSize(AhatHeap heap) {
- return 0;
+ @Override public Size getRetainedSize(AhatHeap heap) {
+ return Size.ZERO;
}
- @Override public long getTotalRetainedSize() {
- return 0;
+ @Override public Size getTotalRetainedSize() {
+ return Size.ZERO;
}
@Override public AhatHeap getHeap() {
diff --git a/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java b/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java
index 9412eae9a1..4aac80484d 100644
--- a/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java
+++ b/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java
@@ -29,16 +29,16 @@ public class AhatPlaceHolderInstance extends AhatInstance {
baseline.setBaseline(this);
}
- @Override public long getSize() {
- return 0;
+ @Override public Size getSize() {
+ return Size.ZERO;
}
- @Override public long getRetainedSize(AhatHeap heap) {
- return 0;
+ @Override public Size getRetainedSize(AhatHeap heap) {
+ return Size.ZERO;
}
- @Override public long getTotalRetainedSize() {
- return 0;
+ @Override public Size getTotalRetainedSize() {
+ return Size.ZERO;
}
@Override public AhatHeap getHeap() {
diff --git a/tools/ahat/src/heapdump/AhatSnapshot.java b/tools/ahat/src/heapdump/AhatSnapshot.java
index 20b85da763..35d6c8a315 100644
--- a/tools/ahat/src/heapdump/AhatSnapshot.java
+++ b/tools/ahat/src/heapdump/AhatSnapshot.java
@@ -82,8 +82,7 @@ public class AhatSnapshot implements Diffable<AhatSnapshot> {
Snapshot snapshot = Snapshot.createSnapshot(buffer, map);
snapshot.computeDominators();
- // Properly label the class of class objects in the perflib snapshot, and
- // count the total number of instances.
+ // Properly label the class of class objects in the perflib snapshot.
final ClassObj javaLangClass = snapshot.findClass("java.lang.Class");
if (javaLangClass != null) {
for (Heap heap : snapshot.getHeaps()) {
@@ -134,12 +133,19 @@ public class AhatSnapshot implements Diffable<AhatSnapshot> {
}
});
+ Map<Instance, Long> registeredNative = Perflib.getRegisteredNativeAllocations(snapshot);
+
// Initialize ahat snapshot and instances based on the perflib snapshot
// and instances.
for (AhatInstance ahat : mInstances) {
Instance inst = snapshot.findInstance(ahat.getId());
ahat.initialize(this, inst);
+ Long registeredNativeSize = registeredNative.get(inst);
+ if (registeredNativeSize != null) {
+ ahat.addRegisteredNativeSize(registeredNativeSize);
+ }
+
if (inst.getImmediateDominator() == Snapshot.SENTINEL_ROOT) {
mRooted.add(ahat);
}
@@ -166,6 +172,13 @@ public class AhatSnapshot implements Diffable<AhatSnapshot> {
}
}
snapshot.dispose();
+
+ // Compute the retained sizes of objects. We do this explicitly now rather
+ // than relying on the retained sizes computed by perflib so that
+ // registered native sizes are included.
+ for (AhatInstance inst : mRooted) {
+ AhatInstance.computeRetainedSize(inst, mHeaps.size());
+ }
}
/**
diff --git a/tools/ahat/src/heapdump/Perflib.java b/tools/ahat/src/heapdump/Perflib.java
new file mode 100644
index 0000000000..d0264a3b39
--- /dev/null
+++ b/tools/ahat/src/heapdump/Perflib.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat.heapdump;
+
+import com.android.tools.perflib.heap.ClassInstance;
+import com.android.tools.perflib.heap.ClassObj;
+import com.android.tools.perflib.heap.Instance;
+import com.android.tools.perflib.heap.Snapshot;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Collection of utilities that may be suitable to have in perflib instead of
+ * ahat.
+ */
+public class Perflib {
+ /**
+   * Return a map from each instance in the given snapshot that is tied to
+   * registered native allocations to its total registered native size.
+ */
+ public static Map<Instance, Long> getRegisteredNativeAllocations(Snapshot snapshot) {
+ Map<Instance, Long> allocs = new HashMap<Instance, Long>();
+ ClassObj cleanerClass = snapshot.findClass("sun.misc.Cleaner");
+ if (cleanerClass != null) {
+ for (Instance cleanerInst : cleanerClass.getInstancesList()) {
+ ClassInstance cleaner = (ClassInstance)cleanerInst;
+ Object referent = getField(cleaner, "referent");
+ if (referent instanceof Instance) {
+ Instance inst = (Instance)referent;
+ Object thunkValue = getField(cleaner, "thunk");
+ if (thunkValue instanceof ClassInstance) {
+ ClassInstance thunk = (ClassInstance)thunkValue;
+ ClassObj thunkClass = thunk.getClassObj();
+ String cleanerThunkClassName = "libcore.util.NativeAllocationRegistry$CleanerThunk";
+ if (thunkClass != null && cleanerThunkClassName.equals(thunkClass.getClassName())) {
+ for (ClassInstance.FieldValue thunkField : thunk.getValues()) {
+ if (thunkField.getValue() instanceof ClassInstance) {
+ ClassInstance registry = (ClassInstance)thunkField.getValue();
+ ClassObj registryClass = registry.getClassObj();
+ String registryClassName = "libcore.util.NativeAllocationRegistry";
+ if (registryClass != null
+ && registryClassName.equals(registryClass.getClassName())) {
+ Object sizeValue = getField(registry, "size");
+ if (sizeValue instanceof Long) {
+ long size = (Long)sizeValue;
+ if (size > 0) {
+ Long old = allocs.get(inst);
+ allocs.put(inst, old == null ? size : old + size);
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return allocs;
+ }
+
+ /**
+ * Helper function to read a single field from a perflib class instance.
+   * Returns null if the field is not found. Note there is no way to
+   * distinguish between a field that is not found and a field value of null.
+ */
+ private static Object getField(ClassInstance cls, String name) {
+ for (ClassInstance.FieldValue field : cls.getValues()) {
+ if (name.equals(field.getField().getName())) {
+ return field.getValue();
+ }
+ }
+ return null;
+ }
+}
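AhatSnapshot above is the only caller in this change; as a rough standalone sketch, assuming a perflib Snapshot has already been loaded into a variable named snapshot, the returned map can be summed directly:

    Map<Instance, Long> allocs = Perflib.getRegisteredNativeAllocations(snapshot);
    long totalRegisteredNative = 0;
    for (Long bytes : allocs.values()) {
      totalRegisteredNative += bytes;
    }
    System.out.println("registered native bytes: " + totalRegisteredNative);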
diff --git a/tools/ahat/src/heapdump/Site.java b/tools/ahat/src/heapdump/Site.java
index 738eaf0687..fdd4eea7b3 100644
--- a/tools/ahat/src/heapdump/Site.java
+++ b/tools/ahat/src/heapdump/Site.java
@@ -44,7 +44,7 @@ public class Site implements Diffable<Site> {
// The total size of objects allocated in this site (including child sites),
// organized by heap index. Heap indices outside the range of mSizesByHeap
// implicitly have size 0.
- private long[] mSizesByHeap;
+ private Size[] mSizesByHeap;
// List of child sites.
private List<Site> mChildren;
@@ -60,14 +60,18 @@ public class Site implements Diffable<Site> {
public AhatHeap heap;
public AhatClassObj classObj; // May be null.
public long numInstances;
- public long numBytes;
+ public Size numBytes;
private ObjectsInfo baseline;
- public ObjectsInfo(AhatHeap heap, AhatClassObj classObj, long numInstances, long numBytes) {
+ /**
+ * Construct a new, empty objects info for the given heap and class
+ * combination.
+ */
+ public ObjectsInfo(AhatHeap heap, AhatClassObj classObj) {
this.heap = heap;
this.classObj = classObj;
- this.numInstances = numInstances;
- this.numBytes = numBytes;
+ this.numInstances = 0;
+ this.numBytes = Size.ZERO;
this.baseline = this;
}
@@ -107,7 +111,7 @@ public class Site implements Diffable<Site> {
mLineNumber = line;
mId = id;
mDepth = depth;
- mSizesByHeap = new long[1];
+ mSizesByHeap = new Size[0];
mChildren = new ArrayList<Site>();
mObjects = new ArrayList<AhatInstance>();
mObjectsInfos = new ArrayList<ObjectsInfo>();
@@ -133,16 +137,20 @@ public class Site implements Diffable<Site> {
if (inst.isReachable()) {
AhatHeap heap = inst.getHeap();
if (heap.getIndex() >= site.mSizesByHeap.length) {
- long[] newSizes = new long[heap.getIndex() + 1];
+ Size[] newSizes = new Size[heap.getIndex() + 1];
for (int i = 0; i < site.mSizesByHeap.length; i++) {
newSizes[i] = site.mSizesByHeap[i];
}
+ for (int i = site.mSizesByHeap.length; i < heap.getIndex() + 1; i++) {
+ newSizes[i] = Size.ZERO;
+ }
site.mSizesByHeap = newSizes;
}
- site.mSizesByHeap[heap.getIndex()] += inst.getSize();
+ site.mSizesByHeap[heap.getIndex()]
+ = site.mSizesByHeap[heap.getIndex()].plus(inst.getSize());
info.numInstances++;
- info.numBytes += inst.getSize();
+ info.numBytes = info.numBytes.plus(inst.getSize());
}
if (depth > 0) {
@@ -172,9 +180,9 @@ public class Site implements Diffable<Site> {
}
// Get the size of a site for a specific heap.
- public long getSize(AhatHeap heap) {
+ public Size getSize(AhatHeap heap) {
int index = heap.getIndex();
- return index >= 0 && index < mSizesByHeap.length ? mSizesByHeap[index] : 0;
+ return index >= 0 && index < mSizesByHeap.length ? mSizesByHeap[index] : Size.ZERO;
}
/**
@@ -198,7 +206,7 @@ public class Site implements Diffable<Site> {
ObjectsInfo info = classToObjectsInfo.get(classObj);
if (info == null) {
- info = new ObjectsInfo(heap, classObj, 0, 0);
+ info = new ObjectsInfo(heap, classObj);
mObjectsInfos.add(info);
classToObjectsInfo.put(classObj, info);
}
@@ -210,10 +218,10 @@ public class Site implements Diffable<Site> {
}
// Get the combined size of the site for all heaps.
- public long getTotalSize() {
- long total = 0;
+ public Size getTotalSize() {
+ Size total = Size.ZERO;
for (int i = 0; i < mSizesByHeap.length; i++) {
- total += mSizesByHeap[i];
+ total = total.plus(mSizesByHeap[i]);
}
return total;
}
diff --git a/tools/ahat/src/heapdump/Size.java b/tools/ahat/src/heapdump/Size.java
new file mode 100644
index 0000000000..7c8db900df
--- /dev/null
+++ b/tools/ahat/src/heapdump/Size.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat.heapdump;
+
+/**
+ * The Size class is used to represent how much space an instance takes up.
+ *
+ * An abstraction is introduced rather than using a long directly in order to
+ * more easily keep track of the different components of the size. For
+ * example, some instances may have associated native, code, or graphics
+ * sizes.
+ *
+ * Size objects are immutable.
+ */
+public class Size {
+ private final long mJavaSize;
+ private final long mRegisteredNativeSize;
+
+  public static final Size ZERO = new Size(0, 0);
+
+ public Size(long javaSize, long registeredNativeSize) {
+ mJavaSize = javaSize;
+ mRegisteredNativeSize = registeredNativeSize;
+ }
+
+ public long getSize() {
+ return mJavaSize + mRegisteredNativeSize;
+ }
+
+ public long getJavaSize() {
+ return mJavaSize;
+ }
+
+ public long getRegisteredNativeSize() {
+ return mRegisteredNativeSize;
+ }
+
+ /**
+ * Returns true if all the fields of this size object are zero.
+ */
+ public boolean isZero() {
+ return mJavaSize == 0 && mRegisteredNativeSize == 0;
+ }
+
+ /**
+ * Return a new Size object that is the sum of this size and the other.
+ */
+ public Size plus(Size other) {
+ if (isZero()) {
+ return other;
+ } else if (other.isZero()) {
+ return this;
+ } else {
+ return new Size(mJavaSize + other.mJavaSize,
+ mRegisteredNativeSize + other.mRegisteredNativeSize);
+ }
+ }
+
+ /**
+ * Return a new Size object that has 'size' more registered native size than
+ * this Size object.
+ */
+ public Size plusRegisteredNativeSize(long size) {
+ return new Size(mJavaSize, mRegisteredNativeSize + size);
+ }
+
+ @Override public boolean equals(Object other) {
+ if (other instanceof Size) {
+ Size s = (Size)other;
+ return mJavaSize == s.mJavaSize && mRegisteredNativeSize == s.mRegisteredNativeSize;
+ }
+ return false;
+ }
+}
+
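A short illustration of the Size arithmetic defined above; the byte counts are arbitrary:

    Size a = new Size(100, 0);                // 100 bytes of Java size, no native size
    Size b = a.plusRegisteredNativeSize(40);  // returns a new object; 'a' is unchanged
    Size c = a.plus(b);                       // component-wise sum: (200, 40)
    // c.getJavaSize() == 200, c.getRegisteredNativeSize() == 40, c.getSize() == 240
    // Size.ZERO.plus(b) simply returns b, since Size objects are immutable.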
diff --git a/tools/ahat/src/heapdump/Sort.java b/tools/ahat/src/heapdump/Sort.java
index 93d147a49e..0745803817 100644
--- a/tools/ahat/src/heapdump/Sort.java
+++ b/tools/ahat/src/heapdump/Sort.java
@@ -32,6 +32,17 @@ import java.util.List;
*/
public class Sort {
/**
+ * Compare sizes by their total size.
+ * This sorts sizes from smaller total size to larger total size.
+ */
+ public static final Comparator<Size> SIZE_BY_SIZE = new Comparator<Size>() {
+ @Override
+ public int compare(Size a, Size b) {
+ return Long.compare(a.getSize(), b.getSize());
+ }
+ };
+
+ /**
* Compare instances by their total retained size.
* Different instances with the same total retained size are considered
* equal for the purposes of comparison.
@@ -41,7 +52,7 @@ public class Sort {
= new Comparator<AhatInstance>() {
@Override
public int compare(AhatInstance a, AhatInstance b) {
- return Long.compare(b.getTotalRetainedSize(), a.getTotalRetainedSize());
+ return SIZE_BY_SIZE.compare(b.getTotalRetainedSize(), a.getTotalRetainedSize());
}
};
@@ -60,7 +71,7 @@ public class Sort {
@Override
public int compare(AhatInstance a, AhatInstance b) {
- return Long.compare(b.getRetainedSize(mHeap), a.getRetainedSize(mHeap));
+ return SIZE_BY_SIZE.compare(b.getRetainedSize(mHeap), a.getRetainedSize(mHeap));
}
}
@@ -119,7 +130,7 @@ public class Sort {
@Override
public int compare(Site a, Site b) {
- return Long.compare(b.getSize(mHeap), a.getSize(mHeap));
+ return SIZE_BY_SIZE.compare(b.getSize(mHeap), a.getSize(mHeap));
}
}
@@ -130,7 +141,7 @@ public class Sort {
public static final Comparator<Site> SITE_BY_TOTAL_SIZE = new Comparator<Site>() {
@Override
public int compare(Site a, Site b) {
- return Long.compare(b.getTotalSize(), a.getTotalSize());
+ return SIZE_BY_SIZE.compare(b.getTotalSize(), a.getTotalSize());
}
};
@@ -158,7 +169,7 @@ public class Sort {
= new Comparator<Site.ObjectsInfo>() {
@Override
public int compare(Site.ObjectsInfo a, Site.ObjectsInfo b) {
- return Long.compare(b.numBytes, a.numBytes);
+ return SIZE_BY_SIZE.compare(b.numBytes, a.numBytes);
}
};
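Note that SIZE_BY_SIZE itself orders ascending; the comparators above swap their arguments to keep the existing largest-first behavior. Equivalently, a hypothetical List<Size> named sizes could be sorted either way:

    Collections.sort(sizes, Sort.SIZE_BY_SIZE);                           // ascending by total size
    Collections.sort(sizes, Collections.reverseOrder(Sort.SIZE_BY_SIZE)); // descending, as used above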
diff --git a/tools/ahat/src/manifest.txt b/tools/ahat/src/manifest.txt
index 20245f312d..c35ccf1cd3 100644
--- a/tools/ahat/src/manifest.txt
+++ b/tools/ahat/src/manifest.txt
@@ -1,4 +1,4 @@
Name: ahat/
Implementation-Title: ahat
-Implementation-Version: 1.1
+Implementation-Version: 1.2
Main-Class: com.android.ahat.Main
diff --git a/tools/ahat/test-dump/Main.java b/tools/ahat/test-dump/Main.java
index 7a05b1cb89..3d3de78255 100644
--- a/tools/ahat/test-dump/Main.java
+++ b/tools/ahat/test-dump/Main.java
@@ -20,6 +20,7 @@ import java.lang.ref.PhantomReference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.lang.ref.WeakReference;
+import libcore.util.NativeAllocationRegistry;
import org.apache.harmony.dalvik.ddmc.DdmVmInternal;
/**
@@ -98,6 +99,11 @@ public class Main {
bigArray[i] = (byte)((i*i) & 0xFF);
}
+ // 0x12345, 50000, and 0xABCDABCD are arbitrary values.
+ NativeAllocationRegistry registry = new NativeAllocationRegistry(
+ Main.class.getClassLoader(), 0x12345, 50000);
+ registry.registerNativeAllocation(anObject, 0xABCDABCD);
+
addedObject = baseline ? null : new AddedObject();
removedObject = baseline ? new RemovedObject() : null;
modifiedObject = new ModifiedObject();
diff --git a/tools/ahat/test/InstanceTest.java b/tools/ahat/test/InstanceTest.java
index 3a50150c0e..71b081c9a4 100644
--- a/tools/ahat/test/InstanceTest.java
+++ b/tools/ahat/test/InstanceTest.java
@@ -21,6 +21,7 @@ import com.android.ahat.heapdump.AhatHeap;
import com.android.ahat.heapdump.AhatInstance;
import com.android.ahat.heapdump.AhatSnapshot;
import com.android.ahat.heapdump.PathElement;
+import com.android.ahat.heapdump.Size;
import com.android.ahat.heapdump.Value;
import com.android.tools.perflib.heap.hprof.HprofClassDump;
import com.android.tools.perflib.heap.hprof.HprofConstant;
@@ -292,13 +293,13 @@ public class InstanceTest {
// allocated on, and should be 0 for all other heaps.
AhatInstance anObject = dump.getDumpedAhatInstance("anObject");
AhatSnapshot snapshot = dump.getAhatSnapshot();
- long size = anObject.getSize();
+ Size size = anObject.getSize();
assertEquals(size, anObject.getTotalRetainedSize());
assertEquals(size, anObject.getRetainedSize(anObject.getHeap()));
for (AhatHeap heap : snapshot.getHeaps()) {
if (!heap.equals(anObject.getHeap())) {
assertEquals(String.format("For heap '%s'", heap.getName()),
- 0, anObject.getRetainedSize(heap));
+ Size.ZERO, anObject.getRetainedSize(heap));
}
}
}
diff --git a/tools/ahat/test/NativeAllocationTest.java b/tools/ahat/test/NativeAllocationTest.java
new file mode 100644
index 0000000000..7436be8311
--- /dev/null
+++ b/tools/ahat/test/NativeAllocationTest.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat;
+
+import com.android.ahat.heapdump.AhatInstance;
+import com.android.ahat.heapdump.AhatSnapshot;
+import java.io.IOException;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class NativeAllocationTest {
+
+ @Test
+ public void nativeAllocation() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+
+ AhatSnapshot snapshot = dump.getAhatSnapshot();
+ AhatInstance referent = dump.getDumpedAhatInstance("anObject");
+ assertEquals(50000, referent.getSize().getRegisteredNativeSize());
+ }
+}
+
diff --git a/tools/ahat/test/Tests.java b/tools/ahat/test/Tests.java
index 2fd3286172..c7e9b1811b 100644
--- a/tools/ahat/test/Tests.java
+++ b/tools/ahat/test/Tests.java
@@ -24,6 +24,7 @@ public class Tests {
args = new String[]{
"com.android.ahat.DiffTest",
"com.android.ahat.InstanceTest",
+ "com.android.ahat.NativeAllocationTest",
"com.android.ahat.ObjectHandlerTest",
"com.android.ahat.OverviewHandlerTest",
"com.android.ahat.PerformanceTest",