21 files changed, 659 insertions, 307 deletions
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc index d4359458d3..fda6376454 100644 --- a/libdexfile/dex/dex_file_verifier.cc +++ b/libdexfile/dex/dex_file_verifier.cc @@ -3056,7 +3056,7 @@ void DexFileVerifier::FindStringRangesForMethodNames() { return reinterpret_cast<const char*>(str_data_ptr); }; auto compare = [&get_string](const DexFile::StringId& lhs, const char* rhs) { - return strcmp(get_string(lhs), rhs) < 0; + return CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(get_string(lhs), rhs) < 0; }; // '=' follows '<' diff --git a/libdexfile/dex/dex_file_verifier_test.cc b/libdexfile/dex/dex_file_verifier_test.cc index c9bac0fef2..65448cabd1 100644 --- a/libdexfile/dex/dex_file_verifier_test.cc +++ b/libdexfile/dex/dex_file_verifier_test.cc @@ -176,6 +176,21 @@ TEST_F(DexFileVerifierTest, MethodId) { "Bad index for method flags verification"); } +TEST_F(DexFileVerifierTest, InitCachingWithUnicode) { + static const char kInitWithUnicode[] = + "ZGV4CjAzNQDhN60rgMnSK13MoRscTuD+NZe7f6rIkHAAAgAAcAAAAHhWNBIAAAAAAAAAAGwBAAAJ" + "AAAAcAAAAAMAAACUAAAAAQAAAKAAAAAAAAAAAAAAAAIAAACsAAAAAQAAALwAAAAkAQAA3AAAANwA" + "AADgAAAA5gAAAO4AAAD1AAAAAQEAABUBAAAgAQAAIwEAAAQAAAAFAAAABwAAAAcAAAACAAAAAAAA" + "AAAAAAACAAAAAQAAAAIAAAAAAAAAAAAAAAEAAAAAAAAABgAAAAAAAABgAQAAAAAAAAHAgAACwIDA" + "gAAGPGluaXQ+AAVIZWxsbwAKTFRlc3RTeW5jOwASTGphdmEvbGFuZy9PYmplY3Q7AAlNYWluLmph" + "dmEAAVYABVdvcmxkAAAAAAAAAAYABw4AAAAACgABAAEAAAAwAQAADAAAAHAQAQAJABoBAwAaAggA" + "GgMAABoEAQAOAAAAAQAAgIAEuAIAAAwAAAAAAAAAAQAAAAAAAAABAAAACQAAAHAAAAACAAAAAwAA" + "AJQAAAADAAAAAQAAAKAAAAAFAAAAAgAAAKwAAAAGAAAAAQAAALwAAAACIAAACQAAANwAAAADEAAA" + "AQAAACwBAAADIAAAAQAAADABAAABIAAAAQAAADgBAAAAIAAAAQAAAGABAAAAEAAAAQAAAGwBAAA="; + // Just ensure it verifies w/o modification. + VerifyModification(kInitWithUnicode, "init_with_unicode", [](DexFile*) {}, nullptr); +} + // Method flags test class generated from the following smali code. The declared-synchronized // flags are there to enforce a 3-byte uLEB128 encoding so we don't have to relayout // the code, but we need to remove them before doing tests. 
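Note: a minimal illustration (not ART code) of why the comparator above was changed. Dex string_ids are sorted by UTF-16 code point values, but the string data itself is stored as modified UTF-8, where U+0000 is encoded as the two bytes C0 80 instead of a single 00 byte. A byte-wise strcmp therefore disagrees with the dex sort order as soon as a string contains an embedded NUL (the kInitWithUnicode test above includes the strings "\u0000" and "\u0000\u0000" for exactly this reason), which can derail the binary search for the '<' and '>' delimiters around names like "<init>". The class name and sample strings below are made up for illustration; the real comparator is ART's CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;

// Sketch only: contrasts UTF-16 code point order (dex string_id order) with
// byte order of the modified UTF-8 encoding (what strcmp used to see).
public class Mutf8OrderDemo {
  // Modified UTF-8 bytes of s, via DataOutputStream.writeUTF (which emits
  // modified UTF-8), dropping its two-byte length prefix.
  static byte[] mutf8(String s) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    new DataOutputStream(bytes).writeUTF(s);
    return Arrays.copyOfRange(bytes.toByteArray(), 2, bytes.size());
  }

  // strcmp-style comparison: unsigned byte order of the encoded strings.
  static int byteOrder(byte[] a, byte[] b) {
    for (int i = 0; i < Math.min(a.length, b.length); i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) {
        return d;
      }
    }
    return a.length - b.length;
  }

  public static void main(String[] args) throws IOException {
    String nulString = "\u0000\u0000";  // string with embedded NULs, as in the test dex
    String delimiter = "<";             // the verifier searches for '<' ... '>'

    // UTF-16 code point order (dex string_id order): U+0000 sorts before '<'.
    System.out.println(nulString.compareTo(delimiter) < 0);                 // true
    // Byte order of modified UTF-8 (strcmp's view): C0 80 sorts after 3C.
    System.out.println(byteOrder(mutf8(nulString), mutf8(delimiter)) < 0);  // false
  }
}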
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc index b444cc789f..2f24d7ea3d 100644 --- a/openjdkjvmti/deopt_manager.cc +++ b/openjdkjvmti/deopt_manager.cc @@ -40,9 +40,7 @@ #include "dex/dex_file_annotations.h" #include "dex/modifiers.h" #include "events-inl.h" -#include "intrinsics_list.h" #include "jit/jit.h" -#include "jit/jit_code_cache.h" #include "jni/jni_internal.h" #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" @@ -86,13 +84,11 @@ DeoptManager::DeoptManager() deoptimization_condition_("JVMTI_DeoptimizationCondition", deoptimization_status_lock_), performing_deoptimization_(false), global_deopt_count_(0), - global_interpreter_deopt_count_(0), deopter_count_(0), breakpoint_status_lock_("JVMTI_BreakpointStatusLock", static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1)), inspection_callback_(this), - set_local_variable_called_(false), - already_disabled_intrinsics_(false) { } + set_local_variable_called_(false) { } void DeoptManager::Setup() { art::ScopedThreadStateChange stsc(art::Thread::Current(), @@ -163,18 +159,18 @@ bool DeoptManager::MethodHasBreakpointsLocked(art::ArtMethod* method) { return elem != breakpoint_status_.end() && elem->second != 0; } -void DeoptManager::RemoveDeoptimizeAllMethods(FullDeoptRequirement req) { +void DeoptManager::RemoveDeoptimizeAllMethods() { art::Thread* self = art::Thread::Current(); art::ScopedThreadSuspension sts(self, art::kSuspended); deoptimization_status_lock_.ExclusiveLock(self); - RemoveDeoptimizeAllMethodsLocked(self, req); + RemoveDeoptimizeAllMethodsLocked(self); } -void DeoptManager::AddDeoptimizeAllMethods(FullDeoptRequirement req) { +void DeoptManager::AddDeoptimizeAllMethods() { art::Thread* self = art::Thread::Current(); art::ScopedThreadSuspension sts(self, art::kSuspended); deoptimization_status_lock_.ExclusiveLock(self); - AddDeoptimizeAllMethodsLocked(self, req); + AddDeoptimizeAllMethodsLocked(self); } void DeoptManager::AddMethodBreakpoint(art::ArtMethod* method) { @@ -211,7 +207,7 @@ void DeoptManager::AddMethodBreakpoint(art::ArtMethod* method) { deoptimization_status_lock_.ExclusiveUnlock(self); return; } else if (is_default) { - AddDeoptimizeAllMethodsLocked(self, FullDeoptRequirement::kInterpreter); + AddDeoptimizeAllMethodsLocked(self); } else { PerformLimitedDeoptimization(self, method); } @@ -248,7 +244,7 @@ void DeoptManager::RemoveMethodBreakpoint(art::ArtMethod* method) { return; } else if (is_last_breakpoint) { if (UNLIKELY(is_default)) { - RemoveDeoptimizeAllMethodsLocked(self, FullDeoptRequirement::kInterpreter); + RemoveDeoptimizeAllMethodsLocked(self); } else { PerformLimitedUndeoptimization(self, method); } @@ -276,22 +272,13 @@ class ScopedDeoptimizationContext : public art::ValueObject { RELEASE(deopt->deoptimization_status_lock_) ACQUIRE(art::Locks::mutator_lock_) ACQUIRE(art::Roles::uninterruptible_) - : self_(self), - deopt_(deopt), - uninterruptible_cause_(nullptr), - jit_(art::Runtime::Current()->GetJit()) { + : self_(self), deopt_(deopt), uninterruptible_cause_(nullptr) { deopt_->WaitForDeoptimizationToFinishLocked(self_); DCHECK(!deopt->performing_deoptimization_) << "Already performing deoptimization on another thread!"; // Use performing_deoptimization_ to keep track of the lock. deopt_->performing_deoptimization_ = true; deopt_->deoptimization_status_lock_.Unlock(self_); - // Stop the jit. We might need to disable all intrinsics which needs the jit disabled and this - // is the only place we can do that. 
Since this isn't expected to be entered too often it should - // be fine to always stop it. - if (jit_ != nullptr) { - jit_->Stop(); - } art::Runtime::Current()->GetThreadList()->SuspendAll("JMVTI Deoptimizing methods", /*long_suspend*/ false); uninterruptible_cause_ = self_->StartAssertNoThreadSuspension("JVMTI deoptimizing methods"); @@ -304,10 +291,6 @@ class ScopedDeoptimizationContext : public art::ValueObject { self_->EndAssertNoThreadSuspension(uninterruptible_cause_); // Release the mutator lock. art::Runtime::Current()->GetThreadList()->ResumeAll(); - // Let the jit start again. - if (jit_ != nullptr) { - jit_->Start(); - } // Let other threads know it's fine to proceed. art::MutexLock lk(self_, deopt_->deoptimization_status_lock_); deopt_->performing_deoptimization_ = false; @@ -318,44 +301,22 @@ class ScopedDeoptimizationContext : public art::ValueObject { art::Thread* self_; DeoptManager* deopt_; const char* uninterruptible_cause_; - art::jit::Jit* jit_; }; -void DeoptManager::AddDeoptimizeAllMethodsLocked(art::Thread* self, FullDeoptRequirement req) { - DCHECK_GE(global_deopt_count_, global_interpreter_deopt_count_); +void DeoptManager::AddDeoptimizeAllMethodsLocked(art::Thread* self) { global_deopt_count_++; - if (req == FullDeoptRequirement::kInterpreter) { - global_interpreter_deopt_count_++; - } if (global_deopt_count_ == 1) { - PerformGlobalDeoptimization(self, - /*needs_interpreter*/ global_interpreter_deopt_count_ > 0, - /*disable_intrinsics*/ global_interpreter_deopt_count_ == 0); - } else if (req == FullDeoptRequirement::kInterpreter && global_interpreter_deopt_count_ == 1) { - // First kInterpreter request. - PerformGlobalDeoptimization(self, - /*needs_interpreter*/true, - /*disable_intrinsics*/false); + PerformGlobalDeoptimization(self); } else { WaitForDeoptimizationToFinish(self); } } -void DeoptManager::RemoveDeoptimizeAllMethodsLocked(art::Thread* self, FullDeoptRequirement req) { +void DeoptManager::RemoveDeoptimizeAllMethodsLocked(art::Thread* self) { DCHECK_GT(global_deopt_count_, 0u) << "Request to remove non-existent global deoptimization!"; - DCHECK_GE(global_deopt_count_, global_interpreter_deopt_count_); global_deopt_count_--; - if (req == FullDeoptRequirement::kInterpreter) { - global_interpreter_deopt_count_--; - } if (global_deopt_count_ == 0) { - PerformGlobalUndeoptimization(self, - /*still_needs_stubs*/ false, - /*disable_intrinsics*/ false); - } else if (req == FullDeoptRequirement::kInterpreter && global_interpreter_deopt_count_ == 0) { - PerformGlobalUndeoptimization(self, - /*still_needs_stubs*/ global_deopt_count_ > 0, - /*disable_intrinsics*/ global_deopt_count_ > 0); + PerformGlobalUndeoptimization(self); } else { WaitForDeoptimizationToFinish(self); } @@ -371,85 +332,18 @@ void DeoptManager::PerformLimitedUndeoptimization(art::Thread* self, art::ArtMet art::Runtime::Current()->GetInstrumentation()->Undeoptimize(method); } -void DeoptManager::PerformGlobalDeoptimization(art::Thread* self, - bool needs_interpreter, - bool disable_intrinsics) { +void DeoptManager::PerformGlobalDeoptimization(art::Thread* self) { ScopedDeoptimizationContext sdc(self, this); - art::Runtime::Current()->GetInstrumentation()->EnableMethodTracing( - kDeoptManagerInstrumentationKey, needs_interpreter); - MaybeDisableIntrinsics(disable_intrinsics); + art::Runtime::Current()->GetInstrumentation()->DeoptimizeEverything( + kDeoptManagerInstrumentationKey); } -void DeoptManager::PerformGlobalUndeoptimization(art::Thread* self, - bool still_needs_stubs, - bool 
disable_intrinsics) { +void DeoptManager::PerformGlobalUndeoptimization(art::Thread* self) { ScopedDeoptimizationContext sdc(self, this); - if (still_needs_stubs) { - art::Runtime::Current()->GetInstrumentation()->EnableMethodTracing( - kDeoptManagerInstrumentationKey, /*needs_interpreter*/false); - MaybeDisableIntrinsics(disable_intrinsics); - } else { - art::Runtime::Current()->GetInstrumentation()->DisableMethodTracing( - kDeoptManagerInstrumentationKey); - // We shouldn't care about intrinsics if we don't need tracing anymore. - DCHECK(!disable_intrinsics); - } -} - -static void DisableSingleIntrinsic(const char* class_name, - const char* method_name, - const char* signature) - REQUIRES(art::Locks::mutator_lock_, art::Roles::uninterruptible_) { - // Since these intrinsics are all loaded during runtime startup this cannot fail and will not - // suspend. - art::Thread* self = art::Thread::Current(); - art::ClassLinker* class_linker = art::Runtime::Current()->GetClassLinker(); - art::ObjPtr<art::mirror::Class> cls = class_linker->FindSystemClass(self, class_name); - - if (cls == nullptr) { - LOG(FATAL) << "Could not find class of intrinsic " - << class_name << "->" << method_name << signature; - } - - art::ArtMethod* method = cls->FindClassMethod(method_name, signature, art::kRuntimePointerSize); - if (method == nullptr || method->GetDeclaringClass() != cls) { - LOG(FATAL) << "Could not find method of intrinsic " - << class_name << "->" << method_name << signature; - } - - if (LIKELY(method->IsIntrinsic())) { - method->SetNotIntrinsic(); - } else { - LOG(WARNING) << method->PrettyMethod() << " was already marked as non-intrinsic!"; - } + art::Runtime::Current()->GetInstrumentation()->UndeoptimizeEverything( + kDeoptManagerInstrumentationKey); } -void DeoptManager::MaybeDisableIntrinsics(bool do_disable) { - if (!do_disable || already_disabled_intrinsics_) { - // Don't toggle intrinsics on and off. It will lead to too much purging of the jit and would - // require us to keep around the intrinsics status of all methods. - return; - } - already_disabled_intrinsics_ = true; - // First just mark all intrinsic methods as no longer intrinsics. -#define DISABLE_INTRINSIC(_1, _2, _3, _4, _5, decl_class_name, meth_name, meth_desc) \ - DisableSingleIntrinsic(decl_class_name, meth_name, meth_desc); - INTRINSICS_LIST(DISABLE_INTRINSIC); -#undef DISABLE_INTRINSIC - // Next tell the jit to throw away all of its code (since there might be intrinsic code in them. - // TODO it would be nice to be more selective. - art::jit::Jit* jit = art::Runtime::Current()->GetJit(); - if (jit != nullptr) { - jit->GetCodeCache()->ClearAllCompiledDexCode(); - } - art::MutexLock mu(art::Thread::Current(), *art::Locks::thread_list_lock_); - // Now make all threads go to interpreter. 
- art::Runtime::Current()->GetThreadList()->ForEach( - [](art::Thread* thr, void* ctx) REQUIRES(art::Locks::mutator_lock_) { - reinterpret_cast<DeoptManager*>(ctx)->DeoptimizeThread(thr); - }, - this); -} void DeoptManager::RemoveDeoptimizationRequester() { art::Thread* self = art::Thread::Current(); diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h index 1a13c0ff15..6e991dee3d 100644 --- a/openjdkjvmti/deopt_manager.h +++ b/openjdkjvmti/deopt_manager.h @@ -51,11 +51,6 @@ class Class; namespace openjdkjvmti { -enum class FullDeoptRequirement { - kStubs, - kInterpreter, -}; - class DeoptManager; struct JvmtiMethodInspectionCallback : public art::MethodInspectionCallback { @@ -99,11 +94,11 @@ class DeoptManager { REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_) REQUIRES_SHARED(art::Locks::mutator_lock_); - void AddDeoptimizeAllMethods(FullDeoptRequirement requirement) + void AddDeoptimizeAllMethods() REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_) REQUIRES_SHARED(art::Locks::mutator_lock_); - void RemoveDeoptimizeAllMethods(FullDeoptRequirement requirement) + void RemoveDeoptimizeAllMethods() REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_) REQUIRES_SHARED(art::Locks::mutator_lock_); @@ -137,23 +132,19 @@ class DeoptManager { void WaitForDeoptimizationToFinishLocked(art::Thread* self) REQUIRES(deoptimization_status_lock_, !art::Locks::mutator_lock_); - void AddDeoptimizeAllMethodsLocked(art::Thread* self, FullDeoptRequirement req) + void AddDeoptimizeAllMethodsLocked(art::Thread* self) RELEASE(deoptimization_status_lock_) REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_); - void RemoveDeoptimizeAllMethodsLocked(art::Thread* self, FullDeoptRequirement req) + void RemoveDeoptimizeAllMethodsLocked(art::Thread* self) RELEASE(deoptimization_status_lock_) REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_); - void PerformGlobalDeoptimization(art::Thread* self, - bool needs_interpreter, - bool disable_intrinsics) + void PerformGlobalDeoptimization(art::Thread* self) RELEASE(deoptimization_status_lock_) REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_); - void PerformGlobalUndeoptimization(art::Thread* self, - bool still_needs_stubs, - bool disable_intrinsics) + void PerformGlobalUndeoptimization(art::Thread* self) RELEASE(deoptimization_status_lock_) REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_); @@ -165,25 +156,15 @@ class DeoptManager { RELEASE(deoptimization_status_lock_) REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_); - // Disables intrinsics and purges the jit code cache if needed. - void MaybeDisableIntrinsics(bool do_disable) - REQUIRES(art::Locks::mutator_lock_, - !deoptimization_status_lock_, - art::Roles::uninterruptible_); - static constexpr const char* kDeoptManagerInstrumentationKey = "JVMTI_DeoptManager"; art::Mutex deoptimization_status_lock_ ACQUIRED_BEFORE(art::Locks::classlinker_classes_lock_); art::ConditionVariable deoptimization_condition_ GUARDED_BY(deoptimization_status_lock_); bool performing_deoptimization_ GUARDED_BY(deoptimization_status_lock_); - // Number of times we have gotten requests to deopt everything both requiring and not requiring - // interpreter. + // Number of times we have gotten requests to deopt everything. uint32_t global_deopt_count_ GUARDED_BY(deoptimization_status_lock_); - // Number of deopt-everything requests that require interpreter. 
- uint32_t global_interpreter_deopt_count_ GUARDED_BY(deoptimization_status_lock_); - // Number of users of deoptimization there currently are. uint32_t deopter_count_ GUARDED_BY(deoptimization_status_lock_); @@ -201,10 +182,6 @@ class DeoptManager { // OSR after this. std::atomic<bool> set_local_variable_called_; - // If we have already disabled intrinsics. Since doing this throws out all JIT code we really will - // only ever do it once and never undo it. - bool already_disabled_intrinsics_ GUARDED_BY(art::Locks::mutator_lock_); - // Helper for setting up/tearing-down for deoptimization. friend class ScopedDeoptimizationContext; }; diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc index 10a4923935..f71a5dc72d 100644 --- a/openjdkjvmti/events.cc +++ b/openjdkjvmti/events.cc @@ -888,29 +888,16 @@ static bool EventNeedsFullDeopt(ArtJvmtiEvent event) { case ArtJvmtiEvent::kBreakpoint: case ArtJvmtiEvent::kException: return false; - default: - return true; - } -} - -static FullDeoptRequirement GetFullDeoptRequirement(ArtJvmtiEvent event) { - switch (event) { - // TODO We should support more of these as Limited or at least do something to make them - // discriminate by thread. + // TODO We should support more of these or at least do something to make them discriminate by + // thread. case ArtJvmtiEvent::kMethodEntry: - case ArtJvmtiEvent::kMethodExit: - // We only need MethodEntered and MethodExited for these so we can use Stubs. We will need to - // disable intrinsics. - // TODO Offer a version of this without disabling intrinsics. - return FullDeoptRequirement::kStubs; case ArtJvmtiEvent::kExceptionCatch: + case ArtJvmtiEvent::kMethodExit: case ArtJvmtiEvent::kFieldModification: case ArtJvmtiEvent::kFieldAccess: case ArtJvmtiEvent::kSingleStep: - // NB If we ever make this runnable using stubs or some other method we will need to be careful - // that it doesn't require disabling intrinsics. case ArtJvmtiEvent::kFramePop: - return FullDeoptRequirement::kInterpreter; + return true; default: LOG(FATAL) << "Unexpected event type!"; UNREACHABLE(); @@ -920,18 +907,19 @@ static FullDeoptRequirement GetFullDeoptRequirement(ArtJvmtiEvent event) { void EventHandler::SetupTraceListener(JvmtiMethodTraceListener* listener, ArtJvmtiEvent event, bool enable) { + bool needs_full_deopt = EventNeedsFullDeopt(event); // Make sure we can deopt. { art::ScopedObjectAccess soa(art::Thread::Current()); DeoptManager* deopt_manager = DeoptManager::Get(); if (enable) { deopt_manager->AddDeoptimizationRequester(); - if (EventNeedsFullDeopt(event)) { - deopt_manager->AddDeoptimizeAllMethods(GetFullDeoptRequirement(event)); + if (needs_full_deopt) { + deopt_manager->AddDeoptimizeAllMethods(); } } else { - if (EventNeedsFullDeopt(event)) { - deopt_manager->RemoveDeoptimizeAllMethods(GetFullDeoptRequirement(event)); + if (needs_full_deopt) { + deopt_manager->RemoveDeoptimizeAllMethods(); } deopt_manager->RemoveDeoptimizationRequester(); } diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc index 86f06064ad..ed449b5433 100644 --- a/runtime/jit/jit.cc +++ b/runtime/jit/jit.cc @@ -768,20 +768,13 @@ void Jit::WaitForCompilationToFinish(Thread* self) { void Jit::Stop() { Thread* self = Thread::Current(); // TODO(ngeoffray): change API to not require calling WaitForCompilationToFinish twice. - // During shutdown and startup the thread-pool can be null. 
- if (GetThreadPool() == nullptr) { - return; - } WaitForCompilationToFinish(self); GetThreadPool()->StopWorkers(self); WaitForCompilationToFinish(self); } void Jit::Start() { - // During shutdown and startup the thread-pool can be null. - if (GetThreadPool() != nullptr) { - GetThreadPool()->StartWorkers(Thread::Current()); - } + GetThreadPool()->StartWorkers(Thread::Current()); } ScopedJitSuspend::ScopedJitSuspend() { diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc index 86582ef804..eeb35156b5 100644 --- a/runtime/jit/jit_code_cache.cc +++ b/runtime/jit/jit_code_cache.cc @@ -362,23 +362,10 @@ const void* JitCodeCache::GetJniStubCode(ArtMethod* method) { return nullptr; } -void JitCodeCache::ClearAllCompiledDexCode() { - MutexLock mu(Thread::Current(), lock_); - // Get rid of OSR code waiting to be put on a thread. - osr_code_map_.clear(); - - // We don't clear out or even touch method_code_map_ since that is what we use to go the other - // way, move from code currently-running to the method it's from. Getting rid of it would break - // the jit-gc, stack-walking and signal handling. Since we never look through it to go the other - // way (from method -> code) everything is fine. - - for (ProfilingInfo* p : profiling_infos_) { - p->SetSavedEntryPoint(nullptr); - } -} - const void* JitCodeCache::FindCompiledCodeForInstrumentation(ArtMethod* method) { - if (LIKELY(!GetGarbageCollectCode())) { + // If jit-gc is still on we use the SavedEntryPoint field for doing that and so cannot use it to + // find the instrumentation entrypoint. + if (LIKELY(GetGarbageCollectCode())) { return nullptr; } ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize); diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h index ee6111a430..49a19a18f1 100644 --- a/runtime/jit/jit_code_cache.h +++ b/runtime/jit/jit_code_cache.h @@ -215,8 +215,6 @@ class JitCodeCache { REQUIRES(!lock_) REQUIRES_SHARED(Locks::mutator_lock_); - void ClearAllCompiledDexCode() REQUIRES(!lock_, Locks::mutator_lock_); - void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array) REQUIRES(!lock_) REQUIRES_SHARED(Locks::mutator_lock_); diff --git a/runtime/trace.cc b/runtime/trace.cc index 292cac6d0a..1986eec353 100644 --- a/runtime/trace.cc +++ b/runtime/trace.cc @@ -433,7 +433,12 @@ void Trace::Start(std::unique_ptr<File>&& trace_file_in, instrumentation::Instrumentation::kMethodExited | instrumentation::Instrumentation::kMethodUnwind); // TODO: In full-PIC mode, we don't need to fully deopt. - runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey); + // TODO: We can only use trampoline entrypoints if we are java-debuggable since in that case + // we know that inlining and other problematic optimizations are disabled. We might just + // want to use the trampolines anyway since it is faster. It makes the story with disabling + // jit-gc more complex though. 
+ runtime->GetInstrumentation()->EnableMethodTracing( + kTracerInstrumentationKey, /*needs_interpreter*/!runtime->IsJavaDebuggable()); } } } diff --git a/test/knownfailures.json b/test/knownfailures.json index 9ba2b50cba..2b7a9b064f 100644 --- a/test/knownfailures.json +++ b/test/knownfailures.json @@ -193,6 +193,22 @@ "bug": "http://b/34369284" }, { + "tests": ["018-stack-overflow", + "107-int-math2", + "667-jit-jni-stub"], + "description": ["We run for long enough for jit to compile some of the methods ", + "which means it takes so long to finish the test that it will ", + "timeout. The timeout is due to having to walk the stack ", + "when entering and exiting methods due to the way the instrumentation ", + "trampoline is implemented."], + "variant": "debuggable & jit & trace | debuggable & jit & stream" + }, + { + "tests": "1935-get-set-current-frame-jit", + "description": ["Test expects that OSR works but tracing turns this feature off."], + "variant": "trace | stream" + }, + { "tests": "1940-ddms-ext", "description": ["Test expects to be able to start tracing but we cannot", "do that if tracing is already ongoing."], @@ -201,7 +217,7 @@ { "tests": "137-cfi", "description": ["This test unrolls and expects managed frames, but", - "tracing means we run the interpreter."], + "tracing means we run the interpreter or trampolines."], "variant": "trace | stream" }, { diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk index 2741a9247d..8c748172bb 100644 --- a/tools/ahat/Android.mk +++ b/tools/ahat/Android.mk @@ -129,6 +129,26 @@ $(AHAT_TEST_DUMP_BASE_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIE ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \ $(PRIVATE_AHAT_TEST_ART) -d --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base +# --- ahat-ri-test-dump.jar ------- +include $(CLEAR_VARS) +LOCAL_MODULE := ahat-ri-test-dump +LOCAL_MODULE_TAGS := tests +LOCAL_SRC_FILES := $(call all-java-files-under, src/ri-test-dump) +LOCAL_IS_HOST_MODULE := true +include $(BUILD_HOST_JAVA_LIBRARY) + +# Determine the location of the ri-test-dump.jar and ri-test-dump.hprof. +# These use variables set implicitly by the include of BUILD_JAVA_LIBRARY +# above. +AHAT_RI_TEST_DUMP_JAR := $(LOCAL_BUILT_MODULE) +AHAT_RI_TEST_DUMP_HPROF := $(intermediates.COMMON)/ri-test-dump.hprof + +# Run ahat-ri-test-dump.jar to generate ri-test-dump.hprof +$(AHAT_RI_TEST_DUMP_HPROF): PRIVATE_AHAT_RI_TEST_DUMP_JAR := $(AHAT_RI_TEST_DUMP_JAR) +$(AHAT_RI_TEST_DUMP_HPROF): $(AHAT_RI_TEST_DUMP_JAR) + rm -rf $@ + java -cp $(PRIVATE_AHAT_RI_TEST_DUMP_JAR) Main $@ + # --- ahat-tests.jar -------------- include $(CLEAR_VARS) LOCAL_SRC_FILES := $(call all-java-files-under, src/test) @@ -137,6 +157,7 @@ LOCAL_JAVA_RESOURCE_FILES := \ $(AHAT_TEST_DUMP_HPROF) \ $(AHAT_TEST_DUMP_BASE_HPROF) \ $(AHAT_TEST_DUMP_PROGUARD_MAP) \ + $(AHAT_RI_TEST_DUMP_HPROF) \ $(LOCAL_PATH)/etc/L.hprof \ $(LOCAL_PATH)/etc/O.hprof \ $(LOCAL_PATH)/etc/RI.hprof diff --git a/tools/ahat/etc/hprofdump.py b/tools/ahat/etc/hprofdump.py new file mode 100644 index 0000000000..1c4f21b159 --- /dev/null +++ b/tools/ahat/etc/hprofdump.py @@ -0,0 +1,331 @@ +# Copyright (C) 2018 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# usage: python hprofdump.py FILE +# Dumps a binary heap dump file to text, to facilitate debugging of heap +# dumps and heap dump viewers. + +import time +import struct +import sys + +filename = sys.argv[1] +hprof = open(filename, "rb") + +def readu1(hprof): + return struct.unpack('!B', hprof.read(1))[0] + +def readu2(hprof): + return struct.unpack('!H', hprof.read(2))[0] + +def readu4(hprof): + return struct.unpack('!I', hprof.read(4))[0] + +def readu8(hprof): + return struct.unpack('!Q', hprof.read(8))[0] + +def readN(n, hprof): + if n == 1: + return readu1(hprof) + if n == 2: + return readu2(hprof) + if n == 4: + return readu4(hprof) + if n == 8: + return readu8(hprof) + raise Exception("Unsupported size of readN: %d" % n) + +TY_OBJECT = 2 +TY_BOOLEAN = 4 +TY_CHAR = 5 +TY_FLOAT = 6 +TY_DOUBLE = 7 +TY_BYTE = 8 +TY_SHORT = 9 +TY_INT = 10 +TY_LONG = 11 + +def showty(ty): + if ty == TY_OBJECT: + return "Object" + if ty == TY_BOOLEAN: + return "boolean" + if ty == TY_CHAR: + return "char" + if ty == TY_FLOAT: + return "float" + if ty == TY_DOUBLE: + return "double" + if ty == TY_BYTE: + return "byte" + if ty == TY_SHORT: + return "short" + if ty == TY_INT: + return "int" + if ty == TY_LONG: + return "long" + raise Exception("Unsupported type %d" % ty) + +strs = { } +def showstr(id): + if id in strs: + return strs[id] + return "STR[@%x]" % id + +loaded = { } +def showloaded(serial): + if serial in loaded: + return showstr(loaded[serial]) + return "SERIAL[@%x]" % serial + +classobjs = { } +def showclassobj(id): + if id in classobjs: + return "%s @%x" % (showstr(classobjs[id]), id) + return "@%x" % id + + +# [u1]* An initial NULL terminate series of bytes representing the format name +# and version. +version = "" +c = hprof.read(1) +while (c != '\0'): + version += c + c = hprof.read(1) +print "Version: %s" % version + +# [u4] size of identifiers. 
+idsize = readu4(hprof) +print "ID Size: %d bytes" % idsize +def readID(hprof): + return readN(idsize, hprof) + +def valsize(ty): + if ty == TY_OBJECT: + return idsize + if ty == TY_BOOLEAN: + return 1 + if ty == TY_CHAR: + return 2 + if ty == TY_FLOAT: + return 4 + if ty == TY_DOUBLE: + return 8 + if ty == TY_BYTE: + return 1 + if ty == TY_SHORT: + return 2 + if ty == TY_INT: + return 4 + if ty == TY_LONG: + return 8 + raise Exception("Unsupported type %d" % ty) + +def readval(ty, hprof): + return readN(valsize(ty), hprof) + +# [u4] high word of number of ms since 0:00 GMT, 1/1/70 +# [u4] low word of number of ms since 0:00 GMT, 1/1/70 +timestamp = (readu4(hprof) << 32) | readu4(hprof) +s, ms = divmod(timestamp, 1000) +print "Date: %s.%03d" % (time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(s)), ms) + +while hprof.read(1): + hprof.seek(-1,1) + pos = hprof.tell() + tag = readu1(hprof) + time = readu4(hprof) + length = readu4(hprof) + if tag == 0x01: + id = readID(hprof) + string = hprof.read(length - idsize) + print "%d: STRING %x %s" % (pos, id, repr(string)) + strs[id] = string + elif tag == 0x02: + serial = readu4(hprof) + classobj = readID(hprof) + stack = readu4(hprof) + classname = readID(hprof) + loaded[serial] = classname + classobjs[classobj] = classname + print "LOAD CLASS #%d %s @%x stack=@%x" % (serial, showstr(classname), classobj, stack) + elif tag == 0x04: + id = readID(hprof) + method = readID(hprof) + sig = readID(hprof) + file = readID(hprof) + serial = readu4(hprof) + line = readu4(hprof); + print "STACK FRAME %d '%s' '%s' '%s' line=%d classserial=%d" % (id, showstr(method), showstr(sig), showstr(file), line, serial) + elif tag == 0x05: + serial = readu4(hprof) + print "STACK TRACE %d" % serial + thread = readu4(hprof) + frames = readu4(hprof) + hprof.read(idsize * frames) + elif tag == 0x06: + print "ALLOC SITES" + flags = readu2(hprof) + cutoff_ratio = readu4(hprof) + live_bytes = readu4(hprof) + live_insts = readu4(hprof) + alloc_bytes = readu8(hprof) + alloc_insts = readu8(hprof) + numsites = readu4(hprof) + while numsites > 0: + indicator = readu1(hprof) + class_serial = readu4(hprof) + stack = readu4(hprof) + live_bytes = readu4(hprof) + live_insts = readu4(hprof) + alloc_bytes = readu4(hprof) + alloc_insts = readu4(hprof) + numsites -= 1 + elif tag == 0x0A: + thread = readu4(hprof) + object = readID(hprof) + stack = readu4(hprof) + name = readID(hprof) + group_name = readID(hprof) + pgroup_name = readID(hprof) + print "START THREAD serial=%d" % thread + elif tag == 0x0B: + thread = readu4(hprof) + print "END THREAD" + elif tag == 0x0C or tag == 0x1C: + if tag == 0x0C: + print "HEAP DUMP" + else: + print "HEAP DUMP SEGMENT" + + while (length > 0): + subtag = readu1(hprof) ; length -= 1 + if subtag == 0xFF: + print " ROOT UNKNOWN" + objid = readID(hprof) ; length -= idsize + elif subtag == 0x01: + print " ROOT JNI GLOBAL" + objid = readID(hprof) ; length -= idsize + ref = readID(hprof) ; length -= idsize + elif subtag == 0x02: + print " ROOT JNI LOCAL" + objid = readID(hprof) ; length -= idsize + thread = readu4(hprof) ; length -= 4 + frame = readu4(hprof) ; length -= 4 + elif subtag == 0x03: + print " ROOT JAVA FRAME" + objid = readID(hprof) ; length -= idsize + serial = readu4(hprof) ; length -= 4 + frame = readu4(hprof) ; length -= 4 + elif subtag == 0x04: + objid = readID(hprof) ; length -= idsize + serial = readu4(hprof) ; length -= 4 + print " ROOT NATIVE STACK serial=%d" % serial + elif subtag == 0x05: + print " ROOT STICKY CLASS" + objid = readID(hprof) ; 
length -= idsize + elif subtag == 0x06: + print " ROOT THREAD BLOCK" + objid = readID(hprof) ; length -= idsize + thread = readu4(hprof) ; length -= 4 + elif subtag == 0x07: + print " ROOT MONITOR USED" + objid = readID(hprof) ; length -= idsize + elif subtag == 0x08: + threadid = readID(hprof) ; length -= idsize + serial = readu4(hprof) ; length -= 4 + stack = readu4(hprof) ; length -= 4 + print " ROOT THREAD OBJECT threadid=@%x serial=%d" % (threadid, serial) + elif subtag == 0x20: + print " CLASS DUMP" + print " class class object ID: %s" % showclassobj(readID(hprof)) ; length -= idsize + print " stack trace serial number: #%d" % readu4(hprof) ; length -= 4 + print " super class object ID: @%x" % readID(hprof) ; length -= idsize + print " class loader object ID: @%x" % readID(hprof) ; length -= idsize + print " signers object ID: @%x" % readID(hprof) ; length -= idsize + print " protection domain object ID: @%x" % readID(hprof) ; length -= idsize + print " reserved: @%x" % readID(hprof) ; length -= idsize + print " reserved: @%x" % readID(hprof) ; length -= idsize + print " instance size (in bytes): %d" % readu4(hprof) ; length -= 4 + print " constant pool:" + poolsize = readu2(hprof) ; length -= 2 + while poolsize > 0: + poolsize -= 1 + idx = readu2(hprof) ; length -= 2 + ty = readu1(hprof) ; length -= 1 + val = readval(ty, hprof) ; length -= valsize(ty) + print " %d %s 0x%x" % (idx, showty(ty), val) + numstatic = readu2(hprof) ; length -= 2 + print " static fields:" + while numstatic > 0: + numstatic -= 1 + nameid = readID(hprof) ; length -= idsize + ty = readu1(hprof) ; length -= 1 + val = readval(ty, hprof) ; length -= valsize(ty) + print " %s %s 0x%x" % (showstr(nameid), showty(ty), val) + numinst = readu2(hprof) ; length -= 2 + print " instance fields:" + while numinst > 0: + numinst -= 1 + nameid = readID(hprof) ; length -= idsize + ty = readu1(hprof) ; length -= 1 + print " %s %s" % (showstr(nameid), showty(ty)) + elif subtag == 0x21: + print " INSTANCE DUMP:" + print " object ID: @%x" % readID(hprof) ; length -= idsize + stack = readu4(hprof) ; length -= 4 + print " stack: %s" % stack + print " class object ID: %s" % showclassobj(readID(hprof)) ; length -= idsize + datalen = readu4(hprof) ; length -= 4 + print " %d bytes of instance data" % datalen + data = hprof.read(datalen) ; length -= datalen + elif subtag == 0x22: + print " OBJECT ARRAY DUMP:" + print " array object ID: @%x" % readID(hprof) ; length -= idsize + stack = readu4(hprof) ; length -= 4 + print " stack: %s" % stack + count = readu4(hprof) ; length -= 4 + print " array class object ID: %s" % showclassobj(readID(hprof)) ; length -= idsize + hprof.read(idsize * count) ; length -= (idsize * count) + elif subtag == 0x23: + print " PRIMITIVE ARRAY DUMP:" + print " array object ID: @%x" % readID(hprof) ; length -= idsize + stack = readu4(hprof) ; length -= 4 + count = readu4(hprof) ; length -= 4 + ty = readu1(hprof) ; length -= 1 + hprof.read(valsize(ty)*count) ; length -= (valsize(ty)*count) + elif subtag == 0x89: + print " HPROF_ROOT_INTERNED_STRING" + objid = readID(hprof) ; length -= idsize + elif subtag == 0x8b: + objid = readID(hprof) ; length -= idsize + print " HPROF ROOT DEBUGGER @%x (at offset %d)" % (objid, hprof.tell() - (idsize + 1)) + elif subtag == 0x8d: + objid = readID(hprof) ; length -= idsize + print " HPROF ROOT VM INTERNAL @%x" % objid + elif subtag == 0xfe: + hty = readu4(hprof) ; length -= 4 + hnameid = readID(hprof) ; length -= idsize + print " HPROF_HEAP_DUMP_INFO %s" % showstr(hnameid) + else: 
+ raise Exception("TODO: subtag %x" % subtag) + elif tag == 0x0E: + flags = readu4(hprof) + depth = readu2(hprof) + print "CONTROL SETTINGS %x %d" % (flags, depth) + elif tag == 0x2C: + print "HEAP DUMP END" + else: + raise Exception("TODO: TAG %x" % tag) + diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java index cf48d6d459..6c215ae808 100644 --- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java @@ -39,9 +39,11 @@ public class AhatArrayInstance extends AhatInstance { private List<Value> mValues; private byte[] mByteArray; // null if not a byte array. private char[] mCharArray; // null if not a char array. + private final int mRefSize; - AhatArrayInstance(long id) { + AhatArrayInstance(long id, int refSize) { super(id); + mRefSize = refSize; } /** @@ -188,7 +190,7 @@ public class AhatArrayInstance extends AhatInstance { return 0; } - return Value.getType(mValues.get(0)).size * getLength(); + return Value.getType(mValues.get(0)).size(mRefSize) * getLength(); } /** diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Instances.java b/tools/ahat/src/main/com/android/ahat/heapdump/Instances.java index 7bb19a244b..b0329245c7 100644 --- a/tools/ahat/src/main/com/android/ahat/heapdump/Instances.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Instances.java @@ -19,6 +19,7 @@ package com.android.ahat.heapdump; import java.util.Comparator; import java.util.Iterator; import java.util.List; +import java.util.function.Predicate; /** * A collection of instances that can be searched for by id. @@ -42,6 +43,25 @@ class Instances<T extends AhatInstance> implements Iterable<T> { return Long.compare(a.getId(), b.getId()); } }); + + // Ensure there is a one-to-one mapping between ids and instances by + // removing instances that have the same id as a previous instance. The + // heap dump really ought not to include multiple instances with the same + // id, but this happens on some older versions of ART and in some versions + // of the RI. + Predicate<T> isDuplicate = new Predicate<T>() { + private long previous = -1; + + @Override + public boolean test(T x) { + if (x.getId() == previous) { + return true; + } + previous = x.getId(); + return false; + } + }; + mInstances.removeIf(isDuplicate); } /** diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java index 597a260628..c18d8b120c 100644 --- a/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java @@ -50,12 +50,9 @@ import java.util.Map; * </ul> * <li> All classes are defined via a LOAD CLASS record before the first * heap dump segment. - * <li> The ID size used in the heap dump is 4 bytes. * </ul> */ public class Parser { - private static final int ID_SIZE = 4; - private HprofBuffer hprof = null; private ProguardMap map = new ProguardMap(); private Progress progress = new NullProgress(); @@ -152,6 +149,7 @@ public class Parser { private AhatSnapshot parseInternal() throws IOException, HprofFormatException { // Read, and mostly ignore, the hprof header info. 
+ int idSize; { StringBuilder format = new StringBuilder(); int b; @@ -159,8 +157,10 @@ public class Parser { format.append((char)b); } - int idSize = hprof.getU4(); - if (idSize != ID_SIZE) { + idSize = hprof.getU4(); + if (idSize == 8) { + hprof.setIdSize8(); + } else if (idSize != 4) { throw new HprofFormatException("Id size " + idSize + " not supported."); } int hightime = hprof.getU4(); @@ -177,8 +177,10 @@ public class Parser { HeapList heaps = new HeapList(); { // Note: Strings do not satisfy the DenseMap requirements on heap dumps - // from Android K. + // from Android K. And the RI seems to use string id 0 to refer to a + // null string? UnDenseMap<String> strings = new UnDenseMap<String>("String"); + strings.put(0, "???"); DenseMap<ProguardMap.Frame> frames = new DenseMap<ProguardMap.Frame>("Stack Frame"); DenseMap<Site> sites = new DenseMap<Site>("Stack Trace"); DenseMap<String> classNamesBySerial = new DenseMap<String>("Class Serial Number"); @@ -196,7 +198,7 @@ public class Parser { switch (tag) { case 0x01: { // STRING long id = hprof.getId(); - byte[] bytes = new byte[recordLength - ID_SIZE]; + byte[] bytes = new byte[recordLength - idSize]; hprof.getBytes(bytes); String str = new String(bytes, StandardCharsets.UTF_8); strings.put(id, str); @@ -208,7 +210,8 @@ public class Parser { long objectId = hprof.getId(); int stackSerialNumber = hprof.getU4(); long classNameStringId = hprof.getId(); - String obfClassName = strings.get(classNameStringId); + String rawClassName = strings.get(classNameStringId); + String obfClassName = normalizeClassName(rawClassName); String clrClassName = map.getClassName(obfClassName); AhatClassObj classObj = new AhatClassObj(objectId, clrClassName); classNamesBySerial.put(classSerialNumber, clrClassName); @@ -339,7 +342,7 @@ public class Parser { for (int i = 0; i < constantPoolSize; ++i) { int index = hprof.getU2(); Type type = hprof.getType(); - hprof.skip(type.size); + hprof.skip(type.size(idSize)); } int numStaticFields = hprof.getU2(); data.staticFields = new FieldValue[numStaticFields]; @@ -351,7 +354,7 @@ public class Parser { String clrName = map.getFieldName(clrClassName, obfName); Type type = hprof.getType(); Value value = hprof.getDeferredValue(type); - staticFieldsSize += type.size; + staticFieldsSize += type.size(idSize); data.staticFields[i] = new FieldValue(clrName, type, value); } AhatClassObj superClass = classById.get(superClassId); @@ -395,11 +398,11 @@ public class Parser { int length = hprof.getU4(); long classId = hprof.getId(); ObjArrayData data = new ObjArrayData(length, hprof.tell()); - hprof.skip(length * ID_SIZE); + hprof.skip(length * idSize); Site site = sites.get(stackSerialNumber); AhatClassObj classObj = classById.get(classId); - AhatArrayInstance obj = new AhatArrayInstance(objectId); + AhatArrayInstance obj = new AhatArrayInstance(objectId, idSize); obj.initialize(heaps.getCurrentHeap(), site, classObj); obj.setTemporaryUserData(data); instances.add(obj); @@ -419,7 +422,7 @@ public class Parser { "No class definition found for " + type.name + "[]"); } - AhatArrayInstance obj = new AhatArrayInstance(objectId); + AhatArrayInstance obj = new AhatArrayInstance(objectId, idSize); obj.initialize(heaps.getCurrentHeap(), site, classObj); instances.add(obj); switch (type) { @@ -890,7 +893,8 @@ public class Parser { * accessing data from an hprof file. 
*/ private static class HprofBuffer { - private ByteBuffer mBuffer; + private boolean mIdSize8; + private final ByteBuffer mBuffer; public HprofBuffer(File path) throws IOException { FileChannel channel = FileChannel.open(path.toPath(), StandardOpenOption.READ); @@ -902,6 +906,10 @@ public class Parser { mBuffer = buffer; } + public void setIdSize8() { + mIdSize8 = true; + } + public boolean hasRemaining() { return mBuffer.hasRemaining(); } @@ -948,7 +956,11 @@ public class Parser { } public long getId() { - return mBuffer.getInt() & 0xFFFFFFFFL; + if (mIdSize8) { + return mBuffer.getLong(); + } else { + return mBuffer.getInt() & 0xFFFFFFFFL; + } } public boolean getBool() { @@ -1049,4 +1061,44 @@ public class Parser { } } } + + // ART outputs class names such as: + // "java.lang.Class", "java.lang.Class[]", "byte", "byte[]" + // RI outputs class names such as: + // "java/lang/Class", '[Ljava/lang/Class;", N/A, "[B" + // + // This function converts all class names to match the ART format, which is + // assumed elsewhere in ahat. + private static String normalizeClassName(String name) throws HprofFormatException { + int numDimensions = 0; + while (name.startsWith("[")) { + numDimensions++; + name = name.substring(1); + } + + if (numDimensions > 0) { + // If there was an array type signature to start, then interpret the + // class name as a type signature. + switch (name.charAt(0)) { + case 'Z': name = "boolean"; break; + case 'B': name = "byte"; break; + case 'C': name = "char"; break; + case 'S': name = "short"; break; + case 'I': name = "int"; break; + case 'J': name = "long"; break; + case 'F': name = "float"; break; + case 'D': name = "double"; break; + case 'L': name = name.substring(1, name.length() - 1); break; + default: throw new HprofFormatException("Invalid type signature in class name: " + name); + } + } + + name = name.replace('/', '.'); + + for (int i = 0; i < numDimensions; ++i) { + name += "[]"; + } + + return name; + } } diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Type.java b/tools/ahat/src/main/com/android/ahat/heapdump/Type.java index ff79864505..61f4fabfa5 100644 --- a/tools/ahat/src/main/com/android/ahat/heapdump/Type.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Type.java @@ -23,7 +23,7 @@ public enum Type { /** * Type used for any Java object. */ - OBJECT("Object", 4), + OBJECT("Object", 0), // size is 0 to indicate it depends on the size of references /** * The primitive boolean type. @@ -73,7 +73,16 @@ public enum Type { /** * The number of bytes taken up by values of this type in the Java heap. */ - final int size; + private final int size; + + /** + * Get the number of bytes taken up by values of this type in the Java heap. + * + * @param refSize the size of object references as specified in the heap dump + */ + int size(int refSize) { + return size == 0 ? refSize : size; + } Type(String name, int size) { this.name = name; diff --git a/tools/ahat/src/ri-test-dump/Main.java b/tools/ahat/src/ri-test-dump/Main.java new file mode 100644 index 0000000000..0f5f4801f6 --- /dev/null +++ b/tools/ahat/src/ri-test-dump/Main.java @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import com.sun.management.HotSpotDiagnosticMXBean; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import javax.management.MBeanServer; + +/** + * Program used to create an RI heap dump for test purposes. + */ +public class Main { + + public static void main(String[] args) throws IOException { + if (args.length < 1) { + System.err.println("no output file specified"); + return; + } + String file = args[0]; + + // Take a heap dump of this process. + MBeanServer server = ManagementFactory.getPlatformMBeanServer(); + HotSpotDiagnosticMXBean bean = ManagementFactory.newPlatformMXBeanProxy(server, + "com.sun.management:type=HotSpotDiagnostic", HotSpotDiagnosticMXBean.class); + bean.dumpHeap(file, false); + } +} diff --git a/tools/ahat/src/test/com/android/ahat/AhatTestSuite.java b/tools/ahat/src/test/com/android/ahat/AhatTestSuite.java index bce1f053be..3aa52b5bb8 100644 --- a/tools/ahat/src/test/com/android/ahat/AhatTestSuite.java +++ b/tools/ahat/src/test/com/android/ahat/AhatTestSuite.java @@ -34,6 +34,7 @@ import org.junit.runners.Suite; ProguardMapTest.class, RootedHandlerTest.class, QueryTest.class, + RiTest.class, SiteHandlerTest.class, SiteTest.class }) diff --git a/tools/ahat/src/test/com/android/ahat/RiTest.java b/tools/ahat/src/test/com/android/ahat/RiTest.java new file mode 100644 index 0000000000..d46cafc1b4 --- /dev/null +++ b/tools/ahat/src/test/com/android/ahat/RiTest.java @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.android.ahat; + +import java.io.IOException; +import org.junit.Test; + +public class RiTest { + @Test + public void loadRi() throws IOException { + // Verify we can load a heap dump generated from the RI. 
+ TestDump.getTestDump("ri-test-dump.hprof", null, null); + } +} + diff --git a/tools/dexanalyze/dexanalyze_bytecode.cc b/tools/dexanalyze/dexanalyze_bytecode.cc index 7e9f177bcf..d18b0dfdbd 100644 --- a/tools/dexanalyze/dexanalyze_bytecode.cc +++ b/tools/dexanalyze/dexanalyze_bytecode.cc @@ -67,13 +67,12 @@ void NewRegisterInstructions::ProcessDexFiles( std::map<size_t, TypeLinkage> types; std::set<const void*> visited; for (ClassAccessor accessor : dex_file->GetClasses()) { - InstructionBuilder inst_builder(types, - /*count_types*/ true, - /*dump*/ false, - experiments_, - instruction_freq_); for (const ClassAccessor::Method& method : accessor.GetMethods()) { - inst_builder.Process(*dex_file, method.GetInstructionsAndData(), accessor.GetClassIdx()); + ProcessCodeItem(*dex_file, + method.GetInstructionsAndData(), + accessor.GetClassIdx(), + /*count_types*/ true, + types); } } // Reorder to get an index for each map instead of a count. @@ -85,11 +84,6 @@ void NewRegisterInstructions::ProcessDexFiles( } // Visit classes and convert code items. for (ClassAccessor accessor : dex_file->GetClasses()) { - InstructionBuilder inst_builder(types, - /*count_types*/ false, - dump_, - experiments_, - instruction_freq_); for (const ClassAccessor::Method& method : accessor.GetMethods()) { if (method.GetCodeItem() == nullptr || !visited.insert(method.GetCodeItem()).second) { continue; @@ -99,8 +93,13 @@ void NewRegisterInstructions::ProcessDexFiles( << "Processing " << dex_file->PrettyMethod(method.GetIndex(), true); } CodeItemDataAccessor data = method.GetInstructionsAndData(); - inst_builder.Process(*dex_file, data, accessor.GetClassIdx()); - std::vector<uint8_t> buffer = std::move(inst_builder.buffer_); + ProcessCodeItem(*dex_file, + data, + accessor.GetClassIdx(), + /*count_types*/ false, + types); + std::vector<uint8_t> buffer = std::move(buffer_); + buffer_.clear(); const size_t buffer_size = buffer.size(); dex_code_bytes_ += data.InsnsSizeInBytes(); output_size_ += buffer_size; @@ -114,8 +113,6 @@ void NewRegisterInstructions::ProcessDexFiles( deduped_size_ += buffer_size; } } - missing_field_idx_count_ += inst_builder.missing_field_idx_count_; - missing_method_idx_count_ += inst_builder.missing_method_idx_count_; } } } @@ -153,25 +150,16 @@ void NewRegisterInstructions::Dump(std::ostream& os, uint64_t total_size) const << Percent(top_instructions_savings, total_size) << "\n"; } -InstructionBuilder::InstructionBuilder(std::map<size_t, TypeLinkage>& types, - bool count_types, - bool dump, - uint64_t experiments, - std::map<std::vector<uint8_t>, size_t>& instruction_freq) - : types_(types), - count_types_(count_types), - dump_(dump), - experiments_(experiments), - instruction_freq_(instruction_freq) {} - -void InstructionBuilder::Process(const DexFile& dex_file, - const CodeItemDataAccessor& code_item, - dex::TypeIndex current_class_type) { - TypeLinkage& current_type = types_[current_class_type.index_]; +void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file, + const CodeItemDataAccessor& code_item, + dex::TypeIndex current_class_type, + bool count_types, + std::map<size_t, TypeLinkage>& types) { + TypeLinkage& current_type = types[current_class_type.index_]; bool skip_next = false; size_t last_start = 0u; for (auto inst = code_item.begin(); ; ++inst) { - if (!count_types_ && last_start != buffer_.size()) { + if (!count_types && last_start != buffer_.size()) { // Register the instruction blob. 
++instruction_freq_[std::vector<uint8_t>(buffer_.begin() + last_start, buffer_.end())]; last_start = buffer_.size(); @@ -223,21 +211,21 @@ void InstructionBuilder::Process(const DexFile& dex_file, if (Enabled(kExperimentInstanceFieldSelf) && first_arg_reg == receiver && holder_type == current_class_type) { - if (count_types_) { + if (count_types) { ++current_type.fields_.FindOrAdd(dex_field_idx)->second; } else { - uint32_t field_idx = types_[holder_type.index_].fields_.Get(dex_field_idx); + uint32_t field_idx = types[holder_type.index_].fields_.Get(dex_field_idx); ExtendPrefix(&out_reg, &field_idx); CHECK(InstNibbles(new_opcode, {out_reg, field_idx})); continue; } } else if (Enabled(kExperimentInstanceField)) { - if (count_types_) { + if (count_types) { ++current_type.types_.FindOrAdd(holder_type.index_)->second; - ++types_[holder_type.index_].fields_.FindOrAdd(dex_field_idx)->second; + ++types[holder_type.index_].fields_.FindOrAdd(dex_field_idx)->second; } else { uint32_t type_idx = current_type.types_.Get(holder_type.index_); - uint32_t field_idx = types_[holder_type.index_].fields_.Get(dex_field_idx); + uint32_t field_idx = types[holder_type.index_].fields_.Get(dex_field_idx); ExtendPrefix(&type_idx, &field_idx); CHECK(InstNibbles(new_opcode, {out_reg, receiver, type_idx, field_idx})); continue; @@ -252,7 +240,7 @@ void InstructionBuilder::Process(const DexFile& dex_file, uint32_t out_reg = is_jumbo ? inst->VRegA_31c() : inst->VRegA_21c(); if (Enabled(kExperimentString)) { new_opcode = Instruction::CONST_STRING; - if (count_types_) { + if (count_types) { ++current_type.strings_.FindOrAdd(str_idx)->second; } else { uint32_t idx = current_type.strings_.Get(str_idx); @@ -283,22 +271,22 @@ void InstructionBuilder::Process(const DexFile& dex_file, dex::TypeIndex holder_type = dex_file.GetFieldId(dex_field_idx).class_idx_; if (Enabled(kExperimentStaticField)) { if (holder_type == current_class_type) { - if (count_types_) { - ++types_[holder_type.index_].fields_.FindOrAdd(dex_field_idx)->second; + if (count_types) { + ++types[holder_type.index_].fields_.FindOrAdd(dex_field_idx)->second; } else { - uint32_t field_idx = types_[holder_type.index_].fields_.Get(dex_field_idx); + uint32_t field_idx = types[holder_type.index_].fields_.Get(dex_field_idx); ExtendPrefix(&out_reg, &field_idx); if (InstNibbles(new_opcode, {out_reg, field_idx})) { continue; } } } else { - if (count_types_) { - ++types_[current_class_type.index_].types_.FindOrAdd(holder_type.index_)->second; - ++types_[holder_type.index_].fields_.FindOrAdd(dex_field_idx)->second; + if (count_types) { + ++types[current_class_type.index_].types_.FindOrAdd(holder_type.index_)->second; + ++types[holder_type.index_].fields_.FindOrAdd(dex_field_idx)->second; } else { uint32_t type_idx = current_type.types_.Get(holder_type.index_); - uint32_t field_idx = types_[holder_type.index_].fields_.Get(dex_field_idx); + uint32_t field_idx = types[holder_type.index_].fields_.Get(dex_field_idx); ExtendPrefix(&type_idx, &field_idx); if (InstNibbles(new_opcode, {out_reg >> 4, out_reg & 0xF, type_idx, field_idx})) { continue; @@ -318,9 +306,9 @@ void InstructionBuilder::Process(const DexFile& dex_file, const DexFile::MethodId& method = dex_file.GetMethodId(method_idx); const dex::TypeIndex receiver_type = method.class_idx_; if (Enabled(kExperimentInvoke)) { - if (count_types_) { + if (count_types) { ++current_type.types_.FindOrAdd(receiver_type.index_)->second; - ++types_[receiver_type.index_].methods_.FindOrAdd(method_idx)->second; + 
++types[receiver_type.index_].methods_.FindOrAdd(method_idx)->second; } else { uint32_t args[6] = {}; uint32_t arg_count = inst->GetVarArgs(args); @@ -340,7 +328,7 @@ void InstructionBuilder::Process(const DexFile& dex_file, bool result = false; uint32_t type_idx = current_type.types_.Get(receiver_type.index_); - uint32_t local_idx = types_[receiver_type.index_].methods_.Get(method_idx); + uint32_t local_idx = types[receiver_type.index_].methods_.Get(method_idx); ExtendPrefix(&type_idx, &local_idx); ExtendPrefix(&dest_reg, &local_idx); if (arg_count == 0) { @@ -373,7 +361,7 @@ void InstructionBuilder::Process(const DexFile& dex_file, case Instruction::IF_NEZ: { uint32_t reg = inst->VRegA_21t(); int16_t offset = inst->VRegB_21t(); - if (!count_types_ && + if (!count_types && Enabled(kExperimentSmallIf) && InstNibbles(opcode, {reg, static_cast<uint16_t>(offset)})) { continue; @@ -384,7 +372,7 @@ void InstructionBuilder::Process(const DexFile& dex_file, uint32_t type_idx = inst->VRegC_22c(); uint32_t in_reg = inst->VRegB_22c(); uint32_t out_reg = inst->VRegA_22c(); - if (count_types_) { + if (count_types) { ++current_type.types_.FindOrAdd(type_idx)->second; } else { uint32_t local_type = current_type.types_.Get(type_idx); @@ -398,7 +386,7 @@ void InstructionBuilder::Process(const DexFile& dex_file, uint32_t len_reg = inst->VRegB_22c(); uint32_t type_idx = inst->VRegC_22c(); uint32_t out_reg = inst->VRegA_22c(); - if (count_types_) { + if (count_types) { ++current_type.types_.FindOrAdd(type_idx)->second; } else { uint32_t local_type = current_type.types_.Get(type_idx); @@ -414,7 +402,7 @@ void InstructionBuilder::Process(const DexFile& dex_file, uint32_t type_idx = inst->VRegB_21c(); uint32_t out_reg = inst->VRegA_21c(); if (Enabled(kExperimentLocalType)) { - if (count_types_) { + if (count_types) { ++current_type.types_.FindOrAdd(type_idx)->second; } else { bool next_is_init = false; @@ -445,7 +433,7 @@ void InstructionBuilder::Process(const DexFile& dex_file, case Instruction::RETURN_OBJECT: case Instruction::RETURN_WIDE: case Instruction::RETURN_VOID: { - if (!count_types_ && Enabled(kExperimentReturn)) { + if (!count_types && Enabled(kExperimentReturn)) { if (opcode == Instruction::RETURN_VOID || inst->VRegA_11x() == 0) { if (InstNibbles(opcode, {})) { continue; @@ -457,7 +445,7 @@ void InstructionBuilder::Process(const DexFile& dex_file, default: break; } - if (!count_types_) { + if (!count_types) { Add(new_opcode, inst.Inst()); } } @@ -468,13 +456,13 @@ void InstructionBuilder::Process(const DexFile& dex_file, } } -void InstructionBuilder::Add(Instruction::Code opcode, const Instruction& inst) { +void NewRegisterInstructions::Add(Instruction::Code opcode, const Instruction& inst) { const uint8_t* start = reinterpret_cast<const uint8_t*>(&inst); buffer_.push_back(opcode); buffer_.insert(buffer_.end(), start + 1, start + 2 * inst.SizeInCodeUnits()); } -void InstructionBuilder::ExtendPrefix(uint32_t* value1, uint32_t* value2) { +void NewRegisterInstructions::ExtendPrefix(uint32_t* value1, uint32_t* value2) { if (*value1 < 16 && *value2 < 16) { return; } @@ -504,7 +492,7 @@ void InstructionBuilder::ExtendPrefix(uint32_t* value1, uint32_t* value2) { *value2 &= 0XF; } -bool InstructionBuilder::InstNibblesAndIndex(uint8_t opcode, +bool NewRegisterInstructions::InstNibblesAndIndex(uint8_t opcode, uint16_t idx, const std::vector<uint32_t>& args) { if (!InstNibbles(opcode, args)) { @@ -515,7 +503,7 @@ bool InstructionBuilder::InstNibblesAndIndex(uint8_t opcode, return true; } -bool 
InstructionBuilder::InstNibbles(uint8_t opcode, const std::vector<uint32_t>& args) { +bool NewRegisterInstructions::InstNibbles(uint8_t opcode, const std::vector<uint32_t>& args) { if (dump_) { std::cout << " ==> " << Instruction::Name(static_cast<Instruction::Code>(opcode)) << " "; for (int v : args) { diff --git a/tools/dexanalyze/dexanalyze_bytecode.h b/tools/dexanalyze/dexanalyze_bytecode.h index e7c5e7b572..9ea819bec2 100644 --- a/tools/dexanalyze/dexanalyze_bytecode.h +++ b/tools/dexanalyze/dexanalyze_bytecode.h @@ -51,16 +51,17 @@ struct TypeLinkage { SafeMap<size_t, size_t> strings_; }; -class InstructionBuilder { +class NewRegisterInstructions : public Experiment { public: - InstructionBuilder(std::map<size_t, TypeLinkage>& types, - bool count_types, - bool dump, - uint64_t experiments, - std::map<std::vector<uint8_t>, size_t>& instruction_freq); - void Process(const DexFile& dex_file, - const CodeItemDataAccessor& code_item, - dex::TypeIndex current_class_type); + explicit NewRegisterInstructions(uint64_t experiments) : experiments_(experiments) {} + void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files); + void Dump(std::ostream& os, uint64_t total_size) const; + + void ProcessCodeItem(const DexFile& dex_file, + const CodeItemDataAccessor& code_item, + dex::TypeIndex current_class_type, + bool count_types, + std::map<size_t, TypeLinkage>& types); void Add(Instruction::Code opcode, const Instruction& inst); bool InstNibblesAndIndex(uint8_t opcode, uint16_t idx, const std::vector<uint32_t>& args); bool InstNibbles(uint8_t opcode, const std::vector<uint32_t>& args); @@ -69,25 +70,8 @@ class InstructionBuilder { return experiments_ & (1u << static_cast<uint64_t>(experiment)); } - size_t alignment_ = 1u; - std::vector<uint8_t> buffer_; - // Global index -> local index maps. - std::map<size_t, TypeLinkage>& types_; - uint64_t missing_field_idx_count_ = 0u; - uint64_t missing_method_idx_count_ = 0u; - const bool count_types_; - const bool dump_; - uint64_t experiments_ = std::numeric_limits<uint64_t>::max(); - std::map<std::vector<uint8_t>, size_t>& instruction_freq_; -}; - -class NewRegisterInstructions : public Experiment { - public: - explicit NewRegisterInstructions(uint64_t experiments) : experiments_(experiments) {} - void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files); - void Dump(std::ostream& os, uint64_t total_size) const; - private: + size_t alignment_ = 1u; uint64_t output_size_ = 0u; uint64_t deduped_size_ = 0u; uint64_t dex_code_bytes_ = 0u; @@ -95,6 +79,8 @@ class NewRegisterInstructions : public Experiment { uint64_t missing_method_idx_count_ = 0u; uint64_t experiments_ = std::numeric_limits<uint64_t>::max(); std::map<std::vector<uint8_t>, size_t> instruction_freq_; + // Output instruction buffer. + std::vector<uint8_t> buffer_; }; } // namespace dexanalyze |
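Closing note on the ahat changes earlier in this patch: the new normalizeClassName in Parser.java reconciles RI-style heap-dump class names ("java/lang/Class", "[Ljava/lang/Class;", "[B") with the ART-style names the rest of ahat assumes ("java.lang.Class", "java.lang.Class[]", "byte[]"). The standalone sketch below mirrors that conversion so the mapping is easy to see; the class name ClassNameNormalizer and the exception type are illustrative and not part of ahat's API.

// Sketch of the class-name normalization added to ahat's Parser
// (names here are illustrative, not the actual ahat code).
public class ClassNameNormalizer {
  static String normalize(String name) {
    int dims = 0;
    while (name.startsWith("[")) {  // RI array names use type-signature syntax
      dims++;
      name = name.substring(1);
    }
    if (dims > 0) {
      // Interpret the remainder as a type signature, as the patch does.
      switch (name.charAt(0)) {
        case 'Z': name = "boolean"; break;
        case 'B': name = "byte"; break;
        case 'C': name = "char"; break;
        case 'S': name = "short"; break;
        case 'I': name = "int"; break;
        case 'J': name = "long"; break;
        case 'F': name = "float"; break;
        case 'D': name = "double"; break;
        case 'L': name = name.substring(1, name.length() - 1); break;
        default: throw new IllegalArgumentException("Bad signature: " + name);
      }
    }
    name = name.replace('/', '.');  // RI uses '/' package separators
    for (int i = 0; i < dims; i++) {
      name += "[]";                 // ART spells arrays with trailing []
    }
    return name;
  }

  public static void main(String[] args) {
    System.out.println(normalize("java/lang/Class"));     // java.lang.Class
    System.out.println(normalize("[Ljava/lang/Class;"));  // java.lang.Class[]
    System.out.println(normalize("[B"));                   // byte[]
    System.out.println(normalize("[[I"));                  // int[][]
  }
}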