author     2023-12-06 10:31:10 +0000
committer  2023-12-07 09:20:17 +0000
commit     fee0362d03e5ee1b0beaa4839e7f3653ed5ce72d (patch)
tree       af4d5634ac93669d644d174a380e9b9c2f2a784c /runtime/jit/jit_code_cache.cc
parent     c60b6f7754974809f7467a07d63475248274026b (diff)
Remove logic of polling liveness of compiled code.
Data shows the current logic has too many misses: it deletes baseline
compiled code that could still be useful.
Reduces jank when scrolling a Compose view for 20 seconds, averaged
over 50 runs:
- For Go Mokey:
  - Before: ~433 frames drawn / ~18.47% janky frames
  - After: ~485 frames drawn / ~17.02% janky frames
- For Pixel 8 Pro:
  - Before: ~2428 frames drawn / 1.20% janky frames
  - After: ~2433 frames drawn / 1.02% janky frames
Test: test.py
Change-Id: I06b24ce069be493394a1af5f7402f0a381e00dfb
Diffstat (limited to 'runtime/jit/jit_code_cache.cc')
-rw-r--r--  runtime/jit/jit_code_cache.cc  118
1 file changed, 21 insertions, 97 deletions
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2474399f59..8ca80131f9 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -261,7 +261,6 @@ JitCodeCache::JitCodeCache()
       zygote_map_(&shared_region_),
       lock_cond_("Jit code cache condition variable", *Locks::jit_lock_),
       collection_in_progress_(false),
-      last_collection_increased_code_cache_(false),
       garbage_collect_code_(true),
       number_of_baseline_compilations_(0),
       number_of_optimized_compilations_(0),
@@ -1097,23 +1096,6 @@ bool JitCodeCache::IsAtMaxCapacity() const {
   return private_region_.GetCurrentCapacity() == private_region_.GetMaxCapacity();
 }
 
-bool JitCodeCache::ShouldDoFullCollection() {
-  if (IsAtMaxCapacity()) {
-    // Always do a full collection when the code cache is full.
-    return true;
-  } else if (private_region_.GetCurrentCapacity() < GetReservedCapacity()) {
-    // Always do partial collection when the code cache size is below the reserved
-    // capacity.
-    return false;
-  } else if (last_collection_increased_code_cache_) {
-    // This time do a full collection.
-    return true;
-  } else {
-    // This time do a partial collection.
-    return false;
-  }
-}
-
 void JitCodeCache::GarbageCollectCache(Thread* self) {
   ScopedTrace trace(__FUNCTION__);
   // Wait for an existing collection, or let everyone know we are starting one.
@@ -1140,19 +1122,11 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
   {
     TimingLogger::ScopedTiming st("Code cache collection", &logger);
 
-    bool do_full_collection = false;
-    {
-      MutexLock mu(self, *Locks::jit_lock_);
-      do_full_collection = ShouldDoFullCollection();
-    }
-
-    VLOG(jit) << "Do "
-              << (do_full_collection ? "full" : "partial")
-              << " code cache collection, code="
+    VLOG(jit) << "Do code cache collection, code="
               << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
 
-    DoCollection(self, /* collect_profiling_info= */ do_full_collection);
+    DoCollection(self);
 
     VLOG(jit) << "After code cache collection, code="
              << PrettySize(CodeCacheSize())
@@ -1160,43 +1134,7 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
 
     {
       MutexLock mu(self, *Locks::jit_lock_);
-
-      // Increase the code cache only when we do partial collections.
-      // TODO: base this strategy on how full the code cache is?
-      if (do_full_collection) {
-        last_collection_increased_code_cache_ = false;
-      } else {
-        last_collection_increased_code_cache_ = true;
-        private_region_.IncreaseCodeCacheCapacity();
-      }
-
-      bool next_collection_will_be_full = ShouldDoFullCollection();
-
-      // Start polling the liveness of compiled code to prepare for the next full collection.
-      if (next_collection_will_be_full) {
-        ScopedDebugDisallowReadBarriers sddrb(self);
-        for (auto it : profiling_infos_) {
-          it.second->ResetCounter();
-        }
-
-        // Change entry points of native methods back to the GenericJNI entrypoint.
-        for (const auto& entry : jni_stubs_map_) {
-          const JniStubData& data = entry.second;
-          if (!data.IsCompiled() || IsInZygoteExecSpace(data.GetCode())) {
-            continue;
-          }
-          const OatQuickMethodHeader* method_header =
-              OatQuickMethodHeader::FromCodePointer(data.GetCode());
-          for (ArtMethod* method : data.GetMethods()) {
-            if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) {
-              // Don't call Instrumentation::UpdateMethodsCode(), same as for normal methods above.
-              // Make sure a single invocation of the GenericJNI trampoline tries to recompile.
-              method->SetHotCounter();
-              method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
-            }
-          }
-        }
-      }
+      private_region_.IncreaseCodeCacheCapacity();
       live_bitmap_.reset(nullptr);
       NotifyCollectionDone(self);
     }
@@ -1340,35 +1278,11 @@ void JitCodeCache::ResetHotnessCounter(ArtMethod* method, Thread* self) {
 }
 
-void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
+void JitCodeCache::DoCollection(Thread* self) {
   ScopedTrace trace(__FUNCTION__);
   {
     ScopedDebugDisallowReadBarriers sddrb(self);
     MutexLock mu(self, *Locks::jit_lock_);
-
-    // Update to interpreter the methods that have baseline entrypoints and whose baseline
-    // hotness count hasn't changed.
-    // Note that these methods may be in thread stack or concurrently revived
-    // between. That's OK, as the thread executing it will mark it.
-    uint16_t warmup_threshold = Runtime::Current()->GetJITOptions()->GetWarmupThreshold();
-    for (auto it : profiling_infos_) {
-      ProfilingInfo* info = it.second;
-      if (!info->CounterHasChanged()) {
-        const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-        if (ContainsPc(entry_point)) {
-          OatQuickMethodHeader* method_header =
-              OatQuickMethodHeader::FromEntryPoint(entry_point);
-          if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr())) {
-            info->GetMethod()->ResetCounter(warmup_threshold);
-            Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(
-                info->GetMethod(), /*aot_code=*/ nullptr);
-          }
-        }
-      }
-    }
-    // TODO: collect profiling info
-    // TODO: collect optimized code
-
     // Mark compiled code that are entrypoints of ArtMethods. Compiled code that is not
     // an entry point is either:
     // - an osr compiled code, that will be removed if not in a thread call stack.
@@ -1411,10 +1325,6 @@ void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
   // change. We do know they cannot change to a code cache entry that is not marked,
   // therefore we can safely remove those entries.
   RemoveUnmarkedCode(self);
-
-  if (collect_profiling_info) {
-    // TODO: Collect unused profiling infos.
-  }
 }
 
 OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
@@ -1824,7 +1734,22 @@ void JitCodeCache::InvalidateAllCompiledCode() {
   Runtime* runtime = Runtime::Current();
   ClassLinker* linker = runtime->GetClassLinker();
   instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
-  // TODO: Clear `jni_stubs_map_`?
+
+  // Change entry points of native methods back to the GenericJNI entrypoint.
+  for (const auto& entry : jni_stubs_map_) {
+    const JniStubData& data = entry.second;
+    if (!data.IsCompiled() || IsInZygoteExecSpace(data.GetCode())) {
+      continue;
+    }
+    const OatQuickMethodHeader* method_header =
+        OatQuickMethodHeader::FromCodePointer(data.GetCode());
+    for (ArtMethod* method : data.GetMethods()) {
+      if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) {
+        ClearMethodCounter(method, /*was_warm=*/true);
+        instr->InitializeMethodsCode(method, /*aot_code=*/ nullptr);
+      }
+    }
+  }
   for (const auto& entry : method_code_map_) {
     ArtMethod* meth = entry.second;
     // We were compiled, so we must be warm.
@@ -1843,8 +1768,7 @@
     if (entry.method->IsPreCompiled()) {
       entry.method->ClearPreCompiled();
    }
-    Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(entry.method,
-                                                                    /*aot_code=*/nullptr);
+    instr->InitializeMethodsCode(entry.method, /*aot_code=*/nullptr);
  }
  saved_compiled_methods_map_.clear();
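To make the removed behavior easier to follow, here is a minimal standalone sketch of the liveness-polling policy that the deleted `ShouldDoFullCollection()` / `ResetCounter()` path implemented. This is not ART code: the `Method` and `CodeCache` types and the method names are hypothetical stand-ins for ART's `ProfilingInfo` hotness counters, used only to illustrate how a still-useful baseline compilation could be discarded.

```cpp
// Simplified model (not ART): each collection resets per-method hotness
// counters; at the next full collection, a baseline-compiled method whose
// counter never moved is assumed dead and reverted to the interpreter.
#include <cstdint>
#include <iostream>
#include <vector>

struct Method {
  const char* name;
  uint16_t hotness = 0;           // bumped on every invocation
  bool baseline_compiled = true;  // has JIT baseline code
  bool uses_interpreter = false;
};

class CodeCache {
 public:
  explicit CodeCache(std::vector<Method>* methods) : methods_(methods) {}

  // Phase 1 of the removed policy: start "polling" liveness by resetting
  // every hotness counter (the role ProfilingInfo::ResetCounter() played).
  void StartLivenessPoll() {
    for (Method& m : *methods_) {
      m.hotness = 0;
    }
  }

  // Phase 2, at the next full collection: any baseline-compiled method whose
  // counter has not changed is downgraded, even if it would have been reused
  // moments later. That is the "miss" the commit message describes.
  void CollectUnusedBaselineCode() {
    for (Method& m : *methods_) {
      if (m.baseline_compiled && m.hotness == 0) {
        m.baseline_compiled = false;
        m.uses_interpreter = true;
      }
    }
  }

 private:
  std::vector<Method>* methods_;
};

int main() {
  std::vector<Method> methods = {{"onScroll"}, {"onDraw"}};
  CodeCache cache(&methods);

  cache.StartLivenessPoll();
  methods[0].hotness++;  // only onScroll happens to run between collections
  cache.CollectUnusedBaselineCode();

  for (const Method& m : methods) {
    std::cout << m.name << (m.uses_interpreter ? ": interpreter" : ": baseline")
              << '\n';
  }
  // onDraw loses its baseline code even though the very next frame may need it.
  return 0;
}
```

After this change, as the diff above shows, GarbageCollectCache no longer performs that downgrade pass: it always grows the cache via IncreaseCodeCacheCapacity(), and DoCollection only removes compiled code that is no longer an ArtMethod entry point or live on a thread stack.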