Diffstat (limited to 'runtime')
-rw-r--r--  runtime/gc/heap.cc              8
-rw-r--r--  runtime/jit/jit_code_cache.cc  29
-rw-r--r--  runtime/jit/jit_code_cache.h   10
-rw-r--r--  runtime/jit/profiling_info.cc   4
-rw-r--r--  runtime/jit/profiling_info.h   30
5 files changed, 74 insertions, 7 deletions
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index faa3d3bc14..2e5b599940 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -59,6 +59,8 @@
 #include "heap-inl.h"
 #include "image.h"
 #include "intern_table.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
@@ -2668,6 +2670,12 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
     // permanantly disabled. b/17942071
     concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
   }
+
+  if ((gc_type == collector::kGcTypeFull) && runtime->UseJit()) {
+    // It's time to clear all inline caches, in case some classes can be unloaded.
+    runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self);
+  }
+
   CHECK(collector != nullptr)
       << "Could not find garbage collector with collector_type="
       << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 0b0f926ed1..050bb68336 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -297,6 +297,15 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
   }
 }
 
+void JitCodeCache::ClearGcRootsInInlineCaches(Thread* self) {
+  MutexLock mu(self, lock_);
+  for (ProfilingInfo* info : profiling_infos_) {
+    if (!info->IsInUseByCompiler()) {
+      info->ClearGcRootsInInlineCaches();
+    }
+  }
+}
+
 uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                           ArtMethod* method,
                                           const uint8_t* mapping_table,
@@ -679,7 +688,7 @@ void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
     // Also remove the saved entry point from the ProfilingInfo objects.
     for (ProfilingInfo* info : profiling_infos_) {
       const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-      if (!ContainsPc(ptr) && !info->IsMethodBeingCompiled()) {
+      if (!ContainsPc(ptr) && !info->IsInUseByCompiler()) {
         info->GetMethod()->SetProfilingInfo(nullptr);
       }
       info->SetSavedEntryPoint(nullptr);
@@ -731,7 +740,7 @@ void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
       // code cache collection.
       if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
         // We clear the inline caches as classes in it might be stalled.
-        info->ClearInlineCaches();
+        info->ClearGcRootsInInlineCaches();
         // Do a fence to make sure the clearing is seen before attaching to the method.
         QuasiAtomic::ThreadFenceRelease();
         info->GetMethod()->SetProfilingInfo(info);
@@ -919,6 +928,22 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr
   return true;
 }
 
+void JitCodeCache::NotifyInliningOf(ArtMethod* method, Thread* self) {
+  MutexLock mu(self, lock_);
+  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+  if (info != nullptr) {
+    info->IncrementInlineUse();
+  }
+}
+
+void JitCodeCache::DoneInlining(ArtMethod* method, Thread* self) {
+  MutexLock mu(self, lock_);
+  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+  if (info != nullptr) {
+    info->DecrementInlineUse();
+  }
+}
+
 void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED) {
   ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
   DCHECK(info->IsMethodBeingCompiled());
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 0bd4f7dd1b..113bebfa65 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -71,10 +71,18 @@ class JitCodeCache {
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
+  void NotifyInliningOf(ArtMethod* method, Thread* self)
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!lock_);
+
   void DoneCompiling(ArtMethod* method, Thread* self)
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
+  void DoneInlining(ArtMethod* method, Thread* self)
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!lock_);
+
   // Allocate and write code and its metadata to the code cache.
   uint8_t* CommitCode(Thread* self,
                       ArtMethod* method,
@@ -143,6 +151,8 @@ class JitCodeCache {
       REQUIRES(Locks::classlinker_classes_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
+
   // Create a 'ProfileInfo' for 'method'. If 'retry_allocation' is true,
   // will collect and retry if the first allocation is unsuccessful.
   ProfilingInfo* AddProfilingInfo(Thread* self,
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 3820592c4c..07c8051214 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -97,8 +97,8 @@ void ProfilingInfo::AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls) {
       }
     }
   }
-  // Unsuccessfull - cache is full, making it megamorphic.
-  DCHECK(cache->IsMegamorphic());
+  // Unsuccessful - cache is full, making it megamorphic. We do not DCHECK it though,
+  // as the garbage collector might clear the entries concurrently.
 }
 
 }  // namespace art
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index a8c056c7c9..73c1a1edb0 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -134,8 +134,27 @@ class ProfilingInfo {
     return saved_entry_point_;
   }
 
-  void ClearInlineCaches() {
-    memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
+  void ClearGcRootsInInlineCaches() {
+    for (size_t i = 0; i < number_of_inline_caches_; ++i) {
+      InlineCache* cache = &cache_[i];
+      memset(&cache->classes_[0],
+             0,
+             InlineCache::kIndividualCacheSize * sizeof(GcRoot<mirror::Class>));
+    }
+  }
+
+  void IncrementInlineUse() {
+    DCHECK_NE(current_inline_uses_, std::numeric_limits<uint16_t>::max());
+    current_inline_uses_++;
+  }
+
+  void DecrementInlineUse() {
+    DCHECK_GT(current_inline_uses_, 0);
+    current_inline_uses_--;
+  }
+
+  bool IsInUseByCompiler() const {
+    return IsMethodBeingCompiled() || (current_inline_uses_ > 0);
   }
 
 private:
@@ -143,8 +162,9 @@
       : number_of_inline_caches_(entries.size()),
         method_(method),
         is_method_being_compiled_(false),
+        current_inline_uses_(0),
         saved_entry_point_(nullptr) {
-    ClearInlineCaches();
+    memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
     for (size_t i = 0; i < number_of_inline_caches_; ++i) {
       cache_[i].dex_pc_ = entries[i];
     }
@@ -161,6 +181,10 @@
   // TODO: Make the JIT code cache lock global.
   bool is_method_being_compiled_;
 
+  // When the compiler inlines the method associated to this ProfilingInfo,
+  // it updates this counter so that the GC does not try to clear the inline caches.
+  uint16_t current_inline_uses_;
+
   // Entry point of the corresponding ArtMethod, while the JIT code cache
   // is poking for the liveness of compiled code.
   const void* saved_entry_point_;