Diffstat (limited to 'runtime/jit/jit_code_cache.h')
-rw-r--r-- | runtime/jit/jit_code_cache.h | 50 |
1 files changed, 22 insertions, 28 deletions
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 5202c2c37e..96fc7e2706 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -285,9 +285,11 @@ class JitCodeCache {
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
   void FreeLocked(JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::jit_lock_);
 
-  void IncreaseCodeCacheCapacity(Thread* self)
+  // Perform a collection on the code cache.
+  EXPORT void GarbageCollectCache(Thread* self)
       REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -421,20 +423,9 @@ class JitCodeCache {
                      Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // NO_THREAD_SAFETY_ANALYSIS because we may be called with the JIT lock held
-  // or not. The implementation of this method handles the two cases.
-  void AddZombieCode(ArtMethod* method, const void* code_ptr) NO_THREAD_SAFETY_ANALYSIS;
-
-  EXPORT void DoCollection(Thread* self)
-      REQUIRES(!Locks::jit_lock_);
-
  private:
   JitCodeCache();
 
-  void AddZombieCodeInternal(ArtMethod* method, const void* code_ptr)
-      REQUIRES(Locks::jit_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                           ArtMethod* method,
                                           const std::vector<uint32_t>& inline_cache_entries,
@@ -442,16 +433,21 @@ class JitCodeCache {
       REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
+  // The non-mutator lock version should be used if possible. This method will release then
+  // re-acquire the mutator lock.
+  void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
+      REQUIRES(Locks::jit_lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
+
   // If a collection is in progress, wait for it to finish. Return
   // whether the thread actually waited.
   bool WaitForPotentialCollectionToComplete(Thread* self)
-      REQUIRES(Locks::jit_lock_) REQUIRES_SHARED(!Locks::mutator_lock_);
-
-  // Notify all waiting threads that a collection is done.
-  void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);
+      REQUIRES(Locks::jit_lock_) REQUIRES(!Locks::mutator_lock_);
 
   // Remove CHA dependents and underlying allocations for entries in `method_headers`.
   void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(Locks::jit_lock_)
       REQUIRES(!Locks::cha_lock_);
 
   // Removes method from the cache. The caller must ensure that all threads
@@ -466,7 +462,8 @@ class JitCodeCache {
 
   // Free code and data allocations for `code_ptr`.
   void FreeCodeAndData(const void* code_ptr)
-      REQUIRES(Locks::jit_lock_);
+      REQUIRES(Locks::jit_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Number of bytes allocated in the code cache.
   size_t CodeCacheSize() REQUIRES(!Locks::jit_lock_);
@@ -480,9 +477,16 @@ class JitCodeCache {
   // Number of bytes allocated in the data cache.
   size_t DataCacheSizeLocked() REQUIRES(Locks::jit_lock_);
 
+  // Notify all waiting threads that a collection is done.
+  void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);
+
   // Return whether the code cache's capacity is at its maximum.
   bool IsAtMaxCapacity() const REQUIRES(Locks::jit_lock_);
 
+  void DoCollection(Thread* self)
+      REQUIRES(!Locks::jit_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   void RemoveUnmarkedCode(Thread* self)
       REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -559,28 +563,18 @@ class JitCodeCache {
   // ProfilingInfo objects we have allocated.
   SafeMap<ArtMethod*, ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);
 
-  // Zombie code and JNI methods to consider for collection.
-  std::set<const void*> zombie_code_ GUARDED_BY(Locks::jit_lock_);
-  std::set<ArtMethod*> zombie_jni_code_ GUARDED_BY(Locks::jit_lock_);
-
-  std::set<const void*> processed_zombie_code_ GUARDED_BY(Locks::jit_lock_);
-  std::set<ArtMethod*> processed_zombie_jni_code_ GUARDED_BY(Locks::jit_lock_);
-
   // Methods that the zygote has compiled and can be shared across processes
   // forked from the zygote.
   ZygoteMap zygote_map_;
 
   // -------------- JIT GC related data structures ----------------------- //
 
-  // Condition to wait on during collection and for accessing weak references in inline caches.
+  // Condition to wait on during collection.
   ConditionVariable lock_cond_ GUARDED_BY(Locks::jit_lock_);
 
   // Whether there is a code cache collection in progress.
   bool collection_in_progress_ GUARDED_BY(Locks::jit_lock_);
 
-  // Whether a GC task is already scheduled.
-  bool gc_task_scheduled_ GUARDED_BY(Locks::jit_lock_);
-
   // Bitmap for collecting code and data.
   std::unique_ptr<CodeCacheBitmap> live_bitmap_;
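
Note on the annotations touched by this change: REQUIRES, REQUIRES_SHARED, and GUARDED_BY are ART's wrappers around Clang's thread-safety-analysis attributes, so each hunk above is effectively tightening or relaxing the lock contract of a member. The stand-alone sketch below illustrates how such annotations behave under -Wthread-safety; it is not taken from the ART sources, and the Mutex/CounterCache/counter_lock_ names are hypothetical stand-ins for Locks::jit_lock_-style capabilities.

// Illustrative only: minimal Clang thread-safety annotations, compiled with
// clang++ -std=c++17 -Wthread-safety. All names here are hypothetical.
// (Negative requirements such as REQUIRES(!lock) are fully enforced only
// with -Wthread-safety-negative, but the syntax below compiles as-is.)
#include <mutex>

#define CAPABILITY(x)        __attribute__((capability(x)))
#define GUARDED_BY(x)        __attribute__((guarded_by(x)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

// A std::mutex wrapper the analysis can track as a lock "capability".
class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

class CounterCache {
 public:
  // Caller must already hold counter_lock_ (compare REQUIRES(Locks::jit_lock_)).
  void IncrementLocked() REQUIRES(counter_lock_) { value_++; }

  // Takes the lock itself, so callers must not hold it (compare REQUIRES(!Locks::jit_lock_)).
  void Increment() REQUIRES(!counter_lock_) {
    counter_lock_.Lock();
    IncrementLocked();
    counter_lock_.Unlock();
  }

 private:
  Mutex counter_lock_;
  int value_ GUARDED_BY(counter_lock_) = 0;  // Accesses are checked against the lock.
};

REQUIRES_SHARED corresponds to a capability held in shared (reader) mode, in ART the mutator lock, which is why the updated contracts for FreeCodeAndData and FreeAllMethodHeaders add it alongside the exclusive jit_lock_ requirement.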