author Nicolas Geoffray <ngeoffray@google.com> 2024-04-17 15:19:55 +0100
committer Nicolas Geoffray <ngeoffray@google.com> 2024-04-18 10:25:36 +0000
commit 16567ac3d6925c0b84c819f2e475c2cadbde5dc3 (patch)
tree 75e41b7b4da978a857400410a34a06d2271322b2 /runtime/jit/jit_code_cache.h
parent 36aba6488cd335f651161be607ac0af240faddc8 (diff)
Reland "Revamp JIT GC."
This reverts commit de8e6689cb3f829b73e921d843cfa38bbfe996bf.

Reason for revert:
- Adjust test for new behavior
- Enter MarkCompiledCodeOnThreadStacks with the mutator lock, as expected
  by the checkpoint.

Change-Id: Ic9fca3e5fd04da62081701f4deccbc68bee56c2f
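[Editor's note] The second revert reason reflects a real locking contract: a checkpoint runs code on behalf of every thread, and the checkpoint machinery expects the dispatching thread to already hold the mutator lock in shared mode. Below is a minimal standalone sketch of that contract, using std::shared_mutex as a stand-in for Locks::mutator_lock_; the function bodies and names are illustrative assumptions, not the runtime's actual code.

    #include <shared_mutex>

    std::shared_mutex mutator_lock;  // stand-in for Locks::mutator_lock_

    // Models the checkpoint body: it may only run while the dispatching
    // thread holds the mutator lock shared, so stacks can be walked safely.
    void MarkCodeOnStackCheckpoint() {
      // In the real checkpoint: visit a thread's stack frames and mark any
      // JIT-compiled code found there as live in the code cache bitmap.
    }

    // Models MarkCompiledCodeOnThreadStacks after this change: the shared
    // mutator lock is taken *before* the checkpoint is dispatched.
    void MarkCompiledCodeOnThreadStacks() {
      std::shared_lock<std::shared_mutex> sl(mutator_lock);
      MarkCodeOnStackCheckpoint();  // checkpoint precondition now holds
    }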
Diffstat (limited to 'runtime/jit/jit_code_cache.h')
-rw-r--r--  runtime/jit/jit_code_cache.h  |  47
1 file changed, 25 insertions(+), 22 deletions(-)
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 96fc7e2706..08572edd2d 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -285,11 +285,9 @@ class JitCodeCache {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
void FreeLocked(JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
- REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::jit_lock_);
- // Perform a collection on the code cache.
- EXPORT void GarbageCollectCache(Thread* self)
+ void IncreaseCodeCacheCapacity(Thread* self)
REQUIRES(!Locks::jit_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -423,9 +421,20 @@ class JitCodeCache {
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // NO_THREAD_SAFETY_ANALYSIS because we may be called with the JIT lock held
+ // or not. The implementation of this method handles the two cases.
+ void AddZombieCode(ArtMethod* method, const void* code_ptr) NO_THREAD_SAFETY_ANALYSIS;
+
+ EXPORT void DoCollection(Thread* self)
+ REQUIRES(!Locks::jit_lock_);
+
private:
JitCodeCache();
+ void AddZombieCodeInternal(ArtMethod* method, const void* code_ptr)
+ REQUIRES(Locks::jit_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
ProfilingInfo* AddProfilingInfoInternal(Thread* self,
ArtMethod* method,
const std::vector<uint32_t>& inline_cache_entries,
@@ -433,21 +442,16 @@ class JitCodeCache {
REQUIRES(Locks::jit_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
- // The non-mutator lock version should be used if possible. This method will release then
- // re-acquire the mutator lock.
- void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
- REQUIRES(Locks::jit_lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
-
// If a collection is in progress, wait for it to finish. Return
// whether the thread actually waited.
bool WaitForPotentialCollectionToComplete(Thread* self)
- REQUIRES(Locks::jit_lock_) REQUIRES(!Locks::mutator_lock_);
+ REQUIRES(Locks::jit_lock_) REQUIRES_SHARED(!Locks::mutator_lock_);
+
+ // Notify all waiting threads that a collection is done.
+ void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);
// Remove CHA dependents and underlying allocations for entries in `method_headers`.
void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(Locks::jit_lock_)
REQUIRES(!Locks::cha_lock_);
// Removes method from the cache. The caller must ensure that all threads
@@ -462,8 +466,7 @@ class JitCodeCache {
// Free code and data allocations for `code_ptr`.
void FreeCodeAndData(const void* code_ptr)
- REQUIRES(Locks::jit_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(Locks::jit_lock_);
// Number of bytes allocated in the code cache.
size_t CodeCacheSize() REQUIRES(!Locks::jit_lock_);
@@ -477,16 +480,9 @@ class JitCodeCache {
// Number of bytes allocated in the data cache.
size_t DataCacheSizeLocked() REQUIRES(Locks::jit_lock_);
- // Notify all waiting threads that a collection is done.
- void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);
-
// Return whether the code cache's capacity is at its maximum.
bool IsAtMaxCapacity() const REQUIRES(Locks::jit_lock_);
- void DoCollection(Thread* self)
- REQUIRES(!Locks::jit_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
void RemoveUnmarkedCode(Thread* self)
REQUIRES(!Locks::jit_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -563,18 +559,25 @@ class JitCodeCache {
// ProfilingInfo objects we have allocated.
SafeMap<ArtMethod*, ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);
+ // Zombie code and JNI methods to consider for collection.
+ std::set<const void*> zombie_code_ GUARDED_BY(Locks::jit_lock_);
+ std::set<ArtMethod*> zombie_jni_code_ GUARDED_BY(Locks::jit_lock_);
+
// Methods that the zygote has compiled and can be shared across processes
// forked from the zygote.
ZygoteMap zygote_map_;
// -------------- JIT GC related data structures ----------------------- //
- // Condition to wait on during collection.
+ // Condition to wait on during collection and for accessing weak references in inline caches.
ConditionVariable lock_cond_ GUARDED_BY(Locks::jit_lock_);
// Whether there is a code cache collection in progress.
bool collection_in_progress_ GUARDED_BY(Locks::jit_lock_);
+ // Whether a GC task is already scheduled.
+ bool gc_task_scheduled_ GUARDED_BY(Locks::jit_lock_);
+
// Bitmap for collecting code and data.
std::unique_ptr<CodeCacheBitmap> live_bitmap_;
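[Editor's note] On the new AddZombieCode entry point above: it is annotated NO_THREAD_SAFETY_ANALYSIS because callers may arrive with the JIT lock already held or not, and the implementation dispatches to a locked-only helper either way. ART's own Mutex type can query ownership at runtime, which Clang's static analysis cannot model. The standalone sketch below approximates the same dispatch with std::mutex and an explicit flag; the class and member names are hypothetical, chosen to mirror the declarations in this diff.

    #include <mutex>
    #include <set>

    class ZombieRegistry {
     public:
      // Public entry point, callable with or without jit_lock_ held. ART
      // detects lock ownership on its Mutex type; std::mutex cannot, so
      // this sketch takes an explicit flag from the caller instead.
      void AddZombieCode(const void* code_ptr, bool jit_lock_held) {
        if (jit_lock_held) {
          AddZombieCodeLocked(code_ptr);
        } else {
          std::lock_guard<std::mutex> guard(jit_lock_);
          AddZombieCodeLocked(code_ptr);
        }
      }

     private:
      // Mirrors AddZombieCodeInternal: requires jit_lock_, records the
      // code pointer so a later collection pass can free it if it is
      // still unmarked after the thread stacks have been scanned.
      void AddZombieCodeLocked(const void* code_ptr) {
        zombie_code_.insert(code_ptr);
      }

      std::mutex jit_lock_;                // stand-in for Locks::jit_lock_
      std::set<const void*> zombie_code_;  // mirrors zombie_code_ above
    };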