Move inline caches GC handling in JitCodeCache.

Make the classes they hold weak references, and visit
those references during SweepJitRoots.

This fixes the newly introduced deadlock:

Thread1:
1) Lock JitCodeCache lock to create a ProfilingInfo for
   ArtMethod m.
2) m is a copied method, we need to track the actual holder,
   needing to decode a weak reference.
3) Weak references are not accessible due to GC.

GC Thread:
- Disallow weak reference access.
- Wait for checkpoint.

Thread2:
- Try to lock JitCodeCache lock
- Deadlock, as Thread1 owns the JitCodeCache lock.

Test: test-art-host
Bug: 31289185
Bug: 33198826

Change-Id: I7ee17631015450ace8d2a0264415a81c5a902bb8
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a97ef68..ef6e298 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -36,6 +36,7 @@
 
 class ArtMethod;
 class LinearAlloc;
+class InlineCache;
 class ProfilingInfo;
 
 namespace jit {
@@ -156,7 +157,9 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
+  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
+      REQUIRES(!lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Create a 'ProfileInfo' for 'method'. If 'retry_allocation' is true,
   // will collect and retry if the first allocation is unsuccessful.
@@ -200,6 +203,12 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // The GC needs to disallow the reading of inline caches while it processes them,
+  // to prevent a class from being used while it is being deleted.
+  void AllowInlineCacheAccess() REQUIRES(!lock_);
+  void DisallowInlineCacheAccess() REQUIRES(!lock_);
+  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);
+
  private:
   // Take ownership of maps.
   JitCodeCache(MemMap* code_map,
@@ -275,6 +284,11 @@
   void FreeData(uint8_t* data) REQUIRES(lock_);
   uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
 
+  bool IsWeakAccessEnabled(Thread* self) const;
+  void WaitUntilInlineCacheAccessible(Thread* self)
+      REQUIRES(!lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Lock for guarding allocations, collections, and the method_code_map_.
   Mutex lock_;
   // Condition to wait on during collection.
@@ -347,6 +361,14 @@
   // Histograms for keeping track of profiling info statistics.
   Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);
 
+  // Whether the GC allows accessing weaks in inline caches. Note that this
+  // is not used by the concurrent collector, which uses
+  // Thread::SetWeakRefAccessEnabled instead.
+  Atomic<bool> is_weak_access_enabled_;
+
+  // Condition to wait on for accessing inline caches.
+  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
 };