Move inline cache GC handling into JitCodeCache.

Hold the classes in inline caches as weak references and visit
them during SweepJitRoots. Reading an inline cache now waits
until the GC re-allows weak reference access (see the sketches
below).

This fixes the newly introduced deadlock:

Thread1:
1) Locks the JitCodeCache lock to create a ProfilingInfo for
   ArtMethod m.
2) m is a copied method, so we need to find its actual holding
   class, which requires decoding a weak reference.
3) Weak references are not accessible because a GC is in progress.

GC Thread:
- Disallows weak reference access.
- Waits for all threads to run a checkpoint.

Thread2:
- Tries to lock the JitCodeCache lock.
- Blocks behind Thread1, which owns the lock, so it never reaches
  the GC checkpoint: deadlock.
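
The removed ProfilingInfo constructor code further down shows where
step 2) used to happen. A simplified sketch of the old path (not the
literal code) that makes the cycle visible:

  // Thread1 holds the JitCodeCache lock while the ProfilingInfo
  // constructor runs.
  MutexLock mu(self, lock_);
  // For a copied method, resolving the holding class decodes a
  // weak reference:
  holding_class_ = GcRoot<mirror::Class>(
      Runtime::Current()->GetClassLinker()->GetHoldingClassOfCopiedMethod(method));
  // That decode blocks while the GC has disallowed weak reference
  // access, so lock_ is never released and Thread2 never reaches
  // the GC checkpoint.

With this change, ProfilingInfo no longer stores a holding class, so
creating one never decodes a weak reference while lock_ is held.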

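On the GC side, the non-concurrent-copying collectors are expected to
bracket their weak-reference processing with the new hooks, next to
the other system weak holders; the concurrent copying collector keeps
using Thread::SetWeakRefAccessEnabled. A sketch of the expected
wiring, assuming Runtime::DisallowNewSystemWeaks() /
Runtime::AllowNewSystemWeaks() are the hook points (the callers are
not part of this diff):

  void Runtime::DisallowNewSystemWeaks() {
    // ... intern table, monitor list, etc. ...
    if (GetJit() != nullptr) {
      GetJit()->GetCodeCache()->DisallowInlineCacheAccess();
    }
  }

  void Runtime::AllowNewSystemWeaks() {
    // ... intern table, monitor list, etc. ...
    if (GetJit() != nullptr) {
      // Re-enables access and broadcasts on inline_cache_cond_ so
      // threads blocked in WaitUntilInlineCacheAccessible wake up.
      GetJit()->GetCodeCache()->AllowInlineCacheAccess();
    }
  }
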
Test: test-art-host
Bug: 31289185
Bug: 33198826

Change-Id: I7ee17631015450ace8d2a0264415a81c5a902bb8
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 719faed..1021db0 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -133,7 +133,7 @@
                            size_t max_capacity,
                            bool garbage_collect_code)
     : lock_("Jit code cache", kJitCodeCacheLock),
-      lock_cond_("Jit code cache variable", lock_),
+      lock_cond_("Jit code cache condition variable", lock_),
       collection_in_progress_(false),
       code_map_(code_map),
       data_map_(data_map),
@@ -152,7 +152,9 @@
       number_of_collections_(0),
       histogram_stack_map_memory_use_("Memory used for stack maps", 16),
       histogram_code_memory_use_("Memory used for compiled code", 16),
-      histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {
+      histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
+      is_weak_access_enabled_(true),
+      inline_cache_cond_("Jit inline cache condition variable", lock_) {
 
   DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
   code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
@@ -323,6 +325,34 @@
       }
     }
   }
+  // Walk over inline caches to clear entries containing unloaded classes.
+  for (ProfilingInfo* info : profiling_infos_) {
+    for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
+      InlineCache* cache = &info->cache_[i];
+      for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
+        // This does not need a read barrier because this is called by GC.
+        mirror::Class* cls = cache->classes_[j].Read<kWithoutReadBarrier>();
+        if (cls != nullptr) {
+        // Look at the class loader of the class to know whether it has
+        // been unloaded.
+          // This does not need a read barrier because this is called by GC.
+          mirror::Object* class_loader =
+              cls->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
+          if (visitor->IsMarked(class_loader) != nullptr) {
+            // The class loader is live, update the entry if the class has moved.
+            mirror::Class* new_cls = down_cast<mirror::Class*>(visitor->IsMarked(cls));
+            // Note that new_cls can be null for CMS and newly allocated objects.
+            if (new_cls != nullptr && new_cls != cls) {
+              cache->classes_[j] = GcRoot<mirror::Class>(new_cls);
+            }
+          } else {
+            // The class loader is not live, clear the entry.
+            cache->classes_[j] = GcRoot<mirror::Class>(nullptr);
+          }
+        }
+      }
+    }
+  }
 }
 
 void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
@@ -371,11 +401,51 @@
   }
 }
 
-void JitCodeCache::ClearGcRootsInInlineCaches(Thread* self) {
+bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
+  return kUseReadBarrier
+      ? self->GetWeakRefAccessEnabled()
+      : is_weak_access_enabled_.LoadSequentiallyConsistent();
+}
+
+void JitCodeCache::WaitUntilInlineCacheAccessible(Thread* self) {
+  if (IsWeakAccessEnabled(self)) {
+    return;
+  }
+  ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
   MutexLock mu(self, lock_);
-  for (ProfilingInfo* info : profiling_infos_) {
-    if (!info->IsInUseByCompiler()) {
-      info->ClearGcRootsInInlineCaches();
+  while (!IsWeakAccessEnabled(self)) {
+    inline_cache_cond_.Wait(self);
+  }
+}
+
+void JitCodeCache::BroadcastForInlineCacheAccess() {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, lock_);
+  inline_cache_cond_.Broadcast(self);
+}
+
+void JitCodeCache::AllowInlineCacheAccess() {
+  DCHECK(!kUseReadBarrier);
+  is_weak_access_enabled_.StoreSequentiallyConsistent(true);
+  BroadcastForInlineCacheAccess();
+}
+
+void JitCodeCache::DisallowInlineCacheAccess() {
+  DCHECK(!kUseReadBarrier);
+  is_weak_access_enabled_.StoreSequentiallyConsistent(false);
+}
+
+void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
+                                       Handle<mirror::ObjectArray<mirror::Class>> array) {
+  WaitUntilInlineCacheAccessible(Thread::Current());
+  // Note that we don't need to lock `lock_` here; the compiler calling
+  // this method has already ensured the inline cache will not be deleted.
+  for (size_t in_cache = 0, in_array = 0;
+       in_cache < InlineCache::kIndividualCacheSize;
+       ++in_cache) {
+    mirror::Class* object = ic.classes_[in_cache].Read();
+    if (object != nullptr) {
+      array->Set(in_array++, object);
     }
   }
 }
@@ -816,8 +886,6 @@
 
   if (collect_profiling_info) {
     ScopedThreadSuspension sts(self, kSuspended);
-    gc::ScopedGCCriticalSection gcs(
-        self, gc::kGcCauseJitCodeCache, gc::kCollectorTypeJitCodeCache);
     MutexLock mu(self, lock_);
     // Free all profiling infos of methods not compiled nor being compiled.
     auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
@@ -831,10 +899,6 @@
         // code cache collection.
         if (ContainsPc(ptr) &&
             info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
-          // We clear the inline caches as classes in it might be stalled.
-          info->ClearGcRootsInInlineCaches();
-          // Do a fence to make sure the clearing is seen before attaching to the method.
-          QuasiAtomic::ThreadFenceRelease();
           info->GetMethod()->SetProfilingInfo(info);
         } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
           // No need for this ProfilingInfo object anymore.
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a97ef68..ef6e298 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -36,6 +36,7 @@
 
 class ArtMethod;
 class LinearAlloc;
+class InlineCache;
 class ProfilingInfo;
 
 namespace jit {
@@ -156,7 +157,9 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
+  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
+      REQUIRES(!lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Create a 'ProfileInfo' for 'method'. If 'retry_allocation' is true,
   // will collect and retry if the first allocation is unsuccessful.
@@ -200,6 +203,12 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // The GC needs to disallow reading inline caches while it processes them,
+  // to avoid a class being used while it is being deleted.
+  void AllowInlineCacheAccess() REQUIRES(!lock_);
+  void DisallowInlineCacheAccess() REQUIRES(!lock_);
+  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);
+
  private:
   // Take ownership of maps.
   JitCodeCache(MemMap* code_map,
@@ -275,6 +284,11 @@
   void FreeData(uint8_t* data) REQUIRES(lock_);
   uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
 
+  bool IsWeakAccessEnabled(Thread* self) const;
+  void WaitUntilInlineCacheAccessible(Thread* self)
+      REQUIRES(!lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Lock for guarding allocations, collections, and the method_code_map_.
   Mutex lock_;
   // Condition to wait on during collection.
@@ -347,6 +361,14 @@
   // Histograms for keeping track of profiling info statistics.
   Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);
 
+  // Whether the GC allows accessing weak references in inline caches. Note
+  // that this is not used by the concurrent copying collector, which relies
+  // on Thread::SetWeakRefAccessEnabled instead.
+  Atomic<bool> is_weak_access_enabled_;
+
+  // Condition to wait on for accessing inline caches.
+  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
 };
 
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 9ec46f0..405280d 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -36,15 +36,6 @@
   for (size_t i = 0; i < number_of_inline_caches_; ++i) {
     cache_[i].dex_pc_ = entries[i];
   }
-  if (method->IsCopied()) {
-    // GetHoldingClassOfCopiedMethod is expensive, but creating a profiling info for a copied method
-    // appears to happen very rarely in practice.
-    holding_class_ = GcRoot<mirror::Class>(
-        Runtime::Current()->GetClassLinker()->GetHoldingClassOfCopiedMethod(method));
-  } else {
-    holding_class_ = GcRoot<mirror::Class>(method->GetDeclaringClass());
-  }
-  DCHECK(!holding_class_.IsNull());
 }
 
 bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocation) {
@@ -116,14 +107,6 @@
         --i;
       } else {
         // We successfully set `cls`, just return.
-        // Since the instrumentation is marked from the declaring class we need to mark the card so
-        // that mod-union tables and card rescanning know about the update.
-        // Note that the declaring class is not necessarily the holding class if the method is
-        // copied. We need the card mark to be in the holding class since that is from where we
-        // will visit the profiling info.
-        if (!holding_class_.IsNull()) {
-          Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(holding_class_.Read());
-        }
         return;
       }
     }
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 1056fac..9902bb5 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -39,46 +39,13 @@
 // Once the classes_ array is full, we consider the INVOKE to be megamorphic.
 class InlineCache {
  public:
-  bool IsMonomorphic() const {
-    DCHECK_GE(kIndividualCacheSize, 2);
-    return !classes_[0].IsNull() && classes_[1].IsNull();
-  }
-
-  bool IsMegamorphic() const {
-    for (size_t i = 0; i < kIndividualCacheSize; ++i) {
-      if (classes_[i].IsNull()) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  mirror::Class* GetMonomorphicType() const REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Note that we cannot ensure the inline cache is actually monomorphic
-    // at this point, as other threads may have updated it.
-    DCHECK(!classes_[0].IsNull());
-    return classes_[0].Read();
-  }
-
-  bool IsUninitialized() const {
-    return classes_[0].IsNull();
-  }
-
-  bool IsPolymorphic() const {
-    DCHECK_GE(kIndividualCacheSize, 3);
-    return !classes_[1].IsNull() && classes_[kIndividualCacheSize - 1].IsNull();
-  }
-
-  mirror::Class* GetTypeAt(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
-    return classes_[i].Read();
-  }
-
   static constexpr uint16_t kIndividualCacheSize = 5;
 
  private:
   uint32_t dex_pc_;
   GcRoot<mirror::Class> classes_[kIndividualCacheSize];
 
+  friend class jit::JitCodeCache;
   friend class ProfilingInfo;
 
   DISALLOW_COPY_AND_ASSIGN(InlineCache);
@@ -102,18 +69,6 @@
       REQUIRES(Roles::uninterruptible_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
-  template<typename RootVisitorType>
-  void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS {
-    visitor.VisitRootIfNonNull(holding_class_.AddressWithoutBarrier());
-    for (size_t i = 0; i < number_of_inline_caches_; ++i) {
-      InlineCache* cache = &cache_[i];
-      for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
-        visitor.VisitRootIfNonNull(cache->classes_[j].AddressWithoutBarrier());
-      }
-    }
-  }
-
   ArtMethod* GetMethod() const {
     return method_;
   }
@@ -175,9 +130,6 @@
   // Method this profiling info is for.
   ArtMethod* const method_;
 
-  // Holding class for the method in case method is a copied method.
-  GcRoot<mirror::Class> holding_class_;
-
   // Whether the ArtMethod is currently being compiled. This flag
   // is implicitly guarded by the JIT code cache lock.
   // TODO: Make the JIT code cache lock global.