Clear inline caches at each full GC.

This fixes occasional failures of the 141-class-unload test.

Also fix a bug where clearing the inline caches wiped the dex pc
associated with each cache.

bug:26846185
bug:23128949
Change-Id: I77bf1dee229d7764c3cc21440829c7fba7b37001
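
Note on the dex pc bug: ClearInlineCaches() used to wipe each whole
InlineCache entry with a single memset, which also zeroed the dex_pc_
field stored next to the cached classes. The sketch below contrasts the
old and new clearing behavior. It is a minimal standalone model with
simplified stand-in types (GcRoot, Class, a fixed-size cache_ array),
not the actual ART definitions.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Simplified stand-ins for the ART types: the real InlineCache keeps a
    // dex pc next to a small fixed array of GcRoot<mirror::Class>.
    template <typename T> struct GcRoot { T* ref = nullptr; };
    struct Class {};

    struct InlineCache {
      static constexpr size_t kIndividualCacheSize = 5;
      uint32_t dex_pc_ = 0;
      GcRoot<Class> classes_[kIndividualCacheSize];
    };

    struct ProfilingInfo {
      // Fixed size for the sketch; the real ProfilingInfo allocates a
      // variable-length tail of InlineCache entries.
      static constexpr size_t number_of_inline_caches_ = 4;
      InlineCache cache_[number_of_inline_caches_];

      // Old (buggy): zeroes whole entries, losing dex_pc_ on every clear.
      void ClearInlineCaches() {
        memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
      }

      // New: clears only the GC roots, so dex_pc_ survives a full GC.
      void ClearGcRootsInInlineCaches() {
        for (size_t i = 0; i < number_of_inline_caches_; ++i) {
          InlineCache* cache = &cache_[i];
          memset(&cache->classes_[0], 0,
                 InlineCache::kIndividualCacheSize * sizeof(GcRoot<Class>));
        }
      }
    };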
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 3480483..01dff19 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -59,6 +59,8 @@
 #include "heap-inl.h"
 #include "image.h"
 #include "intern_table.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
@@ -2669,6 +2671,12 @@
     // permanantly disabled. b/17942071
     concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
   }
+
+  if ((gc_type == collector::kGcTypeFull) && runtime->UseJit()) {
+    // It's time to clear all inline caches so that classes referenced by them can be unloaded.
+    runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self);
+  }
+
   CHECK(collector != nullptr)
       << "Could not find garbage collector with collector_type="
       << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 1545cb7..4f87e5b 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -293,6 +293,15 @@
   }
 }
 
+void JitCodeCache::ClearGcRootsInInlineCaches(Thread* self) {
+  MutexLock mu(self, lock_);
+  for (ProfilingInfo* info : profiling_infos_) {
+    if (!info->IsInUseByCompiler()) {
+      info->ClearGcRootsInInlineCaches();
+    }
+  }
+}
+
 uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                           ArtMethod* method,
                                           const uint8_t* mapping_table,
@@ -675,7 +684,7 @@
       // Also remove the saved entry point from the ProfilingInfo objects.
       for (ProfilingInfo* info : profiling_infos_) {
         const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-        if (!ContainsPc(ptr) && !info->IsMethodBeingCompiled()) {
+        if (!ContainsPc(ptr) && !info->IsInUseByCompiler()) {
           info->GetMethod()->SetProfilingInfo(nullptr);
         }
         info->SetSavedEntryPoint(nullptr);
@@ -727,7 +736,7 @@
         // code cache collection.
         if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
           // We clear the inline caches as classes in it might be stalled.
-          info->ClearInlineCaches();
+          info->ClearGcRootsInInlineCaches();
           // Do a fence to make sure the clearing is seen before attaching to the method.
           QuasiAtomic::ThreadFenceRelease();
           info->GetMethod()->SetProfilingInfo(info);
@@ -915,6 +924,22 @@
   return true;
 }
 
+void JitCodeCache::NotifyInliningOf(ArtMethod* method, Thread* self) {
+  MutexLock mu(self, lock_);
+  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+  if (info != nullptr) {
+    info->IncrementInlineUse();
+  }
+}
+
+void JitCodeCache::DoneInlining(ArtMethod* method, Thread* self) {
+  MutexLock mu(self, lock_);
+  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+  if (info != nullptr) {
+    info->DecrementInlineUse();
+  }
+}
+
 void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED) {
   ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
   DCHECK(info->IsMethodBeingCompiled());
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 0bd4f7d..113bebf 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -71,10 +71,18 @@
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
+  void NotifyInliningOf(ArtMethod* method, Thread* self)
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!lock_);
+
   void DoneCompiling(ArtMethod* method, Thread* self)
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
+  void DoneInlining(ArtMethod* method, Thread* self)
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!lock_);
+
   // Allocate and write code and its metadata to the code cache.
   uint8_t* CommitCode(Thread* self,
                       ArtMethod* method,
@@ -143,6 +151,8 @@
       REQUIRES(Locks::classlinker_classes_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
+
   // Create a 'ProfileInfo' for 'method'. If 'retry_allocation' is true,
   // will collect and retry if the first allocation is unsuccessful.
   ProfilingInfo* AddProfilingInfo(Thread* self,
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 3820592..07c8051 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -97,8 +97,8 @@
       }
     }
   }
-  // Unsuccessfull - cache is full, making it megamorphic.
-  DCHECK(cache->IsMegamorphic());
+  // Unsuccessful - the cache is full, making it megamorphic. We do not DCHECK this,
+  // however, as the garbage collector might clear the entries concurrently.
 }
 
 }  // namespace art
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index a8c056c..73c1a1e 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -134,8 +134,27 @@
     return saved_entry_point_;
   }
 
-  void ClearInlineCaches() {
-    memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
+  void ClearGcRootsInInlineCaches() {
+    for (size_t i = 0; i < number_of_inline_caches_; ++i) {
+      InlineCache* cache = &cache_[i];
+      memset(&cache->classes_[0],
+             0,
+             InlineCache::kIndividualCacheSize * sizeof(GcRoot<mirror::Class>));
+    }
+  }
+
+  void IncrementInlineUse() {
+    DCHECK_NE(current_inline_uses_, std::numeric_limits<uint16_t>::max());
+    current_inline_uses_++;
+  }
+
+  void DecrementInlineUse() {
+    DCHECK_GT(current_inline_uses_, 0);
+    current_inline_uses_--;
+  }
+
+  bool IsInUseByCompiler() const {
+    return IsMethodBeingCompiled() || (current_inline_uses_ > 0);
   }
 
  private:
@@ -143,8 +162,9 @@
       : number_of_inline_caches_(entries.size()),
         method_(method),
         is_method_being_compiled_(false),
+        current_inline_uses_(0),
         saved_entry_point_(nullptr) {
-    ClearInlineCaches();
+    memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
     for (size_t i = 0; i < number_of_inline_caches_; ++i) {
       cache_[i].dex_pc_ = entries[i];
     }
@@ -161,6 +181,10 @@
   // TODO: Make the JIT code cache lock global.
   bool is_method_being_compiled_;
 
+  // When the compiler inlines the method associated with this ProfilingInfo,
+  // it updates this counter so that the GC does not try to clear the inline caches.
+  uint16_t current_inline_uses_;
+
   // Entry point of the corresponding ArtMethod, while the JIT code cache
   // is poking for the liveness of compiled code.
   const void* saved_entry_point_;
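
Companion sketch of the inline-use counting protocol added in
profiling_info.h above: the compiler brackets inlining with
NotifyInliningOf()/DoneInlining(), and the full-GC clear skips any
ProfilingInfo that is still pinned. This is a minimal standalone model,
not the actual ART code: locking is reduced to a plain std::mutex, and
the two notify methods take the ProfilingInfo directly instead of
looking it up from an ArtMethod.

    #include <cassert>
    #include <cstdint>
    #include <limits>
    #include <mutex>
    #include <vector>

    class ProfilingInfo {
     public:
      void IncrementInlineUse() {
        assert(current_inline_uses_ != std::numeric_limits<uint16_t>::max());
        current_inline_uses_++;
      }
      void DecrementInlineUse() {
        assert(current_inline_uses_ > 0);
        current_inline_uses_--;
      }
      bool IsInUseByCompiler() const {
        return is_method_being_compiled_ || current_inline_uses_ > 0;
      }
      void ClearGcRootsInInlineCaches() { /* zero the cached classes */ }

      bool is_method_being_compiled_ = false;
      uint16_t current_inline_uses_ = 0;
    };

    class JitCodeCache {
     public:
      // Compiler side: pin the info while its inline caches may be read.
      void NotifyInliningOf(ProfilingInfo* info) {
        std::lock_guard<std::mutex> mu(lock_);
        info->IncrementInlineUse();
      }
      void DoneInlining(ProfilingInfo* info) {
        std::lock_guard<std::mutex> mu(lock_);
        info->DecrementInlineUse();
      }

      // GC side, at each full collection: clear only unpinned caches.
      void ClearGcRootsInInlineCaches() {
        std::lock_guard<std::mutex> mu(lock_);
        for (ProfilingInfo* info : profiling_infos_) {
          if (!info->IsInUseByCompiler()) {
            info->ClearGcRootsInInlineCaches();
          }
        }
      }

      std::vector<ProfilingInfo*> profiling_infos_;
      std::mutex lock_;
    };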