Refactor Thread::SweepInterpreterCaches

Extracted from the shared interpreter cache CL.

Bring the loop-over-all-threads inside the sweep method,
so that we can handle shared cache there as well.

Also add a DCHECK to assert that we handle all opcodes.
This will complain if we add an opcode to the cache but
forget to add a sweep handler for it, or if the cache
contents get corrupted/invalid for some reason.

Test: ./art/test.py -b -r --host
Change-Id: I491d9be898bbd35800645ba994907e4d9be83b80
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 48a6640..0bcdb13 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -739,7 +739,7 @@
     // from mutators. See b/32167580.
     GetJit()->GetCodeCache()->SweepRootTables(visitor);
   }
-  thread_list_->SweepInterpreterCaches(visitor);
+  Thread::SweepInterpreterCaches(visitor);
 
   // All other generic system-weak holders.
   for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 9505007..0a54e19 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -4280,39 +4280,63 @@
 }
 #pragma GCC diagnostic pop
 
-void Thread::SweepInterpreterCache(IsMarkedVisitor* visitor) {
-  for (InterpreterCache::Entry& entry : GetInterpreterCache()->GetArray()) {
-    const Instruction* inst = reinterpret_cast<const Instruction*>(entry.first);
-    if (inst != nullptr) {
-      if (inst->Opcode() == Instruction::NEW_INSTANCE ||
-          inst->Opcode() == Instruction::CHECK_CAST ||
-          inst->Opcode() == Instruction::INSTANCE_OF ||
-          inst->Opcode() == Instruction::NEW_ARRAY ||
-          inst->Opcode() == Instruction::CONST_CLASS) {
-        mirror::Class* cls = reinterpret_cast<mirror::Class*>(entry.second);
-        if (cls == nullptr || cls == Runtime::GetWeakClassSentinel()) {
-          // Entry got deleted in a previous sweep.
-          continue;
-        }
-        Runtime::ProcessWeakClass(
-            reinterpret_cast<GcRoot<mirror::Class>*>(&entry.second),
-            visitor,
-            Runtime::GetWeakClassSentinel());
-      } else if (inst->Opcode() == Instruction::CONST_STRING ||
-                 inst->Opcode() == Instruction::CONST_STRING_JUMBO) {
-        mirror::Object* object = reinterpret_cast<mirror::Object*>(entry.second);
-        mirror::Object* new_object = visitor->IsMarked(object);
-        // We know the string is marked because it's a strongly-interned string that
-        // is always alive (see b/117621117 for trying to make those strings weak).
-        // The IsMarked implementation of the CMS collector returns
-        // null for newly allocated objects, but we know those haven't moved. Therefore,
-        // only update the entry if we get a different non-null string.
-        if (new_object != nullptr && new_object != object) {
-          entry.second = reinterpret_cast<size_t>(new_object);
-        }
-      }
-    }
+static void SweepCacheEntry(IsMarkedVisitor* visitor, const Instruction* inst, size_t* value)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (inst == nullptr) {
+    return;
   }
+  using Opcode = Instruction::Code;
+  Opcode opcode = inst->Opcode();
+  switch (opcode) {
+    case Opcode::NEW_INSTANCE:
+    case Opcode::CHECK_CAST:
+    case Opcode::INSTANCE_OF:
+    case Opcode::NEW_ARRAY:
+    case Opcode::CONST_CLASS: {
+      mirror::Class* cls = reinterpret_cast<mirror::Class*>(*value);
+      if (cls == nullptr || cls == Runtime::GetWeakClassSentinel()) {
+        // Entry got deleted in a previous sweep.
+        return;
+      }
+      Runtime::ProcessWeakClass(
+          reinterpret_cast<GcRoot<mirror::Class>*>(value),
+          visitor,
+          Runtime::GetWeakClassSentinel());
+      return;
+    }
+    case Opcode::CONST_STRING:
+    case Opcode::CONST_STRING_JUMBO: {
+      mirror::Object* object = reinterpret_cast<mirror::Object*>(*value);
+      mirror::Object* new_object = visitor->IsMarked(object);
+      // We know the string is marked because it's a strongly-interned string that
+      // is always alive (see b/117621117 for trying to make those strings weak).
+      // The IsMarked implementation of the CMS collector returns
+      // null for newly allocated objects, but we know those haven't moved. Therefore,
+      // only update the entry if we get a different non-null string.
+      if (new_object != nullptr && new_object != object) {
+        *value = reinterpret_cast<size_t>(new_object);
+      }
+      return;
+    }
+    default:
+      // The following opcode ranges store non-reference values.
+      if ((Opcode::IGET <= opcode && opcode <= Opcode::SPUT_SHORT) ||
+          (Opcode::INVOKE_VIRTUAL <= opcode && opcode <= Opcode::INVOKE_INTERFACE_RANGE)) {
+        return;  // Nothing to do for the GC.
+      }
+      // New opcode is using the cache. We need to explicitly handle it in this method.
+      DCHECK(false) << "Unhandled opcode " << inst->Opcode();
+  }
+};
+
+void Thread::SweepInterpreterCaches(IsMarkedVisitor* visitor) {
+  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+  Runtime::Current()->GetThreadList()->ForEach([visitor](Thread* thread) {
+    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+    for (InterpreterCache::Entry& entry : thread->GetInterpreterCache()->GetArray()) {
+      SweepCacheEntry(visitor, reinterpret_cast<const Instruction*>(entry.first), &entry.second);
+    }
+  });
 }
 
 // FIXME: clang-r433403 reports the below function exceeds frame size limit.
diff --git a/runtime/thread.h b/runtime/thread.h
index a7ff1a9..d1829a3 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1541,7 +1541,8 @@
   template <bool kPrecise>
   void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SweepInterpreterCache(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+  static void SweepInterpreterCaches(IsMarkedVisitor* visitor)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   static bool IsAotCompiler();
 
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 4e3b40b..6482e72 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1407,13 +1407,6 @@
   }
 }
 
-void ThreadList::SweepInterpreterCaches(IsMarkedVisitor* visitor) const {
-  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
-  for (const auto& thread : list_) {
-    thread->SweepInterpreterCache(visitor);
-  }
-}
-
 void ThreadList::VisitReflectiveTargets(ReflectiveValueVisitor *visitor) const {
   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
   for (const auto& thread : list_) {
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index f5b58a0..29b0c52 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -179,10 +179,6 @@
     return empty_checkpoint_barrier_.get();
   }
 
-  void SweepInterpreterCaches(IsMarkedVisitor* visitor) const
-      REQUIRES(!Locks::thread_list_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   void WaitForOtherNonDaemonThreadsToExit(bool check_no_birth = true)
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
                !Locks::mutator_lock_);