Clean up `IndirectReferenceTable` visiting.

Move weak global sweeping to the IRT class and clean up the
entry visiting to avoid the need for the `IrtIterator`.
Move `JNIEnvExt::UpdateLocal` to `jni_env_ext-inl.h`
because its definition requires `indirect_reference_table-inl.h`.
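
In the new scheme both visiting and sweeping are plain index loops over
the table, skipping null slots. A minimal standalone sketch of the
pattern (the `Object`, `Entry`, and `gClearedSentinel` names are
stand-ins; the real code uses `IrtEntry`, `GcRoot<mirror::Object>`,
`Capacity()` and `Runtime::GetClearedJniWeakGlobal()`):

  #include <cstddef>
  #include <vector>

  struct Object {};
  Object gClearedSentinel;  // stand-in for the shared "cleared weak global" object

  struct Entry {
    Object* ref = nullptr;  // null means "unused slot", never "cleared weak"
  };

  // Root visiting: iterate by index; no iterator class required.
  template <typename Visitor>
  void VisitRoots(std::vector<Entry>& table, Visitor&& visit) {
    for (size_t i = 0, capacity = table.size(); i != capacity; ++i) {
      if (table[i].ref != nullptr) {
        visit(&table[i].ref);  // visit the slot itself, not a copy
      }
    }
  }

  // Weak-global sweeping: dead referents are redirected to the sentinel,
  // so cleared entries stay distinguishable from unused (null) slots.
  template <typename IsMarked>
  void SweepWeakGlobals(std::vector<Entry>& table, IsMarked&& is_marked) {
    for (size_t i = 0, capacity = table.size(); i != capacity; ++i) {
      Object** entry = &table[i].ref;
      if (*entry != nullptr) {
        Object* new_obj = is_marked(*entry);  // nullptr if unreachable
        *entry = (new_obj != nullptr) ? new_obj : &gClearedSentinel;
      }
    }
  }

  int main() {
    Object live, dead;
    std::vector<Entry> table = {{&live}, {nullptr}, {&dead}};
    // Pretend the GC found only `live` reachable.
    SweepWeakGlobals(table, [&](Object* o) { return o == &live ? o : nullptr; });
    // Now: table[0] still points to live, table[1] stays null (unused),
    // and table[2] points to gClearedSentinel (a cleared weak reference).
  }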
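
The `UpdateLocal` move is the usual "-inl.h" split: the declaration stays
in the cheap header, and the inline definition moves next to the heavy
include it depends on. Schematically (hypothetical `Widget` names, not
the actual headers):

  // widget.h -- cheap to include everywhere; declares only.
  struct Widget {
    int value = 0;
    int Poke();  // defined in widget-inl.h
  };

  // widget-inl.h -- only translation units that call Poke() include this,
  // so only they pay for the heavy dependency.
  // #include "heavy_dependency-inl.h"
  inline int Widget::Poke() {
    return value + 1;  // imagine this calling into heavy_dependency
  }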

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 172332525
Change-Id: Ia96d31e0d8d701c93d7752bf76f633719a836f3f
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 1e1adf0..2cf1d83 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -27,7 +27,7 @@
 #include "mirror/object-inl.h"
 #include "nth_caller_visitor.h"
 #include "reference_table.h"
-#include "runtime.h"
+#include "runtime-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread.h"
 
@@ -528,7 +528,9 @@
 
 void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
   BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
-  for (auto ref : *this) {
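+  // GetReference() does not apply a read barrier; none is needed when visiting roots.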
+  for (size_t i = 0, capacity = Capacity(); i != capacity; ++i) {
+    GcRoot<mirror::Object>* ref = table_[i].GetReference();
     if (!ref->IsNull()) {
       root_visitor.VisitRoot(*ref);
       DCHECK(!ref->IsNull());
@@ -536,6 +537,25 @@
   }
 }
 
+void IndirectReferenceTable::SweepJniWeakGlobals(IsMarkedVisitor* visitor) {
+  CHECK_EQ(kind_, kWeakGlobal);
+  MutexLock mu(Thread::Current(), *Locks::jni_weak_globals_lock_);
+  Runtime* const runtime = Runtime::Current();
+  for (size_t i = 0, capacity = Capacity(); i != capacity; ++i) {
+    GcRoot<mirror::Object>* entry = table_[i].GetReference();
+    // Need to skip null here to distinguish between null entries and cleared weak ref entries.
+    if (!entry->IsNull()) {
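+      // Since this is called by the GC, we don't need a read barrier.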
+      mirror::Object* obj = entry->Read<kWithoutReadBarrier>();
+      mirror::Object* new_obj = visitor->IsMarked(obj);
+      if (new_obj == nullptr) {
+        new_obj = runtime->GetClearedJniWeakGlobal();
+      }
+      *entry = GcRoot<mirror::Object>(new_obj);
+    }
+  }
+}
+
 void IndirectReferenceTable::Dump(std::ostream& os) const {
   os << kind_ << " table dump:\n";
   ReferenceTable::Table entries;
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 18d0730..545958e 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -37,6 +37,7 @@
 
 namespace art {
 
+class IsMarkedVisitor;
 class RootInfo;
 
 namespace mirror {
@@ -181,42 +182,6 @@
 static_assert(sizeof(IrtEntry) == 2 * sizeof(uint32_t), "Unexpected sizeof(IrtEntry)");
 static_assert(IsPowerOfTwo(sizeof(IrtEntry)), "Unexpected sizeof(IrtEntry)");
 
-class IrtIterator {
- public:
-  IrtIterator(IrtEntry* table, size_t i, size_t capacity) REQUIRES_SHARED(Locks::mutator_lock_)
-      : table_(table), i_(i), capacity_(capacity) {
-    // capacity_ is used in some target; has warning with unused attribute.
-    UNUSED(capacity_);
-  }
-
-  IrtIterator& operator++() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ++i_;
-    return *this;
-  }
-
-  GcRoot<mirror::Object>* operator*() REQUIRES_SHARED(Locks::mutator_lock_) {
-    // This does not have a read barrier as this is used to visit roots.
-    return table_[i_].GetReference();
-  }
-
-  bool equals(const IrtIterator& rhs) const {
-    return (i_ == rhs.i_ && table_ == rhs.table_);
-  }
-
- private:
-  IrtEntry* const table_;
-  size_t i_;
-  const size_t capacity_;
-};
-
-bool inline operator==(const IrtIterator& lhs, const IrtIterator& rhs) {
-  return lhs.equals(rhs);
-}
-
-bool inline operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
-  return !lhs.equals(rhs);
-}
-
 // We initially allocate local reference tables with a very small number of entries, packing
 // multiple tables into a single page. If we need to expand one, we allocate them in units of
 // pages.
@@ -340,15 +305,6 @@
   // without recovering holes. Thus this is a conservative estimate.
   size_t FreeCapacity() const;
 
-  // Note IrtIterator does not have a read barrier as it's used to visit roots.
-  IrtIterator begin() {
-    return IrtIterator(table_, 0, Capacity());
-  }
-
-  IrtIterator end() {
-    return IrtIterator(table_, Capacity(), Capacity());
-  }
-
   void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -377,6 +333,10 @@
   bool IsValidReference(IndirectRef, /*out*/std::string* error_msg) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void SweepJniWeakGlobals(IsMarkedVisitor* visitor)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Locks::jni_weak_globals_lock_);
+
  private:
   static constexpr uint32_t kShiftedSerialMask = (1u << kIRTSerialBits) - 1;
 
diff --git a/runtime/jni/java_vm_ext.cc b/runtime/jni/java_vm_ext.cc
index 248335a..f28532b 100644
--- a/runtime/jni/java_vm_ext.cc
+++ b/runtime/jni/java_vm_ext.cc
@@ -1169,23 +1169,6 @@
   return native_method;
 }
 
-void JavaVMExt::SweepJniWeakGlobals(IsMarkedVisitor* visitor) {
-  MutexLock mu(Thread::Current(), *Locks::jni_weak_globals_lock_);
-  Runtime* const runtime = Runtime::Current();
-  for (auto* entry : weak_globals_) {
-    // Need to skip null here to distinguish between null entries and cleared weak ref entries.
-    if (!entry->IsNull()) {
-      // Since this is called by the GC, we don't need a read barrier.
-      mirror::Object* obj = entry->Read<kWithoutReadBarrier>();
-      mirror::Object* new_obj = visitor->IsMarked(obj);
-      if (new_obj == nullptr) {
-        new_obj = runtime->GetClearedJniWeakGlobal();
-      }
-      *entry = GcRoot<mirror::Object>(new_obj);
-    }
-  }
-}
-
 void JavaVMExt::TrimGlobals() {
   WriterMutexLock mu(Thread::Current(), *Locks::jni_globals_lock_);
   globals_.Trim();
diff --git a/runtime/jni/java_vm_ext.h b/runtime/jni/java_vm_ext.h
index b97088a..8d81af1 100644
--- a/runtime/jni/java_vm_ext.h
+++ b/runtime/jni/java_vm_ext.h
@@ -165,7 +165,9 @@
 
   void SweepJniWeakGlobals(IsMarkedVisitor* visitor)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!Locks::jni_weak_globals_lock_);
+      REQUIRES(!Locks::jni_weak_globals_lock_) {
+    weak_globals_.SweepJniWeakGlobals(visitor);
+  }
 
   ObjPtr<mirror::Object> DecodeGlobal(IndirectRef ref)
       REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/jni/jni_env_ext-inl.h b/runtime/jni/jni_env_ext-inl.h
index 7609a9e..d66ac1a 100644
--- a/runtime/jni/jni_env_ext-inl.h
+++ b/runtime/jni/jni_env_ext-inl.h
@@ -19,6 +19,7 @@
 
 #include "jni_env_ext.h"
 
+#include "indirect_reference_table-inl.h"
 #include "mirror/object.h"
 
 namespace art {
@@ -49,6 +50,10 @@
   return reinterpret_cast<T>(ref);
 }
 
+inline void JNIEnvExt::UpdateLocal(IndirectRef iref, ObjPtr<mirror::Object> obj) {
+  locals_.Update(iref, obj);
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_JNI_JNI_ENV_EXT_INL_H_
diff --git a/runtime/jni/jni_env_ext.h b/runtime/jni/jni_env_ext.h
index 5952b3b..3614213 100644
--- a/runtime/jni/jni_env_ext.h
+++ b/runtime/jni/jni_env_ext.h
@@ -63,9 +63,8 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::alloc_tracker_lock_);
 
-  void UpdateLocal(IndirectRef iref, ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_) {
-    locals_.Update(iref, obj);
-  }
+  void UpdateLocal(IndirectRef iref, ObjPtr<mirror::Object> obj)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   jobject NewLocalRef(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
   void DeleteLocalRef(jobject obj) REQUIRES_SHARED(Locks::mutator_lock_);