Revert^4 "Walk internal ArtField/ArtMethod pointers"

Several of the new tests make use of the invoke-custom opcode. This
opcode is not supported by dexter/slicer, causing the tests to fail.

This reverts commit c34eab45161c51bf63e548e44645cbcc59d01268.
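
This is a reland of the change that introduced ReflectiveHandle and
StackReflectiveHandleScope, which let code pin ArtField*/ArtMethod*
pointers across suspend points so that structural redefinition can find
and update them. A minimal sketch of the intended use (illustrative
only; MightSuspend() stands in for any code that can suspend, the other
names are added by this change):

  Thread* self = Thread::Current();
  StackArtMethodHandleScope<1> rhs(self);
  MutableReflectiveHandle<ArtMethod> hm(rhs.NewHandle(method));
  MightSuspend(self);  // Redefinition may replace the underlying method.
  method = hm.Get();   // Read back the possibly-updated pointer.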

Reason for revert: Added tests to redefine-stress known failures
Test: ./test.py --host --redefine-stress
Bug: 134162467

Change-Id: Ic1b375a0cb1e44d0252c17115af92c269fb8efc5
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 81e1ecf..cfa16f8 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -189,6 +189,7 @@
         "read_barrier.cc",
         "reference_table.cc",
         "reflection.cc",
+        "reflective_handle_scope.cc",
         "reflective_value_visitor.cc",
         "runtime.cc",
         "runtime_callbacks.cc",
diff --git a/runtime/common_dex_operations.h b/runtime/common_dex_operations.h
index 2f86fbc..882e3ce 100644
--- a/runtime/common_dex_operations.h
+++ b/runtime/common_dex_operations.h
@@ -35,6 +35,8 @@
 #include "mirror/class.h"
 #include "mirror/object.h"
 #include "obj_ptr-inl.h"
+#include "reflective_handle.h"
+#include "reflective_handle_scope.h"
 #include "runtime.h"
 #include "stack.h"
 #include "thread.h"
@@ -100,8 +102,10 @@
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
     StackHandleScope<1> hs(self);
+    StackArtFieldHandleScope<1> rhs(self);
     // Wrap in handle wrapper in case the listener does thread suspension.
     HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(&obj));
+    ReflectiveHandleWrapper<ArtField> fh(rhs.NewReflectiveHandleWrapper(&field));
     ObjPtr<mirror::Object> this_object;
     if (!field->IsStatic()) {
       this_object = obj;
@@ -159,8 +163,10 @@
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
     StackHandleScope<2> hs(self);
+    StackArtFieldHandleScope<1> rhs(self);
     // Save this and return value (if needed) in case the instrumentation causes a suspend.
     HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(&obj));
+    ReflectiveHandleWrapper<ArtField> fh(rhs.NewReflectiveHandleWrapper(&field));
     ObjPtr<mirror::Object> this_object = field->IsStatic() ? nullptr : obj;
     mirror::Object* fake_root = nullptr;
     HandleWrapper<mirror::Object> ret(hs.NewHandleWrapper<mirror::Object>(
@@ -210,8 +216,10 @@
         ObjPtr<mirror::Class> field_class;
         {
           StackHandleScope<2> hs(self);
+          StackArtFieldHandleScope<1> rhs(self);
           HandleWrapperObjPtr<mirror::Object> h_reg(hs.NewHandleWrapper(&reg));
           HandleWrapperObjPtr<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
+          ReflectiveHandleWrapper<ArtField> fh(rhs.NewReflectiveHandleWrapper(&field));
           field_class = field->ResolveType();
         }
         // ArtField::ResolveType() may fail as evidenced with a dexing bug (b/78788577).
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 4cc3583..b28868f 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -71,6 +71,8 @@
 #include "oat_file.h"
 #include "obj_ptr-inl.h"
 #include "reflection.h"
+#include "reflective_handle.h"
+#include "reflective_handle_scope-inl.h"
 #include "runtime-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
@@ -4056,18 +4058,19 @@
   soa.Self()->AssertNoPendingException();
 
   // Translate the method through the vtable, unless the debugger wants to suppress it.
-  ArtMethod* m = pReq->method;
+  StackArtMethodHandleScope<2> rhs(soa.Self());
+  MutableReflectiveHandle<ArtMethod> m(rhs.NewHandle(pReq->method));
   PointerSize image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
   if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
-    ArtMethod* actual_method =
-        pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size);
-    if (actual_method != m) {
-      VLOG(jdwp) << "ExecuteMethod translated " << ArtMethod::PrettyMethod(m)
-                 << " to " << ArtMethod::PrettyMethod(actual_method);
+    MutableReflectiveHandle<ArtMethod> actual_method(rhs.NewHandle(
+        pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m.Get(), image_pointer_size)));
+    if (actual_method.Get() != m.Get()) {
+      VLOG(jdwp) << "ExecuteMethod translated " << m->PrettyMethod()
+                 << " to " << actual_method->PrettyMethod();
       m = actual_method;
     }
   }
-  VLOG(jdwp) << "ExecuteMethod " << ArtMethod::PrettyMethod(m)
+  VLOG(jdwp) << "ExecuteMethod " << m->PrettyMethod()
              << " receiver=" << pReq->receiver.Read()
              << " arg_count=" << pReq->arg_count;
   CHECK(m != nullptr);
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 2556079..fe12a7c 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -39,6 +39,7 @@
 #include "mirror/object-inl.h"
 #include "mirror/throwable.h"
 #include "nth_caller_visitor.h"
+#include "reflective_handle_scope-inl.h"
 #include "runtime.h"
 #include "stack_map.h"
 #include "thread.h"
@@ -381,9 +382,11 @@
       return resolved_field;
     } else {
       StackHandleScope<1> hs(self);
+      StackArtFieldHandleScope<1> rhs(self);
+      ReflectiveHandle<ArtField> resolved_field_handle(rhs.NewHandle(resolved_field));
       if (LIKELY(class_linker->EnsureInitialized(self, hs.NewHandle(fields_class), true, true))) {
         // Otherwise let's ensure the class is initialized before resolving the field.
-        return resolved_field;
+        return resolved_field_handle.Get();
       }
       DCHECK(self->IsExceptionPending());  // Throw exception and unwind
       return nullptr;  // Failure.
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 5b4275c..5235f65 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -136,10 +136,13 @@
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, flip_function, method_verifier, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, method_verifier, thread_local_mark_stack, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_mark_stack, async_exception, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, async_exception, top_reflective_handle_scope,
+                        sizeof(void*));
     // The first field after tlsPtr_ is forced to a 16 byte alignment so it might have some space.
     auto offset_tlsptr_end = OFFSETOF_MEMBER(Thread, tlsPtr_) +
         sizeof(decltype(reinterpret_cast<Thread*>(16)->tlsPtr_));
-    CHECKED(offset_tlsptr_end - OFFSETOF_MEMBER(Thread, tlsPtr_.async_exception) == sizeof(void*),
-            "async_exception last field");
+    CHECKED(offset_tlsptr_end - OFFSETOF_MEMBER(Thread, tlsPtr_.top_reflective_handle_scope) ==
+                sizeof(void*),
+            "top_reflective_handle_scope last field");
   }
 
diff --git a/runtime/jni/jni_id_manager.cc b/runtime/jni/jni_id_manager.cc
index 2553fdf..4b6335b 100644
--- a/runtime/jni/jni_id_manager.cc
+++ b/runtime/jni/jni_id_manager.cc
@@ -16,6 +16,10 @@
 
 #include "jni_id_manager.h"
 
+#include <algorithm>
+#include <cstdint>
+#include <type_traits>
+#include <utility>
+
 #include "android-base/macros.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
@@ -34,12 +38,11 @@
 #include "mirror/class_ext-inl.h"
 #include "mirror/object-inl.h"
 #include "obj_ptr-inl.h"
+#include "reflective_handle_scope-inl.h"
+#include "reflective_handle_scope.h"
 #include "reflective_value_visitor.h"
 #include "thread-inl.h"
 #include "thread.h"
-#include <algorithm>
-#include <cstdint>
-#include <type_traits>
 
 namespace art {
 namespace jni {
@@ -60,65 +63,63 @@
 }
 
 template <typename ArtType>
-ObjPtr<mirror::PointerArray> GetOrCreateIds(Thread* self,
-                                            ObjPtr<mirror::Class> k,
-                                            ArtType* t,
-                                            /*out*/bool* allocation_failure)
+ObjPtr<mirror::PointerArray> GetIds(ObjPtr<mirror::Class> k, ArtType* t)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if constexpr (std::is_same_v<ArtType, ArtField>) {
+    return t->IsStatic() ? k->GetStaticFieldIds() : k->GetInstanceFieldIds();
+  } else {
+    return t->IsObsolete() ? nullptr : k->GetMethodIds();
+  }
+}
+
+// Forces the appropriate id array to be present if possible. Returns true if allocation was
+// attempted but failed.
+template <typename ArtType>
+bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtType* t)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
 template <>
-ObjPtr<mirror::PointerArray> GetOrCreateIds(Thread* self,
-                                            ObjPtr<mirror::Class> k,
-                                            ArtField* field,
-                                            /*out*/bool* allocation_failure) {
+bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtField* field) {
   ScopedExceptionStorage ses(self);
   StackHandleScope<1> hs(self);
   Handle<mirror::Class> h_k(hs.NewHandle(k));
-  ObjPtr<mirror::PointerArray> res;
   if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
-    res = field->IsStatic() ? h_k->GetStaticFieldIds() : h_k->GetInstanceFieldIds();
+    return false;
   } else {
-    res = field->IsStatic() ? mirror::Class::GetOrCreateStaticFieldIds(h_k)
-                            : mirror::Class::GetOrCreateInstanceFieldIds(h_k);
+    // NB This modifies the class to allocate the ClassExt and the ids array.
+    field->IsStatic() ? mirror::Class::GetOrCreateStaticFieldIds(h_k)
+                      : mirror::Class::GetOrCreateInstanceFieldIds(h_k);
   }
   if (self->IsExceptionPending()) {
     self->AssertPendingOOMException();
     ses.SuppressOldException("Failed to allocate maps for jmethodIDs. ");
-    *allocation_failure = true;
-  } else {
-    *allocation_failure = false;
+    return true;
   }
-  return res;
+  return false;
 }
 
 template <>
-ObjPtr<mirror::PointerArray> GetOrCreateIds(Thread* self,
-                                            ObjPtr<mirror::Class> k,
-                                            ArtMethod* method,
-                                            /*out*/bool* allocation_failure) {
+bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtMethod* method) {
   if (method->IsObsolete()) {
     if (kTraceIds) {
       LOG(INFO) << "jmethodID for Obsolete method " << method->PrettyMethod() << " requested!";
     }
     // No ids array for obsolete methods. Just do a linear scan.
-    *allocation_failure = false;
-    return nullptr;
+    return false;
   }
   StackHandleScope<1> hs(self);
   Handle<mirror::Class> h_k(hs.NewHandle(k));
-  ObjPtr<mirror::PointerArray> res;
   if (Locks::mutator_lock_->IsExclusiveHeld(self) || !Locks::mutator_lock_->IsSharedHeld(self)) {
-    res = h_k->GetMethodIds();
+    return false;
   } else {
-    res = mirror::Class::GetOrCreateMethodIds(h_k);
+    // NB This modifies the class to allocate the ClassExt and the ids array.
+    mirror::Class::GetOrCreateMethodIds(h_k);
   }
   if (self->IsExceptionPending()) {
     self->AssertPendingOOMException();
-    *allocation_failure = true;
-  } else {
-    *allocation_failure = false;
+    return true;
   }
-  return res;
+  return false;
 }
 
 template <typename ArtType>
@@ -135,33 +136,49 @@
 
 // Calls the relevant PrettyMethod/PrettyField on the input.
 template <typename ArtType>
-std::string PrettyGeneric(ArtType* t) REQUIRES_SHARED(Locks::mutator_lock_);
-template <> std::string PrettyGeneric(ArtMethod* f) {
+std::string PrettyGeneric(ArtType t) REQUIRES_SHARED(Locks::mutator_lock_);
+template <>
+std::string PrettyGeneric(ArtMethod* f) {
   return f->PrettyMethod();
 }
-template <> std::string PrettyGeneric(ArtField* f) {
+template <>
+std::string PrettyGeneric(ReflectiveHandle<ArtMethod> f) {
+  return f->PrettyMethod();
+}
+template <>
+std::string PrettyGeneric(ArtField* f) {
+  return f->PrettyField();
+}
+template <>
+std::string PrettyGeneric(ReflectiveHandle<ArtField> f) {
   return f->PrettyField();
 }
 
 // Checks if the field or method is obsolete.
-template <typename ArtType> bool IsObsolete(ArtType* t) REQUIRES_SHARED(Locks::mutator_lock_);
-template <> bool IsObsolete(ArtField* t ATTRIBUTE_UNUSED) {
+template <typename ArtType>
+bool IsObsolete(ReflectiveHandle<ArtType> t) REQUIRES_SHARED(Locks::mutator_lock_);
+template <>
+bool IsObsolete(ReflectiveHandle<ArtField> t ATTRIBUTE_UNUSED) {
   return false;
 }
-template <> bool IsObsolete(ArtMethod* t) {
+template <>
+bool IsObsolete(ReflectiveHandle<ArtMethod> t) {
   return t->IsObsolete();
 }
 
 // Get the canonical (non-copied) version of the field or method. Only relevant for methods.
-template <typename ArtType> ArtType* Canonicalize(ArtType* t) REQUIRES_SHARED(Locks::mutator_lock_);
-template <> ArtField* Canonicalize(ArtField* t) {
-  return t;
+template <typename ArtType>
+ArtType* Canonicalize(ReflectiveHandle<ArtType> t) REQUIRES_SHARED(Locks::mutator_lock_);
+template <>
+ArtField* Canonicalize(ReflectiveHandle<ArtField> t) {
+  return t.Get();
 }
-template <> ArtMethod* Canonicalize(ArtMethod* t) {
+template <>
+ArtMethod* Canonicalize(ReflectiveHandle<ArtMethod> t) {
   if (UNLIKELY(t->IsCopied())) {
-    t = t->GetCanonicalMethod();
+    return t->GetCanonicalMethod();
   }
-  return t;
+  return t.Get();
 }
 
 };  // namespace
@@ -169,7 +186,8 @@
 // We increment the id by 2 each time to allow us to use the LSB as a flag that the ID is an index
 // and not a pointer. This gives us 2**31 unique methods that can be addressed on 32-bit art, which
 // should be more than enough.
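+// Index-based ids therefore always have the LSB set (they are odd), while raw
+// ArtField*/ArtMethod* pointers are word-aligned (even), so the two encodings
+// can be told apart. For example, assuming ids start at 1, index 0 encodes to
+// id 1 and index 1 to id 3.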
-template <> uintptr_t JniIdManager::GetNextId<ArtField>(JniIdType type, ArtField* f) {
+template <>
+uintptr_t JniIdManager::GetNextId<ArtField>(JniIdType type, ReflectiveHandle<ArtField> f) {
   if (LIKELY(type == JniIdType::kIndices)) {
     uintptr_t res = next_field_id_;
     next_field_id_ += 2;
@@ -177,11 +195,12 @@
     return res;
   } else {
     DCHECK_EQ(type, JniIdType::kSwapablePointer);
-    return reinterpret_cast<uintptr_t>(f);
+    return reinterpret_cast<uintptr_t>(f.Get());
   }
 }
 
-template <> uintptr_t JniIdManager::GetNextId<ArtMethod>(JniIdType type, ArtMethod* m) {
+template <>
+uintptr_t JniIdManager::GetNextId<ArtMethod>(JniIdType type, ReflectiveHandle<ArtMethod> m) {
   if (LIKELY(type == JniIdType::kIndices)) {
     uintptr_t res = next_method_id_;
     next_method_id_ += 2;
@@ -189,21 +208,26 @@
     return res;
   } else {
     DCHECK_EQ(type, JniIdType::kSwapablePointer);
-    return reinterpret_cast<uintptr_t>(m);
+    return reinterpret_cast<uintptr_t>(m.Get());
   }
 }
-template <> std::vector<ArtField*>& JniIdManager::GetGenericMap<ArtField>() {
+template <>
+std::vector<ArtField*>& JniIdManager::GetGenericMap<ArtField>() {
   return field_id_map_;
 }
 
-template <> std::vector<ArtMethod*>& JniIdManager::GetGenericMap<ArtMethod>() {
+template <>
+std::vector<ArtMethod*>& JniIdManager::GetGenericMap<ArtMethod>() {
   return method_id_map_;
 }
-template <> size_t JniIdManager::GetLinearSearchStartId<ArtField>(ArtField* t ATTRIBUTE_UNUSED) {
+template <>
+size_t JniIdManager::GetLinearSearchStartId<ArtField>(
+    ReflectiveHandle<ArtField> t ATTRIBUTE_UNUSED) {
   return deferred_allocation_field_id_start_;
 }
 
-template <> size_t JniIdManager::GetLinearSearchStartId<ArtMethod>(ArtMethod* m) {
+template <>
+size_t JniIdManager::GetLinearSearchStartId<ArtMethod>(ReflectiveHandle<ArtMethod> m) {
   if (m->IsObsolete()) {
     return 1;
   } else {
@@ -212,20 +236,24 @@
 }
 
 // TODO need to fix races in here with visitors
-template <typename ArtType> uintptr_t JniIdManager::EncodeGenericId(ArtType* t) {
+template <typename ArtType>
+uintptr_t JniIdManager::EncodeGenericId(ReflectiveHandle<ArtType> t) {
+  static_assert(std::is_same_v<ArtType, ArtField> || std::is_same_v<ArtType, ArtMethod>,
+                "Expected ArtField or ArtMethod");
   Runtime* runtime = Runtime::Current();
   JniIdType id_type = runtime->GetJniIdType();
   if (id_type == JniIdType::kPointer || t == nullptr) {
-    return reinterpret_cast<uintptr_t>(t);
+    return reinterpret_cast<uintptr_t>(t.Get());
   }
   Thread* self = Thread::Current();
   ScopedExceptionStorage ses(self);
-  t = Canonicalize(t);
   ObjPtr<mirror::Class> klass = t->GetDeclaringClass();
   DCHECK(!klass.IsNull()) << "Null declaring class " << PrettyGeneric(t);
-  size_t off = GetIdOffset(klass, t, kRuntimePointerSize);
-  bool allocation_failure = false;
-  ObjPtr<mirror::PointerArray> ids(GetOrCreateIds(self, klass, t, &allocation_failure));
+  size_t off = GetIdOffset(klass, Canonicalize(t), kRuntimePointerSize);
+  // Here is the earliest point we can suspend.
+  bool allocation_failure = EnsureIdsArray(self, klass, t.Get());
+  klass = t->GetDeclaringClass();
+  ObjPtr<mirror::PointerArray> ids(GetIds(klass, t.Get()));
   if (allocation_failure) {
     self->AssertPendingOOMException();
     ses.SuppressOldException("OOM exception while trying to allocate JNI ids.");
@@ -234,14 +262,27 @@
   uintptr_t cur_id = 0;
   if (!ids.IsNull()) {
     DCHECK_GT(ids->GetLength(), static_cast<int32_t>(off)) << " is " << PrettyGeneric(t);
+    DCHECK_LE(0, static_cast<int32_t>(off)) << " is " << PrettyGeneric(t);
     cur_id = ids->GetElementPtrSize<uintptr_t>(off, kRuntimePointerSize);
   }
   if (cur_id != 0) {
     return cur_id;
   }
   WriterMutexLock mu(self, *Locks::jni_id_lock_);
+  ScopedAssertNoThreadSuspension sants("EncodeJniId critical section.");
   // Check the ids array for a racing id.
+  constexpr std::pair<size_t, size_t> counts {
+    std::is_same_v<ArtType, ArtField> ? 1 : 0,
+    std::is_same_v<ArtType, ArtField> ? 0 : 1,
+  };
+  StackReflectiveHandleScope<counts.first, counts.second> hs(self);
+  t = hs.NewHandle(Canonicalize(t));
   if (!ids.IsNull()) {
+    // It's possible we got suspended and structurally redefined during the EnsureIdsArray. We need
+    // to get the information again.
+    ids = GetIds(klass, t.Get());
+    off = GetIdOffset(klass, Canonicalize(t), kRuntimePointerSize);
+    CHECK(!ids.IsNull());
     cur_id = ids->GetElementPtrSize<uintptr_t>(off, kRuntimePointerSize);
     if (cur_id != 0) {
       // We were racing some other thread and lost.
@@ -259,17 +300,18 @@
     size_t search_start_index = IdToIndex(GetLinearSearchStartId(t));
     size_t index = std::count_if(vec.cbegin() + search_start_index,
                                  vec.cend(),
-                                 [&found, t](const ArtType* candidate) {
-                                   found = found || candidate == t;
+                                 [&found, &self, t](const ArtType* candidate) {
+                                   Locks::mutator_lock_->AssertSharedHeld(self);
+                                   found = found || candidate == t.Get();
                                    return !found;
                                  }) +
                    search_start_index;
     if (found) {
       // We were either racing some other thread and lost or this thread was asked to encode the
       // same method multiple times while holding the mutator lock.
-      DCHECK_EQ(vec[index], t) << "Expected: " << PrettyGeneric(vec[index]) << " got "
-                               << PrettyGeneric(t) << " at index " << index
-                               << " (id: " << IndexToId(index) << ").";
+      DCHECK_EQ(vec[index], t.Get())
+          << "Expected: " << PrettyGeneric(vec[index]) << " got " << PrettyGeneric(t)
+          << " at index " << index << " (id: " << IndexToId(index) << ").";
       return IndexToId(index);
     }
   }
@@ -280,10 +322,10 @@
     std::vector<ArtType*>& vec = GetGenericMap<ArtType>();
     vec.reserve(cur_index + 1);
     vec.resize(std::max(vec.size(), cur_index + 1), nullptr);
-    vec[cur_index] = t;
+    vec[cur_index] = t.Get();
   } else {
     DCHECK_EQ(cur_id % 2, 0u);
-    DCHECK_EQ(cur_id, reinterpret_cast<uintptr_t>(t));
+    DCHECK_EQ(cur_id, reinterpret_cast<uintptr_t>(t.Get()));
   }
   if (ids.IsNull()) {
     if (kIsDebugBuild && !IsObsolete(t)) {
@@ -298,13 +340,24 @@
 }
 
 jfieldID JniIdManager::EncodeFieldId(ArtField* field) {
+  StackArtFieldHandleScope<1> rhs(Thread::Current());
+  return EncodeFieldId(rhs.NewHandle(field));
+}
+
+jfieldID JniIdManager::EncodeFieldId(ReflectiveHandle<ArtField> field) {
   auto* res = reinterpret_cast<jfieldID>(EncodeGenericId(field));
   if (kTraceIds && field != nullptr) {
     LOG(INFO) << "Returning " << res << " for field " << field->PrettyField();
   }
   return res;
 }
+
 jmethodID JniIdManager::EncodeMethodId(ArtMethod* method) {
+  StackArtMethodHandleScope<1> rhs(Thread::Current());
+  return EncodeMethodId(rhs.NewHandle(method));
+}
+
+jmethodID JniIdManager::EncodeMethodId(ReflectiveHandle<ArtMethod> method) {
   auto* res = reinterpret_cast<jmethodID>(EncodeGenericId(method));
   if (kTraceIds && method != nullptr) {
     LOG(INFO) << "Returning " << res << " for method " << method->PrettyMethod();
@@ -445,17 +498,70 @@
   }
 }
 
+class JniIdDeferStackReflectiveScope : public BaseReflectiveHandleScope {
+ public:
+  JniIdDeferStackReflectiveScope() REQUIRES_SHARED(art::Locks::mutator_lock_)
+      : BaseReflectiveHandleScope(), methods_(), fields_() {
+    PushScope(Thread::Current());
+  }
+
+  void Initialize(const std::vector<ArtMethod*>& methods, const std::vector<ArtField*>& fields)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Roles::uninterruptible_) {
+    methods_ = methods;
+    fields_ = fields;
+  }
+
+  ~JniIdDeferStackReflectiveScope() REQUIRES_SHARED(Locks::mutator_lock_) {
+    PopScope();
+  }
+
+  void VisitTargets(ReflectiveValueVisitor* visitor) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    for (auto it = methods_.begin(); it != methods_.end(); ++it) {
+      if (*it == nullptr) {
+        continue;
+      }
+      *it = visitor->VisitMethod(*it, ReflectiveHandleScopeSourceInfo(this));
+    }
+    for (auto it = fields_.begin(); it != fields_.end(); ++it) {
+      if (*it == nullptr) {
+        continue;
+      }
+      *it = visitor->VisitField(*it, ReflectiveHandleScopeSourceInfo(this));
+    }
+  }
+
+  ArtField** GetFieldPtr(size_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
+    return &fields_[idx];
+  }
+
+  ArtMethod** GetMethodPtr(size_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
+    return &methods_[idx];
+  }
+
+  size_t NumFields() const {
+    return fields_.size();
+  }
+  size_t NumMethods() const {
+    return methods_.size();
+  }
+
+ private:
+  std::vector<ArtMethod*> methods_;
+  std::vector<ArtField*> fields_;
+};
+
 void JniIdManager::EndDefer() {
   // Fixup the method->id map.
   Thread* self = Thread::Current();
-  auto set_id = [&](auto* t, uintptr_t id) REQUIRES_SHARED(Locks::mutator_lock_) {
+  auto set_id = [&](auto** t, uintptr_t id) REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (t == nullptr) {
+    if (*t == nullptr) {
       return;
     }
-    ObjPtr<mirror::Class> klass(t->GetDeclaringClass());
-    size_t off = GetIdOffset(klass, t, kRuntimePointerSize);
-    bool alloc_failure = false;
-    ObjPtr<mirror::PointerArray> ids = GetOrCreateIds(self, klass, t, &alloc_failure);
+    bool alloc_failure = EnsureIdsArray(self, (*t)->GetDeclaringClass(), *t);
+    ObjPtr<mirror::Class> klass((*t)->GetDeclaringClass());
+    size_t off = GetIdOffset(klass, (*t), kRuntimePointerSize);
+    ObjPtr<mirror::PointerArray> ids = GetIds(klass, (*t));
     CHECK(!alloc_failure) << "Could not allocate jni ids array!";
     if (ids.IsNull()) {
       return;
@@ -482,25 +588,24 @@
   // ids when it finishes.
   Locks::mutator_lock_->AssertNotExclusiveHeld(self);
   Locks::mutator_lock_->AssertSharedHeld(self);
-  std::vector<ArtMethod*> method_id_copy;
-  std::vector<ArtField*> field_id_copy;
+  JniIdDeferStackReflectiveScope jidsrs;
   uintptr_t method_start_id;
   uintptr_t field_start_id;
   {
     ReaderMutexLock mu(self, *Locks::jni_id_lock_);
-    method_id_copy = method_id_map_;
-    field_id_copy = field_id_map_;
+    ScopedAssertNoThreadSuspension sants(__FUNCTION__);
+    jidsrs.Initialize(method_id_map_, field_id_map_);
     method_start_id = deferred_allocation_method_id_start_;
     field_start_id = deferred_allocation_field_id_start_;
   }
 
-  for (size_t index = kIsDebugBuild ? 0 : IdToIndex(method_start_id); index < method_id_copy.size();
+  for (size_t index = kIsDebugBuild ? 0 : IdToIndex(method_start_id); index < jidsrs.NumMethods();
        ++index) {
-    set_id(method_id_copy[index], IndexToId(index));
+    set_id(jidsrs.GetMethodPtr(index), IndexToId(index));
   }
-  for (size_t index = kIsDebugBuild ? 0 : IdToIndex(field_start_id); index < field_id_copy.size();
+  for (size_t index = kIsDebugBuild ? 0 : IdToIndex(field_start_id); index < jidsrs.NumFields();
        ++index) {
-    set_id(field_id_copy[index], IndexToId(index));
+    set_id(jidsrs.GetFieldPtr(index), IndexToId(index));
   }
   WriterMutexLock mu(self, *Locks::jni_id_lock_);
   DCHECK_GE(deferred_allocation_refcount_, 1u);
diff --git a/runtime/jni/jni_id_manager.h b/runtime/jni/jni_id_manager.h
index 6b43534..1cfcefb 100644
--- a/runtime/jni/jni_id_manager.h
+++ b/runtime/jni/jni_id_manager.h
@@ -17,8 +17,9 @@
 #ifndef ART_RUNTIME_JNI_JNI_ID_MANAGER_H_
 #define ART_RUNTIME_JNI_JNI_ID_MANAGER_H_
 
-#include <atomic>
 #include <jni.h>
+
+#include <atomic>
 #include <vector>
 
 #include "art_field.h"
@@ -28,6 +29,8 @@
 #include "reflective_value_visitor.h"
 
 namespace art {
+template<typename RT> class ReflectiveHandle;
+
 namespace jni {
 
 class ScopedEnableSuspendAllJniIdQueries;
@@ -42,8 +45,12 @@
 
   ArtMethod* DecodeMethodId(jmethodID method) REQUIRES(!Locks::jni_id_lock_);
   ArtField* DecodeFieldId(jfieldID field) REQUIRES(!Locks::jni_id_lock_);
+  jmethodID EncodeMethodId(ReflectiveHandle<ArtMethod> method) REQUIRES(!Locks::jni_id_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
   jmethodID EncodeMethodId(ArtMethod* method) REQUIRES(!Locks::jni_id_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
+  jfieldID EncodeFieldId(ReflectiveHandle<ArtField> field) REQUIRES(!Locks::jni_id_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
   jfieldID EncodeFieldId(ArtField* field) REQUIRES(!Locks::jni_id_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -52,15 +59,20 @@
 
  private:
   template <typename ArtType>
-  uintptr_t EncodeGenericId(ArtType* t) REQUIRES(!Locks::jni_id_lock_)
+  uintptr_t EncodeGenericId(ReflectiveHandle<ArtType> t) REQUIRES(!Locks::jni_id_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
   template <typename ArtType>
   ArtType* DecodeGenericId(uintptr_t input) REQUIRES(!Locks::jni_id_lock_);
-  template <typename ArtType> std::vector<ArtType*>& GetGenericMap() REQUIRES(Locks::jni_id_lock_);
+  template <typename ArtType> std::vector<ArtType*>& GetGenericMap()
+      REQUIRES(Locks::jni_id_lock_);
   template <typename ArtType>
-  uintptr_t GetNextId(JniIdType id, ArtType* t) REQUIRES(Locks::jni_id_lock_);
+  uintptr_t GetNextId(JniIdType id, ReflectiveHandle<ArtType> t)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(Locks::jni_id_lock_);
   template <typename ArtType>
-  size_t GetLinearSearchStartId(ArtType* t) REQUIRES(Locks::jni_id_lock_);
+  size_t GetLinearSearchStartId(ReflectiveHandle<ArtType> t)
+      REQUIRES(Locks::jni_id_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void StartDefer() REQUIRES(!Locks::jni_id_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
   void EndDefer() REQUIRES(!Locks::jni_id_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/jni/jni_internal.h b/runtime/jni/jni_internal.h
index da17922..1616ee5 100644
--- a/runtime/jni/jni_internal.h
+++ b/runtime/jni/jni_internal.h
@@ -22,7 +22,10 @@
 
 #include "base/locks.h"
 #include "base/macros.h"
+#include "reflective_handle.h"
+#include "reflective_handle_scope.h"
 #include "runtime.h"
+#include "thread.h"
 
 namespace art {
 
@@ -87,6 +90,16 @@
 }
 
 template <bool kEnableIndexIds = true>
+ALWAYS_INLINE static inline jfieldID EncodeArtField(ReflectiveHandle<ArtField> field)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (kEnableIndexIds && Runtime::Current()->GetJniIdType() != JniIdType::kPointer) {
+    return Runtime::Current()->GetJniIdManager()->EncodeFieldId(field);
+  } else {
+    return reinterpret_cast<jfieldID>(field.Get());
+  }
+}
+
+template <bool kEnableIndexIds = true>
 ALWAYS_INLINE
 static inline jfieldID EncodeArtField(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_)  {
   if (kEnableIndexIds && Runtime::Current()->GetJniIdType() != JniIdType::kPointer) {
@@ -98,6 +111,17 @@
 
 template <bool kEnableIndexIds = true>
 ALWAYS_INLINE
+static inline jmethodID EncodeArtMethod(ReflectiveHandle<ArtMethod> art_method)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (kEnableIndexIds && Runtime::Current()->GetJniIdType() != JniIdType::kPointer) {
+    return Runtime::Current()->GetJniIdManager()->EncodeMethodId(art_method);
+  } else {
+    return reinterpret_cast<jmethodID>(art_method.Get());
+  }
+}
+
+template <bool kEnableIndexIds = true>
+ALWAYS_INLINE
 static inline jmethodID EncodeArtMethod(ArtMethod* art_method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (kEnableIndexIds && Runtime::Current()->GetJniIdType() != JniIdType::kPointer) {
diff --git a/runtime/reflective_handle.h b/runtime/reflective_handle.h
new file mode 100644
index 0000000..014d976
--- /dev/null
+++ b/runtime/reflective_handle.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_REFLECTIVE_HANDLE_H_
+#define ART_RUNTIME_REFLECTIVE_HANDLE_H_
+
+#include "base/value_object.h"
+#include "reflective_reference.h"
+
+namespace art {
+
+// This is a holder similar to Handle<T> that is used to hold reflective references to ArtField and
+// ArtMethod structures. A reflective reference is one that must be updated if the underlying class
+// or instances are replaced due to structural redefinition or some other process. In general these
+// are not needed; use one only when a reference to a field or method must remain valid while being
+// held across a suspend point.
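+//
+// A minimal illustrative use (MightSuspend() is a placeholder for any code
+// that can suspend; the wrapper writes the possibly-updated pointer back to
+// 'field' when it goes out of scope):
+//
+//   StackArtFieldHandleScope<1> rhs(self);
+//   ReflectiveHandleWrapper<ArtField> fh(rhs.NewReflectiveHandleWrapper(&field));
+//   MightSuspend(self);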
+template <typename T>
+class ReflectiveHandle : public ValueObject {
+ public:
+  static_assert(std::is_same_v<T, ArtField> || std::is_same_v<T, ArtMethod>,
+                "Expected ArtField or ArtMethod");
+
+  ReflectiveHandle() : reference_(nullptr) {}
+
+  ALWAYS_INLINE ReflectiveHandle(const ReflectiveHandle<T>& handle) = default;
+  ALWAYS_INLINE ReflectiveHandle<T>& operator=(const ReflectiveHandle<T>& handle) = default;
+
+  ALWAYS_INLINE explicit ReflectiveHandle(ReflectiveReference<T>* reference)
+      : reference_(reference) {}
+
+  ALWAYS_INLINE T& operator*() const REQUIRES_SHARED(Locks::mutator_lock_) {
+    return *Get();
+  }
+
+  ALWAYS_INLINE T* operator->() const REQUIRES_SHARED(Locks::mutator_lock_) {
+    return Get();
+  }
+
+  ALWAYS_INLINE T* Get() const REQUIRES_SHARED(Locks::mutator_lock_) {
+    return reference_->Ptr();
+  }
+
+  ALWAYS_INLINE bool IsNull() const {
+    // It's safe to null-check it without a read barrier.
+    return reference_->IsNull();
+  }
+
+  ALWAYS_INLINE bool operator!=(std::nullptr_t) const REQUIRES_SHARED(Locks::mutator_lock_) {
+    return !IsNull();
+  }
+
+  ALWAYS_INLINE bool operator==(std::nullptr_t) const REQUIRES_SHARED(Locks::mutator_lock_) {
+    return IsNull();
+  }
+
+ protected:
+  ReflectiveReference<T>* reference_;
+
+ private:
+  friend class BaseReflectiveHandleScope;
+  template <size_t kNumFieldReferences, size_t kNumMethodReferences>
+  friend class StackReflectiveHandleScope;
+};
+
+// Handles that support assignment.
+template <typename T>
+class MutableReflectiveHandle : public ReflectiveHandle<T> {
+ public:
+  MutableReflectiveHandle() {}
+
+  ALWAYS_INLINE MutableReflectiveHandle(const MutableReflectiveHandle<T>& handle)
+      REQUIRES_SHARED(Locks::mutator_lock_) = default;
+
+  ALWAYS_INLINE MutableReflectiveHandle<T>& operator=(const MutableReflectiveHandle<T>& handle)
+      REQUIRES_SHARED(Locks::mutator_lock_) = default;
+
+  ALWAYS_INLINE explicit MutableReflectiveHandle(ReflectiveReference<T>* reference)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      : ReflectiveHandle<T>(reference) {}
+
+  ALWAYS_INLINE T* Assign(T* reference) REQUIRES_SHARED(Locks::mutator_lock_) {
+    ReflectiveReference<T>* ref = ReflectiveHandle<T>::reference_;
+    T* old = ref->Ptr();
+    ref->Assign(reference);
+    return old;
+  }
+
+ private:
+  friend class BaseReflectiveHandleScope;
+  template <size_t kNumFieldReferences, size_t kNumMethodReferences>
+  friend class StackReflectiveHandleScope;
+};
+
+template<typename T>
+class ReflectiveHandleWrapper : public MutableReflectiveHandle<T> {
+ public:
+  ReflectiveHandleWrapper(T** obj, const MutableReflectiveHandle<T>& handle)
+     : MutableReflectiveHandle<T>(handle), obj_(obj) {
+  }
+
+  ReflectiveHandleWrapper(const ReflectiveHandleWrapper&) = default;
+
+  ~ReflectiveHandleWrapper() {
+    *obj_ = MutableReflectiveHandle<T>::Get();
+  }
+
+ private:
+  T** const obj_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_REFLECTIVE_HANDLE_H_
diff --git a/runtime/reflective_handle_scope-inl.h b/runtime/reflective_handle_scope-inl.h
new file mode 100644
index 0000000..64ea9f9
--- /dev/null
+++ b/runtime/reflective_handle_scope-inl.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_REFLECTIVE_HANDLE_SCOPE_INL_H_
+#define ART_RUNTIME_REFLECTIVE_HANDLE_SCOPE_INL_H_
+
+#include "android-base/thread_annotations.h"
+#include "base/mutex.h"
+#include "reflective_handle.h"
+#include "reflective_handle_scope.h"
+#include "thread-current-inl.h"
+
+namespace art {
+
+template <size_t kNumFields, size_t kNumMethods>
+StackReflectiveHandleScope<kNumFields, kNumMethods>::StackReflectiveHandleScope(Thread* self)
+    : field_pos_(0), method_pos_(0) {
+  DCHECK_EQ(self, Thread::Current());
+  PushScope(self);
+}
+
+template <size_t kNumFields, size_t kNumMethods>
+void StackReflectiveHandleScope<kNumFields, kNumMethods>::VisitTargets(
+    ReflectiveValueVisitor* visitor) {
+  Thread* self = Thread::Current();
+  DCHECK(GetThread() == self ||
+         Locks::mutator_lock_->IsExclusiveHeld(self))
+      << *GetThread() << " on thread " << *self;
+  auto visit_one = [&](auto& rv) NO_THREAD_SAFETY_ANALYSIS {
+    Locks::mutator_lock_->AssertSharedHeld(self);
+    if (!rv.IsNull()) {
+      rv.Assign((*visitor)(rv.Ptr(), ReflectiveHandleScopeSourceInfo(this)));
+    }
+  };
+  std::for_each(fields_.begin(), fields_.begin() + field_pos_, visit_one);
+  std::for_each(methods_.begin(), methods_.begin() + method_pos_, visit_one);
+}
+
+template <size_t kNumFields, size_t kNumMethods>
+StackReflectiveHandleScope<kNumFields, kNumMethods>::~StackReflectiveHandleScope() {
+  PopScope();
+}
+
+void BaseReflectiveHandleScope::PushScope(Thread* self) {
+  DCHECK_EQ(self, Thread::Current());
+  self_ = self;
+  link_ = self_->GetTopReflectiveHandleScope();
+  self_->PushReflectiveHandleScope(this);
+}
+
+void BaseReflectiveHandleScope::PopScope() {
+  auto* prev = self_->PopReflectiveHandleScope();
+  CHECK_EQ(prev, this);
+  link_ = nullptr;
+}
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_REFLECTIVE_HANDLE_SCOPE_INL_H_
diff --git a/runtime/reflective_handle_scope.cc b/runtime/reflective_handle_scope.cc
new file mode 100644
index 0000000..2c3ae5e
--- /dev/null
+++ b/runtime/reflective_handle_scope.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reflective_handle_scope.h"
+
+#include <ostream>
+
+#include "thread.h"
+
+namespace art {
+
+void BaseReflectiveHandleScope::Describe(std::ostream& os) const {
+  os << "[BaseReflectiveHandleScope self_=" << *self_ << ", link_=" << link_ << "]";
+}
+
+std::ostream& operator<<(std::ostream& os, const BaseReflectiveHandleScope& brhs) {
+  brhs.Describe(os);
+  return os;
+}
+
+}  // namespace art
diff --git a/runtime/reflective_handle_scope.h b/runtime/reflective_handle_scope.h
new file mode 100644
index 0000000..46cff8b
--- /dev/null
+++ b/runtime/reflective_handle_scope.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_REFLECTIVE_HANDLE_SCOPE_H_
+#define ART_RUNTIME_REFLECTIVE_HANDLE_SCOPE_H_
+
+#include <android-base/logging.h>
+
+#include <array>
+#include <compare>
+#include <functional>
+#include <stack>
+
+#include "android-base/macros.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "base/locks.h"
+#include "base/macros.h"
+#include "base/value_object.h"
+#include "reflective_handle.h"
+#include "reflective_reference.h"
+#include "reflective_value_visitor.h"
+
+namespace art {
+
+class ArtField;
+class ArtMethod;
+class BaseReflectiveHandleScope;
+class Thread;
+
+// This is a holder similar to StackHandleScope that is used to hold reflective references to
+// ArtField and ArtMethod structures. A reflective reference is one that must be updated if the
+// underlying class or instances are replaced due to structural redefinition or some other process.
+// In general these are not needed; use one only when a reference to a field or method must remain
+// valid while being held across a suspend point. The scope takes care of the book-keeping that
+// allows the runtime to visit and update ReflectiveHandles when structural redefinition occurs.
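+//
+// For example (illustrative only; MightSuspend() is a placeholder), a scope
+// holding one field and one method slot across a possible suspend point:
+//
+//   StackReflectiveHandleScope</*kNumFields=*/1, /*kNumMethods=*/1> hs(self);
+//   MutableReflectiveHandle<ArtField> hf(hs.NewFieldHandle(field));
+//   MutableReflectiveHandle<ArtMethod> hm(hs.NewMethodHandle(method));
+//   MightSuspend(self);
+//   // If a structural redefinition occurred, VisitTargets has rewritten the
+//   // slots and hf.Get()/hm.Get() return the replacement objects.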
+class BaseReflectiveHandleScope {
+ public:
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitTargets(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+    FunctionReflectiveValueVisitor v(&visitor);
+    VisitTargets(&v);
+  }
+
+  ALWAYS_INLINE virtual ~BaseReflectiveHandleScope() {
+    DCHECK(link_ == nullptr);
+  }
+
+  virtual void VisitTargets(ReflectiveValueVisitor* visitor)
+      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+  BaseReflectiveHandleScope* GetLink() {
+    return link_;
+  }
+
+  Thread* GetThread() {
+    return self_;
+  }
+
+  void Describe(std::ostream& os) const;
+
+ protected:
+  ALWAYS_INLINE BaseReflectiveHandleScope() : self_(nullptr), link_(nullptr) {}
+
+  ALWAYS_INLINE inline void PushScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+  ALWAYS_INLINE inline void PopScope() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Thread this node is rooted in.
+  Thread* self_;
+  // Next node in the handle-scope linked list. Root is held by Thread.
+  BaseReflectiveHandleScope* link_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BaseReflectiveHandleScope);
+};
+std::ostream& operator<<(std::ostream& os, const BaseReflectiveHandleScope& brhs);
+
+template <size_t kNumFields, size_t kNumMethods>
+class StackReflectiveHandleScope : public BaseReflectiveHandleScope {
+ private:
+  static constexpr bool kHasFields = kNumFields > 0;
+  static constexpr bool kHasMethods = kNumMethods > 0;
+
+ public:
+  ALWAYS_INLINE explicit StackReflectiveHandleScope(Thread* self)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  ALWAYS_INLINE ~StackReflectiveHandleScope() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  void VisitTargets(ReflectiveValueVisitor* visitor) override REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template <typename T,
+            typename = typename std::enable_if_t<(kHasFields && std::is_same_v<T, ArtField>) ||
+                                                 (kHasMethods && std::is_same_v<T, ArtMethod>)>>
+  ALWAYS_INLINE MutableReflectiveHandle<T> NewHandle(T* t) REQUIRES_SHARED(Locks::mutator_lock_) {
+    if constexpr (std::is_same_v<T, ArtField>) {
+      return NewFieldHandle(t);
+    } else {
+      static_assert(std::is_same_v<T, ArtMethod>, "Expected ArtField or ArtMethod");
+      return NewMethodHandle(t);
+    }
+  }
+  template<typename T>
+  ALWAYS_INLINE ReflectiveHandleWrapper<T> NewReflectiveHandleWrapper(T** t)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return ReflectiveHandleWrapper<T>(t, NewHandle(*t));
+  }
+
+  ALWAYS_INLINE MutableReflectiveHandle<ArtField> NewFieldHandle(ArtField* f)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    static_assert(kHasFields, "No fields");
+    DCHECK_LT(field_pos_, kNumFields);
+    MutableReflectiveHandle<ArtField> fh(GetMutableFieldHandle(field_pos_++));
+    fh.Assign(f);
+    return fh;
+  }
+  ALWAYS_INLINE ReflectiveHandleWrapper<ArtField> NewReflectiveFieldHandleWrapper(ArtField** f)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return ReflectiveHandleWrapper<ArtField>(f, NewFieldHandle(*f));
+  }
+
+  ALWAYS_INLINE ArtField* GetField(size_t i) {
+    static_assert(kHasFields, "No fields");
+    return GetFieldReference(i)->Ptr();
+  }
+  ALWAYS_INLINE ReflectiveHandle<ArtField> GetFieldHandle(size_t i) {
+    static_assert(kHasFields, "No fields");
+    return ReflectiveHandle<ArtField>(GetFieldReference(i));
+  }
+  ALWAYS_INLINE MutableReflectiveHandle<ArtField> GetMutableFieldHandle(size_t i) {
+    static_assert(kHasFields, "No fields");
+    return MutableReflectiveHandle<ArtField>(GetFieldReference(i));
+  }
+
+  ALWAYS_INLINE MutableReflectiveHandle<ArtMethod> NewMethodHandle(ArtMethod* m)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    static_assert(kHasMethods, "No methods");
+    DCHECK_LT(method_pos_, kNumMethods);
+    MutableReflectiveHandle<ArtMethod> mh(GetMutableMethodHandle(method_pos_++));
+    mh.Assign(m);
+    return mh;
+  }
+  ALWAYS_INLINE ReflectiveHandleWrapper<ArtMethod> NewReflectiveMethodHandleWrapper(ArtMethod** m)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return ReflectiveHandleWrapper<ArtMethod>(m, NewMethodHandle(*m));
+  }
+
+  ALWAYS_INLINE ArtMethod* GetMethod(size_t i) {
+    static_assert(kHasMethods, "No methods");
+    return GetMethodReference(i)->Ptr();
+  }
+  ALWAYS_INLINE ReflectiveHandle<ArtMethod> GetMethodHandle(size_t i) {
+    static_assert(kHasMethods, "No methods");
+    return ReflectiveHandle<ArtMethod>(GetMethodReference(i));
+  }
+  ALWAYS_INLINE MutableReflectiveHandle<ArtMethod> GetMutableMethodHandle(size_t i) {
+    static_assert(kHasMethods, "No methods");
+    return MutableReflectiveHandle<ArtMethod>(GetMethodReference(i));
+  }
+
+  size_t RemainingFieldSlots() const {
+    return kNumFields - field_pos_;
+  }
+
+  size_t RemainingMethodSlots() const {
+    return kNumMethods - method_pos_;
+  }
+
+ private:
+  ReflectiveReference<ArtMethod>* GetMethodReference(size_t i) {
+    DCHECK_LT(i, method_pos_);
+    return &methods_[i];
+  }
+
+  ReflectiveReference<ArtField>* GetFieldReference(size_t i) {
+    DCHECK_LT(i, field_pos_);
+    return &fields_[i];
+  }
+
+  size_t field_pos_;
+  size_t method_pos_;
+  std::array<ReflectiveReference<ArtField>, kNumFields> fields_;
+  std::array<ReflectiveReference<ArtMethod>, kNumMethods> methods_;
+};
+
+template <size_t kNumMethods>
+using StackArtMethodHandleScope = StackReflectiveHandleScope</*kNumFields=*/0, kNumMethods>;
+
+template <size_t kNumFields>
+using StackArtFieldHandleScope = StackReflectiveHandleScope<kNumFields, /*kNumMethods=*/0>;
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_REFLECTIVE_HANDLE_SCOPE_H_
diff --git a/runtime/reflective_reference.h b/runtime/reflective_reference.h
new file mode 100644
index 0000000..f57c030
--- /dev/null
+++ b/runtime/reflective_reference.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_REFLECTIVE_REFERENCE_H_
+#define ART_RUNTIME_REFLECTIVE_REFERENCE_H_
+
+#include "android-base/macros.h"
+#include "base/macros.h"
+#include "mirror/object_reference.h"
+
+namespace art {
+
+class ArtField;
+class ArtMethod;
+// A reference to an ArtField or ArtMethod.
+template <class ReflectiveType>
+class ReflectiveReference {
+ public:
+  static_assert(std::is_same_v<ReflectiveType, ArtMethod> ||
+                    std::is_same_v<ReflectiveType, ArtField>,
+                "Uknown type!");
+  ReflectiveReference() : val_(nullptr) {}
+  explicit ReflectiveReference(ReflectiveType* r) : val_(r) {}
+  ReflectiveReference<ReflectiveType>& operator=(const ReflectiveReference<ReflectiveType>& t) =
+      default;
+
+  ReflectiveType* Ptr() {
+    return val_;
+  }
+
+  void Assign(ReflectiveType* r) {
+    val_ = r;
+  }
+
+  bool IsNull() const {
+    return val_ == nullptr;
+  }
+
+  bool operator==(const ReflectiveReference<ReflectiveType>& rr) const {
+    return val_ == rr.val_;
+  }
+  bool operator!=(const ReflectiveReference<ReflectiveType>& rr) const {
+    return !operator==(rr);
+  }
+  bool operator==(std::nullptr_t) const {
+    return IsNull();
+  }
+  bool operator!=(std::nullptr_t) const {
+    return !IsNull();
+  }
+
+ private:
+  ReflectiveType* val_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_REFLECTIVE_REFERENCE_H_
diff --git a/runtime/reflective_value_visitor.cc b/runtime/reflective_value_visitor.cc
index 69fd51f..5a288d3 100644
--- a/runtime/reflective_value_visitor.cc
+++ b/runtime/reflective_value_visitor.cc
@@ -42,4 +42,13 @@
   os << " jmethodID=" << reinterpret_cast<uintptr_t>(id_);
 }
 
+void ReflectiveHandleScopeSourceInfo::Describe(std::ostream& os) const {
+  ReflectionSourceInfo::Describe(os);
+  os << " source= (" << source_ << ") ";
+  if (source_ == nullptr) {
+    os << "nullptr";
+  } else {
+    os << *source_;
+  }
+}
+
 }  // namespace art
diff --git a/runtime/reflective_value_visitor.h b/runtime/reflective_value_visitor.h
index 8823fcb..0b09a0b 100644
--- a/runtime/reflective_value_visitor.h
+++ b/runtime/reflective_value_visitor.h
@@ -124,10 +124,7 @@
   explicit ReflectiveHandleScopeSourceInfo(BaseReflectiveHandleScope* source)
       : ReflectionSourceInfo(kSourceThreadHandleScope), source_(source) {}
 
-  void Describe(std::ostream& os) const override {
-    ReflectionSourceInfo::Describe(os);
-    os << " source=" << source_;
-  }
+  void Describe(std::ostream& os) const override;
 
  private:
   BaseReflectiveHandleScope* source_;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 8f68b55..0c9aece 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -2198,6 +2198,7 @@
 }
 
 void Runtime::VisitReflectiveTargets(ReflectiveValueVisitor *visitor) {
+  thread_list_->VisitReflectiveTargets(visitor);
   heap_->VisitReflectiveTargets(visitor);
   jni_id_manager_->VisitReflectiveTargets(visitor);
   callbacks_->VisitReflectiveTargets(visitor);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 088f997..3aacfcb 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -98,6 +98,7 @@
 #include "quick_exception_handler.h"
 #include "read_barrier-inl.h"
 #include "reflection.h"
+#include "reflective_handle_scope-inl.h"
 #include "runtime-inl.h"
 #include "runtime.h"
 #include "runtime_callbacks.h"
@@ -3957,6 +3958,14 @@
   const uint32_t tid_;
 };
 
+void Thread::VisitReflectiveTargets(ReflectiveValueVisitor* visitor) {
+  for (BaseReflectiveHandleScope* brhs = GetTopReflectiveHandleScope();
+       brhs != nullptr;
+       brhs = brhs->GetLink()) {
+    brhs->VisitTargets(visitor);
+  }
+}
+
 template <bool kPrecise>
 void Thread::VisitRoots(RootVisitor* visitor) {
   const pid_t thread_id = GetThreadId();
diff --git a/runtime/thread.h b/runtime/thread.h
index 22c9d06..16ca2aa 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -40,6 +40,7 @@
 #include "managed_stack.h"
 #include "offsets.h"
 #include "read_barrier_config.h"
+#include "reflective_handle_scope.h"
 #include "runtime_globals.h"
 #include "runtime_stats.h"
 #include "thread_state.h"
@@ -635,6 +636,9 @@
   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
+      REQUIRES(Locks::mutator_lock_);
+
   void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kVerifyStack) {
       VerifyStackImpl();
@@ -892,6 +896,23 @@
                                                                 top_handle_scope));
   }
 
+  BaseReflectiveHandleScope* GetTopReflectiveHandleScope() {
+    return tlsPtr_.top_reflective_handle_scope;
+  }
+
+  void PushReflectiveHandleScope(BaseReflectiveHandleScope* scope) {
+    DCHECK_EQ(scope->GetLink(), tlsPtr_.top_reflective_handle_scope);
+    DCHECK_EQ(scope->GetThread(), this);
+    tlsPtr_.top_reflective_handle_scope = scope;
+  }
+
+  BaseReflectiveHandleScope* PopReflectiveHandleScope() {
+    BaseReflectiveHandleScope* handle_scope = tlsPtr_.top_reflective_handle_scope;
+    DCHECK(handle_scope != nullptr);
+    tlsPtr_.top_reflective_handle_scope = tlsPtr_.top_reflective_handle_scope->GetLink();
+    return handle_scope;
+  }
+
   DebugInvokeReq* GetInvokeReq() const {
     return tlsPtr_.debug_invoke_req;
   }
@@ -1639,7 +1660,7 @@
       thread_local_objects(0), mterp_current_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
       thread_local_alloc_stack_end(nullptr),
       flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr),
-      async_exception(nullptr) {
+      async_exception(nullptr), top_reflective_handle_scope(nullptr) {
       std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
     }
 
@@ -1797,6 +1818,9 @@
 
     // The pending async-exception or null.
     mirror::Throwable* async_exception;
+
+    // Top of the linked-list for reflective-handle scopes or null if none.
+    BaseReflectiveHandleScope* top_reflective_handle_scope;
   } tlsPtr_;
 
   // Small thread-local cache to be used from the interpreter.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 28bc184..4e8f468 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1542,6 +1542,13 @@
   }
 }
 
+void ThreadList::VisitReflectiveTargets(ReflectiveValueVisitor* visitor) const {
+  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+  for (const auto& thread : list_) {
+    thread->VisitReflectiveTargets(visitor);
+  }
+}
+
 uint32_t ThreadList::AllocThreadId(Thread* self) {
   MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
   for (size_t i = 0; i < allocated_ids_.size(); ++i) {
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index b5b4450..dad896e 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -22,6 +22,7 @@
 #include "base/mutex.h"
 #include "base/value_object.h"
 #include "jni.h"
+#include "reflective_handle_scope.h"
 #include "suspend_reason.h"
 
 #include <bitset>
@@ -173,6 +174,8 @@
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) const REQUIRES(Locks::mutator_lock_);
+
   // Return a copy of the thread list.
   std::list<Thread*> GetList() REQUIRES(Locks::thread_list_lock_) {
     return list_;