Split `LocalReferenceTable` out of `IndirectReferenceTable`.

In preparation for rewriting the representation of local JNI
references, split their implementation out of the shared
`IndirectReferenceTable`, which shall be used only for global
and weak global references going forward. Make the new
`LocalReferenceTable` always resizable (removing the enum
`ResizableCapacity`) and rename the memory mappings for the
LRT to "local ref table".

Remove the `IndirectReferenceTable` code that was needed only
for local references and make the remaining (global and weak
global) tables non-resizable.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 172332525
Change-Id: I87f02c93694577d1b577c4114fa86c2cd23b4c97
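
The shared `IndirectReferenceTable` API no longer threads a segment-state
cookie through its operations: `Add()` and `Remove()` work on a single
segment starting at index 0, and the capacity passed to `Initialize()` is
final (rounded up to whole pages). A minimal usage sketch based on the API
in this patch; `obj` stands for a valid `ObjPtr<mirror::Object>` and the
calls are assumed to be made with the mutator lock held:

    IndirectReferenceTable globals(kGlobal);
    std::string error_msg;
    bool ok = globals.Initialize(/*max_count=*/ 512u, &error_msg);
    CHECK(ok) << error_msg;

    // Add() returns null and sets `error_msg` on overflow; there is no
    // resizing fallback anymore.
    IndirectRef ref = globals.Add(obj, &error_msg);
    CHECK(ref != nullptr) << error_msg;
    ObjPtr<mirror::Object> decoded = globals.Get(ref);  // O(1) lookup.
    bool removed = globals.Remove(ref);
    CHECK(removed);

Local references keep the cookie-based segment protocol, but through the
new `jni::LocalReferenceTable` (see the `local_reference_table.cc` and
`local_reference_table_test.cc` entries added to `Android.bp` below).
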
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 1cba4b5..9be74e1 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -201,6 +201,7 @@
         "jni/jni_env_ext.cc",
         "jni/jni_id_manager.cc",
         "jni/jni_internal.cc",
+        "jni/local_reference_table.cc",
         "method_handles.cc",
         "metrics/reporter.cc",
         "mirror/array.cc",
@@ -865,6 +866,7 @@
         "jit/profiling_info_test.cc",
         "jni/java_vm_ext_test.cc",
         "jni/jni_internal_test.cc",
+        "jni/local_reference_table_test.cc",
         "method_handles_test.cc",
         "metrics/reporter_test.cc",
         "mirror/dex_cache_test.cc",
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index fafa3c7..6f69001 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -43,8 +43,8 @@
                                  uint64_t* gpr_result,
                                  uint64_t* fpr_result);
 
-static_assert(sizeof(IRTSegmentState) == sizeof(uint32_t), "IRTSegmentState size unexpected");
-static_assert(std::is_trivial<IRTSegmentState>::value, "IRTSegmentState not trivial");
+static_assert(sizeof(jni::LRTSegmentState) == sizeof(uint32_t), "LRTSegmentState size unexpected");
+static_assert(std::is_trivial<jni::LRTSegmentState>::value, "LRTSegmentState not trivial");
 
 extern "C" void artJniReadBarrier(ArtMethod* method) {
   DCHECK(gUseReadBarrier);
@@ -82,7 +82,7 @@
     env->CheckNoHeldMonitors();
   }
   env->SetLocalSegmentState(env->GetLocalRefCookie());
-  env->SetLocalRefCookie(bit_cast<IRTSegmentState>(saved_local_ref_cookie));
+  env->SetLocalRefCookie(bit_cast<jni::LRTSegmentState>(saved_local_ref_cookie));
 }
 
 // TODO: annotalysis disabled as monitor semantics are maintained in Java code.
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 9cb8a93..87ef22a 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1783,7 +1783,7 @@
 
     // Add space for cookie.
     DCHECK_ALIGNED(managed_sp, sizeof(uintptr_t));
-    static_assert(sizeof(uintptr_t) >= sizeof(IRTSegmentState));
+    static_assert(sizeof(uintptr_t) >= sizeof(jni::LRTSegmentState));
     uint8_t* sp8 = reinterpret_cast<uint8_t*>(managed_sp) - sizeof(uintptr_t);
 
     // Layout stack arguments.
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index 6ea035b..23df2c8 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -37,7 +37,7 @@
                                                      /*out*/std::string* error_msg) const {
   DCHECK(iref != nullptr);
   DCHECK_EQ(GetIndirectRefKind(iref), kind_);
-  const uint32_t top_index = segment_state_.top_index;
+  const uint32_t top_index = top_index_;
   uint32_t idx = ExtractIndex(iref);
   if (UNLIKELY(idx >= top_index)) {
     *error_msg = android::base::StringPrintf("deleted reference at index %u in a table of size %u",
@@ -82,7 +82,7 @@
 inline ObjPtr<mirror::Object> IndirectReferenceTable::Get(IndirectRef iref) const {
   DCHECK_EQ(GetIndirectRefKind(iref), kind_);
   uint32_t idx = ExtractIndex(iref);
-  DCHECK_LT(idx, segment_state_.top_index);
+  DCHECK_LT(idx, top_index_);
   DCHECK_EQ(DecodeSerial(reinterpret_cast<uintptr_t>(iref)), table_[idx].GetSerial());
   DCHECK(!table_[idx].GetReference()->IsNull());
   ObjPtr<mirror::Object> obj = table_[idx].GetReference()->Read<kReadBarrierOption>();
@@ -93,7 +93,7 @@
 inline void IndirectReferenceTable::Update(IndirectRef iref, ObjPtr<mirror::Object> obj) {
   DCHECK_EQ(GetIndirectRefKind(iref), kind_);
   uint32_t idx = ExtractIndex(iref);
-  DCHECK_LT(idx, segment_state_.top_index);
+  DCHECK_LT(idx, top_index_);
   DCHECK_EQ(DecodeSerial(reinterpret_cast<uintptr_t>(iref)), table_[idx].GetSerial());
   DCHECK(!table_[idx].GetReference()->IsNull());
   table_[idx].SetReference(obj);
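
With the segment state gone, the inline lookups above validate an index
only against `top_index_` plus the per-slot serial number. A small consumer
sketch of `IsValidReference()`/`Get()` (the same pattern check_jni.cc uses
later in this patch; `table` and `java_object` are placeholders and the
mutator lock is assumed to be held):

    std::string error_msg;
    if (!table->IsValidReference(java_object, &error_msg)) {
      // e.g. "deleted reference at index 3 in a table of size 8".
      LOG(ERROR) << "Invalid JNI reference: " << error_msg;
    } else {
      // The same pointer, viewed as an IndirectRef.
      IndirectRef ref = reinterpret_cast<IndirectRef>(java_object);
      // Get() DCHECKs the index against top_index_ and the stored serial.
      ObjPtr<mirror::Object> obj = table->Get(ref);
    }
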
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index a228d70..479eda5 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -36,7 +36,6 @@
 
 namespace art {
 
-static constexpr bool kDumpStackOnNonLocalReference = false;
 static constexpr bool kDebugIRT = false;
 
 // Maximum table size we allow.
@@ -80,48 +79,15 @@
   return result;
 }
 
-SmallIrtAllocator::SmallIrtAllocator()
-    : small_irt_freelist_(nullptr), lock_("Small IRT table lock", LockLevel::kGenericBottomLock) {
-}
-
-// Allocate an IRT table for kSmallIrtEntries.
-IrtEntry* SmallIrtAllocator::Allocate(std::string* error_msg) {
-  MutexLock lock(Thread::Current(), lock_);
-  if (small_irt_freelist_ == nullptr) {
-    // Refill.
-    MemMap map = NewIRTMap(kPageSize, error_msg);
-    if (map.IsValid()) {
-      small_irt_freelist_ = reinterpret_cast<IrtEntry*>(map.Begin());
-      for (uint8_t* p = map.Begin(); p + kInitialIrtBytes < map.End(); p += kInitialIrtBytes) {
-        *reinterpret_cast<IrtEntry**>(p) = reinterpret_cast<IrtEntry*>(p + kInitialIrtBytes);
-      }
-      shared_irt_maps_.emplace_back(std::move(map));
-    }
-  }
-  if (small_irt_freelist_ == nullptr) {
-    return nullptr;
-  }
-  IrtEntry* result = small_irt_freelist_;
-  small_irt_freelist_ = *reinterpret_cast<IrtEntry**>(small_irt_freelist_);
-  // Clear pointer in first entry.
-  new(result) IrtEntry();
-  return result;
-}
-
-void SmallIrtAllocator::Deallocate(IrtEntry* unneeded) {
-  MutexLock lock(Thread::Current(), lock_);
-  *reinterpret_cast<IrtEntry**>(unneeded) = small_irt_freelist_;
-  small_irt_freelist_ = unneeded;
-}
-
-IndirectReferenceTable::IndirectReferenceTable(IndirectRefKind kind, ResizableCapacity resizable)
-    : segment_state_(kIRTFirstSegment),
+IndirectReferenceTable::IndirectReferenceTable(IndirectRefKind kind)
+    : table_mem_map_(),
       table_(nullptr),
       kind_(kind),
+      top_index_(0u),
       max_entries_(0u),
-      current_num_holes_(0),
-      resizable_(resizable) {
+      current_num_holes_(0) {
   CHECK_NE(kind, kJniTransition);
+  CHECK_NE(kind, kLocal);
 }
 
 bool IndirectReferenceTable::Initialize(size_t max_count, std::string* error_msg) {
@@ -130,34 +96,20 @@
   // Overflow and maximum check.
   CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));
 
-  if (max_count <= kSmallIrtEntries) {
-    table_ = Runtime::Current()->GetSmallIrtAllocator()->Allocate(error_msg);
-    if (table_ != nullptr) {
-      max_entries_ = kSmallIrtEntries;
-      // table_mem_map_ remains invalid.
-    }
+  const size_t table_bytes = RoundUp(max_count * sizeof(IrtEntry), kPageSize);
+  table_mem_map_ = NewIRTMap(table_bytes, error_msg);
+  if (!table_mem_map_.IsValid()) {
+    DCHECK(!error_msg->empty());
+    return false;
   }
-  if (table_ == nullptr) {
-    const size_t table_bytes = RoundUp(max_count * sizeof(IrtEntry), kPageSize);
-    table_mem_map_ = NewIRTMap(table_bytes, error_msg);
-    if (!table_mem_map_.IsValid()) {
-      DCHECK(!error_msg->empty());
-      return false;
-    }
 
-    table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
-    // Take into account the actual length.
-    max_entries_ = table_bytes / sizeof(IrtEntry);
-  }
-  segment_state_ = kIRTFirstSegment;
-  last_known_previous_state_ = kIRTFirstSegment;
+  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
+  // Take into account the actual length.
+  max_entries_ = table_bytes / sizeof(IrtEntry);
   return true;
 }
 
 IndirectReferenceTable::~IndirectReferenceTable() {
-  if (table_ != nullptr && !table_mem_map_.IsValid()) {
-    Runtime::Current()->GetSmallIrtAllocator()->Deallocate(table_);
-  }
 }
 
 void IndirectReferenceTable::ConstexprChecks() {
@@ -188,10 +140,6 @@
   static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
 }
 
-bool IndirectReferenceTable::IsValid() const {
-  return table_ != nullptr;
-}
-
 // Holes:
 //
 // To keep the IRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
@@ -199,37 +147,10 @@
 // similar. Instead, we scan for holes, with the expectation that we will find holes fast as they
 // are usually near the end of the table (see the header, TODO: verify this assumption). To avoid
 // scans when there are no holes, the number of known holes should be tracked.
-//
-// A previous implementation stored the top index and the number of holes as the segment state.
-// This constraints the maximum number of references to 16-bit. We want to relax this, as it
-// is easy to require more references (e.g., to list all classes in large applications). Thus,
-// the implicitly stack-stored state, the IRTSegmentState, is only the top index.
-//
-// Thus, hole count is a local property of the current segment, and needs to be recovered when
-// (or after) a frame is pushed or popped. To keep JNI transitions simple (and inlineable), we
-// cannot do work when the segment changes. Thus, Add and Remove need to ensure the current
-// hole count is correct.
-//
-// To be able to detect segment changes, we require an additional local field that can describe
-// the known segment. This is last_known_previous_state_. The requirement will become clear with
-// the following (some non-trivial) cases that have to be supported:
-//
-// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
-// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
-// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
-//    reference
-// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
-// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
-//    reference
-//
-// Storing the last known *previous* state (bottom index) allows conservatively detecting all the
-// segment changes above. The condition is simply that the last known state is greater than or
-// equal to the current previous state, and smaller than the current state (top index). The
-// condition is conservative as it adds O(1) overhead to operations on an empty segment.
 
-static size_t CountNullEntries(const IrtEntry* table, size_t from, size_t to) {
+static size_t CountNullEntries(const IrtEntry* table, size_t to) {
   size_t count = 0;
-  for (size_t index = from; index != to; ++index) {
+  for (size_t index = 0u; index != to; ++index) {
     if (table[index].GetReference()->IsNull()) {
       count++;
     }
@@ -237,121 +158,37 @@
   return count;
 }
 
-void IndirectReferenceTable::RecoverHoles(IRTSegmentState prev_state) {
-  if (last_known_previous_state_.top_index >= segment_state_.top_index ||
-      last_known_previous_state_.top_index < prev_state.top_index) {
-    const size_t top_index = segment_state_.top_index;
-    size_t count = CountNullEntries(table_, prev_state.top_index, top_index);
-
-    if (kDebugIRT) {
-      LOG(INFO) << "+++ Recovered holes: "
-                << " Current prev=" << prev_state.top_index
-                << " Current top_index=" << top_index
-                << " Old num_holes=" << current_num_holes_
-                << " New num_holes=" << count;
-    }
-
-    current_num_holes_ = count;
-    last_known_previous_state_ = prev_state;
-  } else if (kDebugIRT) {
-    LOG(INFO) << "No need to recover holes";
-  }
-}
-
 ALWAYS_INLINE
 static inline void CheckHoleCount(IrtEntry* table,
                                   size_t exp_num_holes,
-                                  IRTSegmentState prev_state,
-                                  IRTSegmentState cur_state) {
+                                  size_t top_index) {
   if (kIsDebugBuild) {
-    size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
-    CHECK_EQ(exp_num_holes, count) << "prevState=" << prev_state.top_index
-                                   << " topIndex=" << cur_state.top_index;
+    size_t count = CountNullEntries(table, top_index);
+    CHECK_EQ(exp_num_holes, count) << " topIndex=" << top_index;
   }
 }
 
-bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
-  CHECK_GT(new_size, max_entries_);
-
-  constexpr size_t kMaxEntries = kMaxTableSizeInBytes / sizeof(IrtEntry);
-  if (new_size > kMaxEntries) {
-    *error_msg = android::base::StringPrintf("Requested size exceeds maximum: %zu", new_size);
-    return false;
-  }
-  // Note: the above check also ensures that there is no overflow below.
-
-  const size_t table_bytes = RoundUp(new_size * sizeof(IrtEntry), kPageSize);
-
-  MemMap new_map = NewIRTMap(table_bytes, error_msg);
-  if (!new_map.IsValid()) {
-    return false;
-  }
-
-  memcpy(new_map.Begin(), table_, max_entries_ * sizeof(IrtEntry));
-  if (!table_mem_map_.IsValid()) {
-    // Didn't have its own map; deallocate old table.
-    Runtime::Current()->GetSmallIrtAllocator()->Deallocate(table_);
-  }
-  table_mem_map_ = std::move(new_map);
-  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
-  const size_t real_new_size = table_bytes / sizeof(IrtEntry);
-  DCHECK_GE(real_new_size, new_size);
-  max_entries_ = real_new_size;
-
-  return true;
-}
-
-IndirectRef IndirectReferenceTable::Add(IRTSegmentState previous_state,
-                                        ObjPtr<mirror::Object> obj,
-                                        std::string* error_msg) {
+IndirectRef IndirectReferenceTable::Add(ObjPtr<mirror::Object> obj, std::string* error_msg) {
   if (kDebugIRT) {
-    LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
-              << " top_index=" << segment_state_.top_index
-              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+    LOG(INFO) << "+++ Add: top_index=" << top_index_
               << " holes=" << current_num_holes_;
   }
 
-  size_t top_index = segment_state_.top_index;
-
   CHECK(obj != nullptr);
   VerifyObject(obj);
   DCHECK(table_ != nullptr);
 
-  if (top_index == max_entries_) {
-    if (resizable_ == ResizableCapacity::kNo) {
-      std::ostringstream oss;
-      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
-          << "(max=" << max_entries_ << ")"
-          << MutatorLockedDumpable<IndirectReferenceTable>(*this);
-      *error_msg = oss.str();
-      return nullptr;
-    }
-
-    // Try to double space.
-    if (std::numeric_limits<size_t>::max() / 2 < max_entries_) {
-      std::ostringstream oss;
-      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
-          << "(max=" << max_entries_ << ")" << std::endl
-          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
-          << " Resizing failed: exceeds size_t";
-      *error_msg = oss.str();
-      return nullptr;
-    }
-
-    std::string inner_error_msg;
-    if (!Resize(max_entries_ * 2, &inner_error_msg)) {
-      std::ostringstream oss;
-      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
-          << "(max=" << max_entries_ << ")" << std::endl
-          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
-          << " Resizing failed: " << inner_error_msg;
-      *error_msg = oss.str();
-      return nullptr;
-    }
+  if (top_index_ == max_entries_) {
+    // TODO: Fill holes before reporting error.
+    std::ostringstream oss;
+    oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
+        << "(max=" << max_entries_ << ")"
+        << MutatorLockedDumpable<IndirectReferenceTable>(*this);
+    *error_msg = oss.str();
+    return nullptr;
   }
 
-  RecoverHoles(previous_state);
-  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+  CheckHoleCount(table_, current_num_holes_, top_index_);
 
   // We know there's enough room in the table.  Now we just need to find
   // the right spot.  If there's a hole, find it and fill it; otherwise,
@@ -359,26 +196,26 @@
   IndirectRef result;
   size_t index;
   if (current_num_holes_ > 0) {
-    DCHECK_GT(top_index, 1U);
+    DCHECK_GT(top_index_, 1U);
     // Find the first hole; likely to be near the end of the list.
-    IrtEntry* p_scan = &table_[top_index - 1];
+    IrtEntry* p_scan = &table_[top_index_ - 1];
     DCHECK(!p_scan->GetReference()->IsNull());
     --p_scan;
     while (!p_scan->GetReference()->IsNull()) {
-      DCHECK_GE(p_scan, table_ + previous_state.top_index);
+      DCHECK_GT(p_scan, table_);
       --p_scan;
     }
     index = p_scan - table_;
     current_num_holes_--;
   } else {
     // Add to the end.
-    index = top_index++;
-    segment_state_.top_index = top_index;
+    index = top_index_;
+    ++top_index_;
   }
   table_[index].Add(obj);
   result = ToIndirectRef(index);
   if (kDebugIRT) {
-    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
+    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << top_index_
               << " holes=" << current_num_holes_;
   }
 
@@ -386,72 +223,31 @@
   return result;
 }
 
-void IndirectReferenceTable::AssertEmpty() {
-  for (size_t i = 0; i < Capacity(); ++i) {
-    if (!table_[i].GetReference()->IsNull()) {
-      LOG(FATAL) << "Internal Error: non-empty local reference table\n"
-                 << MutatorLockedDumpable<IndirectReferenceTable>(*this);
-      UNREACHABLE();
-    }
-  }
-}
-
 // Removes an object. We extract the table offset bits from "iref"
 // and zap the corresponding entry, leaving a hole if it's not at the top.
-// If the entry is not between the current top index and the bottom index
-// specified by the cookie, we don't remove anything. This is the behavior
-// required by JNI's DeleteLocalRef function.
-// This method is not called when a local frame is popped; this is only used
-// for explicit single removals.
 // Returns "false" if nothing was removed.
-bool IndirectReferenceTable::Remove(IRTSegmentState previous_state, IndirectRef iref) {
+bool IndirectReferenceTable::Remove(IndirectRef iref) {
   if (kDebugIRT) {
-    LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
-              << " top_index=" << segment_state_.top_index
-              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+    LOG(INFO) << "+++ Remove: top_index=" << top_index_
               << " holes=" << current_num_holes_;
   }
 
-  const uint32_t top_index = segment_state_.top_index;
-  const uint32_t bottom_index = previous_state.top_index;
+  // TODO: We should eagerly check the ref kind against the `kind_` instead of postponing until
+  // `CheckEntry()` below. Passing the wrong kind shall currently result in misleading warnings.
+
+  const uint32_t top_index = top_index_;
 
   DCHECK(table_ != nullptr);
 
-  // TODO: We should eagerly check the ref kind against the `kind_` instead of
-  // relying on this weak check and postponing the rest until `CheckEntry()` below.
-  // Passing the wrong kind shall currently result in misleading warnings.
-  if (GetIndirectRefKind(iref) == kJniTransition) {
-    auto* self = Thread::Current();
-    ScopedObjectAccess soa(self);
-    if (self->IsJniTransitionReference(reinterpret_cast<jobject>(iref))) {
-      auto* env = self->GetJniEnv();
-      DCHECK(env != nullptr);
-      if (env->IsCheckJniEnabled()) {
-        LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
-        if (kDumpStackOnNonLocalReference) {
-          self->Dump(LOG_STREAM(WARNING));
-        }
-      }
-      return true;
-    }
-  }
-
   const uint32_t idx = ExtractIndex(iref);
-  if (idx < bottom_index) {
-    // Wrong segment.
-    LOG(WARNING) << "Attempt to remove index outside index area (" << idx
-                 << " vs " << bottom_index << "-" << top_index << ")";
-    return false;
-  }
   if (idx >= top_index) {
     // Bad --- stale reference?
     LOG(WARNING) << "Attempt to remove invalid index " << idx
-                 << " (bottom=" << bottom_index << " top=" << top_index << ")";
+                 << " (top=" << top_index << ")";
     return false;
   }
 
-  RecoverHoles(previous_state);
-  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+  CheckHoleCount(table_, current_num_holes_, top_index_);
 
   if (idx == top_index - 1) {
     // Top-most entry.  Scan up and consume holes.
@@ -463,11 +259,10 @@
     *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
     if (current_num_holes_ != 0) {
       uint32_t collapse_top_index = top_index;
-      while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
+      while (--collapse_top_index > 0u && current_num_holes_ != 0) {
         if (kDebugIRT) {
           ScopedObjectAccess soa(Thread::Current());
-          LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
-                    << " (previous_state=" << bottom_index << ") val="
+          LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1 << " val="
                     << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
         }
         if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
@@ -478,11 +273,11 @@
         }
         current_num_holes_--;
       }
-      segment_state_.top_index = collapse_top_index;
+      top_index_ = collapse_top_index;
 
-      CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+      CheckHoleCount(table_, current_num_holes_, top_index_);
     } else {
-      segment_state_.top_index = top_index - 1;
+      top_index_ = top_index - 1;
       if (kDebugIRT) {
         LOG(INFO) << "+++ ate last entry " << top_index - 1;
       }
@@ -500,7 +295,7 @@
 
     *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
     current_num_holes_++;
-    CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+    CheckHoleCount(table_, current_num_holes_, top_index_);
     if (kDebugIRT) {
       LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
     }
@@ -511,10 +306,7 @@
 
 void IndirectReferenceTable::Trim() {
   ScopedTrace trace(__PRETTY_FUNCTION__);
-  if (!table_mem_map_.IsValid()) {
-    // Small table; nothing to do here.
-    return;
-  }
+  DCHECK(table_mem_map_.IsValid());
   const size_t top_index = Capacity();
   uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
   uint8_t* release_end = static_cast<uint8_t*>(table_mem_map_.BaseEnd());
@@ -568,47 +360,8 @@
   ReferenceTable::Dump(os, entries);
 }
 
-void IndirectReferenceTable::SetSegmentState(IRTSegmentState new_state) {
-  if (kDebugIRT) {
-    LOG(INFO) << "Setting segment state: "
-              << segment_state_.top_index
-              << " -> "
-              << new_state.top_index;
-  }
-  segment_state_ = new_state;
-}
-
-bool IndirectReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
-  DCHECK_GE(free_capacity, static_cast<size_t>(1));
-  if (free_capacity > kMaxTableSizeInBytes) {
-    // Arithmetic might even overflow.
-    *error_msg = "Requested table size implausibly large";
-    return false;
-  }
-  size_t top_index = segment_state_.top_index;
-  if (top_index + free_capacity <= max_entries_) {
-    return true;
-  }
-
-  // We're only gonna do a simple best-effort here, ensuring the asked-for capacity at the end.
-  if (resizable_ == ResizableCapacity::kNo) {
-    *error_msg = "Table is not resizable";
-    return false;
-  }
-
-  // Try to increase the table size.
-  if (!Resize(top_index + free_capacity, error_msg)) {
-    LOG(WARNING) << "JNI ERROR: Unable to reserve space in EnsureFreeCapacity (" << free_capacity
-                 << "): " << std::endl
-                 << MutatorLockedDumpable<IndirectReferenceTable>(*this)
-                 << " Resizing failed: " << *error_msg;
-    return false;
-  }
-  return true;
-}
-
 size_t IndirectReferenceTable::FreeCapacity() const {
-  return max_entries_ - segment_state_.top_index;
+  return max_entries_ - top_index_;
 }
 
 }  // namespace art
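
The hole bookkeeping that remains after this simplification is easiest to
see as a short sequence; the behaviour matches the expectations in
indirect_reference_table_test.cc further down (a sketch only: `irt`, `obj`
and `error_msg` are assumed to be set up as in that test):

    IndirectRef a = irt.Add(obj, &error_msg);  // index 0, top_index_ == 1
    IndirectRef b = irt.Add(obj, &error_msg);  // index 1, top_index_ == 2
    IndirectRef c = irt.Add(obj, &error_msg);  // index 2, top_index_ == 3

    irt.Remove(b);  // Not the top entry: leaves a hole at index 1;
                    // top_index_ stays 3, current_num_holes_ becomes 1.

    IndirectRef d = irt.Add(obj, &error_msg);  // Fills the hole at index 1;
                    // top_index_ and Capacity() stay 3.

    irt.Remove(c);  // Top entry: top_index_ drops, and any holes directly
                    // below the old top would be consumed as well.
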
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 30688c8..59729ac 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -44,47 +44,21 @@
 class Object;
 }  // namespace mirror
 
-// Maintain a table of indirect references.  Used for local/global JNI references.
-//
-// The table contains object references, where the strong (local/global) references are part of the
-// GC root set (but not the weak global references). When an object is added we return an
-// IndirectRef that is not a valid pointer but can be used to find the original value in O(1) time.
-// Conversions to and from indirect references are performed on upcalls and downcalls, so they need
-// to be very fast.
-//
-// To be efficient for JNI local variable storage, we need to provide operations that allow us to
-// operate on segments of the table, where segments are pushed and popped as if on a stack. For
-// example, deletion of an entry should only succeed if it appears in the current segment, and we
-// want to be able to strip off the current segment quickly when a method returns. Additions to the
-// table must be made in the current segment even if space is available in an earlier area.
-//
-// A new segment is created when we call into native code from interpreted code, or when we handle
-// the JNI PushLocalFrame function.
-//
-// The GC must be able to scan the entire table quickly.
-//
-// In summary, these must be very fast:
-//  - adding or removing a segment
-//  - adding references to a new segment
-//  - converting an indirect reference back to an Object
-// These can be a little slower, but must still be pretty quick:
-//  - adding references to a "mature" segment
-//  - removing individual references
-//  - scanning the entire table straight through
-//
-// If there's more than one segment, we don't guarantee that the table will fill completely before
-// we fail due to lack of space. We do ensure that the current segment will pack tightly, which
-// should satisfy JNI requirements (e.g. EnsureLocalCapacity).
-
 // Indirect reference definition.  This must be interchangeable with JNI's jobject, and it's
 // convenient to let null be null, so we use void*.
 //
-// We need a (potentially) large table index and a 2-bit reference type (global, local, weak
-// global). We also reserve some bits to be used to detect stale indirect references: we put a
-// serial number in the extra bits, and keep a copy of the serial number in the table. This requires
-// more memory and additional memory accesses on add/get, but is moving-GC safe. It will catch
-// additional problems, e.g.: create iref1 for obj, delete iref1, create iref2 for same obj,
-// lookup iref1. A pattern based on object bits will miss this.
+// We need a 2-bit reference kind (global, local, weak global) and the rest of the `IndirectRef`
+// is used to locate the actual reference storage.
+//
+// For global and weak global references, we need a (potentially) large table index and we also
+// reserve some bits to be used to detect stale indirect references: we put a serial number in
+// the extra bits, and keep a copy of the serial number in the table. This requires more memory
+// and additional memory accesses on add/get, but is moving-GC safe. It will catch additional
+// problems, e.g.: create iref1 for obj, delete iref1, create iref2 for same obj, lookup iref1.
+// A pattern based on object bits will miss this.
+//
+// Local references use the same bits for the reference kind but the rest of their `IndirectRef`
+// encoding is different, see `LocalReferenceTable` for details.
 using IndirectRef = void*;
 
 // Indirect reference kind, used as the two low bits of IndirectRef.
@@ -101,11 +75,27 @@
 std::ostream& operator<<(std::ostream& os, IndirectRefKind rhs);
 const char* GetIndirectRefKindString(IndirectRefKind kind);
 
+// Maintain a table of indirect references.  Used for global and weak global JNI references.
+//
+// The table contains object references, where the strong global references are part of the
+// GC root set (but not the weak global references). When an object is added we return an
+// `IndirectRef` that is not a valid pointer but can be used to find the original value in O(1)
+// time. Conversions to and from indirect references are performed in JNI functions and when
+// returning from native methods to managed code, so they need to be very fast.
+//
+// The GC must be able to scan the entire table quickly.
+//
+// In summary, these must be very fast:
+//  - adding references
+//  - converting an indirect reference back to an Object
+// These can be a little slower, but must still be pretty quick:
+//  - removing individual references
+//  - scanning the entire table straight through
+
 // Table definition.
 //
-// For the global reference table, the expected common operations are adding a new entry and
-// removing a recently-added entry (usually the most-recently-added entry).  For JNI local
-// references, the common operations are adding a new entry and removing an entire table segment.
+// For the global reference tables, the expected common operations are adding a new entry and
+// removing a recently-added entry (usually the most-recently-added entry).
 //
 // If we delete entries from the middle of the list, we will be left with "holes".  We track the
 // number of holes so that, when adding new elements, we can quickly decide to do a trivial append
@@ -114,18 +104,6 @@
 // When the top-most entry is removed, any holes immediately below it are also removed. Thus,
 // deletion of an entry may reduce "top_index" by more than one.
 //
-// To get the desired behavior for JNI locals, we need to know the bottom and top of the current
-// "segment". The top is managed internally, and the bottom is passed in as a function argument.
-// When we call a native method or push a local frame, the current top index gets pushed on, and
-// serves as the new bottom. When we pop a frame off, the value from the stack becomes the new top
-// index, and the value stored in the previous frame becomes the new bottom.
-//
-// Holes are being locally cached for the segment. Otherwise we'd have to pass bottom index and
-// number of holes, which restricts us to 16 bits for the top index. The value is cached within the
-// table. To avoid code in generated JNI transitions, which implicitly form segments, the code for
-// adding and removing references needs to detect the change of a segment. Helper fields are used
-// for this detection.
-//
 // Common alternative implementation: make IndirectRef a pointer to the actual reference slot.
 // Instead of getting a table and doing a lookup, the lookup can be done instantly. Operations like
 // determining the type and deleting the reference are more expensive because the table must be
@@ -135,20 +113,7 @@
 // approaches).
 //
 // TODO: consider a "lastDeleteIndex" for quick hole-filling when an add immediately follows a
-// delete; must invalidate after segment pop might be worth only using it for JNI globals.
-//
-// TODO: may want completely different add/remove algorithms for global and local refs to improve
-// performance.  A large circular buffer might reduce the amortized cost of adding global
-// references.
-
-// The state of the current segment. We only store the index. Splitting it for index and hole
-// count restricts the range too much.
-struct IRTSegmentState {
-  uint32_t top_index;
-};
-
-// Use as initial value for "cookie", and when table has only one segment.
-static constexpr IRTSegmentState kIRTFirstSegment = { 0 };
+// delete.
 
 // We associate a few bits of serial number with each reference, for error checking.
 static constexpr unsigned int kIRTSerialBits = 3;
@@ -181,71 +146,22 @@
 static_assert(sizeof(IrtEntry) == 2 * sizeof(uint32_t), "Unexpected sizeof(IrtEntry)");
 static_assert(IsPowerOfTwo(sizeof(IrtEntry)), "Unexpected sizeof(IrtEntry)");
 
-// We initially allocate local reference tables with a very small number of entries, packing
-// multiple tables into a single page. If we need to expand one, we allocate them in units of
-// pages.
-// TODO: We should allocate all IRT tables as nonmovable Java objects, That in turn works better
-// if we break up each table into 2 parallel arrays, one for the Java reference, and one for the
-// serial number. The current scheme page-aligns regions containing IRT tables, and so allows them
-// to be identified and page-protected in the future.
-constexpr size_t kInitialIrtBytes = 512;  // Number of bytes in an initial local table.
-constexpr size_t kSmallIrtEntries = kInitialIrtBytes / sizeof(IrtEntry);
-static_assert(kPageSize % kInitialIrtBytes == 0);
-static_assert(kInitialIrtBytes % sizeof(IrtEntry) == 0);
-static_assert(kInitialIrtBytes % sizeof(void *) == 0);
-
-// A minimal stopgap allocator for initial small local IRT tables.
-class SmallIrtAllocator {
- public:
-  SmallIrtAllocator();
-
-  // Allocate an IRT table for kSmallIrtEntries.
-  IrtEntry* Allocate(std::string* error_msg) REQUIRES(!lock_);
-
-  void Deallocate(IrtEntry* unneeded) REQUIRES(!lock_);
-
- private:
-  // A free list of kInitialIrtBytes chunks linked through the first word.
-  IrtEntry* small_irt_freelist_;
-
-  // Repository of MemMaps used for small IRT tables.
-  std::vector<MemMap> shared_irt_maps_;
-
-  Mutex lock_;  // Level kGenericBottomLock; acquired before mem_map_lock_, which is a C++ mutex.
-};
-
 class IndirectReferenceTable {
  public:
-  enum class ResizableCapacity {
-    kNo,
-    kYes
-  };
-
   // Constructs an uninitialized indirect reference table. Use `Initialize()` to initialize it.
-  IndirectReferenceTable(IndirectRefKind kind, ResizableCapacity resizable);
+  explicit IndirectReferenceTable(IndirectRefKind kind);
 
   // Initialize the indirect reference table.
   //
-  // Max_count is the minimum initial capacity (resizable), or minimum total capacity
-  // (not resizable). A value of 1 indicates an implementation-convenient small size.
+  // Max_count is the requested total capacity (not resizable). The actual total capacity
+  // can be higher to utilize all allocated memory (rounding up to whole pages).
   bool Initialize(size_t max_count, std::string* error_msg);
 
   ~IndirectReferenceTable();
 
-  /*
-   * Checks whether construction of the IndirectReferenceTable succeeded.
-   *
-   * This object must only be used if IsValid() returns true. It is safe to
-   * call IsValid from multiple threads without locking or other explicit
-   * synchronization.
-   */
-  bool IsValid() const;
-
   // Add a new entry. "obj" must be a valid non-null object reference. This function will
   // return null if an error happened (with an appropriate error message set).
-  IndirectRef Add(IRTSegmentState previous_state,
-                  ObjPtr<mirror::Object> obj,
-                  std::string* error_msg)
+  IndirectRef Add(ObjPtr<mirror::Object> obj, std::string* error_msg)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Given an IndirectRef in the table, return the Object it refers to.
@@ -265,9 +181,7 @@
   // required by JNI's DeleteLocalRef function.
   //
   // Returns "false" if nothing was removed.
-  bool Remove(IRTSegmentState previous_state, IndirectRef iref);
-
-  void AssertEmpty() REQUIRES_SHARED(Locks::mutator_lock_);
+  bool Remove(IndirectRef iref);
 
   void Dump(std::ostream& os) const
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -280,39 +194,22 @@
   // Return the #of entries in the entire table.  This includes holes, and
   // so may be larger than the actual number of "live" entries.
   size_t Capacity() const {
-    return segment_state_.top_index;
+    return top_index_;
   }
 
   // Return the number of non-null entries in the table. Only reliable for a
   // single segment table.
   int32_t NEntriesForGlobal() {
-    return segment_state_.top_index - current_num_holes_;
+    return top_index_ - current_num_holes_;
   }
 
-  // Ensure that at least free_capacity elements are available, or return false.
-  // Caller ensures free_capacity > 0.
-  bool EnsureFreeCapacity(size_t free_capacity, std::string* error_msg)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  // See implementation of EnsureFreeCapacity. We'll only state here how much is trivially free,
-  // without recovering holes. Thus this is a conservative estimate.
+  // We'll only state here how much is trivially free, without recovering holes.
+  // Thus this is a conservative estimate.
   size_t FreeCapacity() const;
 
   void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  IRTSegmentState GetSegmentState() const {
-    return segment_state_;
-  }
-
-  void SetSegmentState(IRTSegmentState new_state);
-
-  static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
-    // Note: Currently segment_state_ is at offset 0. We're testing the expected value in
-    //       jni_internal_test to make sure it stays correct. It is not OFFSETOF_MEMBER, as that
-    //       is not pointer-size-safe.
-    return Offset(0);
-  }
-
   // Release pages past the end of the table that may have previously held references.
   void Trim() REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -378,23 +275,13 @@
     return reinterpret_cast<IndirectRef>(EncodeIndirectRef(table_index, serial));
   }
 
-  // Resize the backing table to be at least new_size elements long. Currently
-  // must be larger than the current size. After return max_entries_ >= new_size.
-  bool Resize(size_t new_size, std::string* error_msg);
-
-  void RecoverHoles(IRTSegmentState from);
-
   // Abort if check_jni is not enabled. Otherwise, just log as an error.
   static void AbortIfNoCheckJNI(const std::string& msg);
 
   /* extra debugging checks */
   bool CheckEntry(const char*, IndirectRef, uint32_t) const;
 
-  /// semi-public - read/write by jni down calls.
-  IRTSegmentState segment_state_;
-
-  // Mem map where we store the indirect refs. If it's invalid, and table_ is non-null, then
-  // table_ is valid, but was allocated via allocSmallIRT();
+  // Mem map where we store the indirect refs.
   MemMap table_mem_map_;
   // Bottom of the stack. Do not directly access the object references
   // in this as they are roots. Use Get() that has a read barrier.
@@ -402,18 +289,16 @@
   // Bit mask, ORed into all irefs.
   const IndirectRefKind kind_;
 
-  // max #of entries allowed (modulo resizing).
+  // The "top of stack" index where new references are added.
+  size_t top_index_;
+
+  // Maximum number of entries allowed.
   size_t max_entries_;
 
-  // Some values to retain old behavior with holes. Description of the algorithm is in the .cc
-  // file.
+  // Some values to retain old behavior with holes.
+  // Description of the algorithm is in the .cc file.
   // TODO: Consider other data structures for compact tables, e.g., free lists.
   size_t current_num_holes_;  // Number of holes in the current / top segment.
-  IRTSegmentState last_known_previous_state_;
-
-  // Whether the table's capacity may be resized. As there are no locks used, it is the caller's
-  // responsibility to ensure thread-safety.
-  ResizableCapacity resizable_;
 };
 
 }  // namespace art
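
For orientation, the reference layout described in the header comments
above can be pictured as follows. This is an illustration of the scheme,
not the actual encoding helpers (which are unchanged by this patch); the
two-low-bit kind and the 3 serial bits are taken from the declarations
above:

    // The kind occupies the two low bits of the pointer-sized value.
    uintptr_t bits = reinterpret_cast<uintptr_t>(iref);
    IndirectRefKind kind = static_cast<IndirectRefKind>(bits & 3u);

    // For global and weak global references, the remaining bits hold the
    // table index plus a small serial number (kIRTSerialBits == 3). The
    // same serial is stored in the IrtEntry, so reusing a slot makes older
    // IndirectRefs to it detectable: "create iref1, delete it, create
    // iref2 in the same slot, look up iref1" is caught even though the
    // index matches.
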
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index 8e04469..ac22f3f 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -59,8 +59,8 @@
 
   ScopedObjectAccess soa(Thread::Current());
   static const size_t kTableMax = 20;
+  IndirectReferenceTable irt(kGlobal);
   std::string error_msg;
-  IndirectReferenceTable irt(kGlobal, IndirectReferenceTable::ResizableCapacity::kNo);
   bool success = irt.Initialize(kTableMax, &error_msg);
   ASSERT_TRUE(success) << error_msg;
 
@@ -77,21 +77,19 @@
   Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
   ASSERT_TRUE(obj3 != nullptr);
 
-  const IRTSegmentState cookie = kIRTFirstSegment;
-
   CheckDump(&irt, 0, 0);
 
   IndirectRef iref0 = (IndirectRef) 0x11110;
-  EXPECT_FALSE(irt.Remove(cookie, iref0)) << "unexpectedly successful removal";
+  EXPECT_FALSE(irt.Remove(iref0)) << "unexpectedly successful removal";
 
   // Add three, check, remove in the order in which they were added.
-  iref0 = irt.Add(cookie, obj0.Get(), &error_msg);
+  iref0 = irt.Add(obj0.Get(), &error_msg);
   EXPECT_TRUE(iref0 != nullptr);
   CheckDump(&irt, 1, 1);
-  IndirectRef iref1 = irt.Add(cookie, obj1.Get(), &error_msg);
+  IndirectRef iref1 = irt.Add(obj1.Get(), &error_msg);
   EXPECT_TRUE(iref1 != nullptr);
   CheckDump(&irt, 2, 2);
-  IndirectRef iref2 = irt.Add(cookie, obj2.Get(), &error_msg);
+  IndirectRef iref2 = irt.Add(obj2.Get(), &error_msg);
   EXPECT_TRUE(iref2 != nullptr);
   CheckDump(&irt, 3, 3);
 
@@ -99,11 +97,11 @@
   EXPECT_OBJ_PTR_EQ(obj1.Get(), irt.Get(iref1));
   EXPECT_OBJ_PTR_EQ(obj2.Get(), irt.Get(iref2));
 
-  EXPECT_TRUE(irt.Remove(cookie, iref0));
+  EXPECT_TRUE(irt.Remove(iref0));
   CheckDump(&irt, 2, 2);
-  EXPECT_TRUE(irt.Remove(cookie, iref1));
+  EXPECT_TRUE(irt.Remove(iref1));
   CheckDump(&irt, 1, 1);
-  EXPECT_TRUE(irt.Remove(cookie, iref2));
+  EXPECT_TRUE(irt.Remove(iref2));
   CheckDump(&irt, 0, 0);
 
   // Table should be empty now.
@@ -114,19 +112,19 @@
   EXPECT_FALSE(irt.IsValidReference(iref0, &error_msg));
 
   // Add three, remove in the opposite order.
-  iref0 = irt.Add(cookie, obj0.Get(), &error_msg);
+  iref0 = irt.Add(obj0.Get(), &error_msg);
   EXPECT_TRUE(iref0 != nullptr);
-  iref1 = irt.Add(cookie, obj1.Get(), &error_msg);
+  iref1 = irt.Add(obj1.Get(), &error_msg);
   EXPECT_TRUE(iref1 != nullptr);
-  iref2 = irt.Add(cookie, obj2.Get(), &error_msg);
+  iref2 = irt.Add(obj2.Get(), &error_msg);
   EXPECT_TRUE(iref2 != nullptr);
   CheckDump(&irt, 3, 3);
 
-  ASSERT_TRUE(irt.Remove(cookie, iref2));
+  ASSERT_TRUE(irt.Remove(iref2));
   CheckDump(&irt, 2, 2);
-  ASSERT_TRUE(irt.Remove(cookie, iref1));
+  ASSERT_TRUE(irt.Remove(iref1));
   CheckDump(&irt, 1, 1);
-  ASSERT_TRUE(irt.Remove(cookie, iref0));
+  ASSERT_TRUE(irt.Remove(iref0));
   CheckDump(&irt, 0, 0);
 
   // Table should be empty now.
@@ -134,27 +132,27 @@
 
   // Add three, remove middle / middle / bottom / top.  (Second attempt
   // to remove middle should fail.)
-  iref0 = irt.Add(cookie, obj0.Get(), &error_msg);
+  iref0 = irt.Add(obj0.Get(), &error_msg);
   EXPECT_TRUE(iref0 != nullptr);
-  iref1 = irt.Add(cookie, obj1.Get(), &error_msg);
+  iref1 = irt.Add(obj1.Get(), &error_msg);
   EXPECT_TRUE(iref1 != nullptr);
-  iref2 = irt.Add(cookie, obj2.Get(), &error_msg);
+  iref2 = irt.Add(obj2.Get(), &error_msg);
   EXPECT_TRUE(iref2 != nullptr);
   CheckDump(&irt, 3, 3);
 
   ASSERT_EQ(3U, irt.Capacity());
 
-  ASSERT_TRUE(irt.Remove(cookie, iref1));
+  ASSERT_TRUE(irt.Remove(iref1));
   CheckDump(&irt, 2, 2);
-  ASSERT_FALSE(irt.Remove(cookie, iref1));
+  ASSERT_FALSE(irt.Remove(iref1));
   CheckDump(&irt, 2, 2);
 
   // Check that the reference to the hole is not valid.
   EXPECT_FALSE(irt.IsValidReference(iref1, &error_msg));
 
-  ASSERT_TRUE(irt.Remove(cookie, iref2));
+  ASSERT_TRUE(irt.Remove(iref2));
   CheckDump(&irt, 1, 1);
-  ASSERT_TRUE(irt.Remove(cookie, iref0));
+  ASSERT_TRUE(irt.Remove(iref0));
   CheckDump(&irt, 0, 0);
 
   // Table should be empty now.
@@ -163,35 +161,35 @@
   // Add four entries.  Remove #1, add new entry, verify that table size
   // is still 4 (i.e. holes are getting filled).  Remove #1 and #3, verify
   // that we delete one and don't hole-compact the other.
-  iref0 = irt.Add(cookie, obj0.Get(), &error_msg);
+  iref0 = irt.Add(obj0.Get(), &error_msg);
   EXPECT_TRUE(iref0 != nullptr);
-  iref1 = irt.Add(cookie, obj1.Get(), &error_msg);
+  iref1 = irt.Add(obj1.Get(), &error_msg);
   EXPECT_TRUE(iref1 != nullptr);
-  iref2 = irt.Add(cookie, obj2.Get(), &error_msg);
+  iref2 = irt.Add(obj2.Get(), &error_msg);
   EXPECT_TRUE(iref2 != nullptr);
-  IndirectRef iref3 = irt.Add(cookie, obj3.Get(), &error_msg);
+  IndirectRef iref3 = irt.Add(obj3.Get(), &error_msg);
   EXPECT_TRUE(iref3 != nullptr);
   CheckDump(&irt, 4, 4);
 
-  ASSERT_TRUE(irt.Remove(cookie, iref1));
+  ASSERT_TRUE(irt.Remove(iref1));
   CheckDump(&irt, 3, 3);
 
-  iref1 = irt.Add(cookie, obj1.Get(), &error_msg);
+  iref1 = irt.Add(obj1.Get(), &error_msg);
   EXPECT_TRUE(iref1 != nullptr);
 
   ASSERT_EQ(4U, irt.Capacity()) << "hole not filled";
   CheckDump(&irt, 4, 4);
 
-  ASSERT_TRUE(irt.Remove(cookie, iref1));
+  ASSERT_TRUE(irt.Remove(iref1));
   CheckDump(&irt, 3, 3);
-  ASSERT_TRUE(irt.Remove(cookie, iref3));
+  ASSERT_TRUE(irt.Remove(iref3));
   CheckDump(&irt, 2, 2);
 
   ASSERT_EQ(3U, irt.Capacity()) << "should be 3 after two deletions";
 
-  ASSERT_TRUE(irt.Remove(cookie, iref2));
+  ASSERT_TRUE(irt.Remove(iref2));
   CheckDump(&irt, 1, 1);
-  ASSERT_TRUE(irt.Remove(cookie, iref0));
+  ASSERT_TRUE(irt.Remove(iref0));
   CheckDump(&irt, 0, 0);
 
   ASSERT_EQ(0U, irt.Capacity()) << "not empty after split remove";
@@ -199,308 +197,72 @@
   // Add an entry, remove it, add a new entry, and try to use the original
   // iref.  They have the same slot number but are for different objects.
   // With the extended checks in place, this should fail.
-  iref0 = irt.Add(cookie, obj0.Get(), &error_msg);
+  iref0 = irt.Add(obj0.Get(), &error_msg);
   EXPECT_TRUE(iref0 != nullptr);
   CheckDump(&irt, 1, 1);
-  ASSERT_TRUE(irt.Remove(cookie, iref0));
+  ASSERT_TRUE(irt.Remove(iref0));
   CheckDump(&irt, 0, 0);
-  iref1 = irt.Add(cookie, obj1.Get(), &error_msg);
+  iref1 = irt.Add(obj1.Get(), &error_msg);
   EXPECT_TRUE(iref1 != nullptr);
   CheckDump(&irt, 1, 1);
-  ASSERT_FALSE(irt.Remove(cookie, iref0)) << "mismatched del succeeded";
+  ASSERT_FALSE(irt.Remove(iref0)) << "mismatched del succeeded";
   CheckDump(&irt, 1, 1);
-  ASSERT_TRUE(irt.Remove(cookie, iref1)) << "switched del failed";
+  ASSERT_TRUE(irt.Remove(iref1)) << "switched del failed";
   ASSERT_EQ(0U, irt.Capacity()) << "switching del not empty";
   CheckDump(&irt, 0, 0);
 
   // Same as above, but with the same object.  A more rigorous checker
   // (e.g. with slot serialization) will catch this.
-  iref0 = irt.Add(cookie, obj0.Get(), &error_msg);
+  iref0 = irt.Add(obj0.Get(), &error_msg);
   EXPECT_TRUE(iref0 != nullptr);
   CheckDump(&irt, 1, 1);
-  ASSERT_TRUE(irt.Remove(cookie, iref0));
+  ASSERT_TRUE(irt.Remove(iref0));
   CheckDump(&irt, 0, 0);
-  iref1 = irt.Add(cookie, obj0.Get(), &error_msg);
+  iref1 = irt.Add(obj0.Get(), &error_msg);
   EXPECT_TRUE(iref1 != nullptr);
   CheckDump(&irt, 1, 1);
   if (iref0 != iref1) {
     // Try 0, should not work.
-    ASSERT_FALSE(irt.Remove(cookie, iref0)) << "temporal del succeeded";
+    ASSERT_FALSE(irt.Remove(iref0)) << "temporal del succeeded";
   }
-  ASSERT_TRUE(irt.Remove(cookie, iref1)) << "temporal cleanup failed";
+  ASSERT_TRUE(irt.Remove(iref1)) << "temporal cleanup failed";
   ASSERT_EQ(0U, irt.Capacity()) << "temporal del not empty";
   CheckDump(&irt, 0, 0);
 
   // Stale reference is not valid.
-  iref0 = irt.Add(cookie, obj0.Get(), &error_msg);
+  iref0 = irt.Add(obj0.Get(), &error_msg);
   EXPECT_TRUE(iref0 != nullptr);
   CheckDump(&irt, 1, 1);
-  ASSERT_TRUE(irt.Remove(cookie, iref0));
+  ASSERT_TRUE(irt.Remove(iref0));
   EXPECT_FALSE(irt.IsValidReference(iref0, &error_msg)) << "stale lookup succeeded";
   CheckDump(&irt, 0, 0);
 
-  // Test table resizing.
-  // These ones fit...
+  // Test deleting all but the last entry.
+  // We shall delete these.
   static const size_t kTableInitial = kTableMax / 2;
   IndirectRef manyRefs[kTableInitial];
   for (size_t i = 0; i < kTableInitial; i++) {
-    manyRefs[i] = irt.Add(cookie, obj0.Get(), &error_msg);
+    manyRefs[i] = irt.Add(obj0.Get(), &error_msg);
     ASSERT_TRUE(manyRefs[i] != nullptr) << "Failed adding " << i;
     CheckDump(&irt, i + 1, 1);
   }
-  // ...this one causes overflow.
-  iref0 = irt.Add(cookie, obj0.Get(), &error_msg);
+  // We shall keep this one.
+  iref0 = irt.Add(obj0.Get(), &error_msg);
   ASSERT_TRUE(iref0 != nullptr);
   ASSERT_EQ(kTableInitial + 1, irt.Capacity());
   CheckDump(&irt, kTableInitial + 1, 1);
-
+  // Delete all but the last entry.
   for (size_t i = 0; i < kTableInitial; i++) {
-    ASSERT_TRUE(irt.Remove(cookie, manyRefs[i])) << "failed removing " << i;
+    ASSERT_TRUE(irt.Remove(manyRefs[i])) << "failed removing " << i;
     CheckDump(&irt, kTableInitial - i, 1);
   }
   // Because of removal order, should have 11 entries, 10 of them holes.
   ASSERT_EQ(kTableInitial + 1, irt.Capacity());
 
-  ASSERT_TRUE(irt.Remove(cookie, iref0)) << "multi-remove final failed";
+  ASSERT_TRUE(irt.Remove(iref0)) << "multi-remove final failed";
 
   ASSERT_EQ(0U, irt.Capacity()) << "multi-del not empty";
   CheckDump(&irt, 0, 0);
 }
 
-TEST_F(IndirectReferenceTableTest, Holes) {
-  // Test the explicitly named cases from the IRT implementation:
-  //
-  // 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
-  // 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
-  // 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
-  //    reference
-  // 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
-  // 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
-  //    reference
-
-  ScopedObjectAccess soa(Thread::Current());
-  static const size_t kTableMax = 10;
-
-  StackHandleScope<6> hs(soa.Self());
-  Handle<mirror::Class> c = hs.NewHandle(
-      class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
-  ASSERT_TRUE(c != nullptr);
-  Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj0 != nullptr);
-  Handle<mirror::Object> obj1 = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj1 != nullptr);
-  Handle<mirror::Object> obj2 = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj2 != nullptr);
-  Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj3 != nullptr);
-  Handle<mirror::Object> obj4 = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj4 != nullptr);
-
-  std::string error_msg;
-
-  // 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference.
-  {
-    IndirectReferenceTable irt(kGlobal, IndirectReferenceTable::ResizableCapacity::kNo);
-    bool success = irt.Initialize(kTableMax, &error_msg);
-    ASSERT_TRUE(success) << error_msg;
-
-    const IRTSegmentState cookie0 = kIRTFirstSegment;
-
-    CheckDump(&irt, 0, 0);
-
-    IndirectRef iref0 = irt.Add(cookie0, obj0.Get(), &error_msg);
-    IndirectRef iref1 = irt.Add(cookie0, obj1.Get(), &error_msg);
-    IndirectRef iref2 = irt.Add(cookie0, obj2.Get(), &error_msg);
-
-    EXPECT_TRUE(irt.Remove(cookie0, iref1));
-
-    // New segment.
-    const IRTSegmentState cookie1 = irt.GetSegmentState();
-
-    IndirectRef iref3 = irt.Add(cookie1, obj3.Get(), &error_msg);
-
-    // Must not have filled the previous hole.
-    EXPECT_EQ(irt.Capacity(), 4u);
-    EXPECT_FALSE(irt.IsValidReference(iref1, &error_msg));
-    CheckDump(&irt, 3, 3);
-
-    UNUSED(iref0, iref1, iref2, iref3);
-  }
-
-  // 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
-  {
-    IndirectReferenceTable irt(kGlobal, IndirectReferenceTable::ResizableCapacity::kNo);
-    bool success = irt.Initialize(kTableMax, &error_msg);
-    ASSERT_TRUE(success) << error_msg;
-
-    const IRTSegmentState cookie0 = kIRTFirstSegment;
-
-    CheckDump(&irt, 0, 0);
-
-    IndirectRef iref0 = irt.Add(cookie0, obj0.Get(), &error_msg);
-
-    // New segment.
-    const IRTSegmentState cookie1 = irt.GetSegmentState();
-
-    IndirectRef iref1 = irt.Add(cookie1, obj1.Get(), &error_msg);
-    IndirectRef iref2 = irt.Add(cookie1, obj2.Get(), &error_msg);
-    IndirectRef iref3 = irt.Add(cookie1, obj3.Get(), &error_msg);
-
-    EXPECT_TRUE(irt.Remove(cookie1, iref2));
-
-    // Pop segment.
-    irt.SetSegmentState(cookie1);
-
-    IndirectRef iref4 = irt.Add(cookie1, obj4.Get(), &error_msg);
-
-    EXPECT_EQ(irt.Capacity(), 2u);
-    EXPECT_FALSE(irt.IsValidReference(iref2, &error_msg));
-    CheckDump(&irt, 2, 2);
-
-    UNUSED(iref0, iref1, iref2, iref3, iref4);
-  }
-
-  // 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
-  //    reference.
-  {
-    IndirectReferenceTable irt(kGlobal, IndirectReferenceTable::ResizableCapacity::kNo);
-    bool success = irt.Initialize(kTableMax, &error_msg);
-    ASSERT_TRUE(success) << error_msg;
-
-    const IRTSegmentState cookie0 = kIRTFirstSegment;
-
-    CheckDump(&irt, 0, 0);
-
-    IndirectRef iref0 = irt.Add(cookie0, obj0.Get(), &error_msg);
-
-    // New segment.
-    const IRTSegmentState cookie1 = irt.GetSegmentState();
-
-    IndirectRef iref1 = irt.Add(cookie1, obj1.Get(), &error_msg);
-    IndirectRef iref2 = irt.Add(cookie1, obj2.Get(), &error_msg);
-
-    EXPECT_TRUE(irt.Remove(cookie1, iref1));
-
-    // New segment.
-    const IRTSegmentState cookie2 = irt.GetSegmentState();
-
-    IndirectRef iref3 = irt.Add(cookie2, obj3.Get(), &error_msg);
-
-    // Pop segment.
-    irt.SetSegmentState(cookie2);
-
-    IndirectRef iref4 = irt.Add(cookie1, obj4.Get(), &error_msg);
-
-    EXPECT_EQ(irt.Capacity(), 3u);
-    EXPECT_FALSE(irt.IsValidReference(iref1, &error_msg));
-    CheckDump(&irt, 3, 3);
-
-    UNUSED(iref0, iref1, iref2, iref3, iref4);
-  }
-
-  // 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference.
-  {
-    IndirectReferenceTable irt(kGlobal, IndirectReferenceTable::ResizableCapacity::kNo);
-    bool success = irt.Initialize(kTableMax, &error_msg);
-    ASSERT_TRUE(success) << error_msg;
-
-    const IRTSegmentState cookie0 = kIRTFirstSegment;
-
-    CheckDump(&irt, 0, 0);
-
-    IndirectRef iref0 = irt.Add(cookie0, obj0.Get(), &error_msg);
-
-    // New segment.
-    const IRTSegmentState cookie1 = irt.GetSegmentState();
-
-    IndirectRef iref1 = irt.Add(cookie1, obj1.Get(), &error_msg);
-    EXPECT_TRUE(irt.Remove(cookie1, iref1));
-
-    // Emptied segment, push new one.
-    const IRTSegmentState cookie2 = irt.GetSegmentState();
-
-    IndirectRef iref2 = irt.Add(cookie1, obj1.Get(), &error_msg);
-    IndirectRef iref3 = irt.Add(cookie1, obj2.Get(), &error_msg);
-    IndirectRef iref4 = irt.Add(cookie1, obj3.Get(), &error_msg);
-
-    EXPECT_TRUE(irt.Remove(cookie1, iref3));
-
-    // Pop segment.
-    UNUSED(cookie2);
-    irt.SetSegmentState(cookie1);
-
-    IndirectRef iref5 = irt.Add(cookie1, obj4.Get(), &error_msg);
-
-    EXPECT_EQ(irt.Capacity(), 2u);
-    EXPECT_FALSE(irt.IsValidReference(iref3, &error_msg));
-    CheckDump(&irt, 2, 2);
-
-    UNUSED(iref0, iref1, iref2, iref3, iref4, iref5);
-  }
-
-  // 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
-  //    reference
-  {
-    IndirectReferenceTable irt(kGlobal, IndirectReferenceTable::ResizableCapacity::kNo);
-    bool success = irt.Initialize(kTableMax, &error_msg);
-    ASSERT_TRUE(success) << error_msg;
-
-    const IRTSegmentState cookie0 = kIRTFirstSegment;
-
-    CheckDump(&irt, 0, 0);
-
-    IndirectRef iref0 = irt.Add(cookie0, obj0.Get(), &error_msg);
-
-    // New segment.
-    const IRTSegmentState cookie1 = irt.GetSegmentState();
-
-    IndirectRef iref1 = irt.Add(cookie1, obj1.Get(), &error_msg);
-    IndirectRef iref2 = irt.Add(cookie1, obj1.Get(), &error_msg);
-    IndirectRef iref3 = irt.Add(cookie1, obj2.Get(), &error_msg);
-
-    EXPECT_TRUE(irt.Remove(cookie1, iref2));
-
-    // Pop segment.
-    irt.SetSegmentState(cookie1);
-
-    // Push segment.
-    const IRTSegmentState cookie1_second = irt.GetSegmentState();
-    UNUSED(cookie1_second);
-
-    IndirectRef iref4 = irt.Add(cookie1, obj3.Get(), &error_msg);
-
-    EXPECT_EQ(irt.Capacity(), 2u);
-    EXPECT_FALSE(irt.IsValidReference(iref3, &error_msg));
-    CheckDump(&irt, 2, 2);
-
-    UNUSED(iref0, iref1, iref2, iref3, iref4);
-  }
-}
-
-TEST_F(IndirectReferenceTableTest, Resize) {
-  ScopedObjectAccess soa(Thread::Current());
-  static const size_t kTableMax = 512;
-
-  StackHandleScope<2> hs(soa.Self());
-  Handle<mirror::Class> c = hs.NewHandle(
-      class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
-  ASSERT_TRUE(c != nullptr);
-  Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj0 != nullptr);
-
-  std::string error_msg;
-  IndirectReferenceTable irt(kLocal, IndirectReferenceTable::ResizableCapacity::kYes);
-  bool success = irt.Initialize(kTableMax, &error_msg);
-  ASSERT_TRUE(success) << error_msg;
-
-  CheckDump(&irt, 0, 0);
-  const IRTSegmentState cookie = kIRTFirstSegment;
-
-  for (size_t i = 0; i != kTableMax + 1; ++i) {
-    irt.Add(cookie, obj0.Get(), &error_msg);
-  }
-
-  EXPECT_EQ(irt.Capacity(), kTableMax + 1);
-}
-
 }  // namespace art
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index 5f0e468..eb54f98 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -38,6 +38,7 @@
 #include "indirect_reference_table-inl.h"
 #include "java_vm_ext.h"
 #include "jni_internal.h"
+#include "local_reference_table-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/field.h"
 #include "mirror/method.h"
@@ -57,14 +58,19 @@
 inline IndirectReferenceTable* GetIndirectReferenceTable(ScopedObjectAccess& soa,
                                                          IndirectRefKind kind) {
   DCHECK_NE(kind, kJniTransition);
-  JNIEnvExt* env = soa.Env();
-  IndirectReferenceTable* irt =
-      (kind == kLocal) ? &env->locals_
-                       : ((kind == kGlobal) ? &env->vm_->globals_ : &env->vm_->weak_globals_);
+  DCHECK_NE(kind, kLocal);
+  JavaVMExt* vm = soa.Env()->GetVm();
+  IndirectReferenceTable* irt = (kind == kGlobal) ? &vm->globals_ : &vm->weak_globals_;
   DCHECK_EQ(irt->GetKind(), kind);
   return irt;
 }
 
+// This helper cannot be in the anonymous namespace because it needs to be
+// declared as a friend by JniEnvExt.
+inline jni::LocalReferenceTable* GetLocalReferenceTable(ScopedObjectAccess& soa) {
+  return &soa.Env()->locals_;
+}
+
 namespace {
 
 using android::base::StringAppendF;
@@ -873,6 +879,12 @@
       } else {
         obj = soa.Decode<mirror::Object>(java_object);
       }
+    } else if (ref_kind == kLocal) {
+      jni::LocalReferenceTable* lrt = GetLocalReferenceTable(soa);
+      okay = lrt->IsValidReference(java_object, &error_msg);
+      if (okay) {
+        obj = lrt->Get(ref);
+      }
     } else {
       IndirectReferenceTable* irt = GetIndirectReferenceTable(soa, ref_kind);
       okay = irt->IsValidReference(java_object, &error_msg);
@@ -881,10 +893,7 @@
         // Note: The `IsValidReference()` checks for null but we do not prevent races,
         // so the null check below can still fail. Even if it succeeds, another thread
         // could delete the global or weak global before it's used by JNI.
-        if (ref_kind == kLocal) {
-          // Local references do not need a read barrier.
-          obj = irt->Get<kWithoutReadBarrier>(ref);
-        } else if (ref_kind == kGlobal) {
+        if (ref_kind == kGlobal) {
           obj = soa.Env()->GetVm()->DecodeGlobal(ref);
         } else {
           obj = soa.Env()->GetVm()->DecodeWeakGlobal(soa.Self(), ref);
diff --git a/runtime/jni/java_vm_ext.cc b/runtime/jni/java_vm_ext.cc
index f1d4c3b..9c695ca 100644
--- a/runtime/jni/java_vm_ext.cc
+++ b/runtime/jni/java_vm_ext.cc
@@ -504,10 +504,10 @@
       tracing_enabled_(runtime_options.Exists(RuntimeArgumentMap::JniTrace)
                        || VLOG_IS_ON(third_party_jni)),
       trace_(runtime_options.GetOrDefault(RuntimeArgumentMap::JniTrace)),
-      globals_(kGlobal, IndirectReferenceTable::ResizableCapacity::kNo),
+      globals_(kGlobal),
       libraries_(new Libraries),
       unchecked_functions_(&gJniInvokeInterface),
-      weak_globals_(kWeakGlobal, IndirectReferenceTable::ResizableCapacity::kNo),
+      weak_globals_(kWeakGlobal),
       allow_accessing_weak_globals_(true),
       weak_globals_add_condition_("weak globals add condition",
                                   (CHECK(Locks::jni_weak_globals_lock_ != nullptr),
@@ -695,7 +695,7 @@
   std::string error_msg;
   {
     WriterMutexLock mu(self, *Locks::jni_globals_lock_);
-    ref = globals_.Add(kIRTFirstSegment, obj, &error_msg);
+    ref = globals_.Add(obj, &error_msg);
     MaybeTraceGlobals();
   }
   if (UNLIKELY(ref == nullptr)) {
@@ -731,7 +731,7 @@
     WaitForWeakGlobalsAccess(self);
   }
   std::string error_msg;
-  IndirectRef ref = weak_globals_.Add(kIRTFirstSegment, obj, &error_msg);
+  IndirectRef ref = weak_globals_.Add(obj, &error_msg);
   MaybeTraceWeakGlobals();
   if (UNLIKELY(ref == nullptr)) {
     LOG(FATAL) << error_msg;
@@ -746,7 +746,7 @@
   }
   {
     WriterMutexLock mu(self, *Locks::jni_globals_lock_);
-    if (!globals_.Remove(kIRTFirstSegment, obj)) {
+    if (!globals_.Remove(obj)) {
       LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
                    << "failed to find entry";
     }
@@ -760,7 +760,7 @@
     return;
   }
   MutexLock mu(self, *Locks::jni_weak_globals_lock_);
-  if (!weak_globals_.Remove(kIRTFirstSegment, obj)) {
+  if (!weak_globals_.Remove(obj)) {
     LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
                  << "failed to find entry";
   }
diff --git a/runtime/jni/jni_env_ext-inl.h b/runtime/jni/jni_env_ext-inl.h
index d66ac1a..0c04192 100644
--- a/runtime/jni/jni_env_ext-inl.h
+++ b/runtime/jni/jni_env_ext-inl.h
@@ -19,7 +19,7 @@
 
 #include "jni_env_ext.h"
 
-#include "indirect_reference_table-inl.h"
+#include "local_reference_table-inl.h"
 #include "mirror/object.h"
 
 namespace art {
diff --git a/runtime/jni/jni_env_ext.cc b/runtime/jni/jni_env_ext.cc
index 7d522c1..619e1de 100644
--- a/runtime/jni/jni_env_ext.cc
+++ b/runtime/jni/jni_env_ext.cc
@@ -69,8 +69,8 @@
 JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in)
     : self_(self_in),
       vm_(vm_in),
-      local_ref_cookie_(kIRTFirstSegment),
-      locals_(kLocal, IndirectReferenceTable::ResizableCapacity::kYes),
+      local_ref_cookie_(jni::kLRTFirstSegment),
+      locals_(),
       monitors_("monitors", kMonitorsInitial, kMonitorsMax),
       critical_(0),
       check_jni_(false),
@@ -154,7 +154,7 @@
                          4 +                         // local_ref_cookie.
                          (pointer_size - 4);         // Padding.
   size_t irt_segment_state_offset =
-      IndirectReferenceTable::SegmentStateOffset(pointer_size).Int32Value();
+      jni::LocalReferenceTable::SegmentStateOffset(pointer_size).Int32Value();
   return MemberOffset(locals_offset + irt_segment_state_offset);
 }
 
diff --git a/runtime/jni/jni_env_ext.h b/runtime/jni/jni_env_ext.h
index 3614213..1f57658 100644
--- a/runtime/jni/jni_env_ext.h
+++ b/runtime/jni/jni_env_ext.h
@@ -21,7 +21,7 @@
 
 #include "base/locks.h"
 #include "base/macros.h"
-#include "indirect_reference_table.h"
+#include "local_reference_table.h"
 #include "obj_ptr.h"
 #include "reference_table.h"
 
@@ -79,13 +79,13 @@
     return locals_.Capacity();
   }
 
-  IRTSegmentState GetLocalRefCookie() const { return local_ref_cookie_; }
-  void SetLocalRefCookie(IRTSegmentState new_cookie) { local_ref_cookie_ = new_cookie; }
+  jni::LRTSegmentState GetLocalRefCookie() const { return local_ref_cookie_; }
+  void SetLocalRefCookie(jni::LRTSegmentState new_cookie) { local_ref_cookie_ = new_cookie; }
 
-  IRTSegmentState GetLocalsSegmentState() const REQUIRES_SHARED(Locks::mutator_lock_) {
+  jni::LRTSegmentState GetLocalsSegmentState() const REQUIRES_SHARED(Locks::mutator_lock_) {
     return locals_.GetSegmentState();
   }
-  void SetLocalSegmentState(IRTSegmentState new_state) REQUIRES_SHARED(Locks::mutator_lock_) {
+  void SetLocalSegmentState(jni::LRTSegmentState new_state) REQUIRES_SHARED(Locks::mutator_lock_) {
     locals_.SetSegmentState(new_state);
   }
 
@@ -169,15 +169,15 @@
   JavaVMExt* const vm_;
 
   // Cookie used when using the local indirect reference table.
-  IRTSegmentState local_ref_cookie_;
+  jni::LRTSegmentState local_ref_cookie_;
 
   // JNI local references.
-  IndirectReferenceTable locals_;
+  jni::LocalReferenceTable locals_;
 
   // Stack of cookies corresponding to PushLocalFrame/PopLocalFrame calls.
   // TODO: to avoid leaks (and bugs), we need to clear this vector on entry (or return)
   // to a native method.
-  std::vector<IRTSegmentState> stacked_local_ref_cookies_;
+  std::vector<jni::LRTSegmentState> stacked_local_ref_cookies_;
 
   // Entered JNI monitors, for bulk exit on thread detach.
   ReferenceTable monitors_;
@@ -208,6 +208,7 @@
   friend class Thread;
   friend IndirectReferenceTable* GetIndirectReferenceTable(ScopedObjectAccess& soa,
                                                            IndirectRefKind kind);
+  friend jni::LocalReferenceTable* GetLocalReferenceTable(ScopedObjectAccess& soa);
   friend void ThreadResetFunctionTable(Thread* thread, void* arg);
   ART_FRIEND_TEST(JniInternalTest, JNIEnvExtOffsets);
 };
@@ -229,7 +230,7 @@
 
  private:
   JNIEnvExt* const env_;
-  const IRTSegmentState saved_local_ref_cookie_;
+  const jni::LRTSegmentState saved_local_ref_cookie_;
 
   DISALLOW_COPY_AND_ASSIGN(ScopedJniEnvLocalRefState);
 };
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index a41043b..5abedb9 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -21,7 +21,7 @@
 #include "art_method-inl.h"
 #include "base/mem_map.h"
 #include "common_runtime_test.h"
-#include "indirect_reference_table.h"
+#include "local_reference_table.h"
 #include "java_vm_ext.h"
 #include "jni_env_ext.h"
 #include "mirror/string-inl.h"
@@ -2580,24 +2580,23 @@
   // by modifying memory.
   // The parameters don't really matter here.
   std::string error_msg;
-  IndirectReferenceTable irt(IndirectRefKind::kGlobal,
-                             IndirectReferenceTable::ResizableCapacity::kNo);
-  bool success = irt.Initialize(/*max_count=*/ 5, &error_msg);
+  jni::LocalReferenceTable lrt;
+  bool success = lrt.Initialize(/*max_count=*/ 5, &error_msg);
   ASSERT_TRUE(success) << error_msg;
-  IRTSegmentState old_state = irt.GetSegmentState();
+  jni::LRTSegmentState old_state = lrt.GetSegmentState();
 
   // Write some new state directly. We invert parts of old_state to ensure a new value.
-  IRTSegmentState new_state;
+  jni::LRTSegmentState new_state;
   new_state.top_index = old_state.top_index ^ 0x07705005;
   ASSERT_NE(old_state.top_index, new_state.top_index);
 
-  uint8_t* base = reinterpret_cast<uint8_t*>(&irt);
+  uint8_t* base = reinterpret_cast<uint8_t*>(&lrt);
   int32_t segment_state_offset =
-      IndirectReferenceTable::SegmentStateOffset(sizeof(void*)).Int32Value();
-  *reinterpret_cast<IRTSegmentState*>(base + segment_state_offset) = new_state;
+      jni::LocalReferenceTable::SegmentStateOffset(sizeof(void*)).Int32Value();
+  *reinterpret_cast<jni::LRTSegmentState*>(base + segment_state_offset) = new_state;
 
   // Read and compare.
-  EXPECT_EQ(new_state.top_index, irt.GetSegmentState().top_index);
+  EXPECT_EQ(new_state.top_index, lrt.GetSegmentState().top_index);
 }
 
 // Test the offset computation of JNIEnvExt offsets. b/26071368.
@@ -2611,7 +2610,7 @@
   // hope it to be.
   uint32_t segment_state_now =
       OFFSETOF_MEMBER(JNIEnvExt, locals_) +
-      IndirectReferenceTable::SegmentStateOffset(sizeof(void*)).Uint32Value();
+      jni::LocalReferenceTable::SegmentStateOffset(sizeof(void*)).Uint32Value();
   uint32_t segment_state_computed = JNIEnvExt::SegmentStateOffset(sizeof(void*)).Uint32Value();
   EXPECT_EQ(segment_state_now, segment_state_computed);
 }
diff --git a/runtime/jni/local_reference_table-inl.h b/runtime/jni/local_reference_table-inl.h
new file mode 100644
index 0000000..b65dea7
--- /dev/null
+++ b/runtime/jni/local_reference_table-inl.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JNI_LOCAL_REFERENCE_TABLE_INL_H_
+#define ART_RUNTIME_JNI_LOCAL_REFERENCE_TABLE_INL_H_
+
+#include "local_reference_table.h"
+
+#include "android-base/stringprintf.h"
+
+#include "base/dumpable.h"
+#include "gc_root-inl.h"
+#include "obj_ptr-inl.h"
+#include "verify_object.h"
+
+namespace art {
+namespace mirror {
+class Object;
+}  // namespace mirror
+
+namespace jni {
+
+// Verifies that the indirect table lookup is valid.
+// Returns "false" if something looks bad.
+inline bool LocalReferenceTable::IsValidReference(IndirectRef iref,
+                                                  /*out*/std::string* error_msg) const {
+  DCHECK(iref != nullptr);
+  DCHECK_EQ(GetIndirectRefKind(iref), kLocal);
+  const uint32_t top_index = segment_state_.top_index;
+  uint32_t idx = ExtractIndex(iref);
+  if (UNLIKELY(idx >= top_index)) {
+    *error_msg = android::base::StringPrintf("deleted reference at index %u in a table of size %u",
+                                             idx,
+                                             top_index);
+    return false;
+  }
+  if (UNLIKELY(table_[idx].GetReference()->IsNull())) {
+    *error_msg = android::base::StringPrintf("deleted reference at index %u", idx);
+    return false;
+  }
+  uint32_t iref_serial = DecodeSerial(reinterpret_cast<uintptr_t>(iref));
+  uint32_t entry_serial = table_[idx].GetSerial();
+  if (UNLIKELY(iref_serial != entry_serial)) {
+    *error_msg = android::base::StringPrintf("stale reference with serial number %u v. current %u",
+                                             iref_serial,
+                                             entry_serial);
+    return false;
+  }
+  return true;
+}
+
+// Make sure that the entry at "idx" is correctly paired with "iref".
+inline bool LocalReferenceTable::CheckEntry(const char* what,
+                                            IndirectRef iref,
+                                            uint32_t idx) const {
+  IndirectRef checkRef = ToIndirectRef(idx);
+  if (UNLIKELY(checkRef != iref)) {
+    std::string msg = android::base::StringPrintf(
+        "JNI ERROR (app bug): attempt to %s stale %s %p (should be %p)",
+        what,
+        GetIndirectRefKindString(kLocal),
+        iref,
+        checkRef);
+    AbortIfNoCheckJNI(msg);
+    return false;
+  }
+  return true;
+}
+
+template<ReadBarrierOption kReadBarrierOption>
+inline ObjPtr<mirror::Object> LocalReferenceTable::Get(IndirectRef iref) const {
+  DCHECK_EQ(GetIndirectRefKind(iref), kLocal);
+  uint32_t idx = ExtractIndex(iref);
+  DCHECK_LT(idx, segment_state_.top_index);
+  DCHECK_EQ(DecodeSerial(reinterpret_cast<uintptr_t>(iref)), table_[idx].GetSerial());
+  DCHECK(!table_[idx].GetReference()->IsNull());
+  ObjPtr<mirror::Object> obj = table_[idx].GetReference()->Read<kReadBarrierOption>();
+  VerifyObject(obj);
+  return obj;
+}
+
+inline void LocalReferenceTable::Update(IndirectRef iref, ObjPtr<mirror::Object> obj) {
+  DCHECK_EQ(GetIndirectRefKind(iref), kLocal);
+  uint32_t idx = ExtractIndex(iref);
+  DCHECK_LT(idx, segment_state_.top_index);
+  DCHECK_EQ(DecodeSerial(reinterpret_cast<uintptr_t>(iref)), table_[idx].GetSerial());
+  DCHECK(!table_[idx].GetReference()->IsNull());
+  table_[idx].SetReference(obj);
+}
+
+inline void LrtEntry::Add(ObjPtr<mirror::Object> obj) {
+  ++serial_;
+  if (serial_ == kLRTMaxSerial) {
+    serial_ = 0;
+  }
+  reference_ = GcRoot<mirror::Object>(obj);
+}
+
+inline void LrtEntry::SetReference(ObjPtr<mirror::Object> obj) {
+  DCHECK_LT(serial_, kLRTMaxSerial);
+  reference_ = GcRoot<mirror::Object>(obj);
+}
+
+}  // namespace jni
+}  // namespace art
+
+#endif  // ART_RUNTIME_JNI_LOCAL_REFERENCE_TABLE_INL_H_
diff --git a/runtime/jni/local_reference_table.cc b/runtime/jni/local_reference_table.cc
new file mode 100644
index 0000000..6cbbde7
--- /dev/null
+++ b/runtime/jni/local_reference_table.cc
@@ -0,0 +1,564 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "local_reference_table-inl.h"
+
+#include "base/globals.h"
+#include "base/mutator_locked_dumpable.h"
+#include "base/systrace.h"
+#include "base/utils.h"
+#include "indirect_reference_table.h"
+#include "jni/java_vm_ext.h"
+#include "jni/jni_internal.h"
+#include "mirror/object-inl.h"
+#include "nth_caller_visitor.h"
+#include "reference_table.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread.h"
+
+#include <cstdlib>
+
+namespace art {
+namespace jni {
+
+static constexpr bool kDumpStackOnNonLocalReference = false;
+static constexpr bool kDebugLRT = false;
+
+// Maximum table size we allow.
+static constexpr size_t kMaxTableSizeInBytes = 128 * MB;
+
+void LocalReferenceTable::AbortIfNoCheckJNI(const std::string& msg) {
+  // If -Xcheck:jni is on, it'll give a more detailed error before aborting.
+  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
+  if (!vm->IsCheckJniEnabled()) {
+    // Otherwise, we want to abort rather than hand back a bad reference.
+    LOG(FATAL) << msg;
+  } else {
+    LOG(ERROR) << msg;
+  }
+}
+
+// Mmap an "indirect ref table region. Table_bytes is a multiple of a page size.
+static inline MemMap NewLRTMap(size_t table_bytes, std::string* error_msg) {
+  MemMap result = MemMap::MapAnonymous("local ref table",
+                                       table_bytes,
+                                       PROT_READ | PROT_WRITE,
+                                       /*low_4gb=*/ false,
+                                       error_msg);
+  if (!result.IsValid() && error_msg->empty()) {
+    *error_msg = "Unable to map memory for local ref table";
+  }
+  return result;
+}
+
+SmallLrtAllocator::SmallLrtAllocator()
+    : small_lrt_freelist_(nullptr), lock_("Small LRT table lock", LockLevel::kGenericBottomLock) {
+}
+
+// Allocate an LRT table with space for kSmallLrtEntries entries.
+LrtEntry* SmallLrtAllocator::Allocate(std::string* error_msg) {
+  MutexLock lock(Thread::Current(), lock_);
+  if (small_lrt_freelist_ == nullptr) {
+    // Refill.
+    MemMap map = NewLRTMap(kPageSize, error_msg);
+    if (map.IsValid()) {
+      small_lrt_freelist_ = reinterpret_cast<LrtEntry*>(map.Begin());
+      for (uint8_t* p = map.Begin(); p + kInitialLrtBytes < map.End(); p += kInitialLrtBytes) {
+        *reinterpret_cast<LrtEntry**>(p) = reinterpret_cast<LrtEntry*>(p + kInitialLrtBytes);
+      }
+      shared_lrt_maps_.emplace_back(std::move(map));
+    }
+  }
+  if (small_lrt_freelist_ == nullptr) {
+    return nullptr;
+  }
+  LrtEntry* result = small_lrt_freelist_;
+  small_lrt_freelist_ = *reinterpret_cast<LrtEntry**>(small_lrt_freelist_);
+  // Clear pointer in first entry.
+  new(result) LrtEntry();
+  return result;
+}
+
+void SmallLrtAllocator::Deallocate(LrtEntry* unneeded) {
+  MutexLock lock(Thread::Current(), lock_);
+  *reinterpret_cast<LrtEntry**>(unneeded) = small_lrt_freelist_;
+  small_lrt_freelist_ = unneeded;
+}
+
+LocalReferenceTable::LocalReferenceTable()
+    : segment_state_(kLRTFirstSegment),
+      table_(nullptr),
+      max_entries_(0u),
+      current_num_holes_(0) {
+}
+
+bool LocalReferenceTable::Initialize(size_t max_count, std::string* error_msg) {
+  CHECK(error_msg != nullptr);
+
+  // Overflow and maximum check.
+  CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(LrtEntry));
+
+  if (max_count <= kSmallLrtEntries) {
+    table_ = Runtime::Current()->GetSmallLrtAllocator()->Allocate(error_msg);
+    if (table_ != nullptr) {
+      max_entries_ = kSmallLrtEntries;
+      // table_mem_map_ remains invalid.
+    }
+  }
+  if (table_ == nullptr) {
+    const size_t table_bytes = RoundUp(max_count * sizeof(LrtEntry), kPageSize);
+    table_mem_map_ = NewLRTMap(table_bytes, error_msg);
+    if (!table_mem_map_.IsValid()) {
+      DCHECK(!error_msg->empty());
+      return false;
+    }
+
+    table_ = reinterpret_cast<LrtEntry*>(table_mem_map_.Begin());
+    // Take into account the actual length.
+    max_entries_ = table_bytes / sizeof(LrtEntry);
+  }
+  segment_state_ = kLRTFirstSegment;
+  last_known_previous_state_ = kLRTFirstSegment;
+  return true;
+}
+
+LocalReferenceTable::~LocalReferenceTable() {
+  if (table_ != nullptr && !table_mem_map_.IsValid()) {
+    Runtime::Current()->GetSmallLrtAllocator()->Deallocate(table_);
+  }
+}
+
+void LocalReferenceTable::ConstexprChecks() {
+  // Use this for some assertions. They can't be put into the header as C++ wants the class
+  // to be complete.
+
+  // Check kind.
+  static_assert((EncodeIndirectRefKind(kLocal) & (~kKindMask)) == 0, "Kind encoding error");
+  static_assert((EncodeIndirectRefKind(kGlobal) & (~kKindMask)) == 0, "Kind encoding error");
+  static_assert((EncodeIndirectRefKind(kWeakGlobal) & (~kKindMask)) == 0, "Kind encoding error");
+  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kLocal)) == kLocal,
+                "Kind encoding error");
+  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kGlobal)) == kGlobal,
+                "Kind encoding error");
+  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kWeakGlobal)) == kWeakGlobal,
+                "Kind encoding error");
+
+  // Check serial.
+  static_assert(DecodeSerial(EncodeSerial(0u)) == 0u, "Serial encoding error");
+  static_assert(DecodeSerial(EncodeSerial(1u)) == 1u, "Serial encoding error");
+  static_assert(DecodeSerial(EncodeSerial(2u)) == 2u, "Serial encoding error");
+  static_assert(DecodeSerial(EncodeSerial(3u)) == 3u, "Serial encoding error");
+
+  // Table index.
+  static_assert(DecodeIndex(EncodeIndex(0u)) == 0u, "Index encoding error");
+  static_assert(DecodeIndex(EncodeIndex(1u)) == 1u, "Index encoding error");
+  static_assert(DecodeIndex(EncodeIndex(2u)) == 2u, "Index encoding error");
+  static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
+}
+
+bool LocalReferenceTable::IsValid() const {
+  return table_ != nullptr;
+}
+
+// Holes:
+//
+// To keep the LRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
+// operation sequences. For simplicity and lower memory overhead, we do not use a free list or
+// similar. Instead, we scan for holes, with the expectation that we will find holes fast as they
+// are usually near the end of the table (see the header, TODO: verify this assumption). To avoid
+// scans when there are no holes, the number of known holes should be tracked.
+//
+// A previous implementation stored the top index and the number of holes as the segment state.
+// This constrains the maximum number of references to 16 bits. We want to relax this, as it
+// is easy to require more references (e.g., to list all classes in large applications). Thus,
+// the implicitly stack-stored state, the LRTSegmentState, is only the top index.
+//
+// Thus, hole count is a local property of the current segment, and needs to be recovered when
+// (or after) a frame is pushed or popped. To keep JNI transitions simple (and inlineable), we
+// cannot do work when the segment changes. Thus, Add and Remove need to ensure the current
+// hole count is correct.
+//
+// To be able to detect segment changes, we require an additional local field that can describe
+// the known segment. This is last_known_previous_state_. The requirement will become clear with
+// the following (some non-trivial) cases that have to be supported:
+//
+// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
+// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+//    reference
+// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
+// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+//    reference
+//
+// Storing the last known *previous* state (bottom index) allows conservatively detecting all the
+// segment changes above. The condition is simply that the last known state is greater than or
+// equal to the current previous state, and smaller than the current state (top index). The
+// condition is conservative as it adds O(1) overhead to operations on an empty segment.
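+//
+// Illustrative sketch (hypothetical caller code, not part of this file's logic): how the
+// caller-supplied cookie drives segments and where hole recovery and filling come in. Here
+// `lrt` is assumed to be an initialized LocalReferenceTable, `obj0`..`obj3` valid objects,
+// and `err` a std::string for error messages.
+//
+//   const LRTSegmentState cookie0 = kLRTFirstSegment;       // base segment
+//   IndirectRef r0 = lrt.Add(cookie0, obj0, &err);
+//   const LRTSegmentState cookie1 = lrt.GetSegmentState();  // "push": top becomes new bottom
+//   IndirectRef r1 = lrt.Add(cookie1, obj1, &err);
+//   IndirectRef r2 = lrt.Add(cookie1, obj2, &err);
+//   lrt.Remove(cookie1, r1);                                 // creates a hole below the top
+//   IndirectRef r3 = lrt.Add(cookie1, obj3, &err);           // fills the hole left by r1
+//   lrt.SetSegmentState(cookie1);                            // "pop": r2/r3 gone, r0 survives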
+
+static size_t CountNullEntries(const LrtEntry* table, size_t from, size_t to) {
+  size_t count = 0;
+  for (size_t index = from; index != to; ++index) {
+    if (table[index].GetReference()->IsNull()) {
+      count++;
+    }
+  }
+  return count;
+}
+
+void LocalReferenceTable::RecoverHoles(LRTSegmentState prev_state) {
+  if (last_known_previous_state_.top_index >= segment_state_.top_index ||
+      last_known_previous_state_.top_index < prev_state.top_index) {
+    const size_t top_index = segment_state_.top_index;
+    size_t count = CountNullEntries(table_, prev_state.top_index, top_index);
+
+    if (kDebugLRT) {
+      LOG(INFO) << "+++ Recovered holes: "
+                << " Current prev=" << prev_state.top_index
+                << " Current top_index=" << top_index
+                << " Old num_holes=" << current_num_holes_
+                << " New num_holes=" << count;
+    }
+
+    current_num_holes_ = count;
+    last_known_previous_state_ = prev_state;
+  } else if (kDebugLRT) {
+    LOG(INFO) << "No need to recover holes";
+  }
+}
+
+ALWAYS_INLINE
+static inline void CheckHoleCount(LrtEntry* table,
+                                  size_t exp_num_holes,
+                                  LRTSegmentState prev_state,
+                                  LRTSegmentState cur_state) {
+  if (kIsDebugBuild) {
+    size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
+    CHECK_EQ(exp_num_holes, count) << "prevState=" << prev_state.top_index
+                                   << " topIndex=" << cur_state.top_index;
+  }
+}
+
+bool LocalReferenceTable::Resize(size_t new_size, std::string* error_msg) {
+  CHECK_GT(new_size, max_entries_);
+
+  constexpr size_t kMaxEntries = kMaxTableSizeInBytes / sizeof(LrtEntry);
+  if (new_size > kMaxEntries) {
+    *error_msg = android::base::StringPrintf("Requested size exceeds maximum: %zu", new_size);
+    return false;
+  }
+  // Note: the above check also ensures that there is no overflow below.
+
+  const size_t table_bytes = RoundUp(new_size * sizeof(LrtEntry), kPageSize);
+
+  MemMap new_map = NewLRTMap(table_bytes, error_msg);
+  if (!new_map.IsValid()) {
+    return false;
+  }
+
+  memcpy(new_map.Begin(), table_, max_entries_ * sizeof(LrtEntry));
+  if (!table_mem_map_.IsValid()) {
+    // Didn't have its own map; deallocate old table.
+    Runtime::Current()->GetSmallLrtAllocator()->Deallocate(table_);
+  }
+  table_mem_map_ = std::move(new_map);
+  table_ = reinterpret_cast<LrtEntry*>(table_mem_map_.Begin());
+  const size_t real_new_size = table_bytes / sizeof(LrtEntry);
+  DCHECK_GE(real_new_size, new_size);
+  max_entries_ = real_new_size;
+
+  return true;
+}
+
+IndirectRef LocalReferenceTable::Add(LRTSegmentState previous_state,
+                                     ObjPtr<mirror::Object> obj,
+                                     std::string* error_msg) {
+  if (kDebugLRT) {
+    LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
+              << " top_index=" << segment_state_.top_index
+              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+              << " holes=" << current_num_holes_;
+  }
+
+  size_t top_index = segment_state_.top_index;
+
+  CHECK(obj != nullptr);
+  VerifyObject(obj);
+  DCHECK(table_ != nullptr);
+
+  if (top_index == max_entries_) {
+    // Try to double space.
+    if (std::numeric_limits<size_t>::max() / 2 < max_entries_) {
+      std::ostringstream oss;
+      oss << "JNI ERROR (app bug): " << kLocal << " table overflow "
+          << "(max=" << max_entries_ << ")" << std::endl
+          << MutatorLockedDumpable<LocalReferenceTable>(*this)
+          << " Resizing failed: exceeds size_t";
+      *error_msg = oss.str();
+      return nullptr;
+    }
+
+    std::string inner_error_msg;
+    if (!Resize(max_entries_ * 2, &inner_error_msg)) {
+      std::ostringstream oss;
+      oss << "JNI ERROR (app bug): " << kLocal << " table overflow "
+          << "(max=" << max_entries_ << ")" << std::endl
+          << MutatorLockedDumpable<LocalReferenceTable>(*this)
+          << " Resizing failed: " << inner_error_msg;
+      *error_msg = oss.str();
+      return nullptr;
+    }
+  }
+
+  RecoverHoles(previous_state);
+  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+
+  // We know there's enough room in the table.  Now we just need to find
+  // the right spot.  If there's a hole, find it and fill it; otherwise,
+  // add to the end of the list.
+  IndirectRef result;
+  size_t index;
+  if (current_num_holes_ > 0) {
+    DCHECK_GT(top_index, 1U);
+    // Find the first hole; likely to be near the end of the list.
+    LrtEntry* p_scan = &table_[top_index - 1];
+    DCHECK(!p_scan->GetReference()->IsNull());
+    --p_scan;
+    while (!p_scan->GetReference()->IsNull()) {
+      DCHECK_GE(p_scan, table_ + previous_state.top_index);
+      --p_scan;
+    }
+    index = p_scan - table_;
+    current_num_holes_--;
+  } else {
+    // Add to the end.
+    index = top_index++;
+    segment_state_.top_index = top_index;
+  }
+  table_[index].Add(obj);
+  result = ToIndirectRef(index);
+  if (kDebugLRT) {
+    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
+              << " holes=" << current_num_holes_;
+  }
+
+  DCHECK(result != nullptr);
+  return result;
+}
+
+void LocalReferenceTable::AssertEmpty() {
+  for (size_t i = 0; i < Capacity(); ++i) {
+    if (!table_[i].GetReference()->IsNull()) {
+      LOG(FATAL) << "Internal Error: non-empty local reference table\n"
+                 << MutatorLockedDumpable<LocalReferenceTable>(*this);
+      UNREACHABLE();
+    }
+  }
+}
+
+// Removes an object. We extract the table offset bits from "iref"
+// and zap the corresponding entry, leaving a hole if it's not at the top.
+// If the entry is not between the current top index and the bottom index
+// specified by the cookie, we don't remove anything. This is the behavior
+// required by JNI's DeleteLocalRef function.
+// This method is not called when a local frame is popped; this is only used
+// for explicit single removals.
+// Returns "false" if nothing was removed.
+bool LocalReferenceTable::Remove(LRTSegmentState previous_state, IndirectRef iref) {
+  if (kDebugLRT) {
+    LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
+              << " top_index=" << segment_state_.top_index
+              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+              << " holes=" << current_num_holes_;
+  }
+
+  const uint32_t top_index = segment_state_.top_index;
+  const uint32_t bottom_index = previous_state.top_index;
+
+  DCHECK(table_ != nullptr);
+
+  // TODO: We should eagerly check the ref kind against the `kLocal` kind instead of
+  // relying on this weak check and postponing the rest until `CheckEntry()` below.
+  // Passing the wrong kind currently results in misleading warnings.
+  if (GetIndirectRefKind(iref) == kJniTransition) {
+    auto* self = Thread::Current();
+    ScopedObjectAccess soa(self);
+    if (self->IsJniTransitionReference(reinterpret_cast<jobject>(iref))) {
+      auto* env = self->GetJniEnv();
+      DCHECK(env != nullptr);
+      if (env->IsCheckJniEnabled()) {
+        LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
+        if (kDumpStackOnNonLocalReference) {
+          self->Dump(LOG_STREAM(WARNING));
+        }
+      }
+      return true;
+    }
+  }
+
+  const uint32_t idx = ExtractIndex(iref);
+  if (idx < bottom_index) {
+    // Wrong segment.
+    LOG(WARNING) << "Attempt to remove index outside index area (" << idx
+                 << " vs " << bottom_index << "-" << top_index << ")";
+    return false;
+  }
+  if (idx >= top_index) {
+    // Bad --- stale reference?
+    LOG(WARNING) << "Attempt to remove invalid index " << idx
+                 << " (bottom=" << bottom_index << " top=" << top_index << ")";
+    return false;
+  }
+
+  RecoverHoles(previous_state);
+  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+
+  if (idx == top_index - 1) {
+    // Top-most entry.  Scan up and consume holes.
+
+    if (!CheckEntry("remove", iref, idx)) {
+      return false;
+    }
+
+    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
+    if (current_num_holes_ != 0) {
+      uint32_t collapse_top_index = top_index;
+      while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
+        if (kDebugLRT) {
+          ScopedObjectAccess soa(Thread::Current());
+          LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
+                    << " (previous_state=" << bottom_index << ") val="
+                    << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
+        }
+        if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
+          break;
+        }
+        if (kDebugLRT) {
+          LOG(INFO) << "+++ ate hole at " << (collapse_top_index - 1);
+        }
+        current_num_holes_--;
+      }
+      segment_state_.top_index = collapse_top_index;
+
+      CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+    } else {
+      segment_state_.top_index = top_index - 1;
+      if (kDebugLRT) {
+        LOG(INFO) << "+++ ate last entry " << top_index - 1;
+      }
+    }
+  } else {
+    // Not the top-most entry.  This creates a hole.  We null out the entry to prevent somebody
+    // from deleting it twice and screwing up the hole count.
+    if (table_[idx].GetReference()->IsNull()) {
+      LOG(INFO) << "--- WEIRD: removing null entry " << idx;
+      return false;
+    }
+    if (!CheckEntry("remove", iref, idx)) {
+      return false;
+    }
+
+    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
+    current_num_holes_++;
+    CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+    if (kDebugLRT) {
+      LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
+    }
+  }
+
+  return true;
+}
+
+void LocalReferenceTable::Trim() {
+  ScopedTrace trace(__PRETTY_FUNCTION__);
+  if (!table_mem_map_.IsValid()) {
+    // Small table; nothing to do here.
+    return;
+  }
+  const size_t top_index = Capacity();
+  uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
+  uint8_t* release_end = static_cast<uint8_t*>(table_mem_map_.BaseEnd());
+  DCHECK_GE(reinterpret_cast<uintptr_t>(release_end), reinterpret_cast<uintptr_t>(release_start));
+  DCHECK_ALIGNED(release_end, kPageSize);
+  DCHECK_ALIGNED(release_end - release_start, kPageSize);
+  if (release_start != release_end) {
+    madvise(release_start, release_end - release_start, MADV_DONTNEED);
+  }
+}
+
+void LocalReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
+  BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
+  for (size_t i = 0, capacity = Capacity(); i != capacity; ++i) {
+    GcRoot<mirror::Object>* ref = table_[i].GetReference();
+    if (!ref->IsNull()) {
+      root_visitor.VisitRoot(*ref);
+      DCHECK(!ref->IsNull());
+    }
+  }
+}
+
+void LocalReferenceTable::Dump(std::ostream& os) const {
+  os << kLocal << " table dump:\n";
+  ReferenceTable::Table entries;
+  for (size_t i = 0; i < Capacity(); ++i) {
+    ObjPtr<mirror::Object> obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
+    if (obj != nullptr) {
+      obj = table_[i].GetReference()->Read();
+      entries.push_back(GcRoot<mirror::Object>(obj));
+    }
+  }
+  ReferenceTable::Dump(os, entries);
+}
+
+void LocalReferenceTable::SetSegmentState(LRTSegmentState new_state) {
+  if (kDebugLRT) {
+    LOG(INFO) << "Setting segment state: "
+              << segment_state_.top_index
+              << " -> "
+              << new_state.top_index;
+  }
+  segment_state_ = new_state;
+}
+
+bool LocalReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
+  DCHECK_GE(free_capacity, static_cast<size_t>(1));
+  if (free_capacity > kMaxTableSizeInBytes) {
+    // Arithmetic might even overflow.
+    *error_msg = "Requested table size implausibly large";
+    return false;
+  }
+  size_t top_index = segment_state_.top_index;
+  if (top_index + free_capacity <= max_entries_) {
+    return true;
+  }
+
+  // Try to increase the table size.
+  if (!Resize(top_index + free_capacity, error_msg)) {
+    LOG(WARNING) << "JNI ERROR: Unable to reserve space in EnsureFreeCapacity (" << free_capacity
+                 << "): " << std::endl
+                 << MutatorLockedDumpable<LocalReferenceTable>(*this)
+                 << " Resizing failed: " << *error_msg;
+    return false;
+  }
+  return true;
+}
+
+size_t LocalReferenceTable::FreeCapacity() const {
+  return max_entries_ - segment_state_.top_index;
+}
+
+}  // namespace jni
+}  // namespace art
diff --git a/runtime/jni/local_reference_table.h b/runtime/jni/local_reference_table.h
new file mode 100644
index 0000000..debaa8b
--- /dev/null
+++ b/runtime/jni/local_reference_table.h
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JNI_LOCAL_REFERENCE_TABLE_H_
+#define ART_RUNTIME_JNI_LOCAL_REFERENCE_TABLE_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+#include <limits>
+#include <string>
+
+#include <android-base/logging.h>
+
+#include "base/bit_utils.h"
+#include "base/locks.h"
+#include "base/macros.h"
+#include "base/mem_map.h"
+#include "base/mutex.h"
+#include "gc_root.h"
+#include "indirect_reference_table.h"
+#include "obj_ptr.h"
+#include "offsets.h"
+#include "read_barrier_option.h"
+
+namespace art {
+
+class RootInfo;
+
+namespace mirror {
+class Object;
+}  // namespace mirror
+
+namespace jni {
+
+// Maintains a table of local JNI references.
+// TODO: Rewrite the implementation so that valid local references are effectively
+// `CompressedReference<Object>*` and can be decoded very quickly.
+//
+// The table contains object references, where the strong (local/global) references are part of the
+// GC root set (but not the weak global references). When an object is added we return an
+// IndirectRef that is not a valid pointer but can be used to find the original value in O(1) time.
+// Conversions to and from indirect references are performed on upcalls and downcalls, so they need
+// to be very fast.
+//
+// To be efficient for JNI local variable storage, we need to provide operations that allow us to
+// operate on segments of the table, where segments are pushed and popped as if on a stack. For
+// example, deletion of an entry should only succeed if it appears in the current segment, and we
+// want to be able to strip off the current segment quickly when a method returns. Additions to the
+// table must be made in the current segment even if space is available in an earlier area.
+//
+// A new segment is created when we call into native code from interpreted code, or when we handle
+// the JNI PushLocalFrame function.
+//
+// The GC must be able to scan the entire table quickly.
+//
+// In summary, these must be very fast:
+//  - adding or removing a segment
+//  - adding references to a new segment
+//  - converting an indirect reference back to an Object
+// These can be a little slower, but must still be pretty quick:
+//  - adding references to a "mature" segment
+//  - removing individual references
+//  - scanning the entire table straight through
+//
+// If there's more than one segment, we don't guarantee that the table will fill completely before
+// we fail due to lack of space. We do ensure that the current segment will pack tightly, which
+// should satisfy JNI requirements (e.g. EnsureLocalCapacity).
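+//
+// For illustration only, this stack discipline corresponds to standard JNI local-frame usage
+// from native code (plain JNI, nothing specific to this class):
+//
+//   env->PushLocalFrame(16);                 // starts a new segment
+//   jobject s = env->NewStringUTF("hello");  // local reference added to the new segment
+//   env->PopLocalFrame(nullptr);             // pops the segment; `s` is no longer valid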
+
+// Indirect reference definition.  This must be interchangeable with JNI's jobject, and it's
+// convenient to let null be null, so we use void*.
+//
+// We need a (potentially) large table index and a 2-bit reference type (global, local, weak
+// global). We also reserve some bits to be used to detect stale indirect references: we put a
+// serial number in the extra bits, and keep a copy of the serial number in the table. This requires
+// more memory and additional memory accesses on add/get, but is moving-GC safe. It will catch
+// additional problems, e.g.: create iref1 for obj, delete iref1, create iref2 for same obj,
+// lookup iref1. A pattern based on object bits will miss this.
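+//
+// As a sketch of the encoding implemented further below (assuming kKindBits == 2, which is what
+// MinimumBitsToStore(kLastKind) yields, and kLRTSerialBits == 3):
+//
+//   uintptr_t uref = (index << 5) | (serial << 2) | kLocal;
+//   // bit layout: [ table index ... | 3 serial bits | 2 kind bits ]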
+
+// Table definition.
+//
+// For the global reference table, the expected common operations are adding a new entry and
+// removing a recently-added entry (usually the most-recently-added entry).  For JNI local
+// references, the common operations are adding a new entry and removing an entire table segment.
+//
+// If we delete entries from the middle of the list, we will be left with "holes".  We track the
+// number of holes so that, when adding new elements, we can quickly decide to do a trivial append
+// or go slot-hunting.
+//
+// When the top-most entry is removed, any holes immediately below it are also removed. Thus,
+// deletion of an entry may reduce "top_index" by more than one.
+//
+// To get the desired behavior for JNI locals, we need to know the bottom and top of the current
+// "segment". The top is managed internally, and the bottom is passed in as a function argument.
+// When we call a native method or push a local frame, the current top index gets pushed on, and
+// serves as the new bottom. When we pop a frame off, the value from the stack becomes the new top
+// index, and the value stored in the previous frame becomes the new bottom.
+//
+// Holes are cached locally for the segment. Otherwise we would have to pass the bottom index and
+// the hole count, which restricts the top index to 16 bits. The value is cached within the
+// table. To avoid code in generated JNI transitions, which implicitly form segments, the code for
+// adding and removing references needs to detect the change of a segment. Helper fields are used
+// for this detection.
+//
+// Common alternative implementation: make IndirectRef a pointer to the actual reference slot.
+// Instead of getting a table and doing a lookup, the lookup can be done instantly. Operations like
+// determining the type and deleting the reference are more expensive because the table must be
+// hunted for (i.e. you have to do a pointer comparison to see which table it's in), you can't move
+// the table when expanding it (so realloc() is out), and tricks like serial number checking to
+// detect stale references aren't possible (though we may be able to get similar benefits with other
+// approaches).
+//
+// TODO: consider a "lastDeleteIndex" for quick hole-filling when an add immediately follows a
+// delete; it must be invalidated after a segment pop. Might be worth only using it for JNI globals.
+//
+// TODO: may want completely different add/remove algorithms for global and local refs to improve
+// performance.  A large circular buffer might reduce the amortized cost of adding global
+// references.
+
+// The state of the current segment. We only store the index. Splitting it for index and hole
+// count restricts the range too much.
+struct LRTSegmentState {
+  uint32_t top_index;
+};
+
+// Use as initial value for "cookie", and when table has only one segment.
+static constexpr LRTSegmentState kLRTFirstSegment = { 0 };
+
+// We associate a few bits of serial number with each reference, for error checking.
+static constexpr unsigned int kLRTSerialBits = 3;
+static constexpr uint32_t kLRTMaxSerial = ((1 << kLRTSerialBits) - 1);
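+// For example (illustrative): deleting a reference and then re-adding into the same slot bumps
+// the entry's serial, so the stale IndirectRef no longer matches and IsValidReference() rejects
+// it. With only kLRTSerialBits == 3 bits the serial wraps around after a handful of reuses, so
+// this is a best-effort check rather than a guarantee.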
+
+class LrtEntry {
+ public:
+  void Add(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  GcRoot<mirror::Object>* GetReference() {
+    DCHECK_LE(serial_, kLRTMaxSerial);
+    return &reference_;
+  }
+
+  const GcRoot<mirror::Object>* GetReference() const {
+    DCHECK_LE(serial_, kLRTMaxSerial);
+    return &reference_;
+  }
+
+  uint32_t GetSerial() const {
+    return serial_;
+  }
+
+  void SetReference(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+  uint32_t serial_;  // Incremented for each reuse; checked against reference.
+  GcRoot<mirror::Object> reference_;
+};
+static_assert(sizeof(LrtEntry) == 2 * sizeof(uint32_t), "Unexpected sizeof(LrtEntry)");
+static_assert(IsPowerOfTwo(sizeof(LrtEntry)), "Unexpected sizeof(LrtEntry)");
+
+// We initially allocate local reference tables with a very small number of entries, packing
+// multiple tables into a single page. If we need to expand one, we allocate them in units of
+// pages.
+// TODO: We should allocate all LRT tables as nonmovable Java objects. That in turn works better
+// if we break up each table into 2 parallel arrays, one for the Java reference, and one for the
+// serial number. The current scheme page-aligns regions containing LRT tables, and so allows them
+// to be identified and page-protected in the future.
+constexpr size_t kInitialLrtBytes = 512;  // Number of bytes in an initial local table.
+constexpr size_t kSmallLrtEntries = kInitialLrtBytes / sizeof(LrtEntry);
+static_assert(kPageSize % kInitialLrtBytes == 0);
+static_assert(kInitialLrtBytes % sizeof(LrtEntry) == 0);
+static_assert(kInitialLrtBytes % sizeof(void *) == 0);
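+// For example, with 4 KiB pages each page mapped by SmallLrtAllocator is carved into
+// kPageSize / kInitialLrtBytes == 8 chunks of kSmallLrtEntries == 64 entries each
+// (kInitialLrtBytes == 512 bytes, sizeof(LrtEntry) == 8 bytes).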
+
+// A minimal stopgap allocator for initial small local LRT tables.
+class SmallLrtAllocator {
+ public:
+  SmallLrtAllocator();
+
+  // Allocate an LRT table with space for kSmallLrtEntries entries.
+  LrtEntry* Allocate(std::string* error_msg) REQUIRES(!lock_);
+
+  void Deallocate(LrtEntry* unneeded) REQUIRES(!lock_);
+
+ private:
+  // A free list of kInitialLrtBytes chunks linked through the first word.
+  LrtEntry* small_lrt_freelist_;
+
+  // Repository of MemMaps used for small LRT tables.
+  std::vector<MemMap> shared_lrt_maps_;
+
+  Mutex lock_;  // Level kGenericBottomLock; acquired before mem_map_lock_, which is a C++ mutex.
+};
+
+class LocalReferenceTable {
+ public:
+  // Constructs an uninitialized local reference table. Use `Initialize()` to initialize it.
+  LocalReferenceTable();
+
+  // Initialize the local reference table.
+  //
+  // The `max_count` parameter is the minimum initial capacity (the table is resizable).
+  // A value of 1 indicates an implementation-convenient small size.
+  bool Initialize(size_t max_count, std::string* error_msg);
+
+  ~LocalReferenceTable();
+
+  /*
+   * Checks whether construction of the LocalReferenceTable succeeded.
+   *
+   * This object must only be used if IsValid() returns true. It is safe to
+   * call IsValid from multiple threads without locking or other explicit
+   * synchronization.
+   */
+  bool IsValid() const;
+
+  // Add a new entry. "obj" must be a valid non-null object reference. This function will
+  // return null if an error happened (with an appropriate error message set).
+  IndirectRef Add(LRTSegmentState previous_state,
+                  ObjPtr<mirror::Object> obj,
+                  std::string* error_msg)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Given an IndirectRef in the table, return the Object it refers to.
+  //
+  // This function may abort under error conditions.
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  ObjPtr<mirror::Object> Get(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_)
+      ALWAYS_INLINE;
+
+  // Updates an existing indirect reference to point to a new object.
+  void Update(IndirectRef iref, ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Remove an existing entry.
+  //
+  // If the entry is not between the current top index and the bottom index
+  // specified by the cookie, we don't remove anything.  This is the behavior
+  // required by JNI's DeleteLocalRef function.
+  //
+  // Returns "false" if nothing was removed.
+  bool Remove(LRTSegmentState previous_state, IndirectRef iref);
+
+  void AssertEmpty() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  void Dump(std::ostream& os) const
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Locks::alloc_tracker_lock_);
+
+  IndirectRefKind GetKind() const {
+    return kLocal;
+  }
+
+  // Return the number of entries in the entire table. This includes holes, and
+  // so may be larger than the actual number of "live" entries.
+  size_t Capacity() const {
+    return segment_state_.top_index;
+  }
+
+  // Return the number of non-null entries in the table. Only reliable for a
+  // single segment table.
+  int32_t NEntriesForGlobal() {
+    return segment_state_.top_index - current_num_holes_;
+  }
+
+  // Ensure that at least free_capacity elements are available, or return false.
+  // Caller ensures free_capacity > 0.
+  bool EnsureFreeCapacity(size_t free_capacity, std::string* error_msg)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  // See implementation of EnsureFreeCapacity. We'll only state here how much is trivially free,
+  // without recovering holes. Thus this is a conservative estimate.
+  size_t FreeCapacity() const;
+
+  void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  LRTSegmentState GetSegmentState() const {
+    return segment_state_;
+  }
+
+  void SetSegmentState(LRTSegmentState new_state);
+
+  static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
+    // Note: Currently segment_state_ is at offset 0. We're testing the expected value in
+    //       jni_internal_test to make sure it stays correct. It is not OFFSETOF_MEMBER, as that
+    //       is not pointer-size-safe.
+    return Offset(0);
+  }
+
+  // Release pages past the end of the table that may have previously held references.
+  void Trim() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Determine what kind of indirect reference this is. Opposite of EncodeIndirectRefKind.
+  ALWAYS_INLINE static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
+    return DecodeIndirectRefKind(reinterpret_cast<uintptr_t>(iref));
+  }
+
+  /* Reference validation for CheckJNI. */
+  bool IsValidReference(IndirectRef, /*out*/std::string* error_msg) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+  static constexpr uint32_t kShiftedSerialMask = (1u << kLRTSerialBits) - 1;
+
+  static constexpr size_t kKindBits = MinimumBitsToStore(
+      static_cast<uint32_t>(IndirectRefKind::kLastKind));
+  static constexpr uint32_t kKindMask = (1u << kKindBits) - 1;
+
+  static constexpr uintptr_t EncodeIndex(uint32_t table_index) {
+    static_assert(sizeof(IndirectRef) == sizeof(uintptr_t), "Unexpected IndirectRef size");
+    DCHECK_LE(MinimumBitsToStore(table_index), BitSizeOf<uintptr_t>() - kLRTSerialBits - kKindBits);
+    return (static_cast<uintptr_t>(table_index) << kKindBits << kLRTSerialBits);
+  }
+  static constexpr uint32_t DecodeIndex(uintptr_t uref) {
+    return static_cast<uint32_t>((uref >> kKindBits) >> kLRTSerialBits);
+  }
+
+  static constexpr uintptr_t EncodeIndirectRefKind(IndirectRefKind kind) {
+    return static_cast<uintptr_t>(kind);
+  }
+  static constexpr IndirectRefKind DecodeIndirectRefKind(uintptr_t uref) {
+    return static_cast<IndirectRefKind>(uref & kKindMask);
+  }
+
+  static constexpr uintptr_t EncodeSerial(uint32_t serial) {
+    DCHECK_LE(MinimumBitsToStore(serial), kLRTSerialBits);
+    return serial << kKindBits;
+  }
+  static constexpr uint32_t DecodeSerial(uintptr_t uref) {
+    return static_cast<uint32_t>(uref >> kKindBits) & kShiftedSerialMask;
+  }
+
+  constexpr uintptr_t EncodeIndirectRef(uint32_t table_index, uint32_t serial) const {
+    DCHECK_LT(table_index, max_entries_);
+    return EncodeIndex(table_index) | EncodeSerial(serial) | EncodeIndirectRefKind(kLocal);
+  }
+
+  static void ConstexprChecks();
+
+  // Extract the table index from an indirect reference.
+  ALWAYS_INLINE static uint32_t ExtractIndex(IndirectRef iref) {
+    return DecodeIndex(reinterpret_cast<uintptr_t>(iref));
+  }
+
+  IndirectRef ToIndirectRef(uint32_t table_index) const {
+    DCHECK_LT(table_index, max_entries_);
+    uint32_t serial = table_[table_index].GetSerial();
+    return reinterpret_cast<IndirectRef>(EncodeIndirectRef(table_index, serial));
+  }
+
+  // Resize the backing table to be at least new_size elements long. Currently
+  // must be larger than the current size. After return max_entries_ >= new_size.
+  bool Resize(size_t new_size, std::string* error_msg);
+
+  void RecoverHoles(LRTSegmentState from);
+
+  // Abort if check_jni is not enabled. Otherwise, just log as an error.
+  static void AbortIfNoCheckJNI(const std::string& msg);
+
+  /* extra debugging checks */
+  bool CheckEntry(const char*, IndirectRef, uint32_t) const;
+
+  // Semi-public: read/write by JNI down calls.
+  LRTSegmentState segment_state_;
+
+  // Mem map where we store the local refs. If it is invalid and table_ is non-null, then
+  // table_ is valid, but was allocated via `SmallLrtAllocator`.
+  MemMap table_mem_map_;
+  // The table itself, i.e. the bottom of the reference stack. Do not access the object
+  // references in it directly, as they are GC roots; use Get(), which has a read barrier.
+  LrtEntry* table_;
+
+  // Maximum number of entries allowed (modulo resizing).
+  size_t max_entries_;
+
+  // Some values to retain old behavior with holes. Description of the algorithm is in the .cc
+  // file.
+  // TODO: Consider other data structures for compact tables, e.g., free lists.
+  size_t current_num_holes_;  // Number of holes in the current / top segment.
+  LRTSegmentState last_known_previous_state_;
+};
+
+}  // namespace jni
+}  // namespace art
+
+#endif  // ART_RUNTIME_JNI_LOCAL_REFERENCE_TABLE_H_
diff --git a/runtime/jni/local_reference_table_test.cc b/runtime/jni/local_reference_table_test.cc
new file mode 100644
index 0000000..84bb189
--- /dev/null
+++ b/runtime/jni/local_reference_table_test.cc
@@ -0,0 +1,508 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "indirect_reference_table-inl.h"
+
+#include "android-base/stringprintf.h"
+
+#include "class_linker-inl.h"
+#include "common_runtime_test.h"
+#include "mirror/class-alloc-inl.h"
+#include "mirror/object-inl.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art {
+namespace jni {
+
+using android::base::StringPrintf;
+
+class LocalReferenceTableTest : public CommonRuntimeTest {
+ protected:
+  LocalReferenceTableTest() {
+    use_boot_image_ = true;  // Make the Runtime creation cheaper.
+  }
+};
+
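+// Helper that dumps the table and checks that the summary lists the expected number of
+// java.lang.Object entries and unique instances.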
+static void CheckDump(LocalReferenceTable* lrt, size_t num_objects, size_t num_unique)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  std::ostringstream oss;
+  lrt->Dump(oss);
+  if (num_objects == 0) {
+    EXPECT_EQ(oss.str().find("java.lang.Object"), std::string::npos) << oss.str();
+  } else if (num_objects == 1) {
+    EXPECT_NE(oss.str().find("1 of java.lang.Object"), std::string::npos) << oss.str();
+  } else {
+    EXPECT_NE(oss.str().find(StringPrintf("%zd of java.lang.Object (%zd unique instances)",
+                                          num_objects, num_unique)),
+              std::string::npos)
+                  << "\n Expected number of objects: " << num_objects
+                  << "\n Expected unique objects: " << num_unique << "\n"
+                  << oss.str();
+  }
+}
+
+TEST_F(LocalReferenceTableTest, BasicTest) {
+  // This will lead to error messages in the log.
+  ScopedLogSeverity sls(LogSeverity::FATAL);
+
+  ScopedObjectAccess soa(Thread::Current());
+  static const size_t kTableMax = 20;
+  std::string error_msg;
+  LocalReferenceTable lrt;
+  bool success = lrt.Initialize(kTableMax, &error_msg);
+  ASSERT_TRUE(success) << error_msg;
+
+  StackHandleScope<5> hs(soa.Self());
+  Handle<mirror::Class> c =
+      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
+  ASSERT_TRUE(c != nullptr);
+  Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj0 != nullptr);
+  Handle<mirror::Object> obj1 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj1 != nullptr);
+  Handle<mirror::Object> obj2 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj2 != nullptr);
+  Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj3 != nullptr);
+
+  const LRTSegmentState cookie = kLRTFirstSegment;
+
+  CheckDump(&lrt, 0, 0);
+
+  IndirectRef iref0 = (IndirectRef) 0x11110;
+  EXPECT_FALSE(lrt.Remove(cookie, iref0)) << "unexpectedly successful removal";
+
+  // Add three, check, remove in the order in which they were added.
+  iref0 = lrt.Add(cookie, obj0.Get(), &error_msg);
+  EXPECT_TRUE(iref0 != nullptr);
+  CheckDump(&lrt, 1, 1);
+  IndirectRef iref1 = lrt.Add(cookie, obj1.Get(), &error_msg);
+  EXPECT_TRUE(iref1 != nullptr);
+  CheckDump(&lrt, 2, 2);
+  IndirectRef iref2 = lrt.Add(cookie, obj2.Get(), &error_msg);
+  EXPECT_TRUE(iref2 != nullptr);
+  CheckDump(&lrt, 3, 3);
+
+  EXPECT_OBJ_PTR_EQ(obj0.Get(), lrt.Get(iref0));
+  EXPECT_OBJ_PTR_EQ(obj1.Get(), lrt.Get(iref1));
+  EXPECT_OBJ_PTR_EQ(obj2.Get(), lrt.Get(iref2));
+
+  EXPECT_TRUE(lrt.Remove(cookie, iref0));
+  CheckDump(&lrt, 2, 2);
+  EXPECT_TRUE(lrt.Remove(cookie, iref1));
+  CheckDump(&lrt, 1, 1);
+  EXPECT_TRUE(lrt.Remove(cookie, iref2));
+  CheckDump(&lrt, 0, 0);
+
+  // Table should be empty now.
+  EXPECT_EQ(0U, lrt.Capacity());
+
+  // Check that the entry off the end of the list is not valid.
+  // (CheckJNI shall abort for such entries.)
+  EXPECT_FALSE(lrt.IsValidReference(iref0, &error_msg));
+
+  // Add three, remove in the opposite order.
+  iref0 = lrt.Add(cookie, obj0.Get(), &error_msg);
+  EXPECT_TRUE(iref0 != nullptr);
+  iref1 = lrt.Add(cookie, obj1.Get(), &error_msg);
+  EXPECT_TRUE(iref1 != nullptr);
+  iref2 = lrt.Add(cookie, obj2.Get(), &error_msg);
+  EXPECT_TRUE(iref2 != nullptr);
+  CheckDump(&lrt, 3, 3);
+
+  ASSERT_TRUE(lrt.Remove(cookie, iref2));
+  CheckDump(&lrt, 2, 2);
+  ASSERT_TRUE(lrt.Remove(cookie, iref1));
+  CheckDump(&lrt, 1, 1);
+  ASSERT_TRUE(lrt.Remove(cookie, iref0));
+  CheckDump(&lrt, 0, 0);
+
+  // Table should be empty now.
+  ASSERT_EQ(0U, lrt.Capacity());
+
+  // Add three, remove middle / middle / bottom / top.  (Second attempt
+  // to remove middle should fail.)
+  iref0 = lrt.Add(cookie, obj0.Get(), &error_msg);
+  EXPECT_TRUE(iref0 != nullptr);
+  iref1 = lrt.Add(cookie, obj1.Get(), &error_msg);
+  EXPECT_TRUE(iref1 != nullptr);
+  iref2 = lrt.Add(cookie, obj2.Get(), &error_msg);
+  EXPECT_TRUE(iref2 != nullptr);
+  CheckDump(&lrt, 3, 3);
+
+  ASSERT_EQ(3U, lrt.Capacity());
+
+  ASSERT_TRUE(lrt.Remove(cookie, iref1));
+  CheckDump(&lrt, 2, 2);
+  ASSERT_FALSE(lrt.Remove(cookie, iref1));
+  CheckDump(&lrt, 2, 2);
+
+  // Check that the reference to the hole is not valid.
+  EXPECT_FALSE(lrt.IsValidReference(iref1, &error_msg));
+
+  ASSERT_TRUE(lrt.Remove(cookie, iref2));
+  CheckDump(&lrt, 1, 1);
+  ASSERT_TRUE(lrt.Remove(cookie, iref0));
+  CheckDump(&lrt, 0, 0);
+
+  // Table should be empty now.
+  ASSERT_EQ(0U, lrt.Capacity());
+
+  // Add four entries.  Remove #1, add new entry, verify that table size
+  // is still 4 (i.e. holes are getting filled).  Remove #1 and #3, verify
+  // that we delete one and don't hole-compact the other.
+  iref0 = lrt.Add(cookie, obj0.Get(), &error_msg);
+  EXPECT_TRUE(iref0 != nullptr);
+  iref1 = lrt.Add(cookie, obj1.Get(), &error_msg);
+  EXPECT_TRUE(iref1 != nullptr);
+  iref2 = lrt.Add(cookie, obj2.Get(), &error_msg);
+  EXPECT_TRUE(iref2 != nullptr);
+  IndirectRef iref3 = lrt.Add(cookie, obj3.Get(), &error_msg);
+  EXPECT_TRUE(iref3 != nullptr);
+  CheckDump(&lrt, 4, 4);
+
+  ASSERT_TRUE(lrt.Remove(cookie, iref1));
+  CheckDump(&lrt, 3, 3);
+
+  iref1 = lrt.Add(cookie, obj1.Get(), &error_msg);
+  EXPECT_TRUE(iref1 != nullptr);
+
+  ASSERT_EQ(4U, lrt.Capacity()) << "hole not filled";
+  CheckDump(&lrt, 4, 4);
+
+  ASSERT_TRUE(lrt.Remove(cookie, iref1));
+  CheckDump(&lrt, 3, 3);
+  ASSERT_TRUE(lrt.Remove(cookie, iref3));
+  CheckDump(&lrt, 2, 2);
+
+  ASSERT_EQ(3U, lrt.Capacity()) << "should be 3 after two deletions";
+
+  ASSERT_TRUE(lrt.Remove(cookie, iref2));
+  CheckDump(&lrt, 1, 1);
+  ASSERT_TRUE(lrt.Remove(cookie, iref0));
+  CheckDump(&lrt, 0, 0);
+
+  ASSERT_EQ(0U, lrt.Capacity()) << "not empty after split remove";
+
+  // Add an entry, remove it, add a new entry, and try to use the original
+  // iref.  They have the same slot number but are for different objects.
+  // With the extended checks in place, this should fail.
+  iref0 = lrt.Add(cookie, obj0.Get(), &error_msg);
+  EXPECT_TRUE(iref0 != nullptr);
+  CheckDump(&lrt, 1, 1);
+  ASSERT_TRUE(lrt.Remove(cookie, iref0));
+  CheckDump(&lrt, 0, 0);
+  iref1 = lrt.Add(cookie, obj1.Get(), &error_msg);
+  EXPECT_TRUE(iref1 != nullptr);
+  CheckDump(&lrt, 1, 1);
+  ASSERT_FALSE(lrt.Remove(cookie, iref0)) << "mismatched del succeeded";
+  CheckDump(&lrt, 1, 1);
+  ASSERT_TRUE(lrt.Remove(cookie, iref1)) << "switched del failed";
+  ASSERT_EQ(0U, lrt.Capacity()) << "switching del not empty";
+  CheckDump(&lrt, 0, 0);
+
+  // Same as above, but with the same object.  A more rigorous checker
+  // (e.g. with slot serialization) will catch this.
+  iref0 = lrt.Add(cookie, obj0.Get(), &error_msg);
+  EXPECT_TRUE(iref0 != nullptr);
+  CheckDump(&lrt, 1, 1);
+  ASSERT_TRUE(lrt.Remove(cookie, iref0));
+  CheckDump(&lrt, 0, 0);
+  iref1 = lrt.Add(cookie, obj0.Get(), &error_msg);
+  EXPECT_TRUE(iref1 != nullptr);
+  CheckDump(&lrt, 1, 1);
+  if (iref0 != iref1) {
+    // Try 0, should not work.
+    ASSERT_FALSE(lrt.Remove(cookie, iref0)) << "temporal del succeeded";
+  }
+  ASSERT_TRUE(lrt.Remove(cookie, iref1)) << "temporal cleanup failed";
+  ASSERT_EQ(0U, lrt.Capacity()) << "temporal del not empty";
+  CheckDump(&lrt, 0, 0);
+
+  // Stale reference is not valid.
+  iref0 = lrt.Add(cookie, obj0.Get(), &error_msg);
+  EXPECT_TRUE(iref0 != nullptr);
+  CheckDump(&lrt, 1, 1);
+  ASSERT_TRUE(lrt.Remove(cookie, iref0));
+  EXPECT_FALSE(lrt.IsValidReference(iref0, &error_msg)) << "stale lookup succeeded";
+  CheckDump(&lrt, 0, 0);
+
+  // Test table resizing.
+  // These ones fit...
+  static const size_t kTableInitial = kTableMax / 2;
+  IndirectRef manyRefs[kTableInitial];
+  for (size_t i = 0; i < kTableInitial; i++) {
+    manyRefs[i] = lrt.Add(cookie, obj0.Get(), &error_msg);
+    ASSERT_TRUE(manyRefs[i] != nullptr) << "Failed adding " << i;
+    CheckDump(&lrt, i + 1, 1);
+  }
+  // ...this one causes overflow.
+  iref0 = lrt.Add(cookie, obj0.Get(), &error_msg);
+  ASSERT_TRUE(iref0 != nullptr);
+  ASSERT_EQ(kTableInitial + 1, lrt.Capacity());
+  CheckDump(&lrt, kTableInitial + 1, 1);
+
+  for (size_t i = 0; i < kTableInitial; i++) {
+    ASSERT_TRUE(lrt.Remove(cookie, manyRefs[i])) << "failed removing " << i;
+    CheckDump(&lrt, kTableInitial - i, 1);
+  }
+  // Because of removal order, should have 11 entries, 10 of them holes.
+  ASSERT_EQ(kTableInitial + 1, lrt.Capacity());
+
+  ASSERT_TRUE(lrt.Remove(cookie, iref0)) << "multi-remove final failed";
+
+  ASSERT_EQ(0U, lrt.Capacity()) << "multi-del not empty";
+  CheckDump(&lrt, 0, 0);
+}
+
+TEST_F(LocalReferenceTableTest, Holes) {
+  // Test the explicitly named cases from the LRT implementation:
+  //
+  // 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
+  // 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+  // 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+  //    reference
+  // 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
+  // 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+  //    reference
+
+  ScopedObjectAccess soa(Thread::Current());
+  static const size_t kTableMax = 10;
+
+  StackHandleScope<6> hs(soa.Self());
+  Handle<mirror::Class> c = hs.NewHandle(
+      class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
+  ASSERT_TRUE(c != nullptr);
+  Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj0 != nullptr);
+  Handle<mirror::Object> obj1 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj1 != nullptr);
+  Handle<mirror::Object> obj2 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj2 != nullptr);
+  Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj3 != nullptr);
+  Handle<mirror::Object> obj4 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj4 != nullptr);
+
+  std::string error_msg;
+
+  // 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference.
+  {
+    LocalReferenceTable lrt;
+    bool success = lrt.Initialize(kTableMax, &error_msg);
+    ASSERT_TRUE(success) << error_msg;
+
+    const LRTSegmentState cookie0 = kLRTFirstSegment;
+
+    CheckDump(&lrt, 0, 0);
+
+    IndirectRef iref0 = lrt.Add(cookie0, obj0.Get(), &error_msg);
+    IndirectRef iref1 = lrt.Add(cookie0, obj1.Get(), &error_msg);
+    IndirectRef iref2 = lrt.Add(cookie0, obj2.Get(), &error_msg);
+
+    EXPECT_TRUE(lrt.Remove(cookie0, iref1));
+
+    // New segment.
+    const LRTSegmentState cookie1 = lrt.GetSegmentState();
+
+    IndirectRef iref3 = lrt.Add(cookie1, obj3.Get(), &error_msg);
+
+    // Must not have filled the previous hole.
+    EXPECT_EQ(lrt.Capacity(), 4u);
+    EXPECT_FALSE(lrt.IsValidReference(iref1, &error_msg));
+    CheckDump(&lrt, 3, 3);
+
+    UNUSED(iref0, iref1, iref2, iref3);
+  }
+
+  // 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+  {
+    LocalReferenceTable lrt;
+    bool success = lrt.Initialize(kTableMax, &error_msg);
+    ASSERT_TRUE(success) << error_msg;
+
+    const LRTSegmentState cookie0 = kLRTFirstSegment;
+
+    CheckDump(&lrt, 0, 0);
+
+    IndirectRef iref0 = lrt.Add(cookie0, obj0.Get(), &error_msg);
+
+    // New segment.
+    const LRTSegmentState cookie1 = lrt.GetSegmentState();
+
+    IndirectRef iref1 = lrt.Add(cookie1, obj1.Get(), &error_msg);
+    IndirectRef iref2 = lrt.Add(cookie1, obj2.Get(), &error_msg);
+    IndirectRef iref3 = lrt.Add(cookie1, obj3.Get(), &error_msg);
+
+    EXPECT_TRUE(lrt.Remove(cookie1, iref2));
+
+    // Pop segment.
+    lrt.SetSegmentState(cookie1);
+
+    IndirectRef iref4 = lrt.Add(cookie1, obj4.Get(), &error_msg);
+
+    EXPECT_EQ(lrt.Capacity(), 2u);
+    EXPECT_FALSE(lrt.IsValidReference(iref2, &error_msg));
+    CheckDump(&lrt, 2, 2);
+
+    UNUSED(iref0, iref1, iref2, iref3, iref4);
+  }
+
+  // 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+  //    reference.
+  {
+    LocalReferenceTable lrt;
+    bool success = lrt.Initialize(kTableMax, &error_msg);
+    ASSERT_TRUE(success) << error_msg;
+
+    const LRTSegmentState cookie0 = kLRTFirstSegment;
+
+    CheckDump(&lrt, 0, 0);
+
+    IndirectRef iref0 = lrt.Add(cookie0, obj0.Get(), &error_msg);
+
+    // New segment.
+    const LRTSegmentState cookie1 = lrt.GetSegmentState();
+
+    IndirectRef iref1 = lrt.Add(cookie1, obj1.Get(), &error_msg);
+    IndirectRef iref2 = lrt.Add(cookie1, obj2.Get(), &error_msg);
+
+    EXPECT_TRUE(lrt.Remove(cookie1, iref1));
+
+    // New segment.
+    const LRTSegmentState cookie2 = lrt.GetSegmentState();
+
+    IndirectRef iref3 = lrt.Add(cookie2, obj3.Get(), &error_msg);
+
+    // Pop segment.
+    lrt.SetSegmentState(cookie2);
+
+    IndirectRef iref4 = lrt.Add(cookie1, obj4.Get(), &error_msg);
+
+    EXPECT_EQ(lrt.Capacity(), 3u);
+    EXPECT_FALSE(lrt.IsValidReference(iref1, &error_msg));
+    CheckDump(&lrt, 3, 3);
+
+    UNUSED(iref0, iref1, iref2, iref3, iref4);
+  }
+
+  // 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference.
+  {
+    LocalReferenceTable lrt;
+    bool success = lrt.Initialize(kTableMax, &error_msg);
+    ASSERT_TRUE(success) << error_msg;
+
+    const LRTSegmentState cookie0 = kLRTFirstSegment;
+
+    CheckDump(&lrt, 0, 0);
+
+    IndirectRef iref0 = lrt.Add(cookie0, obj0.Get(), &error_msg);
+
+    // New segment.
+    const LRTSegmentState cookie1 = lrt.GetSegmentState();
+
+    IndirectRef iref1 = lrt.Add(cookie1, obj1.Get(), &error_msg);
+    EXPECT_TRUE(lrt.Remove(cookie1, iref1));
+
+    // Emptied segment, push new one.
+    const LRTSegmentState cookie2 = lrt.GetSegmentState();
+
+    IndirectRef iref2 = lrt.Add(cookie1, obj1.Get(), &error_msg);
+    IndirectRef iref3 = lrt.Add(cookie1, obj2.Get(), &error_msg);
+    IndirectRef iref4 = lrt.Add(cookie1, obj3.Get(), &error_msg);
+
+    EXPECT_TRUE(lrt.Remove(cookie1, iref3));
+
+    // Pop segment.
+    UNUSED(cookie2);
+    lrt.SetSegmentState(cookie1);
+
+    IndirectRef iref5 = lrt.Add(cookie1, obj4.Get(), &error_msg);
+
+    EXPECT_EQ(lrt.Capacity(), 2u);
+    EXPECT_FALSE(lrt.IsValidReference(iref3, &error_msg));
+    CheckDump(&lrt, 2, 2);
+
+    UNUSED(iref0, iref1, iref2, iref3, iref4, iref5);
+  }
+
+  // 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+  //    reference
+  {
+    LocalReferenceTable lrt;
+    bool success = lrt.Initialize(kTableMax, &error_msg);
+    ASSERT_TRUE(success) << error_msg;
+
+    const LRTSegmentState cookie0 = kLRTFirstSegment;
+
+    CheckDump(&lrt, 0, 0);
+
+    IndirectRef iref0 = lrt.Add(cookie0, obj0.Get(), &error_msg);
+
+    // New segment.
+    const LRTSegmentState cookie1 = lrt.GetSegmentState();
+
+    IndirectRef iref1 = lrt.Add(cookie1, obj1.Get(), &error_msg);
+    IndirectRef iref2 = lrt.Add(cookie1, obj1.Get(), &error_msg);
+    IndirectRef iref3 = lrt.Add(cookie1, obj2.Get(), &error_msg);
+
+    EXPECT_TRUE(lrt.Remove(cookie1, iref2));
+
+    // Pop segment.
+    lrt.SetSegmentState(cookie1);
+
+    // Push segment.
+    const LRTSegmentState cookie1_second = lrt.GetSegmentState();
+    UNUSED(cookie1_second);
+
+    IndirectRef iref4 = lrt.Add(cookie1, obj3.Get(), &error_msg);
+
+    EXPECT_EQ(lrt.Capacity(), 2u);
+    EXPECT_FALSE(lrt.IsValidReference(iref3, &error_msg));
+    CheckDump(&lrt, 2, 2);
+
+    UNUSED(iref0, iref1, iref2, iref3, iref4);
+  }
+}
+
+TEST_F(LocalReferenceTableTest, Resize) {
+  ScopedObjectAccess soa(Thread::Current());
+  static const size_t kTableMax = 512;
+
+  StackHandleScope<2> hs(soa.Self());
+  Handle<mirror::Class> c = hs.NewHandle(
+      class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
+  ASSERT_TRUE(c != nullptr);
+  Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj0 != nullptr);
+
+  std::string error_msg;
+  LocalReferenceTable lrt;
+  bool success = lrt.Initialize(kTableMax, &error_msg);
+  ASSERT_TRUE(success) << error_msg;
+
+  CheckDump(&lrt, 0, 0);
+  const LRTSegmentState cookie = kLRTFirstSegment;
+
+  for (size_t i = 0; i != kTableMax + 1; ++i) {
+    lrt.Add(cookie, obj0.Get(), &error_msg);
+  }
+
+  EXPECT_EQ(lrt.Capacity(), kTableMax + 1);
+}
+
+}  // namespace jni
+}  // namespace art
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 2ffd866..b204533 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -28,6 +28,9 @@
 #include "obj_ptr.h"
 
 namespace art {
+namespace jni {
+class LocalReferenceTable;
+}  // namespace jni
 namespace mirror {
 class Object;
 }  // namespace mirror
@@ -61,6 +64,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::alloc_tracker_lock_);
   friend class IndirectReferenceTable;  // For Dump.
+  friend class jni::LocalReferenceTable;  // For Dump.
 
   std::string name_;
   Table entries_;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index e99eaec..8c4062e 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -512,8 +512,8 @@
   monitor_pool_ = nullptr;
   delete class_linker_;
   class_linker_ = nullptr;
-  delete small_irt_allocator_;
-  small_irt_allocator_ = nullptr;
+  delete small_lrt_allocator_;
+  small_lrt_allocator_ = nullptr;
   delete heap_;
   heap_ = nullptr;
   delete intern_table_;
@@ -1727,7 +1727,7 @@
   linear_alloc_.reset(CreateLinearAlloc());
   startup_linear_alloc_.reset(CreateLinearAlloc());
 
-  small_irt_allocator_ = new SmallIrtAllocator();
+  small_lrt_allocator_ = new jni::SmallLrtAllocator();
 
   BlockSignals();
   InitPlatformSignalHandlers();
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 9b6f545..c3f1a70 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -68,6 +68,10 @@
 class JitOptions;
 }  // namespace jit
 
+namespace jni {
+class SmallLrtAllocator;
+}  // namespace jni
+
 namespace mirror {
 class Array;
 class ClassLoader;
@@ -107,7 +111,6 @@
 struct RuntimeArgumentMap;
 class RuntimeCallbacks;
 class SignalCatcher;
-class SmallIrtAllocator;
 class StackOverflowHandler;
 class SuspensionHandler;
 class ThreadList;
@@ -370,8 +373,8 @@
     return class_linker_;
   }
 
-  SmallIrtAllocator* GetSmallIrtAllocator() const {
-    return small_irt_allocator_;
+  jni::SmallLrtAllocator* GetSmallLrtAllocator() const {
+    return small_lrt_allocator_;
   }
 
   jni::JniIdManager* GetJniIdManager() const {
@@ -1319,7 +1322,7 @@
 
   SignalCatcher* signal_catcher_;
 
-  SmallIrtAllocator* small_irt_allocator_;
+  jni::SmallLrtAllocator* small_lrt_allocator_;
 
   std::unique_ptr<jni::JniIdManager> jni_id_manager_;
 
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a8b0e17..751bd09 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2768,9 +2768,9 @@
   bool expect_null = false;
   // The "kinds" below are sorted by the frequency we expect to encounter them.
   if (kind == kLocal) {
-    IndirectReferenceTable& locals = tlsPtr_.jni_env->locals_;
+    jni::LocalReferenceTable& locals = tlsPtr_.jni_env->locals_;
     // Local references do not need a read barrier.
-    result = locals.Get<kWithoutReadBarrier>(ref);
+    result = locals.Get(ref);
   } else if (kind == kJniTransition) {
     // The `jclass` for a static method points to the CompressedReference<> in the
     // `ArtMethod::declaring_class_`. Other `jobject` arguments point to spilled stack