Revert "Rewrite `LocalReferenceTable`."

This reverts commit db18e88d8a9e2a45639c6fd25e91341dd3f32f07.

Reason for revert: Crashes seen on virtual x86_64 devices.
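
For context, this restores the pre-rewrite `LocalReferenceTable` API: the constructor no longer
takes a CheckJNI flag and a reference again encodes a per-entry serial number plus a table index.
A minimal usage sketch, mirroring the gtest and the Add()/Remove() signatures in the diff below
(`obj` is a placeholder for a live, non-null object and the calls assume the mutator lock is held):

  std::string error_msg;
  jni::LocalReferenceTable lrt;  // No CheckJNI flag in the restored constructor.
  bool ok = lrt.Initialize(/*max_count=*/ 5, &error_msg);
  CHECK(ok) << error_msg;
  jni::LRTSegmentState cookie = lrt.GetSegmentState();  // Segment cookie for Add()/Remove().
  IndirectRef ref = lrt.Add(cookie, obj, &error_msg);   // Returns nullptr on overflow.
  lrt.Remove(cookie, ref);                              // Leaves a hole unless it was the top entry.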

Bug: 172332525
Bug: 276210372
Change-Id: I50e47f45f60253a31d246e9f38e65ebf82da7765
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 67010f3..479eda5 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -138,12 +138,6 @@
   static_assert(DecodeIndex(EncodeIndex(1u)) == 1u, "Index encoding error");
   static_assert(DecodeIndex(EncodeIndex(2u)) == 2u, "Index encoding error");
   static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
-
-  // Distinguishing between local and (weak) global references.
-  static_assert((GetGlobalOrWeakGlobalMask() & EncodeIndirectRefKind(kJniTransition)) == 0u);
-  static_assert((GetGlobalOrWeakGlobalMask() & EncodeIndirectRefKind(kLocal)) == 0u);
-  static_assert((GetGlobalOrWeakGlobalMask() & EncodeIndirectRefKind(kGlobal)) != 0u);
-  static_assert((GetGlobalOrWeakGlobalMask() & EncodeIndirectRefKind(kWeakGlobal)) != 0u);
 }
 
 // Holes:
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 9773f15..59729ac 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -87,9 +87,9 @@
 //
 // In summary, these must be very fast:
 //  - adding references
-//  - removing references
 //  - converting an indirect reference back to an Object
 // These can be a little slower, but must still be pretty quick:
+//  - removing individual references
 //  - scanning the entire table straight through
 
 // Table definition.
@@ -218,31 +218,6 @@
     return DecodeIndirectRefKind(reinterpret_cast<uintptr_t>(iref));
   }
 
-  static constexpr uintptr_t GetGlobalOrWeakGlobalMask() {
-    constexpr uintptr_t mask = enum_cast<uintptr_t>(kGlobal);
-    static_assert(IsPowerOfTwo(mask));
-    static_assert((mask & kJniTransition) == 0u);
-    static_assert((mask & kLocal) == 0u);
-    static_assert((mask & kGlobal) != 0u);
-    static_assert((mask & kWeakGlobal) != 0u);
-    return mask;
-  }
-
-  static bool IsGlobalOrWeakGlobalReference(IndirectRef iref) {
-    return (reinterpret_cast<uintptr_t>(iref) & GetGlobalOrWeakGlobalMask()) != 0u;
-  }
-
-  static bool IsJniTransitionOrLocalReference(IndirectRef iref) {
-    return !IsGlobalOrWeakGlobalReference(iref);
-  }
-
-  template <typename T>
-  static T ClearIndirectRefKind(IndirectRef iref) {
-    static_assert(std::is_pointer_v<T>);
-    return reinterpret_cast<T>(
-        reinterpret_cast<uintptr_t>(iref) & ~static_cast<uintptr_t>(kKindMask));
-  }
-
   /* Reference validation for CheckJNI. */
   bool IsValidReference(IndirectRef, /*out*/std::string* error_msg) const
       REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/jni/jni_env_ext.cc b/runtime/jni/jni_env_ext.cc
index bef0fd3..619e1de 100644
--- a/runtime/jni/jni_env_ext.cc
+++ b/runtime/jni/jni_env_ext.cc
@@ -33,7 +33,6 @@
 #include "nth_caller_visitor.h"
 #include "scoped_thread_state_change.h"
 #include "thread-current-inl.h"
-#include "thread-inl.h"
 #include "thread_list.h"
 
 namespace art {
@@ -71,7 +70,7 @@
     : self_(self_in),
       vm_(vm_in),
       local_ref_cookie_(jni::kLRTFirstSegment),
-      locals_(vm_in->IsCheckJniEnabled()),
+      locals_(),
       monitors_("monitors", kMonitorsInitial, kMonitorsMax),
       critical_(0),
       check_jni_(false),
@@ -115,7 +114,6 @@
 
 void JNIEnvExt::SetCheckJniEnabled(bool enabled) {
   check_jni_ = enabled;
-  locals_.SetCheckJniEnabled(enabled);
   MutexLock mu(Thread::Current(), *Locks::jni_function_table_lock_);
   functions = GetFunctionTable(enabled);
   // Check whether this is a no-op because of override.
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index 3d7f7c4..5abedb9 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -2580,7 +2580,7 @@
   // by modifying memory.
   // The parameters don't really matter here.
   std::string error_msg;
-  jni::LocalReferenceTable lrt(/*check_jni=*/ true);
+  jni::LocalReferenceTable lrt;
   bool success = lrt.Initialize(/*max_count=*/ 5, &error_msg);
   ASSERT_TRUE(success) << error_msg;
   jni::LRTSegmentState old_state = lrt.GetSegmentState();
diff --git a/runtime/jni/local_reference_table-inl.h b/runtime/jni/local_reference_table-inl.h
index 8b46049..b65dea7 100644
--- a/runtime/jni/local_reference_table-inl.h
+++ b/runtime/jni/local_reference_table-inl.h
@@ -21,131 +21,97 @@
 
 #include "android-base/stringprintf.h"
 
-#include "base/casts.h"
+#include "base/dumpable.h"
 #include "gc_root-inl.h"
 #include "obj_ptr-inl.h"
-#include "mirror/object_reference.h"
 #include "verify_object.h"
 
 namespace art {
+namespace mirror {
+class Object;
+}  // namespace mirror
+
 namespace jni {
 
-inline void LrtEntry::SetReference(ObjPtr<mirror::Object> ref) {
-  root_ = GcRoot<mirror::Object>(
-      mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref.Ptr()));
-  DCHECK(!IsFree());
-  DCHECK(!IsSerialNumber());
-}
-
-inline ObjPtr<mirror::Object> LrtEntry::GetReference() {
-  DCHECK(!IsFree());
-  DCHECK(!IsSerialNumber());
-  DCHECK(!IsNull());
-  // Local references do not need read barriers. They are marked during the thread root flip.
-  return root_.Read<kWithoutReadBarrier>();
-}
-
-inline void LrtEntry::SetNextFree(uint32_t next_free) {
-  SetVRegValue(NextFreeField::Update(next_free, 1u << kFlagFree));
-  DCHECK(IsFree());
-  DCHECK(!IsSerialNumber());
-}
-
-inline void LrtEntry::SetSerialNumber(uint32_t serial_number) {
-  SetVRegValue(SerialNumberField::Update(serial_number, 1u << kFlagSerialNumber));
-  DCHECK(!IsFree());
-  DCHECK(IsSerialNumber());
-}
-
-inline void LrtEntry::SetVRegValue(uint32_t value) {
-  root_ = GcRoot<mirror::Object>(
-      mirror::CompressedReference<mirror::Object>::FromVRegValue(value));
-}
-
-inline uint32_t LocalReferenceTable::GetReferenceEntryIndex(IndirectRef iref) const {
-  DCHECK_EQ(IndirectReferenceTable::GetIndirectRefKind(iref), kLocal);
-  LrtEntry* entry = ToLrtEntry(iref);
-
-  if (LIKELY(small_table_ != nullptr)) {
-    DCHECK(tables_.empty());
-    if (!std::less<const LrtEntry*>()(entry, small_table_) &&
-        std::less<const LrtEntry*>()(entry, small_table_ + kSmallLrtEntries)) {
-      return dchecked_integral_cast<uint32_t>(entry - small_table_);
-    }
-  } else {
-    for (size_t i = 0, size = tables_.size(); i != size; ++i) {
-      LrtEntry* table = tables_[i];
-      size_t table_size = GetTableSize(i);
-      if (!std::less<const LrtEntry*>()(entry, table) &&
-          std::less<const LrtEntry*>()(entry, table + table_size)) {
-        return dchecked_integral_cast<size_t>(i != 0u ? table_size : 0u) +
-               dchecked_integral_cast<size_t>(entry - table);
-      }
-    }
-  }
-  return std::numeric_limits<uint32_t>::max();
-}
-
+// Verifies that the indirect table lookup is valid.
+// Returns "false" if something looks bad.
 inline bool LocalReferenceTable::IsValidReference(IndirectRef iref,
-                                                  /*out*/std::string* error_msg) const {
-  uint32_t entry_index = GetReferenceEntryIndex(iref);
-  if (UNLIKELY(entry_index == std::numeric_limits<uint32_t>::max())) {
-    *error_msg = android::base::StringPrintf("reference outside the table: %p", iref);
+                                                  /*out*/std::string* error_msg) const {
+  DCHECK(iref != nullptr);
+  DCHECK_EQ(GetIndirectRefKind(iref), kLocal);
+  const uint32_t top_index = segment_state_.top_index;
+  uint32_t idx = ExtractIndex(iref);
+  if (UNLIKELY(idx >= top_index)) {
+    *error_msg = android::base::StringPrintf("deleted reference at index %u in a table of size %u",
+                                             idx,
+                                             top_index);
     return false;
   }
-  if (UNLIKELY(entry_index >= segment_state_.top_index)) {
-    *error_msg = android::base::StringPrintf("popped reference at index %u in a table of size %u",
-                                             entry_index,
-                                             segment_state_.top_index);
+  if (UNLIKELY(table_[idx].GetReference()->IsNull())) {
+    *error_msg = android::base::StringPrintf("deleted reference at index %u", idx);
     return false;
   }
-  LrtEntry* entry = ToLrtEntry(iref);
-  LrtEntry* serial_number_entry = GetCheckJniSerialNumberEntry(entry);
-  if (serial_number_entry->IsSerialNumber()) {
-    // This reference was created with CheckJNI enabled.
-    uint32_t expected_serial_number = serial_number_entry->GetSerialNumber();
-    uint32_t serial_number = entry - serial_number_entry;
-    DCHECK_LT(serial_number, kCheckJniEntriesPerReference);
-    if (serial_number != expected_serial_number || serial_number == 0u) {
-      *error_msg = android::base::StringPrintf(
-          "reference at index %u with bad serial number %u v. %u (valid 1 - %u)",
-          entry_index,
-          serial_number,
-          expected_serial_number,
-          dchecked_integral_cast<uint32_t>(kCheckJniEntriesPerReference - 1u));
-      return false;
-    }
-  }
-  if (UNLIKELY(entry->IsFree())) {
-    *error_msg = android::base::StringPrintf("deleted reference at index %u", entry_index);
-    return false;
-  }
-  if (UNLIKELY(entry->IsNull())) {
-    // This should never really happen and may indicate memory coruption.
-    *error_msg = android::base::StringPrintf("null reference at index %u", entry_index);
+  uint32_t iref_serial = DecodeSerial(reinterpret_cast<uintptr_t>(iref));
+  uint32_t entry_serial = table_[idx].GetSerial();
+  if (UNLIKELY(iref_serial != entry_serial)) {
+    *error_msg = android::base::StringPrintf("stale reference with serial number %u v. current %u",
+                                             iref_serial,
+                                             entry_serial);
     return false;
   }
   return true;
 }
 
-inline void LocalReferenceTable::DCheckValidReference(IndirectRef iref) const {
-  // If CheckJNI is performing the checks, we should not reach this point with an invalid
-  // reference with the exception of gtests that intercept the CheckJNI abort and proceed
-  // to decode the reference anyway and we do not want to abort again in this case.
-  if (kIsDebugBuild && !IsCheckJniEnabled()) {
-    std::string error_msg;
-    CHECK(IsValidReference(iref, &error_msg)) << error_msg;
+// Make sure that the entry at "idx" is correctly paired with "iref".
+inline bool LocalReferenceTable::CheckEntry(const char* what,
+                                            IndirectRef iref,
+                                            uint32_t idx) const {
+  IndirectRef checkRef = ToIndirectRef(idx);
+  if (UNLIKELY(checkRef != iref)) {
+    std::string msg = android::base::StringPrintf(
+        "JNI ERROR (app bug): attempt to %s stale %s %p (should be %p)",
+        what,
+        GetIndirectRefKindString(kLocal),
+        iref,
+        checkRef);
+    AbortIfNoCheckJNI(msg);
+    return false;
   }
+  return true;
 }
 
+template<ReadBarrierOption kReadBarrierOption>
 inline ObjPtr<mirror::Object> LocalReferenceTable::Get(IndirectRef iref) const {
-  DCheckValidReference(iref);
-  return ToLrtEntry(iref)->GetReference();
+  DCHECK_EQ(GetIndirectRefKind(iref), kLocal);
+  uint32_t idx = ExtractIndex(iref);
+  DCHECK_LT(idx, segment_state_.top_index);
+  DCHECK_EQ(DecodeSerial(reinterpret_cast<uintptr_t>(iref)), table_[idx].GetSerial());
+  DCHECK(!table_[idx].GetReference()->IsNull());
+  ObjPtr<mirror::Object> obj = table_[idx].GetReference()->Read<kReadBarrierOption>();
+  VerifyObject(obj);
+  return obj;
 }
 
 inline void LocalReferenceTable::Update(IndirectRef iref, ObjPtr<mirror::Object> obj) {
-  DCheckValidReference(iref);
-  ToLrtEntry(iref)->SetReference(obj);
+  DCHECK_EQ(GetIndirectRefKind(iref), kLocal);
+  uint32_t idx = ExtractIndex(iref);
+  DCHECK_LT(idx, segment_state_.top_index);
+  DCHECK_EQ(DecodeSerial(reinterpret_cast<uintptr_t>(iref)), table_[idx].GetSerial());
+  DCHECK(!table_[idx].GetReference()->IsNull());
+  table_[idx].SetReference(obj);
+}
+
+inline void LrtEntry::Add(ObjPtr<mirror::Object> obj) {
+  ++serial_;
+  if (serial_ == kLRTMaxSerial) {
+    serial_ = 0;
+  }
+  reference_ = GcRoot<mirror::Object>(obj);
+}
+
+inline void LrtEntry::SetReference(ObjPtr<mirror::Object> obj) {
+  DCHECK_LT(serial_, kLRTMaxSerial);
+  reference_ = GcRoot<mirror::Object>(obj);
 }
 
 }  // namespace jni
diff --git a/runtime/jni/local_reference_table.cc b/runtime/jni/local_reference_table.cc
index 8293d75..6cbbde7 100644
--- a/runtime/jni/local_reference_table.cc
+++ b/runtime/jni/local_reference_table.cc
@@ -16,8 +16,6 @@
 
 #include "local_reference_table-inl.h"
 
-#include "base/bit_utils.h"
-#include "base/casts.h"
 #include "base/globals.h"
 #include "base/mutator_locked_dumpable.h"
 #include "base/systrace.h"
@@ -28,7 +26,7 @@
 #include "mirror/object-inl.h"
 #include "nth_caller_visitor.h"
 #include "reference_table.h"
-#include "runtime-inl.h"
+#include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread.h"
 
@@ -40,118 +38,72 @@
 static constexpr bool kDumpStackOnNonLocalReference = false;
 static constexpr bool kDebugLRT = false;
 
+// Maximum table size we allow.
+static constexpr size_t kMaxTableSizeInBytes = 128 * MB;
+
+void LocalReferenceTable::AbortIfNoCheckJNI(const std::string& msg) {
+  // If -Xcheck:jni is on, it'll give a more detailed error before aborting.
+  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
+  if (!vm->IsCheckJniEnabled()) {
+    // Otherwise, we want to abort rather than hand back a bad reference.
+    LOG(FATAL) << msg;
+  } else {
+    LOG(ERROR) << msg;
+  }
+}
+
 // Mmap an "indirect ref table region. Table_bytes is a multiple of a page size.
 static inline MemMap NewLRTMap(size_t table_bytes, std::string* error_msg) {
-  return MemMap::MapAnonymous("local ref table",
-                              table_bytes,
-                              PROT_READ | PROT_WRITE,
-                              /*low_4gb=*/ false,
-                              error_msg);
+  MemMap result = MemMap::MapAnonymous("local ref table",
+                                       table_bytes,
+                                       PROT_READ | PROT_WRITE,
+                                       /*low_4gb=*/ false,
+                                       error_msg);
+  if (!result.IsValid() && error_msg->empty()) {
+    *error_msg = "Unable to map memory for indirect ref table";
+  }
+  return result;
 }
 
 SmallLrtAllocator::SmallLrtAllocator()
-    : free_lists_(kNumSlots, nullptr),
-      shared_lrt_maps_(),
-      lock_("Small LRT allocator lock", LockLevel::kGenericBottomLock) {
+    : small_lrt_freelist_(nullptr), lock_("Small LRT table lock", LockLevel::kGenericBottomLock) {
 }
 
-inline size_t SmallLrtAllocator::GetIndex(size_t size) {
-  DCHECK_GE(size, kSmallLrtEntries);
-  DCHECK_LT(size, kPageSize / sizeof(LrtEntry));
-  DCHECK(IsPowerOfTwo(size));
-  size_t index = WhichPowerOf2(size / kSmallLrtEntries);
-  DCHECK_LT(index, kNumSlots);
-  return index;
-}
-
-LrtEntry* SmallLrtAllocator::Allocate(size_t size, std::string* error_msg) {
-  size_t index = GetIndex(size);
+// Allocate a LRT table for kSmallLrtEntries.
+LrtEntry* SmallLrtAllocator::Allocate(std::string* error_msg) {
   MutexLock lock(Thread::Current(), lock_);
-  size_t fill_from = index;
-  while (fill_from != kNumSlots && free_lists_[fill_from] == nullptr) {
-    ++fill_from;
-  }
-  void* result = nullptr;
-  if (fill_from != kNumSlots) {
-    // We found a slot with enough memory.
-    result = free_lists_[fill_from];
-    free_lists_[fill_from] = *reinterpret_cast<void**>(result);
-  } else {
-    // We need to allocate a new page and split it into smaller pieces.
+  if (small_lrt_freelist_ == nullptr) {
+    // Refill.
     MemMap map = NewLRTMap(kPageSize, error_msg);
-    if (!map.IsValid()) {
-      return nullptr;
+    if (map.IsValid()) {
+      small_lrt_freelist_ = reinterpret_cast<LrtEntry*>(map.Begin());
+      for (uint8_t* p = map.Begin(); p + kInitialLrtBytes < map.End(); p += kInitialLrtBytes) {
+        *reinterpret_cast<LrtEntry**>(p) = reinterpret_cast<LrtEntry*>(p + kInitialLrtBytes);
+      }
+      shared_lrt_maps_.emplace_back(std::move(map));
     }
-    result = map.Begin();
-    shared_lrt_maps_.emplace_back(std::move(map));
   }
-  while (fill_from != index) {
-    --fill_from;
-    // Store the second half of the current buffer in appropriate free list slot.
-    void* mid = reinterpret_cast<uint8_t*>(result) + (kInitialLrtBytes << fill_from);
-    DCHECK(free_lists_[fill_from] == nullptr);
-    *reinterpret_cast<void**>(mid) = nullptr;
-    free_lists_[fill_from] = mid;
+  if (small_lrt_freelist_ == nullptr) {
+    return nullptr;
   }
-  // Clear the memory we return to the caller.
-  std::memset(result, 0, kInitialLrtBytes << index);
-  return reinterpret_cast<LrtEntry*>(result);
+  LrtEntry* result = small_lrt_freelist_;
+  small_lrt_freelist_ = *reinterpret_cast<LrtEntry**>(small_lrt_freelist_);
+  // Clear pointer in first entry.
+  new(result) LrtEntry();
+  return result;
 }
 
-void SmallLrtAllocator::Deallocate(LrtEntry* unneeded, size_t size) {
-  size_t index = GetIndex(size);
+void SmallLrtAllocator::Deallocate(LrtEntry* unneeded) {
   MutexLock lock(Thread::Current(), lock_);
-  while (index < kNumSlots) {
-    // Check if we can merge this free block with another block with the same size.
-    void** other = reinterpret_cast<void**>(
-        reinterpret_cast<uintptr_t>(unneeded) ^ (kInitialLrtBytes << index));
-    void** before = &free_lists_[index];
-    if (index + 1u == kNumSlots && *before == other && *other == nullptr) {
-      // Do not unmap the page if we do not have other free blocks with index `kNumSlots - 1`.
-      // (Keep at least one free block to avoid a situation where creating and destroying a single
-      // thread with no local references would map and unmap a page in the `SmallLrtAllocator`.)
-      break;
-    }
-    while (*before != nullptr && *before != other) {
-      before = reinterpret_cast<void**>(*before);
-    }
-    if (*before == nullptr) {
-      break;
-    }
-    // Remove `other` from the free list and merge it with the `unneeded` block.
-    DCHECK(*before == other);
-    *before = *reinterpret_cast<void**>(other);
-    ++index;
-    unneeded = reinterpret_cast<LrtEntry*>(
-        reinterpret_cast<uintptr_t>(unneeded) & reinterpret_cast<uintptr_t>(other));
-  }
-  if (index == kNumSlots) {
-    // Free the entire page.
-    DCHECK(free_lists_[kNumSlots - 1u] != nullptr);
-    auto match = [=](MemMap& map) { return unneeded == reinterpret_cast<LrtEntry*>(map.Begin()); };
-    auto it = std::find_if(shared_lrt_maps_.begin(), shared_lrt_maps_.end(), match);
-    DCHECK(it != shared_lrt_maps_.end());
-    shared_lrt_maps_.erase(it);
-    DCHECK(!shared_lrt_maps_.empty());
-    return;
-  }
-  *reinterpret_cast<void**>(unneeded) = free_lists_[index];
-  free_lists_[index] = unneeded;
+  *reinterpret_cast<LrtEntry**>(unneeded) = small_lrt_freelist_;
+  small_lrt_freelist_ = unneeded;
 }
 
-LocalReferenceTable::LocalReferenceTable(bool check_jni)
+LocalReferenceTable::LocalReferenceTable()
     : segment_state_(kLRTFirstSegment),
+      table_(nullptr),
       max_entries_(0u),
-      free_entries_list_(
-          FirstFreeField::Update(kFreeListEnd, check_jni ? 1u << kFlagCheckJni : 0u)),
-      small_table_(nullptr),
-      tables_(),
-      table_mem_maps_() {
-}
-
-void LocalReferenceTable::SetCheckJniEnabled(bool enabled) {
-  free_entries_list_ =
-      (free_entries_list_ & ~(1u << kFlagCheckJni)) | (enabled ? 1u << kFlagCheckJni : 0u);
+      current_num_holes_(0) {
 }
 
 bool LocalReferenceTable::Initialize(size_t max_count, std::string* error_msg) {
@@ -159,227 +111,208 @@
 
   // Overflow and maximum check.
   CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(LrtEntry));
-  if (IsCheckJniEnabled()) {
-    CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(LrtEntry) / kCheckJniEntriesPerReference);
-    max_count *= kCheckJniEntriesPerReference;
-  }
 
-  SmallLrtAllocator* small_lrt_allocator = Runtime::Current()->GetSmallLrtAllocator();
-  LrtEntry* first_table = small_lrt_allocator->Allocate(kSmallLrtEntries, error_msg);
-  if (first_table == nullptr) {
-    DCHECK(!error_msg->empty());
-    return false;
+  if (max_count <= kSmallLrtEntries) {
+    table_ = Runtime::Current()->GetSmallLrtAllocator()->Allocate(error_msg);
+    if (table_ != nullptr) {
+      max_entries_ = kSmallLrtEntries;
+      // table_mem_map_ remains invalid.
+    }
   }
-  DCHECK_ALIGNED(first_table, kCheckJniEntriesPerReference * sizeof(LrtEntry));
-  small_table_ = first_table;
-  max_entries_ = kSmallLrtEntries;
-  return (max_count <= kSmallLrtEntries) || Resize(max_count, error_msg);
+  if (table_ == nullptr) {
+    const size_t table_bytes = RoundUp(max_count * sizeof(LrtEntry), kPageSize);
+    table_mem_map_ = NewLRTMap(table_bytes, error_msg);
+    if (!table_mem_map_.IsValid()) {
+      DCHECK(!error_msg->empty());
+      return false;
+    }
+
+    table_ = reinterpret_cast<LrtEntry*>(table_mem_map_.Begin());
+    // Take into account the actual length.
+    max_entries_ = table_bytes / sizeof(LrtEntry);
+  }
+  segment_state_ = kLRTFirstSegment;
+  last_known_previous_state_ = kLRTFirstSegment;
+  return true;
 }
 
 LocalReferenceTable::~LocalReferenceTable() {
-  SmallLrtAllocator* small_lrt_allocator =
-      max_entries_ != 0u ? Runtime::Current()->GetSmallLrtAllocator() : nullptr;
-  if (small_table_ != nullptr) {
-    small_lrt_allocator->Deallocate(small_table_, kSmallLrtEntries);
-    DCHECK(tables_.empty());
-  } else {
-    size_t num_small_tables = std::min(tables_.size(), MaxSmallTables());
-    for (size_t i = 0; i != num_small_tables; ++i) {
-      small_lrt_allocator->Deallocate(tables_[i], GetTableSize(i));
+  if (table_ != nullptr && !table_mem_map_.IsValid()) {
+    Runtime::Current()->GetSmallLrtAllocator()->Deallocate(table_);
+  }
+}
+
+void LocalReferenceTable::ConstexprChecks() {
+  // Use this for some assertions. They can't be put into the header as C++ wants the class
+  // to be complete.
+
+  // Check kind.
+  static_assert((EncodeIndirectRefKind(kLocal) & (~kKindMask)) == 0, "Kind encoding error");
+  static_assert((EncodeIndirectRefKind(kGlobal) & (~kKindMask)) == 0, "Kind encoding error");
+  static_assert((EncodeIndirectRefKind(kWeakGlobal) & (~kKindMask)) == 0, "Kind encoding error");
+  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kLocal)) == kLocal,
+                "Kind encoding error");
+  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kGlobal)) == kGlobal,
+                "Kind encoding error");
+  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kWeakGlobal)) == kWeakGlobal,
+                "Kind encoding error");
+
+  // Check serial.
+  static_assert(DecodeSerial(EncodeSerial(0u)) == 0u, "Serial encoding error");
+  static_assert(DecodeSerial(EncodeSerial(1u)) == 1u, "Serial encoding error");
+  static_assert(DecodeSerial(EncodeSerial(2u)) == 2u, "Serial encoding error");
+  static_assert(DecodeSerial(EncodeSerial(3u)) == 3u, "Serial encoding error");
+
+  // Table index.
+  static_assert(DecodeIndex(EncodeIndex(0u)) == 0u, "Index encoding error");
+  static_assert(DecodeIndex(EncodeIndex(1u)) == 1u, "Index encoding error");
+  static_assert(DecodeIndex(EncodeIndex(2u)) == 2u, "Index encoding error");
+  static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
+}
+
+bool LocalReferenceTable::IsValid() const {
+  return table_ != nullptr;
+}
+
+// Holes:
+//
+// To keep the LRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
+// operation sequences. For simplicity and lower memory overhead, we do not use a free list or
+// similar. Instead, we scan for holes, with the expectation that we will find holes fast as they
+// are usually near the end of the table (see the header, TODO: verify this assumption). To avoid
+// scans when there are no holes, the number of known holes should be tracked.
+//
+// A previous implementation stored the top index and the number of holes as the segment state.
+// This constrains the maximum number of references to 16 bits. We want to relax this, as it
+// is easy to require more references (e.g., to list all classes in large applications). Thus,
+// the implicitly stack-stored state, the LRTSegmentState, is only the top index.
+//
+// Thus, hole count is a local property of the current segment, and needs to be recovered when
+// (or after) a frame is pushed or popped. To keep JNI transitions simple (and inlineable), we
+// cannot do work when the segment changes. Thus, Add and Remove need to ensure the current
+// hole count is correct.
+//
+// To be able to detect segment changes, we require an additional local field that can describe
+// the known segment. This is last_known_previous_state_. The requirement will become clear with
+// the following (some non-trivial) cases that have to be supported:
+//
+// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
+// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+//    reference
+// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
+// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+//    reference
+//
+// Storing the last known *previous* state (bottom index) allows conservatively detecting all the
+// segment changes above. The condition is simply that the last known state is greater than or
+// equal to the current previous state, and smaller than the current state (top index). The
+// condition is conservative as it adds O(1) overhead to operations on an empty segment.
+
+static size_t CountNullEntries(const LrtEntry* table, size_t from, size_t to) {
+  size_t count = 0;
+  for (size_t index = from; index != to; ++index) {
+    if (table[index].GetReference()->IsNull()) {
+      count++;
     }
   }
+  return count;
+}
+
+void LocalReferenceTable::RecoverHoles(LRTSegmentState prev_state) {
+  if (last_known_previous_state_.top_index >= segment_state_.top_index ||
+      last_known_previous_state_.top_index < prev_state.top_index) {
+    const size_t top_index = segment_state_.top_index;
+    size_t count = CountNullEntries(table_, prev_state.top_index, top_index);
+
+    if (kDebugLRT) {
+      LOG(INFO) << "+++ Recovered holes: "
+                << " Current prev=" << prev_state.top_index
+                << " Current top_index=" << top_index
+                << " Old num_holes=" << current_num_holes_
+                << " New num_holes=" << count;
+    }
+
+    current_num_holes_ = count;
+    last_known_previous_state_ = prev_state;
+  } else if (kDebugLRT) {
+    LOG(INFO) << "No need to recover holes";
+  }
+}
+
+ALWAYS_INLINE
+static inline void CheckHoleCount(LrtEntry* table,
+                                  size_t exp_num_holes,
+                                  LRTSegmentState prev_state,
+                                  LRTSegmentState cur_state) {
+  if (kIsDebugBuild) {
+    size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
+    CHECK_EQ(exp_num_holes, count) << "prevState=" << prev_state.top_index
+                                   << " topIndex=" << cur_state.top_index;
+  }
 }
 
 bool LocalReferenceTable::Resize(size_t new_size, std::string* error_msg) {
-  DCHECK_GE(max_entries_, kSmallLrtEntries);
-  DCHECK(IsPowerOfTwo(max_entries_));
-  DCHECK_GT(new_size, max_entries_);
-  DCHECK_LE(new_size, kMaxTableSizeInBytes / sizeof(LrtEntry));
-  size_t required_size = RoundUpToPowerOfTwo(new_size);
-  size_t num_required_tables = NumTablesForSize(required_size);
-  DCHECK_GE(num_required_tables, 2u);
-  // Delay moving the `small_table_` to `tables_` until after the next table allocation succeeds.
-  size_t num_tables = (small_table_ != nullptr) ? 1u : tables_.size();
-  DCHECK_EQ(num_tables, NumTablesForSize(max_entries_));
-  for (; num_tables != num_required_tables; ++num_tables) {
-    size_t new_table_size = GetTableSize(num_tables);
-    if (num_tables < MaxSmallTables()) {
-      SmallLrtAllocator* small_lrt_allocator = Runtime::Current()->GetSmallLrtAllocator();
-      LrtEntry* new_table = small_lrt_allocator->Allocate(new_table_size, error_msg);
-      if (new_table == nullptr) {
-        DCHECK(!error_msg->empty());
-        return false;
-      }
-      DCHECK_ALIGNED(new_table, kCheckJniEntriesPerReference * sizeof(LrtEntry));
-      tables_.push_back(new_table);
-    } else {
-      MemMap new_map = NewLRTMap(new_table_size * sizeof(LrtEntry), error_msg);
-      if (!new_map.IsValid()) {
-        DCHECK(!error_msg->empty());
-        return false;
-      }
-      DCHECK_ALIGNED(new_map.Begin(), kCheckJniEntriesPerReference * sizeof(LrtEntry));
-      tables_.push_back(reinterpret_cast<LrtEntry*>(new_map.Begin()));
-      table_mem_maps_.push_back(std::move(new_map));
-    }
-    DCHECK_EQ(num_tables == 1u, small_table_ != nullptr);
-    if (num_tables == 1u) {
-      tables_.insert(tables_.begin(), small_table_);
-      small_table_ = nullptr;
-    }
-    // Record the new available capacity after each successful allocation.
-    DCHECK_EQ(max_entries_, new_table_size);
-    max_entries_ = 2u * new_table_size;
+  CHECK_GT(new_size, max_entries_);
+
+  constexpr size_t kMaxEntries = kMaxTableSizeInBytes / sizeof(LrtEntry);
+  if (new_size > kMaxEntries) {
+    *error_msg = android::base::StringPrintf("Requested size exceeds maximum: %zu", new_size);
+    return false;
   }
-  DCHECK_EQ(num_required_tables, tables_.size());
+  // Note: the above check also ensures that there is no overflow below.
+
+  const size_t table_bytes = RoundUp(new_size * sizeof(LrtEntry), kPageSize);
+
+  MemMap new_map = NewLRTMap(table_bytes, error_msg);
+  if (!new_map.IsValid()) {
+    return false;
+  }
+
+  memcpy(new_map.Begin(), table_, max_entries_ * sizeof(LrtEntry));
+  if (!table_mem_map_.IsValid()) {
+    // Didn't have its own map; deallocate old table.
+    Runtime::Current()->GetSmallLrtAllocator()->Deallocate(table_);
+  }
+  table_mem_map_ = std::move(new_map);
+  table_ = reinterpret_cast<LrtEntry*>(table_mem_map_.Begin());
+  const size_t real_new_size = table_bytes / sizeof(LrtEntry);
+  DCHECK_GE(real_new_size, new_size);
+  max_entries_ = real_new_size;
+
   return true;
 }
 
-template <typename EntryGetter>
-inline void LocalReferenceTable::PrunePoppedFreeEntries(EntryGetter&& get_entry) {
-  const uint32_t top_index = segment_state_.top_index;
-  uint32_t free_entries_list = free_entries_list_;
-  uint32_t free_entry_index = FirstFreeField::Decode(free_entries_list);
-  DCHECK_NE(free_entry_index, kFreeListEnd);
-  DCHECK_GE(free_entry_index, top_index);
-  do {
-    free_entry_index = get_entry(free_entry_index)->GetNextFree();
-  } while (free_entry_index != kFreeListEnd && free_entry_index >= top_index);
-  free_entries_list_ = FirstFreeField::Update(free_entry_index, free_entries_list);
-}
-
-inline uint32_t LocalReferenceTable::IncrementSerialNumber(LrtEntry* serial_number_entry) {
-  DCHECK_EQ(serial_number_entry, GetCheckJniSerialNumberEntry(serial_number_entry));
-  // The old serial number can be 0 if it was not used before. It can also be bits from the
-  // representation of an object reference, or a link to the next free entry written in this
-  // slot before enabling the CheckJNI. (Some gtests repeatedly enable and disable CheckJNI.)
-  uint32_t old_serial_number =
-      serial_number_entry->GetSerialNumberUnchecked() % kCheckJniEntriesPerReference;
-  uint32_t new_serial_number =
-      (old_serial_number + 1u) != kCheckJniEntriesPerReference ? old_serial_number + 1u : 1u;
-  DCHECK(IsValidSerialNumber(new_serial_number));
-  serial_number_entry->SetSerialNumber(new_serial_number);
-  return new_serial_number;
-}
-
 IndirectRef LocalReferenceTable::Add(LRTSegmentState previous_state,
                                      ObjPtr<mirror::Object> obj,
                                      std::string* error_msg) {
   if (kDebugLRT) {
     LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
-              << " top_index=" << segment_state_.top_index;
+              << " top_index=" << segment_state_.top_index
+              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+              << " holes=" << current_num_holes_;
   }
 
-  DCHECK(obj != nullptr);
+  size_t top_index = segment_state_.top_index;
+
+  CHECK(obj != nullptr);
   VerifyObject(obj);
+  DCHECK(table_ != nullptr);
 
-  DCHECK_LE(previous_state.top_index, segment_state_.top_index);
-  DCHECK(max_entries_ == kSmallLrtEntries ? small_table_ != nullptr : !tables_.empty());
-
-  auto store_obj = [obj, this](LrtEntry* free_entry, const char* tag)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    free_entry->SetReference(obj);
-    IndirectRef result = ToIndirectRef(free_entry);
-    if (kDebugLRT) {
-      LOG(INFO) << "+++ " << tag << ": added at index " << GetReferenceEntryIndex(result)
-                << ", top=" << segment_state_.top_index;
-    }
-    return result;
-  };
-
-  // Fast-path for small table with CheckJNI disabled.
-  uint32_t top_index = segment_state_.top_index;
-  LrtEntry* const small_table = small_table_;
-  if (LIKELY(small_table != nullptr)) {
-    DCHECK_EQ(max_entries_, kSmallLrtEntries);
-    DCHECK_LE(segment_state_.top_index, kSmallLrtEntries);
-    auto get_entry = [small_table](uint32_t index) ALWAYS_INLINE {
-      DCHECK_LT(index, kSmallLrtEntries);
-      return &small_table[index];
-    };
-    if (LIKELY(free_entries_list_ == kEmptyFreeListAndCheckJniDisabled)) {
-      if (LIKELY(top_index != kSmallLrtEntries)) {
-        LrtEntry* free_entry = get_entry(top_index);
-        segment_state_.top_index = top_index + 1u;
-        return store_obj(free_entry, "small_table/empty-free-list");
-      }
-    } else if (LIKELY(!IsCheckJniEnabled())) {
-      uint32_t first_free_index = GetFirstFreeIndex();
-      DCHECK_NE(first_free_index, kFreeListEnd);
-      if (UNLIKELY(first_free_index >= top_index)) {
-        PrunePoppedFreeEntries(get_entry);
-        first_free_index = GetFirstFreeIndex();
-      }
-      if (first_free_index != kFreeListEnd && first_free_index >= previous_state.top_index) {
-        DCHECK_LT(first_free_index, segment_state_.top_index);  // Popped entries pruned above.
-        LrtEntry* free_entry = get_entry(first_free_index);
-        // Use the `free_entry` only if it was created with CheckJNI disabled.
-        LrtEntry* serial_number_entry = GetCheckJniSerialNumberEntry(free_entry);
-        if (!serial_number_entry->IsSerialNumber()) {
-          free_entries_list_ = FirstFreeField::Update(free_entry->GetNextFree(), 0u);
-          return store_obj(free_entry, "small_table/reuse-empty-slot");
-        }
-      }
-      if (top_index != kSmallLrtEntries) {
-        LrtEntry* free_entry = get_entry(top_index);
-        segment_state_.top_index = top_index + 1u;
-        return store_obj(free_entry, "small_table/pruned-free-list");
-      }
-    }
-  }
-  DCHECK(IsCheckJniEnabled() || small_table == nullptr || top_index == kSmallLrtEntries);
-
-  // Process free list: prune, reuse free entry or pad for CheckJNI.
-  uint32_t first_free_index = GetFirstFreeIndex();
-  if (first_free_index != kFreeListEnd && first_free_index >= top_index) {
-    PrunePoppedFreeEntries([&](size_t index) { return GetEntry(index); });
-    first_free_index = GetFirstFreeIndex();
-  }
-  if (first_free_index != kFreeListEnd && first_free_index >= previous_state.top_index) {
-    // Reuse the free entry if it was created with the same CheckJNI setting.
-    DCHECK_LT(first_free_index, top_index);  // Popped entries have been pruned above.
-    LrtEntry* free_entry = GetEntry(first_free_index);
-    LrtEntry* serial_number_entry = GetCheckJniSerialNumberEntry(free_entry);
-    if (serial_number_entry->IsSerialNumber() == IsCheckJniEnabled()) {
-      free_entries_list_ = FirstFreeField::Update(free_entry->GetNextFree(), free_entries_list_);
-      if (UNLIKELY(IsCheckJniEnabled())) {
-        DCHECK_NE(free_entry, serial_number_entry);
-        uint32_t serial_number = IncrementSerialNumber(serial_number_entry);
-        free_entry = serial_number_entry + serial_number;
-        DCHECK_EQ(
-            free_entry,
-            GetEntry(RoundDown(first_free_index, kCheckJniEntriesPerReference) + serial_number));
-      }
-      return store_obj(free_entry, "reuse-empty-slot");
-    }
-  }
-  if (UNLIKELY(IsCheckJniEnabled()) && !IsAligned<kCheckJniEntriesPerReference>(top_index)) {
-    // Add non-CheckJNI holes up to the next serial number entry.
-    for (; !IsAligned<kCheckJniEntriesPerReference>(top_index); ++top_index) {
-      GetEntry(top_index)->SetNextFree(first_free_index);
-      first_free_index = top_index;
-    }
-    free_entries_list_ = FirstFreeField::Update(first_free_index, 1u << kFlagCheckJni);
-    segment_state_.top_index = top_index;
-  }
-
-  // Resize (double the space) if needed.
-  if (UNLIKELY(top_index == max_entries_)) {
-    static_assert(IsPowerOfTwo(kMaxTableSizeInBytes));
-    static_assert(IsPowerOfTwo(sizeof(LrtEntry)));
-    DCHECK(IsPowerOfTwo(max_entries_));
-    if (kMaxTableSizeInBytes == max_entries_ * sizeof(LrtEntry)) {
+  if (top_index == max_entries_) {
+    // Try to double space.
+    if (std::numeric_limits<size_t>::max() / 2 < max_entries_) {
       std::ostringstream oss;
       oss << "JNI ERROR (app bug): " << kLocal << " table overflow "
           << "(max=" << max_entries_ << ")" << std::endl
           << MutatorLockedDumpable<LocalReferenceTable>(*this)
-          << " Resizing failed: Cannot resize over the maximum permitted size.";
+          << " Resizing failed: exceeds size_t";
       *error_msg = oss.str();
       return nullptr;
     }
 
     std::string inner_error_msg;
-    if (!Resize(max_entries_ * 2u, &inner_error_msg)) {
+    if (!Resize(max_entries_ * 2, &inner_error_msg)) {
       std::ostringstream oss;
       oss << "JNI ERROR (app bug): " << kLocal << " table overflow "
           << "(max=" << max_entries_ << ")" << std::endl
@@ -390,288 +323,203 @@
     }
   }
 
-  // Use the next entry.
-  if (UNLIKELY(IsCheckJniEnabled())) {
-    DCHECK_ALIGNED(top_index, kCheckJniEntriesPerReference);
-    DCHECK_ALIGNED(previous_state.top_index, kCheckJniEntriesPerReference);
-    DCHECK_ALIGNED(max_entries_, kCheckJniEntriesPerReference);
-    LrtEntry* serial_number_entry = GetEntry(top_index);
-    uint32_t serial_number = IncrementSerialNumber(serial_number_entry);
-    LrtEntry* free_entry = serial_number_entry + serial_number;
-    DCHECK_EQ(free_entry, GetEntry(top_index + serial_number));
-    segment_state_.top_index = top_index + kCheckJniEntriesPerReference;
-    return store_obj(free_entry, "slow-path/check-jni");
+  RecoverHoles(previous_state);
+  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+
+  // We know there's enough room in the table.  Now we just need to find
+  // the right spot.  If there's a hole, find it and fill it; otherwise,
+  // add to the end of the list.
+  IndirectRef result;
+  size_t index;
+  if (current_num_holes_ > 0) {
+    DCHECK_GT(top_index, 1U);
+    // Find the first hole; likely to be near the end of the list.
+    LrtEntry* p_scan = &table_[top_index - 1];
+    DCHECK(!p_scan->GetReference()->IsNull());
+    --p_scan;
+    while (!p_scan->GetReference()->IsNull()) {
+      DCHECK_GE(p_scan, table_ + previous_state.top_index);
+      --p_scan;
+    }
+    index = p_scan - table_;
+    current_num_holes_--;
+  } else {
+    // Add to the end.
+    index = top_index++;
+    segment_state_.top_index = top_index;
   }
-  LrtEntry* free_entry = GetEntry(top_index);
-  segment_state_.top_index = top_index + 1u;
-  return store_obj(free_entry, "slow-path");
+  table_[index].Add(obj);
+  result = ToIndirectRef(index);
+  if (kDebugLRT) {
+    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
+              << " holes=" << current_num_holes_;
+  }
+
+  DCHECK(result != nullptr);
+  return result;
 }
 
-// Removes an object.
-//
+void LocalReferenceTable::AssertEmpty() {
+  for (size_t i = 0; i < Capacity(); ++i) {
+    if (!table_[i].GetReference()->IsNull()) {
+      LOG(FATAL) << "Internal Error: non-empty local reference table\n"
+                 << MutatorLockedDumpable<LocalReferenceTable>(*this);
+      UNREACHABLE();
+    }
+  }
+}
+
+// Removes an object. We extract the table offset bits from "iref"
+// and zap the corresponding entry, leaving a hole if it's not at the top.
+// If the entry is not between the current top index and the bottom index
+// specified by the cookie, we don't remove anything. This is the behavior
+// required by JNI's DeleteLocalRef function.
 // This method is not called when a local frame is popped; this is only used
 // for explicit single removals.
-//
-// If the entry is not at the top, we just add it to the free entry list.
-// If the entry is at the top, we pop it from the top and check if there are
-// free entries under it to remove in order to reduce the size of the table.
-//
 // Returns "false" if nothing was removed.
 bool LocalReferenceTable::Remove(LRTSegmentState previous_state, IndirectRef iref) {
   if (kDebugLRT) {
     LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
-              << " top_index=" << segment_state_.top_index;
+              << " top_index=" << segment_state_.top_index
+              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+              << " holes=" << current_num_holes_;
   }
 
-  IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(iref);
-  if (UNLIKELY(kind != kLocal)) {
-    Thread* self = Thread::Current();
-    if (kind == kJniTransition) {
-      if (self->IsJniTransitionReference(reinterpret_cast<jobject>(iref))) {
-        // Transition references count as local but they cannot be deleted.
-        // TODO: They could actually be cleared on the stack, except for the `jclass`
-        // reference for static methods that points to the method's declaring class.
-        JNIEnvExt* env = self->GetJniEnv();
-        DCHECK(env != nullptr);
-        if (env->IsCheckJniEnabled()) {
-          const char* msg = kDumpStackOnNonLocalReference
-              ? "Attempt to remove non-JNI local reference, dumping thread"
-              : "Attempt to remove non-JNI local reference";
-          LOG(WARNING) << msg;
-          if (kDumpStackOnNonLocalReference) {
-            self->Dump(LOG_STREAM(WARNING));
-          }
-        }
-        return true;
-      }
-    }
-    if (kDumpStackOnNonLocalReference && IsCheckJniEnabled()) {
-      // Log the error message and stack. Repeat the message as FATAL later.
-      LOG(ERROR) << "Attempt to delete " << kind
-                 << " reference as local JNI reference, dumping stack";
-      self->Dump(LOG_STREAM(ERROR));
-    }
-    LOG(IsCheckJniEnabled() ? ERROR : FATAL)
-        << "Attempt to delete " << kind << " reference as local JNI reference";
-    return false;
-  }
-
-  DCHECK_LE(previous_state.top_index, segment_state_.top_index);
-  DCHECK(max_entries_ == kSmallLrtEntries ? small_table_ != nullptr : !tables_.empty());
-  DCheckValidReference(iref);
-
-  LrtEntry* entry = ToLrtEntry(iref);
-  uint32_t entry_index = GetReferenceEntryIndex(iref);
-  uint32_t top_index = segment_state_.top_index;
+  const uint32_t top_index = segment_state_.top_index;
   const uint32_t bottom_index = previous_state.top_index;
 
-  if (entry_index < bottom_index) {
+  DCHECK(table_ != nullptr);
+
+  // TODO: We should eagerly check the ref kind against the `kLocal` kind instead of
+  // relying on this weak check and postponing the rest until `CheckEntry()` below.
+  // Passing the wrong kind will currently result in misleading warnings.
+  if (GetIndirectRefKind(iref) == kJniTransition) {
+    auto* self = Thread::Current();
+    ScopedObjectAccess soa(self);
+    if (self->IsJniTransitionReference(reinterpret_cast<jobject>(iref))) {
+      auto* env = self->GetJniEnv();
+      DCHECK(env != nullptr);
+      if (env->IsCheckJniEnabled()) {
+        LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
+        if (kDumpStackOnNonLocalReference) {
+          self->Dump(LOG_STREAM(WARNING));
+        }
+      }
+      return true;
+    }
+  }
+
+  const uint32_t idx = ExtractIndex(iref);
+  if (idx < bottom_index) {
     // Wrong segment.
-    LOG(WARNING) << "Attempt to remove index outside index area (" << entry_index
+    LOG(WARNING) << "Attempt to remove index outside index area (" << idx
                  << " vs " << bottom_index << "-" << top_index << ")";
     return false;
   }
+  if (idx >= top_index) {
+    // Bad --- stale reference?
+    LOG(WARNING) << "Attempt to remove invalid index " << idx
+                 << " (bottom=" << bottom_index << " top=" << top_index << ")";
+    return false;
+  }
 
-  if (UNLIKELY(IsCheckJniEnabled())) {
-    // Ignore invalid references. CheckJNI should have aborted before passing this reference
-    // to `LocalReferenceTable::Remove()` but gtests intercept the abort and proceed anyway.
-    std::string error_msg;
-    if (!IsValidReference(iref, &error_msg)) {
-      LOG(WARNING) << "Attempt to remove invalid reference: " << error_msg;
+  RecoverHoles(previous_state);
+  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+
+  if (idx == top_index - 1) {
+    // Top-most entry.  Scan up and consume holes.
+
+    if (!CheckEntry("remove", iref, idx)) {
       return false;
     }
-  }
-  DCHECK_LT(entry_index, top_index);
 
-  // Check if we're removing the top entry (created with any CheckJNI setting).
-  bool is_top_entry = false;
-  uint32_t prune_end = entry_index;
-  if (GetCheckJniSerialNumberEntry(entry)->IsSerialNumber()) {
-    LrtEntry* serial_number_entry = GetCheckJniSerialNumberEntry(entry);
-    uint32_t serial_number = dchecked_integral_cast<uint32_t>(entry - serial_number_entry);
-    DCHECK_EQ(serial_number, serial_number_entry->GetSerialNumber());
-    prune_end = entry_index - serial_number;
-    is_top_entry = (prune_end == top_index - kCheckJniEntriesPerReference);
-  } else {
-    is_top_entry = (entry_index == top_index - 1u);
-  }
-  if (is_top_entry) {
-    // Top-most entry. Scan up and consume holes created with the current CheckJNI setting.
-    constexpr uint32_t kDeadLocalValue = 0xdead10c0;
-    entry->SetReference(reinterpret_cast32<mirror::Object*>(kDeadLocalValue));
-
-    // TODO: Maybe we should not prune free entries from the top of the segment
-    // because it has quadratic worst-case complexity. We could still prune while
-    // the first free list entry is at the top.
-    uint32_t prune_start = prune_end;
-    size_t prune_count;
-    auto find_prune_range = [&](size_t chunk_size, auto is_prev_entry_free) {
-      while (prune_start > bottom_index && is_prev_entry_free(prune_start)) {
-        prune_start -= chunk_size;
-      }
-      prune_count = (prune_end - prune_start) / chunk_size;
-    };
-
-    if (UNLIKELY(IsCheckJniEnabled())) {
-      auto is_prev_entry_free = [&](size_t index) {
-        DCHECK_ALIGNED(index, kCheckJniEntriesPerReference);
-        LrtEntry* serial_number_entry = GetEntry(index - kCheckJniEntriesPerReference);
-        DCHECK_ALIGNED(serial_number_entry, kCheckJniEntriesPerReference * sizeof(LrtEntry));
-        if (!serial_number_entry->IsSerialNumber()) {
-          return false;
+    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
+    if (current_num_holes_ != 0) {
+      uint32_t collapse_top_index = top_index;
+      while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
+        if (kDebugLRT) {
+          ScopedObjectAccess soa(Thread::Current());
+          LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
+                    << " (previous_state=" << bottom_index << ") val="
+                    << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
         }
-        uint32_t serial_number = serial_number_entry->GetSerialNumber();
-        DCHECK(IsValidSerialNumber(serial_number));
-        LrtEntry* entry = serial_number_entry + serial_number;
-        DCHECK_EQ(entry, GetEntry(prune_start - kCheckJniEntriesPerReference + serial_number));
-        return entry->IsFree();
-      };
-      find_prune_range(kCheckJniEntriesPerReference, is_prev_entry_free);
+        if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
+          break;
+        }
+        if (kDebugLRT) {
+          LOG(INFO) << "+++ ate hole at " << (collapse_top_index - 1);
+        }
+        current_num_holes_--;
+      }
+      segment_state_.top_index = collapse_top_index;
+
+      CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
     } else {
-      auto is_prev_entry_free = [&](size_t index) {
-        LrtEntry* entry = GetEntry(index - 1u);
-        return entry->IsFree() && !GetCheckJniSerialNumberEntry(entry)->IsSerialNumber();
-      };
-      find_prune_range(1u, is_prev_entry_free);
-    }
-
-    if (prune_count != 0u) {
-      // Remove pruned entries from the free list.
-      size_t remaining = prune_count;
-      uint32_t free_index = GetFirstFreeIndex();
-      while (remaining != 0u && free_index >= prune_start) {
-        DCHECK_NE(free_index, kFreeListEnd);
-        LrtEntry* pruned_entry = GetEntry(free_index);
-        free_index = pruned_entry->GetNextFree();
-        pruned_entry->SetReference(reinterpret_cast32<mirror::Object*>(kDeadLocalValue));
-        --remaining;
+      segment_state_.top_index = top_index - 1;
+      if (kDebugLRT) {
+        LOG(INFO) << "+++ ate last entry " << top_index - 1;
       }
-      free_entries_list_ = FirstFreeField::Update(free_index, free_entries_list_);
-      while (remaining != 0u) {
-        DCHECK_NE(free_index, kFreeListEnd);
-        DCHECK_LT(free_index, prune_start);
-        DCHECK_GE(free_index, bottom_index);
-        LrtEntry* free_entry = GetEntry(free_index);
-        while (free_entry->GetNextFree() < prune_start) {
-          free_index = free_entry->GetNextFree();
-          DCHECK_GE(free_index, bottom_index);
-          free_entry = GetEntry(free_index);
-        }
-        LrtEntry* pruned_entry = GetEntry(free_entry->GetNextFree());
-        free_entry->SetNextFree(pruned_entry->GetNextFree());
-        pruned_entry->SetReference(reinterpret_cast32<mirror::Object*>(kDeadLocalValue));
-        --remaining;
-      }
-      DCHECK(free_index == kFreeListEnd || free_index < prune_start)
-          << "free_index=" << free_index << ", prune_start=" << prune_start;
-    }
-    segment_state_.top_index = prune_start;
-    if (kDebugLRT) {
-      LOG(INFO) << "+++ removed last entry, pruned " << prune_count
-                << ", new top= " << segment_state_.top_index;
     }
   } else {
-    // Not the top-most entry. This creates a hole.
-    entry->SetNextFree(GetFirstFreeIndex());
-    free_entries_list_ = FirstFreeField::Update(entry_index, free_entries_list_);
+    // Not the top-most entry.  This creates a hole.  We null out the entry to prevent somebody
+    // from deleting it twice and screwing up the hole count.
+    if (table_[idx].GetReference()->IsNull()) {
+      LOG(INFO) << "--- WEIRD: removing null entry " << idx;
+      return false;
+    }
+    if (!CheckEntry("remove", iref, idx)) {
+      return false;
+    }
+
+    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
+    current_num_holes_++;
+    CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
     if (kDebugLRT) {
-      LOG(INFO) << "+++ removed entry and left hole at " << entry_index;
+      LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
     }
   }
 
   return true;
 }
 
-void LocalReferenceTable::AssertEmpty() {
-  CHECK_EQ(Capacity(), 0u) << "Internal Error: non-empty local reference table.";
-}
-
 void LocalReferenceTable::Trim() {
   ScopedTrace trace(__PRETTY_FUNCTION__);
-  const size_t num_mem_maps = table_mem_maps_.size();
-  if (num_mem_maps == 0u) {
-    // Only small tables; nothing to do here. (Do not unnecessarily prune popped free entries.)
+  if (!table_mem_map_.IsValid()) {
+    // Small table; nothing to do here.
     return;
   }
-  DCHECK_EQ(tables_.size(), num_mem_maps + MaxSmallTables());
-  const size_t top_index = segment_state_.top_index;
-  // Prune popped free entries before potentially losing their memory.
-  if (UNLIKELY(GetFirstFreeIndex() != kFreeListEnd) &&
-      UNLIKELY(GetFirstFreeIndex() >= segment_state_.top_index)) {
-    PrunePoppedFreeEntries([&](size_t index) { return GetEntry(index); });
-  }
-  // Small tables can hold as many entries as the next table.
-  constexpr size_t kSmallTablesCapacity = GetTableSize(MaxSmallTables());
-  size_t mem_map_index = 0u;
-  if (top_index > kSmallTablesCapacity) {
-    const size_t table_size = TruncToPowerOfTwo(top_index);
-    const size_t table_index = NumTablesForSize(table_size);
-    const size_t start_index = top_index - table_size;
-    LrtEntry* table = tables_[table_index];
-    uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table[start_index]), kPageSize);
-    uint8_t* release_end = reinterpret_cast<uint8_t*>(&table[table_size]);
-    DCHECK_GE(reinterpret_cast<uintptr_t>(release_end), reinterpret_cast<uintptr_t>(release_start));
-    DCHECK_ALIGNED(release_end, kPageSize);
-    DCHECK_ALIGNED(release_end - release_start, kPageSize);
-    if (release_start != release_end) {
-      madvise(release_start, release_end - release_start, MADV_DONTNEED);
-    }
-    mem_map_index = table_index + 1u - MaxSmallTables();
-  }
-  for (MemMap& mem_map : ArrayRef<MemMap>(table_mem_maps_).SubArray(mem_map_index)) {
-    madvise(mem_map.Begin(), mem_map.Size(), MADV_DONTNEED);
-  }
-}
-
-template <typename Visitor>
-void LocalReferenceTable::VisitRootsInternal(Visitor&& visitor) const {
-  auto visit_table = [&](LrtEntry* table, size_t count) REQUIRES_SHARED(Locks::mutator_lock_) {
-    for (size_t i = 0; i != count; ) {
-      LrtEntry* entry;
-      if (i % kCheckJniEntriesPerReference == 0u && table[i].IsSerialNumber()) {
-        entry = &table[i + table[i].GetSerialNumber()];
-        i += kCheckJniEntriesPerReference;
-        DCHECK_LE(i, count);
-      } else {
-        entry = &table[i];
-        i += 1u;
-      }
-      DCHECK(!entry->IsSerialNumber());
-      if (!entry->IsFree()) {
-        GcRoot<mirror::Object>* root = entry->GetRootAddress();
-        DCHECK(!root->IsNull());
-        visitor(root);
-      }
-    }
-  };
-
-  if (small_table_ != nullptr) {
-    visit_table(small_table_, segment_state_.top_index);
-  } else {
-    uint32_t remaining = segment_state_.top_index;
-    size_t table_index = 0u;
-    while (remaining != 0u) {
-      size_t count = std::min<size_t>(remaining, GetTableSize(table_index));
-      visit_table(tables_[table_index], count);
-      ++table_index;
-      remaining -= count;
-    }
+  const size_t top_index = Capacity();
+  uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
+  uint8_t* release_end = static_cast<uint8_t*>(table_mem_map_.BaseEnd());
+  DCHECK_GE(reinterpret_cast<uintptr_t>(release_end), reinterpret_cast<uintptr_t>(release_start));
+  DCHECK_ALIGNED(release_end, kPageSize);
+  DCHECK_ALIGNED(release_end - release_start, kPageSize);
+  if (release_start != release_end) {
+    madvise(release_start, release_end - release_start, MADV_DONTNEED);
   }
 }
 
 void LocalReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
   BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
-  VisitRootsInternal([&](GcRoot<mirror::Object>* root) REQUIRES_SHARED(Locks::mutator_lock_) {
-                       root_visitor.VisitRoot(*root);
-                     });
+  for (size_t i = 0, capacity = Capacity(); i != capacity; ++i) {
+    GcRoot<mirror::Object>* ref = table_[i].GetReference();
+    if (!ref->IsNull()) {
+      root_visitor.VisitRoot(*ref);
+      DCHECK(!ref->IsNull());
+    }
+  }
 }
 
 void LocalReferenceTable::Dump(std::ostream& os) const {
   os << kLocal << " table dump:\n";
   ReferenceTable::Table entries;
-  VisitRootsInternal([&](GcRoot<mirror::Object>* root) REQUIRES_SHARED(Locks::mutator_lock_) {
-                       entries.push_back(*root);
-                     });
+  for (size_t i = 0; i < Capacity(); ++i) {
+    ObjPtr<mirror::Object> obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
+    if (obj != nullptr) {
+      obj = table_[i].GetReference()->Read();
+      entries.push_back(GcRoot<mirror::Object>(obj));
+    }
+  }
   ReferenceTable::Dump(os, entries);
 }
 
@@ -686,33 +534,16 @@
 }
 
 bool LocalReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
-  // TODO: Pass `previous_state` so that we can check holes.
   DCHECK_GE(free_capacity, static_cast<size_t>(1));
-  size_t top_index = segment_state_.top_index;
-  DCHECK_LE(top_index, max_entries_);
-
-  if (IsCheckJniEnabled()) {
-    // High values lead to the maximum size check failing below.
-    if (free_capacity >= std::numeric_limits<size_t>::max() / kCheckJniEntriesPerReference) {
-      free_capacity = std::numeric_limits<size_t>::max();
-    } else {
-      free_capacity *= kCheckJniEntriesPerReference;
-    }
-  }
-
-  // TODO: Include holes from the current segment in the calculation.
-  if (free_capacity <= max_entries_ - top_index) {
-    return true;
-  }
-
-  if (free_capacity > kMaxTableSize - top_index) {
-    *error_msg = android::base::StringPrintf(
-        "Requested size exceeds maximum: %zu > %zu (%zu used)",
-        free_capacity,
-        kMaxTableSize - top_index,
-        top_index);
+  if (free_capacity > kMaxTableSizeInBytes) {
+    // Arithmetic might even overflow.
+    *error_msg = "Requested table size implausibly large";
     return false;
   }
+  size_t top_index = segment_state_.top_index;
+  if (top_index + free_capacity <= max_entries_) {
+    return true;
+  }
 
   // Try to increase the table size.
   if (!Resize(top_index + free_capacity, error_msg)) {
@@ -726,14 +557,7 @@
 }
 
 size_t LocalReferenceTable::FreeCapacity() const {
-  // TODO: Include holes in current segment.
-  if (IsCheckJniEnabled()) {
-    DCHECK_ALIGNED(max_entries_, kCheckJniEntriesPerReference);
-    // The `segment_state_.top_index` is not necessarily aligned; rounding down.
-    return (max_entries_ - segment_state_.top_index) / kCheckJniEntriesPerReference;
-  } else {
-    return max_entries_ - segment_state_.top_index;
-  }
+  return max_entries_ - segment_state_.top_index;
 }
 
 }  // namespace jni
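The restored EnsureFreeCapacity() rejects implausibly large requests before computing top_index + free_capacity, so the addition cannot overflow, and only then falls back to Resize(). A small sketch of that check; the limit constant is a stand-in, not the exact ART value of kMaxTableSizeInBytes.

#include <cstddef>

// Returns true if the current segment already has room for `free_capacity` more
// entries; a caller would attempt a resize otherwise.
bool HasRoomWithoutResize(size_t top_index, size_t max_entries, size_t free_capacity) {
  constexpr size_t kSaneRequestLimit = 128u * 1024u * 1024u;  // Illustrative stand-in.
  if (free_capacity > kSaneRequestLimit) {
    return false;  // Reject before the addition below could overflow.
  }
  return top_index + free_capacity <= max_entries;
}
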
diff --git a/runtime/jni/local_reference_table.h b/runtime/jni/local_reference_table.h
index 900e4c3..debaa8b 100644
--- a/runtime/jni/local_reference_table.h
+++ b/runtime/jni/local_reference_table.h
@@ -25,19 +25,16 @@
 
 #include <android-base/logging.h>
 
-#include "base/bit_field.h"
 #include "base/bit_utils.h"
-#include "base/casts.h"
-#include "base/dchecked_vector.h"
 #include "base/locks.h"
 #include "base/macros.h"
 #include "base/mem_map.h"
 #include "base/mutex.h"
 #include "gc_root.h"
 #include "indirect_reference_table.h"
-#include "mirror/object_reference.h"
 #include "obj_ptr.h"
 #include "offsets.h"
+#include "read_barrier_option.h"
 
 namespace art {
 
@@ -49,12 +46,15 @@
 
 namespace jni {
 
-// Maintain a table of local JNI references.
+// Maintain a table of local references.  Used for local JNI references.
+// TODO: Rewrite the implementation, so that valid local references are effectively
+// `CompressedReference<Object>*`, so that it can be decoded very quickly.
 //
-// The table contains object references that are part of the GC root set. When an object is
-// added we return an `IndirectRef` that is not a valid pointer but can be used to find the
-// original value in O(1) time. Conversions to and from local JNI references are performed
-// on upcalls and downcalls as well as in JNI functions, so they need to be very fast.
+// The table contains object references, where the strong (local/global) references are part of the
+// GC root set (but not the weak global references). When an object is added we return an
+// IndirectRef that is not a valid pointer but can be used to find the original value in O(1) time.
+// Conversions to and from indirect references are performed on upcalls and downcalls, so they need
+// to be very fast.
 //
 // To be efficient for JNI local variable storage, we need to provide operations that allow us to
 // operate on segments of the table, where segments are pushed and popped as if on a stack. For
@@ -62,16 +62,17 @@
 // want to be able to strip off the current segment quickly when a method returns. Additions to the
 // table must be made in the current segment even if space is available in an earlier area.
 //
-// A new segment is created when we call into native code from managed code, or when we handle
+// A new segment is created when we call into native code from interpreted code, or when we handle
 // the JNI PushLocalFrame function.
 //
 // The GC must be able to scan the entire table quickly.
 //
 // In summary, these must be very fast:
 //  - adding or removing a segment
-//  - adding references (always adding to the current segment)
-//  - converting a local reference back to an Object
+//  - adding references to a new segment
+//  - converting an indirect reference back to an Object
 // These can be a little slower, but must still be pretty quick:
+//  - adding references to a "mature" segment
 //  - removing individual references
 //  - scanning the entire table straight through
 //
@@ -79,34 +80,58 @@
 // we fail due to lack of space. We do ensure that the current segment will pack tightly, which
 // should satisfy JNI requirements (e.g. EnsureLocalCapacity).
 
+// Indirect reference definition.  This must be interchangeable with JNI's jobject, and it's
+// convenient to let null be null, so we use void*.
+//
+// We need a (potentially) large table index and a 2-bit reference type (global, local, weak
+// global). We also reserve some bits to be used to detect stale indirect references: we put a
+// serial number in the extra bits, and keep a copy of the serial number in the table. This requires
+// more memory and additional memory accesses on add/get, but is moving-GC safe. It will catch
+// additional problems, e.g.: create iref1 for obj, delete iref1, create iref2 for same obj,
+// lookup iref1. A pattern based on object bits will miss this.
+
+// Table definition.
+//
+// For the global reference table, the expected common operations are adding a new entry and
+// removing a recently-added entry (usually the most-recently-added entry).  For JNI local
+// references, the common operations are adding a new entry and removing an entire table segment.
+//
+// If we delete entries from the middle of the list, we will be left with "holes".  We track the
+// number of holes so that, when adding new elements, we can quickly decide to do a trivial append
+// or go slot-hunting.
+//
+// When the top-most entry is removed, any holes immediately below it are also removed. Thus,
+// deletion of an entry may reduce "top_index" by more than one.
+//
 // To get the desired behavior for JNI locals, we need to know the bottom and top of the current
 // "segment". The top is managed internally, and the bottom is passed in as a function argument.
 // When we call a native method or push a local frame, the current top index gets pushed on, and
 // serves as the new bottom. When we pop a frame off, the value from the stack becomes the new top
 // index, and the value stored in the previous frame becomes the new bottom.
-// TODO: Move the bottom index from `JniEnvExt` to the `LocalReferenceTable`. Use this in the JNI
-// compiler to improve the emitted local frame push/pop code by using two-register loads/stores
-// where available (LDRD/STRD on arm, LDP/STP on arm64).
 //
-// If we delete entries from the middle of the list, we will be left with "holes" which we track
-// with a singly-linked list, so that they can be reused quickly. After a segment has been removed,
-// we need to prune removed free entries from the front of this singly-linked list before we can
-// reuse a free entry from the current segment. This is linear in the number of entries removed
-// and may appear as a slow reference addition but this slow down is attributable to the previous
-// removals with a constant time per removal.
+// Holes are being locally cached for the segment. Otherwise we'd have to pass bottom index and
+// number of holes, which restricts us to 16 bits for the top index. The value is cached within the
+// table. To avoid code in generated JNI transitions, which implicitly form segments, the code for
+// adding and removing references needs to detect the change of a segment. Helper fields are used
+// for this detection.
 //
-// Without CheckJNI, we aim for the fastest possible implementation, so there is no error checking
-// (in release build) and stale references can be erroneously used, especially after the same slot
-// has been reused for another reference which we cannot easily detect (even in debug build).
+// Common alternative implementation: make IndirectRef a pointer to the actual reference slot.
+// Instead of getting a table and doing a lookup, the lookup can be done instantly. Operations like
+// determining the type and deleting the reference are more expensive because the table must be
+// hunted for (i.e. you have to do a pointer comparison to see which table it's in), you can't move
+// the table when expanding it (so realloc() is out), and tricks like serial number checking to
+// detect stale references aren't possible (though we may be able to get similar benefits with other
+// approaches).
 //
-// With CheckJNI, we rotate the slots that we use based on a "serial number".
-// This increases the memory use but it allows for decent error detection.
+// TODO: consider a "lastDeleteIndex" for quick hole-filling when an add immediately follows a
+// delete; must invalidate after segment pop. Might be worth only using it for JNI globals.
 //
-// We allow switching between CheckJNI enabled and disabled but entries created with CheckJNI
-// disabled shall have weaker checking even after enabling CheckJNI and the switch can also
-// prevent reusing a hole that held a reference created with a different CheckJNI setting.
+// TODO: may want completely different add/remove algorithms for global and local refs to improve
+// performance.  A large circular buffer might reduce the amortized cost of adding global
+// references.
 
-// The state of the current segment contains the top index.
+// The state of the current segment. We only store the index. Splitting it for index and hole
+// count restricts the range too much.
 struct LRTSegmentState {
   uint32_t top_index;
 };
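The segment protocol described in the comments above amounts to saving the current top index as a cookie before entering native code (or PushLocalFrame) and restoring it afterwards, which discards everything the callee added. A toy model of that protocol, under illustrative names only:

#include <cstdint>
#include <vector>

struct ToySegmentState { uint32_t top_index; };

class ToySegmentedTable {
 public:
  // The current top index doubles as the cookie for the next segment.
  ToySegmentState GetSegmentState() const { return ToySegmentState{top_index_}; }

  // "Pop" a segment: everything added above the cookie is discarded.
  void SetSegmentState(ToySegmentState cookie) {
    entries_.resize(cookie.top_index);
    top_index_ = cookie.top_index;
  }

  // Additions always go to the current (top) segment.
  uint32_t Add(int value) {
    entries_.push_back(value);
    return top_index_++;
  }

  uint32_t Capacity() const { return top_index_; }

 private:
  std::vector<int> entries_;
  uint32_t top_index_ = 0u;
};

// Usage mirroring a JNI transition:
//   ToySegmentedTable table;
//   ToySegmentState cookie = table.GetSegmentState();  // New segment starts here.
//   table.Add(1); table.Add(2);                        // Locals created by the callee.
//   table.SetSegmentState(cookie);                     // Popped on return; Capacity() is 0 again.
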
@@ -114,173 +139,107 @@
 // Use as initial value for "cookie", and when table has only one segment.
 static constexpr LRTSegmentState kLRTFirstSegment = { 0 };
 
-// Each entry in the `LocalReferenceTable` can contain a null (initially or after a `Trim()`)
-// or reference, or it can be marked as free and hold the index of the next free entry.
-// If CheckJNI is (or was) enabled, some entries can contain serial numbers instead and
-// only one other entry in a CheckJNI chunk starting with a serial number is active.
-//
-// Valid bit patterns:
-//                   33222222222211111111110000000000
-//                   10987654321098765432109876543210
-//   null:           00000000000000000000000000000000  // Only above the top index.
-//   reference:      <----- reference value ----->000  // See also `kObjectAlignment`.
-//   free:           <-------- next free --------->01
-//   serial number:  <------ serial number ------->10  // CheckJNI entry.
-// Note that serial number entries can appear only as the first entry of a 16-byte aligned
-// chunk of four entries and the serial number in the range [1, 3] specifies which of the
-// other three entries in the chunk is currently used.
+// We associate a few bits of serial number with each reference, for error checking.
+static constexpr unsigned int kLRTSerialBits = 3;
+static constexpr uint32_t kLRTMaxSerial = ((1 << kLRTSerialBits) - 1);
+
 class LrtEntry {
  public:
-  void SetReference(ObjPtr<mirror::Object> ref) REQUIRES_SHARED(Locks::mutator_lock_);
+  void Add(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ObjPtr<mirror::Object> GetReference() REQUIRES_SHARED(Locks::mutator_lock_);
-
-  bool IsNull() const {
-    return root_.IsNull();
+  GcRoot<mirror::Object>* GetReference() {
+    DCHECK_LE(serial_, kLRTMaxSerial);
+    return &reference_;
   }
 
-  void SetNextFree(uint32_t next_free) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  uint32_t GetNextFree() {
-    DCHECK(IsFree());
-    DCHECK(!IsSerialNumber());
-    return NextFreeField::Decode(GetRawValue());
+  const GcRoot<mirror::Object>* GetReference() const {
+    DCHECK_LE(serial_, kLRTMaxSerial);
+    return &reference_;
   }
 
-  bool IsFree() {
-    return (GetRawValue() & (1u << kFlagFree)) != 0u;
+  uint32_t GetSerial() const {
+    return serial_;
   }
 
-  void SetSerialNumber(uint32_t serial_number) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  uint32_t GetSerialNumber() {
-    DCHECK(IsSerialNumber());
-    DCHECK(!IsFree());
-    return GetSerialNumberUnchecked();
-  }
-
-  uint32_t GetSerialNumberUnchecked() {
-    return SerialNumberField::Decode(GetRawValue());
-  }
-
-  bool IsSerialNumber() {
-    return (GetRawValue() & (1u << kFlagSerialNumber)) != 0u;
-  }
-
-  GcRoot<mirror::Object>* GetRootAddress() {
-    return &root_;
-  }
-
-  static constexpr uint32_t FreeListEnd() {
-    return MaxInt<uint32_t>(kFieldNextFreeBits);
-  }
+  void SetReference(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  // Definitions of bit fields and flags.
-  static constexpr size_t kFlagFree = 0u;
-  static constexpr size_t kFlagSerialNumber = kFlagFree + 1u;
-  static constexpr size_t kFieldNextFree = kFlagSerialNumber + 1u;
-  static constexpr size_t kFieldNextFreeBits = BitSizeOf<uint32_t>() - kFieldNextFree;
-
-  using NextFreeField = BitField<uint32_t, kFieldNextFree, kFieldNextFreeBits>;
-  using SerialNumberField = NextFreeField;
-
-  static_assert(kObjectAlignment > (1u << kFlagFree));
-  static_assert(kObjectAlignment > (1u << kFlagSerialNumber));
-
-  void SetVRegValue(uint32_t value) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  uint32_t GetRawValue() {
-    return root_.AddressWithoutBarrier()->AsVRegValue();
-  }
-
-  // We record the contents as a `GcRoot<>` but it is an actual `GcRoot<>` only if it's below
-  // the current segment's top index, it's not a "serial number" or inactive entry in a CheckJNI
-  // chunk, and it's not marked as "free". Such entries are never null.
-  GcRoot<mirror::Object> root_;
+  uint32_t serial_;  // Incremented for each reuse; checked against reference.
+  GcRoot<mirror::Object> reference_;
 };
-static_assert(sizeof(LrtEntry) == sizeof(mirror::CompressedReference<mirror::Object>));
-// Assert that the low bits of an `LrtEntry*` are sufficient for encoding the reference kind.
-static_assert(enum_cast<uint32_t>(IndirectRefKind::kLastKind) < alignof(LrtEntry));
+static_assert(sizeof(LrtEntry) == 2 * sizeof(uint32_t), "Unexpected sizeof(LrtEntry)");
+static_assert(IsPowerOfTwo(sizeof(LrtEntry)), "Unexpected sizeof(LrtEntry)");
 
-
-// We initially allocate local reference tables with a small number of entries, packing
-// multiple tables into a single page. If we need to expand, we double the capacity,
-// first allocating another chunk with the same number of entries as the first chunk
-// and then allocating twice as big chunk on each subsequent expansion.
-static constexpr size_t kInitialLrtBytes = 512;  // Number of bytes in an initial local table.
-static constexpr size_t kSmallLrtEntries = kInitialLrtBytes / sizeof(LrtEntry);
-static_assert(IsPowerOfTwo(kInitialLrtBytes));
+// We initially allocate local reference tables with a very small number of entries, packing
+// multiple tables into a single page. If we need to expand one, we allocate them in units of
+// pages.
+// TODO: We should allocate all LRT tables as nonmovable Java objects. That in turn works better
+// if we break up each table into 2 parallel arrays, one for the Java reference, and one for the
+// serial number. The current scheme page-aligns regions containing LRT tables, and so allows them
+// to be identified and page-protected in the future.
+constexpr size_t kInitialLrtBytes = 512;  // Number of bytes in an initial local table.
+constexpr size_t kSmallLrtEntries = kInitialLrtBytes / sizeof(LrtEntry);
 static_assert(kPageSize % kInitialLrtBytes == 0);
 static_assert(kInitialLrtBytes % sizeof(LrtEntry) == 0);
+static_assert(kInitialLrtBytes % sizeof(void *) == 0);
 
 // A minimal stopgap allocator for initial small local LRT tables.
 class SmallLrtAllocator {
  public:
   SmallLrtAllocator();
 
-  // Allocate a small block of `LrtEntries` for the `LocalReferenceTable` table. The `size`
-  // must be a power of 2, at least `kSmallLrtEntries`, and requiring less than a page of memory.
-  LrtEntry* Allocate(size_t size, std::string* error_msg) REQUIRES(!lock_);
+  // Allocate an LRT table with kSmallLrtEntries entries.
+  LrtEntry* Allocate(std::string* error_msg) REQUIRES(!lock_);
 
-  void Deallocate(LrtEntry* unneeded, size_t size) REQUIRES(!lock_);
+  void Deallocate(LrtEntry* unneeded) REQUIRES(!lock_);
 
  private:
-  static constexpr size_t kNumSlots = WhichPowerOf2(kPageSize / kInitialLrtBytes);
-
-  static size_t GetIndex(size_t size);
-
-  // Free lists of small chunks linked through the first word.
-  dchecked_vector<void*> free_lists_;
+  // A free list of kInitialLrtBytes chunks linked through the first word.
+  LrtEntry* small_lrt_freelist_;
 
   // Repository of MemMaps used for small LRT tables.
-  dchecked_vector<MemMap> shared_lrt_maps_;
+  std::vector<MemMap> shared_lrt_maps_;
 
   Mutex lock_;  // Level kGenericBottomLock; acquired before mem_map_lock_, which is a C++ mutex.
 };
 
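The restored SmallLrtAllocator keeps freed kInitialLrtBytes chunks on a free list linked through each chunk's first word, so no side bookkeeping structure is needed. A standalone sketch of that scheme, with illustrative names and malloc standing in for the page-sized MemMaps that ART actually carves chunks from:

#include <cstddef>
#include <cstdlib>
#include <cstring>

class ChunkFreeList {
 public:
  // `chunk_bytes` must be at least sizeof(void*) so a free chunk can hold the link.
  explicit ChunkFreeList(size_t chunk_bytes) : chunk_bytes_(chunk_bytes), head_(nullptr) {}

  void* Allocate() {
    if (head_ != nullptr) {
      void* result = head_;
      // The pointer to the next free chunk was stored in this chunk's first word.
      std::memcpy(&head_, result, sizeof(void*));
      return result;
    }
    return std::malloc(chunk_bytes_);  // Stand-in for carving a chunk out of a new MemMap.
  }

  void Deallocate(void* chunk) {
    // Push the chunk by writing the old head into its first word.
    std::memcpy(chunk, &head_, sizeof(void*));
    head_ = chunk;
  }

 private:
  size_t chunk_bytes_;
  void* head_;  // First free chunk, or nullptr.
};
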
 class LocalReferenceTable {
  public:
-  explicit LocalReferenceTable(bool check_jni);
-  ~LocalReferenceTable();
+  // Constructs an uninitialized indirect reference table. Use `Initialize()` to initialize it.
+  LocalReferenceTable();
 
-  // Set the CheckJNI enabled status.
-  // Called only from the Zygote post-fork callback while the process is single-threaded.
-  // Enabling CheckJNI reduces the number of entries that can be stored, thus invalidating
-  // guarantees provided by a previous call to `EnsureFreeCapacity()`.
-  void SetCheckJniEnabled(bool enabled);
-
-  // Returns whether the CheckJNI is enabled for this `LocalReferenceTable`.
-  bool IsCheckJniEnabled() const {
-    return (free_entries_list_ & (1u << kFlagCheckJni)) != 0u;
-  }
-
-  // Initialize the `LocalReferenceTable`.
+  // Initialize the indirect reference table.
   //
-  // Max_count is the requested minimum initial capacity (resizable). The actual initial
-  // capacity can be higher to utilize all allocated memory.
-  //
-  // Returns true on success.
-  // On failure, returns false and reports error in `*error_msg`.
+  // Max_count is the minimum initial capacity (resizable).
+  // A value of 1 indicates an implementation-convenient small size.
   bool Initialize(size_t max_count, std::string* error_msg);
 
-  // Add a new entry. The `obj` must be a valid non-null object reference. This function
-  // will return null if an error happened (with an appropriate error message set).
+  ~LocalReferenceTable();
+
+  /*
+   * Checks whether construction of the LocalReferenceTable succeeded.
+   *
+   * This object must only be used if IsValid() returns true. It is safe to
+   * call IsValid from multiple threads without locking or other explicit
+   * synchronization.
+   */
+  bool IsValid() const;
+
+  // Add a new entry. "obj" must be a valid non-null object reference. This function will
+  // return null if an error happened (with an appropriate error message set).
   IndirectRef Add(LRTSegmentState previous_state,
                   ObjPtr<mirror::Object> obj,
                   std::string* error_msg)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Given an `IndirectRef` in the table, return the `Object` it refers to.
+  // Given an IndirectRef in the table, return the Object it refers to.
   //
-  // This function may abort under error conditions in debug build.
-  // In release builds, error conditions are unchecked and the function can
-  // return old or invalid references from popped segments and deleted entries.
-  ObjPtr<mirror::Object> Get(IndirectRef iref) const
-      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE;
+  // This function may abort under error conditions.
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  ObjPtr<mirror::Object> Get(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_)
+      ALWAYS_INLINE;
 
   // Updates an existing indirect reference to point to a new object.
-  // Used exclusively for updating `String` references after calling a `String` constructor.
   void Update(IndirectRef iref, ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Remove an existing entry.
@@ -290,10 +249,9 @@
   // required by JNI's DeleteLocalRef function.
   //
   // Returns "false" if nothing was removed.
-  bool Remove(LRTSegmentState previous_state, IndirectRef iref)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  bool Remove(LRTSegmentState previous_state, IndirectRef iref);
 
-  void AssertEmpty();
+  void AssertEmpty() REQUIRES_SHARED(Locks::mutator_lock_);
 
   void Dump(std::ostream& os) const
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -303,17 +261,16 @@
     return kLocal;
   }
 
-  // Return the number of entries in the entire table. This includes holes,
-  // and so may be larger than the actual number of "live" entries.
-  // The value corresponds to the number of entries for the current CheckJNI setting
-  // and may be wrong if there are entries created with a different CheckJNI setting.
+  // Return the number of entries in the entire table.  This includes holes, and
+  // so may be larger than the actual number of "live" entries.
   size_t Capacity() const {
-    if (IsCheckJniEnabled()) {
-      DCHECK_ALIGNED(segment_state_.top_index, kCheckJniEntriesPerReference);
-      return segment_state_.top_index / kCheckJniEntriesPerReference;
-    } else {
-      return segment_state_.top_index;
-    }
+    return segment_state_.top_index;
+  }
+
+  // Return the number of non-null entries in the table. Only reliable for a
+  // single segment table.
+  int32_t NEntriesForGlobal() {
+    return segment_state_.top_index - current_num_holes_;
   }
 
   // Ensure that at least free_capacity elements are available, or return false.
@@ -343,142 +300,94 @@
   // Release pages past the end of the table that may have previously held references.
   void Trim() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  /* Reference validation for CheckJNI and debug build. */
+  // Determine what kind of indirect reference this is. Opposite of EncodeIndirectRefKind.
+  ALWAYS_INLINE static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
+    return DecodeIndirectRefKind(reinterpret_cast<uintptr_t>(iref));
+  }
+
+  /* Reference validation for CheckJNI. */
   bool IsValidReference(IndirectRef, /*out*/std::string* error_msg) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  // Flags and fields in the `free_entries_list_`.
-  static constexpr size_t kFlagCheckJni = 0u;
-  // Skip a bit to have the same value range for the "first free" as the "next free" in `LrtEntry`.
-  static constexpr size_t kFlagPadding = kFlagCheckJni + 1u;
-  static constexpr size_t kFieldFirstFree = kFlagPadding + 1u;
-  static constexpr size_t kFieldFirstFreeSize = BitSizeOf<uint32_t>() - kFieldFirstFree;
+  static constexpr uint32_t kShiftedSerialMask = (1u << kLRTSerialBits) - 1;
 
-  using FirstFreeField = BitField<uint32_t, kFieldFirstFree, kFieldFirstFreeSize>;
+  static constexpr size_t kKindBits = MinimumBitsToStore(
+      static_cast<uint32_t>(IndirectRefKind::kLastKind));
+  static constexpr uint32_t kKindMask = (1u << kKindBits) - 1;
 
-  // The value of `FirstFreeField` in `free_entries_list_` indicating the end of the free list.
-  static constexpr uint32_t kFreeListEnd = LrtEntry::FreeListEnd();
-  static_assert(kFreeListEnd == MaxInt<uint32_t>(kFieldFirstFreeSize));
-
-  // The value of `free_entries_list_` indicating empty free list and disabled CheckJNI.
-  static constexpr uint32_t kEmptyFreeListAndCheckJniDisabled =
-      FirstFreeField::Update(kFreeListEnd, 0u);  // kFlagCheckJni not set.
-
-  // The number of entries per reference to detect obsolete reference uses with CheckJNI enabled.
-  // The first entry serves as a serial number, one of the remaining entries can hold the actual
-  // reference or the next free index.
-  static constexpr size_t kCheckJniEntriesPerReference = 4u;
-  static_assert(IsPowerOfTwo(kCheckJniEntriesPerReference));
-
-  // The maximum total table size we allow.
-  static constexpr size_t kMaxTableSizeInBytes = 128 * MB;
-  static_assert(IsPowerOfTwo(kMaxTableSizeInBytes));
-  static_assert(IsPowerOfTwo(sizeof(LrtEntry)));
-  static constexpr size_t kMaxTableSize = kMaxTableSizeInBytes / sizeof(LrtEntry);
-
-  static IndirectRef ToIndirectRef(LrtEntry* entry) {
-    // The `IndirectRef` can be used to directly access the underlying `GcRoot<>`.
-    DCHECK_EQ(reinterpret_cast<GcRoot<mirror::Object>*>(entry), entry->GetRootAddress());
-    return reinterpret_cast<IndirectRef>(
-        reinterpret_cast<uintptr_t>(entry) | static_cast<uintptr_t>(kLocal));
+  static constexpr uintptr_t EncodeIndex(uint32_t table_index) {
+    static_assert(sizeof(IndirectRef) == sizeof(uintptr_t), "Unexpected IndirectRef size");
+    DCHECK_LE(MinimumBitsToStore(table_index), BitSizeOf<uintptr_t>() - kLRTSerialBits - kKindBits);
+    return (static_cast<uintptr_t>(table_index) << kKindBits << kLRTSerialBits);
+  }
+  static constexpr uint32_t DecodeIndex(uintptr_t uref) {
+    return static_cast<uint32_t>((uref >> kKindBits) >> kLRTSerialBits);
   }
 
-  static LrtEntry* ToLrtEntry(IndirectRef iref) {
-    DCHECK_EQ(IndirectReferenceTable::GetIndirectRefKind(iref), kLocal);
-    return IndirectReferenceTable::ClearIndirectRefKind<LrtEntry*>(iref);
+  static constexpr uintptr_t EncodeIndirectRefKind(IndirectRefKind kind) {
+    return static_cast<uintptr_t>(kind);
+  }
+  static constexpr IndirectRefKind DecodeIndirectRefKind(uintptr_t uref) {
+    return static_cast<IndirectRefKind>(uref & kKindMask);
   }
 
-  static constexpr size_t GetTableSize(size_t table_index) {
-    // First two tables have size `kSmallLrtEntries`, then it doubles for subsequent tables.
-    return kSmallLrtEntries << (table_index != 0u ? table_index - 1u : 0u);
+  static constexpr uintptr_t EncodeSerial(uint32_t serial) {
+    DCHECK_LE(MinimumBitsToStore(serial), kLRTSerialBits);
+    return serial << kKindBits;
+  }
+  static constexpr uint32_t DecodeSerial(uintptr_t uref) {
+    return static_cast<uint32_t>(uref >> kKindBits) & kShiftedSerialMask;
   }
 
-  static constexpr size_t NumTablesForSize(size_t size) {
-    DCHECK_GE(size, kSmallLrtEntries);
-    DCHECK(IsPowerOfTwo(size));
-    return 1u + WhichPowerOf2(size / kSmallLrtEntries);
+  constexpr uintptr_t EncodeIndirectRef(uint32_t table_index, uint32_t serial) const {
+    DCHECK_LT(table_index, max_entries_);
+    return EncodeIndex(table_index) | EncodeSerial(serial) | EncodeIndirectRefKind(kLocal);
   }
 
-  static constexpr size_t MaxSmallTables() {
-    return NumTablesForSize(kPageSize / sizeof(LrtEntry));
+  static void ConstexprChecks();
+
+  // Extract the table index from an indirect reference.
+  ALWAYS_INLINE static uint32_t ExtractIndex(IndirectRef iref) {
+    return DecodeIndex(reinterpret_cast<uintptr_t>(iref));
   }
 
-  LrtEntry* GetEntry(size_t entry_index) const {
-    DCHECK_LT(entry_index, max_entries_);
-    if (LIKELY(small_table_ != nullptr)) {
-      DCHECK_LT(entry_index, kSmallLrtEntries);
-      DCHECK_EQ(max_entries_, kSmallLrtEntries);
-      return &small_table_[entry_index];
-    }
-    size_t table_start_index =
-        (entry_index < kSmallLrtEntries) ? 0u : TruncToPowerOfTwo(entry_index);
-    size_t table_index =
-        (entry_index < kSmallLrtEntries) ? 0u : NumTablesForSize(table_start_index);
-    LrtEntry* table = tables_[table_index];
-    return &table[entry_index - table_start_index];
+  IndirectRef ToIndirectRef(uint32_t table_index) const {
+    DCHECK_LT(table_index, max_entries_);
+    uint32_t serial = table_[table_index].GetSerial();
+    return reinterpret_cast<IndirectRef>(EncodeIndirectRef(table_index, serial));
   }
 
-  // Get the entry index for a local reference. Note that this may be higher than
-  // the current segment state. Returns maximum uint32 value if the reference does not
-  // point to one of the internal tables.
-  uint32_t GetReferenceEntryIndex(IndirectRef iref) const;
-
-  static LrtEntry* GetCheckJniSerialNumberEntry(LrtEntry* entry) {
-    return AlignDown(entry, kCheckJniEntriesPerReference * sizeof(LrtEntry));
-  }
-
-  static uint32_t IncrementSerialNumber(LrtEntry* serial_number_entry)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static bool IsValidSerialNumber(uint32_t serial_number) {
-    return serial_number != 0u && serial_number < kCheckJniEntriesPerReference;
-  }
-
-  // Debug mode check that the reference is valid.
-  void DCheckValidReference(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Resize the backing table to be at least `new_size` elements long. The `new_size`
+  // Resize the backing table to be at least new_size elements long. Currently
   // must be larger than the current size. After return max_entries_ >= new_size.
   bool Resize(size_t new_size, std::string* error_msg);
 
-  // Extract the first free index from `free_entries_list_`.
-  uint32_t GetFirstFreeIndex() const {
-    return FirstFreeField::Decode(free_entries_list_);
-  }
+  void RecoverHoles(LRTSegmentState from);
 
-  // Remove popped free entries from the list.
-  // Called only if `free_entries_list_` points to a popped entry.
-  template <typename EntryGetter>
-  void PrunePoppedFreeEntries(EntryGetter&& get_entry);
+  // Abort if check_jni is not enabled. Otherwise, just log as an error.
+  static void AbortIfNoCheckJNI(const std::string& msg);
 
-  // Helper template function for visiting roots.
-  template <typename Visitor>
-  void VisitRootsInternal(Visitor&& visitor) const REQUIRES_SHARED(Locks::mutator_lock_);
+  /* extra debugging checks */
+  bool CheckEntry(const char*, IndirectRef, uint32_t) const;
 
   /// semi-public - read/write by jni down calls.
   LRTSegmentState segment_state_;
 
-  // The maximum number of entries (modulo resizing).
-  uint32_t max_entries_;
+  // Mem map where we store the indirect refs. If it is invalid but table_ is non-null,
+  // then table_ is still valid, just allocated via `SmallLrtAllocator`.
+  MemMap table_mem_map_;
+  // Bottom of the stack. Do not directly access the object references
+  // in this as they are roots. Use Get(), which has a read barrier.
+  LrtEntry* table_;
 
-  // The singly-linked list of free nodes.
-  // We use entry indexes instead of pointers and `kFreeListEnd` instead of null indicates
-  // the end of the list. See `LocalReferenceTable::GetEntry()` and `LrtEntry::GetNextFree().
-  //
-  // We use the lowest bit to record whether CheckJNI is enabled. This helps us
-  // check that the list is empty and CheckJNI is disabled in a single comparison.
-  uint32_t free_entries_list_;
+  // Maximum number of entries allowed (modulo resizing).
+  size_t max_entries_;
 
-  // Individual tables.
-  // As long as we have only one small table, we use `small_table_` to avoid an extra load
-  // from another heap allocated location, otherwise we set it to null and use `tables_`.
-  LrtEntry* small_table_;  // For optimizing the fast-path.
-  dchecked_vector<LrtEntry*> tables_;
-
-  // Mem maps where we store tables allocated directly with `MemMap`
-  // rather than the `SmallLrtAllocator`.
-  dchecked_vector<MemMap> table_mem_maps_;
+  // Some values to retain old behavior with holes. Description of the algorithm is in the .cc
+  // file.
+  // TODO: Consider other data structures for compact tables, e.g., free lists.
+  size_t current_num_holes_;  // Number of holes in the current / top segment.
+  LRTSegmentState last_known_previous_state_;
 };
 
 }  // namespace jni
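The private helpers restored above pack a local reference as: table index in the high bits, a 3-bit serial number below it, and the 2-bit reference kind in the low bits. IsValidReference() can then catch stale references by comparing the serial baked into the reference with the serial currently stored in the table entry. A minimal round-trip sketch of that layout, assuming kKindBits == 2 and kLRTSerialBits == 3 as in the restored header; the helper names are illustrative:

#include <cstdint>

constexpr unsigned kToyKindBits = 2u;    // Mirrors kKindBits (four reference kinds).
constexpr unsigned kToySerialBits = 3u;  // Mirrors kLRTSerialBits.

constexpr uintptr_t ToyEncode(uint32_t index, uint32_t serial, uint32_t kind) {
  return (static_cast<uintptr_t>(index) << (kToyKindBits + kToySerialBits)) |
         (static_cast<uintptr_t>(serial) << kToyKindBits) |
         static_cast<uintptr_t>(kind);
}
constexpr uint32_t ToyDecodeIndex(uintptr_t ref) {
  return static_cast<uint32_t>(ref >> (kToyKindBits + kToySerialBits));
}
constexpr uint32_t ToyDecodeSerial(uintptr_t ref) {
  return static_cast<uint32_t>(ref >> kToyKindBits) & ((1u << kToySerialBits) - 1u);
}
constexpr uint32_t ToyDecodeKind(uintptr_t ref) {
  return static_cast<uint32_t>(ref) & ((1u << kToyKindBits) - 1u);
}

// Stale-reference detection in the spirit of IsValidReference(): the serial encoded
// in the reference must match the serial currently stored in the table slot.
constexpr bool ToySerialMatches(uintptr_t ref, uint32_t serial_in_table) {
  return ToyDecodeSerial(ref) == serial_in_table;
}

static_assert(ToyDecodeIndex(ToyEncode(42u, 5u, 1u)) == 42u, "index round-trip");
static_assert(ToyDecodeSerial(ToyEncode(42u, 5u, 1u)) == 5u, "serial round-trip");
static_assert(ToyDecodeKind(ToyEncode(42u, 5u, 1u)) == 1u, "kind round-trip");
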
diff --git a/runtime/jni/local_reference_table_test.cc b/runtime/jni/local_reference_table_test.cc
index ca953b9..84bb189 100644
--- a/runtime/jni/local_reference_table_test.cc
+++ b/runtime/jni/local_reference_table_test.cc
@@ -14,11 +14,11 @@
  * limitations under the License.
  */
 
-#include "local_reference_table-inl.h"
+#include "indirect_reference_table-inl.h"
 
 #include "android-base/stringprintf.h"
 
-#include "class_root-inl.h"
+#include "class_linker-inl.h"
 #include "common_runtime_test.h"
 #include "mirror/class-alloc-inl.h"
 #include "mirror/object-inl.h"
@@ -34,19 +34,10 @@
   LocalReferenceTableTest() {
     use_boot_image_ = true;  // Make the Runtime creation cheaper.
   }
-
-  static void CheckDump(LocalReferenceTable* lrt, size_t num_objects, size_t num_unique)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  void BasicTest(bool check_jni, size_t max_count);
-  void BasicHolesTest(bool check_jni, size_t max_count);
-  void BasicResizeTest(bool check_jni, size_t max_count);
-  void TestAddRemove(bool check_jni, size_t max_count, size_t fill_count = 0u);
-  void TestAddRemoveMixed(bool start_check_jni);
 };
 
-void LocalReferenceTableTest::CheckDump(
-    LocalReferenceTable* lrt, size_t num_objects, size_t num_unique) {
+static void CheckDump(LocalReferenceTable* lrt, size_t num_objects, size_t num_unique)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   std::ostringstream oss;
   lrt->Dump(oss);
   if (num_objects == 0) {
@@ -63,13 +54,20 @@
   }
 }
 
-void LocalReferenceTableTest::BasicTest(bool check_jni, size_t max_count) {
+TEST_F(LocalReferenceTableTest, BasicTest) {
   // This will lead to error messages in the log.
   ScopedLogSeverity sls(LogSeverity::FATAL);
 
   ScopedObjectAccess soa(Thread::Current());
+  static const size_t kTableMax = 20;
+  std::string error_msg;
+  LocalReferenceTable lrt;
+  bool success = lrt.Initialize(kTableMax, &error_msg);
+  ASSERT_TRUE(success) << error_msg;
+
   StackHandleScope<5> hs(soa.Self());
-  Handle<mirror::Class> c = hs.NewHandle(GetClassRoot<mirror::Object>());
+  Handle<mirror::Class> c =
+      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
   ASSERT_TRUE(c != nullptr);
   Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
   ASSERT_TRUE(obj0 != nullptr);
@@ -80,22 +78,15 @@
   Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
   ASSERT_TRUE(obj3 != nullptr);
 
-  std::string error_msg;
-  LocalReferenceTable lrt(check_jni);
-  bool success = lrt.Initialize(max_count, &error_msg);
-  ASSERT_TRUE(success) << error_msg;
-
   const LRTSegmentState cookie = kLRTFirstSegment;
 
   CheckDump(&lrt, 0, 0);
 
-  if (check_jni) {
-    IndirectRef bad_iref = (IndirectRef) 0x11110;
-    EXPECT_FALSE(lrt.Remove(cookie, bad_iref)) << "unexpectedly successful removal";
-  }
+  IndirectRef iref0 = (IndirectRef) 0x11110;
+  EXPECT_FALSE(lrt.Remove(cookie, iref0)) << "unexpectedly successful removal";
 
   // Add three, check, remove in the order in which they were added.
-  IndirectRef iref0 = lrt.Add(cookie, obj0.Get(), &error_msg);
+  iref0 = lrt.Add(cookie, obj0.Get(), &error_msg);
   EXPECT_TRUE(iref0 != nullptr);
   CheckDump(&lrt, 1, 1);
   IndirectRef iref1 = lrt.Add(cookie, obj1.Get(), &error_msg);
@@ -156,10 +147,8 @@
 
   ASSERT_TRUE(lrt.Remove(cookie, iref1));
   CheckDump(&lrt, 2, 2);
-  if (check_jni) {
-    ASSERT_FALSE(lrt.Remove(cookie, iref1));
-    CheckDump(&lrt, 2, 2);
-  }
+  ASSERT_FALSE(lrt.Remove(cookie, iref1));
+  CheckDump(&lrt, 2, 2);
 
   // Check that the reference to the hole is not valid.
   EXPECT_FALSE(lrt.IsValidReference(iref1, &error_msg));
@@ -219,10 +208,8 @@
   iref1 = lrt.Add(cookie, obj1.Get(), &error_msg);
   EXPECT_TRUE(iref1 != nullptr);
   CheckDump(&lrt, 1, 1);
-  if (check_jni) {
-    ASSERT_FALSE(lrt.Remove(cookie, iref0)) << "mismatched del succeeded";
-    CheckDump(&lrt, 1, 1);
-  }
+  ASSERT_FALSE(lrt.Remove(cookie, iref0)) << "mismatched del succeeded";
+  CheckDump(&lrt, 1, 1);
   ASSERT_TRUE(lrt.Remove(cookie, iref1)) << "switched del failed";
   ASSERT_EQ(0U, lrt.Capacity()) << "switching del not empty";
   CheckDump(&lrt, 0, 0);
@@ -255,7 +242,7 @@
 
   // Test table resizing.
   // These ones fit...
-  static const size_t kTableInitial = max_count / 2;
+  static const size_t kTableInitial = kTableMax / 2;
   IndirectRef manyRefs[kTableInitial];
   for (size_t i = 0; i < kTableInitial; i++) {
     manyRefs[i] = lrt.Add(cookie, obj0.Get(), &error_msg);
@@ -281,19 +268,7 @@
   CheckDump(&lrt, 0, 0);
 }
 
-TEST_F(LocalReferenceTableTest, BasicTest) {
-  BasicTest(/*check_jni=*/ false, /*max_count=*/ 20u);
-  BasicTest(/*check_jni=*/ false, /*max_count=*/ kSmallLrtEntries);
-  BasicTest(/*check_jni=*/ false, /*max_count=*/ 2u * kSmallLrtEntries);
-}
-
-TEST_F(LocalReferenceTableTest, BasicTestCheckJNI) {
-  BasicTest(/*check_jni=*/ true, /*max_count=*/ 20u);
-  BasicTest(/*check_jni=*/ true, /*max_count=*/ kSmallLrtEntries);
-  BasicTest(/*check_jni=*/ true, /*max_count=*/ 2u * kSmallLrtEntries);
-}
-
-void LocalReferenceTableTest::BasicHolesTest(bool check_jni, size_t max_count) {
+TEST_F(LocalReferenceTableTest, Holes) {
   // Test the explicitly named cases from the LRT implementation:
   //
   // 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
@@ -305,8 +280,11 @@
   //    reference
 
   ScopedObjectAccess soa(Thread::Current());
+  static const size_t kTableMax = 10;
+
   StackHandleScope<6> hs(soa.Self());
-  Handle<mirror::Class> c = hs.NewHandle(GetClassRoot<mirror::Object>());
+  Handle<mirror::Class> c = hs.NewHandle(
+      class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
   ASSERT_TRUE(c != nullptr);
   Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
   ASSERT_TRUE(obj0 != nullptr);
@@ -323,8 +301,8 @@
 
   // 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference.
   {
-    LocalReferenceTable lrt(check_jni);
-    bool success = lrt.Initialize(max_count, &error_msg);
+    LocalReferenceTable lrt;
+    bool success = lrt.Initialize(kTableMax, &error_msg);
     ASSERT_TRUE(success) << error_msg;
 
     const LRTSegmentState cookie0 = kLRTFirstSegment;
@@ -352,8 +330,8 @@
 
   // 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
   {
-    LocalReferenceTable lrt(check_jni);
-    bool success = lrt.Initialize(max_count, &error_msg);
+    LocalReferenceTable lrt;
+    bool success = lrt.Initialize(kTableMax, &error_msg);
     ASSERT_TRUE(success) << error_msg;
 
     const LRTSegmentState cookie0 = kLRTFirstSegment;
@@ -386,8 +364,8 @@
   // 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
   //    reference.
   {
-    LocalReferenceTable lrt(check_jni);
-    bool success = lrt.Initialize(max_count, &error_msg);
+    LocalReferenceTable lrt;
+    bool success = lrt.Initialize(kTableMax, &error_msg);
     ASSERT_TRUE(success) << error_msg;
 
     const LRTSegmentState cookie0 = kLRTFirstSegment;
@@ -415,9 +393,7 @@
     IndirectRef iref4 = lrt.Add(cookie1, obj4.Get(), &error_msg);
 
     EXPECT_EQ(lrt.Capacity(), 3u);
-    if (check_jni) {
-      EXPECT_FALSE(lrt.IsValidReference(iref1, &error_msg));
-    }
+    EXPECT_FALSE(lrt.IsValidReference(iref1, &error_msg));
     CheckDump(&lrt, 3, 3);
 
     UNUSED(iref0, iref1, iref2, iref3, iref4);
@@ -425,8 +401,8 @@
 
   // 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference.
   {
-    LocalReferenceTable lrt(check_jni);
-    bool success = lrt.Initialize(max_count, &error_msg);
+    LocalReferenceTable lrt;
+    bool success = lrt.Initialize(kTableMax, &error_msg);
     ASSERT_TRUE(success) << error_msg;
 
     const LRTSegmentState cookie0 = kLRTFirstSegment;
@@ -466,8 +442,8 @@
   // 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
   //    reference
   {
-    LocalReferenceTable lrt(check_jni);
-    bool success = lrt.Initialize(max_count, &error_msg);
+    LocalReferenceTable lrt;
+    bool success = lrt.Initialize(kTableMax, &error_msg);
     ASSERT_TRUE(success) << error_msg;
 
     const LRTSegmentState cookie0 = kLRTFirstSegment;
@@ -502,275 +478,30 @@
   }
 }
 
-TEST_F(LocalReferenceTableTest, BasicHolesTest) {
-  BasicHolesTest(/*check_jni=*/ false, 20u);
-  BasicHolesTest(/*check_jni=*/ false, /*max_count=*/ kSmallLrtEntries);
-  BasicHolesTest(/*check_jni=*/ false, /*max_count=*/ 2u * kSmallLrtEntries);
-}
-
-TEST_F(LocalReferenceTableTest, BasicHolesTestCheckJNI) {
-  BasicHolesTest(/*check_jni=*/ true, 20u);
-  BasicHolesTest(/*check_jni=*/ true, /*max_count=*/ kSmallLrtEntries);
-  BasicHolesTest(/*check_jni=*/ true, /*max_count=*/ 2u * kSmallLrtEntries);
-}
-
-void LocalReferenceTableTest::BasicResizeTest(bool check_jni, size_t max_count) {
+TEST_F(LocalReferenceTableTest, Resize) {
   ScopedObjectAccess soa(Thread::Current());
+  static const size_t kTableMax = 512;
+
   StackHandleScope<2> hs(soa.Self());
-  Handle<mirror::Class> c = hs.NewHandle(GetClassRoot<mirror::Object>());
+  Handle<mirror::Class> c = hs.NewHandle(
+      class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
   ASSERT_TRUE(c != nullptr);
   Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
   ASSERT_TRUE(obj0 != nullptr);
 
   std::string error_msg;
-  LocalReferenceTable lrt(check_jni);
-  bool success = lrt.Initialize(max_count, &error_msg);
+  LocalReferenceTable lrt;
+  bool success = lrt.Initialize(kTableMax, &error_msg);
   ASSERT_TRUE(success) << error_msg;
 
   CheckDump(&lrt, 0, 0);
   const LRTSegmentState cookie = kLRTFirstSegment;
 
-  for (size_t i = 0; i != max_count + 1; ++i) {
+  for (size_t i = 0; i != kTableMax + 1; ++i) {
     lrt.Add(cookie, obj0.Get(), &error_msg);
   }
 
-  EXPECT_EQ(lrt.Capacity(), max_count + 1);
-}
-
-TEST_F(LocalReferenceTableTest, BasicResizeTest) {
-  BasicResizeTest(/*check_jni=*/ false, 20u);
-  BasicResizeTest(/*check_jni=*/ false, /*max_count=*/ kSmallLrtEntries);
-  BasicResizeTest(/*check_jni=*/ false, /*max_count=*/ 2u * kSmallLrtEntries);
-  BasicResizeTest(/*check_jni=*/ false, /*max_count=*/ kPageSize / sizeof(LrtEntry));
-}
-
-TEST_F(LocalReferenceTableTest, BasicResizeTestCheckJNI) {
-  BasicResizeTest(/*check_jni=*/ true, 20u);
-  BasicResizeTest(/*check_jni=*/ true, /*max_count=*/ kSmallLrtEntries);
-  BasicResizeTest(/*check_jni=*/ true, /*max_count=*/ 2u * kSmallLrtEntries);
-  BasicResizeTest(/*check_jni=*/ true, /*max_count=*/ kPageSize / sizeof(LrtEntry));
-}
-
-void LocalReferenceTableTest::TestAddRemove(bool check_jni, size_t max_count, size_t fill_count) {
-  // This will lead to error messages in the log.
-  ScopedLogSeverity sls(LogSeverity::FATAL);
-
-  ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<9> hs(soa.Self());
-  Handle<mirror::Class> c = hs.NewHandle(GetClassRoot<mirror::Object>());
-  ASSERT_TRUE(c != nullptr);
-  Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj0 != nullptr);
-  Handle<mirror::Object> obj0x = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj0x != nullptr);
-  Handle<mirror::Object> obj1 = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj1 != nullptr);
-  Handle<mirror::Object> obj1x = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj1x != nullptr);
-  Handle<mirror::Object> obj2 = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj2 != nullptr);
-  Handle<mirror::Object> obj2x = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj2x != nullptr);
-  Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj3 != nullptr);
-  Handle<mirror::Object> obj3x = hs.NewHandle(c->AllocObject(soa.Self()));
-  ASSERT_TRUE(obj3x != nullptr);
-
-  std::string error_msg;
-  LocalReferenceTable lrt(check_jni);
-  bool success = lrt.Initialize(max_count, &error_msg);
-  ASSERT_TRUE(success) << error_msg;
-
-  const LRTSegmentState cookie0 = kLRTFirstSegment;
-  for (size_t i = 0; i != fill_count; ++i) {
-    IndirectRef iref = lrt.Add(cookie0, c.Get(), &error_msg);
-    ASSERT_TRUE(iref != nullptr) << error_msg;
-    ASSERT_EQ(i + 1u, lrt.Capacity());
-    EXPECT_OBJ_PTR_EQ(c.Get(), lrt.Get(iref));
-  }
-
-  IndirectRef iref0, iref1, iref2, iref3;
-
-#define ADD_REF(iref, cookie, obj, expected_capacity)             \
-  do {                                                            \
-    (iref) = lrt.Add(cookie, (obj).Get(), &error_msg);            \
-    ASSERT_TRUE((iref) != nullptr) << error_msg;                  \
-    ASSERT_EQ(fill_count + (expected_capacity), lrt.Capacity());  \
-    EXPECT_OBJ_PTR_EQ((obj).Get(), lrt.Get(iref));                \
-  } while (false)
-#define REMOVE_REF(cookie, iref, expected_capacity)               \
-  do {                                                            \
-    ASSERT_TRUE(lrt.Remove(cookie, iref));                        \
-    ASSERT_EQ(fill_count + (expected_capacity), lrt.Capacity());  \
-  } while (false)
-#define POP_SEGMENT(cookie, expected_capacity)                    \
-  do {                                                            \
-    lrt.SetSegmentState(cookie);                                  \
-    ASSERT_EQ(fill_count + (expected_capacity), lrt.Capacity());  \
-  } while (false)
-
-  const LRTSegmentState cookie1 = lrt.GetSegmentState();
-  ADD_REF(iref0, cookie1, obj0, 1u);
-  ADD_REF(iref1, cookie1, obj1, 2u);
-  REMOVE_REF(cookie1, iref1, 1u);  // Remove top entry.
-  if (check_jni) {
-    ASSERT_FALSE(lrt.Remove(cookie1, iref1));
-  }
-  ADD_REF(iref1, cookie1, obj1x, 2u);
-  REMOVE_REF(cookie1, iref0, 2u);  // Create hole.
-  IndirectRef obsolete_iref0 = iref0;
-  if (check_jni) {
-    ASSERT_FALSE(lrt.Remove(cookie1, iref0));
-  }
-  ADD_REF(iref0, cookie1, obj0x, 2u);  // Reuse hole
-  if (check_jni) {
-    ASSERT_FALSE(lrt.Remove(cookie1, obsolete_iref0));
-  }
-
-  // Test addition to the second segment without a hole in the first segment.
-  // Also test removal from the wrong segment here.
-  LRTSegmentState cookie2 = lrt.GetSegmentState();  // Create second segment.
-  ASSERT_FALSE(lrt.Remove(cookie2, iref0));  // Cannot remove from inactive segment.
-  ADD_REF(iref2, cookie2, obj2, 3u);
-  POP_SEGMENT(cookie2, 2u);  // Pop the second segment.
-  if (check_jni) {
-    ASSERT_FALSE(lrt.Remove(cookie1, iref2));  // Cannot remove from popped segment.
-  }
-
-  // Test addition to the second segment with a hole in the first.
-  // Use one more reference in the first segment to allow hitting the small table
-  // overflow path either above or here, based on the provided `fill_count`.
-  ADD_REF(iref2, cookie2, obj2x, 3u);
-  REMOVE_REF(cookie1, iref1, 3u);  // Create hole.
-  cookie2 = lrt.GetSegmentState();  // Create second segment.
-  ADD_REF(iref3, cookie2, obj3, 4u);
-  POP_SEGMENT(cookie2, 3u);  // Pop the second segment.
-  REMOVE_REF(cookie1, iref2, 1u);  // Remove top entry, prune previous entry.
-  ADD_REF(iref1, cookie1, obj1, 2u);
-
-  cookie2 = lrt.GetSegmentState();  // Create second segment.
-  ADD_REF(iref2, cookie2, obj2, 3u);
-  ADD_REF(iref3, cookie2, obj3, 4u);
-  REMOVE_REF(cookie2, iref2, 4u);  // Create hole in second segment.
-  POP_SEGMENT(cookie2, 2u);  // Pop the second segment with hole.
-  ADD_REF(iref2, cookie1, obj2x, 3u);  // Prune free list, use new entry.
-  REMOVE_REF(cookie1, iref2, 2u);
-
-  REMOVE_REF(cookie1, iref0, 2u);  // Create hole.
-  cookie2 = lrt.GetSegmentState();  // Create second segment.
-  ADD_REF(iref2, cookie2, obj2, 3u);
-  ADD_REF(iref3, cookie2, obj3x, 4u);
-  REMOVE_REF(cookie2, iref2, 4u);  // Create hole in second segment.
-  POP_SEGMENT(cookie2, 2u);  // Pop the second segment with hole.
-  ADD_REF(iref0, cookie1, obj0, 2u);  // Prune free list, use remaining entry from free list.
-
-  REMOVE_REF(cookie1, iref0, 2u);  // Create hole.
-  cookie2 = lrt.GetSegmentState();  // Create second segment.
-  ADD_REF(iref2, cookie2, obj2x, 3u);
-  ADD_REF(iref3, cookie2, obj3, 4u);
-  REMOVE_REF(cookie2, iref2, 4u);  // Create hole in second segment.
-  REMOVE_REF(cookie2, iref3, 2u);  // Remove top entry, prune previous entry, keep hole above.
-  POP_SEGMENT(cookie2, 2u);  // Pop the empty second segment.
-  ADD_REF(iref0, cookie1, obj0x, 2u);  // Reuse hole.
-
-#undef REMOVE_REF
-#undef ADD_REF
-}
-
-TEST_F(LocalReferenceTableTest, TestAddRemove) {
-  TestAddRemove(/*check_jni=*/ false, /*max_count=*/ 20u);
-  TestAddRemove(/*check_jni=*/ false, /*max_count=*/ kSmallLrtEntries);
-  TestAddRemove(/*check_jni=*/ false, /*max_count=*/ 2u * kSmallLrtEntries);
-  static_assert(kSmallLrtEntries >= 4u);
-  for (size_t fill_count = kSmallLrtEntries - 4u; fill_count != kSmallLrtEntries; ++fill_count) {
-    TestAddRemove(/*check_jni=*/ false, /*max_count=*/ kSmallLrtEntries, fill_count);
-  }
-}
-
-TEST_F(LocalReferenceTableTest, TestAddRemoveCheckJNI) {
-  TestAddRemove(/*check_jni=*/ true, /*max_count=*/ 20u);
-  TestAddRemove(/*check_jni=*/ true, /*max_count=*/ kSmallLrtEntries);
-  TestAddRemove(/*check_jni=*/ true, /*max_count=*/ 2u * kSmallLrtEntries);
-  static_assert(kSmallLrtEntries >= 4u);
-  for (size_t fill_count = kSmallLrtEntries - 4u; fill_count != kSmallLrtEntries; ++fill_count) {
-    TestAddRemove(/*check_jni=*/ true, /*max_count=*/ kSmallLrtEntries, fill_count);
-  }
-}
-
-void LocalReferenceTableTest::TestAddRemoveMixed(bool start_check_jni) {
-  // This will lead to error messages in the log.
-  ScopedLogSeverity sls(LogSeverity::FATAL);
-
-  ScopedObjectAccess soa(Thread::Current());
-  static constexpr size_t kMaxUniqueRefs = 16;
-  StackHandleScope<kMaxUniqueRefs + 1u> hs(soa.Self());
-  Handle<mirror::Class> c = hs.NewHandle(GetClassRoot<mirror::Object>());
-  ASSERT_TRUE(c != nullptr);
-  std::array<Handle<mirror::Object>, kMaxUniqueRefs> objs;
-  for (size_t i = 0u; i != kMaxUniqueRefs; ++i) {
-    objs[i] = hs.NewHandle(c->AllocObject(soa.Self()));
-    ASSERT_TRUE(objs[i] != nullptr);
-  }
-
-  std::string error_msg;
-  std::array<IndirectRef, kMaxUniqueRefs> irefs;
-  const LRTSegmentState cookie0 = kLRTFirstSegment;
-
-#define ADD_REF(iref, cookie, obj)                                \
-  do {                                                            \
-    (iref) = lrt.Add(cookie, (obj).Get(), &error_msg);            \
-    ASSERT_TRUE((iref) != nullptr) << error_msg;                  \
-    EXPECT_OBJ_PTR_EQ((obj).Get(), lrt.Get(iref));                \
-  } while (false)
-
-  for (size_t split = 1u; split < kMaxUniqueRefs - 1u; ++split) {
-    for (size_t total = split + 1u; total < kMaxUniqueRefs; ++total) {
-      for (size_t deleted_at_start = 0u; deleted_at_start + 1u < split; ++deleted_at_start) {
-        LocalReferenceTable lrt(/*check_jni=*/ start_check_jni);
-        bool success = lrt.Initialize(kSmallLrtEntries, &error_msg);
-        ASSERT_TRUE(success) << error_msg;
-        for (size_t i = 0; i != split; ++i) {
-          ADD_REF(irefs[i], cookie0, objs[i]);
-          ASSERT_EQ(i + 1u, lrt.Capacity());
-        }
-        for (size_t i = 0; i != deleted_at_start; ++i) {
-          ASSERT_TRUE(lrt.Remove(cookie0, irefs[i]));
-          if (lrt.IsCheckJniEnabled()) {
-            ASSERT_FALSE(lrt.Remove(cookie0, irefs[i]));
-          }
-          ASSERT_EQ(split, lrt.Capacity());
-        }
-        lrt.SetCheckJniEnabled(!start_check_jni);
-        // Check top index instead of `Capacity()` after changing the CheckJNI setting.
-        uint32_t split_top_index = lrt.GetSegmentState().top_index;
-        uint32_t last_top_index = split_top_index;
-        for (size_t i = split; i != total; ++i) {
-          ADD_REF(irefs[i], cookie0, objs[i]);
-          ASSERT_LT(last_top_index, lrt.GetSegmentState().top_index);
-          last_top_index = lrt.GetSegmentState().top_index;
-        }
-        for (size_t i = split; i != total; ++i) {
-          ASSERT_TRUE(lrt.Remove(cookie0, irefs[i]));
-          if (lrt.IsCheckJniEnabled()) {
-            ASSERT_FALSE(lrt.Remove(cookie0, irefs[i]));
-          }
-          if (i + 1u != total) {
-            ASSERT_LE(last_top_index, lrt.GetSegmentState().top_index);
-          } else {
-            ASSERT_GT(last_top_index, lrt.GetSegmentState().top_index);
-            ASSERT_LE(split_top_index, lrt.GetSegmentState().top_index);
-          }
-        }
-      }
-    }
-  }
-
-#undef ADD_REF
-}
-
-TEST_F(LocalReferenceTableTest, TestAddRemoveMixed) {
-  TestAddRemoveMixed(/*start_check_jni=*/ false);
-  TestAddRemoveMixed(/*start_check_jni=*/ true);
+  EXPECT_EQ(lrt.Capacity(), kTableMax + 1);
 }
 
 }  // namespace jni
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 3c73cc5..ddd11cc 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -168,9 +168,8 @@
     if (!vm->IsCheckJniEnabled()) {
       LOG(INFO) << "Late-enabling -Xcheck:jni";
       vm->SetCheckJniEnabled(true);
-      // This is the only thread that's running at this point and the above call sets
-      // the CheckJNI flag in the corresponding `JniEnvExt`.
-      DCHECK(Thread::Current()->GetJniEnv()->IsCheckJniEnabled());
+      // There's only one thread running at this point, so only one JNIEnv to fix up.
+      Thread::Current()->GetJniEnv()->SetCheckJniEnabled(true);
     } else {
       LOG(INFO) << "Not late-enabling -Xcheck:jni (already on)";
     }
diff --git a/runtime/non_debuggable_classes.cc b/runtime/non_debuggable_classes.cc
index a35152f..412ab0a 100644
--- a/runtime/non_debuggable_classes.cc
+++ b/runtime/non_debuggable_classes.cc
@@ -22,7 +22,6 @@
 #include "nativehelper/scoped_local_ref.h"
 #include "obj_ptr-inl.h"
 #include "thread-current-inl.h"
-#include "thread-inl.h"
 
 namespace art {
 
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1cbc6e2..092d652 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -2178,9 +2178,6 @@
   // com_android_art linker namespace.
   jclass java_lang_Object;
   {
-    // Use global JNI reference to keep the local references empty. If we allocated a
-    // local reference here, the `PushLocalFrame(128)` that these internal libraries do
-    // in their `JNI_OnLoad()` would reserve a lot of unnecessary space due to rounding.
     ScopedObjectAccess soa(self);
     java_lang_Object = reinterpret_cast<jclass>(
         GetJavaVM()->AddGlobalRef(self, GetClassRoot<mirror::Object>(GetClassLinker())));
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index e4b3f64..f99b435 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -24,10 +24,9 @@
 #include "base/casts.h"
 #include "base/mutex-inl.h"
 #include "base/time_utils.h"
-#include "indirect_reference_table.h"
 #include "jni/jni_env_ext.h"
 #include "managed_stack-inl.h"
-#include "obj_ptr-inl.h"
+#include "obj_ptr.h"
 #include "suspend_reason.h"
 #include "thread-current-inl.h"
 #include "thread_pool.h"
@@ -40,32 +39,6 @@
   return full_env->GetSelf();
 }
 
-inline ObjPtr<mirror::Object> Thread::DecodeJObject(jobject obj) const {
-  if (obj == nullptr) {
-    return nullptr;
-  }
-  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
-  if (LIKELY(IndirectReferenceTable::IsJniTransitionOrLocalReference(ref))) {
-    // For JNI transitions, the `jclass` for a static method points to the
-    // `CompressedReference<>` in the `ArtMethod::declaring_class_` and other `jobject`
-    // arguments point to spilled stack references but a `StackReference<>` is just
-    // a subclass of `CompressedReference<>`. Local references also point to
-    // a `CompressedReference<>` encapsulated in a `GcRoot<>`.
-    if (kIsDebugBuild && IndirectReferenceTable::GetIndirectRefKind(ref) == kJniTransition) {
-      CHECK(IsJniTransitionReference(obj));
-    }
-    auto* cref = IndirectReferenceTable::ClearIndirectRefKind<
-        mirror::CompressedReference<mirror::Object>*>(ref);
-    ObjPtr<mirror::Object> result = cref->AsMirrorPtr();
-    if (kIsDebugBuild && IndirectReferenceTable::GetIndirectRefKind(ref) != kJniTransition) {
-      CHECK_EQ(result, tlsPtr_.jni_env->locals_.Get(ref));
-    }
-    return result;
-  } else {
-    return DecodeGlobalJObject(obj);
-  }
-}
-
 inline void Thread::AllowThreadSuspension() {
   CheckSuspend();
   // Invalidate the current thread's object pointers (ObjPtr) to catch possible moving GC bugs due
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a341c56..77f53cf 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2768,15 +2768,27 @@
   }
 }
 
-ObjPtr<mirror::Object> Thread::DecodeGlobalJObject(jobject obj) const {
-  DCHECK(obj != nullptr);
+ObjPtr<mirror::Object> Thread::DecodeJObject(jobject obj) const {
+  if (obj == nullptr) {
+    return nullptr;
+  }
   IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
   IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
-  DCHECK_NE(kind, kJniTransition);
-  DCHECK_NE(kind, kLocal);
   ObjPtr<mirror::Object> result;
   bool expect_null = false;
-  if (kind == kGlobal) {
+  // The "kinds" below are sorted by the frequency we expect to encounter them.
+  if (kind == kLocal) {
+    jni::LocalReferenceTable& locals = tlsPtr_.jni_env->locals_;
+    // Local references do not need a read barrier.
+    result = locals.Get(ref);
+  } else if (kind == kJniTransition) {
+    // The `jclass` for a static method points to the CompressedReference<> in the
+    // `ArtMethod::declaring_class_`. Other `jobject` arguments point to spilled stack
+    // references but a StackReference<> is just a subclass of CompressedReference<>.
+    DCHECK(IsJniTransitionReference(obj));
+    result = reinterpret_cast<mirror::CompressedReference<mirror::Object>*>(obj)->AsMirrorPtr();
+    VerifyObject(result);
+  } else if (kind == kGlobal) {
     result = tlsPtr_.jni_env->vm_->DecodeGlobal(ref);
   } else {
     DCHECK_EQ(kind, kWeakGlobal);
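The restored Thread::DecodeJObject() reads the reference kind from the low bits and branches on it, with the branches ordered by expected decoding frequency: local, then JNI transition, then global, then weak global. A toy version of that dispatch; the enum values and messages are illustrative, not ART's:

#include <cstdint>
#include <cstdio>

enum ToyRefKind : uint32_t { kToyJniTransition = 0u, kToyLocal = 1u, kToyGlobal = 2u, kToyWeakGlobal = 3u };

const char* ToyDecodeJObject(uintptr_t ref) {
  if (ref == 0u) {
    return "null decodes to null";
  }
  ToyRefKind kind = static_cast<ToyRefKind>(ref & 3u);  // Kind lives in the two low bits.
  // Branches ordered by how often each kind is expected to be decoded.
  if (kind == kToyLocal) {
    return "look the slot up in the thread's local reference table";
  } else if (kind == kToyJniTransition) {
    return "read the spilled stack reference the pointer aims at directly";
  } else if (kind == kToyGlobal) {
    return "look the slot up in the JavaVM's global reference table";
  } else {
    return "look up the weak global; the result may be null if the referent was collected";
  }
}

int main() {
  std::printf("%s\n", ToyDecodeJObject((42u << 5u) | kToyLocal));
  return 0;
}
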
diff --git a/runtime/thread.h b/runtime/thread.h
index 5350330..1362f9f 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1016,10 +1016,6 @@
   // Is the given obj in one of this thread's JNI transition frames?
   bool IsJniTransitionReference(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Convert a global (or weak global) jobject into a Object*
-  ObjPtr<mirror::Object> DecodeGlobalJObject(jobject obj) const
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
       REQUIRES_SHARED(Locks::mutator_lock_);