Merge changes Iae3a933e,I08ff5d6e

* changes:
  ART: Make IndirectReferenceTable resizable
  ART: Change IndirectReferenceTable
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 330c742..670dadc 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -15,13 +15,18 @@
  */
 
 #include "art_method-inl.h"
+#include "base/casts.h"
 #include "entrypoints/entrypoint_utils-inl.h"
+#include "indirect_reference_table.h"
 #include "mirror/object-inl.h"
 #include "thread-inl.h"
 #include "verify_object-inl.h"
 
 namespace art {
 
+// The compiled JNI transitions store and return the local reference cookie as a raw uint32_t, so
+// IRTSegmentState must have the same size and be a trivial type for the bit_casts below.
+static_assert(sizeof(IRTSegmentState) == sizeof(uint32_t), "IRTSegmentState size unexpected");
+static_assert(std::is_trivial<IRTSegmentState>::value, "IRTSegmentState not trivial");
+
 template <bool kDynamicFast>
 static inline void GoToRunnableFast(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
 
@@ -45,7 +50,7 @@
 extern uint32_t JniMethodFastStart(Thread* self) {
   JNIEnvExt* env = self->GetJniEnv();
   DCHECK(env != nullptr);
-  uint32_t saved_local_ref_cookie = env->local_ref_cookie;
+  uint32_t saved_local_ref_cookie = bit_cast<uint32_t>(env->local_ref_cookie);
   env->local_ref_cookie = env->locals.GetSegmentState();
 
   if (kIsDebugBuild) {
@@ -60,7 +65,7 @@
 extern uint32_t JniMethodStart(Thread* self) {
   JNIEnvExt* env = self->GetJniEnv();
   DCHECK(env != nullptr);
-  uint32_t saved_local_ref_cookie = env->local_ref_cookie;
+  uint32_t saved_local_ref_cookie = bit_cast<uint32_t>(env->local_ref_cookie);
   env->local_ref_cookie = env->locals.GetSegmentState();
   ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
   if (!native_method->IsFastNative()) {
@@ -117,7 +122,7 @@
     env->CheckNoHeldMonitors();
   }
   env->locals.SetSegmentState(env->local_ref_cookie);
-  env->local_ref_cookie = saved_local_ref_cookie;
+  env->local_ref_cookie = bit_cast<IRTSegmentState>(saved_local_ref_cookie);
   self->PopHandleScope();
 }
 
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index e357fa6..9c634fa 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -43,15 +43,15 @@
                                    iref));
     return false;
   }
-  const int topIndex = segment_state_.parts.topIndex;
-  int idx = ExtractIndex(iref);
-  if (UNLIKELY(idx >= topIndex)) {
+  const uint32_t top_index = segment_state_.top_index;
+  uint32_t idx = ExtractIndex(iref);
+  if (UNLIKELY(idx >= top_index)) {
     std::string msg = StringPrintf(
         "JNI ERROR (app bug): accessed stale %s %p  (index %d in a table of size %d)",
         GetIndirectRefKindString(kind_),
         iref,
         idx,
-        topIndex);
+        top_index);
     AbortIfNoCheckJNI(msg);
     return false;
   }
@@ -68,7 +68,9 @@
 }
 
 // Make sure that the entry at "idx" is correctly paired with "iref".
-inline bool IndirectReferenceTable::CheckEntry(const char* what, IndirectRef iref, int idx) const {
+inline bool IndirectReferenceTable::CheckEntry(const char* what,
+                                               IndirectRef iref,
+                                               uint32_t idx) const {
   IndirectRef checkRef = ToIndirectRef(idx);
   if (UNLIKELY(checkRef != iref)) {
     std::string msg = StringPrintf(
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index b48e711..bc0f73a 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -32,6 +32,7 @@
 namespace art {
 
 static constexpr bool kDumpStackOnNonLocalReference = false;
+static constexpr bool kDebugIRT = false;
 
 const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
   switch (kind) {
@@ -60,9 +61,13 @@
 
 IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
                                                IndirectRefKind desired_kind,
+                                               ResizableCapacity resizable,
                                                std::string* error_msg)
-    : kind_(desired_kind),
-      max_entries_(max_count) {
+    : segment_state_(kIRTFirstSegment),
+      kind_(desired_kind),
+      max_entries_(max_count),
+      current_num_holes_(0),
+      last_known_previous_state_(kIRTFirstSegment),
+      resizable_(resizable) {
   CHECK(error_msg != nullptr);
   CHECK_NE(desired_kind, kHandleScopeOrInvalid);
 
@@ -78,7 +83,7 @@
   } else {
     table_ = nullptr;
   }
-  segment_state_.all = IRT_FIRST_SEGMENT;
+  segment_state_ = kIRTFirstSegment;
 }
 
 IndirectReferenceTable::~IndirectReferenceTable() {
@@ -116,50 +121,171 @@
   return table_mem_map_.get() != nullptr;
 }
 
-IndirectRef IndirectReferenceTable::Add(uint32_t cookie, ObjPtr<mirror::Object> obj) {
-  IRTSegmentState prevState;
-  prevState.all = cookie;
-  size_t topIndex = segment_state_.parts.topIndex;
+// Holes:
+//
+// To keep the IRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
+// operation sequences. For simplicity and lower memory overhead, we do not use a free list or
+// similar. Instead, we scan for holes, with the expectation that we will find holes fast as they
+// are usually near the end of the table (see the header, TODO: verify this assumption). To avoid
+// scans when there are no holes, the number of known holes should be tracked.
+//
+// A previous implementation stored the top index and the number of holes as the segment state.
+// This constrains the maximum number of references to 16 bits. We want to relax this, as it
+// is easy to require more references (e.g., to list all classes in large applications). Thus,
+// the implicitly stack-stored state, the IRTSegmentState, is only the top index.
+//
+// Consequently, the hole count is a local property of the current segment, and needs to be
+// recovered when (or after) a frame is pushed or popped. To keep JNI transitions simple (and
+// inlineable), we cannot do work when the segment changes, so Add and Remove need to ensure that
+// the current hole count is correct.
+//
+// To be able to detect segment changes, we require an additional local field that can describe
+// the known segment. This is last_known_previous_state_. The requirement will become clear with
+// the following (somewhat non-trivial) cases that have to be supported:
+//
+// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
+// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+//    reference
+// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
+// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+//    reference
+//
+// Storing the last known *previous* state (bottom index) allows us to conservatively detect all
+// of the segment changes above: the cached hole count is trusted only if the last known state is
+// greater than or equal to the current previous state and smaller than the current top index;
+// otherwise the count is recomputed. The condition is conservative, as it adds O(1) overhead to
+// operations on an empty segment.
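+//
+// Example (illustrative): a base segment occupies indices [0, 3) and contains one hole. Pushing
+// a new segment makes 3 the new bottom index; the next Add sees that the last known previous
+// state (still 0) is below the new bottom, rescans [3, top_index) and finds zero holes for the
+// new segment. This is why a hole in an outer segment is never filled from an inner segment
+// (case 1 above).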
+
+static size_t CountNullEntries(const IrtEntry* table, size_t from, size_t to) {
+  size_t count = 0;
+  for (size_t index = from; index != to; ++index) {
+    if (table[index].GetReference()->IsNull()) {
+      count++;
+    }
+  }
+  return count;
+}
+
+void IndirectReferenceTable::RecoverHoles(IRTSegmentState prev_state) {
+  if (last_known_previous_state_.top_index >= segment_state_.top_index ||
+      last_known_previous_state_.top_index < prev_state.top_index) {
+    const size_t top_index = segment_state_.top_index;
+    size_t count = CountNullEntries(table_, prev_state.top_index, top_index);
+
+    if (kDebugIRT) {
+      LOG(INFO) << "+++ Recovered holes: "
+                << " Current prev=" << prev_state.top_index
+                << " Current top_index=" << top_index
+                << " Old num_holes=" << current_num_holes_
+                << " New num_holes=" << count;
+    }
+
+    current_num_holes_ = count;
+    last_known_previous_state_ = prev_state;
+  } else if (kDebugIRT) {
+    LOG(INFO) << "No need to recover holes";
+  }
+}
+
+ALWAYS_INLINE
+static inline void CheckHoleCount(IrtEntry* table,
+                                  size_t exp_num_holes,
+                                  IRTSegmentState prev_state,
+                                  IRTSegmentState cur_state) {
+  if (kIsDebugBuild) {
+    size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
+    CHECK_EQ(exp_num_holes, count) << "prev_state=" << prev_state.top_index
+                                   << " top_index=" << cur_state.top_index;
+  }
+}
+
+bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
+  CHECK_GT(new_size, max_entries_);
+
+  const size_t table_bytes = new_size * sizeof(IrtEntry);
+  std::unique_ptr<MemMap> new_map(MemMap::MapAnonymous("indirect ref table",
+                                                       nullptr,
+                                                       table_bytes,
+                                                       PROT_READ | PROT_WRITE,
+                                                       false,
+                                                       false,
+                                                       error_msg));
+  if (new_map == nullptr) {
+    return false;
+  }
+
+  memcpy(new_map->Begin(), table_mem_map_->Begin(), table_mem_map_->Size());
+  table_mem_map_ = std::move(new_map);
+  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
+  max_entries_ = new_size;
+
+  return true;
+}
+
+IndirectRef IndirectReferenceTable::Add(IRTSegmentState previous_state,
+                                        ObjPtr<mirror::Object> obj) {
+  if (kDebugIRT) {
+    LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
+              << " top_index=" << segment_state_.top_index
+              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+              << " holes=" << current_num_holes_;
+  }
+
+  size_t top_index = segment_state_.top_index;
 
   CHECK(obj != nullptr);
   VerifyObject(obj);
   DCHECK(table_ != nullptr);
-  DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
 
-  if (topIndex == max_entries_) {
-    LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
-               << "(max=" << max_entries_ << ")\n"
-               << MutatorLockedDumpable<IndirectReferenceTable>(*this);
+  if (top_index == max_entries_) {
+    if (resizable_ == ResizableCapacity::kNo) {
+      LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
+                 << "(max=" << max_entries_ << ")\n"
+                 << MutatorLockedDumpable<IndirectReferenceTable>(*this);
+      UNREACHABLE();
+    }
+
+    // Try to double the table size.
+    std::string error_msg;
+    if (!Resize(max_entries_ * 2, &error_msg)) {
+      LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
+                 << "(max=" << max_entries_ << ")" << std::endl
+                 << MutatorLockedDumpable<IndirectReferenceTable>(*this)
+                 << " Resizing failed: " << error_msg;
+      UNREACHABLE();
+    }
   }
 
+  RecoverHoles(previous_state);
+  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+
   // We know there's enough room in the table.  Now we just need to find
   // the right spot.  If there's a hole, find it and fill it; otherwise,
   // add to the end of the list.
   IndirectRef result;
-  int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
   size_t index;
-  if (numHoles > 0) {
-    DCHECK_GT(topIndex, 1U);
+  if (current_num_holes_ > 0) {
+    DCHECK_GT(top_index, 1U);
     // Find the first hole; likely to be near the end of the list.
-    IrtEntry* pScan = &table_[topIndex - 1];
-    DCHECK(!pScan->GetReference()->IsNull());
-    --pScan;
-    while (!pScan->GetReference()->IsNull()) {
-      DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
-      --pScan;
+    IrtEntry* p_scan = &table_[top_index - 1];
+    DCHECK(!p_scan->GetReference()->IsNull());
+    --p_scan;
+    while (!p_scan->GetReference()->IsNull()) {
+      DCHECK_GE(p_scan, table_ + previous_state.top_index);
+      --p_scan;
     }
-    index = pScan - table_;
-    segment_state_.parts.numHoles--;
+    index = p_scan - table_;
+    current_num_holes_--;
   } else {
     // Add to the end.
-    index = topIndex++;
-    segment_state_.parts.topIndex = topIndex;
+    index = top_index++;
+    segment_state_.top_index = top_index;
   }
   table_[index].Add(obj);
   result = ToIndirectRef(index);
-  if ((false)) {
-    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.parts.topIndex
-              << " holes=" << segment_state_.parts.numHoles;
+  if (kDebugIRT) {
+    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
+              << " holes=" << current_num_holes_;
   }
 
   DCHECK(result != nullptr);
@@ -184,14 +310,18 @@
 // This method is not called when a local frame is popped; this is only used
 // for explicit single removals.
 // Returns "false" if nothing was removed.
-bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
-  IRTSegmentState prevState;
-  prevState.all = cookie;
-  int topIndex = segment_state_.parts.topIndex;
-  int bottomIndex = prevState.parts.topIndex;
+bool IndirectReferenceTable::Remove(IRTSegmentState previous_state, IndirectRef iref) {
+  if (kDebugIRT) {
+    LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
+              << " top_index=" << segment_state_.top_index
+              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+              << " holes=" << current_num_holes_;
+  }
+
+  const uint32_t top_index = segment_state_.top_index;
+  const uint32_t bottom_index = previous_state.top_index;
 
   DCHECK(table_ != nullptr);
-  DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
 
   if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid) {
     auto* self = Thread::Current();
@@ -208,21 +338,24 @@
       return true;
     }
   }
-  const int idx = ExtractIndex(iref);
-  if (idx < bottomIndex) {
+  const uint32_t idx = ExtractIndex(iref);
+  if (idx < bottom_index) {
     // Wrong segment.
     LOG(WARNING) << "Attempt to remove index outside index area (" << idx
-                 << " vs " << bottomIndex << "-" << topIndex << ")";
+                 << " vs " << bottom_index << "-" << top_index << ")";
     return false;
   }
-  if (idx >= topIndex) {
+  if (idx >= top_index) {
     // Bad --- stale reference?
     LOG(WARNING) << "Attempt to remove invalid index " << idx
-                 << " (bottom=" << bottomIndex << " top=" << topIndex << ")";
+                 << " (bottom=" << bottom_index << " top=" << top_index << ")";
     return false;
   }
 
-  if (idx == topIndex - 1) {
+  RecoverHoles(previous_state);
+  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+
+  if (idx == top_index - 1) {
     // Top-most entry.  Scan up and consume holes.
 
     if (!CheckEntry("remove", iref, idx)) {
@@ -230,28 +363,30 @@
     }
 
     *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
-    int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
-    if (numHoles != 0) {
-      while (--topIndex > bottomIndex && numHoles != 0) {
-        if ((false)) {
-          LOG(INFO) << "+++ checking for hole at " << topIndex - 1
-                    << " (cookie=" << cookie << ") val="
-                    << table_[topIndex - 1].GetReference()->Read<kWithoutReadBarrier>();
+    if (current_num_holes_ != 0) {
+      uint32_t collapse_top_index = top_index;
+      while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
+        if (kDebugIRT) {
+          ScopedObjectAccess soa(Thread::Current());
+          LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
+                    << " (previous_state=" << bottom_index << ") val="
+                    << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
         }
-        if (!table_[topIndex - 1].GetReference()->IsNull()) {
+        if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
           break;
         }
-        if ((false)) {
-          LOG(INFO) << "+++ ate hole at " << (topIndex - 1);
+        if (kDebugIRT) {
+          LOG(INFO) << "+++ ate hole at " << (collapse_top_index - 1);
         }
-        numHoles--;
+        current_num_holes_--;
       }
-      segment_state_.parts.numHoles = numHoles + prevState.parts.numHoles;
-      segment_state_.parts.topIndex = topIndex;
+      segment_state_.top_index = collapse_top_index;
+
+      CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
     } else {
-      segment_state_.parts.topIndex = topIndex-1;
-      if ((false)) {
-        LOG(INFO) << "+++ ate last entry " << topIndex - 1;
+      segment_state_.top_index = top_index - 1;
+      if (kDebugIRT) {
+        LOG(INFO) << "+++ ate last entry " << top_index - 1;
       }
     }
   } else {
@@ -266,9 +401,10 @@
     }
 
     *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
-    segment_state_.parts.numHoles++;
-    if ((false)) {
-      LOG(INFO) << "+++ left hole at " << idx << ", holes=" << segment_state_.parts.numHoles;
+    current_num_holes_++;
+    CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+    if (kDebugIRT) {
+      LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
     }
   }
 
@@ -306,4 +442,14 @@
   ReferenceTable::Dump(os, entries);
 }
 
+void IndirectReferenceTable::SetSegmentState(IRTSegmentState new_state) {
+  if (kDebugIRT) {
+    LOG(INFO) << "Setting segment state: "
+              << segment_state_.top_index
+              << " -> "
+              << new_state.top_index;
+  }
+  segment_state_ = new_state;
+}
+
 }  // namespace art
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index c0355de..7e452a2 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 
 #include <iosfwd>
+#include <limits>
 #include <string>
 
 #include "base/bit_utils.h"
@@ -41,79 +42,54 @@
 
 class MemMap;
 
-/*
- * Maintain a table of indirect references.  Used for local/global JNI
- * references.
- *
- * The table contains object references that are part of the GC root set.
- * When an object is added we return an IndirectRef that is not a valid
- * pointer but can be used to find the original value in O(1) time.
- * Conversions to and from indirect references are performed on upcalls
- * and downcalls, so they need to be very fast.
- *
- * To be efficient for JNI local variable storage, we need to provide
- * operations that allow us to operate on segments of the table, where
- * segments are pushed and popped as if on a stack.  For example, deletion
- * of an entry should only succeed if it appears in the current segment,
- * and we want to be able to strip off the current segment quickly when
- * a method returns.  Additions to the table must be made in the current
- * segment even if space is available in an earlier area.
- *
- * A new segment is created when we call into native code from interpreted
- * code, or when we handle the JNI PushLocalFrame function.
- *
- * The GC must be able to scan the entire table quickly.
- *
- * In summary, these must be very fast:
- *  - adding or removing a segment
- *  - adding references to a new segment
- *  - converting an indirect reference back to an Object
- * These can be a little slower, but must still be pretty quick:
- *  - adding references to a "mature" segment
- *  - removing individual references
- *  - scanning the entire table straight through
- *
- * If there's more than one segment, we don't guarantee that the table
- * will fill completely before we fail due to lack of space.  We do ensure
- * that the current segment will pack tightly, which should satisfy JNI
- * requirements (e.g. EnsureLocalCapacity).
- *
- * To make everything fit nicely in 32-bit integers, the maximum size of
- * the table is capped at 64K.
- *
- * Only SynchronizedGet is synchronized.
- */
+// Maintain a table of indirect references.  Used for local/global JNI references.
+//
+// The table contains object references, where the strong (local/global) references are part of the
+// GC root set (but not the weak global references). When an object is added we return an
+// IndirectRef that is not a valid pointer but can be used to find the original value in O(1) time.
+// Conversions to and from indirect references are performed on upcalls and downcalls, so they need
+// to be very fast.
+//
+// To be efficient for JNI local variable storage, we need to provide operations that allow us to
+// operate on segments of the table, where segments are pushed and popped as if on a stack. For
+// example, deletion of an entry should only succeed if it appears in the current segment, and we
+// want to be able to strip off the current segment quickly when a method returns. Additions to the
+// table must be made in the current segment even if space is available in an earlier area.
+//
+// A new segment is created when we call into native code from interpreted code, or when we handle
+// the JNI PushLocalFrame function.
+//
+// The GC must be able to scan the entire table quickly.
+//
+// In summary, these must be very fast:
+//  - adding or removing a segment
+//  - adding references to a new segment
+//  - converting an indirect reference back to an Object
+// These can be a little slower, but must still be pretty quick:
+//  - adding references to a "mature" segment
+//  - removing individual references
+//  - scanning the entire table straight through
+//
+// If there's more than one segment, we don't guarantee that the table will fill completely before
+// we fail due to lack of space. We do ensure that the current segment will pack tightly, which
+// should satisfy JNI requirements (e.g. EnsureLocalCapacity).
+//
+// Only SynchronizedGet is synchronized.
 
-/*
- * Indirect reference definition.  This must be interchangeable with JNI's
- * jobject, and it's convenient to let null be null, so we use void*.
- *
- * We need a 16-bit table index and a 2-bit reference type (global, local,
- * weak global).  Real object pointers will have zeroes in the low 2 or 3
- * bits (4- or 8-byte alignment), so it's useful to put the ref type
- * in the low bits and reserve zero as an invalid value.
- *
- * The remaining 14 bits can be used to detect stale indirect references.
- * For example, if objects don't move, we can use a hash of the original
- * Object* to make sure the entry hasn't been re-used.  (If the Object*
- * we find there doesn't match because of heap movement, we could do a
- * secondary check on the preserved hash value; this implies that creating
- * a global/local ref queries the hash value and forces it to be saved.)
- *
- * A more rigorous approach would be to put a serial number in the extra
- * bits, and keep a copy of the serial number in a parallel table.  This is
- * easier when objects can move, but requires 2x the memory and additional
- * memory accesses on add/get.  It will catch additional problems, e.g.:
- * create iref1 for obj, delete iref1, create iref2 for same obj, lookup
- * iref1.  A pattern based on object bits will miss this.
- */
+// Indirect reference definition.  This must be interchangeable with JNI's jobject, and it's
+// convenient to let null be null, so we use void*.
+//
+// We need a (potentially) large table index and a 2-bit reference type (global, local, weak
+// global). We also reserve some bits to be used to detect stale indirect references: we put a
+// serial number in the extra bits, and keep a copy of the serial number in the table. This requires
+// more memory and additional memory accesses on add/get, but is moving-GC safe. It will catch
+// additional problems, e.g.: create iref1 for obj, delete iref1, create iref2 for same obj,
+// lookup iref1. A pattern based on object bits will miss this.
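+// (When a slot is reused its serial number changes, so a stale reference that still encodes the
+// old serial fails to match and the lookup can be rejected.)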
 typedef void* IndirectRef;
 
-/*
- * Indirect reference kind, used as the two low bits of IndirectRef.
- *
- * For convenience these match up with enum jobjectRefType from jni.h.
- */
+// Indirect reference kind, used as the two low bits of IndirectRef.
+//
+// For convenience these match up with enum jobjectRefType from jni.h.
 enum IndirectRefKind {
   kHandleScopeOrInvalid = 0,           // <<stack indirect reference table or invalid reference>>
   kLocal                = 1,           // <<local reference>>
@@ -124,72 +100,55 @@
 std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);
 const char* GetIndirectRefKindString(const IndirectRefKind& kind);
 
-/* use as initial value for "cookie", and when table has only one segment */
-static const uint32_t IRT_FIRST_SEGMENT = 0;
+// Table definition.
+//
+// For the global reference table, the expected common operations are adding a new entry and
+// removing a recently-added entry (usually the most-recently-added entry).  For JNI local
+// references, the common operations are adding a new entry and removing an entire table segment.
+//
+// If we delete entries from the middle of the list, we will be left with "holes".  We track the
+// number of holes so that, when adding new elements, we can quickly decide to do a trivial append
+// or go slot-hunting.
+//
+// When the top-most entry is removed, any holes immediately below it are also removed. Thus,
+// deletion of an entry may reduce "top_index" by more than one.
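+//
+// For example (illustrative): with live entries at indices 0, 1 and 3 and a hole at index 2,
+// removing the top-most entry (index 3) also consumes the hole, so top_index drops from 4 to 2.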
+//
+// To get the desired behavior for JNI locals, we need to know the bottom and top of the current
+// "segment". The top is managed internally, and the bottom is passed in as a function argument.
+// When we call a native method or push a local frame, the current top index gets pushed on, and
+// serves as the new bottom. When we pop a frame off, the value from the stack becomes the new top
+// index, and the value stored in the previous frame becomes the new bottom.
+//
+// The hole count is cached locally within the table for the current segment. Otherwise we would
+// have to pass both the bottom index and the number of holes, which would restrict the top index
+// to 16 bits. To avoid extra code in the generated JNI transitions, which implicitly form
+// segments, the code for adding and removing references needs to detect segment changes itself.
+// Helper fields are used for this detection.
+//
+// Common alternative implementation: make IndirectRef a pointer to the actual reference slot.
+// Instead of getting a table and doing a lookup, the lookup can be done instantly. Operations like
+// determining the type and deleting the reference are more expensive because the table must be
+// hunted for (i.e. you have to do a pointer comparison to see which table it's in), you can't move
+// the table when expanding it (so realloc() is out), and tricks like serial number checking to
+// detect stale references aren't possible (though we may be able to get similar benefits with other
+// approaches).
+//
+// TODO: consider a "lastDeleteIndex" for quick hole-filling when an add immediately follows a
+// delete; it must be invalidated after a segment pop (which could increase the cost/complexity
+// of method call/return). Might be worth only using it for JNI globals.
+//
+// TODO: may want completely different add/remove algorithms for global and local refs to improve
+// performance.  A large circular buffer might reduce the amortized cost of adding global
+// references.
 
-/*
- * Table definition.
- *
- * For the global reference table, the expected common operations are
- * adding a new entry and removing a recently-added entry (usually the
- * most-recently-added entry).  For JNI local references, the common
- * operations are adding a new entry and removing an entire table segment.
- *
- * If "alloc_entries_" is not equal to "max_entries_", the table may expand
- * when entries are added, which means the memory may move.  If you want
- * to keep pointers into "table" rather than offsets, you must use a
- * fixed-size table.
- *
- * If we delete entries from the middle of the list, we will be left with
- * "holes".  We track the number of holes so that, when adding new elements,
- * we can quickly decide to do a trivial append or go slot-hunting.
- *
- * When the top-most entry is removed, any holes immediately below it are
- * also removed.  Thus, deletion of an entry may reduce "topIndex" by more
- * than one.
- *
- * To get the desired behavior for JNI locals, we need to know the bottom
- * and top of the current "segment".  The top is managed internally, and
- * the bottom is passed in as a function argument.  When we call a native method or
- * push a local frame, the current top index gets pushed on, and serves
- * as the new bottom.  When we pop a frame off, the value from the stack
- * becomes the new top index, and the value stored in the previous frame
- * becomes the new bottom.
- *
- * To avoid having to re-scan the table after a pop, we want to push the
- * number of holes in the table onto the stack.  Because of our 64K-entry
- * cap, we can combine the two into a single unsigned 32-bit value.
- * Instead of a "bottom" argument we take a "cookie", which includes the
- * bottom index and the count of holes below the bottom.
- *
- * Common alternative implementation: make IndirectRef a pointer to the
- * actual reference slot.  Instead of getting a table and doing a lookup,
- * the lookup can be done instantly.  Operations like determining the
- * type and deleting the reference are more expensive because the table
- * must be hunted for (i.e. you have to do a pointer comparison to see
- * which table it's in), you can't move the table when expanding it (so
- * realloc() is out), and tricks like serial number checking to detect
- * stale references aren't possible (though we may be able to get similar
- * benefits with other approaches).
- *
- * TODO: consider a "lastDeleteIndex" for quick hole-filling when an
- * add immediately follows a delete; must invalidate after segment pop
- * (which could increase the cost/complexity of method call/return).
- * Might be worth only using it for JNI globals.
- *
- * TODO: may want completely different add/remove algorithms for global
- * and local refs to improve performance.  A large circular buffer might
- * reduce the amortized cost of adding global references.
- *
- */
-union IRTSegmentState {
-  uint32_t          all;
-  struct {
-    uint32_t      topIndex:16;            /* index of first unused entry */
-    uint32_t      numHoles:16;            /* #of holes in entire table */
-  } parts;
+// The state of the current segment. We only store the index; splitting it into an index and a
+// hole count would restrict the range too much.
+struct IRTSegmentState {
+  uint32_t top_index;
 };
 
+// Use as the initial value for the "cookie", and when the table has only one segment.
+static constexpr IRTSegmentState kIRTFirstSegment = { 0 };
+
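+// A typical (illustrative) segment usage pattern, mirroring JNI's PushLocalFrame/PopLocalFrame:
+//
+//   const IRTSegmentState cookie = table.GetSegmentState();  // Push: top becomes the new bottom.
+//   IndirectRef ref = table.Add(cookie, obj);                // Add goes into the new segment.
+//   table.SetSegmentState(cookie);                           // Pop: strip the segment again.
+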
 // Try to choose kIRTPrevCount so that sizeof(IrtEntry) is a power of 2.
 // Contains multiple entries but only one active one, this helps us detect use after free errors
 // since the serial stored in the indirect ref wont match.
@@ -204,6 +163,11 @@
     return &references_[serial_];
   }
 
+  const GcRoot<mirror::Object>* GetReference() const {
+    DCHECK_LT(serial_, kIRTPrevCount);
+    return &references_[serial_];
+  }
+
   uint32_t GetSerial() const {
     return serial_;
   }
@@ -254,14 +218,20 @@
 
 class IndirectReferenceTable {
  public:
-  /*
-   * WARNING: Construction of the IndirectReferenceTable may fail.
-   * error_msg must not be null. If error_msg is set by the constructor, then
-   * construction has failed and the IndirectReferenceTable will be in an
-   * invalid state. Use IsValid to check whether the object is in an invalid
-   * state.
-   */
-  IndirectReferenceTable(size_t max_count, IndirectRefKind kind, std::string* error_msg);
+  enum class ResizableCapacity {
+    kNo,
+    kYes
+  };
+
+  // WARNING: Construction of the IndirectReferenceTable may fail.
+  // error_msg must not be null. If error_msg is set by the constructor, then
+  // construction has failed and the IndirectReferenceTable will be in an
+  // invalid state. Use IsValid to check whether the object was constructed
+  // successfully.
+  IndirectReferenceTable(size_t max_count,
+                         IndirectRefKind kind,
+                         ResizableCapacity resizable,
+                         std::string* error_msg);
 
   ~IndirectReferenceTable();
 
@@ -274,20 +244,14 @@
    */
   bool IsValid() const;
 
-  /*
-   * Add a new entry.  "obj" must be a valid non-nullptr object reference.
-   *
-   * Returns nullptr if the table is full (max entries reached, or alloc
-   * failed during expansion).
-   */
-  IndirectRef Add(uint32_t cookie, ObjPtr<mirror::Object> obj)
+  // Add a new entry. "obj" must be a valid non-null object reference. This function will
+  // abort if the table is full (max entries reached, or expansion failed).
+  IndirectRef Add(IRTSegmentState previous_state, ObjPtr<mirror::Object> obj)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  /*
-   * Given an IndirectRef in the table, return the Object it refers to.
-   *
-   * Returns kInvalidIndirectRefObject if iref is invalid.
-   */
+  // Given an IndirectRef in the table, return the Object it refers to.
+  //
+  // This function may abort under error conditions.
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ObjPtr<mirror::Object> Get(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_)
       ALWAYS_INLINE;
@@ -299,34 +263,26 @@
     return Get<kReadBarrierOption>(iref);
   }
 
-  /*
-   * Update an existing entry.
-   *
-   * Updates an existing indirect reference to point to a new object.
-   */
+  // Updates an existing indirect reference to point to a new object.
   void Update(IndirectRef iref, ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  /*
-   * Remove an existing entry.
-   *
-   * If the entry is not between the current top index and the bottom index
-   * specified by the cookie, we don't remove anything.  This is the behavior
-   * required by JNI's DeleteLocalRef function.
-   *
-   * Returns "false" if nothing was removed.
-   */
-  bool Remove(uint32_t cookie, IndirectRef iref);
+  // Remove an existing entry.
+  //
+  // If the entry is not between the current top index and the bottom index
+  // specified by previous_state, we don't remove anything.  This is the behavior
+  // required by JNI's DeleteLocalRef function.
+  //
+  // Returns "false" if nothing was removed.
+  bool Remove(IRTSegmentState previous_state, IndirectRef iref);
 
   void AssertEmpty() REQUIRES_SHARED(Locks::mutator_lock_);
 
   void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
 
-  /*
-   * Return the #of entries in the entire table.  This includes holes, and
-   * so may be larger than the actual number of "live" entries.
-   */
+  // Return the #of entries in the entire table.  This includes holes, and
+  // so may be larger than the actual number of "live" entries.
   size_t Capacity() const {
-    return segment_state_.parts.topIndex;
+    return segment_state_.top_index;
   }
 
   // Note IrtIterator does not have a read barrier as it's used to visit roots.
@@ -341,13 +297,11 @@
   void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  uint32_t GetSegmentState() const {
-    return segment_state_.all;
+  IRTSegmentState GetSegmentState() const {
+    return segment_state_;
   }
 
-  void SetSegmentState(uint32_t new_state) {
-    segment_state_.all = new_state;
-  }
+  void SetSegmentState(IRTSegmentState new_state);
 
   static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
     // Note: Currently segment_state_ is at offset 0. We're testing the expected value in
@@ -414,14 +368,19 @@
     return reinterpret_cast<IndirectRef>(EncodeIndirectRef(table_index, serial));
   }
 
+  // Resize the backing table. Currently the new size must be larger than the current size.
+  bool Resize(size_t new_size, std::string* error_msg);
+
+  // Recover the hole count for the current segment if the segment has changed since the last
+  // known state (see the algorithm description in the .cc file).
+  void RecoverHoles(IRTSegmentState prev_state);
+
   // Abort if check_jni is not enabled. Otherwise, just log as an error.
   static void AbortIfNoCheckJNI(const std::string& msg);
 
   /* extra debugging checks */
   bool GetChecked(IndirectRef) const REQUIRES_SHARED(Locks::mutator_lock_);
-  bool CheckEntry(const char*, IndirectRef, int) const;
+  bool CheckEntry(const char*, IndirectRef, uint32_t) const;
 
-  /* semi-public - read/write by jni down calls */
+  // Semi-public: read/write by JNI down calls.
   IRTSegmentState segment_state_;
 
   // Mem map where we store the indirect refs.
@@ -429,10 +388,21 @@
   // bottom of the stack. Do not directly access the object references
   // in this as they are roots. Use Get() that has a read barrier.
   IrtEntry* table_;
-  /* bit mask, ORed into all irefs */
+  // Bit mask, ORed into all irefs.
   const IndirectRefKind kind_;
-  /* max #of entries allowed */
-  const size_t max_entries_;
+
+  // Max # of entries allowed (modulo resizing).
+  size_t max_entries_;
+
+  // Fields to retain the old behavior with holes; a description of the algorithm is in the .cc
+  // file.
+  // TODO: Consider other data structures for compact tables, e.g., free lists.
+  size_t current_num_holes_;
+  IRTSegmentState last_known_previous_state_;
+
+  // Whether the table's capacity may be resized. As there are no locks used, it is the caller's
+  // responsibility to ensure thread-safety.
+  ResizableCapacity resizable_;
 };
 
 }  // namespace art
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index d7026de..722b411 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -50,7 +50,10 @@
   ScopedObjectAccess soa(Thread::Current());
   static const size_t kTableMax = 20;
   std::string error_msg;
-  IndirectReferenceTable irt(kTableMax, kGlobal, &error_msg);
+  IndirectReferenceTable irt(kTableMax,
+                             kGlobal,
+                             IndirectReferenceTable::ResizableCapacity::kNo,
+                             &error_msg);
   ASSERT_TRUE(irt.IsValid()) << error_msg;
 
   mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
@@ -65,7 +68,7 @@
   Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
   ASSERT_TRUE(obj3.Get() != nullptr);
 
-  const uint32_t cookie = IRT_FIRST_SEGMENT;
+  const IRTSegmentState cookie = kIRTFirstSegment;
 
   CheckDump(&irt, 0, 0);
 
@@ -257,4 +260,250 @@
   CheckDump(&irt, 0, 0);
 }
 
+TEST_F(IndirectReferenceTableTest, Holes) {
+  // Test the explicitly named cases from the IRT implementation:
+  //
+  // 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
+  // 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+  // 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+  //    reference
+  // 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
+  // 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+  //    reference
+
+  ScopedObjectAccess soa(Thread::Current());
+  static const size_t kTableMax = 10;
+
+  mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
+  StackHandleScope<5> hs(soa.Self());
+  ASSERT_TRUE(c != nullptr);
+  Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj0.Get() != nullptr);
+  Handle<mirror::Object> obj1 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj1.Get() != nullptr);
+  Handle<mirror::Object> obj2 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj2.Get() != nullptr);
+  Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj3.Get() != nullptr);
+  Handle<mirror::Object> obj4 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj4.Get() != nullptr);
+
+  std::string error_msg;
+
+  // 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference.
+  {
+    IndirectReferenceTable irt(kTableMax,
+                               kGlobal,
+                               IndirectReferenceTable::ResizableCapacity::kNo,
+                               &error_msg);
+    ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+    const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+    CheckDump(&irt, 0, 0);
+
+    IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+    IndirectRef iref1 = irt.Add(cookie0, obj1.Get());
+    IndirectRef iref2 = irt.Add(cookie0, obj2.Get());
+
+    EXPECT_TRUE(irt.Remove(cookie0, iref1));
+
+    // New segment.
+    const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+    IndirectRef iref3 = irt.Add(cookie1, obj3.Get());
+
+    // Must not have filled the previous hole.
+    EXPECT_EQ(irt.Capacity(), 4u);
+    EXPECT_TRUE(irt.Get(iref1) == nullptr);
+    CheckDump(&irt, 3, 3);
+
+    UNUSED(iref0, iref1, iref2, iref3);
+  }
+
+  // 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+  {
+    IndirectReferenceTable irt(kTableMax,
+                               kGlobal,
+                               IndirectReferenceTable::ResizableCapacity::kNo,
+                               &error_msg);
+    ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+    const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+    CheckDump(&irt, 0, 0);
+
+    IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+
+    // New segment.
+    const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+    IndirectRef iref1 = irt.Add(cookie1, obj1.Get());
+    IndirectRef iref2 = irt.Add(cookie1, obj2.Get());
+    IndirectRef iref3 = irt.Add(cookie1, obj3.Get());
+
+    EXPECT_TRUE(irt.Remove(cookie1, iref2));
+
+    // Pop segment.
+    irt.SetSegmentState(cookie1);
+
+    IndirectRef iref4 = irt.Add(cookie1, obj4.Get());
+
+    EXPECT_EQ(irt.Capacity(), 2u);
+    EXPECT_TRUE(irt.Get(iref2) == nullptr);
+    CheckDump(&irt, 2, 2);
+
+    UNUSED(iref0, iref1, iref2, iref3, iref4);
+  }
+
+  // 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+  //    reference.
+  {
+    IndirectReferenceTable irt(kTableMax,
+                               kGlobal,
+                               IndirectReferenceTable::ResizableCapacity::kNo,
+                               &error_msg);
+    ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+    const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+    CheckDump(&irt, 0, 0);
+
+    IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+
+    // New segment.
+    const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+    IndirectRef iref1 = irt.Add(cookie1, obj1.Get());
+    IndirectRef iref2 = irt.Add(cookie1, obj2.Get());
+
+    EXPECT_TRUE(irt.Remove(cookie1, iref1));
+
+    // New segment.
+    const IRTSegmentState cookie2 = irt.GetSegmentState();
+
+    IndirectRef iref3 = irt.Add(cookie2, obj3.Get());
+
+    // Pop segment.
+    irt.SetSegmentState(cookie2);
+
+    IndirectRef iref4 = irt.Add(cookie1, obj4.Get());
+
+    EXPECT_EQ(irt.Capacity(), 3u);
+    EXPECT_TRUE(irt.Get(iref1) == nullptr);
+    CheckDump(&irt, 3, 3);
+
+    UNUSED(iref0, iref1, iref2, iref3, iref4);
+  }
+
+  // 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference.
+  {
+    IndirectReferenceTable irt(kTableMax,
+                               kGlobal,
+                               IndirectReferenceTable::ResizableCapacity::kNo,
+                               &error_msg);
+    ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+    const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+    CheckDump(&irt, 0, 0);
+
+    IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+
+    // New segment.
+    const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+    IndirectRef iref1 = irt.Add(cookie1, obj1.Get());
+    EXPECT_TRUE(irt.Remove(cookie1, iref1));
+
+    // Emptied segment, push new one.
+    const IRTSegmentState cookie2 = irt.GetSegmentState();
+
+    IndirectRef iref2 = irt.Add(cookie1, obj1.Get());
+    IndirectRef iref3 = irt.Add(cookie1, obj2.Get());
+    IndirectRef iref4 = irt.Add(cookie1, obj3.Get());
+
+    EXPECT_TRUE(irt.Remove(cookie1, iref3));
+
+    // Pop segment.
+    UNUSED(cookie2);
+    irt.SetSegmentState(cookie1);
+
+    IndirectRef iref5 = irt.Add(cookie1, obj4.Get());
+
+    EXPECT_EQ(irt.Capacity(), 2u);
+    EXPECT_TRUE(irt.Get(iref3) == nullptr);
+    CheckDump(&irt, 2, 2);
+
+    UNUSED(iref0, iref1, iref2, iref3, iref4, iref5);
+  }
+
+  // 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+  //    reference
+  {
+    IndirectReferenceTable irt(kTableMax,
+                               kGlobal,
+                               IndirectReferenceTable::ResizableCapacity::kNo,
+                               &error_msg);
+    ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+    const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+    CheckDump(&irt, 0, 0);
+
+    IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+
+    // New segment.
+    const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+    IndirectRef iref1 = irt.Add(cookie1, obj1.Get());
+    IndirectRef iref2 = irt.Add(cookie1, obj1.Get());
+    IndirectRef iref3 = irt.Add(cookie1, obj2.Get());
+
+    EXPECT_TRUE(irt.Remove(cookie1, iref2));
+
+    // Pop segment.
+    irt.SetSegmentState(cookie1);
+
+    // Push segment.
+    const IRTSegmentState cookie1_second = irt.GetSegmentState();
+    UNUSED(cookie1_second);
+
+    IndirectRef iref4 = irt.Add(cookie1, obj3.Get());
+
+    EXPECT_EQ(irt.Capacity(), 2u);
+    EXPECT_TRUE(irt.Get(iref3) == nullptr);
+    CheckDump(&irt, 2, 2);
+
+    UNUSED(iref0, iref1, iref2, iref3, iref4);
+  }
+}
+
+TEST_F(IndirectReferenceTableTest, Resize) {
+  ScopedObjectAccess soa(Thread::Current());
+  static const size_t kTableMax = 512;
+
+  mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
+  StackHandleScope<1> hs(soa.Self());
+  ASSERT_TRUE(c != nullptr);
+  Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
+  ASSERT_TRUE(obj0.Get() != nullptr);
+
+  std::string error_msg;
+  IndirectReferenceTable irt(kTableMax,
+                             kLocal,
+                             IndirectReferenceTable::ResizableCapacity::kYes,
+                             &error_msg);
+  ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+  CheckDump(&irt, 0, 0);
+  const IRTSegmentState cookie = kIRTFirstSegment;
+
+  for (size_t i = 0; i != kTableMax + 1; ++i) {
+    irt.Add(cookie, obj0.Get());
+  }
+
+  EXPECT_EQ(irt.Capacity(), kTableMax + 1);
+}
+
 }  // namespace art
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index f1f9de8..a1ed470 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -422,10 +422,13 @@
       tracing_enabled_(runtime_options.Exists(RuntimeArgumentMap::JniTrace)
                        || VLOG_IS_ON(third_party_jni)),
       trace_(runtime_options.GetOrDefault(RuntimeArgumentMap::JniTrace)),
-      globals_(kGlobalsMax, kGlobal, error_msg),
+      globals_(kGlobalsMax, kGlobal, IndirectReferenceTable::ResizableCapacity::kNo, error_msg),
       libraries_(new Libraries),
       unchecked_functions_(&gJniInvokeInterface),
-      weak_globals_(kWeakGlobalsMax, kWeakGlobal, error_msg),
+      weak_globals_(kWeakGlobalsMax,
+                    kWeakGlobal,
+                    IndirectReferenceTable::ResizableCapacity::kNo,
+                    error_msg),
       allow_accessing_weak_globals_(true),
       weak_globals_add_condition_("weak globals add condition",
                                   (CHECK(Locks::jni_weak_globals_lock_ != nullptr),
@@ -551,7 +554,7 @@
     return nullptr;
   }
   WriterMutexLock mu(self, *Locks::jni_globals_lock_);
-  IndirectRef ref = globals_.Add(IRT_FIRST_SEGMENT, obj);
+  IndirectRef ref = globals_.Add(kIRTFirstSegment, obj);
   return reinterpret_cast<jobject>(ref);
 }
 
@@ -563,7 +566,7 @@
   while (UNLIKELY(!MayAccessWeakGlobals(self))) {
     weak_globals_add_condition_.WaitHoldingLocks(self);
   }
-  IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj);
+  IndirectRef ref = weak_globals_.Add(kIRTFirstSegment, obj);
   return reinterpret_cast<jweak>(ref);
 }
 
@@ -572,7 +575,7 @@
     return;
   }
   WriterMutexLock mu(self, *Locks::jni_globals_lock_);
-  if (!globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
+  if (!globals_.Remove(kIRTFirstSegment, obj)) {
     LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
                  << "failed to find entry";
   }
@@ -583,7 +586,7 @@
     return;
   }
   MutexLock mu(self, *Locks::jni_weak_globals_lock_);
-  if (!weak_globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
+  if (!weak_globals_.Remove(kIRTFirstSegment, obj)) {
     LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
                  << "failed to find entry";
   }
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 8eca8fc..342e0d2 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -68,8 +68,8 @@
 JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in, std::string* error_msg)
     : self(self_in),
       vm(vm_in),
-      local_ref_cookie(IRT_FIRST_SEGMENT),
-      locals(kLocalsInitial, kLocal, error_msg),
+      local_ref_cookie(kIRTFirstSegment),
+      locals(kLocalsInitial, kLocal, IndirectReferenceTable::ResizableCapacity::kYes, error_msg),
       check_jni(false),
       runtime_deleted(false),
       critical(0),
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index e89debb..5cca0ae 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -64,7 +64,7 @@
   JavaVMExt* const vm;
 
   // Cookie used when using the local indirect reference table.
-  uint32_t local_ref_cookie;
+  IRTSegmentState local_ref_cookie;
 
   // JNI local references.
   IndirectReferenceTable locals GUARDED_BY(Locks::mutator_lock_);
@@ -72,7 +72,7 @@
   // Stack of cookies corresponding to PushLocalFrame/PopLocalFrame calls.
   // TODO: to avoid leaks (and bugs), we need to clear this vector on entry (or return)
   // to a native method.
-  std::vector<uint32_t> stacked_local_ref_cookies;
+  std::vector<IRTSegmentState> stacked_local_ref_cookies;
 
   // Frequently-accessed fields cached from JavaVM.
   bool check_jni;
@@ -131,7 +131,7 @@
 
  private:
   JNIEnvExt* const env_;
-  uint32_t saved_local_ref_cookie_;
+  IRTSegmentState saved_local_ref_cookie_;
 
   DISALLOW_COPY_AND_ASSIGN(ScopedJniEnvLocalRefState);
 };
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 9479a18..e990935 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -2308,21 +2308,25 @@
   // by modifying memory.
   // The parameters don't really matter here.
   std::string error_msg;
-  IndirectReferenceTable irt(5, IndirectRefKind::kGlobal, &error_msg);
+  IndirectReferenceTable irt(5,
+                             IndirectRefKind::kGlobal,
+                             IndirectReferenceTable::ResizableCapacity::kNo,
+                             &error_msg);
   ASSERT_TRUE(irt.IsValid()) << error_msg;
-  uint32_t old_state = irt.GetSegmentState();
+  IRTSegmentState old_state = irt.GetSegmentState();
 
   // Write some new state directly. We invert parts of old_state to ensure a new value.
-  uint32_t new_state = old_state ^ 0x07705005;
-  ASSERT_NE(old_state, new_state);
+  IRTSegmentState new_state;
+  new_state.top_index = old_state.top_index ^ 0x07705005;
+  ASSERT_NE(old_state.top_index, new_state.top_index);
 
   uint8_t* base = reinterpret_cast<uint8_t*>(&irt);
   int32_t segment_state_offset =
       IndirectReferenceTable::SegmentStateOffset(sizeof(void*)).Int32Value();
-  *reinterpret_cast<uint32_t*>(base + segment_state_offset) = new_state;
+  *reinterpret_cast<IRTSegmentState*>(base + segment_state_offset) = new_state;
 
   // Read and compare.
-  EXPECT_EQ(new_state, irt.GetSegmentState());
+  EXPECT_EQ(new_state.top_index, irt.GetSegmentState().top_index);
 }
 
 // Test the offset computation of JNIEnvExt offsets. b/26071368.