Add validate object, clean up stack indirect reference table.

Change-Id: Ifb329ae7d3bede3e95d48e6761cee1412a33d867
diff --git a/src/assembler_arm.cc b/src/assembler_arm.cc
index 2022ee9..60b8f38 100644
--- a/src/assembler_arm.cc
+++ b/src/assembler_arm.cc
@@ -1609,50 +1609,50 @@
   }
 }
 
-void Assembler::CreateStackHandle(ManagedRegister out_reg,
-                                  FrameOffset handle_offset,
-                                  ManagedRegister in_reg, bool null_allowed) {
+void Assembler::CreateSirtEntry(ManagedRegister out_reg,
+                                FrameOffset sirt_offset,
+                                ManagedRegister in_reg, bool null_allowed) {
   CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister());
   CHECK(out_reg.IsCoreRegister());
   if (null_allowed) {
-    // Null values get a handle value of 0.  Otherwise, the handle value is
-    // the address in the stack handle block holding the reference.
+    // Null values get a SIRT entry value of 0.  Otherwise, the SIRT entry is
+    // the address in the SIRT holding the reference.
     // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
     if (in_reg.IsNoRegister()) {
       LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
-                     SP, handle_offset.Int32Value());
+                     SP, sirt_offset.Int32Value());
       in_reg = out_reg;
     }
     cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
     if (!out_reg.Equals(in_reg)) {
       LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
     }
-    AddConstant(out_reg.AsCoreRegister(), SP, handle_offset.Int32Value(), NE);
+    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
   } else {
-    AddConstant(out_reg.AsCoreRegister(), SP, handle_offset.Int32Value(), AL);
+    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
   }
 }
 
-void Assembler::CreateStackHandle(FrameOffset out_off,
-                                  FrameOffset handle_offset,
-                                  ManagedRegister scratch, bool null_allowed) {
+void Assembler::CreateSirtEntry(FrameOffset out_off,
+                                FrameOffset sirt_offset,
+                                ManagedRegister scratch, bool null_allowed) {
   CHECK(scratch.IsCoreRegister());
   if (null_allowed) {
     LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
-                   handle_offset.Int32Value());
-    // Null values get a handle value of 0.  Otherwise, the handle value is
-    // the address in the stack handle block holding the reference.
-    // e.g. scratch = (handle == 0) ? 0 : (SP+handle_offset)
+                   sirt_offset.Int32Value());
+    // Null values get a SIRT entry value of 0.  Otherwise, the SIRT entry is
+    // the address in the SIRT holding the reference.
+    // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
     cmp(scratch.AsCoreRegister(), ShifterOperand(0));
-    AddConstant(scratch.AsCoreRegister(), SP, handle_offset.Int32Value(), NE);
+    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
   } else {
-    AddConstant(scratch.AsCoreRegister(), SP, handle_offset.Int32Value(), AL);
+    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
   }
   StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
 }
 
-void Assembler::LoadReferenceFromStackHandle(ManagedRegister out_reg,
-                                             ManagedRegister in_reg) {
+void Assembler::LoadReferenceFromSirt(ManagedRegister out_reg,
+                                      ManagedRegister in_reg) {
   CHECK(out_reg.IsCoreRegister());
   CHECK(in_reg.IsCoreRegister());
   Label null_arg;
@@ -1664,11 +1664,11 @@
                  in_reg.AsCoreRegister(), 0, NE);
 }
 
-void Assembler::ValidateRef(ManagedRegister src, bool could_be_null) {
+void Assembler::VerifyObject(ManagedRegister src, bool could_be_null) {
   // TODO: not validating references
 }
 
-void Assembler::ValidateRef(FrameOffset src, bool could_be_null) {
+void Assembler::VerifyObject(FrameOffset src, bool could_be_null) {
   // TODO: not validating references
 }
 
diff --git a/src/assembler_arm.h b/src/assembler_arm.h
index 2072845..c35934e 100644
--- a/src/assembler_arm.h
+++ b/src/assembler_arm.h
@@ -457,17 +457,17 @@
   void Move(ManagedRegister dest, ManagedRegister src);
   void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch,
             size_t size);
-  void CreateStackHandle(ManagedRegister out_reg, FrameOffset handle_offset,
-                         ManagedRegister in_reg, bool null_allowed);
+  void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+                       ManagedRegister in_reg, bool null_allowed);
 
-  void CreateStackHandle(FrameOffset out_off, FrameOffset handle_offset,
-                         ManagedRegister scratch, bool null_allowed);
+  void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+                       ManagedRegister scratch, bool null_allowed);
 
-  void LoadReferenceFromStackHandle(ManagedRegister dst, ManagedRegister src);
+  void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src);
 
-  void ValidateRef(ManagedRegister src, bool could_be_null);
+  void VerifyObject(ManagedRegister src, bool could_be_null);
 
-  void ValidateRef(FrameOffset src, bool could_be_null);
+  void VerifyObject(FrameOffset src, bool could_be_null);
 
   void Call(ManagedRegister base, Offset offset, ManagedRegister scratch);
   void Call(FrameOffset base, Offset offset, ManagedRegister scratch);
diff --git a/src/assembler_x86.cc b/src/assembler_x86.cc
index 83a4f36..dc04f87 100644
--- a/src/assembler_x86.cc
+++ b/src/assembler_x86.cc
@@ -1547,12 +1547,12 @@
   }
 }
 
-void Assembler::CreateStackHandle(ManagedRegister out_reg,
-                                  FrameOffset handle_offset,
-                                  ManagedRegister in_reg, bool null_allowed) {
+void Assembler::CreateSirtEntry(ManagedRegister out_reg,
+                                FrameOffset sirt_offset,
+                                ManagedRegister in_reg, bool null_allowed) {
   CHECK(in_reg.IsCpuRegister());
   CHECK(out_reg.IsCpuRegister());
-  ValidateRef(in_reg, null_allowed);
+  VerifyObject(in_reg, null_allowed);
   if (null_allowed) {
     Label null_arg;
     if (!out_reg.Equals(in_reg)) {
@@ -1560,33 +1560,33 @@
     }
     testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
     j(kZero, &null_arg);
-    leal(out_reg.AsCpuRegister(), Address(ESP, handle_offset));
+    leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
     Bind(&null_arg);
   } else {
-    leal(out_reg.AsCpuRegister(), Address(ESP, handle_offset));
+    leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
   }
 }
 
-void Assembler::CreateStackHandle(FrameOffset out_off,
-                                  FrameOffset handle_offset,
-                                  ManagedRegister scratch, bool null_allowed) {
+void Assembler::CreateSirtEntry(FrameOffset out_off,
+                                FrameOffset sirt_offset,
+                                ManagedRegister scratch, bool null_allowed) {
   CHECK(scratch.IsCpuRegister());
   if (null_allowed) {
     Label null_arg;
-    movl(scratch.AsCpuRegister(), Address(ESP, handle_offset));
+    movl(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
     testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
     j(kZero, &null_arg);
-    leal(scratch.AsCpuRegister(), Address(ESP, handle_offset));
+    leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
     Bind(&null_arg);
   } else {
-    leal(scratch.AsCpuRegister(), Address(ESP, handle_offset));
+    leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
   }
   Store(out_off, scratch, 4);
 }
 
-// Given a stack handle, load the associated reference.
-void Assembler::LoadReferenceFromStackHandle(ManagedRegister out_reg,
-                                             ManagedRegister in_reg) {
+// Given a SIRT entry, load the associated reference.
+void Assembler::LoadReferenceFromSirt(ManagedRegister out_reg,
+                                      ManagedRegister in_reg) {
   CHECK(out_reg.IsCpuRegister());
   CHECK(in_reg.IsCpuRegister());
   Label null_arg;
@@ -1599,11 +1599,11 @@
   Bind(&null_arg);
 }
 
-void Assembler::ValidateRef(ManagedRegister src, bool could_be_null) {
+void Assembler::VerifyObject(ManagedRegister src, bool could_be_null) {
   // TODO: not validating references
 }
 
-void Assembler::ValidateRef(FrameOffset src, bool could_be_null) {
+void Assembler::VerifyObject(FrameOffset src, bool could_be_null) {
   // TODO: not validating references
 }
 
diff --git a/src/assembler_x86.h b/src/assembler_x86.h
index 52a1202..d26d33f 100644
--- a/src/assembler_x86.h
+++ b/src/assembler_x86.h
@@ -469,16 +469,16 @@
   void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch,
             unsigned int size);
 
-  void CreateStackHandle(ManagedRegister out_reg, FrameOffset handle_offset,
-                         ManagedRegister in_reg, bool null_allowed);
+  void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+                       ManagedRegister in_reg, bool null_allowed);
 
-  void CreateStackHandle(FrameOffset out_off, FrameOffset handle_offset,
-                         ManagedRegister scratch, bool null_allowed);
+  void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+                       ManagedRegister scratch, bool null_allowed);
 
-  void LoadReferenceFromStackHandle(ManagedRegister dst, ManagedRegister src);
+  void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src);
 
-  void ValidateRef(ManagedRegister src, bool could_be_null);
-  void ValidateRef(FrameOffset src, bool could_be_null);
+  void VerifyObject(ManagedRegister src, bool could_be_null);
+  void VerifyObject(FrameOffset src, bool could_be_null);
 
   void Call(ManagedRegister base, Offset offset, ManagedRegister scratch);
   void Call(FrameOffset base, Offset offset, ManagedRegister scratch);
diff --git a/src/calling_convention.cc b/src/calling_convention.cc
index 32e3ee2..12dab2e 100644
--- a/src/calling_convention.cc
+++ b/src/calling_convention.cc
@@ -52,18 +52,18 @@
 // JNI calling convention
 
 size_t JniCallingConvention::OutArgSize() {
-  return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize, 16);
+  return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize, kStackAlignment);
 }
 
-size_t JniCallingConvention::HandleCount() {
+size_t JniCallingConvention::ReferenceCount() {
   const Method* method = GetMethod();
   return method->NumReferenceArgs() + (method->IsStatic() ? 1 : 0);
 }
 
 FrameOffset JniCallingConvention::ReturnValueSaveLocation() {
-  size_t start_of_shb = ShbLinkOffset().Int32Value() +  kPointerSize;
-  size_t handle_size = kPointerSize * HandleCount();  // size excluding header
-  return FrameOffset(start_of_shb + handle_size);
+  size_t start_of_sirt = SirtLinkOffset().Int32Value() + kPointerSize;
+  size_t references_size = kPointerSize * ReferenceCount();  // size excluding header
+  return FrameOffset(start_of_sirt + references_size);
 }
 
 bool JniCallingConvention::HasNext() {
@@ -101,12 +101,13 @@
   }
 }
 
-// Return position of handle holding reference at the current iterator position
-FrameOffset JniCallingConvention::CurrentParamHandleOffset() {
+// Return position of SIRT entry holding reference at the current iterator
+// position
+FrameOffset JniCallingConvention::CurrentParamSirtEntryOffset() {
   CHECK(IsCurrentParamAReference());
-  CHECK_GT(ShbLinkOffset(), ShbNumRefsOffset());
-  // Address of 1st handle
-  int result = ShbLinkOffset().Int32Value() + kPointerSize;
+  CHECK_GT(SirtLinkOffset(), SirtNumRefsOffset());
+  // Address of 1st SIRT entry
+  int result = SirtLinkOffset().Int32Value() + kPointerSize;
   if (itr_args_ != kObjectOrClass) {
     const Method *method = GetMethod();
     int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni(method);
@@ -116,7 +117,7 @@
     }
     result += previous_refs * kPointerSize;
   }
-  CHECK_GT(result, ShbLinkOffset().Int32Value());
+  CHECK_GT(result, SirtLinkOffset().Int32Value());
   return FrameOffset(result);
 }
 
diff --git a/src/calling_convention.h b/src/calling_convention.h
index 0aeb260..7e59fd5 100644
--- a/src/calling_convention.h
+++ b/src/calling_convention.h
@@ -87,8 +87,8 @@
 // | incoming stack args    | <-- Prior SP
 // | { Return address }     |     (x86)
 // | { Return value spill } |     (live on return slow paths)
-// | { Stack Handle Block   |
-// |   ...                  |
+// | { Stack Indirect Ref.  |
+// |   Table...             |
 // |   num. refs./link }    |     (here to prior SP is frame size)
 // | { Spill area }         |     (ARM)
 // | Method*                | <-- Anchor SP written to thread
@@ -109,8 +109,8 @@
   size_t ReturnPcOffset();
   // Size of outgoing arguments, including alignment
   size_t OutArgSize();
-  // Number of handles in stack handle block
-  size_t HandleCount();
+  // Number of references in stack indirect reference table
+  size_t ReferenceCount();
   // Size of area used to hold spilled registers
   size_t SpillAreaSize();
   // Location where the return value of a call can be squirreled if another
@@ -138,21 +138,21 @@
   FrameOffset CurrentParamStackOffset();
 
   // Iterator interface extension for JNI
-  FrameOffset CurrentParamHandleOffset();
+  FrameOffset CurrentParamSirtEntryOffset();
 
-  // Position of stack handle block and interior fields
-  FrameOffset ShbOffset() {
+  // Position of SIRT and interior fields
+  FrameOffset SirtOffset() {
     return FrameOffset(displacement_.Int32Value() +
                        SpillAreaSize() +
                        kPointerSize);  // above Method*
   }
-  FrameOffset ShbNumRefsOffset() {
-    return FrameOffset(ShbOffset().Int32Value() +
-                       StackHandleBlock::NumberOfReferencesOffset());
+  FrameOffset SirtNumRefsOffset() {
+    return FrameOffset(SirtOffset().Int32Value() +
+                       StackIndirectReferenceTable::NumberOfReferencesOffset());
   }
-  FrameOffset ShbLinkOffset() {
-    return FrameOffset(ShbOffset().Int32Value() +
-                       StackHandleBlock::LinkOffset());
+  FrameOffset SirtLinkOffset() {
+    return FrameOffset(SirtOffset().Int32Value() +
+                       StackIndirectReferenceTable::LinkOffset());
   }
 
  private:
@@ -162,7 +162,7 @@
     kObjectOrClass = 1
   };
 
-  // Number of stack slots for outgoing arguments, above which handles are
+  // Number of stack slots for outgoing arguments, above which the SIRT is
   // located
   size_t NumberOfOutgoingStackArgs();
 
diff --git a/src/calling_convention_arm.cc b/src/calling_convention_arm.cc
index 143d83b..826f40d 100644
--- a/src/calling_convention_arm.cc
+++ b/src/calling_convention_arm.cc
@@ -72,10 +72,11 @@
 size_t JniCallingConvention::FrameSize() {
   // Method* and spill area size
   size_t frame_data_size = kPointerSize + SpillAreaSize();
-  // Handles plus 2 words for SHB header
-  size_t handle_area_size = (HandleCount() + 2) * kPointerSize;
+  // References plus 2 words for SIRT header
+  size_t sirt_size = (ReferenceCount() + 2) * kPointerSize;
   // Plus return value spill area size
-  return RoundUp(frame_data_size + handle_area_size + SizeOfReturnValue(), 16);
+  return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(),
+                 kStackAlignment);
 }
 
 size_t JniCallingConvention::ReturnPcOffset() {
diff --git a/src/calling_convention_x86.cc b/src/calling_convention_x86.cc
index 1d2c4f8..148eee2 100644
--- a/src/calling_convention_x86.cc
+++ b/src/calling_convention_x86.cc
@@ -53,10 +53,11 @@
 size_t JniCallingConvention::FrameSize() {
   // Return address and Method*
   size_t frame_data_size = 2 * kPointerSize;
-  // Handles plus 2 words for SHB header
-  size_t handle_area_size = (HandleCount() + 2) * kPointerSize;
+  // References plus 2 words for SIRT header
+  size_t sirt_size = (ReferenceCount() + 2) * kPointerSize;
   // Plus return value spill area size
-  return RoundUp(frame_data_size + handle_area_size + SizeOfReturnValue(), 16);
+  return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(),
+                 kStackAlignment);
 }
 
 size_t JniCallingConvention::ReturnPcOffset() {
diff --git a/src/compiler/Dataflow.h b/src/compiler/Dataflow.h
index 909913f..235ad31 100644
--- a/src/compiler/Dataflow.h
+++ b/src/compiler/Dataflow.h
@@ -111,7 +111,7 @@
     int basicSSAReg;
     int m;      // multiplier
     int c;      // constant
-    int inc;    // loop incriment
+    int inc;    // loop increment
 } InductionVariableInfo;
 
 typedef struct ArrayAccessInfo {
diff --git a/src/compiler/codegen/arm/Assemble.cc b/src/compiler/codegen/arm/Assemble.cc
index 9981d64..9a5bd40 100644
--- a/src/compiler/codegen/arm/Assemble.cc
+++ b/src/compiler/codegen/arm/Assemble.cc
@@ -399,7 +399,8 @@
                  IS_UNARY_OP | REG_DEF_SP | REG_USE_SP,
                  "sub", "sp, #!0d*4", 1),
     ENCODING_MAP(kThumbSwi,           0xdf00,
-                 kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,                       kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
+                 kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
                  "swi", "!0d", 1),
     ENCODING_MAP(kThumbTst,           0x4200,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
diff --git a/src/globals.h b/src/globals.h
index 777cfd6..2fb0225 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -35,6 +35,9 @@
 // Required stack alignment
 const int kStackAlignment = 16;
 
+// Required object alignment
+const int kObjectAlignment = 8;
+
 // System page size.  Normally you're expected to get this from
 // sysconf(_SC_PAGESIZE) or some system-specific define (usually
 // PAGESIZE or PAGE_SIZE).  If we use a simple compile-time constant
diff --git a/src/heap.cc b/src/heap.cc
index f86f556..026fbe0 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -112,6 +112,18 @@
   return obj;
 }
 
+void Heap::VerifyObject(Object* obj) {
+  if (!IsAligned(obj, kObjectAlignment)) {
+    LOG(FATAL) << "Object isn't aligned: " << obj;
+  } else if (!live_bitmap_->Test(obj)) {
+    // TODO: we don't hold a lock here as it is assumed the live bit map
+    // isn't changing if the mutator is running.
+    LOG(FATAL) << "Object is dead: " << obj;
+  } else if (obj->GetClass() == NULL) {
+    LOG(FATAL) << "Object has no class: " << obj;
+  }
+}
+
 void Heap::RecordAllocation(Space* space, const Object* obj) {
   size_t size = space->AllocationSize(obj);
   DCHECK_NE(size, 0u);
diff --git a/src/heap.h b/src/heap.h
index 4d45366..1e56778 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -33,6 +33,9 @@
   // Allocates and initializes storage for an object instance.
   static Object* AllocObject(Class* klass, size_t num_bytes);
 
+  // Check sanity of given reference
+  static void VerifyObject(Object* obj);
+
   // Initiates an explicit garbage collection.
   static void CollectGarbage();
 
diff --git a/src/indirect_reference_table.cc b/src/indirect_reference_table.cc
index ec52cc6..2ea2434 100644
--- a/src/indirect_reference_table.cc
+++ b/src/indirect_reference_table.cc
@@ -36,7 +36,7 @@
 {
   CHECK_GT(initialCount, 0U);
   CHECK_LE(initialCount, maxCount);
-  CHECK_NE(desiredKind, kInvalid);
+  CHECK_NE(desiredKind, kSirtOrInvalid);
 
   table_ = reinterpret_cast<Object**>(malloc(initialCount * sizeof(Object*)));
   CHECK(table_ != NULL);
@@ -162,7 +162,7 @@
     LOG(WARNING) << "Attempt to look up NULL " << kind_;
     return false;
   }
-  if (GetIndirectRefKind(iref) == kInvalid) {
+  if (GetIndirectRefKind(iref) == kSirtOrInvalid) {
     LOG(ERROR) << "JNI ERROR (app bug): invalid " << kind_ << " " << iref;
     AbortMaybe();
     return false;
@@ -229,7 +229,7 @@
   int idx = ExtractIndex(iref);
   bool workAroundAppJniBugs = false;
 
-  if (GetIndirectRefKind(iref) == kInvalid /*&& gDvmJni.workAroundAppJniBugs*/) { // TODO
+  if (GetIndirectRefKind(iref) == kSirtOrInvalid /*&& gDvmJni.workAroundAppJniBugs*/) { // TODO
     idx = LinearScan(iref, bottomIndex, topIndex, table_);
     workAroundAppJniBugs = true;
     if (idx == -1) {
@@ -297,8 +297,8 @@
 
 std::ostream& operator<<(std::ostream& os, IndirectRefKind rhs) {
   switch (rhs) {
-  case kInvalid:
-    os << "invalid reference";
+  case kSirtOrInvalid:
+    os << "stack indirect reference table or invalid reference";
     break;
   case kLocal:
     os << "local reference";
diff --git a/src/indirect_reference_table.h b/src/indirect_reference_table.h
index 857b9cd..358162c 100644
--- a/src/indirect_reference_table.h
+++ b/src/indirect_reference_table.h
@@ -105,10 +105,10 @@
  * For convenience these match up with enum jobjectRefType from jni.h.
  */
 enum IndirectRefKind {
-    kInvalid    = 0,
-    kLocal      = 1,
-    kGlobal     = 2,
-    kWeakGlobal = 3
+    kSirtOrInvalid = 0,
+    kLocal         = 1,
+    kGlobal        = 2,
+    kWeakGlobal    = 3
 };
 std::ostream& operator<<(std::ostream& os, IndirectRefKind rhs);
 
diff --git a/src/jni_compiler.cc b/src/jni_compiler.cc
index eccd8f9..9dceed4 100644
--- a/src/jni_compiler.cc
+++ b/src/jni_compiler.cc
@@ -38,55 +38,55 @@
   // TODO: implement computing the difference of the callee saves
   // and saving
 
-  // 3. Set up the StackHandleBlock
+  // 3. Set up the StackIndirectReferenceTable
   mr_conv.ResetIterator(FrameOffset(frame_size));
   jni_conv.ResetIterator(FrameOffset(0));
-  jni_asm->StoreImmediateToFrame(jni_conv.ShbNumRefsOffset(),
-                                 jni_conv.HandleCount(),
+  jni_asm->StoreImmediateToFrame(jni_conv.SirtNumRefsOffset(),
+                                 jni_conv.ReferenceCount(),
                                  mr_conv.InterproceduralScratchRegister());
-  jni_asm->CopyRawPtrFromThread(jni_conv.ShbLinkOffset(),
-                                Thread::TopShbOffset(),
+  jni_asm->CopyRawPtrFromThread(jni_conv.SirtLinkOffset(),
+                                Thread::TopSirtOffset(),
                                 mr_conv.InterproceduralScratchRegister());
-  jni_asm->StoreStackOffsetToThread(Thread::TopShbOffset(),
-                                    jni_conv.ShbOffset(),
+  jni_asm->StoreStackOffsetToThread(Thread::TopSirtOffset(),
+                                    jni_conv.SirtOffset(),
                                     mr_conv.InterproceduralScratchRegister());
 
-  // 4. Place incoming reference arguments into handle block
+  // 4. Place incoming reference arguments into SIRT
   jni_conv.Next();  // Skip JNIEnv*
   // 4.5. Create Class argument for static methods out of passed method
   if (is_static) {
-    FrameOffset handle_offset = jni_conv.CurrentParamHandleOffset();
-    // Check handle offset is within frame
-    CHECK_LT(handle_offset.Uint32Value(), frame_size);
+    FrameOffset sirt_offset = jni_conv.CurrentParamSirtEntryOffset();
+    // Check sirt offset is within frame
+    CHECK_LT(sirt_offset.Uint32Value(), frame_size);
     jni_asm->LoadRef(jni_conv.InterproceduralScratchRegister(),
                      mr_conv.MethodRegister(), Method::DeclaringClassOffset());
-    jni_asm->ValidateRef(jni_conv.InterproceduralScratchRegister(), false);
-    jni_asm->StoreRef(handle_offset, jni_conv.InterproceduralScratchRegister());
-    jni_conv.Next();  // handlerized so move to next argument
+    jni_asm->VerifyObject(jni_conv.InterproceduralScratchRegister(), false);
+    jni_asm->StoreRef(sirt_offset, jni_conv.InterproceduralScratchRegister());
+    jni_conv.Next();  // in SIRT so move to next argument
   }
   while (mr_conv.HasNext()) {
     CHECK(jni_conv.HasNext());
     bool ref_param = jni_conv.IsCurrentParamAReference();
     CHECK(!ref_param || mr_conv.IsCurrentParamAReference());
-    // References need handlerization and the handle address passing
+    // References need placing in SIRT and the entry value passing
     if (ref_param) {
-      // Compute handle offset, note null is handlerized but its boxed value
+      // Compute SIRT entry, note null is placed in the SIRT but its boxed value
       // must be NULL
-      FrameOffset handle_offset = jni_conv.CurrentParamHandleOffset();
-      // Check handle offset is within frame
-      CHECK_LT(handle_offset.Uint32Value(), frame_size);
+      FrameOffset sirt_offset = jni_conv.CurrentParamSirtEntryOffset();
+      // Check SIRT offset is within frame
+      CHECK_LT(sirt_offset.Uint32Value(), frame_size);
       bool input_in_reg = mr_conv.IsCurrentParamInRegister();
       bool input_on_stack = mr_conv.IsCurrentParamOnStack();
       CHECK(input_in_reg || input_on_stack);
 
       if (input_in_reg) {
         ManagedRegister in_reg  =  mr_conv.CurrentParamRegister();
-        jni_asm->ValidateRef(in_reg, mr_conv.IsCurrentUserArg());
-        jni_asm->StoreRef(handle_offset, in_reg);
+        jni_asm->VerifyObject(in_reg, mr_conv.IsCurrentUserArg());
+        jni_asm->StoreRef(sirt_offset, in_reg);
       } else if (input_on_stack) {
         FrameOffset in_off  = mr_conv.CurrentParamStackOffset();
-        jni_asm->ValidateRef(in_off, mr_conv.IsCurrentUserArg());
-        jni_asm->CopyRef(handle_offset, in_off,
+        jni_asm->VerifyObject(in_off, mr_conv.IsCurrentUserArg());
+        jni_asm->CopyRef(sirt_offset, in_off,
                          mr_conv.InterproceduralScratchRegister());
       }
     }
@@ -111,18 +111,18 @@
     mr_conv.ResetIterator(FrameOffset(frame_size+out_arg_size));
     jni_conv.ResetIterator(FrameOffset(out_arg_size));
     jni_conv.Next();  // Skip JNIEnv*
-    // Get stack handle for 1st argument
+    // Get SIRT entry for 1st argument
     if (is_static) {
-      FrameOffset handle_offset = jni_conv.CurrentParamHandleOffset();
+      FrameOffset sirt_offset = jni_conv.CurrentParamSirtEntryOffset();
       if (jni_conv.IsCurrentParamOnStack()) {
         FrameOffset out_off = jni_conv.CurrentParamStackOffset();
-        jni_asm->CreateStackHandle(out_off, handle_offset,
-                                   mr_conv.InterproceduralScratchRegister(),
-                                   false);
+        jni_asm->CreateSirtEntry(out_off, sirt_offset,
+                                 mr_conv.InterproceduralScratchRegister(),
+                                 false);
       } else {
         ManagedRegister out_reg = jni_conv.CurrentParamRegister();
-        jni_asm->CreateStackHandle(out_reg, handle_offset,
-                                   ManagedRegister::NoRegister(), false);
+        jni_asm->CreateSirtEntry(out_reg, sirt_offset,
+                                 ManagedRegister::NoRegister(), false);
       }
     } else {
       CopyParameter(jni_asm, &mr_conv, &jni_conv, frame_size, out_arg_size);
@@ -171,16 +171,16 @@
     mr_conv.ResetIterator(FrameOffset(frame_size+out_arg_size));
     jni_conv.ResetIterator(FrameOffset(out_arg_size));
     jni_conv.Next();  // Skip JNIEnv*
-    FrameOffset handle_offset = jni_conv.CurrentParamHandleOffset();
+    FrameOffset sirt_offset = jni_conv.CurrentParamSirtEntryOffset();
     if (jni_conv.IsCurrentParamOnStack()) {
       FrameOffset out_off = jni_conv.CurrentParamStackOffset();
-      jni_asm->CreateStackHandle(out_off, handle_offset,
-                                 mr_conv.InterproceduralScratchRegister(),
-                                 false);
+      jni_asm->CreateSirtEntry(out_off, sirt_offset,
+                               mr_conv.InterproceduralScratchRegister(),
+                               false);
     } else {
       ManagedRegister out_reg = jni_conv.CurrentParamRegister();
-      jni_asm->CreateStackHandle(out_reg, handle_offset,
-                                 ManagedRegister::NoRegister(), false);
+      jni_asm->CreateSirtEntry(out_reg, sirt_offset,
+                               ManagedRegister::NoRegister(), false);
     }
   }
   // 9. Create 1st argument, the JNI environment ptr
@@ -214,18 +214,18 @@
     CHECK_LT(return_save_location.Uint32Value(), frame_size+out_arg_size);
     jni_asm->Store(return_save_location, jni_conv.ReturnRegister(),
                    jni_conv.SizeOfReturnValue());
-    // Get stack handle for 1st argument
+    // Get SIRT entry for 1st argument
     if (is_static) {
-      FrameOffset handle_offset = jni_conv.CurrentParamHandleOffset();
+      FrameOffset sirt_offset = jni_conv.CurrentParamSirtEntryOffset();
       if (jni_conv.IsCurrentParamOnStack()) {
         FrameOffset out_off = jni_conv.CurrentParamStackOffset();
-        jni_asm->CreateStackHandle(out_off, handle_offset,
-                                   mr_conv.InterproceduralScratchRegister(),
-                                   false);
+        jni_asm->CreateSirtEntry(out_off, sirt_offset,
+                                 mr_conv.InterproceduralScratchRegister(),
+                                 false);
       } else {
         ManagedRegister out_reg = jni_conv.CurrentParamRegister();
-        jni_asm->CreateStackHandle(out_reg, handle_offset,
-                                   ManagedRegister::NoRegister(), false);
+        jni_asm->CreateSirtEntry(out_reg, sirt_offset,
+                                 ManagedRegister::NoRegister(), false);
       }
     } else {
       CopyParameter(jni_asm, &mr_conv, &jni_conv, frame_size, out_arg_size);
@@ -272,16 +272,18 @@
                                   jni_conv.InterproceduralScratchRegister());
 
 
-  // 15. Place result in correct register possibly dehandlerizing
+  // 15. Place result in correct register possibly loading from indirect
+  //     reference table
   if (jni_conv.IsReturnAReference()) {
-    jni_asm->LoadReferenceFromStackHandle(mr_conv.ReturnRegister(),
-                                          jni_conv.ReturnRegister());
+    // TODO: load from local/global reference tables
+    jni_asm->LoadReferenceFromSirt(mr_conv.ReturnRegister(),
+                                   jni_conv.ReturnRegister());
   } else {
     jni_asm->Move(mr_conv.ReturnRegister(), jni_conv.ReturnRegister());
   }
 
-  // 16. Remove stack handle block from thread
-  jni_asm->CopyRawPtrToThread(Thread::TopShbOffset(), jni_conv.ShbLinkOffset(),
+  // 16. Remove SIRT from thread
+  jni_asm->CopyRawPtrToThread(Thread::TopSirtOffset(), jni_conv.SirtLinkOffset(),
                               jni_conv.InterproceduralScratchRegister());
 
   // 17. Remove activation
@@ -305,37 +307,36 @@
                                 size_t frame_size, size_t out_arg_size) {
   bool input_in_reg = mr_conv->IsCurrentParamInRegister();
   bool output_in_reg = jni_conv->IsCurrentParamInRegister();
-  FrameOffset handle_offset(0);
+  FrameOffset sirt_offset(0);
   bool null_allowed = false;
   bool ref_param = jni_conv->IsCurrentParamAReference();
   CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
   CHECK(input_in_reg || mr_conv->IsCurrentParamOnStack());
   CHECK(output_in_reg || jni_conv->IsCurrentParamOnStack());
-  // References need handlerization and the handle address passing
+  // References need placing in SIRT and the entry address passing
   if (ref_param) {
     null_allowed = mr_conv->IsCurrentUserArg();
-    // Compute handle offset. Note null is placed in the SHB but the jobject
-    // passed to the native code must be null (not a pointer into the SHB
+    // Compute SIRT offset. Note null is placed in the SIRT but the jobject
+    // passed to the native code must be null (not a pointer into the SIRT
     // as with regular references).
-    handle_offset = jni_conv->CurrentParamHandleOffset();
-    // Check handle offset is within frame.
-    CHECK_LT(handle_offset.Uint32Value(), (frame_size+out_arg_size));
+    sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+    // Check SIRT offset is within frame.
+    CHECK_LT(sirt_offset.Uint32Value(), (frame_size+out_arg_size));
   }
   if (input_in_reg && output_in_reg) {
     ManagedRegister in_reg = mr_conv->CurrentParamRegister();
     ManagedRegister out_reg = jni_conv->CurrentParamRegister();
     if (ref_param) {
-      jni_asm->CreateStackHandle(out_reg, handle_offset, in_reg,
-                                 null_allowed);
+      jni_asm->CreateSirtEntry(out_reg, sirt_offset, in_reg, null_allowed);
     } else {
       jni_asm->Move(out_reg, in_reg);
     }
   } else if (!input_in_reg && !output_in_reg) {
     FrameOffset out_off = jni_conv->CurrentParamStackOffset();
     if (ref_param) {
-      jni_asm->CreateStackHandle(out_off, handle_offset,
-                                 mr_conv->InterproceduralScratchRegister(),
-                                 null_allowed);
+      jni_asm->CreateSirtEntry(out_off, sirt_offset,
+                               mr_conv->InterproceduralScratchRegister(),
+                               null_allowed);
     } else {
       FrameOffset in_off = mr_conv->CurrentParamStackOffset();
       size_t param_size = mr_conv->CurrentParamSize();
@@ -349,8 +350,8 @@
     // Check that incoming stack arguments are above the current stack frame.
     CHECK_GT(in_off.Uint32Value(), frame_size);
     if (ref_param) {
-      jni_asm->CreateStackHandle(out_reg, handle_offset,
-                                 ManagedRegister::NoRegister(), null_allowed);
+      jni_asm->CreateSirtEntry(out_reg, sirt_offset,
+                               ManagedRegister::NoRegister(), null_allowed);
     } else {
       unsigned int param_size = mr_conv->CurrentParamSize();
       CHECK_EQ(param_size, jni_conv->CurrentParamSize());
@@ -363,10 +364,10 @@
     // Check outgoing argument is within frame
     CHECK_LT(out_off.Uint32Value(), frame_size);
     if (ref_param) {
-      // TODO: recycle value in in_reg rather than reload from handle
-      jni_asm->CreateStackHandle(out_off, handle_offset,
-                                 mr_conv->InterproceduralScratchRegister(),
-                                 null_allowed);
+      // TODO: recycle value in in_reg rather than reload from SIRT
+      jni_asm->CreateSirtEntry(out_off, sirt_offset,
+                               mr_conv->InterproceduralScratchRegister(),
+                               null_allowed);
     } else {
       size_t param_size = mr_conv->CurrentParamSize();
       CHECK_EQ(param_size, jni_conv->CurrentParamSize());
diff --git a/src/jni_compiler_test.cc b/src/jni_compiler_test.cc
index 29e8942..4611b3c 100644
--- a/src/jni_compiler_test.cc
+++ b/src/jni_compiler_test.cc
@@ -78,7 +78,7 @@
 
 int gJava_MyClass_foo_calls = 0;
 void Java_MyClass_foo(JNIEnv* env, jobject thisObj) {
-  EXPECT_EQ(1u, Thread::Current()->NumShbHandles());
+  EXPECT_EQ(1u, Thread::Current()->NumSirtReferences());
   EXPECT_EQ(Thread::kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
   EXPECT_TRUE(thisObj != NULL);
@@ -98,7 +98,7 @@
 
 int gJava_MyClass_fooI_calls = 0;
 jint Java_MyClass_fooI(JNIEnv* env, jobject thisObj, jint x) {
-  EXPECT_EQ(1u, Thread::Current()->NumShbHandles());
+  EXPECT_EQ(1u, Thread::Current()->NumSirtReferences());
   EXPECT_EQ(Thread::kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
   EXPECT_TRUE(thisObj != NULL);
@@ -122,7 +122,7 @@
 
 int gJava_MyClass_fooII_calls = 0;
 jint Java_MyClass_fooII(JNIEnv* env, jobject thisObj, jint x, jint y) {
-  EXPECT_EQ(1u, Thread::Current()->NumShbHandles());
+  EXPECT_EQ(1u, Thread::Current()->NumSirtReferences());
   EXPECT_EQ(Thread::kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
   EXPECT_TRUE(thisObj != NULL);
@@ -147,7 +147,7 @@
 
 int gJava_MyClass_fooDD_calls = 0;
 jdouble Java_MyClass_fooDD(JNIEnv* env, jobject thisObj, jdouble x, jdouble y) {
-  EXPECT_EQ(1u, Thread::Current()->NumShbHandles());
+  EXPECT_EQ(1u, Thread::Current()->NumSirtReferences());
   EXPECT_EQ(Thread::kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
   EXPECT_TRUE(thisObj != NULL);
@@ -175,7 +175,7 @@
 int gJava_MyClass_fooIOO_calls = 0;
 jobject Java_MyClass_fooIOO(JNIEnv* env, jobject thisObj, jint x, jobject y,
                             jobject z) {
-  EXPECT_EQ(3u, Thread::Current()->NumShbHandles());
+  EXPECT_EQ(3u, Thread::Current()->NumSirtReferences());
   EXPECT_EQ(Thread::kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
   EXPECT_TRUE(thisObj != NULL);
@@ -225,7 +225,7 @@
 int gJava_MyClass_fooSIOO_calls = 0;
 jobject Java_MyClass_fooSIOO(JNIEnv* env, jclass klass, jint x, jobject y,
                              jobject z) {
-  EXPECT_EQ(3u, Thread::Current()->NumShbHandles());
+  EXPECT_EQ(3u, Thread::Current()->NumSirtReferences());
   EXPECT_EQ(Thread::kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
   EXPECT_TRUE(klass != NULL);
@@ -276,7 +276,7 @@
 int gJava_MyClass_fooSSIOO_calls = 0;
 jobject Java_MyClass_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject y,
                              jobject z) {
-  EXPECT_EQ(3u, Thread::Current()->NumShbHandles());
+  EXPECT_EQ(3u, Thread::Current()->NumSirtReferences());
   EXPECT_EQ(Thread::kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
   EXPECT_TRUE(klass != NULL);
diff --git a/src/jni_internal.cc b/src/jni_internal.cc
index ca40e598..0458ceb 100644
--- a/src/jni_internal.cc
+++ b/src/jni_internal.cc
@@ -249,60 +249,7 @@
 
 template<typename T>
 T Decode(ScopedJniThreadState& ts, jobject obj) {
-  if (obj == NULL) {
-    return NULL;
-  }
-
-  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
-  IndirectRefKind kind = GetIndirectRefKind(ref);
-  Object* result;
-  switch (kind) {
-  case kLocal:
-    {
-      IndirectReferenceTable& locals = ts.Env()->locals;
-      result = locals.Get(ref);
-      break;
-    }
-  case kGlobal:
-    {
-      JavaVMExt* vm = Runtime::Current()->GetJavaVM();
-      IndirectReferenceTable& globals = vm->globals;
-      MutexLock mu(vm->globals_lock);
-      result = globals.Get(ref);
-      break;
-    }
-  case kWeakGlobal:
-    {
-      JavaVMExt* vm = Runtime::Current()->GetJavaVM();
-      IndirectReferenceTable& weak_globals = vm->weak_globals;
-      MutexLock mu(vm->weak_globals_lock);
-      result = weak_globals.Get(ref);
-      if (result == kClearedJniWeakGlobal) {
-        // This is a special case where it's okay to return NULL.
-        return NULL;
-      }
-      break;
-    }
-  case kInvalid:
-  default:
-    // TODO: make stack handle blocks more efficient
-    // Check if this is a local reference in a stack handle block
-    if (ts.Self()->ShbContains(obj)) {
-      return *reinterpret_cast<T*>(obj); // Read from stack handle block
-    }
-    if (false /*gDvmJni.workAroundAppJniBugs*/) { // TODO
-      // Assume an invalid local reference is actually a direct pointer.
-      return reinterpret_cast<T>(obj);
-    }
-    LOG(FATAL) << "Invalid indirect reference " << obj;
-    return reinterpret_cast<T>(kInvalidIndirectRefObject);
-  }
-
-  if (result == NULL) {
-    LOG(FATAL) << "JNI ERROR (app bug): use of deleted " << kind << ": "
-               << obj;
-  }
-  return reinterpret_cast<T>(result);
+  return reinterpret_cast<T>(ts.Self()->DecodeJObject(obj));
 }
 
 Field* DecodeField(ScopedJniThreadState& ts, jfieldID fid) {
diff --git a/src/thread.cc b/src/thread.cc
index f37dcba..3284625 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -9,6 +9,7 @@
 #include <list>
 
 #include "class_linker.h"
+#include "heap.h"
 #include "jni_internal.h"
 #include "object.h"
 #include "runtime.h"
@@ -204,27 +205,89 @@
   return true;
 }
 
-size_t Thread::NumShbHandles() {
+size_t Thread::NumSirtReferences() {
   size_t count = 0;
-  for (StackHandleBlock* cur = top_shb_; cur; cur = cur->Link()) {
+  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->Link()) {
     count += cur->NumberOfReferences();
   }
   return count;
 }
 
-bool Thread::ShbContains(jobject obj) {
-  Object** shb_entry = reinterpret_cast<Object**>(obj);
-  for (StackHandleBlock* cur = top_shb_; cur; cur = cur->Link()) {
+bool Thread::SirtContains(jobject obj) {
+  Object** sirt_entry = reinterpret_cast<Object**>(obj);
+  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->Link()) {
     size_t num_refs = cur->NumberOfReferences();
-    DCHECK_GT(num_refs, 0u); // A SHB should always have a jobject/jclass
-    if ((&cur->Handles()[0] >= shb_entry) &&
-        (shb_entry <= (&cur->Handles()[num_refs-1]))) {
 +    // A SIRT should always hold at least one reference, as a native method
 +    // always receives either a this pointer or a class as its first argument
+    DCHECK_GT(num_refs, 0u);
 +    if ((&cur->References()[0] <= sirt_entry) &&
 +        (sirt_entry <= (&cur->References()[num_refs-1]))) {
       return true;
     }
   }
   return false;
 }
 
+Object* Thread::DecodeJObject(jobject obj) {
+  // TODO: Only allowed to hold Object* when in the runnable state
+  // DCHECK(state_ == kRunnable);
+  if (obj == NULL) {
+    return NULL;
+  }
+  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
+  IndirectRefKind kind = GetIndirectRefKind(ref);
+  Object* result;
+  switch (kind) {
+  case kLocal:
+    {
+      JNIEnvExt* env = reinterpret_cast<JNIEnvExt*>(jni_env_);
+      IndirectReferenceTable& locals = env->locals;
+      result = locals.Get(ref);
+      break;
+    }
+  case kGlobal:
+    {
+      JavaVMExt* vm = Runtime::Current()->GetJavaVM();
+      IndirectReferenceTable& globals = vm->globals;
+      MutexLock mu(vm->globals_lock);
+      result = globals.Get(ref);
+      break;
+    }
+  case kWeakGlobal:
+    {
+      JavaVMExt* vm = Runtime::Current()->GetJavaVM();
+      IndirectReferenceTable& weak_globals = vm->weak_globals;
+      MutexLock mu(vm->weak_globals_lock);
+      result = weak_globals.Get(ref);
+      if (result == kClearedJniWeakGlobal) {
+        // This is a special case where it's okay to return NULL.
+        return NULL;
+      }
+      break;
+    }
+  case kSirtOrInvalid:
+  default:
+    // TODO: make stack indirect reference table lookup more efficient
+    // Check if this is a local reference in the SIRT
+    if (SirtContains(obj)) {
+      result = *reinterpret_cast<Object**>(obj); // Read from SIRT
+    } else if (false /*gDvmJni.workAroundAppJniBugs*/) { // TODO
+      // Assume an invalid local reference is actually a direct pointer.
+      result = reinterpret_cast<Object*>(obj);
+    } else {
+      LOG(FATAL) << "Invalid indirect reference " << obj;
+      result = reinterpret_cast<Object*>(kInvalidIndirectRefObject);
+    }
+  }
+
+  if (result == NULL) {
+    LOG(FATAL) << "JNI ERROR (app bug): use of deleted " << kind << ": "
+               << obj;
+  }
+  Heap::VerifyObject(result);
+  return result;
+}
+
 void Thread::ThrowNewException(const char* exception_class_descriptor, const char* fmt, ...) {
   std::string msg;
   va_list args;
diff --git a/src/thread.h b/src/thread.h
index 68b6cbf..15781ff 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -22,7 +22,6 @@
 class Method;
 class Object;
 class Runtime;
-class StackHandleBlock;
 class Thread;
 class ThreadList;
 class Throwable;
@@ -73,44 +72,44 @@
   DISALLOW_COPY_AND_ASSIGN(MutexLock);
 };
 
-// Stack handle blocks are allocated within the bridge frame between managed
-// and native code.
-class StackHandleBlock {
+// Stack allocated indirect reference table, allocated within the bridge frame
+// between managed and native code.
+class StackIndirectReferenceTable {
  public:
-  // Number of references contained within this SHB
+  // Number of references contained within this SIRT
   size_t NumberOfReferences() {
     return number_of_references_;
   }
 
-  // Link to previous SHB or NULL
-  StackHandleBlock* Link() {
+  // Link to previous SIRT or NULL
+  StackIndirectReferenceTable* Link() {
     return link_;
   }
 
-  Object** Handles() {
-    return handles_;
+  Object** References() {
+    return references_;
   }
 
-  // Offset of length within SHB, used by generated code
+  // Offset of length within SIRT, used by generated code
   static size_t NumberOfReferencesOffset() {
-    return OFFSETOF_MEMBER(StackHandleBlock, number_of_references_);
+    return OFFSETOF_MEMBER(StackIndirectReferenceTable, number_of_references_);
   }
 
-  // Offset of link within SHB, used by generated code
+  // Offset of link within SIRT, used by generated code
   static size_t LinkOffset() {
-    return OFFSETOF_MEMBER(StackHandleBlock, link_);
+    return OFFSETOF_MEMBER(StackIndirectReferenceTable, link_);
   }
 
  private:
-  StackHandleBlock() {}
+  StackIndirectReferenceTable() {}
 
   size_t number_of_references_;
-  StackHandleBlock* link_;
+  StackIndirectReferenceTable* link_;
 
   // Fake array, really allocated and filled in by jni_compiler.
-  Object* handles_[0];
+  Object* references_[0];
 
-  DISALLOW_COPY_AND_ASSIGN(StackHandleBlock);
+  DISALLOW_COPY_AND_ASSIGN(StackIndirectReferenceTable);
 };
 
 struct NativeToManagedRecord {
@@ -326,16 +325,20 @@
                         OFFSETOF_MEMBER(Frame, sp_));
   }
 
-  // Offset of top stack handle block within Thread, used by generated code
-  static ThreadOffset TopShbOffset() {
-    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_shb_));
+  // Offset of top stack indirect reference table within Thread, used by
+  // generated code
+  static ThreadOffset TopSirtOffset() {
+    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
   }
 
-  // Number of references allocated in StackHandleBlocks on this thread
-  size_t NumShbHandles();
+  // Number of references allocated in SIRTs on this thread
+  size_t NumSirtReferences();
 
-  // Is the given obj in this thread's stack handle blocks?
-  bool ShbContains(jobject obj);
+  // Is the given obj in this thread's stack indirect reference table?
+  bool SirtContains(jobject obj);
+
 +  // Convert a jobject into an Object*
+  Object* DecodeJObject(jobject obj);
 
   // Offset of exception_entry_point_ within Thread, used by generated code
   static ThreadOffset ExceptionEntryPointOffset() {
@@ -384,7 +387,7 @@
       : id_(1234),
         top_of_managed_stack_(),
         native_to_managed_record_(NULL),
-        top_shb_(NULL),
+        top_sirt_(NULL),
         jni_env_(NULL),
         exception_(NULL),
         suspend_count_(0),
@@ -414,8 +417,8 @@
   // native to managed code.
   NativeToManagedRecord* native_to_managed_record_;
 
-  // Top of linked list of stack handle blocks or NULL for none
-  StackHandleBlock* top_shb_;
+  // Top of linked list of stack indirect reference tables or NULL for none
+  StackIndirectReferenceTable* top_sirt_;
 
   // Every thread may have an associated JNI environment
   JNIEnv* jni_env_;