Compile-time tuning

Specialized the dataflow iterators and did a few other minor tweaks.
Showing ~5% compile-time improvement in a single-threaded environment;
less in multi-threaded (presumably because we're blocked by something
else).

Change-Id: I2e2ed58d881414b9fc97e04cd0623e188259afd2
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index 06cc505..236c6f4 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -21,42 +21,63 @@
 
 namespace art {
 
-inline BasicBlock* DataflowIterator::NextBody(bool had_change) {
-  changed_ |= had_change;
+// Single forward pass over the nodes.
+inline BasicBlock* DataflowIterator::ForwardSingleNext() {
   BasicBlock* res = NULL;
-  if (reverse_) {
-    if (is_iterative_ && changed_ && (idx_ < 0)) {
-      idx_ = start_idx_;
-      changed_ = false;
-    }
-    if (idx_ >= 0) {
-      int bb_id = block_id_list_->Get(idx_--);
-      res = mir_graph_->GetBasicBlock(bb_id);
-    }
-  } else {
-    if (is_iterative_ && changed_ && (idx_ >= end_idx_)) {
-      idx_ = start_idx_;
-      changed_ = false;
-    }
-    if (idx_ < end_idx_) {
-      int bb_id = block_id_list_->Get(idx_++);
-      res = mir_graph_->GetBasicBlock(bb_id);
-    }
+  if (idx_ < end_idx_) {
+    int bb_id = block_id_list_->Get(idx_++);
+    res = mir_graph_->GetBasicBlock(bb_id);
   }
   return res;
 }
 
-// AllNodes uses the existing GrowableArray iterator, so use different NextBody().
-inline BasicBlock* AllNodesIterator::NextBody(bool had_change) {
+// Repeat full forward passes over all nodes until no change occurs during a complete pass.
+inline BasicBlock* DataflowIterator::ForwardRepeatNext(bool had_change) {
   changed_ |= had_change;
   BasicBlock* res = NULL;
+  if ((idx_ >= end_idx_) && changed_) {
+    idx_ = start_idx_;
+    changed_ = false;
+  }
+  if (idx_ < end_idx_) {
+    int bb_id = block_id_list_->Get(idx_++);
+    res = mir_graph_->GetBasicBlock(bb_id);
+  }
+  return res;
+}
+
+// Single reverse pass over the nodes.
+inline BasicBlock* DataflowIterator::ReverseSingleNext() {
+  BasicBlock* res = NULL;
+  if (idx_ >= 0) {
+    int bb_id = block_id_list_->Get(idx_--);
+    res = mir_graph_->GetBasicBlock(bb_id);
+  }
+  return res;
+}
+
+// Repeat full backward passes over all nodes until no change occurs during a complete pass.
+inline BasicBlock* DataflowIterator::ReverseRepeatNext(bool had_change) {
+  changed_ |= had_change;
+  BasicBlock* res = NULL;
+  if ((idx_ < 0) && changed_) {
+    idx_ = start_idx_;
+    changed_ = false;
+  }
+  if (idx_ >= 0) {
+    int bb_id = block_id_list_->Get(idx_--);
+    res = mir_graph_->GetBasicBlock(bb_id);
+  }
+  return res;
+}
+
+// AllNodes uses the existing GrowableArray iterator, and should be considered unordered.
+inline BasicBlock* AllNodesIterator::Next() {
+  BasicBlock* res = NULL;
   bool keep_looking = true;
   while (keep_looking) {
     res = all_nodes_iterator_->Next();
-    if (is_iterative_ && changed_ && (res == NULL)) {
-      all_nodes_iterator_->Reset();
-      changed_ = false;
-    } else if ((res == NULL) || (!res->hidden)) {
+    if ((res == NULL) || (!res->hidden)) {
       keep_looking = false;
     }
   }
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index da44ffd..1dab54e 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -27,124 +27,130 @@
    * interesting orders.  Note that for efficiency, the visit orders have been pre-computed.
    * The order itself will not change during the iteration.  However, for some uses,
    * auxiliary data associated with the basic blocks may be changed during the iteration,
-   * necessitating another pass over the list.
-   *
-   * To support this usage, we have is_iterative_.  If false, the iteration is a one-shot
-   * pass through the pre-computed list using Next().  If true, the caller must tell the
-   * iterator whether a change has been made that necessitates another pass.  Use
-   * Next(had_change) for this.  The general idea is that the iterative_ use case means
-   * that the iterator will keep repeating the full basic block list until a complete pass
-   * is made through it with no changes.  Note that calling Next(true) does not affect
-   * the iteration order or short-curcuit the current pass - it simply tells the iterator
-   * that once it has finished walking through the block list it should reset and do another
-   * full pass through the list.
+   * necessitating another pass over the list.  If this behavior is required, use the
+   * "Repeating" variant.  For the repeating variant, the caller must tell the iterator
+   * whether a change has been made that necessitates another pass.  Note that calling Next(true)
+   * does not affect the iteration order or short-circuit the current pass - it simply tells
+   * the iterator that once it has finished walking through the block list it should reset and
+   * do another full pass through the list.
    */
   class DataflowIterator {
     public:
       virtual ~DataflowIterator() {}
 
-      // Return the next BasicBlock* to visit.
-      BasicBlock* Next() {
-        DCHECK(!is_iterative_);
-        return NextBody(false);
-      }
-
-      /*
-       * Return the next BasicBlock* to visit, and tell the iterator whether any change
-       * has occurred that requires another full pass over the block list.
-       */
-      BasicBlock* Next(bool had_change) {
-        DCHECK(is_iterative_);
-        return NextBody(had_change);
-      }
-
     protected:
-      DataflowIterator(MIRGraph* mir_graph, bool is_iterative, int start_idx, int end_idx,
-                       bool reverse)
+      DataflowIterator(MIRGraph* mir_graph, int start_idx, int end_idx)
           : mir_graph_(mir_graph),
-            is_iterative_(is_iterative),
             start_idx_(start_idx),
             end_idx_(end_idx),
-            reverse_(reverse),
             block_id_list_(NULL),
             idx_(0),
             changed_(false) {}
 
-      virtual BasicBlock* NextBody(bool had_change) ALWAYS_INLINE;
+      virtual BasicBlock* ForwardSingleNext() ALWAYS_INLINE;
+      virtual BasicBlock* ReverseSingleNext() ALWAYS_INLINE;
+      virtual BasicBlock* ForwardRepeatNext(bool had_change) ALWAYS_INLINE;
+      virtual BasicBlock* ReverseRepeatNext(bool had_change) ALWAYS_INLINE;
 
       MIRGraph* const mir_graph_;
-      const bool is_iterative_;
       const int start_idx_;
       const int end_idx_;
-      const bool reverse_;
       GrowableArray<int>* block_id_list_;
       int idx_;
       bool changed_;
   };  // DataflowIterator
 
-  class ReachableNodesIterator : public DataflowIterator {
-    public:
-      ReachableNodesIterator(MIRGraph* mir_graph, bool is_iterative)
-          : DataflowIterator(mir_graph, is_iterative, 0,
-                             mir_graph->GetNumReachableBlocks(), false) {
-        idx_ = start_idx_;
-        block_id_list_ = mir_graph->GetDfsOrder();
-      }
-  };
-
   class PreOrderDfsIterator : public DataflowIterator {
     public:
-      PreOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
-          : DataflowIterator(mir_graph, is_iterative, 0,
-                             mir_graph->GetNumReachableBlocks(), false) {
+      explicit PreOrderDfsIterator(MIRGraph* mir_graph)
+          : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
         idx_ = start_idx_;
         block_id_list_ = mir_graph->GetDfsOrder();
       }
+
+      BasicBlock* Next() {
+        return ForwardSingleNext();
+      }
   };
 
-  class PostOrderDfsIterator : public DataflowIterator {
+  class RepeatingPreOrderDfsIterator : public DataflowIterator {
     public:
-      PostOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
-          : DataflowIterator(mir_graph, is_iterative, 0,
-                             mir_graph->GetNumReachableBlocks(), false) {
+      explicit RepeatingPreOrderDfsIterator(MIRGraph* mir_graph)
+          : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
+        idx_ = start_idx_;
+        block_id_list_ = mir_graph->GetDfsOrder();
+      }
+
+      BasicBlock* Next(bool had_change) {
+        return ForwardRepeatNext(had_change);
+      }
+  };
+
+  class RepeatingPostOrderDfsIterator : public DataflowIterator {
+    public:
+      explicit RepeatingPostOrderDfsIterator(MIRGraph* mir_graph)
+          : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
         idx_ = start_idx_;
         block_id_list_ = mir_graph->GetDfsPostOrder();
       }
+
+      BasicBlock* Next(bool had_change) {
+        return ForwardRepeatNext(had_change);
+      }
   };
 
   class ReversePostOrderDfsIterator : public DataflowIterator {
     public:
-      ReversePostOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
-          : DataflowIterator(mir_graph, is_iterative,
-                             mir_graph->GetNumReachableBlocks() -1, 0, true) {
+      explicit ReversePostOrderDfsIterator(MIRGraph* mir_graph)
+          : DataflowIterator(mir_graph, mir_graph->GetNumReachableBlocks() -1, 0) {
         idx_ = start_idx_;
         block_id_list_ = mir_graph->GetDfsPostOrder();
       }
+
+      BasicBlock* Next() {
+        return ReverseSingleNext();
+      }
+  };
+
+  class RepeatingReversePostOrderDfsIterator : public DataflowIterator {
+    public:
+      explicit RepeatingReversePostOrderDfsIterator(MIRGraph* mir_graph)
+          : DataflowIterator(mir_graph, mir_graph->GetNumReachableBlocks() -1, 0) {
+        idx_ = start_idx_;
+        block_id_list_ = mir_graph->GetDfsPostOrder();
+      }
+
+      BasicBlock* Next(bool had_change) {
+        return ReverseRepeatNext(had_change);
+      }
   };
 
   class PostOrderDOMIterator : public DataflowIterator {
     public:
-      PostOrderDOMIterator(MIRGraph* mir_graph, bool is_iterative)
-          : DataflowIterator(mir_graph, is_iterative, 0,
-                             mir_graph->GetNumReachableBlocks(), false) {
+      explicit PostOrderDOMIterator(MIRGraph* mir_graph)
+          : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
         idx_ = start_idx_;
         block_id_list_ = mir_graph->GetDomPostOrder();
       }
+
+      BasicBlock* Next() {
+        return ForwardSingleNext();
+      }
   };
 
   class AllNodesIterator : public DataflowIterator {
     public:
-      AllNodesIterator(MIRGraph* mir_graph, bool is_iterative)
-          : DataflowIterator(mir_graph, is_iterative, 0, 0, false) {
-        all_nodes_iterator_ =
-            new (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator(mir_graph->GetBlockList());
+      explicit AllNodesIterator(MIRGraph* mir_graph)
+          : DataflowIterator(mir_graph, 0, 0) {
+        all_nodes_iterator_ = new
+            (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator(mir_graph->GetBlockList());
       }
 
       void Reset() {
         all_nodes_iterator_->Reset();
       }
 
-      BasicBlock* NextBody(bool had_change) ALWAYS_INLINE;
+      BasicBlock* Next() ALWAYS_INLINE;
 
     private:
       GrowableArray<BasicBlock*>::Iterator* all_nodes_iterator_;
diff --git a/compiler/dex/growable_array.h b/compiler/dex/growable_array.h
index 8e2abfb..9053285 100644
--- a/compiler/dex/growable_array.h
+++ b/compiler/dex/growable_array.h
@@ -150,6 +150,11 @@
 
     size_t Size() const { return num_used_; }
 
+    void SetSize(size_t new_size) {
+      Resize(new_size);
+      num_used_ = new_size;
+    }
+
     T* GetRawStorage() const { return elem_list_; }
 
     static void* operator new(size_t size, ArenaAllocator* arena) {
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index d7a4136..8472a3c 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1061,7 +1061,7 @@
   memset(&stats, 0, sizeof(stats));
 
   ClearAllVisitedFlags();
-  AllNodesIterator iter(this, false /* not iterative */);
+  AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     AnalyzeBlock(bb, &stats);
   }
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 3a73717..addfd6b 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1287,7 +1287,7 @@
   if (cu_->disable_opt & (1 << kPromoteRegs)) {
     return;
   }
-  AllNodesIterator iter(this, false /* not iterative */);
+  AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     CountUses(bb);
   }
@@ -1331,7 +1331,7 @@
 
 void MIRGraph::VerifyDataflow() {
     /* Verify if all blocks are connected as claimed */
-  AllNodesIterator iter(this, false /* not iterative */);
+  AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     VerifyPredInfo(bb);
   }
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index b7611f8..05e428e 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -96,7 +96,7 @@
   is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
   constant_values_ = static_cast<int*>(arena_->Alloc(sizeof(int) * GetNumSSARegs(),
                                                      ArenaAllocator::kAllocDFInfo));
-  AllNodesIterator iter(this, false /* not iterative */);
+  AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     DoConstantPropogation(bb);
   }
@@ -762,11 +762,11 @@
 void MIRGraph::NullCheckElimination() {
   if (!(cu_->disable_opt & (1 << kNullCheckElimination))) {
     DCHECK(temp_ssa_register_v_ != NULL);
-    AllNodesIterator iter(this, false /* not iterative */);
+    AllNodesIterator iter(this);
     for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
       NullCheckEliminationInit(bb);
     }
-    PreOrderDfsIterator iter2(this, true /* iterative */);
+    RepeatingPreOrderDfsIterator iter2(this);
     bool change = false;
     for (BasicBlock* bb = iter2.Next(change); bb != NULL; bb = iter2.Next(change)) {
       change = EliminateNullChecks(bb);
@@ -778,7 +778,7 @@
 }
 
 void MIRGraph::BasicBlockCombine() {
-  PreOrderDfsIterator iter(this, false /* not iterative */);
+  PreOrderDfsIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     CombineBlocks(bb);
   }
@@ -791,7 +791,7 @@
   if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
     VerifyDataflow();
   }
-  AllNodesIterator iter(this, false /* not iterative */);
+  AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     LayoutBlocks(bb);
   }
@@ -804,7 +804,7 @@
   Checkstats* stats =
       static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), ArenaAllocator::kAllocDFInfo));
   checkstats_ = stats;
-  AllNodesIterator iter(this, false /* not iterative */);
+  AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     CountChecks(bb);
   }
@@ -858,7 +858,7 @@
   if (!(cu_->disable_opt & (1 << kBBOpt))) {
     DCHECK_EQ(cu_->num_compiler_temps, 0);
     ClearAllVisitedFlags();
-    PreOrderDfsIterator iter2(this, false /* not iterative */);
+    PreOrderDfsIterator iter2(this);
     for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
       BuildExtendedBBList(bb);
     }
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 7831cf6..2cf55e7 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -1877,7 +1877,7 @@
   CreateFunction();
 
   // Create an LLVM basic block for each MIR block in dfs preorder
-  PreOrderDfsIterator iter(mir_graph_, false /* not iterative */);
+  PreOrderDfsIterator iter(mir_graph_);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     CreateLLVMBasicBlock(bb);
   }
@@ -1909,7 +1909,7 @@
     }
   }
 
-  PreOrderDfsIterator iter2(mir_graph_, false /* not iterative */);
+  PreOrderDfsIterator iter2(mir_graph_);
   for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
     BlockBitcodeConversion(bb);
   }
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index e081c16..8c5949b 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -710,7 +710,6 @@
     }
     /* Pseudo opcodes don't consume space */
   }
-
   return offset;
 }
 
@@ -789,15 +788,15 @@
  */
 LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) {
   SafeMap<unsigned int, LIR*>::iterator it;
-  it = boundary_map_.find(vaddr);
-  if (it == boundary_map_.end()) {
+  LIR* boundary_lir = boundary_map_.Get(vaddr);
+  if (boundary_lir == NULL) {
     LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
   }
   LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
   new_label->dalvik_offset = vaddr;
   new_label->opcode = kPseudoCaseLabel;
   new_label->operands[0] = keyVal;
-  InsertLIRAfter(it->second, new_label);
+  InsertLIRAfter(boundary_lir, new_label);
   return new_label;
 }
 
@@ -889,7 +888,7 @@
  */
 LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str) {
   LIR* res = NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
-  if (boundary_map_.find(offset) == boundary_map_.end()) {
+  if (boundary_map_.Get(offset) == NULL) {
     boundary_map_.Put(offset, res);
   }
   return res;
@@ -947,6 +946,7 @@
       throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
       suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
       intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
+      boundary_map_(arena, 0, kGrowableArrayMisc),
       data_offset_(0),
       total_size_(0),
       block_label_list_(NULL),
@@ -963,6 +963,8 @@
   promotion_map_ = static_cast<PromotionMap*>
       (arena_->Alloc((cu_->num_dalvik_registers  + cu_->num_compiler_temps + 1) *
                       sizeof(promotion_map_[0]), ArenaAllocator::kAllocRegAlloc));
+  // Pre-fill with nulls.
+  boundary_map_.SetSize(cu->code_item->insns_size_in_code_units_);
 }
 
 void Mir2Lir::Materialize() {
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 630e990..41adb94 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -99,12 +99,11 @@
     int native_reg_id;
     if (cu_->instruction_set == kX86) {
       // If x86, location differs depending on whether memory/reg operation.
-      native_reg_id = (GetTargetInstFlags(this_lir->opcode) & IS_STORE) ? this_lir->operands[2]
-          : this_lir->operands[0];
+      native_reg_id = (target_flags & IS_STORE) ? this_lir->operands[2] : this_lir->operands[0];
     } else {
       native_reg_id = this_lir->operands[0];
     }
-    bool is_this_lir_load = GetTargetInstFlags(this_lir->opcode) & IS_LOAD;
+    bool is_this_lir_load = target_flags & IS_LOAD;
     LIR* check_lir;
     /* Use the mem mask to determine the rough memory location */
     uint64_t this_mem_mask = (this_lir->use_mask | this_lir->def_mask) & ENCODE_MEM;
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 440df2a..f9ec199 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -33,7 +33,12 @@
     p->def_end = NULL;
     if (p->pair) {
       p->pair = false;
-      Clobber(p->partner);
+      p = GetRegInfo(p->partner);
+      p->pair = false;
+      p->live = false;
+      p->s_reg = INVALID_SREG;
+      p->def_start = NULL;
+      p->def_end = NULL;
     }
   }
 }
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index c41feb1..7c79f59 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -706,16 +706,15 @@
   }
 
   // Free temp registers and reset redundant store tracking.
-  ResetRegPool();
-  ResetDefTracking();
-
   ClobberAllRegs();
 
   if (bb->block_type == kEntryBlock) {
+    ResetRegPool();
     int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
     GenEntrySequence(&mir_graph_->reg_location_[start_vreg],
                          mir_graph_->reg_location_[mir_graph_->GetMethodSReg()]);
   } else if (bb->block_type == kExitBlock) {
+    ResetRegPool();
     GenExitSequence();
   }
 
@@ -815,7 +814,7 @@
       static_cast<LIR*>(arena_->Alloc(sizeof(LIR) * mir_graph_->GetNumBlocks(),
                                       ArenaAllocator::kAllocLIR));
 
-  PreOrderDfsIterator iter(mir_graph_, false /* not iterative */);
+  PreOrderDfsIterator iter(mir_graph_);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     MethodBlockCodeGen(bb);
   }
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index a37ebd1..7e6d21a 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -727,7 +727,7 @@
     GrowableArray<LIR*> throw_launchpads_;
     GrowableArray<LIR*> suspend_launchpads_;
     GrowableArray<LIR*> intrinsic_launchpads_;
-    SafeMap<unsigned int, LIR*> boundary_map_;  // boundary lookup cache.
+    GrowableArray<LIR*> boundary_map_;
     /*
      * Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
      * Native PC is on the return address of the safepointed operation.  Dex PC is for
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 71b74a4..db110aa 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -379,7 +379,7 @@
   if (s_reg == -1)
     return NULL;
   for (int i = 0; i < num_regs; i++) {
-    if (p[i].live && (p[i].s_reg == s_reg)) {
+    if ((p[i].s_reg == s_reg) && p[i].live) {
       if (p[i].is_temp)
         p[i].in_use = true;
       return &p[i];
@@ -412,47 +412,16 @@
 }
 
 void Mir2Lir::FreeTemp(int reg) {
-  RegisterInfo* p = reg_pool_->core_regs;
-  int num_regs = reg_pool_->num_core_regs;
-  for (int i = 0; i< num_regs; i++) {
-    if (p[i].reg == reg) {
-      if (p[i].is_temp) {
-        p[i].in_use = false;
-      }
-      p[i].pair = false;
-      return;
-    }
+  RegisterInfo* p = GetRegInfo(reg);
+  if (p->is_temp) {
+    p->in_use = false;
   }
-  p = reg_pool_->FPRegs;
-  num_regs = reg_pool_->num_fp_regs;
-  for (int i = 0; i< num_regs; i++) {
-    if (p[i].reg == reg) {
-      if (p[i].is_temp) {
-        p[i].in_use = false;
-      }
-      p[i].pair = false;
-      return;
-    }
-  }
-  LOG(FATAL) << "Tried to free a non-existant temp: r" << reg;
+  p->pair = false;
 }
 
 Mir2Lir::RegisterInfo* Mir2Lir::IsLive(int reg) {
-  RegisterInfo* p = reg_pool_->core_regs;
-  int num_regs = reg_pool_->num_core_regs;
-  for (int i = 0; i< num_regs; i++) {
-    if (p[i].reg == reg) {
-      return p[i].live ? &p[i] : NULL;
-    }
-  }
-  p = reg_pool_->FPRegs;
-  num_regs = reg_pool_->num_fp_regs;
-  for (int i = 0; i< num_regs; i++) {
-    if (p[i].reg == reg) {
-      return p[i].live ? &p[i] : NULL;
-    }
-  }
-  return NULL;
+  RegisterInfo* p = GetRegInfo(reg);
+  return p->live ? p : NULL;
 }
 
 Mir2Lir::RegisterInfo* Mir2Lir::IsTemp(int reg) {
@@ -476,27 +445,10 @@
  * allocated.  Use with caution.
  */
 void Mir2Lir::LockTemp(int reg) {
-  RegisterInfo* p = reg_pool_->core_regs;
-  int num_regs = reg_pool_->num_core_regs;
-  for (int i = 0; i< num_regs; i++) {
-    if (p[i].reg == reg) {
-      DCHECK(p[i].is_temp);
-      p[i].in_use = true;
-      p[i].live = false;
-      return;
-    }
-  }
-  p = reg_pool_->FPRegs;
-  num_regs = reg_pool_->num_fp_regs;
-  for (int i = 0; i< num_regs; i++) {
-    if (p[i].reg == reg) {
-      DCHECK(p[i].is_temp);
-      p[i].in_use = true;
-      p[i].live = false;
-      return;
-    }
-  }
-  LOG(FATAL) << "Tried to lock a non-existant temp: r" << reg;
+  RegisterInfo* p = GetRegInfo(reg);
+  DCHECK(p->is_temp);
+  p->in_use = true;
+  p->live = false;
 }
 
 void Mir2Lir::ResetDef(int reg) {
@@ -599,11 +551,24 @@
 }
 
 void Mir2Lir::ClobberAllRegs() {
-  for (int i = 0; i< reg_pool_->num_core_regs; i++) {
-    ClobberBody(&reg_pool_->core_regs[i]);
+  RegisterInfo* p;
+  for (p = reg_pool_->core_regs; p < reg_pool_->core_regs + reg_pool_->num_core_regs; p++) {
+    if (p->is_temp) {
+      p->live = false;
+      p->s_reg = INVALID_SREG;
+      p->def_start = NULL;
+      p->def_end = NULL;
+      p->pair = false;
+    }
   }
-  for (int i = 0; i< reg_pool_->num_fp_regs; i++) {
-    ClobberBody(&reg_pool_->FPRegs[i]);
+  for (p = reg_pool_->FPRegs; p < reg_pool_->FPRegs + reg_pool_->num_fp_regs; p++) {
+    if (p->is_temp) {
+      p->live = false;
+      p->s_reg = INVALID_SREG;
+      p->def_start = NULL;
+      p->def_end = NULL;
+      p->pair = false;
+    }
   }
 }
 
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index cd1602f..366d7f2 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -22,7 +22,7 @@
 namespace art {
 
 void MIRGraph::ClearAllVisitedFlags() {
-  AllNodesIterator iter(this, false /* not iterative */);
+  AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     bb->visited = false;
   }
@@ -145,11 +145,11 @@
     def_block_matrix_[i] =
         new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapBMatrix);
   }
-  AllNodesIterator iter(this, false /* not iterative */);
+  AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     FindLocalLiveIn(bb);
   }
-  AllNodesIterator iter2(this, false /* not iterative */);
+  AllNodesIterator iter2(this);
   for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
     FillDefBlockMatrix(bb);
   }
@@ -377,7 +377,7 @@
   int num_total_blocks = GetBasicBlockListCount();
 
   /* Initialize domination-related data structures */
-  ReachableNodesIterator iter(this, false /* not iterative */);
+  PreOrderDfsIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     InitializeDominationInfo(bb);
   }
@@ -396,7 +396,7 @@
   i_dom_list_[GetEntryBlock()->dfs_id] = GetEntryBlock()->dfs_id;
 
   /* Compute the immediate dominators */
-  ReversePostOrderDfsIterator iter2(this, true /* iterative */);
+  RepeatingReversePostOrderDfsIterator iter2(this);
   bool change = false;
   for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
     change = ComputeblockIDom(bb);
@@ -414,19 +414,19 @@
   }
   GetEntryBlock()->i_dom = NULL;
 
-  ReachableNodesIterator iter3(this, false /* not iterative */);
+  PreOrderDfsIterator iter3(this);
   for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
     SetDominators(bb);
   }
 
-  ReversePostOrderDfsIterator iter4(this, false /* not iterative */);
+  ReversePostOrderDfsIterator iter4(this);
   for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
     ComputeBlockDominators(bb);
   }
 
   // Compute the dominance frontier for each block.
   ComputeDomPostOrderTraversal(GetEntryBlock());
-  PostOrderDOMIterator iter5(this, false /* not iterative */);
+  PostOrderDOMIterator iter5(this);
   for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
     ComputeDominanceFrontier(bb);
   }
@@ -503,7 +503,7 @@
   temp_dalvik_register_v_ =
       new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapRegisterV);
 
-  PostOrderDfsIterator iter(this, true /* iterative */);
+  RepeatingPostOrderDfsIterator iter(this);
   bool change = false;
   for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
     change = ComputeBlockLiveIns(bb);
@@ -700,7 +700,7 @@
       new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false, kBitMapTempSSARegisterV);
 
   /* Insert phi-operands with latest SSA names from predecessor blocks */
-  ReachableNodesIterator iter2(this, false /* not iterative */);
+  PreOrderDfsIterator iter2(this);
   for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
     InsertPhiNodeOperands(bb);
   }
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 07f37bb..2551068 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -444,7 +444,7 @@
   }
 
   /* Do type & size inference pass */
-  PreOrderDfsIterator iter(this, true /* iterative */);
+  RepeatingPreOrderDfsIterator iter(this);
   bool change = false;
   for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
     change = InferTypeAndSize(bb);