Enable compiler temporaries

Compiler temporaries are a facility that provides virtual-register-sized space
for intermediate values during MIR transformations. They receive explicit space
in managed frames so that they have a home location in case they need to be
spilled. The facility also supports "special" temporaries, which have a
specific semantic purpose and whose location in the frame must be tracked.
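
For example, a MIR pass can request temps through the new MIRGraph API (a
minimal sketch; the surrounding pass code and the mir_graph pointer are
hypothetical):

    // An ordinary temp; returns null if the non-special budget is exhausted.
    CompilerTemp* wide_temp = mir_graph->GetNewCompilerTemp(kCompilerTempVR, true /* wide */);

    // A special temp, e.g. the Method* holder, gets a fixed virtual register.
    CompilerTemp* method_ptr =
        mir_graph->GetNewCompilerTemp(kCompilerTempSpecialMethodPtr, false /* not wide */);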

Compiler temporaries are treated the same way as virtual registers, so
MIR-level transformations do not need special logic for them. However, the
generated code needs to know the stack layout so that it can distinguish
between home locations.
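
Concretely, a temp's negative v_reg maps to a slot past the Dalvik registers
when its home location is looked up (mirroring the ralloc_util.cc hunk below;
v_reg and cu_ are assumed in scope as they are there):

    // Temps use negative v_regs starting at kVRegTempBaseReg and grow
    // downward, so their slots follow the Dalvik registers.
    unsigned int position = std::abs(v_reg) - std::abs(static_cast<int>(kVRegTempBaseReg));
    int pmap_index = cu_->num_dalvik_registers + position;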

MIRGraph now provides an interface for dealing with compiler temporaries. This
interface allows allocation of both wide and non-wide virtual register
temporaries.
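
The backend advertises its temp budget to the graph once the code generator
exists (a condensed sketch of the handshake in the frontend.cc hunk below):

    size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps();
    CHECK(cu.mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps));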

The information about how temporaries are kept on the stack has been moved to
stack.h. This was necessary because the stack layout depends on where the
temporaries are placed.
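
The names used below suggest a base-register encoding along these lines (the
concrete values here are an assumption, not part of this patch; only their
relative order is implied, since mir_graph.cc derives the special-temp count
as abs(kVRegNonSpecialTempBaseReg) - abs(kVRegTempBaseReg)):

    enum VRegBaseRegNum {
      kVRegMethodPtrBaseReg = -2,       // hypothetical: home of the special Method* temp
      kVRegTempBaseReg = -2,            // hypothetical: first temp base reg; temps grow downward
      kVRegNonSpecialTempBaseReg = -3,  // hypothetical: first non-special temp base reg
    };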

Change-Id: Iba5cf095b32feb00d3f648db112a00209c8e5f55
Signed-off-by: Razvan A Lupusoru <razvan.a.lupusoru@intel.com>
diff --git a/compiler/dex/bb_optimizations.cc b/compiler/dex/bb_optimizations.cc
index b6716e6..2ab6252 100644
--- a/compiler/dex/bb_optimizations.cc
+++ b/compiler/dex/bb_optimizations.cc
@@ -108,8 +108,6 @@
  * BasicBlock Optimization pass implementation start.
  */
 void BBOptimizations::Start(CompilationUnit* cUnit) const {
-  DCHECK_EQ(cUnit->num_compiler_temps, 0);
-
   /*
    * This pass has a different ordering depending on the suppress exception,
    * so do the pass here for now:
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index 3798b45..32fd79b 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -81,9 +81,6 @@
   uint16_t num_outs;
   uint16_t num_regs;            // Unlike num_dalvik_registers, does not include ins.
 
-  // TODO: may want to move this to MIRGraph.
-  uint16_t num_compiler_temps;
-
   // If non-empty, apply optimizer/debug flags only to matching methods.
   std::string compiler_method_match;
   // Flips sense of compiler_method_match - apply flags if doesn't match.
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index f5bb85a..c2016d0 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -130,7 +130,6 @@
     num_ins(0),
     num_outs(0),
     num_regs(0),
-    num_compiler_temps(0),
     compiler_flip_match(false),
     arena(pool),
     mir_graph(NULL),
@@ -236,6 +235,43 @@
   cu.StartTimingSplit("BuildMIRGraph");
   cu.mir_graph.reset(new MIRGraph(&cu, &cu.arena));
 
+  /*
+   * After creation of the MIR graph, also create the code generator.
+   * We do this because optimizations on the MIR graph may need information
+   * that is only available if a CG exists.
+   */
+#if defined(ART_USE_PORTABLE_COMPILER)
+  if (compiler_backend == kPortable) {
+    cu.cg.reset(PortableCodeGenerator(&cu, cu.mir_graph.get(), &cu.arena, llvm_compilation_unit));
+  } else {
+#endif
+    Mir2Lir* mir_to_lir = nullptr;
+    switch (compiler.GetInstructionSet()) {
+      case kThumb2:
+        mir_to_lir = ArmCodeGenerator(&cu, cu.mir_graph.get(), &cu.arena);
+        break;
+      case kMips:
+        mir_to_lir = MipsCodeGenerator(&cu, cu.mir_graph.get(), &cu.arena);
+        break;
+      case kX86:
+        mir_to_lir = X86CodeGenerator(&cu, cu.mir_graph.get(), &cu.arena);
+        break;
+      default:
+        LOG(FATAL) << "Unexpected instruction set: " << compiler.GetInstructionSet();
+    }
+
+    cu.cg.reset(mir_to_lir);
+
+    /* The number of compiler temporaries depends on the backend, so set it up now if possible. */
+    if (mir_to_lir) {
+      size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps();
+      bool set_max = cu.mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps);
+      CHECK(set_max);
+    }
+#if defined(ART_USE_PORTABLE_COMPILER)
+  }
+#endif
+
   /* Gathering opcode stats? */
   if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
     cu.mir_graph->EnableOpcodeCounting();
@@ -269,28 +305,6 @@
 
   CompiledMethod* result = NULL;
 
-#if defined(ART_USE_PORTABLE_COMPILER)
-  if (compiler_backend == kPortable) {
-    cu.cg.reset(PortableCodeGenerator(&cu, cu.mir_graph.get(), &cu.arena, llvm_compilation_unit));
-  } else {
-#endif
-    switch (compiler.GetInstructionSet()) {
-      case kThumb2:
-        cu.cg.reset(ArmCodeGenerator(&cu, cu.mir_graph.get(), &cu.arena));
-        break;
-      case kMips:
-        cu.cg.reset(MipsCodeGenerator(&cu, cu.mir_graph.get(), &cu.arena));
-        break;
-      case kX86:
-        cu.cg.reset(X86CodeGenerator(&cu, cu.mir_graph.get(), &cu.arena));
-        break;
-      default:
-        LOG(FATAL) << "Unexpected instruction set: " << compiler.GetInstructionSet();
-    }
-#if defined(ART_USE_PORTABLE_COMPILER)
-  }
-#endif
-
   cu.cg->Materialize();
 
   cu.NewTimingSplit("Dedupe");  /* deduping takes up the vast majority of time in GetCompiledMethod(). */
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index c235448..9680450 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1158,8 +1158,8 @@
     ssa_last_defs_[i] = 0;
   }
 
-  /* Add ssa reg for Method* */
-  method_sreg_ = AddNewSReg(SSA_METHOD_BASEREG);
+  // Create a compiler temporary for Method*. This is done after SSA initialization.
+  GetNewCompilerTemp(kCompilerTempSpecialMethodPtr, false);
 
   /*
    * Allocate the BasicBlockDataFlow structure for the entry and code blocks
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 9dbb341..10bcdb9 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -46,7 +46,6 @@
 
 MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
     : reg_location_(NULL),
-      compiler_temps_(arena, 6, kGrowableArrayMisc),
       cu_(cu),
       ssa_base_vregs_(NULL),
       ssa_subscripts_(NULL),
@@ -82,8 +81,13 @@
       checkstats_(NULL),
       arena_(arena),
       backward_branches_(0),
-      forward_branches_(0) {
+      forward_branches_(0),
+      compiler_temps_(arena, 6, kGrowableArrayMisc),
+      num_non_special_compiler_temps_(0),
+      max_available_non_special_compiler_temps_(0) {
   try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
+  max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
+      - std::abs(static_cast<int>(kVRegTempBaseReg));
 }
 
 MIRGraph::~MIRGraph() {
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 4666d1e..debcb5c 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -168,11 +168,6 @@
 #define INVALID_REG (0xFF)
 #define INVALID_OFFSET (0xDEADF00FU)
 
-/* SSA encodings for special registers */
-#define SSA_METHOD_BASEREG (-2)
-/* First compiler temp basereg, grows smaller */
-#define SSA_CTEMP_BASEREG (SSA_METHOD_BASEREG - 1)
-
 #define MIR_IGNORE_NULL_CHECK           (1 << kMIRIgnoreNullCheck)
 #define MIR_NULL_CHECK_ONLY             (1 << kMIRNullCheckOnly)
 #define MIR_IGNORE_RANGE_CHECK          (1 << kMIRIgnoreRangeCheck)
@@ -195,7 +190,13 @@
  * name of compiler-introduced temporaries.
  */
 struct CompilerTemp {
-  int32_t s_reg;
+  int32_t v_reg;      // Virtual register number for temporary.
+  int32_t s_reg_low;  // SSA name for low Dalvik word.
+};
+
+enum CompilerTempType {
+  kCompilerTempVR,                // A virtual register temporary.
+  kCompilerTempSpecialMethodPtr,  // Temporary that keeps track of current method pointer.
 };
 
 // When debug option enabled, records effectiveness of null and range check elimination.
@@ -571,10 +572,76 @@
     return bad_loc;
   }
 
-  int GetMethodSReg() {
+  int GetMethodSReg() const {
     return method_sreg_;
   }
 
+  /**
+   * @brief Used to obtain the number of compiler temporaries being used.
+   * @return Returns the number of compiler temporaries.
+   */
+  size_t GetNumUsedCompilerTemps() const {
+    size_t total_num_temps = compiler_temps_.Size();
+    DCHECK_LE(num_non_special_compiler_temps_, total_num_temps);
+    return total_num_temps;
+  }
+
+  /**
+   * @brief Used to obtain the number of non-special compiler temporaries being used.
+   * @return Returns the number of non-special compiler temporaries.
+   */
+  size_t GetNumNonSpecialCompilerTemps() const {
+    return num_non_special_compiler_temps_;
+  }
+
+  /**
+   * @brief Used to set the total number of available non-special compiler temporaries.
+   * @details Fails to set the new max if more temps are already in use than new_max allows.
+   * @param new_max The new maximum number of non-special compiler temporaries.
+   * @return Returns true if the max was set and false if failed to set.
+   */
+  bool SetMaxAvailableNonSpecialCompilerTemps(size_t new_max) {
+    if (new_max < GetNumNonSpecialCompilerTemps()) {
+      return false;
+    } else {
+      max_available_non_special_compiler_temps_ = new_max;
+      return true;
+    }
+  }
+
+  /**
+   * @brief Provides the number of non-special compiler temps available.
+   * @details Even if this returns zero, special compiler temps are guaranteed to be available.
+   * @return Returns the number of available temps.
+   */
+  size_t GetNumAvailableNonSpecialCompilerTemps();
+
+  /**
+   * @brief Used to obtain an existing compiler temporary.
+   * @param index The index of the temporary which must be strictly less than the
+   * number of temporaries.
+   * @return Returns the temporary that was asked for.
+   */
+  CompilerTemp* GetCompilerTemp(size_t index) const {
+    return compiler_temps_.Get(index);
+  }
+
+  /**
+   * @brief Used to obtain the maximum number of compiler temporaries that can be requested.
+   * @return Returns the maximum number of compiler temporaries, whether used or not.
+   */
+  size_t GetMaxPossibleCompilerTemps() const {
+    return max_available_special_compiler_temps_ + max_available_non_special_compiler_temps_;
+  }
+
+  /**
+   * @brief Used to obtain a new unique compiler temporary.
+   * @param ct_type Type of compiler temporary requested.
+   * @param wide Whether we should allocate a wide temporary.
+   * @return Returns the newly created compiler temporary.
+   */
+  CompilerTemp* GetNewCompilerTemp(CompilerTempType ct_type, bool wide);
+
   bool MethodIsLeaf() {
     return attributes_ & METHOD_IS_LEAF;
   }
@@ -727,7 +794,6 @@
 
   // TODO: make these private.
   RegLocation* reg_location_;                         // Map SSA names to location.
-  GrowableArray<CompilerTemp*> compiler_temps_;
   SafeMap<unsigned int, unsigned int> block_id_map_;  // Block collapse lookup cache.
 
   static const uint64_t oat_data_flow_attributes_[kMirOpLast];
@@ -836,6 +902,10 @@
   ArenaAllocator* arena_;
   int backward_branches_;
   int forward_branches_;
+  GrowableArray<CompilerTemp*> compiler_temps_;
+  size_t num_non_special_compiler_temps_;
+  size_t max_available_non_special_compiler_temps_;
+  size_t max_available_special_compiler_temps_;
 };
 
 }  // namespace art
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 0d53d4c..209ed3d 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -199,13 +199,94 @@
   return raw_use_counts_.Get(s_reg);
 }
 
+size_t MIRGraph::GetNumAvailableNonSpecialCompilerTemps() {
+  if (num_non_special_compiler_temps_ >= max_available_non_special_compiler_temps_) {
+    return 0;
+  } else {
+    return max_available_non_special_compiler_temps_ - num_non_special_compiler_temps_;
+  }
+}
+
+static const RegLocation temp_loc = {kLocCompilerTemp,
+                                     0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/,
+                                     kVectorNotUsed, INVALID_REG, INVALID_REG, INVALID_SREG,
+                                     INVALID_SREG};
+
+CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide) {
+  // There is a limit to the number of non-special temps so check to make sure it wasn't exceeded.
+  if (ct_type == kCompilerTempVR) {
+    size_t available_temps = GetNumAvailableNonSpecialCompilerTemps();
+    if (available_temps == 0 || (wide && available_temps < 2)) {
+      return nullptr;
+    }
+  }
+
+  CompilerTemp* compiler_temp = static_cast<CompilerTemp*>(arena_->Alloc(sizeof(CompilerTemp),
+                                                           ArenaAllocator::kAllocRegAlloc));
+
+  // Create the type of temp requested. Special temps need special handling because
+  // they have a specific virtual register assignment.
+  if (ct_type == kCompilerTempSpecialMethodPtr) {
+    DCHECK_EQ(wide, false);
+    compiler_temp->v_reg = static_cast<int>(kVRegMethodPtrBaseReg);
+    compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
+
+    // The MIR graph keeps track of the sreg for the method pointer specially, so record that now.
+    method_sreg_ = compiler_temp->s_reg_low;
+  } else {
+    DCHECK_EQ(ct_type, kCompilerTempVR);
+
+    // The new non-special compiler temp must receive a unique v_reg with a negative value.
+    compiler_temp->v_reg = static_cast<int>(kVRegNonSpecialTempBaseReg) - num_non_special_compiler_temps_;
+    compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
+    num_non_special_compiler_temps_++;
+
+    if (wide) {
+      // Ensure that the two registers are consecutive. Since the virtual registers used for
+      // temps grow downward, the smaller one must be the low part; redefine v_reg and s_reg_low.
+      compiler_temp->v_reg--;
+      int ssa_reg_high = compiler_temp->s_reg_low;
+      compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
+      int ssa_reg_low = compiler_temp->s_reg_low;
+
+      // If needed, initialize the register location for the high part.
+      // The low part is handled later in this method on a common path.
+      if (reg_location_ != nullptr) {
+        reg_location_[ssa_reg_high] = temp_loc;
+        reg_location_[ssa_reg_high].high_word = 1;
+        reg_location_[ssa_reg_high].s_reg_low = ssa_reg_low;
+        reg_location_[ssa_reg_high].wide = true;
+
+        // A new SSA needs new use counts.
+        use_counts_.Insert(0);
+        raw_use_counts_.Insert(0);
+      }
+
+      num_non_special_compiler_temps_++;
+    }
+  }
+
+  // Have we already allocated the register locations?
+  if (reg_location_ != nullptr) {
+    int ssa_reg_low = compiler_temp->s_reg_low;
+    reg_location_[ssa_reg_low] = temp_loc;
+    reg_location_[ssa_reg_low].s_reg_low = ssa_reg_low;
+    reg_location_[ssa_reg_low].wide = wide;
+
+    // A new SSA needs new use counts.
+    use_counts_.Insert(0);
+    raw_use_counts_.Insert(0);
+  }
+
+  compiler_temps_.Insert(compiler_temp);
+  return compiler_temp;
+}
 
 /* Do some MIR-level extended basic block optimizations */
 bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
   if (bb->block_type == kDead) {
     return true;
   }
-  int num_temps = 0;
   bool use_lvn = bb->use_lvn;
   UniquePtr<LocalValueNumbering> local_valnum;
   if (use_lvn) {
@@ -468,9 +549,6 @@
     bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
   }
 
-  if (num_temps > cu_->num_compiler_temps) {
-    cu_->num_compiler_temps = num_temps;
-  }
   return true;
 }
 
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 70b660b..e6cc2de 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -1522,7 +1522,7 @@
   reg_info.push_back(irb_->getInt32(cu_->num_ins));
   reg_info.push_back(irb_->getInt32(cu_->num_regs));
   reg_info.push_back(irb_->getInt32(cu_->num_outs));
-  reg_info.push_back(irb_->getInt32(cu_->num_compiler_temps));
+  reg_info.push_back(irb_->getInt32(mir_graph_->GetNumUsedCompilerTemps()));
   reg_info.push_back(irb_->getInt32(mir_graph_->GetNumSSARegs()));
   ::llvm::MDNode* reg_info_node = ::llvm::MDNode::get(*context_, reg_info);
   inst->setMetadata("RegInfo", reg_info_node);
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 7f19ea1..ec5fedd 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -252,7 +252,7 @@
 }
 
 void Mir2Lir::DumpPromotionMap() {
-  int num_regs = cu_->num_dalvik_registers + cu_->num_compiler_temps + 1;
+  int num_regs = cu_->num_dalvik_registers + mir_graph_->GetNumUsedCompilerTemps();
   for (int i = 0; i < num_regs; i++) {
     PromotionMap v_reg_map = promotion_map_[i];
     std::string buf;
@@ -289,7 +289,7 @@
   LOG(INFO) << "Outs         : " << cu_->num_outs;
   LOG(INFO) << "CoreSpills       : " << num_core_spills_;
   LOG(INFO) << "FPSpills       : " << num_fp_spills_;
-  LOG(INFO) << "CompilerTemps    : " << cu_->num_compiler_temps;
+  LOG(INFO) << "CompilerTemps    : " << mir_graph_->GetNumUsedCompilerTemps();
   LOG(INFO) << "Frame size       : " << frame_size_;
   LOG(INFO) << "code size is " << total_size_ <<
     " bytes, Dalvik size is " << insns_size * 2;
@@ -1004,7 +1004,7 @@
       first_lir_insn_(NULL),
       last_lir_insn_(NULL) {
   promotion_map_ = static_cast<PromotionMap*>
-      (arena_->Alloc((cu_->num_dalvik_registers  + cu_->num_compiler_temps + 1) *
+      (arena_->Alloc((cu_->num_dalvik_registers + mir_graph_->GetNumUsedCompilerTemps()) *
                       sizeof(promotion_map_[0]), ArenaAllocator::kAllocRegAlloc));
   // Reserve pointer id 0 for NULL.
   size_t null_idx = WrapPointer(NULL);
@@ -1081,13 +1081,27 @@
   return result;
 }
 
+size_t Mir2Lir::GetMaxPossibleCompilerTemps() const {
+  // Choose a reasonably small value in order to contain stack growth.
+  // Backends that are smarter about spill region can return larger values.
+  const size_t max_compiler_temps = 10;
+  return max_compiler_temps;
+}
+
+size_t Mir2Lir::GetNumBytesForCompilerTempSpillRegion() {
+  // By default, assume that the Mir2Lir will need one slot for each temporary.
+  // If the backend can better determine temps that have non-overlapping ranges and
+  // temps that do not need to be spilled, it can provide a smaller region.
+  return (mir_graph_->GetNumUsedCompilerTemps() * sizeof(uint32_t));
+}
+
 int Mir2Lir::ComputeFrameSize() {
   /* Figure out the frame size */
   static const uint32_t kAlignMask = kStackAlignment - 1;
-  uint32_t size = (num_core_spills_ + num_fp_spills_ +
-                   1 /* filler word */ + cu_->num_regs + cu_->num_outs +
-                   cu_->num_compiler_temps + 1 /* cur_method* */)
-                   * sizeof(uint32_t);
+  uint32_t size = ((num_core_spills_ + num_fp_spills_ +
+                   1 /* filler word */ + cu_->num_regs + cu_->num_outs)
+                   * sizeof(uint32_t)) +
+                   GetNumBytesForCompilerTempSpillRegion();
   /* Align and set */
   return (size + kAlignMask) & ~(kAlignMask);
 }
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index bcd0eb1..35d0831 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -310,6 +310,19 @@
     void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
     void InsertLIRAfter(LIR* current_lir, LIR* new_lir);
 
+    /**
+     * @brief Provides the maximum number of compiler temporaries that the backend is able
+     * (or willing) to place in a frame.
+     * @return Returns the maximum number of compiler temporaries.
+     */
+    size_t GetMaxPossibleCompilerTemps() const;
+
+    /**
+     * @brief Provides the number of bytes needed in the frame for spilling of compiler temporaries.
+     * @return Returns the size in bytes of the compiler temporary spill region.
+     */
+    size_t GetNumBytesForCompilerTempSpillRegion();
+
     int ComputeFrameSize();
     virtual void Materialize();
     virtual CompiledMethod* GetCompiledMethod();
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 32c22f2..6d69512 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -132,9 +132,15 @@
     DCHECK_LT(v_reg, cu_->num_dalvik_registers);
     return v_reg;
   } else {
-    int pos = std::abs(v_reg) - std::abs(SSA_METHOD_BASEREG);
-    DCHECK_LE(pos, cu_->num_compiler_temps);
-    return cu_->num_dalvik_registers + pos;
+    /*
+     * It must be the case that the v_reg for a temporary is less than or equal to the
+     * base reg for temps. For that reason, "position" must be zero or positive.
+     */
+    unsigned int position = std::abs(v_reg) - std::abs(static_cast<int>(kVRegTempBaseReg));
+
+    // The temporaries are placed after the Dalvik registers in the promotion map.
+    DCHECK_LT(position, mir_graph_->GetNumUsedCompilerTemps());
+    return cu_->num_dalvik_registers + position;
   }
 }
 
@@ -897,9 +903,8 @@
  * optimization is disabled.
  */
 void Mir2Lir::DoPromotion() {
-  int reg_bias = cu_->num_compiler_temps + 1;
   int dalvik_regs = cu_->num_dalvik_registers;
-  int num_regs = dalvik_regs + reg_bias;
+  int num_regs = dalvik_regs + mir_graph_->GetNumUsedCompilerTemps();
   const int promotion_threshold = 1;
 
   // Allow target code to add any special registers
@@ -926,16 +931,13 @@
   for (int i = 0; i < dalvik_regs; i++) {
     core_regs[i].s_reg = FpRegs[i].s_reg = i;
   }
-  // Set ssa name for Method*
-  core_regs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg();
-  FpRegs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg();  // For consistecy.
-  FpRegs[dalvik_regs + num_regs].s_reg = mir_graph_->GetMethodSReg();  // for consistency.
-  // Set ssa names for compiler_temps
-  for (int i = 1; i <= cu_->num_compiler_temps; i++) {
-    CompilerTemp* ct = mir_graph_->compiler_temps_.Get(i);
-    core_regs[dalvik_regs + i].s_reg = ct->s_reg;
-    FpRegs[dalvik_regs + i].s_reg = ct->s_reg;
-    FpRegs[num_regs + dalvik_regs + i].s_reg = ct->s_reg;
+
+  // Set ssa names for compiler temporaries
+  for (unsigned int ct_idx = 0; ct_idx < mir_graph_->GetNumUsedCompilerTemps(); ct_idx++) {
+    CompilerTemp* ct = mir_graph_->GetCompilerTemp(ct_idx);
+    core_regs[dalvik_regs + ct_idx].s_reg = ct->s_reg_low;
+    FpRegs[dalvik_regs + ct_idx].s_reg = ct->s_reg_low;
+    FpRegs[num_regs + dalvik_regs + ct_idx].s_reg = ct->s_reg_low;
   }
 
   // Duplicate in upper half to represent possible fp double starting sregs.
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index f211e3f..f8dc223 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -410,7 +410,8 @@
 
 void MIRGraph::InitRegLocations() {
   /* Allocate the location map */
-  RegLocation* loc = static_cast<RegLocation*>(arena_->Alloc(GetNumSSARegs() * sizeof(*loc),
+  int max_regs = GetNumSSARegs() + GetMaxPossibleCompilerTemps();
+  RegLocation* loc = static_cast<RegLocation*>(arena_->Alloc(max_regs * sizeof(*loc),
                                                              ArenaAllocator::kAllocRegAlloc));
   for (int i = 0; i < GetNumSSARegs(); i++) {
     loc[i] = fresh_loc;
@@ -418,13 +419,11 @@
     loc[i].is_const = is_constant_v_->IsBitSet(i);
   }
 
-  /* Patch up the locations for Method* and the compiler temps */
-  loc[method_sreg_].location = kLocCompilerTemp;
-  loc[method_sreg_].defined = true;
-  for (int i = 0; i < cu_->num_compiler_temps; i++) {
-    CompilerTemp* ct = compiler_temps_.Get(i);
-    loc[ct->s_reg].location = kLocCompilerTemp;
-    loc[ct->s_reg].defined = true;
+  /* Patch up the locations for the compiler temps */
+  GrowableArray<CompilerTemp*>::Iterator iter(&compiler_temps_);
+  for (CompilerTemp* ct = iter.Next(); ct != NULL; ct = iter.Next()) {
+    loc[ct->s_reg_low].location = kLocCompilerTemp;
+    loc[ct->s_reg_low].defined = true;
   }
 
   reg_location_ = loc;