Clean up MIRGraph pass temporaries.

Replace the shared temporaries with generic names by a union of
pass-specific structs, so that each optimization pass accesses its
temporaries through descriptive, pass-local field names.
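
As a minimal standalone sketch of the idiom (hypothetical names only,
not the actual MIRGraph declaration; see mir_graph.h below for the
real one): the union shares storage across passes while each pass sees
its own fields. A union member is not zero-initialized implicitly,
hence the memset in the constructor, mirroring the MIRGraph change:

  #include <cstddef>
  #include <cstring>

  class Graph {
   public:
    Graph() {
      // Clear all pass views at once; the union is not zero-initialized.
      std::memset(&temp_, 0, sizeof(temp_));
    }
    void NullCheckEliminationStart(std::size_t num_vregs) {
      // Only the currently running pass's view of the union is valid.
      temp_.nce.num_vregs = num_vregs;
    }
    void NullCheckEliminationEnd() {
      // Reset before another pass reuses the shared storage.
      temp_.nce.num_vregs = 0u;
    }
   private:
    union {
      struct { std::size_t num_vregs; } nce;    // Null check elimination.
      struct { std::size_t num_indexes; } smi;  // Special method inlining.
    } temp_;
  };

  int main() {
    Graph graph;
    graph.NullCheckEliminationStart(16u);
    graph.NullCheckEliminationEnd();
    return 0;
  }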

Change-Id: Id80d3b12c48139af1580b0839c21e07e7afd0ed5
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index b87ab66..82a2408 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -97,11 +97,6 @@
       max_nested_loops_(0u),
       i_dom_list_(NULL),
       temp_scoped_alloc_(),
-      temp_insn_data_(nullptr),
-      temp_bit_vector_size_(0u),
-      temp_bit_vector_(nullptr),
-      temp_bit_matrix_(nullptr),
-      temp_gvn_(),
       block_list_(arena->Adapter(kArenaAllocBBList)),
       try_block_addr_(NULL),
       entry_block_(NULL),
@@ -133,6 +128,7 @@
       sfield_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
       method_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
       gen_suspend_test_list_(arena->Adapter()) {
+  memset(&temp_, 0, sizeof(temp_));
   use_counts_.reserve(256);
   raw_use_counts_.reserve(256);
   block_list_.reserve(100);
@@ -1681,9 +1677,9 @@
 void MIRGraph::SSATransformationStart() {
   DCHECK(temp_scoped_alloc_.get() == nullptr);
   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-  temp_bit_vector_size_ = GetNumOfCodeAndTempVRs();
-  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapRegisterV);
+  temp_.ssa.num_vregs = GetNumOfCodeAndTempVRs();
+  temp_.ssa.work_live_vregs = new (temp_scoped_alloc_.get()) ArenaBitVector(
+      temp_scoped_alloc_.get(), temp_.ssa.num_vregs, false, kBitMapRegisterV);
 }
 
 void MIRGraph::SSATransformationEnd() {
@@ -1692,9 +1688,9 @@
     VerifyDataflow();
   }
 
-  temp_bit_vector_size_ = 0u;
-  temp_bit_vector_ = nullptr;
-  temp_bit_matrix_ = nullptr;  // Def block matrix.
+  temp_.ssa.num_vregs = 0u;
+  temp_.ssa.work_live_vregs = nullptr;
+  temp_.ssa.def_block_matrix = nullptr;
   DCHECK(temp_scoped_alloc_.get() != nullptr);
   temp_scoped_alloc_.reset();
 
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index a1d24e2..d77ad6f 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -1270,15 +1270,38 @@
   size_t max_nested_loops_;
   int* i_dom_list_;
   std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
-  uint16_t* temp_insn_data_;
-  uint32_t temp_bit_vector_size_;
-  ArenaBitVector* temp_bit_vector_;
-  // temp_bit_matrix_ used as one of
-  //   - def_block_matrix: original num registers x num_blocks_,
-  //   - ending_null_check_matrix: num_blocks_ x original num registers,
-  //   - ending_clinit_check_matrix: num_blocks_ x unique class count.
-  ArenaBitVector** temp_bit_matrix_;
-  std::unique_ptr<GlobalValueNumbering> temp_gvn_;
+  // Union of temporaries used by different passes.
+  union {
+    // Class init check elimination.
+    struct {
+      size_t num_class_bits;  // 2 bits per class: class initialized and class in dex cache.
+      ArenaBitVector* work_classes_to_check;
+      ArenaBitVector** ending_classes_to_check_matrix;  // num_blocks_ x num_class_bits.
+      uint16_t* indexes;
+    } cice;
+    // Null check elimination.
+    struct {
+      size_t num_vregs;
+      ArenaBitVector* work_vregs_to_check;
+      ArenaBitVector** ending_vregs_to_check_matrix;  // num_blocks_ x num_vregs.
+    } nce;
+    // Special method inlining.
+    struct {
+      size_t num_indexes;
+      ArenaBitVector* processed_indexes;
+      uint16_t* lowering_infos;
+    } smi;
+    // SSA transformation.
+    struct {
+      size_t num_vregs;
+      ArenaBitVector* work_live_vregs;
+      ArenaBitVector** def_block_matrix;  // num_vregs x num_blocks_.
+    } ssa;
+    // Global value numbering.
+    struct {
+      GlobalValueNumbering* gvn;
+    } gvn;
+  } temp_;
   static const int kInvalidEntry = -1;
   ArenaVector<BasicBlock*> block_list_;
   ArenaBitVector* try_block_addr_;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index a0ad213..d025d08 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -891,12 +891,12 @@
 
   DCHECK(temp_scoped_alloc_.get() == nullptr);
   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-  temp_bit_vector_size_ = GetNumOfCodeVRs();
-  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
-  temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+  temp_.nce.num_vregs = GetNumOfCodeVRs();
+  temp_.nce.work_vregs_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
+      temp_scoped_alloc_.get(), temp_.nce.num_vregs, false, kBitMapNullCheck);
+  temp_.nce.ending_vregs_to_check_matrix = static_cast<ArenaBitVector**>(
       temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * GetNumBlocks(), kArenaAllocMisc));
-  std::fill_n(temp_bit_matrix_, GetNumBlocks(), nullptr);
+  std::fill_n(temp_.nce.ending_vregs_to_check_matrix, GetNumBlocks(), nullptr);
 
   // reset MIR_MARK
   AllNodesIterator iter(this);
@@ -919,7 +919,7 @@
     return false;
   }
 
-  ArenaBitVector* vregs_to_check = temp_bit_vector_;
+  ArenaBitVector* vregs_to_check = temp_.nce.work_vregs_to_check;
   /*
    * Set initial state. Catch blocks don't need any special treatment.
    */
@@ -940,7 +940,7 @@
     // Starting state is union of all incoming arcs.
     bool copied_first = false;
     for (BasicBlockId pred_id : bb->predecessors) {
-      if (temp_bit_matrix_[pred_id] == nullptr) {
+      if (temp_.nce.ending_vregs_to_check_matrix[pred_id] == nullptr) {
         continue;
       }
       BasicBlock* pred_bb = GetBasicBlock(pred_id);
@@ -962,9 +962,9 @@
       }
       if (!copied_first) {
         copied_first = true;
-        vregs_to_check->Copy(temp_bit_matrix_[pred_id]);
+        vregs_to_check->Copy(temp_.nce.ending_vregs_to_check_matrix[pred_id]);
       } else {
-        vregs_to_check->Union(temp_bit_matrix_[pred_id]);
+        vregs_to_check->Union(temp_.nce.ending_vregs_to_check_matrix[pred_id]);
       }
       if (null_check_insn != nullptr) {
         vregs_to_check->ClearBit(null_check_insn->dalvikInsn.vA);
@@ -1057,27 +1057,27 @@
 
   // Did anything change?
   bool nce_changed = false;
-  ArenaBitVector* old_ending_ssa_regs_to_check = temp_bit_matrix_[bb->id];
+  ArenaBitVector* old_ending_ssa_regs_to_check = temp_.nce.ending_vregs_to_check_matrix[bb->id];
   if (old_ending_ssa_regs_to_check == nullptr) {
     DCHECK(temp_scoped_alloc_.get() != nullptr);
     nce_changed = vregs_to_check->GetHighestBitSet() != -1;
-    temp_bit_matrix_[bb->id] = vregs_to_check;
+    temp_.nce.ending_vregs_to_check_matrix[bb->id] = vregs_to_check;
     // Create a new vregs_to_check for next BB.
-    temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
-        temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
+    temp_.nce.work_vregs_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
+        temp_scoped_alloc_.get(), temp_.nce.num_vregs, false, kBitMapNullCheck);
   } else if (!vregs_to_check->SameBitsSet(old_ending_ssa_regs_to_check)) {
     nce_changed = true;
-    temp_bit_matrix_[bb->id] = vregs_to_check;
-    temp_bit_vector_ = old_ending_ssa_regs_to_check;  // Reuse for vregs_to_check for next BB.
+    temp_.nce.ending_vregs_to_check_matrix[bb->id] = vregs_to_check;
+    temp_.nce.work_vregs_to_check = old_ending_ssa_regs_to_check;  // Reuse for next BB.
   }
   return nce_changed;
 }
 
 void MIRGraph::EliminateNullChecksEnd() {
   // Clean up temporaries.
-  temp_bit_vector_size_ = 0u;
-  temp_bit_vector_ = nullptr;
-  temp_bit_matrix_ = nullptr;
+  temp_.nce.num_vregs = 0u;
+  temp_.nce.work_vregs_to_check = nullptr;
+  temp_.nce.ending_vregs_to_check_matrix = nullptr;
   DCHECK(temp_scoped_alloc_.get() != nullptr);
   temp_scoped_alloc_.reset();
 
@@ -1124,9 +1124,9 @@
 
   // Each insn we use here has at least 2 code units, offset/2 will be a unique index.
   const size_t end = (GetNumDalvikInsns() + 1u) / 2u;
-  temp_insn_data_ = static_cast<uint16_t*>(
-      temp_scoped_alloc_->Alloc(end * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
-  std::fill_n(temp_insn_data_, end, 0xffffu);
+  temp_.cice.indexes = static_cast<uint16_t*>(
+      temp_scoped_alloc_->Alloc(end * sizeof(*temp_.cice.indexes), kArenaAllocGrowableArray));
+  std::fill_n(temp_.cice.indexes, end, 0xffffu);
 
   uint32_t unique_class_count = 0u;
   {
@@ -1173,8 +1173,8 @@
                   static_cast<uint16_t>(class_to_index_map.size())
               };
               uint16_t index = class_to_index_map.insert(entry).first->index;
-              // Using offset/2 for index into temp_insn_data_.
-              temp_insn_data_[mir->offset / 2u] = index;
+              // Using offset/2 for index into temp_.cice.indexes.
+              temp_.cice.indexes[mir->offset / 2u] = index;
             }
           } else if (mir->dalvikInsn.opcode == Instruction::INVOKE_STATIC ||
               mir->dalvikInsn.opcode == Instruction::INVOKE_STATIC_RANGE) {
@@ -1187,8 +1187,8 @@
                   static_cast<uint16_t>(class_to_index_map.size())
               };
               uint16_t index = class_to_index_map.insert(entry).first->index;
-              // Using offset/2 for index into temp_insn_data_.
-              temp_insn_data_[mir->offset / 2u] = index;
+              // Using offset/2 for index into temp_.cice.indexes.
+              temp_.cice.indexes[mir->offset / 2u] = index;
             }
           }
         }
@@ -1199,19 +1199,19 @@
 
   if (unique_class_count == 0u) {
     // All SGET/SPUTs refer to initialized classes. Nothing to do.
-    temp_insn_data_ = nullptr;
+    temp_.cice.indexes = nullptr;
     temp_scoped_alloc_.reset();
     return false;
   }
 
   // 2 bits for each class: is class initialized, is class in dex cache.
-  temp_bit_vector_size_ = 2u * unique_class_count;
-  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
-  temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+  temp_.cice.num_class_bits = 2u * unique_class_count;
+  temp_.cice.work_classes_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
+      temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false, kBitMapClInitCheck);
+  temp_.cice.ending_classes_to_check_matrix = static_cast<ArenaBitVector**>(
       temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * GetNumBlocks(), kArenaAllocMisc));
-  std::fill_n(temp_bit_matrix_, GetNumBlocks(), nullptr);
-  DCHECK_GT(temp_bit_vector_size_, 0u);
+  std::fill_n(temp_.cice.ending_classes_to_check_matrix, GetNumBlocks(), nullptr);
+  DCHECK_GT(temp_.cice.num_class_bits, 0u);
   return true;
 }
 
@@ -1229,22 +1229,22 @@
   /*
    * Set initial state.  Catch blocks don't need any special treatment.
    */
-  ArenaBitVector* classes_to_check = temp_bit_vector_;
+  ArenaBitVector* classes_to_check = temp_.cice.work_classes_to_check;
   DCHECK(classes_to_check != nullptr);
   if (bb->block_type == kEntryBlock) {
-    classes_to_check->SetInitialBits(temp_bit_vector_size_);
+    classes_to_check->SetInitialBits(temp_.cice.num_class_bits);
   } else {
     // Starting state is union of all incoming arcs.
     bool copied_first = false;
     for (BasicBlockId pred_id : bb->predecessors) {
-      if (temp_bit_matrix_[pred_id] == nullptr) {
+      if (temp_.cice.ending_classes_to_check_matrix[pred_id] == nullptr) {
         continue;
       }
       if (!copied_first) {
         copied_first = true;
-        classes_to_check->Copy(temp_bit_matrix_[pred_id]);
+        classes_to_check->Copy(temp_.cice.ending_classes_to_check_matrix[pred_id]);
       } else {
-        classes_to_check->Union(temp_bit_matrix_[pred_id]);
+        classes_to_check->Union(temp_.cice.ending_classes_to_check_matrix[pred_id]);
       }
     }
     DCHECK(copied_first);  // At least one predecessor must have been processed before this bb.
@@ -1253,7 +1253,7 @@
 
   // Walk through the instruction in the block, updating as necessary
   for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-    uint16_t index = temp_insn_data_[mir->offset / 2u];
+    uint16_t index = temp_.cice.indexes[mir->offset / 2u];
     if (index != 0xffffu) {
       bool check_initialization = false;
       bool check_dex_cache = false;
@@ -1299,29 +1299,29 @@
 
   // Did anything change?
   bool changed = false;
-  ArenaBitVector* old_ending_classes_to_check = temp_bit_matrix_[bb->id];
+  ArenaBitVector* old_ending_classes_to_check = temp_.cice.ending_classes_to_check_matrix[bb->id];
   if (old_ending_classes_to_check == nullptr) {
     DCHECK(temp_scoped_alloc_.get() != nullptr);
     changed = classes_to_check->GetHighestBitSet() != -1;
-    temp_bit_matrix_[bb->id] = classes_to_check;
+    temp_.cice.ending_classes_to_check_matrix[bb->id] = classes_to_check;
     // Create a new classes_to_check for next BB.
-    temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
-        temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
+    temp_.cice.work_classes_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
+        temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false, kBitMapClInitCheck);
   } else if (!classes_to_check->Equal(old_ending_classes_to_check)) {
     changed = true;
-    temp_bit_matrix_[bb->id] = classes_to_check;
-    temp_bit_vector_ = old_ending_classes_to_check;  // Reuse for classes_to_check for next BB.
+    temp_.cice.ending_classes_to_check_matrix[bb->id] = classes_to_check;
+    temp_.cice.work_classes_to_check = old_ending_classes_to_check;  // Reuse for next BB.
   }
   return changed;
 }
 
 void MIRGraph::EliminateClassInitChecksEnd() {
   // Clean up temporaries.
-  temp_bit_vector_size_ = 0u;
-  temp_bit_vector_ = nullptr;
-  temp_bit_matrix_ = nullptr;
-  DCHECK(temp_insn_data_ != nullptr);
-  temp_insn_data_ = nullptr;
+  temp_.cice.num_class_bits = 0u;
+  temp_.cice.work_classes_to_check = nullptr;
+  temp_.cice.ending_classes_to_check_matrix = nullptr;
+  DCHECK(temp_.cice.indexes != nullptr);
+  temp_.cice.indexes = nullptr;
   DCHECK(temp_scoped_alloc_.get() != nullptr);
   temp_scoped_alloc_.reset();
 }
@@ -1333,39 +1333,39 @@
 
   DCHECK(temp_scoped_alloc_ == nullptr);
   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-  DCHECK(temp_gvn_ == nullptr);
-  temp_gvn_.reset(
-      new (temp_scoped_alloc_.get()) GlobalValueNumbering(cu_, temp_scoped_alloc_.get(),
-                                                          GlobalValueNumbering::kModeGvn));
+  DCHECK(temp_.gvn.gvn == nullptr);
+  temp_.gvn.gvn = new (temp_scoped_alloc_.get()) GlobalValueNumbering(
+      cu_, temp_scoped_alloc_.get(), GlobalValueNumbering::kModeGvn);
   return true;
 }
 
 bool MIRGraph::ApplyGlobalValueNumbering(BasicBlock* bb) {
-  DCHECK(temp_gvn_ != nullptr);
-  LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb);
+  DCHECK(temp_.gvn.gvn != nullptr);
+  LocalValueNumbering* lvn = temp_.gvn.gvn->PrepareBasicBlock(bb);
   if (lvn != nullptr) {
     for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
       lvn->GetValueNumber(mir);
     }
   }
-  bool change = (lvn != nullptr) && temp_gvn_->FinishBasicBlock(bb);
+  bool change = (lvn != nullptr) && temp_.gvn.gvn->FinishBasicBlock(bb);
   return change;
 }
 
 void MIRGraph::ApplyGlobalValueNumberingEnd() {
   // Perform modifications.
-  if (temp_gvn_->Good()) {
+  DCHECK(temp_.gvn.gvn != nullptr);
+  if (temp_.gvn.gvn->Good()) {
     if (max_nested_loops_ != 0u) {
-      temp_gvn_->StartPostProcessing();
+      temp_.gvn.gvn->StartPostProcessing();
       TopologicalSortIterator iter(this);
       for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
         ScopedArenaAllocator allocator(&cu_->arena_stack);  // Reclaim memory after each LVN.
-        LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb, &allocator);
+        LocalValueNumbering* lvn = temp_.gvn.gvn->PrepareBasicBlock(bb, &allocator);
         if (lvn != nullptr) {
           for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
             lvn->GetValueNumber(mir);
           }
-          bool change = temp_gvn_->FinishBasicBlock(bb);
+          bool change = temp_.gvn.gvn->FinishBasicBlock(bb);
           DCHECK(!change) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
         }
       }
@@ -1376,16 +1376,16 @@
     LOG(WARNING) << "GVN failed for " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   }
 
-  DCHECK(temp_gvn_ != nullptr);
-  temp_gvn_.reset();
+  delete temp_.gvn.gvn;
+  temp_.gvn.gvn = nullptr;
   DCHECK(temp_scoped_alloc_ != nullptr);
   temp_scoped_alloc_.reset();
 }
 
 void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput) {
   uint32_t method_index = invoke->meta.method_lowering_info;
-  if (temp_bit_vector_->IsBitSet(method_index)) {
-    iget_or_iput->meta.ifield_lowering_info = temp_insn_data_[method_index];
+  if (temp_.smi.processed_indexes->IsBitSet(method_index)) {
+    iget_or_iput->meta.ifield_lowering_info = temp_.smi.lowering_infos[method_index];
     DCHECK_EQ(field_idx, GetIFieldLoweringInfo(iget_or_iput).FieldIndex());
     return;
   }
@@ -1402,8 +1402,8 @@
 
   uint32_t field_info_index = ifield_lowering_infos_.size();
   ifield_lowering_infos_.push_back(inlined_field_info);
-  temp_bit_vector_->SetBit(method_index);
-  temp_insn_data_[method_index] = field_info_index;
+  temp_.smi.processed_indexes->SetBit(method_index);
+  temp_.smi.lowering_infos[method_index] = field_info_index;
   iget_or_iput->meta.ifield_lowering_info = field_info_index;
 }
 
@@ -1425,12 +1425,12 @@
 
   DCHECK(temp_scoped_alloc_.get() == nullptr);
   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-  temp_bit_vector_size_ = method_lowering_infos_.size();
-  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapMisc);
-  temp_bit_vector_->ClearAllBits();
-  temp_insn_data_ = static_cast<uint16_t*>(temp_scoped_alloc_->Alloc(
-      temp_bit_vector_size_ * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
+  temp_.smi.num_indexes = method_lowering_infos_.size();
+  temp_.smi.processed_indexes = new (temp_scoped_alloc_.get()) ArenaBitVector(
+      temp_scoped_alloc_.get(), temp_.smi.num_indexes, false, kBitMapMisc);
+  temp_.smi.processed_indexes->ClearAllBits();
+  temp_.smi.lowering_infos = static_cast<uint16_t*>(temp_scoped_alloc_->Alloc(
+      temp_.smi.num_indexes * sizeof(*temp_.smi.lowering_infos), kArenaAllocGrowableArray));
 }
 
 void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
@@ -1477,10 +1477,12 @@
 }
 
 void MIRGraph::InlineSpecialMethodsEnd() {
-  DCHECK(temp_insn_data_ != nullptr);
-  temp_insn_data_ = nullptr;
-  DCHECK(temp_bit_vector_ != nullptr);
-  temp_bit_vector_ = nullptr;
+  // Clean up temporaries.
+  DCHECK(temp_.smi.lowering_infos != nullptr);
+  temp_.smi.lowering_infos = nullptr;
+  temp_.smi.num_indexes = 0u;
+  DCHECK(temp_.smi.processed_indexes != nullptr);
+  temp_.smi.processed_indexes = nullptr;
   DCHECK(temp_scoped_alloc_.get() != nullptr);
   temp_scoped_alloc_.reset();
 }
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index d3d76ba..ed33882 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -126,7 +126,7 @@
 
   for (uint32_t idx : bb->data_flow_info->def_v->Indexes()) {
     /* Block bb defines register idx */
-    temp_bit_matrix_[idx]->SetBit(bb->id);
+    temp_.ssa.def_block_matrix[idx]->SetBit(bb->id);
   }
   return true;
 }
@@ -135,16 +135,16 @@
   int num_registers = GetNumOfCodeAndTempVRs();
   /* Allocate num_registers bit vector pointers */
   DCHECK(temp_scoped_alloc_ != nullptr);
-  DCHECK(temp_bit_matrix_ == nullptr);
-  temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+  DCHECK(temp_.ssa.def_block_matrix == nullptr);
+  temp_.ssa.def_block_matrix = static_cast<ArenaBitVector**>(
       temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * num_registers, kArenaAllocDFInfo));
   int i;
 
   /* Initialize num_register vectors with num_blocks bits each */
   for (i = 0; i < num_registers; i++) {
-    temp_bit_matrix_[i] = new (temp_scoped_alloc_.get()) ArenaBitVector(arena_, GetNumBlocks(),
-                                                                        false, kBitMapBMatrix);
-    temp_bit_matrix_[i]->ClearAllBits();
+    temp_.ssa.def_block_matrix[i] = new (temp_scoped_alloc_.get()) ArenaBitVector(
+        arena_, GetNumBlocks(), false, kBitMapBMatrix);
+    temp_.ssa.def_block_matrix[i]->ClearAllBits();
   }
 
   AllNodesIterator iter(this);
@@ -163,7 +163,7 @@
   int num_regs = GetNumOfCodeVRs();
   int in_reg = GetFirstInVR();
   for (; in_reg < num_regs; in_reg++) {
-    temp_bit_matrix_[in_reg]->SetBit(GetEntryBlock()->id);
+    temp_.ssa.def_block_matrix[in_reg]->SetBit(GetEntryBlock()->id);
   }
 }
 
@@ -435,32 +435,32 @@
  * insert a phi node if the variable is live-in to the block.
  */
 bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
-  DCHECK_EQ(temp_bit_vector_size_, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
-  ArenaBitVector* temp_dalvik_register_v = temp_bit_vector_;
+  DCHECK_EQ(temp_.ssa.num_vregs, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
+  ArenaBitVector* temp_live_vregs = temp_.ssa.work_live_vregs;
 
   if (bb->data_flow_info == NULL) {
     return false;
   }
-  temp_dalvik_register_v->Copy(bb->data_flow_info->live_in_v);
+  temp_live_vregs->Copy(bb->data_flow_info->live_in_v);
   BasicBlock* bb_taken = GetBasicBlock(bb->taken);
   BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
   if (bb_taken && bb_taken->data_flow_info)
-    ComputeSuccLineIn(temp_dalvik_register_v, bb_taken->data_flow_info->live_in_v,
+    ComputeSuccLineIn(temp_live_vregs, bb_taken->data_flow_info->live_in_v,
                       bb->data_flow_info->def_v);
   if (bb_fall_through && bb_fall_through->data_flow_info)
-    ComputeSuccLineIn(temp_dalvik_register_v, bb_fall_through->data_flow_info->live_in_v,
+    ComputeSuccLineIn(temp_live_vregs, bb_fall_through->data_flow_info->live_in_v,
                       bb->data_flow_info->def_v);
   if (bb->successor_block_list_type != kNotUsed) {
     for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
       BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
       if (succ_bb->data_flow_info) {
-        ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v,
+        ComputeSuccLineIn(temp_live_vregs, succ_bb->data_flow_info->live_in_v,
                           bb->data_flow_info->def_v);
       }
     }
   }
-  if (!temp_dalvik_register_v->Equal(bb->data_flow_info->live_in_v)) {
-    bb->data_flow_info->live_in_v->Copy(temp_dalvik_register_v);
+  if (!temp_live_vregs->Equal(bb->data_flow_info->live_in_v)) {
+    bb->data_flow_info->live_in_v->Copy(temp_live_vregs);
     return true;
   }
   return false;
@@ -482,7 +482,7 @@
 
   /* Iterate through each Dalvik register */
   for (dalvik_reg = GetNumOfCodeAndTempVRs() - 1; dalvik_reg >= 0; dalvik_reg--) {
-    input_blocks->Copy(temp_bit_matrix_[dalvik_reg]);
+    input_blocks->Copy(temp_.ssa.def_block_matrix[dalvik_reg]);
     phi_blocks->ClearAllBits();
     do {
       // TUNING: When we repeat this, we could skip indexes from the previous pass.