ART: Better SSA Allocation when recreating SSA

The SSA calculation is not allocation friendly: every recalculation of
SSA reallocates the use and def arrays of each SSARepresentation. Make
SSARepresentation remember how much has been allocated and skip the
reallocation when SSA is recalculated and the existing buffers are
large enough.

Also make the dominance computation allocation friendly: the dominator
data structures are only recreated when the number of reachable blocks
has grown.

Signed-off-by: Jean Christophe Beyler <jean.christophe.beyler@intel.com>
Change-Id: I6418b402434bd850b45771c75b7631b7b84a8f66
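---

For context, the reuse pattern that the new AllocateSSAUseData /
AllocateSSADefData helpers implement can be sketched in isolation
roughly as below. This is a standalone illustration rather than ART
code: Arena, SsaRep and AllocateUseData are simplified stand-ins for
the arena allocator, SSARepresentation and the use-data helper, and
only the use side is shown. The point is that a recalculation only
pays for a new arena allocation when it needs more slots than any
earlier pass did.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified stand-in for ART's arena allocator: memory is only ever
// grown, never freed per object, so every reallocation is pure waste.
class Arena {
 public:
  void* Alloc(size_t size) {
    blocks_.emplace_back(size);
    return blocks_.back().data();
  }

 private:
  std::vector<std::vector<char>> blocks_;
};

// Simplified stand-in for SSARepresentation: num_uses is the logical
// size, num_uses_allocated the capacity of the backing buffers.
struct SsaRep {
  int16_t num_uses_allocated = 0;
  int16_t num_uses = 0;
  int* uses = nullptr;
  bool* fp_use = nullptr;
};

// Same shape as MIRGraph::AllocateSSAUseData(): only hit the arena
// when the existing buffers are too small, and remember the capacity.
void AllocateUseData(Arena* arena, SsaRep* rep, int num_uses) {
  rep->num_uses = num_uses;
  if (rep->num_uses_allocated < num_uses) {
    rep->uses = static_cast<int*>(arena->Alloc(sizeof(int) * num_uses));
    rep->fp_use = static_cast<bool*>(arena->Alloc(sizeof(bool) * num_uses));
    rep->num_uses_allocated = num_uses;
  }
}

int main() {
  Arena arena;
  SsaRep rep;

  AllocateUseData(&arena, &rep, 4);   // First computation: allocates.
  int* first_buffer = rep.uses;

  AllocateUseData(&arena, &rep, 3);   // Recalculation with fewer uses:
  assert(rep.uses == first_buffer);   // the old buffers are reused.

  AllocateUseData(&arena, &rep, 8);   // Larger request: allocates again.
  assert(rep.uses != first_buffer);
  return 0;
}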
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 36f1be7..cd41d0f 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -947,18 +947,34 @@
   defs[reg_index] = ssa_reg;
 }
 
+void MIRGraph::AllocateSSAUseData(MIR *mir, int num_uses) {
+  mir->ssa_rep->num_uses = num_uses;
+
+  if (mir->ssa_rep->num_uses_allocated < num_uses) {
+    mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses, kArenaAllocDFInfo));
+    // NOTE: will be filled in during type & size inference pass
+    mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, kArenaAllocDFInfo));
+    // Remember the capacity so the next recalculation can reuse these buffers.
+    mir->ssa_rep->num_uses_allocated = num_uses;
+  }
+}
+
+void MIRGraph::AllocateSSADefData(MIR *mir, int num_defs) {
+  mir->ssa_rep->num_defs = num_defs;
+
+  if (mir->ssa_rep->num_defs_allocated < num_defs) {
+    mir->ssa_rep->defs = static_cast<int*>(arena_->Alloc(sizeof(int) * num_defs,
+          kArenaAllocDFInfo));
+    mir->ssa_rep->fp_def = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_defs,
+          kArenaAllocDFInfo));
+    // Remember the capacity so the next recalculation can reuse these buffers.
+    mir->ssa_rep->num_defs_allocated = num_defs;
+  }
+}
+
 /* Look up new SSA names for format_35c instructions */
 void MIRGraph::DataFlowSSAFormat35C(MIR* mir) {
   DecodedInstruction *d_insn = &mir->dalvikInsn;
   int num_uses = d_insn->vA;
   int i;
 
-  mir->ssa_rep->num_uses = num_uses;
-  mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
-                                                       kArenaAllocDFInfo));
-  // NOTE: will be filled in during type & size inference pass
-  mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
-                                                          kArenaAllocDFInfo));
+  AllocateSSAUseData(mir, num_uses);
 
   for (i = 0; i < num_uses; i++) {
     HandleSSAUse(mir->ssa_rep->uses, d_insn->arg[i], i);
@@ -971,12 +987,7 @@
   int num_uses = d_insn->vA;
   int i;
 
-  mir->ssa_rep->num_uses = num_uses;
-  mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
-                                                       kArenaAllocDFInfo));
-  // NOTE: will be filled in during type & size inference pass
-  mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
-                                                          kArenaAllocDFInfo));
+  AllocateSSAUseData(mir, num_uses);
 
   for (i = 0; i < num_uses; i++) {
     HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+i, i);
@@ -989,10 +1000,17 @@
 
   if (bb->data_flow_info == NULL) return false;
 
+  bb->data_flow_info->vreg_to_ssa_map_entrance =
+    static_cast<int*>(arena_->Alloc(sizeof(int) * cu_->num_dalvik_registers, kArenaAllocDFInfo));
+
+  memcpy(bb->data_flow_info->vreg_to_ssa_map_entrance, vreg_to_ssa_map_,
+      sizeof(int) * cu_->num_dalvik_registers);
+
   for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     mir->ssa_rep =
         static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
                                                               kArenaAllocDFInfo));
+    memset(mir->ssa_rep, 0, sizeof(*mir->ssa_rep));
 
     uint64_t df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
 
@@ -1039,13 +1057,7 @@
       }
     }
 
-    if (num_uses) {
-      mir->ssa_rep->num_uses = num_uses;
-      mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
-                                                           kArenaAllocDFInfo));
-      mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
-                                                              kArenaAllocDFInfo));
-    }
+    AllocateSSAUseData(mir, num_uses);
 
     int num_defs = 0;
 
@@ -1056,13 +1068,7 @@
       }
     }
 
-    if (num_defs) {
-      mir->ssa_rep->num_defs = num_defs;
-      mir->ssa_rep->defs = static_cast<int*>(arena_->Alloc(sizeof(int) * num_defs,
-                                                           kArenaAllocDFInfo));
-      mir->ssa_rep->fp_def = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_defs,
-                                                              kArenaAllocDFInfo));
-    }
+    AllocateSSADefData(mir, num_defs);
 
     DecodedInstruction *d_insn = &mir->dalvikInsn;
 
@@ -1108,11 +1114,11 @@
    * input to PHI nodes can be derived from the snapshot of all
    * predecessor blocks.
    */
-  bb->data_flow_info->vreg_to_ssa_map =
+  bb->data_flow_info->vreg_to_ssa_map_exit =
       static_cast<int*>(arena_->Alloc(sizeof(int) * cu_->num_dalvik_registers,
                                       kArenaAllocDFInfo));
 
-  memcpy(bb->data_flow_info->vreg_to_ssa_map, vreg_to_ssa_map_,
+  memcpy(bb->data_flow_info->vreg_to_ssa_map_exit, vreg_to_ssa_map_,
          sizeof(int) * cu_->num_dalvik_registers);
   return true;
 }
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 8ce4f1f..ad80c3c 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -58,6 +58,7 @@
       use_counts_(arena, 256, kGrowableArrayMisc),
       raw_use_counts_(arena, 256, kGrowableArrayMisc),
       num_reachable_blocks_(0),
+      max_num_reachable_blocks_(0),
       dfs_order_(NULL),
       dfs_post_order_(NULL),
       dom_post_order_traversal_(NULL),
@@ -1237,6 +1238,9 @@
   /* Rename register names by local defs and phi nodes */
   ClearAllVisitedFlags();
   DoDFSPreOrderSSARename(GetEntryBlock());
+
+  // Update the maximum number of reachable blocks.
+  max_num_reachable_blocks_ = num_reachable_blocks_;
 }
 
 }  // namespace art
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 2c125f6..f64f3e0 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -223,7 +223,8 @@
   ArenaBitVector* def_v;
   ArenaBitVector* live_in_v;
   ArenaBitVector* phi_v;
-  int32_t* vreg_to_ssa_map;
+  int32_t* vreg_to_ssa_map_exit;
+  int32_t* vreg_to_ssa_map_entrance;
   ArenaBitVector* ending_check_v;  // For null check and class init check elimination.
 };
 
@@ -236,6 +237,8 @@
  * we may want to revisit in the future.
  */
 struct SSARepresentation {
+  int16_t num_uses_allocated;
+  int16_t num_defs_allocated;
   int16_t num_uses;
   int16_t num_defs;
   int32_t* uses;
@@ -858,6 +861,10 @@
   void CombineBlocks(BasicBlock* bb);
 
   void ClearAllVisitedFlags();
+
+  void AllocateSSAUseData(MIR *mir, int num_uses);
+  void AllocateSSADefData(MIR *mir, int num_defs);
+
   /*
    * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
    * we can verify that all catch entries have native PC entries.
@@ -943,6 +950,7 @@
   GrowableArray<uint32_t> use_counts_;      // Weighted by nesting depth
   GrowableArray<uint32_t> raw_use_counts_;  // Not weighted
   unsigned int num_reachable_blocks_;
+  unsigned int max_num_reachable_blocks_;
   GrowableArray<BasicBlockId>* dfs_order_;
   GrowableArray<BasicBlockId>* dfs_post_order_;
   GrowableArray<BasicBlockId>* dom_post_order_traversal_;
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 5f89c21..50fb298 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -173,8 +173,8 @@
 }
 
 void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) {
-  if (dom_post_order_traversal_ == NULL) {
-    // First time - create the array.
+  if (dom_post_order_traversal_ == NULL || max_num_reachable_blocks_ < num_reachable_blocks_) {
+    // First time or too small - create the array.
     dom_post_order_traversal_ =
         new (arena_) GrowableArray<BasicBlockId>(arena_, num_reachable_blocks_,
                                         kGrowableArrayDomPostOrderTraversal);
@@ -380,8 +380,8 @@
     InitializeDominationInfo(bb);
   }
 
-  /* Initalize & Clear i_dom_list */
-  if (i_dom_list_ == NULL) {
+  /* Initialize & Clear i_dom_list */
+  if (max_num_reachable_blocks_ < num_reachable_blocks_) {
     i_dom_list_ = static_cast<int*>(arena_->Alloc(sizeof(int) * num_reachable_blocks,
                                                   kArenaAllocDFInfo));
   }
@@ -584,12 +584,8 @@
     /* Iterate through the predecessors */
     GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
     size_t num_uses = bb->predecessors->Size();
-    mir->ssa_rep->num_uses = num_uses;
-    int* uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
-                                                kArenaAllocDFInfo));
-    mir->ssa_rep->uses = uses;
-    mir->ssa_rep->fp_use =
-        static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, kArenaAllocDFInfo));
+    AllocateSSAUseData(mir, num_uses);
+    int* uses = mir->ssa_rep->uses;
     BasicBlockId* incoming =
         static_cast<BasicBlockId*>(arena_->Alloc(sizeof(BasicBlockId) * num_uses,
                                                  kArenaAllocDFInfo));
@@ -598,9 +594,9 @@
     while (true) {
       BasicBlock* pred_bb = GetBasicBlock(iter.Next());
       if (!pred_bb) {
        break;
       }
-      int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg];
+      int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
       uses[idx] = ssa_reg;
       incoming[idx] = pred_bb->id;
       idx++;