Revert "ART: Better SSA Allocation when recreating SSA"
Temporarily reverting until the memory footprint cost of adding a vreg-to-SSA entrance map to every applicable MIR node can be assessed.
This reverts commit cb73fb35e5f7c575ed491c0c8e2d2b1a0a22ea2e.
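For reference, the footprint being reverted comes from the per-block entrance snapshot of the vreg-to-SSA map (sizeof(int) * cu_->num_dalvik_registers per BasicBlockDataFlow) plus the two int16_t allocation counters added to each SSARepresentation. The standalone sketch below is only a rough way to ballpark that cost; the register, block, and MIR counts are illustrative assumptions, not measurements from any method:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Rough estimate of the extra arena memory the reverted change keeps alive.
    // All counts below are assumed values for illustration only.
    int main() {
      const size_t num_dalvik_registers = 64;   // assumed vregs in the method
      const size_t num_basic_blocks = 200;      // assumed reachable blocks
      const size_t num_mir_nodes = 1500;        // assumed MIR instructions

      // One entrance copy of the vreg->SSA map per basic block
      // (mirrors the sizeof(int) * num_dalvik_registers allocation in the diff).
      const size_t entrance_maps =
          num_basic_blocks * num_dalvik_registers * sizeof(int32_t);

      // Two extra allocation-size counters per SSARepresentation
      // (num_uses_allocated and num_defs_allocated).
      const size_t ssa_counters = num_mir_nodes * 2 * sizeof(int16_t);

      printf("entrance maps: %zu bytes, ssa counters: %zu bytes\n",
             entrance_maps, ssa_counters);
      return 0;
    }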
Change-Id: Ia9c03bfc5d365ad8d8b949e870f1e3bcda7f9a54
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index cd41d0f..36f1be7 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -947,34 +947,18 @@
defs[reg_index] = ssa_reg;
}
-void MIRGraph::AllocateSSAUseData(MIR *mir, int num_uses) {
- mir->ssa_rep->num_uses = num_uses;
-
- if (mir->ssa_rep->num_uses_allocated < num_uses) {
- mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses, kArenaAllocDFInfo));
- // NOTE: will be filled in during type & size inference pass
- mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, kArenaAllocDFInfo));
- }
-}
-
-void MIRGraph::AllocateSSADefData(MIR *mir, int num_defs) {
- mir->ssa_rep->num_defs = num_defs;
-
- if (mir->ssa_rep->num_defs_allocated < num_defs) {
- mir->ssa_rep->defs = static_cast<int*>(arena_->Alloc(sizeof(int) * num_defs,
- kArenaAllocDFInfo));
- mir->ssa_rep->fp_def = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_defs,
- kArenaAllocDFInfo));
- }
-}
-
/* Look up new SSA names for format_35c instructions */
void MIRGraph::DataFlowSSAFormat35C(MIR* mir) {
DecodedInstruction *d_insn = &mir->dalvikInsn;
int num_uses = d_insn->vA;
int i;
- AllocateSSAUseData(mir, num_uses);
+ mir->ssa_rep->num_uses = num_uses;
+ mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
+ kArenaAllocDFInfo));
+ // NOTE: will be filled in during type & size inference pass
+ mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
+ kArenaAllocDFInfo));
for (i = 0; i < num_uses; i++) {
HandleSSAUse(mir->ssa_rep->uses, d_insn->arg[i], i);
@@ -987,7 +971,12 @@
int num_uses = d_insn->vA;
int i;
- AllocateSSAUseData(mir, num_uses);
+ mir->ssa_rep->num_uses = num_uses;
+ mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
+ kArenaAllocDFInfo));
+ // NOTE: will be filled in during type & size inference pass
+ mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
+ kArenaAllocDFInfo));
for (i = 0; i < num_uses; i++) {
HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+i, i);
@@ -1000,17 +989,10 @@
if (bb->data_flow_info == NULL) return false;
- bb->data_flow_info->vreg_to_ssa_map_entrance =
- static_cast<int*>(arena_->Alloc(sizeof(int) * cu_->num_dalvik_registers, kArenaAllocDFInfo));
-
- memcpy(bb->data_flow_info->vreg_to_ssa_map_entrance, vreg_to_ssa_map_,
- sizeof(int) * cu_->num_dalvik_registers);
-
for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
mir->ssa_rep =
static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
kArenaAllocDFInfo));
- memset(mir->ssa_rep, 0, sizeof(*mir->ssa_rep));
uint64_t df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
@@ -1057,7 +1039,13 @@
}
}
- AllocateSSAUseData(mir, num_uses);
+ if (num_uses) {
+ mir->ssa_rep->num_uses = num_uses;
+ mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
+ kArenaAllocDFInfo));
+ mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
+ kArenaAllocDFInfo));
+ }
int num_defs = 0;
@@ -1068,7 +1056,13 @@
}
}
- AllocateSSADefData(mir, num_defs);
+ if (num_defs) {
+ mir->ssa_rep->num_defs = num_defs;
+ mir->ssa_rep->defs = static_cast<int*>(arena_->Alloc(sizeof(int) * num_defs,
+ kArenaAllocDFInfo));
+ mir->ssa_rep->fp_def = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_defs,
+ kArenaAllocDFInfo));
+ }
DecodedInstruction *d_insn = &mir->dalvikInsn;
@@ -1114,11 +1108,11 @@
* input to PHI nodes can be derived from the snapshot of all
* predecessor blocks.
*/
- bb->data_flow_info->vreg_to_ssa_map_exit =
+ bb->data_flow_info->vreg_to_ssa_map =
static_cast<int*>(arena_->Alloc(sizeof(int) * cu_->num_dalvik_registers,
kArenaAllocDFInfo));
- memcpy(bb->data_flow_info->vreg_to_ssa_map_exit, vreg_to_ssa_map_,
+ memcpy(bb->data_flow_info->vreg_to_ssa_map, vreg_to_ssa_map_,
sizeof(int) * cu_->num_dalvik_registers);
return true;
}
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index ad80c3c..8ce4f1f 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -58,7 +58,6 @@
use_counts_(arena, 256, kGrowableArrayMisc),
raw_use_counts_(arena, 256, kGrowableArrayMisc),
num_reachable_blocks_(0),
- max_num_reachable_blocks_(0),
dfs_order_(NULL),
dfs_post_order_(NULL),
dom_post_order_traversal_(NULL),
@@ -1238,9 +1237,6 @@
/* Rename register names by local defs and phi nodes */
ClearAllVisitedFlags();
DoDFSPreOrderSSARename(GetEntryBlock());
-
- // Update the maximum number of reachable blocks.
- max_num_reachable_blocks_ = num_reachable_blocks_;
}
} // namespace art
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index f64f3e0..2c125f6 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -223,8 +223,7 @@
ArenaBitVector* def_v;
ArenaBitVector* live_in_v;
ArenaBitVector* phi_v;
- int32_t* vreg_to_ssa_map_exit;
- int32_t* vreg_to_ssa_map_entrance;
+ int32_t* vreg_to_ssa_map;
ArenaBitVector* ending_check_v; // For null check and class init check elimination.
};
@@ -237,8 +236,6 @@
* we may want to revisit in the future.
*/
struct SSARepresentation {
- int16_t num_uses_allocated;
- int16_t num_defs_allocated;
int16_t num_uses;
int16_t num_defs;
int32_t* uses;
@@ -861,10 +858,6 @@
void CombineBlocks(BasicBlock* bb);
void ClearAllVisitedFlags();
-
- void AllocateSSAUseData(MIR *mir, int num_uses);
- void AllocateSSADefData(MIR *mir, int num_defs);
-
/*
* IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
* we can verify that all catch entries have native PC entries.
@@ -950,7 +943,6 @@
GrowableArray<uint32_t> use_counts_; // Weighted by nesting depth
GrowableArray<uint32_t> raw_use_counts_; // Not weighted
unsigned int num_reachable_blocks_;
- unsigned int max_num_reachable_blocks_;
GrowableArray<BasicBlockId>* dfs_order_;
GrowableArray<BasicBlockId>* dfs_post_order_;
GrowableArray<BasicBlockId>* dom_post_order_traversal_;
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 50fb298..5f89c21 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -173,8 +173,8 @@
}
void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) {
- if (dom_post_order_traversal_ == NULL || max_num_reachable_blocks_ < num_reachable_blocks_) {
- // First time or too small - create the array.
+ if (dom_post_order_traversal_ == NULL) {
+ // First time - create the array.
dom_post_order_traversal_ =
new (arena_) GrowableArray<BasicBlockId>(arena_, num_reachable_blocks_,
kGrowableArrayDomPostOrderTraversal);
@@ -380,8 +380,8 @@
InitializeDominationInfo(bb);
}
- /* Initialize & Clear i_dom_list */
- if (max_num_reachable_blocks_ < num_reachable_blocks_) {
+ /* Initalize & Clear i_dom_list */
+ if (i_dom_list_ == NULL) {
i_dom_list_ = static_cast<int*>(arena_->Alloc(sizeof(int) * num_reachable_blocks,
kArenaAllocDFInfo));
}
@@ -584,8 +584,12 @@
/* Iterate through the predecessors */
GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
size_t num_uses = bb->predecessors->Size();
- AllocateSSAUseData(mir, num_uses);
- int* uses = mir->ssa_rep->uses;
+ mir->ssa_rep->num_uses = num_uses;
+ int* uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
+ kArenaAllocDFInfo));
+ mir->ssa_rep->uses = uses;
+ mir->ssa_rep->fp_use =
+ static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, kArenaAllocDFInfo));
BasicBlockId* incoming =
static_cast<BasicBlockId*>(arena_->Alloc(sizeof(BasicBlockId) * num_uses,
kArenaAllocDFInfo));
@@ -594,9 +598,9 @@
while (true) {
BasicBlock* pred_bb = GetBasicBlock(iter.Next());
if (!pred_bb) {
- break;
+ break;
}
- int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
+ int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg];
uses[idx] = ssa_reg;
incoming[idx] = pred_bb->id;
idx++;