author     2013-10-11 15:24:55 -0700
committer  2013-10-21 12:15:45 -0700
commit     0d82948094d9a198e01aa95f64012bdedd5b6fc9 (patch)
tree       c219c9dd2f1ae3b18245aafac4fb00970d5266a3 /compiler/dex/mir_optimization.cc
parent     409fe94ad529d9334587be80b9f6a3d166805508 (diff)
64-bit prep
Preparation for 64-bit roll.
o Eliminated storing pointers in 32-bit int slots in LIR.
o General size reductions of common structures to reduce impact
of doubled pointer sizes:
- BasicBlock struct was 72 bytes, now is 48.
- MIR struct was 72 bytes, now is 64.
- RegLocation was 12 bytes, now is 8.
o Generally replaced uses of BasicBlock* pointers with 16-bit Ids
(see the sketch after this list).
o Replaced several doubly-linked lists with singly-linked to save
one stored pointer per node.
o Replaced a number of lingering uintptr_t uses, a holdover from the
JIT (which used pointers to mapped dex and actual code cache
addresses rather than trace-relative offsets), with uint32_t.
o Clean up handling of embedded data for switch tables and array data.
o Miscellaneous cleanup.
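A minimal sketch of the Id-indirection pattern described above. Only the
names that appear in the diff below (BasicBlockId, NullBasicBlockId,
GetBasicBlock, taken, fall_through, MIR::next) are taken from the source;
the surrounding declarations are illustrative, not copied from mir_graph.h:

    #include <cstdint>
    #include <vector>

    typedef uint16_t BasicBlockId;                   // 16-bit id replaces a BasicBlock* edge.
    static const BasicBlockId NullBasicBlockId = 0;  // Sentinel replacing a NULL edge pointer.

    struct MIR {
      MIR* next;  // Singly-linked: with no 'prev', only one pointer per node doubles on 64-bit.
    };

    struct BasicBlock {
      BasicBlockId id;
      BasicBlockId taken;         // Was BasicBlock*; 2 bytes instead of 8 on 64-bit.
      BasicBlockId fall_through;  // Ditto.
    };

    class MIRGraph {
     public:
      // Edges are resolved through the graph on demand, which is why the diff
      // wraps every bb->taken / bb->fall_through access in GetBasicBlock().
      BasicBlock* GetBasicBlock(BasicBlockId block_id) const {
        return (block_id == NullBasicBlockId) ? nullptr : block_list_[block_id];
      }

     private:
      std::vector<BasicBlock*> block_list_;  // Stand-in for ART's GrowableArray.
    };

The tradeoff is one extra indexed load per edge traversal in exchange for
halving (or better) the per-edge storage, and block references that stay the
same size on 32-bit and 64-bit builds.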
I anticipate one or two additional CLs to reduce the size of MIR and LIR
structs.
Change-Id: I58e426d3f8e5efe64c1146b2823453da99451230
Diffstat (limited to 'compiler/dex/mir_optimization.cc')
-rw-r--r--  compiler/dex/mir_optimization.cc | 84
1 file changed, 44 insertions(+), 40 deletions(-)
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 05e428e178..3cd158ffc0 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -103,12 +103,12 @@ void MIRGraph::PropagateConstants() {
 }
 
 /* Advance to next strictly dominated MIR node in an extended basic block */
-static MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
+MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
   BasicBlock* bb = *p_bb;
   if (mir != NULL) {
     mir = mir->next;
     if (mir == NULL) {
-      bb = bb->fall_through;
+      bb = GetBasicBlock(bb->fall_through);
       if ((bb == NULL) || Predecessors(bb) != 1) {
         mir = NULL;
       } else {
@@ -147,19 +147,21 @@ MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
   return mir;
 }
 
-static BasicBlock* NextDominatedBlock(BasicBlock* bb) {
+BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
   if (bb->block_type == kDead) {
     return NULL;
   }
   DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
       || (bb->block_type == kExitBlock));
-  if (((bb->taken != NULL) && (bb->fall_through == NULL)) &&
-      ((bb->taken->block_type == kDalvikByteCode) || (bb->taken->block_type == kExitBlock))) {
+  BasicBlock* bb_taken = GetBasicBlock(bb->taken);
+  BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
+  if (((bb_taken != NULL) && (bb_fall_through == NULL)) &&
+      ((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
     // Follow simple unconditional branches.
-    bb = bb->taken;
+    bb = bb_taken;
   } else {
     // Follow simple fallthrough
-    bb = (bb->taken != NULL) ? NULL : bb->fall_through;
+    bb = (bb_taken != NULL) ? NULL : bb_fall_through;
   }
   if (bb == NULL || (Predecessors(bb) != 1)) {
     return NULL;
@@ -311,11 +313,13 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
         case Instruction::IF_GTZ:
         case Instruction::IF_LEZ:
           // If we've got a backwards branch to return, no need to suspend check.
-          if ((IsBackedge(bb, bb->taken) && bb->taken->dominates_return) ||
-              (IsBackedge(bb, bb->fall_through) && bb->fall_through->dominates_return)) {
+          if ((IsBackedge(bb, bb->taken) && GetBasicBlock(bb->taken)->dominates_return) ||
+              (IsBackedge(bb, bb->fall_through) &&
+               GetBasicBlock(bb->fall_through)->dominates_return)) {
             mir->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
             if (cu_->verbose) {
-              LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex << mir->offset;
+              LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex
+                        << mir->offset;
             }
           }
           break;
@@ -328,15 +332,15 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
       if (!(cu_->compiler_backend == kPortable) && (cu_->instruction_set == kThumb2) &&
          ((mir->dalvikInsn.opcode == Instruction::IF_EQZ) ||
           (mir->dalvikInsn.opcode == Instruction::IF_NEZ))) {
-        BasicBlock* ft = bb->fall_through;
+        BasicBlock* ft = GetBasicBlock(bb->fall_through);
         DCHECK(ft != NULL);
-        BasicBlock* ft_ft = ft->fall_through;
-        BasicBlock* ft_tk = ft->taken;
+        BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
+        BasicBlock* ft_tk = GetBasicBlock(ft->taken);
 
-        BasicBlock* tk = bb->taken;
+        BasicBlock* tk = GetBasicBlock(bb->taken);
         DCHECK(tk != NULL);
-        BasicBlock* tk_ft = tk->fall_through;
-        BasicBlock* tk_tk = tk->taken;
+        BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
+        BasicBlock* tk_tk = GetBasicBlock(tk->taken);
 
         /*
          * In the select pattern, the taken edge goes to a block that unconditionally
@@ -434,7 +438,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
             int dead_def = if_false->ssa_rep->defs[0];
             int live_def = if_true->ssa_rep->defs[0];
             mir->ssa_rep->defs[0] = live_def;
-            int* incoming = reinterpret_cast<int*>(phi->dalvikInsn.vB);
+            BasicBlockId* incoming = phi->meta.phi_incoming;
             for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
               if (phi->ssa_rep->uses[i] == live_def) {
                 incoming[i] = bb->id;
@@ -449,7 +453,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
               }
             }
             phi->ssa_rep->num_uses--;
-            bb->taken = NULL;
+            bb->taken = NullBasicBlockId;
             tk->block_type = kDead;
             for (MIR* tmir = ft->first_mir_insn; tmir != NULL; tmir = tmir->next) {
               tmir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
@@ -500,7 +504,7 @@ void MIRGraph::CountChecks(struct BasicBlock* bb) {
 }
 
 /* Try to make common case the fallthrough path */
-static bool LayoutBlocks(struct BasicBlock* bb) {
+bool MIRGraph::LayoutBlocks(BasicBlock* bb) {
   // TODO: For now, just looking for direct throws.  Consider generalizing for profile feedback
   if (!bb->explicit_throw) {
     return false;
@@ -511,13 +515,13 @@ static bool LayoutBlocks(struct BasicBlock* bb) {
     if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
       break;
     }
-    BasicBlock* prev = walker->predecessors->Get(0);
+    BasicBlock* prev = GetBasicBlock(walker->predecessors->Get(0));
     if (prev->conditional_branch) {
-      if (prev->fall_through == walker) {
+      if (GetBasicBlock(prev->fall_through) == walker) {
        // Already done - return
        break;
      }
-      DCHECK_EQ(walker, prev->taken);
+      DCHECK_EQ(walker, GetBasicBlock(prev->taken));
       // Got one.  Flip it and exit
       Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
       switch (opcode) {
@@ -536,7 +540,7 @@ static bool LayoutBlocks(struct BasicBlock* bb) {
         default: LOG(FATAL) << "Unexpected opcode " << opcode;
       }
       prev->last_mir_insn->dalvikInsn.opcode = opcode;
-      BasicBlock* t_bb = prev->taken;
+      BasicBlockId t_bb = prev->taken;
       prev->taken = prev->fall_through;
       prev->fall_through = t_bb;
       break;
@@ -556,8 +560,9 @@ bool MIRGraph::CombineBlocks(struct BasicBlock* bb) {
         || (bb->block_type == kExceptionHandling)
         || (bb->block_type == kExitBlock)
         || (bb->block_type == kDead)
-        || ((bb->taken == NULL) || (bb->taken->block_type != kExceptionHandling))
-        || (bb->successor_block_list.block_list_type != kNotUsed)
+        || (bb->taken == NullBasicBlockId)
+        || (GetBasicBlock(bb->taken)->block_type != kExceptionHandling)
+        || (bb->successor_block_list_type != kNotUsed)
         || (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) != kMirOpCheck)) {
       break;
     }
@@ -578,19 +583,18 @@ bool MIRGraph::CombineBlocks(struct BasicBlock* bb) {
       break;
     }
 
     // OK - got one.  Combine
-    BasicBlock* bb_next = bb->fall_through;
+    BasicBlock* bb_next = GetBasicBlock(bb->fall_through);
     DCHECK(!bb_next->catch_entry);
     DCHECK_EQ(Predecessors(bb_next), 1U);
-    MIR* t_mir = bb->last_mir_insn->prev;
     // Overwrite the kOpCheck insn with the paired opcode
     DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
     *bb->last_mir_insn = *throw_insn;
-    bb->last_mir_insn->prev = t_mir;
     // Use the successor info from the next block
-    bb->successor_block_list = bb_next->successor_block_list;
+    bb->successor_block_list_type = bb_next->successor_block_list_type;
+    bb->successor_blocks = bb_next->successor_blocks;
    // Use the ending block linkage from the next block
     bb->fall_through = bb_next->fall_through;
-    bb->taken->block_type = kDead;  // Kill the unused exception block
+    GetBasicBlock(bb->taken)->block_type = kDead;  // Kill the unused exception block
     bb->taken = bb_next->taken;
     // Include the rest of the instructions
     bb->last_mir_insn = bb_next->last_mir_insn;
@@ -631,20 +635,20 @@ bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) {
         temp_ssa_register_v_->SetBit(this_reg);
       }
     } else if (bb->predecessors->Size() == 1) {
-      BasicBlock* pred_bb = bb->predecessors->Get(0);
+      BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
       temp_ssa_register_v_->Copy(pred_bb->data_flow_info->ending_null_check_v);
       if (pred_bb->block_type == kDalvikByteCode) {
         // Check to see if predecessor had an explicit null-check.
         MIR* last_insn = pred_bb->last_mir_insn;
         Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
         if (last_opcode == Instruction::IF_EQZ) {
-          if (pred_bb->fall_through == bb) {
+          if (pred_bb->fall_through == bb->id) {
             // The fall-through of a block following a IF_EQZ, set the vA of the IF_EQZ to show that
             // it can't be null.
             temp_ssa_register_v_->SetBit(last_insn->ssa_rep->uses[0]);
           }
         } else if (last_opcode == Instruction::IF_NEZ) {
-          if (pred_bb->taken == bb) {
+          if (pred_bb->taken == bb->id) {
             // The taken block following a IF_NEZ, set the vA of the IF_NEZ to show that it can't be
             // null.
             temp_ssa_register_v_->SetBit(last_insn->ssa_rep->uses[0]);
@@ -653,12 +657,12 @@ bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) {
       }
     } else {
       // Starting state is intersection of all incoming arcs
-      GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
-      BasicBlock* pred_bb = iter.Next();
+      GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
+      BasicBlock* pred_bb = GetBasicBlock(iter.Next());
       DCHECK(pred_bb != NULL);
       temp_ssa_register_v_->Copy(pred_bb->data_flow_info->ending_null_check_v);
       while (true) {
-        pred_bb = iter.Next();
+        pred_bb = GetBasicBlock(iter.Next());
         if (!pred_bb) break;
         if ((pred_bb->data_flow_info == NULL) ||
             (pred_bb->data_flow_info->ending_null_check_v == NULL)) {
@@ -691,9 +695,9 @@ bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) {
         } else {
           if (next_mir) {
             LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
-          } else if (bb->fall_through) {
+          } else if (bb->fall_through != NullBasicBlockId) {
             // Look in next basic block
-            struct BasicBlock* next_bb = bb->fall_through;
+            struct BasicBlock* next_bb = GetBasicBlock(bb->fall_through);
             for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL;
               tmir =tmir->next) {
               if (static_cast<int>(tmir->dalvikInsn.opcode) >= static_cast<int>(kMirOpFirst)) {
@@ -834,7 +838,7 @@ bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) {
   }
   // Must be head of extended basic block.
   BasicBlock* start_bb = bb;
-  extended_basic_blocks_.push_back(bb);
+  extended_basic_blocks_.push_back(bb->id);
   bool terminated_by_return = false;
   // Visit blocks strictly dominated by this head.
   while (bb != NULL) {
@@ -864,7 +868,7 @@ void MIRGraph::BasicBlockOptimization() {
     }
     // Perform extended basic block optimizations.
     for (unsigned int i = 0; i < extended_basic_blocks_.size(); i++) {
-      BasicBlockOpt(extended_basic_blocks_[i]);
+      BasicBlockOpt(GetBasicBlock(extended_basic_blocks_[i]));
     }
   }
   if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
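The phi_incoming hunk above is the clearest instance of the commit message's
first bullet: the list of incoming phi blocks had been smuggled through the
32-bit vB operand as a reinterpret_cast pointer, which cannot survive 64-bit
pointers. A hedged before/after sketch — the exact layout of MIR::meta is
assumed here for illustration, not copied from mir_graph.h:

    #include <cstdint>

    typedef uint16_t BasicBlockId;

    // Before: pointer forced into a 32-bit integer slot; truncates on 64-bit.
    //   int* incoming = reinterpret_cast<int*>(phi->dalvikInsn.vB);
    //
    // After: a typed side-channel sized for 16-bit block ids.
    struct MIRSketch {
      struct DecodedInstruction {
        uint32_t vB;  // Stays a plain 32-bit Dalvik operand.
      } dalvikInsn;
      union {
        BasicBlockId* phi_incoming;  // One BasicBlockId per phi use.
      } meta;
    };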