 compiler/dex/frontend.cc              |  5 ++++-
 compiler/dex/frontend.h               |  1 +
 compiler/dex/local_value_numbering.cc | 16 ++++++++++++----
 compiler/dex/mir_dataflow.cc          |  9 +++++----
 compiler/dex/mir_graph.cc             | 25 ++++++++++++++++++++-----
 compiler/dex/mir_graph.h              |  4 ++--
 compiler/dex/quick/mir_to_lir-inl.h   |  4 ++++
 7 files changed, 48 insertions(+), 16 deletions(-)
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index b8cd67e3e7..057177b998 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -84,6 +84,7 @@ static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimi
   // (1 << kBBOpt) |
   // (1 << kMatch) |
   // (1 << kPromoteCompilerTemps) |
+  // (1 << kSuppressExceptionEdges) |
   0;
 
 static uint32_t kCompilerDebugFlags = 0 | // Enable debug/testing modes
@@ -212,7 +213,9 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler,
 
   if (compiler_backend == kPortable) {
     // Fused long branches not currently useful in bitcode.
-    cu.disable_opt |= (1 << kBranchFusing);
+    cu.disable_opt |=
+        (1 << kBranchFusing) |
+        (1 << kSuppressExceptionEdges);
   }
 
   if (cu.instruction_set == kMips) {
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
index 43f68554b5..b9b4178890 100644
--- a/compiler/dex/frontend.h
+++ b/compiler/dex/frontend.h
@@ -56,6 +56,7 @@ enum opt_control_vector {
   kMatch,
   kPromoteCompilerTemps,
   kBranchFusing,
+  kSuppressExceptionEdges,
 };
 
 // Force code generation paths for testing.
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index 35d29235f2..75883b7bd6 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -380,7 +380,9 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
         }
         mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
       }
-      mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+      if (mir->meta.throw_insn != NULL) {
+        mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+      }
       // Use side effect to note range check completed.
       (void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
       // Establish value number for loaded register. Note use of memory version.
@@ -419,7 +421,9 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
         }
         mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
       }
-      mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+      if (mir->meta.throw_insn != NULL) {
+        mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+      }
       // Use side effect to note range check completed.
       (void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
       // Rev the memory version
@@ -443,7 +447,9 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
       } else {
        null_checked_.insert(base);
       }
-      mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+      if (mir->meta.throw_insn != NULL) {
+        mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+      }
       uint16_t field_ref = mir->dalvikInsn.vC;
       uint16_t memory_version = GetMemoryVersion(base, field_ref);
       if (opcode == Instruction::IGET_WIDE) {
@@ -473,7 +479,9 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
       } else {
        null_checked_.insert(base);
       }
-      mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+      if (mir->meta.throw_insn != NULL) {
+        mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+      }
       uint16_t field_ref = mir->dalvikInsn.vC;
       AdvanceMemoryVersion(base, field_ref);
     }
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 11e19dc43f..d359ee2dfe 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1243,12 +1243,13 @@ bool MIRGraph::CountUses(struct BasicBlock* bb) {
     if (mir->ssa_rep == NULL) {
       continue;
     }
-    // Each level of nesting adds *16 to count, up to 3 levels deep.
-    uint32_t weight = std::min(3U, static_cast<uint32_t>(bb->nesting_depth) * 4);
+    // Each level of nesting adds *100 to count, up to 3 levels deep.
+    uint32_t depth = std::min(3U, static_cast<uint32_t>(bb->nesting_depth));
+    uint32_t weight = std::max(1U, depth * 100);
     for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
       int s_reg = mir->ssa_rep->uses[i];
       raw_use_counts_.Increment(s_reg);
-      use_counts_.Put(s_reg, use_counts_.Get(s_reg) + (1 << weight));
+      use_counts_.Put(s_reg, use_counts_.Get(s_reg) + weight);
     }
     if (!(cu_->disable_opt & (1 << kPromoteCompilerTemps))) {
       int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
@@ -1267,7 +1268,7 @@ bool MIRGraph::CountUses(struct BasicBlock* bb) {
       }
       if (uses_method_star) {
         raw_use_counts_.Increment(method_sreg_);
-        use_counts_.Put(method_sreg_, use_counts_.Get(method_sreg_) + (1 << weight));
+        use_counts_.Put(method_sreg_, use_counts_.Get(method_sreg_) + weight);
       }
     }
   }
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index cf758fc5da..deaf2ffe80 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -365,8 +365,8 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
 }
 
 /* Process instructions with the kSwitch flag */
-void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
-                                int flags) {
+BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
+                                       int width, int flags) {
   const uint16_t* switch_data =
       reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
   int size;
@@ -437,6 +437,7 @@ void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_
                                             /* create */ true, /* immed_pred_block_p */ NULL);
   cur_block->fall_through = fallthrough_block->id;
   fallthrough_block->predecessors->Insert(cur_block->id);
+  return cur_block;
 }
 
 /* Process instructions with the kThrow flag */
@@ -444,6 +445,9 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
                                       int width, int flags, ArenaBitVector* try_block_addr,
                                       const uint16_t* code_ptr, const uint16_t* code_end) {
   bool in_try_block = try_block_addr->IsBitSet(cur_offset);
+  bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
+  bool build_all_edges =
+      (cu_->disable_opt & (1 << kSuppressExceptionEdges)) || is_throw || in_try_block;
 
   /* In try block */
   if (in_try_block) {
@@ -473,7 +477,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
       cur_block->successor_blocks->Insert(successor_block_info);
       catch_block->predecessors->Insert(cur_block->id);
     }
-  } else {
+  } else if (build_all_edges) {
     BasicBlock *eh_block = NewMemBB(kExceptionHandling, num_blocks_++);
     cur_block->taken = eh_block->id;
     block_list_.Insert(eh_block);
@@ -481,7 +485,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
     eh_block->predecessors->Insert(cur_block->id);
   }
 
-  if (insn->dalvikInsn.opcode == Instruction::THROW) {
+  if (is_throw) {
     cur_block->explicit_throw = true;
     if (code_ptr < code_end) {
       // Force creation of new block following THROW via side-effect
@@ -494,6 +498,16 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
     }
   }
 
+  if (!build_all_edges) {
+    /*
+     * Even though there is an exception edge here, control cannot return to this
+     * method.  Thus, for the purposes of dataflow analysis and optimization, we can
+     * ignore the edge.  Doing this reduces compile time, and increases the scope
+     * of the basic-block level optimization pass.
+     */
+    return cur_block;
+  }
+
   /*
    * Split the potentially-throwing instruction into two parts.
    * The first half will be a pseudo-op that captures the exception
@@ -695,7 +709,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
       cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
                                   code_ptr, code_end);
     } else if (flags & Instruction::kSwitch) {
-      ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
+      cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
     }
     current_offset_ += width;
     BasicBlock *next_block = FindBlock(current_offset_, /* split */ false, /* create */
@@ -1100,6 +1114,7 @@ const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) {
 void MIRGraph::DumpMIRGraph() {
   BasicBlock* bb;
   const char* block_type_names[] = {
+    "Null Block",
     "Entry Block",
     "Code Block",
     "Exit Block",
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index a69dde0da3..8c20728a51 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -698,8 +698,8 @@ class MIRGraph {
   void ProcessTryCatchBlocks();
   BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
                                int flags, const uint16_t* code_ptr, const uint16_t* code_end);
-  void ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
-                        int flags);
+  BasicBlock* ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
+                               int flags);
   BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
                               int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
                               const uint16_t* code_end);
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 1a30b7aef0..f567b5c6dd 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -198,6 +198,10 @@ inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
     SetupRegMask(&lir->u.m.use_mask, lir->operands[3]);
   }
 
+  if (flags & REG_USE4) {
+    SetupRegMask(&lir->u.m.use_mask, lir->operands[4]);
+  }
+
   if (flags & SETS_CCODES) {
     lir->u.m.def_mask |= ENCODE_CCODE;
   }
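
The heart of the patch is the new gating in MIRGraph::ProcessCanThrow. Below is a distilled, standalone restatement of that predicate -- not the ART code itself: the enumerator name comes from the diff, but its ordinal and the free-function packaging are illustrative only.

#include <cstdint>

// Stand-in for the opt_control_vector enumerator added in frontend.h; the
// real ordinal depends on the enumerators that precede it.
enum opt_control_vector {
  kSuppressExceptionEdges = 11,
};

// An exception edge is materialized only when it can matter within this
// method: the optimization is disabled, the instruction is an explicit THROW,
// or the instruction sits inside a try block (so a local catch handler may
// run).  Otherwise the throw exits the method entirely and, for dataflow
// analysis and basic-block optimization, the edge can be ignored.
bool BuildAllEdges(uint32_t disable_opt, bool is_throw, bool in_try_block) {
  return (disable_opt & (1u << kSuppressExceptionEdges)) != 0 || is_throw || in_try_block;
}

When the edge is suppressed, ProcessCanThrow returns before the "split the potentially-throwing instruction into two parts" step, which appears to be why the local_value_numbering.cc hunks now NULL-check mir->meta.throw_insn before propagating optimization flags: the second half that throw_insn points at is no longer guaranteed to exist.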
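The patch also leaves a global kill switch in place: the commented-out entry in kCompilerOptimizerDisableFlags in frontend.cc, plus the bit the portable-backend path now sets on cu.disable_opt. A minimal self-contained model of how that disable mask composes (names from the diff; the enum ordinals here are illustrative):

#include <cstdint>

enum opt_control_vector {
  kBranchFusing = 10,        // illustrative ordinals; the real values come
  kSuppressExceptionEdges,   // from the full enum in frontend.h
};

// Mirrors the pattern in CompileMethod(): the portable backend turns off both
// branch fusing and exception-edge suppression by OR-ing bits into the mask.
int main() {
  uint32_t disable_opt = 0;
  bool is_portable = true;
  if (is_portable) {
    disable_opt |=
        (1 << kBranchFusing) |
        (1 << kSuppressExceptionEdges);
  }
  // With the bit set, BuildAllEdges-style checks keep every exception edge.
  return (disable_opt & (1 << kSuppressExceptionEdges)) ? 0 : 1;
}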
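The mir_dataflow.cc change is an independent fix to the use-count weighting in CountUses. The old code scaled the nesting depth by 4 and capped the result at 3 before using it as a shift amount, so every block at depth >= 1 collapsed to the same weight (1 << 3 == 8) and the "*16 per level" the old comment promised never happened. A self-contained sketch of the two formulas; OldWeight and NewWeight are hypothetical wrappers, not ART functions:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Old scheme: cap applied after scaling, then used as a shift amount, which
// flattens all nesting depths >= 1 to the same weight.
uint32_t OldWeight(uint32_t nesting_depth) {
  uint32_t weight = std::min(3U, nesting_depth * 4);
  return 1U << weight;
}

// New scheme: cap the depth itself, then scale linearly so deeper loops get
// proportionally heavier counts (depth 0 still contributes 1 per use).
uint32_t NewWeight(uint32_t nesting_depth) {
  uint32_t depth = std::min(3U, nesting_depth);
  return std::max(1U, depth * 100);
}

int main() {
  for (uint32_t d = 0; d <= 4; ++d) {
    // old: 1, 8, 8, 8, 8    new: 1, 100, 200, 300, 300
    std::printf("depth %u: old=%u new=%u\n", d, OldWeight(d), NewWeight(d));
  }
  return 0;
}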