From 2c45bc9137c29f886e69923535aff31a74d90829 Mon Sep 17 00:00:00 2001
From: Vladimir Marko <vmarko@google.com>
Date: Tue, 25 Oct 2016 16:54:12 +0100
Subject: Remove H[Reverse]PostOrderIterator and HInsertionOrderIterator.

Use range-based loops instead, introducing a helper function
ReverseRange() for iterating over a container in reverse order.
When the contents of the underlying container change inside the
loop, use an index-based loop that makes the container
modifications explicit; the old iterator interface hid them,
which could lead to subtle bugs.

Test: m test-art-host
Change-Id: I2a4e6c508b854c37a697fc4b1e8423a8c92c5ea0
---
 compiler/optimizing/bounds_check_elimination.cc    | 11 ++--
 compiler/optimizing/code_generator.cc              |  6 +-
 compiler/optimizing/dead_code_elimination.cc       | 38 ++++++-------
 compiler/optimizing/gvn.cc                         |  4 +-
 compiler/optimizing/induction_var_analysis.cc      |  3 +-
 compiler/optimizing/inliner.cc                     |  7 +--
 compiler/optimizing/instruction_builder.cc         |  7 +--
 compiler/optimizing/instruction_simplifier.cc      | 16 ++----
 compiler/optimizing/intrinsics.cc                  |  3 +-
 compiler/optimizing/licm.cc                        |  4 +-
 compiler/optimizing/linear_order.cc                |  3 +-
 compiler/optimizing/linear_order.h                 |  6 +-
 compiler/optimizing/liveness_test.cc               |  3 +-
 compiler/optimizing/load_store_elimination.cc      |  8 +--
 compiler/optimizing/nodes.cc                       | 36 +++++------
 compiler/optimizing/nodes.h                        | 66 +++++-----------------
 .../optimizing/prepare_for_register_allocation.cc  |  3 +-
 compiler/optimizing/reference_type_propagation.cc  |  7 +--
 .../optimizing/register_allocator_graph_color.cc   |  2 +-
 .../optimizing/register_allocator_linear_scan.cc   |  2 +-
 compiler/optimizing/select_generator.cc            |  3 +-
 compiler/optimizing/side_effects_analysis.cc       |  7 +--
 compiler/optimizing/ssa_builder.cc                 | 14 ++---
 compiler/optimizing/ssa_liveness_analysis.cc       | 10 ++--
 compiler/optimizing/ssa_phi_elimination.cc         |  9 +--
 25 files changed, 103 insertions(+), 175 deletions(-)

diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 529fc9e261..d2357a5d05 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -1845,8 +1845,8 @@ void BoundsCheckElimination::Run() {
   // that value dominated by that instruction fits in that range. Range of that
   // value can be narrowed further down in the dominator tree.
   BCEVisitor visitor(graph_, side_effects_, induction_analysis_);
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* current = it.Current();
+  for (size_t i = 0, size = graph_->GetReversePostOrder().size(); i != size; ++i) {
+    HBasicBlock* current = graph_->GetReversePostOrder()[i];
     if (visitor.IsAddedBlock(current)) {
       // Skip added blocks. Their effects are already taken care of.
       continue;
@@ -1855,8 +1855,11 @@ void BoundsCheckElimination::Run() {
       // Skip forward to the current block in case new basic blocks were inserted
       // (which always appear earlier in reverse post order) to avoid visiting the
       // same basic block twice.
-      for ( ; !it.Done() && it.Current() != current; it.Advance()) {
-      }
+      size_t new_size = graph_->GetReversePostOrder().size();
+      DCHECK_GE(new_size, size);
+      i += new_size - size;
+      DCHECK_EQ(current, graph_->GetReversePostOrder()[i]);
+      size = new_size;
     }

     // Perform cleanup.
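The BoundsCheckElimination hunk above is the clearest instance of the index-based pattern the commit message describes: the reverse post order grows while it is being walked, so the loop caches the size and bumps the index past blocks inserted earlier in the order. A minimal standalone sketch of the same technique, using std::vector in place of ART's ArenaVector — the names and data are illustrative, not ART code:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int> order = {10, 20, 30};
      for (std::size_t i = 0, size = order.size(); i != size; ++i) {
        int current = order[i];
        std::cout << "visiting " << current << '\n';
        if (current == 20) {
          // Simulate a pass inserting a new element earlier in the order,
          // as BCE does when it adds blocks during the visit.
          order.insert(order.begin(), 5);
        }
        // Skip forward past elements inserted before the current position so
        // nothing is visited twice; the cached `size` makes the growth
        // explicit instead of hiding it behind an iterator.
        std::size_t new_size = order.size();
        i += new_size - size;
        size = new_size;
      }
      // Prints 10, 20, 30: the element added mid-loop is deliberately
      // skipped, mirroring the "already taken care of" semantics of the pass.
    }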
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 0f8cdbb19b..8b450e11dc 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -671,9 +671,9 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
     return;
   }
   ArenaVector<HSuspendCheck*> loop_headers(graph.GetArena()->Adapter(kArenaAllocMisc));
-  for (HReversePostOrderIterator it(graph); !it.Done(); it.Advance()) {
-    if (it.Current()->IsLoopHeader()) {
-      HSuspendCheck* suspend_check = it.Current()->GetLoopInformation()->GetSuspendCheck();
+  for (HBasicBlock* block : graph.GetReversePostOrder()) {
+    if (block->IsLoopHeader()) {
+      HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
       if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
         loop_headers.push_back(suspend_check);
       }
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index adfe09ba9f..9de521ad8d 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -18,6 +18,7 @@

 #include "base/array_ref.h"
 #include "base/bit_vector-inl.h"
+#include "base/stl_util.h"
 #include "ssa_phi_elimination.h"

 namespace art {
@@ -168,8 +169,7 @@ bool HDeadCodeElimination::SimplifyIfs() {
   bool simplified_one_or_more_ifs = false;
   bool rerun_dominance_and_loop_analysis = false;

-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     HInstruction* last = block->GetLastInstruction();
     HInstruction* first = block->GetFirstInstruction();
     if (last->IsIf() &&
@@ -271,20 +271,22 @@ bool HDeadCodeElimination::SimplifyIfs() {
 }

 void HDeadCodeElimination::ConnectSuccessiveBlocks() {
-  // Order does not matter.
-  for (HReversePostOrderIterator it(*graph_); !it.Done();) {
-    HBasicBlock* block = it.Current();
-    if (block->IsEntryBlock() || !block->GetLastInstruction()->IsGoto()) {
-      it.Advance();
-      continue;
-    }
-    HBasicBlock* successor = block->GetSingleSuccessor();
-    if (successor->IsExitBlock() || successor->GetPredecessors().size() != 1u) {
-      it.Advance();
-      continue;
+  // Order does not matter. Skip the entry block by starting at index 1 in reverse post order.
+  for (size_t i = 1u, size = graph_->GetReversePostOrder().size(); i != size; ++i) {
+    HBasicBlock* block = graph_->GetReversePostOrder()[i];
+    DCHECK(!block->IsEntryBlock());
+    while (block->GetLastInstruction()->IsGoto()) {
+      HBasicBlock* successor = block->GetSingleSuccessor();
+      if (successor->IsExitBlock() || successor->GetPredecessors().size() != 1u) {
+        break;
+      }
+      DCHECK_LT(i, IndexOfElement(graph_->GetReversePostOrder(), successor));
+      block->MergeWith(successor);
+      --size;
+      DCHECK_EQ(size, graph_->GetReversePostOrder().size());
+      DCHECK_EQ(block, graph_->GetReversePostOrder()[i]);
+      // Reiterate on this block in case it can be merged with its new successor.
     }
-    block->MergeWith(successor);
-    // Reiterate on this block in case it can be merged with its new successor.
   }
 }

@@ -300,8 +302,7 @@ bool HDeadCodeElimination::RemoveDeadBlocks() {
   // Remove all dead blocks. Iterate in post order because removal needs the
   // block's chain of dominators and nested loops need to be updated from the
   // inside out.
-  for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetPostOrder()) {
     int id = block->GetBlockId();
     if (!live_blocks.IsBitSet(id)) {
       MaybeRecordDeadBlock(block);
@@ -332,8 +333,7 @@ bool HDeadCodeElimination::RemoveDeadBlocks() {
 void HDeadCodeElimination::RemoveDeadInstructions() {
   // Process basic blocks in post-order in the dominator tree, so that
   // a dead instruction depending on another dead instruction is removed.
-  for (HPostOrderIterator b(*graph_); !b.Done(); b.Advance()) {
-    HBasicBlock* block = b.Current();
+  for (HBasicBlock* block : graph_->GetPostOrder()) {
     // Traverse this block's instructions in backward order and remove
     // the unused ones.
     HBackwardInstructionIterator i(block->GetInstructions());
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 1e86b75075..f5931a2f81 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -411,8 +411,8 @@ void GlobalValueNumberer::Run() {

   // Use the reverse post order to ensure the non back-edge predecessors of a block are
   // visited before the block itself.
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    VisitBasicBlock(it.Current());
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    VisitBasicBlock(block);
   }
 }

diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 38937bf488..4bea3c362b 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -101,8 +101,7 @@ void HInductionVarAnalysis::Run() {
   // Detects sequence variables (generalized induction variables) during an outer to inner
   // traversal of all loops using Gerlek's algorithm. The order is important to enable
   // range analysis on outer loop while visiting inner loops.
-  for (HReversePostOrderIterator it_graph(*graph_); !it_graph.Done(); it_graph.Advance()) {
-    HBasicBlock* graph_block = it_graph.Current();
+  for (HBasicBlock* graph_block : graph_->GetReversePostOrder()) {
     // Don't analyze irreducible loops.
     if (graph_block->IsLoopHeader() && !graph_block->GetLoopInformation()->IsIrreducible()) {
       VisitLoop(graph_block->GetLoopInformation());
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 9faa98a388..cc420b3260 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1219,16 +1219,13 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
     return false;
   }

-  HReversePostOrderIterator it(*callee_graph);
-  it.Advance();  // Past the entry block, it does not contain instructions that prevent inlining.
   size_t number_of_instructions = 0;

   bool can_inline_environment =
       total_number_of_dex_registers_ < kMaximumNumberOfCumulatedDexRegisters;

-  for (; !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
-
+  // Skip the entry block, it does not contain instructions that prevent inlining.
+  for (HBasicBlock* block : callee_graph->GetReversePostOrderSkipEntryBlock()) {
     if (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible()) {
       // Don't inline methods with irreducible loops, they could prevent some
       // optimizations to run.
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 613e00843f..c8c4ca76fd 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -81,8 +81,7 @@ void HInstructionBuilder::InitializeBlockLocals() {
     // locals (guaranteed by HGraphBuilder) and that all try blocks have been
     // visited already (from HTryBoundary scoping and reverse post order).
     bool catch_block_visited = false;
-    for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-      HBasicBlock* current = it.Current();
+    for (HBasicBlock* current : graph_->GetReversePostOrder()) {
       if (current == current_block_) {
         catch_block_visited = true;
       } else if (current->IsTryBlock()) {
@@ -276,8 +275,8 @@ bool HInstructionBuilder::Build() {
     FindNativeDebugInfoLocations(native_debug_info_locations);
   }

-  for (HReversePostOrderIterator block_it(*graph_); !block_it.Done(); block_it.Advance()) {
-    current_block_ = block_it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    current_block_ = block;
     uint32_t block_dex_pc = current_block_->GetDexPc();

     InitializeBlockLocals();
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 3bb1c1dc21..e4d280f26d 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -124,20 +124,16 @@ void InstructionSimplifier::Run() {
 void InstructionSimplifierVisitor::Run() {
   // Iterate in reverse post order to open up more simplifications to users
   // of instructions that got simplified.
-  for (HReversePostOrderIterator it(*GetGraph()); !it.Done();) {
+  for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
     // The simplification of an instruction to another instruction may yield
     // possibilities for other simplifications. So although we perform a reverse
     // post order visit, we sometimes need to revisit an instruction index.
-    simplification_occurred_ = false;
-    VisitBasicBlock(it.Current());
-    if (simplification_occurred_ &&
-        (simplifications_at_current_position_ < kMaxSamePositionSimplifications)) {
-      // New simplifications may be applicable to the instruction at the
-      // current index, so don't advance the iterator.
-      continue;
-    }
+    do {
+      simplification_occurred_ = false;
+      VisitBasicBlock(block);
+    } while (simplification_occurred_ &&
+             (simplifications_at_current_position_ < kMaxSamePositionSimplifications));
     simplifications_at_current_position_ = 0;
-    it.Advance();
   }
 }

diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 8327a4c244..fc6ff7b197 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -133,8 +133,7 @@ static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke) {

 void IntrinsicsRecognizer::Run() {
   ScopedObjectAccess soa(Thread::Current());
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
          inst_it.Advance()) {
       HInstruction* inst = inst_it.Current();
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index a0ded74d6d..eb2d18dd88 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -15,6 +15,7 @@
  */

 #include "licm.h"
+
 #include "side_effects_analysis.h"

 namespace art {
@@ -90,8 +91,7 @@ void LICM::Run() {
   }

   // Post order visit to visit inner loops before outer loops.
-  for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetPostOrder()) {
     if (!block->IsLoopHeader()) {
       // Only visit the loop when we reach the header.
       continue;
diff --git a/compiler/optimizing/linear_order.cc b/compiler/optimizing/linear_order.cc
index 3af212fa48..80cecd41dc 100644
--- a/compiler/optimizing/linear_order.cc
+++ b/compiler/optimizing/linear_order.cc
@@ -94,8 +94,7 @@ void LinearizeGraph(const HGraph* graph,
   //      for it.
   ArenaVector<uint32_t> forward_predecessors(graph->GetBlocks().size(),
                                              allocator->Adapter(kArenaAllocLinearOrder));
-  for (HReversePostOrderIterator it(*graph); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph->GetReversePostOrder()) {
     size_t number_of_forward_predecessors = block->GetPredecessors().size();
     if (block->IsLoopHeader()) {
       number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges();
diff --git a/compiler/optimizing/linear_order.h b/compiler/optimizing/linear_order.h
index cdbdd0714b..7122d67be9 100644
--- a/compiler/optimizing/linear_order.h
+++ b/compiler/optimizing/linear_order.h
@@ -30,16 +30,12 @@ namespace art {
 //
 //   for (HBasicBlock* block : linear_order)                   // linear order
 //
-//   for (HBasicBlock* block : LinearPostOrder(linear_order))  // linear post order
+//   for (HBasicBlock* block : ReverseRange(linear_order))     // linear post order
 //
 void LinearizeGraph(const HGraph* graph,
                     ArenaAllocator* allocator,
                     ArenaVector<HBasicBlock*>* linear_order);

-inline auto LinearPostOrder(const ArenaVector<HBasicBlock*>& linear_order) {
-  return MakeIterationRange(linear_order.rbegin(), linear_order.rend());
-}
-
 }  // namespace art

 #endif  // ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index bd74368e17..37b58ded59 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -56,8 +56,7 @@ static void TestCode(const uint16_t* data, const char* expected) {
   liveness.Analyze();

   std::ostringstream buffer;
-  for (HInsertionOrderIterator it(*graph); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph->GetBlocks()) {
     buffer << "Block " << block->GetBlockId() << std::endl;
     size_t ssa_values = liveness.GetNumberOfSsaValues();
     BitVector* live_in = liveness.GetLiveInSet(*block);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 820fa29597..b91e9e6868 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -1046,8 +1046,8 @@ void LoadStoreElimination::Run() {
     return;
   }
   HeapLocationCollector heap_location_collector(graph_);
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    heap_location_collector.VisitBasicBlock(it.Current());
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    heap_location_collector.VisitBasicBlock(block);
   }
   if (heap_location_collector.GetNumberOfHeapLocations() > kMaxNumberOfHeapLocations) {
     // Bail out if there are too many heap locations to deal with.
@@ -1065,8 +1065,8 @@ void LoadStoreElimination::Run() {
   }
   heap_location_collector.BuildAliasingMatrix();
   LSEVisitor lse_visitor(graph_, heap_location_collector, side_effects_);
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    lse_visitor.VisitBasicBlock(it.Current());
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    lse_visitor.VisitBasicBlock(block);
   }
   lse_visitor.RemoveInstructions();
 }
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 59cc0091bf..45c7eb1a46 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -179,16 +179,16 @@ GraphAnalysisResult HGraph::BuildDominatorTree() {
 }

 void HGraph::ClearDominanceInformation() {
-  for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-    it.Current()->ClearDominanceInformation();
+  for (HBasicBlock* block : GetReversePostOrder()) {
+    block->ClearDominanceInformation();
   }
   reverse_post_order_.clear();
 }

 void HGraph::ClearLoopInformation() {
   SetHasIrreducibleLoops(false);
-  for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-    it.Current()->SetLoopInformation(nullptr);
+  for (HBasicBlock* block : GetReversePostOrder()) {
+    block->SetLoopInformation(nullptr);
   }
 }

@@ -275,8 +275,7 @@ void HGraph::ComputeDominanceInformation() {
     bool update_occurred = true;
     while (update_occurred) {
       update_occurred = false;
-      for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-        HBasicBlock* block = it.Current();
+      for (HBasicBlock* block : GetReversePostOrder()) {
         for (HBasicBlock* successor : block->GetSuccessors()) {
           update_occurred |= UpdateDominatorOfSuccessor(block, successor);
         }
@@ -287,8 +286,7 @@ void HGraph::ComputeDominanceInformation() {
   // Make sure that there are no remaining blocks whose dominator information
   // needs to be updated.
   if (kIsDebugBuild) {
-    for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-      HBasicBlock* block = it.Current();
+    for (HBasicBlock* block : GetReversePostOrder()) {
       for (HBasicBlock* successor : block->GetSuccessors()) {
         DCHECK(!UpdateDominatorOfSuccessor(block, successor));
       }
@@ -297,8 +295,7 @@ void HGraph::ComputeDominanceInformation() {

   // Populate `dominated_blocks_` information after computing all dominators.
   // The potential presence of irreducible loops requires to do it after.
-  for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : GetReversePostOrder()) {
     if (!block->IsEntryBlock()) {
       block->GetDominator()->AddDominatedBlock(block);
     }
@@ -375,8 +372,7 @@ void HGraph::SimplifyLoop(HBasicBlock* header) {
 void HGraph::ComputeTryBlockInformation() {
   // Iterate in reverse post order to propagate try membership information from
   // predecessors to their successors.
-  for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : GetReversePostOrder()) {
     if (block->IsEntryBlock() || block->IsCatchBlock()) {
       // Catch blocks after simplification have only exceptional predecessors
       // and hence are never in tries.
@@ -446,8 +442,7 @@ GraphAnalysisResult HGraph::AnalyzeLoops() const {
   // We iterate post order to ensure we visit inner loops before outer loops.
   // `PopulateRecursive` needs this guarantee to know whether a natural loop
   // contains an irreducible loop.
-  for (HPostOrderIterator it(*this); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : GetPostOrder()) {
     if (block->IsLoopHeader()) {
       if (block->IsCatchBlock()) {
         // TODO: Dealing with exceptional back edges could be tricky because
@@ -1134,8 +1129,8 @@ void HGraphVisitor::VisitInsertionOrder() {
 }

 void HGraphVisitor::VisitReversePostOrder() {
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    VisitBasicBlock(it.Current());
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    VisitBasicBlock(block);
   }
 }

@@ -1986,10 +1981,8 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
   // Update the environments in this graph to have the invoke's environment
   // as parent.
   {
-    HReversePostOrderIterator it(*this);
-    it.Advance();  // Skip the entry block, we do not need to update the entry's suspend check.
-    for (; !it.Done(); it.Advance()) {
-      HBasicBlock* block = it.Current();
+    // Skip the entry block, we do not need to update the entry's suspend check.
+    for (HBasicBlock* block : GetReversePostOrderSkipEntryBlock()) {
       for (HInstructionIterator instr_it(block->GetInstructions());
            !instr_it.Done();
            instr_it.Advance()) {
@@ -2070,8 +2063,7 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {

   // Do a reverse post order of the blocks in the callee and do (1), (2), (3)
   // and (4) to the blocks that apply.
-  for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-    HBasicBlock* current = it.Current();
+  for (HBasicBlock* current : GetReversePostOrder()) {
     if (current != exit_block_ && current != entry_block_ && current != first) {
       DCHECK(current->GetTryCatchInformation() == nullptr);
       DCHECK(current->GetGraph() == this);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6f4f3c9505..710f709c89 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -25,6 +25,7 @@
 #include "base/arena_containers.h"
 #include "base/arena_object.h"
 #include "base/array_ref.h"
+#include "base/iteration_range.h"
 #include "base/stl_util.h"
 #include "base/transform_array_ref.h"
 #include "dex_file.h"
@@ -460,10 +461,23 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
     return reverse_post_order_;
   }

+  ArrayRef<HBasicBlock* const> GetReversePostOrderSkipEntryBlock() {
+    DCHECK(GetReversePostOrder()[0] == entry_block_);
+    return ArrayRef<HBasicBlock* const>(GetReversePostOrder()).SubArray(1);
+  }
+
+  IterationRange<ArenaVector<HBasicBlock*>::const_reverse_iterator> GetPostOrder() const {
+    return ReverseRange(GetReversePostOrder());
+  }
+
   const ArenaVector<HBasicBlock*>& GetLinearOrder() const {
     return linear_order_;
   }

+  IterationRange<ArenaVector<HBasicBlock*>::const_reverse_iterator> GetLinearPostOrder() const {
+    return ReverseRange(GetLinearOrder());
+  }
+
   bool HasBoundsChecks() const {
     return has_bounds_checks_;
   }
@@ -6615,58 +6629,6 @@ class HGraphDelegateVisitor : public HGraphVisitor {
   DISALLOW_COPY_AND_ASSIGN(HGraphDelegateVisitor);
 };

-class HInsertionOrderIterator : public ValueObject {
- public:
-  explicit HInsertionOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {}
-
-  bool Done() const { return index_ == graph_.GetBlocks().size(); }
-  HBasicBlock* Current() const { return graph_.GetBlocks()[index_]; }
-  void Advance() { ++index_; }
-
- private:
-  const HGraph& graph_;
-  size_t index_;
-
-  DISALLOW_COPY_AND_ASSIGN(HInsertionOrderIterator);
-};
-
-class HReversePostOrderIterator : public ValueObject {
- public:
-  explicit HReversePostOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {
-    // Check that reverse post order of the graph has been built.
-    DCHECK(!graph.GetReversePostOrder().empty());
-  }
-
-  bool Done() const { return index_ == graph_.GetReversePostOrder().size(); }
-  HBasicBlock* Current() const { return graph_.GetReversePostOrder()[index_]; }
-  void Advance() { ++index_; }
-
- private:
-  const HGraph& graph_;
-  size_t index_;
-
-  DISALLOW_COPY_AND_ASSIGN(HReversePostOrderIterator);
-};
-
-class HPostOrderIterator : public ValueObject {
- public:
-  explicit HPostOrderIterator(const HGraph& graph)
-      : graph_(graph), index_(graph_.GetReversePostOrder().size()) {
-    // Check that reverse post order of the graph has been built.
-    DCHECK(!graph.GetReversePostOrder().empty());
-  }
-
-  bool Done() const { return index_ == 0; }
-  HBasicBlock* Current() const { return graph_.GetReversePostOrder()[index_ - 1u]; }
-  void Advance() { --index_; }
-
- private:
-  const HGraph& graph_;
-  size_t index_;
-
-  DISALLOW_COPY_AND_ASSIGN(HPostOrderIterator);
-};
-
 // Iterator over the blocks that are part of the loop. Includes blocks part
 // of an inner loop. The order in which the blocks are iterated is on their
 // block id.
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 7b66ef3627..0db60882db 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -20,8 +20,7 @@ namespace art {

 void PrepareForRegisterAllocation::Run() {
   // Order does not matter.
-  for (HReversePostOrderIterator it(*GetGraph()); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
     // No need to visit the phis.
     for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
          inst_it.Advance()) {
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index d93c9ddc39..d588deaace 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -123,8 +123,7 @@ void ReferenceTypePropagation::ValidateTypes() {
   // TODO: move this to the graph checker.
   if (kIsDebugBuild) {
     ScopedObjectAccess soa(Thread::Current());
-    for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-      HBasicBlock* block = it.Current();
+    for (HBasicBlock* block : graph_->GetReversePostOrder()) {
       for (HInstructionIterator iti(block->GetInstructions()); !iti.Done(); iti.Advance()) {
         HInstruction* instr = iti.Current();
         if (instr->GetType() == Primitive::kPrimNot) {
@@ -158,8 +157,8 @@ void ReferenceTypePropagation::Run() {
   // To properly propagate type info we need to visit in the dominator-based order.
   // Reverse post order guarantees a node's dominators are visited first.
   // We take advantage of this order in `VisitBasicBlock`.
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    VisitBasicBlock(it.Current());
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    VisitBasicBlock(block);
   }

   ProcessWorklist();
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index 961077419e..aa0d3710fa 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -758,7 +758,7 @@ bool RegisterAllocatorGraphColor::Validate(bool log_fatal_on_failure) {
 }

 void RegisterAllocatorGraphColor::ProcessInstructions() {
-  for (HBasicBlock* block : LinearPostOrder(codegen_->GetGraph()->GetLinearOrder())) {
+  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearPostOrder()) {
     // Note that we currently depend on this ordering, since some helper
     // code is designed for linear scan register allocation.
     for (HBackwardInstructionIterator instr_it(block->GetInstructions());
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 4e69bc8999..1a391ce9bb 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -163,7 +163,7 @@ void RegisterAllocatorLinearScan::BlockRegisters(size_t start, size_t end, bool
 void RegisterAllocatorLinearScan::AllocateRegistersInternal() {
   // Iterate post-order, to ensure the list is sorted, and the last added interval
   // is the one with the lowest start position.
-  for (HBasicBlock* block : LinearPostOrder(codegen_->GetGraph()->GetLinearOrder())) {
+  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearPostOrder()) {
     for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
          back_it.Advance()) {
       ProcessInstruction(back_it.Current());
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index e409035d9d..46d0d0eb65 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -76,8 +76,7 @@ void HSelectGenerator::Run() {
   // Iterate in post order in the unlikely case that removing one occurrence of
   // the selection pattern empties a branch block of another occurrence.
   // Otherwise the order does not matter.
-  for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetPostOrder()) {
     if (!block->EndsWithIf()) continue;

     // Find elements of the diamond pattern.
diff --git a/compiler/optimizing/side_effects_analysis.cc b/compiler/optimizing/side_effects_analysis.cc
index 1dc69867b4..6d82e8e06d 100644
--- a/compiler/optimizing/side_effects_analysis.cc
+++ b/compiler/optimizing/side_effects_analysis.cc
@@ -26,8 +26,7 @@ void SideEffectsAnalysis::Run() {

   // In DEBUG mode, ensure side effects are properly initialized to empty.
   if (kIsDebugBuild) {
-    for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-      HBasicBlock* block = it.Current();
+    for (HBasicBlock* block : graph_->GetReversePostOrder()) {
       SideEffects effects = GetBlockEffects(block);
       DCHECK(effects.DoesNothing());
       if (block->IsLoopHeader()) {
@@ -38,9 +37,7 @@ void SideEffectsAnalysis::Run() {
   }

   // Do a post order visit to ensure we visit a loop header after its loop body.
-  for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
-
+  for (HBasicBlock* block : graph_->GetPostOrder()) {
     SideEffects effects = SideEffects::None();
     // Update `effects` with the side effects of all instructions in this block.
     for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 03807ba1ee..ae1e369999 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -25,8 +25,8 @@ namespace art {

 void SsaBuilder::FixNullConstantType() {
   // The order doesn't matter here.
-  for (HReversePostOrderIterator itb(*graph_); !itb.Done(); itb.Advance()) {
-    for (HInstructionIterator it(itb.Current()->GetInstructions()); !it.Done(); it.Advance()) {
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* equality_instr = it.Current();
       if (!equality_instr->IsEqual() && !equality_instr->IsNotEqual()) {
         continue;
@@ -57,8 +57,8 @@ void SsaBuilder::FixNullConstantType() {

 void SsaBuilder::EquivalentPhisCleanup() {
   // The order doesn't matter here.
-  for (HReversePostOrderIterator itb(*graph_); !itb.Done(); itb.Advance()) {
-    for (HInstructionIterator it(itb.Current()->GetPhis()); !it.Done(); it.Advance()) {
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
       HPhi* phi = it.Current()->AsPhi();
       HPhi* next = phi->GetNextEquivalentPhiWithSameType();
       if (next != nullptr) {
@@ -79,8 +79,7 @@ void SsaBuilder::EquivalentPhisCleanup() {
 }

 void SsaBuilder::FixEnvironmentPhis() {
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     for (HInstructionIterator it_phis(block->GetPhis()); !it_phis.Done(); it_phis.Advance()) {
       HPhi* phi = it_phis.Current()->AsPhi();
       // If the phi is not dead, or has no environment uses, there is nothing to do.
@@ -228,8 +227,7 @@ bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist) {
 void SsaBuilder::RunPrimitiveTypePropagation() {
   ArenaVector<HPhi*> worklist(graph_->GetArena()->Adapter(kArenaAllocGraphBuilder));

-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     if (block->IsLoopHeader()) {
       for (HInstructionIterator phi_it(block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
         HPhi* phi = phi_it.Current()->AsPhi();
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 76cf8fe1ae..e8e12e1a55 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -139,7 +139,7 @@ static void RecursivelyProcessInputs(HInstruction* current,
 void SsaLivenessAnalysis::ComputeLiveRanges() {
   // Do a post order visit, adding inputs of instructions live in the block where
   // that instruction is defined, and killing instructions that are being visited.
-  for (HBasicBlock* block : LinearPostOrder(graph_->GetLinearOrder())) {
+  for (HBasicBlock* block : ReverseRange(graph_->GetLinearOrder())) {
     BitVector* kill = GetKillSet(*block);
     BitVector* live_in = GetLiveInSet(*block);
@@ -256,15 +256,13 @@ void SsaLivenessAnalysis::ComputeLiveInAndLiveOutSets() {
   do {
     changed = false;

-    for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-      const HBasicBlock& block = *it.Current();
-
+    for (const HBasicBlock* block : graph_->GetPostOrder()) {
       // The live_in set depends on the kill set (which does not
       // change in this loop), and the live_out set. If the live_out
       // set does not change, there is no need to update the live_in set.
-      if (UpdateLiveOut(block) && UpdateLiveIn(block)) {
+      if (UpdateLiveOut(*block) && UpdateLiveIn(*block)) {
         if (kIsDebugBuild) {
-          CheckNoLiveInIrreducibleLoop(block);
+          CheckNoLiveInIrreducibleLoop(*block);
         }
         changed = true;
       }
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index b1ec99ab8e..aec7a3c555 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -34,8 +34,7 @@ void SsaDeadPhiElimination::MarkDeadPhis() {
   ArenaSet<HInstruction*> initially_live(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination));

   // Add to the worklist phis referenced by non-phi instructions.
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
       HPhi* phi = inst_it.Current()->AsPhi();
       if (phi->IsDead()) {
@@ -84,8 +83,7 @@ void SsaDeadPhiElimination::EliminateDeadPhis() {
   // Remove phis that are not live. Visit in post order so that phis
   // that are not inputs of loop phis can be removed when they have
   // no users left (dead phis might use dead phis).
-  for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetPostOrder()) {
     HInstruction* current = block->GetFirstPhi();
     HInstruction* next = nullptr;
     HPhi* phi;
@@ -119,8 +117,7 @@ void SsaDeadPhiElimination::EliminateDeadPhis() {
 void SsaRedundantPhiElimination::Run() {
   // Add all phis in the worklist. Order does not matter for correctness, and
   // neither will necessarily converge faster.
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
       worklist_.push_back(inst_it.Current()->AsPhi());
     }
-- 
cgit v1.2.3-59-g8ed1b
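The range-based loops above lean on two small helpers that live outside this directory and are therefore absent from the diff: IterationRange (base/iteration_range.h, newly included in nodes.h) and ReverseRange (base/stl_util.h, newly included in dead_code_elimination.cc). A minimal sketch consistent with how GetPostOrder() and GetLinearPostOrder() use them — an illustration of the plausible shape, not the actual ART definitions:

    #include <iostream>
    #include <vector>

    // A begin/end pair usable directly in a range-based for loop.
    template <typename Iter>
    class IterationRange {
     public:
      IterationRange(Iter first, Iter last) : first_(first), last_(last) {}
      Iter begin() const { return first_; }
      Iter end() const { return last_; }

     private:
      const Iter first_;
      const Iter last_;
    };

    template <typename Iter>
    IterationRange<Iter> MakeIterationRange(Iter first, Iter last) {
      return IterationRange<Iter>(first, last);
    }

    // Adapts a container's reverse iterators for a range-based loop, which is
    // how the patch derives post order from the stored reverse post order and
    // linear post order from the stored linear order.
    template <typename Container>
    auto ReverseRange(Container& c) {
      return MakeIterationRange(c.rbegin(), c.rend());
    }

    int main() {
      std::vector<int> reverse_post_order = {1, 2, 3};
      for (int block : ReverseRange(reverse_post_order)) {
        std::cout << block << '\n';  // Prints 3, 2, 1: post order.
      }
    }

Storing only the reverse post order and deriving post order on demand keeps one canonical ordering per graph, so the removed iterator classes have no state left to encapsulate.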