Remove H[Reverse]PostOrderIterator and HInsertionOrderIterator.
Use range-based loops instead, introducing the helper function
ReverseRange() for iterating over a container in reverse order.
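
For reference, here is a minimal self-contained sketch of what
ReverseRange() amounts to; the IterationRange wrapper stands in for
ART's base/iteration_range.h, and exact names and signatures may
differ:

    // Simple begin()/end() pair usable in range-based for loops.
    template <typename Iterator>
    class IterationRange {
     public:
      IterationRange(Iterator first, Iterator last)
          : first_(first), last_(last) {}
      Iterator begin() const { return first_; }
      Iterator end() const { return last_; }
     private:
      const Iterator first_;
      const Iterator last_;
    };

    // ReverseRange() merely adapts the container's reverse iterators.
    template <typename Container>
    auto ReverseRange(const Container& c) {
      return IterationRange<typename Container::const_reverse_iterator>(
          c.rbegin(), c.rend());
    }

With this, a loop such as "for (HBasicBlock* block :
ReverseRange(blocks))" visits the container back to front, which is
how GetPostOrder() is derived from the stored reverse post order.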
When the contents of the underlying container change inside
the loop, use an index-based loop that makes the container
modifications explicit; the old iterator interface hid them,
which could lead to subtle bugs.
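
As an illustration of that index-based pattern (hypothetical code, not
taken from this change), re-reading size() makes the mutation visible
right at the loop site:

    #include <cstddef>
    #include <vector>

    void GrowWhileIterating(std::vector<int>* items) {
      for (size_t i = 0, size = items->size(); i != size; ++i) {
        if ((*items)[i] == 0) {
          items->push_back(1);   // The container grows inside the loop.
          size = items->size();  // Deliberately pick up the new element.
        }
      }
    }

A range-based loop over *items would be invalid here, since push_back()
can invalidate the iterators; the explicit index and size expose the
interaction with the modification instead of hiding it.
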
Test: m test-art-host
Change-Id: I2a4e6c508b854c37a697fc4b1e8423a8c92c5ea0
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 529fc9e..d2357a5 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -1845,8 +1845,8 @@
// that value dominated by that instruction fits in that range. Range of that
// value can be narrowed further down in the dominator tree.
BCEVisitor visitor(graph_, side_effects_, induction_analysis_);
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* current = it.Current();
+ for (size_t i = 0, size = graph_->GetReversePostOrder().size(); i != size; ++i) {
+ HBasicBlock* current = graph_->GetReversePostOrder()[i];
if (visitor.IsAddedBlock(current)) {
// Skip added blocks. Their effects are already taken care of.
continue;
@@ -1855,8 +1855,11 @@
// Skip forward to the current block in case new basic blocks were inserted
// (which always appear earlier in reverse post order) to avoid visiting the
// same basic block twice.
- for ( ; !it.Done() && it.Current() != current; it.Advance()) {
- }
+ size_t new_size = graph_->GetReversePostOrder().size();
+ DCHECK_GE(new_size, size);
+ i += new_size - size;
+ DCHECK_EQ(current, graph_->GetReversePostOrder()[i]);
+ size = new_size;
}
// Perform cleanup.
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 0f8cdbb..8b450e1 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -671,9 +671,9 @@
return;
}
ArenaVector<HSuspendCheck*> loop_headers(graph.GetArena()->Adapter(kArenaAllocMisc));
- for (HReversePostOrderIterator it(graph); !it.Done(); it.Advance()) {
- if (it.Current()->IsLoopHeader()) {
- HSuspendCheck* suspend_check = it.Current()->GetLoopInformation()->GetSuspendCheck();
+ for (HBasicBlock* block : graph.GetReversePostOrder()) {
+ if (block->IsLoopHeader()) {
+ HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
loop_headers.push_back(suspend_check);
}
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index adfe09b..9de521a 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -18,6 +18,7 @@
#include "base/array_ref.h"
#include "base/bit_vector-inl.h"
+#include "base/stl_util.h"
#include "ssa_phi_elimination.h"
namespace art {
@@ -168,8 +169,7 @@
bool simplified_one_or_more_ifs = false;
bool rerun_dominance_and_loop_analysis = false;
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
HInstruction* last = block->GetLastInstruction();
HInstruction* first = block->GetFirstInstruction();
if (last->IsIf() &&
@@ -271,20 +271,22 @@
}
void HDeadCodeElimination::ConnectSuccessiveBlocks() {
- // Order does not matter.
- for (HReversePostOrderIterator it(*graph_); !it.Done();) {
- HBasicBlock* block = it.Current();
- if (block->IsEntryBlock() || !block->GetLastInstruction()->IsGoto()) {
- it.Advance();
- continue;
+ // Order does not matter. Skip the entry block by starting at index 1 in reverse post order.
+ for (size_t i = 1u, size = graph_->GetReversePostOrder().size(); i != size; ++i) {
+ HBasicBlock* block = graph_->GetReversePostOrder()[i];
+ DCHECK(!block->IsEntryBlock());
+ while (block->GetLastInstruction()->IsGoto()) {
+ HBasicBlock* successor = block->GetSingleSuccessor();
+ if (successor->IsExitBlock() || successor->GetPredecessors().size() != 1u) {
+ break;
+ }
+ DCHECK_LT(i, IndexOfElement(graph_->GetReversePostOrder(), successor));
+ block->MergeWith(successor);
+ --size;
+ DCHECK_EQ(size, graph_->GetReversePostOrder().size());
+ DCHECK_EQ(block, graph_->GetReversePostOrder()[i]);
+ // Reiterate on this block in case it can be merged with its new successor.
}
- HBasicBlock* successor = block->GetSingleSuccessor();
- if (successor->IsExitBlock() || successor->GetPredecessors().size() != 1u) {
- it.Advance();
- continue;
- }
- block->MergeWith(successor);
- // Reiterate on this block in case it can be merged with its new successor.
}
}
@@ -300,8 +302,7 @@
// Remove all dead blocks. Iterate in post order because removal needs the
// block's chain of dominators and nested loops need to be updated from the
// inside out.
- for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetPostOrder()) {
int id = block->GetBlockId();
if (!live_blocks.IsBitSet(id)) {
MaybeRecordDeadBlock(block);
@@ -332,8 +333,7 @@
void HDeadCodeElimination::RemoveDeadInstructions() {
// Process basic blocks in post-order in the dominator tree, so that
// a dead instruction depending on another dead instruction is removed.
- for (HPostOrderIterator b(*graph_); !b.Done(); b.Advance()) {
- HBasicBlock* block = b.Current();
+ for (HBasicBlock* block : graph_->GetPostOrder()) {
// Traverse this block's instructions in backward order and remove
// the unused ones.
HBackwardInstructionIterator i(block->GetInstructions());
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 1e86b75..f5931a2 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -411,8 +411,8 @@
// Use the reverse post order to ensure the non back-edge predecessors of a block are
// visited before the block itself.
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- VisitBasicBlock(it.Current());
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ VisitBasicBlock(block);
}
}
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 38937bf..4bea3c3 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -101,8 +101,7 @@
// Detects sequence variables (generalized induction variables) during an outer-to-inner
// traversal of all loops using Gerlek's algorithm. The order is important to enable
// range analysis on the outer loop while visiting inner loops.
- for (HReversePostOrderIterator it_graph(*graph_); !it_graph.Done(); it_graph.Advance()) {
- HBasicBlock* graph_block = it_graph.Current();
+ for (HBasicBlock* graph_block : graph_->GetReversePostOrder()) {
// Don't analyze irreducible loops.
if (graph_block->IsLoopHeader() && !graph_block->GetLoopInformation()->IsIrreducible()) {
VisitLoop(graph_block->GetLoopInformation());
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 9faa98a..cc420b3 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1219,16 +1219,13 @@
return false;
}
- HReversePostOrderIterator it(*callee_graph);
- it.Advance(); // Past the entry block, it does not contain instructions that prevent inlining.
size_t number_of_instructions = 0;
bool can_inline_environment =
total_number_of_dex_registers_ < kMaximumNumberOfCumulatedDexRegisters;
- for (; !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
-
+ // Skip the entry block; it does not contain instructions that prevent inlining.
+ for (HBasicBlock* block : callee_graph->GetReversePostOrderSkipEntryBlock()) {
if (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible()) {
// Don't inline methods with irreducible loops; they could prevent some
// optimizations from running.
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 613e008..c8c4ca7 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -81,8 +81,7 @@
// locals (guaranteed by HGraphBuilder) and that all try blocks have been
// visited already (from HTryBoundary scoping and reverse post order).
bool catch_block_visited = false;
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* current = it.Current();
+ for (HBasicBlock* current : graph_->GetReversePostOrder()) {
if (current == current_block_) {
catch_block_visited = true;
} else if (current->IsTryBlock()) {
@@ -276,8 +275,8 @@
FindNativeDebugInfoLocations(native_debug_info_locations);
}
- for (HReversePostOrderIterator block_it(*graph_); !block_it.Done(); block_it.Advance()) {
- current_block_ = block_it.Current();
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ current_block_ = block;
uint32_t block_dex_pc = current_block_->GetDexPc();
InitializeBlockLocals();
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 3bb1c1d..e4d280f 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -124,20 +124,16 @@
void InstructionSimplifierVisitor::Run() {
// Iterate in reverse post order to open up more simplifications to users
// of instructions that got simplified.
- for (HReversePostOrderIterator it(*GetGraph()); !it.Done();) {
+ for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
// The simplification of an instruction to another instruction may yield
// possibilities for other simplifications. So although we perform a reverse
// post order visit, we sometimes need to revisit an instruction index.
- simplification_occurred_ = false;
- VisitBasicBlock(it.Current());
- if (simplification_occurred_ &&
- (simplifications_at_current_position_ < kMaxSamePositionSimplifications)) {
- // New simplifications may be applicable to the instruction at the
- // current index, so don't advance the iterator.
- continue;
- }
+ do {
+ simplification_occurred_ = false;
+ VisitBasicBlock(block);
+ } while (simplification_occurred_ &&
+ (simplifications_at_current_position_ < kMaxSamePositionSimplifications));
simplifications_at_current_position_ = 0;
- it.Advance();
}
}
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 8327a4c..fc6ff7b 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -133,8 +133,7 @@
void IntrinsicsRecognizer::Run() {
ScopedObjectAccess soa(Thread::Current());
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
inst_it.Advance()) {
HInstruction* inst = inst_it.Current();
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index a0ded74..eb2d18d 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -15,6 +15,7 @@
*/
#include "licm.h"
+
#include "side_effects_analysis.h"
namespace art {
@@ -90,8 +91,7 @@
}
// Post order visit to visit inner loops before outer loops.
- for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetPostOrder()) {
if (!block->IsLoopHeader()) {
// Only visit the loop when we reach the header.
continue;
diff --git a/compiler/optimizing/linear_order.cc b/compiler/optimizing/linear_order.cc
index 3af212f..80cecd4 100644
--- a/compiler/optimizing/linear_order.cc
+++ b/compiler/optimizing/linear_order.cc
@@ -94,8 +94,7 @@
// for it.
ArenaVector<uint32_t> forward_predecessors(graph->GetBlocks().size(),
allocator->Adapter(kArenaAllocLinearOrder));
- for (HReversePostOrderIterator it(*graph); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph->GetReversePostOrder()) {
size_t number_of_forward_predecessors = block->GetPredecessors().size();
if (block->IsLoopHeader()) {
number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges();
diff --git a/compiler/optimizing/linear_order.h b/compiler/optimizing/linear_order.h
index cdbdd07..7122d67 100644
--- a/compiler/optimizing/linear_order.h
+++ b/compiler/optimizing/linear_order.h
@@ -30,16 +30,12 @@
//
// for (HBasicBlock* block : linear_order) // linear order
//
-// for (HBasicBlock* block : LinearPostOrder(linear_order)) // linear post order
+// for (HBasicBlock* block : ReverseRange(linear_order)) // linear post order
//
void LinearizeGraph(const HGraph* graph,
ArenaAllocator* allocator,
ArenaVector<HBasicBlock*>* linear_order);
-inline auto LinearPostOrder(const ArenaVector<HBasicBlock*>& linear_order) {
- return MakeIterationRange(linear_order.rbegin(), linear_order.rend());
-}
-
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index bd74368..37b58de 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -56,8 +56,7 @@
liveness.Analyze();
std::ostringstream buffer;
- for (HInsertionOrderIterator it(*graph); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph->GetBlocks()) {
buffer << "Block " << block->GetBlockId() << std::endl;
size_t ssa_values = liveness.GetNumberOfSsaValues();
BitVector* live_in = liveness.GetLiveInSet(*block);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 820fa29..b91e9e6 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -1046,8 +1046,8 @@
return;
}
HeapLocationCollector heap_location_collector(graph_);
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- heap_location_collector.VisitBasicBlock(it.Current());
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ heap_location_collector.VisitBasicBlock(block);
}
if (heap_location_collector.GetNumberOfHeapLocations() > kMaxNumberOfHeapLocations) {
// Bail out if there are too many heap locations to deal with.
@@ -1065,8 +1065,8 @@
}
heap_location_collector.BuildAliasingMatrix();
LSEVisitor lse_visitor(graph_, heap_location_collector, side_effects_);
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- lse_visitor.VisitBasicBlock(it.Current());
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ lse_visitor.VisitBasicBlock(block);
}
lse_visitor.RemoveInstructions();
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 59cc009..45c7eb1 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -179,16 +179,16 @@
}
void HGraph::ClearDominanceInformation() {
- for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
- it.Current()->ClearDominanceInformation();
+ for (HBasicBlock* block : GetReversePostOrder()) {
+ block->ClearDominanceInformation();
}
reverse_post_order_.clear();
}
void HGraph::ClearLoopInformation() {
SetHasIrreducibleLoops(false);
- for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
- it.Current()->SetLoopInformation(nullptr);
+ for (HBasicBlock* block : GetReversePostOrder()) {
+ block->SetLoopInformation(nullptr);
}
}
@@ -275,8 +275,7 @@
bool update_occurred = true;
while (update_occurred) {
update_occurred = false;
- for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : GetReversePostOrder()) {
for (HBasicBlock* successor : block->GetSuccessors()) {
update_occurred |= UpdateDominatorOfSuccessor(block, successor);
}
@@ -287,8 +286,7 @@
// Make sure that there are no remaining blocks whose dominator information
// needs to be updated.
if (kIsDebugBuild) {
- for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : GetReversePostOrder()) {
for (HBasicBlock* successor : block->GetSuccessors()) {
DCHECK(!UpdateDominatorOfSuccessor(block, successor));
}
@@ -297,8 +295,7 @@
// Populate `dominated_blocks_` information after computing all dominators.
// The potential presence of irreducible loops requires doing it after.
- for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : GetReversePostOrder()) {
if (!block->IsEntryBlock()) {
block->GetDominator()->AddDominatedBlock(block);
}
@@ -375,8 +372,7 @@
void HGraph::ComputeTryBlockInformation() {
// Iterate in reverse post order to propagate try membership information from
// predecessors to their successors.
- for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : GetReversePostOrder()) {
if (block->IsEntryBlock() || block->IsCatchBlock()) {
// Catch blocks after simplification have only exceptional predecessors
// and hence are never in tries.
@@ -446,8 +442,7 @@
// We iterate in post order to ensure we visit inner loops before outer loops.
// `PopulateRecursive` needs this guarantee to know whether a natural loop
// contains an irreducible loop.
- for (HPostOrderIterator it(*this); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : GetPostOrder()) {
if (block->IsLoopHeader()) {
if (block->IsCatchBlock()) {
// TODO: Dealing with exceptional back edges could be tricky because
@@ -1134,8 +1129,8 @@
}
void HGraphVisitor::VisitReversePostOrder() {
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- VisitBasicBlock(it.Current());
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ VisitBasicBlock(block);
}
}
@@ -1986,10 +1981,8 @@
// Update the environments in this graph to have the invoke's environment
// as parent.
{
- HReversePostOrderIterator it(*this);
- it.Advance(); // Skip the entry block, we do not need to update the entry's suspend check.
- for (; !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ // Skip the entry block; we do not need to update the entry's suspend check.
+ for (HBasicBlock* block : GetReversePostOrderSkipEntryBlock()) {
for (HInstructionIterator instr_it(block->GetInstructions());
!instr_it.Done();
instr_it.Advance()) {
@@ -2070,8 +2063,7 @@
// Do a reverse post order of the blocks in the callee and do (1), (2), (3)
// and (4) to the blocks that apply.
- for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
- HBasicBlock* current = it.Current();
+ for (HBasicBlock* current : GetReversePostOrder()) {
if (current != exit_block_ && current != entry_block_ && current != first) {
DCHECK(current->GetTryCatchInformation() == nullptr);
DCHECK(current->GetGraph() == this);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6f4f3c9..710f709 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -25,6 +25,7 @@
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/array_ref.h"
+#include "base/iteration_range.h"
#include "base/stl_util.h"
#include "base/transform_array_ref.h"
#include "dex_file.h"
@@ -460,10 +461,23 @@
return reverse_post_order_;
}
+ ArrayRef<HBasicBlock* const> GetReversePostOrderSkipEntryBlock() {
+ DCHECK(GetReversePostOrder()[0] == entry_block_);
+ return ArrayRef<HBasicBlock* const>(GetReversePostOrder()).SubArray(1);
+ }
+
+ IterationRange<ArenaVector<HBasicBlock*>::const_reverse_iterator> GetPostOrder() const {
+ return ReverseRange(GetReversePostOrder());
+ }
+
const ArenaVector<HBasicBlock*>& GetLinearOrder() const {
return linear_order_;
}
+ IterationRange<ArenaVector<HBasicBlock*>::const_reverse_iterator> GetLinearPostOrder() const {
+ return ReverseRange(GetLinearOrder());
+ }
+
bool HasBoundsChecks() const {
return has_bounds_checks_;
}
@@ -6615,58 +6629,6 @@
DISALLOW_COPY_AND_ASSIGN(HGraphDelegateVisitor);
};
-class HInsertionOrderIterator : public ValueObject {
- public:
- explicit HInsertionOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {}
-
- bool Done() const { return index_ == graph_.GetBlocks().size(); }
- HBasicBlock* Current() const { return graph_.GetBlocks()[index_]; }
- void Advance() { ++index_; }
-
- private:
- const HGraph& graph_;
- size_t index_;
-
- DISALLOW_COPY_AND_ASSIGN(HInsertionOrderIterator);
-};
-
-class HReversePostOrderIterator : public ValueObject {
- public:
- explicit HReversePostOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {
- // Check that reverse post order of the graph has been built.
- DCHECK(!graph.GetReversePostOrder().empty());
- }
-
- bool Done() const { return index_ == graph_.GetReversePostOrder().size(); }
- HBasicBlock* Current() const { return graph_.GetReversePostOrder()[index_]; }
- void Advance() { ++index_; }
-
- private:
- const HGraph& graph_;
- size_t index_;
-
- DISALLOW_COPY_AND_ASSIGN(HReversePostOrderIterator);
-};
-
-class HPostOrderIterator : public ValueObject {
- public:
- explicit HPostOrderIterator(const HGraph& graph)
- : graph_(graph), index_(graph_.GetReversePostOrder().size()) {
- // Check that reverse post order of the graph has been built.
- DCHECK(!graph.GetReversePostOrder().empty());
- }
-
- bool Done() const { return index_ == 0; }
- HBasicBlock* Current() const { return graph_.GetReversePostOrder()[index_ - 1u]; }
- void Advance() { --index_; }
-
- private:
- const HGraph& graph_;
- size_t index_;
-
- DISALLOW_COPY_AND_ASSIGN(HPostOrderIterator);
-};
-
// Iterator over the blocks that are part of the loop. Includes blocks that are
// part of an inner loop. The blocks are iterated in the order of their
// block id.
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 7b66ef3..0db6088 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -20,8 +20,7 @@
void PrepareForRegisterAllocation::Run() {
// Order does not matter.
- for (HReversePostOrderIterator it(*GetGraph()); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
// No need to visit the phis.
for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
inst_it.Advance()) {
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index d93c9dd..d588dea 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -123,8 +123,7 @@
// TODO: move this to the graph checker.
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
for (HInstructionIterator iti(block->GetInstructions()); !iti.Done(); iti.Advance()) {
HInstruction* instr = iti.Current();
if (instr->GetType() == Primitive::kPrimNot) {
@@ -158,8 +157,8 @@
// To properly propagate type info we need to visit in the dominator-based order.
// Reverse post order guarantees a node's dominators are visited first.
// We take advantage of this order in `VisitBasicBlock`.
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- VisitBasicBlock(it.Current());
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ VisitBasicBlock(block);
}
ProcessWorklist();
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index 9610774..aa0d371 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -758,7 +758,7 @@
}
void RegisterAllocatorGraphColor::ProcessInstructions() {
- for (HBasicBlock* block : LinearPostOrder(codegen_->GetGraph()->GetLinearOrder())) {
+ for (HBasicBlock* block : codegen_->GetGraph()->GetLinearPostOrder()) {
// Note that we currently depend on this ordering, since some helper
// code is designed for linear scan register allocation.
for (HBackwardInstructionIterator instr_it(block->GetInstructions());
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 4e69bc8..1a391ce 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -163,7 +163,7 @@
void RegisterAllocatorLinearScan::AllocateRegistersInternal() {
// Iterate post-order, to ensure the list is sorted, and the last added interval
// is the one with the lowest start position.
- for (HBasicBlock* block : LinearPostOrder(codegen_->GetGraph()->GetLinearOrder())) {
+ for (HBasicBlock* block : codegen_->GetGraph()->GetLinearPostOrder()) {
for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
back_it.Advance()) {
ProcessInstruction(back_it.Current());
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index e409035..46d0d0e 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -76,8 +76,7 @@
// Iterate in post order in the unlikely case that removing one occurrence of
// the selection pattern empties a branch block of another occurrence.
// Otherwise the order does not matter.
- for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetPostOrder()) {
if (!block->EndsWithIf()) continue;
// Find elements of the diamond pattern.
diff --git a/compiler/optimizing/side_effects_analysis.cc b/compiler/optimizing/side_effects_analysis.cc
index 1dc6986..6d82e8e 100644
--- a/compiler/optimizing/side_effects_analysis.cc
+++ b/compiler/optimizing/side_effects_analysis.cc
@@ -26,8 +26,7 @@
// In DEBUG mode, ensure side effects are properly initialized to empty.
if (kIsDebugBuild) {
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
SideEffects effects = GetBlockEffects(block);
DCHECK(effects.DoesNothing());
if (block->IsLoopHeader()) {
@@ -38,9 +37,7 @@
}
// Do a post order visit to ensure we visit a loop header after its loop body.
- for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
-
+ for (HBasicBlock* block : graph_->GetPostOrder()) {
SideEffects effects = SideEffects::None();
// Update `effects` with the side effects of all instructions in this block.
for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 03807ba..ae1e369 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -25,8 +25,8 @@
void SsaBuilder::FixNullConstantType() {
// The order doesn't matter here.
- for (HReversePostOrderIterator itb(*graph_); !itb.Done(); itb.Advance()) {
- for (HInstructionIterator it(itb.Current()->GetInstructions()); !it.Done(); it.Advance()) {
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* equality_instr = it.Current();
if (!equality_instr->IsEqual() && !equality_instr->IsNotEqual()) {
continue;
@@ -57,8 +57,8 @@
void SsaBuilder::EquivalentPhisCleanup() {
// The order doesn't matter here.
- for (HReversePostOrderIterator itb(*graph_); !itb.Done(); itb.Advance()) {
- for (HInstructionIterator it(itb.Current()->GetPhis()); !it.Done(); it.Advance()) {
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
HPhi* phi = it.Current()->AsPhi();
HPhi* next = phi->GetNextEquivalentPhiWithSameType();
if (next != nullptr) {
@@ -79,8 +79,7 @@
}
void SsaBuilder::FixEnvironmentPhis() {
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
for (HInstructionIterator it_phis(block->GetPhis()); !it_phis.Done(); it_phis.Advance()) {
HPhi* phi = it_phis.Current()->AsPhi();
// If the phi is not dead, or has no environment uses, there is nothing to do.
@@ -228,8 +227,7 @@
void SsaBuilder::RunPrimitiveTypePropagation() {
ArenaVector<HPhi*> worklist(graph_->GetArena()->Adapter(kArenaAllocGraphBuilder));
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
if (block->IsLoopHeader()) {
for (HInstructionIterator phi_it(block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
HPhi* phi = phi_it.Current()->AsPhi();
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 76cf8fe..e8e12e1 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -139,7 +139,7 @@
void SsaLivenessAnalysis::ComputeLiveRanges() {
// Do a post order visit, adding inputs of instructions live in the block where
// that instruction is defined, and killing instructions that are being visited.
- for (HBasicBlock* block : LinearPostOrder(graph_->GetLinearOrder())) {
+ for (HBasicBlock* block : ReverseRange(graph_->GetLinearOrder())) {
BitVector* kill = GetKillSet(*block);
BitVector* live_in = GetLiveInSet(*block);
@@ -256,15 +256,13 @@
do {
changed = false;
- for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- const HBasicBlock& block = *it.Current();
-
+ for (const HBasicBlock* block : graph_->GetPostOrder()) {
// The live_in set depends on the kill set (which does not
// change in this loop), and the live_out set. If the live_out
// set does not change, there is no need to update the live_in set.
- if (UpdateLiveOut(block) && UpdateLiveIn(block)) {
+ if (UpdateLiveOut(*block) && UpdateLiveIn(*block)) {
if (kIsDebugBuild) {
- CheckNoLiveInIrreducibleLoop(block);
+ CheckNoLiveInIrreducibleLoop(*block);
}
changed = true;
}
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index b1ec99a..aec7a3c 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -34,8 +34,7 @@
ArenaSet<HPhi*> initially_live(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination));
// Add to the worklist phis referenced by non-phi instructions.
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
HPhi* phi = inst_it.Current()->AsPhi();
if (phi->IsDead()) {
@@ -84,8 +83,7 @@
// Remove phis that are not live. Visit in post order so that phis
// that are not inputs of loop phis can be removed when they have
// no users left (dead phis might use dead phis).
- for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetPostOrder()) {
HInstruction* current = block->GetFirstPhi();
HInstruction* next = nullptr;
HPhi* phi;
@@ -119,8 +117,7 @@
void SsaRedundantPhiElimination::Run() {
// Add all phis to the worklist. Order does not matter for correctness,
// and no particular order necessarily converges faster.
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
worklist_.push_back(inst_it.Current()->AsPhi());
}