Refactoring of graph linearization and linear order.

Rationale:
Ownership of the graph's linear order and its iterators
had become unclear now that other phases use them.
The new approach lets each phase compute its own order,
while ssa_liveness is the sole owner of the order stored
in the graph (since the graph is not mutated afterwards).
Also shortens the lifetime of the loop optimizer's arena.
Test: test-art-host
Change-Id: Ib7137d1203a1e0a12db49868f4117d48a4277f30
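
A minimal sketch of the two resulting usage patterns (illustrative
only: HFooPhase is a hypothetical client; the real declarations live
in compiler/optimizing/linear_order.h):

    // Pattern 1: a phase computes its own short-lived order from a
    // phase-local arena, as HLoopOptimization::Run() now does.
    void HFooPhase::Run() {
      ArenaAllocator allocator(graph_->GetArena()->GetArenaPool());
      ArenaVector<HBasicBlock*> linear_order(allocator.Adapter(kArenaAllocLinearOrder));
      LinearizeGraph(graph_, &allocator, &linear_order);
      for (HBasicBlock* block : linear_order) { /* forward pass */ }
      for (HBasicBlock* block : LinearPostOrder(linear_order)) { /* backward pass */ }
    }  // 'allocator' is destructed here, releasing the order's memory at once.

    // Pattern 2: ssa_liveness, as sole owner, computes the order directly
    // into the graph; later phases (e.g. register allocation) only read
    // it through GetLinearOrder().
    LinearizeGraph(graph_, graph_->GetArena(), &graph_->linear_order_);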
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 2556178..6faac09 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -61,6 +61,7 @@
"optimizing/instruction_simplifier.cc",
"optimizing/intrinsics.cc",
"optimizing/licm.cc",
+ "optimizing/linear_order.cc",
"optimizing/load_store_elimination.cc",
"optimizing/locations.cc",
"optimizing/loop_optimization.cc",
diff --git a/compiler/optimizing/linear_order.cc b/compiler/optimizing/linear_order.cc
new file mode 100644
index 0000000..3af212f
--- /dev/null
+++ b/compiler/optimizing/linear_order.cc
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "linear_order.h"
+
+namespace art {
+
+static bool InSameLoop(HLoopInformation* first_loop, HLoopInformation* second_loop) {
+ return first_loop == second_loop;
+}
+
+static bool IsLoop(HLoopInformation* info) {
+ return info != nullptr;
+}
+
+static bool IsInnerLoop(HLoopInformation* outer, HLoopInformation* inner) {
+ return (inner != outer)
+ && (inner != nullptr)
+ && (outer != nullptr)
+ && inner->IsIn(*outer);
+}
+
+// Helper method to update work list for linear order.
+static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasicBlock* block) {
+ HLoopInformation* block_loop = block->GetLoopInformation();
+ auto insert_pos = worklist->rbegin(); // insert_pos.base() will be the actual position.
+ for (auto end = worklist->rend(); insert_pos != end; ++insert_pos) {
+ HBasicBlock* current = *insert_pos;
+ HLoopInformation* current_loop = current->GetLoopInformation();
+ if (InSameLoop(block_loop, current_loop)
+ || !IsLoop(current_loop)
+ || IsInnerLoop(current_loop, block_loop)) {
+ // The block can be processed immediately.
+ break;
+ }
+ }
+ worklist->insert(insert_pos.base(), block);
+}
+
+// Helper method to validate linear order.
+static bool IsLinearOrderWellFormed(const HGraph* graph, ArenaVector<HBasicBlock*>* linear_order) {
+ for (HBasicBlock* header : graph->GetBlocks()) {
+ if (header == nullptr || !header->IsLoopHeader()) {
+ continue;
+ }
+ HLoopInformation* loop = header->GetLoopInformation();
+ size_t num_blocks = loop->GetBlocks().NumSetBits();
+ size_t found_blocks = 0u;
+ for (HBasicBlock* block : *linear_order) {
+ if (loop->Contains(*block)) {
+ found_blocks++;
+ if (found_blocks == 1u && block != header) {
+ // First block is not the header.
+ return false;
+ } else if (found_blocks == num_blocks && !loop->IsBackEdge(*block)) {
+ // Last block is not a back edge.
+ return false;
+ }
+ } else if (found_blocks != 0u && found_blocks != num_blocks) {
+ // Blocks are not adjacent.
+ return false;
+ }
+ }
+ DCHECK_EQ(found_blocks, num_blocks);
+ }
+ return true;
+}
+
+void LinearizeGraph(const HGraph* graph,
+ ArenaAllocator* allocator,
+ ArenaVector<HBasicBlock*>* linear_order) {
+ DCHECK(linear_order->empty());
+ // Create a reverse post ordering with the following properties:
+ // - Blocks in a loop are consecutive,
+  // - The back edge is the last block before the loop exits.
+ //
+ // (1): Record the number of forward predecessors for each block. This is to
+ // ensure the resulting order is reverse post order. We could use the
+ // current reverse post order in the graph, but it would require making
+  // order queries to an ArenaVector, which is not the best data structure
+ // for it.
+ ArenaVector<uint32_t> forward_predecessors(graph->GetBlocks().size(),
+ allocator->Adapter(kArenaAllocLinearOrder));
+ for (HReversePostOrderIterator it(*graph); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ size_t number_of_forward_predecessors = block->GetPredecessors().size();
+ if (block->IsLoopHeader()) {
+ number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges();
+ }
+ forward_predecessors[block->GetBlockId()] = number_of_forward_predecessors;
+ }
+ // (2): Following a worklist approach, first start with the entry block, and
+ // iterate over the successors. When all non-back edge predecessors of a
+ // successor block are visited, the successor block is added in the worklist
+ // following an order that satisfies the requirements to build our linear graph.
+ linear_order->reserve(graph->GetReversePostOrder().size());
+ ArenaVector<HBasicBlock*> worklist(allocator->Adapter(kArenaAllocLinearOrder));
+ worklist.push_back(graph->GetEntryBlock());
+ do {
+ HBasicBlock* current = worklist.back();
+ worklist.pop_back();
+ linear_order->push_back(current);
+ for (HBasicBlock* successor : current->GetSuccessors()) {
+ int block_id = successor->GetBlockId();
+ size_t number_of_remaining_predecessors = forward_predecessors[block_id];
+ if (number_of_remaining_predecessors == 1) {
+ AddToListForLinearization(&worklist, successor);
+ }
+ forward_predecessors[block_id] = number_of_remaining_predecessors - 1;
+ }
+ } while (!worklist.empty());
+
+ DCHECK(graph->HasIrreducibleLoops() || IsLinearOrderWellFormed(graph, linear_order));
+}
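+
+// For intuition, a worked example on a hypothetical five-block CFG
+// (illustrative only, not taken from the test suite):
+//
+//   0 (entry) -> 1 (pre-header) -> 2 (loop header) -> 3 (body) -> 2 (back edge)
+//                                  2 -> 4 (loop exit)
+//
+// The forward predecessor counts are 0, 1, 1, 1, 1; the back edge 3->2 is
+// not counted for the header. The worklist pops 0, 1, and the header 2, at
+// which point both successors 3 and 4 become ready. AddToListForLinearization
+// keeps the loop block 3 on top of the worklist, deferring the non-loop
+// block 4, so the resulting order is 0 1 2 3 4: loop blocks contiguous,
+// header first, back edge last.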
+
+} // namespace art
diff --git a/compiler/optimizing/linear_order.h b/compiler/optimizing/linear_order.h
new file mode 100644
index 0000000..cdbdd07
--- /dev/null
+++ b/compiler/optimizing/linear_order.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_
+#define ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_
+
+#include "nodes.h"
+
+namespace art {
+
+// Linearizes the 'graph' such that:
+// (1): a block is always after its dominator,
+// (2): blocks of loops are contiguous.
+//
+// Storage is obtained through 'allocator' and the linear order is computed
+// into 'linear_order'. Once computed, iteration can be expressed as:
+//
+// for (HBasicBlock* block : linear_order) // linear order
+//
+// for (HBasicBlock* block : LinearPostOrder(linear_order)) // linear post order
+//
+void LinearizeGraph(const HGraph* graph,
+ ArenaAllocator* allocator,
+ ArenaVector<HBasicBlock*>* linear_order);
+
+inline auto LinearPostOrder(const ArenaVector<HBasicBlock*>& linear_order) {
+ return MakeIterationRange(linear_order.rbegin(), linear_order.rend());
+}
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index b12a7f7..4acf3ac 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -16,10 +16,7 @@
#include "loop_optimization.h"
-#include "base/arena_containers.h"
-#include "induction_var_range.h"
-#include "ssa_liveness_analysis.h"
-#include "nodes.h"
+#include "linear_order.h"
namespace art {
@@ -126,14 +123,9 @@
HLoopOptimization::HLoopOptimization(HGraph* graph,
HInductionVarAnalysis* induction_analysis)
- : HLoopOptimization(graph, induction_analysis, nullptr) {}
-
-HLoopOptimization::HLoopOptimization(HGraph* graph,
- HInductionVarAnalysis* induction_analysis,
- ArenaAllocator* allocator)
: HOptimization(graph, kLoopOptimizationPassName),
induction_range_(induction_analysis),
- loop_allocator_(allocator),
+ loop_allocator_(nullptr),
top_loop_(nullptr),
last_loop_(nullptr) {
}
@@ -141,32 +133,39 @@
void HLoopOptimization::Run() {
// Well-behaved loops only.
// TODO: make this less of a sledgehammer.
- if (graph_-> HasTryCatch() || graph_->HasIrreducibleLoops()) {
+ if (graph_->HasTryCatch() || graph_->HasIrreducibleLoops()) {
return;
}
+ // Phase-local allocator that draws from the global pool. Since the allocator
+ // itself resides on the stack, it is destructed on exiting Run(), which
+ // implies its underlying memory is released immediately.
ArenaAllocator allocator(graph_->GetArena()->GetArenaPool());
- if (loop_allocator_ == nullptr) {
- loop_allocator_ = &allocator;
- }
+ loop_allocator_ = &allocator;
- // Build the linear order. This step enables building a loop hierarchy that
- // properly reflects the outer-inner and previous-next relation.
- graph_->Linearize();
+ // Perform loop optimizations.
+ LocalRun();
+
+ // Detach.
+ loop_allocator_ = nullptr;
+ last_loop_ = top_loop_ = nullptr;
+}
+
+void HLoopOptimization::LocalRun() {
+ // Build the linear order using the phase-local allocator. This step enables building
+ // a loop hierarchy that properly reflects the outer-inner and previous-next relation.
+ ArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
+ LinearizeGraph(graph_, loop_allocator_, &linear_order);
+
// Build the loop hierarchy.
- for (HLinearOrderIterator it_graph(*graph_); !it_graph.Done(); it_graph.Advance()) {
- HBasicBlock* block = it_graph.Current();
+ for (HBasicBlock* block : linear_order) {
if (block->IsLoopHeader()) {
AddLoop(block->GetLoopInformation());
}
}
- if (top_loop_ != nullptr) {
- // Traverse the loop hierarchy inner-to-outer and optimize.
- TraverseLoopsInnerToOuter(top_loop_);
- }
- if (loop_allocator_ == &allocator) {
- loop_allocator_ = nullptr;
- }
+
+ // Traverse the loop hierarchy inner-to-outer and optimize.
+ TraverseLoopsInnerToOuter(top_loop_);
}
void HLoopOptimization::AddLoop(HLoopInformation* loop_info) {
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 591e45a..6092955 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -17,8 +17,6 @@
#ifndef ART_COMPILER_OPTIMIZING_LOOP_OPTIMIZATION_H_
#define ART_COMPILER_OPTIMIZING_LOOP_OPTIMIZATION_H_
-#include <string>
-
#include "induction_var_range.h"
#include "nodes.h"
#include "optimization.h"
@@ -32,9 +30,6 @@
class HLoopOptimization : public HOptimization {
public:
HLoopOptimization(HGraph* graph, HInductionVarAnalysis* induction_analysis);
- HLoopOptimization(HGraph* graph,
- HInductionVarAnalysis* induction_analysis,
- ArenaAllocator* allocator);
void Run() OVERRIDE;
@@ -44,7 +39,7 @@
/**
* A single loop inside the loop hierarchy representation.
*/
- struct LoopNode : public ArenaObject<kArenaAllocInductionVarAnalysis> {
+ struct LoopNode : public ArenaObject<kArenaAllocLoopOptimization> {
explicit LoopNode(HLoopInformation* lp_info)
: loop_info(lp_info),
outer(nullptr),
@@ -58,6 +53,8 @@
LoopNode* next;
};
+ void LocalRun();
+
void AddLoop(HLoopInformation* loop_info);
void RemoveLoop(LoopNode* node);
@@ -70,14 +67,15 @@
HInstruction* replacement,
HInstruction* exclusion);
- // Range analysis based on induction variables.
+ // Range information based on prior induction variable analysis.
InductionVarRange induction_range_;
// Phase-local heap memory allocator for the loop optimizer. Storage obtained
- // through this allocator is released when the loop optimizer is done.
+ // through this allocator is immediately released when the loop optimizer is done.
ArenaAllocator* loop_allocator_;
- // Entries into the loop hierarchy representation.
+ // Entries into the loop hierarchy representation. The hierarchy resides
+ // in phase-local heap memory.
LoopNode* top_loop_;
LoopNode* last_loop_;
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index 4d54afd..7805a69 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -31,7 +31,7 @@
allocator_(&pool_),
graph_(CreateGraph(&allocator_)),
iva_(new (&allocator_) HInductionVarAnalysis(graph_)),
- loop_opt_(new (&allocator_) HLoopOptimization(graph_, iva_, &allocator_)) {
+ loop_opt_(new (&allocator_) HLoopOptimization(graph_, iva_)) {
BuildGraph();
}
@@ -76,7 +76,9 @@
void PerformAnalysis() {
graph_->BuildDominatorTree();
iva_->Run();
- loop_opt_->Run();
+ // Do not release the loop hierarchy.
+ loop_opt_->loop_allocator_ = &allocator_;
+ loop_opt_->LocalRun();
}
/** Constructs string representation of computed loop hierarchy. */
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 1ff2252..874c1ed 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -460,116 +460,6 @@
return kAnalysisSuccess;
}
-static bool InSameLoop(HLoopInformation* first_loop, HLoopInformation* second_loop) {
- return first_loop == second_loop;
-}
-
-static bool IsLoop(HLoopInformation* info) {
- return info != nullptr;
-}
-
-static bool IsInnerLoop(HLoopInformation* outer, HLoopInformation* inner) {
- return (inner != outer)
- && (inner != nullptr)
- && (outer != nullptr)
- && inner->IsIn(*outer);
-}
-
-// Helper method to update work list for linear order.
-static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasicBlock* block) {
- HLoopInformation* block_loop = block->GetLoopInformation();
- auto insert_pos = worklist->rbegin(); // insert_pos.base() will be the actual position.
- for (auto end = worklist->rend(); insert_pos != end; ++insert_pos) {
- HBasicBlock* current = *insert_pos;
- HLoopInformation* current_loop = current->GetLoopInformation();
- if (InSameLoop(block_loop, current_loop)
- || !IsLoop(current_loop)
- || IsInnerLoop(current_loop, block_loop)) {
- // The block can be processed immediately.
- break;
- }
- }
- worklist->insert(insert_pos.base(), block);
-}
-
-// Helper method to validate linear order.
-static bool IsLinearOrderWellFormed(const HGraph& graph) {
- for (HBasicBlock* header : graph.GetBlocks()) {
- if (header == nullptr || !header->IsLoopHeader()) {
- continue;
- }
- HLoopInformation* loop = header->GetLoopInformation();
- size_t num_blocks = loop->GetBlocks().NumSetBits();
- size_t found_blocks = 0u;
- for (HLinearOrderIterator it(graph); !it.Done(); it.Advance()) {
- HBasicBlock* current = it.Current();
- if (loop->Contains(*current)) {
- found_blocks++;
- if (found_blocks == 1u && current != header) {
- // First block is not the header.
- return false;
- } else if (found_blocks == num_blocks && !loop->IsBackEdge(*current)) {
- // Last block is not a back edge.
- return false;
- }
- } else if (found_blocks != 0u && found_blocks != num_blocks) {
- // Blocks are not adjacent.
- return false;
- }
- }
- DCHECK_EQ(found_blocks, num_blocks);
- }
- return true;
-}
-
-// TODO: return order, and give only liveness analysis ownership of graph's linear_order_?
-void HGraph::Linearize() {
- linear_order_.clear();
-
- // Create a reverse post ordering with the following properties:
- // - Blocks in a loop are consecutive,
- // - Back-edge is the last block before loop exits.
-
- // (1): Record the number of forward predecessors for each block. This is to
- // ensure the resulting order is reverse post order. We could use the
- // current reverse post order in the graph, but it would require making
- // order queries to a GrowableArray, which is not the best data structure
- // for it.
- ArenaVector<uint32_t> forward_predecessors(blocks_.size(),
- arena_->Adapter(kArenaAllocSsaLiveness));
- for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
- size_t number_of_forward_predecessors = block->GetPredecessors().size();
- if (block->IsLoopHeader()) {
- number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges();
- }
- forward_predecessors[block->GetBlockId()] = number_of_forward_predecessors;
- }
-
- // (2): Following a worklist approach, first start with the entry block, and
- // iterate over the successors. When all non-back edge predecessors of a
- // successor block are visited, the successor block is added in the worklist
- // following an order that satisfies the requirements to build our linear graph.
- linear_order_.reserve(GetReversePostOrder().size());
- ArenaVector<HBasicBlock*> worklist(arena_->Adapter(kArenaAllocSsaLiveness));
- worklist.push_back(GetEntryBlock());
- do {
- HBasicBlock* current = worklist.back();
- worklist.pop_back();
- linear_order_.push_back(current);
- for (HBasicBlock* successor : current->GetSuccessors()) {
- int block_id = successor->GetBlockId();
- size_t number_of_remaining_predecessors = forward_predecessors[block_id];
- if (number_of_remaining_predecessors == 1) {
- AddToListForLinearization(&worklist, successor);
- }
- forward_predecessors[block_id] = number_of_remaining_predecessors - 1;
- }
- } while (!worklist.empty());
-
- DCHECK(HasIrreducibleLoops() || IsLinearOrderWellFormed(*this));
-}
-
void HLoopInformation::Dump(std::ostream& os) {
os << "header: " << header_->GetBlockId() << std::endl;
os << "pre header: " << GetPreHeader()->GetBlockId() << std::endl;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 5cfbf42..828c0e5 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -366,13 +366,6 @@
// is a throw-catch loop, i.e. the header is a catch block.
GraphAnalysisResult AnalyzeLoops() const;
- // Computes a linear order for the current graph (should be called before
- // using HLinearOrderIterator). Linearizes the graph such that:
- // (1): a block is always after its dominator,
- // (2): blocks of loops are contiguous.
- // This creates a natural and efficient ordering when visualizing live ranges.
- void Linearize();
-
// Iterate over blocks to compute try block membership. Needs reverse post
// order and loop information.
void ComputeTryBlockInformation();
@@ -6661,43 +6654,6 @@
DISALLOW_COPY_AND_ASSIGN(HPostOrderIterator);
};
-class HLinearPostOrderIterator : public ValueObject {
- public:
- explicit HLinearPostOrderIterator(const HGraph& graph)
- : order_(graph.GetLinearOrder()), index_(graph.GetLinearOrder().size()) {}
-
- bool Done() const { return index_ == 0; }
-
- HBasicBlock* Current() const { return order_[index_ - 1u]; }
-
- void Advance() {
- --index_;
- DCHECK_GE(index_, 0U);
- }
-
- private:
- const ArenaVector<HBasicBlock*>& order_;
- size_t index_;
-
- DISALLOW_COPY_AND_ASSIGN(HLinearPostOrderIterator);
-};
-
-class HLinearOrderIterator : public ValueObject {
- public:
- explicit HLinearOrderIterator(const HGraph& graph)
- : order_(graph.GetLinearOrder()), index_(0) {}
-
- bool Done() const { return index_ == order_.size(); }
- HBasicBlock* Current() const { return order_[index_]; }
- void Advance() { ++index_; }
-
- private:
- const ArenaVector<HBasicBlock*>& order_;
- size_t index_;
-
- DISALLOW_COPY_AND_ASSIGN(HLinearOrderIterator);
-};
-
// Iterator over the blocks that are part of the loop. Includes blocks part
// of an inner loop. The order in which the blocks are iterated is on their
// block id.
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index ad7a8a3..caf6647 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -17,6 +17,7 @@
#include "register_allocation_resolver.h"
#include "code_generator.h"
+#include "linear_order.h"
#include "ssa_liveness_analysis.h"
namespace art {
@@ -141,8 +142,7 @@
}
// Resolve non-linear control flow across branches. Order does not matter.
- for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
if (block->IsCatchBlock() ||
(block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible())) {
// Instructions live at the top of catch blocks or irreducible loop header
@@ -172,15 +172,14 @@
}
// Resolve phi inputs. Order does not matter.
- for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
- HBasicBlock* current = it.Current();
- if (current->IsCatchBlock()) {
+ for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
+ if (block->IsCatchBlock()) {
// Catch phi values are set at runtime by the exception delivery mechanism.
} else {
- for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
HInstruction* phi = inst_it.Current();
- for (size_t i = 0, e = current->GetPredecessors().size(); i < e; ++i) {
- HBasicBlock* predecessor = current->GetPredecessors()[i];
+ for (size_t i = 0, e = block->GetPredecessors().size(); i < e; ++i) {
+ HBasicBlock* predecessor = block->GetPredecessors()[i];
DCHECK_EQ(predecessor->GetNormalSuccessors().size(), 1u);
HInstruction* input = phi->InputAt(i);
Location source = input->GetLiveInterval()->GetLocationAt(
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index 7178399..9610774 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -17,6 +17,7 @@
#include "register_allocator_graph_color.h"
#include "code_generator.h"
+#include "linear_order.h"
#include "register_allocation_resolver.h"
#include "ssa_liveness_analysis.h"
#include "thread-inl.h"
@@ -757,9 +758,7 @@
}
void RegisterAllocatorGraphColor::ProcessInstructions() {
- for (HLinearPostOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
-
+ for (HBasicBlock* block : LinearPostOrder(codegen_->GetGraph()->GetLinearOrder())) {
// Note that we currently depend on this ordering, since some helper
// code is designed for linear scan register allocation.
for (HBackwardInstructionIterator instr_it(block->GetInstructions());
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 6910c71..4e69bc8 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -22,6 +22,7 @@
#include "base/bit_vector-inl.h"
#include "base/enums.h"
#include "code_generator.h"
+#include "linear_order.h"
#include "register_allocation_resolver.h"
#include "ssa_liveness_analysis.h"
@@ -108,8 +109,7 @@
// Since only parallel moves have been inserted during the register allocation,
// these checks are mostly for making sure these moves have been added correctly.
size_t current_liveness = 0;
- for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
HInstruction* instruction = inst_it.Current();
DCHECK_LE(current_liveness, instruction->GetLifetimePosition());
@@ -163,8 +163,7 @@
void RegisterAllocatorLinearScan::AllocateRegistersInternal() {
// Iterate post-order, to ensure the list is sorted, and the last added interval
// is the one with the lowest start position.
- for (HLinearPostOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : LinearPostOrder(codegen_->GetGraph()->GetLinearOrder())) {
for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
back_it.Advance()) {
ProcessInstruction(back_it.Current());
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 9ce34aa..76cf8fe 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -18,12 +18,17 @@
#include "base/bit_vector-inl.h"
#include "code_generator.h"
+#include "linear_order.h"
#include "nodes.h"
namespace art {
void SsaLivenessAnalysis::Analyze() {
- graph_->Linearize();
+  // Compute the linear order directly into the graph's data structure
+  // (the graph is not mutated after this point).
+ LinearizeGraph(graph_, graph_->GetArena(), &graph_->linear_order_);
+
+ // Liveness analysis.
NumberInstructions();
ComputeLiveness();
}
@@ -40,8 +45,7 @@
// to differentiate between the start and end of an instruction. Adding 2 to
// the lifetime position for each instruction ensures the start of an
// instruction is different than the end of the previous instruction.
- for (HLinearOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetLinearOrder()) {
block->SetLifetimeStart(lifetime_position);
for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
@@ -83,8 +87,7 @@
}
void SsaLivenessAnalysis::ComputeLiveness() {
- for (HLinearOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
+ for (HBasicBlock* block : graph_->GetLinearOrder()) {
block_infos_[block->GetBlockId()] =
new (graph_->GetArena()) BlockInfo(graph_->GetArena(), *block, number_of_ssa_values_);
}
@@ -136,9 +139,7 @@
void SsaLivenessAnalysis::ComputeLiveRanges() {
// Do a post order visit, adding inputs of instructions live in the block where
// that instruction is defined, and killing instructions that are being visited.
- for (HLinearPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
-
+ for (HBasicBlock* block : LinearPostOrder(graph_->GetLinearOrder())) {
BitVector* kill = GetKillSet(*block);
BitVector* live_in = GetLiveInSet(*block);
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index aeb990c..5cdf671 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -69,6 +69,7 @@
"DCE ",
"LSE ",
"LICM ",
+ "LoopOpt ",
"SsaLiveness ",
"SsaPhiElim ",
"RefTypeProp ",
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 31dbb36..62cd2a7 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -80,6 +80,7 @@
kArenaAllocDCE,
kArenaAllocLSE,
kArenaAllocLICM,
+ kArenaAllocLoopOptimization,
kArenaAllocSsaLiveness,
kArenaAllocSsaPhiElimination,
kArenaAllocReferenceTypePropagation,