author Vladimir Marko <vmarko@google.com> 2024-01-29 14:45:32 +0000
committer Vladimír Marko <vmarko@google.com> 2024-01-29 17:40:44 +0000
commit 5067a0d46efc7d72f213284bb1c321eebb466979 (patch)
tree e28b5dfcb7df43b99ef1344083577589769a83d3
parent ddd4776acaa0a4fb0e6027cd063ec450f518c7d7 (diff)
Optimizing: Remove block reachability information.
This code is dead after Partial LSE removal.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 298176183
Change-Id: If67efa9d1df908232b6c2f32f3d2c64fb91759ae
-rw-r--r--  compiler/optimizing/load_store_analysis_test.cc          100
-rw-r--r--  compiler/optimizing/load_store_elimination.cc               6
-rw-r--r--  compiler/optimizing/nodes.cc                              165
-rw-r--r--  compiler/optimizing/nodes.h                                11
-rw-r--r--  compiler/optimizing/optimizing_unit_test.h                  2
-rw-r--r--  compiler/optimizing/reference_type_propagation_test.cc      2
6 files changed, 0 insertions, 286 deletions
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index 947bf04923..90185579e3 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -49,9 +49,6 @@ class LoadStoreAnalysisTest : public CommonCompilerTest, public OptimizingUnitTe
const std::vector<AdjacencyListGraph::Edge>& adj) {
return AdjacencyListGraph(graph_, GetAllocator(), entry_name, exit_name, adj);
}
-
- void CheckReachability(const AdjacencyListGraph& adj,
- const std::vector<AdjacencyListGraph::Edge>& reach);
};
TEST_F(LoadStoreAnalysisTest, ArrayHeapLocations) {
@@ -711,103 +708,6 @@ TEST_F(LoadStoreAnalysisTest, TestHuntOriginalRef) {
ASSERT_EQ(loc1, loc4);
}
-void LoadStoreAnalysisTest::CheckReachability(const AdjacencyListGraph& adj,
- const std::vector<AdjacencyListGraph::Edge>& reach) {
- uint32_t cnt = 0;
- for (HBasicBlock* blk : graph_->GetBlocks()) {
- if (adj.HasBlock(blk)) {
- for (HBasicBlock* other : graph_->GetBlocks()) {
- if (other == nullptr) {
- continue;
- }
- if (adj.HasBlock(other)) {
- bool contains_edge =
- std::find(reach.begin(),
- reach.end(),
- AdjacencyListGraph::Edge { adj.GetName(blk), adj.GetName(other) }) !=
- reach.end();
- if (graph_->PathBetween(blk, other)) {
- cnt++;
- EXPECT_TRUE(contains_edge) << "Unexpected edge found between " << adj.GetName(blk)
- << " and " << adj.GetName(other);
- } else {
- EXPECT_FALSE(contains_edge) << "Expected edge not found between " << adj.GetName(blk)
- << " and " << adj.GetName(other);
- }
- } else if (graph_->PathBetween(blk, other)) {
- ADD_FAILURE() << "block " << adj.GetName(blk)
- << " has path to non-adjacency-graph block id: " << other->GetBlockId();
- }
- }
- } else {
- for (HBasicBlock* other : graph_->GetBlocks()) {
- if (other == nullptr) {
- continue;
- }
- EXPECT_FALSE(graph_->PathBetween(blk, other))
- << "Reachable blocks outside of adjacency-list";
- }
- }
- }
- EXPECT_EQ(cnt, reach.size());
-}
-
-TEST_F(LoadStoreAnalysisTest, ReachabilityTest1) {
- CreateGraph();
- AdjacencyListGraph blks(SetupFromAdjacencyList(
- "entry",
- "exit",
- { { "entry", "left" }, { "entry", "right" }, { "left", "exit" }, { "right", "exit" } }));
- CheckReachability(blks,
- {
- { "entry", "left" },
- { "entry", "right" },
- { "entry", "exit" },
- { "right", "exit" },
- { "left", "exit" },
- });
-}
-
-TEST_F(LoadStoreAnalysisTest, ReachabilityTest2) {
- CreateGraph();
- AdjacencyListGraph blks(SetupFromAdjacencyList(
- "entry",
- "exit",
- { { "entry", "loop-header" }, { "loop-header", "loop" }, { "loop", "loop-header" } }));
- CheckReachability(blks,
- {
- { "entry", "loop-header" },
- { "entry", "loop" },
- { "loop-header", "loop-header" },
- { "loop-header", "loop" },
- { "loop", "loop-header" },
- { "loop", "loop" },
- });
-}
-
-TEST_F(LoadStoreAnalysisTest, ReachabilityTest3) {
- CreateGraph();
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- { { "entry", "loop-header" },
- { "loop-header", "loop" },
- { "loop", "loop-header" },
- { "entry", "right" },
- { "right", "exit" } }));
- CheckReachability(blks,
- {
- { "entry", "loop-header" },
- { "entry", "loop" },
- { "entry", "right" },
- { "entry", "exit" },
- { "loop-header", "loop-header" },
- { "loop-header", "loop" },
- { "loop", "loop-header" },
- { "loop", "loop" },
- { "right", "exit" },
- });
-}
-
// // ENTRY
// obj = new Obj();
// if (parameter_value) {
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 20ca687ad5..09d1755a38 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -2901,12 +2901,6 @@ bool LoadStoreElimination::Run() {
// Skip this optimization.
return false;
}
- // We need to be able to determine reachability. Clear it just to be safe but
- // this should initially be empty.
- graph_->ClearReachabilityInformation();
- // This is O(blocks^3) time complexity. It means we can query reachability in
- // O(1) though.
- graph_->ComputeReachabilityInformation();
ScopedArenaAllocator allocator(graph_->GetArenaStack());
LoadStoreAnalysis lsa(graph_, stats_, &allocator);
lsa.Run();
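
For context on the hunk above: before this change, LSE precomputed an all-pairs reachability matrix once per graph (the O(blocks^3) step the removed comment refers to) so that every later reachability question was a single bit test. Below is a minimal sketch of that precompute-then-query pattern; the names (ReachabilityInfo, AddPath, PathBetween) and the plain std::vector<bool> storage are illustrative stand-ins, not the real HGraph/ArenaBitVectorArray API.

#include <cstddef>
#include <vector>

// Illustrative stand-in for the removed reachability matrix; the names here
// are hypothetical, not ART's API.
class ReachabilityInfo {
 public:
  explicit ReachabilityInfo(std::size_t num_blocks)
      : num_blocks_(num_blocks), bits_(num_blocks * num_blocks, false) {}

  // Filled in once, up front, by whatever all-pairs analysis is used.
  void AddPath(std::size_t source, std::size_t dest) {
    bits_[source * num_blocks_ + dest] = true;
  }

  // After the precomputation, every query is a single bit test: O(1).
  bool PathBetween(std::size_t source, std::size_t dest) const {
    return bits_[source * num_blocks_ + dest];
  }

 private:
  std::size_t num_blocks_;
  std::vector<bool> bits_;  // row-major |N| x |N| reachability matrix
};

The deleted HGraph code stores the same idea as an ArenaBitVectorArray whose rows are padded to word boundaries so whole rows can be unioned at once; how the matrix gets filled is shown in the nodes.cc hunk below.
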
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index e6cb3a2379..833b7e9ca2 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -308,171 +308,6 @@ static bool UpdateDominatorOfSuccessor(HBasicBlock* block, HBasicBlock* successo
}
}
-// TODO Consider moving this entirely into LoadStoreAnalysis/Elimination.
-bool HGraph::PathBetween(uint32_t source_idx, uint32_t dest_idx) const {
- DCHECK_LT(source_idx, blocks_.size()) << "source not present in graph!";
- DCHECK_LT(dest_idx, blocks_.size()) << "dest not present in graph!";
- DCHECK(blocks_[source_idx] != nullptr);
- DCHECK(blocks_[dest_idx] != nullptr);
- return reachability_graph_.IsBitSet(source_idx, dest_idx);
-}
-
-bool HGraph::PathBetween(const HBasicBlock* source, const HBasicBlock* dest) const {
- if (source == nullptr || dest == nullptr) {
- return false;
- }
- size_t source_idx = source->GetBlockId();
- size_t dest_idx = dest->GetBlockId();
- return PathBetween(source_idx, dest_idx);
-}
-
-// This function/struct calculates the reachability of every node from every
-// other node by iteratively using DFS to find reachability of each individual
-// block.
-//
-// This is in practice faster than the simpler Floyd-Warshall since while that
-// is O(N**3) this is O(N*(E + N)) where N is the number of blocks and E is the
-// number of edges. Since in practice each block only has a few outgoing edges
-// we can confidently say that E ~ B*N where B is a small number (~3). We also
-// memoize the results as we go allowing us to (potentially) avoid walking the
-// entire graph for every node. To make best use of this memoization we
-// calculate the reachability of blocks in PostOrder. This means that
-// (generally) blocks that are dominated by many other blocks and dominate few
-// blocks themselves will be examined first. This makes it more likely we can
-// use our memoized results.
-class ReachabilityAnalysisHelper {
- public:
- ReachabilityAnalysisHelper(const HGraph* graph,
- ArenaBitVectorArray* reachability_graph,
- ArenaStack* arena_stack)
- : graph_(graph),
- reachability_graph_(reachability_graph),
- arena_stack_(arena_stack),
- temporaries_(arena_stack_),
- block_size_(RoundUp(graph_->GetBlocks().size(), BitVector::kWordBits)),
- all_visited_nodes_(
- &temporaries_, graph_->GetBlocks().size(), false, kArenaAllocReachabilityGraph),
- not_post_order_visited_(
- &temporaries_, graph_->GetBlocks().size(), false, kArenaAllocReachabilityGraph) {
- // We can't adjust the size of reachability graph any more without breaking
- // our allocator invariants so it had better be large enough.
- CHECK_GE(reachability_graph_->NumRows(), graph_->GetBlocks().size());
- CHECK_GE(reachability_graph_->NumColumns(), graph_->GetBlocks().size());
- not_post_order_visited_.SetInitialBits(graph_->GetBlocks().size());
- }
-
- void CalculateReachability() {
- // Calculate what blocks connect using repeated DFS
- //
- // Going in PostOrder should generally give memoization a good shot of
- // hitting.
- for (const HBasicBlock* blk : graph_->GetPostOrder()) {
- if (blk == nullptr) {
- continue;
- }
- not_post_order_visited_.ClearBit(blk->GetBlockId());
- CalculateConnectednessOn(blk);
- all_visited_nodes_.SetBit(blk->GetBlockId());
- }
- // Get all other bits
- for (auto idx : not_post_order_visited_.Indexes()) {
- const HBasicBlock* blk = graph_->GetBlocks()[idx];
- if (blk == nullptr) {
- continue;
- }
- CalculateConnectednessOn(blk);
- all_visited_nodes_.SetBit(blk->GetBlockId());
- }
- }
-
- private:
- void AddEdge(uint32_t source, const HBasicBlock* dest) {
- reachability_graph_->SetBit(source, dest->GetBlockId());
- }
-
- // Union the reachability of 'idx' into 'update_block_idx'. This is done to
- // implement memoization. In order to improve performance we do this in 4-byte
- // blocks. Clang should be able to optimize this to larger blocks if possible.
- void UnionBlock(size_t update_block_idx, size_t idx) {
- reachability_graph_->UnionRows(update_block_idx, idx);
- }
-
- // Single DFS to get connectedness of a single block
- void CalculateConnectednessOn(const HBasicBlock* const target_block) {
- const uint32_t target_idx = target_block->GetBlockId();
- ScopedArenaAllocator connectedness_temps(arena_stack_);
- // What nodes we have already discovered and either have processed or are
- // already on the queue.
- ArenaBitVector discovered(
- &connectedness_temps, graph_->GetBlocks().size(), false, kArenaAllocReachabilityGraph);
- // The work stack. What blocks we still need to process.
- ScopedArenaVector<const HBasicBlock*> work_stack(
- connectedness_temps.Adapter(kArenaAllocReachabilityGraph));
- // Known max size since otherwise we'd have blocks multiple times. Avoids
- // re-allocation
- work_stack.reserve(graph_->GetBlocks().size());
- discovered.SetBit(target_idx);
- work_stack.push_back(target_block);
- // Main DFS Loop.
- while (!work_stack.empty()) {
- const HBasicBlock* cur = work_stack.back();
- work_stack.pop_back();
- // Memoization of previous runs.
- if (all_visited_nodes_.IsBitSet(cur->GetBlockId())) {
- DCHECK_NE(target_block, cur);
- // Already explored from here. Just use that data.
- UnionBlock(target_idx, cur->GetBlockId());
- continue;
- }
- for (const HBasicBlock* succ : cur->GetSuccessors()) {
- AddEdge(target_idx, succ);
- if (!discovered.IsBitSet(succ->GetBlockId())) {
- work_stack.push_back(succ);
- discovered.SetBit(succ->GetBlockId());
- }
- }
- }
- }
-
- const HGraph* graph_;
- // The graph's reachability_graph_ on the main allocator.
- ArenaBitVectorArray* reachability_graph_;
- ArenaStack* arena_stack_;
- // An allocator for temporary bit-vectors used by this algorithm. The
- // 'SetBit,ClearBit' on reachability_graph_ prior to the construction of this
- // object should be the only allocation on the main allocator so it's safe to
- // make a sub-allocator here.
- ScopedArenaAllocator temporaries_;
- // number of columns
- const size_t block_size_;
- // Where we've already completely calculated connectedness.
- ArenaBitVector all_visited_nodes_;
- // What we never visited and need to do later
- ArenaBitVector not_post_order_visited_;
-
- DISALLOW_COPY_AND_ASSIGN(ReachabilityAnalysisHelper);
-};
-
-void HGraph::ComputeReachabilityInformation() {
- DCHECK_EQ(reachability_graph_.GetRawData().NumSetBits(), 0u);
- DCHECK(reachability_graph_.IsExpandable());
- // Reserve all the bits we'll need. This is the only allocation on the
- // standard allocator we do here, enabling us to create a new ScopedArena for
- // use with temporaries.
- //
- // reachability_graph_ acts as |N| x |N| graph for PathBetween. Array is
-// padded so each row starts on a BitVector::kWordBits-bit alignment for
- // simplicity and performance, allowing us to union blocks together without
- // going bit-by-bit.
- reachability_graph_.Resize(blocks_.size(), blocks_.size(), /*clear=*/false);
- ReachabilityAnalysisHelper helper(this, &reachability_graph_, GetArenaStack());
- helper.CalculateReachability();
-}
-
-void HGraph::ClearReachabilityInformation() {
- reachability_graph_.Clear();
-}
-
void HGraph::ComputeDominanceInformation() {
DCHECK(reverse_post_order_.empty());
reverse_post_order_.reserve(blocks_.size());
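
The ReachabilityAnalysisHelper deleted above fills that matrix by running one iterative DFS per block, visiting blocks in post order and, whenever the walk reaches a block whose row is already complete, unioning that row instead of re-walking its successors; that is the memoization the removed comment describes. A compact standalone sketch of the same technique, assuming a plain adjacency-list graph (Graph, ComputeReachability and the std::vector<bool> rows are made up for illustration, not ART's arena-backed types):

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical model of the removed analysis: plain vectors instead of
// ART's arena-backed bit vectors, and made-up names throughout.
struct Graph {
  std::vector<std::vector<uint32_t>> successors;  // successors[b] = successor ids of block b
  std::vector<uint32_t> post_order;               // block ids in DFS post order
};

// reach[b][i] == true iff block i is reachable from block b.
using ReachabilityMatrix = std::vector<std::vector<bool>>;

ReachabilityMatrix ComputeReachability(const Graph& graph) {
  const std::size_t num_blocks = graph.successors.size();
  ReachabilityMatrix reach(num_blocks, std::vector<bool>(num_blocks, false));
  std::vector<bool> finished(num_blocks, false);  // rows that are already complete

  auto dfs_from = [&](uint32_t target) {
    std::vector<bool> discovered(num_blocks, false);
    std::vector<uint32_t> work_stack;
    work_stack.reserve(num_blocks);
    discovered[target] = true;
    work_stack.push_back(target);
    while (!work_stack.empty()) {
      const uint32_t cur = work_stack.back();
      work_stack.pop_back();
      if (finished[cur]) {
        // Memoization: the row for 'cur' is complete, so union it into the
        // target's row instead of walking cur's successors again (the removed
        // code does this union word-at-a-time on aligned bit-vector rows).
        for (std::size_t i = 0; i < num_blocks; ++i) {
          if (reach[cur][i]) reach[target][i] = true;
        }
        continue;
      }
      for (uint32_t succ : graph.successors[cur]) {
        reach[target][succ] = true;
        if (!discovered[succ]) {
          discovered[succ] = true;
          work_stack.push_back(succ);
        }
      }
    }
    finished[target] = true;
  };

  // Post order means a block's successors tend to be processed first, so the
  // memoized union above fires often; roughly O(N * (N + E)) overall.
  for (uint32_t block : graph.post_order) {
    dfs_from(block);
  }
  return reach;
}

With the matrix filled, a PathBetween-style query reduces to reach[source][dest], which is the bit test the deleted HGraph::PathBetween performed. The real helper additionally sweeps any blocks missing from the post order so every row ends up populated.
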
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 8dd89fa4e4..c862e31de7 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -393,7 +393,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
blocks_(allocator->Adapter(kArenaAllocBlockList)),
reverse_post_order_(allocator->Adapter(kArenaAllocReversePostOrder)),
linear_order_(allocator->Adapter(kArenaAllocLinearOrder)),
- reachability_graph_(allocator, 0, 0, true, kArenaAllocReachabilityGraph),
entry_block_(nullptr),
exit_block_(nullptr),
maximum_number_of_out_vregs_(0),
@@ -462,8 +461,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
void ComputeDominanceInformation();
void ClearDominanceInformation();
- void ComputeReachabilityInformation();
- void ClearReachabilityInformation();
void ClearLoopInformation();
void FindBackEdges(ArenaBitVector* visited);
GraphAnalysisResult BuildDominatorTree();
@@ -620,10 +617,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
has_bounds_checks_ = value;
}
- // Returns true if dest is reachable from source, using either blocks or block-ids.
- bool PathBetween(const HBasicBlock* source, const HBasicBlock* dest) const;
- bool PathBetween(uint32_t source_id, uint32_t dest_id) const;
-
// Is the code known to be robust against eliminating dead references
// and the effects of early finalization?
bool IsDeadReferenceSafe() const { return dead_reference_safe_; }
@@ -798,10 +791,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// post order, this order is not incrementally kept up-to-date.
ArenaVector<HBasicBlock*> linear_order_;
- // Reachability graph for checking connectedness between nodes. Acts as a partitioned vector where
- // each RoundUp(blocks_.size(), BitVector::kWordBits) is the reachability of each node.
- ArenaBitVectorArray reachability_graph_;
-
HBasicBlock* entry_block_;
HBasicBlock* exit_block_;
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 77e6420df8..8729763e73 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -164,9 +164,7 @@ class AdjacencyListGraph {
HBasicBlock* dest_blk = name_to_block_.GetOrCreate(dest, create_block);
src_blk->AddSuccessor(dest_blk);
}
- graph_->ClearReachabilityInformation();
graph_->ComputeDominanceInformation();
- graph_->ComputeReachabilityInformation();
for (auto [name, blk] : name_to_block_) {
block_to_name_.Put(blk, name);
}
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index ffd94e56b5..0e0acd11c2 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -425,8 +425,6 @@ void NonLoopReferenceTypePropagationTestGroup::RunVisitListTest(Func mutator) {
}
auto vals = MakeTransformRange(succ_blocks, [&](HBasicBlock* blk) { return single_value[blk]; });
std::vector<HInstruction*> ins(vals.begin(), vals.end());
- graph_->ClearReachabilityInformation();
- graph_->ComputeReachabilityInformation();
mutator(ins);
propagation_->Visit(ArrayRef<HInstruction* const>(ins));
for (auto [blk, i] : single_value) {