Step 1 of 2: conditional passes.
Rationale:
The change adds a return value to Run() in preparation for
conditional pass execution. The value returned by Run() is
best-effort: returning false means that no optimizations were
applied or no useful information was obtained. A few passes now
return more exact information; the others still simply return
true (see the driver sketch below). In addition, the change
integrates inlining as a regular pass, avoiding the ugly "break"
into optimizations1 and optimizations2.
Bug: b/78171933, b/74026074
Test: test-art-host,target
Change-Id: Ia39c5c83c01dcd79841e4b623917d61c754cf075
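For illustration only, a minimal sketch of how a pass driver could
consume the bool returned by Run(). HOptimization is the existing
ART base class whose Run() signature this change updates; the
RunPasses() helper and the single pass list are hypothetical names
for this sketch, not code from this change:

  #include <vector>

  class HOptimization {
   public:
    virtual ~HOptimization() {}
    // Best-effort result: false means the pass applied no
    // optimization and produced no useful information.
    virtual bool Run() = 0;
  };

  // With inlining registered as a regular pass, one ordered list
  // replaces the old optimizations1/optimizations2 split, and the
  // driver records whether any pass reported progress.
  bool RunPasses(const std::vector<HOptimization*>& passes) {
    bool any_change = false;
    for (HOptimization* pass : passes) {
      any_change |= pass->Run();
    }
    return any_change;
  }

Because the result is best-effort, a driver must treat true
conservatively: it may mean real progress or merely "unknown".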
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 237ecd3..d598ff5 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -948,22 +948,22 @@
DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
};
-void LoadStoreElimination::Run() {
+bool LoadStoreElimination::Run() {
if (graph_->IsDebuggable() || graph_->HasTryCatch()) {
// Debugger may set heap values or trigger deoptimization of callers.
// Try/catch support not implemented yet.
// Skip this optimization.
- return;
+ return false;
}
const HeapLocationCollector& heap_location_collector = lsa_.GetHeapLocationCollector();
if (heap_location_collector.GetNumberOfHeapLocations() == 0) {
// No HeapLocation information from LSA, skip this optimization.
- return;
+ return false;
}
// TODO: analyze VecLoad/VecStore better.
if (graph_->HasSIMD()) {
- return;
+ return false;
}
LSEVisitor lse_visitor(graph_, heap_location_collector, side_effects_, stats_);
@@ -971,6 +971,7 @@
  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
    lse_visitor.VisitBasicBlock(block);
}
lse_visitor.RemoveInstructions();
+ return true;
}
} // namespace art