Step 1 of 2: conditional passes.
Rationale:
The change adds a return value to Run() in preparation for
conditional pass execution. The value returned by Run() is
best effort: returning false means no optimizations were
applied or no useful information was obtained. I filled
in a few cases with more exact information; others
still just return true. In addition, the change integrates
inlining as a regular pass, avoiding the ugly "break" into
optimizations1 and optimizations2.
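
As a rough sketch of the intended use (the Pass, Analysis, and
Elimination classes below are hypothetical illustrations, not the
actual ART pass manager API), a driver can chain passes
conditionally on the boolean:

  #include <iostream>

  // Hypothetical pass interface (illustration only): Run() returns
  // true when the pass changed the graph or produced information
  // that later passes can use. The result is best effort: false
  // only means "no known benefit", not "invalid graph".
  class Pass {
   public:
    virtual ~Pass() = default;
    virtual bool Run() = 0;
  };

  // Stand-in for an analysis that may bail out, in the spirit of
  // LoadStoreAnalysis::Run() below.
  class Analysis : public Pass {
   public:
    bool Run() override {
      bool bailed_out = false;  // e.g. too many heap locations
      return !bailed_out;
    }
  };

  // Stand-in for a pass that consumes the analysis results.
  class Elimination : public Pass {
   public:
    bool Run() override {
      std::cout << "running elimination\n";
      return true;
    }
  };

  int main() {
    Analysis analysis;
    Elimination elimination;
    // Conditional pass execution: skip the consumer when the
    // analysis reports that nothing useful was obtained.
    if (analysis.Run()) {
      elimination.Run();
    }
    return 0;
  }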
Bug: b/78171933, b/74026074
Test: test-art-host,target
Change-Id: Ia39c5c83c01dcd79841e4b623917d61c754cf075
diff --git a/compiler/optimizing/load_store_analysis.cc b/compiler/optimizing/load_store_analysis.cc
index 8b1812a..7d7bb94 100644
--- a/compiler/optimizing/load_store_analysis.cc
+++ b/compiler/optimizing/load_store_analysis.cc
@@ -152,7 +152,7 @@
return true;
}
-void LoadStoreAnalysis::Run() {
+bool LoadStoreAnalysis::Run() {
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
heap_location_collector_.VisitBasicBlock(block);
}
@@ -160,22 +160,23 @@
if (heap_location_collector_.GetNumberOfHeapLocations() > kMaxNumberOfHeapLocations) {
// Bail out if there are too many heap locations to deal with.
heap_location_collector_.CleanUp();
- return;
+ return false;
}
if (!heap_location_collector_.HasHeapStores()) {
// Without heap stores, this pass would act mostly as GVN on heap accesses.
heap_location_collector_.CleanUp();
- return;
+ return false;
}
if (heap_location_collector_.HasVolatile() || heap_location_collector_.HasMonitorOps()) {
// Don't do load/store elimination if the method has volatile field accesses or
// monitor operations, for now.
// TODO: do it right.
heap_location_collector_.CleanUp();
- return;
+ return false;
}
heap_location_collector_.BuildAliasingMatrix();
+ return true;
}
} // namespace art