path: root/compiler/optimizing/scheduler.cc
author Vladimir Marko <vmarko@google.com> 2020-06-08 10:26:06 +0100
committer Vladimir Marko <vmarko@google.com> 2020-06-08 12:41:04 +0000
commit ef898425c975f150caaed077ca204fa86b951e7f (patch)
tree 1ad038b90bb860fe1b9a20872b990c7918fcd1e1 /compiler/optimizing/scheduler.cc
parent f7290cac4af6a981d98122af1a6d48b0e80da574 (diff)
Run LSA as a part of the LSE pass.
Make LSA a helper class, not an optimization pass. Move all its
allocations to ScopedArenaAllocator to reduce the peak memory usage a
little bit.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Change-Id: I7fc634abe732d22c99005921ffecac5207bcf05f
Diffstat (limited to 'compiler/optimizing/scheduler.cc')
-rw-r--r-- compiler/optimizing/scheduler.cc | 3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index f722cf91a7..ea5a13a0db 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -559,7 +559,8 @@ void HScheduler::Schedule(HGraph* graph) {
// We run lsa here instead of in a separate pass to better control whether we
// should run the analysis or not.
const HeapLocationCollector* heap_location_collector = nullptr;
- LoadStoreAnalysis lsa(graph);
+ ScopedArenaAllocator allocator(graph->GetArenaStack());
+ LoadStoreAnalysis lsa(graph, &allocator);
if (!only_optimize_loop_blocks_ || graph->HasLoops()) {
lsa.Run();
heap_location_collector = &lsa.GetHeapLocationCollector();
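
For context, below is a minimal standalone sketch of the scoping pattern this change relies on: the analysis helper allocates from an arena owned by the calling pass, so all of its scratch memory is released when the arena goes out of scope, keeping peak memory low. This is not ART code; the class names (ScopedArena, Analysis, RunPass) are hypothetical stand-ins for ScopedArenaAllocator and LoadStoreAnalysis.

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for a scoped arena allocator: every allocation it
// hands out is released when the arena object itself is destroyed.
class ScopedArena {
 public:
  void* Alloc(size_t bytes) {
    blocks_.emplace_back(bytes);
    return blocks_.back().data();
  }
  // All blocks are freed here, i.e. at the end of the enclosing scope.
  ~ScopedArena() = default;

 private:
  std::vector<std::vector<uint8_t>> blocks_;
};

// Hypothetical analysis helper: not a pass of its own, it just writes its
// results into memory owned by the caller-provided arena.
class Analysis {
 public:
  explicit Analysis(ScopedArena* arena) : arena_(arena) {}

  void Run() {
    // Scratch data for the analysis comes from the caller's arena.
    results_ = static_cast<int*>(arena_->Alloc(64 * sizeof(int)));
    for (int i = 0; i < 64; ++i) {
      results_[i] = i;
    }
  }

  const int* GetResults() const { return results_; }

 private:
  ScopedArena* arena_;
  int* results_ = nullptr;
};

// The calling pass owns the arena on its stack, mirroring how Schedule()
// above creates a ScopedArenaAllocator and hands it to LoadStoreAnalysis.
void RunPass() {
  ScopedArena arena;          // Lives only for the duration of this pass.
  Analysis analysis(&arena);
  analysis.Run();
  // ... consume analysis.GetResults() while the arena is still alive ...
}                             // Arena and all analysis allocations freed here.

int main() {
  RunPass();
  return 0;
}

Because the helper no longer runs as a separate optimization pass with its own longer-lived allocations, its memory cost is bounded by the lifetime of the enclosing function, which is what the commit message means by reducing peak memory usage.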