author    Vladimir Marko <vmarko@google.com>  2017-10-05 14:35:55 +0100
committer Vladimir Marko <vmarko@google.com>  2017-10-09 10:39:22 +0100
commit    e764d2e50c544c2cb98ee61a15d613161ac6bd17 (patch)
tree      112aa7ca459d2edb4f800897060a2407fcc622c7  /compiler/optimizing/scheduler.h
parent    ca6fff898afcb62491458ae8bcd428bfb3043da1 (diff)
Use ScopedArenaAllocator for register allocation.
Memory needed to compile the two most expensive methods for the
aosp_angler-userdebug boot image:
  BatteryStats.dumpCheckinLocked(): 25.1MiB -> 21.1MiB
  BatteryStats.dumpLocked():        49.6MiB -> 42.0MiB

This is because all the memory previously used by the scheduler is
reused by the register allocator; the register allocator has a higher
peak usage of the ArenaStack.

And continue the "arena" -> "allocator" renaming.

Test: m test-art-host-gtest
Test: testrunner.py --host
Bug: 64312607
Change-Id: Idfd79a9901552b5147ec0bf591cb38120de86b01
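The mechanism behind those numbers: a ScopedArenaAllocator frames a region of
the shared ArenaStack and pops that frame when it goes out of scope, so a
later pass grows into the same bytes and only the highest per-pass peak
counts toward the total. A minimal sketch of that pattern follows, using the
type and kind names that appear in this diff; CompileMethod, RunScheduler and
RunRegisterAllocation are hypothetical helpers for illustration, not the
actual ART code paths.

// Sketch only: LIFO reuse of ArenaStack memory across compiler passes.
// ArenaStack, ScopedArenaAllocator, ScopedArenaVector and
// kArenaAllocScheduler are the ART names used in this diff; the helper
// functions are hypothetical.
void CompileMethod(ArenaStack* arena_stack) {
  {
    // Scheduling pass: every allocation is framed by this scope.
    ScopedArenaAllocator allocator(arena_stack);
    ScopedArenaVector<SchedulingNode*> candidates(
        allocator.Adapter(kArenaAllocScheduler));
    RunScheduler(&allocator, &candidates);  // hypothetical helper
  }  // Frame popped: the scheduler's bytes return to arena_stack.
  {
    // Register allocation reuses the bytes the scheduler released, so
    // the ArenaStack peak is max(scheduler, regalloc), not their sum.
    ScopedArenaAllocator allocator(arena_stack);
    RunRegisterAllocation(&allocator);  // hypothetical helper
  }
}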
Diffstat (limited to 'compiler/optimizing/scheduler.h')
-rw-r--r--  compiler/optimizing/scheduler.h  |  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 493ec0b07b..3efd26af9b 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -156,14 +156,14 @@ class HScheduler;
  */
 class SchedulingNode : public DeletableArenaObject<kArenaAllocScheduler> {
  public:
-  SchedulingNode(HInstruction* instr, ScopedArenaAllocator* arena, bool is_scheduling_barrier)
+  SchedulingNode(HInstruction* instr, ScopedArenaAllocator* allocator, bool is_scheduling_barrier)
       : latency_(0),
         internal_latency_(0),
         critical_path_(0),
         instruction_(instr),
         is_scheduling_barrier_(is_scheduling_barrier),
-        data_predecessors_(arena->Adapter(kArenaAllocScheduler)),
-        other_predecessors_(arena->Adapter(kArenaAllocScheduler)),
+        data_predecessors_(allocator->Adapter(kArenaAllocScheduler)),
+        other_predecessors_(allocator->Adapter(kArenaAllocScheduler)),
         num_unscheduled_successors_(0) {
     data_predecessors_.reserve(kPreallocatedPredecessors);
   }
@@ -251,9 +251,9 @@ class SchedulingNode : public DeletableArenaObject<kArenaAllocScheduler> {
  */
 class SchedulingGraph : public ValueObject {
  public:
-  SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* arena)
+  SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* allocator)
       : scheduler_(scheduler),
-        arena_(arena),
+        arena_(allocator),
         contains_scheduling_barrier_(false),
         nodes_map_(arena_->Adapter(kArenaAllocScheduler)),
         heap_location_collector_(nullptr) {}
@@ -434,16 +434,16 @@ class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector {
 
 class HScheduler {
  public:
-  HScheduler(ScopedArenaAllocator* arena,
+  HScheduler(ScopedArenaAllocator* allocator,
              SchedulingLatencyVisitor* latency_visitor,
              SchedulingNodeSelector* selector)
-      : arena_(arena),
+      : allocator_(allocator),
         latency_visitor_(latency_visitor),
         selector_(selector),
         only_optimize_loop_blocks_(true),
-        scheduling_graph_(this, arena),
+        scheduling_graph_(this, allocator),
         cursor_(nullptr),
-        candidates_(arena_->Adapter(kArenaAllocScheduler)) {}
+        candidates_(allocator_->Adapter(kArenaAllocScheduler)) {}
   virtual ~HScheduler() {}
 
   void Schedule(HGraph* graph);
@@ -471,7 +471,7 @@ class HScheduler {
     node->SetInternalLatency(latency_visitor_->GetLastVisitedInternalLatency());
   }
 
-  ScopedArenaAllocator* const arena_;
+  ScopedArenaAllocator* const allocator_;
   SchedulingLatencyVisitor* const latency_visitor_;
   SchedulingNodeSelector* const selector_;
   bool only_optimize_loop_blocks_;
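For context on the Adapter() calls this rename touches: the adapter is a
standard-allocator shim that lets STL containers draw their storage from the
scoped arena, tagged with an ArenaAllocKind (here kArenaAllocScheduler) for
per-pass accounting. A hedged sketch of the shape HScheduler lands on after
this hunk; the ScopedArenaVector alias is assumed to follow ART's
std::vector-plus-adapter pattern.

// Sketch: how the renamed allocator_ member feeds an STL container.
// ScopedArenaVector<T> is assumed to alias
// std::vector<T, ScopedArenaAllocatorAdapter<T>>.
class SchedulerSketch {
 public:
  explicit SchedulerSketch(ScopedArenaAllocator* allocator)
      : allocator_(allocator),
        candidates_(allocator_->Adapter(kArenaAllocScheduler)) {}

 private:
  ScopedArenaAllocator* const allocator_;  // Not owned; outlives the pass.
  ScopedArenaVector<SchedulingNode*> candidates_;  // Backed by the arena.
};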