summaryrefslogtreecommitdiff
path: root/compiler/optimizing/register_allocator.h
diff options
context:
space:
mode:
author Vladimir Marko <vmarko@google.com> 2017-10-05 14:35:55 +0100
committer Vladimir Marko <vmarko@google.com> 2017-10-09 10:39:22 +0100
commit e764d2e50c544c2cb98ee61a15d613161ac6bd17 (patch)
tree 112aa7ca459d2edb4f800897060a2407fcc622c7 /compiler/optimizing/register_allocator.h
parent ca6fff898afcb62491458ae8bcd428bfb3043da1 (diff)
Use ScopedArenaAllocator for register allocation.
Memory needed to compile the two most expensive methods for aosp_angler-userdebug boot image: BatteryStats.dumpCheckinLocked() : 25.1MiB -> 21.1MiB BatteryStats.dumpLocked(): 49.6MiB -> 42.0MiB This is because all the memory previously used by Scheduler is reused by the register allocator; the register allocator has a higher peak usage of the ArenaStack. And continue the "arena"->"allocator" renaming. Test: m test-art-host-gtest Test: testrunner.py --host Bug: 64312607 Change-Id: Idfd79a9901552b5147ec0bf591cb38120de86b01
Diffstat (limited to 'compiler/optimizing/register_allocator.h')
-rw-r--r-- compiler/optimizing/register_allocator.h | 19
1 files changed, 9 insertions, 10 deletions
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 4375d6851a..eaeec3b261 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
#include "arch/instruction_set.h"
-#include "base/arena_containers.h"
+#include "base/array_ref.h"
#include "base/arena_object.h"
#include "base/macros.h"
@@ -36,7 +36,7 @@ class SsaLivenessAnalysis;
/**
* Base class for any register allocator.
*/
-class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> {
+class RegisterAllocator : public DeletableArenaObject<kArenaAllocRegisterAllocator> {
public:
enum Strategy {
kRegisterAllocatorLinearScan,
@@ -45,10 +45,10 @@ class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> {
static constexpr Strategy kRegisterAllocatorDefault = kRegisterAllocatorLinearScan;
- static RegisterAllocator* Create(ArenaAllocator* allocator,
- CodeGenerator* codegen,
- const SsaLivenessAnalysis& analysis,
- Strategy strategy = kRegisterAllocatorDefault);
+ static std::unique_ptr<RegisterAllocator> Create(ScopedArenaAllocator* allocator,
+ CodeGenerator* codegen,
+ const SsaLivenessAnalysis& analysis,
+ Strategy strategy = kRegisterAllocatorDefault);
virtual ~RegisterAllocator() = default;
@@ -64,18 +64,17 @@ class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> {
InstructionSet instruction_set);
// Verifies that live intervals do not conflict. Used by unit testing.
- static bool ValidateIntervals(const ArenaVector<LiveInterval*>& intervals,
+ static bool ValidateIntervals(ArrayRef<LiveInterval* const> intervals,
size_t number_of_spill_slots,
size_t number_of_out_slots,
const CodeGenerator& codegen,
- ArenaAllocator* allocator,
bool processing_core_registers,
bool log_fatal_on_failure);
static constexpr const char* kRegisterAllocatorPassName = "register";
protected:
- RegisterAllocator(ArenaAllocator* allocator,
+ RegisterAllocator(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis);
@@ -88,7 +87,7 @@ class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> {
// to find an optimal split position.
LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to);
- ArenaAllocator* const allocator_;
+ ScopedArenaAllocator* allocator_;
CodeGenerator* const codegen_;
const SsaLivenessAnalysis& liveness_;
};