author | 2017-10-05 14:35:55 +0100
---|---
committer | 2017-10-09 10:39:22 +0100
commit | e764d2e50c544c2cb98ee61a15d613161ac6bd17 (patch)
tree | 112aa7ca459d2edb4f800897060a2407fcc622c7 /compiler/optimizing/code_generator.cc
parent | ca6fff898afcb62491458ae8bcd428bfb3043da1 (diff)
Use ScopedArenaAllocator for register allocation.
Memory needed to compile the two most expensive methods for
aosp_angler-userdebug boot image:
BatteryStats.dumpCheckinLocked(): 25.1MiB -> 21.1MiB
BatteryStats.dumpLocked():        49.6MiB -> 42.0MiB
This is because all the memory previously used by the Scheduler
is reused by the register allocator; of the two, the register
allocator has the higher peak usage of the ArenaStack, so it
alone determines the peak for the compilation.
And continue the "arena"->"allocator" renaming.
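
The mechanism behind these savings, in sketch form: a
ScopedArenaAllocator is an RAII scope over a shared ArenaStack, so a
pass's memory is handed back to the stack when the pass finishes and
the next pass reuses it. The classes below are a simplified stand-in
for ART's real ArenaStack/ScopedArenaAllocator (declared in
base/scoped_arena_allocator.h), illustrative only, not the actual
implementation:

```cpp
#include <cstddef>
#include <cstdio>

// Simplified stand-in for ART's ArenaStack: one shared stack of memory
// whose peak usage we track. The real class manages actual arena chunks.
class ArenaStack {
 public:
  void Allocate(std::size_t bytes) {
    top_ += bytes;
    if (top_ > peak_) peak_ = top_;  // Peak across all scopes, ever.
  }
  void Release(std::size_t bytes) { top_ -= bytes; }
  std::size_t Peak() const { return peak_; }

 private:
  std::size_t top_ = 0;
  std::size_t peak_ = 0;
};

// RAII scope over the stack: everything allocated through it is handed
// back on destruction, so the next compiler pass reuses the same memory.
class ScopedArenaAllocator {
 public:
  explicit ScopedArenaAllocator(ArenaStack* stack) : stack_(stack) {}
  ~ScopedArenaAllocator() { stack_->Release(allocated_); }
  void Alloc(std::size_t bytes) {
    stack_->Allocate(bytes);
    allocated_ += bytes;
  }

 private:
  ArenaStack* stack_;
  std::size_t allocated_ = 0;
};

int main() {
  ArenaStack stack;
  {
    ScopedArenaAllocator scheduler(&stack);  // Scheduler pass runs first...
    scheduler.Alloc(20);
  }                                          // ...and releases memory here.
  {
    ScopedArenaAllocator regalloc(&stack);   // Register allocation reuses it.
    regalloc.Alloc(30);
  }
  std::printf("peak: %zu\n", stack.Peak());  // 30 == max(20, 30), not 50.
  return 0;
}
```

With this layering the stack's peak is the maximum over passes rather
than their sum, which is why moving the register allocator onto the
ArenaStack saves memory even though it then allocates more than the
Scheduler did.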
Test: m test-art-host-gtest
Test: testrunner.py --host
Bug: 64312607
Change-Id: Idfd79a9901552b5147ec0bf591cb38120de86b01
Diffstat (limited to 'compiler/optimizing/code_generator.cc')
-rw-r--r-- | compiler/optimizing/code_generator.cc | 42
1 file changed, 15 insertions(+), 27 deletions(-)
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index ba26cfc70f..dd8e3d240f 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -617,61 +617,49 @@ std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
                                                      const InstructionSetFeatures& isa_features,
                                                      const CompilerOptions& compiler_options,
                                                      OptimizingCompilerStats* stats) {
-  ArenaAllocator* arena = graph->GetAllocator();
+  ArenaAllocator* allocator = graph->GetAllocator();
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm
     case kArm:
     case kThumb2: {
       return std::unique_ptr<CodeGenerator>(
-          new (arena) arm::CodeGeneratorARMVIXL(graph,
-                                                *isa_features.AsArmInstructionSetFeatures(),
-                                                compiler_options,
-                                                stats));
+          new (allocator) arm::CodeGeneratorARMVIXL(
+              graph, *isa_features.AsArmInstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
     case kArm64: {
       return std::unique_ptr<CodeGenerator>(
-          new (arena) arm64::CodeGeneratorARM64(graph,
-                                                *isa_features.AsArm64InstructionSetFeatures(),
-                                                compiler_options,
-                                                stats));
+          new (allocator) arm64::CodeGeneratorARM64(
+              graph, *isa_features.AsArm64InstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
     case kMips: {
       return std::unique_ptr<CodeGenerator>(
-          new (arena) mips::CodeGeneratorMIPS(graph,
-                                              *isa_features.AsMipsInstructionSetFeatures(),
-                                              compiler_options,
-                                              stats));
+          new (allocator) mips::CodeGeneratorMIPS(
+              graph, *isa_features.AsMipsInstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
     case kMips64: {
       return std::unique_ptr<CodeGenerator>(
-          new (arena) mips64::CodeGeneratorMIPS64(graph,
-                                                  *isa_features.AsMips64InstructionSetFeatures(),
-                                                  compiler_options,
-                                                  stats));
+          new (allocator) mips64::CodeGeneratorMIPS64(
+              graph, *isa_features.AsMips64InstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case kX86: {
       return std::unique_ptr<CodeGenerator>(
-          new (arena) x86::CodeGeneratorX86(graph,
-                                            *isa_features.AsX86InstructionSetFeatures(),
-                                            compiler_options,
-                                            stats));
+          new (allocator) x86::CodeGeneratorX86(
+              graph, *isa_features.AsX86InstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
     case kX86_64: {
       return std::unique_ptr<CodeGenerator>(
-          new (arena) x86_64::CodeGeneratorX86_64(graph,
-                                                  *isa_features.AsX86_64InstructionSetFeatures(),
-                                                  compiler_options,
-                                                  stats));
+          new (allocator) x86_64::CodeGeneratorX86_64(
+              graph, *isa_features.AsX86_64InstructionSetFeatures(), compiler_options, stats));
     }
 #endif
     default:
@@ -910,7 +898,7 @@ void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
 }
 
 void CodeGenerator::RecordCatchBlockInfo() {
-  ArenaAllocator* arena = graph_->GetAllocator();
+  ArenaAllocator* allocator = graph_->GetAllocator();
 
   for (HBasicBlock* block : *block_order_) {
     if (!block->IsCatchBlock()) {
@@ -925,7 +913,7 @@ void CodeGenerator::RecordCatchBlockInfo() {
 
     // The stack mask is not used, so we leave it empty.
     ArenaBitVector* stack_mask =
-        ArenaBitVector::Create(arena, 0, /* expandable */ true, kArenaAllocCodeGenerator);
+        ArenaBitVector::Create(allocator, 0, /* expandable */ true, kArenaAllocCodeGenerator);
 
     stack_map_stream_.BeginStackMapEntry(dex_pc,
                                          native_pc,
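
A note on the `new (allocator)` spelling used throughout the diff:
ART's code generators overload `operator new` to take an
`ArenaAllocator*`, so construction bumps a pointer into arena memory
and objects are reclaimed in bulk when the arena dies. Below is a
minimal sketch of that pattern; the names `CodeGeneratorBase` and the
bump-pointer `Alloc` are hypothetical stand-ins for ART's arena
machinery, not its actual API:

```cpp
#include <cstddef>
#include <cstdlib>

// Simplified bump-pointer arena; ART's ArenaAllocator
// (base/arena_allocator.h) additionally tracks allocation kinds and
// draws its chunks from a pool.
class ArenaAllocator {
 public:
  explicit ArenaAllocator(std::size_t capacity)
      : begin_(static_cast<char*>(std::malloc(capacity))), top_(begin_) {}
  ~ArenaAllocator() { std::free(begin_); }  // One bulk free for everything.

  void* Alloc(std::size_t bytes) {
    void* result = top_;
    top_ += (bytes + 7u) & ~static_cast<std::size_t>(7);  // 8-byte alignment.
    return result;
  }

 private:
  char* begin_;
  char* top_;
};

// Hypothetical base class showing the trick: an operator new overload
// taking the allocator is what lets the diff write
// `new (allocator) arm::CodeGeneratorARMVIXL(...)` and have the object
// placed in arena memory.
class CodeGeneratorBase {
 public:
  void* operator new(std::size_t size, ArenaAllocator* allocator) {
    return allocator->Alloc(size);
  }
  // No matching operator delete: arena objects are reclaimed in bulk
  // when the arena is destroyed, never individually.
};

int main() {
  ArenaAllocator allocator(4096);
  CodeGeneratorBase* codegen = new (&allocator) CodeGeneratorBase();
  (void)codegen;  // Freed implicitly when `allocator` goes out of scope.
  return 0;
}
```

The deliberately missing `operator delete` is the point of the design:
per-object deallocation is skipped entirely, which is what makes
per-pass arena allocation cheap in the compiler.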