From f9f6441c665b5ff9004d3ed55014f46d416fb1bb Mon Sep 17 00:00:00 2001
From: Vladimir Marko
Date: Wed, 2 Sep 2015 14:05:49 +0100
Subject: Optimizing: Tag Arena allocations with their source.

This adds the ability to track where we allocate memory
when the kArenaAllocatorCountAllocations flag is turned on.

Also move some allocations from native heap to the Arena
and remove some unnecessary utilities.

Bug: 23736311
Change-Id: I1aaef3fd405d1de444fe9e618b1ce7ecef07ade3
---
 compiler/optimizing/optimizing_compiler.cc | 55 ++++++++++++++++++------------
 1 file changed, 34 insertions(+), 21 deletions(-)

(limited to 'compiler/optimizing/optimizing_compiler.cc')

diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 6f251e8e6c..898b656b80 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -25,6 +25,7 @@
 
 #include "art_method-inl.h"
 #include "base/arena_allocator.h"
+#include "base/arena_containers.h"
 #include "base/dumpable.h"
 #include "base/timing_logger.h"
 #include "boolean_simplifier.h"
@@ -68,7 +69,9 @@ namespace art {
  */
 class CodeVectorAllocator FINAL : public CodeAllocator {
  public:
-  CodeVectorAllocator() : size_(0) {}
+  explicit CodeVectorAllocator(ArenaAllocator* arena)
+      : memory_(arena->Adapter(kArenaAllocCodeBuffer)),
+        size_(0) {}
 
   virtual uint8_t* Allocate(size_t size) {
     size_ = size;
@@ -77,10 +80,10 @@ class CodeVectorAllocator FINAL : public CodeAllocator {
   }
 
   size_t GetSize() const { return size_; }
-  const std::vector<uint8_t>& GetMemory() const { return memory_; }
+  const ArenaVector<uint8_t>& GetMemory() const { return memory_; }
 
  private:
-  std::vector<uint8_t> memory_;
+  ArenaVector<uint8_t> memory_;
   size_t size_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
@@ -498,7 +501,7 @@ static void RunOptimizations(HGraph* graph,
 
 // The stack map we generate must be 4-byte aligned on ARM. Since existing
 // maps are generated alongside these stack maps, we must also align them.
-static ArrayRef<const uint8_t> AlignVectorSize(std::vector<uint8_t>& vector) {
+static ArrayRef<const uint8_t> AlignVectorSize(ArenaVector<uint8_t>& vector) {
   size_t size = vector.size();
   size_t aligned_size = RoundUp(size, 4);
   for (; size < aligned_size; ++size) {
@@ -553,7 +556,8 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
 
   AllocateRegisters(graph, codegen, pass_observer);
 
-  CodeVectorAllocator allocator;
+  ArenaAllocator* arena = graph->GetArena();
+  CodeVectorAllocator allocator(arena);
   codegen->CompileOptimized(&allocator);
 
   ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
@@ -563,7 +567,7 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
     codegen->BuildSourceMap(&src_mapping_table);
   }
 
-  std::vector<uint8_t> stack_map;
+  ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
   codegen->BuildStackMaps(&stack_map);
 
   MaybeRecordStat(MethodCompilationStat::kCompiledOptimized);
@@ -595,20 +599,21 @@ CompiledMethod* OptimizingCompiler::CompileBaseline(
     CompilerDriver* compiler_driver,
     const DexCompilationUnit& dex_compilation_unit,
     PassObserver* pass_observer) const {
-  CodeVectorAllocator allocator;
+  ArenaAllocator* arena = codegen->GetGraph()->GetArena();
+  CodeVectorAllocator allocator(arena);
   codegen->CompileBaseline(&allocator);
 
   ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
 
-  std::vector<uint8_t> mapping_table;
+  ArenaVector<uint8_t> mapping_table(arena->Adapter(kArenaAllocBaselineMaps));
   codegen->BuildMappingTable(&mapping_table);
   DefaultSrcMap src_mapping_table;
   if (compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()) {
     codegen->BuildSourceMap(&src_mapping_table);
   }
-  std::vector<uint8_t> vmap_table;
+  ArenaVector<uint8_t> vmap_table(arena->Adapter(kArenaAllocBaselineMaps));
   codegen->BuildVMapTable(&vmap_table);
-  std::vector<uint8_t> gc_map;
+  ArenaVector<uint8_t> gc_map(arena->Adapter(kArenaAllocBaselineMaps));
   codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);
 
   MaybeRecordStat(MethodCompilationStat::kCompiledBaseline);
@@ -752,6 +757,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
   // or the debuggable flag). If it is set, we can run baseline. Otherwise, we fall back
   // to Quick.
   bool can_use_baseline = !run_optimizations_ && builder.CanUseBaselineForStringInit();
+  CompiledMethod* compiled_method = nullptr;
   if (run_optimizations_ && can_allocate_registers) {
     VLOG(compiler) << "Optimizing " << method_name;
 
@@ -766,11 +772,11 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
       }
     }
 
-    return CompileOptimized(graph,
-                            codegen.get(),
-                            compiler_driver,
-                            dex_compilation_unit,
-                            &pass_observer);
+    compiled_method = CompileOptimized(graph,
+                                       codegen.get(),
+                                       compiler_driver,
+                                       dex_compilation_unit,
+                                       &pass_observer);
   } else if (shouldOptimize && can_allocate_registers) {
     LOG(FATAL) << "Could not allocate registers in optimizing compiler";
     UNREACHABLE();
@@ -783,13 +789,20 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
       MaybeRecordStat(MethodCompilationStat::kNotOptimizedRegisterAllocator);
     }
 
-    return CompileBaseline(codegen.get(),
-                           compiler_driver,
-                           dex_compilation_unit,
-                           &pass_observer);
-  } else {
-    return nullptr;
+    compiled_method = CompileBaseline(codegen.get(),
+                                      compiler_driver,
+                                      dex_compilation_unit,
+                                      &pass_observer);
+  }
+
+  if (kArenaAllocatorCountAllocations) {
+    if (arena.BytesAllocated() > 4 * MB) {
+      MemStats mem_stats(arena.GetMemStats());
+      LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
+    }
   }
+
+  return compiled_method;
 }
 
 CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
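
A minimal, self-contained C++ sketch of the allocation-tagging idea the patch builds on follows, for readers without the ART sources at hand. It is not the real implementation: MiniArena, MiniAdapter, MiniArenaVector and the three-entry kind enum are hypothetical stand-ins for art::ArenaAllocator, the adapter returned by ArenaAllocator::Adapter(kind), ArenaVector<> and the kArenaAlloc* kinds seen in the diff, and DumpMemStats() stands in for MemStats / Dumpable<MemStats>. The real classes live in base/arena_allocator.h and base/arena_containers.h and differ in many details.

// Minimal sketch (NOT the real ART classes) of tagging arena allocations
// with their source and dumping a per-kind byte count afterwards.
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

enum ArenaAllocKind : std::size_t {
  kArenaAllocCodeBuffer,
  kArenaAllocStackMaps,
  kArenaAllocBaselineMaps,
  kNumArenaAllocKinds
};

// Hands out byte buffers and records how many bytes each kind requested.
// (The real arena carves blocks out of large pools; this sketch simply keeps
// every allocation alive until the arena itself is destroyed.)
class MiniArena {
 public:
  void* Alloc(std::size_t bytes, ArenaAllocKind kind) {
    bytes_per_kind_[kind] += bytes;
    storage_.emplace_back(bytes);
    return storage_.back().data();
  }

  std::size_t BytesAllocated() const {
    std::size_t total = 0;
    for (std::size_t b : bytes_per_kind_) total += b;
    return total;
  }

  // Poor man's MemStats dump: one line per allocation source.
  void DumpMemStats(std::ostream& os) const {
    static const char* const kNames[] = {"CodeBuffer", "StackMaps", "BaselineMaps"};
    for (std::size_t i = 0; i != kNumArenaAllocKinds; ++i) {
      os << kNames[i] << ": " << bytes_per_kind_[i] << " bytes\n";
    }
  }

 private:
  std::array<std::size_t, kNumArenaAllocKinds> bytes_per_kind_{};
  std::vector<std::vector<std::uint8_t>> storage_;
};

// STL allocator adapter: a container constructed with it draws memory from the
// arena and attributes every byte to the kind chosen at construction time.
// Only meant for byte buffers here (no over-alignment handling).
template <typename T>
class MiniAdapter {
 public:
  using value_type = T;

  MiniAdapter(MiniArena* arena, ArenaAllocKind kind) : arena_(arena), kind_(kind) {}
  template <typename U>
  MiniAdapter(const MiniAdapter<U>& other) : arena_(other.arena_), kind_(other.kind_) {}

  T* allocate(std::size_t n) {
    return static_cast<T*>(arena_->Alloc(n * sizeof(T), kind_));
  }
  void deallocate(T*, std::size_t) {}  // arena memory is released wholesale

  MiniArena* arena_;
  ArenaAllocKind kind_;
};

template <typename T, typename U>
bool operator==(const MiniAdapter<T>& a, const MiniAdapter<U>& b) {
  return a.arena_ == b.arena_ && a.kind_ == b.kind_;
}
template <typename T, typename U>
bool operator!=(const MiniAdapter<T>& a, const MiniAdapter<U>& b) { return !(a == b); }

template <typename T>
using MiniArenaVector = std::vector<T, MiniAdapter<T>>;

int main() {
  MiniArena arena;
  // Mirrors the patch: the code buffer and the stack maps live in the arena,
  // each tagged with its own kind.
  MiniArenaVector<std::uint8_t> code(MiniAdapter<std::uint8_t>(&arena, kArenaAllocCodeBuffer));
  code.resize(1024);
  MiniArenaVector<std::uint8_t> stack_map(MiniAdapter<std::uint8_t>(&arena, kArenaAllocStackMaps));
  stack_map.resize(256);

  if (arena.BytesAllocated() > 0) {  // the patch dumps only above 4 * MB
    arena.DumpMemStats(std::cout);
  }
  return 0;
}

Tagging at the adapter level is what keeps the call sites in the diff small: constructing a vector with arena->Adapter(kArenaAllocStackMaps) is enough to attribute all of that vector's memory to one source, without threading a kind argument through every later allocation.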