Diffstat (limited to 'compiler/optimizing')
 compiler/optimizing/code_generator.cc      | 12
 compiler/optimizing/code_generator.h       | 10
 compiler/optimizing/nodes.h                | 93
 compiler/optimizing/optimization.h         |  2
 compiler/optimizing/optimizing_compiler.cc | 55
 5 files changed, 84 insertions, 88 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 7c60026642..3f69270f17 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -585,7 +585,7 @@ CodeGenerator* CodeGenerator::Create(HGraph* graph,
 }
 
 void CodeGenerator::BuildNativeGCMap(
-    std::vector<uint8_t>* data, const DexCompilationUnit& dex_compilation_unit) const {
+    ArenaVector<uint8_t>* data, const DexCompilationUnit& dex_compilation_unit) const {
   const std::vector<uint8_t>& gc_map_raw =
       dex_compilation_unit.GetVerifiedMethod()->GetDexGcMap();
   verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
@@ -613,7 +613,7 @@ void CodeGenerator::BuildSourceMap(DefaultSrcMap* src_map) const {
   }
 }
 
-void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
+void CodeGenerator::BuildMappingTable(ArenaVector<uint8_t>* data) const {
   uint32_t pc2dex_data_size = 0u;
   uint32_t pc2dex_entries = stack_map_stream_.GetNumberOfStackMaps();
   uint32_t pc2dex_offset = 0u;
@@ -712,18 +712,16 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
   }
 }
 
-void CodeGenerator::BuildVMapTable(std::vector<uint8_t>* data) const {
-  Leb128EncodingVector vmap_encoder;
+void CodeGenerator::BuildVMapTable(ArenaVector<uint8_t>* data) const {
+  Leb128Encoder<ArenaAllocatorAdapter<uint8_t>> vmap_encoder(data);
   // We currently don't use callee-saved registers.
   size_t size = 0 + 1 /* marker */ + 0;
   vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
   vmap_encoder.PushBackUnsigned(size);
   vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);
-
-  *data = vmap_encoder.GetData();
 }
 
-void CodeGenerator::BuildStackMaps(std::vector<uint8_t>* data) {
+void CodeGenerator::BuildStackMaps(ArenaVector<uint8_t>* data) {
   uint32_t size = stack_map_stream_.PrepareForFillIn();
   data->resize(size);
   MemoryRegion region(data->data(), size);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index cdd4675d2f..754b5ecd0e 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -19,6 +19,8 @@
 
 #include "arch/instruction_set.h"
 #include "arch/instruction_set_features.h"
+#include "base/arena_containers.h"
+#include "base/arena_object.h"
 #include "base/bit_field.h"
 #include "driver/compiler_options.h"
 #include "globals.h"
@@ -236,11 +238,11 @@ class CodeGenerator {
   }
 
   void BuildSourceMap(DefaultSrcMap* src_map) const;
-  void BuildMappingTable(std::vector<uint8_t>* vector) const;
-  void BuildVMapTable(std::vector<uint8_t>* vector) const;
+  void BuildMappingTable(ArenaVector<uint8_t>* vector) const;
+  void BuildVMapTable(ArenaVector<uint8_t>* vector) const;
   void BuildNativeGCMap(
-      std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
-  void BuildStackMaps(std::vector<uint8_t>* vector);
+      ArenaVector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
+  void BuildStackMaps(ArenaVector<uint8_t>* vector);
 
   bool IsBaseline() const {
     return is_baseline_;
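
The BuildVMapTable() hunk above also removes a copy: instead of a Leb128EncodingVector that owns its own std::vector and assigns it to *data at the end, the Leb128Encoder is now parameterized on the arena adapter and appends straight into the caller's ArenaVector. A minimal sketch of the underlying ULEB128 append (generic C++ with illustrative names, not ART's actual Leb128Encoder API):

#include <cstdint>
#include <vector>

// Appends 'value' to 'data' in ULEB128 form. The container type is a
// template parameter, so the same code can fill a std::vector or an
// arena-backed vector in place, with no trailing copy.
template <typename Vector>
void PushBackUnsignedLeb128(Vector* data, uint32_t value) {
  while (value >= 0x80u) {
    // Low 7 bits with the continuation bit set: more bytes follow.
    data->push_back(static_cast<uint8_t>((value & 0x7Fu) | 0x80u));
    value >>= 7;
  }
  // Values below 128 take this single-byte path, which is why the patched
  // code reserves one byte per expected entry.
  data->push_back(static_cast<uint8_t>(value));
}
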
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 2ed2d9ab20..ee82fda6c8 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_OPTIMIZING_NODES_H_
 #define ART_COMPILER_OPTIMIZING_NODES_H_
 
+#include <array>
 #include <type_traits>
 
 #include "base/arena_containers.h"
@@ -81,7 +82,7 @@ enum IfCondition {
   kCondGE,
 };
 
-class HInstructionList {
+class HInstructionList : public ValueObject {
  public:
  HInstructionList() : first_instruction_(nullptr), last_instruction_(nullptr) {}
 
@@ -127,7 +128,7 @@ class HInstructionList {
 };
 
 // Control-flow graph of a method. Contains a list of basic blocks.
-class HGraph : public ArenaObject<kArenaAllocMisc> {
+class HGraph : public ArenaObject<kArenaAllocGraph> {
  public:
   HGraph(ArenaAllocator* arena,
          const DexFile& dex_file,
@@ -464,7 +465,7 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
   DISALLOW_COPY_AND_ASSIGN(HGraph);
 };
 
-class HLoopInformation : public ArenaObject<kArenaAllocMisc> {
+class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> {
  public:
   HLoopInformation(HBasicBlock* header, HGraph* graph)
       : header_(header),
@@ -562,7 +563,7 @@ class HLoopInformation : public ArenaObject<kArenaAllocMisc> {
 // Stores try/catch information for basic blocks.
 // Note that HGraph is constructed so that catch blocks cannot simultaneously
 // be try blocks.
-class TryCatchInformation : public ArenaObject<kArenaAllocMisc> {
+class TryCatchInformation : public ArenaObject<kArenaAllocTryCatchInfo> {
  public:
   // Try block information constructor.
   explicit TryCatchInformation(const HTryBoundary& try_entry)
@@ -619,7 +620,7 @@ static constexpr uint32_t kNoDexPc = -1;
 // as a double linked list. Each block knows its predecessors and
 // successors.
 
-class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
+class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
  public:
   explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
       : graph_(graph),
@@ -1107,7 +1108,7 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
 template <typename T> class HUseList;
 
 template <typename T>
-class HUseListNode : public ArenaObject<kArenaAllocMisc> {
+class HUseListNode : public ArenaObject<kArenaAllocUseListNode> {
  public:
   HUseListNode* GetPrevious() const { return prev_; }
   HUseListNode* GetNext() const { return next_; }
@@ -1492,7 +1493,7 @@ class SideEffects : public ValueObject {
 };
 
 // A HEnvironment object contains the values of virtual registers at a given location.
-class HEnvironment : public ArenaObject<kArenaAllocMisc> {
+class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
  public:
   HEnvironment(ArenaAllocator* arena,
                size_t number_of_vregs,
@@ -1682,7 +1683,7 @@ class ReferenceTypeInfo : ValueObject {
 
 std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs);
 
-class HInstruction : public ArenaObject<kArenaAllocMisc> {
+class HInstruction : public ArenaObject<kArenaAllocInstruction> {
  public:
   explicit HInstruction(SideEffects side_effects)
       : previous_(nullptr),
@@ -2038,72 +2039,54 @@ class HBackwardInstructionIterator : public ValueObject {
   DISALLOW_COPY_AND_ASSIGN(HBackwardInstructionIterator);
 };
 
-// An embedded container with N elements of type T. Used (with partial
-// specialization for N=0) because embedded arrays cannot have size 0.
-template<typename T, intptr_t N>
-class EmbeddedArray {
+template<size_t N>
+class HTemplateInstruction: public HInstruction {
  public:
-  EmbeddedArray() : elements_() {}
-
-  intptr_t GetLength() const { return N; }
-
-  const T& operator[](intptr_t i) const {
-    DCHECK_LT(i, GetLength());
-    return elements_[i];
-  }
+  HTemplateInstruction<N>(SideEffects side_effects)
+      : HInstruction(side_effects), inputs_() {}
+  virtual ~HTemplateInstruction() {}
 
-  T& operator[](intptr_t i) {
-    DCHECK_LT(i, GetLength());
-    return elements_[i];
-  }
+  size_t InputCount() const OVERRIDE { return N; }
 
-  const T& At(intptr_t i) const {
-    return (*this)[i];
+ protected:
+  const HUserRecord<HInstruction*> InputRecordAt(size_t i) const OVERRIDE {
+    DCHECK_LT(i, N);
+    return inputs_[i];
   }
 
-  void SetAt(intptr_t i, const T& val) {
-    (*this)[i] = val;
+  void SetRawInputRecordAt(size_t i, const HUserRecord<HInstruction*>& input) OVERRIDE {
+    DCHECK_LT(i, N);
+    inputs_[i] = input;
   }
 
  private:
-  T elements_[N];
-};
+  std::array<HUserRecord<HInstruction*>, N> inputs_;
 
-template<typename T>
-class EmbeddedArray<T, 0> {
- public:
-  intptr_t length() const { return 0; }
-  const T& operator[](intptr_t i) const {
-    UNUSED(i);
-    LOG(FATAL) << "Unreachable";
-    UNREACHABLE();
-  }
-  T& operator[](intptr_t i) {
-    UNUSED(i);
-    LOG(FATAL) << "Unreachable";
-    UNREACHABLE();
-  }
+  friend class SsaBuilder;
 };
 
-template<intptr_t N>
-class HTemplateInstruction: public HInstruction {
+// HTemplateInstruction specialization for N=0.
+template<>
+class HTemplateInstruction<0>: public HInstruction {
  public:
-  HTemplateInstruction<N>(SideEffects side_effects)
-      : HInstruction(side_effects), inputs_() {}
+  explicit HTemplateInstruction(SideEffects side_effects) : HInstruction(side_effects) {}
   virtual ~HTemplateInstruction() {}
 
-  size_t InputCount() const OVERRIDE { return N; }
+  size_t InputCount() const OVERRIDE { return 0; }
 
  protected:
-  const HUserRecord<HInstruction*> InputRecordAt(size_t i) const OVERRIDE { return inputs_[i]; }
+  const HUserRecord<HInstruction*> InputRecordAt(size_t i ATTRIBUTE_UNUSED) const OVERRIDE {
+    LOG(FATAL) << "Unreachable";
+    UNREACHABLE();
+  }
 
-  void SetRawInputRecordAt(size_t i, const HUserRecord<HInstruction*>& input) OVERRIDE {
-    inputs_[i] = input;
+  void SetRawInputRecordAt(size_t i ATTRIBUTE_UNUSED,
+                           const HUserRecord<HInstruction*>& input ATTRIBUTE_UNUSED) OVERRIDE {
+    LOG(FATAL) << "Unreachable";
+    UNREACHABLE();
   }
 
  private:
-  EmbeddedArray<HUserRecord<HInstruction*>, N> inputs_;
-
   friend class SsaBuilder;
 };
@@ -4833,7 +4816,7 @@ class HFakeString : public HTemplateInstruction<0> {
   DISALLOW_COPY_AND_ASSIGN(HFakeString);
 };
 
-class MoveOperands : public ArenaObject<kArenaAllocMisc> {
+class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> {
  public:
   MoveOperands(Location source,
                Location destination,
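
The nodes.h hunk above replaces the hand-written EmbeddedArray, whose N=0 partial specialization existed only because a C array member may not have zero elements, with std::array, which is well-formed for N == 0. An explicit HTemplateInstruction<0> specialization is still kept so that input accessors on a zero-input instruction abort explicitly (LOG(FATAL)) instead of indexing into an empty array. A standalone sketch of the same pattern, with hypothetical names standing in for the ART types:

#include <array>
#include <cassert>
#include <cstddef>

// Fixed-arity container of instruction inputs; std::array<T, 0> is legal,
// so no generic zero-size workaround is required.
template <size_t N>
class FixedInputs {
 public:
  size_t InputCount() const { return N; }
  int InputAt(size_t i) const {
    assert(i < N);  // mirrors DCHECK_LT in the patched code
    return inputs_[i];
  }
  void SetInputAt(size_t i, int value) {
    assert(i < N);
    inputs_[i] = value;
  }

 private:
  std::array<int, N> inputs_{};  // 'int' stands in for HUserRecord<HInstruction*>
};

// Explicit specialization: any input access on a zero-input node is a bug,
// so it aborts rather than returning garbage.
template <>
class FixedInputs<0> {
 public:
  size_t InputCount() const { return 0; }
  int InputAt(size_t) const { assert(false && "unreachable"); return 0; }
  void SetInputAt(size_t, int) { assert(false && "unreachable"); }
};
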
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index f793a65bf3..2f59d4cd5b 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -26,7 +26,7 @@ namespace art {
 /**
  * Abstraction to implement an optimization pass.
  */
-class HOptimization : public ArenaObject<kArenaAllocMisc> {
+class HOptimization : public ArenaObject<kArenaAllocOptimization> {
  public:
   HOptimization(HGraph* graph,
                 const char* pass_name,
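
Most of the churn in nodes.h and optimization.h is the same one-line change: each class moves off the catch-all kArenaAllocMisc tag onto its own allocation kind, so arena statistics can attribute memory to graphs, instructions, environments, and so on. A simplified sketch of how such a tagged arena base class can work (hypothetical names; ART's ArenaObject and ArenaAllocator differ in detail):

#include <cstddef>
#include <new>

enum ArenaAllocKind { kAllocMisc, kAllocGraph, kAllocOptimization, kNumKinds };

class Arena {
 public:
  void* Alloc(size_t bytes, ArenaAllocKind kind) {
    bytes_used_[kind] += bytes;    // per-kind accounting, as in MemStats
    return ::operator new(bytes);  // a real arena would bump-allocate instead
  }
  size_t BytesUsed(ArenaAllocKind kind) const { return bytes_used_[kind]; }

 private:
  size_t bytes_used_[kNumKinds] = {};
};

// Deriving from TaggedArenaObject<kKind> pins every 'new (arena) T(...)'
// of the subclass to one statistics bucket.
template <ArenaAllocKind kKind>
class TaggedArenaObject {
 public:
  static void* operator new(size_t size, Arena* arena) {
    return arena->Alloc(size, kKind);
  }
  // Matching placement delete, used only if a constructor throws.
  static void operator delete(void*, Arena*) {}
  // Arena memory is reclaimed in bulk; individual delete is a no-op.
  static void operator delete(void*) {}
};

class HOptimizationSketch : public TaggedArenaObject<kAllocOptimization> {};

// Usage: Arena arena; auto* opt = new (&arena) HOptimizationSketch();
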
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 6f251e8e6c..898b656b80 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -25,6 +25,7 @@
 
 #include "art_method-inl.h"
 #include "base/arena_allocator.h"
+#include "base/arena_containers.h"
 #include "base/dumpable.h"
 #include "base/timing_logger.h"
 #include "boolean_simplifier.h"
@@ -68,7 +69,9 @@ namespace art {
  */
 class CodeVectorAllocator FINAL : public CodeAllocator {
  public:
-  CodeVectorAllocator() : size_(0) {}
+  explicit CodeVectorAllocator(ArenaAllocator* arena)
+      : memory_(arena->Adapter(kArenaAllocCodeBuffer)),
+        size_(0) {}
 
   virtual uint8_t* Allocate(size_t size) {
     size_ = size;
@@ -77,10 +80,10 @@ class CodeVectorAllocator FINAL : public CodeAllocator {
   }
 
   size_t GetSize() const { return size_; }
-  const std::vector<uint8_t>& GetMemory() const { return memory_; }
+  const ArenaVector<uint8_t>& GetMemory() const { return memory_; }
 
 private:
-  std::vector<uint8_t> memory_;
+  ArenaVector<uint8_t> memory_;
   size_t size_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
@@ -498,7 +501,7 @@ static void RunOptimizations(HGraph* graph,
 
 // The stack map we generate must be 4-byte aligned on ARM. Since existing
 // maps are generated alongside these stack maps, we must also align them.
-static ArrayRef<const uint8_t> AlignVectorSize(std::vector<uint8_t>& vector) {
+static ArrayRef<const uint8_t> AlignVectorSize(ArenaVector<uint8_t>& vector) {
   size_t size = vector.size();
   size_t aligned_size = RoundUp(size, 4);
   for (; size < aligned_size; ++size) {
@@ -553,7 +556,8 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
 
   AllocateRegisters(graph, codegen, pass_observer);
 
-  CodeVectorAllocator allocator;
+  ArenaAllocator* arena = graph->GetArena();
+  CodeVectorAllocator allocator(arena);
   codegen->CompileOptimized(&allocator);
 
   ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
@@ -563,7 +567,7 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
     codegen->BuildSourceMap(&src_mapping_table);
   }
 
-  std::vector<uint8_t> stack_map;
+  ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
   codegen->BuildStackMaps(&stack_map);
 
   MaybeRecordStat(MethodCompilationStat::kCompiledOptimized);
@@ -595,20 +599,21 @@ CompiledMethod* OptimizingCompiler::CompileBaseline(
     CompilerDriver* compiler_driver,
     const DexCompilationUnit& dex_compilation_unit,
     PassObserver* pass_observer) const {
-  CodeVectorAllocator allocator;
+  ArenaAllocator* arena = codegen->GetGraph()->GetArena();
+  CodeVectorAllocator allocator(arena);
   codegen->CompileBaseline(&allocator);
 
   ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
 
-  std::vector<uint8_t> mapping_table;
+  ArenaVector<uint8_t> mapping_table(arena->Adapter(kArenaAllocBaselineMaps));
   codegen->BuildMappingTable(&mapping_table);
   DefaultSrcMap src_mapping_table;
   if (compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()) {
     codegen->BuildSourceMap(&src_mapping_table);
   }
-  std::vector<uint8_t> vmap_table;
+  ArenaVector<uint8_t> vmap_table(arena->Adapter(kArenaAllocBaselineMaps));
   codegen->BuildVMapTable(&vmap_table);
-  std::vector<uint8_t> gc_map;
+  ArenaVector<uint8_t> gc_map(arena->Adapter(kArenaAllocBaselineMaps));
   codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);
 
   MaybeRecordStat(MethodCompilationStat::kCompiledBaseline);
@@ -752,6 +757,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
   // or the debuggable flag). If it is set, we can run baseline. Otherwise, we fall back
   // to Quick.
   bool can_use_baseline = !run_optimizations_ && builder.CanUseBaselineForStringInit();
+  CompiledMethod* compiled_method = nullptr;
   if (run_optimizations_ && can_allocate_registers) {
     VLOG(compiler) << "Optimizing " << method_name;
 
@@ -766,11 +772,11 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
       }
     }
 
-    return CompileOptimized(graph,
-                            codegen.get(),
-                            compiler_driver,
-                            dex_compilation_unit,
-                            &pass_observer);
+    compiled_method = CompileOptimized(graph,
+                                       codegen.get(),
+                                       compiler_driver,
+                                       dex_compilation_unit,
+                                       &pass_observer);
   } else if (shouldOptimize && can_allocate_registers) {
     LOG(FATAL) << "Could not allocate registers in optimizing compiler";
     UNREACHABLE();
@@ -783,13 +789,20 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
       MaybeRecordStat(MethodCompilationStat::kNotOptimizedRegisterAllocator);
     }
 
-    return CompileBaseline(codegen.get(),
-                           compiler_driver,
-                           dex_compilation_unit,
-                           &pass_observer);
-  } else {
-    return nullptr;
+    compiled_method = CompileBaseline(codegen.get(),
+                                      compiler_driver,
+                                      dex_compilation_unit,
+                                      &pass_observer);
+  }
+
+  if (kArenaAllocatorCountAllocations) {
+    if (arena.BytesAllocated() > 4 * MB) {
+      MemStats mem_stats(arena.GetMemStats());
+      LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
+    }
   }
+
+  return compiled_method;
 }
 
 CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
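
Taken together, the optimizing_compiler.cc changes route every temporary buffer (code, mapping table, vmap table, GC map, stack maps) through the per-method ArenaAllocator via Adapter(tag), and the new block at the end of TryCompile() logs per-kind statistics for methods whose compilation allocates more than 4 MB. A self-contained sketch of the vector-on-arena idea using a standard-library allocator (illustrative only; ART's ArenaAllocatorAdapter also carries its allocation-kind tag, and alignment handling is omitted here):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <memory>
#include <new>
#include <vector>

// Bump-pointer arena: allocation is a pointer increment, deallocation is
// a no-op, and everything is released when the arena is destroyed.
class BumpArena {
 public:
  explicit BumpArena(size_t capacity)
      : buffer_(new uint8_t[capacity]), pos_(0), capacity_(capacity) {}
  void* Alloc(size_t bytes) {
    if (pos_ + bytes > capacity_) throw std::bad_alloc();
    void* result = buffer_.get() + pos_;
    pos_ += bytes;
    return result;
  }
  size_t BytesAllocated() const { return pos_; }

 private:
  std::unique_ptr<uint8_t[]> buffer_;
  size_t pos_;
  size_t capacity_;
};

// Minimal C++11 allocator over the arena, so std::vector storage lives in
// arena memory and dies with it.
template <typename T>
struct ArenaAdapterSketch {
  using value_type = T;
  explicit ArenaAdapterSketch(BumpArena* a) : arena(a) {}
  template <typename U>
  ArenaAdapterSketch(const ArenaAdapterSketch<U>& other) : arena(other.arena) {}
  T* allocate(size_t n) { return static_cast<T*>(arena->Alloc(n * sizeof(T))); }
  void deallocate(T*, size_t) {}  // bulk-freed with the arena
  BumpArena* arena;
};
template <typename T, typename U>
bool operator==(const ArenaAdapterSketch<T>& a, const ArenaAdapterSketch<U>& b) {
  return a.arena == b.arena;
}
template <typename T, typename U>
bool operator!=(const ArenaAdapterSketch<T>& a, const ArenaAdapterSketch<U>& b) {
  return !(a == b);
}

int main() {
  BumpArena arena(1u << 22);
  ArenaAdapterSketch<uint8_t> adapter(&arena);
  std::vector<uint8_t, ArenaAdapterSketch<uint8_t>> stack_map(adapter);
  stack_map.resize(64);  // backed by arena memory
  // Mirrors the new reporting in TryCompile(): flag methods whose
  // compilation allocates past a threshold (4 MB upstream).
  if (arena.BytesAllocated() > 4u * 1024u * 1024u) {
    std::printf("arena used %zu bytes\n", arena.BytesAllocated());
  }
  return 0;
}
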