author		2017-10-09 14:12:23 +0100
committer	2017-10-11 09:44:26 +0100
commit		69d310e0317e2fce97bf8c9c133c5c2c0332e61d (patch)
tree		fba05a1530e6fc4a2e6950303c1f7c6b0ffbb936 /compiler/optimizing
parent		e764d2e50c544c2cb98ee61a15d613161ac6bd17 (diff)
Use ScopedArenaAllocator for building HGraph.
Memory needed to compile the two most expensive methods for
aosp_angler-userdebug boot image:
  BatteryStats.dumpCheckinLocked(): 21.1MiB -> 20.2MiB
  BatteryStats.dumpLocked():        42.0MiB -> 40.3MiB
This is because all the memory previously used by the graph
builder is reused by later passes.
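
As an illustration, the core pattern this change applies in
HGraphBuilder::BuildGraph() is a phase-local allocator whose memory is
returned to the arena stack when the builder finishes. A simplified
sketch based on the diff below (not the verbatim code):

    // All builder-local data structures draw from a phase-local allocator.
    ScopedArenaAllocator local_allocator(graph_->GetArenaStack());
    HBasicBlockBuilder block_builder(graph_, dex_file_, code_item_, &local_allocator);
    ScopedArenaVector<HBasicBlock*> worklist(
        local_allocator.Adapter(kArenaAllocGraphBuilder));
    // ... build the graph ...
    // When local_allocator goes out of scope, everything allocated from it
    // is released back to the ArenaStack, so later passes reuse the memory.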
Also finish the "arena"->"allocator" renaming; make renamed
allocator pointers that are class members const where
appropriate (and make a few more members around them const).
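
For example, in register_allocator.h the pointer member itself becomes
immutable (T* const, not const T*), while the allocator it points to
stays mutable:

    -  ScopedArenaAllocator* allocator_;
    +  ScopedArenaAllocator* const allocator_;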
Test: m test-art-host-gtest
Test: testrunner.py --host
Bug: 64312607
Change-Id: Ia50aafc80c05941ae5b96984ba4f31ed4c78255e
Diffstat (limited to 'compiler/optimizing')
28 files changed, 324 insertions, 267 deletions
diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc
index fe7ecd1ae1..d7def774fd 100644
--- a/compiler/optimizing/block_builder.cc
+++ b/compiler/optimizing/block_builder.cc
@@ -29,7 +29,7 @@ HBasicBlock* HBasicBlockBuilder::MaybeCreateBlockAt(uint32_t semantic_dex_pc,
                                                     uint32_t store_dex_pc) {
   HBasicBlock* block = branch_targets_[store_dex_pc];
   if (block == nullptr) {
-    block = new (arena_) HBasicBlock(graph_, semantic_dex_pc);
+    block = new (allocator_) HBasicBlock(graph_, semantic_dex_pc);
     branch_targets_[store_dex_pc] = block;
   }
   DCHECK_EQ(block->GetDexPc(), semantic_dex_pc);
@@ -200,7 +200,7 @@ void HBasicBlockBuilder::ConnectBasicBlocks() {
 // Returns the TryItem stored for `block` or nullptr if there is no info for it.
 static const DexFile::TryItem* GetTryItem(
     HBasicBlock* block,
-    const ArenaSafeMap<uint32_t, const DexFile::TryItem*>& try_block_info) {
+    const ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*>& try_block_info) {
   auto iterator = try_block_info.find(block->GetBlockId());
   return (iterator == try_block_info.end()) ? nullptr : iterator->second;
 }
@@ -212,7 +212,7 @@ static const DexFile::TryItem* GetTryItem(
 static void LinkToCatchBlocks(HTryBoundary* try_boundary,
                               const DexFile::CodeItem& code_item,
                               const DexFile::TryItem* try_item,
-                              const ArenaSafeMap<uint32_t, HBasicBlock*>& catch_blocks) {
+                              const ScopedArenaSafeMap<uint32_t, HBasicBlock*>& catch_blocks) {
   for (CatchHandlerIterator it(code_item, *try_item); it.HasNext(); it.Next()) {
     try_boundary->AddExceptionHandler(catch_blocks.Get(it.GetHandlerAddress()));
   }
@@ -253,8 +253,8 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
   // Keep a map of all try blocks and their respective TryItems. We do not use
   // the block's pointer but rather its id to ensure deterministic iteration.
-  ArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
-      std::less<uint32_t>(), arena_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
+      std::less<uint32_t>(), local_allocator_->Adapter(kArenaAllocGraphBuilder));

   // Obtain TryItem information for blocks with throwing instructions, and split
   // blocks which are both try & catch to simplify the graph.
@@ -278,8 +278,8 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
   }

   // Map from a handler dex_pc to the corresponding catch block.
-  ArenaSafeMap<uint32_t, HBasicBlock*> catch_blocks(
-      std::less<uint32_t>(), arena_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaSafeMap<uint32_t, HBasicBlock*> catch_blocks(
+      std::less<uint32_t>(), local_allocator_->Adapter(kArenaAllocGraphBuilder));

   // Iterate over catch blocks, create artifical landing pads if necessary to
   // simplify the CFG, and set metadata.
@@ -302,8 +302,8 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
       HBasicBlock* catch_block = GetBlockAt(address);
       bool is_try_block = (try_block_info.find(catch_block->GetBlockId()) != try_block_info.end());
       if (is_try_block || MightHaveLiveNormalPredecessors(catch_block)) {
-        HBasicBlock* new_catch_block = new (arena_) HBasicBlock(graph_, address);
-        new_catch_block->AddInstruction(new (arena_) HGoto(address));
+        HBasicBlock* new_catch_block = new (allocator_) HBasicBlock(graph_, address);
+        new_catch_block->AddInstruction(new (allocator_) HGoto(address));
         new_catch_block->AddSuccessor(catch_block);
         graph_->AddBlock(new_catch_block);
         catch_block = new_catch_block;
@@ -311,7 +311,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
       catch_blocks.Put(address, catch_block);
       catch_block->SetTryCatchInformation(
-          new (arena_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
+          new (allocator_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
     }
     handlers_ptr = iterator.EndDataPointer();
   }
@@ -328,8 +328,8 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
       if (GetTryItem(predecessor, try_block_info) != try_item) {
         // Found a predecessor not covered by the same TryItem. Insert entering
         // boundary block.
-        HTryBoundary* try_entry =
-            new (arena_) HTryBoundary(HTryBoundary::BoundaryKind::kEntry, try_block->GetDexPc());
+        HTryBoundary* try_entry = new (allocator_) HTryBoundary(
+            HTryBoundary::BoundaryKind::kEntry, try_block->GetDexPc());
         try_block->CreateImmediateDominator()->AddInstruction(try_entry);
         LinkToCatchBlocks(try_entry, code_item_, try_item, catch_blocks);
         break;
@@ -357,7 +357,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
     // Insert TryBoundary and link to catch blocks.
     HTryBoundary* try_exit =
-        new (arena_) HTryBoundary(HTryBoundary::BoundaryKind::kExit, successor->GetDexPc());
+        new (allocator_) HTryBoundary(HTryBoundary::BoundaryKind::kExit, successor->GetDexPc());
     graph_->SplitEdge(try_block, successor)->AddInstruction(try_exit);
     LinkToCatchBlocks(try_exit, code_item_, try_item, catch_blocks);
   }
@@ -367,8 +367,8 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
 bool HBasicBlockBuilder::Build() {
   DCHECK(graph_->GetBlocks().empty());

-  graph_->SetEntryBlock(new (arena_) HBasicBlock(graph_, kNoDexPc));
-  graph_->SetExitBlock(new (arena_) HBasicBlock(graph_, kNoDexPc));
+  graph_->SetEntryBlock(new (allocator_) HBasicBlock(graph_, kNoDexPc));
+  graph_->SetExitBlock(new (allocator_) HBasicBlock(graph_, kNoDexPc));

   // TODO(dbrazdil): Do CreateBranchTargets and ConnectBasicBlocks in one pass.
   if (!CreateBranchTargets()) {
diff --git a/compiler/optimizing/block_builder.h b/compiler/optimizing/block_builder.h
index 4a0f78ce3d..79f7a7bc81 100644
--- a/compiler/optimizing/block_builder.h
+++ b/compiler/optimizing/block_builder.h
@@ -17,8 +17,8 @@
 #ifndef ART_COMPILER_OPTIMIZING_BLOCK_BUILDER_H_
 #define ART_COMPILER_OPTIMIZING_BLOCK_BUILDER_H_

-#include "base/arena_containers.h"
-#include "base/arena_object.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
 #include "dex_file.h"
 #include "nodes.h"
@@ -28,17 +28,21 @@ class HBasicBlockBuilder : public ValueObject {
  public:
   HBasicBlockBuilder(HGraph* graph,
                      const DexFile* const dex_file,
-                     const DexFile::CodeItem& code_item)
-      : arena_(graph->GetAllocator()),
+                     const DexFile::CodeItem& code_item,
+                     ScopedArenaAllocator* local_allocator)
+      : allocator_(graph->GetAllocator()),
         graph_(graph),
         dex_file_(dex_file),
         code_item_(code_item),
+        local_allocator_(local_allocator),
         branch_targets_(code_item.insns_size_in_code_units_,
                         nullptr,
-                        arena_->Adapter(kArenaAllocGraphBuilder)),
-        throwing_blocks_(kDefaultNumberOfThrowingBlocks, arena_->Adapter(kArenaAllocGraphBuilder)),
+                        local_allocator->Adapter(kArenaAllocGraphBuilder)),
+        throwing_blocks_(kDefaultNumberOfThrowingBlocks,
+                         local_allocator->Adapter(kArenaAllocGraphBuilder)),
         number_of_branches_(0u),
-        quicken_index_for_dex_pc_(std::less<uint32_t>(), arena_->Adapter()) {}
+        quicken_index_for_dex_pc_(std::less<uint32_t>(),
+                                  local_allocator->Adapter(kArenaAllocGraphBuilder)) {}

   // Creates basic blocks in `graph_` at branch target dex_pc positions of the
   // `code_item_`. Blocks are connected but left unpopulated with instructions.
@@ -71,18 +75,19 @@ class HBasicBlockBuilder : public ValueObject {
   // handler dex_pcs.
   bool MightHaveLiveNormalPredecessors(HBasicBlock* catch_block);

-  ArenaAllocator* const arena_;
+  ArenaAllocator* const allocator_;
   HGraph* const graph_;
   const DexFile* const dex_file_;
   const DexFile::CodeItem& code_item_;

-  ArenaVector<HBasicBlock*> branch_targets_;
-  ArenaVector<HBasicBlock*> throwing_blocks_;
+  ScopedArenaAllocator* const local_allocator_;
+  ScopedArenaVector<HBasicBlock*> branch_targets_;
+  ScopedArenaVector<HBasicBlock*> throwing_blocks_;
   size_t number_of_branches_;

   // A table to quickly find the quicken index for the first instruction of a basic block.
-  ArenaSafeMap<uint32_t, uint32_t> quicken_index_for_dex_pc_;
+  ScopedArenaSafeMap<uint32_t, uint32_t> quicken_index_for_dex_pc_;

   static constexpr size_t kDefaultNumberOfThrowingBlocks = 2u;
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 76350a6d55..4ed1612220 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -20,12 +20,16 @@
 #include "base/arena_bit_vector.h"
 #include "base/bit_vector-inl.h"
 #include "base/logging.h"
+#include "block_builder.h"
 #include "data_type-inl.h"
 #include "dex/verified_method.h"
 #include "driver/compiler_options.h"
+#include "instruction_builder.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache.h"
 #include "nodes.h"
+#include "optimizing_compiler_stats.h"
+#include "ssa_builder.h"
 #include "thread.h"
 #include "utils/dex_cache_arrays_layout-inl.h"
@@ -43,27 +47,13 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
       dex_file_(&graph->GetDexFile()),
       code_item_(*dex_compilation_unit->GetCodeItem()),
       dex_compilation_unit_(dex_compilation_unit),
+      outer_compilation_unit_(outer_compilation_unit),
       compiler_driver_(driver),
+      code_generator_(code_generator),
       compilation_stats_(compiler_stats),
-      block_builder_(graph, dex_file_, code_item_),
-      ssa_builder_(graph,
-                   dex_compilation_unit->GetClassLoader(),
-                   dex_compilation_unit->GetDexCache(),
-                   handles),
-      instruction_builder_(graph,
-                           &block_builder_,
-                           &ssa_builder_,
-                           dex_file_,
-                           code_item_,
-                           DataType::FromShorty(dex_compilation_unit_->GetShorty()[0]),
-                           dex_compilation_unit,
-                           outer_compilation_unit,
-                           driver,
-                           code_generator,
-                           interpreter_metadata,
-                           compiler_stats,
-                           dex_compilation_unit->GetDexCache(),
-                           handles) {}
+      interpreter_metadata_(interpreter_metadata),
+      handles_(handles),
+      return_type_(DataType::FromShorty(dex_compilation_unit_->GetShorty()[0])) {}

 bool HGraphBuilder::SkipCompilation(size_t number_of_branches) {
   if (compiler_driver_ == nullptr) {
@@ -108,15 +98,38 @@ GraphAnalysisResult HGraphBuilder::BuildGraph() {
   graph_->SetMaximumNumberOfOutVRegs(code_item_.outs_size_);
   graph_->SetHasTryCatch(code_item_.tries_size_ != 0);

+  // Use ScopedArenaAllocator for all local allocations.
+  ScopedArenaAllocator local_allocator(graph_->GetArenaStack());
+  HBasicBlockBuilder block_builder(graph_, dex_file_, code_item_, &local_allocator);
+  SsaBuilder ssa_builder(graph_,
+                         dex_compilation_unit_->GetClassLoader(),
+                         dex_compilation_unit_->GetDexCache(),
+                         handles_,
+                         &local_allocator);
+  HInstructionBuilder instruction_builder(graph_,
+                                          &block_builder,
+                                          &ssa_builder,
+                                          dex_file_,
+                                          code_item_,
+                                          return_type_,
+                                          dex_compilation_unit_,
+                                          outer_compilation_unit_,
+                                          compiler_driver_,
+                                          code_generator_,
+                                          interpreter_metadata_,
+                                          compilation_stats_,
+                                          handles_,
+                                          &local_allocator);
+
   // 1) Create basic blocks and link them together. Basic blocks are left
   //    unpopulated with the exception of synthetic blocks, e.g. HTryBoundaries.
-  if (!block_builder_.Build()) {
+  if (!block_builder.Build()) {
     return kAnalysisInvalidBytecode;
   }

   // 2) Decide whether to skip this method based on its code size and number
   //    of branches.
-  if (SkipCompilation(block_builder_.GetNumberOfBranches())) {
+  if (SkipCompilation(block_builder.GetNumberOfBranches())) {
     return kAnalysisSkipped;
   }
@@ -127,12 +140,12 @@ GraphAnalysisResult HGraphBuilder::BuildGraph() {
   }

   // 4) Populate basic blocks with instructions.
-  if (!instruction_builder_.Build()) {
+  if (!instruction_builder.Build()) {
     return kAnalysisInvalidBytecode;
   }

   // 5) Type the graph and eliminate dead/redundant phis.
-  return ssa_builder_.BuildSsa();
+  return ssa_builder.BuildSsa();
 }

 }  // namespace art
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 6c5985a3de..5a860f1e43 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -17,21 +17,17 @@
 #ifndef ART_COMPILER_OPTIMIZING_BUILDER_H_
 #define ART_COMPILER_OPTIMIZING_BUILDER_H_

-#include "base/arena_containers.h"
 #include "base/arena_object.h"
-#include "block_builder.h"
 #include "dex_file-inl.h"
 #include "dex_file.h"
 #include "driver/compiler_driver.h"
 #include "driver/dex_compilation_unit.h"
-#include "instruction_builder.h"
 #include "nodes.h"
-#include "optimizing_compiler_stats.h"
-#include "ssa_builder.h"

 namespace art {

 class CodeGenerator;
+class OptimizingCompilerStats;

 class HGraphBuilder : public ValueObject {
  public:
@@ -46,34 +42,21 @@ class HGraphBuilder : public ValueObject {
   // Only for unit testing.
   HGraphBuilder(HGraph* graph,
+                const DexCompilationUnit* dex_compilation_unit,
                 const DexFile::CodeItem& code_item,
                 VariableSizedHandleScope* handles,
                 DataType::Type return_type = DataType::Type::kInt32)
       : graph_(graph),
-        dex_file_(nullptr),
+        dex_file_(dex_compilation_unit->GetDexFile()),
         code_item_(code_item),
-        dex_compilation_unit_(nullptr),
+        dex_compilation_unit_(dex_compilation_unit),
+        outer_compilation_unit_(nullptr),
         compiler_driver_(nullptr),
+        code_generator_(nullptr),
         compilation_stats_(nullptr),
-        block_builder_(graph, nullptr, code_item),
-        ssa_builder_(graph,
-                     handles->NewHandle<mirror::ClassLoader>(nullptr),
-                     handles->NewHandle<mirror::DexCache>(nullptr),
-                     handles),
-        instruction_builder_(graph,
-                             &block_builder_,
-                             &ssa_builder_,
-                             /* dex_file */ nullptr,
-                             code_item_,
-                             return_type,
-                             /* dex_compilation_unit */ nullptr,
-                             /* outer_compilation_unit */ nullptr,
-                             /* compiler_driver */ nullptr,
-                             /* code_generator */ nullptr,
-                             /* interpreter_metadata */ nullptr,
-                             /* compiler_stats */ nullptr,
-                             handles->NewHandle<mirror::DexCache>(nullptr),
-                             handles) {}
+        interpreter_metadata_(nullptr),
+        handles_(handles),
+        return_type_(return_type) {}

   GraphAnalysisResult BuildGraph();
@@ -90,13 +73,16 @@ class HGraphBuilder : public ValueObject {
   // it can be an inlined method.
   const DexCompilationUnit* const dex_compilation_unit_;

-  CompilerDriver* const compiler_driver_;
+  // The compilation unit of the enclosing method being compiled.
+  const DexCompilationUnit* const outer_compilation_unit_;

-  OptimizingCompilerStats* compilation_stats_;
+  CompilerDriver* const compiler_driver_;
+  CodeGenerator* const code_generator_;

-  HBasicBlockBuilder block_builder_;
-  SsaBuilder ssa_builder_;
-  HInstructionBuilder instruction_builder_;
+  OptimizingCompilerStats* const compilation_stats_;
+  const uint8_t* const interpreter_metadata_;
+  VariableSizedHandleScope* const handles_;
+  const DataType::Type return_type_;

   DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
 };
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index eccdccf186..3851877ae5 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -21,6 +21,7 @@
 #include <cctype>
 #include <sstream>

+#include "art_method.h"
 #include "bounds_check_elimination.h"
 #include "builder.h"
 #include "code_generator.h"
@@ -33,6 +34,7 @@
 #include "optimization.h"
 #include "reference_type_propagation.h"
 #include "register_allocator_linear_scan.h"
+#include "scoped_thread_state_change-inl.h"
 #include "ssa_liveness_analysis.h"
 #include "utils/assembler.h"
 #include "utils/intrusive_forward_list.h"
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index b06d91c823..902985e4ee 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -17,15 +17,23 @@
 #include "instruction_builder.h"

 #include "art_method-inl.h"
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
+#include "block_builder.h"
 #include "bytecode_utils.h"
 #include "class_linker.h"
 #include "data_type-inl.h"
 #include "dex_instruction-inl.h"
+#include "driver/compiler_driver-inl.h"
+#include "driver/dex_compilation_unit.h"
 #include "driver/compiler_options.h"
 #include "imtable-inl.h"
+#include "mirror/dex_cache.h"
+#include "optimizing_compiler_stats.h"
 #include "quicken_info.h"
 #include "scoped_thread_state_change-inl.h"
 #include "sharpening.h"
+#include "ssa_builder.h"
 #include "well_known_classes.h"

 namespace art {
@@ -34,8 +42,8 @@ HBasicBlock* HInstructionBuilder::FindBlockStartingAt(uint32_t dex_pc) const {
   return block_builder_->GetBlockAt(dex_pc);
 }

-inline ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsFor(HBasicBlock* block) {
-  ArenaVector<HInstruction*>* locals = &locals_for_[block->GetBlockId()];
+inline ScopedArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsFor(HBasicBlock* block) {
+  ScopedArenaVector<HInstruction*>* locals = &locals_for_[block->GetBlockId()];
   const size_t vregs = graph_->GetNumberOfVRegs();
   if (locals->size() == vregs) {
     return locals;
@@ -43,9 +51,9 @@ inline ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsFor(HBasicBlock
   return GetLocalsForWithAllocation(block, locals, vregs);
 }

-ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsForWithAllocation(
+ScopedArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsForWithAllocation(
     HBasicBlock* block,
-    ArenaVector<HInstruction*>* locals,
+    ScopedArenaVector<HInstruction*>* locals,
     const size_t vregs) {
   DCHECK_NE(locals->size(), vregs);
   locals->resize(vregs, nullptr);
@@ -73,7 +81,7 @@ ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsForWithAllocation(
 }

 inline HInstruction* HInstructionBuilder::ValueOfLocalAt(HBasicBlock* block, size_t local) {
-  ArenaVector<HInstruction*>* locals = GetLocalsFor(block);
+  ScopedArenaVector<HInstruction*>* locals = GetLocalsFor(block);
   return (*locals)[local];
 }
@@ -168,7 +176,7 @@ void HInstructionBuilder::InitializeBlockLocals() {
 void HInstructionBuilder::PropagateLocalsToCatchBlocks() {
   const HTryBoundary& try_entry = current_block_->GetTryCatchInformation()->GetTryEntry();
   for (HBasicBlock* catch_block : try_entry.GetExceptionHandlers()) {
-    ArenaVector<HInstruction*>* handler_locals = GetLocalsFor(catch_block);
+    ScopedArenaVector<HInstruction*>* handler_locals = GetLocalsFor(catch_block);
     DCHECK_EQ(handler_locals->size(), current_locals_->size());
     for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
       HInstruction* handler_value = (*handler_locals)[vreg];
@@ -216,7 +224,7 @@ void HInstructionBuilder::InitializeInstruction(HInstruction* instruction) {
         graph_->GetArtMethod(),
         instruction->GetDexPc(),
         instruction);
-    environment->CopyFrom(*current_locals_);
+    environment->CopyFrom(ArrayRef<HInstruction* const>(*current_locals_));
     instruction->SetRawEnvironment(environment);
   }
 }
@@ -264,8 +272,9 @@ static bool IsBlockPopulated(HBasicBlock* block) {
 }

 bool HInstructionBuilder::Build() {
-  locals_for_.resize(graph_->GetBlocks().size(),
-                     ArenaVector<HInstruction*>(allocator_->Adapter(kArenaAllocGraphBuilder)));
+  locals_for_.resize(
+      graph_->GetBlocks().size(),
+      ScopedArenaVector<HInstruction*>(local_allocator_->Adapter(kArenaAllocGraphBuilder)));

   // Find locations where we want to generate extra stackmaps for native debugging.
   // This allows us to generate the info only at interesting points (for example,
@@ -274,10 +283,7 @@ bool HInstructionBuilder::Build() {
       compiler_driver_->GetCompilerOptions().GetNativeDebuggable();
   ArenaBitVector* native_debug_info_locations = nullptr;
   if (native_debuggable) {
-    const uint32_t num_instructions = code_item_.insns_size_in_code_units_;
-    native_debug_info_locations =
-        new (allocator_) ArenaBitVector (allocator_, num_instructions, false);
-    FindNativeDebugInfoLocations(native_debug_info_locations);
+    native_debug_info_locations = FindNativeDebugInfoLocations();
   }

   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
@@ -358,7 +364,7 @@ bool HInstructionBuilder::Build() {
   return true;
 }

-void HInstructionBuilder::FindNativeDebugInfoLocations(ArenaBitVector* locations) {
+ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
   // The callback gets called when the line number changes.
   // In other words, it marks the start of new java statement.
   struct Callback {
@@ -367,6 +373,12 @@ void HInstructionBuilder::FindNativeDebugInfoLocations(ArenaBitVector* locations
       return false;
     }
   };
+  const uint32_t num_instructions = code_item_.insns_size_in_code_units_;
+  ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_,
+                                                     num_instructions,
+                                                     /* expandable */ false,
+                                                     kArenaAllocGraphBuilder);
+  locations->ClearAllBits();
   dex_file_->DecodeDebugPositionInfo(&code_item_, Callback::Position, locations);
   // Instruction-specific tweaks.
   IterationRange<DexInstructionIterator> instructions = code_item_.Instructions();
@@ -387,6 +399,7 @@ void HInstructionBuilder::FindNativeDebugInfoLocations(ArenaBitVector* locations
       break;
     }
   }
+  return locations;
 }

 HInstruction* HInstructionBuilder::LoadLocal(uint32_t reg_number, DataType::Type type) const {
@@ -439,8 +452,8 @@ void HInstructionBuilder::UpdateLocal(uint32_t reg_number, HInstruction* stored_
 void HInstructionBuilder::InitializeParameters() {
   DCHECK(current_block_->IsEntryBlock());

-  // dex_compilation_unit_ is null only when unit testing.
-  if (dex_compilation_unit_ == nullptr) {
+  // outer_compilation_unit_ is null only when unit testing.
+  if (outer_compilation_unit_ == nullptr) {
     return;
   }
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 79d6ddc87d..058b711687 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -17,23 +17,32 @@
 #ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_BUILDER_H_
 #define ART_COMPILER_OPTIMIZING_INSTRUCTION_BUILDER_H_

-#include "base/arena_containers.h"
-#include "base/arena_object.h"
-#include "block_builder.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
+#include "data_type.h"
+#include "dex_file.h"
 #include "dex_file_types.h"
-#include "driver/compiler_driver-inl.h"
-#include "driver/compiler_driver.h"
-#include "driver/dex_compilation_unit.h"
-#include "mirror/dex_cache.h"
+#include "handle.h"
 #include "nodes.h"
-#include "optimizing_compiler_stats.h"
 #include "quicken_info.h"
-#include "ssa_builder.h"

 namespace art {

+class ArenaBitVector;
+class ArtField;
+class ArtMethod;
 class CodeGenerator;
+class CompilerDriver;
+class DexCompilationUnit;
+class HBasicBlockBuilder;
 class Instruction;
+class OptimizingCompilerStats;
+class SsaBuilder;
+class VariableSizedHandleScope;
+
+namespace mirror {
+class Class;
+}  // namespace mirror

 class HInstructionBuilder : public ValueObject {
  public:
@@ -45,12 +54,12 @@ class HInstructionBuilder : public ValueObject {
                       DataType::Type return_type,
                       const DexCompilationUnit* dex_compilation_unit,
                       const DexCompilationUnit* outer_compilation_unit,
-                      CompilerDriver* driver,
+                      CompilerDriver* compiler_driver,
                       CodeGenerator* code_generator,
                       const uint8_t* interpreter_metadata,
                       OptimizingCompilerStats* compiler_stats,
-                      Handle<mirror::DexCache> dex_cache,
-                      VariableSizedHandleScope* handles)
+                      VariableSizedHandleScope* handles,
+                      ScopedArenaAllocator* local_allocator)
       : allocator_(graph->GetAllocator()),
         graph_(graph),
         handles_(handles),
@@ -59,19 +68,19 @@ class HInstructionBuilder : public ValueObject {
         return_type_(return_type),
         block_builder_(block_builder),
         ssa_builder_(ssa_builder),
-        locals_for_(allocator_->Adapter(kArenaAllocGraphBuilder)),
-        current_block_(nullptr),
-        current_locals_(nullptr),
-        latest_result_(nullptr),
-        current_this_parameter_(nullptr),
-        compiler_driver_(driver),
+        compiler_driver_(compiler_driver),
         code_generator_(code_generator),
         dex_compilation_unit_(dex_compilation_unit),
         outer_compilation_unit_(outer_compilation_unit),
         quicken_info_(interpreter_metadata),
         compilation_stats_(compiler_stats),
-        dex_cache_(dex_cache),
-        loop_headers_(allocator_->Adapter(kArenaAllocGraphBuilder)) {
+        local_allocator_(local_allocator),
+        locals_for_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+        current_block_(nullptr),
+        current_locals_(nullptr),
+        latest_result_(nullptr),
+        current_this_parameter_(nullptr),
+        loop_headers_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
     loop_headers_.reserve(kDefaultNumberOfLoops);
   }
@@ -83,18 +92,18 @@ class HInstructionBuilder : public ValueObject {
   void SetLoopHeaderPhiInputs();

   bool ProcessDexInstruction(const Instruction& instruction, uint32_t dex_pc, size_t quicken_index);
-  void FindNativeDebugInfoLocations(ArenaBitVector* locations);
+  ArenaBitVector* FindNativeDebugInfoLocations();

   bool CanDecodeQuickenedInfo() const;
   uint16_t LookupQuickenedInfo(uint32_t quicken_index);

   HBasicBlock* FindBlockStartingAt(uint32_t dex_pc) const;

-  ArenaVector<HInstruction*>* GetLocalsFor(HBasicBlock* block);
+  ScopedArenaVector<HInstruction*>* GetLocalsFor(HBasicBlock* block);
   // Out of line version of GetLocalsFor(), which has a fast path that is
   // beneficial to get inlined by callers.
-  ArenaVector<HInstruction*>* GetLocalsForWithAllocation(
-      HBasicBlock* block, ArenaVector<HInstruction*>* locals, const size_t vregs);
+  ScopedArenaVector<HInstruction*>* GetLocalsForWithAllocation(
+      HBasicBlock* block, ScopedArenaVector<HInstruction*>* locals, const size_t vregs);
   HInstruction* ValueOfLocalAt(HBasicBlock* block, size_t local);
   HInstruction* LoadLocal(uint32_t register_index, DataType::Type type) const;
   HInstruction* LoadNullCheckedLocal(uint32_t register_index, uint32_t dex_pc);
@@ -314,7 +323,7 @@ class HInstructionBuilder : public ValueObject {
   ArenaAllocator* const allocator_;
   HGraph* const graph_;
-  VariableSizedHandleScope* handles_;
+  VariableSizedHandleScope* const handles_;

   // The dex file where the method being compiled is, and the bytecode data.
   const DexFile* const dex_file_;
@@ -323,18 +332,8 @@ class HInstructionBuilder : public ValueObject {
   // The return type of the method being compiled.
   const DataType::Type return_type_;

-  HBasicBlockBuilder* block_builder_;
-  SsaBuilder* ssa_builder_;
-
-  ArenaVector<ArenaVector<HInstruction*>> locals_for_;
-  HBasicBlock* current_block_;
-  ArenaVector<HInstruction*>* current_locals_;
-  HInstruction* latest_result_;
-  // Current "this" parameter.
-  // Valid only after InitializeParameters() finishes.
-  // * Null for static methods.
-  // * Non-null for instance methods.
-  HParameterValue* current_this_parameter_;
+  HBasicBlockBuilder* const block_builder_;
+  SsaBuilder* const ssa_builder_;

   CompilerDriver* const compiler_driver_;
@@ -352,10 +351,20 @@ class HInstructionBuilder : public ValueObject {
   // Original values kept after instruction quickening.
   QuickenInfoTable quicken_info_;

-  OptimizingCompilerStats* compilation_stats_;
-  Handle<mirror::DexCache> dex_cache_;
+  OptimizingCompilerStats* const compilation_stats_;
+
+  ScopedArenaAllocator* const local_allocator_;
+  ScopedArenaVector<ScopedArenaVector<HInstruction*>> locals_for_;
+  HBasicBlock* current_block_;
+  ScopedArenaVector<HInstruction*>* current_locals_;
+  HInstruction* latest_result_;
+  // Current "this" parameter.
+  // Valid only after InitializeParameters() finishes.
+  // * Null for static methods.
+  // * Non-null for instance methods.
+  HParameterValue* current_this_parameter_;

-  ArenaVector<HBasicBlock*> loop_headers_;
+  ScopedArenaVector<HBasicBlock*> loop_headers_;

   static constexpr int kDefaultNumberOfLoops = 2;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index f39acab3d7..afe748458e 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1284,9 +1284,9 @@ void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) {
     DCHECK(input_other->IsShr());  // For UShr, we would have taken the branch above.
     // Replace SHR+AND with USHR, for example "(x >> 24) & 0xff" -> "x >>> 24".
     HUShr* ushr = new (GetGraph()->GetAllocator()) HUShr(instruction->GetType(),
-                                                      input_other->InputAt(0),
-                                                      input_other->InputAt(1),
-                                                      input_other->GetDexPc());
+                                                         input_other->InputAt(0),
+                                                         input_other->InputAt(1),
+                                                         input_other->GetDexPc());
     instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, ushr);
     input_other->GetBlock()->RemoveInstruction(input_other);
     RecordSimplification();
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 3533c88c67..033a644f34 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -57,8 +57,8 @@ class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
   bool TryDispatch(HInvoke* invoke);

  private:
-  ArenaAllocator* allocator_;
-  CodeGeneratorARM64* codegen_;
+  ArenaAllocator* const allocator_;
+  CodeGeneratorARM64* const codegen_;

   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
 };
@@ -81,7 +81,7 @@ class IntrinsicCodeGeneratorARM64 FINAL : public IntrinsicVisitor {
   ArenaAllocator* GetAllocator();

-  CodeGeneratorARM64* codegen_;
+  CodeGeneratorARM64* const codegen_;

   DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorARM64);
 };
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index 4f18ca3fc1..9c02d0a4ad 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -46,9 +46,9 @@ class IntrinsicLocationsBuilderARMVIXL FINAL : public IntrinsicVisitor {
   bool TryDispatch(HInvoke* invoke);

  private:
-  ArenaAllocator* allocator_;
-  CodeGenerator* codegen_;
-  ArmVIXLAssembler* assembler_;
+  ArenaAllocator* const allocator_;
+  CodeGenerator* const codegen_;
+  ArmVIXLAssembler* const assembler_;

   const ArmInstructionSetFeatures& features_;

   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARMVIXL);
@@ -71,7 +71,7 @@ class IntrinsicCodeGeneratorARMVIXL FINAL : public IntrinsicVisitor {
   ArenaAllocator* GetAllocator();
   ArmVIXLAssembler* GetAssembler();

-  CodeGeneratorARMVIXL* codegen_;
+  CodeGeneratorARMVIXL* const codegen_;

   DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorARMVIXL);
 };
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index afd9548a60..13397f11d4 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -49,8 +49,8 @@ class IntrinsicLocationsBuilderMIPS FINAL : public IntrinsicVisitor {
   bool TryDispatch(HInvoke* invoke);

  private:
-  CodeGeneratorMIPS* codegen_;
-  ArenaAllocator* allocator_;
+  CodeGeneratorMIPS* const codegen_;
+  ArenaAllocator* const allocator_;

   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
 };
@@ -77,7 +77,7 @@ class IntrinsicCodeGeneratorMIPS FINAL : public IntrinsicVisitor {
   ArenaAllocator* GetAllocator();

-  CodeGeneratorMIPS* codegen_;
+  CodeGeneratorMIPS* const codegen_;

   DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorMIPS);
 };
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 6085c7b29c..6f40d90ddb 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -49,8 +49,8 @@ class IntrinsicLocationsBuilderMIPS64 FINAL : public IntrinsicVisitor {
   bool TryDispatch(HInvoke* invoke);

  private:
-  CodeGeneratorMIPS64* codegen_;
-  ArenaAllocator* allocator_;
+  CodeGeneratorMIPS64* const codegen_;
+  ArenaAllocator* const allocator_;

   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64);
 };
@@ -73,7 +73,7 @@ class IntrinsicCodeGeneratorMIPS64 FINAL : public IntrinsicVisitor {
   ArenaAllocator* GetAllocator();

-  CodeGeneratorMIPS64* codegen_;
+  CodeGeneratorMIPS64* const codegen_;

   DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorMIPS64);
 };
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index ba3ca0a410..e3555e78fc 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -49,8 +49,8 @@ class IntrinsicLocationsBuilderX86 FINAL : public IntrinsicVisitor {
   bool TryDispatch(HInvoke* invoke);

  private:
-  ArenaAllocator* allocator_;
-  CodeGeneratorX86* codegen_;
+  ArenaAllocator* const allocator_;
+  CodeGeneratorX86* const codegen_;

   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86);
 };
@@ -73,7 +73,7 @@ class IntrinsicCodeGeneratorX86 FINAL : public IntrinsicVisitor {
   ArenaAllocator* GetAllocator();

-  CodeGeneratorX86* codegen_;
+  CodeGeneratorX86* const codegen_;

   DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorX86);
 };
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index b0fbe91a75..5cb601edfe 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -49,8 +49,8 @@ class IntrinsicLocationsBuilderX86_64 FINAL : public IntrinsicVisitor {
   bool TryDispatch(HInvoke* invoke);

  private:
-  ArenaAllocator* allocator_;
-  CodeGeneratorX86_64* codegen_;
+  ArenaAllocator* const allocator_;
+  CodeGeneratorX86_64* const codegen_;

   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86_64);
 };
@@ -73,7 +73,7 @@ class IntrinsicCodeGeneratorX86_64 FINAL : public IntrinsicVisitor {
   ArenaAllocator* GetAllocator();

-  CodeGeneratorX86_64* codegen_;
+  CodeGeneratorX86_64* const codegen_;

   DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorX86_64);
 };
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 1a537ca47e..f4f6434678 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -55,14 +55,18 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) {
   // "visited" must be empty on entry, it's an output argument for all visited (i.e. live) blocks.
   DCHECK_EQ(visited->GetHighestBitSet(), -1);

+  // Allocate memory from local ScopedArenaAllocator.
+  ScopedArenaAllocator allocator(GetArenaStack());
   // Nodes that we're currently visiting, indexed by block id.
-  ArenaBitVector visiting(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
+  ArenaBitVector visiting(
+      &allocator, blocks_.size(), /* expandable */ false, kArenaAllocGraphBuilder);
+  visiting.ClearAllBits();
   // Number of successors visited from a given node, indexed by block id.
-  ArenaVector<size_t> successors_visited(blocks_.size(),
-                                         0u,
-                                         allocator_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<size_t> successors_visited(blocks_.size(),
+                                               0u,
+                                               allocator.Adapter(kArenaAllocGraphBuilder));
   // Stack of nodes that we're currently visiting (same as marked in "visiting" above).
-  ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocGraphBuilder));
   constexpr size_t kDefaultWorklistSize = 8;
   worklist.reserve(kDefaultWorklistSize);
   visited->SetBit(entry_block_->GetBlockId());
@@ -173,7 +177,11 @@ void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) {
 }

 GraphAnalysisResult HGraph::BuildDominatorTree() {
-  ArenaBitVector visited(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
+  // Allocate memory from local ScopedArenaAllocator.
+  ScopedArenaAllocator allocator(GetArenaStack());
+
+  ArenaBitVector visited(&allocator, blocks_.size(), false, kArenaAllocGraphBuilder);
+  visited.ClearAllBits();

   // (1) Find the back edges in the graph doing a DFS traversal.
   FindBackEdges(&visited);
@@ -258,14 +266,16 @@ void HGraph::ComputeDominanceInformation() {
   reverse_post_order_.reserve(blocks_.size());
   reverse_post_order_.push_back(entry_block_);

+  // Allocate memory from local ScopedArenaAllocator.
+  ScopedArenaAllocator allocator(GetArenaStack());
   // Number of visits of a given node, indexed by block id.
-  ArenaVector<size_t> visits(blocks_.size(), 0u, allocator_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<size_t> visits(blocks_.size(), 0u, allocator.Adapter(kArenaAllocGraphBuilder));
   // Number of successors visited from a given node, indexed by block id.
-  ArenaVector<size_t> successors_visited(blocks_.size(),
-                                         0u,
-                                         allocator_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<size_t> successors_visited(blocks_.size(),
+                                               0u,
+                                               allocator.Adapter(kArenaAllocGraphBuilder));
   // Nodes for which we need to visit successors.
-  ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocGraphBuilder));
   constexpr size_t kDefaultWorklistSize = 8;
   worklist.reserve(kDefaultWorklistSize);
   worklist.push_back(entry_block_);
@@ -710,10 +720,13 @@ void HLoopInformation::Populate() {
   bool is_irreducible_loop = HasBackEdgeNotDominatedByHeader();

   if (is_irreducible_loop) {
-    ArenaBitVector visited(graph->GetAllocator(),
+    // Allocate memory from local ScopedArenaAllocator.
+    ScopedArenaAllocator allocator(graph->GetArenaStack());
+    ArenaBitVector visited(&allocator,
                            graph->GetBlocks().size(),
                            /* expandable */ false,
                            kArenaAllocGraphBuilder);
+    visited.ClearAllBits();
     // Stop marking blocks at the loop header.
     visited.SetBit(header_->GetBlockId());
@@ -942,7 +955,7 @@ void HBasicBlock::RemoveInstructionOrPhi(HInstruction* instruction, bool ensure_
   }
 }

-void HEnvironment::CopyFrom(const ArenaVector<HInstruction*>& locals) {
+void HEnvironment::CopyFrom(ArrayRef<HInstruction* const> locals) {
   for (size_t i = 0; i < locals.size(); i++) {
     HInstruction* instruction = locals[i];
     SetRawEnvAt(i, instruction);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 99fde755da..75cdb3ee5e 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1839,7 +1839,7 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
     }
   }

-  void CopyFrom(const ArenaVector<HInstruction*>& locals);
+  void CopyFrom(ArrayRef<HInstruction* const> locals);
   void CopyFrom(HEnvironment* environment);

   // Copy from `env`. If it's a loop phi for `loop_header`, copy the first
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index b2180d9f98..9bfd250ea4 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -129,10 +129,9 @@ TEST_F(NodeTest, ParentEnvironment) {
   HEnvironment* environment = new (GetAllocator()) HEnvironment(
       GetAllocator(), 1, graph->GetArtMethod(), 0, with_environment);

-  ArenaVector<HInstruction*> array(GetAllocator()->Adapter());
-  array.push_back(parameter1);
+  HInstruction* const array[] = { parameter1 };

-  environment->CopyFrom(array);
+  environment->CopyFrom(ArrayRef<HInstruction* const>(array));
   with_environment->SetRawEnvironment(environment);

   ASSERT_TRUE(parameter1->HasEnvironmentUses());
@@ -140,13 +139,13 @@ TEST_F(NodeTest, ParentEnvironment) {
   HEnvironment* parent1 = new (GetAllocator()) HEnvironment(
       GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
-  parent1->CopyFrom(array);
+  parent1->CopyFrom(ArrayRef<HInstruction* const>(array));

   ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 2u);

   HEnvironment* parent2 = new (GetAllocator()) HEnvironment(
       GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
-  parent2->CopyFrom(array);
+  parent2->CopyFrom(ArrayRef<HInstruction* const>(array));
   parent1->SetAndCopyParentChain(GetAllocator(), parent2);

   // One use for parent2, and one other use for the new parent of parent1.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 9bfb7a5c50..42f32b7866 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1146,7 +1146,8 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
     if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
       MemStats mem_stats(allocator.GetMemStats());
       MemStats peak_stats(arena_stack.GetPeakStats());
-      LOG(INFO) << dex_file.PrettyMethod(method_idx)
+      LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
+                << dex_file.PrettyMethod(method_idx)
                 << "\n" << Dumpable<MemStats>(mem_stats)
                 << "\n" << Dumpable<MemStats>(peak_stats);
     }
@@ -1256,7 +1257,8 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
       MemStats mem_stats(allocator.GetMemStats());
       MemStats peak_stats(arena_stack.GetPeakStats());
-      LOG(INFO) << dex_file->PrettyMethod(method_idx)
+      LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
+                << dex_file->PrettyMethod(method_idx)
                 << "\n" << Dumpable<MemStats>(mem_stats)
                 << "\n" << Dumpable<MemStats>(peak_stats);
     }
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 5632f9a453..9aba91205c 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -22,7 +22,9 @@
 #include "common_compiler_test.h"
 #include "dex_file.h"
 #include "dex_instruction.h"
-#include "handle_scope.h"
+#include "handle_scope-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
 #include "nodes.h"
 #include "scoped_thread_state_change.h"
 #include "ssa_builder.h"
@@ -123,8 +125,7 @@ class OptimizingUnitTest : public CommonCompilerTest {
   // Create a control-flow graph from Dex instructions.
   HGraph* CreateCFG(const uint16_t* data, DataType::Type return_type = DataType::Type::kInt32) {
-    const DexFile::CodeItem* item =
-        reinterpret_cast<const DexFile::CodeItem*>(data);
+    const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(data);
     HGraph* graph = CreateGraph();

     {
@@ -132,7 +133,19 @@ class OptimizingUnitTest : public CommonCompilerTest {
       if (handles_ == nullptr) {
         handles_.reset(new VariableSizedHandleScope(soa.Self()));
       }
-      HGraphBuilder builder(graph, *item, handles_.get(), return_type);
+      const DexFile* dex_file = graph->GetAllocator()->Alloc<DexFile>();
+      const DexCompilationUnit* dex_compilation_unit =
+          new (graph->GetAllocator()) DexCompilationUnit(
+              handles_->NewHandle<mirror::ClassLoader>(nullptr),
+              /* class_linker */ nullptr,
+              *dex_file,
+              code_item,
+              /* class_def_index */ DexFile::kDexNoIndex16,
+              /* method_idx */ dex::kDexNoIndex,
+              /* access_flags */ 0u,
+              /* verified_method */ nullptr,
+              handles_->NewHandle<mirror::DexCache>(nullptr));
+      HGraphBuilder builder(graph, dex_compilation_unit, *code_item, handles_.get(), return_type);
       bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
       return graph_built ? graph : nullptr;
     }
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index eaeec3b261..55a8a381b8 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -87,7 +87,7 @@ class RegisterAllocator : public DeletableArenaObject<kArenaAllocRegisterAllocat
   // to find an optimal split position.
   LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to);

-  ScopedArenaAllocator* allocator_;
+  ScopedArenaAllocator* const allocator_;
   CodeGenerator* const codegen_;
   const SsaLivenessAnalysis& liveness_;
 };
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index c673d54458..57eb7623b1 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -781,7 +781,7 @@ void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
 #if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
   // Phase-local allocator that allocates scheduler internal data structures like
   // scheduling nodes, internel nodes map, dependencies, etc.
-  ScopedArenaAllocator arena_allocator(graph_->GetArenaStack());
+  ScopedArenaAllocator allocator(graph_->GetArenaStack());
   CriticalPathSchedulingNodeSelector critical_path_selector;
   RandomSchedulingNodeSelector random_selector;
   SchedulingNodeSelector* selector = schedule_randomly
@@ -797,7 +797,7 @@ void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
   switch (instruction_set_) {
 #ifdef ART_ENABLE_CODEGEN_arm64
     case kArm64: {
-      arm64::HSchedulerARM64 scheduler(&arena_allocator, selector);
+      arm64::HSchedulerARM64 scheduler(&allocator, selector);
       scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
       scheduler.Schedule(graph_);
       break;
@@ -807,7 +807,7 @@ void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
     case kThumb2:
     case kArm: {
       arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
-      arm::HSchedulerARM scheduler(&arena_allocator, selector, &arm_latency_visitor);
+      arm::HSchedulerARM scheduler(&allocator, selector, &arm_latency_visitor);
       scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
       scheduler.Schedule(graph_);
       break;
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 3efd26af9b..afdf6f1474 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -253,14 +253,14 @@ class SchedulingGraph : public ValueObject {
  public:
   SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* allocator)
       : scheduler_(scheduler),
-        arena_(allocator),
+        allocator_(allocator),
         contains_scheduling_barrier_(false),
-        nodes_map_(arena_->Adapter(kArenaAllocScheduler)),
+        nodes_map_(allocator_->Adapter(kArenaAllocScheduler)),
         heap_location_collector_(nullptr) {}

   SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) {
     std::unique_ptr<SchedulingNode> node(
-        new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier));
+        new (allocator_) SchedulingNode(instr, allocator_, is_scheduling_barrier));
     SchedulingNode* result = node.get();
     nodes_map_.Insert(std::make_pair(instr, std::move(node)));
     contains_scheduling_barrier_ |= is_scheduling_barrier;
@@ -323,7 +323,7 @@ class SchedulingGraph : public ValueObject {
   const HScheduler* const scheduler_;

-  ScopedArenaAllocator* const arena_;
+  ScopedArenaAllocator* const allocator_;

   bool contains_scheduling_barrier_;
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index f4a8a17131..e4edbfdc24 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -105,7 +105,7 @@ void SsaBuilder::FixEnvironmentPhis() {
 }

 static void AddDependentInstructionsToWorklist(HInstruction* instruction,
-                                               ArenaVector<HPhi*>* worklist) {
+                                               ScopedArenaVector<HPhi*>* worklist) {
   // If `instruction` is a dead phi, type conflict was just identified. All its
   // live phi users, and transitively users of those users, therefore need to be
   // marked dead/conflicting too, so we add them to the worklist. Otherwise we
@@ -167,7 +167,7 @@ static bool TypePhiFromInputs(HPhi* phi) {
 }

 // Replace inputs of `phi` to match its type. Return false if conflict is identified.
-bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist) {
+bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ScopedArenaVector<HPhi*>* worklist) {
   DataType::Type common_type = phi->GetType();
   if (DataType::IsIntegralType(common_type)) {
     // We do not need to retype ambiguous inputs because they are always constructed
@@ -213,7 +213,7 @@ bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist) {
 // Attempt to set the primitive type of `phi` to match its inputs. Return whether
 // it was changed by the algorithm or not.
-bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist) {
+bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ScopedArenaVector<HPhi*>* worklist) {
   DCHECK(phi->IsLive());
   DataType::Type original_type = phi->GetType();
@@ -233,7 +233,7 @@ bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist) {
 }

 void SsaBuilder::RunPrimitiveTypePropagation() {
-  ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<HPhi*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));

   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     if (block->IsLoopHeader()) {
@@ -262,7 +262,7 @@ void SsaBuilder::RunPrimitiveTypePropagation() {
   EquivalentPhisCleanup();
 }

-void SsaBuilder::ProcessPrimitiveTypePropagationWorklist(ArenaVector<HPhi*>* worklist) {
+void SsaBuilder::ProcessPrimitiveTypePropagationWorklist(ScopedArenaVector<HPhi*>* worklist) {
   // Process worklist
   while (!worklist->empty()) {
     HPhi* phi = worklist->back();
@@ -319,7 +319,7 @@ bool SsaBuilder::FixAmbiguousArrayOps() {
   // uses (because they are untyped) and environment uses (if --debuggable).
   // After resolving all ambiguous ArrayGets, we will re-run primitive type
   // propagation on the Phis which need to be updated.
-  ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<HPhi*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));

   {
     ScopedObjectAccess soa(Thread::Current());
@@ -623,8 +623,7 @@ HPhi* SsaBuilder::GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, DataType::
       || (next->GetType() != type)) {
     ArenaAllocator* allocator = graph_->GetAllocator();
     HInputsRef inputs = phi->GetInputs();
-    HPhi* new_phi =
-        new (allocator) HPhi(allocator, phi->GetRegNumber(), inputs.size(), type);
+    HPhi* new_phi = new (allocator) HPhi(allocator, phi->GetRegNumber(), inputs.size(), type);
     // Copy the inputs. Note that the graph may not be correctly typed
     // by doing this copy, but the type propagation phase will fix it.
     ArrayRef<HUserRecord<HInstruction*>> new_input_records = new_phi->GetInputRecords();
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 509cdc1252..60831a9e6a 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -17,7 +17,8 @@
 #ifndef ART_COMPILER_OPTIMIZING_SSA_BUILDER_H_
 #define ART_COMPILER_OPTIMIZING_SSA_BUILDER_H_

-#include "base/arena_containers.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
 #include "nodes.h"
 #include "optimization.h"
@@ -50,15 +51,17 @@ class SsaBuilder : public ValueObject {
   SsaBuilder(HGraph* graph,
              Handle<mirror::ClassLoader> class_loader,
              Handle<mirror::DexCache> dex_cache,
-             VariableSizedHandleScope* handles)
+             VariableSizedHandleScope* handles,
+             ScopedArenaAllocator* local_allocator)
       : graph_(graph),
         class_loader_(class_loader),
         dex_cache_(dex_cache),
         handles_(handles),
         agets_fixed_(false),
-        ambiguous_agets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
-        ambiguous_asets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
-        uninitialized_strings_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)) {
+        local_allocator_(local_allocator),
+        ambiguous_agets_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+        ambiguous_asets_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+        uninitialized_strings_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
     graph_->InitializeInexactObjectRTI(handles);
   }
@@ -105,9 +108,9 @@ class SsaBuilder : public ValueObject {
   // input. Returns false if the type of an array is unknown.
   bool FixAmbiguousArrayOps();

-  bool TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist);
-  bool UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist);
-  void ProcessPrimitiveTypePropagationWorklist(ArenaVector<HPhi*>* worklist);
+  bool TypeInputsOfPhi(HPhi* phi, ScopedArenaVector<HPhi*>* worklist);
+  bool UpdatePrimitiveType(HPhi* phi, ScopedArenaVector<HPhi*>* worklist);
+  void ProcessPrimitiveTypePropagationWorklist(ScopedArenaVector<HPhi*>* worklist);

   HFloatConstant* GetFloatEquivalent(HIntConstant* constant);
   HDoubleConstant* GetDoubleEquivalent(HLongConstant* constant);
@@ -116,7 +119,7 @@ class SsaBuilder : public ValueObject {
   void RemoveRedundantUninitializedStrings();

-  HGraph* graph_;
+  HGraph* const graph_;
   Handle<mirror::ClassLoader> class_loader_;
   Handle<mirror::DexCache> dex_cache_;
   VariableSizedHandleScope* const handles_;
@@ -124,9 +127,10 @@
   // True if types of ambiguous ArrayGets have been resolved.
   bool agets_fixed_;

-  ArenaVector<HArrayGet*> ambiguous_agets_;
-  ArenaVector<HArraySet*> ambiguous_asets_;
-  ArenaVector<HNewInstance*> uninitialized_strings_;
+  ScopedArenaAllocator* const local_allocator_;
+  ScopedArenaVector<HArrayGet*> ambiguous_agets_;
+  ScopedArenaVector<HArraySet*> ambiguous_asets_;
+  ScopedArenaVector<HNewInstance*> uninitialized_strings_;

   DISALLOW_COPY_AND_ASSIGN(SsaBuilder);
 };
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 9800af76f8..f83bb52b69 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1292,7 +1292,7 @@ class SsaLivenessAnalysis : public ValueObject {
   // Use a local ScopedArenaAllocator for allocating memory.
   // This allocator must remain alive while doing register allocation.
-  ScopedArenaAllocator* allocator_;
+  ScopedArenaAllocator* const allocator_;

   ScopedArenaVector<BlockInfo*> block_infos_;
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 9b78e0ee6c..b9bfbaa173 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -95,8 +95,7 @@ TEST_F(SsaLivenessAnalysisTest, TestAput) {
       graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
   HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
       graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
-  ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
-                                  GetAllocator()->Adapter());
+  HInstruction* const args[] = { array, index, value, extra_arg1, extra_arg2 };
   for (HInstruction* insn : args) {
     entry_->AddInstruction(insn);
   }
@@ -109,7 +108,7 @@ TEST_F(SsaLivenessAnalysisTest, TestAput) {
                                                                 /* method */ nullptr,
                                                                 /* dex_pc */ 0u,
                                                                 null_check);
-  null_check_env->CopyFrom(args);
+  null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   null_check->SetRawEnvironment(null_check_env);
   HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
   block->AddInstruction(length);
@@ -120,7 +119,7 @@ TEST_F(SsaLivenessAnalysisTest, TestAput) {
                                                                   /* method */ nullptr,
                                                                   /* dex_pc */ 0u,
                                                                   bounds_check);
-  bounds_check_env->CopyFrom(args);
+  bounds_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   bounds_check->SetRawEnvironment(bounds_check_env);
   HInstruction* array_set = new (GetAllocator()) HArraySet(
       array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
@@ -144,7 +143,7 @@ TEST_F(SsaLivenessAnalysisTest, TestAput) {
     // Environment uses keep the reference argument alive.
     "ranges: { [10,19) }, uses: { }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
   };
-  ASSERT_EQ(arraysize(expected), args.size());
+  static_assert(arraysize(expected) == arraysize(args), "Array size check.");
   size_t arg_index = 0u;
   for (HInstruction* arg : args) {
     std::ostringstream arg_dump;
@@ -165,8 +164,7 @@ TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
      graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
   HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
       graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
-  ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
-                                  GetAllocator()->Adapter());
+  HInstruction* const args[] = { array, index, value, extra_arg1, extra_arg2 };
   for (HInstruction* insn : args) {
     entry_->AddInstruction(insn);
   }
@@ -179,7 +177,7 @@ TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
                                                                 /* method */ nullptr,
                                                                 /* dex_pc */ 0u,
                                                                 null_check);
-  null_check_env->CopyFrom(args);
+  null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   null_check->SetRawEnvironment(null_check_env);
   HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
   block->AddInstruction(length);
@@ -194,7 +192,7 @@ TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
                                                                 /* method */ nullptr,
                                                                 /* dex_pc */ 0u,
                                                                 deoptimize);
-  deoptimize_env->CopyFrom(args);
+  deoptimize_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   deoptimize->SetRawEnvironment(deoptimize_env);
   HInstruction* array_set = new (GetAllocator()) HArraySet(
       array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
@@ -217,7 +215,7 @@ TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
     // Environment uses keep the reference argument alive.
"ranges: { [10,21) }, uses: { }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0", }; - ASSERT_EQ(arraysize(expected), args.size()); + static_assert(arraysize(expected) == arraysize(args), "Array size check."); size_t arg_index = 0u; for (HInstruction* arg : args) { std::ostringstream arg_dump; diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h index a574566e33..62ed7ee0e5 100644 --- a/compiler/optimizing/stack_map_stream.h +++ b/compiler/optimizing/stack_map_stream.h @@ -223,7 +223,7 @@ class StackMapStream : public ValueObject { size_t dex_register_locations_index) const; void CheckCodeInfo(MemoryRegion region) const; - ArenaAllocator* allocator_; + ArenaAllocator* const allocator_; const InstructionSet instruction_set_; ArenaVector<StackMapEntry> stack_maps_; diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc index a842c6e452..96ac368ac3 100644 --- a/compiler/optimizing/stack_map_test.cc +++ b/compiler/optimizing/stack_map_test.cc @@ -47,10 +47,10 @@ using Kind = DexRegisterLocation::Kind; TEST(StackMapTest, Test1) { ArenaPool pool; - ArenaAllocator arena(&pool); - StackMapStream stream(&arena, kRuntimeISA); + ArenaAllocator allocator(&pool); + StackMapStream stream(&allocator, kRuntimeISA); - ArenaBitVector sp_mask(&arena, 0, false); + ArenaBitVector sp_mask(&allocator, 0, false); size_t number_of_dex_registers = 2; stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0); stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location. @@ -58,7 +58,7 @@ TEST(StackMapTest, Test1) { stream.EndStackMapEntry(); size_t size = stream.PrepareForFillIn(); - void* memory = arena.Alloc(size, kArenaAllocMisc); + void* memory = allocator.Alloc(size, kArenaAllocMisc); MemoryRegion region(memory, size); stream.FillInCodeInfo(region); @@ -128,11 +128,11 @@ TEST(StackMapTest, Test1) { TEST(StackMapTest, Test2) { ArenaPool pool; - ArenaAllocator arena(&pool); - StackMapStream stream(&arena, kRuntimeISA); + ArenaAllocator allocator(&pool); + StackMapStream stream(&allocator, kRuntimeISA); ArtMethod art_method; - ArenaBitVector sp_mask1(&arena, 0, true); + ArenaBitVector sp_mask1(&allocator, 0, true); sp_mask1.SetBit(2); sp_mask1.SetBit(4); size_t number_of_dex_registers = 2; @@ -146,7 +146,7 @@ TEST(StackMapTest, Test2) { stream.EndInlineInfoEntry(); stream.EndStackMapEntry(); - ArenaBitVector sp_mask2(&arena, 0, true); + ArenaBitVector sp_mask2(&allocator, 0, true); sp_mask2.SetBit(3); sp_mask2.SetBit(8); stream.BeginStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0); @@ -154,7 +154,7 @@ TEST(StackMapTest, Test2) { stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3); // Short location. stream.EndStackMapEntry(); - ArenaBitVector sp_mask3(&arena, 0, true); + ArenaBitVector sp_mask3(&allocator, 0, true); sp_mask3.SetBit(1); sp_mask3.SetBit(5); stream.BeginStackMapEntry(2, 192, 0xAB, &sp_mask3, number_of_dex_registers, 0); @@ -162,7 +162,7 @@ TEST(StackMapTest, Test2) { stream.AddDexRegisterEntry(Kind::kInRegisterHigh, 8); // Short location. 
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index a842c6e452..96ac368ac3 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -47,10 +47,10 @@ using Kind = DexRegisterLocation::Kind;
 TEST(StackMapTest, Test1) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
-  ArenaBitVector sp_mask(&arena, 0, false);
+  ArenaBitVector sp_mask(&allocator, 0, false);
   size_t number_of_dex_registers = 2;
   stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
   stream.AddDexRegisterEntry(Kind::kInStack, 0);  // Short location.
@@ -58,7 +58,7 @@ TEST(StackMapTest, Test1) {
   stream.EndStackMapEntry();
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
@@ -128,11 +128,11 @@ TEST(StackMapTest, Test1) {
 TEST(StackMapTest, Test2) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
   ArtMethod art_method;
-  ArenaBitVector sp_mask1(&arena, 0, true);
+  ArenaBitVector sp_mask1(&allocator, 0, true);
   sp_mask1.SetBit(2);
   sp_mask1.SetBit(4);
   size_t number_of_dex_registers = 2;
@@ -146,7 +146,7 @@ TEST(StackMapTest, Test2) {
   stream.EndInlineInfoEntry();
   stream.EndStackMapEntry();
-  ArenaBitVector sp_mask2(&arena, 0, true);
+  ArenaBitVector sp_mask2(&allocator, 0, true);
   sp_mask2.SetBit(3);
   sp_mask2.SetBit(8);
   stream.BeginStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
@@ -154,7 +154,7 @@ TEST(StackMapTest, Test2) {
   stream.AddDexRegisterEntry(Kind::kInRegister, 18);    // Short location.
   stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3);  // Short location.
   stream.EndStackMapEntry();
-  ArenaBitVector sp_mask3(&arena, 0, true);
+  ArenaBitVector sp_mask3(&allocator, 0, true);
   sp_mask3.SetBit(1);
   sp_mask3.SetBit(5);
   stream.BeginStackMapEntry(2, 192, 0xAB, &sp_mask3, number_of_dex_registers, 0);
@@ -162,7 +162,7 @@ TEST(StackMapTest, Test2) {
   stream.AddDexRegisterEntry(Kind::kInRegister, 6);      // Short location.
   stream.AddDexRegisterEntry(Kind::kInRegisterHigh, 8);  // Short location.
   stream.EndStackMapEntry();
-  ArenaBitVector sp_mask4(&arena, 0, true);
+  ArenaBitVector sp_mask4(&allocator, 0, true);
   sp_mask4.SetBit(6);
   sp_mask4.SetBit(7);
   stream.BeginStackMapEntry(3, 256, 0xCD, &sp_mask4, number_of_dex_registers, 0);
@@ -171,7 +171,7 @@ TEST(StackMapTest, Test2) {
   stream.EndStackMapEntry();
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
@@ -412,11 +412,11 @@ TEST(StackMapTest, Test2) {
 TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
   ArtMethod art_method;
-  ArenaBitVector sp_mask1(&arena, 0, true);
+  ArenaBitVector sp_mask1(&allocator, 0, true);
   sp_mask1.SetBit(2);
   sp_mask1.SetBit(4);
   const size_t number_of_dex_registers = 2;
@@ -431,7 +431,7 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
   stream.EndStackMapEntry();
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
@@ -506,10 +506,10 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
 TEST(StackMapTest, TestNonLiveDexRegisters) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
-  ArenaBitVector sp_mask(&arena, 0, false);
+  ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 2;
   stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
   stream.AddDexRegisterEntry(Kind::kNone, 0);  // No location.
@@ -517,7 +517,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
   stream.EndStackMapEntry();
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
@@ -585,10 +585,10 @@
 // not treat it as kNoDexRegisterMap.
 TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
-  ArenaBitVector sp_mask(&arena, 0, false);
+  ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 1024;
   // Create the first stack map (and its Dex register map).
   stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
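Every stack map test in this file follows the same measure/allocate/fill protocol: PrepareForFillIn() computes the encoded size, the caller allocates exactly that much, and FillInCodeInfo() writes into the caller-provided region. A minimal sketch of that protocol with a hypothetical EncoderSketch (the real StackMapStream encoding is far more involved):

#include <cassert>
#include <cstddef>
#include <cstring>
#include <vector>

class EncoderSketch {
 public:
  void Add(int value) { values_.push_back(value); }
  // Measure: report exactly how many bytes FillIn() will write.
  std::size_t PrepareForFillIn() const { return values_.size() * sizeof(int); }
  // Fill: write into memory the caller allocated; no allocation here.
  void FillIn(void* memory, std::size_t size) const {
    assert(size == PrepareForFillIn());
    std::memcpy(memory, values_.data(), size);
  }
 private:
  std::vector<int> values_;
};

int main() {
  EncoderSketch encoder;
  encoder.Add(42);
  encoder.Add(7);
  std::size_t size = encoder.PrepareForFillIn();
  std::vector<char> memory(size);  // Stands in for allocator.Alloc(size, ...).
  encoder.FillIn(memory.data(), size);
  int first = 0;
  std::memcpy(&first, memory.data(), sizeof(first));
  assert(first == 42);
  return 0;
}

Separating sizing from filling is what lets the tests hand the stream an arbitrary allocator: the encoder never owns the output buffer, so the arena-vs-allocator rename in these hunks cannot change behavior.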
@@ -609,7 +609,7 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
   stream.EndStackMapEntry();
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
@@ -648,10 +648,10 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
 TEST(StackMapTest, TestShareDexRegisterMap) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
-  ArenaBitVector sp_mask(&arena, 0, false);
+  ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 2;
   // First stack map.
   stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
@@ -670,7 +670,7 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
   stream.EndStackMapEntry();
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
@@ -706,10 +706,10 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
 TEST(StackMapTest, TestNoDexRegisterMap) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
-  ArenaBitVector sp_mask(&arena, 0, false);
+  ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 0;
   stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
   stream.EndStackMapEntry();
@@ -719,7 +719,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
   stream.EndStackMapEntry();
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
@@ -755,11 +755,11 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
 TEST(StackMapTest, InlineTest) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
   ArtMethod art_method;
-  ArenaBitVector sp_mask1(&arena, 0, true);
+  ArenaBitVector sp_mask1(&allocator, 0, true);
   sp_mask1.SetBit(2);
   sp_mask1.SetBit(4);
@@ -821,7 +821,7 @@ TEST(StackMapTest, InlineTest) {
   stream.EndStackMapEntry();
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
@@ -936,10 +936,10 @@ TEST(StackMapTest, CodeOffsetTest) {
 TEST(StackMapTest, TestDeduplicateStackMask) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
-  ArenaBitVector sp_mask(&arena, 0, true);
+  ArenaBitVector sp_mask(&allocator, 0, true);
   sp_mask.SetBit(1);
   sp_mask.SetBit(4);
   stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, 0, 0);
@@ -948,7 +948,7 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
   stream.EndStackMapEntry();
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
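TestShareDexRegisterMap and TestDeduplicateStackMask above check that identical dex register maps and stack masks are encoded once and referenced from multiple stack maps. A minimal sketch of that deduplication idea, interning encoded byte strings in a hypothetical DedupTableSketch (the real encoder keys on the binary encoding, not std::string):

#include <cassert>
#include <cstddef>
#include <map>
#include <string>

class DedupTableSketch {
 public:
  // Returns the offset of `encoded` in storage, reusing an earlier identical
  // entry when one exists instead of appending a duplicate.
  std::size_t Intern(const std::string& encoded) {
    auto it = offsets_.find(encoded);
    if (it != offsets_.end()) {
      return it->second;  // Already stored: share the existing bytes.
    }
    std::size_t offset = storage_.size();
    storage_ += encoded;
    offsets_.emplace(encoded, offset);
    return offset;
  }
  std::size_t StorageSize() const { return storage_.size(); }
 private:
  std::string storage_;                        // Concatenated unique entries.
  std::map<std::string, std::size_t> offsets_; // Encoding -> storage offset.
};

int main() {
  DedupTableSketch table;
  std::size_t first = table.Intern("\x03\x08");   // First stack map's mask.
  std::size_t second = table.Intern("\x03\x08");  // Identical mask: shared.
  assert(first == second);
  assert(table.StorageSize() == 2u);  // Stored once, referenced twice.
  return 0;
}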
@@ -964,10 +964,10 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
 TEST(StackMapTest, TestInvokeInfo) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
-  ArenaBitVector sp_mask(&arena, 0, true);
+  ArenaBitVector sp_mask(&allocator, 0, true);
   sp_mask.SetBit(1);
   stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, 0, 0);
   stream.AddInvoke(kSuper, 1);
@@ -980,11 +980,12 @@ TEST(StackMapTest, TestInvokeInfo) {
   stream.EndStackMapEntry();
   const size_t code_info_size = stream.PrepareForFillIn();
-  MemoryRegion code_info_region(arena.Alloc(code_info_size, kArenaAllocMisc), code_info_size);
+  MemoryRegion code_info_region(allocator.Alloc(code_info_size, kArenaAllocMisc), code_info_size);
   stream.FillInCodeInfo(code_info_region);
   const size_t method_info_size = stream.ComputeMethodInfoSize();
-  MemoryRegion method_info_region(arena.Alloc(method_info_size, kArenaAllocMisc), method_info_size);
+  MemoryRegion method_info_region(allocator.Alloc(method_info_size, kArenaAllocMisc),
+                                  method_info_size);
   stream.FillInMethodInfo(method_info_region);
   CodeInfo code_info(code_info_region);
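TestInvokeInfo sizes and fills two separate regions from the same allocator: one for the code info and one for the method info. A minimal sketch of carving two regions out of a single bump allocator (a hypothetical BumpAllocatorSketch, not ART's ArenaAllocator):

#include <cassert>
#include <cstddef>
#include <cstring>
#include <vector>

class BumpAllocatorSketch {
 public:
  explicit BumpAllocatorSketch(std::size_t capacity) : buffer_(capacity), pos_(0) {}
  void* Alloc(std::size_t bytes) {
    assert(pos_ + bytes <= buffer_.size());
    void* result = buffer_.data() + pos_;
    pos_ += bytes;  // Bump the cursor; no per-block free, like an arena.
    return result;
  }
 private:
  std::vector<char> buffer_;
  std::size_t pos_;
};

int main() {
  BumpAllocatorSketch allocator(64);
  const char code_info[] = "code";       // Stands in for the encoded code info.
  const char method_info[] = "methods";  // Stands in for the encoded method info.
  void* code_region = allocator.Alloc(sizeof(code_info));
  void* method_region = allocator.Alloc(sizeof(method_info));
  std::memcpy(code_region, code_info, sizeof(code_info));
  std::memcpy(method_region, method_info, sizeof(method_info));
  assert(std::strcmp(static_cast<char*>(code_region), "code") == 0);
  assert(std::strcmp(static_cast<char*>(method_region), "methods") == 0);
  return 0;
}

Both regions come from one arena, so they are released together when the arena is reset; that bulk-release property is what the commit message's memory savings rely on.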