26 files changed, 153 insertions, 89 deletions
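Before the per-file hunks, a condensed before/after of the call-site pattern this change applies across the compiler. Both forms are copied (lightly adapted into plain statements) from hunks below; only their juxtaposition here is new.

  // Old: every ArenaBitVector construction carried an OatBitMapKind tag, which the class
  // itself noted was "currently unused".
  live_in_v_ = new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc);

  // New: OatBitMapKind is gone; the optional trailing argument is an ArenaAllocKind, so the
  // bit vector's memory is attributed to the owning pass when allocation counting is enabled,
  // and an ArenaBitVector::Create() factory places the object itself in the arena.
  live_in_v_ = new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false);
  ArenaBitVector* stack_mask =
      ArenaBitVector::Create(arena, 0, /* expandable */ true, kArenaAllocCodeGenerator);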
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index f2c2e22d6a..7d647e5c3b 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -364,7 +364,7 @@ class GlobalValueNumberingTest : public testing::Test {
         allocator_(),
         gvn_(),
         value_names_(),
-        live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)) {
+        live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false)) {
     cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
     cu_.access_flags = kAccStatic;  // Don't let "this" interfere with this test.
     allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
index 28c61a8fca..22fb835b70 100644
--- a/compiler/dex/gvn_dead_code_elimination_test.cc
+++ b/compiler/dex/gvn_dead_code_elimination_test.cc
@@ -473,7 +473,7 @@ class GvnDeadCodeEliminationTest : public testing::Test {
        gvn_(),
        dce_(),
        value_names_(),
-       live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)) {
+       live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false)) {
     cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
     cu_.access_flags = kAccStatic;  // Don't let "this" interfere with this test.
     allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index a7ba061984..f1cc5fc4d2 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -989,11 +989,11 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
   if (bb->data_flow_info == nullptr) return false;

   use_v = bb->data_flow_info->use_v =
-      new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapUse);
+      new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false);
   def_v = bb->data_flow_info->def_v =
-      new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapDef);
+      new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false);
   live_in_v = bb->data_flow_info->live_in_v =
-      new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapLiveIn);
+      new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false);

   for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     uint64_t df_attributes = GetDataFlowAttributes(mir);
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index b0972d98d4..6dc148dfdb 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -1809,7 +1809,7 @@ void MIRGraph::SSATransformationStart() {
   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
   temp_.ssa.num_vregs = GetNumOfCodeAndTempVRs();
   temp_.ssa.work_live_vregs = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), temp_.ssa.num_vregs, false, kBitMapRegisterV);
+      temp_scoped_alloc_.get(), temp_.ssa.num_vregs, false);
 }

 void MIRGraph::SSATransformationEnd() {
@@ -1869,7 +1869,7 @@ static BasicBlock* SelectTopologicalSortOrderFallBack(
   BasicBlock* fall_back = nullptr;
   size_t fall_back_num_reachable = 0u;
   // Reuse the same bit vector for each candidate to mark reachable unvisited blocks.
-  ArenaBitVector candidate_reachable(allocator, mir_graph->GetNumBlocks(), false, kBitMapMisc);
+  ArenaBitVector candidate_reachable(allocator, mir_graph->GetNumBlocks(), false);
   AllNodesIterator iter(mir_graph);
   for (BasicBlock* candidate = iter.Next(); candidate != nullptr; candidate = iter.Next()) {
     if (candidate->hidden ||  // Hidden, or
@@ -1944,7 +1944,7 @@ void MIRGraph::ComputeTopologicalSortOrder() {
   ScopedArenaVector<size_t> visited_cnt_values(num_blocks, 0u, allocator.Adapter());
   ScopedArenaVector<BasicBlockId> loop_head_stack(allocator.Adapter());
   size_t max_nested_loops = 0u;
-  ArenaBitVector loop_exit_blocks(&allocator, num_blocks, false, kBitMapMisc);
+  ArenaBitVector loop_exit_blocks(&allocator, num_blocks, false);
   loop_exit_blocks.ClearAllBits();

   // Count the number of blocks to process and add the entry block(s).
@@ -2051,7 +2051,7 @@ void MIRGraph::ComputeTopologicalSortOrder() {
       }
       // Compute blocks from which the loop head is reachable and process those blocks first.
       ArenaBitVector* reachable =
-          new (&allocator) ArenaBitVector(&allocator, num_blocks, false, kBitMapMisc);
+          new (&allocator) ArenaBitVector(&allocator, num_blocks, false);
       loop_head_reachable_from[bb->id] = reachable;
       ComputeUnvisitedReachableFrom(this, bb->id, reachable, &tmp_stack);
       // Now mark as loop head. (Even if it's only a fall back when we don't find a true loop.)
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 6f9dd6d268..0e74a48aa1 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -927,7 +927,7 @@ bool MIRGraph::EliminateNullChecksGate() {
   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
   temp_.nce.num_vregs = GetNumOfCodeAndTempVRs();
   temp_.nce.work_vregs_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), temp_.nce.num_vregs, false, kBitMapNullCheck);
+      temp_scoped_alloc_.get(), temp_.nce.num_vregs, false);
   temp_.nce.ending_vregs_to_check_matrix =
       temp_scoped_alloc_->AllocArray<ArenaBitVector*>(GetNumBlocks(), kArenaAllocMisc);
   std::fill_n(temp_.nce.ending_vregs_to_check_matrix, GetNumBlocks(), nullptr);
@@ -1095,7 +1095,7 @@ bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
       temp_.nce.ending_vregs_to_check_matrix[bb->id] = vregs_to_check;
       // Create a new vregs_to_check for next BB.
       temp_.nce.work_vregs_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
-          temp_scoped_alloc_.get(), temp_.nce.num_vregs, false, kBitMapNullCheck);
+          temp_scoped_alloc_.get(), temp_.nce.num_vregs, false);
     } else if (!vregs_to_check->SameBitsSet(old_ending_ssa_regs_to_check)) {
       nce_changed = true;
       temp_.nce.ending_vregs_to_check_matrix[bb->id] = vregs_to_check;
@@ -1238,7 +1238,7 @@ bool MIRGraph::EliminateClassInitChecksGate() {
   // 2 bits for each class: is class initialized, is class in dex cache.
   temp_.cice.num_class_bits = 2u * unique_class_count;
   temp_.cice.work_classes_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false, kBitMapClInitCheck);
+      temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false);
   temp_.cice.ending_classes_to_check_matrix =
       temp_scoped_alloc_->AllocArray<ArenaBitVector*>(GetNumBlocks(), kArenaAllocMisc);
   std::fill_n(temp_.cice.ending_classes_to_check_matrix, GetNumBlocks(), nullptr);
@@ -1335,7 +1335,7 @@ bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
       temp_.cice.ending_classes_to_check_matrix[bb->id] = classes_to_check;
       // Create a new classes_to_check for next BB.
      temp_.cice.work_classes_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
-          temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false, kBitMapClInitCheck);
+          temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false);
     } else if (!classes_to_check->Equal(old_ending_classes_to_check)) {
       changed = true;
       temp_.cice.ending_classes_to_check_matrix[bb->id] = classes_to_check;
@@ -1517,7 +1517,7 @@ void MIRGraph::InlineSpecialMethodsStart() {
   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
   temp_.smi.num_indexes = method_lowering_infos_.size();
   temp_.smi.processed_indexes = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), temp_.smi.num_indexes, false, kBitMapMisc);
+      temp_scoped_alloc_.get(), temp_.smi.num_indexes, false);
   temp_.smi.processed_indexes->ClearAllBits();
   temp_.smi.lowering_infos =
       temp_scoped_alloc_->AllocArray<uint16_t>(temp_.smi.num_indexes, kArenaAllocGrowableArray);
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 6ed666b9f7..6d5b3510b5 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -144,7 +144,7 @@ void MIRGraph::ComputeDefBlockMatrix() {
   /* Initialize num_register vectors with num_blocks bits each */
   for (i = 0; i < num_registers; i++) {
     temp_.ssa.def_block_matrix[i] = new (temp_scoped_alloc_.get()) ArenaBitVector(
-        arena_, GetNumBlocks(), false, kBitMapBMatrix);
+        arena_, GetNumBlocks(), false);
     temp_.ssa.def_block_matrix[i]->ClearAllBits();
   }

@@ -248,12 +248,9 @@ void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
   int num_total_blocks = GetBasicBlockListCount();

   if (bb->dominators == nullptr) {
-    bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
-                                                 true /* expandable */, kBitMapDominators);
-    bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
-                                                  true /* expandable */, kBitMapIDominated);
-    bb->dom_frontier = new (arena_) ArenaBitVector(arena_, num_total_blocks,
-                                                   true /* expandable */, kBitMapDomFrontier);
+    bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks, true /* expandable */);
+    bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks, true /* expandable */);
+    bb->dom_frontier = new (arena_) ArenaBitVector(arena_, num_total_blocks, true /* expandable */);
   } else {
     bb->dominators->ClearAllBits();
     bb->i_dominated->ClearAllBits();
@@ -471,7 +468,7 @@ void MIRGraph::FindPhiNodeBlocks() {
   }

   ArenaBitVector* phi_blocks = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), GetNumBlocks(), false, kBitMapBMatrix);
+      temp_scoped_alloc_.get(), GetNumBlocks(), false);

   // Reuse the def_block_matrix storage for phi_node_blocks.
   ArenaBitVector** def_block_matrix = temp_.ssa.def_block_matrix;
diff --git a/compiler/dex/type_inference_test.cc b/compiler/dex/type_inference_test.cc
index e2c0d32f97..ef5365106d 100644
--- a/compiler/dex/type_inference_test.cc
+++ b/compiler/dex/type_inference_test.cc
@@ -494,7 +494,7 @@ class TypeInferenceTest : public testing::Test {
         code_item_(nullptr),
         ssa_reps_(),
         allocator_(),
-        live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)),
+        live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false)),
         type_defs_(nullptr),
         type_count_(0u),
         ifield_defs_(nullptr),
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 57660c2623..124afbc73b 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -367,7 +367,8 @@ GraphAnalysisResult HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item
   ArenaBitVector* native_debug_info_locations;
   if (native_debuggable) {
     const uint32_t num_instructions = code_item.insns_size_in_code_units_;
-    native_debug_info_locations = new (arena_) ArenaBitVector (arena_, num_instructions, false);
+    native_debug_info_locations =
+        ArenaBitVector::Create(arena_, num_instructions, false, kArenaAllocGraphBuilder);
     FindNativeDebugInfoLocations(code_item, native_debug_info_locations);
   }
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index d64a786a9e..32869ec0b4 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -856,7 +856,8 @@ void CodeGenerator::RecordCatchBlockInfo() {
     uint32_t register_mask = 0;  // Not used.

     // The stack mask is not used, so we leave it empty.
-    ArenaBitVector* stack_mask = new (arena) ArenaBitVector(arena, 0, /* expandable */ true);
+    ArenaBitVector* stack_mask =
+        ArenaBitVector::Create(arena, 0, /* expandable */ true, kArenaAllocCodeGenerator);

     stack_map_stream_.BeginStackMapEntry(dex_pc,
                                          native_pc,
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index e170e37bdd..d7bf16e0cc 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -97,7 +97,7 @@ void HDeadCodeElimination::RemoveDeadBlocks() {
   }

   // Classify blocks as reachable/unreachable.
   ArenaAllocator* allocator = graph_->GetArena();
-  ArenaBitVector live_blocks(allocator, graph_->GetBlocks().size(), false);
+  ArenaBitVector live_blocks(allocator, graph_->GetBlocks().size(), false, kArenaAllocDCE);
   MarkReachableBlocks(graph_, &live_blocks);
   bool removed_one_or_more_blocks = false;
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 11e3689a82..9491ef6119 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -812,7 +812,10 @@ void GraphChecker::VisitPhi(HPhi* phi) {
                             phi->GetRegNumber(),
                             type_str.str().c_str()));
       } else {
-        ArenaBitVector visited(GetGraph()->GetArena(), 0, /* expandable */ true);
+        ArenaBitVector visited(GetGraph()->GetArena(),
+                               0,
+                               /* expandable */ true,
+                               kArenaAllocGraphChecker);
         if (!IsConstantEquivalent(phi, other_phi, &visited)) {
           AddError(StringPrintf("Two phis (%d and %d) found for VReg %d but they "
                                 "are not equivalents of constants.",
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 52252cd3d4..8da8457859 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -30,7 +30,10 @@ class GraphChecker : public HGraphDelegateVisitor {
     : HGraphDelegateVisitor(graph),
       errors_(graph->GetArena()->Adapter(kArenaAllocGraphChecker)),
       dump_prefix_(dump_prefix),
-      seen_ids_(graph->GetArena(), graph->GetCurrentInstructionId(), false) {}
+      seen_ids_(graph->GetArena(),
+                graph->GetCurrentInstructionId(),
+                false,
+                kArenaAllocGraphChecker) {}

   // Check the whole graph (in reverse post-order).
   void Run() {
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index b4922789d4..f7eb2adc6c 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -40,7 +40,7 @@ class ValueSet : public ArenaObject<kArenaAllocGvn> {
       : allocator_(allocator),
         num_buckets_(kMinimumNumberOfBuckets),
         buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
-        buckets_owned_(allocator, num_buckets_, false),
+        buckets_owned_(allocator, num_buckets_, false, kArenaAllocGvn),
         num_entries_(0) {
     // ArenaAllocator returns zeroed memory, so no need to set buckets to null.
     DCHECK(IsPowerOfTwo(num_buckets_));
@@ -53,7 +53,7 @@ class ValueSet : public ArenaObject<kArenaAllocGvn> {
       : allocator_(allocator),
         num_buckets_(to_copy.IdealBucketCount()),
         buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
-        buckets_owned_(allocator, num_buckets_, false),
+        buckets_owned_(allocator, num_buckets_, false, kArenaAllocGvn),
         num_entries_(to_copy.num_entries_) {
     // ArenaAllocator returns zeroed memory, so entries of buckets_ and
     // buckets_owned_ are initialized to null and false, respectively.
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index 33bb2e8f30..7a1e06b951 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -80,7 +80,7 @@ static void UpdateLoopPhisIn(HEnvironment* environment, HLoopInformation* info)

 void LICM::Run() {
   DCHECK(side_effects_.HasRun());  // Only used during debug.
-  ArenaBitVector visited(graph_->GetArena(), graph_->GetBlocks().size(), false);
+  ArenaBitVector visited(graph_->GetArena(), graph_->GetBlocks().size(), false, kArenaAllocLICM);
   // Post order visit to visit inner loops before outer loops.
   for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 9601b066e5..e1977b1798 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -186,7 +186,10 @@ class HeapLocationCollector : public HGraphVisitor {
       : HGraphVisitor(graph),
         ref_info_array_(graph->GetArena()->Adapter(kArenaAllocLSE)),
         heap_locations_(graph->GetArena()->Adapter(kArenaAllocLSE)),
-        aliasing_matrix_(graph->GetArena(), kInitialAliasingMatrixBitVectorSize, true),
+        aliasing_matrix_(graph->GetArena(),
+                         kInitialAliasingMatrixBitVectorSize,
+                         true,
+                         kArenaAllocLSE),
         has_heap_stores_(false),
         has_volatile_(false),
         has_monitor_operations_(false),
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 1ab206f69e..83596da41a 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -37,7 +37,7 @@ LocationSummary::LocationSummary(HInstruction* instruction,

   if (NeedsSafepoint()) {
     ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetArena();
-    stack_mask_ = new (arena) ArenaBitVector(arena, 0, true);
+    stack_mask_ = ArenaBitVector::Create(arena, 0, true, kArenaAllocLocationSummary);
   }
 }
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 98766a31a6..c83340b1f6 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -54,7 +54,7 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) {
   DCHECK_EQ(visited->GetHighestBitSet(), -1);

   // Nodes that we're currently visiting, indexed by block id.
-  ArenaBitVector visiting(arena_, blocks_.size(), false);
+  ArenaBitVector visiting(arena_, blocks_.size(), false, kArenaAllocGraphBuilder);
   // Number of successors visited from a given node, indexed by block id.
   ArenaVector<size_t> successors_visited(blocks_.size(), 0u, arena_->Adapter());
   // Stack of nodes that we're currently visiting (same as marked in "visiting" above).
@@ -140,7 +140,7 @@ GraphAnalysisResult HGraph::BuildDominatorTree() {
   // collect both normal- and exceptional-flow values at the same time.
   SimplifyCatchBlocks();

-  ArenaBitVector visited(arena_, blocks_.size(), false);
+  ArenaBitVector visited(arena_, blocks_.size(), false, kArenaAllocGraphBuilder);

   // (2) Find the back edges in the graph doing a DFS traversal.
   FindBackEdges(&visited);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 46377ee503..673631958a 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -645,7 +645,7 @@ class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> {
         irreducible_(false),
         back_edges_(graph->GetArena()->Adapter(kArenaAllocLoopInfoBackEdges)),
         // Make bit vector growable, as the number of blocks may change.
-        blocks_(graph->GetArena(), graph->GetBlocks().size(), true) {
+        blocks_(graph->GetArena(), graph->GetBlocks().size(), true, kArenaAllocLoopInfoBackEdges) {
     back_edges_.reserve(kDefaultNumberOfBackEdges);
   }
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index b8d76b912e..34d9af1e74 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -445,7 +445,7 @@ class AllRangesIterator : public ValueObject {
 bool RegisterAllocator::ValidateInternal(bool log_fatal_on_failure) const {
   // To simplify unit testing, we eagerly create the array of intervals, and
   // call the helper method.
-  ArenaVector<LiveInterval*> intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
+  ArenaVector<LiveInterval*> intervals(allocator_->Adapter(kArenaAllocRegisterAllocatorValidate));
   for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
     HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
     if (ShouldProcess(processing_core_registers_, instruction->GetLiveInterval())) {
@@ -483,13 +483,21 @@ bool RegisterAllocator::ValidateIntervals(const ArenaVector<LiveInterval*>& inte
       ? codegen.GetNumberOfCoreRegisters()
       : codegen.GetNumberOfFloatingPointRegisters();
   ArenaVector<ArenaBitVector*> liveness_of_values(
-      allocator->Adapter(kArenaAllocRegisterAllocator));
+      allocator->Adapter(kArenaAllocRegisterAllocatorValidate));
   liveness_of_values.reserve(number_of_registers + number_of_spill_slots);

+  size_t max_end = 0u;
+  for (LiveInterval* start_interval : intervals) {
+    for (AllRangesIterator it(start_interval); !it.Done(); it.Advance()) {
+      max_end = std::max(max_end, it.CurrentRange()->GetEnd());
+    }
+  }
+
   // Allocate a bit vector per register. A live interval that has a register
   // allocated will populate the associated bit vector based on its live ranges.
   for (size_t i = 0; i < number_of_registers + number_of_spill_slots; ++i) {
-    liveness_of_values.push_back(new (allocator) ArenaBitVector(allocator, 0, true));
+    liveness_of_values.push_back(
+        ArenaBitVector::Create(allocator, max_end, false, kArenaAllocRegisterAllocatorValidate));
   }

   for (LiveInterval* start_interval : intervals) {
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index a78aedcff5..97f2aeeb1e 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -31,9 +31,9 @@ class BlockInfo : public ArenaObject<kArenaAllocSsaLiveness> {
  public:
   BlockInfo(ArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
       : block_(block),
-        live_in_(allocator, number_of_ssa_values, false),
-        live_out_(allocator, number_of_ssa_values, false),
-        kill_(allocator, number_of_ssa_values, false) {
+        live_in_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness),
+        live_out_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness),
+        kill_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness) {
     UNUSED(block_);
     live_in_.ClearAllBits();
     live_out_.ClearAllBits();
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 54cbdf8b66..3f41e3594e 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -37,7 +37,7 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
   current_entry_.same_dex_register_map_as_ = kNoSameDexMapFound;
   if (num_dex_registers != 0) {
     current_entry_.live_dex_registers_mask =
-        new (allocator_) ArenaBitVector(allocator_, num_dex_registers, true);
+        ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
   } else {
     current_entry_.live_dex_registers_mask = nullptr;
   }
@@ -111,7 +111,7 @@ void StackMapStream::BeginInlineInfoEntry(uint32_t method_index,
   current_inline_info_.dex_register_locations_start_index = dex_register_locations_.size();
   if (num_dex_registers != 0) {
     current_inline_info_.live_dex_registers_mask =
-        new (allocator_) ArenaBitVector(allocator_, num_dex_registers, true);
+        ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
   } else {
     current_inline_info_.live_dex_registers_mask = nullptr;
   }
@@ -256,7 +256,7 @@ void StackMapStream::FillIn(MemoryRegion region) {
   // Ensure we reached the end of the Dex registers location_catalog.
   DCHECK_EQ(location_catalog_offset, dex_register_location_catalog_region.size());

-  ArenaBitVector empty_bitmask(allocator_, 0, /* expandable */ false);
+  ArenaBitVector empty_bitmask(allocator_, 0, /* expandable */ false, kArenaAllocStackMapStream);
   uintptr_t next_dex_register_map_offset = 0;
   uintptr_t next_inline_info_offset = 0;
   for (size_t i = 0, e = stack_maps_.size(); i < e; ++i) {
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index f871543862..753994392e 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -85,17 +85,20 @@ const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
   "GVN ",
   "InductionVar ",
   "BCE ",
+  "DCE ",
+  "LSE ",
+  "LICM ",
   "SsaLiveness ",
   "SsaPhiElim ",
   "RefTypeProp ",
   "PrimTypeProp ",
   "SideEffects ",
   "RegAllocator ",
+  "RegAllocVldt ",
   "StackMapStm ",
   "CodeGen ",
   "ParallelMove ",
   "GraphChecker ",
-  "LSE ",
   "Verifier ",
 };
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 728f897229..f8f7396ed5 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -96,17 +96,20 @@ enum ArenaAllocKind {
   kArenaAllocGvn,
   kArenaAllocInductionVarAnalysis,
   kArenaAllocBoundsCheckElimination,
+  kArenaAllocDCE,
+  kArenaAllocLSE,
+  kArenaAllocLICM,
   kArenaAllocSsaLiveness,
   kArenaAllocSsaPhiElimination,
   kArenaAllocReferenceTypePropagation,
   kArenaAllocPrimitiveTypePropagation,
   kArenaAllocSideEffectsAnalysis,
   kArenaAllocRegisterAllocator,
+  kArenaAllocRegisterAllocatorValidate,
   kArenaAllocStackMapStream,
   kArenaAllocCodeGenerator,
   kArenaAllocParallelMoveResolver,
   kArenaAllocGraphChecker,
-  kArenaAllocLSE,
   kArenaAllocVerifier,
   kNumArenaAllocKinds
 };
@@ -356,6 +359,11 @@ class ArenaAllocator
   }

   template <typename T>
+  T* Alloc(ArenaAllocKind kind = kArenaAllocMisc) {
+    return AllocArray<T>(1, kind);
+  }
+
+  template <typename T>
   T* AllocArray(size_t length, ArenaAllocKind kind = kArenaAllocMisc) {
     return static_cast<T*>(Alloc(length * sizeof(T), kind));
   }
diff --git a/runtime/base/arena_bit_vector.cc b/runtime/base/arena_bit_vector.cc
index fbbfd84fcf..5f8f5d2275 100644
--- a/runtime/base/arena_bit_vector.cc
+++ b/runtime/base/arena_bit_vector.cc
@@ -21,36 +21,78 @@ namespace art {

+template <bool kCount>
+class ArenaBitVectorAllocatorKindImpl;
+
+template <>
+class ArenaBitVectorAllocatorKindImpl<false> {
+ public:
+  // Not tracking allocations, ignore the supplied kind and arbitrarily provide kArenaAllocSTL.
+  explicit ArenaBitVectorAllocatorKindImpl(ArenaAllocKind kind ATTRIBUTE_UNUSED) {}
+  ArenaBitVectorAllocatorKindImpl(const ArenaBitVectorAllocatorKindImpl&) = default;
+  ArenaBitVectorAllocatorKindImpl& operator=(const ArenaBitVectorAllocatorKindImpl&) = default;
+  ArenaAllocKind Kind() { return kArenaAllocGrowableBitMap; }
+};
+
+template <bool kCount>
+class ArenaBitVectorAllocatorKindImpl {
+ public:
+  explicit ArenaBitVectorAllocatorKindImpl(ArenaAllocKind kind) : kind_(kind) { }
+  ArenaBitVectorAllocatorKindImpl(const ArenaBitVectorAllocatorKindImpl&) = default;
+  ArenaBitVectorAllocatorKindImpl& operator=(const ArenaBitVectorAllocatorKindImpl&) = default;
+  ArenaAllocKind Kind() { return kind_; }
+
+ private:
+  ArenaAllocKind kind_;
+};
+
+using ArenaBitVectorAllocatorKind =
+    ArenaBitVectorAllocatorKindImpl<kArenaAllocatorCountAllocations>;
+
 template <typename ArenaAlloc>
-class ArenaBitVectorAllocator FINAL : public Allocator,
-    public ArenaObject<kArenaAllocGrowableBitMap> {
+class ArenaBitVectorAllocator FINAL : public Allocator, private ArenaBitVectorAllocatorKind {
  public:
-  explicit ArenaBitVectorAllocator(ArenaAlloc* arena) : arena_(arena) {}
-  ~ArenaBitVectorAllocator() {}
+  static ArenaBitVectorAllocator* Create(ArenaAlloc* arena, ArenaAllocKind kind) {
+    void* storage = arena->template Alloc<ArenaBitVectorAllocator>(kind);
+    return new (storage) ArenaBitVectorAllocator(arena, kind);
+  }
+
+  ~ArenaBitVectorAllocator() {
+    LOG(FATAL) << "UNREACHABLE";
+    UNREACHABLE();
+  }

   virtual void* Alloc(size_t size) {
-    return arena_->Alloc(size, kArenaAllocGrowableBitMap);
+    return arena_->Alloc(size, this->Kind());
   }

   virtual void Free(void*) {}  // Nop.

  private:
+  ArenaBitVectorAllocator(ArenaAlloc* arena, ArenaAllocKind kind)
+      : ArenaBitVectorAllocatorKind(kind), arena_(arena) { }
+
   ArenaAlloc* const arena_;
+
   DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
 };

-ArenaBitVector::ArenaBitVector(ArenaAllocator* arena, unsigned int start_bits,
-                               bool expandable, OatBitMapKind kind)
-  : BitVector(start_bits, expandable,
-              new (arena) ArenaBitVectorAllocator<ArenaAllocator>(arena)), kind_(kind) {
-  UNUSED(kind_);
+ArenaBitVector::ArenaBitVector(ArenaAllocator* arena,
+                               unsigned int start_bits,
+                               bool expandable,
+                               ArenaAllocKind kind)
+  : BitVector(start_bits,
+              expandable,
+              ArenaBitVectorAllocator<ArenaAllocator>::Create(arena, kind)) {
 }

-ArenaBitVector::ArenaBitVector(ScopedArenaAllocator* arena, unsigned int start_bits,
-                               bool expandable, OatBitMapKind kind)
-  : BitVector(start_bits, expandable,
-              new (arena) ArenaBitVectorAllocator<ScopedArenaAllocator>(arena)), kind_(kind) {
-  UNUSED(kind_);
+ArenaBitVector::ArenaBitVector(ScopedArenaAllocator* arena,
+                               unsigned int start_bits,
+                               bool expandable,
+                               ArenaAllocKind kind)
+  : BitVector(start_bits,
+              expandable,
+              ArenaBitVectorAllocator<ScopedArenaAllocator>::Create(arena, kind)) {
 }

 }  // namespace art
diff --git a/runtime/base/arena_bit_vector.h b/runtime/base/arena_bit_vector.h
index d6061662c2..d86d622d38 100644
--- a/runtime/base/arena_bit_vector.h
+++ b/runtime/base/arena_bit_vector.h
@@ -25,44 +25,34 @@ namespace art {

 class ArenaAllocator;
 class ScopedArenaAllocator;

-// Type of growable bitmap for memory tuning.
-enum OatBitMapKind {
-  kBitMapMisc = 0,
-  kBitMapUse,
-  kBitMapDef,
-  kBitMapLiveIn,
-  kBitMapBMatrix,
-  kBitMapDominators,
-  kBitMapIDominated,
-  kBitMapDomFrontier,
-  kBitMapRegisterV,
-  kBitMapTempSSARegisterV,
-  kBitMapNullCheck,
-  kBitMapClInitCheck,
-  kBitMapPredecessors,
-  kNumBitMapKinds
-};
-
-std::ostream& operator<<(std::ostream& os, const OatBitMapKind& kind);
-
 /*
  * A BitVector implementation that uses Arena allocation.
  */
 class ArenaBitVector : public BitVector, public ArenaObject<kArenaAllocGrowableBitMap> {
  public:
-  ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
-                 OatBitMapKind kind = kBitMapMisc);
-  ArenaBitVector(ScopedArenaAllocator* arena, uint32_t start_bits, bool expandable,
-                 OatBitMapKind kind = kBitMapMisc);
+  template <typename Allocator>
+  static ArenaBitVector* Create(Allocator* arena,
+                                uint32_t start_bits,
+                                bool expandable,
+                                ArenaAllocKind kind = kArenaAllocGrowableBitMap) {
+    void* storage = arena->template Alloc<ArenaBitVector>(kind);
+    return new (storage) ArenaBitVector(arena, start_bits, expandable, kind);
+  }
+
+  ArenaBitVector(ArenaAllocator* arena,
+                 uint32_t start_bits,
+                 bool expandable,
+                 ArenaAllocKind kind = kArenaAllocGrowableBitMap);
+  ArenaBitVector(ScopedArenaAllocator* arena,
+                 uint32_t start_bits,
+                 bool expandable,
+                 ArenaAllocKind kind = kArenaAllocGrowableBitMap);
   ~ArenaBitVector() {}

  private:
-  const OatBitMapKind kind_;  // for memory use tuning. TODO: currently unused.
-
   DISALLOW_COPY_AND_ASSIGN(ArenaBitVector);
 };
-
 }  // namespace art

 #endif  // ART_RUNTIME_BASE_ARENA_BIT_VECTOR_H_
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index a87153bd77..55044b34e5 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -152,6 +152,11 @@ class ScopedArenaAllocator
   }

   template <typename T>
+  T* Alloc(ArenaAllocKind kind = kArenaAllocMisc) {
+    return AllocArray<T>(1, kind);
+  }
+
+  template <typename T>
   T* AllocArray(size_t length, ArenaAllocKind kind = kArenaAllocMisc) {
     return static_cast<T*>(Alloc(length * sizeof(T), kind));
   }
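For orientation, a minimal usage sketch of the API after this change, assuming the declarations above (the ArenaAllocKind-taking constructors, ArenaBitVector::Create(), and ArenaAllocator::Alloc<T>()). The include paths, function name, and variable names here are illustrative, not part of the change.

  #include "base/arena_allocator.h"   // Assumed include path for ArenaAllocator / ArenaAllocKind.
  #include "base/arena_bit_vector.h"  // Assumed include path for ArenaBitVector.

  namespace art {

  // Hypothetical pass helper showing both construction styles.
  void SketchMarkBlocks(ArenaAllocator* arena, size_t num_blocks) {
    // Stack-allocated, fixed-size bit vector. The kind is recorded only when
    // kArenaAllocatorCountAllocations is enabled; otherwise Kind() falls back to
    // kArenaAllocGrowableBitMap, as in the <false> specialization above.
    ArenaBitVector visited(arena, num_blocks, /* expandable */ false, kArenaAllocDCE);
    visited.ClearAllBits();  // Mirrors the call sites above, which clear the bits explicitly.

    // Arena-allocated, growable bit vector via the new factory. The ArenaBitVector object
    // itself is also obtained from the arena and tagged with the supplied kind.
    ArenaBitVector* stack_mask = ArenaBitVector::Create(
        arena, /* start_bits */ 0, /* expandable */ true, kArenaAllocCodeGenerator);
    stack_mask->SetBit(3);  // A growable vector expands its storage as needed.
  }

  }  // namespace art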