author     2024-02-05 16:13:28 +0000
committer  2024-02-06 15:15:13 +0000
commit     1969e59e7eba03ac246171b88eea56ad36bfc9f0 (patch)
tree       c119bffd2a1c51d1d903260a50ef1cd1d0c3ada2
parent     a9b2bf7f7261e2a1be244a182dbbadf6fd4c6d93 (diff)
Improve data structure use in RemoveUnneededTries
1) The new/malloc calls were taking a significant share of the runtime.
   By using a scoped arena allocator instead, the method is now ~33%
   faster on local compiles (see the allocator sketch below).
2) Use BitVectors instead of sets in TryBelongingInformation, which
   avoids tree rebalancing when merging sets. This improves the method
   by a further ~20%, for a total of ~55% combined with 1) (see the
   bit-vector sketch after the diff).
Test: art/test/testrunner/testrunner.py --host --64 --optimizing -b
Change-Id: If9ad005d0d6f89656ba5aa0e37e3061af272ba07
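
For context on point 1): a scoped arena allocator services many small allocations out of one pre-allocated buffer and releases them all at once when the scope ends, so each allocation costs only a pointer bump and the per-object new/malloc/free traffic disappears. The sketch below is a minimal standalone illustration of that idea, not ART's actual ScopedArenaAllocator; the class name and the fixed-capacity handling are assumptions made for the example.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Minimal bump-pointer arena. All memory comes from one buffer and is
// released in bulk when the arena goes out of scope, mirroring how a
// scoped arena avoids per-object new/malloc and free.
// Illustrative only -- not ART's ScopedArenaAllocator.
class ScopedArenaSketch {
 public:
  explicit ScopedArenaSketch(size_t capacity) : buffer_(capacity), used_(0) {}

  // O(1) allocation: align the cursor, then bump it. Returns nullptr
  // when the fixed buffer is exhausted (a real arena would grow).
  void* Alloc(size_t bytes, size_t align = alignof(std::max_align_t)) {
    size_t offset = (used_ + align - 1) & ~(align - 1);
    if (offset + bytes > buffer_.size()) {
      return nullptr;
    }
    used_ = offset + bytes;
    return buffer_.data() + offset;
  }

 private:
  std::vector<uint8_t> buffer_;  // freed once, in the destructor
  size_t used_;
};
```

Because nothing is freed individually, teardown cost is independent of how many allocations the pass made, which is what makes this profitable for short-lived per-pass data structures like the `tries` map in the diff below.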
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc | 48
1 file changed, 29 insertions(+), 19 deletions(-)
```diff
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 0935f57f82..6746771fa4 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -589,19 +589,24 @@ void HDeadCodeElimination::ConnectSuccessiveBlocks() {
 }
 
 struct HDeadCodeElimination::TryBelongingInformation {
-  explicit TryBelongingInformation(ScopedArenaAllocator* allocator)
-      : blocks_in_try(allocator->Adapter(kArenaAllocDCE)),
-        coalesced_try_entries(allocator->Adapter(kArenaAllocDCE)) {}
+  TryBelongingInformation(HGraph* graph, ScopedArenaAllocator* allocator)
+      : blocks_in_try(allocator, graph->GetBlocks().size(), /*expandable=*/false, kArenaAllocDCE),
+        coalesced_try_entries(
+            allocator, graph->GetBlocks().size(), /*expandable=*/false, kArenaAllocDCE) {
+    blocks_in_try.ClearAllBits();
+    coalesced_try_entries.ClearAllBits();
+  }
 
   // Which blocks belong in the try.
-  ScopedArenaSet<HBasicBlock*> blocks_in_try;
+  ArenaBitVector blocks_in_try;
 
   // Which other try entries are referencing this same try.
-  ScopedArenaSet<HBasicBlock*> coalesced_try_entries;
+  ArenaBitVector coalesced_try_entries;
 };
 
 bool HDeadCodeElimination::CanPerformTryRemoval(const TryBelongingInformation& try_belonging_info) {
-  for (HBasicBlock* block : try_belonging_info.blocks_in_try) {
-    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+  const ArenaVector<HBasicBlock*>& blocks = graph_->GetBlocks();
+  for (uint32_t i : try_belonging_info.blocks_in_try.Indexes()) {
+    for (HInstructionIterator it(blocks[i]->GetInstructions()); !it.Done(); it.Advance()) {
       if (it.Current()->CanThrow()) {
         return false;
       }
@@ -644,14 +649,17 @@ void HDeadCodeElimination::RemoveTry(HBasicBlock* try_entry,
   DCHECK(try_entry->GetLastInstruction()->AsTryBoundary()->IsEntry());
   DisconnectHandlersAndUpdateTryBoundary(try_entry, any_block_in_loop);
 
-  for (HBasicBlock* other_try_entry : try_belonging_info.coalesced_try_entries) {
+  const ArenaVector<HBasicBlock*>& blocks = graph_->GetBlocks();
+  for (uint32_t i : try_belonging_info.coalesced_try_entries.Indexes()) {
+    HBasicBlock* other_try_entry = blocks[i];
     DCHECK(other_try_entry->EndsWithTryBoundary());
     DCHECK(other_try_entry->GetLastInstruction()->AsTryBoundary()->IsEntry());
     DisconnectHandlersAndUpdateTryBoundary(other_try_entry, any_block_in_loop);
   }
 
   // Update the blocks in the try.
-  for (HBasicBlock* block : try_belonging_info.blocks_in_try) {
+  for (uint32_t i : try_belonging_info.blocks_in_try.Indexes()) {
+    HBasicBlock* block = blocks[i];
     // Update the try catch information since now the try doesn't exist.
     block->SetTryCatchInformation(nullptr);
     if (block->IsInLoop()) {
@@ -694,33 +702,35 @@ bool HDeadCodeElimination::RemoveUnneededTries() {
   ScopedArenaAllocator allocator(graph_->GetArenaStack());
 
   // Collect which blocks are part of which try.
-  std::unordered_map<HBasicBlock*, TryBelongingInformation> tries;
+  ScopedArenaUnorderedMap<HBasicBlock*, TryBelongingInformation> tries(
+      allocator.Adapter(kArenaAllocDCE));
   for (HBasicBlock* block : graph_->GetReversePostOrderSkipEntryBlock()) {
     if (block->IsTryBlock()) {
       HBasicBlock* key = block->GetTryCatchInformation()->GetTryEntry().GetBlock();
       auto it = tries.find(key);
       if (it == tries.end()) {
-        it = tries.insert({key, TryBelongingInformation(&allocator)}).first;
+        it = tries.insert({key, TryBelongingInformation(graph_, &allocator)}).first;
       }
-      it->second.blocks_in_try.insert(block);
+      it->second.blocks_in_try.SetBit(block->GetBlockId());
     }
   }
 
   // Deduplicate the tries which have different try entries but they are really the same try.
   for (auto it = tries.begin(); it != tries.end(); it++) {
-    DCHECK(it->first->EndsWithTryBoundary());
-    HTryBoundary* try_boundary = it->first->GetLastInstruction()->AsTryBoundary();
+    HBasicBlock* block = it->first;
+    DCHECK(block->EndsWithTryBoundary());
+    HTryBoundary* try_boundary = block->GetLastInstruction()->AsTryBoundary();
     for (auto other_it = next(it); other_it != tries.end(); /*other_it++ in the loop*/) {
-      DCHECK(other_it->first->EndsWithTryBoundary());
-      HTryBoundary* other_try_boundary = other_it->first->GetLastInstruction()->AsTryBoundary();
+      HBasicBlock* other_block = other_it->first;
+      DCHECK(other_block->EndsWithTryBoundary());
+      HTryBoundary* other_try_boundary = other_block->GetLastInstruction()->AsTryBoundary();
       if (try_boundary->HasSameExceptionHandlersAs(*other_try_boundary)) {
         // Merge the entries as they are really the same one.
         // Block merging.
-        it->second.blocks_in_try.insert(other_it->second.blocks_in_try.begin(),
-                                        other_it->second.blocks_in_try.end());
+        it->second.blocks_in_try.Union(&other_it->second.blocks_in_try);
 
         // Add the coalesced try entry to update it too.
-        it->second.coalesced_try_entries.insert(other_it->first);
+        it->second.coalesced_try_entries.SetBit(other_block->GetBlockId());
 
         // Erase the other entry.
         other_it = tries.erase(other_it);
```
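
For context on point 2): once every basic block has a dense integer id (`GetBlockId()`), a set of blocks can be stored as a bit vector, and the set union performed during deduplication becomes a word-wise OR. A `ScopedArenaSet` merge, by contrast, does one ordered insertion per element, each of which may rebalance the underlying tree. The class below is an illustrative stand-in with the same shape as the `SetBit`/`Union` calls in the diff, not ART's actual `ArenaBitVector`.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Fixed-size bit set keyed by dense block ids, sketching the bit-vector
// operations the diff relies on. Illustrative only -- not ART's
// ArenaBitVector.
class BlockBitVectorSketch {
 public:
  explicit BlockBitVectorSketch(uint32_t num_blocks)
      : words_((num_blocks + 63) / 64, 0) {}

  void SetBit(uint32_t block_id) {
    words_[block_id / 64] |= uint64_t{1} << (block_id % 64);
  }

  bool IsBitSet(uint32_t block_id) const {
    return (words_[block_id / 64] >> (block_id % 64)) & 1;
  }

  // Set union as a linear pass of word-wide ORs: no allocation, no
  // comparisons, no tree rebalancing, regardless of how many elements
  // either operand holds.
  void Union(const BlockBitVectorSketch* other) {
    for (size_t i = 0; i < words_.size(); ++i) {
      words_[i] |= other->words_[i];
    }
  }

 private:
  std::vector<uint64_t> words_;
};
```

The trade-off is that iterating the members (the `Indexes()` loops in the diff) scans every word rather than visiting only stored elements, which is why sizing both vectors by `graph->GetBlocks().size()` and keeping block ids dense matters.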