diff options
author | 2023-04-06 16:13:44 +0100 | |
---|---|---|
committer | 2023-04-14 13:52:59 +0000 | |
commit | 81869cc06c19d5e549d369ff86d581efb87cf2b8 (patch) | |
tree | 2551a10e80f17b713e326a3f73f438ff4bbf6909 /compiler/optimizing/code_sinking.cc | |
parent | 77bbd1d06fc0d074eb53f88005c4c549351963ec (diff) |
Coalesce Return/ReturnVoid instructions
We do this to avoid generating the exit frame code several times.
We don't create new Return/ReturnVoid instructions after the
inliner, and CodeSinking looks like a logical place to add this
optimization.
Locally, speed-compiling on a Pixel 5:
* system server: -516.05KB (1.1%)
* SystemUIGoogle: -216.50KB (0.8%)
* AGSA: -3021.91KB (0.93%)
Bug: 203237752
Bug: 188926997
Bug: 194133637
Test: art/test/testrunner/testrunner.py --host --64 --optimizing -b
Change-Id: Ib149382462efac82a0bbc114cbdbc773e1a5228c
Diffstat (limited to 'compiler/optimizing/code_sinking.cc')
-rw-r--r-- | compiler/optimizing/code_sinking.cc | 88 |
1 file changed, 85 insertions, 3 deletions
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc index ac1cefe7c5..d759a16f48 100644 --- a/compiler/optimizing/code_sinking.cc +++ b/compiler/optimizing/code_sinking.cc @@ -29,11 +29,19 @@ namespace art HIDDEN { bool CodeSinking::Run() { - HBasicBlock* exit = graph_->GetExitBlock(); - if (exit == nullptr) { + if (graph_->GetExitBlock() == nullptr) { // Infinite loop, just bail. return false; } + + UncommonBranchSinking(); + ReturnSinking(); + return true; +} + /* Original body of Run(): sinks instructions towards exit predecessors that end in a throw, used as a proxy for uncommonly-taken branches (see the TODO below). */ +void CodeSinking::UncommonBranchSinking() { + HBasicBlock* exit = graph_->GetExitBlock(); + DCHECK(exit != nullptr); /* Run() already bailed on a null exit block. */ // TODO(ngeoffray): we do not profile branches yet, so use throw instructions // as an indicator of an uncommon branch. for (HBasicBlock* exit_predecessor : exit->GetPredecessors()) { @@ -60,7 +68,6 @@ bool CodeSinking::Run() { SinkCodeToUncommonBranch(exit_predecessor); } } - return true; } static bool IsInterestingInstruction(HInstruction* instruction) { @@ -531,4 +538,79 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) { } } /* Coalesces the Return/ReturnVoid predecessors of the Exit block into a single block ending in one Return (fed by a Phi) or one ReturnVoid, so the exit frame code is generated only once per method instead of once per return site. */ +void CodeSinking::ReturnSinking() { + HBasicBlock* exit = graph_->GetExitBlock(); + DCHECK(exit != nullptr); + + int number_of_returns = 0; + bool saw_return = false; + for (HBasicBlock* pred : exit->GetPredecessors()) { + // TODO(solanes): We might have Return/ReturnVoid->TryBoundary->Exit. We can theoretically + // handle them and move them out of the TryBoundary. However, it is a border case and it adds + // codebase complexity. + if (pred->GetLastInstruction()->IsReturn() || pred->GetLastInstruction()->IsReturnVoid()) { + saw_return |= pred->GetLastInstruction()->IsReturn(); /* NOTE(review): one flag assumes a graph never mixes Return and ReturnVoid — presumably guaranteed by the method's return type; confirm. */ + ++number_of_returns; + } + } + + if (number_of_returns < 2) { + // Nothing to do. /* With a single return site there is nothing to coalesce. */ + return; + } + + // `new_block` will coalesce the Return instructions into Phi+Return, or the ReturnVoid + // instructions into a ReturnVoid. 
+ HBasicBlock* new_block = new (graph_->GetAllocator()) HBasicBlock(graph_, exit->GetDexPc()); + if (saw_return) { + HPhi* new_phi = nullptr; /* Created lazily below, once a return value's type is known. */ + for (size_t i = 0; i < exit->GetPredecessors().size(); /*++i in loop*/) { + HBasicBlock* pred = exit->GetPredecessors()[i]; + if (!pred->GetLastInstruction()->IsReturn()) { + ++i; + continue; + } + + HReturn* ret = pred->GetLastInstruction()->AsReturn(); + if (new_phi == nullptr) { + // Create the new_phi, if we haven't done so yet. We do it here since we need to know the + // type to assign to it. + new_phi = new (graph_->GetAllocator()) HPhi(graph_->GetAllocator(), + kNoRegNumber, + /*number_of_inputs=*/0, + ret->InputAt(0)->GetType()); + new_block->AddPhi(new_phi); + } + new_phi->AddInput(ret->InputAt(0)); /* The returned value becomes one Phi input per rerouted predecessor. */ + pred->ReplaceAndRemoveInstructionWith(ret, + new (graph_->GetAllocator()) HGoto(ret->GetDexPc())); + pred->ReplaceSuccessor(exit, new_block); + // Since we are removing a predecessor, there's no need to increment `i`. + } + new_block->AddInstruction(new (graph_->GetAllocator()) HReturn(new_phi, exit->GetDexPc())); + } else { /* Void case: same rerouting, no Phi needed. */ + for (size_t i = 0; i < exit->GetPredecessors().size(); /*++i in loop*/) { + HBasicBlock* pred = exit->GetPredecessors()[i]; + if (!pred->GetLastInstruction()->IsReturnVoid()) { + ++i; + continue; + } + + HReturnVoid* ret = pred->GetLastInstruction()->AsReturnVoid(); + pred->ReplaceAndRemoveInstructionWith(ret, + new (graph_->GetAllocator()) HGoto(ret->GetDexPc())); + pred->ReplaceSuccessor(exit, new_block); + // Since we are removing a predecessor, there's no need to increment `i`. + } + new_block->AddInstruction(new (graph_->GetAllocator()) HReturnVoid(exit->GetDexPc())); + } + /* Wire the coalesced block into the graph as the (new) single return path to Exit. */ + new_block->AddSuccessor(exit); + graph_->AddBlock(new_block); + + // Recompute dominance since we added a new block. + graph_->ClearDominanceInformation(); + graph_->ComputeDominanceInformation(); +} + } // namespace art |