Update domination chain and RPO manually in MaybeAddExtraGotoBlocks

There's no need to recompute the dominator tree and RPO for the whole
graph, as we know exactly what changed.

As a drive-by, we no longer return false for graphs with irreducible
loops, so we can remove that restriction from the builder. However,
if a graph with irreducible loops hits this path it means that:
  A) it's being inlined, and
  B) it has irreducible loops.

We don't inline graphs with irreducible loops, and after building
for inline we don't remove them either: constant folding and the
instruction simplifier don't remove them, and DCE doesn't run for
graphs with irreducible loops. So, in terms of dex2oat's output,
nothing should change.

Test: art/test/testrunner/testrunner.py --host --64 --optimizing -b
Change-Id: I8cbf1b5f0518bb5dd14ffd751100ea81f5478863
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index da791a7..897d8b7 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -113,36 +113,38 @@
   return !last_instruction->IsThrow();
 }
 
-bool HGraphBuilder::MaybeAddExtraGotoBlocks() {
-  if (graph_->GetExitBlock() == nullptr) {
-    return true;
+void HGraphBuilder::MaybeAddExtraGotoBlocks() {
+  HBasicBlock* exit = graph_->GetExitBlock();
+  if (exit == nullptr) {
+    return;
   }
 
-  bool added_block = false;
-  for (size_t pred = 0, size = graph_->GetExitBlock()->GetPredecessors().size(); pred < size;
-       ++pred) {
-    HBasicBlock* predecessor = graph_->GetExitBlock()->GetPredecessors()[pred];
+  for (size_t pred = 0, size = exit->GetPredecessors().size(); pred < size; ++pred) {
+    HBasicBlock* predecessor = exit->GetPredecessors()[pred];
     if (NeedsExtraGotoBlock(predecessor)) {
-      added_block = true;
-      graph_->SplitEdge(predecessor, graph_->GetExitBlock())
-          ->AddInstruction(new (graph_->GetAllocator()) HGoto(predecessor->GetDexPc()));
-    }
-  }
+      HBasicBlock* new_goto = graph_->SplitEdgeAndUpdateRPO(predecessor, exit);
+      new_goto->AddInstruction(new (graph_->GetAllocator()) HGoto(predecessor->GetDexPc()));
 
-  // TODO(solanes): Avoid recomputing the full dominator tree by manually updating the relevant
-  // information (loop information, dominance, try catch information).
-  if (added_block) {
-    if (graph_->HasIrreducibleLoops()) {
-      // Recomputing loop information in graphs with irreducible loops is unsupported, as it could
-      // lead to loop header changes. In this case it is safe to abort since we don't inline graphs
-      // with irreducible loops anyway.
-      return false;
+      // No need to update loop info of the new block.
+      DCHECK(!predecessor->IsInLoop())
+          << " we should only add the extra Goto blocks for Return/ReturnVoid->TryBoundary->Exit "
+          << "chains. In those chains, the TryBoundary of kind:exit should never be a part of a "
+          << "loop";
+
+      // Update domination chain
+      if (!predecessor->GetDominatedBlocks().empty()) {
+        DCHECK_EQ(predecessor->GetDominatedBlocks().size(), 1u);
+        DCHECK_EQ(predecessor->GetDominatedBlocks()[0], exit);
+        new_goto->AddDominatedBlock(exit);
+        predecessor->RemoveDominatedBlock(exit);
+        exit->SetDominator(new_goto);
+      }
+
+      DCHECK(predecessor->GetDominatedBlocks().empty());
+      predecessor->AddDominatedBlock(new_goto);
+      new_goto->SetDominator(predecessor);
     }
-    graph_->ClearLoopInformation();
-    graph_->ClearDominanceInformation();
-    graph_->BuildDominatorTree();
   }
-  return true;
 }
 
 GraphAnalysisResult HGraphBuilder::BuildGraph(bool build_for_inline) {
@@ -199,9 +201,7 @@
   // 5) When inlining, we want to add a Goto block if we have Return/ReturnVoid->TryBoundary->Exit
   // since we will have Return/ReturnVoid->TryBoundary->`continue to normal execution` once inlined.
   if (build_for_inline) {
-    if (!MaybeAddExtraGotoBlocks()) {
-      return kAnalysisFailInliningIrreducibleLoop;
-    }
+    MaybeAddExtraGotoBlocks();
   }
 
   // 6) Type the graph and eliminate dead/redundant phis.
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 85fa8d4..145dbfb 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -57,8 +57,7 @@
 
   // When inlining, we sometimes want to add an extra Goto block before the Exit block. This is done
   // in the building phase as we do not allow the inlining phase to add new instructions.
-  // Returns false if the graph we are adding the extra block has irreducible loops.
-  bool MaybeAddExtraGotoBlocks();
+  void MaybeAddExtraGotoBlocks();
 
   HGraph* const graph_;
   const DexFile* const dex_file_;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 841576a..4a0ec93 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -569,6 +569,15 @@
   }
 }
 
+HBasicBlock* HGraph::SplitEdgeAndUpdateRPO(HBasicBlock* block, HBasicBlock* successor) {
+  HBasicBlock* new_block = SplitEdge(block, successor);
+  // In the RPO we have {... , block, ... , successor}. We want to insert `new_block` right after
+  // `block` to have a consistent RPO without recomputing the whole graph's RPO.
+  reverse_post_order_.insert(
+      reverse_post_order_.begin() + IndexOfElement(reverse_post_order_, block) + 1, new_block);
+  return new_block;
+}
+
 // Reorder phi inputs to match reordering of the block's predecessors.
 static void FixPhisAfterPredecessorsReodering(HBasicBlock* block, size_t first, size_t second) {
   for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 006ccfb..591087b 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -138,7 +138,6 @@
   kAnalysisInvalidBytecode,
   kAnalysisFailThrowCatchLoop,
   kAnalysisFailAmbiguousArrayOp,
-  kAnalysisFailInliningIrreducibleLoop,
   kAnalysisFailIrreducibleLoopAndStringInit,
   kAnalysisFailPhiEquivalentInOsr,
   kAnalysisSuccess,
@@ -515,6 +514,11 @@
   HBasicBlock* SplitEdge(HBasicBlock* block, HBasicBlock* successor);
 
   void SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor);
+
+  // Splits the edge between `block` and `successor` and then updates the graph's RPO to keep
+  // consistency without recomputing the whole graph.
+  HBasicBlock* SplitEdgeAndUpdateRPO(HBasicBlock* block, HBasicBlock* successor);
+
   void OrderLoopHeaderPredecessors(HBasicBlock* header);
 
   // Transform a loop into a format with a single preheader.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 7cb4b29..807c78e 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -877,11 +877,6 @@
                           MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
           break;
         }
-        case kAnalysisFailInliningIrreducibleLoop: {
-          MaybeRecordStat(compilation_stats_.get(),
-                          MethodCompilationStat::kNotCompiledInliningIrreducibleLoop);
-          break;
-        }
         case kAnalysisFailIrreducibleLoopAndStringInit: {
           MaybeRecordStat(compilation_stats_.get(),
                           MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit);