Code sinking cleanups am: 5dd14cf9ab am: ceb1deed2a

Original change: https://android-review.googlesource.com/c/platform/art/+/2446502

Change-Id: I31df817cb00cbb05fbaa180b633070da2c740fe0
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index 76885ec..ac1cefe 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -19,6 +19,7 @@
 #include "base/arena_bit_vector.h"
 #include "base/array_ref.h"
 #include "base/bit_vector-inl.h"
+#include "base/globals.h"
 #include "base/logging.h"
 #include "base/scoped_arena_allocator.h"
 #include "base/scoped_arena_containers.h"
@@ -105,7 +106,7 @@
 
   // We can only store on local allocations. Other heap references can
   // be escaping. Note that allocations can escape too, but we only move
-  // allocations if their users can move to, or are in the list of
+  // allocations if their users can move too, or are in the list of
   // post dominated blocks.
   if (instruction->IsInstanceFieldSet()) {
     if (!instruction->InputAt(0)->IsNewInstance()) {
@@ -119,7 +120,7 @@
     }
   }
 
-  // Heap accesses cannot go pass instructions that have memory side effects, which
+  // Heap accesses cannot go past instructions that have memory side effects, which
   // we are not tracking here. Note that the load/store elimination optimization
   // runs before this optimization, and should have removed interesting ones.
   // In theory, we could handle loads of local allocations, but this is currently
@@ -236,48 +237,42 @@
     target_block = target_block->GetDominator();
     DCHECK(target_block != nullptr);
   }
-  const bool was_in_loop = target_block->IsInLoop();
 
-  // For throwing instructions we can move them into:
-  //   * Blocks that are not part of a try
-  //     * Catch blocks are suitable as well, as long as they are not part of an outer try.
-  //   * Blocks that are part of the same try that the instrucion was already in.
-  //
-  // We cannot move an instruction that can throw into a try that said instruction is not a part of
-  // already, as that would mean it will throw into a different catch block. If we detect that
-  // `target_block` is not a valid block to move `instruction` to, we traverse up the dominator tree
-  // to find if we have a suitable block.
-  while (instruction->CanThrow() && target_block->GetTryCatchInformation() != nullptr) {
-    if (target_block->IsCatchBlock()) {
-      // If the catch block has an xhandler, it means it is inside of an outer try.
-      const bool inside_of_another_try_catch = target_block->GetSuccessors().size() != 1;
-      if (!inside_of_another_try_catch) {
-        // If we have a catch block, it's okay to sink as long as that catch is not inside of
-        // another try catch.
-        break;
+  if (instruction->CanThrow()) {
+    // Consistency check: we shouldn't land in a loop if we weren't in one before walking up the
+    // dominator tree for the try/catch handling below.
+    const bool was_in_loop = target_block->IsInLoop();
+
+    // We cannot move an instruction that can throw into a try that said instruction is not a part
+    // of already, as that would mean it would throw into a different catch block. In short, for
+    // throwing instructions:
+    // * If the throwing instruction is part of a try, it should only be sunk into that same try.
+    // * If the throwing instruction is not part of any try, it shouldn't be sunk into any try.
+    if (instruction->GetBlock()->IsTryBlock()) {
+      const HTryBoundary& try_entry =
+          instruction->GetBlock()->GetTryCatchInformation()->GetTryEntry();
+      while (!(target_block->IsTryBlock() &&
+               try_entry.HasSameExceptionHandlersAs(
+                   target_block->GetTryCatchInformation()->GetTryEntry()))) {
+        target_block = target_block->GetDominator();
+        if (!post_dominated.IsBitSet(target_block->GetBlockId())) {
+          // We couldn't find a suitable block.
+          return nullptr;
+        }
       }
     } else {
-      DCHECK(target_block->IsTryBlock());
-      if (instruction->GetBlock()->IsTryBlock() &&
-          instruction->GetBlock()->GetTryCatchInformation()->GetTryEntry().GetId() ==
-              target_block->GetTryCatchInformation()->GetTryEntry().GetId()) {
-        // Sink within the same try block is allowed.
-        break;
+      // Search for the first block that is not part of a try either.
+      while (target_block->IsTryBlock()) {
+        target_block = target_block->GetDominator();
+        if (!post_dominated.IsBitSet(target_block->GetBlockId())) {
+          // We couldn't find a suitable block.
+          return nullptr;
+        }
       }
     }
-    // We are now in the case where we would be moving to a different try. Since we don't want
-    // that, traverse up the dominator tree to find a suitable block.
-    if (!post_dominated.IsBitSet(target_block->GetDominator()->GetBlockId())) {
-      // We couldn't find a suitable block.
-      return nullptr;
-    }
-    target_block = target_block->GetDominator();
-    DCHECK(target_block != nullptr);
-  }
 
-  // We shouldn't land in a loop if we weren't in one before traversing up the dominator tree
-  // regarding try catches.
-  DCHECK_IMPLIES(target_block->IsInLoop(), was_in_loop);
+    DCHECK_IMPLIES(target_block->IsInLoop(), was_in_loop);
+  }
 
   // Find insertion position. No need to filter anymore, as we have found a
   // target block.
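
For illustration, the two loops added above collapse to a single walk up the dominator tree: keep climbing while the candidate block's try membership differs from the instruction's, and give up as soon as the candidate leaves the post-dominated set. A minimal standalone sketch of that walk, assuming hypothetical stand-in types (`Block`, `try_id`, and `FindThrowSafeTarget` are illustrative, not ART's `HBasicBlock`/`HTryBoundary` API; `try_id` approximates "same exception handlers" with a plain integer):

#include <cstdint>

// Hypothetical stand-in for HBasicBlock. `try_id` is -1 outside any try;
// equal ids stand in for "same exception handlers".
struct Block {
  Block* dominator = nullptr;   // Immediate dominator; nullptr at the entry.
  int32_t try_id = -1;
  bool post_dominated = false;  // Stand-in for the `post_dominated` bit vector.
};

// Walk up the dominator tree from `target` (non-null, post-dominated) until
// its try membership matches `instruction_try_id`, giving up once we leave
// the post-dominated set. Mirrors both loops added above.
Block* FindThrowSafeTarget(Block* target, int32_t instruction_try_id) {
  while (target->try_id != instruction_try_id) {
    target = target->dominator;
    if (target == nullptr || !target->post_dominated) {
      return nullptr;  // We couldn't find a suitable block.
    }
  }
  return target;
}
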
@@ -339,8 +334,8 @@
   ScopedArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
 
   // Step (1): Visit post order to get a subset of blocks post dominated by `end_block`.
-  // TODO(ngeoffray): Getting the full set of post-dominated shoud be done by
-  // computint the post dominator tree, but that could be too time consuming. Also,
+  // TODO(ngeoffray): Getting the full set of post-dominated should be done by
+  // computing the post dominator tree, but that could be too time consuming. Also,
   // we should start the analysis from blocks dominated by an uncommon branch, but we
   // don't profile branches yet.
   bool found_block = false;
@@ -350,45 +345,43 @@
       post_dominated.SetBit(block->GetBlockId());
     } else if (found_block) {
       bool is_post_dominated = true;
-      if (block->GetSuccessors().empty()) {
-        // We currently bail for loops.
-        is_post_dominated = false;
-      } else {
-        // BasicBlock that are try entries look like this:
-        //   BasicBlock i:
-        //     instr 1
-        //     ...
-        //     instr N
-        //     TryBoundary kind:entry ---Try begins here---
-        //
-        // Due to how our BasicBlocks are structured, BasicBlock i will have an xhandler successor
-        // since we are starting a try. If we use `GetSuccessors` for this case, we will check if
-        // the catch block is post_dominated.
-        //
-        // However, this catch block doesn't matter: when we sink the instruction into that
-        // BasicBlock i, we do it before the TryBoundary (i.e. outside of the try and outside the
-        // catch's domain). We can ignore catch blocks using `GetNormalSuccessors` to sink code
-        // right before the start of a try block.
-        //
-        // On the other side of the coin, BasicBlock that are try exits look like this:
-        //   BasicBlock j:
-        //     instr 1
-        //     ...
-        //     instr N
-        //     TryBoundary kind:exit ---Try ends here---
-        //
-        // If we sink to these basic blocks we would be sinking inside of the try so we would like
-        // to check the catch block for post dominance.
-        const bool ends_with_try_boundary_entry =
-            block->EndsWithTryBoundary() && block->GetLastInstruction()->AsTryBoundary()->IsEntry();
-        ArrayRef<HBasicBlock* const> successors =
-            ends_with_try_boundary_entry ? block->GetNormalSuccessors() :
-                                           ArrayRef<HBasicBlock* const>(block->GetSuccessors());
-        for (HBasicBlock* successor : successors) {
-          if (!post_dominated.IsBitSet(successor->GetBlockId())) {
-            is_post_dominated = false;
-            break;
-          }
+      DCHECK_NE(block, graph_->GetExitBlock())
+          << "We shouldn't encounter the exit block after `end_block`.";
+
+      // BasicBlocks that are try entries look like this:
+      //   BasicBlock i:
+      //     instr 1
+      //     ...
+      //     instr N
+      //     TryBoundary kind:entry ---Try begins here---
+      //
+      // Due to how our BasicBlocks are structured, BasicBlock i will have an xhandler successor
+      // since we are starting a try. If we use `GetSuccessors` for this case, we will check if
+      // the catch block is post_dominated.
+      //
+      // However, this catch block doesn't matter: when we sink the instruction into that
+      // BasicBlock i, we do it before the TryBoundary (i.e. outside of the try and outside the
+      // catch's domain). We can ignore catch blocks using `GetNormalSuccessors` to sink code
+      // right before the start of a try block.
+      //
+      // On the other side of the coin, BasicBlocks that are try exits look like this:
+      //   BasicBlock j:
+      //     instr 1
+      //     ...
+      //     instr N
+      //     TryBoundary kind:exit ---Try ends here---
+      //
+      // If we sink to these basic blocks, we would be sinking inside of the try, so we would
+      // like to check the catch block for post dominance.
+      const bool ends_with_try_boundary_entry =
+          block->EndsWithTryBoundary() && block->GetLastInstruction()->AsTryBoundary()->IsEntry();
+      ArrayRef<HBasicBlock* const> successors =
+          ends_with_try_boundary_entry ? block->GetNormalSuccessors() :
+                                         ArrayRef<HBasicBlock* const>(block->GetSuccessors());
+      for (HBasicBlock* successor : successors) {
+        if (!post_dominated.IsBitSet(successor->GetBlockId())) {
+          is_post_dominated = false;
+          break;
         }
       }
       if (is_post_dominated) {
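
For illustration, the successor filtering above reduces to one rule: a block joins the post-dominated set only if every successor that matters is already in it, where a try-entry block ignores its catch-block successor. A minimal sketch with hypothetical stand-in types (`SimpleBlock` and `IsPostDominated` are not ART API; the real code derives the successor choice from `EndsWithTryBoundary()` and `GetNormalSuccessors()`):

#include <vector>

// Hypothetical stand-in for HBasicBlock with precomputed successor lists.
struct SimpleBlock {
  std::vector<SimpleBlock*> successors;         // All successors, xhandlers included.
  std::vector<SimpleBlock*> normal_successors;  // Successors minus catch blocks.
  bool ends_with_try_entry = false;             // EndsWithTryBoundary() && IsEntry().
  bool post_dominated = false;
};

// A block is post-dominated only if all relevant successors already are.
// For try entries we ignore the catch-block successor, since the sink lands
// before the TryBoundary, i.e. outside the try.
bool IsPostDominated(const SimpleBlock& block) {
  const std::vector<SimpleBlock*>& succs =
      block.ends_with_try_entry ? block.normal_successors : block.successors;
  for (const SimpleBlock* successor : succs) {
    if (!successor->post_dominated) {
      return false;
    }
  }
  return true;
}
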
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 8e6c64d..190b362 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -319,6 +319,12 @@
     }
   }
 
+  // Ensure all blocks have at least one successor, except the Exit block.
+  if (block->GetSuccessors().empty() && !block->IsExitBlock()) {
+    AddError(StringPrintf("Block %d has no successor and it is not the Exit block.",
+                          block->GetBlockId()));
+  }
+
   // Ensure there is no critical edge (i.e., an edge connecting a
   // block with multiple successors to a block with multiple
   // predecessors). Exceptional edges are synthesized and hence
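
For illustration, the added check encodes a simple CFG invariant: only the exit block may have zero successors. A self-contained sketch of the same rule, with `CheckedBlock` and `CheckSuccessors` as hypothetical stand-ins for the real `GraphChecker` machinery:

#include <cstdio>
#include <vector>

struct CheckedBlock {
  int id = 0;
  bool is_exit = false;
  std::vector<const CheckedBlock*> successors;
};

// Reports every block violating the invariant; returns true if the graph is clean.
bool CheckSuccessors(const std::vector<CheckedBlock>& blocks) {
  bool ok = true;
  for (const CheckedBlock& block : blocks) {
    if (block.successors.empty() && !block.is_exit) {
      std::fprintf(stderr,
                   "Block %d has no successor and it is not the Exit block.\n",
                   block.id);
      ok = false;
    }
  }
  return ok;
}
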
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index ee8069a..17e7f1f 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -230,10 +230,9 @@
 
 TEST_F(LoadStoreAnalysisTest, ArrayIndexAliasingTest) {
   CreateGraph();
-  HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
-  graph_->AddBlock(entry);
-  graph_->SetEntryBlock(entry);
-  graph_->BuildDominatorTree();
+  AdjacencyListGraph blks(
+      SetupFromAdjacencyList("entry", "exit", {{"entry", "body"}, {"body", "exit"}}));
+  HBasicBlock* body = blks.Get("body");
 
   HInstruction* array = new (GetAllocator()) HParameterValue(
       graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
@@ -263,23 +262,25 @@
   HInstruction* arr_set8 =
       new (GetAllocator()) HArraySet(array, sub_neg1, c0, DataType::Type::kInt32, 0);
 
-  entry->AddInstruction(array);
-  entry->AddInstruction(index);
-  entry->AddInstruction(add0);
-  entry->AddInstruction(add1);
-  entry->AddInstruction(sub0);
-  entry->AddInstruction(sub1);
-  entry->AddInstruction(sub_neg1);
-  entry->AddInstruction(rev_sub1);
+  body->AddInstruction(array);
+  body->AddInstruction(index);
+  body->AddInstruction(add0);
+  body->AddInstruction(add1);
+  body->AddInstruction(sub0);
+  body->AddInstruction(sub1);
+  body->AddInstruction(sub_neg1);
+  body->AddInstruction(rev_sub1);
 
-  entry->AddInstruction(arr_set1);  // array[0] = c0
-  entry->AddInstruction(arr_set2);  // array[1] = c0
-  entry->AddInstruction(arr_set3);  // array[i+0] = c0
-  entry->AddInstruction(arr_set4);  // array[i+1] = c0
-  entry->AddInstruction(arr_set5);  // array[i-0] = c0
-  entry->AddInstruction(arr_set6);  // array[i-1] = c0
-  entry->AddInstruction(arr_set7);  // array[1-i] = c0
-  entry->AddInstruction(arr_set8);  // array[i-(-1)] = c0
+  body->AddInstruction(arr_set1);  // array[0] = c0
+  body->AddInstruction(arr_set2);  // array[1] = c0
+  body->AddInstruction(arr_set3);  // array[i+0] = c0
+  body->AddInstruction(arr_set4);  // array[i+1] = c0
+  body->AddInstruction(arr_set5);  // array[i-0] = c0
+  body->AddInstruction(arr_set6);  // array[i-1] = c0
+  body->AddInstruction(arr_set7);  // array[1-i] = c0
+  body->AddInstruction(arr_set8);  // array[i-(-1)] = c0
+
+  body->AddInstruction(new (GetAllocator()) HReturnVoid());
 
   ScopedArenaAllocator allocator(graph_->GetArenaStack());
   LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kBasic);
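
The test change follows from the new `GraphChecker` rule above: a lone entry block with no successors is no longer a well-formed graph, so the test builds a proper entry -> body -> exit shape and terminates `body` with `HReturnVoid`. A minimal sketch of the adjacency-list idea, with `TestBlock` and `BuildCfg` as hypothetical stand-ins for `AdjacencyListGraph`/`SetupFromAdjacencyList`:

#include <map>
#include <string>
#include <utility>
#include <vector>

struct TestBlock {
  std::string name;
  std::vector<TestBlock*> successors;
};

// Hypothetical helper mirroring SetupFromAdjacencyList: creates one block per
// named node and wires the listed edges. std::map keeps element addresses
// stable, so the successor pointers remain valid.
std::map<std::string, TestBlock> BuildCfg(
    const std::vector<std::pair<std::string, std::string>>& edges) {
  std::map<std::string, TestBlock> blocks;
  for (const auto& edge : edges) {
    blocks[edge.first].name = edge.first;
    blocks[edge.second].name = edge.second;
  }
  for (const auto& edge : edges) {
    blocks[edge.first].successors.push_back(&blocks[edge.second]);
  }
  return blocks;
}

// Usage mirroring the test: every non-exit block ends up with a successor,
// so the new GraphChecker rule is satisfied.
// auto cfg = BuildCfg({{"entry", "body"}, {"body", "exit"}});
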