Optimizing: Do not use range-based loops when inserting elements.

When we iterate over the elements of a container and may insert
new elements into it, it is wrong to use a range-based loop:
insertion can reallocate the underlying storage and invalidate
the iterators the loop relies on. Instead, iterate by index and
remember the initial size so that newly appended elements are
not visited.
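
For illustration, a minimal sketch of the pattern adopted below
(std::vector stands in for the graph's ArenaVector of blocks;
the function and values are hypothetical):

    #include <cstddef>
    #include <vector>

    void VisitAndAppend(std::vector<int>& blocks) {
      // Unsafe: push_back() may reallocate the storage and
      // invalidate the iterators a range-based loop holds:
      //   for (int b : blocks) { blocks.push_back(b); }  // UB
      //
      // Safe: index into the container and remember the initial
      // size so that newly appended elements are not visited.
      for (std::size_t i = 0u, end = blocks.size(); i != end; ++i) {
        int block = blocks[i];
        if (block == 0) {
          blocks.push_back(1);  // Lands past `end`; never visited.
        }
      }
    }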

Bug: 24133462
Change-Id: Iee35fbcf88ed3bcd6155cbeba09bd256032a16be
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 9772ee7..274a2a6 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -342,7 +342,10 @@
   ArenaBitVector can_block_throw(arena_, graph_->GetBlocks().size(), /* expandable */ true);
 
   // Scan blocks and mark those which contain throwing instructions.
-  for (HBasicBlock* block : graph_->GetBlocks()) {
+  // NOTE: We're appending new blocks inside the loop, so we need to use an index because iterators
+  // can be invalidated. We remember the initial size to avoid iterating over the new blocks.
+  for (size_t block_id = 0u, end = graph_->GetBlocks().size(); block_id != end; ++block_id) {
+    HBasicBlock* block = graph_->GetBlocks()[block_id];
     bool can_throw = false;
     for (HInstructionIterator insn(block->GetInstructions()); !insn.Done(); insn.Advance()) {
       if (insn.Current()->CanThrow()) {
@@ -380,7 +383,10 @@
   //   (c) link the new blocks to corresponding exception handlers.
   // We cannot iterate only over blocks in `branch_targets_` because switch-case
   // blocks share the same dex_pc.
-  for (HBasicBlock* try_block : graph_->GetBlocks()) {
+  // NOTE: We're appending new blocks inside the loop, so we need to use an index because iterators
+  // can be invalidated. We remember the initial size to avoid iterating over the new blocks.
+  for (size_t block_id = 0u, end = graph_->GetBlocks().size(); block_id != end; ++block_id) {
+    HBasicBlock* try_block = graph_->GetBlocks()[block_id];
     // TryBoundary blocks are added at the end of the list and not iterated over.
     DCHECK(!try_block->IsSingleTryBoundary());
 
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 54f4071..b2407c5 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -289,7 +289,10 @@
 }
 
 void HGraph::SimplifyCatchBlocks() {
-  for (HBasicBlock* catch_block : blocks_) {
+  // NOTE: We're appending new blocks inside the loop, so we need to use an index because iterators
+  // can be invalidated. We remember the initial size to avoid iterating over the new blocks.
+  for (size_t block_id = 0u, end = blocks_.size(); block_id != end; ++block_id) {
+    HBasicBlock* catch_block = blocks_[block_id];
     if (!catch_block->IsCatchBlock()) {
       continue;
     }
@@ -349,7 +352,10 @@
   // Simplify the CFG for future analysis, and code generation:
   // (1): Split critical edges.
   // (2): Simplify loops by having only one back edge, and one preheader.
-  for (HBasicBlock* block : blocks_) {
+  // NOTE: We're appending new blocks inside the loop, so we need to use an index because iterators
+  // can be invalidated. We remember the initial size to avoid iterating over the new blocks.
+  for (size_t block_id = 0u, end = blocks_.size(); block_id != end; ++block_id) {
+    HBasicBlock* block = blocks_[block_id];
     if (block == nullptr) continue;
     if (block->NumberOfNormalSuccessors() > 1) {
       for (size_t j = 0; j < block->GetSuccessors().size(); ++j) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 33c0d88..38f4667 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3028,7 +3028,8 @@
     : HInstruction(
           SideEffects::AllExceptGCDependency(), dex_pc),  // Assume write/read on all fields/arrays.
       number_of_arguments_(number_of_arguments),
-      inputs_(number_of_arguments + number_of_other_inputs, arena->Adapter(kArenaAllocInvokeInputs)),
+      inputs_(number_of_arguments + number_of_other_inputs,
+              arena->Adapter(kArenaAllocInvokeInputs)),
       return_type_(return_type),
       dex_method_index_(dex_method_index),
       original_invoke_type_(original_invoke_type),