Fix using LiveIntervals beyond their lifetime.

LiveIntervals belong to the register allocator and must not be used
once allocation has finished. The code generator was still
dereferencing them in two places: when recording phi locations in dex
register maps (GetLiveInterval()->ToLocation()) and when clearing
loop phi spill slots in ClearSpillSlotsFromLoopPhisInStackMap(). Take
the locations from the LocationSummary instead, and derive the spill
slots from the loop header's first HParallelMove, which each backend's
VisitParallelMove now passes along when the move is immediately
followed by the loop's suspend check. In debug builds,
~RegisterAllocator() now poisons all live interval pointers so that
any later use crashes immediately instead of reading stale data.

Fixes a bug introduced by
    https://android-review.googlesource.com/504041

Test: test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 64312607
Change-Id: I7fd2d55c2a657f736eaed7c94c41d1237ae2ec0b
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index dd8e3d2..84f0182 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -935,7 +935,7 @@
       if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
         stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
       } else {
-        Location location = current_phi->GetLiveInterval()->ToLocation();
+        Location location = current_phi->GetLocations()->Out();
         switch (location.GetKind()) {
           case Location::kStackSlot: {
             stack_map_stream_.AddDexRegisterEntry(
@@ -1202,22 +1202,21 @@
   }
 }
 
-void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
+void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
+                                                          HParallelMove* spills) const {
   LocationSummary* locations = suspend_check->GetLocations();
   HBasicBlock* block = suspend_check->GetBlock();
   DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
   DCHECK(block->IsLoopHeader());
+  DCHECK(block->GetFirstInstruction() == spills);
 
-  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
-    HInstruction* current = it.Current();
-    LiveInterval* interval = current->GetLiveInterval();
-    // We only need to clear bits of loop phis containing objects and allocated in register.
-    // Loop phis allocated on stack already have the object in the stack.
-    if (current->GetType() == DataType::Type::kReference
-        && interval->HasRegister()
-        && interval->HasSpillSlot()) {
-      locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize);
-    }
+  for (size_t i = 0, num_moves = spills->NumMoves(); i != num_moves; ++i) {
+    Location dest = spills->MoveOperandsAt(i)->GetDestination();
+    // All parallel moves in loop headers are spills.
+    DCHECK(dest.IsStackSlot() || dest.IsDoubleStackSlot() || dest.IsSIMDStackSlot()) << dest;
+    // Clear the stack bit marking a reference. Do not bother to check whether the
+    // spill is actually a reference spill; clearing bits that are already zero is harmless.
+    locations->ClearStackBit(dest.GetStackIndex() / kVRegSize);
   }
 }
 
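A note on the index arithmetic above: a slot's stack-map bit is simply its
byte offset divided by the vreg size. A minimal standalone sketch, assuming
ART's 4-byte virtual register slots (the kVRegSize here is a stand-in
constant, not the real header):

    #include <cstddef>

    // Mirrors dest.GetStackIndex() / kVRegSize above: one stack-map bit
    // per 32-bit dex virtual register slot. Assumed value, sketch only.
    constexpr size_t kVRegSize = 4;

    constexpr size_t StackBitForSlot(size_t byte_offset) {
      return byte_offset / kVRegSize;
    }

    static_assert(StackBitForSlot(16) == 4, "slot at byte offset 16 is bit 4");

A double or SIMD slot clears only the bit of its low word, which is enough:
references never occupy wide slots, so the remaining bits are already zero.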
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2c3cf26..2904b71 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -380,7 +380,8 @@
   // for the suspend check at the back edge (instead of where the suspend check
   // is, which is the loop entry). At this point, the spill slots for the phis
   // have not been written to.
-  void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const;
+  void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
+                                             HParallelMove* spills) const;
 
   bool* GetBlockedCoreRegisters() const { return blocked_core_registers_; }
   bool* GetBlockedFloatingPointRegisters() const { return blocked_fpu_registers_; }
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 9be9117..e6e6984 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2209,7 +2209,6 @@
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
       DCHECK(successor->IsLoopHeader());
-      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
     }
   } else {
     DCHECK_EQ(slow_path->GetSuccessor(), successor);
@@ -3560,7 +3559,6 @@
   HLoopInformation* info = block->GetLoopInformation();
 
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -5420,6 +5418,13 @@
 }
 
 void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
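The same guard now opens VisitParallelMove in every backend below. As a toy
model of the shared control flow, using hypothetical stand-in types rather
than ART's real classes:

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-ins for HInstruction / HParallelMove.
    struct Instruction {
      bool is_suspend_check = false;
      bool in_loop_header = false;
      Instruction* next = nullptr;
    };

    struct ParallelMove : Instruction {
      std::vector<int> spill_offsets;  // byte offsets of the spill destinations
    };

    // Stand-in for ClearSpillSlotsFromLoopPhisInStackMap().
    void ClearSpillBits(const ParallelMove& spills) {
      constexpr int kVRegSize = 4;
      for (int offset : spills.spill_offsets) {
        std::printf("clear stack bit %d\n", offset / kVRegSize);
      }
    }

    // Peek at the next instruction: if this move feeds straight into the
    // loop header's suspend check, it holds the phi spills whose stack
    // bits must be cleared before that suspend check's stack map is built.
    void VisitParallelMove(ParallelMove* move) {
      if (move->next != nullptr && move->next->is_suspend_check &&
          move->in_loop_header) {
        ClearSpillBits(*move);
      }
      // ... then resolve and emit the native moves.
    }

    int main() {
      Instruction check;
      check.is_suspend_check = true;
      ParallelMove spills;
      spills.in_loop_header = true;
      spills.next = &check;
      spills.spill_offsets = {16, 20};
      VisitParallelMove(&spills);  // prints "clear stack bit 4" then "... 5"
      return 0;
    }

Passing the spill move itself, rather than re-deriving slots from phi live
intervals, is what lets ClearSpillSlotsFromLoopPhisInStackMap run without
touching LiveInterval data after allocation.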
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index d7137a3..251f390 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2858,7 +2858,6 @@
   HLoopInformation* info = block->GetLoopInformation();
 
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -6741,6 +6740,13 @@
 }
 
 void InstructionCodeGeneratorARMVIXL::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
@@ -6776,7 +6782,6 @@
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
       DCHECK(successor->IsLoopHeader());
-      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
     }
   } else {
     DCHECK_EQ(slow_path->GetSuccessor(), successor);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 7ea7b9c..e58f43e 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3967,7 +3967,6 @@
   HLoopInformation* info = block->GetLoopInformation();
 
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -8359,6 +8358,13 @@
 }
 
 void InstructionCodeGeneratorMIPS::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index fad0fe7..11120cf 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3488,7 +3488,6 @@
   HLoopInformation* info = block->GetLoopInformation();
 
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -6490,6 +6489,13 @@
 }
 
 void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d8a47fa..39a07b8 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -5680,6 +5680,13 @@
 }
 
 void InstructionCodeGeneratorX86::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
@@ -5717,7 +5724,6 @@
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
       DCHECK(successor->IsLoopHeader());
-      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
     }
   } else {
     DCHECK_EQ(slow_path->GetSuccessor(), successor);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index b6aa110..c8032c2 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5125,6 +5125,13 @@
 }
 
 void InstructionCodeGeneratorX86_64::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
@@ -5162,7 +5169,6 @@
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
       DCHECK(successor->IsLoopHeader());
-      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
     }
   } else {
     DCHECK_EQ(slow_path->GetSuccessor(), successor);
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index ece9904..86e9713 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -53,6 +53,21 @@
   }
 }
 
+RegisterAllocator::~RegisterAllocator() {
+  if (kIsDebugBuild) {
+    // Poison live interval pointers with "Error: BAD 71ve1nt3rval."
+    LiveInterval* bad_live_interval = reinterpret_cast<LiveInterval*>(0xebad7113u);
+    for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
+      for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+        it.Current()->SetLiveInterval(bad_live_interval);
+      }
+      for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+        it.Current()->SetLiveInterval(bad_live_interval);
+      }
+    }
+  }
+}
+
 bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
                                                 InstructionSet instruction_set) {
   return instruction_set == kArm
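The poison value spells out the comment's "BAD 71ve1nt3rval": any stale
GetLiveInterval() dereference after allocation now faults at the
recognizable address 0xebad7113 instead of silently reading reused memory.
A tiny illustration with hypothetical types, not ART code:

    struct LiveInterval { int spill_slot; };

    struct Instruction {
      LiveInterval* live_interval = nullptr;
      void SetLiveInterval(LiveInterval* li) { live_interval = li; }
      LiveInterval* GetLiveInterval() const { return live_interval; }
    };

    int main() {
      Instruction insn;
      // Once allocation is done, debug builds poison the pointer.
      insn.SetLiveInterval(reinterpret_cast<LiveInterval*>(0xebad7113u));
      // From here on, insn.GetLiveInterval()->spill_slot crashes at
      // 0xebad7113 rather than yielding a stale location.
      return 0;
    }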
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index eaeec3b..e284498 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -50,7 +50,7 @@
                                                    const SsaLivenessAnalysis& analysis,
                                                    Strategy strategy = kRegisterAllocatorDefault);
 
-  virtual ~RegisterAllocator() = default;
+  virtual ~RegisterAllocator();
 
   // Main entry point for the register allocator. Given the liveness analysis,
   // allocates registers to live intervals.