Refactor code in preparation for correct stack maps in slow paths.

Move the logic for saving/restoring live registers in slow paths
into SlowPathCode methods. Also add a RecordPcInfo helper to
SlowPathCode that will act as the placeholder for emitting correct
stack maps.
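
As a sketch of the resulting pattern (the class name and the elided
runtime call below are placeholders for illustration, not part of this
change), a backend slow path now calls the SlowPathCode helpers instead
of going through the codegen directly:

    // Hypothetical slow path, shown for illustration only.
    class ExampleSlowPathX86_64 : public SlowPathCodeX86_64 {
     public:
      explicit ExampleSlowPathX86_64(HInstruction* instruction)
          : instruction_(instruction) {}

      void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
        __ Bind(GetEntryLabel());
        // Was: codegen->SaveLiveRegisters(instruction_->GetLocations());
        SaveLiveRegisters(codegen, instruction_->GetLocations());

        // ... emit the runtime call here ...

        // Was: codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
        // The helper forwards to the codegen for now and will later emit
        // slow-path-specific stack maps.
        RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());

        // Was: codegen->RestoreLiveRegisters(instruction_->GetLocations());
        RestoreLiveRegisters(codegen, instruction_->GetLocations());
        __ jmp(GetExitLabel());
      }

     private:
      HInstruction* const instruction_;
      DISALLOW_COPY_AND_ASSIGN(ExampleSlowPathX86_64);
    };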

Change-Id: I25c2bc7a642ef854bbc8a3eb570e5c8c8d2d030c
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index a6ab208..742d83e 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -750,54 +750,6 @@
   }
 }
 
-void CodeGenerator::SaveLiveRegisters(LocationSummary* locations) {
-  RegisterSet* register_set = locations->GetLiveRegisters();
-  size_t stack_offset = first_register_slot_in_slow_path_;
-  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
-    if (!IsCoreCalleeSaveRegister(i)) {
-      if (register_set->ContainsCoreRegister(i)) {
-        // If the register holds an object, update the stack mask.
-        if (locations->RegisterContainsObject(i)) {
-          locations->SetStackBit(stack_offset / kVRegSize);
-        }
-        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
-        stack_offset += SaveCoreRegister(stack_offset, i);
-      }
-    }
-  }
-
-  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
-    if (!IsFloatingPointCalleeSaveRegister(i)) {
-      if (register_set->ContainsFloatingPointRegister(i)) {
-        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
-        stack_offset += SaveFloatingPointRegister(stack_offset, i);
-      }
-    }
-  }
-}
-
-void CodeGenerator::RestoreLiveRegisters(LocationSummary* locations) {
-  RegisterSet* register_set = locations->GetLiveRegisters();
-  size_t stack_offset = first_register_slot_in_slow_path_;
-  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
-    if (!IsCoreCalleeSaveRegister(i)) {
-      if (register_set->ContainsCoreRegister(i)) {
-        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
-        stack_offset += RestoreCoreRegister(stack_offset, i);
-      }
-    }
-  }
-
-  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
-    if (!IsFloatingPointCalleeSaveRegister(i)) {
-      if (register_set->ContainsFloatingPointRegister(i)) {
-        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
-        stack_offset += RestoreFloatingPointRegister(stack_offset, i);
-      }
-    }
-  }
-}
-
 void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
   LocationSummary* locations = suspend_check->GetLocations();
   HBasicBlock* block = suspend_check->GetBlock();
@@ -824,4 +776,56 @@
   GetMoveResolver()->EmitNativeCode(&parallel_move);
 }
 
+void SlowPathCode::RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc) {
+  codegen->RecordPcInfo(instruction, dex_pc);
+}
+
+void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
+  RegisterSet* register_set = locations->GetLiveRegisters();
+  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
+  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
+    if (!codegen->IsCoreCalleeSaveRegister(i)) {
+      if (register_set->ContainsCoreRegister(i)) {
+        // If the register holds an object, update the stack mask.
+        if (locations->RegisterContainsObject(i)) {
+          locations->SetStackBit(stack_offset / kVRegSize);
+        }
+        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+        stack_offset += codegen->SaveCoreRegister(stack_offset, i);
+      }
+    }
+  }
+
+  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
+    if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
+      if (register_set->ContainsFloatingPointRegister(i)) {
+        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+        stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
+      }
+    }
+  }
+}
+
+void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
+  RegisterSet* register_set = locations->GetLiveRegisters();
+  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
+  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
+    if (!codegen->IsCoreCalleeSaveRegister(i)) {
+      if (register_set->ContainsCoreRegister(i)) {
+        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+        stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
+      }
+    }
+  }
+
+  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
+    if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
+      if (register_set->ContainsFloatingPointRegister(i)) {
+        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+        stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
+      }
+    }
+  }
+}
+
 }  // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b8f4572..81fc684 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -73,6 +73,10 @@
 
   virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
 
+  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+  void RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc);
+
  private:
   DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
 };
@@ -182,8 +186,6 @@
   void BuildNativeGCMap(
       std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
   void BuildStackMaps(std::vector<uint8_t>* vector);
-  void SaveLiveRegisters(LocationSummary* locations);
-  void RestoreLiveRegisters(LocationSummary* locations);
 
   bool IsLeafMethod() const {
     return is_leaf_;
@@ -267,6 +269,15 @@
     }
   }
 
+  size_t GetFirstRegisterSlotInSlowPath() const {
+    return first_register_slot_in_slow_path_;
+  }
+
+  uint32_t FrameEntrySpillSize() const {
+    return GetFpuSpillSize() + GetCoreSpillSize();
+  }
+
+
  protected:
   CodeGenerator(HGraph* graph,
                 size_t number_of_core_registers,
@@ -326,10 +337,6 @@
     return POPCOUNT(core_spill_mask_) * GetWordSize();
   }
 
-  uint32_t FrameEntrySpillSize() const {
-    return GetFpuSpillSize() + GetCoreSpillSize();
-  }
-
   bool HasAllocatedCalleeSaveRegisters() const {
     // We check the core registers against 1 because it always comprises the return PC.
     return (POPCOUNT(allocated_registers_.GetCoreRegisters() & core_callee_save_mask_) != 1)
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 07cc41a..aed8c06 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -114,10 +114,10 @@
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
     arm_codegen->InvokeRuntime(
         QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
+    RestoreLiveRegisters(codegen, instruction_->GetLocations());
     if (successor_ == nullptr) {
       __ b(GetReturnLabel());
     } else {
@@ -188,7 +188,7 @@
 
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
@@ -204,7 +204,7 @@
       DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
       arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
     }
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ b(GetExitLabel());
   }
 
@@ -235,7 +235,7 @@
 
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
@@ -244,7 +244,7 @@
         QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
     arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ b(GetExitLabel());
   }
 
@@ -272,7 +272,7 @@
 
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -291,7 +291,7 @@
       arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ b(GetExitLabel());
   }
 
@@ -1205,6 +1205,7 @@
   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
 
   codegen_->GenerateStaticOrDirectCall(invoke, temp);
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
@@ -3861,7 +3862,6 @@
     __ bl(GetFrameEntryLabel());
   }
 
-  RecordPcInfo(invoke, invoke->GetDexPc());
   DCHECK(!IsLeafMethod());
 }
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c21084a..93c4ce5 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -191,7 +191,7 @@
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
 
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
@@ -213,7 +213,7 @@
       arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ B(GetExitLabel());
   }
 
@@ -244,7 +244,7 @@
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
 
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
@@ -255,7 +255,7 @@
     Primitive::Type type = instruction_->GetType();
     arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ B(GetExitLabel());
   }
 
@@ -292,11 +292,11 @@
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
     arm64_codegen->InvokeRuntime(
         QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
     CheckEntrypointTypes<kQuickTestSuspend, void, void>();
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
+    RestoreLiveRegisters(codegen, instruction_->GetLocations());
     if (successor_ == nullptr) {
       __ B(GetReturnLabel());
     } else {
@@ -338,7 +338,7 @@
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
 
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -360,7 +360,7 @@
       CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ B(GetExitLabel());
   }
 
@@ -1920,7 +1920,6 @@
     __ Bl(&frame_entry_label_);
   }
 
-  RecordPcInfo(invoke, invoke->GetDexPc());
   DCHECK(!IsLeafMethod());
 }
 
@@ -1931,6 +1930,7 @@
 
   Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
   codegen_->GenerateStaticOrDirectCall(invoke, temp);
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a09ecb8..1db1600 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -158,16 +158,16 @@
 
 class SuspendCheckSlowPathX86 : public SlowPathCodeX86 {
  public:
-  explicit SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
+  SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
       : instruction_(instruction), successor_(successor) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
     __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pTestSuspend)));
     codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
+    RestoreLiveRegisters(codegen, instruction_->GetLocations());
     if (successor_ == nullptr) {
       __ jmp(GetReturnLabel());
     } else {
@@ -198,15 +198,15 @@
 
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
     __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction_->GetStringIndex()));
     __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pResolveString)));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
     x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
 
     __ jmp(GetExitLabel());
   }
@@ -231,7 +231,7 @@
     LocationSummary* locations = at_->GetLocations();
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     __ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex()));
@@ -239,7 +239,7 @@
     __ fs()->call(Address::Absolute(do_clinit_
         ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)
         : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeType)));
-    codegen->RecordPcInfo(at_, dex_pc_);
+    RecordPcInfo(codegen, at_, dex_pc_);
 
     // Move the class to the desired location.
     Location out = locations->Out();
@@ -248,7 +248,7 @@
       x86_codegen->Move32(out, Location::RegisterLocation(EAX));
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ jmp(GetExitLabel());
   }
 
@@ -287,7 +287,7 @@
 
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -306,11 +306,11 @@
       __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast)));
     }
 
-    codegen->RecordPcInfo(instruction_, dex_pc_);
+    RecordPcInfo(codegen, instruction_, dex_pc_);
     if (instruction_->IsInstanceOf()) {
       x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
     }
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
 
     __ jmp(GetExitLabel());
   }
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 07ba95d..90d87d4 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -72,7 +72,7 @@
     __ Bind(GetEntryLabel());
     __ gs()->call(
         Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowNullPointer), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
   }
 
  private:
@@ -88,7 +88,7 @@
     __ Bind(GetEntryLabel());
     __ gs()->call(
         Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowDivZero), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
   }
 
  private:
@@ -136,10 +136,10 @@
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
     __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pTestSuspend), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+    RestoreLiveRegisters(codegen, instruction_->GetLocations());
     if (successor_ == nullptr) {
       __ jmp(GetReturnLabel());
     } else {
@@ -181,7 +181,7 @@
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
     __ gs()->call(Address::Absolute(
         QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
   }
 
  private:
@@ -207,7 +207,7 @@
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
 
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
@@ -215,7 +215,7 @@
     __ gs()->call(Address::Absolute((do_clinit_
           ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage)
           : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)) , true));
-    codegen->RecordPcInfo(at_, dex_pc_);
+    RecordPcInfo(codegen, at_, dex_pc_);
 
     Location out = locations->Out();
     // Move the class to the desired location.
@@ -224,7 +224,7 @@
       x64_codegen->Move(out, Location::RegisterLocation(RAX));
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ jmp(GetExitLabel());
   }
 
@@ -255,7 +255,7 @@
 
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
@@ -263,9 +263,9 @@
             Immediate(instruction_->GetStringIndex()));
     __ gs()->call(Address::Absolute(
         QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pResolveString), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
     x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ jmp(GetExitLabel());
   }
 
@@ -293,7 +293,7 @@
 
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -312,13 +312,13 @@
       __ gs()->call(
           Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pCheckCast), true));
     }
-    codegen->RecordPcInfo(instruction_, dex_pc_);
+    RecordPcInfo(codegen, instruction_, dex_pc_);
 
     if (instruction_->IsInstanceOf()) {
       x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ jmp(GetExitLabel());
   }
 
@@ -374,7 +374,6 @@
   }
 
   DCHECK(!IsLeafMethod());
-  RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void CodeGeneratorX86_64::DumpCoreRegister(std::ostream& stream, int reg) const {
@@ -1216,6 +1215,7 @@
   codegen_->GenerateStaticOrDirectCall(
       invoke,
       invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index a82d80a..0c9eb94 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -114,12 +114,13 @@
     CodeGeneratorARM* codegen = down_cast<CodeGeneratorARM*>(codegen_in);
     __ Bind(GetEntryLabel());
 
-    codegen->SaveLiveRegisters(invoke_->GetLocations());
+    SaveLiveRegisters(codegen, invoke_->GetLocations());
 
     MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
 
     if (invoke_->IsInvokeStaticOrDirect()) {
       codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
+      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
       UNREACHABLE();
@@ -133,7 +134,7 @@
       MoveFromReturnRegister(out, invoke_->GetType(), codegen);
     }
 
-    codegen->RestoreLiveRegisters(invoke_->GetLocations());
+    RestoreLiveRegisters(codegen, invoke_->GetLocations());
     __ b(GetExitLabel());
   }
 
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 1ddff8a..19b04ae 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -122,12 +122,13 @@
     CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
     __ Bind(GetEntryLabel());
 
-    codegen->SaveLiveRegisters(invoke_->GetLocations());
+    SaveLiveRegisters(codegen, invoke_->GetLocations());
 
     MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
 
     if (invoke_->IsInvokeStaticOrDirect()) {
       codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
+      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
       UNREACHABLE();
@@ -141,7 +142,7 @@
       MoveFromReturnRegister(out, invoke_->GetType(), codegen);
     }
 
-    codegen->RestoreLiveRegisters(invoke_->GetLocations());
+    RestoreLiveRegisters(codegen, invoke_->GetLocations());
     __ B(GetExitLabel());
   }
 
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index c73f092..2064b18 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -134,12 +134,13 @@
     CodeGeneratorX86_64* codegen = down_cast<CodeGeneratorX86_64*>(codegen_in);
     __ Bind(GetEntryLabel());
 
-    codegen->SaveLiveRegisters(invoke_->GetLocations());
+    SaveLiveRegisters(codegen, invoke_->GetLocations());
 
     MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
 
     if (invoke_->IsInvokeStaticOrDirect()) {
       codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), CpuRegister(RDI));
+      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
       UNREACHABLE();
@@ -153,7 +154,7 @@
       MoveFromReturnRegister(out, invoke_->GetType(), codegen);
     }
 
-    codegen->RestoreLiveRegisters(invoke_->GetLocations());
+    RestoreLiveRegisters(codegen, invoke_->GetLocations());
     __ jmp(GetExitLabel());
   }