Move entry spills determination to JNI compiler.

The calling convention no longer describes entry spills as
spilling is the JNI compiler's responsibility. This allows
future improvements, such as spilling registers directly to
the HandleScope or outgoing stack args.

Remove the notion of interprocedural scratch register from
calling conventions and let assemblers deal with all scratch
register uses. The remaining JNI assembler APIs that take
scratch registers are currently unused and can be removed.

Also fix a bug in the disassembly comparison used by tests:
two files were previously considered identical even when the
second one contained additional trailing data.

This change fully preserves the generated code and adds TODO
comments where preserving the old code results in weird or
suboptimal output.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: Prebuilt boot image is unchanged.
Test: aosp_taimen-userdebug boots.
Bug: 12189621
Change-Id: Ic26a670276920313cd907a6eda8d982cf0abfd81
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index e06c914..d07ab98 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -27,8 +27,6 @@
 namespace art {
 namespace arm {
 
-static_assert(kArmPointerSize == PointerSize::k32, "Unexpected ARM pointer size");
-
 //
 // JNI calling convention constants.
 //
@@ -49,18 +47,21 @@
 static const Register kHFCoreArgumentRegisters[] = {
   R0, R1, R2, R3
 };
+static constexpr size_t kHFCoreArgumentRegistersCount = arraysize(kHFCoreArgumentRegisters);
 
 // (VFP single-precision registers.)
 static const SRegister kHFSArgumentRegisters[] = {
   S0, S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12, S13, S14, S15
 };
+static constexpr size_t kHFSArgumentRegistersCount = arraysize(kHFSArgumentRegisters);
 
 // (VFP double-precision registers.)
 static const DRegister kHFDArgumentRegisters[] = {
   D0, D1, D2, D3, D4, D5, D6, D7
 };
+static constexpr size_t kHFDArgumentRegistersCount = arraysize(kHFDArgumentRegisters);
 
-static_assert(arraysize(kHFDArgumentRegisters) * 2 == arraysize(kHFSArgumentRegisters),
+static_assert(kHFDArgumentRegistersCount * 2 == kHFSArgumentRegistersCount,
     "ks d argument registers mismatch");
 
 //
@@ -159,14 +160,6 @@
 
 // Calling convention
 
-ManagedRegister ArmManagedRuntimeCallingConvention::InterproceduralScratchRegister() const {
-  return ArmManagedRegister::FromCoreRegister(IP);  // R12
-}
-
-ManagedRegister ArmJniCallingConvention::InterproceduralScratchRegister() const {
-  return ArmManagedRegister::FromCoreRegister(IP);  // R12
-}
-
 ManagedRegister ArmManagedRuntimeCallingConvention::ReturnRegister() {
   switch (GetShorty()[0]) {
     case 'V':
@@ -204,93 +197,93 @@
   return ArmManagedRegister::FromCoreRegister(R0);
 }
 
+void ArmManagedRuntimeCallingConvention::ResetIterator(FrameOffset displacement) {
+  ManagedRuntimeCallingConvention::ResetIterator(displacement);
+  gpr_index_ = 1u;  // Skip r0 for ArtMethod*
+  float_index_ = 0u;
+  double_index_ = 0u;
+}
+
+void ArmManagedRuntimeCallingConvention::Next() {
+  if (IsCurrentParamAFloatOrDouble()) {
+    if (float_index_ % 2 == 0) {
+      // The register for the current float is the same as the first register for double.
+      DCHECK_EQ(float_index_, double_index_ * 2u);
+    } else {
+      // There is a space for an extra float before space for a double.
+      DCHECK_LT(float_index_, double_index_ * 2u);
+    }
+    if (IsCurrentParamADouble()) {
+      double_index_ += 1u;
+      if (float_index_ % 2 == 0) {
+        float_index_ = double_index_ * 2u;
+      }
+    } else {
+      if (float_index_ % 2 == 0) {
+        float_index_ += 1u;
+        double_index_ += 1u;  // Leaves space for one more float before the next double.
+      } else {
+        float_index_ = double_index_ * 2u;
+      }
+    }
+  } else {  // Not a float/double.
+    if (IsCurrentParamALong()) {
+      // Note that the alignment to even register is done lazily.
+      gpr_index_ = RoundUp(gpr_index_, 2u) + 2u;
+    } else {
+      gpr_index_ += 1u;
+    }
+  }
+  ManagedRuntimeCallingConvention::Next();
+}
+
 bool ArmManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
-  return false;  // Everything moved to stack on entry.
+  if (IsCurrentParamAFloatOrDouble()) {
+    if (IsCurrentParamADouble()) {
+      return double_index_ < kHFDArgumentRegistersCount;
+    } else {
+      return float_index_ < kHFSArgumentRegistersCount;
+    }
+  } else {
+    if (IsCurrentParamALong()) {
+      // Round up to even register and do not split a long between the last register and the stack.
+      return RoundUp(gpr_index_, 2u) + 1u < kHFCoreArgumentRegistersCount;
+    } else {
+      return gpr_index_ < kHFCoreArgumentRegistersCount;
+    }
+  }
 }
 
 bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
-  return true;
+  return !IsCurrentParamInRegister();
 }
 
 ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() {
-  LOG(FATAL) << "Should not reach here";
-  UNREACHABLE();
+  DCHECK(IsCurrentParamInRegister());
+  if (IsCurrentParamAFloatOrDouble()) {
+    if (IsCurrentParamADouble()) {
+      return ArmManagedRegister::FromDRegister(kHFDArgumentRegisters[double_index_]);
+    } else {
+      return ArmManagedRegister::FromSRegister(kHFSArgumentRegisters[float_index_]);
+    }
+  } else {
+    if (IsCurrentParamALong()) {
+      // Currently the only register pair for a long parameter is r2-r3.
+      // Note that the alignment to even register is done lazily.
+      CHECK_EQ(RoundUp(gpr_index_, 2u), 2u);
+      return ArmManagedRegister::FromRegisterPair(R2_R3);
+    } else {
+      return ArmManagedRegister::FromCoreRegister(kHFCoreArgumentRegisters[gpr_index_]);
+    }
+  }
 }
 
 FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() {
-  CHECK(IsCurrentParamOnStack());
   return FrameOffset(displacement_.Int32Value() +        // displacement
                      kFramePointerSize +                 // Method*
                      (itr_slots_ * kFramePointerSize));  // offset into in args
 }
 
-const ManagedRegisterEntrySpills& ArmManagedRuntimeCallingConvention::EntrySpills() {
-  // We spill the argument registers on ARM to free them up for scratch use, we then assume
-  // all arguments are on the stack.
-  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
-    uint32_t gpr_index = 1;  // R0 ~ R3. Reserve r0 for ArtMethod*.
-    uint32_t fpr_index = 0;  // S0 ~ S15.
-    uint32_t fpr_double_index = 0;  // D0 ~ D7.
-
-    ResetIterator(FrameOffset(0));
-    while (HasNext()) {
-      if (IsCurrentParamAFloatOrDouble()) {
-        if (IsCurrentParamADouble()) {  // Double.
-          // Double should not overlap with float.
-          fpr_double_index = (std::max(fpr_double_index * 2, RoundUp(fpr_index, 2))) / 2;
-          if (fpr_double_index < arraysize(kHFDArgumentRegisters)) {
-            entry_spills_.push_back(
-                ArmManagedRegister::FromDRegister(kHFDArgumentRegisters[fpr_double_index++]));
-          } else {
-            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
-          }
-        } else {  // Float.
-          // Float should not overlap with double.
-          if (fpr_index % 2 == 0) {
-            fpr_index = std::max(fpr_double_index * 2, fpr_index);
-          }
-          if (fpr_index < arraysize(kHFSArgumentRegisters)) {
-            entry_spills_.push_back(
-                ArmManagedRegister::FromSRegister(kHFSArgumentRegisters[fpr_index++]));
-          } else {
-            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
-          }
-        }
-      } else {
-        // FIXME: Pointer this returns as both reference and long.
-        if (IsCurrentParamALong() && !IsCurrentParamAReference()) {  // Long.
-          if (gpr_index < arraysize(kHFCoreArgumentRegisters) - 1) {
-            // Skip R1, and use R2_R3 if the long is the first parameter.
-            if (gpr_index == 1) {
-              gpr_index++;
-            }
-          }
-
-          // If it spans register and memory, we must use the value in memory.
-          if (gpr_index < arraysize(kHFCoreArgumentRegisters) - 1) {
-            entry_spills_.push_back(
-                ArmManagedRegister::FromCoreRegister(kHFCoreArgumentRegisters[gpr_index++]));
-          } else if (gpr_index == arraysize(kHFCoreArgumentRegisters) - 1) {
-            gpr_index++;
-            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
-          } else {
-            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
-          }
-        }
-        // High part of long or 32-bit argument.
-        if (gpr_index < arraysize(kHFCoreArgumentRegisters)) {
-          entry_spills_.push_back(
-              ArmManagedRegister::FromCoreRegister(kHFCoreArgumentRegisters[gpr_index++]));
-        } else {
-          entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
-        }
-      }
-      Next();
-    }
-  }
-  return entry_spills_;
-}
-
 // JNI calling convention
 
 ArmJniCallingConvention::ArmJniCallingConvention(bool is_static,
@@ -538,7 +531,6 @@
   DCHECK(std::none_of(kJniArgumentRegisters,
                       kJniArgumentRegisters + std::size(kJniArgumentRegisters),
                       [](Register reg) { return reg == R4; }));
-  DCHECK(!InterproceduralScratchRegister().Equals(ArmManagedRegister::FromCoreRegister(R4)));
   return ArmManagedRegister::FromCoreRegister(R4);
 }
 
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index e4b86fa..7896d64 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -29,22 +29,26 @@
       : ManagedRuntimeCallingConvention(is_static,
                                         is_synchronized,
                                         shorty,
-                                        PointerSize::k32) {}
+                                        PointerSize::k32),
+        gpr_index_(1u),  // Skip r0 for ArtMethod*
+        float_index_(0u),
+        double_index_(0u) {}
   ~ArmManagedRuntimeCallingConvention() override {}
   // Calling convention
   ManagedRegister ReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() const override;
+  void ResetIterator(FrameOffset displacement) override;
   // Managed runtime calling convention
   ManagedRegister MethodRegister() override;
+  void Next() override;
   bool IsCurrentParamInRegister() override;
   bool IsCurrentParamOnStack() override;
   ManagedRegister CurrentParamRegister() override;
   FrameOffset CurrentParamStackOffset() override;
-  const ManagedRegisterEntrySpills& EntrySpills() override;
 
  private:
-  ManagedRegisterEntrySpills entry_spills_;
-
+  size_t gpr_index_;
+  size_t float_index_;
+  size_t double_index_;
   DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
 };
 
@@ -58,7 +62,6 @@
   // Calling convention
   ManagedRegister ReturnRegister() override;
   ManagedRegister IntReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() const override;
   // JNI calling convention
   void Next() override;  // Override default behavior for AAPCS
   size_t FrameSize() const override;
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 231e140..32da141 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -26,8 +26,6 @@
 namespace art {
 namespace arm64 {
 
-static_assert(kArm64PointerSize == PointerSize::k64, "Unexpected ARM64 pointer size");
-
 static const XRegister kXArgumentRegisters[] = {
   X0, X1, X2, X3, X4, X5, X6, X7
 };
@@ -143,14 +141,6 @@
     CalculateFpCalleeSpillMask(kAapcs64CalleeSaveRegisters);
 
 // Calling convention
-ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() const {
-  return Arm64ManagedRegister::FromXRegister(IP0);  // X16
-}
-
-ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() const {
-  return Arm64ManagedRegister::FromXRegister(IP0);  // X16
-}
-
 static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
   if (shorty[0] == 'F') {
     return Arm64ManagedRegister::FromSRegister(S0);
@@ -184,73 +174,44 @@
 }
 
 bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
-  return false;  // Everything moved to stack on entry.
+  if (IsCurrentParamAFloatOrDouble()) {
+    return itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments;
+  } else {
+    size_t non_fp_arg_number = itr_args_ - itr_float_and_doubles_;
+    return /* method */ 1u + non_fp_arg_number < kMaxIntLikeRegisterArguments;
+  }
 }
 
 bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
-  return true;
+  return !IsCurrentParamInRegister();
 }
 
 ManagedRegister Arm64ManagedRuntimeCallingConvention::CurrentParamRegister() {
-  LOG(FATAL) << "Should not reach here";
-  UNREACHABLE();
+  DCHECK(IsCurrentParamInRegister());
+  if (IsCurrentParamAFloatOrDouble()) {
+    if (IsCurrentParamADouble()) {
+      return Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[itr_float_and_doubles_]);
+    } else {
+      return Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[itr_float_and_doubles_]);
+    }
+  } else {
+    size_t non_fp_arg_number = itr_args_ - itr_float_and_doubles_;
+    if (IsCurrentParamALong()) {
+      XRegister x_reg = kXArgumentRegisters[/* method */ 1u + non_fp_arg_number];
+      return Arm64ManagedRegister::FromXRegister(x_reg);
+    } else {
+      WRegister w_reg = kWArgumentRegisters[/* method */ 1u + non_fp_arg_number];
+      return Arm64ManagedRegister::FromWRegister(w_reg);
+    }
+  }
 }
 
 FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
-  CHECK(IsCurrentParamOnStack());
   return FrameOffset(displacement_.Int32Value() +  // displacement
                      kFramePointerSize +  // Method ref
                      (itr_slots_ * sizeof(uint32_t)));  // offset into in args
 }
 
-const ManagedRegisterEntrySpills& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
-  // We spill the argument registers on ARM64 to free them up for scratch use, we then assume
-  // all arguments are on the stack.
-  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
-    int gp_reg_index = 1;   // we start from X1/W1, X0 holds ArtMethod*.
-    int fp_reg_index = 0;   // D0/S0.
-
-    // We need to choose the correct register (D/S or X/W) since the managed
-    // stack uses 32bit stack slots.
-    ResetIterator(FrameOffset(0));
-    while (HasNext()) {
-      if (IsCurrentParamAFloatOrDouble()) {  // FP regs.
-          if (fp_reg_index < 8) {
-            if (!IsCurrentParamADouble()) {
-              entry_spills_.push_back(Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[fp_reg_index]));
-            } else {
-              entry_spills_.push_back(Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[fp_reg_index]));
-            }
-            fp_reg_index++;
-          } else {  // just increase the stack offset.
-            if (!IsCurrentParamADouble()) {
-              entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
-            } else {
-              entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
-            }
-          }
-      } else {  // GP regs.
-        if (gp_reg_index < 8) {
-          if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
-            entry_spills_.push_back(Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg_index]));
-          } else {
-            entry_spills_.push_back(Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg_index]));
-          }
-          gp_reg_index++;
-        } else {  // just increase the stack offset.
-          if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
-              entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
-          } else {
-              entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
-          }
-        }
-      }
-      Next();
-    }
-  }
-  return entry_spills_;
-}
-
 // JNI calling convention
 
 Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static,
@@ -411,7 +372,6 @@
   DCHECK(std::none_of(kXArgumentRegisters,
                       kXArgumentRegisters + std::size(kXArgumentRegisters),
                       [](XRegister reg) { return reg == X15; }));
-  DCHECK(!InterproceduralScratchRegister().Equals(Arm64ManagedRegister::FromXRegister(X15)));
   return Arm64ManagedRegister::FromXRegister(X15);
 }
 
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index 64b29f1..7beca08 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -33,18 +33,14 @@
   ~Arm64ManagedRuntimeCallingConvention() override {}
   // Calling convention
   ManagedRegister ReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() const override;
   // Managed runtime calling convention
   ManagedRegister MethodRegister() override;
   bool IsCurrentParamInRegister() override;
   bool IsCurrentParamOnStack() override;
   ManagedRegister CurrentParamRegister() override;
   FrameOffset CurrentParamStackOffset() override;
-  const ManagedRegisterEntrySpills& EntrySpills() override;
 
  private:
-  ManagedRegisterEntrySpills entry_spills_;
-
   DISALLOW_COPY_AND_ASSIGN(Arm64ManagedRuntimeCallingConvention);
 };
 
@@ -58,7 +54,6 @@
   // Calling convention
   ManagedRegister ReturnRegister() override;
   ManagedRegister IntReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() const override;
   // JNI calling convention
   size_t FrameSize() const override;
   size_t OutArgSize() const override;
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 3d4cefe..b4396f0 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -48,15 +48,13 @@
 
   // Register that holds result of this method invocation.
   virtual ManagedRegister ReturnRegister() = 0;
-  // Register reserved for scratch usage during procedure calls.
-  virtual ManagedRegister InterproceduralScratchRegister() const = 0;
 
   // Iterator interface
 
   // Place iterator at start of arguments. The displacement is applied to
   // frame offset methods to account for frames which may be on the stack
   // below the one being iterated over.
-  void ResetIterator(FrameOffset displacement) {
+  virtual void ResetIterator(FrameOffset displacement) {
     displacement_ = displacement;
     itr_slots_ = 0;
     itr_args_ = 0;
@@ -252,11 +250,14 @@
 
   // Iterator interface
   bool HasNext();
-  void Next();
+  virtual void Next();
   bool IsCurrentParamAReference();
   bool IsCurrentParamAFloatOrDouble();
   bool IsCurrentParamADouble();
   bool IsCurrentParamALong();
+  bool IsCurrentParamALongOrDouble() {
+    return IsCurrentParamALong() || IsCurrentParamADouble();
+  }
   bool IsCurrentArgExplicit();  // ie a non-implict argument such as this
   bool IsCurrentArgPossiblyNull();
   size_t CurrentParamSize();
@@ -267,9 +268,6 @@
 
   virtual ~ManagedRuntimeCallingConvention() {}
 
-  // Registers to spill to caller's out registers on entry.
-  virtual const ManagedRegisterEntrySpills& EntrySpills() = 0;
-
  protected:
   ManagedRuntimeCallingConvention(bool is_static,
                                   bool is_synchronized,
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index c2db73a..ac060cc 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -224,9 +224,25 @@
   ManagedRegister method_register =
       is_critical_native ? ManagedRegister::NoRegister() : mr_conv->MethodRegister();
   ArrayRef<const ManagedRegister> callee_save_regs = main_jni_conv->CalleeSaveRegisters();
-  __ BuildFrame(current_frame_size, method_register, callee_save_regs, mr_conv->EntrySpills());
+  __ BuildFrame(current_frame_size, method_register, callee_save_regs);
   DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(current_frame_size));
 
+  {
+    // Spill all register arguments.
+    // TODO: Spill reference args directly to the HandleScope.
+    // TODO: Spill native stack args straight to their stack locations (adjust SP earlier).
+    // TODO: Move args in registers without spilling for @CriticalNative.
+    // TODO: Provide assembler API for batched moves to allow moving multiple arguments
+    // with single instruction (arm: LDRD/STRD/LDMIA/STMIA, arm64: LDP/STP).
+    mr_conv->ResetIterator(FrameOffset(current_frame_size));
+    for (; mr_conv->HasNext(); mr_conv->Next()) {
+      if (mr_conv->IsCurrentParamInRegister()) {
+        size_t size = mr_conv->IsCurrentParamALongOrDouble() ? 8u : 4u;
+        __ Store(mr_conv->CurrentParamStackOffset(), mr_conv->CurrentParamRegister(), size);
+      }
+    }
+  }
+
   if (LIKELY(!is_critical_native)) {
     // NOTE: @CriticalNative methods don't have a HandleScope
     //       because they can't have any reference parameters or return values.
@@ -235,15 +251,12 @@
     mr_conv->ResetIterator(FrameOffset(current_frame_size));
     main_jni_conv->ResetIterator(FrameOffset(0));
     __ StoreImmediateToFrame(main_jni_conv->HandleScopeNumRefsOffset(),
-                             main_jni_conv->ReferenceCount(),
-                             mr_conv->InterproceduralScratchRegister());
+                             main_jni_conv->ReferenceCount());
 
     __ CopyRawPtrFromThread(main_jni_conv->HandleScopeLinkOffset(),
-                            Thread::TopHandleScopeOffset<kPointerSize>(),
-                            mr_conv->InterproceduralScratchRegister());
+                            Thread::TopHandleScopeOffset<kPointerSize>());
     __ StoreStackOffsetToThread(Thread::TopHandleScopeOffset<kPointerSize>(),
-                                main_jni_conv->HandleScopeOffset(),
-                                mr_conv->InterproceduralScratchRegister());
+                                main_jni_conv->HandleScopeOffset());
 
     // 3. Place incoming reference arguments into handle scope
     main_jni_conv->Next();  // Skip JNIEnv*
@@ -252,15 +265,12 @@
       FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
       // Check handle scope offset is within frame
       CHECK_LT(handle_scope_offset.Uint32Value(), current_frame_size);
-      // Note this LoadRef() doesn't need heap unpoisoning since it's from the ArtMethod.
-      // Note this LoadRef() does not include read barrier. It will be handled below.
-      //
-      // scratchRegister = *method[DeclaringClassOffset()];
-      __ LoadRef(main_jni_conv->InterproceduralScratchRegister(),
-                 mr_conv->MethodRegister(), ArtMethod::DeclaringClassOffset(), false);
-      __ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false);
-      // *handleScopeOffset = scratchRegister
-      __ StoreRef(handle_scope_offset, main_jni_conv->InterproceduralScratchRegister());
+      // Note: This CopyRef() doesn't need heap unpoisoning since it's from the ArtMethod.
+      // Note: This CopyRef() does not include read barrier. It will be handled below.
+      __ CopyRef(handle_scope_offset,
+                 mr_conv->MethodRegister(),
+                 ArtMethod::DeclaringClassOffset(),
+                 /* unpoison_reference= */ false);
       main_jni_conv->Next();  // in handle scope so move to next argument
     }
     // Place every reference into the handle scope (ignore other parameters).
@@ -277,8 +287,10 @@
         CHECK_LT(handle_scope_offset.Uint32Value(), current_frame_size);
         CHECK_NE(handle_scope_offset.Uint32Value(),
                  main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
-        bool input_in_reg = mr_conv->IsCurrentParamInRegister();
-        bool input_on_stack = mr_conv->IsCurrentParamOnStack();
+        // We spilled all registers above, so use stack locations.
+        // TODO: Spill refs straight to the HandleScope.
+        bool input_in_reg = false;  // mr_conv->IsCurrentParamInRegister();
+        bool input_on_stack = true;  // mr_conv->IsCurrentParamOnStack();
         CHECK(input_in_reg || input_on_stack);
 
         if (input_in_reg) {
@@ -288,8 +300,7 @@
         } else if (input_on_stack) {
           FrameOffset in_off  = mr_conv->CurrentParamStackOffset();
           __ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());
-          __ CopyRef(handle_scope_offset, in_off,
-                     mr_conv->InterproceduralScratchRegister());
+          __ CopyRef(handle_scope_offset, in_off);
         }
       }
       mr_conv->Next();
@@ -330,13 +341,8 @@
       //
       // Check if gc_is_marking is set -- if it's not, we don't need
       // a read barrier so skip it.
-      __ LoadFromThread(main_jni_conv->InterproceduralScratchRegister(),
-                        Thread::IsGcMarkingOffset<kPointerSize>(),
-                        Thread::IsGcMarkingSize());
       // Jump over the slow path if gc is marking is false.
-      __ Jump(skip_cold_path_label.get(),
-              JNIMacroUnaryCondition::kZero,
-              main_jni_conv->InterproceduralScratchRegister());
+      __ TestGcMarking(skip_cold_path_label.get(), JNIMacroUnaryCondition::kZero);
     }
 
     // Construct slow path for read barrier:
@@ -353,25 +359,22 @@
     // Pass the handle for the class as the first argument.
     if (main_jni_conv->IsCurrentParamOnStack()) {
       FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
-      __ CreateHandleScopeEntry(out_off, class_handle_scope_offset,
-                         mr_conv->InterproceduralScratchRegister(),
-                         false);
+      __ CreateHandleScopeEntry(out_off, class_handle_scope_offset, /*null_allowed=*/ false);
     } else {
       ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
-      __ CreateHandleScopeEntry(out_reg, class_handle_scope_offset,
-                         ManagedRegister::NoRegister(), false);
+      __ CreateHandleScopeEntry(out_reg,
+                                class_handle_scope_offset,
+                                ManagedRegister::NoRegister(),
+                                /*null_allowed=*/ false);
     }
     main_jni_conv->Next();
     // Pass the current thread as the second argument and call.
     if (main_jni_conv->IsCurrentParamInRegister()) {
       __ GetCurrentThread(main_jni_conv->CurrentParamRegister());
-      __ Call(main_jni_conv->CurrentParamRegister(),
-              Offset(read_barrier),
-              main_jni_conv->InterproceduralScratchRegister());
+      __ Call(main_jni_conv->CurrentParamRegister(), Offset(read_barrier));
     } else {
-      __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
-                          main_jni_conv->InterproceduralScratchRegister());
-      __ CallFromThread(read_barrier, main_jni_conv->InterproceduralScratchRegister());
+      __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset());
+      __ CallFromThread(read_barrier);
     }
     main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));  // Reset.
 
@@ -403,27 +406,27 @@
       main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
       if (main_jni_conv->IsCurrentParamOnStack()) {
         FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
-        __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset,
-                                  mr_conv->InterproceduralScratchRegister(), false);
+        __ CreateHandleScopeEntry(out_off,
+                                  locked_object_handle_scope_offset,
+                                  /*null_allowed=*/ false);
       } else {
         ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
-        __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset,
-                                  ManagedRegister::NoRegister(), false);
+        __ CreateHandleScopeEntry(out_reg,
+                                  locked_object_handle_scope_offset,
+                                  ManagedRegister::NoRegister(),
+                                  /*null_allowed=*/ false);
       }
       main_jni_conv->Next();
     }
     if (main_jni_conv->IsCurrentParamInRegister()) {
       __ GetCurrentThread(main_jni_conv->CurrentParamRegister());
-      __ Call(main_jni_conv->CurrentParamRegister(),
-              Offset(jni_start),
-              main_jni_conv->InterproceduralScratchRegister());
+      __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start));
     } else {
-      __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
-                          main_jni_conv->InterproceduralScratchRegister());
-      __ CallFromThread(jni_start, main_jni_conv->InterproceduralScratchRegister());
+      __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset());
+      __ CallFromThread(jni_start);
     }
     if (is_synchronized) {  // Check for exceptions from monitor enter.
-      __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), main_out_arg_size);
+      __ ExceptionPoll(main_out_arg_size);
     }
 
     // Store into stack_frame[saved_cookie_offset] the return value of JniMethodStart.
@@ -477,13 +480,13 @@
     FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
     if (main_jni_conv->IsCurrentParamOnStack()) {
       FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
-      __ CreateHandleScopeEntry(out_off, handle_scope_offset,
-                         mr_conv->InterproceduralScratchRegister(),
-                         false);
+      __ CreateHandleScopeEntry(out_off, handle_scope_offset, /*null_allowed=*/ false);
     } else {
       ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
-      __ CreateHandleScopeEntry(out_reg, handle_scope_offset,
-                         ManagedRegister::NoRegister(), false);
+      __ CreateHandleScopeEntry(out_reg,
+                                handle_scope_offset,
+                                ManagedRegister::NoRegister(),
+                                /*null_allowed=*/ false);
     }
   }
 
@@ -494,13 +497,10 @@
     // Register that will hold local indirect reference table
     if (main_jni_conv->IsCurrentParamInRegister()) {
       ManagedRegister jni_env = main_jni_conv->CurrentParamRegister();
-      DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister()));
       __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset<kPointerSize>());
     } else {
       FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
-      __ CopyRawPtrFromThread(jni_env,
-                              Thread::JniEnvOffset<kPointerSize>(),
-                              main_jni_conv->InterproceduralScratchRegister());
+      __ CopyRawPtrFromThread(jni_env, Thread::JniEnvOffset<kPointerSize>());
     }
   }
 
@@ -509,18 +509,13 @@
       ArtMethod::EntryPointFromJniOffset(InstructionSetPointerSize(instruction_set));
   if (UNLIKELY(is_critical_native)) {
     if (main_jni_conv->UseTailCall()) {
-      __ Jump(main_jni_conv->HiddenArgumentRegister(),
-              jni_entrypoint_offset,
-              main_jni_conv->InterproceduralScratchRegister());
+      __ Jump(main_jni_conv->HiddenArgumentRegister(), jni_entrypoint_offset);
     } else {
-      __ Call(main_jni_conv->HiddenArgumentRegister(),
-              jni_entrypoint_offset,
-              main_jni_conv->InterproceduralScratchRegister());
+      __ Call(main_jni_conv->HiddenArgumentRegister(), jni_entrypoint_offset);
     }
   } else {
     __ Call(FrameOffset(main_out_arg_size + mr_conv->MethodStackOffset().SizeValue()),
-            jni_entrypoint_offset,
-            main_jni_conv->InterproceduralScratchRegister());
+            jni_entrypoint_offset);
   }
 
   // 10. Fix differences in result widths.
@@ -601,7 +596,7 @@
     // Pass saved local reference state.
     if (end_jni_conv->IsCurrentParamOnStack()) {
       FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
-      __ Copy(out_off, saved_cookie_offset, end_jni_conv->InterproceduralScratchRegister(), 4);
+      __ Copy(out_off, saved_cookie_offset, 4);
     } else {
       ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
       __ Load(out_reg, saved_cookie_offset, 4);
@@ -611,25 +606,24 @@
       // Pass object for unlocking.
       if (end_jni_conv->IsCurrentParamOnStack()) {
         FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
-        __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset,
-                           end_jni_conv->InterproceduralScratchRegister(),
-                           false);
+        __ CreateHandleScopeEntry(out_off,
+                                  locked_object_handle_scope_offset,
+                                  /*null_allowed=*/ false);
       } else {
         ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
-        __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset,
-                           ManagedRegister::NoRegister(), false);
+        __ CreateHandleScopeEntry(out_reg,
+                                  locked_object_handle_scope_offset,
+                                  ManagedRegister::NoRegister(),
+                                  /*null_allowed=*/ false);
       }
       end_jni_conv->Next();
     }
     if (end_jni_conv->IsCurrentParamInRegister()) {
       __ GetCurrentThread(end_jni_conv->CurrentParamRegister());
-      __ Call(end_jni_conv->CurrentParamRegister(),
-              Offset(jni_end),
-              end_jni_conv->InterproceduralScratchRegister());
+      __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end));
     } else {
-      __ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(),
-                          end_jni_conv->InterproceduralScratchRegister());
-      __ CallFromThread(jni_end, end_jni_conv->InterproceduralScratchRegister());
+      __ GetCurrentThread(end_jni_conv->CurrentParamStackOffset());
+      __ CallFromThread(jni_end);
     }
 
     // 13. Reload return value
@@ -651,7 +645,7 @@
   // 15. Process pending exceptions from JNI call or monitor exit.
   //     @CriticalNative methods do not need exception poll in the stub.
   if (LIKELY(!is_critical_native)) {
-    __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0 /* stack_adjust= */);
+    __ ExceptionPoll(/* stack_adjust= */ 0);
   }
 
   // 16. Remove activation - need to restore callee save registers since the GC may have changed
@@ -685,14 +679,14 @@
 static void CopyParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
                           ManagedRuntimeCallingConvention* mr_conv,
                           JniCallingConvention* jni_conv) {
-  bool input_in_reg = mr_conv->IsCurrentParamInRegister();
+  // We spilled all registers, so use stack locations.
+  // TODO: Move args in registers for @CriticalNative.
+  bool input_in_reg = false;  // mr_conv->IsCurrentParamInRegister();
   bool output_in_reg = jni_conv->IsCurrentParamInRegister();
   FrameOffset handle_scope_offset(0);
   bool null_allowed = false;
   bool ref_param = jni_conv->IsCurrentParamAReference();
   CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
-  // input may be in register, on stack or both - but not none!
-  CHECK(input_in_reg || mr_conv->IsCurrentParamOnStack());
   if (output_in_reg) {  // output shouldn't straddle registers and stack
     CHECK(!jni_conv->IsCurrentParamOnStack());
   } else {
@@ -724,13 +718,12 @@
   } else if (!input_in_reg && !output_in_reg) {
     FrameOffset out_off = jni_conv->CurrentParamStackOffset();
     if (ref_param) {
-      __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(),
-                         null_allowed);
+      __ CreateHandleScopeEntry(out_off, handle_scope_offset, null_allowed);
     } else {
       FrameOffset in_off = mr_conv->CurrentParamStackOffset();
       size_t param_size = mr_conv->CurrentParamSize();
       CHECK_EQ(param_size, jni_conv->CurrentParamSize());
-      __ Copy(out_off, in_off, mr_conv->InterproceduralScratchRegister(), param_size);
+      __ Copy(out_off, in_off, param_size);
     }
   } else if (!input_in_reg && output_in_reg) {
     FrameOffset in_off = mr_conv->CurrentParamStackOffset();
@@ -738,7 +731,10 @@
     // Check that incoming stack arguments are above the current stack frame.
     CHECK_GT(in_off.Uint32Value(), mr_conv->GetDisplacement().Uint32Value());
     if (ref_param) {
-      __ CreateHandleScopeEntry(out_reg, handle_scope_offset, ManagedRegister::NoRegister(), null_allowed);
+      __ CreateHandleScopeEntry(out_reg,
+                                handle_scope_offset,
+                                ManagedRegister::NoRegister(),
+                                null_allowed);
     } else {
       size_t param_size = mr_conv->CurrentParamSize();
       CHECK_EQ(param_size, jni_conv->CurrentParamSize());
@@ -752,8 +748,7 @@
     CHECK_LT(out_off.Uint32Value(), jni_conv->GetDisplacement().Uint32Value());
     if (ref_param) {
       // TODO: recycle value in in_reg rather than reload from handle scope
-      __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(),
-                         null_allowed);
+      __ CreateHandleScopeEntry(out_off, handle_scope_offset, null_allowed);
     } else {
       size_t param_size = mr_conv->CurrentParamSize();
       CHECK_EQ(param_size, jni_conv->CurrentParamSize());
@@ -764,7 +759,7 @@
         // store where input straddles registers and stack
         CHECK_EQ(param_size, 8u);
         FrameOffset in_off = mr_conv->CurrentParamStackOffset();
-        __ StoreSpanning(out_off, in_reg, in_off, mr_conv->InterproceduralScratchRegister());
+        __ StoreSpanning(out_off, in_reg, in_off);
       }
     }
   }
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 4e643ba..6776f12 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -26,7 +26,12 @@
 namespace art {
 namespace x86 {
 
-static_assert(kX86PointerSize == PointerSize::k32, "Unexpected x86 pointer size");
+static constexpr Register kManagedCoreArgumentRegisters[] = {
+    EAX, ECX, EDX, EBX
+};
+static constexpr size_t kManagedCoreArgumentRegistersCount =
+    arraysize(kManagedCoreArgumentRegisters);
+static constexpr size_t kManagedFpArgumentRegistersCount = 4u;
 
 static constexpr ManagedRegister kCalleeSaveRegisters[] = {
     // Core registers.
@@ -67,14 +72,6 @@
 
 // Calling convention
 
-ManagedRegister X86ManagedRuntimeCallingConvention::InterproceduralScratchRegister() const {
-  return X86ManagedRegister::FromCpuRegister(ECX);
-}
-
-ManagedRegister X86JniCallingConvention::InterproceduralScratchRegister() const {
-  return X86ManagedRegister::FromCpuRegister(ECX);
-}
-
 ManagedRegister X86JniCallingConvention::ReturnScratchRegister() const {
   return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop
 }
@@ -113,49 +110,58 @@
   return X86ManagedRegister::FromCpuRegister(EAX);
 }
 
+void X86ManagedRuntimeCallingConvention::ResetIterator(FrameOffset displacement) {
+  ManagedRuntimeCallingConvention::ResetIterator(displacement);
+  gpr_arg_count_ = 1u;  // Skip EAX for ArtMethod*
+}
+
+void X86ManagedRuntimeCallingConvention::Next() {
+  if (!IsCurrentParamAFloatOrDouble()) {
+    gpr_arg_count_ += IsCurrentParamALong() ? 2u : 1u;
+  }
+  ManagedRuntimeCallingConvention::Next();
+}
+
 bool X86ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
-  return false;  // Everything is passed by stack
+  if (IsCurrentParamAFloatOrDouble()) {
+    return itr_float_and_doubles_ < kManagedFpArgumentRegistersCount;
+  } else {
+    // Don't split a long between the last register and the stack.
+    size_t extra_regs = IsCurrentParamALong() ? 1u : 0u;
+    return gpr_arg_count_ + extra_regs < kManagedCoreArgumentRegistersCount;
+  }
 }
 
 bool X86ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
-  // We assume all parameters are on stack, args coming via registers are spilled as entry_spills.
-  return true;
+  return !IsCurrentParamInRegister();
 }
 
 ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamRegister() {
-  ManagedRegister res = ManagedRegister::NoRegister();
-  if (!IsCurrentParamAFloatOrDouble()) {
-    switch (gpr_arg_count_) {
-      case 0:
-        res = X86ManagedRegister::FromCpuRegister(ECX);
-        break;
-      case 1:
-        res = X86ManagedRegister::FromCpuRegister(EDX);
-        break;
-      case 2:
-        // Don't split a long between the last register and the stack.
-        if (IsCurrentParamALong()) {
-          return ManagedRegister::NoRegister();
-        }
-        res = X86ManagedRegister::FromCpuRegister(EBX);
-        break;
-    }
-  } else if (itr_float_and_doubles_ < 4) {
+  DCHECK(IsCurrentParamInRegister());
+  if (IsCurrentParamAFloatOrDouble()) {
     // First four float parameters are passed via XMM0..XMM3
-    res = X86ManagedRegister::FromXmmRegister(
-                                 static_cast<XmmRegister>(XMM0 + itr_float_and_doubles_));
+    XmmRegister reg = static_cast<XmmRegister>(XMM0 + itr_float_and_doubles_);
+    return X86ManagedRegister::FromXmmRegister(reg);
+  } else {
+    if (IsCurrentParamALong()) {
+      switch (gpr_arg_count_) {
+        case 1:
+          static_assert(kManagedCoreArgumentRegisters[1] == ECX);
+          static_assert(kManagedCoreArgumentRegisters[2] == EDX);
+          return X86ManagedRegister::FromRegisterPair(ECX_EDX);
+        case 2:
+          static_assert(kManagedCoreArgumentRegisters[2] == EDX);
+          static_assert(kManagedCoreArgumentRegisters[3] == EBX);
+          return X86ManagedRegister::FromRegisterPair(EDX_EBX);
+        default:
+          LOG(FATAL) << "UNREACHABLE";
+          UNREACHABLE();
+      }
+    } else {
+      Register core_reg = kManagedCoreArgumentRegisters[gpr_arg_count_];
+      return X86ManagedRegister::FromCpuRegister(core_reg);
+    }
   }
-  return res;
-}
-
-ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamHighLongRegister() {
-  ManagedRegister res = ManagedRegister::NoRegister();
-  DCHECK(IsCurrentParamALong());
-  switch (gpr_arg_count_) {
-    case 0: res = X86ManagedRegister::FromCpuRegister(EDX); break;
-    case 1: res = X86ManagedRegister::FromCpuRegister(EBX); break;
-  }
-  return res;
 }
 
 FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
@@ -164,48 +170,6 @@
                      (itr_slots_ * kFramePointerSize));  // offset into in args
 }
 
-const ManagedRegisterEntrySpills& X86ManagedRuntimeCallingConvention::EntrySpills() {
-  // We spill the argument registers on X86 to free them up for scratch use, we then assume
-  // all arguments are on the stack.
-  if (entry_spills_.size() == 0) {
-    ResetIterator(FrameOffset(0));
-    while (HasNext()) {
-      ManagedRegister in_reg = CurrentParamRegister();
-      bool is_long = IsCurrentParamALong();
-      if (!in_reg.IsNoRegister()) {
-        int32_t size = IsParamADouble(itr_args_) ? 8 : 4;
-        int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
-        ManagedRegisterSpill spill(in_reg, size, spill_offset);
-        entry_spills_.push_back(spill);
-        if (is_long) {
-          // special case, as we need a second register here.
-          in_reg = CurrentParamHighLongRegister();
-          DCHECK(!in_reg.IsNoRegister());
-          // We have to spill the second half of the long.
-          ManagedRegisterSpill spill2(in_reg, size, spill_offset + 4);
-          entry_spills_.push_back(spill2);
-        }
-
-        // Keep track of the number of GPRs allocated.
-        if (!IsCurrentParamAFloatOrDouble()) {
-          if (is_long) {
-            // Long was allocated in 2 registers.
-            gpr_arg_count_ += 2;
-          } else {
-            gpr_arg_count_++;
-          }
-        }
-      } else if (is_long) {
-        // We need to skip the unused last register, which is empty.
-        // If we are already out of registers, this is harmless.
-        gpr_arg_count_ += 2;
-      }
-      Next();
-    }
-  }
-  return entry_spills_;
-}
-
 // JNI calling convention
 
 X86JniCallingConvention::X86JniCallingConvention(bool is_static,
@@ -326,7 +290,6 @@
                       [](ManagedRegister callee_save) constexpr {
                         return callee_save.Equals(X86ManagedRegister::FromCpuRegister(EAX));
                       }));
-  DCHECK(!InterproceduralScratchRegister().Equals(X86ManagedRegister::FromCpuRegister(EAX)));
   return X86ManagedRegister::FromCpuRegister(EAX);
 }
 
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index 1273e8d..6f22c2b 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -30,23 +30,21 @@
                                         is_synchronized,
                                         shorty,
                                         PointerSize::k32),
-        gpr_arg_count_(0) {}
+        gpr_arg_count_(1u) {}  // Skip EAX for ArtMethod*
   ~X86ManagedRuntimeCallingConvention() override {}
   // Calling convention
   ManagedRegister ReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() const override;
+  void ResetIterator(FrameOffset displacement) override;
   // Managed runtime calling convention
   ManagedRegister MethodRegister() override;
+  void Next() override;
   bool IsCurrentParamInRegister() override;
   bool IsCurrentParamOnStack() override;
   ManagedRegister CurrentParamRegister() override;
   FrameOffset CurrentParamStackOffset() override;
-  const ManagedRegisterEntrySpills& EntrySpills() override;
 
  private:
   int gpr_arg_count_;
-  ManagedRegister CurrentParamHighLongRegister();
-  ManagedRegisterEntrySpills entry_spills_;
   DISALLOW_COPY_AND_ASSIGN(X86ManagedRuntimeCallingConvention);
 };
 
@@ -61,7 +59,6 @@
   // Calling convention
   ManagedRegister ReturnRegister() override;
   ManagedRegister IntReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() const override;
   // JNI calling convention
   size_t FrameSize() const override;
   size_t OutArgSize() const override;
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 9013b02..579347f 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -27,6 +27,11 @@
 namespace art {
 namespace x86_64 {
 
+static constexpr Register kCoreArgumentRegisters[] = {
+    RDI, RSI, RDX, RCX, R8, R9
+};
+static_assert(kMaxIntLikeRegisterArguments == arraysize(kCoreArgumentRegisters));
+
 static constexpr ManagedRegister kCalleeSaveRegisters[] = {
     // Core registers.
     X86_64ManagedRegister::FromCpuRegister(RBX),
@@ -87,14 +92,6 @@
 
 // Calling convention
 
-ManagedRegister X86_64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() const {
-  return X86_64ManagedRegister::FromCpuRegister(RAX);
-}
-
-ManagedRegister X86_64JniCallingConvention::InterproceduralScratchRegister() const {
-  return X86_64ManagedRegister::FromCpuRegister(RAX);
-}
-
 ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const {
   return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop
 }
@@ -130,58 +127,37 @@
 }
 
 bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
-  return !IsCurrentParamOnStack();
+  if (IsCurrentParamAFloatOrDouble()) {
+    return itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments;
+  } else {
+    size_t non_fp_arg_number = itr_args_ - itr_float_and_doubles_;
+    return /* method */ 1u + non_fp_arg_number < kMaxIntLikeRegisterArguments;
+  }
 }
 
 bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
-  // We assume all parameters are on stack, args coming via registers are spilled as entry_spills
-  return true;
+  return !IsCurrentParamInRegister();
 }
 
 ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() {
-  ManagedRegister res = ManagedRegister::NoRegister();
-  if (!IsCurrentParamAFloatOrDouble()) {
-    switch (itr_args_ - itr_float_and_doubles_) {
-    case 0: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
-    case 1: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
-    case 2: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
-    case 3: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
-    case 4: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
-    }
-  } else if (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments) {
+  DCHECK(IsCurrentParamInRegister());
+  if (IsCurrentParamAFloatOrDouble()) {
     // First eight float parameters are passed via XMM0..XMM7
-    res = X86_64ManagedRegister::FromXmmRegister(
-                                 static_cast<FloatRegister>(XMM0 + itr_float_and_doubles_));
+    FloatRegister fp_reg = static_cast<FloatRegister>(XMM0 + itr_float_and_doubles_);
+    return X86_64ManagedRegister::FromXmmRegister(fp_reg);
+  } else {
+    size_t non_fp_arg_number = itr_args_ - itr_float_and_doubles_;
+    Register core_reg = kCoreArgumentRegisters[/* method */ 1u + non_fp_arg_number];
+    return X86_64ManagedRegister::FromCpuRegister(core_reg);
   }
-  return res;
 }
 
 FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
-  CHECK(IsCurrentParamOnStack());
   return FrameOffset(displacement_.Int32Value() +  // displacement
                      static_cast<size_t>(kX86_64PointerSize) +  // Method ref
                      itr_slots_ * sizeof(uint32_t));  // offset into in args
 }
 
-const ManagedRegisterEntrySpills& X86_64ManagedRuntimeCallingConvention::EntrySpills() {
-  // We spill the argument registers on X86 to free them up for scratch use, we then assume
-  // all arguments are on the stack.
-  if (entry_spills_.size() == 0) {
-    ResetIterator(FrameOffset(0));
-    while (HasNext()) {
-      ManagedRegister in_reg = CurrentParamRegister();
-      if (!in_reg.IsNoRegister()) {
-        int32_t size = IsParamALongOrDouble(itr_args_) ? 8 : 4;
-        int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
-        ManagedRegisterSpill spill(in_reg, size, spill_offset);
-        entry_spills_.push_back(spill);
-      }
-      Next();
-    }
-  }
-  return entry_spills_;
-}
-
 // JNI calling convention
 
 X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static,
@@ -334,7 +310,6 @@
                       [](ManagedRegister callee_save) constexpr {
                         return callee_save.Equals(X86_64ManagedRegister::FromCpuRegister(R11));
                       }));
-  DCHECK(!InterproceduralScratchRegister().Equals(X86_64ManagedRegister::FromCpuRegister(R11)));
   return X86_64ManagedRegister::FromCpuRegister(R11);
 }
 
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
index 37b5978..d043a3e 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.h
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -33,16 +33,14 @@
   ~X86_64ManagedRuntimeCallingConvention() override {}
   // Calling convention
   ManagedRegister ReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() const override;
   // Managed runtime calling convention
   ManagedRegister MethodRegister() override;
   bool IsCurrentParamInRegister() override;
   bool IsCurrentParamOnStack() override;
   ManagedRegister CurrentParamRegister() override;
   FrameOffset CurrentParamStackOffset() override;
-  const ManagedRegisterEntrySpills& EntrySpills() override;
+
  private:
-  ManagedRegisterEntrySpills entry_spills_;
   DISALLOW_COPY_AND_ASSIGN(X86_64ManagedRuntimeCallingConvention);
 };
 
@@ -56,7 +54,6 @@
   // Calling convention
   ManagedRegister ReturnRegister() override;
   ManagedRegister IntReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() const override;
   // JNI calling convention
   size_t FrameSize() const override;
   size_t OutArgSize() const override;