Inline IRT frame push/pop into JNI stubs.
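
The IRT (indirect reference table) frame push/pop around a JNI call is
essentially a save and restore of the local reference table segment state,
so it can be emitted directly in the compiled stub instead of going through
a runtime call. To let the stub address fields of an object held in a
register rather than only ESP-relative stack slots, the x86
JNIMacroAssembler below gains Store()/Load() overloads that take an
arbitrary base register, plus a CoreRegisterWithSize() helper (trivial on
x86, where core registers are 4 bytes). A minimal usage sketch, with env
holding a JNIEnv* in a scratch core register, cookie a free core register,
and kCookieOffset standing in for the real field offset (the register
choices and the offset name are illustrative, not taken from this change;
jni_asm stands for the JNIMacroAssembler emitting the stub):

    // Read the segment state field relative to the JNIEnv* register.
    jni_asm->Load(cookie, env, MemberOffset(kCookieOffset), /*size=*/ 4u);
    // ... native call ...
    // Write the saved value back once the native call returns.
    jni_asm->Store(env, MemberOffset(kCookieOffset), cookie, /*size=*/ 4u);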

Golem results for art-opt-cc (higher is better):
linux-ia32                       before after
NativeDowncallStaticNormal       25.704 26.839 (+4.414%)
NativeDowncallStaticNormal6      23.857 25.086 (+5.152%)
NativeDowncallStaticNormalRefs6  23.704 25.248 (+6.513%)
NativeDowncallVirtualNormal      25.578 27.000 (+5.560%)
NativeDowncallVirtualNormal6     23.704 24.925 (+5.153%)
NativeDowncallVirtualNormalRefs6 23.704 25.074 (+5.870%)
NativeDowncallStaticFast         100.65 149.13 (+48.17%)
NativeDowncallStaticFast6        78.304 107.39 (+37.71%)
NativeDowncallStaticFastRefs6    76.962 104.45 (+35.71%)
NativeDowncallVirtualFast        100.40 147.28 (+46.69%)
NativeDowncallVirtualFast6       79.302 106.34 (+34.10%)
NativeDowncallVirtualFastRefs6   76.617 103.29 (+34.82%)
linux-x64                        before after
NativeDowncallStaticNormal       26.083 26.987 (+3.465%)
NativeDowncallStaticNormal6      24.606 25.411 (+3.271%)
NativeDowncallStaticNormalRefs6  24.150 25.086 (+3.877%)
NativeDowncallVirtualNormal      25.743 26.812 (+4.156%)
NativeDowncallVirtualNormal6     24.294 25.248 (+3.927%)
NativeDowncallVirtualNormalRefs6 23.857 25.086 (+5.152%)
NativeDowncallStaticFast         109.95 133.10 (+21.06%)
NativeDowncallStaticFast6        90.274 109.12 (+20.87%)
NativeDowncallStaticFastRefs6    87.282 105.29 (+20.63%)
NativeDowncallVirtualFast        104.00 127.55 (+22.65%)
NativeDowncallVirtualFast6       88.191 106.73 (+21.02%)
NativeDowncallVirtualFastRefs6   85.530 102.09 (+19.36%)
linux-armv7                      before after
NativeDowncallStaticNormal       6.1148 6.3694 (+4.164%)
NativeDowncallStaticNormal6      5.6845 5.9026 (+3.837%)
NativeDowncallStaticNormalRefs6  5.4054 5.6022 (+3.641%)
NativeDowncallVirtualNormal      5.4726 5.7088 (+4.316%)
NativeDowncallVirtualNormal6     5.1789 5.3685 (+3.660%)
NativeDowncallVirtualNormalRefs6 4.9140 5.0902 (+3.586%)
NativeDowncallStaticFast         16.683 18.058 (+8.239%)
NativeDowncallStaticFast6        13.951 14.896 (+6.770%)
NativeDowncallStaticFastRefs6    12.279 13.006 (+5.919%)
NativeDowncallVirtualFast        16.161 17.848 (+10.44%)
NativeDowncallVirtualFast6       14.085 15.196 (+7.892%)
NativeDowncallVirtualFastRefs6   12.089 12.897 (+6.683%)
linux-armv8                      before after
NativeDowncallStaticNormal       6.0663 6.4229 (+5.879%)
NativeDowncallStaticNormal6      5.7252 6.0437 (+5.563%)
NativeDowncallStaticNormalRefs6  5.3114 5.5814 (+5.082%)
NativeDowncallVirtualNormal      5.8795 6.2651 (+6.558%)
NativeDowncallVirtualNormal6     5.6232 5.9494 (+5.801%)
NativeDowncallVirtualNormalRefs6 5.1862 5.4429 (+4.948%)
NativeDowncallStaticFast         17.638 19.183 (+8.760%)
NativeDowncallStaticFast6        14.903 16.161 (+8.438%)
NativeDowncallStaticFastRefs6    12.475 13.235 (+6.094%)
NativeDowncallVirtualFast        15.826 17.848 (+12.78%)
NativeDowncallVirtualFast6       14.064 15.504 (+10.24%)
NativeDowncallVirtualFastRefs6   11.628 12.475 (+7.285%)

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: run-gtests.sh
Test: testrunner.py --target --optimizing
Bug: 172332525
Change-Id: I5ecfa7a661f08ab63dd2a75d666e1c1b9121935f
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index 2710eb1..3c88447 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -127,33 +127,48 @@
   }
 }
 
+ManagedRegister X86JNIMacroAssembler::CoreRegisterWithSize(ManagedRegister src, size_t size) {
+  DCHECK(src.AsX86().IsCpuRegister());
+  DCHECK_EQ(size, 4u);
+  return src;
+}
+
 void X86JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
   DecreaseFrameSizeImpl(&asm_, adjust);
 }
 
 void X86JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
+  Store(X86ManagedRegister::FromCpuRegister(ESP), MemberOffset(offs.Int32Value()), msrc, size);
+}
+
+void X86JNIMacroAssembler::Store(ManagedRegister mbase,
+                                 MemberOffset offs,
+                                 ManagedRegister msrc,
+                                 size_t size) {
+  X86ManagedRegister base = mbase.AsX86();
   X86ManagedRegister src = msrc.AsX86();
   if (src.IsNoRegister()) {
     CHECK_EQ(0u, size);
   } else if (src.IsCpuRegister()) {
     CHECK_EQ(4u, size);
-    __ movl(Address(ESP, offs), src.AsCpuRegister());
+    __ movl(Address(base.AsCpuRegister(), offs), src.AsCpuRegister());
   } else if (src.IsRegisterPair()) {
     CHECK_EQ(8u, size);
-    __ movl(Address(ESP, offs), src.AsRegisterPairLow());
-    __ movl(Address(ESP, FrameOffset(offs.Int32Value()+4)), src.AsRegisterPairHigh());
+    __ movl(Address(base.AsCpuRegister(), offs), src.AsRegisterPairLow());
+    __ movl(Address(base.AsCpuRegister(), MemberOffset(offs.Int32Value() + 4)),
+            src.AsRegisterPairHigh());
   } else if (src.IsX87Register()) {
     if (size == 4) {
-      __ fstps(Address(ESP, offs));
+      __ fstps(Address(base.AsCpuRegister(), offs));
     } else {
-      __ fstpl(Address(ESP, offs));
+      __ fstpl(Address(base.AsCpuRegister(), offs));
     }
   } else {
     CHECK(src.IsXmmRegister());
     if (size == 4) {
-      __ movss(Address(ESP, offs), src.AsXmmRegister());
+      __ movss(Address(base.AsCpuRegister(), offs), src.AsXmmRegister());
     } else {
-      __ movsd(Address(ESP, offs), src.AsXmmRegister());
+      __ movsd(Address(base.AsCpuRegister(), offs), src.AsXmmRegister());
     }
   }
 }
@@ -191,28 +206,37 @@
 }
 
 void X86JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+  Load(mdest, X86ManagedRegister::FromCpuRegister(ESP), MemberOffset(src.Int32Value()), size);
+}
+
+void X86JNIMacroAssembler::Load(ManagedRegister mdest,
+                                ManagedRegister mbase,
+                                MemberOffset offs,
+                                size_t size) {
   X86ManagedRegister dest = mdest.AsX86();
+  X86ManagedRegister base = mbase.AsX86();
   if (dest.IsNoRegister()) {
     CHECK_EQ(0u, size);
   } else if (dest.IsCpuRegister()) {
     CHECK_EQ(4u, size);
-    __ movl(dest.AsCpuRegister(), Address(ESP, src));
+    __ movl(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs));
   } else if (dest.IsRegisterPair()) {
     CHECK_EQ(8u, size);
-    __ movl(dest.AsRegisterPairLow(), Address(ESP, src));
-    __ movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4)));
+    __ movl(dest.AsRegisterPairLow(), Address(base.AsCpuRegister(), offs));
+    __ movl(dest.AsRegisterPairHigh(),
+            Address(base.AsCpuRegister(), MemberOffset(offs.Int32Value() + 4)));
   } else if (dest.IsX87Register()) {
     if (size == 4) {
-      __ flds(Address(ESP, src));
+      __ flds(Address(base.AsCpuRegister(), offs));
     } else {
-      __ fldl(Address(ESP, src));
+      __ fldl(Address(base.AsCpuRegister(), offs));
     }
   } else {
     CHECK(dest.IsXmmRegister());
     if (size == 4) {
-      __ movss(dest.AsXmmRegister(), Address(ESP, src));
+      __ movss(dest.AsXmmRegister(), Address(base.AsCpuRegister(), offs));
     } else {
-      __ movsd(dest.AsXmmRegister(), Address(ESP, src));
+      __ movsd(dest.AsXmmRegister(), Address(base.AsCpuRegister(), offs));
     }
   }
 }