Do not create HandleScope for JNI transitions.

We previously created a HandleScope in the JNI transition frame
to hold references passed as jobject (jclass, etc.) to the native
function, which meant those references were spilled twice during
the transition: once to the argument spill slots and once to the
HandleScope.

We now construct the jobject as a pointer to the reference spilled
to the reserved out vreg area in the caller's frame, and the jclass
for static methods as a pointer to the method's declaring class
field. This reduces the amount of work required in the JNI
transition, both on entry (in the compiled stub) and on exit (in
JniMethodEnd*).
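
As an illustration (a hedged sketch with hypothetical helper names,
not the code added by this change), the two constructions boil down
to:

  // Sketch only; helper names are hypothetical.
  jobject ArgumentAsJObject(StackReference<mirror::Object>* spilled_arg) {
    // A null reference must be passed as a null jobject; any other
    // reference is passed as the address of its spill slot.
    return spilled_arg->IsNull() ? nullptr
                                 : reinterpret_cast<jobject>(spilled_arg);
  }

  jclass DeclaringClassAsJClass(ArtMethod* method) {
    // The declaring class is the first field of ArtMethod, so the
    // method pointer doubles as a pointer to that reference.
    DCHECK_EQ(ArtMethod::DeclaringClassOffset().SizeValue(), 0u);
    return reinterpret_cast<jclass>(method);
  }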

Some additional work is now required when the GC visits the
reference arguments of a native method: we need to walk the
method's shorty to locate them, which was unnecessary with a
HandleScope.
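
The shape of that walk is roughly the following sketch (assumed and
simplified; the real stack visitor also has to map each reference
argument to its spill slot):

  // Sketch only: count which of a native method's arguments are
  // references; each one's spilled out vreg slot must be visited.
  size_t SketchCountNativeRefArgs(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    uint32_t shorty_len;
    const char* shorty = method->GetShorty(&shorty_len);
    size_t refs = method->IsStatic() ? 0u : 1u;  // Implicit 'this'.
    for (uint32_t i = 1u; i != shorty_len; ++i) {  // [0] = return type.
      if (shorty[i] == 'L') {
        ++refs;
      }
    }
    return refs;
  }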

Also fix Thread::InitStackHwm() to calculate the correct stack
size needed by the new Thread::IsJniTransitionReference().
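
For context, a hedged sketch of what such a check could look like
(assumed semantics; the accessors are stand-ins, not the actual
Thread API):

  // Sketch only: a JNI transition reference points at a spilled
  // argument slot on the thread's own stack, so a range check
  // against the stack bounds recorded by InitStackHwm() suffices.
  bool SketchIsJniTransitionReference(const Thread* self, jobject obj) {
    DCHECK(obj != nullptr);
    uint8_t* raw = reinterpret_cast<uint8_t*>(obj);
    uint8_t* begin = self->GetStackBegin();  // assumed accessor
    size_t size = self->GetStackSize();      // assumed accessor
    return begin <= raw && raw < begin + size;
  }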

The results for StringToBytesBenchmark on blueline little cores
running at a fixed frequency of 1420800 are approximately:

arm64 (medians from 3 runs)    before   after
  timeGetBytesAscii EMPTY      447.33   436.86
  timeGetBytesIso88591 EMPTY   440.52   431.13
  timeGetBytesUtf8 EMPTY       432.31   409.82

arm (medians from 3 runs)      before   after
  timeGetBytesAscii EMPTY      500.53   490.87
  timeGetBytesIso88591 EMPTY   496.45   495.30
  timeGetBytesUtf8 EMPTY       488.84   472.68

Test: m test-art-host-gtest
Test: testrunner.py --host
Test: testrunner.py --host --gcstress
Test: testrunner.py --host --jit-on-first-use
Test: testrunner.py --host --jit-on-first-use --gcstress
Test: run-gtests.sh
Test: testrunner.py --target --optimizing
Test: boots.
Bug: 172332525
Change-Id: I658f9d87071587b3e89f31c65feca976a11e9cc2
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index 02b1c7a..20dc399 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -1,8 +1,8 @@
 // TODO These arrays should be generated automatically or have instructions for re-creation.
 static constexpr uint8_t expected_asm_kThumb2[] = {
-    0x2D, 0xE9, 0xE0, 0x4D, 0x2D, 0xED, 0x10, 0x8A, 0x89, 0xB0, 0x00, 0x90,
-    0x21, 0x91, 0x8D, 0xED, 0x22, 0x0A, 0x23, 0x92, 0x24, 0x93, 0x88, 0xB0,
-    0x08, 0xB0, 0x09, 0xB0, 0xBD, 0xEC, 0x10, 0x8A, 0xBD, 0xE8, 0xE0, 0x4D,
+    0x2D, 0xE9, 0xE0, 0x4D, 0x2D, 0xED, 0x10, 0x8A, 0x85, 0xB0, 0x00, 0x90,
+    0x1D, 0x91, 0x8D, 0xED, 0x1E, 0x0A, 0x1F, 0x92, 0x20, 0x93, 0x88, 0xB0,
+    0x08, 0xB0, 0x05, 0xB0, 0xBD, 0xEC, 0x10, 0x8A, 0xBD, 0xE8, 0xE0, 0x4D,
     0xD9, 0xF8, 0x30, 0x80, 0x70, 0x47,
 };
 static constexpr uint8_t expected_cfi_kThumb2[] = {
@@ -11,13 +11,12 @@
     0x51, 0x16, 0x05, 0x52, 0x15, 0x05, 0x53, 0x14, 0x05, 0x54, 0x13, 0x05,
     0x55, 0x12, 0x05, 0x56, 0x11, 0x05, 0x57, 0x10, 0x05, 0x58, 0x0F, 0x05,
     0x59, 0x0E, 0x05, 0x5A, 0x0D, 0x05, 0x5B, 0x0C, 0x05, 0x5C, 0x0B, 0x05,
-    0x5D, 0x0A, 0x05, 0x5E, 0x09, 0x05, 0x5F, 0x08, 0x42, 0x0E, 0x80, 0x01,
-    0x4E, 0x0E, 0xA0, 0x01, 0x42, 0x0E, 0x80, 0x01, 0x0A, 0x42, 0x0E, 0x5C,
-    0x44, 0x0E, 0x1C, 0x06, 0x50, 0x06, 0x51, 0x06, 0x52, 0x06, 0x53, 0x06,
-    0x54, 0x06, 0x55, 0x06, 0x56, 0x06, 0x57, 0x06, 0x58, 0x06, 0x59, 0x06,
-    0x5A, 0x06, 0x5B, 0x06, 0x5C, 0x06, 0x5D, 0x06, 0x5E, 0x06, 0x5F, 0x44,
-    0x0E, 0x00, 0xC5, 0xC6, 0xC7, 0xC8, 0xCA, 0xCB, 0xCE, 0x46, 0x0B, 0x0E,
-    0x80, 0x01,
+    0x5D, 0x0A, 0x05, 0x5E, 0x09, 0x05, 0x5F, 0x08, 0x42, 0x0E, 0x70, 0x4E,
+    0x0E, 0x90, 0x01, 0x42, 0x0E, 0x70, 0x0A, 0x42, 0x0E, 0x5C, 0x44, 0x0E,
+    0x1C, 0x06, 0x50, 0x06, 0x51, 0x06, 0x52, 0x06, 0x53, 0x06, 0x54, 0x06,
+    0x55, 0x06, 0x56, 0x06, 0x57, 0x06, 0x58, 0x06, 0x59, 0x06, 0x5A, 0x06,
+    0x5B, 0x06, 0x5C, 0x06, 0x5D, 0x06, 0x5E, 0x06, 0x5F, 0x44, 0x0E, 0x00,
+    0xC5, 0xC6, 0xC7, 0xC8, 0xCA, 0xCB, 0xCE, 0x46, 0x0B, 0x0E, 0x70,
 };
 // 0x00000000: push {r5,r6,r7,r8,r10,r11,lr}
 // 0x00000004: .cfi_def_cfa_offset: 28
@@ -46,19 +45,19 @@
 // 0x00000008: .cfi_offset_extended: r93 at cfa-40
 // 0x00000008: .cfi_offset_extended: r94 at cfa-36
 // 0x00000008: .cfi_offset_extended: r95 at cfa-32
-// 0x00000008: sub sp, #36
-// 0x0000000a: .cfi_def_cfa_offset: 128
+// 0x00000008: sub sp, #20
+// 0x0000000a: .cfi_def_cfa_offset: 112
 // 0x0000000a: str r0, [sp]
-// 0x0000000c: str r1, [sp, #132]
-// 0x0000000e: vstr s0, [sp, #136]
-// 0x00000012: str r2, [sp, #140]
-// 0x00000014: str r3, [sp, #144]
+// 0x0000000c: str r1, [sp, #116]
+// 0x0000000e: vstr s0, [sp, #120]
+// 0x00000012: str r2, [sp, #124]
+// 0x00000014: str r3, [sp, #128]
 // 0x00000016: sub sp, #32
-// 0x00000018: .cfi_def_cfa_offset: 160
+// 0x00000018: .cfi_def_cfa_offset: 144
 // 0x00000018: add sp, #32
-// 0x0000001a: .cfi_def_cfa_offset: 128
+// 0x0000001a: .cfi_def_cfa_offset: 112
 // 0x0000001a: .cfi_remember_state
-// 0x0000001a: add sp, #36
+// 0x0000001a: add sp, #20
 // 0x0000001c: .cfi_def_cfa_offset: 92
 // 0x0000001c: vpop {s16-s31}
 // 0x00000020: .cfi_def_cfa_offset: 28
@@ -90,123 +89,123 @@
 // 0x00000024: ldr r8, [tr, #48] ; is_gc_marking
 // 0x00000028: bx lr
 // 0x0000002a: .cfi_restore_state
-// 0x0000002a: .cfi_def_cfa_offset: 128
+// 0x0000002a: .cfi_def_cfa_offset: 112
 
 static constexpr uint8_t expected_asm_kArm64[] = {
-    0xFF, 0x03, 0x03, 0xD1, 0xF3, 0x53, 0x06, 0xA9, 0xF5, 0x5B, 0x07, 0xA9,
-    0xF7, 0x63, 0x08, 0xA9, 0xF9, 0x6B, 0x09, 0xA9, 0xFB, 0x73, 0x0A, 0xA9,
-    0xFD, 0x7B, 0x0B, 0xA9, 0xE8, 0x27, 0x02, 0x6D, 0xEA, 0x2F, 0x03, 0x6D,
-    0xEC, 0x37, 0x04, 0x6D, 0xEE, 0x3F, 0x05, 0x6D, 0xE0, 0x03, 0x00, 0xF9,
-    0xE1, 0xCB, 0x00, 0xB9, 0xE0, 0xCF, 0x00, 0xBD, 0xE2, 0xD3, 0x00, 0xB9,
-    0xE3, 0xD7, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1, 0xFF, 0x83, 0x00, 0x91,
-    0xF3, 0x53, 0x46, 0xA9, 0xF5, 0x5B, 0x47, 0xA9, 0xF7, 0x63, 0x48, 0xA9,
-    0xF9, 0x6B, 0x49, 0xA9, 0xFB, 0x73, 0x4A, 0xA9, 0xFD, 0x7B, 0x4B, 0xA9,
-    0xE8, 0x27, 0x42, 0x6D, 0xEA, 0x2F, 0x43, 0x6D, 0xEC, 0x37, 0x44, 0x6D,
-    0xEE, 0x3F, 0x45, 0x6D, 0x74, 0x32, 0x40, 0xB9, 0xFF, 0x03, 0x03, 0x91,
+    0xFF, 0xC3, 0x02, 0xD1, 0xF3, 0x53, 0x05, 0xA9, 0xF5, 0x5B, 0x06, 0xA9,
+    0xF7, 0x63, 0x07, 0xA9, 0xF9, 0x6B, 0x08, 0xA9, 0xFB, 0x73, 0x09, 0xA9,
+    0xFD, 0x7B, 0x0A, 0xA9, 0xE8, 0x27, 0x01, 0x6D, 0xEA, 0x2F, 0x02, 0x6D,
+    0xEC, 0x37, 0x03, 0x6D, 0xEE, 0x3F, 0x04, 0x6D, 0xE0, 0x03, 0x00, 0xF9,
+    0xE1, 0xBB, 0x00, 0xB9, 0xE0, 0xBF, 0x00, 0xBD, 0xE2, 0xC3, 0x00, 0xB9,
+    0xE3, 0xC7, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1, 0xFF, 0x83, 0x00, 0x91,
+    0xF3, 0x53, 0x45, 0xA9, 0xF5, 0x5B, 0x46, 0xA9, 0xF7, 0x63, 0x47, 0xA9,
+    0xF9, 0x6B, 0x48, 0xA9, 0xFB, 0x73, 0x49, 0xA9, 0xFD, 0x7B, 0x4A, 0xA9,
+    0xE8, 0x27, 0x41, 0x6D, 0xEA, 0x2F, 0x42, 0x6D, 0xEC, 0x37, 0x43, 0x6D,
+    0xEE, 0x3F, 0x44, 0x6D, 0x74, 0x32, 0x40, 0xB9, 0xFF, 0xC3, 0x02, 0x91,
     0xC0, 0x03, 0x5F, 0xD6,
 };
 static constexpr uint8_t expected_cfi_kArm64[] = {
-    0x44, 0x0E, 0xC0, 0x01, 0x44, 0x93, 0x18, 0x94, 0x16, 0x44, 0x95, 0x14,
+    0x44, 0x0E, 0xB0, 0x01, 0x44, 0x93, 0x18, 0x94, 0x16, 0x44, 0x95, 0x14,
     0x96, 0x12, 0x44, 0x97, 0x10, 0x98, 0x0E, 0x44, 0x99, 0x0C, 0x9A, 0x0A,
     0x44, 0x9B, 0x08, 0x9C, 0x06, 0x44, 0x9D, 0x04, 0x9E, 0x02, 0x44, 0x05,
     0x48, 0x28, 0x05, 0x49, 0x26, 0x44, 0x05, 0x4A, 0x24, 0x05, 0x4B, 0x22,
     0x44, 0x05, 0x4C, 0x20, 0x05, 0x4D, 0x1E, 0x44, 0x05, 0x4E, 0x1C, 0x05,
-    0x4F, 0x1A, 0x58, 0x0E, 0xE0, 0x01, 0x44, 0x0E, 0xC0, 0x01, 0x0A, 0x44,
+    0x4F, 0x1A, 0x58, 0x0E, 0xD0, 0x01, 0x44, 0x0E, 0xB0, 0x01, 0x0A, 0x44,
     0xD3, 0xD4, 0x44, 0xD5, 0xD6, 0x44, 0xD7, 0xD8, 0x44, 0xD9, 0xDA, 0x44,
     0xDB, 0xDC, 0x44, 0xDD, 0xDE, 0x44, 0x06, 0x48, 0x06, 0x49, 0x44, 0x06,
     0x4A, 0x06, 0x4B, 0x44, 0x06, 0x4C, 0x06, 0x4D, 0x44, 0x06, 0x4E, 0x06,
-    0x4F, 0x48, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0xC0, 0x01,
+    0x4F, 0x48, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0xB0, 0x01,
 };
-// 0x00000000: sub sp, sp, #0xc0 (192)
-// 0x00000004: .cfi_def_cfa_offset: 192
-// 0x00000004: stp tr, x20, [sp, #96]
+// 0x00000000: sub sp, sp, #0xb0 (176)
+// 0x00000004: .cfi_def_cfa_offset: 176
+// 0x00000004: stp tr, x20, [sp, #80]
 // 0x00000008: .cfi_offset: r19 at cfa-96
 // 0x00000008: .cfi_offset: r20 at cfa-88
-// 0x00000008: stp x21, x22, [sp, #112]
+// 0x00000008: stp x21, x22, [sp, #96]
 // 0x0000000c: .cfi_offset: r21 at cfa-80
 // 0x0000000c: .cfi_offset: r22 at cfa-72
-// 0x0000000c: stp x23, x24, [sp, #128]
+// 0x0000000c: stp x23, x24, [sp, #112]
 // 0x00000010: .cfi_offset: r23 at cfa-64
 // 0x00000010: .cfi_offset: r24 at cfa-56
-// 0x00000010: stp x25, x26, [sp, #144]
+// 0x00000010: stp x25, x26, [sp, #128]
 // 0x00000014: .cfi_offset: r25 at cfa-48
 // 0x00000014: .cfi_offset: r26 at cfa-40
-// 0x00000014: stp x27, x28, [sp, #160]
+// 0x00000014: stp x27, x28, [sp, #144]
 // 0x00000018: .cfi_offset: r27 at cfa-32
 // 0x00000018: .cfi_offset: r28 at cfa-24
-// 0x00000018: stp x29, lr, [sp, #176]
+// 0x00000018: stp x29, lr, [sp, #160]
 // 0x0000001c: .cfi_offset: r29 at cfa-16
 // 0x0000001c: .cfi_offset: r30 at cfa-8
-// 0x0000001c: stp d8, d9, [sp, #32]
+// 0x0000001c: stp d8, d9, [sp, #16]
 // 0x00000020: .cfi_offset_extended: r72 at cfa-160
 // 0x00000020: .cfi_offset_extended: r73 at cfa-152
-// 0x00000020: stp d10, d11, [sp, #48]
+// 0x00000020: stp d10, d11, [sp, #32]
 // 0x00000024: .cfi_offset_extended: r74 at cfa-144
 // 0x00000024: .cfi_offset_extended: r75 at cfa-136
-// 0x00000024: stp d12, d13, [sp, #64]
+// 0x00000024: stp d12, d13, [sp, #48]
 // 0x00000028: .cfi_offset_extended: r76 at cfa-128
 // 0x00000028: .cfi_offset_extended: r77 at cfa-120
-// 0x00000028: stp d14, d15, [sp, #80]
+// 0x00000028: stp d14, d15, [sp, #64]
 // 0x0000002c: .cfi_offset_extended: r78 at cfa-112
 // 0x0000002c: .cfi_offset_extended: r79 at cfa-104
 // 0x0000002c: str x0, [sp]
-// 0x00000030: str w1, [sp, #200]
-// 0x00000034: str s0, [sp, #204]
-// 0x00000038: str w2, [sp, #208]
-// 0x0000003c: str w3, [sp, #212]
+// 0x00000030: str w1, [sp, #184]
+// 0x00000034: str s0, [sp, #188]
+// 0x00000038: str w2, [sp, #192]
+// 0x0000003c: str w3, [sp, #196]
 // 0x00000040: sub sp, sp, #0x20 (32)
-// 0x00000044: .cfi_def_cfa_offset: 224
+// 0x00000044: .cfi_def_cfa_offset: 208
 // 0x00000044: add sp, sp, #0x20 (32)
-// 0x00000048: .cfi_def_cfa_offset: 192
+// 0x00000048: .cfi_def_cfa_offset: 176
 // 0x00000048: .cfi_remember_state
-// 0x00000048: ldp tr, x20, [sp, #96]
+// 0x00000048: ldp tr, x20, [sp, #80]
 // 0x0000004c: .cfi_restore: r19
 // 0x0000004c: .cfi_restore: r20
-// 0x0000004c: ldp x21, x22, [sp, #112]
+// 0x0000004c: ldp x21, x22, [sp, #96]
 // 0x00000050: .cfi_restore: r21
 // 0x00000050: .cfi_restore: r22
-// 0x00000050: ldp x23, x24, [sp, #128]
+// 0x00000050: ldp x23, x24, [sp, #112]
 // 0x00000054: .cfi_restore: r23
 // 0x00000054: .cfi_restore: r24
-// 0x00000054: ldp x25, x26, [sp, #144]
+// 0x00000054: ldp x25, x26, [sp, #128]
 // 0x00000058: .cfi_restore: r25
 // 0x00000058: .cfi_restore: r26
-// 0x00000058: ldp x27, x28, [sp, #160]
+// 0x00000058: ldp x27, x28, [sp, #144]
 // 0x0000005c: .cfi_restore: r27
 // 0x0000005c: .cfi_restore: r28
-// 0x0000005c: ldp x29, lr, [sp, #176]
+// 0x0000005c: ldp x29, lr, [sp, #160]
 // 0x00000060: .cfi_restore: r29
 // 0x00000060: .cfi_restore: r30
-// 0x00000060: ldp d8, d9, [sp, #32]
+// 0x00000060: ldp d8, d9, [sp, #16]
 // 0x00000064: .cfi_restore_extended: r72
 // 0x00000064: .cfi_restore_extended: r73
-// 0x00000064: ldp d10, d11, [sp, #48]
+// 0x00000064: ldp d10, d11, [sp, #32]
 // 0x00000068: .cfi_restore_extended: r74
 // 0x00000068: .cfi_restore_extended: r75
-// 0x00000068: ldp d12, d13, [sp, #64]
+// 0x00000068: ldp d12, d13, [sp, #48]
 // 0x0000006c: .cfi_restore_extended: r76
 // 0x0000006c: .cfi_restore_extended: r77
-// 0x0000006c: ldp d14, d15, [sp, #80]
+// 0x0000006c: ldp d14, d15, [sp, #64]
 // 0x00000070: .cfi_restore_extended: r78
 // 0x00000070: .cfi_restore_extended: r79
-// 0x00000070: ldr w20, [tr, #50] ; is_gc_marking
-// 0x00000074: add sp, sp, #0xc0 (192)
+// 0x00000070: ldr w20, [tr, #48] ; is_gc_marking
+// 0x00000074: add sp, sp, #0xb0 (176)
 // 0x00000078: .cfi_def_cfa_offset: 0
 // 0x00000078: ret
 // 0x0000007c: .cfi_restore_state
-// 0x0000007c: .cfi_def_cfa_offset: 192
+// 0x0000007c: .cfi_def_cfa_offset: 176
 
 static constexpr uint8_t expected_asm_kX86[] = {
-    0x57, 0x56, 0x55, 0x83, 0xC4, 0xE4, 0x50, 0x89, 0x4C, 0x24, 0x34, 0xF3,
-    0x0F, 0x11, 0x44, 0x24, 0x38, 0x89, 0x54, 0x24, 0x3C, 0x89, 0x5C, 0x24,
-    0x40, 0x83, 0xC4, 0xE0, 0x83, 0xC4, 0x20, 0x83, 0xC4, 0x20, 0x5D, 0x5E,
+    0x57, 0x56, 0x55, 0x83, 0xC4, 0xF4, 0x50, 0x89, 0x4C, 0x24, 0x24, 0xF3,
+    0x0F, 0x11, 0x44, 0x24, 0x28, 0x89, 0x54, 0x24, 0x2C, 0x89, 0x5C, 0x24,
+    0x30, 0x83, 0xC4, 0xE0, 0x83, 0xC4, 0x20, 0x83, 0xC4, 0x10, 0x5D, 0x5E,
     0x5F, 0xC3,
 };
 static constexpr uint8_t expected_cfi_kX86[] = {
     0x41, 0x0E, 0x08, 0x87, 0x02, 0x41, 0x0E, 0x0C, 0x86, 0x03, 0x41, 0x0E,
-    0x10, 0x85, 0x04, 0x43, 0x0E, 0x2C, 0x41, 0x0E, 0x30, 0x55, 0x0E, 0x50,
-    0x43, 0x0E, 0x30, 0x0A, 0x43, 0x0E, 0x10, 0x41, 0x0E, 0x0C, 0xC5, 0x41,
-    0x0E, 0x08, 0xC6, 0x41, 0x0E, 0x04, 0xC7, 0x41, 0x0B, 0x0E, 0x30,
+    0x10, 0x85, 0x04, 0x43, 0x0E, 0x1C, 0x41, 0x0E, 0x20, 0x55, 0x0E, 0x40,
+    0x43, 0x0E, 0x20, 0x0A, 0x43, 0x0E, 0x10, 0x41, 0x0E, 0x0C, 0xC5, 0x41,
+    0x0E, 0x08, 0xC6, 0x41, 0x0E, 0x04, 0xC7, 0x41, 0x0B, 0x0E, 0x20,
 };
 // 0x00000000: push edi
 // 0x00000001: .cfi_def_cfa_offset: 8
@@ -217,20 +216,20 @@
 // 0x00000002: push ebp
 // 0x00000003: .cfi_def_cfa_offset: 16
 // 0x00000003: .cfi_offset: r5 at cfa-16
-// 0x00000003: add esp, -28
-// 0x00000006: .cfi_def_cfa_offset: 44
+// 0x00000003: add esp, -12
+// 0x00000006: .cfi_def_cfa_offset: 28
 // 0x00000006: push eax
-// 0x00000007: .cfi_def_cfa_offset: 48
-// 0x00000007: mov [esp + 52], ecx
-// 0x0000000b: movss [esp + 56], xmm0
-// 0x00000011: mov [esp + 60], edx
-// 0x00000015: mov [esp + 64], ebx
+// 0x00000007: .cfi_def_cfa_offset: 32
+// 0x00000007: mov [esp + 36], ecx
+// 0x0000000b: movss [esp + 40], xmm0
+// 0x00000011: mov [esp + 44], edx
+// 0x00000015: mov [esp + 48], ebx
 // 0x00000019: add esp, -32
-// 0x0000001c: .cfi_def_cfa_offset: 80
+// 0x0000001c: .cfi_def_cfa_offset: 64
 // 0x0000001c: add esp, 32
-// 0x0000001f: .cfi_def_cfa_offset: 48
+// 0x0000001f: .cfi_def_cfa_offset: 32
 // 0x0000001f: .cfi_remember_state
-// 0x0000001f: add esp, 32
+// 0x0000001f: add esp, 16
 // 0x00000022: .cfi_def_cfa_offset: 16
 // 0x00000022: pop ebp
 // 0x00000023: .cfi_def_cfa_offset: 12
@@ -243,30 +242,30 @@
 // 0x00000025: .cfi_restore: r7
 // 0x00000025: ret
 // 0x00000026: .cfi_restore_state
-// 0x00000026: .cfi_def_cfa_offset: 48
+// 0x00000026: .cfi_def_cfa_offset: 32
 
 static constexpr uint8_t expected_asm_kX86_64[] = {
     0x41, 0x57, 0x41, 0x56, 0x41, 0x55, 0x41, 0x54, 0x55, 0x53, 0x48, 0x83,
-    0xEC, 0x48, 0xF2, 0x44, 0x0F, 0x11, 0x7C, 0x24, 0x40, 0xF2, 0x44, 0x0F,
-    0x11, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24, 0x30, 0xF2,
-    0x44, 0x0F, 0x11, 0x64, 0x24, 0x28, 0x48, 0x89, 0x3C, 0x24, 0x89, 0xB4,
-    0x24, 0x88, 0x00, 0x00, 0x00, 0xF3, 0x0F, 0x11, 0x84, 0x24, 0x8C, 0x00,
-    0x00, 0x00, 0x89, 0x94, 0x24, 0x90, 0x00, 0x00, 0x00, 0x89, 0x8C, 0x24,
-    0x94, 0x00, 0x00, 0x00, 0x48, 0x83, 0xC4, 0xE0, 0x48, 0x83, 0xC4, 0x20,
-    0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x10, 0x6C,
-    0x24, 0x30, 0xF2, 0x44, 0x0F, 0x10, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F,
-    0x10, 0x7C, 0x24, 0x40, 0x48, 0x83, 0xC4, 0x48, 0x5B, 0x5D, 0x41, 0x5C,
-    0x41, 0x5D, 0x41, 0x5E, 0x41, 0x5F, 0xC3,
+    0xEC, 0x38, 0xF2, 0x44, 0x0F, 0x11, 0x7C, 0x24, 0x30, 0xF2, 0x44, 0x0F,
+    0x11, 0x74, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24, 0x20, 0xF2,
+    0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x48, 0x89, 0x3C, 0x24, 0x89, 0x74,
+    0x24, 0x78, 0xF3, 0x0F, 0x11, 0x44, 0x24, 0x7C, 0x89, 0x94, 0x24, 0x80,
+    0x00, 0x00, 0x00, 0x89, 0x8C, 0x24, 0x84, 0x00, 0x00, 0x00, 0x48, 0x83,
+    0xC4, 0xE0, 0x48, 0x83, 0xC4, 0x20, 0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24,
+    0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24, 0x20, 0xF2, 0x44, 0x0F, 0x10,
+    0x74, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x10, 0x7C, 0x24, 0x30, 0x48, 0x83,
+    0xC4, 0x38, 0x5B, 0x5D, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, 0x5F,
+    0xC3,
 };
 static constexpr uint8_t expected_cfi_kX86_64[] = {
     0x42, 0x0E, 0x10, 0x8F, 0x04, 0x42, 0x0E, 0x18, 0x8E, 0x06, 0x42, 0x0E,
     0x20, 0x8D, 0x08, 0x42, 0x0E, 0x28, 0x8C, 0x0A, 0x41, 0x0E, 0x30, 0x86,
-    0x0C, 0x41, 0x0E, 0x38, 0x83, 0x0E, 0x44, 0x0E, 0x80, 0x01, 0x47, 0xA0,
-    0x10, 0x47, 0x9F, 0x12, 0x47, 0x9E, 0x14, 0x47, 0x9D, 0x16, 0x66, 0x0E,
-    0xA0, 0x01, 0x44, 0x0E, 0x80, 0x01, 0x0A, 0x47, 0xDD, 0x47, 0xDE, 0x47,
-    0xDF, 0x47, 0xE0, 0x44, 0x0E, 0x38, 0x41, 0x0E, 0x30, 0xC3, 0x41, 0x0E,
-    0x28, 0xC6, 0x42, 0x0E, 0x20, 0xCC, 0x42, 0x0E, 0x18, 0xCD, 0x42, 0x0E,
-    0x10, 0xCE, 0x42, 0x0E, 0x08, 0xCF, 0x41, 0x0B, 0x0E, 0x80, 0x01,
+    0x0C, 0x41, 0x0E, 0x38, 0x83, 0x0E, 0x44, 0x0E, 0x70, 0x47, 0xA0, 0x10,
+    0x47, 0x9F, 0x12, 0x47, 0x9E, 0x14, 0x47, 0x9D, 0x16, 0x60, 0x0E, 0x90,
+    0x01, 0x44, 0x0E, 0x70, 0x0A, 0x47, 0xDD, 0x47, 0xDE, 0x47, 0xDF, 0x47,
+    0xE0, 0x44, 0x0E, 0x38, 0x41, 0x0E, 0x30, 0xC3, 0x41, 0x0E, 0x28, 0xC6,
+    0x42, 0x0E, 0x20, 0xCC, 0x42, 0x0E, 0x18, 0xCD, 0x42, 0x0E, 0x10, 0xCE,
+    0x42, 0x0E, 0x08, 0xCF, 0x41, 0x0B, 0x0E, 0x70,
 };
 // 0x00000000: push r15
 // 0x00000002: .cfi_def_cfa_offset: 16
@@ -286,55 +285,55 @@
 // 0x00000009: push rbx
 // 0x0000000a: .cfi_def_cfa_offset: 56
 // 0x0000000a: .cfi_offset: r3 at cfa-56
-// 0x0000000a: subq rsp, 72
-// 0x0000000e: .cfi_def_cfa_offset: 128
-// 0x0000000e: movsd [rsp + 64], xmm15
+// 0x0000000a: subq rsp, 56
+// 0x0000000e: .cfi_def_cfa_offset: 112
+// 0x0000000e: movsd [rsp + 48], xmm15
 // 0x00000015: .cfi_offset: r32 at cfa-64
-// 0x00000015: movsd [rsp + 56], xmm14
+// 0x00000015: movsd [rsp + 40], xmm14
 // 0x0000001c: .cfi_offset: r31 at cfa-72
-// 0x0000001c: movsd [rsp + 48], xmm13
+// 0x0000001c: movsd [rsp + 32], xmm13
 // 0x00000023: .cfi_offset: r30 at cfa-80
-// 0x00000023: movsd [rsp + 40], xmm12
+// 0x00000023: movsd [rsp + 24], xmm12
 // 0x0000002a: .cfi_offset: r29 at cfa-88
 // 0x0000002a: movq [rsp], rdi
-// 0x0000002e: mov [rsp + 136], esi
-// 0x00000035: movss [rsp + 140], xmm0
-// 0x0000003e: mov [rsp + 144], edx
-// 0x00000045: mov [rsp + 148], ecx
-// 0x0000004c: addq rsp, -32
-// 0x00000050: .cfi_def_cfa_offset: 160
-// 0x00000050: addq rsp, 32
-// 0x00000054: .cfi_def_cfa_offset: 128
-// 0x00000054: .cfi_remember_state
-// 0x00000054: movsd xmm12, [rsp + 40]
-// 0x0000005b: .cfi_restore: r29
-// 0x0000005b: movsd xmm13, [rsp + 48]
-// 0x00000062: .cfi_restore: r30
-// 0x00000062: movsd xmm14, [rsp + 56]
-// 0x00000069: .cfi_restore: r31
-// 0x00000069: movsd xmm15, [rsp + 64]
-// 0x00000070: .cfi_restore: r32
-// 0x00000070: addq rsp, 72
-// 0x00000074: .cfi_def_cfa_offset: 56
-// 0x00000074: pop rbx
-// 0x00000075: .cfi_def_cfa_offset: 48
-// 0x00000075: .cfi_restore: r3
-// 0x00000075: pop rbp
-// 0x00000076: .cfi_def_cfa_offset: 40
-// 0x00000076: .cfi_restore: r6
-// 0x00000076: pop r12
-// 0x00000078: .cfi_def_cfa_offset: 32
-// 0x00000078: .cfi_restore: r12
-// 0x00000078: pop r13
-// 0x0000007a: .cfi_def_cfa_offset: 24
-// 0x0000007a: .cfi_restore: r13
-// 0x0000007a: pop r14
-// 0x0000007c: .cfi_def_cfa_offset: 16
-// 0x0000007c: .cfi_restore: r14
-// 0x0000007c: pop r15
-// 0x0000007e: .cfi_def_cfa_offset: 8
-// 0x0000007e: .cfi_restore: r15
-// 0x0000007e: ret
-// 0x0000007f: .cfi_restore_state
-// 0x0000007f: .cfi_def_cfa_offset: 128
+// 0x0000002e: mov [rsp + 120], esi
+// 0x00000032: movss [rsp + 124], xmm0
+// 0x00000038: mov [rsp + 128], edx
+// 0x0000003f: mov [rsp + 132], ecx
+// 0x00000046: addq rsp, -32
+// 0x0000004a: .cfi_def_cfa_offset: 144
+// 0x0000004a: addq rsp, 32
+// 0x0000004e: .cfi_def_cfa_offset: 112
+// 0x0000004e: .cfi_remember_state
+// 0x0000004e: movsd xmm12, [rsp + 24]
+// 0x00000055: .cfi_restore: r29
+// 0x00000055: movsd xmm13, [rsp + 32]
+// 0x0000005c: .cfi_restore: r30
+// 0x0000005c: movsd xmm14, [rsp + 40]
+// 0x00000063: .cfi_restore: r31
+// 0x00000063: movsd xmm15, [rsp + 48]
+// 0x0000006a: .cfi_restore: r32
+// 0x0000006a: addq rsp, 56
+// 0x0000006e: .cfi_def_cfa_offset: 56
+// 0x0000006e: pop rbx
+// 0x0000006f: .cfi_def_cfa_offset: 48
+// 0x0000006f: .cfi_restore: r3
+// 0x0000006f: pop rbp
+// 0x00000070: .cfi_def_cfa_offset: 40
+// 0x00000070: .cfi_restore: r6
+// 0x00000070: pop r12
+// 0x00000072: .cfi_def_cfa_offset: 32
+// 0x00000072: .cfi_restore: r12
+// 0x00000072: pop r13
+// 0x00000074: .cfi_def_cfa_offset: 24
+// 0x00000074: .cfi_restore: r13
+// 0x00000074: pop r14
+// 0x00000076: .cfi_def_cfa_offset: 16
+// 0x00000076: .cfi_restore: r14
+// 0x00000076: pop r15
+// 0x00000078: .cfi_def_cfa_offset: 8
+// 0x00000078: .cfi_restore: r15
+// 0x00000078: ret
+// 0x00000079: .cfi_restore_state
+// 0x00000079: .cfi_def_cfa_offset: 112
 
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 3ee7e0e..dc5304c 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -21,6 +21,7 @@
 
 #include "art_method-inl.h"
 #include "base/bit_utils.h"
+#include "base/casts.h"
 #include "base/mem_map.h"
 #include "class_linker.h"
 #include "common_compiler_test.h"
@@ -28,6 +29,7 @@
 #include "dex/dex_file.h"
 #include "gtest/gtest.h"
 #include "indirect_reference_table.h"
+#include "java_frame_root_info.h"
 #include "jni/java_vm_ext.h"
 #include "jni/jni_internal.h"
 #include "mirror/class-inl.h"
@@ -154,19 +156,6 @@
                 jcharArray, jfloatArray, jshortArray, jdoubleArray, jlongArray>::value;
 };
 
-template <typename ... Args>
-struct count_refs_helper {
-  using value_type = size_t;
-  static constexpr const size_t value = 0;
-};
-
-template <typename Arg, typename ... Args>
-struct count_refs_helper<Arg, Args ...> {
-  using value_type = size_t;
-  static constexpr size_t value =
-      (jni_type_traits<Arg>::is_ref ? 1 : 0) + count_refs_helper<Args ...>::value;
-};
-
 // Base case: No parameters = 0 refs.
 size_t count_nonnull_refs_helper() {
   return 0;
@@ -399,12 +388,89 @@
   jmethodID jmethod_;
 
  private:
+  class ScopedSynchronizedEntryPointOverrides {
+   public:
+    ScopedSynchronizedEntryPointOverrides() {
+      QuickEntryPoints* qpoints = &Thread::Current()->tlsPtr_.quick_entrypoints;
+      jni_method_start_synchronized_original_ = qpoints->pJniMethodStartSynchronized;
+      qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronizedOverride;
+      jni_method_end_synchronized_original_ = qpoints->pJniMethodEndSynchronized;
+      qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronizedOverride;
+      jni_method_end_with_reference_synchronized_original_ =
+          qpoints->pJniMethodEndWithReferenceSynchronized;
+      qpoints->pJniMethodEndWithReferenceSynchronized =
+          JniMethodEndWithReferenceSynchronizedOverride;
+    }
+
+    ~ScopedSynchronizedEntryPointOverrides() {
+      QuickEntryPoints* qpoints = &Thread::Current()->tlsPtr_.quick_entrypoints;
+      qpoints->pJniMethodStartSynchronized = jni_method_start_synchronized_original_;
+      qpoints->pJniMethodEndSynchronized = jni_method_end_synchronized_original_;
+      qpoints->pJniMethodEndWithReferenceSynchronized =
+          jni_method_end_with_reference_synchronized_original_;
+    }
+  };
+
+  static uint32_t JniMethodStartSynchronizedOverride(jobject to_lock, Thread* self);
+  static void JniMethodEndSynchronizedOverride(uint32_t saved_local_ref_cookie,
+                                               jobject locked,
+                                               Thread* self);
+  static mirror::Object* JniMethodEndWithReferenceSynchronizedOverride(
+      jobject result,
+      uint32_t saved_local_ref_cookie,
+      jobject locked,
+      Thread* self);
+
+  using StartSynchronizedType = uint32_t (*)(jobject, Thread*);
+  using EndSynchronizedType = void (*)(uint32_t, jobject, Thread*);
+  using EndWithReferenceSynchronizedType = mirror::Object* (*)(jobject, uint32_t, jobject, Thread*);
+
+  static StartSynchronizedType jni_method_start_synchronized_original_;
+  static EndSynchronizedType jni_method_end_synchronized_original_;
+  static EndWithReferenceSynchronizedType jni_method_end_with_reference_synchronized_original_;
+  static uint32_t saved_local_ref_cookie_;
+  static jobject locked_object_;
+
   bool check_generic_jni_;
 };
 
 jclass JniCompilerTest::jklass_;
 jobject JniCompilerTest::jobj_;
 jobject JniCompilerTest::class_loader_;
+JniCompilerTest::StartSynchronizedType JniCompilerTest::jni_method_start_synchronized_original_;
+JniCompilerTest::EndSynchronizedType JniCompilerTest::jni_method_end_synchronized_original_;
+JniCompilerTest::EndWithReferenceSynchronizedType
+    JniCompilerTest::jni_method_end_with_reference_synchronized_original_;
+uint32_t JniCompilerTest::saved_local_ref_cookie_;
+jobject JniCompilerTest::locked_object_;
+
+uint32_t JniCompilerTest::JniMethodStartSynchronizedOverride(jobject to_lock, Thread* self) {
+  locked_object_ = to_lock;
+  uint32_t cookie = jni_method_start_synchronized_original_(to_lock, self);
+  saved_local_ref_cookie_ = cookie;
+  return cookie;
+}
+
+void JniCompilerTest::JniMethodEndSynchronizedOverride(uint32_t saved_local_ref_cookie,
+                                                       jobject locked,
+                                                       Thread* self) {
+  EXPECT_EQ(saved_local_ref_cookie_, saved_local_ref_cookie);
+  EXPECT_EQ(locked_object_, locked);
+  jni_method_end_synchronized_original_(saved_local_ref_cookie, locked, self);
+}
+
+mirror::Object* JniCompilerTest::JniMethodEndWithReferenceSynchronizedOverride(
+    jobject result,
+    uint32_t saved_local_ref_cookie,
+    jobject locked,
+    Thread* self) {
+  EXPECT_EQ(saved_local_ref_cookie_, saved_local_ref_cookie);
+  EXPECT_EQ(locked_object_, locked);
+  return jni_method_end_with_reference_synchronized_original_(result,
+                                                              saved_local_ref_cookie,
+                                                              locked,
+                                                              self);
+}
 
 // Test the normal compiler and normal generic JNI only.
 // The following features are unsupported in @FastNative:
@@ -553,42 +619,56 @@
   BaseHandleScope* const handle_scope_;
 };
 
-// Number of references allocated in JNI ShadowFrames on the given thread.
-static size_t NumJniShadowFrameReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
-  return self->GetManagedStack()->NumJniShadowFrameReferences();
-}
-
-// Number of references in handle scope on the given thread.
-static size_t NumHandleReferences(Thread* self) {
-  size_t count = 0;
-  for (BaseHandleScope* cur = self->GetTopHandleScope(); cur != nullptr; cur = cur->GetLink()) {
-    count += cur->NumberOfReferences();
+class CountReferencesVisitor : public RootVisitor {
+ public:
+  void VisitRoots(mirror::Object*** roots ATTRIBUTE_UNUSED,
+                  size_t count,
+                  const RootInfo& info) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (info.GetType() == art::RootType::kRootJavaFrame) {
+      const JavaFrameRootInfo& jrfi = static_cast<const JavaFrameRootInfo&>(info);
+      if (jrfi.GetVReg() == JavaFrameRootInfo::kNativeReferenceArgument) {
+        DCHECK_EQ(count, 1u);
+        num_references_ += count;
+      }
+    }
   }
-  return count;
-}
+
+  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots ATTRIBUTE_UNUSED,
+                  size_t count ATTRIBUTE_UNUSED,
+                  const RootInfo& info) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    CHECK_NE(info.GetType(), art::RootType::kRootJavaFrame);
+  }
+
+  size_t NumReferences() const {
+    return num_references_;
+  }
+
+ private:
+  size_t num_references_ = 0u;
+};
 
 // Number of references allocated in handle scopes & JNI shadow frames on this thread.
 static size_t NumStackReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
-  return NumHandleReferences(self) + NumJniShadowFrameReferences(self);
+  CountReferencesVisitor visitor;
+  self->VisitRoots(&visitor, kVisitRootFlagAllRoots);
+  return visitor.NumReferences();
 }
 
-static void expectNumStackReferences(size_t val1, size_t val2) {
+static void expectNumStackReferences(size_t expected) {
   // In rare cases when JNI functions call themselves recursively,
   // disable this test because it will have a false negative.
   if (!IsCurrentJniCritical() && ScopedDisableCheckNumStackReferences::sCheckNumStackReferences) {
     /* @CriticalNative doesn't build a HandleScope, so this test is meaningless then. */
     ScopedObjectAccess soa(Thread::Current());
 
-    size_t actual_num = NumStackReferences(Thread::Current());
-    // XX: Not too sure what's going on.
-    // Sometimes null references get placed and sometimes they don't?
-    EXPECT_TRUE(val1 == actual_num || val2 == actual_num)
-      << "expected either " << val1 << " or " << val2
-      << " number of stack references, but got: " << actual_num;
+    size_t num_references = NumStackReferences(Thread::Current());
+    EXPECT_EQ(expected, num_references);
   }
 }
 
-#define EXPECT_NUM_STACK_REFERENCES(val1, val2) expectNumStackReferences(val1, val2)
+#define EXPECT_NUM_STACK_REFERENCES(expected) expectNumStackReferences(expected)
 
 template <typename T, T* fn>
 struct make_jni_test_decorator;
@@ -600,9 +680,9 @@
     EXPECT_THREAD_STATE_FOR_CURRENT_JNI();
     EXPECT_MUTATOR_LOCK_FOR_CURRENT_JNI();
     EXPECT_JNI_ENV_AND_CLASS_FOR_CURRENT_JNI(env, kls);
-    // All incoming parameters + the jclass get put into the transition's StackHandleScope.
-    EXPECT_NUM_STACK_REFERENCES(count_nonnull_refs(kls, args...),
-                                (count_refs_helper<jclass, Args...>::value));
+    // All incoming parameters get spilled into the JNI transition frame.
+    // The `jclass` is just a reference to the method's declaring class field.
+    EXPECT_NUM_STACK_REFERENCES(count_nonnull_refs(args...));
 
     return fn(env, kls, args...);
   }
@@ -615,9 +695,8 @@
     EXPECT_THREAD_STATE_FOR_CURRENT_JNI();
     EXPECT_MUTATOR_LOCK_FOR_CURRENT_JNI();
     EXPECT_JNI_ENV_AND_OBJECT_FOR_CURRENT_JNI(env, thisObj);
-    // All incoming parameters + the implicit 'this' get put into the transition's StackHandleScope.
-    EXPECT_NUM_STACK_REFERENCES(count_nonnull_refs(thisObj, args...),
-                                (count_refs_helper<jobject, Args...>::value));
+    // All incoming parameters + the implicit 'this' get spilled into the JNI transition frame.
+    EXPECT_NUM_STACK_REFERENCES(count_nonnull_refs(thisObj, args...));
 
     return fn(env, thisObj, args...);
   }
@@ -804,6 +883,7 @@
 void JniCompilerTest::CompileAndRun_fooJJ_synchronizedImpl() {
   SetUpForTest(false, "fooJJ_synchronized", "(JJ)J",
                CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooJJ_synchronized));
+  ScopedSynchronizedEntryPointOverrides ssepo;
 
   EXPECT_EQ(0, gJava_MyClassNatives_fooJJ_synchronized_calls[gCurrentJni]);
   jlong a = 0x1000000020000000ULL;
@@ -1103,6 +1183,7 @@
   SetUpForTest(true, "fooSSIOO",
                "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
                CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooSSIOO));
+  ScopedSynchronizedEntryPointOverrides ssepo;
 
   EXPECT_EQ(0, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]);
   jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, nullptr);
@@ -1343,6 +1424,7 @@
 void JniCompilerTest::GetSinkPropertiesNativeImpl() {
   SetUpForTest(false, "getSinkPropertiesNative", "(Ljava/lang/String;)[Ljava/lang/Object;",
                CURRENT_JNI_WRAPPER(Java_MyClassNatives_GetSinkProperties));
+  ScopedSynchronizedEntryPointOverrides ssepo;
 
   EXPECT_EQ(0, gJava_MyClassNatives_GetSinkProperties_calls[gCurrentJni]);
   jarray result = down_cast<jarray>(
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 7afa8b1..bc1e866 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -21,7 +21,6 @@
 #include "arch/arm/jni_frame_arm.h"
 #include "arch/instruction_set.h"
 #include "base/macros.h"
-#include "handle_scope-inl.h"
 #include "utils/arm/managed_register_arm.h"
 
 namespace art {
@@ -394,28 +393,27 @@
   if (UNLIKELY(is_critical_native_)) {
     CHECK(!SpillsMethod());
     CHECK(!HasLocalReferenceSegmentState());
-    CHECK(!HasHandleScope());
     CHECK(!SpillsReturnValue());
     return 0u;  // There is no managed frame for @CriticalNative.
   }
 
   // Method*, callee save area size, local reference segment state
-  CHECK(SpillsMethod());
+  DCHECK(SpillsMethod());
   const size_t method_ptr_size = static_cast<size_t>(kArmPointerSize);
   const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
   size_t total_size = method_ptr_size + callee_save_area_size;
 
-  CHECK(HasLocalReferenceSegmentState());
-  // local reference segment state
-  total_size += kFramePointerSize;
-  // TODO: Probably better to use sizeof(IRTSegmentState) here...
-
-  CHECK(HasHandleScope());
-  total_size += HandleScope::SizeOf(kArmPointerSize, ReferenceCount());
+  DCHECK(HasLocalReferenceSegmentState());
+  const size_t cookie_size = SavedLocalReferenceCookieSize();
+  total_size += cookie_size;
 
   // Plus return value spill area size
-  CHECK(SpillsReturnValue());
-  total_size += SizeOfReturnValue();
+  if (SpillsReturnValue()) {
+    // No padding between cookie and return value on arm.
+    DCHECK_EQ(ReturnValueSaveLocation().SizeValue(),
+              SavedLocalReferenceCookieOffset().SizeValue() + cookie_size);
+    total_size += SizeOfReturnValue();
+  }
 
   return RoundUp(total_size, kStackAlignment);
 }
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 06796c1..8d40f2e 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -20,7 +20,6 @@
 
 #include "arch/arm64/jni_frame_arm64.h"
 #include "arch/instruction_set.h"
-#include "handle_scope-inl.h"
 #include "utils/arm64/managed_register_arm64.h"
 
 namespace art {
@@ -241,26 +240,30 @@
   if (is_critical_native_) {
     CHECK(!SpillsMethod());
     CHECK(!HasLocalReferenceSegmentState());
-    CHECK(!HasHandleScope());
     CHECK(!SpillsReturnValue());
     return 0u;  // There is no managed frame for @CriticalNative.
   }
 
   // Method*, callee save area size, local reference segment state
-  CHECK(SpillsMethod());
+  DCHECK(SpillsMethod());
   size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
   size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
   size_t total_size = method_ptr_size + callee_save_area_size;
 
-  CHECK(HasLocalReferenceSegmentState());
-  total_size += sizeof(uint32_t);
-
-  CHECK(HasHandleScope());
-  total_size += HandleScope::SizeOf(kArm64PointerSize, ReferenceCount());
+  DCHECK(HasLocalReferenceSegmentState());
+  const size_t cookie_size = SavedLocalReferenceCookieSize();
+  total_size += cookie_size;
 
   // Plus return value spill area size
-  CHECK(SpillsReturnValue());
-  total_size += SizeOfReturnValue();
+  if (SpillsReturnValue()) {
+    // For 64-bit return values there shall be a 4B alignment gap between the cookie
+    // and the saved return value. However, we do not need to round the intermediate
+    // `total_size` here as the final rounding below shall add sufficient padding.
+    DCHECK_ALIGNED(total_size, 4u);
+    DCHECK(!IsAligned<8u>(total_size));
+    static_assert(IsAligned<8u>(kStackAlignment));
+    total_size += SizeOfReturnValue();
+  }
 
   return RoundUp(total_size, kStackAlignment);
 }
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 1943756..2127f73 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -19,6 +19,7 @@
 #include <android-base/logging.h>
 
 #include "arch/instruction_set.h"
+#include "indirect_reference_table.h"
 
 #ifdef ART_ENABLE_CODEGEN_arm
 #include "jni/quick/arm/calling_convention_arm.h"
@@ -173,25 +174,24 @@
 }
 
 FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
-  size_t references_size = handle_scope_pointer_size_ * ReferenceCount();  // size excluding header
-  return FrameOffset(HandleReferencesOffset().Int32Value() + references_size);
+  // The cookie goes after the method pointer.
+  DCHECK_EQ(SavedLocalReferenceCookieSize(), sizeof(IRTSegmentState));
+  DCHECK(HasLocalReferenceSegmentState());
+  return FrameOffset(displacement_.SizeValue() + static_cast<size_t>(frame_pointer_size_));
 }
 
 FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
-  if (LIKELY(HasHandleScope())) {
-    // Initial offset already includes the displacement.
-    // -- Remove the additional local reference cookie offset if we don't have a handle scope.
-    const size_t saved_local_reference_cookie_offset =
-        SavedLocalReferenceCookieOffset().Int32Value();
-    // Segment state is 4 bytes long
-    const size_t segment_state_size = 4;
-    return FrameOffset(saved_local_reference_cookie_offset + segment_state_size);
-  } else {
-    // Include only the initial Method* as part of the offset.
-    CHECK_LT(displacement_.SizeValue(),
-             static_cast<size_t>(std::numeric_limits<int32_t>::max()));
-    return FrameOffset(displacement_.Int32Value() + static_cast<size_t>(frame_pointer_size_));
+  // The saved return value goes at a properly aligned slot after the cookie.
+  DCHECK(SpillsReturnValue());
+  size_t cookie_offset = SavedLocalReferenceCookieOffset().SizeValue() - displacement_.SizeValue();
+  size_t return_value_offset = cookie_offset + SavedLocalReferenceCookieSize();
+  const size_t return_value_size = SizeOfReturnValue();
+  DCHECK(return_value_size == 4u || return_value_size == 8u) << return_value_size;
+  DCHECK_ALIGNED(return_value_offset, 4u);
+  if (return_value_size == 8u) {
+    return_value_offset = RoundUp(return_value_offset, 8u);
   }
+  return FrameOffset(displacement_.SizeValue() + return_value_offset);
 }
 
 bool JniCallingConvention::HasNext() {
@@ -285,16 +285,6 @@
   }
 }
 
-// Return position of handle scope entry holding reference at the current iterator
-// position
-FrameOffset JniCallingConvention::CurrentParamHandleScopeEntryOffset() {
-  CHECK(IsCurrentParamAReference());
-  CHECK_LT(HandleScopeLinkOffset(), HandleScopeNumRefsOffset());
-  int result = HandleReferencesOffset().Int32Value() + itr_refs_ * handle_scope_pointer_size_;
-  CHECK_GT(result, HandleScopeNumRefsOffset().Int32Value());
-  return FrameOffset(result);
-}
-
 size_t JniCallingConvention::CurrentParamSize() const {
   if (IsCurrentArgExtraForJni()) {
     return static_cast<size_t>(frame_pointer_size_);  // JNIEnv or jobject/jclass
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 005ae91..5679263 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -21,7 +21,6 @@
 #include "base/array_ref.h"
 #include "base/enums.h"
 #include "dex/primitive.h"
-#include "handle_scope.h"
 #include "thread.h"
 #include "utils/managed_register.h"
 
@@ -81,7 +80,6 @@
       : itr_slots_(0), itr_refs_(0), itr_args_(0), itr_longs_and_doubles_(0),
         itr_float_and_doubles_(0), displacement_(0),
         frame_pointer_size_(frame_pointer_size),
-        handle_scope_pointer_size_(sizeof(StackReference<mirror::Object>)),
         is_static_(is_static), is_synchronized_(is_synchronized),
         shorty_(shorty) {
     num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1;
@@ -211,8 +209,6 @@
   FrameOffset displacement_;
   // The size of a pointer.
   const PointerSize frame_pointer_size_;
-  // The size of a reference entry within the handle scope.
-  const size_t handle_scope_pointer_size_;
 
  private:
   const bool is_static_;
@@ -345,32 +341,12 @@
   virtual ManagedRegister CurrentParamRegister() = 0;
   virtual FrameOffset CurrentParamStackOffset() = 0;
 
-  // Iterator interface extension for JNI
-  FrameOffset CurrentParamHandleScopeEntryOffset();
-
-  // Position of handle scope and interior fields
-  FrameOffset HandleScopeOffset() const {
-    return FrameOffset(this->displacement_.Int32Value() + static_cast<size_t>(frame_pointer_size_));
-    // above Method reference
-  }
-
-  FrameOffset HandleScopeLinkOffset() const {
-    return FrameOffset(HandleScopeOffset().Int32Value() +
-                       HandleScope::LinkOffset(frame_pointer_size_));
-  }
-
-  FrameOffset HandleScopeNumRefsOffset() const {
-    return FrameOffset(HandleScopeOffset().Int32Value() +
-                       HandleScope::NumberOfReferencesOffset(frame_pointer_size_));
-  }
-
-  FrameOffset HandleReferencesOffset() const {
-    return FrameOffset(HandleScopeOffset().Int32Value() +
-                       HandleScope::ReferencesOffset(frame_pointer_size_));
-  }
-
   virtual ~JniCallingConvention() {}
 
+  size_t SavedLocalReferenceCookieSize() const {
+    return 4u;
+  }
+
   bool IsCriticalNative() const {
     return is_critical_native_;
   }
@@ -397,6 +373,13 @@
            return_type == Primitive::kPrimChar;
   }
 
+  // Does the transition back spill the return value in the stack frame?
+  bool SpillsReturnValue() const {
+    // Exclude return value for @CriticalNative methods for optimization speed.
+    // References are passed directly to the "end method" and there is nothing to save for `void`.
+    return !IsCriticalNative() && !IsReturnAReference() && SizeOfReturnValue() != 0u;
+  }
+
  protected:
   // Named iterator positions
   enum IteratorPos {
@@ -415,24 +398,12 @@
  protected:
   size_t NumberOfExtraArgumentsForJni() const;
 
-  // Does the transition have a StackHandleScope?
-  bool HasHandleScope() const {
-    // Exclude HandleScope for @CriticalNative methods for optimization speed.
-    return !IsCriticalNative();
-  }
-
   // Does the transition have a local reference segment state?
   bool HasLocalReferenceSegmentState() const {
     // Exclude local reference segment states for @CriticalNative methods for optimization speed.
     return !IsCriticalNative();
   }
 
-  // Does the transition back spill the return value in the stack frame?
-  bool SpillsReturnValue() const {
-    // Exclude return value for @CriticalNative methods for optimization speed.
-    return !IsCriticalNative();
-  }
-
   // Are there extra JNI arguments (JNIEnv* and maybe jclass)?
   bool HasExtraArgumentsForJni() const {
     // @CriticalNative jni implementations exclude both JNIEnv* and the jclass/jobject parameters.
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index e7dd6cf..2fd9abd 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -230,7 +230,7 @@
 
   if (LIKELY(!is_critical_native)) {
     // Spill all register arguments.
-    // TODO: Spill reference args directly to the HandleScope.
+    // TODO: Pass these in a single call to let the assembler use multi-register stores.
     // TODO: Spill native stack args straight to their stack locations (adjust SP earlier).
     mr_conv->ResetIterator(FrameOffset(current_frame_size));
     for (; mr_conv->HasNext(); mr_conv->Next()) {
@@ -240,70 +240,7 @@
       }
     }
 
-    // NOTE: @CriticalNative methods don't have a HandleScope
-    //       because they can't have any reference parameters or return values.
-
-    // 2. Set up the HandleScope
-    mr_conv->ResetIterator(FrameOffset(current_frame_size));
-    main_jni_conv->ResetIterator(FrameOffset(0));
-    __ StoreImmediateToFrame(main_jni_conv->HandleScopeNumRefsOffset(),
-                             main_jni_conv->ReferenceCount());
-
-    __ CopyRawPtrFromThread(main_jni_conv->HandleScopeLinkOffset(),
-                            Thread::TopHandleScopeOffset<kPointerSize>());
-    __ StoreStackOffsetToThread(Thread::TopHandleScopeOffset<kPointerSize>(),
-                                main_jni_conv->HandleScopeOffset());
-
-    // 3. Place incoming reference arguments into handle scope
-    main_jni_conv->Next();  // Skip JNIEnv*
-    // 3.5. Create Class argument for static methods out of passed method
-    if (is_static) {
-      FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
-      // Check handle scope offset is within frame
-      CHECK_LT(handle_scope_offset.Uint32Value(), current_frame_size);
-      // Note: This CopyRef() doesn't need heap unpoisoning since it's from the ArtMethod.
-      // Note: This CopyRef() does not include read barrier. It will be handled below.
-      __ CopyRef(handle_scope_offset,
-                 mr_conv->MethodRegister(),
-                 ArtMethod::DeclaringClassOffset(),
-                 /* unpoison_reference= */ false);
-      main_jni_conv->Next();  // in handle scope so move to next argument
-    }
-    // Place every reference into the handle scope (ignore other parameters).
-    while (mr_conv->HasNext()) {
-      CHECK(main_jni_conv->HasNext());
-      bool ref_param = main_jni_conv->IsCurrentParamAReference();
-      CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
-      // References need placing in handle scope and the entry value passing
-      if (ref_param) {
-        // Compute handle scope entry, note null is placed in the handle scope but its boxed value
-        // must be null.
-        FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
-        // Check handle scope offset is within frame and doesn't run into the saved segment state.
-        CHECK_LT(handle_scope_offset.Uint32Value(), current_frame_size);
-        CHECK_NE(handle_scope_offset.Uint32Value(),
-                 main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
-        // We spilled all registers above, so use stack locations.
-        // TODO: Spill refs straight to the HandleScope.
-        bool input_in_reg = false;  // mr_conv->IsCurrentParamInRegister();
-        bool input_on_stack = true;  // mr_conv->IsCurrentParamOnStack();
-        CHECK(input_in_reg || input_on_stack);
-
-        if (input_in_reg) {
-          ManagedRegister in_reg  =  mr_conv->CurrentParamRegister();
-          __ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull());
-          __ StoreRef(handle_scope_offset, in_reg);
-        } else if (input_on_stack) {
-          FrameOffset in_off  = mr_conv->CurrentParamStackOffset();
-          __ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());
-          __ CopyRef(handle_scope_offset, in_off);
-        }
-      }
-      mr_conv->Next();
-      main_jni_conv->Next();
-    }
-
-    // 4. Write out the end of the quick frames.
+    // 2. Write out the end of the quick frames.
     __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset<kPointerSize>());
 
     // NOTE: @CriticalNative does not need to store the stack pointer to the thread
@@ -312,7 +249,7 @@
     //       (TODO: We could probably disable it for @FastNative too).
   }  // if (!is_critical_native)
 
-  // 5. Move frame down to allow space for out going args.
+  // 3. Move frame down to allow space for out going args.
   size_t current_out_arg_size = main_out_arg_size;
   if (UNLIKELY(is_critical_native)) {
     DCHECK_EQ(main_out_arg_size, current_frame_size);
@@ -321,8 +258,8 @@
     current_frame_size += main_out_arg_size;
   }
 
-  // Call the read barrier for the declaring class loaded from the method for a static call.
-  // Skip this for @CriticalNative because we didn't build a HandleScope to begin with.
+  // 4. Call the read barrier for the declaring class in the method for a static call.
+  // Skip this for @CriticalNative because we're not passing a `jclass` to the native method.
   // Note that we always have outgoing param space available for at least two params.
   if (kUseReadBarrier && is_static && !is_critical_native) {
     const bool kReadBarrierFastPath = true;  // Always true after Mips codegen was removed.
@@ -341,24 +278,15 @@
     //
     // Call into the runtime's ReadBarrierJni and have it fix up
     // the object address if it was moved.
+    //
+    // TODO: Move this to an actual slow path, so that the fast path is a branch-not-taken.
 
     ThreadOffset<kPointerSize> read_barrier = QUICK_ENTRYPOINT_OFFSET(kPointerSize,
                                                                       pReadBarrierJni);
     main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
-    main_jni_conv->Next();  // Skip JNIEnv.
-    FrameOffset class_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
-    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
-    // Pass the handle for the class as the first argument.
-    if (main_jni_conv->IsCurrentParamOnStack()) {
-      FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
-      __ CreateHandleScopeEntry(out_off, class_handle_scope_offset, /*null_allowed=*/ false);
-    } else {
-      ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
-      __ CreateHandleScopeEntry(out_reg,
-                                class_handle_scope_offset,
-                                ManagedRegister::NoRegister(),
-                                /*null_allowed=*/ false);
-    }
+    // Pass the pointer to the method's declaring class as the first argument.
+    DCHECK_EQ(ArtMethod::DeclaringClassOffset().SizeValue(), 0u);
+    SetNativeParameter(jni_asm.get(), main_jni_conv.get(), method_register);
     main_jni_conv->Next();
     // Pass the current thread as the second argument and call.
     if (main_jni_conv->IsCurrentParamInRegister()) {
@@ -368,18 +296,22 @@
       __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset());
       __ CallFromThread(read_barrier);
     }
-    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));  // Reset.
+    if (is_synchronized) {
+      // Reload the method pointer in the slow path because it is needed below.
+      __ Load(method_register,
+              FrameOffset(current_out_arg_size + mr_conv->MethodStackOffset().SizeValue()),
+              static_cast<size_t>(kPointerSize));
+    }
 
     if (kReadBarrierFastPath) {
       __ Bind(skip_cold_path_label.get());
     }
   }
 
-  // 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable
+  // 5. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable
   //    can occur. The result is the saved JNI local state that is restored by the exit call. We
   //    abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
   //    arguments.
-  FrameOffset locked_object_handle_scope_offset(0xBEEFDEAD);
   FrameOffset saved_cookie_offset(
       FrameOffset(0xDEADBEEFu));  // @CriticalNative - use obviously bad value for debugging
   if (LIKELY(!is_critical_native)) {
@@ -390,23 +322,26 @@
                                                    is_synchronized,
                                                    is_fast_native).SizeValue());
     main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
-    locked_object_handle_scope_offset = FrameOffset(0);
     if (is_synchronized) {
       // Pass object for locking.
-      main_jni_conv->Next();  // Skip JNIEnv.
-      locked_object_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
-      main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
-      if (main_jni_conv->IsCurrentParamOnStack()) {
-        FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
-        __ CreateHandleScopeEntry(out_off,
-                                  locked_object_handle_scope_offset,
-                                  /*null_allowed=*/ false);
+      if (is_static) {
+        // Pass the pointer to the method's declaring class as the first argument.
+        DCHECK_EQ(ArtMethod::DeclaringClassOffset().SizeValue(), 0u);
+        SetNativeParameter(jni_asm.get(), main_jni_conv.get(), method_register);
       } else {
-        ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
-        __ CreateHandleScopeEntry(out_reg,
-                                  locked_object_handle_scope_offset,
-                                  ManagedRegister::NoRegister(),
-                                  /*null_allowed=*/ false);
+        // TODO: Use the register that still holds the `this` reference.
+        mr_conv->ResetIterator(FrameOffset(current_frame_size));
+        FrameOffset this_offset = mr_conv->CurrentParamStackOffset();
+        if (main_jni_conv->IsCurrentParamOnStack()) {
+          FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
+          __ CreateJObject(out_off, this_offset, /*null_allowed=*/ false);
+        } else {
+          ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
+          __ CreateJObject(out_reg,
+                           this_offset,
+                           ManagedRegister::NoRegister(),
+                           /*null_allowed=*/ false);
+        }
       }
       main_jni_conv->Next();
     }
@@ -417,6 +352,7 @@
       __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset());
       __ CallFromThread(jni_start);
     }
+    method_register = ManagedRegister::NoRegister();  // Method register is clobbered.
     if (is_synchronized) {  // Check for exceptions from monitor enter.
       __ ExceptionPoll(main_out_arg_size);
     }
@@ -426,7 +362,7 @@
     __ Store(saved_cookie_offset, main_jni_conv->IntReturnRegister(), 4 /* sizeof cookie */);
   }
 
-  // 7. Fill arguments.
+  // 6. Fill arguments.
   if (UNLIKELY(is_critical_native)) {
     ArenaVector<ArgumentLocation> src_args(allocator.Adapter());
     ArenaVector<ArgumentLocation> dest_args(allocator.Adapter());
@@ -485,21 +421,26 @@
       }
       CopyParameter(jni_asm.get(), mr_conv.get(), main_jni_conv.get());
     }
+
+    // 7. For static method, create jclass argument as a pointer to the method's declaring class.
     if (is_static) {
-      // Create argument for Class
-      mr_conv->ResetIterator(FrameOffset(current_frame_size));
       main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
       main_jni_conv->Next();  // Skip JNIEnv*
-      FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+      // Load reference to the method's declaring class. The method register has been
+      // clobbered by the above call, so we need to load the method from the stack.
+      FrameOffset method_offset =
+          FrameOffset(current_out_arg_size + mr_conv->MethodStackOffset().SizeValue());
+      DCHECK_EQ(ArtMethod::DeclaringClassOffset().SizeValue(), 0u);
       if (main_jni_conv->IsCurrentParamOnStack()) {
         FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
-        __ CreateHandleScopeEntry(out_off, handle_scope_offset, /*null_allowed=*/ false);
+        __ Copy(out_off, method_offset, static_cast<size_t>(kPointerSize));
+        // TODO(x86): Get hold of the register used to copy the method pointer,
+        // so that we can use it also for loading the method entrypoint below.
       } else {
         ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
-        __ CreateHandleScopeEntry(out_reg,
-                                  handle_scope_offset,
-                                  ManagedRegister::NoRegister(),
-                                /*null_allowed=*/ false);
+        __ Load(out_reg, method_offset, static_cast<size_t>(kPointerSize));
+        // Reuse the register also for loading the method entrypoint below.
+        method_register = out_reg;
       }
     }
 
@@ -527,8 +468,12 @@
       __ Call(main_jni_conv->HiddenArgumentRegister(), jni_entrypoint_offset);
     }
   } else {
-    __ Call(FrameOffset(main_out_arg_size + mr_conv->MethodStackOffset().SizeValue()),
-            jni_entrypoint_offset);
+    if (method_register.IsRegister()) {
+      __ Call(method_register, jni_entrypoint_offset);
+    } else {
+      __ Call(FrameOffset(current_out_arg_size + mr_conv->MethodStackOffset().SizeValue()),
+              jni_entrypoint_offset);
+    }
   }
 
   // 10. Fix differences in result widths.
@@ -548,35 +493,36 @@
   }
 
   // 11. Process return value
-  FrameOffset return_save_location = main_jni_conv->ReturnValueSaveLocation();
-  if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
-    if (LIKELY(!is_critical_native)) {
-      // For normal JNI, store the return value on the stack because the call to
-      // JniMethodEnd will clobber the return value. It will be restored in (13).
-      CHECK_LT(return_save_location.Uint32Value(), current_frame_size);
-      __ Store(return_save_location,
-               main_jni_conv->ReturnRegister(),
-               main_jni_conv->SizeOfReturnValue());
-    } else {
-      // For @CriticalNative only,
-      // move the JNI return register into the managed return register (if they don't match).
-      ManagedRegister jni_return_reg = main_jni_conv->ReturnRegister();
-      ManagedRegister mr_return_reg = mr_conv->ReturnRegister();
+  bool spill_return_value = main_jni_conv->SpillsReturnValue();
+  FrameOffset return_save_location =
+      spill_return_value ? main_jni_conv->ReturnValueSaveLocation() : FrameOffset(0);
+  if (spill_return_value) {
+    DCHECK(!is_critical_native);
+    // For normal JNI, store the return value on the stack because the call to
+    // JniMethodEnd will clobber the return value. It will be restored in (13).
+    CHECK_LT(return_save_location.Uint32Value(), current_frame_size);
+    __ Store(return_save_location,
+             main_jni_conv->ReturnRegister(),
+             main_jni_conv->SizeOfReturnValue());
+  } else if (UNLIKELY(is_critical_native) && main_jni_conv->SizeOfReturnValue() != 0) {
+    // For @CriticalNative only,
+    // move the JNI return register into the managed return register (if they don't match).
+    ManagedRegister jni_return_reg = main_jni_conv->ReturnRegister();
+    ManagedRegister mr_return_reg = mr_conv->ReturnRegister();
 
-      // Check if the JNI return register matches the managed return register.
-      // If they differ, only then do we have to do anything about it.
-      // Otherwise the return value is already in the right place when we return.
-      if (!jni_return_reg.Equals(mr_return_reg)) {
-        CHECK(!main_jni_conv->UseTailCall());
-        // This is typically only necessary on ARM32 due to native being softfloat
-        // while managed is hardfloat.
-        // -- For example VMOV {r0, r1} -> D0; VMOV r0 -> S0.
-        __ Move(mr_return_reg, jni_return_reg, main_jni_conv->SizeOfReturnValue());
-      } else if (jni_return_reg.IsNoRegister() && mr_return_reg.IsNoRegister()) {
-        // Check that if the return value is passed on the stack for some reason,
-        // that the size matches.
-        CHECK_EQ(main_jni_conv->SizeOfReturnValue(), mr_conv->SizeOfReturnValue());
-      }
+    // Check if the JNI return register matches the managed return register.
+    // If they differ, only then do we have to do anything about it.
+    // Otherwise the return value is already in the right place when we return.
+    if (!jni_return_reg.Equals(mr_return_reg)) {
+      CHECK(!main_jni_conv->UseTailCall());
+      // This is typically only necessary on ARM32 due to native being softfloat
+      // while managed is hardfloat.
+      // -- For example VMOV {r0, r1} -> D0; VMOV r0 -> S0.
+      __ Move(mr_return_reg, jni_return_reg, main_jni_conv->SizeOfReturnValue());
+    } else if (jni_return_reg.IsNoRegister() && mr_return_reg.IsNoRegister()) {
+      // If the return value is passed on the stack for some reason,
+      // check that the size matches.
+      CHECK_EQ(main_jni_conv->SizeOfReturnValue(), mr_conv->SizeOfReturnValue());
     }
   }
 
@@ -589,8 +535,6 @@
       __ IncreaseFrameSize(out_arg_size_diff);
       current_frame_size += out_arg_size_diff;
       saved_cookie_offset = FrameOffset(saved_cookie_offset.SizeValue() + out_arg_size_diff);
-      locked_object_handle_scope_offset =
-          FrameOffset(locked_object_handle_scope_offset.SizeValue() + out_arg_size_diff);
       return_save_location = FrameOffset(return_save_location.SizeValue() + out_arg_size_diff);
     }
     end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
@@ -617,17 +561,32 @@
     end_jni_conv->Next();
     if (is_synchronized) {
       // Pass object for unlocking.
-      if (end_jni_conv->IsCurrentParamOnStack()) {
-        FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
-        __ CreateHandleScopeEntry(out_off,
-                                  locked_object_handle_scope_offset,
-                                  /*null_allowed=*/ false);
+      if (is_static) {
+        // Load reference to the method's declaring class. The method register has been
+        // clobbered by the above call, so we need to load the method from the stack.
+        FrameOffset method_offset =
+            FrameOffset(current_out_arg_size + mr_conv->MethodStackOffset().SizeValue());
+        DCHECK_EQ(ArtMethod::DeclaringClassOffset().SizeValue(), 0u);
+        if (end_jni_conv->IsCurrentParamOnStack()) {
+          FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
+          __ Copy(out_off, method_offset, static_cast<size_t>(kPointerSize));
+        } else {
+          ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
+          __ Load(out_reg, method_offset, static_cast<size_t>(kPointerSize));
+        }
       } else {
-        ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
-        __ CreateHandleScopeEntry(out_reg,
-                                  locked_object_handle_scope_offset,
-                                  ManagedRegister::NoRegister(),
-                                  /*null_allowed=*/ false);
+        mr_conv->ResetIterator(FrameOffset(current_frame_size));
+        FrameOffset this_offset = mr_conv->CurrentParamStackOffset();
+        if (end_jni_conv->IsCurrentParamOnStack()) {
+          FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
+          __ CreateJObject(out_off, this_offset, /*null_allowed=*/ false);
+        } else {
+          ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
+          __ CreateJObject(out_reg,
+                           this_offset,
+                           ManagedRegister::NoRegister(),
+                           /*null_allowed=*/ false);
+        }
       }
       end_jni_conv->Next();
     }
@@ -640,11 +599,8 @@
     }
 
     // 13. Reload return value
-    if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
+    if (spill_return_value) {
       __ Load(mr_conv->ReturnRegister(), return_save_location, mr_conv->SizeOfReturnValue());
-      // NIT: If it's @CriticalNative then we actually only need to do this IF
-      // the calling convention's native return register doesn't match the managed convention's
-      // return register.
     }
   }  // if (!is_critical_native)
 
@@ -696,7 +652,7 @@
   // TODO: Move args in registers for @CriticalNative.
   bool input_in_reg = false;  // mr_conv->IsCurrentParamInRegister();
   bool output_in_reg = jni_conv->IsCurrentParamInRegister();
-  FrameOffset handle_scope_offset(0);
+  FrameOffset spilled_reference_offset(0);
   bool null_allowed = false;
   bool ref_param = jni_conv->IsCurrentParamAReference();
   CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
@@ -705,21 +661,21 @@
   } else {
     CHECK(jni_conv->IsCurrentParamOnStack());
   }
-  // References need placing in handle scope and the entry address passing.
+  // References are spilled to caller's reserved out vreg area.
   if (ref_param) {
     null_allowed = mr_conv->IsCurrentArgPossiblyNull();
-    // Compute handle scope offset. Note null is placed in the handle scope but the jobject
-    // passed to the native code must be null (not a pointer into the handle scope
+    // Compute spilled reference offset. Note that null is spilled but the jobject
+    // passed to the native code must be null (not a pointer to the spilled value
     // as with regular references).
-    handle_scope_offset = jni_conv->CurrentParamHandleScopeEntryOffset();
-    // Check handle scope offset is within frame.
-    CHECK_LT(handle_scope_offset.Uint32Value(), mr_conv->GetDisplacement().Uint32Value());
+    spilled_reference_offset = mr_conv->CurrentParamStackOffset();
+    // Check that spilled reference offset is in the spill area in the caller's frame.
+    CHECK_GT(spilled_reference_offset.Uint32Value(), mr_conv->GetDisplacement().Uint32Value());
   }
   if (input_in_reg && output_in_reg) {
     ManagedRegister in_reg = mr_conv->CurrentParamRegister();
     ManagedRegister out_reg = jni_conv->CurrentParamRegister();
     if (ref_param) {
-      __ CreateHandleScopeEntry(out_reg, handle_scope_offset, in_reg, null_allowed);
+      __ CreateJObject(out_reg, spilled_reference_offset, in_reg, null_allowed);
     } else {
       if (!mr_conv->IsCurrentParamOnStack()) {
         // regular non-straddling move
@@ -731,7 +687,7 @@
   } else if (!input_in_reg && !output_in_reg) {
     FrameOffset out_off = jni_conv->CurrentParamStackOffset();
     if (ref_param) {
-      __ CreateHandleScopeEntry(out_off, handle_scope_offset, null_allowed);
+      __ CreateJObject(out_off, spilled_reference_offset, null_allowed);
     } else {
       FrameOffset in_off = mr_conv->CurrentParamStackOffset();
       size_t param_size = mr_conv->CurrentParamSize();
@@ -744,10 +700,10 @@
     // Check that incoming stack arguments are above the current stack frame.
     CHECK_GT(in_off.Uint32Value(), mr_conv->GetDisplacement().Uint32Value());
     if (ref_param) {
-      __ CreateHandleScopeEntry(out_reg,
-                                handle_scope_offset,
-                                ManagedRegister::NoRegister(),
-                                null_allowed);
+      __ CreateJObject(out_reg,
+                       spilled_reference_offset,
+                       ManagedRegister::NoRegister(),
+                       null_allowed);
     } else {
       size_t param_size = mr_conv->CurrentParamSize();
       CHECK_EQ(param_size, jni_conv->CurrentParamSize());
@@ -760,8 +716,8 @@
     // Check outgoing argument is within frame part dedicated to out args.
     CHECK_LT(out_off.Uint32Value(), jni_conv->GetDisplacement().Uint32Value());
     if (ref_param) {
-      // TODO: recycle value in in_reg rather than reload from handle scope
-      __ CreateHandleScopeEntry(out_off, handle_scope_offset, null_allowed);
+      // TODO: recycle value in in_reg rather than reload from spill slot.
+      __ CreateJObject(out_off, spilled_reference_offset, null_allowed);
     } else {
       size_t param_size = mr_conv->CurrentParamSize();
       CHECK_EQ(param_size, jni_conv->CurrentParamSize());
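The CreateJObject calls above all implement the same conversion, and the jclass for a static method relies on the declaring-class reference sitting at offset 0 inside ArtMethod (the DCHECK above). A minimal C++ sketch of the intended values, with illustrative names rather than the actual assembler API, assuming a 32-bit StackReference spill slot:

#include <cstdint>

// Sketch only: a jobject is either null or the address of the spilled reference slot.
uintptr_t JObjectFor(uintptr_t sp,
                     uint32_t spilled_reference_offset,
                     uint32_t spilled_ref_value,
                     bool null_allowed) {
  if (null_allowed && spilled_ref_value == 0u) {
    return 0u;  // A null reference is passed as a real null jobject.
  }
  return sp + spilled_reference_offset;  // Address of the spilled StackReference<>.
}

// Sketch only: for a static method the jclass is the ArtMethod pointer itself, since the
// declaring-class reference is at offset 0 and the method pointer doubles as a pointer to it.
uintptr_t JClassFor(uintptr_t art_method) {
  return art_method;
}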
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index df45627..d624831 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -20,7 +20,6 @@
 
 #include "arch/instruction_set.h"
 #include "arch/x86/jni_frame_x86.h"
-#include "handle_scope-inl.h"
 #include "utils/x86/managed_register_x86.h"
 
 namespace art {
@@ -195,27 +194,28 @@
   if (is_critical_native_) {
     CHECK(!SpillsMethod());
     CHECK(!HasLocalReferenceSegmentState());
-    CHECK(!HasHandleScope());
     CHECK(!SpillsReturnValue());
     return 0u;  // There is no managed frame for @CriticalNative.
   }
 
   // Method*, PC return address and callee save area size, local reference segment state
-  CHECK(SpillsMethod());
+  DCHECK(SpillsMethod());
   const size_t method_ptr_size = static_cast<size_t>(kX86PointerSize);
   const size_t pc_return_addr_size = kFramePointerSize;
   const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
   size_t total_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;
 
-  CHECK(HasLocalReferenceSegmentState());
-  total_size += kFramePointerSize;
-
-  CHECK(HasHandleScope());
-  total_size += HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());
+  DCHECK(HasLocalReferenceSegmentState());
+  const size_t cookie_size = SavedLocalReferenceCookieSize();
+  total_size += cookie_size;
 
   // Plus return value spill area size
-  CHECK(SpillsReturnValue());
-  total_size += SizeOfReturnValue();
+  if (SpillsReturnValue()) {
+    // No padding between cookie and return value on x86.
+    DCHECK_EQ(ReturnValueSaveLocation().SizeValue(),
+              SavedLocalReferenceCookieOffset().SizeValue() + cookie_size);
+    total_size += SizeOfReturnValue();
+  }
 
   return RoundUp(total_size, kStackAlignment);
 }
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 44ae8be..bb01371 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -21,7 +21,6 @@
 #include "arch/instruction_set.h"
 #include "arch/x86_64/jni_frame_x86_64.h"
 #include "base/bit_utils.h"
-#include "handle_scope-inl.h"
 #include "utils/x86_64/managed_register_x86_64.h"
 
 namespace art {
@@ -183,27 +182,31 @@
   if (is_critical_native_) {
     CHECK(!SpillsMethod());
     CHECK(!HasLocalReferenceSegmentState());
-    CHECK(!HasHandleScope());
     CHECK(!SpillsReturnValue());
     return 0u;  // There is no managed frame for @CriticalNative.
   }
 
   // Method*, PC return address and callee save area size, local reference segment state
-  CHECK(SpillsMethod());
+  DCHECK(SpillsMethod());
   const size_t method_ptr_size = static_cast<size_t>(kX86_64PointerSize);
   const size_t pc_return_addr_size = kFramePointerSize;
   const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
   size_t total_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;
 
-  CHECK(HasLocalReferenceSegmentState());
-  total_size += kFramePointerSize;
-
-  CHECK(HasHandleScope());
-  total_size += HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());
+  DCHECK(HasLocalReferenceSegmentState());
+  const size_t cookie_size = SavedLocalReferenceCookieSize();
+  total_size += cookie_size;
 
   // Plus return value spill area size
-  CHECK(SpillsReturnValue());
-  total_size += SizeOfReturnValue();
+  if (SpillsReturnValue()) {
+    // For 64-bit return values there shall be a 4B alignment gap between the cookie
+    // and the saved return value. However, we do not need to round the intermediate
+    // `total_size` here as the final rounding below shall add sufficient padding.
+    DCHECK_ALIGNED(total_size, 4u);
+    DCHECK(!IsAligned<8u>(total_size));
+    static_assert(IsAligned<8u>(kStackAlignment));
+    total_size += SizeOfReturnValue();
+  }
 
   return RoundUp(total_size, kStackAlignment);
 }
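As a worked example of the rounding argument above (sizes illustrative): if the method pointer, return address and callee saves add up to 64 bytes, the 4-byte cookie brings total_size to 68, which is 4-aligned but not 8-aligned as the DCHECKs require; adding an 8-byte return value gives 76, and RoundUp(76, kStackAlignment) is 80, so the frame still covers the return value's 8-aligned slot at offset 72 even though the 4-byte gap was never added to the running total.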
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 38167fb..c7241c1 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -845,85 +845,79 @@
   UNIMPLEMENTED(FATAL);
 }
 
-void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
-                                                      FrameOffset handle_scope_offset,
-                                                      ManagedRegister min_reg,
-                                                      bool null_allowed) {
+void ArmVIXLJNIMacroAssembler::CreateJObject(ManagedRegister mout_reg,
+                                             FrameOffset spilled_reference_offset,
+                                             ManagedRegister min_reg,
+                                             bool null_allowed) {
   vixl::aarch32::Register out_reg = AsVIXLRegister(mout_reg.AsArm());
   vixl::aarch32::Register in_reg =
       min_reg.AsArm().IsNoRegister() ? vixl::aarch32::Register() : AsVIXLRegister(min_reg.AsArm());
   UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
   temps.Exclude(out_reg);
   if (null_allowed) {
-    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
-    // the address in the handle scope holding the reference.
-    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+    // A null reference yields a null jobject. Otherwise, the jobject is
+    // the address of the spilled reference.
+    // e.g. out_reg = (in_reg == 0) ? 0 : (SP+spilled_reference_offset)
     if (!in_reg.IsValid()) {
-      asm_.LoadFromOffset(kLoadWord, out_reg, sp, handle_scope_offset.Int32Value());
+      asm_.LoadFromOffset(kLoadWord, out_reg, sp, spilled_reference_offset.Int32Value());
       in_reg = out_reg;
     }
 
     temps.Exclude(in_reg);
     ___ Cmp(in_reg, 0);
 
-    if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value())) {
+    if (asm_.ShifterOperandCanHold(ADD, spilled_reference_offset.Int32Value())) {
       if (!out_reg.Is(in_reg)) {
         ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
                                  3 * vixl32::kMaxInstructionSizeInBytes,
                                  CodeBufferCheckScope::kMaximumSize);
         ___ it(eq, 0xc);
         ___ mov(eq, out_reg, 0);
-        asm_.AddConstantInIt(out_reg, sp, handle_scope_offset.Int32Value(), ne);
+        asm_.AddConstantInIt(out_reg, sp, spilled_reference_offset.Int32Value(), ne);
       } else {
         ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
                                  2 * vixl32::kMaxInstructionSizeInBytes,
                                  CodeBufferCheckScope::kMaximumSize);
         ___ it(ne, 0x8);
-        asm_.AddConstantInIt(out_reg, sp, handle_scope_offset.Int32Value(), ne);
+        asm_.AddConstantInIt(out_reg, sp, spilled_reference_offset.Int32Value(), ne);
       }
     } else {
       // TODO: Implement this (old arm assembler would have crashed here).
       UNIMPLEMENTED(FATAL);
     }
   } else {
-    asm_.AddConstant(out_reg, sp, handle_scope_offset.Int32Value());
+    asm_.AddConstant(out_reg, sp, spilled_reference_offset.Int32Value());
   }
 }
 
-void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
-                                                      FrameOffset handle_scope_offset,
-                                                      bool null_allowed) {
+void ArmVIXLJNIMacroAssembler::CreateJObject(FrameOffset out_off,
+                                             FrameOffset spilled_reference_offset,
+                                             bool null_allowed) {
   UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
   vixl32::Register scratch = temps.Acquire();
   if (null_allowed) {
-    asm_.LoadFromOffset(kLoadWord, scratch, sp, handle_scope_offset.Int32Value());
-    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
-    // the address in the handle scope holding the reference.
-    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+    asm_.LoadFromOffset(kLoadWord, scratch, sp, spilled_reference_offset.Int32Value());
+    // A null reference yields a null jobject. Otherwise, the jobject is
+    // the address of the spilled reference.
+    // e.g. scratch = (scratch == 0) ? 0 : (SP+spilled_reference_offset)
     ___ Cmp(scratch, 0);
 
-    if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value())) {
+    if (asm_.ShifterOperandCanHold(ADD, spilled_reference_offset.Int32Value())) {
       ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
                                2 * vixl32::kMaxInstructionSizeInBytes,
                                CodeBufferCheckScope::kMaximumSize);
       ___ it(ne, 0x8);
-      asm_.AddConstantInIt(scratch, sp, handle_scope_offset.Int32Value(), ne);
+      asm_.AddConstantInIt(scratch, sp, spilled_reference_offset.Int32Value(), ne);
     } else {
       // TODO: Implement this (old arm assembler would have crashed here).
       UNIMPLEMENTED(FATAL);
     }
   } else {
-    asm_.AddConstant(scratch, sp, handle_scope_offset.Int32Value());
+    asm_.AddConstant(scratch, sp, spilled_reference_offset.Int32Value());
   }
   asm_.StoreToOffset(kStoreWord, scratch, sp, out_off.Int32Value());
 }
 
-void ArmVIXLJNIMacroAssembler::LoadReferenceFromHandleScope(
-    ManagedRegister mout_reg ATTRIBUTE_UNUSED,
-    ManagedRegister min_reg ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-}
-
 void ArmVIXLJNIMacroAssembler::VerifyObject(ManagedRegister src ATTRIBUTE_UNUSED,
                                             bool could_be_null ATTRIBUTE_UNUSED) {
   // TODO: not validating references.
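With null_allowed and distinct in/out registers, the register variant of CreateJObject above emits roughly this Thumb2 sequence (assuming the offset fits an ADD shifter operand): cmp in, #0; ite eq; moveq out, #0; addne out, sp, #spilled_reference_offset.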
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index 2f6813a..248fc67 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -153,24 +153,20 @@
   void GetCurrentThread(ManagedRegister dest) override;
   void GetCurrentThread(FrameOffset dest_offset) override;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed. in_reg holds a possibly stale reference
-  // that can be used to avoid loading the handle scope entry to see if the value is
-  // null.
-  void CreateHandleScopeEntry(ManagedRegister out_reg,
-                              FrameOffset handlescope_offset,
-                              ManagedRegister in_reg,
-                              bool null_allowed) override;
+  // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+  // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
+  // stale reference that can be used to avoid loading the spilled value to
+  // see if the value is null.
+  void CreateJObject(ManagedRegister out_reg,
+                     FrameOffset spilled_reference_offset,
+                     ManagedRegister in_reg,
+                     bool null_allowed) override;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off,
-                              FrameOffset handlescope_offset,
-                              bool null_allowed) override;
-
-  // src holds a handle scope entry (Object**) load this into dst.
-  void LoadReferenceFromHandleScope(ManagedRegister dst,
-                                    ManagedRegister src) override;
+  // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+  // or to be null if the value is null and `null_allowed`.
+  void CreateJObject(FrameOffset out_off,
+                     FrameOffset spilled_reference_offset,
+                     bool null_allowed) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index bb93a96..ff83828 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -650,70 +650,54 @@
   UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
 }
 
-void Arm64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister m_out_reg,
-                                                    FrameOffset handle_scope_offs,
-                                                    ManagedRegister m_in_reg,
-                                                    bool null_allowed) {
+void Arm64JNIMacroAssembler::CreateJObject(ManagedRegister m_out_reg,
+                                           FrameOffset spilled_reference_offset,
+                                           ManagedRegister m_in_reg,
+                                           bool null_allowed) {
   Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
   Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
   // For now we only hold stale handle scope entries in x registers.
   CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg;
   CHECK(out_reg.IsXRegister()) << out_reg;
   if (null_allowed) {
-    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
-    // the address in the handle scope holding the reference.
-    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+    // A null reference yields a null jobject. Otherwise, the jobject is
+    // the address of the spilled reference.
+    // e.g. out_reg = (in == 0) ? 0 : (SP+spilled_reference_offset)
     if (in_reg.IsNoRegister()) {
       LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP,
-                      handle_scope_offs.Int32Value());
+                      spilled_reference_offset.Int32Value());
       in_reg = out_reg;
     }
     ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0);
     if (!out_reg.Equals(in_reg)) {
       LoadImmediate(out_reg.AsXRegister(), 0, eq);
     }
-    AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne);
+    AddConstant(out_reg.AsXRegister(), SP, spilled_reference_offset.Int32Value(), ne);
   } else {
-    AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al);
+    AddConstant(out_reg.AsXRegister(), SP, spilled_reference_offset.Int32Value(), al);
   }
 }
 
-void Arm64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
-                                                    FrameOffset handle_scope_offset,
-                                                    bool null_allowed) {
+void Arm64JNIMacroAssembler::CreateJObject(FrameOffset out_off,
+                                           FrameOffset spilled_reference_offset,
+                                           bool null_allowed) {
   UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
   Register scratch = temps.AcquireX();
   if (null_allowed) {
     Register scratch2 = temps.AcquireW();
-    ___ Ldr(scratch2, MEM_OP(reg_x(SP), handle_scope_offset.Int32Value()));
-    ___ Add(scratch, reg_x(SP), handle_scope_offset.Int32Value());
-    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
-    // the address in the handle scope holding the reference.
-    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+    ___ Ldr(scratch2, MEM_OP(reg_x(SP), spilled_reference_offset.Int32Value()));
+    ___ Add(scratch, reg_x(SP), spilled_reference_offset.Int32Value());
+    // A null reference yields a null jobject. Otherwise, the jobject is
+    // the address of the spilled reference.
+    // e.g. scratch = (scratch == 0) ? 0 : (SP+spilled_reference_offset)
     ___ Cmp(scratch2, 0);
     ___ Csel(scratch, scratch, xzr, ne);
   } else {
-    ___ Add(scratch, reg_x(SP), handle_scope_offset.Int32Value());
+    ___ Add(scratch, reg_x(SP), spilled_reference_offset.Int32Value());
   }
   ___ Str(scratch, MEM_OP(reg_x(SP), out_off.Int32Value()));
 }
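The stack-to-stack variant above stays branch-free on arm64; with null_allowed it emits roughly: ldr w_tmp2, [sp, #spilled_reference_offset]; add x_tmp, sp, #spilled_reference_offset; cmp w_tmp2, #0; csel x_tmp, x_tmp, xzr, ne; str x_tmp, [sp, #out_off].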
 
-void Arm64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
-                                                          ManagedRegister m_in_reg) {
-  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
-  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
-  CHECK(out_reg.IsXRegister()) << out_reg;
-  CHECK(in_reg.IsXRegister()) << in_reg;
-  vixl::aarch64::Label exit;
-  if (!out_reg.Equals(in_reg)) {
-    // FIXME: Who sets the flags here?
-    LoadImmediate(out_reg.AsXRegister(), 0, eq);
-  }
-  ___ Cbz(reg_x(in_reg.AsXRegister()), &exit);
-  LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0);
-  ___ Bind(&exit);
-}
-
 void Arm64JNIMacroAssembler::ExceptionPoll(size_t stack_adjust) {
   CHECK_ALIGNED(stack_adjust, kStackAlignment);
   UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index 9f3eea2..ad027d3 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -135,23 +135,20 @@
   void GetCurrentThread(ManagedRegister dest) override;
   void GetCurrentThread(FrameOffset dest_offset) override;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed. in_reg holds a possibly stale reference
-  // that can be used to avoid loading the handle scope entry to see if the value is
-  // null.
-  void CreateHandleScopeEntry(ManagedRegister out_reg,
-                              FrameOffset handlescope_offset,
-                              ManagedRegister in_reg,
-                              bool null_allowed) override;
+  // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+  // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
+  // stale reference that can be used to avoid loading the spilled value to
+  // see if the value is null.
+  void CreateJObject(ManagedRegister out_reg,
+                     FrameOffset spilled_reference_offset,
+                     ManagedRegister in_reg,
+                     bool null_allowed) override;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off,
-                              FrameOffset handlescope_offset,
-                              bool null_allowed) override;
-
-  // src holds a handle scope entry (Object**) load this into dst.
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
+  // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+  // or to be null if the value is null and `null_allowed`.
+  void CreateJObject(FrameOffset out_off,
+                     FrameOffset spilled_reference_offset,
+                     bool null_allowed) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 9231f9c..5265152 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -169,13 +169,13 @@
   __ Move(hidden_arg_register, method_register, 4);
   __ VerifyObject(scratch_register, false);
 
-  __ CreateHandleScopeEntry(scratch_register, FrameOffset(48), scratch_register, true);
-  __ CreateHandleScopeEntry(scratch_register, FrameOffset(48), scratch_register, false);
-  __ CreateHandleScopeEntry(method_register, FrameOffset(48), scratch_register, true);
-  __ CreateHandleScopeEntry(FrameOffset(48), FrameOffset(64), true);
-  __ CreateHandleScopeEntry(method_register, FrameOffset(0), scratch_register, true);
-  __ CreateHandleScopeEntry(method_register, FrameOffset(1025), scratch_register, true);
-  __ CreateHandleScopeEntry(scratch_register, FrameOffset(1025), scratch_register, true);
+  __ CreateJObject(scratch_register, FrameOffset(48), scratch_register, true);
+  __ CreateJObject(scratch_register, FrameOffset(48), scratch_register, false);
+  __ CreateJObject(method_register, FrameOffset(48), scratch_register, true);
+  __ CreateJObject(FrameOffset(48), FrameOffset(64), true);
+  __ CreateJObject(method_register, FrameOffset(0), scratch_register, true);
+  __ CreateJObject(method_register, FrameOffset(1025), scratch_register, true);
+  __ CreateJObject(scratch_register, FrameOffset(1025), scratch_register, true);
 
   __ ExceptionPoll(0);
 
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 1470ef1..1678f87 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -1,12 +1,12 @@
 const char* const VixlJniHelpersResults = {
   "       0: 2d e9 e0 4d   push.w {r5, r6, r7, r8, r10, r11, lr}\n"
   "       4: 2d ed 10 8a   vpush {s16, s17, s18, s19, s20, s21, s22, s23, s24, s25, s26, s27, s28, s29, s30, s31}\n"
-  "       8: 89 b0         sub sp, #36\n"
+  "       8: 85 b0         sub sp, #20\n"
   "       a: 00 90         str r0, [sp]\n"
-  "       c: 21 91         str r1, [sp, #132]\n"
-  "       e: 8d ed 22 0a   vstr s0, [sp, #136]\n"
-  "      12: 23 92         str r2, [sp, #140]\n"
-  "      14: 24 93         str r3, [sp, #144]\n"
+  "       c: 1d 91         str r1, [sp, #116]\n"
+  "       e: 8d ed 1e 0a   vstr s0, [sp, #120]\n"
+  "      12: 1f 92         str r2, [sp, #124]\n"
+  "      14: 20 93         str r3, [sp, #128]\n"
   "      16: 88 b0         sub sp, #32\n"
   "      18: ad f5 80 5d   sub.w sp, sp, #4096\n"
   "      1c: 08 98         ldr r0, [sp, #32]\n"
@@ -147,7 +147,7 @@
   "     208: cd f8 ff c7   str.w r12, [sp, #2047]\n"
   "     20c: 0d f5 80 5d   add.w sp, sp, #4096\n"
   "     210: 08 b0         add sp, #32\n"
-  "     212: 09 b0         add sp, #36\n"
+  "     212: 05 b0         add sp, #20\n"
   "     214: bd ec 10 8a   vpop {s16, s17, s18, s19, s20, s21, s22, s23, s24, s25, s26, s27, s28, s29, s30, s31}\n"
   "     218: bd e8 e0 4d   pop.w {r5, r6, r7, r8, r10, r11, lr}\n"
   "     21c: d9 f8 30 80   ldr.w r8, [r9, #48]\n"
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index 3490959..d621122 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -208,23 +208,20 @@
   virtual void GetCurrentThread(ManagedRegister dest) = 0;
   virtual void GetCurrentThread(FrameOffset dest_offset) = 0;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed. in_reg holds a possibly stale reference
-  // that can be used to avoid loading the handle scope entry to see if the value is
-  // null.
-  virtual void CreateHandleScopeEntry(ManagedRegister out_reg,
-                                      FrameOffset handlescope_offset,
-                                      ManagedRegister in_reg,
-                                      bool null_allowed) = 0;
+  // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+  // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
+  // stale reference that can be used to avoid loading the spilled value to
+  // see if the value is null.
+  virtual void CreateJObject(ManagedRegister out_reg,
+                             FrameOffset spilled_reference_offset,
+                             ManagedRegister in_reg,
+                             bool null_allowed) = 0;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed.
-  virtual void CreateHandleScopeEntry(FrameOffset out_off,
-                                      FrameOffset handlescope_offset,
-                                      bool null_allowed) = 0;
-
-  // src holds a handle scope entry (Object**) load this into dst
-  virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) = 0;
+  // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+  // or to be null if the value is null and `null_allowed`.
+  virtual void CreateJObject(FrameOffset out_off,
+                             FrameOffset spilled_reference_offset,
+                             bool null_allowed) = 0;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
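A caller of this interface picks the overload based on where the outgoing JNI argument lives; a minimal sketch mirroring the compiler code earlier in this change (jni_conv, spilled_reference_offset and null_allowed as defined there):

  if (jni_conv->IsCurrentParamOnStack()) {
    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
    __ CreateJObject(out_off, spilled_reference_offset, null_allowed);
  } else {
    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
    __ CreateJObject(out_reg,
                     spilled_reference_offset,
                     ManagedRegister::NoRegister(),
                     null_allowed);
  }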
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index 2c7902b..2710eb1 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -466,10 +466,10 @@
   __ mfence();
 }
 
-void X86JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
-                                                  FrameOffset handle_scope_offset,
-                                                  ManagedRegister min_reg,
-                                                  bool null_allowed) {
+void X86JNIMacroAssembler::CreateJObject(ManagedRegister mout_reg,
+                                         FrameOffset spilled_reference_offset,
+                                         ManagedRegister min_reg,
+                                         bool null_allowed) {
   X86ManagedRegister out_reg = mout_reg.AsX86();
   X86ManagedRegister in_reg = min_reg.AsX86();
   CHECK(in_reg.IsCpuRegister());
@@ -482,47 +482,30 @@
     }
     __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
     __ j(kZero, &null_arg);
-    __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
+    __ leal(out_reg.AsCpuRegister(), Address(ESP, spilled_reference_offset));
     __ Bind(&null_arg);
   } else {
-    __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
+    __ leal(out_reg.AsCpuRegister(), Address(ESP, spilled_reference_offset));
   }
 }
 
-void X86JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
-                                                  FrameOffset handle_scope_offset,
-                                                  bool null_allowed) {
+void X86JNIMacroAssembler::CreateJObject(FrameOffset out_off,
+                                         FrameOffset spilled_reference_offset,
+                                         bool null_allowed) {
   Register scratch = GetScratchRegister();
   if (null_allowed) {
     Label null_arg;
-    __ movl(scratch, Address(ESP, handle_scope_offset));
+    __ movl(scratch, Address(ESP, spilled_reference_offset));
     __ testl(scratch, scratch);
     __ j(kZero, &null_arg);
-    __ leal(scratch, Address(ESP, handle_scope_offset));
+    __ leal(scratch, Address(ESP, spilled_reference_offset));
     __ Bind(&null_arg);
   } else {
-    __ leal(scratch, Address(ESP, handle_scope_offset));
+    __ leal(scratch, Address(ESP, spilled_reference_offset));
   }
   __ movl(Address(ESP, out_off), scratch);
 }
 
-// Given a handle scope entry, load the associated reference.
-void X86JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
-                                                        ManagedRegister min_reg) {
-  X86ManagedRegister out_reg = mout_reg.AsX86();
-  X86ManagedRegister in_reg = min_reg.AsX86();
-  CHECK(out_reg.IsCpuRegister());
-  CHECK(in_reg.IsCpuRegister());
-  Label null_arg;
-  if (!out_reg.Equals(in_reg)) {
-    __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
-  }
-  __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
-  __ j(kZero, &null_arg);
-  __ movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
-  __ Bind(&null_arg);
-}
-
 void X86JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
   // TODO: not validating references
 }
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 0239ff7..448a7f4 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -126,23 +126,20 @@
   void GetCurrentThread(ManagedRegister dest) override;
   void GetCurrentThread(FrameOffset dest_offset) override;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed. in_reg holds a possibly stale reference
-  // that can be used to avoid loading the handle scope entry to see if the value is
-  // null.
-  void CreateHandleScopeEntry(ManagedRegister out_reg,
-                              FrameOffset handlescope_offset,
-                              ManagedRegister in_reg,
-                              bool null_allowed) override;
+  // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+  // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
+  // stale reference that can be used to avoid loading the spilled value to
+  // see if the value is null.
+  void CreateJObject(ManagedRegister out_reg,
+                     FrameOffset spilled_reference_offset,
+                     ManagedRegister in_reg,
+                     bool null_allowed) override;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off,
-                              FrameOffset handlescope_offset,
-                              bool null_allowed) override;
-
-  // src holds a handle scope entry (Object**) load this into dst
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
+  // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+  // or to be null if the value is null and `null_allowed`.
+  void CreateJObject(FrameOffset out_off,
+                     FrameOffset spilled_reference_offset,
+                     bool null_allowed) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index 2649084..b5e17d1 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -541,17 +541,17 @@
   __ mfence();
 }
 
-void X86_64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
-                                                     FrameOffset handle_scope_offset,
-                                                     ManagedRegister min_reg,
-                                                     bool null_allowed) {
+void X86_64JNIMacroAssembler::CreateJObject(ManagedRegister mout_reg,
+                                            FrameOffset spilled_reference_offset,
+                                            ManagedRegister min_reg,
+                                            bool null_allowed) {
   X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
   X86_64ManagedRegister in_reg = min_reg.AsX86_64();
   if (in_reg.IsNoRegister()) {  // TODO(64): && null_allowed
     // Use out_reg as indicator of null.
     in_reg = out_reg;
     // TODO: movzwl
-    __ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+    __ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), spilled_reference_offset));
   }
   CHECK(in_reg.IsCpuRegister());
   CHECK(out_reg.IsCpuRegister());
@@ -563,47 +563,30 @@
     }
     __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
     __ j(kZero, &null_arg);
-    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), spilled_reference_offset));
     __ Bind(&null_arg);
   } else {
-    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), spilled_reference_offset));
   }
 }
 
-void X86_64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
-                                                     FrameOffset handle_scope_offset,
-                                                     bool null_allowed) {
+void X86_64JNIMacroAssembler::CreateJObject(FrameOffset out_off,
+                                            FrameOffset spilled_reference_offset,
+                                            bool null_allowed) {
   CpuRegister scratch = GetScratchRegister();
   if (null_allowed) {
     Label null_arg;
-    __ movl(scratch, Address(CpuRegister(RSP), handle_scope_offset));
+    __ movl(scratch, Address(CpuRegister(RSP), spilled_reference_offset));
     __ testl(scratch, scratch);
     __ j(kZero, &null_arg);
-    __ leaq(scratch, Address(CpuRegister(RSP), handle_scope_offset));
+    __ leaq(scratch, Address(CpuRegister(RSP), spilled_reference_offset));
     __ Bind(&null_arg);
   } else {
-    __ leaq(scratch, Address(CpuRegister(RSP), handle_scope_offset));
+    __ leaq(scratch, Address(CpuRegister(RSP), spilled_reference_offset));
   }
   __ movq(Address(CpuRegister(RSP), out_off), scratch);
 }
 
-// Given a handle scope entry, load the associated reference.
-void X86_64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
-                                                           ManagedRegister min_reg) {
-  X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
-  X86_64ManagedRegister in_reg = min_reg.AsX86_64();
-  CHECK(out_reg.IsCpuRegister());
-  CHECK(in_reg.IsCpuRegister());
-  Label null_arg;
-  if (!out_reg.Equals(in_reg)) {
-    __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
-  }
-  __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
-  __ j(kZero, &null_arg);
-  __ movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
-  __ Bind(&null_arg);
-}
-
 void X86_64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
   // TODO: not validating references
 }
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index 6589544..a5f7bbb 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -146,23 +146,20 @@
   void GetCurrentThread(ManagedRegister dest) override;
   void GetCurrentThread(FrameOffset dest_offset) override;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed. in_reg holds a possibly stale reference
-  // that can be used to avoid loading the handle scope entry to see if the value is
-  // null.
-  void CreateHandleScopeEntry(ManagedRegister out_reg,
-                              FrameOffset handlescope_offset,
-                              ManagedRegister in_reg,
-                              bool null_allowed) override;
+  // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+  // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
+  // stale reference that can be used to avoid loading the spilled value to
+  // see if the value is null.
+  void CreateJObject(ManagedRegister out_reg,
+                     FrameOffset spilled_reference_offset,
+                     ManagedRegister in_reg,
+                     bool null_allowed) override;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off,
-                              FrameOffset handlescope_offset,
-                              bool null_allowed) override;
-
-  // src holds a handle scope entry (Object**) load this into dst
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
+  // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+  // or to be null if the value is null and `null_allowed`.
+  void CreateJObject(FrameOffset out_off,
+                     FrameOffset spilled_reference_offset,
+                     bool null_allowed) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.