Do not create HandleScope for JNI transitions.

We previously created a HandleScope in the JNI transition
frame to hold references passed as jobject (jclass, etc.)
to the native function, so these references were actually
spilled twice during the transition: once as a plain
argument spill and once into the HandleScope.
We now construct the jobject as a pointer to the reference
spilled in the reserved out vreg area in the caller's frame,
and the jclass for a static method is just a pointer to the
method's declaring class. This reduces the amount of work
required in the JNI transition, both on entry (in compiled
stubs) and on exit (in JniMethodEnd*).
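
For illustration, a minimal sketch of what the compiled stub
now effectively does per reference argument (the helper name
and the 32-bit compressed-reference slot type are assumptions
made for the sketch, not code from this change):

  #include <cstdint>
  #include <jni.h>

  // Sketch only: the jobject is the address of the reference spilled in
  // the caller's reserved out vreg area; a null reference must be passed
  // as a null jobject, not as a pointer to a slot holding null.
  inline jobject AsJniTransitionArgument(uint32_t* spilled_ref_slot) {
    return (*spilled_ref_slot != 0u)
        ? reinterpret_cast<jobject>(spilled_ref_slot)
        : nullptr;
  }
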
Some additional work is required when the GC visits the
references of a native method, as we now need to walk over
the method's shorty, which was unnecessary with a HandleScope.
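
A minimal sketch of that walk (hypothetical helper; the real
change reports these references through a RootVisitor as
JavaFrameRootInfo::kNativeReferenceArgument roots):

  #include <cstddef>
  #include <cstdint>
  #include <functional>

  // Sketch only: reference arguments are the 'L' entries in the shorty;
  // shorty[0] is the return type, and a non-static method additionally
  // has an implicit 'this' reference that is not part of the shorty.
  void VisitNativeReferenceArgs(const char* shorty,
                                uint32_t shorty_len,
                                bool is_static,
                                const std::function<void(size_t)>& visit_ref) {
    size_t arg_index = 0u;
    if (!is_static) {
      visit_ref(arg_index);  // Implicit 'this'.
      ++arg_index;
    }
    for (uint32_t i = 1u; i != shorty_len; ++i) {
      if (shorty[i] == 'L') {
        visit_ref(arg_index);  // Reference argument spilled by the stub.
      }
      ++arg_index;
    }
  }
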
Also fix Thread::InitStackHwm() to calculate the correct stack
size needed by the new Thread::IsJniTransitionReference().
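
The stack size matters because that check is essentially a
bounds test against the thread's stack; a rough sketch of the
idea (parameter names are placeholders, not the actual Thread
internals):

  #include <cstddef>
  #include <cstdint>

  // Sketch only: a jobject produced by the new scheme points into the
  // thread's own stack, so a range check using the stack size computed
  // by InitStackHwm() can tell it apart from an indirect reference.
  inline bool IsJniTransitionReference(const void* ref,
                                       const uint8_t* stack_begin,
                                       size_t stack_size) {
    const uint8_t* raw = static_cast<const uint8_t*>(ref);
    return raw >= stack_begin && raw < stack_begin + stack_size;
  }
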
The results for StringToBytesBenchmark on blueline little
cores running at fixed frequency 1420800 are approximately:
arm64 (medians from 3 runs)     before    after
  timeGetBytesAscii EMPTY       447.33   436.86
  timeGetBytesIso88591 EMPTY    440.52   431.13
  timeGetBytesUtf8 EMPTY        432.31   409.82
arm (medians from 3 runs)       before    after
  timeGetBytesAscii EMPTY       500.53   490.87
  timeGetBytesIso88591 EMPTY    496.45   495.30
  timeGetBytesUtf8 EMPTY        488.84   472.68

Test: m test-art-host-gtest
Test: testrunner.py --host
Test: testrunner.py --host --gcstress
Test: testrunner.py --host --jit-on-first-use
Test: testrunner.py --host --jit-on-first-use --gcstress
Test: run-gtests.sh
Test: testrunner.py --target --optimizing
Test: boots.
Bug: 172332525
Change-Id: I658f9d87071587b3e89f31c65feca976a11e9cc2
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index 02b1c7a..20dc399 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -1,8 +1,8 @@
// TODO These arrays should be generated automatically or have instructions for re-creation.
static constexpr uint8_t expected_asm_kThumb2[] = {
- 0x2D, 0xE9, 0xE0, 0x4D, 0x2D, 0xED, 0x10, 0x8A, 0x89, 0xB0, 0x00, 0x90,
- 0x21, 0x91, 0x8D, 0xED, 0x22, 0x0A, 0x23, 0x92, 0x24, 0x93, 0x88, 0xB0,
- 0x08, 0xB0, 0x09, 0xB0, 0xBD, 0xEC, 0x10, 0x8A, 0xBD, 0xE8, 0xE0, 0x4D,
+ 0x2D, 0xE9, 0xE0, 0x4D, 0x2D, 0xED, 0x10, 0x8A, 0x85, 0xB0, 0x00, 0x90,
+ 0x1D, 0x91, 0x8D, 0xED, 0x1E, 0x0A, 0x1F, 0x92, 0x20, 0x93, 0x88, 0xB0,
+ 0x08, 0xB0, 0x05, 0xB0, 0xBD, 0xEC, 0x10, 0x8A, 0xBD, 0xE8, 0xE0, 0x4D,
0xD9, 0xF8, 0x30, 0x80, 0x70, 0x47,
};
static constexpr uint8_t expected_cfi_kThumb2[] = {
@@ -11,13 +11,12 @@
0x51, 0x16, 0x05, 0x52, 0x15, 0x05, 0x53, 0x14, 0x05, 0x54, 0x13, 0x05,
0x55, 0x12, 0x05, 0x56, 0x11, 0x05, 0x57, 0x10, 0x05, 0x58, 0x0F, 0x05,
0x59, 0x0E, 0x05, 0x5A, 0x0D, 0x05, 0x5B, 0x0C, 0x05, 0x5C, 0x0B, 0x05,
- 0x5D, 0x0A, 0x05, 0x5E, 0x09, 0x05, 0x5F, 0x08, 0x42, 0x0E, 0x80, 0x01,
- 0x4E, 0x0E, 0xA0, 0x01, 0x42, 0x0E, 0x80, 0x01, 0x0A, 0x42, 0x0E, 0x5C,
- 0x44, 0x0E, 0x1C, 0x06, 0x50, 0x06, 0x51, 0x06, 0x52, 0x06, 0x53, 0x06,
- 0x54, 0x06, 0x55, 0x06, 0x56, 0x06, 0x57, 0x06, 0x58, 0x06, 0x59, 0x06,
- 0x5A, 0x06, 0x5B, 0x06, 0x5C, 0x06, 0x5D, 0x06, 0x5E, 0x06, 0x5F, 0x44,
- 0x0E, 0x00, 0xC5, 0xC6, 0xC7, 0xC8, 0xCA, 0xCB, 0xCE, 0x46, 0x0B, 0x0E,
- 0x80, 0x01,
+ 0x5D, 0x0A, 0x05, 0x5E, 0x09, 0x05, 0x5F, 0x08, 0x42, 0x0E, 0x70, 0x4E,
+ 0x0E, 0x90, 0x01, 0x42, 0x0E, 0x70, 0x0A, 0x42, 0x0E, 0x5C, 0x44, 0x0E,
+ 0x1C, 0x06, 0x50, 0x06, 0x51, 0x06, 0x52, 0x06, 0x53, 0x06, 0x54, 0x06,
+ 0x55, 0x06, 0x56, 0x06, 0x57, 0x06, 0x58, 0x06, 0x59, 0x06, 0x5A, 0x06,
+ 0x5B, 0x06, 0x5C, 0x06, 0x5D, 0x06, 0x5E, 0x06, 0x5F, 0x44, 0x0E, 0x00,
+ 0xC5, 0xC6, 0xC7, 0xC8, 0xCA, 0xCB, 0xCE, 0x46, 0x0B, 0x0E, 0x70,
};
// 0x00000000: push {r5,r6,r7,r8,r10,r11,lr}
// 0x00000004: .cfi_def_cfa_offset: 28
@@ -46,19 +45,19 @@
// 0x00000008: .cfi_offset_extended: r93 at cfa-40
// 0x00000008: .cfi_offset_extended: r94 at cfa-36
// 0x00000008: .cfi_offset_extended: r95 at cfa-32
-// 0x00000008: sub sp, #36
-// 0x0000000a: .cfi_def_cfa_offset: 128
+// 0x00000008: sub sp, #20
+// 0x0000000a: .cfi_def_cfa_offset: 112
// 0x0000000a: str r0, [sp]
-// 0x0000000c: str r1, [sp, #132]
-// 0x0000000e: vstr s0, [sp, #136]
-// 0x00000012: str r2, [sp, #140]
-// 0x00000014: str r3, [sp, #144]
+// 0x0000000c: str r1, [sp, #116]
+// 0x0000000e: vstr s0, [sp, #120]
+// 0x00000012: str r2, [sp, #124]
+// 0x00000014: str r3, [sp, #128]
// 0x00000016: sub sp, #32
-// 0x00000018: .cfi_def_cfa_offset: 160
+// 0x00000018: .cfi_def_cfa_offset: 144
// 0x00000018: add sp, #32
-// 0x0000001a: .cfi_def_cfa_offset: 128
+// 0x0000001a: .cfi_def_cfa_offset: 112
// 0x0000001a: .cfi_remember_state
-// 0x0000001a: add sp, #36
+// 0x0000001a: add sp, #20
// 0x0000001c: .cfi_def_cfa_offset: 92
// 0x0000001c: vpop {s16-s31}
// 0x00000020: .cfi_def_cfa_offset: 28
@@ -90,123 +89,123 @@
// 0x00000024: ldr r8, [tr, #48] ; is_gc_marking
// 0x00000028: bx lr
// 0x0000002a: .cfi_restore_state
-// 0x0000002a: .cfi_def_cfa_offset: 128
+// 0x0000002a: .cfi_def_cfa_offset: 112
static constexpr uint8_t expected_asm_kArm64[] = {
- 0xFF, 0x03, 0x03, 0xD1, 0xF3, 0x53, 0x06, 0xA9, 0xF5, 0x5B, 0x07, 0xA9,
- 0xF7, 0x63, 0x08, 0xA9, 0xF9, 0x6B, 0x09, 0xA9, 0xFB, 0x73, 0x0A, 0xA9,
- 0xFD, 0x7B, 0x0B, 0xA9, 0xE8, 0x27, 0x02, 0x6D, 0xEA, 0x2F, 0x03, 0x6D,
- 0xEC, 0x37, 0x04, 0x6D, 0xEE, 0x3F, 0x05, 0x6D, 0xE0, 0x03, 0x00, 0xF9,
- 0xE1, 0xCB, 0x00, 0xB9, 0xE0, 0xCF, 0x00, 0xBD, 0xE2, 0xD3, 0x00, 0xB9,
- 0xE3, 0xD7, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1, 0xFF, 0x83, 0x00, 0x91,
- 0xF3, 0x53, 0x46, 0xA9, 0xF5, 0x5B, 0x47, 0xA9, 0xF7, 0x63, 0x48, 0xA9,
- 0xF9, 0x6B, 0x49, 0xA9, 0xFB, 0x73, 0x4A, 0xA9, 0xFD, 0x7B, 0x4B, 0xA9,
- 0xE8, 0x27, 0x42, 0x6D, 0xEA, 0x2F, 0x43, 0x6D, 0xEC, 0x37, 0x44, 0x6D,
- 0xEE, 0x3F, 0x45, 0x6D, 0x74, 0x32, 0x40, 0xB9, 0xFF, 0x03, 0x03, 0x91,
+ 0xFF, 0xC3, 0x02, 0xD1, 0xF3, 0x53, 0x05, 0xA9, 0xF5, 0x5B, 0x06, 0xA9,
+ 0xF7, 0x63, 0x07, 0xA9, 0xF9, 0x6B, 0x08, 0xA9, 0xFB, 0x73, 0x09, 0xA9,
+ 0xFD, 0x7B, 0x0A, 0xA9, 0xE8, 0x27, 0x01, 0x6D, 0xEA, 0x2F, 0x02, 0x6D,
+ 0xEC, 0x37, 0x03, 0x6D, 0xEE, 0x3F, 0x04, 0x6D, 0xE0, 0x03, 0x00, 0xF9,
+ 0xE1, 0xBB, 0x00, 0xB9, 0xE0, 0xBF, 0x00, 0xBD, 0xE2, 0xC3, 0x00, 0xB9,
+ 0xE3, 0xC7, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1, 0xFF, 0x83, 0x00, 0x91,
+ 0xF3, 0x53, 0x45, 0xA9, 0xF5, 0x5B, 0x46, 0xA9, 0xF7, 0x63, 0x47, 0xA9,
+ 0xF9, 0x6B, 0x48, 0xA9, 0xFB, 0x73, 0x49, 0xA9, 0xFD, 0x7B, 0x4A, 0xA9,
+ 0xE8, 0x27, 0x41, 0x6D, 0xEA, 0x2F, 0x42, 0x6D, 0xEC, 0x37, 0x43, 0x6D,
+ 0xEE, 0x3F, 0x44, 0x6D, 0x74, 0x32, 0x40, 0xB9, 0xFF, 0xC3, 0x02, 0x91,
0xC0, 0x03, 0x5F, 0xD6,
};
static constexpr uint8_t expected_cfi_kArm64[] = {
- 0x44, 0x0E, 0xC0, 0x01, 0x44, 0x93, 0x18, 0x94, 0x16, 0x44, 0x95, 0x14,
+ 0x44, 0x0E, 0xB0, 0x01, 0x44, 0x93, 0x18, 0x94, 0x16, 0x44, 0x95, 0x14,
0x96, 0x12, 0x44, 0x97, 0x10, 0x98, 0x0E, 0x44, 0x99, 0x0C, 0x9A, 0x0A,
0x44, 0x9B, 0x08, 0x9C, 0x06, 0x44, 0x9D, 0x04, 0x9E, 0x02, 0x44, 0x05,
0x48, 0x28, 0x05, 0x49, 0x26, 0x44, 0x05, 0x4A, 0x24, 0x05, 0x4B, 0x22,
0x44, 0x05, 0x4C, 0x20, 0x05, 0x4D, 0x1E, 0x44, 0x05, 0x4E, 0x1C, 0x05,
- 0x4F, 0x1A, 0x58, 0x0E, 0xE0, 0x01, 0x44, 0x0E, 0xC0, 0x01, 0x0A, 0x44,
+ 0x4F, 0x1A, 0x58, 0x0E, 0xD0, 0x01, 0x44, 0x0E, 0xB0, 0x01, 0x0A, 0x44,
0xD3, 0xD4, 0x44, 0xD5, 0xD6, 0x44, 0xD7, 0xD8, 0x44, 0xD9, 0xDA, 0x44,
0xDB, 0xDC, 0x44, 0xDD, 0xDE, 0x44, 0x06, 0x48, 0x06, 0x49, 0x44, 0x06,
0x4A, 0x06, 0x4B, 0x44, 0x06, 0x4C, 0x06, 0x4D, 0x44, 0x06, 0x4E, 0x06,
- 0x4F, 0x48, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0xC0, 0x01,
+ 0x4F, 0x48, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0xB0, 0x01,
};
-// 0x00000000: sub sp, sp, #0xc0 (192)
-// 0x00000004: .cfi_def_cfa_offset: 192
-// 0x00000004: stp tr, x20, [sp, #96]
+// 0x00000000: sub sp, sp, #0xb0 (176)
+// 0x00000004: .cfi_def_cfa_offset: 176
+// 0x00000004: stp tr, x20, [sp, #80]
// 0x00000008: .cfi_offset: r19 at cfa-96
// 0x00000008: .cfi_offset: r20 at cfa-88
-// 0x00000008: stp x21, x22, [sp, #112]
+// 0x00000008: stp x21, x22, [sp, #96]
// 0x0000000c: .cfi_offset: r21 at cfa-80
// 0x0000000c: .cfi_offset: r22 at cfa-72
-// 0x0000000c: stp x23, x24, [sp, #128]
+// 0x0000000c: stp x23, x24, [sp, #112]
// 0x00000010: .cfi_offset: r23 at cfa-64
// 0x00000010: .cfi_offset: r24 at cfa-56
-// 0x00000010: stp x25, x26, [sp, #144]
+// 0x00000010: stp x25, x26, [sp, #128]
// 0x00000014: .cfi_offset: r25 at cfa-48
// 0x00000014: .cfi_offset: r26 at cfa-40
-// 0x00000014: stp x27, x28, [sp, #160]
+// 0x00000014: stp x27, x28, [sp, #144]
// 0x00000018: .cfi_offset: r27 at cfa-32
// 0x00000018: .cfi_offset: r28 at cfa-24
-// 0x00000018: stp x29, lr, [sp, #176]
+// 0x00000018: stp x29, lr, [sp, #160]
// 0x0000001c: .cfi_offset: r29 at cfa-16
// 0x0000001c: .cfi_offset: r30 at cfa-8
-// 0x0000001c: stp d8, d9, [sp, #32]
+// 0x0000001c: stp d8, d9, [sp, #16]
// 0x00000020: .cfi_offset_extended: r72 at cfa-160
// 0x00000020: .cfi_offset_extended: r73 at cfa-152
-// 0x00000020: stp d10, d11, [sp, #48]
+// 0x00000020: stp d10, d11, [sp, #32]
// 0x00000024: .cfi_offset_extended: r74 at cfa-144
// 0x00000024: .cfi_offset_extended: r75 at cfa-136
-// 0x00000024: stp d12, d13, [sp, #64]
+// 0x00000024: stp d12, d13, [sp, #48]
// 0x00000028: .cfi_offset_extended: r76 at cfa-128
// 0x00000028: .cfi_offset_extended: r77 at cfa-120
-// 0x00000028: stp d14, d15, [sp, #80]
+// 0x00000028: stp d14, d15, [sp, #64]
// 0x0000002c: .cfi_offset_extended: r78 at cfa-112
// 0x0000002c: .cfi_offset_extended: r79 at cfa-104
// 0x0000002c: str x0, [sp]
-// 0x00000030: str w1, [sp, #200]
-// 0x00000034: str s0, [sp, #204]
-// 0x00000038: str w2, [sp, #208]
-// 0x0000003c: str w3, [sp, #212]
+// 0x00000030: str w1, [sp, #184]
+// 0x00000034: str s0, [sp, #188]
+// 0x00000038: str w2, [sp, #192]
+// 0x0000003c: str w3, [sp, #196]
// 0x00000040: sub sp, sp, #0x20 (32)
-// 0x00000044: .cfi_def_cfa_offset: 224
+// 0x00000044: .cfi_def_cfa_offset: 208
// 0x00000044: add sp, sp, #0x20 (32)
-// 0x00000048: .cfi_def_cfa_offset: 192
+// 0x00000048: .cfi_def_cfa_offset: 176
// 0x00000048: .cfi_remember_state
-// 0x00000048: ldp tr, x20, [sp, #96]
+// 0x00000048: ldp tr, x20, [sp, #80]
// 0x0000004c: .cfi_restore: r19
// 0x0000004c: .cfi_restore: r20
-// 0x0000004c: ldp x21, x22, [sp, #112]
+// 0x0000004c: ldp x21, x22, [sp, #96]
// 0x00000050: .cfi_restore: r21
// 0x00000050: .cfi_restore: r22
-// 0x00000050: ldp x23, x24, [sp, #128]
+// 0x00000050: ldp x23, x24, [sp, #112]
// 0x00000054: .cfi_restore: r23
// 0x00000054: .cfi_restore: r24
-// 0x00000054: ldp x25, x26, [sp, #144]
+// 0x00000054: ldp x25, x26, [sp, #128]
// 0x00000058: .cfi_restore: r25
// 0x00000058: .cfi_restore: r26
-// 0x00000058: ldp x27, x28, [sp, #160]
+// 0x00000058: ldp x27, x28, [sp, #144]
// 0x0000005c: .cfi_restore: r27
// 0x0000005c: .cfi_restore: r28
-// 0x0000005c: ldp x29, lr, [sp, #176]
+// 0x0000005c: ldp x29, lr, [sp, #160]
// 0x00000060: .cfi_restore: r29
// 0x00000060: .cfi_restore: r30
-// 0x00000060: ldp d8, d9, [sp, #32]
+// 0x00000060: ldp d8, d9, [sp, #16]
// 0x00000064: .cfi_restore_extended: r72
// 0x00000064: .cfi_restore_extended: r73
-// 0x00000064: ldp d10, d11, [sp, #48]
+// 0x00000064: ldp d10, d11, [sp, #32]
// 0x00000068: .cfi_restore_extended: r74
// 0x00000068: .cfi_restore_extended: r75
-// 0x00000068: ldp d12, d13, [sp, #64]
+// 0x00000068: ldp d12, d13, [sp, #48]
// 0x0000006c: .cfi_restore_extended: r76
// 0x0000006c: .cfi_restore_extended: r77
-// 0x0000006c: ldp d14, d15, [sp, #80]
+// 0x0000006c: ldp d14, d15, [sp, #64]
// 0x00000070: .cfi_restore_extended: r78
// 0x00000070: .cfi_restore_extended: r79
-// 0x00000070: ldr w20, [tr, #50] ; is_gc_marking
-// 0x00000074: add sp, sp, #0xc0 (192)
+// 0x00000070: ldr w20, [tr, #48] ; is_gc_marking
+// 0x00000074: add sp, sp, #0xb0 (176)
// 0x00000078: .cfi_def_cfa_offset: 0
// 0x00000078: ret
// 0x0000007c: .cfi_restore_state
-// 0x0000007c: .cfi_def_cfa_offset: 192
+// 0x0000007c: .cfi_def_cfa_offset: 176
static constexpr uint8_t expected_asm_kX86[] = {
- 0x57, 0x56, 0x55, 0x83, 0xC4, 0xE4, 0x50, 0x89, 0x4C, 0x24, 0x34, 0xF3,
- 0x0F, 0x11, 0x44, 0x24, 0x38, 0x89, 0x54, 0x24, 0x3C, 0x89, 0x5C, 0x24,
- 0x40, 0x83, 0xC4, 0xE0, 0x83, 0xC4, 0x20, 0x83, 0xC4, 0x20, 0x5D, 0x5E,
+ 0x57, 0x56, 0x55, 0x83, 0xC4, 0xF4, 0x50, 0x89, 0x4C, 0x24, 0x24, 0xF3,
+ 0x0F, 0x11, 0x44, 0x24, 0x28, 0x89, 0x54, 0x24, 0x2C, 0x89, 0x5C, 0x24,
+ 0x30, 0x83, 0xC4, 0xE0, 0x83, 0xC4, 0x20, 0x83, 0xC4, 0x10, 0x5D, 0x5E,
0x5F, 0xC3,
};
static constexpr uint8_t expected_cfi_kX86[] = {
0x41, 0x0E, 0x08, 0x87, 0x02, 0x41, 0x0E, 0x0C, 0x86, 0x03, 0x41, 0x0E,
- 0x10, 0x85, 0x04, 0x43, 0x0E, 0x2C, 0x41, 0x0E, 0x30, 0x55, 0x0E, 0x50,
- 0x43, 0x0E, 0x30, 0x0A, 0x43, 0x0E, 0x10, 0x41, 0x0E, 0x0C, 0xC5, 0x41,
- 0x0E, 0x08, 0xC6, 0x41, 0x0E, 0x04, 0xC7, 0x41, 0x0B, 0x0E, 0x30,
+ 0x10, 0x85, 0x04, 0x43, 0x0E, 0x1C, 0x41, 0x0E, 0x20, 0x55, 0x0E, 0x40,
+ 0x43, 0x0E, 0x20, 0x0A, 0x43, 0x0E, 0x10, 0x41, 0x0E, 0x0C, 0xC5, 0x41,
+ 0x0E, 0x08, 0xC6, 0x41, 0x0E, 0x04, 0xC7, 0x41, 0x0B, 0x0E, 0x20,
};
// 0x00000000: push edi
// 0x00000001: .cfi_def_cfa_offset: 8
@@ -217,20 +216,20 @@
// 0x00000002: push ebp
// 0x00000003: .cfi_def_cfa_offset: 16
// 0x00000003: .cfi_offset: r5 at cfa-16
-// 0x00000003: add esp, -28
-// 0x00000006: .cfi_def_cfa_offset: 44
+// 0x00000003: add esp, -12
+// 0x00000006: .cfi_def_cfa_offset: 28
// 0x00000006: push eax
-// 0x00000007: .cfi_def_cfa_offset: 48
-// 0x00000007: mov [esp + 52], ecx
-// 0x0000000b: movss [esp + 56], xmm0
-// 0x00000011: mov [esp + 60], edx
-// 0x00000015: mov [esp + 64], ebx
+// 0x00000007: .cfi_def_cfa_offset: 32
+// 0x00000007: mov [esp + 36], ecx
+// 0x0000000b: movss [esp + 40], xmm0
+// 0x00000011: mov [esp + 44], edx
+// 0x00000015: mov [esp + 48], ebx
// 0x00000019: add esp, -32
-// 0x0000001c: .cfi_def_cfa_offset: 80
+// 0x0000001c: .cfi_def_cfa_offset: 64
// 0x0000001c: add esp, 32
-// 0x0000001f: .cfi_def_cfa_offset: 48
+// 0x0000001f: .cfi_def_cfa_offset: 32
// 0x0000001f: .cfi_remember_state
-// 0x0000001f: add esp, 32
+// 0x0000001f: add esp, 16
// 0x00000022: .cfi_def_cfa_offset: 16
// 0x00000022: pop ebp
// 0x00000023: .cfi_def_cfa_offset: 12
@@ -243,30 +242,30 @@
// 0x00000025: .cfi_restore: r7
// 0x00000025: ret
// 0x00000026: .cfi_restore_state
-// 0x00000026: .cfi_def_cfa_offset: 48
+// 0x00000026: .cfi_def_cfa_offset: 32
static constexpr uint8_t expected_asm_kX86_64[] = {
0x41, 0x57, 0x41, 0x56, 0x41, 0x55, 0x41, 0x54, 0x55, 0x53, 0x48, 0x83,
- 0xEC, 0x48, 0xF2, 0x44, 0x0F, 0x11, 0x7C, 0x24, 0x40, 0xF2, 0x44, 0x0F,
- 0x11, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24, 0x30, 0xF2,
- 0x44, 0x0F, 0x11, 0x64, 0x24, 0x28, 0x48, 0x89, 0x3C, 0x24, 0x89, 0xB4,
- 0x24, 0x88, 0x00, 0x00, 0x00, 0xF3, 0x0F, 0x11, 0x84, 0x24, 0x8C, 0x00,
- 0x00, 0x00, 0x89, 0x94, 0x24, 0x90, 0x00, 0x00, 0x00, 0x89, 0x8C, 0x24,
- 0x94, 0x00, 0x00, 0x00, 0x48, 0x83, 0xC4, 0xE0, 0x48, 0x83, 0xC4, 0x20,
- 0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x10, 0x6C,
- 0x24, 0x30, 0xF2, 0x44, 0x0F, 0x10, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F,
- 0x10, 0x7C, 0x24, 0x40, 0x48, 0x83, 0xC4, 0x48, 0x5B, 0x5D, 0x41, 0x5C,
- 0x41, 0x5D, 0x41, 0x5E, 0x41, 0x5F, 0xC3,
+ 0xEC, 0x38, 0xF2, 0x44, 0x0F, 0x11, 0x7C, 0x24, 0x30, 0xF2, 0x44, 0x0F,
+ 0x11, 0x74, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24, 0x20, 0xF2,
+ 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x48, 0x89, 0x3C, 0x24, 0x89, 0x74,
+ 0x24, 0x78, 0xF3, 0x0F, 0x11, 0x44, 0x24, 0x7C, 0x89, 0x94, 0x24, 0x80,
+ 0x00, 0x00, 0x00, 0x89, 0x8C, 0x24, 0x84, 0x00, 0x00, 0x00, 0x48, 0x83,
+ 0xC4, 0xE0, 0x48, 0x83, 0xC4, 0x20, 0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24,
+ 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24, 0x20, 0xF2, 0x44, 0x0F, 0x10,
+ 0x74, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x10, 0x7C, 0x24, 0x30, 0x48, 0x83,
+ 0xC4, 0x38, 0x5B, 0x5D, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, 0x5F,
+ 0xC3,
};
static constexpr uint8_t expected_cfi_kX86_64[] = {
0x42, 0x0E, 0x10, 0x8F, 0x04, 0x42, 0x0E, 0x18, 0x8E, 0x06, 0x42, 0x0E,
0x20, 0x8D, 0x08, 0x42, 0x0E, 0x28, 0x8C, 0x0A, 0x41, 0x0E, 0x30, 0x86,
- 0x0C, 0x41, 0x0E, 0x38, 0x83, 0x0E, 0x44, 0x0E, 0x80, 0x01, 0x47, 0xA0,
- 0x10, 0x47, 0x9F, 0x12, 0x47, 0x9E, 0x14, 0x47, 0x9D, 0x16, 0x66, 0x0E,
- 0xA0, 0x01, 0x44, 0x0E, 0x80, 0x01, 0x0A, 0x47, 0xDD, 0x47, 0xDE, 0x47,
- 0xDF, 0x47, 0xE0, 0x44, 0x0E, 0x38, 0x41, 0x0E, 0x30, 0xC3, 0x41, 0x0E,
- 0x28, 0xC6, 0x42, 0x0E, 0x20, 0xCC, 0x42, 0x0E, 0x18, 0xCD, 0x42, 0x0E,
- 0x10, 0xCE, 0x42, 0x0E, 0x08, 0xCF, 0x41, 0x0B, 0x0E, 0x80, 0x01,
+ 0x0C, 0x41, 0x0E, 0x38, 0x83, 0x0E, 0x44, 0x0E, 0x70, 0x47, 0xA0, 0x10,
+ 0x47, 0x9F, 0x12, 0x47, 0x9E, 0x14, 0x47, 0x9D, 0x16, 0x60, 0x0E, 0x90,
+ 0x01, 0x44, 0x0E, 0x70, 0x0A, 0x47, 0xDD, 0x47, 0xDE, 0x47, 0xDF, 0x47,
+ 0xE0, 0x44, 0x0E, 0x38, 0x41, 0x0E, 0x30, 0xC3, 0x41, 0x0E, 0x28, 0xC6,
+ 0x42, 0x0E, 0x20, 0xCC, 0x42, 0x0E, 0x18, 0xCD, 0x42, 0x0E, 0x10, 0xCE,
+ 0x42, 0x0E, 0x08, 0xCF, 0x41, 0x0B, 0x0E, 0x70,
};
// 0x00000000: push r15
// 0x00000002: .cfi_def_cfa_offset: 16
@@ -286,55 +285,55 @@
// 0x00000009: push rbx
// 0x0000000a: .cfi_def_cfa_offset: 56
// 0x0000000a: .cfi_offset: r3 at cfa-56
-// 0x0000000a: subq rsp, 72
-// 0x0000000e: .cfi_def_cfa_offset: 128
-// 0x0000000e: movsd [rsp + 64], xmm15
+// 0x0000000a: subq rsp, 56
+// 0x0000000e: .cfi_def_cfa_offset: 112
+// 0x0000000e: movsd [rsp + 48], xmm15
// 0x00000015: .cfi_offset: r32 at cfa-64
-// 0x00000015: movsd [rsp + 56], xmm14
+// 0x00000015: movsd [rsp + 40], xmm14
// 0x0000001c: .cfi_offset: r31 at cfa-72
-// 0x0000001c: movsd [rsp + 48], xmm13
+// 0x0000001c: movsd [rsp + 32], xmm13
// 0x00000023: .cfi_offset: r30 at cfa-80
-// 0x00000023: movsd [rsp + 40], xmm12
+// 0x00000023: movsd [rsp + 24], xmm12
// 0x0000002a: .cfi_offset: r29 at cfa-88
// 0x0000002a: movq [rsp], rdi
-// 0x0000002e: mov [rsp + 136], esi
-// 0x00000035: movss [rsp + 140], xmm0
-// 0x0000003e: mov [rsp + 144], edx
-// 0x00000045: mov [rsp + 148], ecx
-// 0x0000004c: addq rsp, -32
-// 0x00000050: .cfi_def_cfa_offset: 160
-// 0x00000050: addq rsp, 32
-// 0x00000054: .cfi_def_cfa_offset: 128
-// 0x00000054: .cfi_remember_state
-// 0x00000054: movsd xmm12, [rsp + 40]
-// 0x0000005b: .cfi_restore: r29
-// 0x0000005b: movsd xmm13, [rsp + 48]
-// 0x00000062: .cfi_restore: r30
-// 0x00000062: movsd xmm14, [rsp + 56]
-// 0x00000069: .cfi_restore: r31
-// 0x00000069: movsd xmm15, [rsp + 64]
-// 0x00000070: .cfi_restore: r32
-// 0x00000070: addq rsp, 72
-// 0x00000074: .cfi_def_cfa_offset: 56
-// 0x00000074: pop rbx
-// 0x00000075: .cfi_def_cfa_offset: 48
-// 0x00000075: .cfi_restore: r3
-// 0x00000075: pop rbp
-// 0x00000076: .cfi_def_cfa_offset: 40
-// 0x00000076: .cfi_restore: r6
-// 0x00000076: pop r12
-// 0x00000078: .cfi_def_cfa_offset: 32
-// 0x00000078: .cfi_restore: r12
-// 0x00000078: pop r13
-// 0x0000007a: .cfi_def_cfa_offset: 24
-// 0x0000007a: .cfi_restore: r13
-// 0x0000007a: pop r14
-// 0x0000007c: .cfi_def_cfa_offset: 16
-// 0x0000007c: .cfi_restore: r14
-// 0x0000007c: pop r15
-// 0x0000007e: .cfi_def_cfa_offset: 8
-// 0x0000007e: .cfi_restore: r15
-// 0x0000007e: ret
-// 0x0000007f: .cfi_restore_state
-// 0x0000007f: .cfi_def_cfa_offset: 128
+// 0x0000002e: mov [rsp + 120], esi
+// 0x00000032: movss [rsp + 124], xmm0
+// 0x00000038: mov [rsp + 128], edx
+// 0x0000003f: mov [rsp + 132], ecx
+// 0x00000046: addq rsp, -32
+// 0x0000004a: .cfi_def_cfa_offset: 144
+// 0x0000004a: addq rsp, 32
+// 0x0000004e: .cfi_def_cfa_offset: 112
+// 0x0000004e: .cfi_remember_state
+// 0x0000004e: movsd xmm12, [rsp + 24]
+// 0x00000055: .cfi_restore: r29
+// 0x00000055: movsd xmm13, [rsp + 32]
+// 0x0000005c: .cfi_restore: r30
+// 0x0000005c: movsd xmm14, [rsp + 40]
+// 0x00000063: .cfi_restore: r31
+// 0x00000063: movsd xmm15, [rsp + 48]
+// 0x0000006a: .cfi_restore: r32
+// 0x0000006a: addq rsp, 56
+// 0x0000006e: .cfi_def_cfa_offset: 56
+// 0x0000006e: pop rbx
+// 0x0000006f: .cfi_def_cfa_offset: 48
+// 0x0000006f: .cfi_restore: r3
+// 0x0000006f: pop rbp
+// 0x00000070: .cfi_def_cfa_offset: 40
+// 0x00000070: .cfi_restore: r6
+// 0x00000070: pop r12
+// 0x00000072: .cfi_def_cfa_offset: 32
+// 0x00000072: .cfi_restore: r12
+// 0x00000072: pop r13
+// 0x00000074: .cfi_def_cfa_offset: 24
+// 0x00000074: .cfi_restore: r13
+// 0x00000074: pop r14
+// 0x00000076: .cfi_def_cfa_offset: 16
+// 0x00000076: .cfi_restore: r14
+// 0x00000076: pop r15
+// 0x00000078: .cfi_def_cfa_offset: 8
+// 0x00000078: .cfi_restore: r15
+// 0x00000078: ret
+// 0x00000079: .cfi_restore_state
+// 0x00000079: .cfi_def_cfa_offset: 112
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 3ee7e0e..dc5304c 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -21,6 +21,7 @@
#include "art_method-inl.h"
#include "base/bit_utils.h"
+#include "base/casts.h"
#include "base/mem_map.h"
#include "class_linker.h"
#include "common_compiler_test.h"
@@ -28,6 +29,7 @@
#include "dex/dex_file.h"
#include "gtest/gtest.h"
#include "indirect_reference_table.h"
+#include "java_frame_root_info.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "mirror/class-inl.h"
@@ -154,19 +156,6 @@
jcharArray, jfloatArray, jshortArray, jdoubleArray, jlongArray>::value;
};
-template <typename ... Args>
-struct count_refs_helper {
- using value_type = size_t;
- static constexpr const size_t value = 0;
-};
-
-template <typename Arg, typename ... Args>
-struct count_refs_helper<Arg, Args ...> {
- using value_type = size_t;
- static constexpr size_t value =
- (jni_type_traits<Arg>::is_ref ? 1 : 0) + count_refs_helper<Args ...>::value;
-};
-
// Base case: No parameters = 0 refs.
size_t count_nonnull_refs_helper() {
return 0;
@@ -399,12 +388,89 @@
jmethodID jmethod_;
private:
+ class ScopedSynchronizedEntryPointOverrides {
+ public:
+ ScopedSynchronizedEntryPointOverrides() {
+ QuickEntryPoints* qpoints = &Thread::Current()->tlsPtr_.quick_entrypoints;
+ jni_method_start_synchronized_original_ = qpoints->pJniMethodStartSynchronized;
+ qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronizedOverride;
+ jni_method_end_synchronized_original_ = qpoints->pJniMethodEndSynchronized;
+ qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronizedOverride;
+ jni_method_end_with_reference_synchronized_original_ =
+ qpoints->pJniMethodEndWithReferenceSynchronized;
+ qpoints->pJniMethodEndWithReferenceSynchronized =
+ JniMethodEndWithReferenceSynchronizedOverride;
+ }
+
+ ~ScopedSynchronizedEntryPointOverrides() {
+ QuickEntryPoints* qpoints = &Thread::Current()->tlsPtr_.quick_entrypoints;
+ qpoints->pJniMethodStartSynchronized = jni_method_start_synchronized_original_;
+ qpoints->pJniMethodEndSynchronized = jni_method_end_synchronized_original_;
+ qpoints->pJniMethodEndWithReferenceSynchronized =
+ jni_method_end_with_reference_synchronized_original_;
+ }
+ };
+
+ static uint32_t JniMethodStartSynchronizedOverride(jobject to_lock, Thread* self);
+ static void JniMethodEndSynchronizedOverride(uint32_t saved_local_ref_cookie,
+ jobject locked,
+ Thread* self);
+ static mirror::Object* JniMethodEndWithReferenceSynchronizedOverride(
+ jobject result,
+ uint32_t saved_local_ref_cookie,
+ jobject locked,
+ Thread* self);
+
+ using StartSynchronizedType = uint32_t (*)(jobject, Thread*);
+ using EndSynchronizedType = void (*)(uint32_t, jobject, Thread*);
+ using EndWithReferenceSynchronizedType = mirror::Object* (*)(jobject, uint32_t, jobject, Thread*);
+
+ static StartSynchronizedType jni_method_start_synchronized_original_;
+ static EndSynchronizedType jni_method_end_synchronized_original_;
+ static EndWithReferenceSynchronizedType jni_method_end_with_reference_synchronized_original_;
+ static uint32_t saved_local_ref_cookie_;
+ static jobject locked_object_;
+
bool check_generic_jni_;
};
jclass JniCompilerTest::jklass_;
jobject JniCompilerTest::jobj_;
jobject JniCompilerTest::class_loader_;
+JniCompilerTest::StartSynchronizedType JniCompilerTest::jni_method_start_synchronized_original_;
+JniCompilerTest::EndSynchronizedType JniCompilerTest::jni_method_end_synchronized_original_;
+JniCompilerTest::EndWithReferenceSynchronizedType
+ JniCompilerTest::jni_method_end_with_reference_synchronized_original_;
+uint32_t JniCompilerTest::saved_local_ref_cookie_;
+jobject JniCompilerTest::locked_object_;
+
+uint32_t JniCompilerTest::JniMethodStartSynchronizedOverride(jobject to_lock, Thread* self) {
+ locked_object_ = to_lock;
+ uint32_t cookie = jni_method_start_synchronized_original_(to_lock, self);
+ saved_local_ref_cookie_ = cookie;
+ return cookie;
+}
+
+void JniCompilerTest::JniMethodEndSynchronizedOverride(uint32_t saved_local_ref_cookie,
+ jobject locked,
+ Thread* self) {
+ EXPECT_EQ(saved_local_ref_cookie_, saved_local_ref_cookie);
+ EXPECT_EQ(locked_object_, locked);
+ jni_method_end_synchronized_original_(saved_local_ref_cookie, locked, self);
+}
+
+mirror::Object* JniCompilerTest::JniMethodEndWithReferenceSynchronizedOverride(
+ jobject result,
+ uint32_t saved_local_ref_cookie,
+ jobject locked,
+ Thread* self) {
+ EXPECT_EQ(saved_local_ref_cookie_, saved_local_ref_cookie);
+ EXPECT_EQ(locked_object_, locked);
+ return jni_method_end_with_reference_synchronized_original_(result,
+ saved_local_ref_cookie,
+ locked,
+ self);
+}
// Test the normal compiler and normal generic JNI only.
// The following features are unsupported in @FastNative:
@@ -553,42 +619,56 @@
BaseHandleScope* const handle_scope_;
};
-// Number of references allocated in JNI ShadowFrames on the given thread.
-static size_t NumJniShadowFrameReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
- return self->GetManagedStack()->NumJniShadowFrameReferences();
-}
-
-// Number of references in handle scope on the given thread.
-static size_t NumHandleReferences(Thread* self) {
- size_t count = 0;
- for (BaseHandleScope* cur = self->GetTopHandleScope(); cur != nullptr; cur = cur->GetLink()) {
- count += cur->NumberOfReferences();
+class CountReferencesVisitor : public RootVisitor {
+ public:
+ void VisitRoots(mirror::Object*** roots ATTRIBUTE_UNUSED,
+ size_t count,
+ const RootInfo& info) override
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (info.GetType() == art::RootType::kRootJavaFrame) {
+ const JavaFrameRootInfo& jrfi = static_cast<const JavaFrameRootInfo&>(info);
+ if (jrfi.GetVReg() == JavaFrameRootInfo::kNativeReferenceArgument) {
+ DCHECK_EQ(count, 1u);
+ num_references_ += count;
+ }
+ }
}
- return count;
-}
+
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots ATTRIBUTE_UNUSED,
+ size_t count ATTRIBUTE_UNUSED,
+ const RootInfo& info) override
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CHECK_NE(info.GetType(), art::RootType::kRootJavaFrame);
+ }
+
+ size_t NumReferences() const {
+ return num_references_;
+ }
+
+ private:
+ size_t num_references_ = 0u;
+};
// Number of references allocated in handle scopes & JNI shadow frames on this thread.
static size_t NumStackReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
- return NumHandleReferences(self) + NumJniShadowFrameReferences(self);
+ CountReferencesVisitor visitor;
+ self->VisitRoots(&visitor, kVisitRootFlagAllRoots);
+ return visitor.NumReferences();
}
-static void expectNumStackReferences(size_t val1, size_t val2) {
+static void expectNumStackReferences(size_t expected) {
// In rare cases when JNI functions call themselves recursively,
// disable this test because it will have a false negative.
if (!IsCurrentJniCritical() && ScopedDisableCheckNumStackReferences::sCheckNumStackReferences) {
/* @CriticalNative doesn't build a HandleScope, so this test is meaningless then. */
ScopedObjectAccess soa(Thread::Current());
- size_t actual_num = NumStackReferences(Thread::Current());
- // XX: Not too sure what's going on.
- // Sometimes null references get placed and sometimes they don't?
- EXPECT_TRUE(val1 == actual_num || val2 == actual_num)
- << "expected either " << val1 << " or " << val2
- << " number of stack references, but got: " << actual_num;
+ size_t num_references = NumStackReferences(Thread::Current());
+ EXPECT_EQ(expected, num_references);
}
}
-#define EXPECT_NUM_STACK_REFERENCES(val1, val2) expectNumStackReferences(val1, val2)
+#define EXPECT_NUM_STACK_REFERENCES(expected) expectNumStackReferences(expected)
template <typename T, T* fn>
struct make_jni_test_decorator;
@@ -600,9 +680,9 @@
EXPECT_THREAD_STATE_FOR_CURRENT_JNI();
EXPECT_MUTATOR_LOCK_FOR_CURRENT_JNI();
EXPECT_JNI_ENV_AND_CLASS_FOR_CURRENT_JNI(env, kls);
- // All incoming parameters + the jclass get put into the transition's StackHandleScope.
- EXPECT_NUM_STACK_REFERENCES(count_nonnull_refs(kls, args...),
- (count_refs_helper<jclass, Args...>::value));
+ // All incoming parameters get spilled into the JNI transition frame.
+ // The `jclass` is just a reference to the method's declaring class field.
+ EXPECT_NUM_STACK_REFERENCES(count_nonnull_refs(args...));
return fn(env, kls, args...);
}
@@ -615,9 +695,8 @@
EXPECT_THREAD_STATE_FOR_CURRENT_JNI();
EXPECT_MUTATOR_LOCK_FOR_CURRENT_JNI();
EXPECT_JNI_ENV_AND_OBJECT_FOR_CURRENT_JNI(env, thisObj);
- // All incoming parameters + the implicit 'this' get put into the transition's StackHandleScope.
- EXPECT_NUM_STACK_REFERENCES(count_nonnull_refs(thisObj, args...),
- (count_refs_helper<jobject, Args...>::value));
+ // All incoming parameters + the implicit 'this' get spilled into the JNI transition frame.
+ EXPECT_NUM_STACK_REFERENCES(count_nonnull_refs(thisObj, args...));
return fn(env, thisObj, args...);
}
@@ -804,6 +883,7 @@
void JniCompilerTest::CompileAndRun_fooJJ_synchronizedImpl() {
SetUpForTest(false, "fooJJ_synchronized", "(JJ)J",
CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooJJ_synchronized));
+ ScopedSynchronizedEntryPointOverrides ssepo;
EXPECT_EQ(0, gJava_MyClassNatives_fooJJ_synchronized_calls[gCurrentJni]);
jlong a = 0x1000000020000000ULL;
@@ -1103,6 +1183,7 @@
SetUpForTest(true, "fooSSIOO",
"(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooSSIOO));
+ ScopedSynchronizedEntryPointOverrides ssepo;
EXPECT_EQ(0, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]);
jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, nullptr);
@@ -1343,6 +1424,7 @@
void JniCompilerTest::GetSinkPropertiesNativeImpl() {
SetUpForTest(false, "getSinkPropertiesNative", "(Ljava/lang/String;)[Ljava/lang/Object;",
CURRENT_JNI_WRAPPER(Java_MyClassNatives_GetSinkProperties));
+ ScopedSynchronizedEntryPointOverrides ssepo;
EXPECT_EQ(0, gJava_MyClassNatives_GetSinkProperties_calls[gCurrentJni]);
jarray result = down_cast<jarray>(
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 7afa8b1..bc1e866 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -21,7 +21,6 @@
#include "arch/arm/jni_frame_arm.h"
#include "arch/instruction_set.h"
#include "base/macros.h"
-#include "handle_scope-inl.h"
#include "utils/arm/managed_register_arm.h"
namespace art {
@@ -394,28 +393,27 @@
if (UNLIKELY(is_critical_native_)) {
CHECK(!SpillsMethod());
CHECK(!HasLocalReferenceSegmentState());
- CHECK(!HasHandleScope());
CHECK(!SpillsReturnValue());
return 0u; // There is no managed frame for @CriticalNative.
}
// Method*, callee save area size, local reference segment state
- CHECK(SpillsMethod());
+ DCHECK(SpillsMethod());
const size_t method_ptr_size = static_cast<size_t>(kArmPointerSize);
const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
size_t total_size = method_ptr_size + callee_save_area_size;
- CHECK(HasLocalReferenceSegmentState());
- // local reference segment state
- total_size += kFramePointerSize;
- // TODO: Probably better to use sizeof(IRTSegmentState) here...
-
- CHECK(HasHandleScope());
- total_size += HandleScope::SizeOf(kArmPointerSize, ReferenceCount());
+ DCHECK(HasLocalReferenceSegmentState());
+ const size_t cookie_size = SavedLocalReferenceCookieSize();
+ total_size += cookie_size;
// Plus return value spill area size
- CHECK(SpillsReturnValue());
- total_size += SizeOfReturnValue();
+ if (SpillsReturnValue()) {
+ // No padding between cookie and return value on arm.
+ DCHECK_EQ(ReturnValueSaveLocation().SizeValue(),
+ SavedLocalReferenceCookieOffset().SizeValue() + cookie_size);
+ total_size += SizeOfReturnValue();
+ }
return RoundUp(total_size, kStackAlignment);
}
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 06796c1..8d40f2e 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -20,7 +20,6 @@
#include "arch/arm64/jni_frame_arm64.h"
#include "arch/instruction_set.h"
-#include "handle_scope-inl.h"
#include "utils/arm64/managed_register_arm64.h"
namespace art {
@@ -241,26 +240,30 @@
if (is_critical_native_) {
CHECK(!SpillsMethod());
CHECK(!HasLocalReferenceSegmentState());
- CHECK(!HasHandleScope());
CHECK(!SpillsReturnValue());
return 0u; // There is no managed frame for @CriticalNative.
}
// Method*, callee save area size, local reference segment state
- CHECK(SpillsMethod());
+ DCHECK(SpillsMethod());
size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
size_t total_size = method_ptr_size + callee_save_area_size;
- CHECK(HasLocalReferenceSegmentState());
- total_size += sizeof(uint32_t);
-
- CHECK(HasHandleScope());
- total_size += HandleScope::SizeOf(kArm64PointerSize, ReferenceCount());
+ DCHECK(HasLocalReferenceSegmentState());
+ const size_t cookie_size = SavedLocalReferenceCookieSize();
+ total_size += cookie_size;
// Plus return value spill area size
- CHECK(SpillsReturnValue());
- total_size += SizeOfReturnValue();
+ if (SpillsReturnValue()) {
+ // For 64-bit return values there shall be a 4B alignment gap between the cookie
+ // and the saved return value. However, we do not need to round the intermediate
+ // `total_size` here as the final rounding below shall add sufficient padding.
+ DCHECK_ALIGNED(total_size, 4u);
+ DCHECK(!IsAligned<8u>(total_size));
+ static_assert(IsAligned<8u>(kStackAlignment));
+ total_size += SizeOfReturnValue();
+ }
return RoundUp(total_size, kStackAlignment);
}
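
A worked example of the alignment reasoning above (a sketch
with assumed arm64 values, not code from this patch): the
8-byte Method* sits at the bottom of the frame with the
4-byte cookie right above it, so an 8-byte return value is
rounded up to the next 8-byte boundary, leaving a 4-byte gap.

  #include <cstddef>

  constexpr size_t kMethodPtrSize = 8u;  // static_cast<size_t>(kFramePointerSize)
  constexpr size_t kCookieSize = 4u;     // SavedLocalReferenceCookieSize()
  constexpr size_t kCookieOffset = kMethodPtrSize;                  // 8
  constexpr size_t kLongReturnOffset =
      ((kCookieOffset + kCookieSize + 8u - 1u) / 8u) * 8u;          // RoundUp(12, 8) == 16
  static_assert(kLongReturnOffset == 16u, "gap at offsets 12..15 before the saved jlong");
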
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 1943756..2127f73 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -19,6 +19,7 @@
#include <android-base/logging.h>
#include "arch/instruction_set.h"
+#include "indirect_reference_table.h"
#ifdef ART_ENABLE_CODEGEN_arm
#include "jni/quick/arm/calling_convention_arm.h"
@@ -173,25 +174,24 @@
}
FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
- size_t references_size = handle_scope_pointer_size_ * ReferenceCount(); // size excluding header
- return FrameOffset(HandleReferencesOffset().Int32Value() + references_size);
+ // The cookie goes after the method pointer.
+ DCHECK_EQ(SavedLocalReferenceCookieSize(), sizeof(IRTSegmentState));
+ DCHECK(HasLocalReferenceSegmentState());
+ return FrameOffset(displacement_.SizeValue() + static_cast<size_t>(frame_pointer_size_));
}
FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
- if (LIKELY(HasHandleScope())) {
- // Initial offset already includes the displacement.
- // -- Remove the additional local reference cookie offset if we don't have a handle scope.
- const size_t saved_local_reference_cookie_offset =
- SavedLocalReferenceCookieOffset().Int32Value();
- // Segment state is 4 bytes long
- const size_t segment_state_size = 4;
- return FrameOffset(saved_local_reference_cookie_offset + segment_state_size);
- } else {
- // Include only the initial Method* as part of the offset.
- CHECK_LT(displacement_.SizeValue(),
- static_cast<size_t>(std::numeric_limits<int32_t>::max()));
- return FrameOffset(displacement_.Int32Value() + static_cast<size_t>(frame_pointer_size_));
+ // The saved return value goes at a properly aligned slot after the cookie.
+ DCHECK(SpillsReturnValue());
+ size_t cookie_offset = SavedLocalReferenceCookieOffset().SizeValue() - displacement_.SizeValue();
+ size_t return_value_offset = cookie_offset + SavedLocalReferenceCookieSize();
+ const size_t return_value_size = SizeOfReturnValue();
+ DCHECK(return_value_size == 4u || return_value_size == 8u) << return_value_size;
+ DCHECK_ALIGNED(return_value_offset, 4u);
+ if (return_value_size == 8u) {
+ return_value_offset = RoundUp(return_value_offset, 8u);
}
+ return FrameOffset(displacement_.SizeValue() + return_value_offset);
}
bool JniCallingConvention::HasNext() {
@@ -285,16 +285,6 @@
}
}
-// Return position of handle scope entry holding reference at the current iterator
-// position
-FrameOffset JniCallingConvention::CurrentParamHandleScopeEntryOffset() {
- CHECK(IsCurrentParamAReference());
- CHECK_LT(HandleScopeLinkOffset(), HandleScopeNumRefsOffset());
- int result = HandleReferencesOffset().Int32Value() + itr_refs_ * handle_scope_pointer_size_;
- CHECK_GT(result, HandleScopeNumRefsOffset().Int32Value());
- return FrameOffset(result);
-}
-
size_t JniCallingConvention::CurrentParamSize() const {
if (IsCurrentArgExtraForJni()) {
return static_cast<size_t>(frame_pointer_size_); // JNIEnv or jobject/jclass
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 005ae91..5679263 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -21,7 +21,6 @@
#include "base/array_ref.h"
#include "base/enums.h"
#include "dex/primitive.h"
-#include "handle_scope.h"
#include "thread.h"
#include "utils/managed_register.h"
@@ -81,7 +80,6 @@
: itr_slots_(0), itr_refs_(0), itr_args_(0), itr_longs_and_doubles_(0),
itr_float_and_doubles_(0), displacement_(0),
frame_pointer_size_(frame_pointer_size),
- handle_scope_pointer_size_(sizeof(StackReference<mirror::Object>)),
is_static_(is_static), is_synchronized_(is_synchronized),
shorty_(shorty) {
num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1;
@@ -211,8 +209,6 @@
FrameOffset displacement_;
// The size of a pointer.
const PointerSize frame_pointer_size_;
- // The size of a reference entry within the handle scope.
- const size_t handle_scope_pointer_size_;
private:
const bool is_static_;
@@ -345,32 +341,12 @@
virtual ManagedRegister CurrentParamRegister() = 0;
virtual FrameOffset CurrentParamStackOffset() = 0;
- // Iterator interface extension for JNI
- FrameOffset CurrentParamHandleScopeEntryOffset();
-
- // Position of handle scope and interior fields
- FrameOffset HandleScopeOffset() const {
- return FrameOffset(this->displacement_.Int32Value() + static_cast<size_t>(frame_pointer_size_));
- // above Method reference
- }
-
- FrameOffset HandleScopeLinkOffset() const {
- return FrameOffset(HandleScopeOffset().Int32Value() +
- HandleScope::LinkOffset(frame_pointer_size_));
- }
-
- FrameOffset HandleScopeNumRefsOffset() const {
- return FrameOffset(HandleScopeOffset().Int32Value() +
- HandleScope::NumberOfReferencesOffset(frame_pointer_size_));
- }
-
- FrameOffset HandleReferencesOffset() const {
- return FrameOffset(HandleScopeOffset().Int32Value() +
- HandleScope::ReferencesOffset(frame_pointer_size_));
- }
-
virtual ~JniCallingConvention() {}
+ size_t SavedLocalReferenceCookieSize() const {
+ return 4u;
+ }
+
bool IsCriticalNative() const {
return is_critical_native_;
}
@@ -397,6 +373,13 @@
return_type == Primitive::kPrimChar;
}
+ // Does the transition back spill the return value in the stack frame?
+ bool SpillsReturnValue() const {
+ // Exclude return value for @CriticalNative methods for optimization speed.
+ // References are passed directly to the "end method" and there is nothing to save for `void`.
+ return !IsCriticalNative() && !IsReturnAReference() && SizeOfReturnValue() != 0u;
+ }
+
protected:
// Named iterator positions
enum IteratorPos {
@@ -415,24 +398,12 @@
protected:
size_t NumberOfExtraArgumentsForJni() const;
- // Does the transition have a StackHandleScope?
- bool HasHandleScope() const {
- // Exclude HandleScope for @CriticalNative methods for optimization speed.
- return !IsCriticalNative();
- }
-
// Does the transition have a local reference segment state?
bool HasLocalReferenceSegmentState() const {
// Exclude local reference segment states for @CriticalNative methods for optimization speed.
return !IsCriticalNative();
}
- // Does the transition back spill the return value in the stack frame?
- bool SpillsReturnValue() const {
- // Exclude return value for @CriticalNative methods for optimization speed.
- return !IsCriticalNative();
- }
-
// Are there extra JNI arguments (JNIEnv* and maybe jclass)?
bool HasExtraArgumentsForJni() const {
// @CriticalNative jni implementations exclude both JNIEnv* and the jclass/jobject parameters.
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index e7dd6cf..2fd9abd 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -230,7 +230,7 @@
if (LIKELY(!is_critical_native)) {
// Spill all register arguments.
- // TODO: Spill reference args directly to the HandleScope.
+ // TODO: Pass these in a single call to let the assembler use multi-register stores.
// TODO: Spill native stack args straight to their stack locations (adjust SP earlier).
mr_conv->ResetIterator(FrameOffset(current_frame_size));
for (; mr_conv->HasNext(); mr_conv->Next()) {
@@ -240,70 +240,7 @@
}
}
- // NOTE: @CriticalNative methods don't have a HandleScope
- // because they can't have any reference parameters or return values.
-
- // 2. Set up the HandleScope
- mr_conv->ResetIterator(FrameOffset(current_frame_size));
- main_jni_conv->ResetIterator(FrameOffset(0));
- __ StoreImmediateToFrame(main_jni_conv->HandleScopeNumRefsOffset(),
- main_jni_conv->ReferenceCount());
-
- __ CopyRawPtrFromThread(main_jni_conv->HandleScopeLinkOffset(),
- Thread::TopHandleScopeOffset<kPointerSize>());
- __ StoreStackOffsetToThread(Thread::TopHandleScopeOffset<kPointerSize>(),
- main_jni_conv->HandleScopeOffset());
-
- // 3. Place incoming reference arguments into handle scope
- main_jni_conv->Next(); // Skip JNIEnv*
- // 3.5. Create Class argument for static methods out of passed method
- if (is_static) {
- FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
- // Check handle scope offset is within frame
- CHECK_LT(handle_scope_offset.Uint32Value(), current_frame_size);
- // Note: This CopyRef() doesn't need heap unpoisoning since it's from the ArtMethod.
- // Note: This CopyRef() does not include read barrier. It will be handled below.
- __ CopyRef(handle_scope_offset,
- mr_conv->MethodRegister(),
- ArtMethod::DeclaringClassOffset(),
- /* unpoison_reference= */ false);
- main_jni_conv->Next(); // in handle scope so move to next argument
- }
- // Place every reference into the handle scope (ignore other parameters).
- while (mr_conv->HasNext()) {
- CHECK(main_jni_conv->HasNext());
- bool ref_param = main_jni_conv->IsCurrentParamAReference();
- CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
- // References need placing in handle scope and the entry value passing
- if (ref_param) {
- // Compute handle scope entry, note null is placed in the handle scope but its boxed value
- // must be null.
- FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
- // Check handle scope offset is within frame and doesn't run into the saved segment state.
- CHECK_LT(handle_scope_offset.Uint32Value(), current_frame_size);
- CHECK_NE(handle_scope_offset.Uint32Value(),
- main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
- // We spilled all registers above, so use stack locations.
- // TODO: Spill refs straight to the HandleScope.
- bool input_in_reg = false; // mr_conv->IsCurrentParamInRegister();
- bool input_on_stack = true; // mr_conv->IsCurrentParamOnStack();
- CHECK(input_in_reg || input_on_stack);
-
- if (input_in_reg) {
- ManagedRegister in_reg = mr_conv->CurrentParamRegister();
- __ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull());
- __ StoreRef(handle_scope_offset, in_reg);
- } else if (input_on_stack) {
- FrameOffset in_off = mr_conv->CurrentParamStackOffset();
- __ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());
- __ CopyRef(handle_scope_offset, in_off);
- }
- }
- mr_conv->Next();
- main_jni_conv->Next();
- }
-
- // 4. Write out the end of the quick frames.
+ // 2. Write out the end of the quick frames.
__ StoreStackPointerToThread(Thread::TopOfManagedStackOffset<kPointerSize>());
// NOTE: @CriticalNative does not need to store the stack pointer to the thread
@@ -312,7 +249,7 @@
// (TODO: We could probably disable it for @FastNative too).
} // if (!is_critical_native)
- // 5. Move frame down to allow space for out going args.
+ // 3. Move frame down to allow space for out going args.
size_t current_out_arg_size = main_out_arg_size;
if (UNLIKELY(is_critical_native)) {
DCHECK_EQ(main_out_arg_size, current_frame_size);
@@ -321,8 +258,8 @@
current_frame_size += main_out_arg_size;
}
- // Call the read barrier for the declaring class loaded from the method for a static call.
- // Skip this for @CriticalNative because we didn't build a HandleScope to begin with.
+ // 4. Call the read barrier for the declaring class in the method for a static call.
+ // Skip this for @CriticalNative because we're not passing a `jclass` to the native method.
// Note that we always have outgoing param space available for at least two params.
if (kUseReadBarrier && is_static && !is_critical_native) {
const bool kReadBarrierFastPath = true; // Always true after Mips codegen was removed.
@@ -341,24 +278,15 @@
//
// Call into the runtime's ReadBarrierJni and have it fix up
// the object address if it was moved.
+ //
+ // TODO: Move this to an actual slow path, so that the fast path is a branch-not-taken.
ThreadOffset<kPointerSize> read_barrier = QUICK_ENTRYPOINT_OFFSET(kPointerSize,
pReadBarrierJni);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
- main_jni_conv->Next(); // Skip JNIEnv.
- FrameOffset class_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
- main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
- // Pass the handle for the class as the first argument.
- if (main_jni_conv->IsCurrentParamOnStack()) {
- FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
- __ CreateHandleScopeEntry(out_off, class_handle_scope_offset, /*null_allowed=*/ false);
- } else {
- ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
- __ CreateHandleScopeEntry(out_reg,
- class_handle_scope_offset,
- ManagedRegister::NoRegister(),
- /*null_allowed=*/ false);
- }
+ // Pass the pointer to the method's declaring class as the first argument.
+ DCHECK_EQ(ArtMethod::DeclaringClassOffset().SizeValue(), 0u);
+ SetNativeParameter(jni_asm.get(), main_jni_conv.get(), method_register);
main_jni_conv->Next();
// Pass the current thread as the second argument and call.
if (main_jni_conv->IsCurrentParamInRegister()) {
@@ -368,18 +296,22 @@
__ GetCurrentThread(main_jni_conv->CurrentParamStackOffset());
__ CallFromThread(read_barrier);
}
- main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); // Reset.
+ if (is_synchronized) {
+ // Reload the method pointer in the slow path because it is needed below.
+ __ Load(method_register,
+ FrameOffset(current_out_arg_size + mr_conv->MethodStackOffset().SizeValue()),
+ static_cast<size_t>(kPointerSize));
+ }
if (kReadBarrierFastPath) {
__ Bind(skip_cold_path_label.get());
}
}
- // 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable
+ // 5. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable
// can occur. The result is the saved JNI local state that is restored by the exit call. We
// abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
// arguments.
- FrameOffset locked_object_handle_scope_offset(0xBEEFDEAD);
FrameOffset saved_cookie_offset(
FrameOffset(0xDEADBEEFu)); // @CriticalNative - use obviously bad value for debugging
if (LIKELY(!is_critical_native)) {
@@ -390,23 +322,26 @@
is_synchronized,
is_fast_native).SizeValue());
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
- locked_object_handle_scope_offset = FrameOffset(0);
if (is_synchronized) {
// Pass object for locking.
- main_jni_conv->Next(); // Skip JNIEnv.
- locked_object_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
- main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
- if (main_jni_conv->IsCurrentParamOnStack()) {
- FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
- __ CreateHandleScopeEntry(out_off,
- locked_object_handle_scope_offset,
- /*null_allowed=*/ false);
+ if (is_static) {
+ // Pass the pointer to the method's declaring class as the first argument.
+ DCHECK_EQ(ArtMethod::DeclaringClassOffset().SizeValue(), 0u);
+ SetNativeParameter(jni_asm.get(), main_jni_conv.get(), method_register);
} else {
- ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
- __ CreateHandleScopeEntry(out_reg,
- locked_object_handle_scope_offset,
- ManagedRegister::NoRegister(),
- /*null_allowed=*/ false);
+ // TODO: Use the register that still holds the `this` reference.
+ mr_conv->ResetIterator(FrameOffset(current_frame_size));
+ FrameOffset this_offset = mr_conv->CurrentParamStackOffset();
+ if (main_jni_conv->IsCurrentParamOnStack()) {
+ FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
+ __ CreateJObject(out_off, this_offset, /*null_allowed=*/ false);
+ } else {
+ ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
+ __ CreateJObject(out_reg,
+ this_offset,
+ ManagedRegister::NoRegister(),
+ /*null_allowed=*/ false);
+ }
}
main_jni_conv->Next();
}
@@ -417,6 +352,7 @@
__ GetCurrentThread(main_jni_conv->CurrentParamStackOffset());
__ CallFromThread(jni_start);
}
+ method_register = ManagedRegister::NoRegister(); // Method register is clobbered.
if (is_synchronized) { // Check for exceptions from monitor enter.
__ ExceptionPoll(main_out_arg_size);
}
@@ -426,7 +362,7 @@
__ Store(saved_cookie_offset, main_jni_conv->IntReturnRegister(), 4 /* sizeof cookie */);
}
- // 7. Fill arguments.
+ // 6. Fill arguments.
if (UNLIKELY(is_critical_native)) {
ArenaVector<ArgumentLocation> src_args(allocator.Adapter());
ArenaVector<ArgumentLocation> dest_args(allocator.Adapter());
@@ -485,21 +421,26 @@
}
CopyParameter(jni_asm.get(), mr_conv.get(), main_jni_conv.get());
}
+
+ // 7. For static method, create jclass argument as a pointer to the method's declaring class.
if (is_static) {
- // Create argument for Class
- mr_conv->ResetIterator(FrameOffset(current_frame_size));
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
main_jni_conv->Next(); // Skip JNIEnv*
- FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+ // Load reference to the method's declaring class. The method register has been
+ // clobbered by the above call, so we need to load the method from the stack.
+ FrameOffset method_offset =
+ FrameOffset(current_out_arg_size + mr_conv->MethodStackOffset().SizeValue());
+ DCHECK_EQ(ArtMethod::DeclaringClassOffset().SizeValue(), 0u);
if (main_jni_conv->IsCurrentParamOnStack()) {
FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
- __ CreateHandleScopeEntry(out_off, handle_scope_offset, /*null_allowed=*/ false);
+ __ Copy(out_off, method_offset, static_cast<size_t>(kPointerSize));
+ // TODO(x86): Get hold of the register used to copy the method pointer,
+ // so that we can use it also for loading the method entrypoint below.
} else {
ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
- __ CreateHandleScopeEntry(out_reg,
- handle_scope_offset,
- ManagedRegister::NoRegister(),
- /*null_allowed=*/ false);
+ __ Load(out_reg, method_offset, static_cast<size_t>(kPointerSize));
+ // Reuse the register also for loading the method entrypoint below.
+ method_register = out_reg;
}
}
@@ -527,8 +468,12 @@
__ Call(main_jni_conv->HiddenArgumentRegister(), jni_entrypoint_offset);
}
} else {
- __ Call(FrameOffset(main_out_arg_size + mr_conv->MethodStackOffset().SizeValue()),
- jni_entrypoint_offset);
+ if (method_register.IsRegister()) {
+ __ Call(method_register, jni_entrypoint_offset);
+ } else {
+ __ Call(FrameOffset(current_out_arg_size + mr_conv->MethodStackOffset().SizeValue()),
+ jni_entrypoint_offset);
+ }
}
// 10. Fix differences in result widths.
@@ -548,35 +493,36 @@
}
// 11. Process return value
- FrameOffset return_save_location = main_jni_conv->ReturnValueSaveLocation();
- if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
- if (LIKELY(!is_critical_native)) {
- // For normal JNI, store the return value on the stack because the call to
- // JniMethodEnd will clobber the return value. It will be restored in (13).
- CHECK_LT(return_save_location.Uint32Value(), current_frame_size);
- __ Store(return_save_location,
- main_jni_conv->ReturnRegister(),
- main_jni_conv->SizeOfReturnValue());
- } else {
- // For @CriticalNative only,
- // move the JNI return register into the managed return register (if they don't match).
- ManagedRegister jni_return_reg = main_jni_conv->ReturnRegister();
- ManagedRegister mr_return_reg = mr_conv->ReturnRegister();
+ bool spill_return_value = main_jni_conv->SpillsReturnValue();
+ FrameOffset return_save_location =
+ spill_return_value ? main_jni_conv->ReturnValueSaveLocation() : FrameOffset(0);
+ if (spill_return_value) {
+ DCHECK(!is_critical_native);
+ // For normal JNI, store the return value on the stack because the call to
+ // JniMethodEnd will clobber the return value. It will be restored in (13).
+ CHECK_LT(return_save_location.Uint32Value(), current_frame_size);
+ __ Store(return_save_location,
+ main_jni_conv->ReturnRegister(),
+ main_jni_conv->SizeOfReturnValue());
+ } else if (UNLIKELY(is_critical_native) && main_jni_conv->SizeOfReturnValue() != 0) {
+ // For @CriticalNative only,
+ // move the JNI return register into the managed return register (if they don't match).
+ ManagedRegister jni_return_reg = main_jni_conv->ReturnRegister();
+ ManagedRegister mr_return_reg = mr_conv->ReturnRegister();
- // Check if the JNI return register matches the managed return register.
- // If they differ, only then do we have to do anything about it.
- // Otherwise the return value is already in the right place when we return.
- if (!jni_return_reg.Equals(mr_return_reg)) {
- CHECK(!main_jni_conv->UseTailCall());
- // This is typically only necessary on ARM32 due to native being softfloat
- // while managed is hardfloat.
- // -- For example VMOV {r0, r1} -> D0; VMOV r0 -> S0.
- __ Move(mr_return_reg, jni_return_reg, main_jni_conv->SizeOfReturnValue());
- } else if (jni_return_reg.IsNoRegister() && mr_return_reg.IsNoRegister()) {
- // Check that if the return value is passed on the stack for some reason,
- // that the size matches.
- CHECK_EQ(main_jni_conv->SizeOfReturnValue(), mr_conv->SizeOfReturnValue());
- }
+ // Check if the JNI return register matches the managed return register.
+ // If they differ, only then do we have to do anything about it.
+ // Otherwise the return value is already in the right place when we return.
+ if (!jni_return_reg.Equals(mr_return_reg)) {
+ CHECK(!main_jni_conv->UseTailCall());
+ // This is typically only necessary on ARM32 due to native being softfloat
+ // while managed is hardfloat.
+ // -- For example VMOV {r0, r1} -> D0; VMOV r0 -> S0.
+ __ Move(mr_return_reg, jni_return_reg, main_jni_conv->SizeOfReturnValue());
+ } else if (jni_return_reg.IsNoRegister() && mr_return_reg.IsNoRegister()) {
+ // Check that if the return value is passed on the stack for some reason,
+ // that the size matches.
+ CHECK_EQ(main_jni_conv->SizeOfReturnValue(), mr_conv->SizeOfReturnValue());
}
}
@@ -589,8 +535,6 @@
__ IncreaseFrameSize(out_arg_size_diff);
current_frame_size += out_arg_size_diff;
saved_cookie_offset = FrameOffset(saved_cookie_offset.SizeValue() + out_arg_size_diff);
- locked_object_handle_scope_offset =
- FrameOffset(locked_object_handle_scope_offset.SizeValue() + out_arg_size_diff);
return_save_location = FrameOffset(return_save_location.SizeValue() + out_arg_size_diff);
}
end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
@@ -617,17 +561,32 @@
end_jni_conv->Next();
if (is_synchronized) {
// Pass object for unlocking.
- if (end_jni_conv->IsCurrentParamOnStack()) {
- FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
- __ CreateHandleScopeEntry(out_off,
- locked_object_handle_scope_offset,
- /*null_allowed=*/ false);
+ if (is_static) {
+ // Load reference to the method's declaring class. The method register has been
+ // clobbered by the above call, so we need to load the method from the stack.
+ FrameOffset method_offset =
+ FrameOffset(current_out_arg_size + mr_conv->MethodStackOffset().SizeValue());
+ DCHECK_EQ(ArtMethod::DeclaringClassOffset().SizeValue(), 0u);
+ if (end_jni_conv->IsCurrentParamOnStack()) {
+ FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
+ __ Copy(out_off, method_offset, static_cast<size_t>(kPointerSize));
+ } else {
+ ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
+ __ Load(out_reg, method_offset, static_cast<size_t>(kPointerSize));
+ }
} else {
- ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
- __ CreateHandleScopeEntry(out_reg,
- locked_object_handle_scope_offset,
- ManagedRegister::NoRegister(),
- /*null_allowed=*/ false);
+ mr_conv->ResetIterator(FrameOffset(current_frame_size));
+ FrameOffset this_offset = mr_conv->CurrentParamStackOffset();
+ if (end_jni_conv->IsCurrentParamOnStack()) {
+ FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
+ __ CreateJObject(out_off, this_offset, /*null_allowed=*/ false);
+ } else {
+ ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
+ __ CreateJObject(out_reg,
+ this_offset,
+ ManagedRegister::NoRegister(),
+ /*null_allowed=*/ false);
+ }
}
end_jni_conv->Next();
}
@@ -640,11 +599,8 @@
}
// 13. Reload return value
- if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
+ if (spill_return_value) {
__ Load(mr_conv->ReturnRegister(), return_save_location, mr_conv->SizeOfReturnValue());
- // NIT: If it's @CriticalNative then we actually only need to do this IF
- // the calling convention's native return register doesn't match the managed convention's
- // return register.
}
} // if (!is_critical_native)
@@ -696,7 +652,7 @@
// TODO: Move args in registers for @CriticalNative.
bool input_in_reg = false; // mr_conv->IsCurrentParamInRegister();
bool output_in_reg = jni_conv->IsCurrentParamInRegister();
- FrameOffset handle_scope_offset(0);
+ FrameOffset spilled_reference_offset(0);
bool null_allowed = false;
bool ref_param = jni_conv->IsCurrentParamAReference();
CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
@@ -705,21 +661,21 @@
} else {
CHECK(jni_conv->IsCurrentParamOnStack());
}
- // References need placing in handle scope and the entry address passing.
+ // References are spilled to the caller's reserved out vreg area.
if (ref_param) {
null_allowed = mr_conv->IsCurrentArgPossiblyNull();
- // Compute handle scope offset. Note null is placed in the handle scope but the jobject
- // passed to the native code must be null (not a pointer into the handle scope
+ // Compute spilled reference offset. Note that null is spilled but the jobject
+ // passed to the native code must be null (not a pointer to the spill slot
// as with regular references).
- handle_scope_offset = jni_conv->CurrentParamHandleScopeEntryOffset();
- // Check handle scope offset is within frame.
- CHECK_LT(handle_scope_offset.Uint32Value(), mr_conv->GetDisplacement().Uint32Value());
+ spilled_reference_offset = mr_conv->CurrentParamStackOffset();
+ // Check that spilled reference offset is in the spill area in the caller's frame.
+ CHECK_GT(spilled_reference_offset.Uint32Value(), mr_conv->GetDisplacement().Uint32Value());
}
if (input_in_reg && output_in_reg) {
ManagedRegister in_reg = mr_conv->CurrentParamRegister();
ManagedRegister out_reg = jni_conv->CurrentParamRegister();
if (ref_param) {
- __ CreateHandleScopeEntry(out_reg, handle_scope_offset, in_reg, null_allowed);
+ __ CreateJObject(out_reg, spilled_reference_offset, in_reg, null_allowed);
} else {
if (!mr_conv->IsCurrentParamOnStack()) {
// regular non-straddling move
@@ -731,7 +687,7 @@
} else if (!input_in_reg && !output_in_reg) {
FrameOffset out_off = jni_conv->CurrentParamStackOffset();
if (ref_param) {
- __ CreateHandleScopeEntry(out_off, handle_scope_offset, null_allowed);
+ __ CreateJObject(out_off, spilled_reference_offset, null_allowed);
} else {
FrameOffset in_off = mr_conv->CurrentParamStackOffset();
size_t param_size = mr_conv->CurrentParamSize();
@@ -744,10 +700,10 @@
// Check that incoming stack arguments are above the current stack frame.
CHECK_GT(in_off.Uint32Value(), mr_conv->GetDisplacement().Uint32Value());
if (ref_param) {
- __ CreateHandleScopeEntry(out_reg,
- handle_scope_offset,
- ManagedRegister::NoRegister(),
- null_allowed);
+ __ CreateJObject(out_reg,
+ spilled_reference_offset,
+ ManagedRegister::NoRegister(),
+ null_allowed);
} else {
size_t param_size = mr_conv->CurrentParamSize();
CHECK_EQ(param_size, jni_conv->CurrentParamSize());
@@ -760,8 +716,8 @@
// Check outgoing argument is within frame part dedicated to out args.
CHECK_LT(out_off.Uint32Value(), jni_conv->GetDisplacement().Uint32Value());
if (ref_param) {
- // TODO: recycle value in in_reg rather than reload from handle scope
- __ CreateHandleScopeEntry(out_off, handle_scope_offset, null_allowed);
+ // TODO: recycle value in in_reg rather than reload from spill slot.
+ __ CreateJObject(out_off, spilled_reference_offset, null_allowed);
} else {
size_t param_size = mr_conv->CurrentParamSize();
CHECK_EQ(param_size, jni_conv->CurrentParamSize());
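The hunks above all encode the same contract for reference arguments: the reference is spilled as a 32-bit StackReference into the caller's reserved out vreg area, and the jobject handed to the native code is either null or the address of that spill slot. A minimal stand-alone sketch of that contract, using plain integer types as stand-ins for the ART assembler classes (all names below are illustrative placeholders):

#include <cstdint>

// Stand-ins: a 32-bit compressed reference spilled in the caller's frame, and a
// jobject that is simply a pointer to that spill slot during the JNI transition.
using StackRefModel = uint32_t;
using JObjectModel = StackRefModel*;

// Mirrors the null_allowed logic emitted by CreateJObject: a null reference must
// produce a null jobject, anything else produces SP + spilled_reference_offset.
inline JObjectModel CreateJObjectModel(StackRefModel* spill_slot, bool null_allowed) {
  if (null_allowed && *spill_slot == 0u) {
    return nullptr;
  }
  return spill_slot;
}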
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index df45627..d624831 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -20,7 +20,6 @@
#include "arch/instruction_set.h"
#include "arch/x86/jni_frame_x86.h"
-#include "handle_scope-inl.h"
#include "utils/x86/managed_register_x86.h"
namespace art {
@@ -195,27 +194,28 @@
if (is_critical_native_) {
CHECK(!SpillsMethod());
CHECK(!HasLocalReferenceSegmentState());
- CHECK(!HasHandleScope());
CHECK(!SpillsReturnValue());
return 0u; // There is no managed frame for @CriticalNative.
}
// Method*, PC return address and callee save area size, local reference segment state
- CHECK(SpillsMethod());
+ DCHECK(SpillsMethod());
const size_t method_ptr_size = static_cast<size_t>(kX86PointerSize);
const size_t pc_return_addr_size = kFramePointerSize;
const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
size_t total_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;
- CHECK(HasLocalReferenceSegmentState());
- total_size += kFramePointerSize;
-
- CHECK(HasHandleScope());
- total_size += HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());
+ DCHECK(HasLocalReferenceSegmentState());
+ const size_t cookie_size = SavedLocalReferenceCookieSize();
+ total_size += cookie_size;
// Plus return value spill area size
- CHECK(SpillsReturnValue());
- total_size += SizeOfReturnValue();
+ if (SpillsReturnValue()) {
+ // No padding between cookie and return value on x86.
+ DCHECK_EQ(ReturnValueSaveLocation().SizeValue(),
+ SavedLocalReferenceCookieOffset().SizeValue() + cookie_size);
+ total_size += SizeOfReturnValue();
+ }
return RoundUp(total_size, kStackAlignment);
}
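After this change, the x86 managed JNI frame is just the method pointer, return PC, callee saves, the saved local reference cookie and an optional return value spill, rounded up to the stack alignment. A simplified stand-alone model of that sum (the callee-save count is a placeholder and the 16-byte alignment is an assumption):

#include <cstddef>

constexpr size_t RoundUpTo(size_t value, size_t alignment) {
  return (value + alignment - 1u) & ~(alignment - 1u);
}

// Simplified model of the x86 FrameSize() computation above; all sizes in bytes.
constexpr size_t X86JniFrameSizeModel(size_t callee_save_count,
                                      size_t return_value_size,  // 0 if not spilled
                                      size_t cookie_size = 4u) {
  const size_t kPointerSize = 4u;      // kX86PointerSize
  const size_t kStackAlign = 16u;      // assumed kStackAlignment
  size_t total = kPointerSize          // ArtMethod*
               + kPointerSize          // PC return address
               + callee_save_count * kPointerSize
               + cookie_size           // saved local reference cookie
               + return_value_size;    // no padding needed before it on x86
  return RoundUpTo(total, kStackAlign);
}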
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 44ae8be..bb01371 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -21,7 +21,6 @@
#include "arch/instruction_set.h"
#include "arch/x86_64/jni_frame_x86_64.h"
#include "base/bit_utils.h"
-#include "handle_scope-inl.h"
#include "utils/x86_64/managed_register_x86_64.h"
namespace art {
@@ -183,27 +182,31 @@
if (is_critical_native_) {
CHECK(!SpillsMethod());
CHECK(!HasLocalReferenceSegmentState());
- CHECK(!HasHandleScope());
CHECK(!SpillsReturnValue());
return 0u; // There is no managed frame for @CriticalNative.
}
// Method*, PC return address and callee save area size, local reference segment state
- CHECK(SpillsMethod());
+ DCHECK(SpillsMethod());
const size_t method_ptr_size = static_cast<size_t>(kX86_64PointerSize);
const size_t pc_return_addr_size = kFramePointerSize;
const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
size_t total_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;
- CHECK(HasLocalReferenceSegmentState());
- total_size += kFramePointerSize;
-
- CHECK(HasHandleScope());
- total_size += HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());
+ DCHECK(HasLocalReferenceSegmentState());
+ const size_t cookie_size = SavedLocalReferenceCookieSize();
+ total_size += cookie_size;
// Plus return value spill area size
- CHECK(SpillsReturnValue());
- total_size += SizeOfReturnValue();
+ if (SpillsReturnValue()) {
+ // For 64-bit return values there shall be a 4B alignment gap between the cookie
+ // and the saved return value. However, we do not need to round the intermediate
+ // `total_size` here as the final rounding below shall add sufficient padding.
+ DCHECK_ALIGNED(total_size, 4u);
+ DCHECK(!IsAligned<8u>(total_size));
+ static_assert(IsAligned<8u>(kStackAlignment));
+ total_size += SizeOfReturnValue();
+ }
return RoundUp(total_size, kStackAlignment);
}
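The x86_64 variant leans on the final RoundUp to supply the 4-byte gap in front of an 8-byte return value. A worked example with an assumed callee-save count of five:

// 8 (ArtMethod*) + 8 (return PC) + 5 * 8 (callee saves) + 4 (cookie) = 60, which is
// 4 mod 8 as the DCHECKs above require. Adding an 8-byte return value gives 68 and
// RoundUp(68, 16) = 80, so the final rounding alone covers the 4-byte alignment gap
// between the cookie and the 8-aligned return value slot.
static_assert(60u % 8u == 4u, "cookie ends at a 4 mod 8 offset in this example");
static_assert(((60u + 8u) + 15u) / 16u * 16u == 80u, "final rounding absorbs the gap");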
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 38167fb..c7241c1 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -845,85 +845,79 @@
UNIMPLEMENTED(FATAL);
}
-void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg,
- bool null_allowed) {
+void ArmVIXLJNIMacroAssembler::CreateJObject(ManagedRegister mout_reg,
+ FrameOffset spilled_reference_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
vixl::aarch32::Register out_reg = AsVIXLRegister(mout_reg.AsArm());
vixl::aarch32::Register in_reg =
min_reg.AsArm().IsNoRegister() ? vixl::aarch32::Register() : AsVIXLRegister(min_reg.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
temps.Exclude(out_reg);
if (null_allowed) {
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ // Null values get a null jobject. Otherwise, the jobject is
+ // the address of the spilled reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+spilled_reference_offset)
if (!in_reg.IsValid()) {
- asm_.LoadFromOffset(kLoadWord, out_reg, sp, handle_scope_offset.Int32Value());
+ asm_.LoadFromOffset(kLoadWord, out_reg, sp, spilled_reference_offset.Int32Value());
in_reg = out_reg;
}
temps.Exclude(in_reg);
___ Cmp(in_reg, 0);
- if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value())) {
+ if (asm_.ShifterOperandCanHold(ADD, spilled_reference_offset.Int32Value())) {
if (!out_reg.Is(in_reg)) {
ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
3 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
___ it(eq, 0xc);
___ mov(eq, out_reg, 0);
- asm_.AddConstantInIt(out_reg, sp, handle_scope_offset.Int32Value(), ne);
+ asm_.AddConstantInIt(out_reg, sp, spilled_reference_offset.Int32Value(), ne);
} else {
ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
2 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
___ it(ne, 0x8);
- asm_.AddConstantInIt(out_reg, sp, handle_scope_offset.Int32Value(), ne);
+ asm_.AddConstantInIt(out_reg, sp, spilled_reference_offset.Int32Value(), ne);
}
} else {
// TODO: Implement this (old arm assembler would have crashed here).
UNIMPLEMENTED(FATAL);
}
} else {
- asm_.AddConstant(out_reg, sp, handle_scope_offset.Int32Value());
+ asm_.AddConstant(out_reg, sp, spilled_reference_offset.Int32Value());
}
}
-void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- bool null_allowed) {
+void ArmVIXLJNIMacroAssembler::CreateJObject(FrameOffset out_off,
+ FrameOffset spilled_reference_offset,
+ bool null_allowed) {
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
vixl32::Register scratch = temps.Acquire();
if (null_allowed) {
- asm_.LoadFromOffset(kLoadWord, scratch, sp, handle_scope_offset.Int32Value());
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+ asm_.LoadFromOffset(kLoadWord, scratch, sp, spilled_reference_offset.Int32Value());
+ // Null values get a null jobject. Otherwise, the jobject is
+ // the address of the spilled reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+spilled_reference_offset)
___ Cmp(scratch, 0);
- if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value())) {
+ if (asm_.ShifterOperandCanHold(ADD, spilled_reference_offset.Int32Value())) {
ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
2 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
___ it(ne, 0x8);
- asm_.AddConstantInIt(scratch, sp, handle_scope_offset.Int32Value(), ne);
+ asm_.AddConstantInIt(scratch, sp, spilled_reference_offset.Int32Value(), ne);
} else {
// TODO: Implement this (old arm assembler would have crashed here).
UNIMPLEMENTED(FATAL);
}
} else {
- asm_.AddConstant(scratch, sp, handle_scope_offset.Int32Value());
+ asm_.AddConstant(scratch, sp, spilled_reference_offset.Int32Value());
}
asm_.StoreToOffset(kStoreWord, scratch, sp, out_off.Int32Value());
}
-void ArmVIXLJNIMacroAssembler::LoadReferenceFromHandleScope(
- ManagedRegister mout_reg ATTRIBUTE_UNUSED,
- ManagedRegister min_reg ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
void ArmVIXLJNIMacroAssembler::VerifyObject(ManagedRegister src ATTRIBUTE_UNUSED,
bool could_be_null ATTRIBUTE_UNUSED) {
// TODO: not validating references.
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index 2f6813a..248fc67 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -153,24 +153,20 @@
void GetCurrentThread(ManagedRegister dest) override;
void GetCurrentThread(FrameOffset dest_offset) override;
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg,
- FrameOffset handlescope_offset,
- ManagedRegister in_reg,
- bool null_allowed) override;
+ // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+ // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
+ // stale reference that can be used to avoid loading the spilled value to
+ // see if the value is null.
+ void CreateJObject(ManagedRegister out_reg,
+ FrameOffset spilled_reference_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) override;
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handlescope_offset,
- bool null_allowed) override;
-
- // src holds a handle scope entry (Object**) load this into dst.
- void LoadReferenceFromHandleScope(ManagedRegister dst,
- ManagedRegister src) override;
+ // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+ // or to be null if the value is null and `null_allowed`.
+ void CreateJObject(FrameOffset out_off,
+ FrameOffset spilled_reference_offset,
+ bool null_allowed) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index bb93a96..ff83828 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -650,70 +650,54 @@
UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}
-void Arm64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister m_out_reg,
- FrameOffset handle_scope_offs,
- ManagedRegister m_in_reg,
- bool null_allowed) {
+void Arm64JNIMacroAssembler::CreateJObject(ManagedRegister m_out_reg,
+ FrameOffset spilled_reference_offset,
+ ManagedRegister m_in_reg,
+ bool null_allowed) {
Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
// For now we only hold stale handle scope entries in x registers.
CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg;
CHECK(out_reg.IsXRegister()) << out_reg;
if (null_allowed) {
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ // Null values get a null jobject. Otherwise, the jobject is
+ // the address of the spilled reference.
+ // e.g. out_reg = (in == 0) ? 0 : (SP+spilled_reference_offset)
if (in_reg.IsNoRegister()) {
LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP,
- handle_scope_offs.Int32Value());
+ spilled_reference_offset.Int32Value());
in_reg = out_reg;
}
___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0);
if (!out_reg.Equals(in_reg)) {
LoadImmediate(out_reg.AsXRegister(), 0, eq);
}
- AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne);
+ AddConstant(out_reg.AsXRegister(), SP, spilled_reference_offset.Int32Value(), ne);
} else {
- AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al);
+ AddConstant(out_reg.AsXRegister(), SP, spilled_reference_offset.Int32Value(), al);
}
}
-void Arm64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- bool null_allowed) {
+void Arm64JNIMacroAssembler::CreateJObject(FrameOffset out_off,
+ FrameOffset spilled_reference_offset,
+ bool null_allowed) {
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
Register scratch = temps.AcquireX();
if (null_allowed) {
Register scratch2 = temps.AcquireW();
- ___ Ldr(scratch2, MEM_OP(reg_x(SP), handle_scope_offset.Int32Value()));
- ___ Add(scratch, reg_x(SP), handle_scope_offset.Int32Value());
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+ ___ Ldr(scratch2, MEM_OP(reg_x(SP), spilled_reference_offset.Int32Value()));
+ ___ Add(scratch, reg_x(SP), spilled_reference_offset.Int32Value());
+ // Null values get a null jobject. Otherwise, the jobject is
+ // the address of the spilled reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+spilled_reference_offset)
___ Cmp(scratch2, 0);
___ Csel(scratch, scratch, xzr, ne);
} else {
- ___ Add(scratch, reg_x(SP), handle_scope_offset.Int32Value());
+ ___ Add(scratch, reg_x(SP), spilled_reference_offset.Int32Value());
}
___ Str(scratch, MEM_OP(reg_x(SP), out_off.Int32Value()));
}
-void Arm64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
- ManagedRegister m_in_reg) {
- Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
- Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
- CHECK(out_reg.IsXRegister()) << out_reg;
- CHECK(in_reg.IsXRegister()) << in_reg;
- vixl::aarch64::Label exit;
- if (!out_reg.Equals(in_reg)) {
- // FIXME: Who sets the flags here?
- LoadImmediate(out_reg.AsXRegister(), 0, eq);
- }
- ___ Cbz(reg_x(in_reg.AsXRegister()), &exit);
- LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0);
- ___ Bind(&exit);
-}
-
void Arm64JNIMacroAssembler::ExceptionPoll(size_t stack_adjust) {
CHECK_ALIGNED(stack_adjust, kStackAlignment);
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index 9f3eea2..ad027d3 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -135,23 +135,20 @@
void GetCurrentThread(ManagedRegister dest) override;
void GetCurrentThread(FrameOffset dest_offset) override;
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg,
- FrameOffset handlescope_offset,
- ManagedRegister in_reg,
- bool null_allowed) override;
+ // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+ // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
+ // stale reference that can be used to avoid loading the spilled value to
+ // see if the value is null.
+ void CreateJObject(ManagedRegister out_reg,
+ FrameOffset spilled_reference_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) override;
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handlescope_offset,
- bool null_allowed) override;
-
- // src holds a handle scope entry (Object**) load this into dst.
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
+ // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+ // or to be null if the value is null and `null_allowed`.
+ void CreateJObject(FrameOffset out_off,
+ FrameOffset spilled_reference_offset,
+ bool null_allowed) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 9231f9c..5265152 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -169,13 +169,13 @@
__ Move(hidden_arg_register, method_register, 4);
__ VerifyObject(scratch_register, false);
- __ CreateHandleScopeEntry(scratch_register, FrameOffset(48), scratch_register, true);
- __ CreateHandleScopeEntry(scratch_register, FrameOffset(48), scratch_register, false);
- __ CreateHandleScopeEntry(method_register, FrameOffset(48), scratch_register, true);
- __ CreateHandleScopeEntry(FrameOffset(48), FrameOffset(64), true);
- __ CreateHandleScopeEntry(method_register, FrameOffset(0), scratch_register, true);
- __ CreateHandleScopeEntry(method_register, FrameOffset(1025), scratch_register, true);
- __ CreateHandleScopeEntry(scratch_register, FrameOffset(1025), scratch_register, true);
+ __ CreateJObject(scratch_register, FrameOffset(48), scratch_register, true);
+ __ CreateJObject(scratch_register, FrameOffset(48), scratch_register, false);
+ __ CreateJObject(method_register, FrameOffset(48), scratch_register, true);
+ __ CreateJObject(FrameOffset(48), FrameOffset(64), true);
+ __ CreateJObject(method_register, FrameOffset(0), scratch_register, true);
+ __ CreateJObject(method_register, FrameOffset(1025), scratch_register, true);
+ __ CreateJObject(scratch_register, FrameOffset(1025), scratch_register, true);
__ ExceptionPoll(0);
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 1470ef1..1678f87 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -1,12 +1,12 @@
const char* const VixlJniHelpersResults = {
" 0: 2d e9 e0 4d push.w {r5, r6, r7, r8, r10, r11, lr}\n"
" 4: 2d ed 10 8a vpush {s16, s17, s18, s19, s20, s21, s22, s23, s24, s25, s26, s27, s28, s29, s30, s31}\n"
- " 8: 89 b0 sub sp, #36\n"
+ " 8: 85 b0 sub sp, #20\n"
" a: 00 90 str r0, [sp]\n"
- " c: 21 91 str r1, [sp, #132]\n"
- " e: 8d ed 22 0a vstr s0, [sp, #136]\n"
- " 12: 23 92 str r2, [sp, #140]\n"
- " 14: 24 93 str r3, [sp, #144]\n"
+ " c: 1d 91 str r1, [sp, #116]\n"
+ " e: 8d ed 1e 0a vstr s0, [sp, #120]\n"
+ " 12: 1f 92 str r2, [sp, #124]\n"
+ " 14: 20 93 str r3, [sp, #128]\n"
" 16: 88 b0 sub sp, #32\n"
" 18: ad f5 80 5d sub.w sp, sp, #4096\n"
" 1c: 08 98 ldr r0, [sp, #32]\n"
@@ -147,7 +147,7 @@
" 208: cd f8 ff c7 str.w r12, [sp, #2047]\n"
" 20c: 0d f5 80 5d add.w sp, sp, #4096\n"
" 210: 08 b0 add sp, #32\n"
- " 212: 09 b0 add sp, #36\n"
+ " 212: 05 b0 add sp, #20\n"
" 214: bd ec 10 8a vpop {s16, s17, s18, s19, s20, s21, s22, s23, s24, s25, s26, s27, s28, s29, s30, s31}\n"
" 218: bd e8 e0 4d pop.w {r5, r6, r7, r8, r10, r11, lr}\n"
" 21c: d9 f8 30 80 ldr.w r8, [r9, #48]\n"
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index 3490959..d621122 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -208,23 +208,20 @@
virtual void GetCurrentThread(ManagedRegister dest) = 0;
virtual void GetCurrentThread(FrameOffset dest_offset) = 0;
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- virtual void CreateHandleScopeEntry(ManagedRegister out_reg,
- FrameOffset handlescope_offset,
- ManagedRegister in_reg,
- bool null_allowed) = 0;
+ // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+ // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
+ // stale reference that can be used to avoid loading the spilled value to
+ // see if the value is null.
+ virtual void CreateJObject(ManagedRegister out_reg,
+ FrameOffset spilled_reference_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) = 0;
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- virtual void CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handlescope_offset,
- bool null_allowed) = 0;
-
- // src holds a handle scope entry (Object**) load this into dst
- virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) = 0;
+ // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+ // or to be null if the value is null and `null_allowed`.
+ virtual void CreateJObject(FrameOffset out_off,
+ FrameOffset spilled_reference_offset,
+ bool null_allowed) = 0;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index 2c7902b..2710eb1 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -466,10 +466,10 @@
__ mfence();
}
-void X86JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg,
- bool null_allowed) {
+void X86JNIMacroAssembler::CreateJObject(ManagedRegister mout_reg,
+ FrameOffset spilled_reference_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
X86ManagedRegister out_reg = mout_reg.AsX86();
X86ManagedRegister in_reg = min_reg.AsX86();
CHECK(in_reg.IsCpuRegister());
@@ -482,47 +482,30 @@
}
__ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
__ j(kZero, &null_arg);
- __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ __ leal(out_reg.AsCpuRegister(), Address(ESP, spilled_reference_offset));
__ Bind(&null_arg);
} else {
- __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ __ leal(out_reg.AsCpuRegister(), Address(ESP, spilled_reference_offset));
}
}
-void X86JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- bool null_allowed) {
+void X86JNIMacroAssembler::CreateJObject(FrameOffset out_off,
+ FrameOffset spilled_reference_offset,
+ bool null_allowed) {
Register scratch = GetScratchRegister();
if (null_allowed) {
Label null_arg;
- __ movl(scratch, Address(ESP, handle_scope_offset));
+ __ movl(scratch, Address(ESP, spilled_reference_offset));
__ testl(scratch, scratch);
__ j(kZero, &null_arg);
- __ leal(scratch, Address(ESP, handle_scope_offset));
+ __ leal(scratch, Address(ESP, spilled_reference_offset));
__ Bind(&null_arg);
} else {
- __ leal(scratch, Address(ESP, handle_scope_offset));
+ __ leal(scratch, Address(ESP, spilled_reference_offset));
}
__ movl(Address(ESP, out_off), scratch);
}
-// Given a handle scope entry, load the associated reference.
-void X86JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
- X86ManagedRegister out_reg = mout_reg.AsX86();
- X86ManagedRegister in_reg = min_reg.AsX86();
- CHECK(out_reg.IsCpuRegister());
- CHECK(in_reg.IsCpuRegister());
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- __ j(kZero, &null_arg);
- __ movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
- __ Bind(&null_arg);
-}
-
void X86JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
// TODO: not validating references
}
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 0239ff7..448a7f4 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -126,23 +126,20 @@
void GetCurrentThread(ManagedRegister dest) override;
void GetCurrentThread(FrameOffset dest_offset) override;
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg,
- FrameOffset handlescope_offset,
- ManagedRegister in_reg,
- bool null_allowed) override;
+ // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+ // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
+ // stale reference that can be used to avoid loading the spilled value to
+ // see if the value is null.
+ void CreateJObject(ManagedRegister out_reg,
+ FrameOffset spilled_reference_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) override;
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handlescope_offset,
- bool null_allowed) override;
-
- // src holds a handle scope entry (Object**) load this into dst
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
+ // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+ // or to be null if the value is null and `null_allowed`.
+ void CreateJObject(FrameOffset out_off,
+ FrameOffset spilled_reference_offset,
+ bool null_allowed) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index 2649084..b5e17d1 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -541,17 +541,17 @@
__ mfence();
}
-void X86_64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg,
- bool null_allowed) {
+void X86_64JNIMacroAssembler::CreateJObject(ManagedRegister mout_reg,
+ FrameOffset spilled_reference_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
X86_64ManagedRegister in_reg = min_reg.AsX86_64();
if (in_reg.IsNoRegister()) { // TODO(64): && null_allowed
// Use out_reg as indicator of null.
in_reg = out_reg;
// TODO: movzwl
- __ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ __ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), spilled_reference_offset));
}
CHECK(in_reg.IsCpuRegister());
CHECK(out_reg.IsCpuRegister());
@@ -563,47 +563,30 @@
}
__ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
__ j(kZero, &null_arg);
- __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), spilled_reference_offset));
__ Bind(&null_arg);
} else {
- __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), spilled_reference_offset));
}
}
-void X86_64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- bool null_allowed) {
+void X86_64JNIMacroAssembler::CreateJObject(FrameOffset out_off,
+ FrameOffset spilled_reference_offset,
+ bool null_allowed) {
CpuRegister scratch = GetScratchRegister();
if (null_allowed) {
Label null_arg;
- __ movl(scratch, Address(CpuRegister(RSP), handle_scope_offset));
+ __ movl(scratch, Address(CpuRegister(RSP), spilled_reference_offset));
__ testl(scratch, scratch);
__ j(kZero, &null_arg);
- __ leaq(scratch, Address(CpuRegister(RSP), handle_scope_offset));
+ __ leaq(scratch, Address(CpuRegister(RSP), spilled_reference_offset));
__ Bind(&null_arg);
} else {
- __ leaq(scratch, Address(CpuRegister(RSP), handle_scope_offset));
+ __ leaq(scratch, Address(CpuRegister(RSP), spilled_reference_offset));
}
__ movq(Address(CpuRegister(RSP), out_off), scratch);
}
-// Given a handle scope entry, load the associated reference.
-void X86_64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
- X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
- X86_64ManagedRegister in_reg = min_reg.AsX86_64();
- CHECK(out_reg.IsCpuRegister());
- CHECK(in_reg.IsCpuRegister());
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- __ j(kZero, &null_arg);
- __ movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
- __ Bind(&null_arg);
-}
-
void X86_64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
// TODO: not validating references
}
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index 6589544..a5f7bbb 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -146,23 +146,20 @@
void GetCurrentThread(ManagedRegister dest) override;
void GetCurrentThread(FrameOffset dest_offset) override;
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg,
- FrameOffset handlescope_offset,
- ManagedRegister in_reg,
- bool null_allowed) override;
+ // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+ // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
+ // stale reference that can be used to avoid loading the spilled value to
+ // see if the value is null.
+ void CreateJObject(ManagedRegister out_reg,
+ FrameOffset spilled_reference_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) override;
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handlescope_offset,
- bool null_allowed) override;
-
- // src holds a handle scope entry (Object**) load this into dst
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
+ // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+ // or to be null if the value is null and `null_allowed`.
+ void CreateJObject(FrameOffset out_off,
+ FrameOffset spilled_reference_offset,
+ bool null_allowed) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 4608af1..c6d2826 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -154,7 +154,6 @@
"jni/jni_id_manager.cc",
"jni/jni_internal.cc",
"linear_alloc.cc",
- "managed_stack.cc",
"method_handles.cc",
"metrics_reporter.cc",
"mirror/array.cc",
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 0b96573..01e8911 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -758,12 +758,21 @@
return method->IsStatic() && !method->IsConstructor();
}
-inline HandleScope* GetGenericJniHandleScope(ArtMethod** managed_sp,
- size_t num_handle_scope_references) {
- // The HandleScope is just below the cookie and padding to align as uintptr_t.
- const size_t offset =
- RoundUp(HandleScope::SizeOf(num_handle_scope_references) + kJniCookieSize, sizeof(uintptr_t));
- return reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(managed_sp) - offset);
+inline jobject GetGenericJniSynchronizationObject(Thread* self, ArtMethod* called)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(!called->IsCriticalNative());
+ DCHECK(!called->IsFastNative());
+ DCHECK(self->GetManagedStack()->GetTopQuickFrame() != nullptr);
+ DCHECK_EQ(*self->GetManagedStack()->GetTopQuickFrame(), called);
+ if (called->IsStatic()) {
+ // The `jclass` is a pointer to the method's declaring class.
+ return reinterpret_cast<jobject>(called->GetDeclaringClassAddressWithoutBarrier());
+ } else {
+ // The `this` reference is stored in the first out vreg in the caller's frame.
+ uint8_t* sp = reinterpret_cast<uint8_t*>(self->GetManagedStack()->GetTopQuickFrame());
+ size_t frame_size = RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
+ return reinterpret_cast<jobject>(sp + frame_size + static_cast<size_t>(kRuntimePointerSize));
+ }
}
} // namespace art
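The static branch above works because the declaring-class reference is the first field of ArtMethod (the jni_compiler.cc hunk DCHECKs that ArtMethod::DeclaringClassOffset() is zero), so the method pointer itself already points at a compressed class reference. A stand-alone model of that aliasing, with simplified placeholder types:

#include <cstdint>

// Placeholder types: a 32-bit compressed reference and a method whose first field
// is its declaring class, mirroring DCHECK_EQ(ArtMethod::DeclaringClassOffset(), 0u).
struct CompressedClassRefModel { uint32_t ref; };
struct MethodModel {
  CompressedClassRefModel declaring_class;  // at offset 0
  // other fields elided
};

// The `jclass` given to native code for a static method is the address of that
// first field, i.e. the method pointer reinterpreted.
inline CompressedClassRefModel* DeclaringClassAddress(MethodModel* method) {
  return &method->declaring_class;  // same address as `method`
}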
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index dfc1edd..72b4c03 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -214,10 +214,11 @@
// The caller is responsible for performing that check.
bool NeedsClinitCheckBeforeCall(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
-constexpr size_t kJniCookieSize = sizeof(uint32_t);
-
-inline HandleScope* GetGenericJniHandleScope(ArtMethod** managed_sp,
- size_t num_handle_scope_references);
+// Returns the synchronization object of a native method in a GenericJni frame
+// we have just created or are about to exit. The synchronization object is
+// the class object for static methods and the `this` object otherwise.
+jobject GetGenericJniSynchronizationObject(Thread* self, ArtMethod* called)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Update .bss method entrypoint if the `callee_reference` has an associated oat file
// and that oat file has a .bss entry for the `callee_reference`.
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index d75893d..d9fbbc7 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -84,7 +84,7 @@
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) HOT_ATTR;
-extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_on_stack,
+extern void ReadBarrierJni(mirror::CompressedReference<mirror::Class>* handle_on_stack,
Thread* self)
NO_THREAD_SAFETY_ANALYSIS HOT_ATTR;
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index e031b21..78e4dbc 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -174,7 +174,7 @@
V(UpdateInlineCache, void, void) \
V(CompileOptimized, void, ArtMethod*, Thread*) \
\
- V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*) \
+ V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Class>*, Thread*) \
V(ReadBarrierMarkReg00, mirror::Object*, mirror::Object*) \
V(ReadBarrierMarkReg01, mirror::Object*, mirror::Object*) \
V(ReadBarrierMarkReg02, mirror::Object*, mirror::Object*) \
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 38c6d3c..bda36d8 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -31,20 +31,20 @@
static inline void GoToRunnableFast(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
-extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_on_stack,
+extern void ReadBarrierJni(mirror::CompressedReference<mirror::Class>* declaring_class,
Thread* self ATTRIBUTE_UNUSED) {
DCHECK(kUseReadBarrier);
if (kUseBakerReadBarrier) {
- DCHECK(handle_on_stack->AsMirrorPtr() != nullptr)
+ DCHECK(declaring_class->AsMirrorPtr() != nullptr)
<< "The class of a static jni call must not be null";
// Check the mark bit and return early if it's already marked.
- if (LIKELY(handle_on_stack->AsMirrorPtr()->GetMarkBit() != 0)) {
+ if (LIKELY(declaring_class->AsMirrorPtr()->GetMarkBit() != 0)) {
return;
}
}
// Call the read barrier and update the handle.
- mirror::Object* to_ref = ReadBarrier::BarrierForRoot(handle_on_stack);
- handle_on_stack->Assign(to_ref);
+ mirror::Class* to_ref = ReadBarrier::BarrierForRoot(declaring_class);
+ declaring_class->Assign(to_ref);
}
// Called on entry to fast JNI, push a new local reference table only.
@@ -120,7 +120,6 @@
}
env->SetLocalSegmentState(env->GetLocalRefCookie());
env->SetLocalRefCookie(bit_cast<IRTSegmentState>(saved_local_ref_cookie));
- self->PopHandleScope();
}
// TODO: annotalysis disabled as monitor semantics are maintained in Java code.
@@ -231,8 +230,7 @@
// locked object.
if (called->IsSynchronized()) {
DCHECK(normal_native) << "@FastNative/@CriticalNative and synchronize is not supported";
- HandleScope* handle_scope = down_cast<HandleScope*>(self->GetTopHandleScope());
- jobject lock = handle_scope->GetHandle(0).ToJObject();
+ jobject lock = GetGenericJniSynchronizationObject(self, called);
DCHECK(lock != nullptr);
UnlockJniSynchronizedMethod(lock, self);
}
@@ -242,7 +240,7 @@
result.l, saved_local_ref_cookie, self));
} else {
if (LIKELY(!critical_native)) {
- PopLocalReferences(saved_local_ref_cookie, self); // Invalidates top handle scope.
+ PopLocalReferences(saved_local_ref_cookie, self);
}
switch (return_shorty_char) {
case 'F': {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index d4ed187..11c3820 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1436,12 +1436,6 @@
* necessary.
*
* void PushStack(uintptr_t): Push a value to the stack.
- *
- * uintptr_t PushHandleScope(mirror::Object* ref): Add a reference to the HandleScope. This _will_ have nullptr,
- * as this might be important for null initialization.
- * Must return the jobject, that is, the reference to the
- * entry in the HandleScope (nullptr if necessary).
- *
*/
template<class T> class BuildNativeCallFrameStateMachine {
public:
@@ -1526,22 +1520,6 @@
}
}
- bool HaveHandleScopeGpr() const {
- return gpr_index_ > 0;
- }
-
- void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
- uintptr_t handle = PushHandle(ptr);
- if (HaveHandleScopeGpr()) {
- gpr_index_--;
- PushGpr(handle);
- } else {
- stack_entries_++;
- PushStack(handle);
- gpr_index_ = 0;
- }
- }
-
bool HaveIntGpr() const {
return gpr_index_ > 0;
}
@@ -1718,9 +1696,6 @@
void PushStack(uintptr_t val) {
delegate_->PushStack(val);
}
- uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
- return delegate_->PushHandle(ref);
- }
uint32_t gpr_index_; // Number of free GPRs
uint32_t fpr_index_; // Number of free FPRs
@@ -1765,11 +1740,8 @@
Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
switch (cur_type_) {
case Primitive::kPrimNot:
- // TODO: fix abuse of mirror types.
- sm.AdvanceHandleScope(
- reinterpret_cast<mirror::Object*>(0x12345678));
+ sm.AdvancePointer(nullptr);
break;
-
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
case Primitive::kPrimChar:
@@ -1811,10 +1783,6 @@
// counting is already done in the superclass
}
- virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
- return reinterpret_cast<uintptr_t>(nullptr);
- }
-
protected:
uint32_t num_stack_entries_;
};
@@ -1822,26 +1790,18 @@
class ComputeGenericJniFrameSize final : public ComputeNativeCallFrameSize {
public:
explicit ComputeGenericJniFrameSize(bool critical_native)
- : num_handle_scope_references_(0), critical_native_(critical_native) {}
+ : critical_native_(critical_native) {}
- uintptr_t* ComputeLayout(Thread* self,
- ArtMethod** managed_sp,
- const char* shorty,
- uint32_t shorty_len,
- HandleScope** handle_scope) REQUIRES_SHARED(Locks::mutator_lock_) {
+ uintptr_t* ComputeLayout(ArtMethod** managed_sp, const char* shorty, uint32_t shorty_len)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
Walk(shorty, shorty_len);
- // Add space for cookie and HandleScope.
- void* storage = GetGenericJniHandleScope(managed_sp, num_handle_scope_references_);
- DCHECK_ALIGNED(storage, sizeof(uintptr_t));
- *handle_scope =
- HandleScope::Create(storage, self->GetTopHandleScope(), num_handle_scope_references_);
- DCHECK_EQ(*handle_scope, storage);
- uint8_t* sp8 = reinterpret_cast<uint8_t*>(*handle_scope);
- DCHECK_GE(static_cast<size_t>(reinterpret_cast<uint8_t*>(managed_sp) - sp8),
- HandleScope::SizeOf(num_handle_scope_references_) + kJniCookieSize);
+ // Add space for cookie.
+ DCHECK_ALIGNED(managed_sp, sizeof(uintptr_t));
+ static_assert(sizeof(uintptr_t) >= sizeof(IRTSegmentState));
+ uint8_t* sp8 = reinterpret_cast<uint8_t*>(managed_sp) - sizeof(uintptr_t);
// Layout stack arguments.
sp8 = LayoutStackArgs(sp8);
@@ -1873,22 +1833,14 @@
return GetHiddenArgSlot(reserved_area) + 1;
}
- uintptr_t PushHandle(mirror::Object* /* ptr */) override;
-
// Add JNIEnv* and jobj/jclass before the shorty-derived elements.
void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) override
REQUIRES_SHARED(Locks::mutator_lock_);
private:
- uint32_t num_handle_scope_references_;
const bool critical_native_;
};
-uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
- num_handle_scope_references_++;
- return reinterpret_cast<uintptr_t>(nullptr);
-}
-
void ComputeGenericJniFrameSize::WalkHeader(
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
// First 2 parameters are always excluded for @CriticalNative.
@@ -1900,7 +1852,7 @@
sm->AdvancePointer(nullptr);
// Class object or this as first argument
- sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
+ sm->AdvancePointer(nullptr);
}
// Class to push values to three separate regions. Used to fill the native call part. Adheres to
@@ -1939,11 +1891,6 @@
cur_stack_arg_++;
}
- virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) {
- LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
- UNREACHABLE();
- }
-
private:
uintptr_t* cur_gpr_reg_;
uint32_t* cur_fpr_reg_;
@@ -1962,14 +1909,14 @@
ArtMethod** managed_sp,
uintptr_t* reserved_area)
: QuickArgumentVisitor(managed_sp, is_static, shorty, shorty_len),
- jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native),
- sm_(&jni_call_) {
+ jni_call_(nullptr, nullptr, nullptr, critical_native),
+ sm_(&jni_call_),
+ current_vreg_(nullptr) {
DCHECK_ALIGNED(managed_sp, kStackAlignment);
DCHECK_ALIGNED(reserved_area, sizeof(uintptr_t));
ComputeGenericJniFrameSize fsc(critical_native);
- uintptr_t* out_args_sp =
- fsc.ComputeLayout(self, managed_sp, shorty, shorty_len, &handle_scope_);
+ uintptr_t* out_args_sp = fsc.ComputeLayout(managed_sp, shorty, shorty_len);
// Store hidden argument for @CriticalNative.
uintptr_t* hidden_arg_slot = fsc.GetHiddenArgSlot(reserved_area);
@@ -1982,10 +1929,15 @@
uintptr_t* out_args_sp_slot = fsc.GetOutArgsSpSlot(reserved_area);
*out_args_sp_slot = reinterpret_cast<uintptr_t>(out_args_sp);
+ // Prepare vreg pointer for spilling references.
+ static constexpr size_t frame_size =
+ RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
+ current_vreg_ = reinterpret_cast<uint32_t*>(
+ reinterpret_cast<uint8_t*>(managed_sp) + frame_size + sizeof(ArtMethod*));
+
jni_call_.Reset(fsc.GetStartGprRegs(reserved_area),
fsc.GetStartFprRegs(reserved_area),
- out_args_sp,
- handle_scope_);
+ out_args_sp);
// First 2 parameters are always excluded for CriticalNative methods.
if (LIKELY(!critical_native)) {
@@ -1993,82 +1945,52 @@
sm_.AdvancePointer(self->GetJniEnv());
if (is_static) {
- sm_.AdvanceHandleScope(method->GetDeclaringClass().Ptr());
+ // The `jclass` is a pointer to the method's declaring class.
+ // The declaring class must be marked.
+ method->GetDeclaringClass<kWithReadBarrier>();
+ sm_.AdvancePointer(method->GetDeclaringClassAddressWithoutBarrier());
} // else "this" reference is already handled by QuickArgumentVisitor.
}
}
void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
- void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
-
- StackReference<mirror::Object>* GetFirstHandleScopeEntry() {
- return handle_scope_->GetHandle(0).GetReference();
- }
-
- jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
- return handle_scope_->GetHandle(0).ToJObject();
- }
-
private:
// A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
class FillJniCall final : public FillNativeCall {
public:
- FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
- HandleScope* handle_scope, bool critical_native)
- : FillNativeCall(gpr_regs, fpr_regs, stack_args),
- handle_scope_(handle_scope),
- cur_entry_(0),
- critical_native_(critical_native) {}
+ FillJniCall(uintptr_t* gpr_regs,
+ uint32_t* fpr_regs,
+ uintptr_t* stack_args,
+ bool critical_native)
+ : FillNativeCall(gpr_regs, fpr_regs, stack_args),
+ cur_entry_(0),
+ critical_native_(critical_native) {}
- uintptr_t PushHandle(mirror::Object* ref) override REQUIRES_SHARED(Locks::mutator_lock_);
-
- void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
+ void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
- handle_scope_ = scope;
cur_entry_ = 0U;
}
- void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) {
- // Initialize padding entries.
- size_t expected_slots = handle_scope_->NumberOfReferences();
- while (cur_entry_ < expected_slots) {
- handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
- }
-
- if (!critical_native_) {
- // Non-critical natives have at least the self class (jclass) or this (jobject).
- DCHECK_NE(cur_entry_, 0U);
- }
- }
-
bool CriticalNative() const {
return critical_native_;
}
private:
- HandleScope* handle_scope_;
size_t cur_entry_;
const bool critical_native_;
};
- HandleScope* handle_scope_;
FillJniCall jni_call_;
-
BuildNativeCallFrameStateMachine<FillJniCall> sm_;
+ // Pointer to the current vreg in the caller's reserved out vreg area.
+ // Used for spilling reference arguments.
+ uint32_t* current_vreg_;
+
DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
};
-uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
- uintptr_t tmp;
- MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
- h.Assign(ref);
- tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
- cur_entry_++;
- return tmp;
-}
-
void BuildGenericJniFrameVisitor::Visit() {
Primitive::Type type = GetParamPrimitiveType();
switch (type) {
@@ -2080,6 +2002,7 @@
long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
}
sm_.AdvanceLong(long_arg);
+ current_vreg_ += 2u;
break;
}
case Primitive::kPrimDouble: {
@@ -2091,16 +2014,22 @@
double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
}
sm_.AdvanceDouble(double_arg);
+ current_vreg_ += 2u;
break;
}
case Primitive::kPrimNot: {
- StackReference<mirror::Object>* stack_ref =
- reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
- sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
+ mirror::Object* obj =
+ reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress())->AsMirrorPtr();
+ StackReference<mirror::Object>* spill_ref =
+ reinterpret_cast<StackReference<mirror::Object>*>(current_vreg_);
+ spill_ref->Assign(obj);
+ sm_.AdvancePointer(obj != nullptr ? spill_ref : nullptr);
+ current_vreg_ += 1u;
break;
}
case Primitive::kPrimFloat:
sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
+ current_vreg_ += 1u;
break;
case Primitive::kPrimBoolean: // Fall-through.
case Primitive::kPrimByte: // Fall-through.
@@ -2108,6 +2037,7 @@
case Primitive::kPrimShort: // Fall-through.
case Primitive::kPrimInt: // Fall-through.
sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
+ current_vreg_ += 1u;
break;
case Primitive::kPrimVoid:
LOG(FATAL) << "UNREACHABLE";
@@ -2115,15 +2045,6 @@
}
}
-void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
- // Clear out rest of the scope.
- jni_call_.ResetRemainingScopeSlots();
- if (!jni_call_.CriticalNative()) {
- // Install HandleScope.
- self->PushHandleScope(handle_scope_);
- }
-}
-
/*
* Initializes the reserved area assumed to be directly below `managed_sp` for a native call:
*
@@ -2165,8 +2086,6 @@
{
ScopedAssertNoThreadSuspension sants(__FUNCTION__);
visitor.VisitArguments();
- // FinalizeHandleScope pushes the handle scope on the thread.
- visitor.FinalizeHandleScope(self);
}
// Fix up managed-stack things in Thread. After this we can walk the stack.
@@ -2204,7 +2123,8 @@
// Start JNI, save the cookie.
if (called->IsSynchronized()) {
DCHECK(normal_native) << " @FastNative and synchronize is not supported";
- cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
+ jobject lock = GetGenericJniSynchronizationObject(self, called);
+ cookie = JniMethodStartSynchronized(lock, self);
if (self->IsExceptionPending()) {
self->PopHandleScope();
return nullptr; // Report error.
@@ -2259,14 +2179,6 @@
uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
ArtMethod* called = *sp;
uint32_t cookie = *(sp32 - 1);
- if (kIsDebugBuild && !called->IsCriticalNative()) {
- BaseHandleScope* handle_scope = self->GetTopHandleScope();
- DCHECK(handle_scope != nullptr);
- DCHECK(!handle_scope->IsVariableSized());
- // Note: We do not hold mutator lock here for normal JNI, so we cannot use the method's shorty
- // to determine the number of references. Instead rely on the value from the HandleScope.
- DCHECK_EQ(handle_scope, GetGenericJniHandleScope(sp, handle_scope->NumberOfReferences()));
- }
return GenericJniMethodEnd(self, cookie, result, result_f, called);
}
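In the Visit() changes above, current_vreg_ walks the caller's reserved out vreg area in lockstep with the arguments: one 32-bit slot for narrow values and references, two for long/double, and each reference is first spilled into its slot and then passed as either that slot's address or null. A compact stand-alone model of the reference case, with simplified placeholder types:

#include <cstdint>

// `current_vreg` points at the reference's 32-bit slot in the caller's reserved out
// vreg area; the return value is what the native call receives for this argument.
inline uintptr_t SpillReferenceArgModel(uint32_t* current_vreg, uint32_t compressed_ref) {
  *current_vreg = compressed_ref;  // spill_ref->Assign(obj)
  return compressed_ref != 0u
             ? reinterpret_cast<uintptr_t>(current_vreg)  // jobject = address of the slot
             : 0u;                                        // null stays null
}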
diff --git a/runtime/handle.h b/runtime/handle.h
index 779345d..6f6e81f 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -93,14 +93,6 @@
return reference_->IsNull();
}
- ALWAYS_INLINE jobject ToJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
- if (UNLIKELY(reference_->AsMirrorPtr() == nullptr)) {
- // Special case so that we work with null handles.
- return nullptr;
- }
- return reinterpret_cast<jobject>(reference_);
- }
-
ALWAYS_INLINE StackReference<mirror::Object>* GetReference() {
return reference_;
}
diff --git a/runtime/image.cc b/runtime/image.cc
index 6087077..56eee3b 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -29,8 +29,8 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-// Last change: Math.multiplyHigh intrinsic.
-const uint8_t ImageHeader::kImageVersion[] = { '0', '9', '5', '\0' };
+// Last change: JNI transition without HandleScope.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '9', '6', '\0' };
ImageHeader::ImageHeader(uint32_t image_reservation_size,
uint32_t component_count,
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 0deb917..f263b93 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -43,8 +43,8 @@
const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
switch (kind) {
- case kHandleScopeOrInvalid:
- return "HandleScopeOrInvalid";
+ case kJniTransitionOrInvalid:
+ return "JniTransitionOrInvalid";
case kLocal:
return "Local";
case kGlobal:
@@ -76,7 +76,7 @@
current_num_holes_(0),
resizable_(resizable) {
CHECK(error_msg != nullptr);
- CHECK_NE(desired_kind, kHandleScopeOrInvalid);
+ CHECK_NE(desired_kind, kJniTransitionOrInvalid);
// Overflow and maximum check.
CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));
@@ -361,13 +361,16 @@
DCHECK(table_ != nullptr);
- if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid) {
+ // TODO: We should eagerly check the ref kind against the `kind_` instead of
+ // relying on this weak check and postponing the rest until `CheckEntry()` below.
+ // Passing the wrong kind shall currently result in misleading warnings.
+ if (GetIndirectRefKind(iref) == kJniTransitionOrInvalid) {
auto* self = Thread::Current();
- if (self->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
+ ScopedObjectAccess soa(self);
+ if (self->IsJniTransitionReference(reinterpret_cast<jobject>(iref))) {
auto* env = self->GetJniEnv();
DCHECK(env != nullptr);
if (env->IsCheckJniEnabled()) {
- ScopedObjectAccess soa(self);
LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
if (kDumpStackOnNonLocalReference) {
self->Dump(LOG_STREAM(WARNING));
@@ -376,6 +379,7 @@
return true;
}
}
+
const uint32_t idx = ExtractIndex(iref);
if (idx < bottom_index) {
// Wrong segment.
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index f877ce8..884e8d1 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -91,11 +91,11 @@
//
// For convenience these match up with enum jobjectRefType from jni.h.
enum IndirectRefKind {
- kHandleScopeOrInvalid = 0, // <<stack indirect reference table or invalid reference>>
- kLocal = 1, // <<local reference>>
- kGlobal = 2, // <<global reference>>
- kWeakGlobal = 3, // <<weak global reference>>
- kLastKind = kWeakGlobal
+ kJniTransitionOrInvalid = 0, // <<JNI transition frame reference or invalid reference>>
+ kLocal = 1, // <<local reference>>
+ kGlobal = 2, // <<global reference>>
+ kWeakGlobal = 3, // <<weak global reference>>
+ kLastKind = kWeakGlobal
};
std::ostream& operator<<(std::ostream& os, IndirectRefKind rhs);
const char* GetIndirectRefKindString(const IndirectRefKind& kind);
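The renamed kind keeps the value 0 so that raw pointers decode to it without a table lookup. A stand-alone sketch of that decoding, assuming (as the surrounding code suggests) that the kind is packed into the two least significant bits of an indirect reference; the names are illustrative:

#include <cstdint>

enum IndirectRefKind : uint32_t {
  kJniTransitionOrInvalid = 0,  // JNI transition frame reference or invalid reference.
  kLocal = 1,
  kGlobal = 2,
  kWeakGlobal = 3,
};

// Raw, aligned pointers (a spilled stack reference or the declaring-class
// field of a method) have their low bits clear, so they decode as
// kJniTransitionOrInvalid without consulting any reference table.
inline IndirectRefKind DecodeIndirectRefKind(const void* ref) {
  return static_cast<IndirectRefKind>(reinterpret_cast<uintptr_t>(ref) & 0x3u);
}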
diff --git a/runtime/java_frame_root_info.h b/runtime/java_frame_root_info.h
index c21eee1..c2b4493 100644
--- a/runtime/java_frame_root_info.h
+++ b/runtime/java_frame_root_info.h
@@ -41,6 +41,8 @@
static constexpr size_t kMethodDeclaringClass = -3;
// The root is from the argument to a Proxy invoke.
static constexpr size_t kProxyReferenceArgument = -4;
+ // The root is from the argument to a native invoke.
+ static constexpr size_t kNativeReferenceArgument = -5;
// The maximum precise vreg number
static constexpr size_t kMaxVReg = std::numeric_limits<uint16_t>::max();
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index 42e46e9..4c7b1aa 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -56,7 +56,7 @@
// declared as a friend by JniVmExt and JniEnvExt.
inline IndirectReferenceTable* GetIndirectReferenceTable(ScopedObjectAccess& soa,
IndirectRefKind kind) {
- DCHECK_NE(kind, kHandleScopeOrInvalid);
+ DCHECK_NE(kind, kJniTransitionOrInvalid);
JNIEnvExt* env = soa.Env();
IndirectReferenceTable* irt =
(kind == kLocal) ? &env->locals_
@@ -718,11 +718,14 @@
return true;
}
- bool CheckReferenceKind(IndirectRefKind expected_kind, Thread* self, jobject obj) {
+ bool CheckReferenceKind(IndirectRefKind expected_kind, Thread* self, jobject obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
IndirectRefKind found_kind;
if (expected_kind == kLocal) {
found_kind = IndirectReferenceTable::GetIndirectRefKind(obj);
- if (found_kind == kHandleScopeOrInvalid && self->HandleScopeContains(obj)) {
+ if (found_kind == kJniTransitionOrInvalid &&
+ obj != nullptr &&
+ self->IsJniTransitionReference(obj)) {
found_kind = kLocal;
}
} else {
@@ -863,8 +866,8 @@
bool expect_null = false;
bool okay = true;
std::string error_msg;
- if (ref_kind == kHandleScopeOrInvalid) {
- if (!soa.Self()->HandleScopeContains(java_object)) {
+ if (ref_kind == kJniTransitionOrInvalid) {
+ if (!soa.Self()->IsJniTransitionReference(java_object)) {
okay = false;
error_msg = "use of invalid jobject";
} else {
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index 9aad198..08d2061 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -2589,8 +2589,8 @@
return JNIGlobalRefType;
case kWeakGlobal:
return JNIWeakGlobalRefType;
- case kHandleScopeOrInvalid:
- // Assume value is in a handle scope.
+ case kJniTransitionOrInvalid:
+ // Assume value is in a JNI transition frame.
return JNILocalRefType;
}
LOG(FATAL) << "IndirectRefKind[" << kind << "]";
diff --git a/runtime/managed_stack.cc b/runtime/managed_stack.cc
deleted file mode 100644
index be609c3..0000000
--- a/runtime/managed_stack.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "managed_stack-inl.h"
-
-#include "android-base/stringprintf.h"
-
-#include "art_method.h"
-#include "mirror/object.h"
-#include "stack_reference.h"
-
-namespace art {
-
-size_t ManagedStack::NumJniShadowFrameReferences() const {
- size_t count = 0;
- for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
- current_fragment = current_fragment->GetLink()) {
- for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_;
- current_frame != nullptr;
- current_frame = current_frame->GetLink()) {
- if (current_frame->GetMethod()->IsNative()) {
- // The JNI ShadowFrame only contains references. (For indirect reference.)
- count += current_frame->NumberOfVRegs();
- }
- }
- }
- return count;
-}
-
-bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const {
- for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
- current_fragment = current_fragment->GetLink()) {
- for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_;
- current_frame != nullptr;
- current_frame = current_frame->GetLink()) {
- if (current_frame->Contains(shadow_frame_entry)) {
- return true;
- }
- }
- }
- return false;
-}
-
-} // namespace art
diff --git a/runtime/managed_stack.h b/runtime/managed_stack.h
index 3fb83ac..04a27fe 100644
--- a/runtime/managed_stack.h
+++ b/runtime/managed_stack.h
@@ -119,10 +119,6 @@
return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
}
- size_t NumJniShadowFrameReferences() const REQUIRES_SHARED(Locks::mutator_lock_);
-
- bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
-
private:
// Encodes the top quick frame (which must be at least 4-byte aligned)
// and a flag that marks the GenericJNI trampoline.
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 870936c..7d97b73 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -31,6 +31,7 @@
#include "dex/dex_file-inl.h"
#include "dex/dex_file_types.h"
#include "dex/dex_instruction-inl.h"
+#include "entrypoints/entrypoint_utils-inl.h"
#include "lock_word-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
@@ -1464,9 +1465,9 @@
// TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
if (m->IsNative()) {
if (m->IsSynchronized()) {
- ObjPtr<mirror::Object> jni_this =
- stack_visitor->GetCurrentHandleScope(sizeof(void*))->GetReference(0);
- callback(jni_this, callback_context);
+ Thread* thread = stack_visitor->GetThread();
+ jobject lock = GetGenericJniSynchronizationObject(thread, m);
+ callback(thread->DecodeJObject(lock), callback_context);
}
return;
}
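The new call above relies on the lock object being recoverable without a HandleScope. A simplified stand-alone sketch of that selection (hypothetical `Method`/`RefSlot` stand-ins; the real helper is GetGenericJniSynchronizationObject): for a static synchronized native method the lock is the declaring class, otherwise it is the `this` reference spilled as the first out vreg.

#include <cstdint>

// Stand-ins for ArtMethod and StackReference<mirror::Object>; illustrative only.
struct RefSlot { uint32_t compressed; };

struct Method {
  bool is_static;
  RefSlot declaring_class;  // Compressed reference to the declaring class.
};

// Address used as the jobject to lock on for a synchronized native method:
// the declaring-class field for static methods, the first spilled argument
// (the receiver) for instance methods.
inline void* SynchronizationObject(Method* m, RefSlot* first_out_vreg) {
  return m->is_static ? static_cast<void*>(&m->declaring_class)
                      : static_cast<void*>(first_out_vreg);
}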
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 771e012..f642bcb 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -1067,8 +1067,8 @@
IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
if (kind == kLocal) {
self->GetJniEnv()->UpdateLocal(obj, result);
- } else if (kind == kHandleScopeOrInvalid) {
- LOG(FATAL) << "Unsupported UpdateReference for kind kHandleScopeOrInvalid";
+ } else if (kind == kJniTransitionOrInvalid) {
+ LOG(FATAL) << "Unsupported UpdateReference for kind kJniTransitionOrInvalid";
} else if (kind == kGlobal) {
self->GetJniEnv()->GetVm()->UpdateGlobal(self, ref, result);
} else {
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 094c25b..233106e 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -150,19 +150,11 @@
return nullptr;
} else if (m->IsNative()) {
if (cur_quick_frame_ != nullptr) {
- HandleScope* hs;
- if (cur_oat_quick_method_header_ != nullptr) {
- hs = reinterpret_cast<HandleScope*>(
- reinterpret_cast<char*>(cur_quick_frame_) + sizeof(ArtMethod*));
- } else {
- // GenericJNI frames have the HandleScope under the managed frame.
- uint32_t shorty_len;
- const char* shorty = m->GetShorty(&shorty_len);
- const size_t num_handle_scope_references =
- /* this */ 1u + std::count(shorty + 1, shorty + shorty_len, 'L');
- hs = GetGenericJniHandleScope(cur_quick_frame_, num_handle_scope_references);
- }
- return hs->GetReference(0);
+ // The `this` reference is stored in the first out vreg in the caller's frame.
+ const size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
+ auto* stack_ref = reinterpret_cast<StackReference<mirror::Object>*>(
+ reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size + sizeof(ArtMethod*));
+ return stack_ref->AsMirrorPtr();
} else {
return cur_shadow_frame_->GetVRegReference(0);
}
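The address arithmetic above, pulled out into a stand-alone sketch with stand-in types: the reserved spill area is reached by stepping over the native method's frame and the method pointer slot.

#include <cstddef>
#include <cstdint>

struct RefSlot { uint32_t compressed; };  // Stand-in for StackReference<mirror::Object>.

// Computes the address of the first reserved out vreg, i.e. the spilled `this`
// reference for an instance method: step past the native method's frame and
// the ArtMethod* slot to reach the caller's reserved area.
inline RefSlot* FirstOutVReg(void* current_quick_frame,
                             size_t frame_size_in_bytes,
                             size_t method_pointer_size) {
  uint8_t* base = static_cast<uint8_t*>(current_quick_frame);
  return reinterpret_cast<RefSlot*>(base + frame_size_in_bytes + method_pointer_size);
}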
diff --git a/runtime/stack.h b/runtime/stack.h
index c746536..2a6fdc2 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -282,12 +282,6 @@
return cur_shadow_frame_;
}
- HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
- ArtMethod** sp = GetCurrentQuickFrame();
- // Skip ArtMethod*; handle scope comes next;
- return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
- }
-
std::string DescribeLocation() const REQUIRES_SHARED(Locks::mutator_lock_);
static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6639501..46aa2b5 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1338,7 +1338,7 @@
tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
- tlsPtr_.stack_size -= read_guard_size;
+ tlsPtr_.stack_size -= read_guard_size + kStackOverflowProtectedSize;
InstallImplicitProtection();
}
@@ -2541,16 +2541,82 @@
}
}
-bool Thread::HandleScopeContains(jobject obj) const {
- StackReference<mirror::Object>* hs_entry =
- reinterpret_cast<StackReference<mirror::Object>*>(obj);
- for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur!= nullptr; cur = cur->GetLink()) {
- if (cur->Contains(hs_entry)) {
+template <bool kPointsToStack>
+class JniTransitionReferenceVisitor : public StackVisitor {
+ public:
+ JniTransitionReferenceVisitor(Thread* thread, void* obj) REQUIRES_SHARED(Locks::mutator_lock_)
+ : StackVisitor(thread, /*context=*/ nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
+ obj_(obj),
+ found_(false) {}
+
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = GetMethod();
+ if (!m->IsNative() || m->IsCriticalNative()) {
+ return true;
+ }
+ if (kPointsToStack) {
+ uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
+ size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
+ uint32_t* current_vreg = reinterpret_cast<uint32_t*>(sp + frame_size + sizeof(ArtMethod*));
+ if (!m->IsStatic()) {
+ if (current_vreg == obj_) {
+ found_ = true;
+ return false;
+ }
+ current_vreg += 1u;
+ }
+ const char* shorty = m->GetShorty();
+ for (size_t i = 1, len = strlen(shorty); i != len; ++i) {
+ switch (shorty[i]) {
+ case 'D':
+ case 'J':
+ current_vreg += 2u;
+ break;
+ case 'L':
+ if (current_vreg == obj_) {
+ found_ = true;
+ return false;
+ }
+ FALLTHROUGH_INTENDED;
+ default:
+ current_vreg += 1u;
+ break;
+ }
+ }
+ // Continue only if the object is somewhere higher on the stack.
+ return obj_ >= current_vreg;
+ } else { // if (kPointsToStack)
+ if (m->IsStatic() && obj_ == m->GetDeclaringClassAddressWithoutBarrier()) {
+ found_ = true;
+ return false;
+ }
return true;
}
}
- // JNI code invoked from portable code uses shadow frames rather than the handle scope.
- return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
+
+ bool Found() const {
+ return found_;
+ }
+
+ private:
+ void* obj_;
+ bool found_;
+};
+
+bool Thread::IsJniTransitionReference(jobject obj) const {
+ DCHECK(obj != nullptr);
+ // We need a non-const pointer for stack walk even if we're not modifying the thread state.
+ Thread* thread = const_cast<Thread*>(this);
+ uint8_t* raw_obj = reinterpret_cast<uint8_t*>(obj);
+ if (static_cast<size_t>(raw_obj - tlsPtr_.stack_begin) < tlsPtr_.stack_size) {
+ JniTransitionReferenceVisitor</*kPointsToStack=*/ true> visitor(thread, raw_obj);
+ visitor.WalkStack();
+ return visitor.Found();
+ } else {
+ JniTransitionReferenceVisitor</*kPointsToStack=*/ false> visitor(thread, raw_obj);
+ visitor.WalkStack();
+ return visitor.Found();
+ }
}
void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
@@ -2574,10 +2640,12 @@
IndirectReferenceTable& locals = tlsPtr_.jni_env->locals_;
// Local references do not need a read barrier.
result = locals.Get<kWithoutReadBarrier>(ref);
- } else if (kind == kHandleScopeOrInvalid) {
- // Read from handle scope.
- DCHECK(HandleScopeContains(obj));
- result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
+ } else if (kind == kJniTransitionOrInvalid) {
+ // The `jclass` for a static method points to the CompressedReference<> in the
+ // `ArtMethod::declaring_class_`. Other `jobject` arguments point to spilled stack
+ // references but a StackReference<> is just a subclass of CompressedReference<>.
+ DCHECK(IsJniTransitionReference(obj));
+ result = reinterpret_cast<mirror::CompressedReference<mirror::Object>*>(obj)->AsMirrorPtr();
VerifyObject(result);
} else if (kind == kGlobal) {
result = tlsPtr_.jni_env->vm_->DecodeGlobal(ref);
@@ -3808,8 +3876,63 @@
ArtMethod* m = *cur_quick_frame;
VisitDeclaringClass(m);
- // Process register map (which native and runtime methods don't have)
- if (!m->IsNative() && !m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) {
+ if (m->IsNative()) {
+ // TODO: Spill the `this` reference in the AOT-compiled String.charAt()
+ // slow-path for throwing SIOOBE, so that we can remove this carve-out.
+ if (UNLIKELY(m->IsIntrinsic()) &&
+ m->GetIntrinsic() == enum_cast<uint32_t>(Intrinsics::kStringCharAt)) {
+ // The String.charAt() method is AOT-compiled with an intrinsic implementation
+ // instead of a JNI stub. It has a slow path that constructs a runtime frame
+ // for throwing SIOOBE and in that path we do not get the `this` pointer
+ // spilled on the stack, so there is nothing to visit. We can distinguish
+ // this from the GenericJni path by checking that the PC is in the boot image
+ // (PC shall be known thanks to the runtime frame for throwing SIOOBE).
+ // Note that JIT does not emit that intrinsic implementation.
+ const void* pc = reinterpret_cast<const void*>(GetCurrentQuickFramePc());
+ if (pc != 0u && Runtime::Current()->GetHeap()->IsInBootImageOatFile(pc)) {
+ return;
+ }
+ }
+ // Native methods spill their arguments to the reserved vregs in the caller's frame
+ // and use pointers to these stack references as jobject, jclass, jarray, etc.
+ // Note: We can come here for a @CriticalNative method when it needs to resolve the
+ // target native function but there would be no references to visit below.
+ const size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
+ const size_t method_pointer_size = static_cast<size_t>(kRuntimePointerSize);
+ uint32_t* current_vreg = reinterpret_cast<uint32_t*>(
+ reinterpret_cast<uint8_t*>(cur_quick_frame) + frame_size + method_pointer_size);
+ auto visit = [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
+ auto* ref_addr = reinterpret_cast<StackReference<mirror::Object>*>(current_vreg);
+ mirror::Object* ref = ref_addr->AsMirrorPtr();
+ if (ref != nullptr) {
+ mirror::Object* new_ref = ref;
+ visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kNativeReferenceArgument, this);
+ if (ref != new_ref) {
+ ref_addr->Assign(new_ref);
+ }
+ }
+ };
+ const char* shorty = m->GetShorty();
+ if (!m->IsStatic()) {
+ visit();
+ current_vreg += 1u;
+ }
+ for (shorty += 1u; *shorty != 0; ++shorty) {
+ switch (*shorty) {
+ case 'D':
+ case 'J':
+ current_vreg += 2u;
+ break;
+ case 'L':
+ visit();
+ FALLTHROUGH_INTENDED;
+ default:
+ current_vreg += 1u;
+ break;
+ }
+ }
+ } else if (!m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) {
+ // Process register map (which native, runtime and proxy methods don't have)
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
DCHECK(method_header->IsOptimized());
StackReference<mirror::Object>* vreg_base =
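Both JniTransitionReferenceVisitor and the root visiting above walk the method's shorty because, unlike a HandleScope, the spill area interleaves references with the other argument slots. A stand-alone sketch of that walk (illustrative helper, assuming one 32-bit slot per argument and two for 'J'/'D'):

#include <cstddef>
#include <cstring>
#include <cstdint>
#include <vector>

// Given a method shorty (return type first, e.g. "VLJI" for void f(Object, long, int))
// and the address of the first spilled argument slot, collect the addresses of the
// reference ('L') slots. Longs and doubles take two 32-bit slots, everything else
// one; an instance method has an implicit leading reference slot for `this`.
std::vector<uint32_t*> CollectReferenceSlots(const char* shorty,
                                             bool is_static,
                                             uint32_t* first_vreg) {
  std::vector<uint32_t*> refs;
  uint32_t* vreg = first_vreg;
  if (!is_static) {
    refs.push_back(vreg);  // Implicit `this`.
    vreg += 1u;
  }
  for (size_t i = 1, len = std::strlen(shorty); i != len; ++i) {
    switch (shorty[i]) {
      case 'D':
      case 'J':
        vreg += 2u;
        break;
      case 'L':
        refs.push_back(vreg);
        vreg += 1u;
        break;
      default:
        vreg += 1u;
        break;
    }
  }
  return refs;
}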
diff --git a/runtime/thread.h b/runtime/thread.h
index 7475681..b63e39f 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -902,8 +902,8 @@
ManagedStack::TopShadowFrameOffset());
}
- // Is the given obj in this thread's stack indirect reference table?
- bool HandleScopeContains(jobject obj) const;
+ // Is the given obj in one of this thread's JNI transition frames?
+ bool IsJniTransitionReference(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1905,6 +1905,7 @@
friend class ThreadList; // For ~Thread and Destroy.
friend class EntrypointsOrderTest; // To test the order of tls entries.
+ friend class JniCompilerTest; // For intercepting JNI entrypoint calls.
DISALLOW_COPY_AND_ASSIGN(Thread);
};
diff --git a/test/913-heaps/expected-stdout.txt b/test/913-heaps/expected-stdout.txt
index 8fe2ba5..e9b82f3 100644
--- a/test/913-heaps/expected-stdout.txt
+++ b/test/913-heaps/expected-stdout.txt
@@ -50,7 +50,6 @@
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
-root@root --(thread)--> 1@1000 [size=16, length=-1]
root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780005, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780009, length=-1]
@@ -116,7 +115,6 @@
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
-root@root --(thread)--> 1@1000 [size=16, length=-1]
root@root --(thread)--> 3000@0 [size=124, length=-1]
---
1001@0 --(superclass)--> 1000@0 [size=123456780025, length=-1]
@@ -178,7 +176,6 @@
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 1@1000 [size=16, length=-1]
1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
500@0 --(array-element@1)--> 2@1000 [size=16, length=-1]
@@ -250,7 +247,6 @@
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=4,method=runFollowReferences,vreg=3,location= 164])--> 1000@0 [size=123456780055, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
-root@root --(thread)--> 1@1000 [size=16, length=-1]
root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780055, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780059, length=-1]
@@ -371,7 +367,6 @@
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 1@1000 [size=16, length=-1]
1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
3@1001 --(field@4)--> 4@1000 [size=16, length=-1]