author Vladimir Marko <vmarko@google.com> 2023-06-14 15:18:26 +0000
committer Vladimir Marko <vmarko@google.com> 2023-06-21 09:25:55 +0000
commit c09acc0b9e8bb0c52effb2ab4ec68d504995b2ac (patch)
tree 4d37b80493f7285fdb92ade4424f5e8e0993e677
parent 4bf7a0e4e4ba1a8d8dafbf2495c4d3515660777a (diff)
riscv64: Implement `art_jni_dlsym_lookup_critical_stub`.
Test: run-gtests.sh
Bug: 283082089
Change-Id: I96c7ff79278f7563c1c2cbc10258f6862fc27111
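The stub tells its callers apart by a tag in bit 0 of the hidden argument register (T0 after this change). A minimal C++ sketch of that convention, not ART's actual code:

    #include <cstdint>

    struct ArtMethod;  // opaque runtime type, forward-declared for the sketch

    // Bit 0 of the hidden arg distinguishes the caller:
    //   clear - compiled JNI stub or compiled managed code (plain ArtMethod*),
    //   set   - art_quick_generic_jni_trampoline (tagged ArtMethod*).
    constexpr uintptr_t kGenericJniTag = 1u;

    inline uintptr_t TagForGenericJni(ArtMethod* method) {
      return reinterpret_cast<uintptr_t>(method) | kGenericJniTag;
    }

    inline bool IsGenericJni(uintptr_t hidden_arg) {
      return (hidden_arg & kGenericJniTag) != 0u;
    }

    inline ArtMethod* UntagMethod(uintptr_t hidden_arg) {
      return reinterpret_cast<ArtMethod*>(hidden_arg & ~kGenericJniTag);
    }

The `andi t6, t0, 1` / `bnez` pair at the top of the new stub below is the assembly form of IsGenericJni().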
-rw-r--r--  compiler/jni/jni_compiler_test.cc                  10
-rw-r--r--  compiler/trampolines/trampoline_compiler.cc         5
-rw-r--r--  runtime/arch/riscv64/asm_support_riscv64.S         37
-rw-r--r--  runtime/arch/riscv64/jni_entrypoints_riscv64.S    298
-rw-r--r--  runtime/arch/riscv64/quick_entrypoints_riscv64.S    7
-rw-r--r--  runtime/arch/x86/jni_entrypoints_x86.S              2
-rw-r--r--  runtime/entrypoints/jni/jni_entrypoints.cc          5
7 files changed, 329 insertions(+), 35 deletions(-)
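Before the per-file diffs, a rough C++ outline of the control flow the new riscv64 stub implements. The two artXxx declarations match the runtime entrypoints the assembly actually calls; Thread, ArtMethod, and the helper functions are hypothetical stand-ins for assembly sequences, so this compiles but is not linkable:

    #include <cstddef>
    #include <cstdint>

    struct ArtMethod;
    struct Thread;

    // These declarations match real runtime entrypoints used below.
    extern "C" size_t artCriticalNativeFrameSize(ArtMethod* method,
                                                 uintptr_t caller_pc);
    extern "C" const void* artFindNativeMethodRunnable(Thread* self);

    // Hypothetical stand-ins for assembly sequences in the stub.
    const void* LookupViaGenericJniStub();         // tail call to art_jni_dlsym_lookup_stub
    void MoveStackArgsDown(size_t out_args_size);  // .Lcritical_copy_args_loop
    void BuildManagedFrame(ArtMethod* method);     // SaveRefsAndArgs-shaped frame
    void RestoreArgsAndTearDownFrame();            // also moves stack args back
    [[noreturn]] void DeliverPendingException();   // builds a SaveAllCalleeSaves frame

    const void* CriticalLookupSketch(uintptr_t hidden_arg,
                                     uintptr_t caller_pc,
                                     Thread* self) {
      if ((hidden_arg & 1u) != 0u) {
        // Bit 0 set: generic JNI already has a managed frame.
        return LookupViaGenericJniStub();
      }
      ArtMethod* method = reinterpret_cast<ArtMethod*>(hidden_arg);
      size_t out_args = artCriticalNativeFrameSize(method, caller_pc);
      MoveStackArgsDown(out_args);
      // Depending on the method's access flags, the assembly stores either
      // the method itself with a tagged top-of-stack (GenericJNI-style) or
      // the runtime's SaveRefsAndArgs method with an untagged one.
      BuildManagedFrame(method);
      const void* code = artFindNativeMethodRunnable(self);
      RestoreArgsAndTearDownFrame();
      if (code == nullptr) {
        DeliverPendingException();
      }
      return code;  // the assembly tail-calls this instead of returning
    }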
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 7feb400db7..93dd715286 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -735,11 +735,6 @@ void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() {
JNI_TEST(CompileAndRunIntMethodThroughStub)
void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() {
- if (!check_generic_jni_) {
- // TODO(riscv64): Implement `art_jni_dlsym_lookup_critical_stub`.
- TEST_DISABLED_FOR_RISCV64();
- }
-
SetUpForTest(true, "sbar", "(I)I", nullptr);
// calling through stub will link with &Java_MyClassNatives_sbar{,_1Fast,_1Critical}
@@ -2154,11 +2149,6 @@ void JniCompilerTest::WithoutImplementationRefReturnImpl() {
JNI_TEST(WithoutImplementationRefReturn)
void JniCompilerTest::StaticWithoutImplementationImpl() {
- if (!check_generic_jni_) {
- // TODO(riscv64): Implement `art_jni_dlsym_lookup_critical_stub`.
- TEST_DISABLED_FOR_RISCV64();
- }
-
// This will lead to error messages in the log.
ScopedLogSeverity sls(LogSeverity::FATAL);
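The re-enabled tests call static native methods through a stub that resolves to symbols like the Java_MyClassNatives_sbar variants named above. For reference, a @CriticalNative implementation for the "(I)I" signature takes no JNIEnv*/jclass, so a sketch of what the test library exports looks like this (the trivial body is illustrative, not the test's actual implementation):

    #include <jni.h>

    // JNI-mangled name of the @CriticalNative variant ("_1" encodes "_").
    extern "C" jint Java_MyClassNatives_sbar_1Critical(jint x) {
      return x;  // illustrative body
    }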
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index c3b9dbe492..d1f8e7dfa7 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -148,9 +148,8 @@ static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocat
__ Unimp();
break;
case kQuickAbi: // TR holds Thread*.
- // FIXME(riscv64): Do not clobber the hidden arg T0, see the JNI calling convention.
- __ Loadd(T0, TR, offset.Int32Value());
- __ Jr(T0);
+ __ Loadd(TMP, TR, offset.Int32Value());
+ __ Jr(TMP);
break;
}
diff --git a/runtime/arch/riscv64/asm_support_riscv64.S b/runtime/arch/riscv64/asm_support_riscv64.S
index 99e4080206..e7b7ebc7bb 100644
--- a/runtime/arch/riscv64/asm_support_riscv64.S
+++ b/runtime/arch/riscv64/asm_support_riscv64.S
@@ -66,6 +66,17 @@
.endm
+.macro CFI_EXPRESSION_BREG n, b, offset
+ .if (-0x40 <= (\offset)) && ((\offset) < 0x40)
+ CFI_EXPRESSION_BREG_1(\n, \b, \offset)
+ .elseif (-0x2000 <= (\offset)) && ((\offset) < 0x2000)
+ CFI_EXPRESSION_BREG_2(\n, \b, \offset)
+ .else
+ .error "Unsupported offset"
+ .endif
+.endm
+
+
.macro CFI_DEF_CFA_BREG_PLUS_UCONST reg, offset, size
.if (((\offset) < -0x40) || ((\offset) >= 0x40))
.error "Unsupported offset"
@@ -95,18 +106,28 @@
.endm
-.macro SAVE_GPR reg, offset
- sd \reg, (\offset)(sp)
+.macro SAVE_GPR_BASE base, reg, offset
+ sd \reg, (\offset)(\base)
.cfi_rel_offset \reg, (\offset)
.endm
-.macro RESTORE_GPR reg, offset
- ld \reg, (\offset)(sp)
+.macro SAVE_GPR reg, offset
+ SAVE_GPR_BASE sp, \reg, \offset
+.endm
+
+
+.macro RESTORE_GPR_BASE base, reg, offset
+ ld \reg, (\offset)(\base)
.cfi_restore \reg
.endm
+.macro RESTORE_GPR reg, offset
+ RESTORE_GPR_BASE sp, \reg, \offset
+.endm
+
+
.macro SAVE_FPR reg, offset
fsd \reg, (\offset)(sp)
.cfi_rel_offset \reg, (\offset)
@@ -149,8 +170,9 @@
SAVE_FPR fa7, (8*8)
SAVE_GPR fp, (9*8) // x8, frame pointer
+ // s1 (x9) is the ART thread register
- // a0 is the method pointer
+ // a0 (x10) is the method pointer
SAVE_GPR a1, (10*8) // x11
SAVE_GPR a2, (11*8) // x12
SAVE_GPR a3, (12*8) // x13
@@ -159,7 +181,6 @@
SAVE_GPR a6, (15*8) // x16
SAVE_GPR a7, (16*8) // x17
- // s1 is the ART thread register
SAVE_GPR s2, (17*8) // x18
SAVE_GPR s3, (18*8) // x19
SAVE_GPR s4, (19*8) // x20
@@ -175,7 +196,7 @@
.endm
-.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME_INTERNAL base
+.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
// stack slot (0*8)(sp) is for ArtMethod*
RESTORE_FPR fa0, (1*8)
@@ -277,7 +298,7 @@
// GP callee-saves
SAVE_GPR s0, (8*14) // x8/fp, frame pointer
- // s1 is the ART thread register
+ // s1 (x9) is the ART thread register
SAVE_GPR s2, (8*15) // x18
SAVE_GPR s3, (8*16) // x19
SAVE_GPR s4, (8*17) // x20
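The CFI_EXPRESSION_BREG macro added in this file picks a 1-byte or 2-byte encoding based on the offset because DW_OP_breg takes a SLEB128 operand, and a signed value fits in k SLEB128 bytes iff it fits in 7*k bits. A standalone sketch of the encoder, assumed to follow the DWARF spec rather than ART's implementation:

    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> EncodeSleb128(int64_t value) {
      std::vector<uint8_t> out;
      bool more = true;
      while (more) {
        uint8_t byte = value & 0x7f;
        value >>= 7;  // assumes arithmetic shift, which keeps the sign
        more = !((value == 0 && (byte & 0x40) == 0) ||
                 (value == -1 && (byte & 0x40) != 0));
        if (more) byte |= 0x80;
        out.push_back(byte);
      }
      return out;
    }

    // EncodeSleb128(0x3f).size() == 1, EncodeSleb128(0x40).size() == 2,
    // EncodeSleb128(-0x2000).size() == 2, EncodeSleb128(-0x2001).size() == 3
    // -- hence the -0x40..0x40 and -0x2000..0x2000 bounds in the macro.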
diff --git a/runtime/arch/riscv64/jni_entrypoints_riscv64.S b/runtime/arch/riscv64/jni_entrypoints_riscv64.S
index 0a85540507..8ec2967956 100644
--- a/runtime/arch/riscv64/jni_entrypoints_riscv64.S
+++ b/runtime/arch/riscv64/jni_entrypoints_riscv64.S
@@ -122,14 +122,294 @@ END art_jni_dlsym_lookup_stub
// JNI dlsym lookup stub for @CriticalNative.
ENTRY art_jni_dlsym_lookup_critical_stub
- // The hidden arg holding the tagged method is t6 (loaded by art_quick_generic_jni_trampoline).
- // FIXME(riscv64): Use T0 to align with the JNI calling convention.
- // Bit 0 set means generic JNI.
+ // The hidden arg holding the tagged method is t0 (loaded by compiled JNI stub, compiled
+ // managed code, or `art_quick_generic_jni_trampoline`). Bit 0 set means generic JNI.
// For generic JNI we already have a managed frame, so we reuse the art_jni_dlsym_lookup_stub.
- andi t6, t6, 1
- beqz t6, 1f
- j art_jni_dlsym_lookup_stub
-1:
- // TODO(riscv64): implement for code paths other than generic JNI trampoline.
- unimp
+ andi t6, t0, 1
+ bnez t6, art_jni_dlsym_lookup_stub
+
+ // Save args, the hidden arg and caller PC. No CFI needed for args and the hidden arg.
+ SAVE_ALL_ARGS_INCREASE_FRAME 2*8
+ SAVE_GPR t0, (ALL_ARGS_SIZE + 0)
+ SAVE_GPR ra, (ALL_ARGS_SIZE + 8)
+
+ // Call artCriticalNativeFrameSize(method, caller_pc)
+ mv a0, t0 // a0 := method (from hidden arg)
+ mv a1, ra // a1 := caller_pc
+ call artCriticalNativeFrameSize
+
+ // Move frame size to T2.
+ mv t2, a0
+
+ // Restore args, the hidden arg and caller PC.
+ RESTORE_GPR t0, (ALL_ARGS_SIZE + 0)
+ RESTORE_GPR ra, (ALL_ARGS_SIZE + 8)
+ RESTORE_ALL_ARGS_DECREASE_FRAME 2*8
+
+ // Reserve space for a SaveRefsAndArgs managed frame, either for the actual runtime
+ // method or for a GenericJNI frame which is similar but has a native method and a tag.
+ // Add space for RA and padding to keep the stack 16-byte aligned.
+ INCREASE_FRAME (FRAME_SIZE_SAVE_REFS_AND_ARGS + 16)
+
+ // Prepare the return address for managed stack walk of the SaveRefsAndArgs frame.
+ // If we're coming from JNI stub with tail call, it is RA. If we're coming from
+ // JNI stub that saved the return address, it will be the last value we copy below.
+ // If we're coming directly from compiled code, it is RA, set further down.
+ mv t4, ra
+
+ // Move the stack args if any. Calculate the base address of the managed frame in the process.
+ addi t1, sp, 16
+ beqz t2, .Lcritical_skip_copy_args
+.Lcritical_copy_args_loop:
+ ld t3, FRAME_SIZE_SAVE_REFS_AND_ARGS+0(t1)
+ ld t4, FRAME_SIZE_SAVE_REFS_AND_ARGS+8(t1)
+ addi t2, t2, -16
+ sd t3, 0-16(t1)
+ sd t4, 8-16(t1)
+ addi t1, t1, 16
+ bnez t2, .Lcritical_copy_args_loop
+.Lcritical_skip_copy_args:
+
+ // Spill registers for the SaveRefsAndArgs frame above the stack args.
+ // Note that the runtime shall not examine the args here, otherwise we would have to
+ // move them in registers and stack to account for the difference between managed and
+ // native ABIs. Do not update CFI while we hold the frame address in T1 and the values
+ // in registers are unchanged.
+ // stack slot (0*8)(t1) is for ArtMethod*
+ fsd fa0, (1*8)(t1)
+ fsd fa1, (2*8)(t1)
+ fsd fa2, (3*8)(t1)
+ fsd fa3, (4*8)(t1)
+ fsd fa4, (5*8)(t1)
+ fsd fa5, (6*8)(t1)
+ fsd fa6, (7*8)(t1)
+ fsd fa7, (8*8)(t1)
+ sd fp, (9*8)(t1) // x8, frame pointer
+ // s1 (x9) is the ART thread register
+ // a0 (x10) is the method pointer
+ sd a1, (10*8)(t1) // x11
+ sd a2, (11*8)(t1) // x12
+ sd a3, (12*8)(t1) // x13
+ sd a4, (13*8)(t1) // x14
+ sd a5, (14*8)(t1) // x15
+ sd a6, (15*8)(t1) // x16
+ sd a7, (16*8)(t1) // x17
+ sd s2, (17*8)(t1) // x18
+ sd s3, (18*8)(t1) // x19
+ sd s4, (19*8)(t1) // x20
+ sd s5, (20*8)(t1) // x21
+ sd s6, (21*8)(t1) // x22
+ sd s7, (22*8)(t1) // x23
+ sd s8, (23*8)(t1) // x24
+ sd s9, (24*8)(t1) // x25
+ sd s10, (25*8)(t1) // x26
+ sd s11, (26*8)(t1) // x27
+ sd t4, (27*8)(t1) // t4: Save return address for tail call from JNI stub.
+ // (If there were any stack args, we're storing the value that's already there.
+ // For direct calls from compiled managed code, we shall overwrite this below.)
+
+ // Move the managed frame address to native callee-save register fp (x8) and update CFI.
+ mv fp, t1
+ // Skip args FA0-FA7, A1-A7
+ CFI_EXPRESSION_BREG 8, 8, (9*8)
+ CFI_EXPRESSION_BREG 18, 8, (17*8)
+ CFI_EXPRESSION_BREG 19, 8, (18*8)
+ CFI_EXPRESSION_BREG 20, 8, (19*8)
+ CFI_EXPRESSION_BREG 21, 8, (20*8)
+ CFI_EXPRESSION_BREG 22, 8, (21*8)
+ CFI_EXPRESSION_BREG 23, 8, (22*8)
+ CFI_EXPRESSION_BREG 24, 8, (23*8)
+ CFI_EXPRESSION_BREG 25, 8, (24*8)
+ CFI_EXPRESSION_BREG 26, 8, (25*8)
+ CFI_EXPRESSION_BREG 27, 8, (26*8)
+ // The saved return PC for managed stack walk is not necessarily our RA.
+
+ // Save our return PC below the managed frame.
+ sd ra, -__SIZEOF_POINTER__(fp)
+ CFI_EXPRESSION_BREG 1, 8, -__SIZEOF_POINTER__
+
+ lw t2, ART_METHOD_ACCESS_FLAGS_OFFSET(t0) // Load access flags.
+ addi t1, fp, 1 // Prepare managed SP tagged for a GenericJNI frame.
+ slliw t2, t2, 31 - ACCESS_FLAGS_METHOD_IS_NATIVE_BIT
+ bltz t2, .Lcritical_skip_prepare_runtime_method
+
+ // When coming from a compiled method, the return PC for managed stack walk is RA.
+ // (When coming from a compiled stub, the correct return PC is already stored above.)
+ sd ra, (FRAME_SIZE_SAVE_REFS_AND_ARGS - __SIZEOF_POINTER__)(fp)
+
+ // Replace the target method with the SaveRefsAndArgs runtime method.
+ LOAD_RUNTIME_INSTANCE t0
+ ld t0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET(t0)
+
+ mv t1, fp // Prepare untagged managed SP for the runtime method.
+
+.Lcritical_skip_prepare_runtime_method:
+ // Store the method on the bottom of the managed frame.
+ sd t0, (fp)
+
+ // Place (maybe tagged) managed SP in Thread::Current()->top_quick_frame.
+ sd t1, THREAD_TOP_QUICK_FRAME_OFFSET(xSELF)
+
+ // Preserve the native arg register A0 in callee-save register S2 (x18) which was saved above.
+ mv s2, a0
+
+ // Call artFindNativeMethodRunnable()
+ mv a0, xSELF // pass Thread::Current()
+ call artFindNativeMethodRunnable
+
+ // Store result in scratch reg.
+ mv t0, a0
+
+ // Restore the native arg register A0.
+ mv a0, s2
+
+ // Restore our return PC.
+ RESTORE_GPR_BASE fp, ra, -__SIZEOF_POINTER__
+
+ // Remember the end of out args before restoring FP.
+ addi t1, fp, -16
+
+ // Restore arg registers.
+ fld fa0, (1*8)(fp)
+ fld fa1, (2*8)(fp)
+ fld fa2, (3*8)(fp)
+ fld fa3, (4*8)(fp)
+ fld fa4, (5*8)(fp)
+ fld fa5, (6*8)(fp)
+ fld fa6, (7*8)(fp)
+ fld fa7, (8*8)(fp)
+ // fp (x8) is restored last to keep CFI data valid until then.
+ // s1 (x9) is the ART thread register
+ // a0 (x10) is the method pointer
+ ld a1, (10*8)(fp) // x11
+ ld a2, (11*8)(fp) // x12
+ ld a3, (12*8)(fp) // x13
+ ld a4, (13*8)(fp) // x14
+ ld a5, (14*8)(fp) // x15
+ ld a6, (15*8)(fp) // x16
+ ld a7, (16*8)(fp) // x17
+ RESTORE_GPR_BASE fp, s2, (17*8) // x18
+ RESTORE_GPR_BASE fp, s3, (18*8) // x19
+ RESTORE_GPR_BASE fp, s4, (19*8) // x20
+ RESTORE_GPR_BASE fp, s5, (20*8) // x21
+ RESTORE_GPR_BASE fp, s6, (21*8) // x22
+ RESTORE_GPR_BASE fp, s7, (22*8) // x23
+ RESTORE_GPR_BASE fp, s8, (23*8) // x24
+ RESTORE_GPR_BASE fp, s9, (24*8) // x25
+ RESTORE_GPR_BASE fp, s10, (25*8) // x26
+ RESTORE_GPR_BASE fp, s11, (26*8) // x27
+ RESTORE_GPR_BASE fp, fp, (9*8) // fp (x8) is restored last
+
+ // Check for exception before moving args back to keep the return PC for managed stack walk.
+ CFI_REMEMBER_STATE
+ beqz t0, .Lcritical_deliver_exception
+
+ // Move stack args to their original place.
+ beq t1, sp, .Lcritical_skip_copy_args_back
+ sub t2, t1, sp
+.Lcritical_copy_args_back_loop:
+ ld t3, 0-16(t1)
+ ld t4, 8-16(t1)
+ addi t2, t2, -16
+ sd t3, FRAME_SIZE_SAVE_REFS_AND_ARGS+0(t1)
+ sd t4, FRAME_SIZE_SAVE_REFS_AND_ARGS+8(t1)
+ addi t1, t1, -16
+ bnez t2, .Lcritical_copy_args_back_loop
+.Lcritical_skip_copy_args_back:
+
+ // Remove the frame reservation.
+ DECREASE_FRAME (FRAME_SIZE_SAVE_REFS_AND_ARGS + 16)
+
+ // Do the tail call.
+ jr t0
+
+.Lcritical_deliver_exception:
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_REFS_AND_ARGS + 16
+ // If this is called from a method that catches the exception, all callee-save registers need
+ // to be saved, so that the exception handling code can read them in case they contain live
+ // values later used by that method. This includes callee-save FP registers which are not
+ // saved in a SaveRefsAndArgs frame, so we cannot reuse the managed frame we have built above.
+ // That's why we checked for exception after restoring registers from that frame.
+ // We need to build a SaveAllCalleeSaves frame instead. Args are irrelevant at this
+ // point but keep the area allocated for stack args to keep CFA definition simple.
+#if FRAME_SIZE_SAVE_ALL_CALLEE_SAVES > FRAME_SIZE_SAVE_REFS_AND_ARGS
+#error "Expanding stack frame from kSaveRefsAndArgs to kSaveAllCalleeSaves is not implemented."
+#endif
+ DECREASE_FRAME FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_ALL_CALLEE_SAVES
+
+ // Calculate the base address of the managed frame.
+ addi t1, t1, 16 + FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_ALL_CALLEE_SAVES
+
+ // Spill registers for the SaveAllCalleeSaves frame above the stack args area. Do not update
+ // CFI while we hold the frame address in T1 and the values in registers are unchanged.
+ // stack slot (0*8)(t1) is for ArtMethod*
+ // stack slot (1*8)(t1) is for padding
+ // FP callee-saves.
+ fsd fs0, (8*2)(t1) // f8
+ fsd fs1, (8*3)(t1) // f9
+ fsd fs2, (8*4)(t1) // f18
+ fsd fs3, (8*5)(t1) // f19
+ fsd fs4, (8*6)(t1) // f20
+ fsd fs5, (8*7)(t1) // f21
+ fsd fs6, (8*8)(t1) // f22
+ fsd fs7, (8*9)(t1) // f23
+ fsd fs8, (8*10)(t1) // f24
+ fsd fs9, (8*11)(t1) // f25
+ fsd fs10, (8*12)(t1) // f26
+ fsd fs11, (8*13)(t1) // f27
+ // GP callee-saves
+ sd s0, (8*14)(t1) // x8/fp, frame pointer
+ // s1 (x9) is the ART thread register
+ sd s2, (8*15)(t1) // x18
+ sd s3, (8*16)(t1) // x19
+ sd s4, (8*17)(t1) // x20
+ sd s5, (8*18)(t1) // x21
+ sd s6, (8*19)(t1) // x22
+ sd s7, (8*20)(t1) // x23
+ sd s8, (8*21)(t1) // x24
+ sd s9, (8*22)(t1) // x25
+ sd s10, (8*23)(t1) // x26
+ sd s11, (8*24)(t1) // x27
+ // Keep the caller PC for managed stack walk.
+
+ // Move the managed frame address to native callee-save register fp (x8) and update CFI.
+ mv fp, t1
+ CFI_EXPRESSION_BREG 8, 8, (14*8) // fp/x8: The base register for these CFI expressions.
+ CFI_EXPRESSION_BREG /*FP reg*/ 32 + 8, 8, (8*2) // fs0/f8
+ CFI_EXPRESSION_BREG /*FP reg*/ 32 + 9, 8, (8*3) // fs1/f9
+ CFI_EXPRESSION_BREG /*FP reg*/ 32 + 18, 8, (8*4) // fs2/f18
+ CFI_EXPRESSION_BREG /*FP reg*/ 32 + 19, 8, (8*5) // fs3/f19
+ CFI_EXPRESSION_BREG /*FP reg*/ 32 + 20, 8, (8*6) // fs4/f20
+ CFI_EXPRESSION_BREG /*FP reg*/ 32 + 21, 8, (8*7) // fs5/f21
+ CFI_EXPRESSION_BREG /*FP reg*/ 32 + 22, 8, (8*8) // fs6/f22
+ CFI_EXPRESSION_BREG /*FP reg*/ 32 + 23, 8, (8*9) // fs7/f23
+ CFI_EXPRESSION_BREG /*FP reg*/ 32 + 24, 8, (8*10) // fs8/f24
+ CFI_EXPRESSION_BREG /*FP reg*/ 32 + 25, 8, (8*11) // fs9/f25
+ CFI_EXPRESSION_BREG /*FP reg*/ 32 + 26, 8, (8*12) // fs10/f26
+ CFI_EXPRESSION_BREG /*FP reg*/ 32 + 27, 8, (8*13) // fs11/f27
+ // CFI expression for fp (x8) already emitted above.
+ CFI_EXPRESSION_BREG 18, 8, (15*8) // s2/x18
+ CFI_EXPRESSION_BREG 19, 8, (16*8) // s3/x19
+ CFI_EXPRESSION_BREG 20, 8, (17*8) // s4/x20
+ CFI_EXPRESSION_BREG 21, 8, (18*8) // s5/x21
+ CFI_EXPRESSION_BREG 22, 8, (19*8) // s6/x22
+ CFI_EXPRESSION_BREG 23, 8, (20*8) // s7/x23
+ CFI_EXPRESSION_BREG 24, 8, (21*8) // s8/x24
+ CFI_EXPRESSION_BREG 25, 8, (22*8) // s9/x25
+ CFI_EXPRESSION_BREG 26, 8, (23*8) // s10/x26
+ CFI_EXPRESSION_BREG 27, 8, (24*8) // s11/x27
+ // The saved return PC for managed stack walk is not necessarily our RA.
+
+ // Save our return PC below the managed frame.
+ sd ra, -__SIZEOF_POINTER__(fp)
+ CFI_EXPRESSION_BREG 1, 8, -__SIZEOF_POINTER__
+
+ // Store ArtMethod* Runtime::callee_save_methods_[kSaveAllCalleeSaves] to the managed frame.
+ LOAD_RUNTIME_INSTANCE t0
+ ld t0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET(t0)
+ sd t0, (fp)
+
+ // Place the managed frame SP in Thread::Current()->top_quick_frame.
+ sd fp, THREAD_TOP_QUICK_FRAME_OFFSET(xSELF)
+
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
END art_jni_dlsym_lookup_critical_stub
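One detail of the stub worth spelling out is the native-method test: instead of materializing an ACCESS_FLAGS_METHOD_IS_NATIVE mask, it shifts the access flags left so the flag lands in the sign bit and branches with `bltz`, saving a constant load. A C++ rendering of the same check; the bit position 8 (kAccNative = 0x0100, matching Java's ACC_NATIVE) is an assumption:

    #include <cstdint>

    // Assumed bit position of kAccNative in the access flags.
    constexpr uint32_t kAccNativeBit = 8;

    inline bool IsNativeViaSignBit(uint32_t access_flags) {
      // Mirrors: slliw t2, t2, 31 - ACCESS_FLAGS_METHOD_IS_NATIVE_BIT
      //          bltz  t2, .Lcritical_skip_prepare_runtime_method
      // slliw shifts the low 32 bits and sign-extends, so the flag bit
      // becomes the sign bit of the result.
      int32_t shifted = static_cast<int32_t>(access_flags << (31 - kAccNativeBit));
      return shifted < 0;
    }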
diff --git a/runtime/arch/riscv64/quick_entrypoints_riscv64.S b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
index c92687ad73..19f5a10d0e 100644
--- a/runtime/arch/riscv64/quick_entrypoints_riscv64.S
+++ b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
@@ -318,7 +318,7 @@ ENTRY art_quick_generic_jni_trampoline
// Check for error (class init check or locking for synchronized native method can throw).
beqz a0, .Lexception_in_native
- mv t0, a0 // save pointer to native method code into temporary
+ mv t2, a0 // save pointer to native method code into temporary
// Load argument GPRs from stack (saved there by artQuickGenericJniTrampoline).
ld a0, 8*0(sp) // JniEnv* for the native method
@@ -340,13 +340,12 @@ ENTRY art_quick_generic_jni_trampoline
fld fa6, 8*14(sp)
fld fa7, 8*15(sp)
- // FIXME(riscv64): Use T0 to align with the JNI calling convention.
- ld t6, 8*16(sp) // @CriticalNative arg, used by art_jni_dlsym_lookup_critical_stub
+ ld t0, 8*16(sp) // @CriticalNative arg, used by art_jni_dlsym_lookup_critical_stub
ld t1, 8*17(sp) // restore stack
mv sp, t1
- jalr t0 // call native method
+ jalr t2 // call native method
// result sign extension is handled in C code, prepare for artQuickGenericJniEndTrampoline call:
// uint64_t artQuickGenericJniEndTrampoline(Thread* self, // a0
diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S
index 70476d220e..448774a003 100644
--- a/runtime/arch/x86/jni_entrypoints_x86.S
+++ b/runtime/arch/x86/jni_entrypoints_x86.S
@@ -173,7 +173,7 @@ DEFINE_FUNCTION art_jni_dlsym_lookup_critical_stub
// Calculate the base address of the managed frame.
leal (%esp, %eax, 1), %eax
- leal 1(%eax), %ecx // Prepare namaged SP tagged for a GenericJNI frame.
+ leal 1(%eax), %ecx // Prepare managed SP tagged for a GenericJNI frame.
testl LITERAL(ACCESS_FLAGS_METHOD_IS_NATIVE), ART_METHOD_ACCESS_FLAGS_OFFSET(%ebx)
jnz .Lcritical_skip_prepare_runtime_method
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index e606c2173a..359614bf92 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -19,6 +19,7 @@
#include "arch/arm/jni_frame_arm.h"
#include "arch/arm64/jni_frame_arm64.h"
#include "arch/instruction_set.h"
+#include "arch/riscv64/jni_frame_riscv64.h"
#include "arch/x86/jni_frame_x86.h"
#include "arch/x86_64/jni_frame_x86_64.h"
#include "art_method-inl.h"
@@ -147,6 +148,8 @@ extern "C" size_t artCriticalNativeFrameSize(ArtMethod* method, uintptr_t caller
return arm::GetCriticalNativeStubFrameSize(shorty, shorty_len);
case InstructionSet::kArm64:
return arm64::GetCriticalNativeStubFrameSize(shorty, shorty_len);
+ case InstructionSet::kRiscv64:
+ return riscv64::GetCriticalNativeStubFrameSize(shorty, shorty_len);
case InstructionSet::kX86:
return x86::GetCriticalNativeStubFrameSize(shorty, shorty_len);
case InstructionSet::kX86_64:
@@ -183,6 +186,8 @@ extern "C" size_t artCriticalNativeFrameSize(ArtMethod* method, uintptr_t caller
return arm::GetCriticalNativeDirectCallFrameSize(shorty, shorty_len);
case InstructionSet::kArm64:
return arm64::GetCriticalNativeDirectCallFrameSize(shorty, shorty_len);
+ case InstructionSet::kRiscv64:
+ return riscv64::GetCriticalNativeDirectCallFrameSize(shorty, shorty_len);
case InstructionSet::kX86:
return x86::GetCriticalNativeDirectCallFrameSize(shorty, shorty_len);
case InstructionSet::kX86_64:
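For context on the new riscv64 cases: a GetCriticalNativeStubFrameSize()-style helper walks the method's shorty and returns the stack space the outgoing args need, rounded up to the 16-byte ABI alignment. A deliberately simplified sketch; it ignores the RISC-V rule that FP args overflowing fa0-fa7 may still use GPRs, and the register counts are assumptions:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kStackAlignment = 16;  // RISC-V psABI stack alignment
    constexpr size_t kMaxGprArgs = 8;       // a0-a7 (assumption)
    constexpr size_t kMaxFprArgs = 8;       // fa0-fa7 (assumption)

    // @CriticalNative methods get no JNIEnv*/jclass, so shorty args map
    // one-to-one onto the native ABI; shorty[0] is the return type.
    size_t CriticalStubFrameSizeSketch(const char* shorty) {
      size_t gprs = 0u, fprs = 0u, stack_slots = 0u;
      for (const char* p = shorty + 1; *p != '\0'; ++p) {
        bool is_fp = (*p == 'F' || *p == 'D');
        size_t& used = is_fp ? fprs : gprs;
        if (used < (is_fp ? kMaxFprArgs : kMaxGprArgs)) {
          ++used;         // passed in a register
        } else {
          ++stack_slots;  // spilled to the out-args area
        }
      }
      size_t size = stack_slots * sizeof(uint64_t);
      return (size + kStackAlignment - 1u) & ~(kStackAlignment - 1u);
    }

The direct-call variant differs only in how many args stay in registers, which is why artCriticalNativeFrameSize() dispatches to two per-arch helpers.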