author Vladimir Marko <vmarko@google.com> 2023-06-12 08:43:46 +0000
committer Vladimír Marko <vmarko@google.com> 2023-06-15 10:39:36 +0000
commit 30313798f5c1153dea3204183acc8f10b1a9665d (patch)
tree 0f2d968164c963a42468a5c9a143514a76ebd8eb /compiler
parent 993bcb1f021ea69e28bef882255416eac28f8b15 (diff)
riscv64: Implement JNI compiler for @CriticalNative.
Implement all JNI macro assembler functions needed by the JNI compiler
to compile stubs for @CriticalNative methods. Enable most JNI compiler
tests for @CriticalNative methods and document the reasons for keeping
the remaining few tests disabled.

Change `Riscv64Assembler::AddConst*` to store intermediate results in
`TMP` to avoid unaligned SP in the middle of a macro operation.

Test: m test-art-host-gtest
Test: run-gtests.sh
Bug: 283082089
Change-Id: I226cab7b2ffcab375a67eb37efdd093779c5c8c4
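The `AddConst*` change closes a subtle hazard: when an adjustment does not fit
a single `addi` immediate, the old code accumulated the intermediate sum in the
destination register itself, so adjusting SP left it briefly misaligned between
the two instructions. A sketch for `value == 2048`, with the constants taken
from the patched code below (`kPositiveValueSimpleAdjustment == 0x7ff`):

    // Before: with rd == rs1 == SP, SP is misaligned after the first addi.
    addi(rd, rs1, 0x7ff);
    addi(rd, rd, 2048 - 0x7ff);
    // After: the intermediate sum lives in TMP; SP is written exactly once.
    addi(TMP, rs1, 0x7ff);
    addi(rd, TMP, 2048 - 0x7ff);

The `ChangeFrameSize` test added below checks exactly this shape:
`addi t6, sp, 2047` followed by `addi sp, t6, 1` for `DecreaseFrameSize(2048)`.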
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.bp                                          |   1
-rw-r--r--  compiler/jni/jni_compiler_test.cc                            |  20
-rw-r--r--  compiler/jni/quick/calling_convention.cc                     |   6
-rw-r--r--  compiler/utils/riscv64/assembler_riscv64.cc                  |   8
-rw-r--r--  compiler/utils/riscv64/assembler_riscv64_test.cc             |   5
-rw-r--r--  compiler/utils/riscv64/jni_macro_assembler_riscv64.cc        | 355
-rw-r--r--  compiler/utils/riscv64/jni_macro_assembler_riscv64.h         |  20
-rw-r--r--  compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc   | 400
8 files changed, 748 insertions, 67 deletions
diff --git a/compiler/Android.bp b/compiler/Android.bp
index d667de8d07..9f63bd7254 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -557,6 +557,7 @@ art_cc_test {
riscv64 : {
srcs: [
"utils/riscv64/assembler_riscv64_test.cc",
+ "utils/riscv64/jni_macro_assembler_riscv64_test.cc",
],
},
x86: {
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index b99e78f2a2..7feb400db7 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -484,14 +484,12 @@ LockWord JniCompilerTest::GetLockWord(jobject obj) {
// Test (@CriticalNative) x (compiler, generic) only.
#define JNI_TEST_CRITICAL_ONLY(TestName) \
TEST_F(JniCompilerTest, TestName ## CriticalCompiler) { \
- TEST_DISABLED_FOR_RISCV64(); \
ScopedCheckHandleScope top_handle_scope_check; \
SCOPED_TRACE("@CriticalNative JNI with compiler"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kCritical); \
TestName ## Impl(); \
} \
TEST_F(JniCompilerTest, TestName ## CriticalGeneric) { \
- TEST_DISABLED_FOR_RISCV64(); \
ScopedCheckHandleScope top_handle_scope_check; \
SCOPED_TRACE("@CriticalNative JNI with generic"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kCritical); \
@@ -737,6 +735,11 @@ void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() {
JNI_TEST(CompileAndRunIntMethodThroughStub)
void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() {
+ if (!check_generic_jni_) {
+ // TODO(riscv64): Implement `art_jni_dlsym_lookup_critical_stub`.
+ TEST_DISABLED_FOR_RISCV64();
+ }
+
SetUpForTest(true, "sbar", "(I)I", nullptr);
// calling through stub will link with &Java_MyClassNatives_sbar{,_1Fast,_1Critical}
@@ -2151,6 +2154,11 @@ void JniCompilerTest::WithoutImplementationRefReturnImpl() {
JNI_TEST(WithoutImplementationRefReturn)
void JniCompilerTest::StaticWithoutImplementationImpl() {
+ if (!check_generic_jni_) {
+ // TODO(riscv64): Implement `art_jni_dlsym_lookup_critical_stub`.
+ TEST_DISABLED_FOR_RISCV64();
+ }
+
// This will lead to error messages in the log.
ScopedLogSeverity sls(LogSeverity::FATAL);
@@ -2273,6 +2281,14 @@ void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv*, jclass, jfloat f1, jfloat
}
void JniCompilerTest::StackArgsFloatsFirstImpl() {
+ if (check_generic_jni_) {
+ // FIXME(riscv64): Fix FP argument passing in GenericJNI.
+ TEST_DISABLED_FOR_RISCV64();
+ // TODO(riscv64): This test passes with compiled JNI stubs but the compiled code
+ // does not perform NaN-boxing of float args passed in GPRs. The test should be
+ // extended to check 64-bit values of these float args.
+ }
+
SetUpForTest(true, "stackArgsFloatsFirst", "(FFFFFFFFFFIIIIIIIIII)V",
CURRENT_JNI_WRAPPER(Java_MyClassNatives_stackArgsFloatsFirst));
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 9f26e01154..ade6781c61 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -166,6 +166,12 @@ std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocato
new (allocator) arm64::Arm64JniCallingConvention(
is_static, is_synchronized, is_fast_native, is_critical_native, shorty));
#endif
+#ifdef ART_ENABLE_CODEGEN_riscv64
+ case InstructionSet::kRiscv64:
+ return std::unique_ptr<JniCallingConvention>(
+ new (allocator) riscv64::Riscv64JniCallingConvention(
+ is_static, is_synchronized, is_fast_native, is_critical_native, shorty));
+#endif
#ifdef ART_ENABLE_CODEGEN_x86
case InstructionSet::kX86:
return std::unique_ptr<JniCallingConvention>(
diff --git a/compiler/utils/riscv64/assembler_riscv64.cc b/compiler/utils/riscv64/assembler_riscv64.cc
index 8cf33fc3fc..4df3a7e436 100644
--- a/compiler/utils/riscv64/assembler_riscv64.cc
+++ b/compiler/utils/riscv64/assembler_riscv64.cc
@@ -1049,11 +1049,11 @@ void AddConstImpl(XRegister rd,
constexpr int32_t kLowestValueForSimpleAdjustment = 2 * kNegativeValueSimpleAdjustment;
if (value >= 0 && value <= kHighestValueForSimpleAdjustment) {
- addi(rd, rs1, kPositiveValueSimpleAdjustment);
- addi(rd, rd, value - kPositiveValueSimpleAdjustment);
+ addi(TMP, rs1, kPositiveValueSimpleAdjustment);
+ addi(rd, TMP, value - kPositiveValueSimpleAdjustment);
} else if (value < 0 && value >= kLowestValueForSimpleAdjustment) {
- addi(rd, rs1, kNegativeValueSimpleAdjustment);
- addi(rd, rd, value - kNegativeValueSimpleAdjustment);
+ addi(TMP, rs1, kNegativeValueSimpleAdjustment);
+ addi(rd, TMP, value - kNegativeValueSimpleAdjustment);
} else {
add_large(rd, rs1, value);
}
diff --git a/compiler/utils/riscv64/assembler_riscv64_test.cc b/compiler/utils/riscv64/assembler_riscv64_test.cc
index 31f72aef71..fba82068ac 100644
--- a/compiler/utils/riscv64/assembler_riscv64_test.cc
+++ b/compiler/utils/riscv64/assembler_riscv64_test.cc
@@ -649,6 +649,7 @@ class AssemblerRISCV64Test : public AssemblerTest<riscv64::Riscv64Assembler,
large_values.push_back(0xfff);
std::string tmp_name = GetRegisterName(TMP);
+ std::string addi_tmp = "addi" + suffix + " " + tmp_name + ", ";
std::string expected;
for (XRegister* rd : GetRegisters()) {
@@ -670,8 +671,8 @@ class AssemblerRISCV64Test : public AssemblerTest<riscv64::Riscv64Assembler,
auto emit_simple_ops = [&](ArrayRef<const int64_t> imms, int64_t adjustment) {
for (int64_t imm : imms) {
emit_op(*rd, *rs1, imm);
- expected += addi_rd + rs1_name + ", " + std::to_string(adjustment) + "\n" +
- addi_rd + rd_name + ", " + std::to_string(imm - adjustment) + "\n";
+ expected += addi_tmp + rs1_name + ", " + std::to_string(adjustment) + "\n" +
+ addi_rd + tmp_name + ", " + std::to_string(imm - adjustment) + "\n";
}
};
emit_simple_ops(ArrayRef<const int64_t>(kSimplePositiveValues), 0x7ff);
diff --git a/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc b/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
index 689d871ed8..b698d56c90 100644
--- a/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
+++ b/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
@@ -16,6 +16,8 @@
#include "jni_macro_assembler_riscv64.h"
+#include "base/bit_utils_iterator.h"
+#include "dwarf/register.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "managed_register_riscv64.h"
#include "offsets.h"
@@ -24,6 +26,26 @@
namespace art {
namespace riscv64 {
+static constexpr size_t kSpillSize = 8; // Both GPRs and FPRs
+
+static std::pair<uint32_t, uint32_t> GetCoreAndFpSpillMasks(
+ ArrayRef<const ManagedRegister> callee_save_regs) {
+ uint32_t core_spill_mask = 0u;
+ uint32_t fp_spill_mask = 0u;
+ for (ManagedRegister r : callee_save_regs) {
+ Riscv64ManagedRegister reg = r.AsRiscv64();
+ if (reg.IsXRegister()) {
+ core_spill_mask |= 1u << reg.AsXRegister();
+ } else {
+ DCHECK(reg.IsFRegister());
+ fp_spill_mask |= 1u << reg.AsFRegister();
+ }
+ }
+ DCHECK_EQ(callee_save_regs.size(),
+ dchecked_integral_cast<size_t>(POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask)));
+ return {core_spill_mask, fp_spill_mask};
+}
+
#define __ asm_.
Riscv64JNIMacroAssembler::~Riscv64JNIMacroAssembler() {
@@ -36,30 +58,90 @@ void Riscv64JNIMacroAssembler::FinalizeCode() {
void Riscv64JNIMacroAssembler::BuildFrame(size_t frame_size,
ManagedRegister method_reg,
ArrayRef<const ManagedRegister> callee_save_regs) {
- // TODO(riscv64): Implement this.
- UNUSED(frame_size, method_reg, callee_save_regs);
+ // Increase frame to required size.
+ DCHECK_ALIGNED(frame_size, kStackAlignment);
+ // Must at least have space for Method* if we're going to spill it.
+ DCHECK_GE(frame_size,
+ (callee_save_regs.size() + (method_reg.IsRegister() ? 1u : 0u)) * kSpillSize);
+ IncreaseFrameSize(frame_size);
+
+ // Save callee-saves.
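+  // Frame layout, from the highest address down: RA, the remaining core
+  // callee-saves, then the FP callee-saves, each in an 8-byte slot; the
+  // ArtMethod* goes in the slot at SP + 0.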
+ auto [core_spill_mask, fp_spill_mask] = GetCoreAndFpSpillMasks(callee_save_regs);
+ size_t offset = frame_size;
+ if ((core_spill_mask & (1u << RA)) != 0u) {
+ offset -= kSpillSize;
+ __ Stored(RA, SP, offset);
+ __ cfi().RelOffset(dwarf::Reg::Riscv64Core(RA), offset);
+ }
+ for (uint32_t reg : HighToLowBits(core_spill_mask & ~(1u << RA))) {
+ offset -= kSpillSize;
+ __ Stored(enum_cast<XRegister>(reg), SP, offset);
+ __ cfi().RelOffset(dwarf::Reg::Riscv64Core(enum_cast<XRegister>(reg)), offset);
+ }
+ for (uint32_t reg : HighToLowBits(fp_spill_mask)) {
+ offset -= kSpillSize;
+ __ FStored(enum_cast<FRegister>(reg), SP, offset);
+ __ cfi().RelOffset(dwarf::Reg::Riscv64Fp(enum_cast<FRegister>(reg)), offset);
+ }
+
+ if (method_reg.IsRegister()) {
+ // Write ArtMethod*.
+ DCHECK_EQ(A0, method_reg.AsRiscv64().AsXRegister());
+ __ Stored(A0, SP, 0);
+ }
}
void Riscv64JNIMacroAssembler::RemoveFrame(size_t frame_size,
ArrayRef<const ManagedRegister> callee_save_regs,
- bool may_suspend) {
- // TODO(riscv64): Implement this.
- UNUSED(frame_size, callee_save_regs, may_suspend);
+ [[maybe_unused]] bool may_suspend) {
+ cfi().RememberState();
+
+ // Restore callee-saves.
+ auto [core_spill_mask, fp_spill_mask] = GetCoreAndFpSpillMasks(callee_save_regs);
+ size_t offset = frame_size - callee_save_regs.size() * kSpillSize;
+ for (uint32_t reg : LowToHighBits(fp_spill_mask)) {
+ __ FLoadd(enum_cast<FRegister>(reg), SP, offset);
+ __ cfi().Restore(dwarf::Reg::Riscv64Fp(enum_cast<FRegister>(reg)));
+ offset += kSpillSize;
+ }
+ for (uint32_t reg : LowToHighBits(core_spill_mask & ~(1u << RA))) {
+ __ Loadd(enum_cast<XRegister>(reg), SP, offset);
+ __ cfi().Restore(dwarf::Reg::Riscv64Core(enum_cast<XRegister>(reg)));
+ offset += kSpillSize;
+ }
+ if ((core_spill_mask & (1u << RA)) != 0u) {
+ __ Loadd(RA, SP, offset);
+ __ cfi().Restore(dwarf::Reg::Riscv64Core(RA));
+ offset += kSpillSize;
+ }
+ DCHECK_EQ(offset, frame_size);
+
+ // Decrease the frame size.
+ DecreaseFrameSize(frame_size);
+
+ // Return to RA.
+ __ Ret();
+
+ // The CFI should be restored for any code that follows the exit block.
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(frame_size);
}
void Riscv64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
if (adjust != 0u) {
CHECK_ALIGNED(adjust, kStackAlignment);
- __ AddConst64(SP, SP, -adjust);
- __ cfi().AdjustCFAOffset(adjust);
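+    // Negate in a signed type: computing `-adjust` on the unsigned `size_t`
+    // would rely on wraparound before the conversion to `int64_t`.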
+ int64_t adjustment = dchecked_integral_cast<int64_t>(adjust);
+ __ AddConst64(SP, SP, -adjustment);
+ __ cfi().AdjustCFAOffset(adjustment);
}
}
void Riscv64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
if (adjust != 0u) {
CHECK_ALIGNED(adjust, kStackAlignment);
- __ AddConst64(SP, SP, adjust);
- __ cfi().AdjustCFAOffset(-adjust);
+ int64_t adjustment = dchecked_integral_cast<int64_t>(adjust);
+ __ AddConst64(SP, SP, adjustment);
+ __ cfi().AdjustCFAOffset(-adjustment);
}
}
@@ -70,81 +152,223 @@ ManagedRegister Riscv64JNIMacroAssembler::CoreRegisterWithSize(ManagedRegister s
}
void Riscv64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
- // TODO(riscv64): Implement this.
- UNUSED(offs, m_src, size);
+ Store(Riscv64ManagedRegister::FromXRegister(SP), MemberOffset(offs.Int32Value()), m_src, size);
}
-void Riscv64JNIMacroAssembler::Store(ManagedRegister base,
+void Riscv64JNIMacroAssembler::Store(ManagedRegister m_base,
MemberOffset offs,
ManagedRegister m_src,
size_t size) {
- // TODO(riscv64): Implement this.
- UNUSED(base, offs, m_src, size);
+ Riscv64ManagedRegister base = m_base.AsRiscv64();
+ Riscv64ManagedRegister src = m_src.AsRiscv64();
+ if (src.IsXRegister()) {
+ if (size == 4u) {
+ __ Storew(src.AsXRegister(), base.AsXRegister(), offs.Int32Value());
+ } else {
+ CHECK_EQ(8u, size);
+ __ Stored(src.AsXRegister(), base.AsXRegister(), offs.Int32Value());
+ }
+ } else {
+ CHECK(src.IsFRegister()) << src;
+ if (size == 4u) {
+ __ FStorew(src.AsFRegister(), base.AsXRegister(), offs.Int32Value());
+ } else {
+ CHECK_EQ(8u, size);
+ __ FStored(src.AsFRegister(), base.AsXRegister(), offs.Int32Value());
+ }
+ }
}
void Riscv64JNIMacroAssembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
- // TODO(riscv64): Implement this.
- UNUSED(offs, m_src);
+ Riscv64ManagedRegister sp = Riscv64ManagedRegister::FromXRegister(SP);
+ Store(sp, MemberOffset(offs.Int32Value()), m_src, static_cast<size_t>(kRiscv64PointerSize));
}
-void Riscv64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) {
- // TODO(riscv64): Implement this.
- UNUSED(thr_offs, tag_sp);
+void Riscv64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 offs, bool tag_sp) {
+ XRegister src = SP;
+ if (tag_sp) {
+ // Note: We use `TMP2` here because `TMP` can be used by `Stored()`.
+ __ Ori(TMP2, SP, 0x2);
+ src = TMP2;
+ }
+ __ Stored(src, TR, offs.Int32Value());
}
-void Riscv64JNIMacroAssembler::Load(ManagedRegister m_dest, FrameOffset src, size_t size) {
- // TODO(riscv64): Implement this.
- UNUSED(m_dest, src, size);
+void Riscv64JNIMacroAssembler::Load(ManagedRegister m_dest, FrameOffset offs, size_t size) {
+ Riscv64ManagedRegister sp = Riscv64ManagedRegister::FromXRegister(SP);
+ Load(m_dest, sp, MemberOffset(offs.Int32Value()), size);
}
void Riscv64JNIMacroAssembler::Load(ManagedRegister m_dest,
ManagedRegister m_base,
MemberOffset offs,
size_t size) {
- // TODO(riscv64): Implement this.
- UNUSED(m_dest, m_base, offs, size);
+ Riscv64ManagedRegister base = m_base.AsRiscv64();
+ Riscv64ManagedRegister dest = m_dest.AsRiscv64();
+ if (dest.IsXRegister()) {
+ if (size == 4u) {
+ __ Loadw(dest.AsXRegister(), base.AsXRegister(), offs.Int32Value());
+ } else {
+ CHECK_EQ(8u, size);
+ __ Loadd(dest.AsXRegister(), base.AsXRegister(), offs.Int32Value());
+ }
+ } else {
+ CHECK(dest.IsFRegister()) << dest;
+ if (size == 4u) {
+ __ FLoadw(dest.AsFRegister(), base.AsXRegister(), offs.Int32Value());
+ } else {
+ CHECK_EQ(8u, size);
+ __ FLoadd(dest.AsFRegister(), base.AsXRegister(), offs.Int32Value());
+ }
+ }
}
void Riscv64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dest, ThreadOffset64 offs) {
- // TODO(riscv64): Implement this.
- UNUSED(m_dest, offs);
+ Riscv64ManagedRegister tr = Riscv64ManagedRegister::FromXRegister(TR);
+ Load(m_dest, tr, MemberOffset(offs.Int32Value()), static_cast<size_t>(kRiscv64PointerSize));
}
void Riscv64JNIMacroAssembler::MoveArguments(ArrayRef<ArgumentLocation> dests,
ArrayRef<ArgumentLocation> srcs,
ArrayRef<FrameOffset> refs) {
- // TODO(riscv64): Implement this.
- UNUSED(dests, srcs, refs);
+ size_t arg_count = dests.size();
+ DCHECK_EQ(arg_count, srcs.size());
+ DCHECK_EQ(arg_count, refs.size());
+
+ // Convert reference registers to `jobject` values.
+ for (size_t i = 0; i != arg_count; ++i) {
+ if (refs[i] != kInvalidReferenceOffset && srcs[i].IsRegister()) {
+ // Note: We can clobber `srcs[i]` here as the register cannot hold more than one argument.
+ ManagedRegister src_i_reg = srcs[i].GetRegister();
+ CreateJObject(src_i_reg, refs[i], src_i_reg, /*null_allowed=*/ i != 0u);
+ }
+ }
+
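+  // One 64-bit mask tracks both register files: bits [0, 32) stand for GPRs
+  // and bits [32, 64) for FPRs.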
+ auto get_mask = [](ManagedRegister reg) -> uint64_t {
+ Riscv64ManagedRegister riscv64_reg = reg.AsRiscv64();
+ if (riscv64_reg.IsXRegister()) {
+ size_t core_reg_number = static_cast<size_t>(riscv64_reg.AsXRegister());
+ DCHECK_LT(core_reg_number, 32u);
+ return UINT64_C(1) << core_reg_number;
+ } else {
+ DCHECK(riscv64_reg.IsFRegister());
+ size_t fp_reg_number = static_cast<size_t>(riscv64_reg.AsFRegister());
+ DCHECK_LT(fp_reg_number, 32u);
+ return (UINT64_C(1) << 32u) << fp_reg_number;
+ }
+ };
+
+ // Collect registers to move while storing/copying args to stack slots.
+ // Convert copied references to `jobject`.
+ uint64_t src_regs = 0u;
+ uint64_t dest_regs = 0u;
+ for (size_t i = 0; i != arg_count; ++i) {
+ const ArgumentLocation& src = srcs[i];
+ const ArgumentLocation& dest = dests[i];
+ const FrameOffset ref = refs[i];
+ if (ref != kInvalidReferenceOffset) {
+ DCHECK_EQ(src.GetSize(), kObjectReferenceSize);
+ DCHECK_EQ(dest.GetSize(), static_cast<size_t>(kRiscv64PointerSize));
+ } else {
+ DCHECK_EQ(src.GetSize(), dest.GetSize());
+ }
+ if (dest.IsRegister()) {
+ if (src.IsRegister() && src.GetRegister().Equals(dest.GetRegister())) {
+ // Nothing to do.
+ } else {
+ if (src.IsRegister()) {
+ src_regs |= get_mask(src.GetRegister());
+ }
+ dest_regs |= get_mask(dest.GetRegister());
+ }
+ } else if (src.IsRegister()) {
+ Store(dest.GetFrameOffset(), src.GetRegister(), dest.GetSize());
+ } else {
+ // Note: We use `TMP2` here because `TMP` can be used by `Store()`.
+ Riscv64ManagedRegister tmp2 = Riscv64ManagedRegister::FromXRegister(TMP2);
+ Load(tmp2, src.GetFrameOffset(), src.GetSize());
+ if (ref != kInvalidReferenceOffset) {
+ CreateJObject(tmp2, ref, tmp2, /*null_allowed=*/ i != 0u);
+ }
+ Store(dest.GetFrameOffset(), tmp2, dest.GetSize());
+ }
+ }
+
+ // Fill destination registers.
+ // There should be no cycles, so this simple algorithm should make progress.
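+  // Example (illustrative): a static method's register args may move from
+  // managed {A1, A2, A3} (A0 holds the method) to native {A0, A1, A2}.
+  // A0 is not a pending source, so `A0 <- A1` frees A1, then `A1 <- A2`
+  // frees A2, and `A2 <- A3` completes the transfer in a single pass.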
+ while (dest_regs != 0u) {
+ uint64_t old_dest_regs = dest_regs;
+ for (size_t i = 0; i != arg_count; ++i) {
+ const ArgumentLocation& src = srcs[i];
+ const ArgumentLocation& dest = dests[i];
+ const FrameOffset ref = refs[i];
+ if (!dest.IsRegister()) {
+ continue; // Stored in first loop above.
+ }
+ uint64_t dest_reg_mask = get_mask(dest.GetRegister());
+ if ((dest_reg_mask & dest_regs) == 0u) {
+        continue;  // Equals source, or already filled in one of the previous iterations.
+ }
+ if ((dest_reg_mask & src_regs) != 0u) {
+ continue; // Cannot clobber this register yet.
+ }
+ // FIXME(riscv64): FP args can be passed in GPRs if all argument FPRs have been used.
+ // In that case, a `float` needs to be NaN-boxed. However, we do not have sufficient
+ // information here to determine whether we're loading a `float` or a narrow integral arg.
+ // We shall need to change the macro assembler interface to pass this information.
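+      // (NaN-boxing: the RISC-V ABI represents a `float` held in a 64-bit
+      // register with the upper 32 bits set to all ones.)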
+ if (src.IsRegister()) {
+ Move(dest.GetRegister(), src.GetRegister(), dest.GetSize());
+ src_regs &= ~get_mask(src.GetRegister()); // Allow clobbering source register.
+ } else {
+ Load(dest.GetRegister(), src.GetFrameOffset(), dest.GetSize());
+ if (ref != kInvalidReferenceOffset) {
+ CreateJObject(dest.GetRegister(), ref, dest.GetRegister(), /*null_allowed=*/ i != 0u);
+ }
+ }
+ dest_regs &= ~get_mask(dest.GetRegister()); // Destination register was filled.
+ }
+ CHECK_NE(old_dest_regs, dest_regs);
+ DCHECK_EQ(0u, dest_regs & ~old_dest_regs);
+ }
}
void Riscv64JNIMacroAssembler::Move(ManagedRegister m_dest, ManagedRegister m_src, size_t size) {
- // TODO(riscv64): Implement this.
- UNUSED(m_dest, m_src, size);
+ // Note: This function is used only for moving between GPRs.
+ // FP argument registers hold the same arguments in managed and native ABIs.
+ DCHECK(size == 4u || size == 8u) << size;
+ Riscv64ManagedRegister dest = m_dest.AsRiscv64();
+ Riscv64ManagedRegister src = m_src.AsRiscv64();
+ DCHECK(dest.IsXRegister());
+ DCHECK(src.IsXRegister());
+ if (!dest.Equals(src)) {
+ __ Mv(dest.AsXRegister(), src.AsXRegister());
+ }
}
void Riscv64JNIMacroAssembler::Move(ManagedRegister m_dest, size_t value) {
- // TODO(riscv64): Implement this.
- UNUSED(m_dest, value);
+ DCHECK(m_dest.AsRiscv64().IsXRegister());
+ __ LoadConst64(m_dest.AsRiscv64().AsXRegister(), dchecked_integral_cast<int64_t>(value));
}
-void Riscv64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
- // TODO(riscv64): Implement this.
- UNUSED(mreg, size);
+void Riscv64JNIMacroAssembler::SignExtend([[maybe_unused]] ManagedRegister mreg,
+ [[maybe_unused]] size_t size) {
+ LOG(FATAL) << "The result is already sign-extended in the native ABI.";
+ UNREACHABLE();
}
-void Riscv64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
- // TODO(riscv64): Implement this.
- UNUSED(mreg, size);
+void Riscv64JNIMacroAssembler::ZeroExtend([[maybe_unused]] ManagedRegister mreg,
+ [[maybe_unused]] size_t size) {
+ LOG(FATAL) << "The result is already zero-extended in the native ABI.";
+ UNREACHABLE();
}
-void Riscv64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
- // TODO(riscv64): Implement this.
- UNUSED(tr);
+void Riscv64JNIMacroAssembler::GetCurrentThread(ManagedRegister dest) {
+ DCHECK(dest.AsRiscv64().IsXRegister());
+ __ Mv(dest.AsRiscv64().AsXRegister(), TR);
}
void Riscv64JNIMacroAssembler::GetCurrentThread(FrameOffset offset) {
- // TODO(riscv64): Implement this.
- UNUSED(offset);
+ __ Stored(TR, SP, offset.Int32Value());
}
void Riscv64JNIMacroAssembler::DecodeJNITransitionOrLocalJObject(ManagedRegister m_reg,
@@ -167,17 +391,15 @@ void Riscv64JNIMacroAssembler::VerifyObject([[maybe_unused]] FrameOffset src,
void Riscv64JNIMacroAssembler::Jump(ManagedRegister m_base, Offset offs) {
Riscv64ManagedRegister base = m_base.AsRiscv64();
CHECK(base.IsXRegister()) << base;
- XRegister scratch = TMP;
- __ Loadd(scratch, base.AsXRegister(), offs.Int32Value());
- __ Jr(scratch);
+ __ Loadd(TMP, base.AsXRegister(), offs.Int32Value());
+ __ Jr(TMP);
}
void Riscv64JNIMacroAssembler::Call(ManagedRegister m_base, Offset offs) {
Riscv64ManagedRegister base = m_base.AsRiscv64();
CHECK(base.IsXRegister()) << base;
- XRegister scratch = TMP;
- __ Loadd(scratch, base.AsXRegister(), offs.Int32Value());
- __ Jalr(scratch);
+ __ Loadd(RA, base.AsXRegister(), offs.Int32Value());
+ __ Jalr(RA);
}
@@ -206,12 +428,19 @@ void Riscv64JNIMacroAssembler::SuspendCheck(JNIMacroLabel* label) {
}
void Riscv64JNIMacroAssembler::ExceptionPoll(JNIMacroLabel* label) {
- // TODO(riscv64): Implement this.
- UNUSED(label);
+  __ Loadd(TMP, TR, Thread::ExceptionOffset<kRiscv64PointerSize>().Int32Value());
+ __ Bnez(TMP, Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
}
void Riscv64JNIMacroAssembler::DeliverPendingException() {
- // TODO(riscv64): Implement this.
+ // Pass exception object as argument.
+ // Don't care about preserving A0 as this won't return.
+ // Note: The scratch register from `ExceptionPoll()` may have been clobbered.
+  __ Loadd(A0, TR, Thread::ExceptionOffset<kRiscv64PointerSize>().Int32Value());
+  __ Loadd(RA, TR, QUICK_ENTRYPOINT_OFFSET(kRiscv64PointerSize, pDeliverException).Int32Value());
+ __ Jalr(RA);
+ // Call should never return.
+ __ Ebreak();
}
std::unique_ptr<JNIMacroLabel> Riscv64JNIMacroAssembler::CreateLabel() {
@@ -267,6 +496,28 @@ void Riscv64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
__ Bind(Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
}
+void Riscv64JNIMacroAssembler::CreateJObject(ManagedRegister m_dest,
+ FrameOffset spilled_reference_offset,
+ ManagedRegister m_ref,
+ bool null_allowed) {
+ Riscv64ManagedRegister dest = m_dest.AsRiscv64();
+ Riscv64ManagedRegister ref = m_ref.AsRiscv64();
+ DCHECK(dest.IsXRegister());
+ DCHECK(ref.IsXRegister());
+
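+  // With `null_allowed` and `dest != ref`, the emission boils down to:
+  //   li dest, 0; beqz ref, 1f; addi dest, sp, <spill offset>; 1:
+  // i.e. a null reference yields a null `jobject`, anything else yields the
+  // address of the spilled reference.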
+ Riscv64Label null_label;
+ if (null_allowed) {
+ if (!dest.Equals(ref)) {
+ __ Li(dest.AsXRegister(), 0);
+ }
+    __ Beqz(ref.AsXRegister(), &null_label);
+ }
+ __ AddConst64(dest.AsXRegister(), SP, spilled_reference_offset.Int32Value());
+ if (null_allowed) {
+ __ Bind(&null_label);
+ }
+}
+
#undef __
} // namespace riscv64
diff --git a/compiler/utils/riscv64/jni_macro_assembler_riscv64.h b/compiler/utils/riscv64/jni_macro_assembler_riscv64.h
index e472526f54..903c702f2d 100644
--- a/compiler/utils/riscv64/jni_macro_assembler_riscv64.h
+++ b/compiler/utils/riscv64/jni_macro_assembler_riscv64.h
@@ -61,11 +61,11 @@ class Riscv64JNIMacroAssembler : public JNIMacroAssemblerFwd<Riscv64Assembler,
// Store routines.
void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
void Store(ManagedRegister base, MemberOffset offs, ManagedRegister src, size_t size) override;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
- void StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) override;
+ void StoreRawPtr(FrameOffset offs, ManagedRegister src) override;
+ void StoreStackPointerToThread(ThreadOffset64 offs, bool tag_sp) override;
// Load routines.
- void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
+ void Load(ManagedRegister dest, FrameOffset offs, size_t size) override;
void Load(ManagedRegister dest, ManagedRegister base, MemberOffset offs, size_t size) override;
void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
@@ -83,8 +83,8 @@ class Riscv64JNIMacroAssembler : public JNIMacroAssemblerFwd<Riscv64Assembler,
void ZeroExtend(ManagedRegister mreg, size_t size) override;
// Exploit fast access in managed code to Thread::Current().
- void GetCurrentThread(ManagedRegister tr) override;
- void GetCurrentThread(FrameOffset dest_offset) override;
+ void GetCurrentThread(ManagedRegister dest) override;
+ void GetCurrentThread(FrameOffset offset) override;
// Decode JNI transition or local `jobject`. For (weak) global `jobject`, jump to slow path.
void DecodeJNITransitionOrLocalJObject(ManagedRegister reg,
@@ -137,14 +137,20 @@ class Riscv64JNIMacroAssembler : public JNIMacroAssemblerFwd<Riscv64Assembler,
void TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) override;
// Code at this offset will serve as the target for the Jump call.
void Bind(JNIMacroLabel* label) override;
+
+ private:
+ void CreateJObject(ManagedRegister m_dest,
+ FrameOffset spilled_reference_offset,
+ ManagedRegister m_ref,
+ bool null_allowed);
};
class Riscv64JNIMacroLabel final
: public JNIMacroLabelCommon<Riscv64JNIMacroLabel,
- art::Label,
+ Riscv64Label,
InstructionSet::kRiscv64> {
public:
- art::Label* AsRiscv64() {
+ Riscv64Label* AsRiscv64() {
return AsPlatformLabel();
}
};
diff --git a/compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc b/compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc
new file mode 100644
index 0000000000..3dc9f62e8b
--- /dev/null
+++ b/compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc
@@ -0,0 +1,400 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <dirent.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include <fstream>
+#include <map>
+#include <regex>
+
+#include "gtest/gtest.h"
+
+#include "jni/quick/calling_convention.h"
+#include "utils/riscv64/jni_macro_assembler_riscv64.h"
+#include "utils/assembler_test_base.h"
+
+#include "base/macros.h"
+#include "base/malloc_arena_pool.h"
+
+namespace art HIDDEN {
+namespace riscv64 {
+
+class JniMacroAssemblerRiscv64Test : public AssemblerTestBase {
+ public:
+ JniMacroAssemblerRiscv64Test() : pool_(), allocator_(&pool_), assembler_(&allocator_) { }
+
+ protected:
+ InstructionSet GetIsa() override { return InstructionSet::kRiscv64; }
+
+ void DriverStr(const std::string& assembly_text, const std::string& test_name) {
+ assembler_.FinalizeCode();
+ size_t cs = assembler_.CodeSize();
+ std::vector<uint8_t> data(cs);
+ MemoryRegion code(&data[0], data.size());
+ assembler_.FinalizeInstructions(code);
+ Driver(data, assembly_text, test_name);
+ }
+
+ static Riscv64ManagedRegister AsManaged(XRegister reg) {
+ return Riscv64ManagedRegister::FromXRegister(reg);
+ }
+
+ static Riscv64ManagedRegister AsManaged(FRegister reg) {
+ return Riscv64ManagedRegister::FromFRegister(reg);
+ }
+
+ static const size_t kWordSize = 4u;
+ static const size_t kDoubleWordSize = 8u;
+
+ MallocArenaPool pool_;
+ ArenaAllocator allocator_;
+ Riscv64JNIMacroAssembler assembler_;
+};
+
+#define __ assembler_.
+
+TEST_F(JniMacroAssemblerRiscv64Test, StackFrame) {
+ std::string expected;
+
+ std::unique_ptr<JniCallingConvention> jni_conv = JniCallingConvention::Create(
+ &allocator_,
+ /*is_static=*/ false,
+ /*is_synchronized=*/ false,
+ /*is_fast_native=*/ false,
+ /*is_critical_native=*/ false,
+ /*shorty=*/ "V",
+ InstructionSet::kRiscv64);
+ size_t frame_size = jni_conv->FrameSize();
+ ManagedRegister method_reg = AsManaged(A0);
+ ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
+
+ __ BuildFrame(frame_size, method_reg, callee_save_regs);
+ expected += "addi sp, sp, -208\n"
+ "sd ra, 200(sp)\n"
+ "sd s11, 192(sp)\n"
+ "sd s10, 184(sp)\n"
+ "sd s9, 176(sp)\n"
+ "sd s8, 168(sp)\n"
+ "sd s7, 160(sp)\n"
+ "sd s6, 152(sp)\n"
+ "sd s5, 144(sp)\n"
+ "sd s4, 136(sp)\n"
+ "sd s3, 128(sp)\n"
+ "sd s2, 120(sp)\n"
+ "sd s0, 112(sp)\n"
+ "fsd fs11, 104(sp)\n"
+ "fsd fs10, 96(sp)\n"
+ "fsd fs9, 88(sp)\n"
+ "fsd fs8, 80(sp)\n"
+ "fsd fs7, 72(sp)\n"
+ "fsd fs6, 64(sp)\n"
+ "fsd fs5, 56(sp)\n"
+ "fsd fs4, 48(sp)\n"
+ "fsd fs3, 40(sp)\n"
+ "fsd fs2, 32(sp)\n"
+ "fsd fs1, 24(sp)\n"
+ "fsd fs0, 16(sp)\n"
+ "sd a0, 0(sp)\n";
+
+ __ RemoveFrame(frame_size, callee_save_regs, /*may_suspend=*/ false);
+ expected += "fld fs0, 16(sp)\n"
+ "fld fs1, 24(sp)\n"
+ "fld fs2, 32(sp)\n"
+ "fld fs3, 40(sp)\n"
+ "fld fs4, 48(sp)\n"
+ "fld fs5, 56(sp)\n"
+ "fld fs6, 64(sp)\n"
+ "fld fs7, 72(sp)\n"
+ "fld fs8, 80(sp)\n"
+ "fld fs9, 88(sp)\n"
+ "fld fs10, 96(sp)\n"
+ "fld fs11, 104(sp)\n"
+ "ld s0, 112(sp)\n"
+ "ld s2, 120(sp)\n"
+ "ld s3, 128(sp)\n"
+ "ld s4, 136(sp)\n"
+ "ld s5, 144(sp)\n"
+ "ld s6, 152(sp)\n"
+ "ld s7, 160(sp)\n"
+ "ld s8, 168(sp)\n"
+ "ld s9, 176(sp)\n"
+ "ld s10, 184(sp)\n"
+ "ld s11, 192(sp)\n"
+ "ld ra, 200(sp)\n"
+ "addi sp, sp, 208\n"
+ "ret\n";
+
+ DriverStr(expected, "StackFrame");
+}
+
+TEST_F(JniMacroAssemblerRiscv64Test, ChangeFrameSize) {
+ std::string expected;
+
+ __ IncreaseFrameSize(128);
+ expected += "addi sp, sp, -128\n";
+ __ DecreaseFrameSize(128);
+ expected += "addi sp, sp, 128\n";
+
+ __ IncreaseFrameSize(0); // No-op
+ __ DecreaseFrameSize(0); // No-op
+
+ __ IncreaseFrameSize(2048);
+ expected += "addi sp, sp, -2048\n";
+ __ DecreaseFrameSize(2048);
+ expected += "addi t6, sp, 2047\n"
+ "addi sp, t6, 1\n";
+
+ __ IncreaseFrameSize(4096);
+ expected += "addi t6, sp, -2048\n"
+ "addi sp, t6, -2048\n";
+ __ DecreaseFrameSize(4096);
+ expected += "lui t6, 1\n"
+ "add sp, sp, t6\n";
+
+ __ IncreaseFrameSize(6 * KB);
+ expected += "addi t6, zero, -3\n"
+ "slli t6, t6, 11\n"
+ "add sp, sp, t6\n";
+ __ DecreaseFrameSize(6 * KB);
+ expected += "addi t6, zero, 3\n"
+ "slli t6, t6, 11\n"
+ "add sp, sp, t6\n";
+
+ __ IncreaseFrameSize(6 * KB + 16);
+ expected += "lui t6, 0xffffe\n"
+ "addiw t6, t6, 2048-16\n"
+ "add sp, sp, t6\n";
+ __ DecreaseFrameSize(6 * KB + 16);
+ expected += "lui t6, 2\n"
+ "addiw t6, t6, 16-2048\n"
+ "add sp, sp, t6\n";
+
+ DriverStr(expected, "ChangeFrameSize");
+}
+
+TEST_F(JniMacroAssemblerRiscv64Test, Store) {
+ std::string expected;
+
+ __ Store(FrameOffset(0), AsManaged(A0), kWordSize);
+ expected += "sw a0, 0(sp)\n";
+ __ Store(FrameOffset(2048), AsManaged(S0), kDoubleWordSize);
+ expected += "addi t6, sp, 0x7f8\n"
+ "sd s0, 8(t6)\n";
+
+ __ Store(AsManaged(A1), MemberOffset(256), AsManaged(S2), kDoubleWordSize);
+ expected += "sd s2, 256(a1)\n";
+ __ Store(AsManaged(S3), MemberOffset(4 * KB), AsManaged(T1), kWordSize);
+ expected += "lui t6, 1\n"
+ "add t6, t6, s3\n"
+ "sw t1, 0(t6)\n";
+
+ __ Store(AsManaged(A3), MemberOffset(384), AsManaged(FA5), kDoubleWordSize);
+ expected += "fsd fa5, 384(a3)\n";
+ __ Store(AsManaged(S4), MemberOffset(4 * KB + 16), AsManaged(FT10), kWordSize);
+ expected += "lui t6, 1\n"
+ "add t6, t6, s4\n"
+ "fsw ft10, 16(t6)\n";
+
+ __ StoreRawPtr(FrameOffset(128), AsManaged(A7));
+ expected += "sd a7, 128(sp)\n";
+ __ StoreRawPtr(FrameOffset(6 * KB), AsManaged(S11));
+ expected += "lui t6, 2\n"
+ "add t6, t6, sp\n"
+ "sd s11, -2048(t6)\n";
+
+ __ StoreStackPointerToThread(ThreadOffset64(512), /*tag_sp=*/ false);
+ expected += "sd sp, 512(s1)\n";
+ __ StoreStackPointerToThread(ThreadOffset64(3 * KB), /*tag_sp=*/ true);
+ expected += "ori t5, sp, 0x2\n"
+ "addi t6, s1, 0x7f8\n"
+ "sd t5, 0x408(t6)\n";
+
+ DriverStr(expected, "Store");
+}
+
+TEST_F(JniMacroAssemblerRiscv64Test, Load) {
+ std::string expected;
+
+ __ Load(AsManaged(A0), FrameOffset(0), kWordSize);
+ expected += "lw a0, 0(sp)\n";
+ __ Load(AsManaged(S0), FrameOffset(2048), kDoubleWordSize);
+ expected += "addi t6, sp, 0x7f8\n"
+ "ld s0, 8(t6)\n";
+
+ __ Load(AsManaged(S2), AsManaged(A1), MemberOffset(256), kDoubleWordSize);
+ expected += "ld s2, 256(a1)\n";
+ __ Load(AsManaged(T1), AsManaged(S3), MemberOffset(4 * KB), kWordSize);
+ expected += "lui t6, 1\n"
+ "add t6, t6, s3\n"
+ "lw t1, 0(t6)\n";
+
+ __ Load(AsManaged(FA5), AsManaged(A3), MemberOffset(384), kDoubleWordSize);
+ expected += "fld fa5, 384(a3)\n";
+ __ Load(AsManaged(FT10), AsManaged(S4), MemberOffset(4 * KB + 16), kWordSize);
+ expected += "lui t6, 1\n"
+ "add t6, t6, s4\n"
+ "flw ft10, 16(t6)\n";
+
+ __ LoadRawPtrFromThread(AsManaged(A7), ThreadOffset64(512));
+ expected += "ld a7, 512(s1)\n";
+ __ LoadRawPtrFromThread(AsManaged(S11), ThreadOffset64(3 * KB));
+ expected += "addi t6, s1, 0x7f8\n"
+ "ld s11, 0x408(t6)\n";
+
+ DriverStr(expected, "Load");
+}
+
+TEST_F(JniMacroAssemblerRiscv64Test, MoveArguments) {
+ // TODO(riscv64): Test `MoveArguments()`.
+ // We do not add the test yet while there is an outstanding FIXME in `MoveArguments()`.
+}
+
+TEST_F(JniMacroAssemblerRiscv64Test, Move) {
+ std::string expected;
+
+ __ Move(AsManaged(A0), AsManaged(A1), kWordSize);
+ expected += "mv a0, a1\n";
+ __ Move(AsManaged(A2), AsManaged(A3), kDoubleWordSize);
+ expected += "mv a2, a3\n";
+
+ __ Move(AsManaged(A4), AsManaged(A4), kWordSize); // No-op.
+ __ Move(AsManaged(A5), AsManaged(A5), kDoubleWordSize); // No-op.
+
+ DriverStr(expected, "Move");
+}
+
+TEST_F(JniMacroAssemblerRiscv64Test, GetCurrentThread) {
+ std::string expected;
+
+ __ GetCurrentThread(AsManaged(A0));
+ expected += "mv a0, s1\n";
+
+ __ GetCurrentThread(FrameOffset(256));
+ expected += "sd s1, 256(sp)\n";
+ __ GetCurrentThread(FrameOffset(3 * KB));
+ expected += "addi t6, sp, 0x7f8\n"
+ "sd s1, 0x408(t6)\n";
+
+ DriverStr(expected, "GetCurrentThread");
+}
+
+TEST_F(JniMacroAssemblerRiscv64Test, JumpCodePointer) {
+ std::string expected;
+
+ __ Jump(AsManaged(A0), Offset(24));
+ expected += "ld t6, 24(a0)\n"
+ "jr t6\n";
+
+ __ Jump(AsManaged(S2), Offset(2048));
+ expected += "addi t6, s2, 0x7f8\n"
+ "ld t6, 8(t6)\n"
+ "jr t6\n";
+
+ DriverStr(expected, "JumpCodePointer");
+}
+
+TEST_F(JniMacroAssemblerRiscv64Test, Call) {
+ std::string expected;
+
+ __ Call(AsManaged(A0), Offset(32));
+ expected += "ld ra, 32(a0)\n"
+ "jalr ra\n";
+
+ __ Call(AsManaged(S2), Offset(2048));
+ expected += "addi t6, s2, 0x7f8\n"
+ "ld ra, 8(t6)\n"
+ "jalr ra\n";
+
+ __ CallFromThread(ThreadOffset64(256));
+ expected += "ld ra, 256(s1)\n"
+ "jalr ra\n";
+
+ __ CallFromThread(ThreadOffset64(3 * KB));
+ expected += "addi t6, s1, 0x7f8\n"
+ "ld ra, 0x408(t6)\n"
+ "jalr ra\n";
+
+ DriverStr(expected, "Call");
+}
+
+TEST_F(JniMacroAssemblerRiscv64Test, Exception) {
+ std::string expected;
+
+  ThreadOffset64 exception_offset = Thread::ExceptionOffset<kRiscv64PointerSize>();
+  ThreadOffset64 deliver_offset = QUICK_ENTRYPOINT_OFFSET(kRiscv64PointerSize, pDeliverException);
+
+ std::unique_ptr<JNIMacroLabel> slow_path = __ CreateLabel();
+
+ __ ExceptionPoll(slow_path.get());
+ expected += "ld t6, " + std::to_string(exception_offset.Int32Value()) + "(s1)\n"
+ "bnez t6, 1f\n";
+
+ __ RemoveFrame(/*frame_size=*/ 0u,
+ /*callee_save_regs=*/ ArrayRef<const ManagedRegister>(),
+ /*may_suspend=*/ false);
+ expected += "ret\n";
+
+ __ Bind(slow_path.get());
+ expected += "1:\n";
+
+ __ DeliverPendingException();
+ expected += "ld a0, " + std::to_string(exception_offset.Int32Value()) + "(s1)\n"
+ "ld ra, " + std::to_string(deliver_offset.Int32Value()) + "(s1)\n"
+ "jalr ra\n"
+ "ebreak\n";
+
+ DriverStr(expected, "Exception");
+}
+
+TEST_F(JniMacroAssemblerRiscv64Test, JumpLabel) {
+ std::string expected;
+
+ std::unique_ptr<JNIMacroLabel> target = __ CreateLabel();
+ std::unique_ptr<JNIMacroLabel> back = __ CreateLabel();
+
+ __ Jump(target.get());
+ expected += "j 2f\n";
+
+ __ Bind(back.get());
+ expected += "1:\n";
+
+ __ Move(AsManaged(A0), AsManaged(A1), static_cast<size_t>(kRiscv64PointerSize));
+ expected += "mv a0, a1\n";
+
+ __ Bind(target.get());
+ expected += "2:\n";
+
+ __ Jump(back.get());
+ expected += "j 1b\n";
+
+ DriverStr(expected, "JumpLabel");
+}
+
+TEST_F(JniMacroAssemblerRiscv64Test, ReadBarrier) {
+ // TODO(riscv64): Test `TestGcMarking()` and `TestMarkBit()`.
+}
+
+TEST_F(JniMacroAssemblerRiscv64Test, TestByteAndJumpIfNotZero) {
+ // TODO(riscv64): Test `TestByteAndJumpIfNotZero()`.
+}
+
+#undef __
+
+} // namespace riscv64
+} // namespace art