author     2023-06-19 15:36:36 +0000
committer  2023-06-22 11:40:30 +0000
commit     32b6a6284af63a9049757f83b9232dc5fb0a6cf0 (patch)
tree       18d736732b8e2ba1a1a03316c871023efb14e233
parent     e3059d245f7558fa488d5ec27ce4408d428e6763 (diff)
riscv64: Clean up `Riscv64JNIMacroAssembler::MoveArguments()`.
Change `Riscv64JNIMacroAssembler::MoveArguments()` to delay
creating `jobject`s and remove some code that is effectively
dead due to the calling convention. Add tests that exercise
all the code paths in this function.
Add some missing `HIDDEN` annotations on `namespace art`.
Remove FIXME for NaN-boxing FP args passed in GPRs. This was
based on a misreading of the C/C++ calling convention.
Test: m test-art-host-gtest
Test: run-gtests.sh
Bug: 283082089
Change-Id: Ifaba892a7d66707df03422b59ca171aebc91edf0
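
For readers unfamiliar with the convention involved, here is a minimal, self-contained sketch of what "creating a `jobject`" for a reference argument means: the native callee receives either the address of the reference's spill slot in the caller's frame, or null. The helper name `ReferenceToJObject` and its `frame`/`ref_offset` parameters below are hypothetical illustrations, not ART code; the commit itself only changes when this conversion happens, emitting it while each argument is moved into its final location instead of in an up-front pass over the argument registers.

#include <cstddef>
#include <cstdint>

using jobject = void*;  // Stand-in for the JNI handle type.

// Hypothetical helper: given the managed frame base and the spill-slot offset of a
// reference argument, compute the jobject value to pass to native code.
inline jobject ReferenceToJObject(uint8_t* frame, size_t ref_offset, bool null_allowed) {
  uint32_t* slot = reinterpret_cast<uint32_t*>(frame + ref_offset);
  if (null_allowed && *slot == 0u) {
    return nullptr;  // A null reference is passed as a null jobject, not as a slot address.
  }
  return slot;  // Otherwise pass the address of the spill slot holding the reference.
}

Delaying this conversion means a source register still holds the raw reference when it is stored or moved, so the null-check-and-address sequence is only emitted at the argument's final location.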
-rw-r--r--  compiler/jni/jni_compiler_test.cc                           |   3
-rw-r--r--  compiler/utils/riscv64/assembler_riscv64.cc                 |   2
-rw-r--r--  compiler/utils/riscv64/assembler_riscv64.h                  |   2
-rw-r--r--  compiler/utils/riscv64/assembler_riscv64_test.cc            |   2
-rw-r--r--  compiler/utils/riscv64/jni_macro_assembler_riscv64.cc       |  50
-rw-r--r--  compiler/utils/riscv64/jni_macro_assembler_riscv64.h        |   2
-rw-r--r--  compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc  | 279
-rw-r--r--  compiler/utils/riscv64/managed_register_riscv64.cc          |   2
-rw-r--r--  compiler/utils/riscv64/managed_register_riscv64.h           |   2
-rw-r--r--  compiler/utils/riscv64/managed_register_riscv64_test.cc     |   2
10 files changed, 308 insertions(+), 38 deletions(-)
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index ec68ff2f4d..5ebba7d497 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -2272,9 +2272,6 @@ void JniCompilerTest::StackArgsFloatsFirstImpl() {
   if (check_generic_jni_) {
     // FIXME(riscv64): Fix FP argument passing in GenericJNI.
     TEST_DISABLED_FOR_RISCV64();
-    // TODO(riscv64): This test passes with compiled JNI stubs but the compiled code
-    // does not perform NaN-boxing of float args passed in GPRs. The test should be
-    // extended to check 64-bit values of these float args.
   }
   SetUpForTest(true,
                "stackArgsFloatsFirst",
                "(FFFFFFFFFFIIIIIIIIII)V",
diff --git a/compiler/utils/riscv64/assembler_riscv64.cc b/compiler/utils/riscv64/assembler_riscv64.cc
index a09a23ee6f..448e39fa98 100644
--- a/compiler/utils/riscv64/assembler_riscv64.cc
+++ b/compiler/utils/riscv64/assembler_riscv64.cc
@@ -21,7 +21,7 @@
 #include "base/logging.h"
 #include "base/memory_region.h"

-namespace art {
+namespace art HIDDEN {
 namespace riscv64 {

 static_assert(static_cast<size_t>(kRiscv64PointerSize) == kRiscv64DoublewordSize,
diff --git a/compiler/utils/riscv64/assembler_riscv64.h b/compiler/utils/riscv64/assembler_riscv64.h
index e7a8701494..fc86d5808d 100644
--- a/compiler/utils/riscv64/assembler_riscv64.h
+++ b/compiler/utils/riscv64/assembler_riscv64.h
@@ -31,7 +31,7 @@
 #include "utils/assembler.h"
 #include "utils/label.h"

-namespace art {
+namespace art HIDDEN {
 namespace riscv64 {

 enum class FPRoundingMode : uint32_t {
diff --git a/compiler/utils/riscv64/assembler_riscv64_test.cc b/compiler/utils/riscv64/assembler_riscv64_test.cc
index fba82068ac..15026093b8 100644
--- a/compiler/utils/riscv64/assembler_riscv64_test.cc
+++ b/compiler/utils/riscv64/assembler_riscv64_test.cc
@@ -25,7 +25,7 @@

 #define __ GetAssembler()->

-namespace art {
+namespace art HIDDEN {
 namespace riscv64 {

 struct RISCV64CpuRegisterCompare {
diff --git a/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc b/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
index 59c6f11a10..a7bbee5111 100644
--- a/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
+++ b/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
@@ -25,7 +25,7 @@
 #include "offsets.h"
 #include "thread.h"

-namespace art {
+namespace art HIDDEN {
 namespace riscv64 {

 static constexpr size_t kSpillSize = 8;  // Both GPRs and FPRs
@@ -237,15 +237,6 @@ void Riscv64JNIMacroAssembler::MoveArguments(ArrayRef<ArgumentLocation> dests,
   DCHECK_EQ(arg_count, srcs.size());
   DCHECK_EQ(arg_count, refs.size());

-  // Convert reference registers to `jobject` values.
-  for (size_t i = 0; i != arg_count; ++i) {
-    if (refs[i] != kInvalidReferenceOffset && srcs[i].IsRegister()) {
-      // Note: We can clobber `srcs[i]` here as the register cannot hold more than one argument.
-      ManagedRegister src_i_reg = srcs[i].GetRegister();
-      CreateJObject(src_i_reg, refs[i], src_i_reg, /*null_allowed=*/ i != 0u);
-    }
-  }
-
   auto get_mask = [](ManagedRegister reg) -> uint64_t {
     Riscv64ManagedRegister riscv64_reg = reg.AsRiscv64();
     if (riscv64_reg.IsXRegister()) {
@@ -261,7 +252,7 @@ void Riscv64JNIMacroAssembler::MoveArguments(ArrayRef<ArgumentLocation> dests,
   };

   // Collect registers to move while storing/copying args to stack slots.
-  // Convert copied references to `jobject`.
+  // Convert processed references to `jobject`.
   uint64_t src_regs = 0u;
   uint64_t dest_regs = 0u;
   for (size_t i = 0; i != arg_count; ++i) {
@@ -276,23 +267,29 @@ void Riscv64JNIMacroAssembler::MoveArguments(ArrayRef<ArgumentLocation> dests,
     }
     if (dest.IsRegister()) {
       if (src.IsRegister() && src.GetRegister().Equals(dest.GetRegister())) {
-        // Nothing to do.
+        // No move is necessary but we may need to convert a reference to a `jobject`.
+        if (ref != kInvalidReferenceOffset) {
+          CreateJObject(dest.GetRegister(), ref, src.GetRegister(), /*null_allowed=*/ i != 0u);
+        }
       } else {
         if (src.IsRegister()) {
           src_regs |= get_mask(src.GetRegister());
         }
         dest_regs |= get_mask(dest.GetRegister());
       }
-    } else if (src.IsRegister()) {
-      Store(dest.GetFrameOffset(), src.GetRegister(), dest.GetSize());
     } else {
       // Note: We use `TMP2` here because `TMP` can be used by `Store()`.
-      Riscv64ManagedRegister tmp2 = Riscv64ManagedRegister::FromXRegister(TMP2);
-      Load(tmp2, src.GetFrameOffset(), src.GetSize());
+      Riscv64ManagedRegister reg = src.IsRegister()
+          ? src.GetRegister().AsRiscv64()
+          : Riscv64ManagedRegister::FromXRegister(TMP2);
+      if (!src.IsRegister()) {
+        Load(reg, src.GetFrameOffset(), src.GetSize());
+      }
       if (ref != kInvalidReferenceOffset) {
-        CreateJObject(tmp2, ref, tmp2, /*null_allowed=*/ i != 0u);
+        DCHECK_NE(i, 0u);
+        CreateJObject(reg, ref, reg, /*null_allowed=*/ true);
       }
-      Store(dest.GetFrameOffset(), tmp2, dest.GetSize());
+      Store(dest.GetFrameOffset(), reg, dest.GetSize());
     }
   }

@@ -314,18 +311,19 @@ void Riscv64JNIMacroAssembler::MoveArguments(ArrayRef<ArgumentLocation> dests,
       if ((dest_reg_mask & src_regs) != 0u) {
        continue;  // Cannot clobber this register yet.
       }
-      // FIXME(riscv64): FP args can be passed in GPRs if all argument FPRs have been used.
-      // In that case, a `float` needs to be NaN-boxed. However, we do not have sufficient
-      // information here to determine whether we're loading a `float` or a narrow integral arg.
-      // We shall need to change the macro assembler interface to pass this information.
       if (src.IsRegister()) {
-        Move(dest.GetRegister(), src.GetRegister(), dest.GetSize());
+        if (ref != kInvalidReferenceOffset) {
+          DCHECK_NE(i, 0u);  // The `this` arg remains in the same register (handled above).
+          CreateJObject(dest.GetRegister(), ref, src.GetRegister(), /*null_allowed=*/ true);
+        } else {
+          Move(dest.GetRegister(), src.GetRegister(), dest.GetSize());
+        }
         src_regs &= ~get_mask(src.GetRegister());  // Allow clobbering source register.
       } else {
         Load(dest.GetRegister(), src.GetFrameOffset(), dest.GetSize());
-        if (ref != kInvalidReferenceOffset) {
-          CreateJObject(dest.GetRegister(), ref, dest.GetRegister(), /*null_allowed=*/ i != 0u);
-        }
+        // No `jobject` conversion needed. There are enough arg registers in managed ABI
+        // to hold all references that yield a register arg `jobject` in native ABI.
+        DCHECK_EQ(ref, kInvalidReferenceOffset);
       }
       dest_regs &= ~get_mask(dest.GetRegister());  // Destination register was filled.
     }
diff --git a/compiler/utils/riscv64/jni_macro_assembler_riscv64.h b/compiler/utils/riscv64/jni_macro_assembler_riscv64.h
index daa2a58978..93c0001e6f 100644
--- a/compiler/utils/riscv64/jni_macro_assembler_riscv64.h
+++ b/compiler/utils/riscv64/jni_macro_assembler_riscv64.h
@@ -31,7 +31,7 @@
 #include "utils/assembler.h"
 #include "utils/jni_macro_assembler.h"

-namespace art {
+namespace art HIDDEN {
 namespace riscv64 {

 class Riscv64JNIMacroAssembler : public JNIMacroAssemblerFwd<Riscv64Assembler, PointerSize::k64> {
diff --git a/compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc b/compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc
index f4e7d397a4..aab26164f3 100644
--- a/compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc
+++ b/compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc
@@ -291,8 +291,283 @@ TEST_F(JniMacroAssemblerRiscv64Test, CreateJObject) {
 }

 TEST_F(JniMacroAssemblerRiscv64Test, MoveArguments) {
-  // TODO(riscv64): Test `MoveArguments()`.
-  // We do not add the test yet while there is an outstanding FIXME in `MoveArguments()`.
+  std::string expected;
+
+  static constexpr FrameOffset kInvalidReferenceOffset =
+      JNIMacroAssembler<kArmPointerSize>::kInvalidReferenceOffset;
+  static constexpr size_t kNativePointerSize = static_cast<size_t>(kRiscv64PointerSize);
+
+  // Normal or @FastNative static with parameters "LIJIJILJI".
+  // Note: This shall not spill references to the stack. The JNI compiler spills
+  // references in an separate initial pass before moving arguments and creating `jobject`s.
+  ArgumentLocation move_dests1[] = {
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A1), kNativePointerSize),  // `jclass`
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A2), kNativePointerSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A3), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A4), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A5), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A6), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A7), kVRegSize),
+      ArgumentLocation(FrameOffset(0), kNativePointerSize),
+      ArgumentLocation(FrameOffset(8), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(16), kVRegSize),
+  };
+  ArgumentLocation move_srcs1[] = {
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A0), kNativePointerSize),  // `jclass`
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A1), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A2), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A3), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A4), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A5), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A6), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A7), kVRegSize),
+      ArgumentLocation(FrameOffset(76), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(84), kVRegSize),
+  };
+  FrameOffset move_refs1[] {
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(40),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(72),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+  };
+  __ MoveArguments(ArrayRef<ArgumentLocation>(move_dests1),
+                   ArrayRef<ArgumentLocation>(move_srcs1),
+                   ArrayRef<FrameOffset>(move_refs1));
+  expected += "beqz a7, 1f\n"
+              "addi a7, sp, 72\n"
+              "1:\n"
+              "sd a7, 0(sp)\n"
+              "ld t5, 76(sp)\n"
+              "sd t5, 8(sp)\n"
+              "lw t5, 84(sp)\n"
+              "sw t5, 16(sp)\n"
+              "mv a7, a6\n"
+              "mv a6, a5\n"
+              "mv a5, a4\n"
+              "mv a4, a3\n"
+              "mv a3, a2\n"
+              "li a2, 0\n"
+              "beqz a1, 2f\n"
+              "add a2, sp, 40\n"
+              "2:\n"
+              "mv a1, a0\n";
+
+  // Normal or @FastNative with parameters "LLIJIJIJLI" (first is `this`).
+  // Note: This shall not spill references to the stack. The JNI compiler spills
+  // references in an separate initial pass before moving arguments and creating `jobject`s.
+  ArgumentLocation move_dests2[] = {
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A1), kNativePointerSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A2), kNativePointerSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A3), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A4), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A5), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A6), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A7), kVRegSize),
+      ArgumentLocation(FrameOffset(0), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(8), kNativePointerSize),
+      ArgumentLocation(FrameOffset(16), kVRegSize),
+  };
+  ArgumentLocation move_srcs2[] = {
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A1), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A2), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A3), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A4), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A5), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A6), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A7), kVRegSize),
+      ArgumentLocation(FrameOffset(76), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(84), kVRegSize),
+      ArgumentLocation(FrameOffset(88), kVRegSize),
+  };
+  FrameOffset move_refs2[] {
+      FrameOffset(40),
+      FrameOffset(44),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(84),
+      FrameOffset(kInvalidReferenceOffset),
+  };
+  __ MoveArguments(ArrayRef<ArgumentLocation>(move_dests2),
+                   ArrayRef<ArgumentLocation>(move_srcs2),
+                   ArrayRef<FrameOffset>(move_refs2));
+  // Args in A1-A7 do not move but references are converted to `jobject`.
+  expected += "addi a1, sp, 40\n"
+              "beqz a2, 1f\n"
+              "addi a2, sp, 44\n"
+              "1:\n"
+              "ld t5, 76(sp)\n"
+              "sd t5, 0(sp)\n"
+              "lw t5, 84(sp)\n"
+              "beqz t5, 2f\n"
+              "addi t5, sp, 84\n"
+              "2:\n"
+              "sd t5, 8(sp)\n"
+              "lw t5, 88(sp)\n"
+              "sw t5, 16(sp)\n";
+
+  // Normal or @FastNative static with parameters "FDFDFDFDFDIJIJIJL".
+  ArgumentLocation move_dests3[] = {
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A1), kNativePointerSize),  // `jclass`
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA0), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA1), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA2), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA3), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA4), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA5), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA6), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA7), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A2), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A3), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A4), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A5), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A6), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A7), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(0), kVRegSize),
+      ArgumentLocation(FrameOffset(8), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(16), kNativePointerSize),
+  };
+  ArgumentLocation move_srcs3[] = {
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A0), kNativePointerSize),  // `jclass`
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA0), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA1), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA2), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA3), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA4), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA5), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA6), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA7), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(88), kVRegSize),
+      ArgumentLocation(FrameOffset(92), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A1), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A2), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A3), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A4), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A5), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A6), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A7), kVRegSize),
+  };
+  FrameOffset move_refs3[] {
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(88),
+  };
+  __ MoveArguments(ArrayRef<ArgumentLocation>(move_dests3),
+                   ArrayRef<ArgumentLocation>(move_srcs3),
+                   ArrayRef<FrameOffset>(move_refs3));
+  // FP args in FA0-FA7 do not move.
+  expected += "sw a5, 0(sp)\n"
+              "sd a6, 8(sp)\n"
+              "beqz a7, 1f\n"
+              "addi a7, sp, 88\n"
+              "1:\n"
+              "sd a7, 16(sp)\n"
+              "mv a5, a2\n"
+              "mv a6, a3\n"
+              "mv a7, a4\n"
+              "lw a2, 88(sp)\n"
+              "ld a3, 92(sp)\n"
+              "mv a4, a1\n"
+              "mv a1, a0\n";
+
+  // @CriticalNative with parameters "DFDFDFDFIDJIJFDIIJ".
+  ArgumentLocation move_dests4[] = {
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA0), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA1), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA2), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA3), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA4), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA5), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA6), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA7), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A0), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A1), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A2), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A3), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A4), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A5), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A6), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A7), kVRegSize),
+      ArgumentLocation(FrameOffset(0), kVRegSize),
+      ArgumentLocation(FrameOffset(8), 2 * kVRegSize),
+  };
+  ArgumentLocation move_srcs4[] = {
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA0), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA1), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA2), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA3), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA4), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA5), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA6), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromFRegister(FA7), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A1), kVRegSize),
+      ArgumentLocation(FrameOffset(92), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A2), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A3), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A4), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(112), kVRegSize),
+      ArgumentLocation(FrameOffset(116), 2 * kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A5), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A6), kVRegSize),
+      ArgumentLocation(Riscv64ManagedRegister::FromXRegister(A7), 2 * kVRegSize),
+  };
+  FrameOffset move_refs4[] {
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+  };
+  __ MoveArguments(ArrayRef<ArgumentLocation>(move_dests4),
+                   ArrayRef<ArgumentLocation>(move_srcs4),
+                   ArrayRef<FrameOffset>(move_refs4));
+  // FP args in FA0-FA7 and integral args in A2-A4 do not move.
+  expected += "sw a6, 0(sp)\n"
+              "sd a7, 8(sp)\n"
+              "mv a0, a1\n"
+              "ld a1, 92(sp)\n"
+              "ld a6, 116(sp)\n"
+              "mv a7, a5\n"
+              "lw a5, 112(sp)\n";
+
+  DriverStr(expected, "MoveArguments");
 }

 TEST_F(JniMacroAssemblerRiscv64Test, Move) {
diff --git a/compiler/utils/riscv64/managed_register_riscv64.cc b/compiler/utils/riscv64/managed_register_riscv64.cc
index 560019ae09..99bd4be784 100644
--- a/compiler/utils/riscv64/managed_register_riscv64.cc
+++ b/compiler/utils/riscv64/managed_register_riscv64.cc
@@ -18,7 +18,7 @@

 #include "base/globals.h"

-namespace art {
+namespace art HIDDEN {
 namespace riscv64 {

 bool Riscv64ManagedRegister::Overlaps(const Riscv64ManagedRegister& other) const {
diff --git a/compiler/utils/riscv64/managed_register_riscv64.h b/compiler/utils/riscv64/managed_register_riscv64.h
index 8e02a9dcc8..622d766945 100644
--- a/compiler/utils/riscv64/managed_register_riscv64.h
+++ b/compiler/utils/riscv64/managed_register_riscv64.h
@@ -24,7 +24,7 @@
 #include "base/macros.h"
 #include "utils/managed_register.h"

-namespace art {
+namespace art HIDDEN {
 namespace riscv64 {

 const int kNumberOfXRegIds = kNumberOfXRegisters;
diff --git a/compiler/utils/riscv64/managed_register_riscv64_test.cc b/compiler/utils/riscv64/managed_register_riscv64_test.cc
index c6ad2dc38a..d7012a796a 100644
--- a/compiler/utils/riscv64/managed_register_riscv64_test.cc
+++ b/compiler/utils/riscv64/managed_register_riscv64_test.cc
@@ -19,7 +19,7 @@

 #include "base/globals.h"
 #include "gtest/gtest.h"

-namespace art {
+namespace art HIDDEN {
 namespace riscv64 {

 TEST(Riscv64ManagedRegister, NoRegister) {