riscv64: Add JNI calling convention.

Test: m test-art-host-gtest
Bug: 283082089
Change-Id: Ie088ad01f6170ecea9c96c10199cc7efd722210c
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 00e1bb9..de8ab1f 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -131,6 +131,7 @@
         },
         riscv64: {
             srcs: [
+                "jni/quick/riscv64/calling_convention_riscv64.cc",
                 "utils/riscv64/assembler_riscv64.cc",
                 "utils/riscv64/jni_macro_assembler_riscv64.cc",
                 "utils/riscv64/managed_register_riscv64.cc",
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index e716502..cd6aac5 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -323,7 +323,7 @@
       static_assert(kCalleeSaveRegisters[lr_index].Equals(
                         Arm64ManagedRegister::FromXRegister(LR)));
       return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(
-          /*pos*/ lr_index, /*length=*/ 1u);
+          /*pos=*/ lr_index, /*length=*/ 1u);
     }
   } else {
     return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 2b9da6b..9f26e01 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -29,6 +29,10 @@
 #include "jni/quick/arm64/calling_convention_arm64.h"
 #endif
 
+#ifdef ART_ENABLE_CODEGEN_riscv64
+#include "jni/quick/riscv64/calling_convention_riscv64.h"
+#endif
+
 #ifdef ART_ENABLE_CODEGEN_x86
 #include "jni/quick/x86/calling_convention_x86.h"
 #endif
@@ -61,6 +65,12 @@
           new (allocator) arm64::Arm64ManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
 #endif
+#ifdef ART_ENABLE_CODEGEN_riscv64
+    case InstructionSet::kRiscv64:
+      return std::unique_ptr<ManagedRuntimeCallingConvention>(
+          new (allocator) riscv64::Riscv64ManagedRuntimeCallingConvention(
+              is_static, is_synchronized, shorty));
+#endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case InstructionSet::kX86:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
diff --git a/compiler/jni/quick/riscv64/calling_convention_riscv64.cc b/compiler/jni/quick/riscv64/calling_convention_riscv64.cc
new file mode 100644
index 0000000..31718a1
--- /dev/null
+++ b/compiler/jni/quick/riscv64/calling_convention_riscv64.cc
@@ -0,0 +1,410 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_riscv64.h"
+
+#include <algorithm>
+
+#include <android-base/logging.h>
+
+#include "arch/instruction_set.h"
+#include "arch/riscv64/jni_frame_riscv64.h"
+#include "utils/riscv64/managed_register_riscv64.h"
+
+namespace art HIDDEN {
+namespace riscv64 {
+
+static constexpr ManagedRegister kXArgumentRegisters[] = {
+    Riscv64ManagedRegister::FromXRegister(A0),
+    Riscv64ManagedRegister::FromXRegister(A1),
+    Riscv64ManagedRegister::FromXRegister(A2),
+    Riscv64ManagedRegister::FromXRegister(A3),
+    Riscv64ManagedRegister::FromXRegister(A4),
+    Riscv64ManagedRegister::FromXRegister(A5),
+    Riscv64ManagedRegister::FromXRegister(A6),
+    Riscv64ManagedRegister::FromXRegister(A7),
+};
+static_assert(kMaxIntLikeArgumentRegisters == arraysize(kXArgumentRegisters));
+
+static constexpr FRegister kFArgumentRegisters[] = {
+    FA0, FA1, FA2, FA3, FA4, FA5, FA6, FA7,
+};
+static_assert(kMaxFloatOrDoubleArgumentRegisters == arraysize(kFArgumentRegisters));
+
+static constexpr ManagedRegister kCalleeSaveRegisters[] = {
+    // Core registers.
+    Riscv64ManagedRegister::FromXRegister(S0),
+    // ART thread register (TR = S1) is not saved on the stack.
+    Riscv64ManagedRegister::FromXRegister(S2),
+    Riscv64ManagedRegister::FromXRegister(S3),
+    Riscv64ManagedRegister::FromXRegister(S4),
+    Riscv64ManagedRegister::FromXRegister(S5),
+    Riscv64ManagedRegister::FromXRegister(S6),
+    Riscv64ManagedRegister::FromXRegister(S7),
+    Riscv64ManagedRegister::FromXRegister(S8),
+    Riscv64ManagedRegister::FromXRegister(S9),
+    Riscv64ManagedRegister::FromXRegister(S10),
+    Riscv64ManagedRegister::FromXRegister(S11),
+    Riscv64ManagedRegister::FromXRegister(RA),
+
+    // Hard float registers.
+    Riscv64ManagedRegister::FromFRegister(FS0),
+    Riscv64ManagedRegister::FromFRegister(FS1),
+    Riscv64ManagedRegister::FromFRegister(FS2),
+    Riscv64ManagedRegister::FromFRegister(FS3),
+    Riscv64ManagedRegister::FromFRegister(FS4),
+    Riscv64ManagedRegister::FromFRegister(FS5),
+    Riscv64ManagedRegister::FromFRegister(FS6),
+    Riscv64ManagedRegister::FromFRegister(FS7),
+    Riscv64ManagedRegister::FromFRegister(FS8),
+    Riscv64ManagedRegister::FromFRegister(FS9),
+    Riscv64ManagedRegister::FromFRegister(FS10),
+    Riscv64ManagedRegister::FromFRegister(FS11),
+};
+
+template <size_t size>
+static constexpr uint32_t CalculateCoreCalleeSpillMask(
+    const ManagedRegister (&callee_saves)[size]) {
+  uint32_t result = 0u;
+  for (auto&& r : callee_saves) {
+    if (r.AsRiscv64().IsXRegister()) {
+      result |= (1u << r.AsRiscv64().AsXRegister());
+    }
+  }
+  return result;
+}
+
+template <size_t size>
+static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) {
+  uint32_t result = 0u;
+  for (auto&& r : callee_saves) {
+    if (r.AsRiscv64().IsFRegister()) {
+      result |= (1u << r.AsRiscv64().AsFRegister());
+    }
+  }
+  return result;
+}
+
+static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
+static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters);
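+
+// Derived from the arrays above: kCoreCalleeSpillMask is 0x0ffc0102 (bits 1 (RA),
+// 8 (S0) and 18-27 (S2-S11)) and kFpCalleeSpillMask is 0x0ffc0300 (bits 8-9
+// (FS0-FS1) and 18-27 (FS2-FS11)).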
+
+static constexpr ManagedRegister kNativeCalleeSaveRegisters[] = {
+    // Core registers.
+    Riscv64ManagedRegister::FromXRegister(S0),
+    Riscv64ManagedRegister::FromXRegister(S1),
+    Riscv64ManagedRegister::FromXRegister(S2),
+    Riscv64ManagedRegister::FromXRegister(S3),
+    Riscv64ManagedRegister::FromXRegister(S4),
+    Riscv64ManagedRegister::FromXRegister(S5),
+    Riscv64ManagedRegister::FromXRegister(S6),
+    Riscv64ManagedRegister::FromXRegister(S7),
+    Riscv64ManagedRegister::FromXRegister(S8),
+    Riscv64ManagedRegister::FromXRegister(S9),
+    Riscv64ManagedRegister::FromXRegister(S10),
+    Riscv64ManagedRegister::FromXRegister(S11),
+    Riscv64ManagedRegister::FromXRegister(RA),
+
+    // Hard float registers.
+    Riscv64ManagedRegister::FromFRegister(FS0),
+    Riscv64ManagedRegister::FromFRegister(FS1),
+    Riscv64ManagedRegister::FromFRegister(FS2),
+    Riscv64ManagedRegister::FromFRegister(FS3),
+    Riscv64ManagedRegister::FromFRegister(FS4),
+    Riscv64ManagedRegister::FromFRegister(FS5),
+    Riscv64ManagedRegister::FromFRegister(FS6),
+    Riscv64ManagedRegister::FromFRegister(FS7),
+    Riscv64ManagedRegister::FromFRegister(FS8),
+    Riscv64ManagedRegister::FromFRegister(FS9),
+    Riscv64ManagedRegister::FromFRegister(FS10),
+    Riscv64ManagedRegister::FromFRegister(FS11),
+};
+
+static constexpr uint32_t kNativeCoreCalleeSpillMask =
+    CalculateCoreCalleeSpillMask(kNativeCalleeSaveRegisters);
+static constexpr uint32_t kNativeFpCalleeSpillMask =
+    CalculateFpCalleeSpillMask(kNativeCalleeSaveRegisters);
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
+  if (shorty[0] == 'F' || shorty[0] == 'D') {
+    return Riscv64ManagedRegister::FromFRegister(FA0);
+  } else if (shorty[0] == 'V') {
+    return Riscv64ManagedRegister::NoRegister();
+  } else {
+    // All other return types use A0. Note that there is no managed type wide enough to use A1/FA1.
+    return Riscv64ManagedRegister::FromXRegister(A0);
+  }
+}
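+
+// For example, shorty "DIF" (double return; int and float args) selects FA0, while
+// shorty "JIF" selects A0 and shorty "VIF" selects no register at all.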
+
+// Managed runtime calling convention
+
+ManagedRegister Riscv64ManagedRuntimeCallingConvention::ReturnRegister() const {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Riscv64ManagedRuntimeCallingConvention::MethodRegister() {
+  return Riscv64ManagedRegister::FromXRegister(A0);
+}
+
+ManagedRegister Riscv64ManagedRuntimeCallingConvention::ArgumentRegisterForMethodExitHook() {
+  DCHECK(!Riscv64ManagedRegister::FromXRegister(A4).Overlaps(ReturnRegister().AsRiscv64()));
+  return Riscv64ManagedRegister::FromXRegister(A4);
+}
+
+bool Riscv64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  // Note: The managed ABI does not pass FP args in general purpose registers.
+  // This differs from the native ABI, which uses GPRs for FP args once all FP arg
+  // registers have been used.
+  if (IsCurrentParamAFloatOrDouble()) {
+    return itr_float_and_doubles_ < kMaxFloatOrDoubleArgumentRegisters;
+  } else {
+    size_t non_fp_arg_number = itr_args_ - itr_float_and_doubles_;
+    return /* method */ 1u + non_fp_arg_number < kMaxIntLikeArgumentRegisters;
+  }
+}
+
+bool Riscv64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  return !IsCurrentParamInRegister();
+}
+
+ManagedRegister Riscv64ManagedRuntimeCallingConvention::CurrentParamRegister() {
+  DCHECK(IsCurrentParamInRegister());
+  if (IsCurrentParamAFloatOrDouble()) {
+    return Riscv64ManagedRegister::FromFRegister(kFArgumentRegisters[itr_float_and_doubles_]);
+  } else {
+    size_t non_fp_arg_number = itr_args_ - itr_float_and_doubles_;
+    return kXArgumentRegisters[/* method */ 1u + non_fp_arg_number];
+  }
+}
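+
+// For example, for a static method with shorty "VIF", the int arg goes to A1 (A0
+// holds the ArtMethod*) and the float arg goes to FA0.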
+
+FrameOffset Riscv64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  return FrameOffset(displacement_.Int32Value() +  // displacement
+                     kFramePointerSize +  // Method ref
+                     (itr_slots_ * sizeof(uint32_t)));  // offset into in args
+}
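+
+// Note: The offset above scales `itr_slots_` by sizeof(uint32_t) because managed
+// stack args occupy 32-bit vreg slots (wide args take two slots).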
+
+// JNI calling convention
+
+Riscv64JniCallingConvention::Riscv64JniCallingConvention(bool is_static,
+                                                         bool is_synchronized,
+                                                         bool is_fast_native,
+                                                         bool is_critical_native,
+                                                         const char* shorty)
+    : JniCallingConvention(is_static,
+                           is_synchronized,
+                           is_fast_native,
+                           is_critical_native,
+                           shorty,
+                           kRiscv64PointerSize) {
+}
+
+ManagedRegister Riscv64JniCallingConvention::ReturnRegister() const {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Riscv64JniCallingConvention::IntReturnRegister() const {
+  return Riscv64ManagedRegister::FromXRegister(A0);
+}
+
+size_t Riscv64JniCallingConvention::FrameSize() const {
+  if (is_critical_native_) {
+    CHECK(!SpillsMethod());
+    CHECK(!HasLocalReferenceSegmentState());
+    return 0u;  // There is no managed frame for @CriticalNative.
+  }
+
+  // Method*, callee save area size, local reference segment state
+  DCHECK(SpillsMethod());
+  size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
+  size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
+  size_t total_size = method_ptr_size + callee_save_area_size;
+
+  DCHECK(HasLocalReferenceSegmentState());
+  // Cookie is saved in one of the spilled registers.
+
+  return RoundUp(total_size, kStackAlignment);
+}
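+
+// For example, with the 24 callee saves above (12 core + 12 FP), a non-critical
+// frame is 8 (Method*) + 24 * 8 = 200 bytes, rounded up to 208 for stack alignment.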
+
+size_t Riscv64JniCallingConvention::OutFrameSize() const {
+  // Count param args, including JNIEnv* and jclass.
+  size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs();
+  size_t num_fp_args = NumFloatOrDoubleArgs();
+  DCHECK_GE(all_args, num_fp_args);
+  size_t num_non_fp_args = all_args - num_fp_args;
+  // The size of outgoing arguments.
+  size_t size = GetNativeOutArgsSize(num_fp_args, num_non_fp_args);
+
+  // @CriticalNative can use a tail call as all managed callee saves are preserved
+  // by the RISC-V native calling convention.
+  static_assert((kCoreCalleeSpillMask & ~kNativeCoreCalleeSpillMask) == 0u);
+  static_assert((kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask) == 0u);
+
+  // For @CriticalNative, we can make a tail call if there are no stack args.
+  // Otherwise, add space for return PC.
+  // Note: Result does not need to be zero- or sign-extended.
+  DCHECK(!RequiresSmallResultTypeExtension());
+  if (is_critical_native_ && size != 0u) {
+    size += kFramePointerSize;  // We need to spill RA with the args.
+  }
+  size_t out_args_size = RoundUp(size, kNativeStackAlignment);
+  if (UNLIKELY(IsCriticalNative())) {
+    DCHECK_EQ(out_args_size, GetCriticalNativeStubFrameSize(GetShorty(), NumArgs() + 1u));
+  }
+  return out_args_size;
+}
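+
+// For example, a @CriticalNative method whose args all fit in A0-A7 and FA0-FA7 has
+// no stack args, so `OutFrameSize()` is 0 and the stub can use a tail call.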
+
+ArrayRef<const ManagedRegister> Riscv64JniCallingConvention::CalleeSaveRegisters() const {
+  if (UNLIKELY(IsCriticalNative())) {
+    if (UseTailCall()) {
+      return ArrayRef<const ManagedRegister>();  // Do not spill anything.
+    } else {
+      // Spill RA with out args.
+      static_assert((kCoreCalleeSpillMask & (1 << RA)) != 0u);  // Contains RA.
+      constexpr size_t ra_index = POPCOUNT(kCoreCalleeSpillMask) - 1u;
+      static_assert(kCalleeSaveRegisters[ra_index].Equals(
+                        Riscv64ManagedRegister::FromXRegister(RA)));
+      return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(
+          /*pos=*/ ra_index, /*length=*/ 1u);
+    }
+  } else {
+    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
+  }
+}
+
+ArrayRef<const ManagedRegister> Riscv64JniCallingConvention::CalleeSaveScratchRegisters() const {
+  DCHECK(!IsCriticalNative());
+  // Use S3-S11 from managed callee saves. All these registers are also native callee saves.
+  constexpr size_t kStart = 2u;
+  constexpr size_t kLength = 9u;
+  static_assert(kCalleeSaveRegisters[kStart].Equals(Riscv64ManagedRegister::FromXRegister(S3)));
+  static_assert(kCalleeSaveRegisters[kStart + kLength - 1u].Equals(
+                    Riscv64ManagedRegister::FromXRegister(S11)));
+  static_assert((kCoreCalleeSpillMask & ~kNativeCoreCalleeSpillMask) == 0u);
+  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(kStart, kLength);
+}
+
+ArrayRef<const ManagedRegister> Riscv64JniCallingConvention::ArgumentScratchRegisters() const {
+  DCHECK(!IsCriticalNative());
+  // Exclude A0 if it's used as a return register.
+  static_assert(kXArgumentRegisters[0].Equals(Riscv64ManagedRegister::FromXRegister(A0)));
+  ArrayRef<const ManagedRegister> scratch_regs(kXArgumentRegisters);
+  Riscv64ManagedRegister return_reg = ReturnRegister().AsRiscv64();
+  auto return_reg_overlaps = [return_reg](ManagedRegister reg) {
+    return return_reg.Overlaps(reg.AsRiscv64());
+  };
+  if (return_reg_overlaps(scratch_regs[0])) {
+    scratch_regs = scratch_regs.SubArray(/*pos=*/ 1u);
+  }
+  DCHECK(std::none_of(scratch_regs.begin(), scratch_regs.end(), return_reg_overlaps));
+  return scratch_regs;
+}
+
+uint32_t Riscv64JniCallingConvention::CoreSpillMask() const {
+  return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
+}
+
+uint32_t Riscv64JniCallingConvention::FpSpillMask() const {
+  return is_critical_native_ ? 0u : kFpCalleeSpillMask;
+}
+
+bool Riscv64JniCallingConvention::IsCurrentParamInRegister() {
+  // FP args use FPRs, then GPRs and only then the stack.
+  if (itr_float_and_doubles_ < kMaxFloatOrDoubleArgumentRegisters) {
+    if (IsCurrentParamAFloatOrDouble()) {
+      return true;
+    } else {
+      size_t num_non_fp_args = itr_args_ - itr_float_and_doubles_;
+      return num_non_fp_args < kMaxIntLikeArgumentRegisters;
+    }
+  } else {
+    return (itr_args_ < kMaxFloatOrDoubleArgumentRegisters + kMaxIntLikeArgumentRegisters);
+  }
+}
+
+bool Riscv64JniCallingConvention::IsCurrentParamOnStack() {
+  return !IsCurrentParamInRegister();
+}
+
+ManagedRegister Riscv64JniCallingConvention::CurrentParamRegister() {
+  // FP args use FPRs, then GPRs and only then the stack.
+  CHECK(IsCurrentParamInRegister());
+  if (itr_float_and_doubles_ < kMaxFloatOrDoubleArgumentRegisters) {
+    if (IsCurrentParamAFloatOrDouble()) {
+      return Riscv64ManagedRegister::FromFRegister(kFArgumentRegisters[itr_float_and_doubles_]);
+    } else {
+      size_t num_non_fp_args = itr_args_ - itr_float_and_doubles_;
+      DCHECK_LT(num_non_fp_args, kMaxIntLikeArgumentRegisters);
+      return kXArgumentRegisters[num_non_fp_args];
+    }
+  } else {
+    // This argument is in a GPR, whether it is an FP arg or a non-FP arg.
+    DCHECK_LT(itr_args_, kMaxFloatOrDoubleArgumentRegisters + kMaxIntLikeArgumentRegisters);
+    return kXArgumentRegisters[itr_args_ - kMaxFloatOrDoubleArgumentRegisters];
+  }
+}
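+
+// For example, for a static JNI method with nine float args, the ninth float arg sees
+// itr_args_ = 10 (JNIEnv* and jclass come first) and itr_float_and_doubles_ = 8, so
+// it is passed in kXArgumentRegisters[10 - 8], i.e. A2.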
+
+FrameOffset Riscv64JniCallingConvention::CurrentParamStackOffset() {
+  CHECK(IsCurrentParamOnStack());
+  // Account for FP arguments passed through FA0-FA7.
+  // All other args are passed through A0-A7 (even FP args) and the stack.
+  size_t num_gpr_and_stack_args =
+      itr_args_ - std::min<size_t>(kMaxFloatOrDoubleArgumentRegisters, itr_float_and_doubles_);
+  size_t args_on_stack =
+      num_gpr_and_stack_args - std::min(kMaxIntLikeArgumentRegisters, num_gpr_and_stack_args);
+  size_t offset = displacement_.Int32Value() - OutFrameSize() + (args_on_stack * kFramePointerSize);
+  CHECK_LT(offset, OutFrameSize());
+  return FrameOffset(offset);
+}
+
+bool Riscv64JniCallingConvention::RequiresSmallResultTypeExtension() const {
+  // RISC-V native calling convention requires values to be returned the way that the first
+  // argument would be passed. Arguments are zero-/sign-extended to 32 bits based on their
+  // type, then sign-extended to 64 bits. This is the same as in the ART managed ABI.
+  // (Not applicable to FP args, which are returned in `FA0`. A `float` is NaN-boxed.)
+  return false;
+}
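+
+// For example, a `boolean` result is zero-extended to 32 bits and then sign-extended
+// to 64 bits by the callee, so the stub does not need to emit any extension code.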
+
+// T0 is neither a managed callee-save register nor an argument register. It is suitable
+// for use as the locking argument for synchronized methods and as the hidden argument
+// for @CriticalNative methods.
+static void AssertT0IsNeitherCalleeSaveNorArgumentRegister() {
+  // TODO: Change to static_assert; std::none_of should be constexpr since C++20.
+  DCHECK(std::none_of(kCalleeSaveRegisters,
+                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
+                      [](ManagedRegister callee_save) constexpr {
+                        return callee_save.Equals(Riscv64ManagedRegister::FromXRegister(T0));
+                      }));
+  DCHECK(std::none_of(kXArgumentRegisters,
+                      kXArgumentRegisters + std::size(kXArgumentRegisters),
+                      [](ManagedRegister arg) { return arg.AsRiscv64().AsXRegister() == T0; }));
+}
+
+ManagedRegister Riscv64JniCallingConvention::LockingArgumentRegister() const {
+  DCHECK(!IsFastNative());
+  DCHECK(!IsCriticalNative());
+  DCHECK(IsSynchronized());
+  AssertT0IsNeitherCalleeSaveNorArgumentRegister();
+  return Riscv64ManagedRegister::FromXRegister(T0);
+}
+
+ManagedRegister Riscv64JniCallingConvention::HiddenArgumentRegister() const {
+  DCHECK(IsCriticalNative());
+  AssertT0IsNeitherCalleeSaveNorArgumentRegister();
+  return Riscv64ManagedRegister::FromXRegister(T0);
+}
+
+// Whether to use tail call (used only for @CriticalNative).
+bool Riscv64JniCallingConvention::UseTailCall() const {
+  CHECK(IsCriticalNative());
+  return OutFrameSize() == 0u;
+}
+
+}  // namespace riscv64
+}  // namespace art
diff --git a/compiler/jni/quick/riscv64/calling_convention_riscv64.h b/compiler/jni/quick/riscv64/calling_convention_riscv64.h
new file mode 100644
index 0000000..336d2bf
--- /dev/null
+++ b/compiler/jni/quick/riscv64/calling_convention_riscv64.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_RISCV64_CALLING_CONVENTION_RISCV64_H_
+#define ART_COMPILER_JNI_QUICK_RISCV64_CALLING_CONVENTION_RISCV64_H_
+
+#include "base/enums.h"
+#include "base/macros.h"
+#include "jni/quick/calling_convention.h"
+
+namespace art HIDDEN {
+namespace riscv64 {
+
+class Riscv64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
+ public:
+  Riscv64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static,
+                                        is_synchronized,
+                                        shorty,
+                                        PointerSize::k64) {}
+  ~Riscv64ManagedRuntimeCallingConvention() override {}
+  // Calling convention
+  ManagedRegister ReturnRegister() const override;
+  // Managed runtime calling convention
+  ManagedRegister MethodRegister() override;
+  ManagedRegister ArgumentRegisterForMethodExitHook() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Riscv64ManagedRuntimeCallingConvention);
+};
+
+class Riscv64JniCallingConvention final : public JniCallingConvention {
+ public:
+  Riscv64JniCallingConvention(bool is_static,
+                              bool is_synchronized,
+                              bool is_fast_native,
+                              bool is_critical_native,
+                              const char* shorty);
+  ~Riscv64JniCallingConvention() override {}
+  // Calling convention
+  ManagedRegister ReturnRegister() const override;
+  ManagedRegister IntReturnRegister() const override;
+  // JNI calling convention
+  size_t FrameSize() const override;
+  size_t OutFrameSize() const override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ArrayRef<const ManagedRegister> CalleeSaveScratchRegisters() const override;
+  ArrayRef<const ManagedRegister> ArgumentScratchRegisters() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  bool RequiresSmallResultTypeExtension() const override;
+
+  // Locking argument register, used to pass the synchronization object for calls
+  // to `JniLockObject()` and `JniUnlockObject()`.
+  ManagedRegister LockingArgumentRegister() const override;
+
+  // Hidden argument register, used to pass the method pointer for @CriticalNative call.
+  ManagedRegister HiddenArgumentRegister() const override;
+
+  // Whether to use tail call (used only for @CriticalNative).
+  bool UseTailCall() const override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Riscv64JniCallingConvention);
+};
+
+}  // namespace riscv64
+}  // namespace art
+
+#endif  // ART_COMPILER_JNI_QUICK_RISCV64_CALLING_CONVENTION_RISCV64_H_
diff --git a/runtime/arch/riscv64/jni_entrypoints_riscv64.S b/runtime/arch/riscv64/jni_entrypoints_riscv64.S
index 63412b0..0a85540 100644
--- a/runtime/arch/riscv64/jni_entrypoints_riscv64.S
+++ b/runtime/arch/riscv64/jni_entrypoints_riscv64.S
@@ -123,6 +123,7 @@
 // JNI dlsym lookup stub for @CriticalNative.
 ENTRY art_jni_dlsym_lookup_critical_stub
     // The hidden arg holding the tagged method is t6 (loaded by art_quick_generic_jni_trampoline).
+    // FIXME(riscv64): Use T0 to align with the JNI calling convention.
     // Bit 0 set means generic JNI.
     // For generic JNI we already have a managed frame, so we reuse the art_jni_dlsym_lookup_stub.
     andi  t6, t6, 1
diff --git a/runtime/arch/riscv64/jni_frame_riscv64.h b/runtime/arch/riscv64/jni_frame_riscv64.h
new file mode 100644
index 0000000..0228d67
--- /dev/null
+++ b/runtime/arch/riscv64/jni_frame_riscv64.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_RISCV64_JNI_FRAME_RISCV64_H_
+#define ART_RUNTIME_ARCH_RISCV64_JNI_FRAME_RISCV64_H_
+
+#include <string.h>
+
+#include <algorithm>
+
+#include "arch/instruction_set.h"
+#include "base/bit_utils.h"
+#include "base/globals.h"
+#include "base/logging.h"
+
+namespace art {
+namespace riscv64 {
+
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
+static_assert(kRiscv64PointerSize == PointerSize::k64, "Unexpected RISCV64 pointer size");
+
+// The RISC-V native calling convention requires 16-byte stack alignment. This is the
+// same as the Managed ABI stack alignment.
+static constexpr size_t kNativeStackAlignment = 16u;
+static_assert(kNativeStackAlignment == kStackAlignment);
+
+// Up to how many float-like (float, double) args can be in FP registers.
+// The rest of the args must go to general purpose registers (native ABI only) or on the stack.
+constexpr size_t kMaxFloatOrDoubleArgumentRegisters = 8u;
+// Up to how many integer-like (pointers, objects, longs, int, short, bool, etc) args can be
+// in registers. The rest of the args must go on the stack. Note that even FP args can use these
+// registers in native ABI after using all FP arg registers. We do not pass FP args in registers in
+// managed ABI to avoid some complexity in the compiler - more than 8 FP args are quite rare anyway.
+constexpr size_t kMaxIntLikeArgumentRegisters = 8u;
+
+// Get the size of the arguments for a native call.
+inline size_t GetNativeOutArgsSize(size_t num_fp_args, size_t num_non_fp_args) {
+  // Account for FP arguments passed through FA0-FA7.
+  size_t num_fp_args_without_fprs =
+      num_fp_args - std::min(kMaxFloatOrDoubleArgumentRegisters, num_fp_args);
+  // All other args are passed through A0-A7 (even FP args) and the stack.
+  size_t num_gpr_and_stack_args = num_non_fp_args + num_fp_args_without_fprs;
+  size_t num_stack_args =
+      num_gpr_and_stack_args - std::min(kMaxIntLikeArgumentRegisters, num_gpr_and_stack_args);
+  // Each stack argument takes 8 bytes.
+  return num_stack_args * static_cast<size_t>(kRiscv64PointerSize);
+}
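+
+// For example, with 10 FP args and 7 non-FP args, two FP args overflow to GPRs,
+// giving 9 GPR candidates; one of those does not fit in A0-A7, so the call needs
+// 1 * 8 = 8 bytes of stack for outgoing args.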
+
+// Get stack args size for @CriticalNative method calls.
+inline size_t GetCriticalNativeCallArgsSize(const char* shorty, uint32_t shorty_len) {
+  DCHECK_EQ(shorty_len, strlen(shorty));
+
+  size_t num_fp_args =
+      std::count_if(shorty + 1, shorty + shorty_len, [](char c) { return c == 'F' || c == 'D'; });
+  size_t num_non_fp_args = shorty_len - 1u - num_fp_args;
+
+  return GetNativeOutArgsSize(num_fp_args, num_non_fp_args);
+}
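+
+// For example, shorty "IDF" (int return; double and float args) yields
+// num_fp_args = 2 and num_non_fp_args = 0.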
+
+// Get the frame size for @CriticalNative method stub.
+// This must match the size of the extra frame emitted by the compiler at the native call site.
+inline size_t GetCriticalNativeStubFrameSize(const char* shorty, uint32_t shorty_len) {
+  // The size of outgoing arguments.
+  size_t size = GetCriticalNativeCallArgsSize(shorty, shorty_len);
+
+  // We can make a tail call if there are no stack args. Otherwise, add space for return PC.
+  // Note: Result does not need to be zero- or sign-extended.
+  if (size != 0u) {
+    size += kFramePointerSize;  // We need to spill RA with the args.
+  }
+  return RoundUp(size, kNativeStackAlignment);
+}
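+
+// For example, with nine integer args, one arg spills to the stack (8 bytes) and RA
+// needs a spill slot (8 bytes), giving a 16-byte stub frame.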
+
+// Get the frame size for direct call to a @CriticalNative method.
+// This must match the size of the frame emitted by the JNI compiler at the native call site.
+inline size_t GetCriticalNativeDirectCallFrameSize(const char* shorty, uint32_t shorty_len) {
+  // The size of outgoing arguments.
+  size_t size = GetCriticalNativeCallArgsSize(shorty, shorty_len);
+
+  // No return PC to save.
+  return RoundUp(size, kNativeStackAlignment);
+}
+
+}  // namespace riscv64
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_RISCV64_JNI_FRAME_RISCV64_H_
diff --git a/runtime/arch/riscv64/quick_entrypoints_riscv64.S b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
index 49289df..c92687a 100644
--- a/runtime/arch/riscv64/quick_entrypoints_riscv64.S
+++ b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
@@ -340,6 +340,7 @@
     fld  fa6, 8*14(sp)
     fld  fa7, 8*15(sp)
 
+    // FIXME(riscv64): Use T0 to align with the JNI calling convention.
     ld  t6, 8*16(sp)  // @CriticalNative arg, used by art_jni_dlsym_lookup_critical_stub
 
     ld  t1, 8*17(sp)  // restore stack
@@ -414,6 +415,7 @@
     SETUP_SAVE_EVERYTHING_FRAME \
         RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
 
+    // frame_size is passed in A4 from JITed code and `art_quick_generic_jni_trampoline`.
     addi a3, sp, SAVE_EVERYTHING_FRAME_OFFSET_FA0  // FP result ptr in kSaveEverything frame
     addi a2, sp, SAVE_EVERYTHING_FRAME_OFFSET_A0   // integer result ptr in kSaveEverything frame
     addi a1, sp, FRAME_SIZE_SAVE_EVERYTHING        // ArtMethod**
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 905cee2..71fff04 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1526,6 +1526,7 @@
           PushFpr4(val);
         }
       } else {
+        // FIXME(riscv64): Excess FP args can be passed in available GPRs.
         stack_entries_++;
         PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val)));
         fpr_index_ = 0;
@@ -1561,6 +1562,7 @@
         PushFpr8(val);
         fpr_index_ -= kRegistersNeededForDouble;
       } else {
+        // FIXME(riscv64): Excess FP args can be passed in available GPRs.
         if (DoubleStackNeedsPadding()) {
           PushStack(0);
           stack_entries_++;