author     2023-06-09 11:05:33 +0000
committer  2023-06-13 09:19:53 +0000
commit     0a9a7f4083287df6bbf7794c8db341828e83622f (patch)
tree       771fd0084cd67199635c32e3f74ae52fb68bd088 /compiler
parent     b89b62d8ca79c1a8d349c6c30f43bfa8b9a43ed3 (diff)
riscv64: Add JNI calling convention.
Test: m test-art-host-gtest
Bug: 283082089
Change-Id: Ie088ad01f6170ecea9c96c10199cc7efd722210c
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.bp                                          1
-rw-r--r--  compiler/jni/quick/arm64/calling_convention_arm64.cc         2
-rw-r--r--  compiler/jni/quick/calling_convention.cc                    10
-rw-r--r--  compiler/jni/quick/riscv64/calling_convention_riscv64.cc   410
-rw-r--r--  compiler/jni/quick/riscv64/calling_convention_riscv64.h     91
5 files changed, 513 insertions, 1 deletion
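The hunks below wire the new files into the build and into the calling-convention factory, then add the RISC-V 64 convention itself. For orientation, the native argument-passing rule the new `Riscv64JniCallingConvention` implements (per its own comments) is: FP args use FA0-FA7 first, overflow into the integer argument registers A0-A7, and only then go on the stack. A minimal standalone C++ sketch of that rule follows; `Loc`, `AssignArgs`, and the vector-of-flags interface are invented for illustration and are not ART types.

#include <cstddef>
#include <vector>

enum class Loc { kFpReg, kGpReg, kStack };

constexpr std::size_t kMaxFpRegs = 8u;  // FA0-FA7
constexpr std::size_t kMaxGpRegs = 8u;  // A0-A7

// One entry per argument: true if the argument is float/double.
std::vector<Loc> AssignArgs(const std::vector<bool>& is_fp_arg) {
  std::vector<Loc> locs;
  std::size_t fp_used = 0u;
  std::size_t gp_used = 0u;
  for (bool is_fp : is_fp_arg) {
    if (is_fp && fp_used < kMaxFpRegs) {
      locs.push_back(Loc::kFpReg);  // Next free FA register.
      ++fp_used;
    } else if (gp_used < kMaxGpRegs) {
      locs.push_back(Loc::kGpReg);  // Non-FP arg, or FP arg once FA0-FA7 are full.
      ++gp_used;
    } else {
      locs.push_back(Loc::kStack);  // Both register files exhausted.
    }
  }
  return locs;
}

For example, nine doubles and two ints assign the first eight doubles to FA0-FA7, the ninth double and both ints to A0-A2, and nothing to the stack. The managed ABI handled by `Riscv64ManagedRuntimeCallingConvention` deliberately differs: FP args never spill into GPRs, and A0 carries the `ArtMethod*`.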
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 00e1bb9713..de8ab1f986 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -131,6 +131,7 @@ art_cc_defaults {
         },
         riscv64: {
             srcs: [
+                "jni/quick/riscv64/calling_convention_riscv64.cc",
                 "utils/riscv64/assembler_riscv64.cc",
                 "utils/riscv64/jni_macro_assembler_riscv64.cc",
                 "utils/riscv64/managed_register_riscv64.cc",
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index e716502911..cd6aac517d 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -323,7 +323,7 @@ ArrayRef<const ManagedRegister> Arm64JniCallingConvention::CalleeSaveRegisters()
       static_assert(kCalleeSaveRegisters[lr_index].Equals(
           Arm64ManagedRegister::FromXRegister(LR)));
       return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(
-          /*pos*/ lr_index, /*length=*/ 1u);
+          /*pos=*/ lr_index, /*length=*/ 1u);
     }
   } else {
     return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 2b9da6ba1a..9f26e01154 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -29,6 +29,10 @@
 #include "jni/quick/arm64/calling_convention_arm64.h"
 #endif
 
+#ifdef ART_ENABLE_CODEGEN_riscv64
+#include "jni/quick/riscv64/calling_convention_riscv64.h"
+#endif
+
 #ifdef ART_ENABLE_CODEGEN_x86
 #include "jni/quick/x86/calling_convention_x86.h"
 #endif
@@ -61,6 +65,12 @@ std::unique_ptr<ManagedRuntimeCallingConvention> ManagedRuntimeCallingConvention
           new (allocator) arm64::Arm64ManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
 #endif
+#ifdef ART_ENABLE_CODEGEN_riscv64
+    case InstructionSet::kRiscv64:
+      return std::unique_ptr<ManagedRuntimeCallingConvention>(
+          new (allocator) riscv64::Riscv64ManagedRuntimeCallingConvention(
+              is_static, is_synchronized, shorty));
+#endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case InstructionSet::kX86:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
diff --git a/compiler/jni/quick/riscv64/calling_convention_riscv64.cc b/compiler/jni/quick/riscv64/calling_convention_riscv64.cc
new file mode 100644
index 0000000000..31718a13ee
--- /dev/null
+++ b/compiler/jni/quick/riscv64/calling_convention_riscv64.cc
@@ -0,0 +1,410 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_riscv64.h"
+
+#include <android-base/logging.h>
+
+#include "arch/instruction_set.h"
+#include "arch/riscv64/jni_frame_riscv64.h"
+#include "utils/riscv64/managed_register_riscv64.h"
+
+namespace art HIDDEN {
+namespace riscv64 {
+
+static constexpr ManagedRegister kXArgumentRegisters[] = {
+    Riscv64ManagedRegister::FromXRegister(A0),
+    Riscv64ManagedRegister::FromXRegister(A1),
+    Riscv64ManagedRegister::FromXRegister(A2),
+    Riscv64ManagedRegister::FromXRegister(A3),
+    Riscv64ManagedRegister::FromXRegister(A4),
+    Riscv64ManagedRegister::FromXRegister(A5),
+    Riscv64ManagedRegister::FromXRegister(A6),
+    Riscv64ManagedRegister::FromXRegister(A7),
+};
+static_assert(kMaxIntLikeArgumentRegisters == arraysize(kXArgumentRegisters));
+
+static const FRegister kFArgumentRegisters[] = {
+    FA0, FA1, FA2, FA3, FA4, FA5, FA6, FA7
+};
+static_assert(kMaxFloatOrDoubleArgumentRegisters == arraysize(kFArgumentRegisters));
+
+static constexpr ManagedRegister kCalleeSaveRegisters[] = {
+    // Core registers.
+    Riscv64ManagedRegister::FromXRegister(S0),
+    // ART thread register (TR = S1) is not saved on the stack.
+    Riscv64ManagedRegister::FromXRegister(S2),
+    Riscv64ManagedRegister::FromXRegister(S3),
+    Riscv64ManagedRegister::FromXRegister(S4),
+    Riscv64ManagedRegister::FromXRegister(S5),
+    Riscv64ManagedRegister::FromXRegister(S6),
+    Riscv64ManagedRegister::FromXRegister(S7),
+    Riscv64ManagedRegister::FromXRegister(S8),
+    Riscv64ManagedRegister::FromXRegister(S9),
+    Riscv64ManagedRegister::FromXRegister(S10),
+    Riscv64ManagedRegister::FromXRegister(S11),
+    Riscv64ManagedRegister::FromXRegister(RA),
+
+    // Hard float registers.
+    Riscv64ManagedRegister::FromFRegister(FS0),
+    Riscv64ManagedRegister::FromFRegister(FS1),
+    Riscv64ManagedRegister::FromFRegister(FS2),
+    Riscv64ManagedRegister::FromFRegister(FS3),
+    Riscv64ManagedRegister::FromFRegister(FS4),
+    Riscv64ManagedRegister::FromFRegister(FS5),
+    Riscv64ManagedRegister::FromFRegister(FS6),
+    Riscv64ManagedRegister::FromFRegister(FS7),
+    Riscv64ManagedRegister::FromFRegister(FS8),
+    Riscv64ManagedRegister::FromFRegister(FS9),
+    Riscv64ManagedRegister::FromFRegister(FS10),
+    Riscv64ManagedRegister::FromFRegister(FS11),
+};
+
+template <size_t size>
+static constexpr uint32_t CalculateCoreCalleeSpillMask(
+    const ManagedRegister (&callee_saves)[size]) {
+  uint32_t result = 0u;
+  for (auto&& r : callee_saves) {
+    if (r.AsRiscv64().IsXRegister()) {
+      result |= (1u << r.AsRiscv64().AsXRegister());
+    }
+  }
+  return result;
+}
+
+template <size_t size>
+static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) {
+  uint32_t result = 0u;
+  for (auto&& r : callee_saves) {
+    if (r.AsRiscv64().IsFRegister()) {
+      result |= (1u << r.AsRiscv64().AsFRegister());
+    }
+  }
+  return result;
+}
+
+static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
+static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters);
+
+static constexpr ManagedRegister kNativeCalleeSaveRegisters[] = {
+    // Core registers.
+    Riscv64ManagedRegister::FromXRegister(S0),
+    Riscv64ManagedRegister::FromXRegister(S1),
+    Riscv64ManagedRegister::FromXRegister(S2),
+    Riscv64ManagedRegister::FromXRegister(S3),
+    Riscv64ManagedRegister::FromXRegister(S4),
+    Riscv64ManagedRegister::FromXRegister(S5),
+    Riscv64ManagedRegister::FromXRegister(S6),
+    Riscv64ManagedRegister::FromXRegister(S7),
+    Riscv64ManagedRegister::FromXRegister(S8),
+    Riscv64ManagedRegister::FromXRegister(S9),
+    Riscv64ManagedRegister::FromXRegister(S10),
+    Riscv64ManagedRegister::FromXRegister(S11),
+    Riscv64ManagedRegister::FromXRegister(RA),
+
+    // Hard float registers.
+    Riscv64ManagedRegister::FromFRegister(FS0),
+    Riscv64ManagedRegister::FromFRegister(FS1),
+    Riscv64ManagedRegister::FromFRegister(FS2),
+    Riscv64ManagedRegister::FromFRegister(FS3),
+    Riscv64ManagedRegister::FromFRegister(FS4),
+    Riscv64ManagedRegister::FromFRegister(FS5),
+    Riscv64ManagedRegister::FromFRegister(FS6),
+    Riscv64ManagedRegister::FromFRegister(FS7),
+    Riscv64ManagedRegister::FromFRegister(FS8),
+    Riscv64ManagedRegister::FromFRegister(FS9),
+    Riscv64ManagedRegister::FromFRegister(FS10),
+    Riscv64ManagedRegister::FromFRegister(FS11),
+};
+
+static constexpr uint32_t kNativeCoreCalleeSpillMask =
+    CalculateCoreCalleeSpillMask(kNativeCalleeSaveRegisters);
+static constexpr uint32_t kNativeFpCalleeSpillMask =
+    CalculateFpCalleeSpillMask(kNativeCalleeSaveRegisters);
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
+  if (shorty[0] == 'F' || shorty[0] == 'D') {
+    return Riscv64ManagedRegister::FromFRegister(FA0);
+  } else if (shorty[0] == 'V') {
+    return Riscv64ManagedRegister::NoRegister();
+  } else {
+    // All other return types use A0. Note that there is no managed type wide enough to use A1/FA1.
+    return Riscv64ManagedRegister::FromXRegister(A0);
+  }
+}
+
+// Managed runtime calling convention
+
+ManagedRegister Riscv64ManagedRuntimeCallingConvention::ReturnRegister() const {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Riscv64ManagedRuntimeCallingConvention::MethodRegister() {
+  return Riscv64ManagedRegister::FromXRegister(A0);
+}
+
+ManagedRegister Riscv64ManagedRuntimeCallingConvention::ArgumentRegisterForMethodExitHook() {
+  DCHECK(!Riscv64ManagedRegister::FromXRegister(A4).Overlaps(ReturnRegister().AsRiscv64()));
+  return Riscv64ManagedRegister::FromXRegister(A4);
+}
+
+bool Riscv64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  // Note: The managed ABI does not pass FP args in general purpose registers.
+  // This differs from the native ABI which does that after using all FP arg registers.
+  if (IsCurrentParamAFloatOrDouble()) {
+    return itr_float_and_doubles_ < kMaxFloatOrDoubleArgumentRegisters;
+  } else {
+    size_t non_fp_arg_number = itr_args_ - itr_float_and_doubles_;
+    return /* method */ 1u + non_fp_arg_number < kMaxIntLikeArgumentRegisters;
+  }
+}
+
+bool Riscv64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  return !IsCurrentParamInRegister();
+}
+
+ManagedRegister Riscv64ManagedRuntimeCallingConvention::CurrentParamRegister() {
+  DCHECK(IsCurrentParamInRegister());
+  if (IsCurrentParamAFloatOrDouble()) {
+    return Riscv64ManagedRegister::FromFRegister(kFArgumentRegisters[itr_float_and_doubles_]);
+  } else {
+    size_t non_fp_arg_number = itr_args_ - itr_float_and_doubles_;
+    return kXArgumentRegisters[/* method */ 1u + non_fp_arg_number];
+  }
+}
+
+FrameOffset Riscv64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  return FrameOffset(displacement_.Int32Value() +       // displacement
+                     kFramePointerSize +                // Method ref
+                     (itr_slots_ * sizeof(uint32_t)));  // offset into in args
+}
+
+// JNI calling convention
+
+Riscv64JniCallingConvention::Riscv64JniCallingConvention(bool is_static,
+                                                         bool is_synchronized,
+                                                         bool is_fast_native,
+                                                         bool is_critical_native,
+                                                         const char* shorty)
+    : JniCallingConvention(is_static,
+                           is_synchronized,
+                           is_fast_native,
+                           is_critical_native,
+                           shorty,
+                           kRiscv64PointerSize) {
+}
+
+ManagedRegister Riscv64JniCallingConvention::ReturnRegister() const {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Riscv64JniCallingConvention::IntReturnRegister() const {
+  return Riscv64ManagedRegister::FromXRegister(A0);
+}
+
+size_t Riscv64JniCallingConvention::FrameSize() const {
+  if (is_critical_native_) {
+    CHECK(!SpillsMethod());
+    CHECK(!HasLocalReferenceSegmentState());
+    return 0u;  // There is no managed frame for @CriticalNative.
+  }
+
+  // Method*, callee save area size, local reference segment state
+  DCHECK(SpillsMethod());
+  size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
+  size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
+  size_t total_size = method_ptr_size + callee_save_area_size;
+
+  DCHECK(HasLocalReferenceSegmentState());
+  // Cookie is saved in one of the spilled registers.
+
+  return RoundUp(total_size, kStackAlignment);
+}
+
+size_t Riscv64JniCallingConvention::OutFrameSize() const {
+  // Count param args, including JNIEnv* and jclass*.
+  size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs();
+  size_t num_fp_args = NumFloatOrDoubleArgs();
+  DCHECK_GE(all_args, num_fp_args);
+  size_t num_non_fp_args = all_args - num_fp_args;
+  // The size of outgoing arguments.
+  size_t size = GetNativeOutArgsSize(num_fp_args, num_non_fp_args);
+
+  // @CriticalNative can use tail call as all managed callee saves are preserved by the
+  // native ABI.
+  static_assert((kCoreCalleeSpillMask & ~kNativeCoreCalleeSpillMask) == 0u);
+  static_assert((kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask) == 0u);
+
+  // For @CriticalNative, we can make a tail call if there are no stack args.
+  // Otherwise, add space for return PC.
+  // Note: Result does not need to be zero- or sign-extended.
+  DCHECK(!RequiresSmallResultTypeExtension());
+  if (is_critical_native_ && size != 0u) {
+    size += kFramePointerSize;  // We need to spill RA with the args.
+  }
+  size_t out_args_size = RoundUp(size, kNativeStackAlignment);
+  if (UNLIKELY(IsCriticalNative())) {
+    DCHECK_EQ(out_args_size, GetCriticalNativeStubFrameSize(GetShorty(), NumArgs() + 1u));
+  }
+  return out_args_size;
+}
+
+ArrayRef<const ManagedRegister> Riscv64JniCallingConvention::CalleeSaveRegisters() const {
+  if (UNLIKELY(IsCriticalNative())) {
+    if (UseTailCall()) {
+      return ArrayRef<const ManagedRegister>();  // Do not spill anything.
+    } else {
+      // Spill RA with out args.
+      static_assert((kCoreCalleeSpillMask & (1 << RA)) != 0u);  // Contains RA.
+      constexpr size_t ra_index = POPCOUNT(kCoreCalleeSpillMask) - 1u;
+      static_assert(kCalleeSaveRegisters[ra_index].Equals(
+          Riscv64ManagedRegister::FromXRegister(RA)));
+      return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(
+          /*pos=*/ ra_index, /*length=*/ 1u);
+    }
+  } else {
+    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
+  }
+}
+
+ArrayRef<const ManagedRegister> Riscv64JniCallingConvention::CalleeSaveScratchRegisters() const {
+  DCHECK(!IsCriticalNative());
+  // Use S3-S11 from managed callee saves. All these registers are also native callee saves.
+  constexpr size_t kStart = 2u;
+  constexpr size_t kLength = 9u;
+  static_assert(kCalleeSaveRegisters[kStart].Equals(Riscv64ManagedRegister::FromXRegister(S3)));
+  static_assert(kCalleeSaveRegisters[kStart + kLength - 1u].Equals(
+      Riscv64ManagedRegister::FromXRegister(S11)));
+  static_assert((kCoreCalleeSpillMask & ~kNativeCoreCalleeSpillMask) == 0u);
+  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(kStart, kLength);
+}
+
+ArrayRef<const ManagedRegister> Riscv64JniCallingConvention::ArgumentScratchRegisters() const {
+  DCHECK(!IsCriticalNative());
+  // Exclude A0 if it's used as a return register.
+  static_assert(kXArgumentRegisters[0].Equals(Riscv64ManagedRegister::FromXRegister(A0)));
+  ArrayRef<const ManagedRegister> scratch_regs(kXArgumentRegisters);
+  Riscv64ManagedRegister return_reg = ReturnRegister().AsRiscv64();
+  auto return_reg_overlaps = [return_reg](ManagedRegister reg) {
+    return return_reg.Overlaps(reg.AsRiscv64());
+  };
+  if (return_reg_overlaps(scratch_regs[0])) {
+    scratch_regs = scratch_regs.SubArray(/*pos=*/ 1u);
+  }
+  DCHECK(std::none_of(scratch_regs.begin(), scratch_regs.end(), return_reg_overlaps));
+  return scratch_regs;
+}
+
+uint32_t Riscv64JniCallingConvention::CoreSpillMask() const {
+  return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
+}
+
+uint32_t Riscv64JniCallingConvention::FpSpillMask() const {
+  return is_critical_native_ ? 0u : kFpCalleeSpillMask;
+}
+
+bool Riscv64JniCallingConvention::IsCurrentParamInRegister() {
+  // FP args use FPRs, then GPRs and only then the stack.
+  if (itr_float_and_doubles_ < kMaxFloatOrDoubleArgumentRegisters) {
+    if (IsCurrentParamAFloatOrDouble()) {
+      return true;
+    } else {
+      size_t num_non_fp_args = itr_args_ - itr_float_and_doubles_;
+      return num_non_fp_args < kMaxIntLikeArgumentRegisters;
+    }
+  } else {
+    return (itr_args_ < kMaxFloatOrDoubleArgumentRegisters + kMaxIntLikeArgumentRegisters);
+  }
+}
+
+bool Riscv64JniCallingConvention::IsCurrentParamOnStack() {
+  return !IsCurrentParamInRegister();
+}
+
+ManagedRegister Riscv64JniCallingConvention::CurrentParamRegister() {
+  // FP args use FPRs, then GPRs and only then the stack.
+  CHECK(IsCurrentParamInRegister());
+  if (itr_float_and_doubles_ < kMaxFloatOrDoubleArgumentRegisters) {
+    if (IsCurrentParamAFloatOrDouble()) {
+      return Riscv64ManagedRegister::FromFRegister(kFArgumentRegisters[itr_float_and_doubles_]);
+    } else {
+      size_t num_non_fp_args = itr_args_ - itr_float_and_doubles_;
+      DCHECK_LT(num_non_fp_args, kMaxIntLikeArgumentRegisters);
+      return kXArgumentRegisters[num_non_fp_args];
+    }
+  } else {
+    // This argument is in a GPR, whether it's a FP arg or a non-FP arg.
+    DCHECK_LT(itr_args_, kMaxFloatOrDoubleArgumentRegisters + kMaxIntLikeArgumentRegisters);
+    return kXArgumentRegisters[itr_args_ - kMaxFloatOrDoubleArgumentRegisters];
+  }
+}
+
+FrameOffset Riscv64JniCallingConvention::CurrentParamStackOffset() {
+  CHECK(IsCurrentParamOnStack());
+  // Account for FP arguments passed through FA0-FA7.
+  // All other args are passed through A0-A7 (even FP args) and the stack.
+  size_t num_gpr_and_stack_args =
+      itr_args_ - std::min<size_t>(kMaxFloatOrDoubleArgumentRegisters, itr_float_and_doubles_);
+  size_t args_on_stack =
+      num_gpr_and_stack_args - std::min(kMaxIntLikeArgumentRegisters, num_gpr_and_stack_args);
+  size_t offset = displacement_.Int32Value() - OutFrameSize() + (args_on_stack * kFramePointerSize);
+  CHECK_LT(offset, OutFrameSize());
+  return FrameOffset(offset);
+}
+
+bool Riscv64JniCallingConvention::RequiresSmallResultTypeExtension() const {
+  // RISC-V native calling convention requires values to be returned the way that the first
+  // argument would be passed. Arguments are zero-/sign-extended to 32 bits based on their
+  // type, then sign-extended to 64 bits. This is the same as in the ART managed ABI.
+  // (Not applicable to FP args which are returned in `FA0`. A `float` is NaN-boxed.)
+  return false;
+}
+
+// T0 is neither managed callee-save, nor argument register. It is suitable for use as the
+// locking argument for synchronized methods and hidden argument for @CriticalNative methods.
+static void AssertT0IsNeitherCalleeSaveNorArgumentRegister() {
+  // TODO: Change to static_assert; std::none_of should be constexpr since C++20.
+  DCHECK(std::none_of(kCalleeSaveRegisters,
+                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
+                      [](ManagedRegister callee_save) constexpr {
+                        return callee_save.Equals(Riscv64ManagedRegister::FromXRegister(T0));
+                      }));
+  DCHECK(std::none_of(kXArgumentRegisters,
+                      kXArgumentRegisters + std::size(kXArgumentRegisters),
+                      [](ManagedRegister arg) { return arg.AsRiscv64().AsXRegister() == T0; }));
+}
+
+ManagedRegister Riscv64JniCallingConvention::LockingArgumentRegister() const {
+  DCHECK(!IsFastNative());
+  DCHECK(!IsCriticalNative());
+  DCHECK(IsSynchronized());
+  AssertT0IsNeitherCalleeSaveNorArgumentRegister();
+  return Riscv64ManagedRegister::FromXRegister(T0);
+}
+
+ManagedRegister Riscv64JniCallingConvention::HiddenArgumentRegister() const {
+  DCHECK(IsCriticalNative());
+  AssertT0IsNeitherCalleeSaveNorArgumentRegister();
+  return Riscv64ManagedRegister::FromXRegister(T0);
+}
+
+// Whether to use tail call (used only for @CriticalNative).
+bool Riscv64JniCallingConvention::UseTailCall() const {
+  CHECK(IsCriticalNative());
+  return OutFrameSize() == 0u;
+}
+
+}  // namespace riscv64
+}  // namespace art
diff --git a/compiler/jni/quick/riscv64/calling_convention_riscv64.h b/compiler/jni/quick/riscv64/calling_convention_riscv64.h
new file mode 100644
index 0000000000..336d2bf374
--- /dev/null
+++ b/compiler/jni/quick/riscv64/calling_convention_riscv64.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_RISCV64_CALLING_CONVENTION_RISCV64_H_
+#define ART_COMPILER_JNI_QUICK_RISCV64_CALLING_CONVENTION_RISCV64_H_
+
+#include "base/enums.h"
+#include "base/macros.h"
+#include "jni/quick/calling_convention.h"
+
+namespace art HIDDEN {
+namespace riscv64 {
+
+class Riscv64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
+ public:
+  Riscv64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static,
+                                        is_synchronized,
+                                        shorty,
+                                        PointerSize::k64) {}
+  ~Riscv64ManagedRuntimeCallingConvention() override {}
+  // Calling convention
+  ManagedRegister ReturnRegister() const override;
+  // Managed runtime calling convention
+  ManagedRegister MethodRegister() override;
+  ManagedRegister ArgumentRegisterForMethodExitHook() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Riscv64ManagedRuntimeCallingConvention);
+};
+
+class Riscv64JniCallingConvention final : public JniCallingConvention {
+ public:
+  Riscv64JniCallingConvention(bool is_static,
+                              bool is_synchronized,
+                              bool is_fast_native,
+                              bool is_critical_native,
+                              const char* shorty);
+  ~Riscv64JniCallingConvention() override {}
+  // Calling convention
+  ManagedRegister ReturnRegister() const override;
+  ManagedRegister IntReturnRegister() const override;
+  // JNI calling convention
+  size_t FrameSize() const override;
+  size_t OutFrameSize() const override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ArrayRef<const ManagedRegister> CalleeSaveScratchRegisters() const override;
+  ArrayRef<const ManagedRegister> ArgumentScratchRegisters() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  bool RequiresSmallResultTypeExtension() const override;
+
+  // Locking argument register, used to pass the synchronization object for calls
+  // to `JniLockObject()` and `JniUnlockObject()`.
+  ManagedRegister LockingArgumentRegister() const override;
+
+  // Hidden argument register, used to pass the method pointer for @CriticalNative call.
+  ManagedRegister HiddenArgumentRegister() const override;
+
+  // Whether to use tail call (used only for @CriticalNative).
+  bool UseTailCall() const override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Riscv64JniCallingConvention);
+};
+
+}  // namespace riscv64
+}  // namespace art
+
+#endif  // ART_COMPILER_JNI_QUICK_RISCV64_CALLING_CONVENTION_RISCV64_H_
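A closing note on the spill-mask machinery in the new .cc file: the `POPCOUNT(kCoreCalleeSpillMask) - 1u` trick in `CalleeSaveRegisters()` works only because the core callee-saves come first in `kCalleeSaveRegisters` and RA is listed last among them, which the static_asserts there pin down. A standalone constexpr sketch, assuming only the standard RISC-V register numbering (RA = x1, S0/S1 = x8/x9, S2-S11 = x18-x27); the enum and helper below are invented for illustration and are not the ART types:

#include <bit>
#include <cstddef>
#include <cstdint>

// Register numbers follow the RISC-V encoding; only the registers needed here.
enum XRegister : uint32_t {
  RA = 1,           // x1, return address
  S0 = 8, S1 = 9,   // x8, x9
  S2 = 18, S3 = 19, S4 = 20, S5 = 21, S6 = 22, S7 = 23,
  S8 = 24, S9 = 25, S10 = 26, S11 = 27,
};

// Managed core callee-saves from the diff: S0, S2-S11, RA (S1 is the ART thread register).
constexpr XRegister kManagedCoreCalleeSaves[] = {
    S0, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, RA,
};

template <std::size_t n>
constexpr uint32_t CalculateSpillMask(const XRegister (&regs)[n]) {
  uint32_t mask = 0u;
  for (XRegister r : regs) {
    mask |= 1u << r;  // One bit per register number, as in the ART helpers.
  }
  return mask;
}

constexpr uint32_t kMask = CalculateSpillMask(kManagedCoreCalleeSaves);
// Bit 1 (RA), bit 8 (S0), and bits 18-27 (S2-S11) are set.
static_assert(kMask == ((1u << RA) | (1u << S0) | (0x3FFu << S2)));
// Twelve bits set; since RA is listed last among the core entries,
// popcount(mask) - 1 is RA's index in the array.
static_assert(std::popcount(kMask) - 1 == 11);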