Add jni macro assembler skeleton for riscv64.
Note: Most functions are unimplemented.
Test: m test-art-host-gtest
Bug: 283082089
Signed-off-by: Lifang Xia <lifang_xia@linux.alibaba.com>
Signed-off-by: Wendong Wang <wangwd@xcvmbyte.com>
Change-Id: Ifb0c6c15bd9e1159ea2c90edc306335ca634c1d8
diff --git a/compiler/Android.bp b/compiler/Android.bp
index acb15d9..00e1bb9 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -132,6 +132,7 @@
riscv64: {
srcs: [
"utils/riscv64/assembler_riscv64.cc",
+ "utils/riscv64/jni_macro_assembler_riscv64.cc",
"utils/riscv64/managed_register_riscv64.cc",
],
},
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 8b47b38..a0230e3 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -25,6 +25,9 @@
#ifdef ART_ENABLE_CODEGEN_arm64
#include "arm64/jni_macro_assembler_arm64.h"
#endif
+#ifdef ART_ENABLE_CODEGEN_riscv64
+#include "riscv64/jni_macro_assembler_riscv64.h"
+#endif
#ifdef ART_ENABLE_CODEGEN_x86
#include "x86/jni_macro_assembler_x86.h"
#endif
@@ -79,6 +82,10 @@
case InstructionSet::kArm64:
return MacroAsm64UniquePtr(new (allocator) arm64::Arm64JNIMacroAssembler(allocator));
#endif
+#ifdef ART_ENABLE_CODEGEN_riscv64
+ case InstructionSet::kRiscv64:
+ return MacroAsm64UniquePtr(new (allocator) riscv64::Riscv64JNIMacroAssembler(allocator));
+#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case InstructionSet::kX86_64:
return MacroAsm64UniquePtr(new (allocator) x86_64::X86_64JNIMacroAssembler(allocator));
diff --git a/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc b/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
new file mode 100644
index 0000000..689d871
--- /dev/null
+++ b/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_riscv64.h"
+
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "managed_register_riscv64.h"
+#include "offsets.h"
+#include "thread.h"
+
+namespace art {
+namespace riscv64 {
+
+#define __ asm_.
+
+Riscv64JNIMacroAssembler::~Riscv64JNIMacroAssembler() {
+}
+
+void Riscv64JNIMacroAssembler::FinalizeCode() {
+ __ FinalizeCode();
+}
+
+void Riscv64JNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs) {
+ // TODO(riscv64): Implement this.
+ UNUSED(frame_size, method_reg, callee_save_regs);
+}
+
+void Riscv64JNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) {
+ // TODO(riscv64): Implement this.
+ UNUSED(frame_size, callee_save_regs, may_suspend);
+}
+
+void Riscv64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ if (adjust != 0u) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ __ AddConst64(SP, SP, -adjust);
+ __ cfi().AdjustCFAOffset(adjust);
+ }
+}
+
+void Riscv64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ if (adjust != 0u) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ __ AddConst64(SP, SP, adjust);
+ __ cfi().AdjustCFAOffset(-adjust);
+ }
+}
+
+ManagedRegister Riscv64JNIMacroAssembler::CoreRegisterWithSize(ManagedRegister src, size_t size) {
+ DCHECK(src.AsRiscv64().IsXRegister());
+ DCHECK(size == 4u || size == 8u) << size;
+ return src;
+}
+
+void Riscv64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
+ // TODO(riscv64): Implement this.
+ UNUSED(offs, m_src, size);
+}
+
+void Riscv64JNIMacroAssembler::Store(ManagedRegister base,
+ MemberOffset offs,
+ ManagedRegister m_src,
+ size_t size) {
+ // TODO(riscv64): Implement this.
+ UNUSED(base, offs, m_src, size);
+}
+
+void Riscv64JNIMacroAssembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
+ // TODO(riscv64): Implement this.
+ UNUSED(offs, m_src);
+}
+
+void Riscv64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) {
+ // TODO(riscv64): Implement this.
+ UNUSED(thr_offs, tag_sp);
+}
+
+void Riscv64JNIMacroAssembler::Load(ManagedRegister m_dest, FrameOffset src, size_t size) {
+ // TODO(riscv64): Implement this.
+ UNUSED(m_dest, src, size);
+}
+
+void Riscv64JNIMacroAssembler::Load(ManagedRegister m_dest,
+ ManagedRegister m_base,
+ MemberOffset offs,
+ size_t size) {
+ // TODO(riscv64): Implement this.
+ UNUSED(m_dest, m_base, offs, size);
+}
+
+void Riscv64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dest, ThreadOffset64 offs) {
+ // TODO(riscv64): Implement this.
+ UNUSED(m_dest, offs);
+}
+
+void Riscv64JNIMacroAssembler::MoveArguments(ArrayRef<ArgumentLocation> dests,
+ ArrayRef<ArgumentLocation> srcs,
+ ArrayRef<FrameOffset> refs) {
+ // TODO(riscv64): Implement this.
+ UNUSED(dests, srcs, refs);
+}
+
+void Riscv64JNIMacroAssembler::Move(ManagedRegister m_dest, ManagedRegister m_src, size_t size) {
+ // TODO(riscv64): Implement this.
+ UNUSED(m_dest, m_src, size);
+}
+
+void Riscv64JNIMacroAssembler::Move(ManagedRegister m_dest, size_t value) {
+ // TODO(riscv64): Implement this.
+ UNUSED(m_dest, value);
+}
+
+void Riscv64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
+ // TODO(riscv64): Implement this.
+ UNUSED(mreg, size);
+}
+
+void Riscv64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+ // TODO(riscv64): Implement this.
+ UNUSED(mreg, size);
+}
+
+void Riscv64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
+ // TODO(riscv64): Implement this.
+ UNUSED(tr);
+}
+
+void Riscv64JNIMacroAssembler::GetCurrentThread(FrameOffset offset) {
+ // TODO(riscv64): Implement this.
+ UNUSED(offset);
+}
+
+void Riscv64JNIMacroAssembler::DecodeJNITransitionOrLocalJObject(ManagedRegister m_reg,
+ JNIMacroLabel* slow_path,
+ JNIMacroLabel* resume) {
+ // TODO(riscv64): Implement this.
+ UNUSED(m_reg, slow_path, resume);
+}
+
+void Riscv64JNIMacroAssembler::VerifyObject([[maybe_unused]] ManagedRegister m_src,
+ [[maybe_unused]] bool could_be_null) {
+ // TODO: not validating references.
+}
+
+void Riscv64JNIMacroAssembler::VerifyObject([[maybe_unused]] FrameOffset src,
+ [[maybe_unused]] bool could_be_null) {
+ // TODO: not validating references.
+}
+
+void Riscv64JNIMacroAssembler::Jump(ManagedRegister m_base, Offset offs) {
+ Riscv64ManagedRegister base = m_base.AsRiscv64();
+ CHECK(base.IsXRegister()) << base;
+ XRegister scratch = TMP;
+ __ Loadd(scratch, base.AsXRegister(), offs.Int32Value());
+ __ Jr(scratch);
+}
+
+void Riscv64JNIMacroAssembler::Call(ManagedRegister m_base, Offset offs) {
+ Riscv64ManagedRegister base = m_base.AsRiscv64();
+ CHECK(base.IsXRegister()) << base;
+ XRegister scratch = TMP;
+ __ Loadd(scratch, base.AsXRegister(), offs.Int32Value());
+ __ Jalr(scratch);
+}
+
+
+void Riscv64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset) {
+ Call(Riscv64ManagedRegister::FromXRegister(TR), offset);
+}
+
+void Riscv64JNIMacroAssembler::TryToTransitionFromRunnableToNative(
+ JNIMacroLabel* label,
+ ArrayRef<const ManagedRegister> scratch_regs) {
+ // TODO(riscv64): Implement this.
+ UNUSED(label, scratch_regs);
+}
+
+void Riscv64JNIMacroAssembler::TryToTransitionFromNativeToRunnable(
+ JNIMacroLabel* label,
+ ArrayRef<const ManagedRegister> scratch_regs,
+ ManagedRegister return_reg) {
+ // TODO(riscv64): Implement this.
+ UNUSED(label, scratch_regs, return_reg);
+}
+
+void Riscv64JNIMacroAssembler::SuspendCheck(JNIMacroLabel* label) {
+ // TODO(riscv64): Implement this.
+ UNUSED(label);
+}
+
+void Riscv64JNIMacroAssembler::ExceptionPoll(JNIMacroLabel* label) {
+ // TODO(riscv64): Implement this.
+ UNUSED(label);
+}
+
+void Riscv64JNIMacroAssembler::DeliverPendingException() {
+ // TODO(riscv64): Implement this.
+}
+
+std::unique_ptr<JNIMacroLabel> Riscv64JNIMacroAssembler::CreateLabel() {
+ return std::unique_ptr<JNIMacroLabel>(new Riscv64JNIMacroLabel());
+}
+
+void Riscv64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ __ J(down_cast<Riscv64Label*>(Riscv64JNIMacroLabel::Cast(label)->AsRiscv64()));
+}
+
+void Riscv64JNIMacroAssembler::TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCondition cond) {
+ CHECK(label != nullptr);
+
+ DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);
+ DCHECK(gUseReadBarrier);
+
+ XRegister test_reg = TMP;
+  int32_t is_gc_marking_offset = Thread::IsGcMarkingOffset<kRiscv64PointerSize>().Int32Value();
+ __ Loadw(test_reg, TR, is_gc_marking_offset);
+ switch (cond) {
+ case JNIMacroUnaryCondition::kZero:
+ __ Beqz(test_reg, down_cast<Riscv64Label*>(Riscv64JNIMacroLabel::Cast(label)->AsRiscv64()));
+ break;
+ case JNIMacroUnaryCondition::kNotZero:
+ __ Bnez(test_reg, down_cast<Riscv64Label*>(Riscv64JNIMacroLabel::Cast(label)->AsRiscv64()));
+ break;
+ default:
+ LOG(FATAL) << "Not implemented unary condition: " << static_cast<int>(cond);
+ UNREACHABLE();
+ }
+}
+
+void Riscv64JNIMacroAssembler::TestMarkBit(ManagedRegister ref,
+ JNIMacroLabel* label,
+ JNIMacroUnaryCondition cond) {
+ // TODO(riscv64): Implement this.
+ UNUSED(ref, label, cond);
+}
+
+void Riscv64JNIMacroAssembler::TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) {
+ XRegister test_reg = TMP;
+ int32_t small_offset = dchecked_integral_cast<int32_t>(address & 0xfff) -
+ dchecked_integral_cast<int32_t>((address & 0x800) << 1);
+  int64_t remainder = static_cast<int64_t>(address) - small_offset;
+ __ Li(test_reg, remainder);
+ __ Lb(test_reg, test_reg, small_offset);
+ __ Bnez(test_reg, down_cast<Riscv64Label*>(Riscv64JNIMacroLabel::Cast(label)->AsRiscv64()));
+}
+
+void Riscv64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ __ Bind(Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
+}
+
+#undef __
+
+} // namespace riscv64
+} // namespace art
diff --git a/compiler/utils/riscv64/jni_macro_assembler_riscv64.h b/compiler/utils/riscv64/jni_macro_assembler_riscv64.h
new file mode 100644
index 0000000..e472526
--- /dev/null
+++ b/compiler/utils/riscv64/jni_macro_assembler_riscv64.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_RISCV64_JNI_MACRO_ASSEMBLER_RISCV64_H_
+#define ART_COMPILER_UTILS_RISCV64_JNI_MACRO_ASSEMBLER_RISCV64_H_
+
+#include <stdint.h>
+#include <memory>
+#include <vector>
+
+#include <android-base/logging.h>
+
+#include "assembler_riscv64.h"
+#include "base/arena_containers.h"
+#include "base/enums.h"
+#include "base/macros.h"
+#include "offsets.h"
+#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
+
+namespace art {
+namespace riscv64 {
+
+class Riscv64JNIMacroAssembler : public JNIMacroAssemblerFwd<Riscv64Assembler, PointerSize::k64> {
+ public:
+ explicit Riscv64JNIMacroAssembler(ArenaAllocator* allocator)
+ : JNIMacroAssemblerFwd<Riscv64Assembler, PointerSize::k64>(allocator) {}
+ ~Riscv64JNIMacroAssembler();
+
+ // Finalize the code.
+ void FinalizeCode() override;
+
+ // Emit code that will create an activation on the stack.
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs) override;
+
+ // Emit code that will remove an activation from the stack.
+ void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) override;
+
+ void IncreaseFrameSize(size_t adjust) override;
+ void DecreaseFrameSize(size_t adjust) override;
+
+ ManagedRegister CoreRegisterWithSize(ManagedRegister src, size_t size) override;
+
+ // Store routines.
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+ void Store(ManagedRegister base, MemberOffset offs, ManagedRegister src, size_t size) override;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) override;
+
+ // Load routines.
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
+ void Load(ManagedRegister dest, ManagedRegister base, MemberOffset offs, size_t size) override;
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
+
+ // Copying routines.
+ void MoveArguments(ArrayRef<ArgumentLocation> dests,
+ ArrayRef<ArgumentLocation> srcs,
+ ArrayRef<FrameOffset> refs) override;
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
+ void Move(ManagedRegister dest, size_t value) override;
+
+ // Sign extension.
+ void SignExtend(ManagedRegister mreg, size_t size) override;
+
+ // Zero extension.
+ void ZeroExtend(ManagedRegister mreg, size_t size) override;
+
+ // Exploit fast access in managed code to Thread::Current().
+ void GetCurrentThread(ManagedRegister tr) override;
+ void GetCurrentThread(FrameOffset dest_offset) override;
+
+ // Decode JNI transition or local `jobject`. For (weak) global `jobject`, jump to slow path.
+ void DecodeJNITransitionOrLocalJObject(ManagedRegister reg,
+ JNIMacroLabel* slow_path,
+ JNIMacroLabel* resume) override;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+ // know that src may not be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) override;
+ void VerifyObject(FrameOffset src, bool could_be_null) override;
+
+ // Jump to address held at [base+offset] (used for tail calls).
+ void Jump(ManagedRegister base, Offset offset) override;
+
+ // Call to address held at [base+offset].
+ void Call(ManagedRegister base, Offset offset) override;
+ void CallFromThread(ThreadOffset64 offset) override;
+
+ // Generate fast-path for transition to Native. Go to `label` if any thread flag is set.
+ // The implementation can use `scratch_regs` which should be callee save core registers
+ // (already saved before this call) and must preserve all argument registers.
+ void TryToTransitionFromRunnableToNative(JNIMacroLabel* label,
+ ArrayRef<const ManagedRegister> scratch_regs) override;
+
+ // Generate fast-path for transition to Runnable. Go to `label` if any thread flag is set.
+ // The implementation can use `scratch_regs` which should be core argument registers
+ // not used as return registers and it must preserve the `return_reg` if any.
+ void TryToTransitionFromNativeToRunnable(JNIMacroLabel* label,
+ ArrayRef<const ManagedRegister> scratch_regs,
+ ManagedRegister return_reg) override;
+
+ // Generate suspend check and branch to `label` if there is a pending suspend request.
+ void SuspendCheck(JNIMacroLabel* label) override;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to the `label` if it is.
+ void ExceptionPoll(JNIMacroLabel* label) override;
+ // Deliver pending exception.
+ void DeliverPendingException() override;
+
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() override;
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label) override;
+ // Emit a conditional jump to the label by applying a unary condition test to the GC marking flag.
+ void TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCondition cond) override;
+ // Emit a conditional jump to the label by applying a unary condition test to object's mark bit.
+ void TestMarkBit(ManagedRegister ref, JNIMacroLabel* label, JNIMacroUnaryCondition cond) override;
+ // Emit a conditional jump to label if the loaded value from specified locations is not zero.
+ void TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) override;
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label) override;
+};
+
+class Riscv64JNIMacroLabel final
+ : public JNIMacroLabelCommon<Riscv64JNIMacroLabel,
+ art::Label,
+ InstructionSet::kRiscv64> {
+ public:
+ art::Label* AsRiscv64() {
+ return AsPlatformLabel();
+ }
+};
+
+} // namespace riscv64
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_RISCV64_JNI_MACRO_ASSEMBLER_RISCV64_H_