riscv64: Define registers and add a CodeGeneratorRISCV64 stub

Define the callee-save and calling-convention register sets for riscv64
and add a CodeGeneratorRISCV64 class whose methods abort with
LOG(FATAL) << "Unimplemented" until they are filled in. Hook the new
code generator into CodeGenerator::Create behind
ART_ENABLE_CODEGEN_riscv64 and build it for riscv64 targets.

Test: m test-art-host-gtest
Bug: 283082089
Change-Id: If161af9995d4611eca7209e0f3bbda55acf5049f
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 60c9973..848a592 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -209,6 +209,7 @@
riscv64: {
srcs: [
"jni/quick/riscv64/calling_convention_riscv64.cc",
+ "optimizing/code_generator_riscv64.cc",
"utils/riscv64/assembler_riscv64.cc",
"utils/riscv64/jni_macro_assembler_riscv64.cc",
"utils/riscv64/managed_register_riscv64.cc",
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index d6d9009..ea66820 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -909,6 +909,12 @@
new (allocator) arm64::CodeGeneratorARM64(graph, compiler_options, stats));
}
#endif
+#ifdef ART_ENABLE_CODEGEN_riscv64
+ case InstructionSet::kRiscv64: {
+ return std::unique_ptr<CodeGenerator>(
+ new (allocator) riscv64::CodeGeneratorRISCV64(graph, compiler_options, stats));
+ }
+#endif
#ifdef ART_ENABLE_CODEGEN_x86
case InstructionSet::kX86: {
return std::unique_ptr<CodeGenerator>(
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
new file mode 100644
index 0000000..e01c7c9
--- /dev/null
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_riscv64.h"
+
+#include "android-base/logging.h"
+#include "arch/riscv64/registers_riscv64.h"
+#include "base/macros.h"
+#include "intrinsics_list.h"
+
+namespace art {
+namespace riscv64 {
+
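+// Core callee-save registers for the managed frame. RA is included so that the return
+// address can be saved and restored together with the other callee saves.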
+static constexpr XRegister kCoreCalleeSaves[] = {
+ // S1 is excluded: it is reserved as the ART thread register (TR).
+ S0, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, RA
+};
+
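+// Floating-point callee-save registers (FS0-FS11) per the RISC-V calling convention.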
+static constexpr FRegister kFpuCalleeSaves[] = {
+ FS0, FS1, FS2, FS3, FS4, FS5, FS6, FS7, FS8, FS9, FS10, FS11
+};
+
+namespace detail {
+
+// Mark which intrinsics we don't have handcrafted code for.
+template <Intrinsics T>
+struct IsUnimplemented {
+ bool is_unimplemented = false;
+};
+
+#define TRUE_OVERRIDE(Name, ...) \
+ template <> \
+ struct IsUnimplemented<Intrinsics::k##Name> { \
+ bool is_unimplemented = true; \
+ };
+UNIMPLEMENTED_INTRINSIC_LIST_RISCV64(TRUE_OVERRIDE)
+#undef TRUE_OVERRIDE
+
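+// One flag per intrinsic, in Intrinsics enum order: entries specialized above evaluate
+// to true, all others fall back to the default false. kNone is listed explicitly first.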
+static constexpr bool kIsIntrinsicUnimplemented[] = {
+ false, // kNone
+#define IS_UNIMPLEMENTED(Intrinsic, ...) \
+ IsUnimplemented<Intrinsics::k##Intrinsic>().is_unimplemented,
+ INTRINSICS_LIST(IS_UNIMPLEMENTED)
+#undef IS_UNIMPLEMENTED
+};
+
+} // namespace detail
+
+CodeGeneratorRISCV64::CodeGeneratorRISCV64(HGraph* graph,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
+ : CodeGenerator(graph,
+ kNumberOfXRegisters,
+ kNumberOfFRegisters,
+ /*number_of_register_pairs=*/ 0u,
+ ComputeRegisterMask(kCoreCalleeSaves, arraysize(kCoreCalleeSaves)),
+ ComputeRegisterMask(kFpuCalleeSaves, arraysize(kFpuCalleeSaves)),
+ compiler_options,
+ stats,
+ ArrayRef<const bool>(detail::kIsIntrinsicUnimplemented)) {
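+ // Code generation for riscv64 is not implemented yet, so construction aborts.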
+ LOG(FATAL) << "Unimplemented";
+}
+
+void CodeGeneratorRISCV64::GenerateFrameEntry() { LOG(FATAL) << "Unimplemented"; }
+void CodeGeneratorRISCV64::GenerateFrameExit() { LOG(FATAL) << "Unimplemented"; }
+
+void CodeGeneratorRISCV64::Bind(HBasicBlock* block) {
+ UNUSED(block);
+ LOG(FATAL) << "Unimplemented";
+}
+
+size_t CodeGeneratorRISCV64::GetSIMDRegisterWidth() const {
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+
+void CodeGeneratorRISCV64::MoveConstant(Location destination, int32_t value) {
+ UNUSED(destination);
+ UNUSED(value);
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+void CodeGeneratorRISCV64::MoveLocation(Location dst, Location src, DataType::Type dst_type) {
+ UNUSED(dst);
+ UNUSED(src);
+ UNUSED(dst_type);
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+void CodeGeneratorRISCV64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ UNUSED(location);
+ UNUSED(locations);
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+
+void CodeGeneratorRISCV64::SetupBlockedRegisters() const {
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+
+size_t CodeGeneratorRISCV64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+
+size_t CodeGeneratorRISCV64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+
+size_t CodeGeneratorRISCV64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+
+size_t CodeGeneratorRISCV64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+
+void CodeGeneratorRISCV64::DumpCoreRegister(std::ostream& stream, int reg) const {
+ UNUSED(stream);
+ UNUSED(reg);
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+
+void CodeGeneratorRISCV64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+ UNUSED(stream);
+ UNUSED(reg);
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+
+void CodeGeneratorRISCV64::Finalize() {
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+
+// Generate code to invoke a runtime entry point.
+void CodeGeneratorRISCV64::InvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
+ UNUSED(entrypoint);
+ UNUSED(instruction);
+ UNUSED(dex_pc);
+ UNUSED(slow_path);
+ LOG(FATAL) << "Unimplemented";
+}
+
+// Generate code to invoke a runtime entry point, but do not record
+// PC-related information in a stack map.
+void CodeGeneratorRISCV64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path) {
+ UNUSED(entry_point_offset);
+ UNUSED(instruction);
+ UNUSED(slow_path);
+ LOG(FATAL) << "Unimplemented";
+}
+
+void CodeGeneratorRISCV64::IncreaseFrame(size_t adjustment) {
+ UNUSED(adjustment);
+ LOG(FATAL) << "Unimplemented";
+}
+
+void CodeGeneratorRISCV64::DecreaseFrame(size_t adjustment) {
+ UNUSED(adjustment);
+ LOG(FATAL) << "Unimplemented";
+}
+
+void CodeGeneratorRISCV64::GenerateNop() { LOG(FATAL) << "Unimplemented"; }
+
+void CodeGeneratorRISCV64::GenerateImplicitNullCheck(HNullCheck* instruction) {
+ UNUSED(instruction);
+ LOG(FATAL) << "Unimplemented";
+}
+void CodeGeneratorRISCV64::GenerateExplicitNullCheck(HNullCheck* instruction) {
+ UNUSED(instruction);
+ LOG(FATAL) << "Unimplemented";
+}
+
+HLoadString::LoadKind CodeGeneratorRISCV64::GetSupportedLoadStringKind(
+ HLoadString::LoadKind desired_string_load_kind) {
+ UNUSED(desired_string_load_kind);
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+
+HLoadClass::LoadKind CodeGeneratorRISCV64::GetSupportedLoadClassKind(
+ HLoadClass::LoadKind desired_class_load_kind) {
+ UNUSED(desired_class_load_kind);
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorRISCV64::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, ArtMethod* method) {
+ UNUSED(desired_dispatch_info);
+ UNUSED(method);
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+}
+
+void CodeGeneratorRISCV64::LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke) {
+ UNUSED(load_kind);
+ UNUSED(temp);
+ UNUSED(invoke);
+ LOG(FATAL) << "Unimplemented";
+}
+
+void CodeGeneratorRISCV64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp,
+ SlowPathCode* slow_path) {
+ UNUSED(temp);
+ UNUSED(invoke);
+ UNUSED(slow_path);
+ LOG(FATAL) << "Unimplemented";
+}
+
+void CodeGeneratorRISCV64::GenerateVirtualCall(HInvokeVirtual* invoke,
+ Location temp,
+ SlowPathCode* slow_path) {
+ UNUSED(temp);
+ UNUSED(invoke);
+ UNUSED(slow_path);
+ LOG(FATAL) << "Unimplemented";
+}
+
+void CodeGeneratorRISCV64::MoveFromReturnRegister(Location trg, DataType::Type type) {
+ UNUSED(trg);
+ UNUSED(type);
+ LOG(FATAL) << "Unimplemented";
+}
+
+} // namespace riscv64
+} // namespace art
diff --git a/compiler/optimizing/code_generator_riscv64.h b/compiler/optimizing/code_generator_riscv64.h
index 405b39a..4ef008a 100644
--- a/compiler/optimizing/code_generator_riscv64.h
+++ b/compiler/optimizing/code_generator_riscv64.h
@@ -17,7 +17,174 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_RISCV64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_RISCV64_H_
+#include "android-base/logging.h"
+#include "arch/riscv64/registers_riscv64.h"
+#include "base/macros.h"
#include "code_generator.h"
#include "driver/compiler_options.h"
+#include "optimizing/locations.h"
+#include "utils/riscv64/assembler_riscv64.h"
+
+namespace art {
+
+namespace riscv64 {
+
+// InvokeDexCallingConvention registers. A0 is not an argument register; it carries the
+// ArtMethod* of the called method.
+static constexpr XRegister kParameterCoreRegisters[] = {A1, A2, A3, A4, A5, A6, A7};
+static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+
+static constexpr FRegister kParameterFpuRegisters[] = {FA0, FA1, FA2, FA3, FA4, FA5, FA6, FA7};
+static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);
+
+// InvokeRuntimeCallingConvention registers
+static constexpr XRegister kRuntimeParameterCoreRegisters[] = {A0, A1, A2, A3, A4, A5, A6, A7};
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+
+static constexpr FRegister kRuntimeParameterFpuRegisters[] = {
+ FA0, FA1, FA2, FA3, FA4, FA5, FA6, FA7
+};
+static constexpr size_t kRuntimeParameterFpuRegistersLength =
+ arraysize(kRuntimeParameterFpuRegisters);
+
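+// For now every intrinsic is treated as unimplemented on riscv64; this list is expanded
+// in code_generator_riscv64.cc to mark each intrinsic in kIsIntrinsicUnimplemented.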
+#define UNIMPLEMENTED_INTRINSIC_LIST_RISCV64(V) INTRINSICS_LIST(V)
+
+class CodeGeneratorRISCV64;
+
+class CodeGeneratorRISCV64 : public CodeGenerator {
+ public:
+ CodeGeneratorRISCV64(HGraph* graph,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr);
+ virtual ~CodeGeneratorRISCV64() {}
+
+ void GenerateFrameEntry() override;
+ void GenerateFrameExit() override;
+
+ void Bind(HBasicBlock* block) override;
+
+ size_t GetWordSize() const override { return kRiscv64WordSize; }
+
+ bool SupportsPredicatedSIMD() const override {
+ // TODO(riscv64): Check the vector extension.
+ return false;
+ }
+
+ size_t GetSlowPathFPWidth() const override {
+ LOG(FATAL) << "CodeGeneratorRISCV64::GetSlowPathFPWidth is unimplemented";
+ UNREACHABLE();
+ }
+
+ size_t GetCalleePreservedFPWidth() const override {
+ LOG(FATAL) << "CodeGeneratorRISCV64::GetCalleePreservedFPWidth is unimplemented";
+ UNREACHABLE();
+ }
+
+ size_t GetSIMDRegisterWidth() const override;
+
+ uintptr_t GetAddressOf(HBasicBlock* block) override {
+ UNUSED(block);
+ LOG(FATAL) << "CodeGeneratorRISCV64::GetAddressOf is unimplemented";
+ UNREACHABLE();
+ }
+
+ void Initialize() override { LOG(FATAL) << "Unimplemented"; }
+
+ void MoveConstant(Location destination, int32_t value) override;
+ void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) override;
+
+ HGraphVisitor* GetInstructionVisitor() override {
+ LOG(FATAL) << "unimplemented";
+ UNREACHABLE();
+ }
+ Riscv64Assembler* GetAssembler() override {
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+ }
+ const Riscv64Assembler& GetAssembler() const override {
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+ }
+
+ HGraphVisitor* GetLocationBuilder() override {
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+ }
+
+ void SetupBlockedRegisters() const override;
+
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+
+ void DumpCoreRegister(std::ostream& stream, int reg) const override;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
+
+ InstructionSet GetInstructionSet() const override { return InstructionSet::kRiscv64; }
+
+ uint32_t GetPreferredSlotsAlignment() const override {
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+ }
+
+ void Finalize() override;
+
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path = nullptr) override;
+
+ // Generate code to invoke a runtime entry point, but do not record
+ // PC-related information in a stack map.
+ void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path);
+
+ // TODO(riscv64): Add a ParallelMoveResolverRISCV64 later.
+ ParallelMoveResolver* GetMoveResolver() override {
+ LOG(FATAL) << "Unimplemented";
+ UNREACHABLE();
+ }
+
+ bool NeedsTwoRegisters([[maybe_unused]] DataType::Type type) const override { return false; }
+
+ void IncreaseFrame(size_t adjustment) override;
+ void DecreaseFrame(size_t adjustment) override;
+
+ void GenerateNop() override;
+
+ void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) override;
+
+ // Check if the desired_string_load_kind is supported. If it is, return it,
+ // otherwise return a fall-back kind that should be used instead.
+ HLoadString::LoadKind GetSupportedLoadStringKind(
+ HLoadString::LoadKind desired_string_load_kind) override;
+
+ // Check if the desired_class_load_kind is supported. If it is, return it,
+ // otherwise return a fall-back kind that should be used instead.
+ HLoadClass::LoadKind GetSupportedLoadClassKind(
+ HLoadClass::LoadKind desired_class_load_kind) override;
+
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, ArtMethod* method) override;
+
+ void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
+ void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp,
+ SlowPathCode* slow_path = nullptr) override;
+ void GenerateVirtualCall(HInvokeVirtual* invoke,
+ Location temp,
+ SlowPathCode* slow_path = nullptr) override;
+ void MoveFromReturnRegister(Location trg, DataType::Type type) override;
+};
+
+} // namespace riscv64
+} // namespace art
#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_RISCV64_H_