riscv64: add initial support for ART.

The only supported mode is the switch interpreter. JNI transitions are
not implemented yet (not even generic JNI trampoline), therefore it is
impossible to run even a simple hello world test (because it requires
loading Java libraries and initializing classes in them, many of which
call native methods). The only passing ART test is 000-nop.

Test: run ART test 000-nop on a Linux RISC-V VM:
  lunch aosp_riscv64-userdebug

  export ART_TEST_SSH_USER=ubuntu
  export ART_TEST_SSH_HOST=localhost
  export ART_TEST_SSH_PORT=10001
  export ART_TEST_ON_VM=true

  . art/tools/buildbot-utils.sh
  art/tools/buildbot-build.sh --target

  # Create, boot and configure the VM.
  art/tools/buildbot-vm.sh create
  art/tools/buildbot-vm.sh boot
  art/tools/buildbot-vm.sh setup-ssh  # password: 'ubuntu'

  art/tools/buildbot-cleanup-device.sh
  art/tools/buildbot-setup-device.sh
  art/tools/buildbot-sync.sh

  art/test.py --target -r --no-prebuild --ndebug --no-image \
    --64 --interpreter 000-nop

Co-authored-by: Lifang Xia <lifang_xia@linux.alibaba.com>
Change-Id: Iac267525527901b74e863cb8c72ddcf0789e8a5d
diff --git a/build/Android.bp b/build/Android.bp
index 4627e6f..a6f8661 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -243,6 +243,9 @@
         arm64: {
             cflags: ["-DART_ENABLE_CODEGEN_arm64"],
         },
+        riscv64: {
+            cflags: ["-DART_ENABLE_CODEGEN_riscv64"],
+        },
         x86: {
             cflags: ["-DART_ENABLE_CODEGEN_x86"],
         },
@@ -280,6 +283,9 @@
         arm64: {
             ldflags: ["-z max-page-size=0x200000"],
         },
+        riscv64: {
+            ldflags: ["-z max-page-size=0x200000"],
+        },
         x86_64: {
             ldflags: ["-z max-page-size=0x200000"],
         },
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 9ca4d0f..966f99d 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -17,7 +17,7 @@
 ifndef ART_ANDROID_COMMON_MK
 ART_ANDROID_COMMON_MK = true
 
-ART_TARGET_SUPPORTED_ARCH := arm arm64 x86 x86_64
+ART_TARGET_SUPPORTED_ARCH := arm arm64 riscv64 x86 x86_64
 ART_HOST_SUPPORTED_ARCH := x86 x86_64
 ART_DEXPREOPT_BOOT_JAR_DIR := apex/com.android.art/javalib
 CONSCRYPT_DEXPREOPT_BOOT_JAR_DIR := apex/com.android.conscrypt/javalib
diff --git a/build/apex/art_apex_test.py b/build/apex/art_apex_test.py
index 6e58cf6..f55e352 100755
--- a/build/apex/art_apex_test.py
+++ b/build/apex/art_apex_test.py
@@ -44,7 +44,7 @@
 
 # Architectures supported by APEX packages.
 ARCHS_32 = ["arm", "x86"]
-ARCHS_64 = ["arm64", "x86_64"]
+ARCHS_64 = ["arm64", "riscv64", "x86_64"]
 
 # Multilib options
 MULTILIB_32 = '32'
diff --git a/build/art.go b/build/art.go
index cc53719..dd2106e 100644
--- a/build/art.go
+++ b/build/art.go
@@ -29,7 +29,7 @@
 	"android/soong/cc/config"
 )
 
-var supportedArches = []string{"arm", "arm64", "x86", "x86_64"}
+var supportedArches = []string{"arm", "arm64", "riscv64", "x86", "x86_64"}
 
 func globalFlags(ctx android.LoadHookContext) ([]string, []string) {
 	var cflags []string
diff --git a/build/codegen.go b/build/codegen.go
index 66569be..3cc51a8 100644
--- a/build/codegen.go
+++ b/build/codegen.go
@@ -218,6 +218,8 @@
 		arches[s] = true
 		if s == "arm64" {
 			arches["arm"] = true
+		} else if s == "riscv64" {
+			arches["riscv64"] = true
 		} else if s == "x86_64" {
 			arches["x86"] = true
 		}
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 7c538a8..c1bc32a 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -25,6 +25,10 @@
 #include "code_generator_arm64.h"
 #endif
 
+#ifdef ART_ENABLE_CODEGEN_riscv64
+#include "code_generator_riscv64.h"
+#endif
+
 #ifdef ART_ENABLE_CODEGEN_x86
 #include "code_generator_x86.h"
 #endif
diff --git a/compiler/optimizing/code_generator_riscv64.h b/compiler/optimizing/code_generator_riscv64.h
new file mode 100644
index 0000000..405b39a
--- /dev/null
+++ b/compiler/optimizing/code_generator_riscv64.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_RISCV64_H_
+#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_RISCV64_H_
+
+#include "code_generator.h"
+#include "driver/compiler_options.h"
+
+#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_RISCV64_H_
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index 88551d8..7af9d0f 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -36,6 +36,10 @@
 #include "code_generator_arm64.h"
 #endif
 
+#ifdef ART_ENABLE_CODEGEN_riscv64
+#include "code_generator_riscv64.h"
+#endif
+
 #ifdef ART_ENABLE_CODEGEN_x86
 #include "code_generator_x86.h"
 #endif
@@ -329,6 +333,10 @@
 }
 #endif
 
+#ifdef ART_ENABLE_CODEGEN_riscv64
+inline CodeGenerator* create_codegen_riscv64(HGraph*, const CompilerOptions&) { return nullptr; }
+#endif
+
 #ifdef ART_ENABLE_CODEGEN_x86
 inline CodeGenerator* create_codegen_x86(HGraph* graph, const CompilerOptions& compiler_options) {
   return new (graph->GetAllocator()) TestCodeGeneratorX86(graph, compiler_options);
diff --git a/libartbase/arch/instruction_set.cc b/libartbase/arch/instruction_set.cc
index 811e723..e0de4e8 100644
--- a/libartbase/arch/instruction_set.cc
+++ b/libartbase/arch/instruction_set.cc
@@ -123,6 +123,8 @@
 
 static_assert(IsAligned<kPageSize>(kArmStackOverflowReservedBytes), "ARM gap not page aligned");
 static_assert(IsAligned<kPageSize>(kArm64StackOverflowReservedBytes), "ARM64 gap not page aligned");
+static_assert(IsAligned<kPageSize>(kRiscv64StackOverflowReservedBytes),
+              "RISCV64 gap not page aligned");
 static_assert(IsAligned<kPageSize>(kX86StackOverflowReservedBytes), "X86 gap not page aligned");
 static_assert(IsAligned<kPageSize>(kX86_64StackOverflowReservedBytes),
               "X86_64 gap not page aligned");
@@ -135,6 +137,8 @@
 static_assert(ART_FRAME_SIZE_LIMIT < kArmStackOverflowReservedBytes, "Frame size limit too large");
 static_assert(ART_FRAME_SIZE_LIMIT < kArm64StackOverflowReservedBytes,
               "Frame size limit too large");
+static_assert(ART_FRAME_SIZE_LIMIT < kRiscv64StackOverflowReservedBytes,
+              "Frame size limit too large");
 static_assert(ART_FRAME_SIZE_LIMIT < kX86StackOverflowReservedBytes,
               "Frame size limit too large");
 static_assert(ART_FRAME_SIZE_LIMIT < kX86_64StackOverflowReservedBytes,
diff --git a/libartbase/arch/instruction_set.h b/libartbase/arch/instruction_set.h
index 8d59f1b..0e9ebf0 100644
--- a/libartbase/arch/instruction_set.h
+++ b/libartbase/arch/instruction_set.h
@@ -339,7 +339,7 @@
   return ((hi64 << 32) | lo32);
 }
 
-#elif defined(__x86_64__) || defined(__aarch64__)
+#elif defined(__x86_64__) || defined(__aarch64__) || defined(__riscv)
 
 // Note: TwoWordReturn can't be constexpr for 64-bit targets. We'd need a constexpr constructor,
 //       which would violate C-linkage in the entrypoint functions.
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 42120a3..3d74f8a 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -29,7 +29,8 @@
 
 namespace art {
 
-#if defined(__LP64__) && !defined(__Fuchsia__) && (defined(__aarch64__) || defined(__APPLE__))
+#if defined(__LP64__) && !defined(__Fuchsia__) && \
+    (defined(__aarch64__) || defined(__riscv) || defined(__APPLE__))
 #define USE_ART_LOW_4G_ALLOCATOR 1
 #else
 #if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__)
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 93b1daf..47fc3ca 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -339,6 +339,7 @@
         arm: {
             srcs: [
                 "interpreter/mterp/nterp.cc",
+                "interpreter/mterp/nterp_impl.cc",
                 ":libart_mterp.armng",
                 "arch/arm/context_arm.cc",
                 "arch/arm/entrypoints_init_arm.cc",
@@ -354,6 +355,7 @@
         arm64: {
             srcs: [
                 "interpreter/mterp/nterp.cc",
+                "interpreter/mterp/nterp_impl.cc",
                 ":libart_mterp.arm64ng",
                 "arch/arm64/context_arm64.cc",
                 "arch/arm64/entrypoints_init_arm64.cc",
@@ -368,12 +370,20 @@
         riscv64: {
             srcs: [
                 "arch/riscv64/context_riscv64.cc",
+                "arch/riscv64/entrypoints_init_riscv64.cc",
+                "arch/riscv64/fault_handler_riscv64.cc",
+                "arch/riscv64/jni_entrypoints_riscv64.S",
                 "arch/riscv64/quick_entrypoints_riscv64.S",
+                "arch/riscv64/thread_riscv64.cc",
+                "interpreter/mterp/nterp.cc",
+                "interpreter/mterp/nterp_stub.cc",
+                "monitor_pool.cc",
             ],
         },
         x86: {
             srcs: [
                 "interpreter/mterp/nterp.cc",
+                "interpreter/mterp/nterp_impl.cc",
                 ":libart_mterp.x86ng",
                 "arch/x86/context_x86.cc",
                 "arch/x86/entrypoints_init_x86.cc",
@@ -395,6 +405,7 @@
                 // Note that the fault_handler_x86.cc is not a mistake.  This file is
                 // shared between the x86 and x86_64 architectures.
                 "interpreter/mterp/nterp.cc",
+                "interpreter/mterp/nterp_impl.cc",
                 ":libart_mterp.x86_64ng",
                 "arch/x86_64/context_x86_64.cc",
                 "arch/x86_64/entrypoints_init_x86_64.cc",
diff --git a/runtime/arch/riscv64/asm_support_riscv64.S b/runtime/arch/riscv64/asm_support_riscv64.S
index 139c085..ad91cb1 100644
--- a/runtime/arch/riscv64/asm_support_riscv64.S
+++ b/runtime/arch/riscv64/asm_support_riscv64.S
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_ARCH_RISCV64_ASM_SUPPORT_RISCV64_S_
 
 #include "asm_support_riscv64.h"
+#include "interpreter/cfi_asm_support.h"
 
 // Define special registers.
 
diff --git a/runtime/arch/riscv64/entrypoints_init_riscv64.cc b/runtime/arch/riscv64/entrypoints_init_riscv64.cc
new file mode 100644
index 0000000..811624c
--- /dev/null
+++ b/runtime/arch/riscv64/entrypoints_init_riscv64.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/quick/quick_default_init_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+
+namespace art {
+
+void UpdateReadBarrierEntrypoints(QuickEntryPoints* /*qpoints*/, bool /*is_active*/) {
+  // TODO(riscv64): add read barrier entrypoints
+}
+
+void InitEntryPoints(JniEntryPoints* jpoints,
+                     QuickEntryPoints* qpoints,
+                     bool monitor_jni_entry_exit) {
+  DefaultInitEntryPoints(jpoints, qpoints, monitor_jni_entry_exit);
+  // TODO(riscv64): add other entrypoints
+}
+
+}  // namespace art
diff --git a/runtime/arch/riscv64/fault_handler_riscv64.cc b/runtime/arch/riscv64/fault_handler_riscv64.cc
new file mode 100644
index 0000000..a251966
--- /dev/null
+++ b/runtime/arch/riscv64/fault_handler_riscv64.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fault_handler.h"
+
+extern "C" void art_quick_throw_stack_overflow();
+extern "C" void art_quick_throw_null_pointer_exception_from_signal();
+extern "C" void art_quick_implicit_suspend();
+
+// RISCV64 specific fault handler functions (or stubs if unimplemented yet).
+
+namespace art {
+
+uintptr_t FaultManager::GetFaultPc(siginfo_t*, void*) {
+  LOG(FATAL) << "FaultManager::GetFaultPc is not implemented for RISC-V";
+  return 0;
+}
+
+uintptr_t FaultManager::GetFaultSp(void*) {
+  LOG(FATAL) << "FaultManager::GetFaultSp is not implemented for RISC-V";
+  return 0;
+}
+
+bool NullPointerHandler::Action(int, siginfo_t*, void*) {
+  LOG(FATAL) << "NullPointerHandler::Action is not implemented for RISC-V";
+  return false;
+}
+
+bool SuspensionHandler::Action(int, siginfo_t*, void*) {
+  LOG(FATAL) << "SuspensionHandler::Action is not implemented for RISC-V";
+  return false;
+}
+
+bool StackOverflowHandler::Action(int, siginfo_t*, void*) {
+  LOG(FATAL) << "StackOverflowHandler::Action is not implemented for RISC-V";
+  return false;
+}
+
+}  // namespace art
diff --git a/runtime/arch/riscv64/jni_entrypoints_riscv64.S b/runtime/arch/riscv64/jni_entrypoints_riscv64.S
new file mode 100644
index 0000000..cf1175c
--- /dev/null
+++ b/runtime/arch/riscv64/jni_entrypoints_riscv64.S
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_riscv64.S"
+
+UNDEFINED art_jni_dlsym_lookup_stub
+UNDEFINED art_jni_dlsym_lookup_critical_stub
+UNDEFINED art_jni_method_start
+UNDEFINED art_jni_method_end
+UNDEFINED art_jni_read_barrier
+UNDEFINED art_jni_method_entry_hook
+UNDEFINED art_jni_lock_object_no_inline
+UNDEFINED art_jni_lock_object
+UNDEFINED art_jni_unlock_object_no_inline
+UNDEFINED art_jni_unlock_object
diff --git a/runtime/arch/riscv64/quick_entrypoints_riscv64.S b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
index 87025a9..d338531 100644
--- a/runtime/arch/riscv64/quick_entrypoints_riscv64.S
+++ b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
@@ -16,4 +16,220 @@
 
 #include "asm_support_riscv64.S"
 
+UNDEFINED ExecuteSwitchImplAsm
+
+UNDEFINED art_quick_invoke_stub
+UNDEFINED art_quick_invoke_static_stub
+UNDEFINED art_quick_imt_conflict_trampoline
+UNDEFINED art_quick_to_interpreter_bridge
+UNDEFINED art_invoke_obsolete_method_stub
+UNDEFINED art_quick_generic_jni_trampoline
+UNDEFINED art_quick_method_exit_hook
+UNDEFINED art_quick_proxy_invoke_handler
+UNDEFINED art_quick_resolution_trampoline
+UNDEFINED art_quick_deoptimize_from_compiled_code
+UNDEFINED art_quick_string_builder_append
+UNDEFINED art_quick_compile_optimized
+UNDEFINED art_quick_method_entry_hook
+UNDEFINED art_quick_check_instance_of
 UNDEFINED art_quick_do_long_jump
+UNDEFINED art_quick_osr_stub
+
+UNDEFINED art_quick_alloc_array_resolved_dlmalloc
+UNDEFINED art_quick_alloc_array_resolved_dlmalloc_instrumented
+UNDEFINED art_quick_alloc_array_resolved8_dlmalloc
+UNDEFINED art_quick_alloc_array_resolved8_dlmalloc_instrumented
+UNDEFINED art_quick_alloc_array_resolved16_dlmalloc
+UNDEFINED art_quick_alloc_array_resolved16_dlmalloc_instrumented
+UNDEFINED art_quick_alloc_array_resolved32_dlmalloc
+UNDEFINED art_quick_alloc_array_resolved32_dlmalloc_instrumented
+UNDEFINED art_quick_alloc_array_resolved64_dlmalloc
+UNDEFINED art_quick_alloc_array_resolved64_dlmalloc_instrumented
+UNDEFINED art_quick_alloc_object_resolved_dlmalloc
+UNDEFINED art_quick_alloc_object_resolved_dlmalloc_instrumented
+UNDEFINED art_quick_alloc_object_initialized_dlmalloc
+UNDEFINED art_quick_alloc_object_initialized_dlmalloc_instrumented
+UNDEFINED art_quick_alloc_object_with_checks_dlmalloc
+UNDEFINED art_quick_alloc_object_with_checks_dlmalloc_instrumented
+UNDEFINED art_quick_alloc_string_object_dlmalloc
+UNDEFINED art_quick_alloc_string_object_dlmalloc_instrumented
+UNDEFINED art_quick_alloc_string_from_bytes_dlmalloc
+UNDEFINED art_quick_alloc_string_from_bytes_dlmalloc_instrumented
+UNDEFINED art_quick_alloc_string_from_chars_dlmalloc
+UNDEFINED art_quick_alloc_string_from_chars_dlmalloc_instrumented
+UNDEFINED art_quick_alloc_string_from_string_dlmalloc
+UNDEFINED art_quick_alloc_string_from_string_dlmalloc_instrumented
+UNDEFINED art_quick_alloc_array_resolved_rosalloc
+UNDEFINED art_quick_alloc_array_resolved_rosalloc_instrumented
+UNDEFINED art_quick_alloc_array_resolved8_rosalloc
+UNDEFINED art_quick_alloc_array_resolved8_rosalloc_instrumented
+UNDEFINED art_quick_alloc_array_resolved16_rosalloc
+UNDEFINED art_quick_alloc_array_resolved16_rosalloc_instrumented
+UNDEFINED art_quick_alloc_array_resolved32_rosalloc
+UNDEFINED art_quick_alloc_array_resolved32_rosalloc_instrumented
+UNDEFINED art_quick_alloc_array_resolved64_rosalloc
+UNDEFINED art_quick_alloc_array_resolved64_rosalloc_instrumented
+UNDEFINED art_quick_alloc_object_resolved_rosalloc
+UNDEFINED art_quick_alloc_object_resolved_rosalloc_instrumented
+UNDEFINED art_quick_alloc_object_initialized_rosalloc
+UNDEFINED art_quick_alloc_object_initialized_rosalloc_instrumented
+UNDEFINED art_quick_alloc_object_with_checks_rosalloc
+UNDEFINED art_quick_alloc_object_with_checks_rosalloc_instrumented
+UNDEFINED art_quick_alloc_string_object_rosalloc
+UNDEFINED art_quick_alloc_string_object_rosalloc_instrumented
+UNDEFINED art_quick_alloc_string_from_bytes_rosalloc
+UNDEFINED art_quick_alloc_string_from_bytes_rosalloc_instrumented
+UNDEFINED art_quick_alloc_string_from_chars_rosalloc
+UNDEFINED art_quick_alloc_string_from_chars_rosalloc_instrumented
+UNDEFINED art_quick_alloc_string_from_string_rosalloc
+UNDEFINED art_quick_alloc_string_from_string_rosalloc_instrumented
+UNDEFINED art_quick_alloc_array_resolved_bump_pointer
+UNDEFINED art_quick_alloc_array_resolved_bump_pointer_instrumented
+UNDEFINED art_quick_alloc_array_resolved8_bump_pointer
+UNDEFINED art_quick_alloc_array_resolved8_bump_pointer_instrumented
+UNDEFINED art_quick_alloc_array_resolved16_bump_pointer
+UNDEFINED art_quick_alloc_array_resolved16_bump_pointer_instrumented
+UNDEFINED art_quick_alloc_array_resolved32_bump_pointer
+UNDEFINED art_quick_alloc_array_resolved32_bump_pointer_instrumented
+UNDEFINED art_quick_alloc_array_resolved64_bump_pointer
+UNDEFINED art_quick_alloc_array_resolved64_bump_pointer_instrumented
+UNDEFINED art_quick_alloc_object_resolved_bump_pointer
+UNDEFINED art_quick_alloc_object_resolved_bump_pointer_instrumented
+UNDEFINED art_quick_alloc_object_initialized_bump_pointer
+UNDEFINED art_quick_alloc_object_initialized_bump_pointer_instrumented
+UNDEFINED art_quick_alloc_object_with_checks_bump_pointer
+UNDEFINED art_quick_alloc_object_with_checks_bump_pointer_instrumented
+UNDEFINED art_quick_alloc_string_object_bump_pointer
+UNDEFINED art_quick_alloc_string_object_bump_pointer_instrumented
+UNDEFINED art_quick_alloc_string_from_bytes_bump_pointer
+UNDEFINED art_quick_alloc_string_from_bytes_bump_pointer_instrumented
+UNDEFINED art_quick_alloc_string_from_chars_bump_pointer
+UNDEFINED art_quick_alloc_string_from_chars_bump_pointer_instrumented
+UNDEFINED art_quick_alloc_string_from_string_bump_pointer
+UNDEFINED art_quick_alloc_string_from_string_bump_pointer_instrumented
+UNDEFINED art_quick_alloc_array_resolved_tlab
+UNDEFINED art_quick_alloc_array_resolved_tlab_instrumented
+UNDEFINED art_quick_alloc_array_resolved8_tlab
+UNDEFINED art_quick_alloc_array_resolved8_tlab_instrumented
+UNDEFINED art_quick_alloc_array_resolved16_tlab
+UNDEFINED art_quick_alloc_array_resolved16_tlab_instrumented
+UNDEFINED art_quick_alloc_array_resolved32_tlab
+UNDEFINED art_quick_alloc_array_resolved32_tlab_instrumented
+UNDEFINED art_quick_alloc_array_resolved64_tlab
+UNDEFINED art_quick_alloc_array_resolved64_tlab_instrumented
+UNDEFINED art_quick_alloc_object_resolved_tlab
+UNDEFINED art_quick_alloc_object_resolved_tlab_instrumented
+UNDEFINED art_quick_alloc_object_initialized_tlab
+UNDEFINED art_quick_alloc_object_initialized_tlab_instrumented
+UNDEFINED art_quick_alloc_object_with_checks_tlab
+UNDEFINED art_quick_alloc_object_with_checks_tlab_instrumented
+UNDEFINED art_quick_alloc_string_object_tlab
+UNDEFINED art_quick_alloc_string_object_tlab_instrumented
+UNDEFINED art_quick_alloc_string_from_bytes_tlab
+UNDEFINED art_quick_alloc_string_from_bytes_tlab_instrumented
+UNDEFINED art_quick_alloc_string_from_chars_tlab
+UNDEFINED art_quick_alloc_string_from_chars_tlab_instrumented
+UNDEFINED art_quick_alloc_string_from_string_tlab
+UNDEFINED art_quick_alloc_string_from_string_tlab_instrumented
+UNDEFINED art_quick_alloc_array_resolved_region
+UNDEFINED art_quick_alloc_array_resolved_region_instrumented
+UNDEFINED art_quick_alloc_array_resolved8_region
+UNDEFINED art_quick_alloc_array_resolved8_region_instrumented
+UNDEFINED art_quick_alloc_array_resolved16_region
+UNDEFINED art_quick_alloc_array_resolved16_region_instrumented
+UNDEFINED art_quick_alloc_array_resolved32_region
+UNDEFINED art_quick_alloc_array_resolved32_region_instrumented
+UNDEFINED art_quick_alloc_array_resolved64_region
+UNDEFINED art_quick_alloc_array_resolved64_region_instrumented
+UNDEFINED art_quick_alloc_object_resolved_region
+UNDEFINED art_quick_alloc_object_resolved_region_instrumented
+UNDEFINED art_quick_alloc_object_initialized_region
+UNDEFINED art_quick_alloc_object_initialized_region_instrumented
+UNDEFINED art_quick_alloc_object_with_checks_region
+UNDEFINED art_quick_alloc_object_with_checks_region_instrumented
+UNDEFINED art_quick_alloc_string_object_region
+UNDEFINED art_quick_alloc_string_object_region_instrumented
+UNDEFINED art_quick_alloc_string_from_bytes_region
+UNDEFINED art_quick_alloc_string_from_bytes_region_instrumented
+UNDEFINED art_quick_alloc_string_from_chars_region
+UNDEFINED art_quick_alloc_string_from_chars_region_instrumented
+UNDEFINED art_quick_alloc_string_from_string_region
+UNDEFINED art_quick_alloc_string_from_string_region_instrumented
+UNDEFINED art_quick_alloc_array_resolved_region_tlab
+UNDEFINED art_quick_alloc_array_resolved_region_tlab_instrumented
+UNDEFINED art_quick_alloc_array_resolved8_region_tlab
+UNDEFINED art_quick_alloc_array_resolved8_region_tlab_instrumented
+UNDEFINED art_quick_alloc_array_resolved16_region_tlab
+UNDEFINED art_quick_alloc_array_resolved16_region_tlab_instrumented
+UNDEFINED art_quick_alloc_array_resolved32_region_tlab
+UNDEFINED art_quick_alloc_array_resolved32_region_tlab_instrumented
+UNDEFINED art_quick_alloc_array_resolved64_region_tlab
+UNDEFINED art_quick_alloc_array_resolved64_region_tlab_instrumented
+UNDEFINED art_quick_alloc_object_resolved_region_tlab
+UNDEFINED art_quick_alloc_object_resolved_region_tlab_instrumented
+UNDEFINED art_quick_alloc_object_initialized_region_tlab
+UNDEFINED art_quick_alloc_object_initialized_region_tlab_instrumented
+UNDEFINED art_quick_alloc_object_with_checks_region_tlab
+UNDEFINED art_quick_alloc_object_with_checks_region_tlab_instrumented
+UNDEFINED art_quick_alloc_string_object_region_tlab
+UNDEFINED art_quick_alloc_string_object_region_tlab_instrumented
+UNDEFINED art_quick_alloc_string_from_bytes_region_tlab
+UNDEFINED art_quick_alloc_string_from_bytes_region_tlab_instrumented
+UNDEFINED art_quick_alloc_string_from_chars_region_tlab
+UNDEFINED art_quick_alloc_string_from_chars_region_tlab_instrumented
+UNDEFINED art_quick_alloc_string_from_string_region_tlab
+UNDEFINED art_quick_alloc_string_from_string_region_tlab_instrumented
+
+UNDEFINED art_quick_initialize_static_storage
+UNDEFINED art_quick_resolve_type_and_verify_access
+UNDEFINED art_quick_resolve_type
+UNDEFINED art_quick_resolve_method_handle
+UNDEFINED art_quick_resolve_method_type
+UNDEFINED art_quick_resolve_string
+UNDEFINED art_quick_set8_instance
+UNDEFINED art_quick_set8_static
+UNDEFINED art_quick_set16_instance
+UNDEFINED art_quick_set16_static
+UNDEFINED art_quick_set32_instance
+UNDEFINED art_quick_set32_static
+UNDEFINED art_quick_set64_instance
+UNDEFINED art_quick_set64_static
+UNDEFINED art_quick_set_obj_instance
+UNDEFINED art_quick_set_obj_static
+UNDEFINED art_quick_get_byte_instance
+UNDEFINED art_quick_get_boolean_instance
+UNDEFINED art_quick_get_short_instance
+UNDEFINED art_quick_get_char_instance
+UNDEFINED art_quick_get32_instance
+UNDEFINED art_quick_get64_instance
+UNDEFINED art_quick_get_obj_instance
+UNDEFINED art_quick_get_byte_static
+UNDEFINED art_quick_get_boolean_static
+UNDEFINED art_quick_get_short_static
+UNDEFINED art_quick_get_char_static
+UNDEFINED art_quick_get32_static
+UNDEFINED art_quick_get64_static
+UNDEFINED art_quick_get_obj_static
+UNDEFINED art_quick_aput_obj
+UNDEFINED art_quick_lock_object_no_inline
+UNDEFINED art_quick_lock_object
+UNDEFINED art_quick_unlock_object_no_inline
+UNDEFINED art_quick_unlock_object
+UNDEFINED art_quick_invoke_direct_trampoline_with_access_check
+UNDEFINED art_quick_invoke_interface_trampoline_with_access_check
+UNDEFINED art_quick_invoke_static_trampoline_with_access_check
+UNDEFINED art_quick_invoke_super_trampoline_with_access_check
+UNDEFINED art_quick_invoke_virtual_trampoline_with_access_check
+UNDEFINED art_quick_invoke_polymorphic
+UNDEFINED art_quick_invoke_custom
+UNDEFINED art_quick_test_suspend
+UNDEFINED art_quick_deliver_exception
+UNDEFINED art_quick_throw_array_bounds
+UNDEFINED art_quick_throw_div_zero
+UNDEFINED art_quick_throw_null_pointer_exception
+UNDEFINED art_quick_throw_stack_overflow
+UNDEFINED art_quick_throw_string_bounds
+UNDEFINED art_quick_update_inline_cache
+UNDEFINED art_jni_monitored_method_start
+UNDEFINED art_jni_monitored_method_end
+UNDEFINED art_quick_indexof
diff --git a/runtime/arch/riscv64/thread_riscv64.cc b/runtime/arch/riscv64/thread_riscv64.cc
new file mode 100644
index 0000000..cb2d2ad
--- /dev/null
+++ b/runtime/arch/riscv64/thread_riscv64.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_riscv64.h"
+#include "base/enums.h"
+#include "thread.h"
+
+namespace art {
+
+void Thread::InitCpu() {
+  CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k64>().Int32Value());
+  CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k64>().Int32Value());
+  CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k64>().Int32Value());
+  CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<PointerSize::k64>().Int32Value());
+}
+
+void Thread::CleanupCpu() {
+  // Do nothing.
+}
+
+}  // namespace art
diff --git a/runtime/base/atomic_pair.h b/runtime/base/atomic_pair.h
index 3e9e820..1523b3b 100644
--- a/runtime/base/atomic_pair.h
+++ b/runtime/base/atomic_pair.h
@@ -40,18 +40,16 @@
 template <typename IntType>
 ALWAYS_INLINE static inline AtomicPair<IntType> AtomicPairLoadAcquire(
     std::atomic<AtomicPair<IntType>>* target) {
-  static_assert(std::atomic<AtomicPair<IntType>>::is_always_lock_free);
   return target->load(std::memory_order_acquire);
 }
 
 template <typename IntType>
-ALWAYS_INLINE static inline void AtomicPairStoreRelease(
-    std::atomic<AtomicPair<IntType>>* target, AtomicPair<IntType> value) {
-  static_assert(std::atomic<AtomicPair<IntType>>::is_always_lock_free);
+ALWAYS_INLINE static inline void AtomicPairStoreRelease(std::atomic<AtomicPair<IntType>>* target,
+                                                        AtomicPair<IntType> value) {
   target->store(value, std::memory_order_release);
 }
 
-// llvm does not implement 16-byte atomic operations on x86-64.
+// LLVM uses a generic lock-based implementation for x86_64; we can do better with CMPXCHG16B.
 #if defined(__x86_64__)
 ALWAYS_INLINE static inline AtomicPair<uint64_t> AtomicPairLoadAcquire(
     std::atomic<AtomicPair<uint64_t>>* target) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index f0c5953..164f069 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -141,6 +141,51 @@
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
   }
+#elif defined(__riscv)
+  // The callee save frame is pointed to by SP.
+  // | argN            |  |
+  // | ...             |  |
+  // | reg. arg spills |  |  Caller's frame
+  // | Method*         | ---
+  // | RA              |
+  // | S11/X27         |  callee-saved 11
+  // | S10/X26         |  callee-saved 10
+  // | S9/X25          |  callee-saved 9
+  // | S8/X24          |  callee-saved 8
+  // | S7/X23          |  callee-saved 7
+  // | S6/X22          |  callee-saved 6
+  // | S5/X21          |  callee-saved 5
+  // | S4/X20          |  callee-saved 4
+  // | S3/X19          |  callee-saved 3
+  // | S2/X18          |  callee-saved 2
+  // | A7/X17          |  arg 7
+  // | A6/X16          |  arg 6
+  // | A5/X15          |  arg 5
+  // | A4/X14          |  arg 4
+  // | A3/X13          |  arg 3
+  // | A2/X12          |  arg 2
+  // | A1/X11          |  arg 1 (A0 is the method => skipped)
+  // | S0/X8/FP        |  callee-saved 0 (S1 is TR => skipped)
+  // | FA7             |  float arg 8
+  // | FA6             |  float arg 7
+  // | FA5             |  float arg 6
+  // | FA4             |  float arg 5
+  // | FA3             |  float arg 4
+  // | FA2             |  float arg 3
+  // | FA1             |  float arg 2
+  // | FA0             |  float arg 1
+  // | A0/Method*      | <- sp
+  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
+  static constexpr bool kAlignPairRegister = false;
+  static constexpr bool kQuickSoftFloatAbi = false;
+  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
+  static constexpr bool kQuickSkipOddFpRegisters = false;
+  static constexpr size_t kNumQuickGprArgs = 7;
+  static constexpr size_t kNumQuickFprArgs = 8;
+  static constexpr bool kGprFprLockstep = false;
+  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
+    return (gpr_index + 1) * GetBytesPerGprSpillLocation(kRuntimeISA);  // skip S0/X8/FP
+  }
 #elif defined(__i386__)
   // The callee save frame is pointed to by SP.
   // | argN        |  |
@@ -1337,6 +1382,18 @@
   static constexpr bool kMultiGPRegistersWidened = false;
   static constexpr bool kAlignLongOnStack = false;
   static constexpr bool kAlignDoubleOnStack = false;
+#elif defined(__riscv)
+  static constexpr bool kNativeSoftFloatAbi = false;
+  static constexpr size_t kNumNativeGprArgs = 8;
+  static constexpr size_t kNumNativeFprArgs = 8;
+
+  static constexpr size_t kRegistersNeededForLong = 1;
+  static constexpr size_t kRegistersNeededForDouble = 1;
+  static constexpr bool kMultiRegistersAligned = false;
+  static constexpr bool kMultiFPRegistersWidened = false;
+  static constexpr bool kMultiGPRegistersWidened = true;
+  static constexpr bool kAlignLongOnStack = false;
+  static constexpr bool kAlignDoubleOnStack = false;
 #elif defined(__i386__)
   static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp
   static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
diff --git a/runtime/interpreter/mterp/nterp.cc b/runtime/interpreter/mterp/nterp.cc
index a4e64e3..81e80ed 100644
--- a/runtime/interpreter/mterp/nterp.cc
+++ b/runtime/interpreter/mterp/nterp.cc
@@ -33,62 +33,6 @@
 namespace art {
 namespace interpreter {
 
-bool IsNterpSupported() {
-  return !kPoisonHeapReferences && kReserveMarkingRegister;
-}
-
-bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
-  Runtime* runtime = Runtime::Current();
-  instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
-  // If the runtime is interpreter only, we currently don't use nterp as some
-  // parts of the runtime (like instrumentation) make assumption on an
-  // interpreter-only runtime to always be in a switch-like interpreter.
-  return IsNterpSupported() &&
-      !runtime->IsJavaDebuggable() &&
-      !instr->EntryExitStubsInstalled() &&
-      !instr->InterpretOnly() &&
-      !runtime->IsAotCompiler() &&
-      !instr->NeedsSlowInterpreterForListeners() &&
-      // An async exception has been thrown. We need to go to the switch interpreter. nterp doesn't
-      // know how to deal with these so we could end up never dealing with it if we are in an
-      // infinite loop.
-      !runtime->AreAsyncExceptionsThrown() &&
-      (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
-}
-
-// The entrypoint for nterp, which ArtMethods can directly point to.
-extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);
-
-// Another entrypoint, which does a clinit check at entry.
-extern "C" void ExecuteNterpWithClinitImpl() REQUIRES_SHARED(Locks::mutator_lock_);
-
-const void* GetNterpEntryPoint() {
-  return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
-}
-
-const void* GetNterpWithClinitEntryPoint() {
-  return reinterpret_cast<const void*>(interpreter::ExecuteNterpWithClinitImpl);
-}
-
-/*
- * Verify some constants used by the nterp interpreter.
- */
-void CheckNterpAsmConstants() {
-  /*
-   * If we're using computed goto instruction transitions, make sure
-   * none of the handlers overflows the byte limit.  This won't tell
-   * which one did, but if any one is too big the total size will
-   * overflow.
-   */
-  const int width = kNterpHandlerSize;
-  ptrdiff_t interp_size = reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd) -
-                          reinterpret_cast<uintptr_t>(artNterpAsmInstructionStart);
-  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
-      LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
-                 << "(did an instruction handler exceed " << width << " bytes?)";
-  }
-}
-
 inline void UpdateHotness(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
   // The hotness we will add to a method when we perform a
   // field/method/class/string lookup.
diff --git a/runtime/interpreter/mterp/nterp_impl.cc b/runtime/interpreter/mterp/nterp_impl.cc
new file mode 100644
index 0000000..98b8d6d
--- /dev/null
+++ b/runtime/interpreter/mterp/nterp_impl.cc
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "interpreter/interpreter_common.h"
+#include "nterp.h"
+
+/*
+ * Definitions for targets that support nterp.
+ */
+
+namespace art {
+
+namespace interpreter {
+
+bool IsNterpSupported() { return !kPoisonHeapReferences && kReserveMarkingRegister; }
+
+bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
+  Runtime* runtime = Runtime::Current();
+  instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
+  // If the runtime is interpreter only, we currently don't use nterp as some
+  // parts of the runtime (like instrumentation) assume that an
+  // interpreter-only runtime is always in a switch-like interpreter.
+  return IsNterpSupported() && !runtime->IsJavaDebuggable() && !instr->EntryExitStubsInstalled() &&
+         !instr->InterpretOnly() && !runtime->IsAotCompiler() &&
+         !instr->NeedsSlowInterpreterForListeners() &&
+         // An async exception has been thrown. We need to go to the switch interpreter. nterp
+         // doesn't know how to deal with these so we could end up never dealing with it if we are
+         // in an infinite loop.
+         !runtime->AreAsyncExceptionsThrown() &&
+         (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
+}
+
+// The entrypoint for nterp, which ArtMethods can directly point to.
+extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);
+
+const void* GetNterpEntryPoint() {
+  return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
+}
+
+// Another entrypoint, which does a clinit check at entry.
+extern "C" void ExecuteNterpWithClinitImpl() REQUIRES_SHARED(Locks::mutator_lock_);
+
+const void* GetNterpWithClinitEntryPoint() {
+  return reinterpret_cast<const void*>(interpreter::ExecuteNterpWithClinitImpl);
+}
+
+/*
+ * Verify some constants used by the nterp interpreter.
+ */
+void CheckNterpAsmConstants() {
+  /*
+   * If we're using computed goto instruction transitions, make sure
+   * none of the handlers overflows the byte limit.  This won't tell
+   * which one did, but if any one is too big the total size will
+   * overflow.
+   */
+  const int width = kNterpHandlerSize;
+  ptrdiff_t interp_size = reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd) -
+                          reinterpret_cast<uintptr_t>(artNterpAsmInstructionStart);
+  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
+    LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
+               << "(did an instruction handler exceed " << width << " bytes?)";
+  }
+}
+
+}  // namespace interpreter
+}  // namespace art
diff --git a/runtime/interpreter/mterp/nterp_stub.cc b/runtime/interpreter/mterp/nterp_stub.cc
index 95d11c2..35a5e77 100644
--- a/runtime/interpreter/mterp/nterp_stub.cc
+++ b/runtime/interpreter/mterp/nterp_stub.cc
@@ -27,17 +27,13 @@
 
 namespace interpreter {
 
-bool IsNterpSupported() {
-  return false;
-}
+bool IsNterpSupported() { return false; }
 
-bool CanRuntimeUseNterp() {
-  return false;
-}
+bool CanRuntimeUseNterp() { return false; }
 
-const void* GetNterpEntryPoint() {
-  return nullptr;
-}
+const void* GetNterpEntryPoint() { return nullptr; }
+
+const void* GetNterpWithClinitEntryPoint() { return nullptr; }
 
 void CheckNterpAsmConstants() {
 }
@@ -46,6 +42,10 @@
   UNIMPLEMENTED(FATAL);
 }
 
+extern "C" void ExecuteNterpWithClinitImpl() REQUIRES_SHARED(Locks::mutator_lock_) {
+  UNIMPLEMENTED(FATAL);
+}
+
 extern "C" void* artNterpAsmInstructionStart[] = { nullptr };
 extern "C" void* artNterpAsmInstructionEnd[] = { nullptr };
 
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index 8b5703e..259b3dd 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -107,7 +107,8 @@
   LockWord lw(static_cast<uint32_t>(result));
   uint32_t rb_state = lw.ReadBarrierState();
   return rb_state;
-#elif defined(__i386__) || defined(__x86_64__)
+#elif defined(__i386__) || defined(__x86_64__) || defined(__riscv)
+  // TODO(riscv64): add arch-specific implementation
   LockWord lw = GetLockWord(false);
   // i386/x86_64 don't need fake address dependency. Use a compiler fence to avoid compiler
   // reordering.
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 02cba22..040d819 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -2720,6 +2720,7 @@
       break;
     case InstructionSet::kArm:
     case InstructionSet::kArm64:
+    case InstructionSet::kRiscv64:
     case InstructionSet::kX86:
     case InstructionSet::kX86_64:
       break;
diff --git a/test/Android.bp b/test/Android.bp
index c5322cb..651541c 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -94,6 +94,9 @@
         android_arm64: {
             relative_install_path: "art/arm64",
         },
+        android_riscv64: {
+            relative_install_path: "art/riscv64",
+        },
         android_x86: {
             relative_install_path: "art/x86",
         },
@@ -125,6 +128,9 @@
         android_arm64: {
             relative_install_path: "com.android.art/lib64",
         },
+        android_riscv64: {
+            relative_install_path: "com.android.art/lib64",
+        },
         android_x86: {
             relative_install_path: "com.android.art/lib",
         },
diff --git a/tools/signal_dumper/signal_dumper.cc b/tools/signal_dumper/signal_dumper.cc
index e88ac19..bedb8dc 100644
--- a/tools/signal_dumper/signal_dumper.cc
+++ b/tools/signal_dumper/signal_dumper.cc
@@ -368,11 +368,13 @@
 }
 
 void DumpABI(pid_t forked_pid) {
-  enum class ABI { kArm, kArm64, kX86, kX86_64 };
+  enum class ABI { kArm, kArm64, kRiscv64, kX86, kX86_64 };
 #if defined(__arm__)
   constexpr ABI kDumperABI = ABI::kArm;
 #elif defined(__aarch64__)
   constexpr ABI kDumperABI = ABI::kArm64;
+#elif defined(__riscv)
+  constexpr ABI kDumperABI = ABI::kRiscv64;
 #elif defined(__i386__)
   constexpr ABI kDumperABI = ABI::kX86;
 #elif defined(__x86_64__)
@@ -394,6 +396,9 @@
       case ABI::kArm64:
         to_print = ABI::kArm64;
         break;
+      case ABI::kRiscv64:
+        to_print = ABI::kRiscv64;
+        break;
       case ABI::kX86:
       case ABI::kX86_64:
         to_print = ABI::kX86_64;
@@ -408,6 +413,9 @@
       case ABI::kArm64:
         to_print = io_vec.iov_len == 18 * sizeof(uint32_t) ? ABI::kArm : ABI::kArm64;
         break;
+      case ABI::kRiscv64:
+        to_print = ABI::kRiscv64;
+        break;
       case ABI::kX86:
       case ABI::kX86_64:
         to_print = io_vec.iov_len == 17 * sizeof(uint32_t) ? ABI::kX86 : ABI::kX86_64;
@@ -424,6 +432,9 @@
     case ABI::kArm64:
       abi_str = "arm64";
       break;
+    case ABI::kRiscv64:
+      abi_str = "riscv64";
+      break;
     case ABI::kX86:
       abi_str = "x86";
       break;