riscv64: integer opcodes
Test: Run all interpreter tests, which exercise these opcodes,
on a Linux RISC-V VM.
(1) setup
lunch aosp_riscv64-userdebug
export ART_TEST_SSH_USER=ubuntu
export ART_TEST_SSH_HOST=localhost
export ART_TEST_SSH_PORT=10001
export ART_TEST_ON_VM=true
. art/tools/buildbot-utils.sh
art/tools/buildbot-build.sh --target
# Create, boot and configure the VM.
art/tools/buildbot-vm.sh create
art/tools/buildbot-vm.sh boot
art/tools/buildbot-vm.sh setup-ssh # password: 'ubuntu'
art/tools/buildbot-cleanup-device.sh
art/tools/buildbot-setup-device.sh
art/tools/buildbot-sync.sh
(2) test
art/test.py --target -r --no-prebuild --ndebug --64 -j 12 --cdex-none --interpreter
Also exercised on cuttlefish boot. No SIGSEGV or SIGILL noted.
Bug: 283082047
Change-Id: Ib0f6a980d6bf19475a2d97e98b4bab98e98cca6e
diff --git a/runtime/arch/riscv64/quick_entrypoints_riscv64.S b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
index 6170c19..ecaf84c 100644
--- a/runtime/arch/riscv64/quick_entrypoints_riscv64.S
+++ b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
@@ -552,19 +552,36 @@
END art_quick_proxy_invoke_handler
+.macro NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name
+.extern \cxx_name
+ENTRY \c_name
+ SETUP_SAVE_EVERYTHING_FRAME // save all registers as basis for long jump context.
+ mv a0, xSELF // pass Thread::Current.
+ call \cxx_name // \cxx_name(Thread*).
+ ebreak
+END \c_name
+.endm
+
+
.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context.
mv a1, xSELF // pass Thread::Current.
- call \cxx_name // \cxx_name(arg, Thread*).
+ call \cxx_name // \cxx_name(arg, Thread*).
ebreak
END \c_name
.endm
+
+// Called by managed code to deliver an ArithmeticException.
+NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_div_zero, artThrowDivZeroFromCode
+
+
// Called by managed code to deliver an exception.
ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode
+
// Called to attempt to execute an obsolete method.
ONE_ARG_RUNTIME_EXCEPTION art_invoke_obsolete_method_stub, artInvokeObsoleteMethod
@@ -936,7 +953,6 @@
UNDEFINED art_quick_invoke_polymorphic
UNDEFINED art_quick_invoke_custom
UNDEFINED art_quick_throw_array_bounds
-UNDEFINED art_quick_throw_div_zero
UNDEFINED art_quick_throw_null_pointer_exception
UNDEFINED art_quick_throw_stack_overflow
UNDEFINED art_quick_throw_string_bounds
diff --git a/runtime/interpreter/mterp/riscv64/arithmetic.S b/runtime/interpreter/mterp/riscv64/arithmetic.S
index 30cb903..a75a964 100644
--- a/runtime/interpreter/mterp/riscv64/arithmetic.S
+++ b/runtime/interpreter/mterp/riscv64/arithmetic.S
@@ -1,248 +1,621 @@
-%def binop(preinstr="", result="r0", chkzero="0", instr=""):
- unimp
+//
+// unop vA, vB
+// Format 12x: B|A|op
+// (see floating_point.S for float/double ops)
+//
-%def binop2addr(preinstr="", result="r0", chkzero="0", instr=""):
- unimp
-
-%def binopLit16(result="r0", chkzero="0", instr=""):
- unimp
-
-%def binopLit8(extract="unimp", result="r0", chkzero="0", instr=""):
- unimp
-
-%def binopWide(preinstr="", result0="r0", result1="r1", chkzero="0", instr=""):
- unimp
-
-%def binopWide2addr(preinstr="", result0="r0", result1="r1", chkzero="0", instr=""):
- unimp
-
-%def unop(preinstr="", instr=""):
- unimp
-
-%def unopNarrower(preinstr="", instr=""):
- unimp
-
-%def unopWide(preinstr="", instr=""):
- unimp
-
-%def unopWider(preinstr="", instr=""):
- unimp
-
-%def op_add_int():
- unimp
-
-%def op_add_int_2addr():
- unimp
-
-%def op_add_int_lit16():
- unimp
-
-%def op_add_int_lit8():
- unimp
-
-%def op_add_long():
- unimp
-
-%def op_add_long_2addr():
- unimp
-
-%def op_and_int():
- unimp
-
-%def op_and_int_2addr():
- unimp
-
-%def op_and_int_lit16():
- unimp
-
-%def op_and_int_lit8():
- unimp
-
-%def op_and_long():
- unimp
-
-%def op_and_long_2addr():
- unimp
-
-%def op_cmp_long():
- unimp
-
-%def op_div_int():
- unimp
-
-%def op_div_int_2addr():
- unimp
-
-%def op_div_int_lit16():
- unimp
-
-%def op_div_int_lit8():
- unimp
-
-%def op_div_long():
- unimp
-
-%def op_div_long_2addr():
- unimp
-
-%def op_int_to_byte():
- unimp
-
-%def op_int_to_char():
- unimp
-
-%def op_int_to_long():
- unimp
-
-%def op_int_to_short():
- unimp
-
-%def op_long_to_int():
- unimp
-
-%def op_mul_int():
- unimp
-
-%def op_mul_int_2addr():
- unimp
-
-%def op_mul_int_lit16():
- unimp
-
-%def op_mul_int_lit8():
- unimp
-
-%def op_mul_long():
- unimp
-
-%def op_mul_long_2addr():
- unimp
-
+// neg-int vA, vB
+// Format 12x: B|A|7b
%def op_neg_int():
- unimp
+% generic_unop(instr="negw t1, t1")
-%def op_neg_long():
- unimp
-
+// not-int vA, vB
+// Format 12x: B|A|7c
%def op_not_int():
- unimp
+% generic_unop(instr="not t1, t1")
+// neg-long vA, vB
+// Format 12x: B|A|7d
+%def op_neg_long():
+% generic_unop(instr="neg t1, t1", is_wide="1")
+
+// not-long vA, vB
+// Format 12x: B|A|7e
%def op_not_long():
- unimp
+% generic_unop(instr="not t1, t1", is_wide="1")
-%def op_or_int():
- unimp
+// int-to-long vA, vB
+// Format 12x: B|A|81
+// Note: Sign extension of int32 into int64.
+// Read from 32-bit vreg and write to 64-bit vreg, hence a custom impl.
+%def op_int_to_long():
+ srliw t1, xINST, 12 // t1 := B
+ srliw t2, xINST, 8 // t2 := B|A
+ GET_VREG t1, t1 // t1 := fp[B] with sign extension to 64 bits
+ FETCH_ADVANCE_INST 1 // advance xPC, load xINST
+ and t2, t2, 0xF // t2 := A
+ GET_INST_OPCODE t3 // t3 holds next opcode
+ SET_VREG_WIDE t1, t2 // fp[A:A+1] := t1
+ GOTO_OPCODE t3 // continue to next
-%def op_or_int_2addr():
- unimp
+// long-to-int vA, vB
+// Format 12x: B|A|84
+// Note: Truncation of int64 into int32.
+// Implemented as a read of the low 32 bits of vB, written to vA.
+// Note: instr is intentionally empty.
+%def op_long_to_int():
+% generic_unop()
-%def op_or_int_lit16():
- unimp
+// int-to-byte vA, vB
+// Format 12x: B|A|8d
+// Note: Truncation of int32 to int8, sign extending the result.
+%def op_int_to_byte():
+% generic_unop(instr="sext.b t1, t1")
-%def op_or_int_lit8():
- unimp
+// int-to-char vA, vB
+// Format 12x: B|A|8e
+// Note: Truncation of int32 to uint16, without sign extension.
+%def op_int_to_char():
+% generic_unop(instr="zext.h t1, t1")
-%def op_or_long():
- unimp
+// int-to-short vA, vB
+// Format 12x: B|A|8f
+// Note: Truncation of int32 to int16, sign extending the result.
+%def op_int_to_short():
+% generic_unop(instr="sext.h t1, t1")
-%def op_or_long_2addr():
- unimp
+// unop boilerplate
+// instr: operand held in t1, result written to t1.
+// instr must not clobber t2.
+// Clobbers: t0, t1, t2
+%def generic_unop(instr="", is_wide="0"):
+ srliw t1, xINST, 12 // t1 := B
+ srliw t2, xINST, 8 // t2 := B|A
+ GET_VREG t1, t1, is_wide=$is_wide
+ // t1 := fp[B]
+ and t2, t2, 0xF // t2 := A
+ FETCH_ADVANCE_INST 1 // advance xPC, load xINST
+ $instr // read t1, write result to t1.
+ // do not clobber t2!
+ SET_VREG t1, t2, is_wide=$is_wide
+ // fp[A] := t1
+ GET_INST_OPCODE t0 // t0 holds next opcode
+ GOTO_OPCODE t0 // continue to next
-%def op_rem_int():
- unimp
+//
+// binop vAA, vBB, vCC
+// Format 23x: AA|op CC|BB
+// (see floating_point.S for float/double ops)
+//
-%def op_rem_int_2addr():
- unimp
+// add-int vAA, vBB, vCC
+// Format 23x: AA|90 CC|BB
+%def op_add_int():
+% generic_binop(instr="addw t1, t1, t2")
-%def op_rem_int_lit16():
- unimp
-
-%def op_rem_int_lit8():
- unimp
-
-%def op_rem_long():
- unimp
-
-%def op_rem_long_2addr():
- unimp
-
-%def op_rsub_int():
- unimp
-
-%def op_rsub_int_lit8():
- unimp
-
-%def op_shl_int():
- unimp
-
-%def op_shl_int_2addr():
- unimp
-
-%def op_shl_int_lit8():
- unimp
-
-%def op_shl_long():
- unimp
-
-%def op_shl_long_2addr():
- unimp
-
-%def op_shr_int():
- unimp
-
-%def op_shr_int_2addr():
- unimp
-
-%def op_shr_int_lit8():
- unimp
-
-%def op_shr_long():
- unimp
-
-%def op_shr_long_2addr():
- unimp
-
+// sub-int vAA, vBB, vCC
+// Format 23x: AA|91 CC|BB
%def op_sub_int():
- unimp
+% generic_binop(instr="subw t1, t1, t2")
-%def op_sub_int_2addr():
- unimp
+// mul-int vAA, vBB, vCC
+// Format 23x: AA|92 CC|BB
+%def op_mul_int():
+% generic_binop(instr="mulw t1, t1, t2")
-%def op_sub_long():
- unimp
+// div-int vAA, vBB, vCC
+// Format 23x: AA|93 CC|BB
+// Note: Twos-complement division, rounded towards zero (that is, truncated to integer). This throws
+// ArithmeticException if b == 0.
+%def op_div_int():
+% generic_binop(instr="divw t1, t1, t2", divz_throw="1")
-%def op_sub_long_2addr():
- unimp
+// rem-int vAA, vBB, vCC
+// Format 23x: AA|94 CC|BB
+// Note: Twos-complement remainder after division. The sign of the result is the same as that of a,
+// and it is more precisely defined as result == a - (a / b) * b. This throws ArithmeticException if
+// b == 0.
+%def op_rem_int():
+% generic_binop(instr="remw t1, t1, t2", divz_throw="1")
-%def op_ushr_int():
- unimp
+// and-int vAA, vBB, vCC
+// Format 23x: AA|95 CC|BB
+%def op_and_int():
+% generic_binop(instr="and t1, t1, t2")
-%def op_ushr_int_2addr():
- unimp
+// or-int vAA, vBB, vCC
+// Format 23x: AA|96 CC|BB
+%def op_or_int():
+% generic_binop(instr="or t1, t1, t2")
-%def op_ushr_int_lit8():
- unimp
-
-%def op_ushr_long():
- unimp
-
-%def op_ushr_long_2addr():
- unimp
-
+// xor-int vAA, vBB, vCC
+// Format 23x: AA|97 CC|BB
%def op_xor_int():
- unimp
+% generic_binop(instr="xor t1, t1, t2")
-%def op_xor_int_2addr():
- unimp
+// shl-int vAA, vBB, vCC
+// Format 23x: AA|98 CC|BB
+// Note: SLLW uses t2[4:0] for the shift amount.
+%def op_shl_int():
+% generic_binop(instr="sllw t1, t1, t2")
-%def op_xor_int_lit16():
- unimp
+// shr-int vAA, vBB, vCC
+// Format 23x: AA|99 CC|BB
+// Note: SRAW uses t2[4:0] for the shift amount.
+%def op_shr_int():
+% generic_binop(instr="sraw t1, t1, t2")
-%def op_xor_int_lit8():
- unimp
+// ushr-int vAA, vBB, vCC
+// Format 23x: AA|9a CC|BB
+// Note: SRLW uses t2[4:0] for the shift amount.
+%def op_ushr_int():
+% generic_binop(instr="srlw t1, t1, t2")
+// add-long vAA, vBB, vCC
+// Format 23x: AA|9b CC|BB
+%def op_add_long():
+% generic_binop(instr="add t1, t1, t2", is_wide="1")
+
+// sub-long vAA, vBB, vCC
+// Format 23x: AA|9c CC|BB
+%def op_sub_long():
+% generic_binop(instr="sub t1, t1, t2", is_wide="1")
+
+// mul-long vAA, vBB, vCC
+// Format 23x: AA|9d CC|BB
+%def op_mul_long():
+% generic_binop(instr="mul t1, t1, t2", is_wide="1")
+
+// div-long vAA, vBB, vCC
+// Format 23x: AA|9e CC|BB
+// Note: Twos-complement division, rounded towards zero (that is, truncated to integer). This throws
+// ArithmeticException if b == 0.
+%def op_div_long():
+% generic_binop(instr="div t1, t1, t2", divz_throw="1", is_wide="1")
+
+// rem-long vAA, vBB, vCC
+// Format 23x: AA|9f CC|BB
+// Note: Twos-complement remainder after division. The sign of the result is the same as that of a,
+// and it is more precisely defined as result == a - (a / b) * b. This throws ArithmeticException if
+// b == 0.
+%def op_rem_long():
+% generic_binop(instr="rem t1, t1, t2", divz_throw="1", is_wide="1")
+
+// and-long vAA, vBB, vCC
+// Format 23x: AA|a0 CC|BB
+%def op_and_long():
+% generic_binop(instr="and t1, t1, t2", is_wide="1")
+
+// or-long vAA, vBB, vCC
+// Format 23x: AA|a1 CC|BB
+%def op_or_long():
+% generic_binop(instr="or t1, t1, t2", is_wide="1")
+
+// xor-long vAA, vBB, vCC
+// Format 23x: AA|a2 CC|BB
%def op_xor_long():
- unimp
+% generic_binop(instr="xor t1, t1, t2", is_wide="1")
+// shl-long vAA, vBB, vCC
+// Format 23x: AA|a3 CC|BB
+// Note: SLL uses t2[5:0] for the shift amount.
+%def op_shl_long():
+% generic_shift_wide(instr="sll t1, t1, t2")
+
+// shr-long vAA, vBB, vCC
+// Format 23x: AA|a4 CC|BB
+// Note: SRA uses t2[5:0] for the shift amount.
+%def op_shr_long():
+% generic_shift_wide(instr="sra t1, t1, t2")
+
+// ushr-long vAA, vBB, vCC
+// Format 23x: AA|a5 CC|BB
+// Note: SRL uses t2[5:0] for the shift amount.
+%def op_ushr_long():
+% generic_shift_wide(instr="srl t1, t1, t2")
+
+// binop boilerplate
+// instr: operands held in t1 and t2, result written to t1.
+// instr must not throw. Exceptions to be thrown prior to instr.
+// instr must not clobber t3.
+//
+// The divz_throw flag generates check-and-throw code for div/0.
+// The is_wide flag ensures vregs are read and written in 64-bit widths.
+// Clobbers: t0, t1, t2, t3
+%def generic_binop(instr="", divz_throw="0", is_wide="0"):
+ FETCH t1, count=1 // t1 := CC|BB
+ srliw t3, xINST, 8 // t3 := AA
+ srliw t2, t1, 8 // t2 := CC
+ and t1, t1, 0xFF // t1 := BB
+ GET_VREG t2, t2, is_wide=$is_wide // t2 := fp[CC]
+ GET_VREG t1, t1, is_wide=$is_wide // t1 := fp[BB]
+ .if $divz_throw
+ beqz t2, 1f // Must throw before FETCH_ADVANCE_INST.
+ .endif
+ FETCH_ADVANCE_INST 2 // advance xPC, load xINST
+ $instr // read t1 and t2, write result to t1.
+ // do not clobber t3!
+ GET_INST_OPCODE t2 // t2 holds next opcode
+ SET_VREG t1, t3, is_wide=$is_wide // fp[AA] := t1
+ GOTO_OPCODE t2 // continue to next
+1:
+ .if $divz_throw
+ tail common_errDivideByZero
+ .endif
+
+// binop wide shift boilerplate
+// instr: operands held in t1 (64-bit) and t2 (32-bit), result written to t1.
+// instr must not clobber t3.
+// Clobbers: t0, t1, t2, t3
+//
+// Note: Contrary to other -long mathematical operations (which take register pairs for both their
+// first and their second source), shl-long, shr-long, and ushr-long take a register pair for their
+// first source (the value to be shifted), but a single register for their second source (the
+// shifting distance).
+%def generic_shift_wide(instr=""):
+ FETCH t1, count=1 // t1 := CC|BB
+ srliw t3, xINST, 8 // t3 := AA
+ srliw t2, t1, 8 // t2 := CC
+ and t1, t1, 0xFF // t1 := BB
+ GET_VREG t2, t2 // t2 := fp[CC]
+ GET_VREG_WIDE t1, t1 // t1 := fp[BB]
+ FETCH_ADVANCE_INST 2 // advance xPC, load xINST
+ $instr // read t1 and t2, write result to t1.
+ // do not clobber t3!
+ GET_INST_OPCODE t2 // t2 holds next opcode
+ SET_VREG_WIDE t1, t3 // fp[AA] := t1
+ GOTO_OPCODE t2 // continue to next
+
+//
+// binop/2addr vA, vB
+// Format 12x: B|A|op
+// (see floating_point.S for float/double ops)
+//
+
+// add-int/2addr vA, vB
+// Format 12x: B|A|b0
+%def op_add_int_2addr():
+% generic_binop_2addr(instr="addw t1, t1, t2")
+
+// sub-int/2addr vA, vB
+// Format 12x: B|A|b1
+%def op_sub_int_2addr():
+% generic_binop_2addr(instr="subw t1, t1, t2")
+
+// mul-int/2addr vA, vB
+// Format 12x: B|A|b2
+%def op_mul_int_2addr():
+% generic_binop_2addr(instr="mulw t1, t1, t2")
+
+// div-int/2addr vA, vB
+// Format 12x: B|A|b3
+// Note: Twos-complement division, rounded towards zero (that is, truncated to integer). This throws
+// ArithmeticException if b == 0.
+%def op_div_int_2addr():
+% generic_binop_2addr(instr="divw t1, t1, t2", divz_throw="1")
+
+// rem-int/2addr vA, vB
+// Format 12x: B|A|b4
+// Note: Twos-complement remainder after division. The sign of the result is the same as that of a,
+// and it is more precisely defined as result == a - (a / b) * b. This throws ArithmeticException if
+// b == 0.
+%def op_rem_int_2addr():
+% generic_binop_2addr(instr="remw t1, t1, t2", divz_throw="1")
+
+// and-int/2addr vA, vB
+// Format 12x: B|A|b5
+%def op_and_int_2addr():
+% generic_binop_2addr(instr="and t1, t1, t2")
+
+// or-int/2addr vA, vB
+// Format 12x: B|A|b6
+%def op_or_int_2addr():
+% generic_binop_2addr(instr="or t1, t1, t2")
+
+// xor-int/2addr vA, vB
+// Format 12x: B|A|b7
+%def op_xor_int_2addr():
+% generic_binop_2addr(instr="xor t1, t1, t2")
+
+// shl-int/2addr vA, vB
+// Format 12x: B|A|b8
+%def op_shl_int_2addr():
+% generic_binop_2addr(instr="sllw t1, t1, t2")
+
+// shr-int/2addr vA, vB
+// Format 12x: B|A|b9
+%def op_shr_int_2addr():
+% generic_binop_2addr(instr="sraw t1, t1, t2")
+
+// ushr-int/2addr vA, vB
+// Format 12x: B|A|ba
+%def op_ushr_int_2addr():
+% generic_binop_2addr(instr="srlw t1, t1, t2")
+
+// add-long/2addr vA, vB
+// Format 12x: B|A|bb
+%def op_add_long_2addr():
+% generic_binop_2addr(instr="add t1, t1, t2", is_wide="1")
+
+// sub-long/2addr vA, vB
+// Format 12x: B|A|bc
+%def op_sub_long_2addr():
+% generic_binop_2addr(instr="sub t1, t1, t2", is_wide="1")
+
+// mul-long/2addr vA, vB
+// Format 12x: B|A|bd
+%def op_mul_long_2addr():
+% generic_binop_2addr(instr="mul t1, t1, t2", is_wide="1")
+
+// div-long/2addr vA, vB
+// Format 12x: B|A|be
+%def op_div_long_2addr():
+% generic_binop_2addr(instr="div t1, t1, t2", divz_throw="1", is_wide="1")
+
+// rem-long/2addr vA, vB
+// Format 12x: B|A|bf
+%def op_rem_long_2addr():
+% generic_binop_2addr(instr="rem t1, t1, t2", divz_throw="1", is_wide="1")
+
+// and-long/2addr vA, vB
+// Format 12x: B|A|c0
+%def op_and_long_2addr():
+% generic_binop_2addr(instr="and t1, t1, t2", is_wide="1")
+
+// or-long/2addr vA, vB
+// Format 12x: B|A|c1
+%def op_or_long_2addr():
+% generic_binop_2addr(instr="or t1, t1, t2", is_wide="1")
+
+// xor-long/2addr vA, vB
+// Format 12x: B|A|c2
%def op_xor_long_2addr():
- unimp
+% generic_binop_2addr(instr="xor t1, t1, t2", is_wide="1")
+
+// shl-long/2addr vA, vB
+// Format 12x: B|A|c3
+// Note: SLL uses t2[5:0] for the shift amount.
+%def op_shl_long_2addr():
+% generic_shift_wide_2addr(instr="sll t1, t1, t2")
+
+// shr-long/2addr vA, vB
+// Format 12x: B|A|c4
+// Note: SRA uses t2[5:0] for the shift amount.
+%def op_shr_long_2addr():
+% generic_shift_wide_2addr(instr="sra t1, t1, t2")
+
+// ushr-long/2addr vA, vB
+// Format 12x: B|A|c5
+// Note: SRL uses t2[5:0] for the shift amount.
+%def op_ushr_long_2addr():
+% generic_shift_wide_2addr(instr="srl t1, t1, t2")
+
+// binop 2addr boilerplate
+// instr: operands held in t1 and t2, result written to t1.
+// instr must not throw. Exceptions to be thrown prior to instr.
+// instr must not clobber t3.
+//
+// The divz_throw flag generates check-and-throw code for div/0.
+// The is_wide flag ensures vregs are read and written in 64-bit widths.
+// Clobbers: t0, t1, t2, t3, t4
+%def generic_binop_2addr(instr="", divz_throw="0", is_wide="0"):
+ srliw t2, xINST, 12 // t2 := B
+ srliw t3, xINST, 8 // t3 := B|A
+ GET_VREG t2, t2, is_wide=$is_wide
+ // t2 := fp[B]
+ and t3, t3, 0xF // t3 := A (cached for SET_VREG)
+ mv t4, t3 // t4 := A
+ GET_VREG t1, t4, is_wide=$is_wide
+ // t1 := fp[A]
+ .if $divz_throw
+ beqz t2, 1f // Must throw before FETCH_ADVANCE_INST.
+ .endif
+ FETCH_ADVANCE_INST 1 // advance xPC, load xINST
+ $instr // read t1 and t2, write result to t1.
+ // do not clobber t3!
+ GET_INST_OPCODE t2 // t2 holds next opcode
+ SET_VREG t1, t3, is_wide=$is_wide
+ // fp[A] := t1
+ GOTO_OPCODE t2 // continue to next
+1:
+ .if $divz_throw
+ tail common_errDivideByZero
+ .endif
+
+// binop wide shift 2addr boilerplate
+// instr: operands held in t1 (64-bit) and t2 (32-bit), result written to t1.
+// instr must not clobber t3.
+// Clobbers: t0, t1, t2, t3, t4
+//
+// Note: Contrary to other -long/2addr mathematical operations (which take register pairs for both
+// their destination/first source and their second source), shl-long/2addr, shr-long/2addr, and
+// ushr-long/2addr take a register pair for their destination/first source (the value to be
+// shifted), but a single register for their second source (the shifting distance).
+%def generic_shift_wide_2addr(instr=""):
+ srliw t2, xINST, 12 // t2 := B
+ srliw t3, xINST, 8 // t3 := B|A
+ GET_VREG t2, t2 // t2 := fp[B]
+ and t3, t3, 0xF // t3 := A (cached for SET_VREG_WIDE)
+ mv t4, t3 // t4 := A
+ FETCH_ADVANCE_INST 1 // advance xPC, load xINST
+ GET_VREG_WIDE t1, t4 // t1 := fp[A]
+ $instr // read t1 and t2, write result to t1.
+ // do not clobber t3!
+ GET_INST_OPCODE t2 // t2 holds next opcode
+ SET_VREG_WIDE t1, t3 // fp[A] := t1
+ GOTO_OPCODE t2 // continue to next
+
+//
+// binop/lit16 vA, vB, #+CCCC
+// Format 22s: B|A|op CCCC
+//
+
+// add-int/lit16 vA, vB, #+CCCC
+// Format 22s: B|A|d0 CCCC
+%def op_add_int_lit16():
+% generic_binop_lit16(instr="addw t1, t1, t2")
+
+// rsub-int vA, vB, #+CCCC
+// Format 22s: B|A|d1 CCCC
+// Note: rsub-int does not have a suffix since this version is the main opcode of its family.
+// Note: Twos-complement reverse subtraction.
+%def op_rsub_int():
+% generic_binop_lit16(instr="subw t1, t2, t1")
+
+// mul-int/lit16 vA, vB, #+CCCC
+// Format 22s: B|A|d2 CCCC
+%def op_mul_int_lit16():
+% generic_binop_lit16(instr="mulw t1, t1, t2")
+
+// div-int/lit16 vA, vB, #+CCCC
+// Format 22s: B|A|d3 CCCC
+// Note: Twos-complement division, rounded towards zero (that is, truncated to integer). This throws
+// ArithmeticException if b == 0.
+%def op_div_int_lit16():
+% generic_binop_lit16(instr="divw t1, t1, t2", divz_throw="1")
+
+// rem-int/lit16 vA, vB, #+CCCC
+// Format 22s: B|A|d4 CCCC
+// Note: Twos-complement remainder after division. The sign of the result is the same as that of a,
+// and it is more precisely defined as result == a - (a / b) * b. This throws ArithmeticException if
+// b == 0.
+%def op_rem_int_lit16():
+% generic_binop_lit16(instr="remw t1, t1, t2", divz_throw="1")
+
+// and-int/lit16 vA, vB, #+CCCC
+// Format 22s: B|A|d5 CCCC
+%def op_and_int_lit16():
+% generic_binop_lit16(instr="and t1, t1, t2")
+
+// or-int/lit16 vA, vB, #+CCCC
+// Format 22s: B|A|d6 CCCC
+%def op_or_int_lit16():
+% generic_binop_lit16(instr="or t1, t1, t2")
+
+// xor-int/lit16 vA, vB, #+CCCC
+// Format 22s: B|A|d7 CCCC
+%def op_xor_int_lit16():
+% generic_binop_lit16(instr="xor t1, t1, t2")
+
+// binop lit16 boilerplate
+// instr: operands held in t1 and t2, result written to t1.
+// instr must not throw. Exceptions to be thrown prior to instr.
+// instr must not clobber t3.
+//
+// The divz_throw flag generates check-and-throw code for div/0.
+// Clobbers: t0, t1, t2, t3
+%def generic_binop_lit16(instr="", divz_throw="0"):
+ FETCH t2, count=1, signed=1 // t2 := ssssCCCC
+ srliw t1, xINST, 12 // t1 := B
+ srliw t3, xINST, 8 // t3 := B|A
+ .if $divz_throw
+ beqz t2, 1f // Must throw before FETCH_ADVANCE_INST.
+ .endif
+ GET_VREG t1, t1 // t1 := fp[B]
+ and t3, t3, 0xF // t3 := A
+ FETCH_ADVANCE_INST 2 // advance xPC, load xINST
+ $instr // read t1 and t2, write result to t1.
+ // do not clobber t3!
+ GET_INST_OPCODE t2 // t2 holds next opcode
+ SET_VREG t1, t3 // fp[A] := t1
+ GOTO_OPCODE t2 // continue to next
+1:
+ .if $divz_throw
+ tail common_errDivideByZero
+ .endif
+
+//
+// binop/lit8 vAA, vBB, #+CC
+// Format 22b: AA|op CC|BB
+//
+
+// add-int/lit8, vAA, vBB, #+CC
+// Format 22b: AA|d8, CC|BB
+%def op_add_int_lit8():
+% generic_binop_lit8(instr="addw t1, t1, t2")
+
+// rsub-int/lit8, vAA, vBB, #+CC
+// Format 22b: AA|d9, CC|BB
+// Note: Twos-complement reverse subtraction.
+%def op_rsub_int_lit8():
+% generic_binop_lit8(instr="subw t1, t2, t1")
+
+// mul-int/lit8, vAA, vBB, #+CC
+// Format 22b: AA|da, CC|BB
+%def op_mul_int_lit8():
+% generic_binop_lit8(instr="mulw t1, t1, t2")
+
+// div-int/lit8, vAA, vBB, #+CC
+// Format 22b: AA|db, CC|BB
+// Note: Twos-complement division, rounded towards zero (that is, truncated to integer). This throws
+// ArithmeticException if b == 0.
+%def op_div_int_lit8():
+% generic_binop_lit8(instr="divw t1, t1, t2", divz_throw="1")
+
+// rem-int/lit8, vAA, vBB, #+CC
+// Format 22b: AA|dc, CC|BB
+// Note: Twos-complement remainder after division. The sign of the result is the same as that of a,
+// and it is more precisely defined as result == a - (a / b) * b. This throws ArithmeticException if
+// b == 0.
+%def op_rem_int_lit8():
+% generic_binop_lit8(instr="remw t1, t1, t2", divz_throw="1")
+
+// and-int/lit8, vAA, vBB, #+CC
+// Format 22b: AA|dd, CC|BB
+%def op_and_int_lit8():
+% generic_binop_lit8(instr="and t1, t1, t2")
+
+// or-int/lit8, vAA, vBB, #+CC
+// Format 22b: AA|de, CC|BB
+%def op_or_int_lit8():
+% generic_binop_lit8(instr="or t1, t1, t2")
+
+// xor-int/lit8, vAA, vBB, #+CC
+// Format 22b: AA|df, CC|BB
+%def op_xor_int_lit8():
+% generic_binop_lit8(instr="xor t1, t1, t2")
+
+// shl-int/lit8, vAA, vBB, #+CC
+// Format 22b: AA|e0, CC|BB
+// Note: SLLW uses t2[4:0] for the shift amount.
+%def op_shl_int_lit8():
+% generic_binop_lit8(instr="sllw t1, t1, t2")
+
+// shr-int/lit8, vAA, vBB, #+CC
+// Format 22b: AA|e1, CC|BB
+// Note: SRAW uses t2[4:0] for the shift amount.
+%def op_shr_int_lit8():
+% generic_binop_lit8(instr="sraw t1, t1, t2")
+
+// ushr-int/lit8, vAA, vBB, #+CC
+// Format 22b: AA|e2, CC|BB
+// Note: SRLW uses t2[4:0] for the shift amount.
+%def op_ushr_int_lit8():
+% generic_binop_lit8(instr="srlw t1, t1, t2")
+
+// binop lit8 boilerplate
+// instr: operands held in t1 and t2, result written to t1.
+// instr must not throw. Exceptions to be thrown prior to instr.
+// instr must not clobber t3.
+//
+// The divz_throw flag generates check-and-throw code for div/0.
+// Clobbers: t0, t1, t2, t3
+%def generic_binop_lit8(instr="", divz_throw="0"):
+ FETCH t1, count=1, signed=1 // t1 := ssssCC|BB
+ srliw t3, xINST, 8 // t3 := AA
+ sraiw t2, t1, 8 // t2 := ssssssCC
+ andi t1, t1, 0xFF // t1 := BB
+ .if $divz_throw
+ beqz t2, 1f // Must throw before FETCH_ADVANCE_INST.
+ .endif
+ GET_VREG t1, t1 // t1 := fp[BB]
+ FETCH_ADVANCE_INST 2 // advance xPC, load xINST
+ $instr // read t1 and t2, write result to t1.
+ // do not clobber t3!
+ GET_INST_OPCODE t2 // t2 holds next opcode
+ SET_VREG t1, t3 // fp[AA] := t1
+ GOTO_OPCODE t2 // continue to next
+1:
+ .if $divz_throw
+ tail common_errDivideByZero
+ .endif
diff --git a/runtime/interpreter/mterp/riscv64/control_flow.S b/runtime/interpreter/mterp/riscv64/control_flow.S
index 9a2b547..05ad426 100644
--- a/runtime/interpreter/mterp/riscv64/control_flow.S
+++ b/runtime/interpreter/mterp/riscv64/control_flow.S
@@ -59,6 +59,9 @@
%def op_goto_32():
unimp
+%def op_cmp_long():
+ unimp
+
%def op_if_eq():
% bincmp(condition="eq")
diff --git a/runtime/interpreter/mterp/riscv64/floating_point.S b/runtime/interpreter/mterp/riscv64/floating_point.S
index cd6c82a..5bcfce9 100644
--- a/runtime/interpreter/mterp/riscv64/floating_point.S
+++ b/runtime/interpreter/mterp/riscv64/floating_point.S
@@ -1,3 +1,6 @@
+// Note: Floating point operations must follow IEEE 754 rules, using round-to-nearest and gradual
+// underflow, except where stated otherwise.
+
%def fbinop(instr=""):
unimp
diff --git a/runtime/interpreter/mterp/riscv64/main.S b/runtime/interpreter/mterp/riscv64/main.S
index 3ae381a..a908612 100644
--- a/runtime/interpreter/mterp/riscv64/main.S
+++ b/runtime/interpreter/mterp/riscv64/main.S
@@ -314,7 +314,7 @@
.endm
// Typed read, defaults to 32-bit read.
-// Clobbers: \reg
+// Clobbers: \reg, \vreg
// Safe if \reg == \vreg.
.macro GET_VREG reg, vreg, is_wide=0
.if \is_wide
@@ -436,6 +436,11 @@
.cfi_endproc
NAME_END nterp_helper
+common_errDivideByZero:
+ EXPORT_PC
+ // Control doesn't return, but stack walking needs the return address.
+ call art_quick_throw_div_zero
+
// EndExecuteNterpImpl includes the methods after .cfi_endproc, as we want the runtime to see them
// as part of the Nterp PCs. This label marks the end of PCs contained by the OatQuickMethodHeader
// created for the interpreter entry point.
diff --git a/runtime/nterp_helpers.cc b/runtime/nterp_helpers.cc
index 6aadff5..8e8e617 100644
--- a/runtime/nterp_helpers.cc
+++ b/runtime/nterp_helpers.cc
@@ -278,6 +278,78 @@
case Instruction::CONST_WIDE_32:
case Instruction::CONST_WIDE:
case Instruction::CONST_WIDE_HIGH16:
+ case Instruction::NEG_INT:
+ case Instruction::NOT_INT:
+ case Instruction::NEG_LONG:
+ case Instruction::NOT_LONG:
+ case Instruction::INT_TO_LONG:
+ case Instruction::LONG_TO_INT:
+ case Instruction::INT_TO_BYTE:
+ case Instruction::INT_TO_CHAR:
+ case Instruction::INT_TO_SHORT:
+ case Instruction::ADD_INT:
+ case Instruction::SUB_INT:
+ case Instruction::MUL_INT:
+ case Instruction::DIV_INT:
+ case Instruction::REM_INT:
+ case Instruction::AND_INT:
+ case Instruction::OR_INT:
+ case Instruction::XOR_INT:
+ case Instruction::SHL_INT:
+ case Instruction::SHR_INT:
+ case Instruction::USHR_INT:
+ case Instruction::ADD_LONG:
+ case Instruction::SUB_LONG:
+ case Instruction::MUL_LONG:
+ case Instruction::DIV_LONG:
+ case Instruction::REM_LONG:
+ case Instruction::AND_LONG:
+ case Instruction::OR_LONG:
+ case Instruction::XOR_LONG:
+ case Instruction::SHL_LONG:
+ case Instruction::SHR_LONG:
+ case Instruction::USHR_LONG:
+ case Instruction::ADD_INT_2ADDR:
+ case Instruction::SUB_INT_2ADDR:
+ case Instruction::MUL_INT_2ADDR:
+ case Instruction::DIV_INT_2ADDR:
+ case Instruction::REM_INT_2ADDR:
+ case Instruction::AND_INT_2ADDR:
+ case Instruction::OR_INT_2ADDR:
+ case Instruction::XOR_INT_2ADDR:
+ case Instruction::SHL_INT_2ADDR:
+ case Instruction::SHR_INT_2ADDR:
+ case Instruction::USHR_INT_2ADDR:
+ case Instruction::ADD_LONG_2ADDR:
+ case Instruction::SUB_LONG_2ADDR:
+ case Instruction::MUL_LONG_2ADDR:
+ case Instruction::DIV_LONG_2ADDR:
+ case Instruction::REM_LONG_2ADDR:
+ case Instruction::AND_LONG_2ADDR:
+ case Instruction::OR_LONG_2ADDR:
+ case Instruction::XOR_LONG_2ADDR:
+ case Instruction::SHL_LONG_2ADDR:
+ case Instruction::SHR_LONG_2ADDR:
+ case Instruction::USHR_LONG_2ADDR:
+ case Instruction::ADD_INT_LIT16:
+ case Instruction::RSUB_INT:
+ case Instruction::MUL_INT_LIT16:
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::REM_INT_LIT16:
+ case Instruction::AND_INT_LIT16:
+ case Instruction::OR_INT_LIT16:
+ case Instruction::XOR_INT_LIT16:
+ case Instruction::ADD_INT_LIT8:
+ case Instruction::RSUB_INT_LIT8:
+ case Instruction::MUL_INT_LIT8:
+ case Instruction::DIV_INT_LIT8:
+ case Instruction::REM_INT_LIT8:
+ case Instruction::AND_INT_LIT8:
+ case Instruction::OR_INT_LIT8:
+ case Instruction::XOR_INT_LIT8:
+ case Instruction::SHL_INT_LIT8:
+ case Instruction::SHR_INT_LIT8:
+ case Instruction::USHR_INT_LIT8:
continue;
default:
return false;