author Vladimir Marko <vmarko@google.com> 2023-08-24 13:32:56 +0000
committer Vladimír Marko <vmarko@google.com> 2023-09-06 09:00:07 +0000
commit d82f788fa6b4f02069fb2e61bd101716b3221507 (patch)
tree a3042d0288a368d2daa1212b60109c0932cc844d /compiler/optimizing/optimizing_compiler.cc
parent af81d552ec0f1d7967c27143046f91d79419b910 (diff)
riscv64: Enable Optimizing compiler for invokes.
Enable InvokeVirtual, InvokeInterface, InvokeStaticOrDirect and
a few other instructions. Note that this also enables some
implemented intrinsics that were previously excluded.

Test: aosp_cf_riscv64_phone-userdebug boots.
Test: run-gtests.sh
      # Ignore pre-existing timeout in `TestImageLayout`.
Test: testrunner.py --target --64 --ndebug --optimizing --jit
      # Ignore pre-existing failures (51 for --optimizing,
      # down from 58; 65 for --jit, down from 69).
Bug: 283082089
Change-Id: I02a18e4ccf244205e78d6376cdaa6e32cf8c55e5
Diffstat (limited to 'compiler/optimizing/optimizing_compiler.cc')
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  38
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 211c6417c2..911a429d0b 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -775,28 +775,30 @@ static bool CanAssembleGraphForRiscv64(HGraph* graph) {
case HInstruction::kAbs:
case HInstruction::kBooleanNot:
case HInstruction::kMul:
+ case HInstruction::kNeg:
+ case HInstruction::kNot:
+ case HInstruction::kInvokeVirtual:
+ case HInstruction::kInvokeInterface:
+ case HInstruction::kCurrentMethod:
break;
- case HInstruction::kInvokeStaticOrDirect: {
- Intrinsics intrinsic = it.Current()->AsInvokeStaticOrDirect()->GetIntrinsic();
- if (intrinsic != Intrinsics::kDoubleDoubleToRawLongBits &&
- intrinsic != Intrinsics::kDoubleIsInfinite &&
- intrinsic != Intrinsics::kDoubleLongBitsToDouble &&
- intrinsic != Intrinsics::kFloatFloatToRawIntBits &&
- intrinsic != Intrinsics::kFloatIsInfinite &&
- intrinsic != Intrinsics::kFloatIntBitsToFloat &&
- intrinsic != Intrinsics::kMemoryPeekByte &&
- intrinsic != Intrinsics::kMemoryPeekIntNative &&
- intrinsic != Intrinsics::kMemoryPeekLongNative &&
- intrinsic != Intrinsics::kMemoryPeekShortNative &&
- intrinsic != Intrinsics::kMemoryPokeByte &&
- intrinsic != Intrinsics::kMemoryPokeIntNative &&
- intrinsic != Intrinsics::kMemoryPokeLongNative &&
- intrinsic != Intrinsics::kMemoryPokeShortNative) {
+ case HInstruction::kInvokeStaticOrDirect:
+ if (it.Current()->AsInvokeStaticOrDirect()->GetCodePtrLocation() ==
+ CodePtrLocation::kCallCriticalNative &&
+ it.Current()->AsInvokeStaticOrDirect()->GetNumberOfArguments() >= 8u) {
+ // TODO(riscv64): If there are more than 8 FP args, some may be passed in GPRs
+ // and this requires a `CriticalNativeAbiFixupRiscv64` pass similar to the one
+ // we have for ARM. This is not yet implemented. For simplicity, we reject all
+ // direct @CriticalNative calls with more than 8 args.
return false;
}
break;
- }
- case HInstruction::kCurrentMethod:
+ case HInstruction::kMin:
+ case HInstruction::kMax:
+ if (DataType::IsFloatingPointType(it.Current()->GetType())) {
+ // FIXME(riscv64): If one of the operands is NaN and the other is not, riscv64
+ // FMIN/FMAX yield the non-NaN operand but we want the result to be the NaN operand.
+ return false;
+ }
break;
default:
// Unimplemented instruction.
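
The TODO in the kInvokeStaticOrDirect case refers to the RISC-V LP64D calling
convention: the first eight floating-point arguments are passed in fa0-fa7, and
any further FP arguments fall back to the integer calling convention (integer
registers, then the stack). The following hypothetical C++ signature, not part
of this patch, is a sketch of a native function that would trigger that
fallback and hence the missing fixup pass analogous to the ARM one mentioned in
the comment:

// Hypothetical @CriticalNative target used only for illustration. Under the
// RISC-V LP64D psABI, f0..f7 are passed in fa0-fa7; f8 does not fit in an FP
// argument register and is passed per the integer calling convention (here a0,
// since no integer arguments precede it). Moving that value from an FP source
// into a GPR is the argument shuffling the TODO says is not yet implemented.
extern "C" float Accumulate(float f0, float f1, float f2, float f3,
                            float f4, float f5, float f6, float f7,
                            float f8) {
  return f0 + f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8;
}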
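
The FIXME in the kMin/kMax case concerns NaN handling: riscv64 FMIN/FMAX return
the non-NaN operand when exactly one operand is NaN, whereas Java's
Math.min/Math.max must propagate the NaN. A minimal standalone C++ sketch, not
part of this patch, contrasting the two behaviours (std::fmin follows the same
rule as FMIN.D for a single NaN operand):

#include <cmath>
#include <cstdio>
#include <limits>

// Java Math.min semantics: NaN if either operand is NaN; -0.0 is treated as
// smaller than +0.0.
static double JavaMin(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<double>::quiet_NaN();
  }
  if (a == 0.0 && b == 0.0) {
    return std::signbit(a) ? a : b;  // Prefer -0.0 over +0.0.
  }
  return a < b ? a : b;
}

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  // std::fmin, like riscv64 FMIN.D, drops the NaN and returns 1.0.
  std::printf("fmin(NaN, 1.0)    = %f\n", std::fmin(nan, 1.0));
  // Java semantics require NaN here, so FMIN.D alone cannot implement HMin.
  std::printf("JavaMin(NaN, 1.0) = %f\n", JavaMin(nan, 1.0));
  return 0;
}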