path: root/compiler/optimizing/optimizing_compiler.cc
author Vladimir Marko <vmarko@google.com> 2023-10-13 10:28:36 +0000
committer Vladimír Marko <vmarko@google.com> 2023-10-17 11:56:00 +0000
commit d5c097bcda44e237ecabcdba9b3dca2348289138 (patch)
tree 46be9f64c07e806c8c1f68e19ac09b0d41f8a635 /compiler/optimizing/optimizing_compiler.cc
parent f7bd87edf3b80ce3bbd6e571fd119c878cb79992 (diff)
riscv64: Implement `CriticalNativeAbiFixupRiscv64`.
And pass integral stack args sign-extended to 64 bits for direct @CriticalNative calls.

Enable direct @CriticalNative call codegen unconditionally, also enable `HClinitCheck` codegen, and extend the 178-app-image-native-method run-test to properly cover these use cases.

Test: # Edit `run-test` to disable checker, then
      testrunner.py --target --64 --ndebug --optimizing
      # Ignore 6 pre-existing failures (down from 7).
Bug: 283082089
Change-Id: Ia514c62006c7079b04182cc39e413eb2deb089c1
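As background on "pass integral stack args sign-extended to 64 bits": the RISC-V LP64 psABI widens integer scalars narrower than XLEN according to their own signedness up to 32 bits and then sign-extends the result to 64 bits, so even an unsigned 32-bit value occupies its argument slot as a sign-extended 64-bit pattern. A minimal illustration of that widening rule (hypothetical helper name, not ART code):

#include <cstdint>

// Hypothetical illustration of the LP64 widening rule, not ART code.
int64_t WidenIntegralStackArg(uint32_t value) {
  // Sign-extend from bit 31: 0x80000000u becomes 0xFFFFFFFF80000000.
  return static_cast<int64_t>(static_cast<int32_t>(value));
}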
Diffstat (limited to 'compiler/optimizing/optimizing_compiler.cc')
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  37
1 file changed, 26 insertions(+), 11 deletions(-)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 682c2418f8..0fea53c247 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -461,6 +461,18 @@ bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
arm_optimizations);
}
#endif
+#if defined(ART_ENABLE_CODEGEN_riscv64)
+ case InstructionSet::kRiscv64: {
+ OptimizationDef riscv64_optimizations[] = {
+ OptDef(OptimizationPass::kCriticalNativeAbiFixupRiscv64),
+ };
+ return RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ riscv64_optimizations);
+ }
+#endif
#ifdef ART_ENABLE_CODEGEN_x86
case InstructionSet::kX86: {
OptimizationDef x86_optimizations[] = {
@@ -519,6 +531,18 @@ bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
arm64_optimizations);
}
#endif
+#if defined(ART_ENABLE_CODEGEN_riscv64)
+ case InstructionSet::kRiscv64: {
+ OptimizationDef riscv64_optimizations[] = {
+ OptDef(OptimizationPass::kCriticalNativeAbiFixupRiscv64)
+ };
+ return RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ riscv64_optimizations);
+ }
+#endif
#ifdef ART_ENABLE_CODEGEN_x86
case InstructionSet::kX86: {
OptimizationDef x86_optimizations[] = {
@@ -759,6 +783,7 @@ static bool CanAssembleGraphForRiscv64(HGraph* graph) {
case HInstruction::kLongConstant:
case HInstruction::kNullConstant:
case HInstruction::kLoadClass:
+ case HInstruction::kClinitCheck:
case HInstruction::kLoadString:
case HInstruction::kLoadMethodHandle:
case HInstruction::kLoadMethodType:
@@ -800,22 +825,12 @@ static bool CanAssembleGraphForRiscv64(HGraph* graph) {
case HInstruction::kNot:
case HInstruction::kMin:
case HInstruction::kMax:
+ case HInstruction::kInvokeStaticOrDirect:
case HInstruction::kInvokeVirtual:
case HInstruction::kInvokeInterface:
case HInstruction::kCurrentMethod:
case HInstruction::kNullCheck:
break;
- case HInstruction::kInvokeStaticOrDirect:
- if (it.Current()->AsInvokeStaticOrDirect()->GetCodePtrLocation() ==
- CodePtrLocation::kCallCriticalNative &&
- it.Current()->AsInvokeStaticOrDirect()->GetNumberOfArguments() >= 8u) {
- // TODO(riscv64): If there are more than 8 FP args, some may be passed in GPRs
- // and this requires a `CriticalNativeAbiFixupRiscv64` pass similar to the one
- // we have for ARM. This is not yet implemented. For simplicity, we reject all
- // direct @CriticalNative calls with more than 8 args.
- return false;
- }
- break;
default:
// Unimplemented instruction.
return false;
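The rejection removed in the last hunk was motivated by the deleted TODO comment: under the RISC-V hard-float calling convention, once the eight FP argument registers (fa0-fa7) are used, further FP arguments are passed in integer registers and then on the stack, so a direct @CriticalNative call with many FP args needs the new `CriticalNativeAbiFixupRiscv64` pass to place those values correctly. A rough sketch of that register assignment (hypothetical names, not taken from ART):

#include <cstddef>

// Hypothetical sketch of RISC-V LP64D argument classification, not ART code.
enum class ArgLocation { kFpRegister, kGpRegister, kStack };

ArgLocation ClassifyNextFpArg(size_t fp_regs_used, size_t gp_regs_used) {
  constexpr size_t kNumFpArgRegs = 8u;  // fa0-fa7
  constexpr size_t kNumGpArgRegs = 8u;  // a0-a7
  if (fp_regs_used < kNumFpArgRegs) {
    return ArgLocation::kFpRegister;
  }
  if (gp_regs_used < kNumGpArgRegs) {
    // The case the fixup pass must rewrite: the FP value has to be moved
    // into a general-purpose register before the call.
    return ArgLocation::kGpRegister;
  }
  return ArgLocation::kStack;
}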