summaryrefslogtreecommitdiff
path: root/compiler/optimizing
diff options
context:
space:
mode:
author Vladimir Marko <vmarko@google.com> 2023-07-12 12:43:22 +0000
committer Treehugger Robot <android-test-infra-autosubmit@system.gserviceaccount.com> 2023-07-27 08:28:19 +0000
commit bdd5ec781296f36cd974431285ea3db38d47b765 (patch)
tree 5951ad60c3b9269a29581dd3fe8879e30ecd667f /compiler/optimizing
parent 5945a7f87d9fc36b3f8cede0b857f7967d56f702 (diff)
riscv64: Enable Optimizing compiler for some methods.
Add a filter that allows empty methods (return-void), methods returning
an argument and methods with empty infinite loops (testing SuspendCheck
support).

Test: run-gtests.sh
Test: # Edit `run-test` to disable checker, then
      testrunner.py --target --64 --ndebug --optimizing --jit
      # Ignore pre-existing failures (7 for --optimizing,
      # 59 for --jit).
Bug: 283082089
Change-Id: Ie71afd85f79f6394d33364e1adf3dc50f8e4ebfa
Diffstat (limited to 'compiler/optimizing')
-rw-r--r-- compiler/optimizing/optimizing_compiler.cc | 62
1 file changed, 56 insertions(+), 6 deletions(-)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 74fe28f25b..62cae41665 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -435,11 +435,12 @@ bool OptimizingCompiler::CanCompileMethod([[maybe_unused]] uint32_t method_idx,
}
static bool IsInstructionSetSupported(InstructionSet instruction_set) {
- return instruction_set == InstructionSet::kArm
- || instruction_set == InstructionSet::kArm64
- || instruction_set == InstructionSet::kThumb2
- || instruction_set == InstructionSet::kX86
- || instruction_set == InstructionSet::kX86_64;
+ return instruction_set == InstructionSet::kArm ||
+ instruction_set == InstructionSet::kArm64 ||
+ instruction_set == InstructionSet::kThumb2 ||
+ instruction_set == InstructionSet::kRiscv64 ||
+ instruction_set == InstructionSet::kX86 ||
+ instruction_set == InstructionSet::kX86_64;
}
bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
@@ -728,6 +729,35 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
return compiled_method;
}
+// TODO(riscv64): Remove this check when codegen is complete.
+#ifdef ART_ENABLE_CODEGEN_riscv64
+static bool CanAssembleGraphForRiscv64(HGraph* graph) {
+ for (HBasicBlock* block : graph->GetPostOrder()) {
+ // Phis are implemented (and they have no code to emit), so check only non-Phi instructions.
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ switch (it.Current()->GetKind()) {
+ case HInstruction::kParallelMove:
+ // ParallelMove is supported but it is inserted by the register allocator
+ // and this check is done before register allocation.
+ LOG(FATAL) << "Unexpected ParallelMove before register allocation!";
+ UNREACHABLE();
+ case HInstruction::kExit:
+ case HInstruction::kGoto:
+ case HInstruction::kParameterValue:
+ case HInstruction::kReturn:
+ case HInstruction::kReturnVoid:
+ case HInstruction::kSuspendCheck:
+ break;
+ default:
+ // Unimplemented instruction.
+ return false;
+ }
+ }
+ }
+ return true;
+}
+#endif
+
CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
ArenaStack* arena_stack,
const DexCompilationUnit& dex_compilation_unit,
@@ -886,6 +916,15 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
WriteBarrierElimination(graph, compilation_stats_.get()).Run();
}
+ // TODO(riscv64): Remove this check when codegen is complete.
+#ifdef ART_ENABLE_CODEGEN_riscv64
+ if (instruction_set == InstructionSet::kRiscv64 && !CanAssembleGraphForRiscv64(graph)) {
+ MaybeRecordStat(compilation_stats_.get(),
+ MethodCompilationStat::kNotCompiledUnsupportedIsa);
+ return nullptr;
+ }
+#endif
+
RegisterAllocator::Strategy regalloc_strategy =
compiler_options.GetRegisterAllocationStrategy();
AllocateRegisters(graph,
@@ -981,6 +1020,15 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
WriteBarrierElimination(graph, compilation_stats_.get()).Run();
}
+ // TODO(riscv64): Remove this check when codegen is complete.
+#ifdef ART_ENABLE_CODEGEN_riscv64
+ if (instruction_set == InstructionSet::kRiscv64 && !CanAssembleGraphForRiscv64(graph)) {
+ MaybeRecordStat(compilation_stats_.get(),
+ MethodCompilationStat::kNotCompiledUnsupportedIsa);
+ return nullptr;
+ }
+#endif
+
AllocateRegisters(graph,
codegen.get(),
&pass_observer,
@@ -1090,7 +1138,9 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
if (kIsDebugBuild &&
compiler_options.CompileArtTest() &&
- IsInstructionSetSupported(compiler_options.GetInstructionSet())) {
+ IsInstructionSetSupported(compiler_options.GetInstructionSet()) &&
+ // TODO(riscv64): Enable this check when codegen is complete.
+ compiler_options.GetInstructionSet() != InstructionSet::kRiscv64) {
// For testing purposes, we put a special marker on method names
// that should be compiled with this compiler (when the
// instruction set is supported). This makes sure we're not