author Vladimir Marko <vmarko@google.com> 2023-10-23 12:45:59 +0000
committer Vladimír Marko <vmarko@google.com> 2023-10-24 12:58:35 +0000
commit 8e47d43161ffef47805927afeab4de4ead478b35 (patch)
tree b5c1ccf33ee7f4c935b2e8b51bdd96be4b6213c6
parent f248454761f054985689b40a431d06f0a958e348 (diff)
riscv64: Fix type conversions, enable codegen.
Enable codegen for all remaining instructions.

Test: m  # aosp_cf_riscv64_phone-userdebug
Test: # Edit `run-test` to disable checker, then
      testrunner.py --target --64 --ndebug --optimizing --jit
      # Ignore pre-existing failures (optimizing: 3 -> 1, jit: 18 -> 4).
Bug: 283082089
Change-Id: I4dab3cdc49b9c979c97096d14db16007715bd05e
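For context, Java semantics require that converting NaN to an integral type produce 0, whereas RISC-V's FCVT instructions write the most positive integer for NaN inputs, so a bare FCvtWD is not sufficient. A minimal sketch of the mismatch in plain C++ (illustrative only, not ART code; the saturation bounds follow the RISC-V behavior of FCVT.W.D with RTZ rounding):

#include <cassert>
#include <cstdint>
#include <limits>

// What RISC-V FCVT.W.D (RTZ) produces: saturating conversion, and
// INT32_MAX rather than 0 for NaN inputs.
int32_t HardwareFcvtWD(double src) {
  if (src != src) return std::numeric_limits<int32_t>::max();  // NaN
  if (src >= 2147483648.0) return std::numeric_limits<int32_t>::max();
  if (src <= -2147483649.0) return std::numeric_limits<int32_t>::min();
  return static_cast<int32_t>(src);  // in range: truncate toward zero
}

// What Java's double-to-int conversion must produce: the same saturation,
// but NaN converts to 0 (the behavior this change adds to the codegen).
int32_t JavaDoubleToInt(double src) {
  int32_t dst = HardwareFcvtWD(src);
  return (src != src) ? 0 : dst;
}

int main() {
  assert(HardwareFcvtWD(std::numeric_limits<double>::quiet_NaN()) ==
         std::numeric_limits<int32_t>::max());
  assert(JavaDoubleToInt(std::numeric_limits<double>::quiet_NaN()) == 0);
}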
-rw-r--r--  compiler/optimizing/code_generator_riscv64.cc    7
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc     131
2 files changed, 8 insertions, 130 deletions
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index a5e9b98b0e..899dbca295 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -5176,6 +5176,13 @@ void InstructionCodeGeneratorRISCV64::VisitTypeConversion(HTypeConversion* instr
__ FCvtWD(dst, src, FPRoundingMode::kRTZ);
}
}
+      // For NaN inputs we need to return 0.
+      ScratchRegisterScope srs(GetAssembler());
+      XRegister tmp = srs.AllocateXRegister();
+      FClass(tmp, src, input_type);
+      __ Sltiu(tmp, tmp, kFClassNaNMinValue);  // 0 for NaN, 1 otherwise.
+      __ Neg(tmp, tmp);  // 0 for NaN, -1 otherwise.
+      __ And(dst, dst, tmp);  // Cleared for NaN.
} else if (DataType::IsFloatingPointType(result_type) &&
DataType::IsFloatingPointType(input_type)) {
FRegister dst = locations->Out().AsFpuRegister<FRegister>();
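The four added instructions form a branch-free NaN mask. Per the RISC-V spec, FCLASS sets exactly one of ten classification bits, with signaling NaN at bit 8 and quiet NaN at bit 9, so every non-NaN class value is below 1 << 8. Sltiu against that threshold therefore yields 1 for non-NaN and 0 for NaN, Neg widens this to an all-ones or all-zeros mask, and And clears the conversion result exactly when the input was NaN. A bit-level sketch of the same sequence (the value 0x100 is assumed here to match ART's kFClassNaNMinValue):

#include <cassert>
#include <cstdint>

// Assumed value: the smallest FCLASS result that denotes a NaN
// (signaling NaN = bit 8, quiet NaN = bit 9 in the RISC-V encoding).
constexpr uint32_t kFClassNaNMinValue = 0x100;

// Emulates the emitted FClass/Sltiu/Neg/And sequence applied to the
// conversion result `dst`, given the FCLASS bits of the source value.
int32_t MaskNaNResult(int32_t dst, uint32_t fclass_bits) {
  uint32_t tmp = (fclass_bits < kFClassNaNMinValue) ? 1u : 0u;  // Sltiu
  int32_t mask = -static_cast<int32_t>(tmp);  // Neg: -1 (keep) or 0 (clear)
  return dst & mask;                          // And: cleared for NaN
}

int main() {
  assert(MaskNaNResult(INT32_MAX, 1u << 9) == 0);  // quiet NaN -> 0
  assert(MaskNaNResult(42, 1u << 6) == 42);        // positive normal -> kept
}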
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f3f0a13548..bf0896ff8c 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -753,115 +753,6 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
return compiled_method;
}
-// TODO(riscv64): Remove this check when codegen is complete.
-#ifdef ART_ENABLE_CODEGEN_riscv64
-static bool CanAssembleGraphForRiscv64(HGraph* graph) {
- for (HBasicBlock* block : graph->GetPostOrder()) {
- // Phis are implemented (and they have no code to emit), so check only non-Phi instructions.
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- switch (it.Current()->GetKind()) {
- case HInstruction::kParallelMove:
- // ParallelMove is supported but it is inserted by the register allocator
- // and this check is done before register allocation.
- LOG(FATAL) << "Unexpected ParallelMove before register allocation!";
- UNREACHABLE();
- case HInstruction::kMethodEntryHook:
- case HInstruction::kMethodExitHook:
- case HInstruction::kExit:
- case HInstruction::kGoto:
- case HInstruction::kPackedSwitch:
- case HInstruction::kSelect:
- case HInstruction::kThrow:
- case HInstruction::kNop:
- case HInstruction::kTryBoundary:
- case HInstruction::kClearException:
- case HInstruction::kLoadException:
- case HInstruction::kParameterValue:
- case HInstruction::kReturn:
- case HInstruction::kReturnVoid:
- case HInstruction::kSuspendCheck:
- case HInstruction::kDoubleConstant:
- case HInstruction::kFloatConstant:
- case HInstruction::kIntConstant:
- case HInstruction::kLongConstant:
- case HInstruction::kNullConstant:
- case HInstruction::kLoadClass:
- case HInstruction::kClinitCheck:
- case HInstruction::kLoadString:
- case HInstruction::kLoadMethodHandle:
- case HInstruction::kLoadMethodType:
- case HInstruction::kNewArray:
- case HInstruction::kNewInstance:
- case HInstruction::kConstructorFence:
- case HInstruction::kMemoryBarrier:
- case HInstruction::kInstanceFieldGet:
- case HInstruction::kInstanceFieldSet:
- case HInstruction::kStaticFieldGet:
- case HInstruction::kStaticFieldSet:
- case HInstruction::kUnresolvedInstanceFieldGet:
- case HInstruction::kUnresolvedInstanceFieldSet:
- case HInstruction::kUnresolvedStaticFieldGet:
- case HInstruction::kUnresolvedStaticFieldSet:
- case HInstruction::kArrayGet:
- case HInstruction::kArrayLength:
- case HInstruction::kArraySet:
- case HInstruction::kBoundsCheck:
- case HInstruction::kAbove:
- case HInstruction::kAboveOrEqual:
- case HInstruction::kBelow:
- case HInstruction::kBelowOrEqual:
- case HInstruction::kEqual:
- case HInstruction::kGreaterThan:
- case HInstruction::kGreaterThanOrEqual:
- case HInstruction::kLessThan:
- case HInstruction::kLessThanOrEqual:
- case HInstruction::kNotEqual:
- case HInstruction::kCompare:
- case HInstruction::kIf:
- case HInstruction::kAdd:
- case HInstruction::kAnd:
- case HInstruction::kOr:
- case HInstruction::kSub:
- case HInstruction::kXor:
- case HInstruction::kRor:
- case HInstruction::kShl:
- case HInstruction::kShr:
- case HInstruction::kUShr:
- case HInstruction::kAbs:
- case HInstruction::kBooleanNot:
- case HInstruction::kDiv:
- case HInstruction::kRem:
- case HInstruction::kMul:
- case HInstruction::kNeg:
- case HInstruction::kNot:
- case HInstruction::kMin:
- case HInstruction::kMax:
- case HInstruction::kMonitorOperation:
- case HInstruction::kStringBuilderAppend:
- case HInstruction::kInvokeStaticOrDirect:
- case HInstruction::kInvokeVirtual:
- case HInstruction::kInvokeInterface:
- case HInstruction::kInvokeCustom:
- case HInstruction::kInvokePolymorphic:
- case HInstruction::kInvokeUnresolved:
- case HInstruction::kCurrentMethod:
- case HInstruction::kNullCheck:
- case HInstruction::kDeoptimize:
- case HInstruction::kDivZeroCheck:
- case HInstruction::kCheckCast:
- case HInstruction::kInstanceOf:
- case HInstruction::kBoundType:
- break;
- default:
- // Unimplemented instruction.
- return false;
- }
- }
- }
- return true;
-}
-#endif
-
CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
ArenaStack* arena_stack,
const DexCompilationUnit& dex_compilation_unit,
@@ -1020,15 +911,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
WriteBarrierElimination(graph, compilation_stats_.get()).Run();
}
- // TODO(riscv64): Remove this check when codegen is complete.
-#ifdef ART_ENABLE_CODEGEN_riscv64
- if (instruction_set == InstructionSet::kRiscv64 && !CanAssembleGraphForRiscv64(graph)) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kNotCompiledUnsupportedIsa);
- return nullptr;
- }
-#endif
-
RegisterAllocator::Strategy regalloc_strategy =
compiler_options.GetRegisterAllocationStrategy();
AllocateRegisters(graph,
@@ -1124,15 +1006,6 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
WriteBarrierElimination(graph, compilation_stats_.get()).Run();
}
- // TODO(riscv64): Remove this check when codegen is complete.
-#ifdef ART_ENABLE_CODEGEN_riscv64
- if (instruction_set == InstructionSet::kRiscv64 && !CanAssembleGraphForRiscv64(graph)) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kNotCompiledUnsupportedIsa);
- return nullptr;
- }
-#endif
-
AllocateRegisters(graph,
codegen.get(),
&pass_observer,
@@ -1242,9 +1115,7 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
if (kIsDebugBuild &&
compiler_options.CompileArtTest() &&
- IsInstructionSetSupported(compiler_options.GetInstructionSet()) &&
- // TODO(riscv64): Enable this check when codegen is complete.
- compiler_options.GetInstructionSet() != InstructionSet::kRiscv64) {
+ IsInstructionSetSupported(compiler_options.GetInstructionSet())) {
// For testing purposes, we put a special marker on method names
// that should be compiled with this compiler (when the
// instruction set is supported). This makes sure we're not