summary refs log tree commitdiff
diff options
context:
space:
mode:
-rw-r--r--  compiler/optimizing/code_generator_riscv64.h |  2
-rw-r--r--  compiler/optimizing/intrinsics_riscv64.cc    | 52
2 files changed, 51 insertions(+), 3 deletions(-)
diff --git a/compiler/optimizing/code_generator_riscv64.h b/compiler/optimizing/code_generator_riscv64.h
index d21b5b6cbc..6a37eb01cd 100644
--- a/compiler/optimizing/code_generator_riscv64.h
+++ b/compiler/optimizing/code_generator_riscv64.h
@@ -50,9 +50,7 @@ static constexpr size_t kRuntimeParameterFpuRegistersLength =
#define UNIMPLEMENTED_INTRINSIC_LIST_RISCV64(V) \
V(IntegerReverse) \
- V(IntegerDivideUnsigned) \
V(LongReverse) \
- V(LongDivideUnsigned) \
V(MathFmaDouble) \
V(MathFmaFloat) \
V(MathCos) \
diff --git a/compiler/optimizing/intrinsics_riscv64.cc b/compiler/optimizing/intrinsics_riscv64.cc
index 077c06de5b..1ed1bb65b4 100644
--- a/compiler/optimizing/intrinsics_riscv64.cc
+++ b/compiler/optimizing/intrinsics_riscv64.cc
@@ -1309,7 +1309,6 @@ void IntrinsicLocationsBuilderRISCV64::VisitThreadCurrentThread(HInvoke* invoke)
void IntrinsicCodeGeneratorRISCV64::VisitThreadCurrentThread(HInvoke* invoke) {
Riscv64Assembler* assembler = GetAssembler();
XRegister out = invoke->GetLocations()->Out().AsRegister<XRegister>();
-
__ Loadwu(out, TR, Thread::PeerOffset<kRiscv64PointerSize>().Int32Value());
}
@@ -1321,6 +1320,57 @@ void IntrinsicLocationsBuilderRISCV64::VisitReachabilityFence(HInvoke* invoke) {
void IntrinsicCodeGeneratorRISCV64::VisitReachabilityFence([[maybe_unused]] HInvoke* invoke) {}
+static void GenerateDivideUnsigned(HInvoke* invoke, CodeGeneratorRISCV64* codegen) {
+ LocationSummary* locations = invoke->GetLocations();
+ Riscv64Assembler* assembler = codegen->GetAssembler();
+ DataType::Type type = invoke->GetType();
+ DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
+
+ XRegister dividend = locations->InAt(0).AsRegister<XRegister>();
+ XRegister divisor = locations->InAt(1).AsRegister<XRegister>();
+ XRegister out = locations->Out().AsRegister<XRegister>();
+
+ // Check if divisor is zero, bail to managed implementation to handle.
+ SlowPathCodeRISCV64* slow_path =
+ new (codegen->GetScopedAllocator()) IntrinsicSlowPathRISCV64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ Beqz(divisor, slow_path->GetEntryLabel());
+
+ if (type == DataType::Type::kInt32) {
+ __ Divuw(out, dividend, divisor);
+ } else {
+ __ Divu(out, dividend, divisor);
+ }
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderRISCV64::VisitIntegerDivideUnsigned(HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Force kOutputOverlap; see comments in IntrinsicSlowPath::EmitNativeCode.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorRISCV64::VisitIntegerDivideUnsigned(HInvoke* invoke) {
+ GenerateDivideUnsigned(invoke, codegen_);
+}
+
+void IntrinsicLocationsBuilderRISCV64::VisitLongDivideUnsigned(HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Force kOutputOverlap; see comments in IntrinsicSlowPath::EmitNativeCode.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorRISCV64::VisitLongDivideUnsigned(HInvoke* invoke) {
+ GenerateDivideUnsigned(invoke, codegen_);
+}
+
#define MARK_UNIMPLEMENTED(Name) UNIMPLEMENTED_INTRINSIC(RISCV64, Name)
UNIMPLEMENTED_INTRINSIC_LIST_RISCV64(MARK_UNIMPLEMENTED);
#undef MARK_UNIMPLEMENTED