summary refs log tree commit diff
path: root/compiler/optimizing
diff options
context:
space:
mode:
author Lifang Xia <lifang_xia@linux.alibaba.com> 2023-11-06 16:26:57 +0800
committer Vladimír Marko <vmarko@google.com> 2023-11-07 08:20:16 +0000
commit 2eb923be06f7a2f28b3d2e92e47d4772d7725100 (patch)
tree 02de4e6d2d1246528ff7638667b9e83e7c3e92c6 /compiler/optimizing
parent 770134a0d6041e27745b5d1f84c26512cb75554a (diff)
riscv64: Implement Integer/Long.divideUnsigned() intrinsics.
Test: testrunner.py --target --64 --ndebug --optimizing --jit -t 082-inline-execute Bug: 283082089 Change-Id: Ic14a1a901269fe5e91fa2c426f384a9056d9f10f
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--compiler/optimizing/code_generator_riscv64.h2
-rw-r--r--compiler/optimizing/intrinsics_riscv64.cc52
2 files changed, 51 insertions, 3 deletions
diff --git a/compiler/optimizing/code_generator_riscv64.h b/compiler/optimizing/code_generator_riscv64.h
index d21b5b6cbc..6a37eb01cd 100644
--- a/compiler/optimizing/code_generator_riscv64.h
+++ b/compiler/optimizing/code_generator_riscv64.h
@@ -50,9 +50,7 @@ static constexpr size_t kRuntimeParameterFpuRegistersLength =
#define UNIMPLEMENTED_INTRINSIC_LIST_RISCV64(V) \
V(IntegerReverse) \
- V(IntegerDivideUnsigned) \
V(LongReverse) \
- V(LongDivideUnsigned) \
V(MathFmaDouble) \
V(MathFmaFloat) \
V(MathCos) \
diff --git a/compiler/optimizing/intrinsics_riscv64.cc b/compiler/optimizing/intrinsics_riscv64.cc
index 077c06de5b..1ed1bb65b4 100644
--- a/compiler/optimizing/intrinsics_riscv64.cc
+++ b/compiler/optimizing/intrinsics_riscv64.cc
@@ -1309,7 +1309,6 @@ void IntrinsicLocationsBuilderRISCV64::VisitThreadCurrentThread(HInvoke* invoke)
void IntrinsicCodeGeneratorRISCV64::VisitThreadCurrentThread(HInvoke* invoke) {
Riscv64Assembler* assembler = GetAssembler();
XRegister out = invoke->GetLocations()->Out().AsRegister<XRegister>();
-
__ Loadwu(out, TR, Thread::PeerOffset<kRiscv64PointerSize>().Int32Value());
}
@@ -1321,6 +1320,57 @@ void IntrinsicLocationsBuilderRISCV64::VisitReachabilityFence(HInvoke* invoke) {
void IntrinsicCodeGeneratorRISCV64::VisitReachabilityFence([[maybe_unused]] HInvoke* invoke) {}
+static void GenerateDivideUnsigned(HInvoke* invoke, CodeGeneratorRISCV64* codegen) {
+ LocationSummary* locations = invoke->GetLocations();
+ Riscv64Assembler* assembler = codegen->GetAssembler();
+ DataType::Type type = invoke->GetType();
+ DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
+
+ XRegister dividend = locations->InAt(0).AsRegister<XRegister>();
+ XRegister divisor = locations->InAt(1).AsRegister<XRegister>();
+ XRegister out = locations->Out().AsRegister<XRegister>();
+
+ // Check if divisor is zero, bail to managed implementation to handle.
+ SlowPathCodeRISCV64* slow_path =
+ new (codegen->GetScopedAllocator()) IntrinsicSlowPathRISCV64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ Beqz(divisor, slow_path->GetEntryLabel());
+
+ if (type == DataType::Type::kInt32) {
+ __ Divuw(out, dividend, divisor);
+ } else {
+ __ Divu(out, dividend, divisor);
+ }
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderRISCV64::VisitIntegerDivideUnsigned(HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Force kOutputOverlap; see comments in IntrinsicSlowPath::EmitNativeCode.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorRISCV64::VisitIntegerDivideUnsigned(HInvoke* invoke) {
+ GenerateDivideUnsigned(invoke, codegen_);
+}
+
+void IntrinsicLocationsBuilderRISCV64::VisitLongDivideUnsigned(HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Force kOutputOverlap; see comments in IntrinsicSlowPath::EmitNativeCode.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorRISCV64::VisitLongDivideUnsigned(HInvoke* invoke) {
+ GenerateDivideUnsigned(invoke, codegen_);
+}
+
#define MARK_UNIMPLEMENTED(Name) UNIMPLEMENTED_INTRINSIC(RISCV64, Name)
UNIMPLEMENTED_INTRINSIC_LIST_RISCV64(MARK_UNIMPLEMENTED);
#undef MARK_UNIMPLEMENTED