summaryrefslogtreecommitdiff
path: root/compiler
diff options
context:
space:
mode:
author Vladimir Marko <vmarko@google.com> 2023-11-20 13:58:23 +0100
committer Vladimír Marko <vmarko@google.com> 2023-11-21 17:43:46 +0000
commit d78605f9c00340f4574232a2c6c7ad36ea0ef02a (patch)
tree 6119d43b214460d18af97061845b9107dc98cd25 /compiler
parent 8732b281dfcb7a3fe9b7d3018ce8c72f961d934e (diff)
riscv64: Improve Math.round() intrinsic.
And clean up some intrinsics code. Test: testrunner.py --target --64 --ndebug --optimizing Bug: 283082089 Change-Id: I365eda7cde2d1f58bca60c935c3a548f027c6726
Diffstat (limited to 'compiler')
-rw-r--r--compiler/optimizing/code_generator_riscv64.cc9
-rw-r--r--compiler/optimizing/code_generator_riscv64.h12
-rw-r--r--compiler/optimizing/intrinsics_riscv64.cc190
3 files changed, 96 insertions, 115 deletions
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index 698bf434d4..5006371377 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -56,13 +56,6 @@ constexpr uint32_t kLinkTimeOffsetPlaceholderLow = 0x678;
// We switch to the table-based method starting with 6 entries.
static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 6;
-// FCLASS returns a 10-bit classification mask with the two highest bits marking NaNs
-// (signaling and quiet). To detect a NaN, we can compare (either BGE or BGEU, the sign
-// bit is always clear) the result with the `kFClassNaNMinValue`.
-static_assert(kSignalingNaN == 0x100);
-static_assert(kQuietNaN == 0x200);
-static constexpr int32_t kFClassNaNMinValue = 0x100;
-
static constexpr XRegister kCoreCalleeSaves[] = {
// S1(TR) is excluded as the ART thread register.
S0, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, RA
@@ -872,7 +865,7 @@ inline void InstructionCodeGeneratorRISCV64::FMvX(
FpUnOp<XRegister, &Riscv64Assembler::FMvXW, &Riscv64Assembler::FMvXD>(rd, rs1, type);
}
-inline void InstructionCodeGeneratorRISCV64::FClass(
+void InstructionCodeGeneratorRISCV64::FClass(
XRegister rd, FRegister rs1, DataType::Type type) {
FpUnOp<XRegister, &Riscv64Assembler::FClassS, &Riscv64Assembler::FClassD>(rd, rs1, type);
}
diff --git a/compiler/optimizing/code_generator_riscv64.h b/compiler/optimizing/code_generator_riscv64.h
index 69f646c6ae..b5407d8f50 100644
--- a/compiler/optimizing/code_generator_riscv64.h
+++ b/compiler/optimizing/code_generator_riscv64.h
@@ -48,6 +48,13 @@ static constexpr FRegister kRuntimeParameterFpuRegisters[] = {
static constexpr size_t kRuntimeParameterFpuRegistersLength =
arraysize(kRuntimeParameterFpuRegisters);
+// FCLASS returns a 10-bit classification mask with the two highest bits marking NaNs
+// (signaling and quiet). To detect a NaN, we can compare (either BGE or BGEU, the sign
+// bit is always clear) the result with the `kFClassNaNMinValue`.
+static_assert(kSignalingNaN == 0x100);
+static_assert(kQuietNaN == 0x200);
+static constexpr int32_t kFClassNaNMinValue = 0x100;
+
#define UNIMPLEMENTED_INTRINSIC_LIST_RISCV64(V) \
V(IntegerReverse) \
V(LongReverse) \
@@ -360,7 +367,7 @@ class InstructionCodeGeneratorRISCV64 : public InstructionCodeGenerator {
void GenerateMemoryBarrier(MemBarrierKind kind);
- void ShNAdd(XRegister rd, XRegister rs1, XRegister rs2, DataType::Type type);
+ void FClass(XRegister rd, FRegister rs1, DataType::Type type);
void Load(Location out, XRegister rs1, int32_t offset, DataType::Type type);
void Store(Location value, XRegister rs1, int32_t offset, DataType::Type type);
@@ -374,6 +381,8 @@ class InstructionCodeGeneratorRISCV64 : public InstructionCodeGenerator {
DataType::Type type,
HInstruction* instruction = nullptr);
+ void ShNAdd(XRegister rd, XRegister rs1, XRegister rs2, DataType::Type type);
+
protected:
void GenerateClassInitializationCheck(SlowPathCodeRISCV64* slow_path, XRegister class_reg);
void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check, XRegister temp);
@@ -482,7 +491,6 @@ class InstructionCodeGeneratorRISCV64 : public InstructionCodeGenerator {
void FNeg(FRegister rd, FRegister rs1, DataType::Type type);
void FMv(FRegister rd, FRegister rs1, DataType::Type type);
void FMvX(XRegister rd, FRegister rs1, DataType::Type type);
- void FClass(XRegister rd, FRegister rs1, DataType::Type type);
Riscv64Assembler* const assembler_;
CodeGeneratorRISCV64* const codegen_;
diff --git a/compiler/optimizing/intrinsics_riscv64.cc b/compiler/optimizing/intrinsics_riscv64.cc
index caced77824..225809d58a 100644
--- a/compiler/optimizing/intrinsics_riscv64.cc
+++ b/compiler/optimizing/intrinsics_riscv64.cc
@@ -68,7 +68,7 @@ static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke
locations->SetOut(calling_convention.GetReturnLocation(invoke->GetType()));
}
-static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
DCHECK_EQ(invoke->GetNumberOfArguments(), 2U);
DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType()));
DCHECK(DataType::IsFloatingPointType(invoke->InputAt(1)->GetType()));
@@ -83,6 +83,22 @@ static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke)
locations->SetOut(calling_convention.GetReturnLocation(invoke->GetType()));
}
+static void CreateFpFpFpToFpNoOverlapLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ DCHECK_EQ(invoke->GetNumberOfArguments(), 3U);
+ DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType()));
+ DCHECK(DataType::IsFloatingPointType(invoke->InputAt(1)->GetType()));
+ DCHECK(DataType::IsFloatingPointType(invoke->InputAt(2)->GetType()));
+ DCHECK(DataType::IsFloatingPointType(invoke->GetType()));
+
+ LocationSummary* const locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(2, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+}
+
static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
LocationSummary* locations =
new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
@@ -156,7 +172,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitFloatIsInfinite(HInvoke* invoke) {
__ Snez(out, out);
}
-static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+static void CreateIntToIntNoOverlapLocations(ArenaAllocator* allocator, HInvoke* invoke) {
LocationSummary* locations =
new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
@@ -170,7 +186,7 @@ void EmitMemoryPeek(HInvoke* invoke, EmitOp&& emit_op) {
}
void IntrinsicLocationsBuilderRISCV64::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -179,7 +195,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitMemoryPeekByte(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -188,7 +204,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitMemoryPeekIntNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -197,7 +213,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitMemoryPeekLongNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitMemoryPeekShortNative(HInvoke* invoke) {
@@ -212,6 +228,15 @@ static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invo
locations->SetInAt(1, Location::RequiresRegister());
}
+static void CreateIntIntToIntSlowPathCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Force kOutputOverlap; see comments in IntrinsicSlowPath::EmitNativeCode.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
template <typename EmitOp>
void EmitMemoryPoke(HInvoke* invoke, EmitOp&& emit_op) {
LocationSummary* locations = invoke->GetLocations();
@@ -303,7 +328,7 @@ static void GenerateReverseBytes(Riscv64Assembler* assembler,
}
void IntrinsicLocationsBuilderRISCV64::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -311,7 +336,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitIntegerReverseBytes(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitLongReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitLongReverseBytes(HInvoke* invoke) {
@@ -319,7 +344,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitLongReverseBytes(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitShortReverseBytes(HInvoke* invoke) {
@@ -333,7 +358,7 @@ void EmitIntegralUnOp(HInvoke* invoke, EmitOp&& emit_op) {
}
void IntrinsicLocationsBuilderRISCV64::VisitIntegerBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitIntegerBitCount(HInvoke* invoke) {
@@ -342,7 +367,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitIntegerBitCount(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitLongBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitLongBitCount(HInvoke* invoke) {
@@ -351,7 +376,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitLongBitCount(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -368,7 +393,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitIntegerHighestOneBit(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -385,7 +410,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitLongHighestOneBit(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -399,7 +424,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitIntegerLowestOneBit(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitLongLowestOneBit(HInvoke* invoke) {
@@ -413,7 +438,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitLongLowestOneBit(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -422,7 +447,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitIntegerNumberOfLeadingZeros(HInvoke* in
}
void IntrinsicLocationsBuilderRISCV64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -431,7 +456,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitLongNumberOfLeadingZeros(HInvoke* invok
}
void IntrinsicLocationsBuilderRISCV64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -440,7 +465,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitIntegerNumberOfTrailingZeros(HInvoke* i
}
void IntrinsicLocationsBuilderRISCV64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(allocator_, invoke);
+ CreateIntToIntNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -2033,7 +2058,7 @@ void VarHandleSlowPathRISCV64::EmitByteArrayViewCode(CodeGenerator* codegen_in)
void IntrinsicLocationsBuilderRISCV64::VisitThreadCurrentThread(HInvoke* invoke) {
LocationSummary* locations =
- new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -2045,7 +2070,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitThreadCurrentThread(HInvoke* invoke) {
void IntrinsicLocationsBuilderRISCV64::VisitReachabilityFence(HInvoke* invoke) {
LocationSummary* locations =
- new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::Any());
}
@@ -2077,12 +2102,7 @@ static void GenerateDivideUnsigned(HInvoke* invoke, CodeGeneratorRISCV64* codege
}
void IntrinsicLocationsBuilderRISCV64::VisitIntegerDivideUnsigned(HInvoke* invoke) {
- LocationSummary* locations =
- new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- // Force kOutputOverlap; see comments in IntrinsicSlowPath::EmitNativeCode.
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ CreateIntIntToIntSlowPathCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitIntegerDivideUnsigned(HInvoke* invoke) {
@@ -2090,12 +2110,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitIntegerDivideUnsigned(HInvoke* invoke)
}
void IntrinsicLocationsBuilderRISCV64::VisitLongDivideUnsigned(HInvoke* invoke) {
- LocationSummary* locations =
- new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- // Force kOutputOverlap; see comments in IntrinsicSlowPath::EmitNativeCode.
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ CreateIntIntToIntSlowPathCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitLongDivideUnsigned(HInvoke* invoke) {
@@ -2103,19 +2118,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitLongDivideUnsigned(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitMathFmaDouble(HInvoke* invoke) {
- DCHECK_EQ(invoke->GetNumberOfArguments(), 3U);
- DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType()));
- DCHECK(DataType::IsFloatingPointType(invoke->InputAt(1)->GetType()));
- DCHECK(DataType::IsFloatingPointType(invoke->InputAt(2)->GetType()));
- DCHECK(DataType::IsFloatingPointType(invoke->GetType()));
-
- LocationSummary* const locations =
- new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
- locations->SetInAt(2, Location::RequiresFpuRegister());
- locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ CreateFpFpFpToFpNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitMathFmaDouble(HInvoke* invoke) {
@@ -2130,19 +2133,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitMathFmaDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitMathFmaFloat(HInvoke* invoke) {
- DCHECK_EQ(invoke->GetNumberOfArguments(), 3U);
- DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType()));
- DCHECK(DataType::IsFloatingPointType(invoke->InputAt(1)->GetType()));
- DCHECK(DataType::IsFloatingPointType(invoke->InputAt(2)->GetType()));
- DCHECK(DataType::IsFloatingPointType(invoke->GetType()));
-
- LocationSummary* const locations =
- new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
- locations->SetInAt(2, Location::RequiresFpuRegister());
- locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ CreateFpFpFpToFpNoOverlapLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitMathFmaFloat(HInvoke* invoke) {
@@ -2198,7 +2189,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitMathAtan(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPLocations(allocator_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitMathAtan2(HInvoke* invoke) {
@@ -2206,7 +2197,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitMathAtan2(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitMathPow(HInvoke* invoke) {
- CreateFPFPToFPLocations(allocator_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitMathPow(HInvoke* invoke) {
@@ -2246,7 +2237,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitMathExpm1(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPLocations(allocator_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitMathHypot(HInvoke* invoke) {
@@ -2270,7 +2261,7 @@ void IntrinsicCodeGeneratorRISCV64::VisitMathLog10(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderRISCV64::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPLocations(allocator_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorRISCV64::VisitMathNextAfter(HInvoke* invoke) {
@@ -2369,66 +2360,55 @@ void IntrinsicCodeGeneratorRISCV64::VisitMathRint(HInvoke* invoke) {
GenDoubleRound(GetAssembler(), invoke, FPRoundingMode::kRNE);
}
-void IntrinsicLocationsBuilderRISCV64::VisitMathRoundDouble(HInvoke* invoke) {
- CreateFPToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorRISCV64::VisitMathRoundDouble(HInvoke* invoke) {
+void GenMathRound(CodeGeneratorRISCV64* codegen, HInvoke* invoke, DataType::Type type) {
+ Riscv64Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
- Riscv64Assembler* assembler = GetAssembler();
FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
XRegister out = locations->Out().AsRegister<XRegister>();
ScratchRegisterScope srs(assembler);
FRegister ftmp = srs.AllocateFRegister();
- XRegister tmp = srs.AllocateXRegister();
Riscv64Label done;
// Check NaN
- __ FClassD(tmp, in);
- __ Andi(tmp, tmp, kSignalingNaN | kQuietNaN);
- __ Li(out, 0);
- __ Bnez(tmp, &done);
-
- // Add 0.5(0x3fe0000000000000)
- __ LoadConst64(out, 0x3fe0000000000000L);
- __ FMvDX(ftmp, out);
- __ FAddD(ftmp, ftmp, in, FPRoundingMode::kRDN);
+ codegen->GetInstructionVisitor()->FClass(out, in, type);
+ __ Slti(out, out, kFClassNaNMinValue);
+ __ Beqz(out, &done);
+
+ if (type == DataType::Type::kFloat64) {
+ // Add 0.5 (0x3fe0000000000000), rounding down (towards negative infinity).
+ __ LoadConst64(out, 0x3fe0000000000000L);
+ __ FMvDX(ftmp, out);
+ __ FAddD(ftmp, ftmp, in, FPRoundingMode::kRDN);
+
+ // Convert to managed `long`, rounding down (towards negative infinity).
+ __ FCvtLD(out, ftmp, FPRoundingMode::kRDN);
+ } else {
+ // Add 0.5 (0x3f000000), rounding down (towards negative infinity).
+ __ LoadConst32(out, 0x3f000000);
+ __ FMvWX(ftmp, out);
+ __ FAddS(ftmp, ftmp, in, FPRoundingMode::kRDN);
- // Convert with rounding mode(kRDN)
- __ FCvtLD(out, ftmp, FPRoundingMode::kRDN);
+ // Convert to managed `int`, rounding down (towards negative infinity).
+ __ FCvtWS(out, ftmp, FPRoundingMode::kRDN);
+ }
__ Bind(&done);
}
-void IntrinsicLocationsBuilderRISCV64::VisitMathRoundFloat(HInvoke* invoke) {
+void IntrinsicLocationsBuilderRISCV64::VisitMathRoundDouble(HInvoke* invoke) {
CreateFPToIntLocations(allocator_, invoke);
}
-void IntrinsicCodeGeneratorRISCV64::VisitMathRoundFloat(HInvoke* invoke) {
- LocationSummary* locations = invoke->GetLocations();
- Riscv64Assembler* assembler = GetAssembler();
- FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
- XRegister out = locations->Out().AsRegister<XRegister>();
- ScratchRegisterScope srs(assembler);
- FRegister ftmp = srs.AllocateFRegister();
- XRegister tmp = srs.AllocateXRegister();
- Riscv64Label done;
-
- // Check NaN
- __ FClassS(tmp, in);
- __ Andi(tmp, tmp, kSignalingNaN | kQuietNaN);
- __ Li(out, 0);
- __ Bnez(tmp, &done);
-
- // Add 0.5(0x3f000000)
- __ LoadConst32(out, 0x3f000000);
- __ FMvWX(ftmp, out);
- __ FAddS(ftmp, ftmp, in, FPRoundingMode::kRDN);
+void IntrinsicCodeGeneratorRISCV64::VisitMathRoundDouble(HInvoke* invoke) {
+ GenMathRound(codegen_, invoke, DataType::Type::kFloat64);
+}
- // Convert with rounding mode(kRDN)
- __ FCvtWS(out, ftmp, FPRoundingMode::kRDN);
+void IntrinsicLocationsBuilderRISCV64::VisitMathRoundFloat(HInvoke* invoke) {
+ CreateFPToIntLocations(allocator_, invoke);
+}
- __ Bind(&done);
+void IntrinsicCodeGeneratorRISCV64::VisitMathRoundFloat(HInvoke* invoke) {
+ GenMathRound(codegen_, invoke, DataType::Type::kFloat32);
}
void IntrinsicLocationsBuilderRISCV64::VisitMathMultiplyHigh(HInvoke* invoke) {