ARM64: FP16.rint() intrinsic for ARMv8
This CL implements an intrinsic for the rint() method using
ARMv8.2 FP16 instructions.
This intrinsic implementation achieves bit-level compatibility with the
original Java implementation android.util.Half.rint().
The time required in milliseconds to execute the below code on Pixel3:
- Java implementation android.util.Half.rint():
- big cluster only: 19828
- little cluster only: 61457
- arm64 intrinsic implementation:
- big cluster only: 14186 (~28% faster)
- little cluster only: 54405 (~11% faster)
Analysis of this function with simpleperf showed that approximately only
60-65% of the time is spent in libcore.util.FP16.rint. So the percentage
improvement using intrinsics is likely to be more than the numbers stated
above.
Another reason that the performance improvement with the intrinsic is
lower than expected is that the Java implementation for values between
-1 and 1 (abs < 0x3c00) requires only a few instructions, and so should
perform almost as well as the intrinsic in this case. In the benchmark
function below, 46.8% of the values tested are between -1 and 1.
public static short benchmarkrint(){
short ret = 0;
long before = 0;
long after = 0;
before = System.currentTimeMillis();
for(int i = 0; i < 50000; i++){
for (short h = Short.MIN_VALUE; h < Short.MAX_VALUE; h++) {
ret += FP16.rint(h);
}
}
after = System.currentTimeMillis();
System.out.println("Time of FP16.rint (ms): " + (after - before));
System.out.println(ret);
return ret;
}
Test: 580-fp16
Test: art/test/testrunner/run_build_test_target.py -j80 art-test-javac
Change-Id: I075c3e85a36fd9bce14deee437c5b961bd667b5d
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 0859596..9ef2e69 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -3288,6 +3288,22 @@
GenerateFP16Round(invoke, codegen_, masm, roundOp);
}
+void IntrinsicLocationsBuilderARM64::VisitFP16Rint(HInvoke* invoke) {
+ if (!codegen_->GetInstructionSetFeatures().HasFP16()) {
+ return;
+ }
+
+ CreateIntToIntLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitFP16Rint(HInvoke* invoke) {
+ MacroAssembler* masm = GetVIXLAssembler();
+ auto roundOp = [masm](const FPRegister& out, const FPRegister& in) {
+ __ Frintn(out, in); // Round to nearest, with ties to even
+ };
+ GenerateFP16Round(invoke, codegen_, masm, roundOp);
+}
+
UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf);
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 77dcbfb..1dfebdd 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -3074,6 +3074,7 @@
UNIMPLEMENTED_INTRINSIC(ARMVIXL, FP16ToHalf)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, FP16Floor)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, FP16Ceil)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, FP16Rint)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index fc06691..ea9c591 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2711,6 +2711,7 @@
UNIMPLEMENTED_INTRINSIC(MIPS, FP16ToHalf)
UNIMPLEMENTED_INTRINSIC(MIPS, FP16Floor)
UNIMPLEMENTED_INTRINSIC(MIPS, FP16Ceil)
+UNIMPLEMENTED_INTRINSIC(MIPS, FP16Rint)
UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 8a6e94c..fd93902 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2361,6 +2361,7 @@
UNIMPLEMENTED_INTRINSIC(MIPS64, FP16ToHalf)
UNIMPLEMENTED_INTRINSIC(MIPS64, FP16Floor)
UNIMPLEMENTED_INTRINSIC(MIPS64, FP16Ceil)
+UNIMPLEMENTED_INTRINSIC(MIPS64, FP16Rint)
UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index e10214b..9d3cecb 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3085,6 +3085,7 @@
UNIMPLEMENTED_INTRINSIC(X86, FP16ToHalf)
UNIMPLEMENTED_INTRINSIC(X86, FP16Floor)
UNIMPLEMENTED_INTRINSIC(X86, FP16Ceil)
+UNIMPLEMENTED_INTRINSIC(X86, FP16Rint)
UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index d8ccd9b..1111a59 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2752,6 +2752,7 @@
UNIMPLEMENTED_INTRINSIC(X86_64, FP16ToHalf)
UNIMPLEMENTED_INTRINSIC(X86_64, FP16Floor)
UNIMPLEMENTED_INTRINSIC(X86_64, FP16Ceil)
+UNIMPLEMENTED_INTRINSIC(X86_64, FP16Rint)
UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter);