Merge "Implemented ABS vectorization."
diff --git a/compiler/optimizing/code_generator_vector_arm.cc b/compiler/optimizing/code_generator_vector_arm.cc
index ba2b2cb..e7f7b30 100644
--- a/compiler/optimizing/code_generator_vector_arm.cc
+++ b/compiler/optimizing/code_generator_vector_arm.cc
@@ -81,6 +81,14 @@
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderARM::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecAbs(HVecAbs* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
void LocationsBuilderARM::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
}
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 96d0021..f4874fe 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -169,6 +169,37 @@
}
}
+void LocationsBuilderARM64::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitVecAbs(HVecAbs* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ FPRegister src = DRegisterFrom(locations->InAt(0));
+ FPRegister dst = DRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ Abs(dst.V8B(), src.V8B());
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Abs(dst.V4H(), src.V4H());
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Abs(dst.V2S(), src.V2S());
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Fabs(dst.V2S(), src.V2S());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ }
+}
+
void LocationsBuilderARM64::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
}
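As a reading aid for the ARM64 lowering above: NEON Abs/Fabs operate lane-wise over a 64-bit D register, so the packed type selects the arrangement (V8B, V4H, V2S) and the vector length checked by the DCHECKs. A scalar model of one lane, as a sketch (not ART code); note that integer abs wraps at the minimum value, exactly matching Java's Math.abs contract, while Fabs merely clears the sign bit:

    #include <cstdint>
    #include <cstring>

    // Integer lane: two's-complement abs. abs(INT32_MIN) stays INT32_MIN,
    // as Math.abs(Integer.MIN_VALUE) requires; negation is done in unsigned
    // arithmetic to keep the sketch free of signed overflow.
    int32_t abs_lane(int32_t x) {
      return x < 0 ? static_cast<int32_t>(0u - static_cast<uint32_t>(x)) : x;
    }

    // Float lane: clear the sign bit only; NaN payloads pass through
    // (the 645 test below tolerates quieting, nothing more).
    float fabs_lane(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      bits &= 0x7fffffffu;
      std::memcpy(&x, &bits, sizeof(x));
      return x;
    }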
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index 1711989..74fa584 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -81,6 +81,14 @@
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderARMVIXL::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecAbs(HVecAbs* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
void LocationsBuilderARMVIXL::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
}
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index 6f5fe0d..6969abd 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -81,6 +81,14 @@
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderMIPS::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecAbs(HVecAbs* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
void LocationsBuilderMIPS::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
}
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index 2ee7ac9..87118ce 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -81,6 +81,14 @@
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderMIPS64::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecAbs(HVecAbs* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
void LocationsBuilderMIPS64::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
}
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 4f3988e..8dabb4d 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -199,6 +199,46 @@
}
}
+void LocationsBuilderX86::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ if (instruction->GetPackedType() == Primitive::kPrimInt) {
+ instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitVecAbs(HVecAbs* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimInt: {
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ __ movaps(dst, src);
+ __ pxor(tmp, tmp);
+ __ pcmpgtd(tmp, dst);
+ __ pxor(dst, tmp);
+ __ psubd(dst, tmp);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pcmpeqb(dst, dst); // all ones
+ __ psrld(dst, Immediate(1));
+ __ andps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ pcmpeqb(dst, dst); // all ones
+ __ psrlq(dst, Immediate(1));
+ __ andpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
void LocationsBuilderX86::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
// Boolean-not requires a temporary to construct the 16 x one.
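The two SSE2 idioms above are worth a gloss. SSE2 has no packed integer abs, so it is synthesized from a sign mask: pcmpgtd against zero yields all-ones in negative lanes, and xor-then-subtract with that mask negates exactly those lanes, since (x ^ -1) - (-1) = ~x + 1 = -x. For float/double, the sign mask is built in-register (all-ones shifted right by one) instead of loading a memory constant. A scalar model of both identities, as a sketch:

    #include <cstdint>

    // Mirrors movaps/pxor/pcmpgtd/pxor/psubd: mask is 0 or -1 per lane.
    int32_t abs_via_mask(int32_t x) {
      int32_t mask = -static_cast<int32_t>(static_cast<uint32_t>(x) >> 31);
      return (x ^ mask) - mask;  // identity when mask == 0, negation when -1
    }

    // Mirrors pcmpeqb/psrld/andps: all-ones >> 1 is the sign-clearing mask.
    uint32_t fabs_bits(uint32_t float_bits) {
      uint32_t mask = ~0u >> 1;  // 0x7fffffff
      return float_bits & mask;
    }

The identical sequences appear in the x86-64 back end below.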
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index b1c1494..e956088 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -192,6 +192,46 @@
}
}
+void LocationsBuilderX86_64::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ if (instruction->GetPackedType() == Primitive::kPrimInt) {
+ instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecAbs(HVecAbs* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimInt: {
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ __ movaps(dst, src);
+ __ pxor(tmp, tmp);
+ __ pcmpgtd(tmp, dst);
+ __ pxor(dst, tmp);
+ __ psubd(dst, tmp);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pcmpeqb(dst, dst); // all ones
+ __ psrld(dst, Immediate(1));
+ __ andps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ pcmpeqb(dst, dst); // all ones
+ __ psrlq(dst, Immediate(1));
+ __ andpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
void LocationsBuilderX86_64::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
// Boolean-not requires a temporary to construct the 16 x one.
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 42ed04d..b5f46a6 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -735,8 +735,32 @@
return true;
}
} else if (instruction->IsInvokeStaticOrDirect()) {
- // TODO: coming soon.
- return false;
+ // Accept particular intrinsics.
+ HInvokeStaticOrDirect* invoke = instruction->AsInvokeStaticOrDirect();
+ switch (invoke->GetIntrinsic()) {
+ case Intrinsics::kMathAbsInt:
+ case Intrinsics::kMathAbsLong:
+ case Intrinsics::kMathAbsFloat:
+ case Intrinsics::kMathAbsDouble: {
+ // Deal with vector restrictions.
+ if (HasVectorRestrictions(restrictions, kNoAbs) ||
+ HasVectorRestrictions(restrictions, kNoHiBits)) {
+ // TODO: we can do better for some hibits cases.
+ return false;
+ }
+ // Accept ABS(x) for vectorizable operand.
+ HInstruction* opa = instruction->InputAt(0);
+ if (VectorizeUse(node, opa, generate_code, type, restrictions)) {
+ if (generate_code) {
+ GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
+ }
+ return true;
+ }
+ return false;
+ }
+ default:
+ return false;
+ } // switch
}
return false;
}
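How the gate above works: each back end publishes a bitmask of operations it cannot vectorize for the chosen packed type (see the kNoAbs bit added to loop_optimization.h below), and recognition bails out if any tested bit is set. A minimal model, assuming HasVectorRestrictions is a plain bitmask probe; the kNoMul/kNoDiv values are not visible in this diff and are guessed here:

    #include <cstdint>

    // Bits from loop_optimization.h; 1 and 2 for kNoMul/kNoDiv are assumed.
    constexpr uint64_t kNoMul = 1, kNoDiv = 2, kNoShift = 4,
                       kNoShr = 8, kNoHiBits = 16, kNoAbs = 32;

    bool HasVectorRestrictions(uint64_t restrictions, uint64_t tested) {
      return (restrictions & tested) != 0;
    }

    // Example: on x86 a byte loop carries kNoMul|kNoDiv|kNoShift|kNoAbs, so a
    // packed-byte Math.abs is rejected; an int loop (kNoDiv only) passes.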
@@ -754,11 +778,11 @@
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoAbs;
return TrySetVectorLength(8);
case Primitive::kPrimChar:
case Primitive::kPrimShort:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoAbs;
return TrySetVectorLength(4);
case Primitive::kPrimInt:
*restrictions |= kNoDiv;
@@ -775,17 +799,17 @@
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
- *restrictions |= kNoMul | kNoDiv | kNoShift;
+ *restrictions |= kNoMul | kNoDiv | kNoShift | kNoAbs;
return TrySetVectorLength(16);
case Primitive::kPrimChar:
case Primitive::kPrimShort:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoAbs;
return TrySetVectorLength(8);
case Primitive::kPrimInt:
*restrictions |= kNoDiv;
return TrySetVectorLength(4);
case Primitive::kPrimLong:
- *restrictions |= kNoMul | kNoDiv | kNoShr;
+ *restrictions |= kNoMul | kNoDiv | kNoShr | kNoAbs;
return TrySetVectorLength(2);
case Primitive::kPrimFloat:
return TrySetVectorLength(4);
@@ -956,7 +980,41 @@
new (global_allocator_) HVecUShr(global_allocator_, opa, opb, type, vector_length_),
new (global_allocator_) HUShr(type, opa, opb));
case HInstruction::kInvokeStaticOrDirect: {
- // TODO: coming soon.
+ HInvokeStaticOrDirect* invoke = org->AsInvokeStaticOrDirect();
+ if (vector_mode_ == kVector) {
+ switch (invoke->GetIntrinsic()) {
+ case Intrinsics::kMathAbsInt:
+ case Intrinsics::kMathAbsLong:
+ case Intrinsics::kMathAbsFloat:
+ case Intrinsics::kMathAbsDouble:
+ DCHECK(opb == nullptr);
+ vector = new (global_allocator_) HVecAbs(global_allocator_, opa, type, vector_length_);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD intrinsic";
+ UNREACHABLE();
+ } // switch invoke
+ } else {
+ // In scalar code, simply clone the method invoke, and replace its operands
+ // with the corresponding new scalar instructions in the loop.
+ DCHECK(vector_mode_ == kSequential);
+ HInvokeStaticOrDirect* new_invoke = new (global_allocator_) HInvokeStaticOrDirect(
+ global_allocator_,
+ invoke->GetNumberOfArguments(),
+ invoke->GetType(),
+ invoke->GetDexPc(),
+ invoke->GetDexMethodIndex(),
+ invoke->GetResolvedMethod(),
+ invoke->GetDispatchInfo(),
+ invoke->GetInvokeType(),
+ invoke->GetTargetMethod(),
+ invoke->GetClinitCheckRequirement());
+ HInputsRef inputs = invoke->GetInputs();
+ for (size_t index = 0; index < inputs.size(); ++index) {
+ new_invoke->SetArgumentAt(index, vector_map_->Get(inputs[index]));
+ }
+ vector = new_invoke;
+ }
break;
}
default:
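For orientation, the shape this switch produces for the float abs loop of test 640 below: in kVector mode a single HVecAbs sits between the packed load and store; in kSequential mode (the scalar cleanup loop for leftover iterations) the original invoke is cloned with its operands rewired through vector_map_. A sketch of the resulting HIR, with ARM64 lane counts (2 x float per 64-bit D register) assumed:

    // main loop, vector_mode_ == kVector:
    i  = Phi                                  // loop induction
    v0 = VecLoad  a, i                        // 2 x float
    v1 = VecAbs   v0                          // 2 x float, lowered to Fabs
         VecStore a, i, v1
    // cleanup loop, vector_mode_ == kSequential:
    s0 = ArrayGet a, i
    s1 = InvokeStaticOrDirect Math.abs(s0)    // clone with remapped argument
         ArraySet a, i, s1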
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 16f7691..d8f50aa 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -68,6 +68,7 @@
kNoShift = 4, // no shift
kNoShr = 8, // no arithmetic shift right
kNoHiBits = 16, // "wider" operations cannot bring in higher order bits
+ kNoAbs = 32, // no absolute value
};
/*
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 52a02c2..671f950 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1374,6 +1374,7 @@
M(VecSumReduce, VecUnaryOperation) \
M(VecCnv, VecUnaryOperation) \
M(VecNeg, VecUnaryOperation) \
+ M(VecAbs, VecUnaryOperation) \
M(VecNot, VecUnaryOperation) \
M(VecAdd, VecBinaryOperation) \
M(VecSub, VecBinaryOperation) \
@@ -4224,6 +4225,10 @@
dispatch_info_ = dispatch_info;
}
+ DispatchInfo GetDispatchInfo() const {
+ return dispatch_info_;
+ }
+
void AddSpecialInput(HInstruction* input) {
// We allow only one special input.
DCHECK(!IsStringInit() && !HasCurrentMethodInput());
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 9f9b918..0cbbf2a 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -278,6 +278,27 @@
DISALLOW_COPY_AND_ASSIGN(HVecNeg);
};
+// Takes absolute value of every component in the vector,
+// viz. abs[ x1, .. , xn ] = [ |x1|, .. , |xn| ].
+class HVecAbs FINAL : public HVecUnaryOperation {
+ public:
+ HVecAbs(ArenaAllocator* arena,
+ HInstruction* input,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecUnaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(input->IsVecOperation());
+ DCHECK_EQ(input->AsVecOperation()->GetPackedType(), packed_type);
+ SetRawInputAt(0, input);
+  }
+
+  DECLARE_INSTRUCTION(VecAbs);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecAbs);
+};
+
// Bitwise- or boolean-nots every component in the vector,
// viz. not[ x1, .. , xn ] = [ ~x1, .. , ~xn ], or
// not[ x1, .. , xn ] = [ !x1, .. , !xn ] for boolean.
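A hypothetical construction of the new node, for illustration only; it mirrors the GenerateVecOp call above, and the two DCHECKs require the input to be a vector operation of the same packed type (allocator and vec_input are assumed to exist):

    // Build an abs over 2 packed floats from an existing packed-float producer.
    HVecAbs* abs = new (allocator) HVecAbs(allocator,
                                           vec_input,  // HVecOperation, kPrimFloat
                                           Primitive::kPrimFloat,
                                           /* vector_length */ 2u);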
diff --git a/test/640-checker-float-simd/src/Main.java b/test/640-checker-float-simd/src/Main.java
index 80c3112..4bcb7e2 100644
--- a/test/640-checker-float-simd/src/Main.java
+++ b/test/640-checker-float-simd/src/Main.java
@@ -107,8 +107,10 @@
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.abs() loop_optimization (after)
- //
- // TODO: fill in when supported
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void abs() {
for (int i = 0; i < 128; i++)
a[i] = Math.abs(a[i]);
diff --git a/test/645-checker-abs-simd/expected.txt b/test/645-checker-abs-simd/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/645-checker-abs-simd/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/645-checker-abs-simd/info.txt b/test/645-checker-abs-simd/info.txt
new file mode 100644
index 0000000..8fa4066
--- /dev/null
+++ b/test/645-checker-abs-simd/info.txt
@@ -0,0 +1 @@
+Functional tests on abs SIMD vectorization.
diff --git a/test/645-checker-abs-simd/src/Main.java b/test/645-checker-abs-simd/src/Main.java
new file mode 100644
index 0000000..3111350
--- /dev/null
+++ b/test/645-checker-abs-simd/src/Main.java
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for ABS vectorization.
+ */
+public class Main {
+
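+  // Quiet bit in the float/double NaN mantissa: the vectorized abs may
+  // quieten a signaling NaN, which expectEqualsNaN32/64 below tolerate.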
+ private static final int SPQUIET = 1 << 22;
+ private static final long DPQUIET = 1L << 51;
+
+ private static void doitInt(int[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = Math.abs(x[i]);
+ }
+ }
+
+ private static void doitLong(long[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = Math.abs(x[i]);
+ }
+ }
+
+ private static void doitFloat(float[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = Math.abs(x[i]);
+ }
+ }
+
+ private static void doitDouble(double[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = Math.abs(x[i]);
+ }
+ }
+
+ public static void main(String[] args) {
+ // Set up minint32, maxint32 and some others.
+ int[] xi = new int[8];
+ xi[0] = 0x80000000;
+ xi[1] = 0x7fffffff;
+ xi[2] = 0x80000001;
+ xi[3] = -13;
+ xi[4] = -1;
+ xi[5] = 0;
+ xi[6] = 1;
+ xi[7] = 999;
+ doitInt(xi);
+ expectEquals32(0x80000000, xi[0]);
+ expectEquals32(0x7fffffff, xi[1]);
+ expectEquals32(0x7fffffff, xi[2]);
+ expectEquals32(13, xi[3]);
+ expectEquals32(1, xi[4]);
+ expectEquals32(0, xi[5]);
+ expectEquals32(1, xi[6]);
+ expectEquals32(999, xi[7]);
+
+ // Set up minint64, maxint64 and some others.
+ long[] xl = new long[8];
+ xl[0] = 0x8000000000000000L;
+ xl[1] = 0x7fffffffffffffffL;
+ xl[2] = 0x8000000000000001L;
+ xl[3] = -13;
+ xl[4] = -1;
+ xl[5] = 0;
+ xl[6] = 1;
+ xl[7] = 999;
+ doitLong(xl);
+ expectEquals64(0x8000000000000000L, xl[0]);
+ expectEquals64(0x7fffffffffffffffL, xl[1]);
+ expectEquals64(0x7fffffffffffffffL, xl[2]);
+ expectEquals64(13, xl[3]);
+ expectEquals64(1, xl[4]);
+ expectEquals64(0, xl[5]);
+ expectEquals64(1, xl[6]);
+ expectEquals64(999, xl[7]);
+
+ // Set up float NaN and some others.
+ float[] xf = new float[16];
+ xf[0] = Float.intBitsToFloat(0x7f800001);
+ xf[1] = Float.intBitsToFloat(0x7fa00000);
+ xf[2] = Float.intBitsToFloat(0x7fc00000);
+ xf[3] = Float.intBitsToFloat(0x7fffffff);
+ xf[4] = Float.intBitsToFloat(0xff800001);
+ xf[5] = Float.intBitsToFloat(0xffa00000);
+ xf[6] = Float.intBitsToFloat(0xffc00000);
+ xf[7] = Float.intBitsToFloat(0xffffffff);
+ xf[8] = Float.NEGATIVE_INFINITY;
+ xf[9] = -99.2f;
+ xf[10] = -1.0f;
+ xf[11] = -0.0f;
+ xf[12] = +0.0f;
+ xf[13] = +1.0f;
+ xf[14] = +99.2f;
+ xf[15] = Float.POSITIVE_INFINITY;
+ doitFloat(xf);
+ expectEqualsNaN32(0x7f800001, Float.floatToRawIntBits(xf[0]));
+ expectEqualsNaN32(0x7fa00000, Float.floatToRawIntBits(xf[1]));
+ expectEqualsNaN32(0x7fc00000, Float.floatToRawIntBits(xf[2]));
+ expectEqualsNaN32(0x7fffffff, Float.floatToRawIntBits(xf[3]));
+ expectEqualsNaN32(0x7f800001, Float.floatToRawIntBits(xf[4]));
+ expectEqualsNaN32(0x7fa00000, Float.floatToRawIntBits(xf[5]));
+ expectEqualsNaN32(0x7fc00000, Float.floatToRawIntBits(xf[6]));
+ expectEqualsNaN32(0x7fffffff, Float.floatToRawIntBits(xf[7]));
+ expectEquals32(
+ Float.floatToRawIntBits(Float.POSITIVE_INFINITY),
+ Float.floatToRawIntBits(xf[8]));
+ expectEquals32(
+ Float.floatToRawIntBits(99.2f),
+ Float.floatToRawIntBits(xf[9]));
+ expectEquals32(
+ Float.floatToRawIntBits(1.0f),
+ Float.floatToRawIntBits(xf[10]));
+ expectEquals32(0, Float.floatToRawIntBits(xf[11]));
+ expectEquals32(0, Float.floatToRawIntBits(xf[12]));
+ expectEquals32(
+ Float.floatToRawIntBits(1.0f),
+ Float.floatToRawIntBits(xf[13]));
+ expectEquals32(
+ Float.floatToRawIntBits(99.2f),
+ Float.floatToRawIntBits(xf[14]));
+ expectEquals32(
+ Float.floatToRawIntBits(Float.POSITIVE_INFINITY),
+ Float.floatToRawIntBits(xf[15]));
+
+ // Set up double NaN and some others.
+ double[] xd = new double[16];
+ xd[0] = Double.longBitsToDouble(0x7ff0000000000001L);
+ xd[1] = Double.longBitsToDouble(0x7ff4000000000000L);
+ xd[2] = Double.longBitsToDouble(0x7ff8000000000000L);
+ xd[3] = Double.longBitsToDouble(0x7fffffffffffffffL);
+ xd[4] = Double.longBitsToDouble(0xfff0000000000001L);
+ xd[5] = Double.longBitsToDouble(0xfff4000000000000L);
+ xd[6] = Double.longBitsToDouble(0xfff8000000000000L);
+ xd[7] = Double.longBitsToDouble(0xffffffffffffffffL);
+ xd[8] = Double.NEGATIVE_INFINITY;
+ xd[9] = -99.2f;
+ xd[10] = -1.0f;
+ xd[11] = -0.0f;
+ xd[12] = +0.0f;
+ xd[13] = +1.0f;
+ xd[14] = +99.2f;
+ xd[15] = Double.POSITIVE_INFINITY;
+ doitDouble(xd);
+ expectEqualsNaN64(0x7ff0000000000001L, Double.doubleToRawLongBits(xd[0]));
+ expectEqualsNaN64(0x7ff4000000000000L, Double.doubleToRawLongBits(xd[1]));
+ expectEqualsNaN64(0x7ff8000000000000L, Double.doubleToRawLongBits(xd[2]));
+ expectEqualsNaN64(0x7fffffffffffffffL, Double.doubleToRawLongBits(xd[3]));
+ expectEqualsNaN64(0x7ff0000000000001L, Double.doubleToRawLongBits(xd[4]));
+ expectEqualsNaN64(0x7ff4000000000000L, Double.doubleToRawLongBits(xd[5]));
+ expectEqualsNaN64(0x7ff8000000000000L, Double.doubleToRawLongBits(xd[6]));
+ expectEqualsNaN64(0x7fffffffffffffffL, Double.doubleToRawLongBits(xd[7]));
+ expectEquals64(
+ Double.doubleToRawLongBits(Double.POSITIVE_INFINITY),
+ Double.doubleToRawLongBits(xd[8]));
+ expectEquals64(
+ Double.doubleToRawLongBits(99.2f),
+ Double.doubleToRawLongBits(xd[9]));
+ expectEquals64(
+ Double.doubleToRawLongBits(1.0f),
+ Double.doubleToRawLongBits(xd[10]));
+ expectEquals64(0, Double.doubleToRawLongBits(xd[11]));
+ expectEquals64(0, Double.doubleToRawLongBits(xd[12]));
+ expectEquals64(
+ Double.doubleToRawLongBits(1.0f),
+ Double.doubleToRawLongBits(xd[13]));
+ expectEquals64(
+ Double.doubleToRawLongBits(99.2f),
+ Double.doubleToRawLongBits(xd[14]));
+ expectEquals64(
+ Double.doubleToRawLongBits(Double.POSITIVE_INFINITY),
+ Double.doubleToRawLongBits(xd[15]));
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals32(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals64(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ // We allow that an expected NaN result has become quiet.
+ private static void expectEqualsNaN32(int expected, int result) {
+ if (expected != result && (expected | SPQUIET) != result) {
+ throw new Error("Expected: 0x" + Integer.toHexString(expected)
+ + ", found: 0x" + Integer.toHexString(result));
+ }
+ }
+
+ // We allow that an expected NaN result has become quiet.
+ private static void expectEqualsNaN64(long expected, long result) {
+ if (expected != result && (expected | DPQUIET) != result) {
+ throw new Error("Expected: 0x" + Long.toHexString(expected)
+ + ", found: 0x" + Long.toHexString(result));
+ }
+ }
+}