diff options
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/intrinsics.h         |  3 +++
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc  | 10 ++++++++--
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc    |  5 +++++
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc | 10 ++++++++--
4 files changed, 24 insertions, 4 deletions
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h index e459516e59..a6db1e8e2a 100644 --- a/compiler/optimizing/intrinsics.h +++ b/compiler/optimizing/intrinsics.h @@ -27,6 +27,9 @@ namespace art { class CompilerDriver; class DexFile; +// Temporary measure until we have caught up with the Java 7 definition of Math.round. b/26327751 +static constexpr bool kRoundIsPlusPointFive = false; + // Recognize intrinsics from HInvoke nodes. class IntrinsicsRecognizer : public HOptimization { public: diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 9f6863cf6e..f723940444 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -614,7 +614,10 @@ static void GenMathRound(LocationSummary* locations, } void IntrinsicLocationsBuilderARM64::VisitMathRoundDouble(HInvoke* invoke) { - CreateFPToIntPlusTempLocations(arena_, invoke); + // See intrinsics.h. + if (kRoundIsPlusPointFive) { + CreateFPToIntPlusTempLocations(arena_, invoke); + } } void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) { @@ -622,7 +625,10 @@ void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) { - CreateFPToIntPlusTempLocations(arena_, invoke); + // See intrinsics.h. + if (kRoundIsPlusPointFive) { + CreateFPToIntPlusTempLocations(arena_, invoke); + } } void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) { diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index 80190629ee..677f2e9c81 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -720,6 +720,11 @@ void IntrinsicCodeGeneratorX86::VisitMathRint(HInvoke* invoke) { // Note that 32 bit x86 doesn't have the capability to inline MathRoundDouble, // as it needs 64 bit instructions. 
void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) { + // See intrinsics.h. + if (!kRoundIsPlusPointFive) { + return; + } + // Do we have instruction support? if (codegen_->GetInstructionSetFeatures().HasSSE4_1()) { LocationSummary* locations = new (arena_) LocationSummary(invoke, diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index aa1c109738..690cf3d413 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -610,7 +610,10 @@ static void CreateSSE41FPToIntLocations(ArenaAllocator* arena, } void IntrinsicLocationsBuilderX86_64::VisitMathRoundFloat(HInvoke* invoke) { - CreateSSE41FPToIntLocations(arena_, invoke, codegen_); + // See intrinsics.h. + if (kRoundIsPlusPointFive) { + CreateSSE41FPToIntLocations(arena_, invoke, codegen_); + } } void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) { @@ -657,7 +660,10 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathRoundDouble(HInvoke* invoke) { - CreateSSE41FPToIntLocations(arena_, invoke, codegen_); + // See intrinsics.h. + if (kRoundIsPlusPointFive) { + CreateSSE41FPToIntLocations(arena_, invoke, codegen_); + } } void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) { |