diff options
| author | 2015-12-28 17:59:43 +0000 | |
|---|---|---|
| committer | 2015-12-28 17:59:43 +0000 | |
| commit | 6147f7520a1279b58d58c5d73a707dea2fbdd376 (patch) | |
| tree | 66fdb4f09ffe5d62f491f53b9d311ea3e7349f71 /compiler | |
| parent | e38e4b467bdcca1bf5f8b80adc66d3064fa9cf45 (diff) | |
| parent | e6d0d8de85f79c8702ee722a04cd89ee7e89aeb7 (diff) | |
Merge changes I00dc6cfc,Iae8f1c88
* changes:
ART: Disable Math.round intrinsics
Revert "Make Math.round consistent on arm64."
Diffstat (limited to 'compiler')
| -rw-r--r-- | compiler/dex/quick/arm64/fp_arm64.cc | 2 |
| -rw-r--r-- | compiler/optimizing/intrinsics.h | 3 |
| -rw-r--r-- | compiler/optimizing/intrinsics_arm64.cc | 14 |
| -rw-r--r-- | compiler/optimizing/intrinsics_x86.cc | 5 |
| -rw-r--r-- | compiler/optimizing/intrinsics_x86_64.cc | 10 |
5 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc index 97f13e9e2b..0130ef481a 100644 --- a/compiler/dex/quick/arm64/fp_arm64.cc +++ b/compiler/dex/quick/arm64/fp_arm64.cc @@ -448,8 +448,8 @@ bool Arm64Mir2Lir::GenInlinedRint(CallInfo* info) { } bool Arm64Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) { + // b/26327751. if ((true)) { - // TODO(26327751): Re-enable? return false; } int32_t encoded_imm = EncodeImmSingle(bit_cast<uint32_t, float>(0.5f)); diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h index e459516e59..a6db1e8e2a 100644 --- a/compiler/optimizing/intrinsics.h +++ b/compiler/optimizing/intrinsics.h @@ -27,6 +27,9 @@ namespace art { class CompilerDriver; class DexFile; +// Temporary measure until we have caught up with the Java 7 definition of Math.round. b/26327751 +static constexpr bool kRoundIsPlusPointFive = false; + // Recognize intrinsics from HInvoke nodes. class IntrinsicsRecognizer : public HOptimization { public: diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 4e7f60dad6..f723940444 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -614,11 +614,10 @@ static void GenMathRound(LocationSummary* locations, } void IntrinsicLocationsBuilderARM64::VisitMathRoundDouble(HInvoke* invoke) { - if ((true)) { - // TODO(26327751): Re-enable? - return; + // See intrinsics.h. + if (kRoundIsPlusPointFive) { + CreateFPToIntPlusTempLocations(arena_, invoke); } - CreateFPToIntPlusTempLocations(arena_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) { @@ -626,11 +625,10 @@ void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) { - if ((true)) { - // TODO(26327751): Re-enable? - return; + // See intrinsics.h. + if (kRoundIsPlusPointFive) { + CreateFPToIntPlusTempLocations(arena_, invoke); } - CreateFPToIntPlusTempLocations(arena_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) { diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index 80190629ee..677f2e9c81 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -720,6 +720,11 @@ void IntrinsicCodeGeneratorX86::VisitMathRint(HInvoke* invoke) { // Note that 32 bit x86 doesn't have the capability to inline MathRoundDouble, // as it needs 64 bit instructions. void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) { + // See intrinsics.h. + if (!kRoundIsPlusPointFive) { + return; + } + // Do we have instruction support? if (codegen_->GetInstructionSetFeatures().HasSSE4_1()) { LocationSummary* locations = new (arena_) LocationSummary(invoke, diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index aa1c109738..690cf3d413 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -610,7 +610,10 @@ static void CreateSSE41FPToIntLocations(ArenaAllocator* arena, } void IntrinsicLocationsBuilderX86_64::VisitMathRoundFloat(HInvoke* invoke) { - CreateSSE41FPToIntLocations(arena_, invoke, codegen_); + // See intrinsics.h. + if (kRoundIsPlusPointFive) { + CreateSSE41FPToIntLocations(arena_, invoke, codegen_); + } } void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) { @@ -657,7 +660,10 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathRoundDouble(HInvoke* invoke) { - CreateSSE41FPToIntLocations(arena_, invoke, codegen_); + // See intrinsics.h. + if (kRoundIsPlusPointFive) { + CreateSSE41FPToIntLocations(arena_, invoke, codegen_); + } } void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {