| author | 2016-01-22 09:31:52 +0000 |
| --- | --- |
| committer | 2016-01-22 09:31:52 +0000 |
| commit | 6b5afdd144d2bb3bf994240797834b5666b2cf98 (patch) |
| tree | d536cd7b3aaf55c563e82c2c522521a91b2bb953 /compiler/optimizing |
| parent | debeb98aaa8950caf1a19df490f2ac9bf563075b (diff) |
Revert "ARM/ARM64: Extend support of instruction combining."
The test fails its Checker assertions.
This reverts commit debeb98aaa8950caf1a19df490f2ac9bf563075b.
Change-Id: I49929e15950c7814da6c411ecd2b640d12de80df
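
For context, the change being reverted had generalised ARM64's multiply-accumulate folding (HArm64MultiplyAccumulate, emitted as madd/msub) into a shared HMultiplyAccumulate node that the ARM backend could also use (mla/mls). The sketch below is a minimal, hypothetical C++ illustration of the arithmetic shape that folding targets: a multiply whose only use is an add or a subtract. The function names are ours and do not appear in the ART sources.

```cpp
// Minimal sketch (not from ART): source-level patterns that can map to a single
// multiply-accumulate instruction instead of a separate mul followed by add/sub.
#include <cstdint>
#include <cstdio>

int32_t MulAdd(int32_t acc, int32_t x, int32_t y) {
  return acc + x * y;  // mla on ARM, madd on ARM64
}

int32_t MulSub(int32_t acc, int32_t x, int32_t y) {
  return acc - x * y;  // mls on ARM, msub on ARM64
}

int main() {
  // 10 + 3*4 = 22, 10 - 3*4 = -2
  std::printf("%d %d\n", MulAdd(10, 3, 4), MulSub(10, 3, 4));
  return 0;
}
```

The reverted pass performed the equivalent fusion at the HIR level; an ARM/ARM64 C++ toolchain at -O2 will typically emit the fused instructions for the functions above.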
Diffstat (limited to 'compiler/optimizing')
| Mode | File | Lines changed |
| --- | --- | --- |
| -rw-r--r-- | compiler/optimizing/code_generator_arm.cc | 27 |
| -rw-r--r-- | compiler/optimizing/code_generator_arm.h | 2 |
| -rw-r--r-- | compiler/optimizing/code_generator_arm64.cc | 34 |
| -rw-r--r-- | compiler/optimizing/code_generator_arm64.h | 2 |
| -rw-r--r-- | compiler/optimizing/graph_visualizer.cc | 10 |
| -rw-r--r-- | compiler/optimizing/instruction_simplifier_arm.cc | 30 |
| -rw-r--r-- | compiler/optimizing/instruction_simplifier_arm.h | 58 |
| -rw-r--r-- | compiler/optimizing/instruction_simplifier_arm64.cc | 133 |
| -rw-r--r-- | compiler/optimizing/instruction_simplifier_arm64.h | 4 |
| -rw-r--r-- | compiler/optimizing/instruction_simplifier_shared.cc | 189 |
| -rw-r--r-- | compiler/optimizing/instruction_simplifier_shared.h | 28 |
| -rw-r--r-- | compiler/optimizing/nodes.h | 17 |
| -rw-r--r-- | compiler/optimizing/nodes_arm64.h | 34 |
| -rw-r--r-- | compiler/optimizing/nodes_shared.h | 58 |
| -rw-r--r-- | compiler/optimizing/optimizing_compiler.cc | 6 |
15 files changed, 185 insertions, 447 deletions
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index f265a0c7d3..272579219f 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -6449,33 +6449,6 @@ Literal* CodeGeneratorARM::DeduplicateMethodCodeLiteral(MethodReference target_m
   return DeduplicateMethodLiteral(target_method, &call_patches_);
 }

-void LocationsBuilderARM::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
-  locations->SetInAt(HMultiplyAccumulate::kInputAccumulatorIndex,
-                     Location::RequiresRegister());
-  locations->SetInAt(HMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
-  locations->SetInAt(HMultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void InstructionCodeGeneratorARM::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
-  LocationSummary* locations = instr->GetLocations();
-  Register res = locations->Out().AsRegister<Register>();
-  Register accumulator = locations->InAt(HMultiplyAccumulate::kInputAccumulatorIndex)
-      .AsRegister<Register>();
-  Register mul_left = locations->InAt(HMultiplyAccumulate::kInputMulLeftIndex)
-      .AsRegister<Register>();
-  Register mul_right = locations->InAt(HMultiplyAccumulate::kInputMulRightIndex)
-      .AsRegister<Register>();
-
-  if (instr->GetOpKind() == HInstruction::kAdd) {
-    __ mla(res, mul_left, mul_right, accumulator);
-  } else {
-    __ mls(res, mul_left, mul_right, accumulator);
-  }
-}
-
 void LocationsBuilderARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
   // Nothing to do, this should be removed during prepare for register allocator.
   LOG(FATAL) << "Unreachable";
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index df2126c653..d45ea973f9 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -159,7 +159,6 @@ class LocationsBuilderARM : public HGraphVisitor {

   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
-  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

 #undef DECLARE_VISIT_INSTRUCTION

@@ -198,7 +197,6 @@ class InstructionCodeGeneratorARM : public InstructionCodeGenerator {

   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
-  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

 #undef DECLARE_VISIT_INSTRUCTION

diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 3fdd7186d1..3d65e9c53c 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1952,27 +1952,21 @@ void InstructionCodeGeneratorARM64::VisitArm64IntermediateAddress(
          Operand(InputOperandAt(instruction, 1)));
 }

-void LocationsBuilderARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
+void LocationsBuilderARM64::VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instr) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
-  HInstruction* accumulator = instr->InputAt(HMultiplyAccumulate::kInputAccumulatorIndex);
-  if (instr->GetOpKind() == HInstruction::kSub &&
-      accumulator->IsConstant() &&
-      accumulator->AsConstant()->IsZero()) {
-    // Don't allocate register for Mneg instruction.
-  } else {
-    locations->SetInAt(HMultiplyAccumulate::kInputAccumulatorIndex,
-                       Location::RequiresRegister());
-  }
-  locations->SetInAt(HMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
-  locations->SetInAt(HMultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister());
+  locations->SetInAt(HArm64MultiplyAccumulate::kInputAccumulatorIndex,
+                     Location::RequiresRegister());
+  locations->SetInAt(HArm64MultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
+  locations->SetInAt(HArm64MultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister());
   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
 }

-void InstructionCodeGeneratorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
+void InstructionCodeGeneratorARM64::VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instr) {
   Register res = OutputRegister(instr);
-  Register mul_left = InputRegisterAt(instr, HMultiplyAccumulate::kInputMulLeftIndex);
-  Register mul_right = InputRegisterAt(instr, HMultiplyAccumulate::kInputMulRightIndex);
+  Register accumulator = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputAccumulatorIndex);
+  Register mul_left = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputMulLeftIndex);
+  Register mul_right = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputMulRightIndex);

   // Avoid emitting code that could trigger Cortex A53's erratum 835769.
   // This fixup should be carried out for all multiply-accumulate instructions:
@@ -1992,18 +1986,10 @@ void InstructionCodeGeneratorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate*
   }

   if (instr->GetOpKind() == HInstruction::kAdd) {
-    Register accumulator = InputRegisterAt(instr, HMultiplyAccumulate::kInputAccumulatorIndex);
     __ Madd(res, mul_left, mul_right, accumulator);
   } else {
     DCHECK(instr->GetOpKind() == HInstruction::kSub);
-    HInstruction* accum_instr = instr->InputAt(HMultiplyAccumulate::kInputAccumulatorIndex);
-    if (accum_instr->IsConstant() && accum_instr->AsConstant()->IsZero()) {
-      __ Mneg(res, mul_left, mul_right);
-    } else {
-      Register accumulator = InputRegisterAt(instr,
-                                             HMultiplyAccumulate::kInputAccumulatorIndex);
-      __ Msub(res, mul_left, mul_right, accumulator);
-    }
+    __ Msub(res, mul_left, mul_right, accumulator);
   }
 }

diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 98303f67ad..8eb9fcc558 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -195,7 +195,6 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {

   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
-  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

 #undef DECLARE_VISIT_INSTRUCTION

@@ -246,7 +245,6 @@ class LocationsBuilderARM64 : public HGraphVisitor {

   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
-  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

 #undef DECLARE_VISIT_INSTRUCTION

diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 3c88ba7ba7..32c3a925e0 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -426,12 +426,6 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
     StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit");
   }

-#if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
-  void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) OVERRIDE {
-    StartAttributeStream("kind") << instruction->GetOpKind();
-  }
-#endif
-
 #ifdef ART_ENABLE_CODEGEN_arm64
   void VisitArm64DataProcWithShifterOp(HArm64DataProcWithShifterOp* instruction) OVERRIDE {
     StartAttributeStream("kind") << instruction->GetInstrKind() << "+" << instruction->GetOpKind();
@@ -439,6 +433,10 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
       StartAttributeStream("shift") << instruction->GetShiftAmount();
     }
   }
+
+  void VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instruction) OVERRIDE {
+    StartAttributeStream("kind") << instruction->GetOpKind();
+  }
 #endif

   bool IsPass(const char* name) {
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
deleted file mode 100644
index db1f9a79aa..0000000000
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_simplifier_arm.h"
-#include "instruction_simplifier_shared.h"
-
-namespace art {
-namespace arm {
-
-void InstructionSimplifierArmVisitor::VisitMul(HMul* instruction) {
-  if (TryCombineMultiplyAccumulate(instruction, kArm)) {
-    RecordSimplification();
-  }
-}
-
-}  // namespace arm
-}  // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
deleted file mode 100644
index 379b95d6ae..0000000000
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_ARM_H_
-#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_ARM_H_
-
-#include "nodes.h"
-#include "optimization.h"
-
-namespace art {
-namespace arm {
-
-class InstructionSimplifierArmVisitor : public HGraphVisitor {
- public:
-  InstructionSimplifierArmVisitor(HGraph* graph, OptimizingCompilerStats* stats)
-      : HGraphVisitor(graph), stats_(stats) {}
-
- private:
-  void RecordSimplification() {
-    if (stats_ != nullptr) {
-      stats_->RecordStat(kInstructionSimplificationsArch);
-    }
-  }
-
-  void VisitMul(HMul* instruction) OVERRIDE;
-
-  OptimizingCompilerStats* stats_;
-};
-
-
-class InstructionSimplifierArm : public HOptimization {
- public:
-  InstructionSimplifierArm(HGraph* graph, OptimizingCompilerStats* stats)
-      : HOptimization(graph, "instruction_simplifier_arm", stats) {}
-
-  void Run() OVERRIDE {
-    InstructionSimplifierArmVisitor visitor(graph_, stats_);
-    visitor.VisitReversePostOrder();
-  }
-};
-
-}  // namespace arm
-}  // namespace art
-
-#endif  // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_ARM_H_
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 83126a5c4d..4bcfc54791 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -17,7 +17,6 @@
 #include "instruction_simplifier_arm64.h"
 #include "common_arm64.h"
-#include "instruction_simplifier_shared.h"
 #include "mirror/array-inl.h"

 namespace art {
@@ -180,6 +179,67 @@ bool InstructionSimplifierArm64Visitor::TryMergeIntoUsersShifterOperand(HInstruc
   return true;
 }

+bool InstructionSimplifierArm64Visitor::TrySimpleMultiplyAccumulatePatterns(
+    HMul* mul, HBinaryOperation* input_binop, HInstruction* input_other) {
+  DCHECK(Primitive::IsIntOrLongType(mul->GetType()));
+  DCHECK(input_binop->IsAdd() || input_binop->IsSub());
+  DCHECK_NE(input_binop, input_other);
+  if (!input_binop->HasOnlyOneNonEnvironmentUse()) {
+    return false;
+  }
+
+  // Try to interpret patterns like
+  //    a * (b <+/-> 1)
+  // as
+  //    (a * b) <+/-> a
+  HInstruction* input_a = input_other;
+  HInstruction* input_b = nullptr;  // Set to a non-null value if we found a pattern to optimize.
+  HInstruction::InstructionKind op_kind;
+
+  if (input_binop->IsAdd()) {
+    if ((input_binop->GetConstantRight() != nullptr) && input_binop->GetConstantRight()->IsOne()) {
+      // Interpret
+      //    a * (b + 1)
+      // as
+      //    (a * b) + a
+      input_b = input_binop->GetLeastConstantLeft();
+      op_kind = HInstruction::kAdd;
+    }
+  } else {
+    DCHECK(input_binop->IsSub());
+    if (input_binop->GetRight()->IsConstant() &&
+        input_binop->GetRight()->AsConstant()->IsMinusOne()) {
+      // Interpret
+      //    a * (b - (-1))
+      // as
+      //    a + (a * b)
+      input_b = input_binop->GetLeft();
+      op_kind = HInstruction::kAdd;
+    } else if (input_binop->GetLeft()->IsConstant() &&
+               input_binop->GetLeft()->AsConstant()->IsOne()) {
+      // Interpret
+      //    a * (1 - b)
+      // as
+      //    a - (a * b)
+      input_b = input_binop->GetRight();
+      op_kind = HInstruction::kSub;
+    }
+  }
+
+  if (input_b == nullptr) {
+    // We did not find a pattern we can optimize.
+    return false;
+  }
+
+  HArm64MultiplyAccumulate* mulacc = new(GetGraph()->GetArena()) HArm64MultiplyAccumulate(
+      mul->GetType(), op_kind, input_a, input_a, input_b, mul->GetDexPc());
+
+  mul->GetBlock()->ReplaceAndRemoveInstructionWith(mul, mulacc);
+  input_binop->GetBlock()->RemoveInstruction(input_binop);
+
+  return false;
+}
+
 void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
   TryExtractArrayAccessAddress(instruction,
                                instruction->GetArray(),
@@ -195,8 +255,75 @@ void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) {
 }

 void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) {
-  if (TryCombineMultiplyAccumulate(instruction, kArm64)) {
-    RecordSimplification();
+  Primitive::Type type = instruction->GetType();
+  if (!Primitive::IsIntOrLongType(type)) {
+    return;
+  }
+
+  HInstruction* use = instruction->HasNonEnvironmentUses()
+      ? instruction->GetUses().GetFirst()->GetUser()
+      : nullptr;
+
+  if (instruction->HasOnlyOneNonEnvironmentUse() && (use->IsAdd() || use->IsSub())) {
+    // Replace code looking like
+    //    MUL tmp, x, y
+    //    SUB dst, acc, tmp
+    // with
+    //    MULSUB dst, acc, x, y
+    // Note that we do not want to (unconditionally) perform the merge when the
+    // multiplication has multiple uses and it can be merged in all of them.
+    // Multiple uses could happen on the same control-flow path, and we would
+    // then increase the amount of work. In the future we could try to evaluate
+    // whether all uses are on different control-flow paths (using dominance and
+    // reverse-dominance information) and only perform the merge when they are.
+    HInstruction* accumulator = nullptr;
+    HBinaryOperation* binop = use->AsBinaryOperation();
+    HInstruction* binop_left = binop->GetLeft();
+    HInstruction* binop_right = binop->GetRight();
+    // Be careful after GVN. This should not happen since the `HMul` has only
+    // one use.
+    DCHECK_NE(binop_left, binop_right);
+    if (binop_right == instruction) {
+      accumulator = binop_left;
+    } else if (use->IsAdd()) {
+      DCHECK_EQ(binop_left, instruction);
+      accumulator = binop_right;
+    }
+
+    if (accumulator != nullptr) {
+      HArm64MultiplyAccumulate* mulacc =
+          new (GetGraph()->GetArena()) HArm64MultiplyAccumulate(type,
+                                                                binop->GetKind(),
+                                                                accumulator,
+                                                                instruction->GetLeft(),
+                                                                instruction->GetRight());
+
+      binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc);
+      DCHECK(!instruction->HasUses());
+      instruction->GetBlock()->RemoveInstruction(instruction);
+      RecordSimplification();
+      return;
+    }
+  }
+
+  // Use multiply accumulate instruction for a few simple patterns.
+  // We prefer not applying the following transformations if the left and
+  // right inputs perform the same operation.
+  // We rely on GVN having squashed the inputs if appropriate. However the
+  // results are still correct even if that did not happen.
+  if (instruction->GetLeft() == instruction->GetRight()) {
+    return;
+  }
+
+  HInstruction* left = instruction->GetLeft();
+  HInstruction* right = instruction->GetRight();
+  if ((right->IsAdd() || right->IsSub()) &&
+      TrySimpleMultiplyAccumulatePatterns(instruction, right->AsBinaryOperation(), left)) {
+    return;
+  }
+  if ((left->IsAdd() || left->IsSub()) &&
+      TrySimpleMultiplyAccumulatePatterns(instruction, left->AsBinaryOperation(), right)) {
+    return;
   }
 }

diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 37a34c0373..b7f490bb8c 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -51,6 +51,10 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
     return TryMergeIntoShifterOperand(use, bitfield_op, true);
   }

+  bool TrySimpleMultiplyAccumulatePatterns(HMul* mul,
+                                           HBinaryOperation* input_binop,
+                                           HInstruction* input_other);
+
   // HInstruction visitors, sorted alphabetically.
   void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
   void VisitArraySet(HArraySet* instruction) OVERRIDE;
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
deleted file mode 100644
index 45d196fa6d..0000000000
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_simplifier_shared.h"
-
-namespace art {
-
-namespace {
-
-bool TrySimpleMultiplyAccumulatePatterns(HMul* mul,
-                                         HBinaryOperation* input_binop,
-                                         HInstruction* input_other) {
-  DCHECK(Primitive::IsIntOrLongType(mul->GetType()));
-  DCHECK(input_binop->IsAdd() || input_binop->IsSub());
-  DCHECK_NE(input_binop, input_other);
-  if (!input_binop->HasOnlyOneNonEnvironmentUse()) {
-    return false;
-  }
-
-  // Try to interpret patterns like
-  //    a * (b <+/-> 1)
-  // as
-  //    (a * b) <+/-> a
-  HInstruction* input_a = input_other;
-  HInstruction* input_b = nullptr;  // Set to a non-null value if we found a pattern to optimize.
-  HInstruction::InstructionKind op_kind;
-
-  if (input_binop->IsAdd()) {
-    if ((input_binop->GetConstantRight() != nullptr) && input_binop->GetConstantRight()->IsOne()) {
-      // Interpret
-      //    a * (b + 1)
-      // as
-      //    (a * b) + a
-      input_b = input_binop->GetLeastConstantLeft();
-      op_kind = HInstruction::kAdd;
-    }
-  } else {
-    DCHECK(input_binop->IsSub());
-    if (input_binop->GetRight()->IsConstant() &&
-        input_binop->GetRight()->AsConstant()->IsMinusOne()) {
-      // Interpret
-      //    a * (b - (-1))
-      // as
-      //    a + (a * b)
-      input_b = input_binop->GetLeft();
-      op_kind = HInstruction::kAdd;
-    } else if (input_binop->GetLeft()->IsConstant() &&
-               input_binop->GetLeft()->AsConstant()->IsOne()) {
-      // Interpret
-      //    a * (1 - b)
-      // as
-      //    a - (a * b)
-      input_b = input_binop->GetRight();
-      op_kind = HInstruction::kSub;
-    }
-  }
-
-  if (input_b == nullptr) {
-    // We did not find a pattern we can optimize.
-    return false;
-  }
-
-  ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena();
-  HMultiplyAccumulate* mulacc = new(arena) HMultiplyAccumulate(
-      mul->GetType(), op_kind, input_a, input_a, input_b, mul->GetDexPc());
-
-  mul->GetBlock()->ReplaceAndRemoveInstructionWith(mul, mulacc);
-  input_binop->GetBlock()->RemoveInstruction(input_binop);
-
-  return true;
-}
-
-}  // namespace
-
-bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) {
-  Primitive::Type type = mul->GetType();
-  switch (isa) {
-    case kArm:
-    case kThumb2:
-      if (type != Primitive::kPrimInt) {
-        return false;
-      }
-      break;
-    case kArm64:
-      if (!Primitive::IsIntOrLongType(type)) {
-        return false;
-      }
-      break;
-    default:
-      return false;
-  }
-
-  HInstruction* use = mul->HasNonEnvironmentUses()
-      ? mul->GetUses().GetFirst()->GetUser()
-      : nullptr;
-
-  ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena();
-
-  if (mul->HasOnlyOneNonEnvironmentUse()) {
-    if (use->IsAdd() || use->IsSub()) {
-      // Replace code looking like
-      //    MUL tmp, x, y
-      //    SUB dst, acc, tmp
-      // with
-      //    MULSUB dst, acc, x, y
-      // Note that we do not want to (unconditionally) perform the merge when the
-      // multiplication has multiple uses and it can be merged in all of them.
-      // Multiple uses could happen on the same control-flow path, and we would
-      // then increase the amount of work. In the future we could try to evaluate
-      // whether all uses are on different control-flow paths (using dominance and
-      // reverse-dominance information) and only perform the merge when they are.
-      HInstruction* accumulator = nullptr;
-      HBinaryOperation* binop = use->AsBinaryOperation();
-      HInstruction* binop_left = binop->GetLeft();
-      HInstruction* binop_right = binop->GetRight();
-      // Be careful after GVN. This should not happen since the `HMul` has only
-      // one use.
-      DCHECK_NE(binop_left, binop_right);
-      if (binop_right == mul) {
-        accumulator = binop_left;
-      } else if (use->IsAdd()) {
-        DCHECK_EQ(binop_left, mul);
-        accumulator = binop_right;
-      }
-
-      if (accumulator != nullptr) {
-        HMultiplyAccumulate* mulacc =
-            new (arena) HMultiplyAccumulate(type,
-                                            binop->GetKind(),
-                                            accumulator,
-                                            mul->GetLeft(),
-                                            mul->GetRight());
-
-        binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc);
-        DCHECK(!mul->HasUses());
-        mul->GetBlock()->RemoveInstruction(mul);
-        return true;
-      }
-    } else if (use->IsNeg() && isa != kArm) {
-      HMultiplyAccumulate* mulacc =
-          new (arena) HMultiplyAccumulate(type,
-                                          HInstruction::kSub,
-                                          mul->GetBlock()->GetGraph()->GetConstant(type, 0),
-                                          mul->GetLeft(),
-                                          mul->GetRight());
-
-      use->GetBlock()->ReplaceAndRemoveInstructionWith(use, mulacc);
-      DCHECK(!mul->HasUses());
-      mul->GetBlock()->RemoveInstruction(mul);
-      return true;
-    }
-  }
-
-  // Use multiply accumulate instruction for a few simple patterns.
-  // We prefer not applying the following transformations if the left and
-  // right inputs perform the same operation.
-  // We rely on GVN having squashed the inputs if appropriate. However the
-  // results are still correct even if that did not happen.
-  if (mul->GetLeft() == mul->GetRight()) {
-    return false;
-  }
-
-  HInstruction* left = mul->GetLeft();
-  HInstruction* right = mul->GetRight();
-  if ((right->IsAdd() || right->IsSub()) &&
-      TrySimpleMultiplyAccumulatePatterns(mul, right->AsBinaryOperation(), left)) {
-    return true;
-  }
-  if ((left->IsAdd() || left->IsSub()) &&
-      TrySimpleMultiplyAccumulatePatterns(mul, left->AsBinaryOperation(), right)) {
-    return true;
-  }
-  return false;
-}
-
-}  // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_shared.h b/compiler/optimizing/instruction_simplifier_shared.h
deleted file mode 100644
index 9832ecc058..0000000000
--- a/compiler/optimizing/instruction_simplifier_shared.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_SHARED_H_
-#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_SHARED_H_
-
-#include "nodes.h"
-
-namespace art {
-
-bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa);
-
-}  // namespace art
-
-#endif  // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_SHARED_H_
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 57fa558129..019be5d494 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1226,16 +1226,6 @@ class HLoopInformationOutwardIterator : public ValueObject {
   M(UShr, BinaryOperation) \
   M(Xor, BinaryOperation) \

-/*
- * Instructions, shared across several (not all) architectures.
- */
-#if !defined(ART_ENABLE_CODEGEN_arm) && !defined(ART_ENABLE_CODEGEN_arm64)
-#define FOR_EACH_CONCRETE_INSTRUCTION_SHARED(M)
-#else
-#define FOR_EACH_CONCRETE_INSTRUCTION_SHARED(M) \
-  M(MultiplyAccumulate, Instruction)
-#endif
-
 #ifndef ART_ENABLE_CODEGEN_arm
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM(M)
 #else
@@ -1248,7 +1238,8 @@ class HLoopInformationOutwardIterator : public ValueObject {
 #else
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) \
   M(Arm64DataProcWithShifterOp, Instruction) \
-  M(Arm64IntermediateAddress, Instruction)
+  M(Arm64IntermediateAddress, Instruction) \
+  M(Arm64MultiplyAccumulate, Instruction)
 #endif

 #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)
@@ -1268,7 +1259,6 @@ class HLoopInformationOutwardIterator : public ValueObject {
 #define FOR_EACH_CONCRETE_INSTRUCTION(M) \
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(M) \
-  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(M) \
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(M) \
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) \
   FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M) \
@@ -5728,9 +5718,6 @@ class HParallelMove : public HTemplateInstruction<0> {

 }  // namespace art

-#if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
-#include "nodes_shared.h"
-#endif
 #ifdef ART_ENABLE_CODEGEN_arm
 #include "nodes_arm.h"
 #endif
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
index 173852a55d..445cdab191 100644
--- a/compiler/optimizing/nodes_arm64.h
+++ b/compiler/optimizing/nodes_arm64.h
@@ -118,6 +118,40 @@ class HArm64IntermediateAddress : public HExpression<2> {
   DISALLOW_COPY_AND_ASSIGN(HArm64IntermediateAddress);
 };

+class HArm64MultiplyAccumulate : public HExpression<3> {
+ public:
+  HArm64MultiplyAccumulate(Primitive::Type type,
+                           InstructionKind op,
+                           HInstruction* accumulator,
+                           HInstruction* mul_left,
+                           HInstruction* mul_right,
+                           uint32_t dex_pc = kNoDexPc)
+      : HExpression(type, SideEffects::None(), dex_pc), op_kind_(op) {
+    SetRawInputAt(kInputAccumulatorIndex, accumulator);
+    SetRawInputAt(kInputMulLeftIndex, mul_left);
+    SetRawInputAt(kInputMulRightIndex, mul_right);
+  }
+
+  static constexpr int kInputAccumulatorIndex = 0;
+  static constexpr int kInputMulLeftIndex = 1;
+  static constexpr int kInputMulRightIndex = 2;
+
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+    return op_kind_ == other->AsArm64MultiplyAccumulate()->op_kind_;
+  }
+
+  InstructionKind GetOpKind() const { return op_kind_; }
+
+  DECLARE_INSTRUCTION(Arm64MultiplyAccumulate);
+
+ private:
+  // Indicates if this is a MADD or MSUB.
+  InstructionKind op_kind_;
+
+  DISALLOW_COPY_AND_ASSIGN(HArm64MultiplyAccumulate);
+};
+
 }  // namespace art

 #endif  // ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
deleted file mode 100644
index b04b622838..0000000000
--- a/compiler/optimizing/nodes_shared.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_NODES_SHARED_H_
-#define ART_COMPILER_OPTIMIZING_NODES_SHARED_H_
-
-namespace art {
-
-class HMultiplyAccumulate : public HExpression<3> {
- public:
-  HMultiplyAccumulate(Primitive::Type type,
-                      InstructionKind op,
-                      HInstruction* accumulator,
-                      HInstruction* mul_left,
-                      HInstruction* mul_right,
-                      uint32_t dex_pc = kNoDexPc)
-      : HExpression(type, SideEffects::None(), dex_pc), op_kind_(op) {
-    SetRawInputAt(kInputAccumulatorIndex, accumulator);
-    SetRawInputAt(kInputMulLeftIndex, mul_left);
-    SetRawInputAt(kInputMulRightIndex, mul_right);
-  }
-
-  static constexpr int kInputAccumulatorIndex = 0;
-  static constexpr int kInputMulLeftIndex = 1;
-  static constexpr int kInputMulRightIndex = 2;
-
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
-    return op_kind_ == other->AsMultiplyAccumulate()->op_kind_;
-  }
-
-  InstructionKind GetOpKind() const { return op_kind_; }
-
-  DECLARE_INSTRUCTION(MultiplyAccumulate);
-
- private:
-  // Indicates if this is a MADD or MSUB.
-  const InstructionKind op_kind_;
-
-  DISALLOW_COPY_AND_ASSIGN(HMultiplyAccumulate);
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_OPTIMIZING_NODES_SHARED_H_
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 4da48bdfc3..fffd00535c 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -62,7 +62,6 @@
 #include "induction_var_analysis.h"
 #include "inliner.h"
 #include "instruction_simplifier.h"
-#include "instruction_simplifier_arm.h"
 #include "intrinsics.h"
 #include "jit/debugger_interface.h"
 #include "jit/jit_code_cache.h"
@@ -446,11 +445,8 @@ static void RunArchOptimizations(InstructionSet instruction_set,
     case kThumb2:
     case kArm: {
       arm::DexCacheArrayFixups* fixups = new (arena) arm::DexCacheArrayFixups(graph, stats);
-      arm::InstructionSimplifierArm* simplifier =
-          new (arena) arm::InstructionSimplifierArm(graph, stats);
       HOptimization* arm_optimizations[] = {
-        fixups,
-        simplifier
+        fixups
       };
       RunOptimizations(arm_optimizations, arraysize(arm_optimizations), pass_observer);
       break;