-rw-r--r--  compiler/optimizing/scheduler_arm.cc   |   5
-rw-r--r--  compiler/optimizing/scheduler_arm.h    |   1
-rw-r--r--  compiler/optimizing/scheduler_arm64.cc | 151
-rw-r--r--  compiler/optimizing/scheduler_arm64.h  |  79
4 files changed, 218 insertions, 18 deletions
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index 1a89567991..832a7e1571 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -288,6 +288,11 @@ void SchedulingLatencyVisitorARM::VisitIntermediateAddress(HIntermediateAddress*
   last_visited_latency_ = kArmIntegerOpLatency;
 }
 
+void SchedulingLatencyVisitorARM::VisitIntermediateAddressIndex(
+    HIntermediateAddressIndex* ATTRIBUTE_UNUSED) {
+  UNIMPLEMENTED(FATAL) << "IntermediateAddressIndex is not implemented for ARM";
+}
+
 void SchedulingLatencyVisitorARM::VisitMultiplyAccumulate(HMultiplyAccumulate* ATTRIBUTE_UNUSED) {
   last_visited_latency_ = kArmMulIntegerLatency;
 }
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index cb679fcd2d..897e97da49 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -103,6 +103,7 @@ class SchedulingLatencyVisitorARM : public SchedulingLatencyVisitor {
   M(BitwiseNegatedRight, unused)       \
   M(MultiplyAccumulate, unused)        \
   M(IntermediateAddress, unused)       \
+  M(IntermediateAddressIndex, unused)  \
   M(DataProcWithShifterOp, unused)
 
 #define DECLARE_VISIT_INSTRUCTION(type, unused)  \
diff --git a/compiler/optimizing/scheduler_arm64.cc b/compiler/optimizing/scheduler_arm64.cc
index 558dcc4cbc..83b487fb5b 100644
--- a/compiler/optimizing/scheduler_arm64.cc
+++ b/compiler/optimizing/scheduler_arm64.cc
@@ -16,6 +16,7 @@
 #include "scheduler_arm64.h"
 
 #include "code_generator_utils.h"
+#include "mirror/array-inl.h"
 
 namespace art {
 namespace arm64 {
@@ -43,6 +44,13 @@ void SchedulingLatencyVisitorARM64::VisitIntermediateAddress(
   last_visited_latency_ = kArm64IntegerOpLatency + 2;
 }
 
+void SchedulingLatencyVisitorARM64::VisitIntermediateAddressIndex(
+    HIntermediateAddressIndex* instr ATTRIBUTE_UNUSED) {
+  // Although the code generated is a simple `add` instruction, we found through empirical results
+  // that spacing it from its use in memory accesses was beneficial.
+  last_visited_latency_ = kArm64DataProcWithShifterOpLatency + 2;
+}
+
 void SchedulingLatencyVisitorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* ATTRIBUTE_UNUSED) {
   last_visited_latency_ = kArm64MulIntegerLatency;
 }
@@ -192,5 +200,148 @@ void SchedulingLatencyVisitorARM64::VisitTypeConversion(HTypeConversion* instr)
   }
 }
 
+void SchedulingLatencyVisitorARM64::HandleSimpleArithmeticSIMD(HVecOperation *instr) {
+  if (Primitive::IsFloatingPointType(instr->GetPackedType())) {
+    last_visited_latency_ = kArm64SIMDFloatingPointOpLatency;
+  } else {
+    last_visited_latency_ = kArm64SIMDIntegerOpLatency;
+  }
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecReplicateScalar(
+    HVecReplicateScalar* instr ATTRIBUTE_UNUSED) {
+  last_visited_latency_ = kArm64SIMDReplicateOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecSetScalars(HVecSetScalars* instr) {
+  LOG(FATAL) << "Unsupported SIMD instruction " << instr->GetId();
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecSumReduce(HVecSumReduce* instr) {
+  LOG(FATAL) << "Unsupported SIMD instruction " << instr->GetId();
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecCnv(HVecCnv* instr ATTRIBUTE_UNUSED) {
+  last_visited_latency_ = kArm64SIMDTypeConversionInt2FPLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecNeg(HVecNeg* instr) {
+  HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecAbs(HVecAbs* instr) {
+  HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecNot(HVecNot* instr) {
+  if (instr->GetPackedType() == Primitive::kPrimBoolean) {
+    last_visited_internal_latency_ = kArm64SIMDIntegerOpLatency;
+  }
+  last_visited_latency_ = kArm64SIMDIntegerOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecAdd(HVecAdd* instr) {
+  HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecHalvingAdd(HVecHalvingAdd* instr) {
+  HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecSub(HVecSub* instr) {
+  HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecMul(HVecMul* instr) {
+  if (Primitive::IsFloatingPointType(instr->GetPackedType())) {
+    last_visited_latency_ = kArm64SIMDMulFloatingPointLatency;
+  } else {
+    last_visited_latency_ = kArm64SIMDMulIntegerLatency;
+  }
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecDiv(HVecDiv* instr) {
+  if (instr->GetPackedType() == Primitive::kPrimFloat) {
+    last_visited_latency_ = kArm64SIMDDivFloatLatency;
+  } else {
+    DCHECK(instr->GetPackedType() == Primitive::kPrimDouble);
+    last_visited_latency_ = kArm64SIMDDivDoubleLatency;
+  }
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecMin(HVecMin* instr) {
+  HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecMax(HVecMax* instr) {
+  HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecAnd(HVecAnd* instr ATTRIBUTE_UNUSED) {
+  last_visited_latency_ = kArm64SIMDIntegerOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecAndNot(HVecAndNot* instr) {
+  LOG(FATAL) << "Unsupported SIMD instruction " << instr->GetId();
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecOr(HVecOr* instr ATTRIBUTE_UNUSED) {
+  last_visited_latency_ = kArm64SIMDIntegerOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecXor(HVecXor* instr ATTRIBUTE_UNUSED) {
+  last_visited_latency_ = kArm64SIMDIntegerOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecShl(HVecShl* instr) {
+  HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecShr(HVecShr* instr) {
+  HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecUShr(HVecUShr* instr) {
+  HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecMultiplyAccumulate(
+    HVecMultiplyAccumulate* instr ATTRIBUTE_UNUSED) {
+  last_visited_latency_ = kArm64SIMDMulIntegerLatency;
+}
+
+void SchedulingLatencyVisitorARM64::HandleVecAddress(
+    HVecMemoryOperation* instruction,
+    size_t size ATTRIBUTE_UNUSED) {
+  HInstruction* index = instruction->InputAt(1);
+  if (!index->IsConstant()) {
+    last_visited_internal_latency_ += kArm64DataProcWithShifterOpLatency;
+  }
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecLoad(HVecLoad* instr) {
+  last_visited_internal_latency_ = 0;
+  size_t size = Primitive::ComponentSize(instr->GetPackedType());
+
+  if (instr->GetPackedType() == Primitive::kPrimChar
+      && mirror::kUseStringCompression
+      && instr->IsStringCharAt()) {
+    // Set latencies for the uncompressed case.
+    last_visited_internal_latency_ += kArm64MemoryLoadLatency + kArm64BranchLatency;
+    HandleVecAddress(instr, size);
+    last_visited_latency_ = kArm64SIMDMemoryLoadLatency;
+  } else {
+    HandleVecAddress(instr, size);
+    last_visited_latency_ = kArm64SIMDMemoryLoadLatency;
+  }
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecStore(HVecStore* instr) {
+  last_visited_internal_latency_ = 0;
+  size_t size = Primitive::ComponentSize(instr->GetPackedType());
+  HandleVecAddress(instr, size);
+  last_visited_latency_ = kArm64SIMDMemoryStoreLatency;
+}
+
 }  // namespace arm64
 }  // namespace art
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index 7a33720655..63d5b7d6b6 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -42,6 +42,18 @@ static constexpr uint32_t kArm64LoadStringInternalLatency = 7;
 static constexpr uint32_t kArm64MulFloatingPointLatency = 6;
 static constexpr uint32_t kArm64MulIntegerLatency = 6;
 static constexpr uint32_t kArm64TypeConversionFloatingPointIntegerLatency = 5;
+static constexpr uint32_t kArm64BranchLatency = kArm64IntegerOpLatency;
+
+static constexpr uint32_t kArm64SIMDFloatingPointOpLatency = 10;
+static constexpr uint32_t kArm64SIMDIntegerOpLatency = 6;
+static constexpr uint32_t kArm64SIMDMemoryLoadLatency = 10;
+static constexpr uint32_t kArm64SIMDMemoryStoreLatency = 6;
+static constexpr uint32_t kArm64SIMDMulFloatingPointLatency = 12;
+static constexpr uint32_t kArm64SIMDMulIntegerLatency = 12;
+static constexpr uint32_t kArm64SIMDReplicateOpLatency = 16;
+static constexpr uint32_t kArm64SIMDDivDoubleLatency = 60;
+static constexpr uint32_t kArm64SIMDDivFloatLatency = 30;
+static constexpr uint32_t kArm64SIMDTypeConversionInt2FPLatency = 10;
 
 class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor {
  public:
@@ -52,29 +64,54 @@ class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor {
   // We add a second unused parameter to be able to use this macro like the others
   // defined in `nodes.h`.
-#define FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(M) \
-  M(ArrayGet         , unused)                   \
-  M(ArrayLength      , unused)                   \
-  M(ArraySet         , unused)                   \
-  M(BinaryOperation  , unused)                   \
-  M(BoundsCheck      , unused)                   \
-  M(Div              , unused)                   \
-  M(InstanceFieldGet , unused)                   \
-  M(InstanceOf       , unused)                   \
-  M(Invoke           , unused)                   \
-  M(LoadString       , unused)                   \
-  M(Mul              , unused)                   \
-  M(NewArray         , unused)                   \
-  M(NewInstance      , unused)                   \
-  M(Rem              , unused)                   \
-  M(StaticFieldGet   , unused)                   \
-  M(SuspendCheck     , unused)                   \
-  M(TypeConversion   , unused)
+#define FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(M) \
+  M(ArrayGet             , unused)               \
+  M(ArrayLength          , unused)               \
+  M(ArraySet             , unused)               \
+  M(BinaryOperation      , unused)               \
+  M(BoundsCheck          , unused)               \
+  M(Div                  , unused)               \
+  M(InstanceFieldGet     , unused)               \
+  M(InstanceOf           , unused)               \
+  M(Invoke               , unused)               \
+  M(LoadString           , unused)               \
+  M(Mul                  , unused)               \
+  M(NewArray             , unused)               \
+  M(NewInstance          , unused)               \
+  M(Rem                  , unused)               \
+  M(StaticFieldGet       , unused)               \
+  M(SuspendCheck         , unused)               \
+  M(TypeConversion       , unused)               \
+  M(VecReplicateScalar   , unused)               \
+  M(VecSetScalars        , unused)               \
+  M(VecSumReduce         , unused)               \
+  M(VecCnv               , unused)               \
+  M(VecNeg               , unused)               \
+  M(VecAbs               , unused)               \
+  M(VecNot               , unused)               \
+  M(VecAdd               , unused)               \
+  M(VecHalvingAdd        , unused)               \
+  M(VecSub               , unused)               \
+  M(VecMul               , unused)               \
+  M(VecDiv               , unused)               \
+  M(VecMin               , unused)               \
+  M(VecMax               , unused)               \
+  M(VecAnd               , unused)               \
+  M(VecAndNot            , unused)               \
+  M(VecOr                , unused)               \
+  M(VecXor               , unused)               \
+  M(VecShl               , unused)               \
+  M(VecShr               , unused)               \
+  M(VecUShr              , unused)               \
+  M(VecMultiplyAccumulate, unused)               \
+  M(VecLoad              , unused)               \
+  M(VecStore             , unused)
 
 #define FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(M) \
   M(BitwiseNegatedRight, unused)                 \
   M(MultiplyAccumulate, unused)                  \
   M(IntermediateAddress, unused)                 \
+  M(IntermediateAddressIndex, unused)            \
   M(DataProcWithShifterOp, unused)
 
 #define DECLARE_VISIT_INSTRUCTION(type, unused)  \
@@ -85,6 +122,10 @@ class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor {
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
+
+ private:
+  void HandleSimpleArithmeticSIMD(HVecOperation *instr);
+  void HandleVecAddress(HVecMemoryOperation* instruction, size_t size);
 };
 
 class HSchedulerARM64 : public HScheduler {
@@ -101,6 +142,8 @@ class HSchedulerARM64 : public HScheduler {
         return true;
       FOR_EACH_CONCRETE_INSTRUCTION_ARM64(CASE_INSTRUCTION_KIND)
         return true;
+      FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(CASE_INSTRUCTION_KIND)
+        return true;
       default:
         return HScheduler::IsSchedulable(instruction);
     }
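
For readers unfamiliar with how the numbers in this patch are consumed, note that each Visit* method above only sets two fields: last_visited_internal_latency_ (cycles charged to the instruction itself, e.g. the extra address computation added in HandleVecAddress) and last_visited_latency_ (cycles before a dependent instruction can use the result). The sketch below is a minimal, self-contained illustration of how such per-instruction latencies can feed a critical-path cost in a list scheduler. It is not ART code, and every name in it (ToyNode, VisitSimdLoad, VisitSimdAdd, kSimdAddLatency, kSimdLoadLatency, kAddressComputationLatency, CriticalPath) is invented for the example.

// Illustrative mock of a scheduling-latency visitor and critical-path cost.
// NOT ART code; all names here are hypothetical and chosen for the example.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct ToyNode {
  const char* name;
  uint32_t internal_latency = 0;  // cycles spent inside the node itself
  uint32_t result_latency = 0;    // cycles before dependents may start
  std::vector<ToyNode*> users;    // data-dependent successors
};

// Analogous to the kArm64SIMD* constants: per-microarchitecture estimates.
constexpr uint32_t kSimdAddLatency = 6;
constexpr uint32_t kSimdLoadLatency = 10;
constexpr uint32_t kAddressComputationLatency = 3;

// "Visit" functions fill in the two latency fields, like the Visit* methods
// in the patch fill last_visited_internal_latency_ / last_visited_latency_.
void VisitSimdAdd(ToyNode* n) { n->result_latency = kSimdAddLatency; }

void VisitSimdLoad(ToyNode* n, bool constant_index) {
  // A non-constant index needs an extra address computation first,
  // mirroring HandleVecAddress() adding to the internal latency.
  n->internal_latency = constant_index ? 0 : kAddressComputationLatency;
  n->result_latency = kSimdLoadLatency;
}

// Critical-path cost: the node's own latencies plus the longest user path.
uint32_t CriticalPath(const ToyNode* n) {
  uint32_t longest_user = 0;
  for (const ToyNode* u : n->users) {
    longest_user = std::max(longest_user, CriticalPath(u));
  }
  return n->internal_latency + n->result_latency + longest_user;
}

int main() {
  ToyNode load{"VecLoad"};
  ToyNode add{"VecAdd"};
  load.users.push_back(&add);
  VisitSimdLoad(&load, /*constant_index=*/false);
  VisitSimdAdd(&add);
  // 3 (address) + 10 (load result) + 6 (add result) = 19 cycles.
  std::cout << "critical path through VecLoad: " << CriticalPath(&load)
            << " cycles\n";
  return 0;
}

Running the sketch prints a 19-cycle critical path for a non-constant-index vector load feeding a vector add; estimates of this kind are what a list scheduler compares when deciding which ready node to emit next, which is why the patch spaces the address computation away from the memory access it feeds.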