Diffstat (limited to 'compiler/optimizing')
117 files changed, 2234 insertions, 2104 deletions
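Most of the churn in this diff is mechanical: every use of ART's OVERRIDE macro becomes the plain C++11 override specifier. A minimal standalone sketch of the pattern, assuming the macro simply expands to the keyword (class names here are illustrative, not ART's):

  struct SlowPath {
    virtual ~SlowPath() {}
    virtual const char* GetDescription() const { return "SlowPath"; }
  };

  struct BoundsCheckSlowPath : SlowPath {
    // With `override` the compiler verifies this really overrides a virtual
    // method; a signature mismatch becomes a compile error instead of a
    // silent, unrelated function.
    const char* GetDescription() const override { return "BoundsCheckSlowPath"; }
  };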
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index dfefa524bf..1c3660c0a7 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -388,10 +388,10 @@ class MonotonicValueRange : public ValueRange {
     return induction_variable_->GetBlock();
   }
 
-  MonotonicValueRange* AsMonotonicValueRange() OVERRIDE { return this; }
+  MonotonicValueRange* AsMonotonicValueRange() override { return this; }
 
   // If it's certain that this value range fits in other_range.
-  bool FitsIn(ValueRange* other_range) const OVERRIDE {
+  bool FitsIn(ValueRange* other_range) const override {
     if (other_range == nullptr) {
       return true;
     }
@@ -402,7 +402,7 @@ class MonotonicValueRange : public ValueRange {
   // Try to narrow this MonotonicValueRange given another range.
   // Ideally it will return a normal ValueRange. But due to
   // possible overflow/underflow, that may not be possible.
-  ValueRange* Narrow(ValueRange* range) OVERRIDE {
+  ValueRange* Narrow(ValueRange* range) override {
     if (range == nullptr) {
       return this;
     }
@@ -530,7 +530,7 @@ class BCEVisitor : public HGraphVisitor {
         induction_range_(induction_analysis),
         next_(nullptr) {}
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     DCHECK(!IsAddedBlock(block));
     first_index_bounds_check_map_.clear();
     // Visit phis and instructions using a safe iterator. The iteration protects
@@ -820,7 +820,7 @@ class BCEVisitor : public HGraphVisitor {
     }
   }
 
-  void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE {
+  void VisitBoundsCheck(HBoundsCheck* bounds_check) override {
     HBasicBlock* block = bounds_check->GetBlock();
     HInstruction* index = bounds_check->InputAt(0);
     HInstruction* array_length = bounds_check->InputAt(1);
@@ -945,7 +945,7 @@ class BCEVisitor : public HGraphVisitor {
     return true;
   }
 
-  void VisitPhi(HPhi* phi) OVERRIDE {
+  void VisitPhi(HPhi* phi) override {
     if (phi->IsLoopHeaderPhi()
         && (phi->GetType() == DataType::Type::kInt32)
         && HasSameInputAtBackEdges(phi)) {
@@ -992,14 +992,14 @@ class BCEVisitor : public HGraphVisitor {
     }
   }
 
-  void VisitIf(HIf* instruction) OVERRIDE {
+  void VisitIf(HIf* instruction) override {
     if (instruction->InputAt(0)->IsCondition()) {
       HCondition* cond = instruction->InputAt(0)->AsCondition();
       HandleIf(instruction, cond->GetLeft(), cond->GetRight(), cond->GetCondition());
     }
   }
 
-  void VisitAdd(HAdd* add) OVERRIDE {
+  void VisitAdd(HAdd* add) override {
     HInstruction* right = add->GetRight();
     if (right->IsIntConstant()) {
       ValueRange* left_range = LookupValueRange(add->GetLeft(), add->GetBlock());
@@ -1013,7 +1013,7 @@ class BCEVisitor : public HGraphVisitor {
     }
   }
 
-  void VisitSub(HSub* sub) OVERRIDE {
+  void VisitSub(HSub* sub) override {
     HInstruction* left = sub->GetLeft();
     HInstruction* right = sub->GetRight();
     if (right->IsIntConstant()) {
@@ -1115,19 +1115,19 @@ class BCEVisitor : public HGraphVisitor {
     }
   }
 
-  void VisitDiv(HDiv* div) OVERRIDE {
+  void VisitDiv(HDiv* div) override {
     FindAndHandlePartialArrayLength(div);
   }
 
-  void VisitShr(HShr* shr) OVERRIDE {
+  void VisitShr(HShr* shr) override {
     FindAndHandlePartialArrayLength(shr);
   }
 
-  void VisitUShr(HUShr* ushr) OVERRIDE {
+  void VisitUShr(HUShr* ushr) override {
     FindAndHandlePartialArrayLength(ushr);
   }
 
-  void VisitAnd(HAnd* instruction) OVERRIDE {
+  void VisitAnd(HAnd* instruction) override {
     if (instruction->GetRight()->IsIntConstant()) {
       int32_t constant = instruction->GetRight()->AsIntConstant()->GetValue();
       if (constant > 0) {
@@ -1142,7 +1142,7 @@ class BCEVisitor : public HGraphVisitor {
     }
   }
 
-  void VisitRem(HRem* instruction) OVERRIDE {
+  void VisitRem(HRem* instruction) override {
     HInstruction* left = instruction->GetLeft();
     HInstruction* right = instruction->GetRight();
 
@@ -1202,7 +1202,7 @@ class BCEVisitor : public HGraphVisitor {
     }
   }
 
-  void VisitNewArray(HNewArray* new_array) OVERRIDE {
+  void VisitNewArray(HNewArray* new_array) override {
     HInstruction* len = new_array->GetLength();
     if (!len->IsIntConstant()) {
       HInstruction *left;
@@ -1240,7 +1240,7 @@
    * has occurred (see AddCompareWithDeoptimization()), since in those cases it would be
    * unsafe to hoist array references across their deoptimization instruction inside a loop.
    */
-  void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
+  void VisitArrayGet(HArrayGet* array_get) override {
    if (!has_dom_based_dynamic_bce_ && array_get->IsInLoop()) {
      HLoopInformation* loop = array_get->GetBlock()->GetLoopInformation();
      if (loop->IsDefinedOutOfTheLoop(array_get->InputAt(0)) &&
diff --git a/compiler/optimizing/bounds_check_elimination.h b/compiler/optimizing/bounds_check_elimination.h
index 92ab7984c8..ef08877daa 100644
--- a/compiler/optimizing/bounds_check_elimination.h
+++ b/compiler/optimizing/bounds_check_elimination.h
@@ -34,7 +34,7 @@ class BoundsCheckElimination : public HOptimization {
         side_effects_(side_effects),
         induction_analysis_(induction_analysis) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kBoundsCheckEliminationPassName = "BCE";
 
diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc
index bdc395b52d..c6232ef661 100644
--- a/compiler/optimizing/cha_guard_optimization.cc
+++ b/compiler/optimizing/cha_guard_optimization.cc
@@ -44,9 +44,9 @@ class CHAGuardVisitor : HGraphVisitor {
     GetGraph()->SetNumberOfCHAGuards(0);
   }
 
-  void VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) OVERRIDE;
+  void VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) override;
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+  void VisitBasicBlock(HBasicBlock* block) override;
 
  private:
   void RemoveGuard(HShouldDeoptimizeFlag* flag);
diff --git a/compiler/optimizing/cha_guard_optimization.h b/compiler/optimizing/cha_guard_optimization.h
index d2c5a344b7..440d51a969 100644
--- a/compiler/optimizing/cha_guard_optimization.h
+++ b/compiler/optimizing/cha_guard_optimization.h
@@ -30,7 +30,7 @@ class CHAGuardOptimization : public HOptimization {
                       const char* name = kCHAGuardOptimizationPassName)
       : HOptimization(graph, name) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kCHAGuardOptimizationPassName = "cha_guard_optimization";
 
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index a13efcaee2..d440cf3e4c 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -197,7 +197,7 @@ class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllocCodeGenerator> {
     return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
   }
 
-  void EmitJitRoots(Handle<mirror::ObjectArray<mirror::Object>> roots)
+  void EmitJitRoots(/*out*/std::vector<Handle<mirror::Object>>* roots)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
@@ -230,29 +230,31 @@ class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllocCodeGenerator> {
 };
 
 void CodeGenerator::CodeGenerationData::EmitJitRoots(
-    Handle<mirror::ObjectArray<mirror::Object>> roots) {
-  DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
+    /*out*/std::vector<Handle<mirror::Object>>* roots) {
+  DCHECK(roots->empty());
+  roots->reserve(GetNumberOfJitRoots());
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   size_t index = 0;
   for (auto& entry : jit_string_roots_) {
     // Update the `roots` with the string, and replace the address temporarily
     // stored to the index in the table.
     uint64_t address = entry.second;
-    roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
-    DCHECK(roots->Get(index) != nullptr);
+    roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
+    DCHECK(roots->back() != nullptr);
+    DCHECK(roots->back()->IsString());
     entry.second = index;
     // Ensure the string is strongly interned. This is a requirement on how the JIT
     // handles strings. b/32995596
-    class_linker->GetInternTable()->InternStrong(
-        reinterpret_cast<mirror::String*>(roots->Get(index)));
+    class_linker->GetInternTable()->InternStrong(roots->back()->AsString());
     ++index;
   }
   for (auto& entry : jit_class_roots_) {
     // Update the `roots` with the class, and replace the address temporarily
     // stored to the index in the table.
     uint64_t address = entry.second;
-    roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
-    DCHECK(roots->Get(index) != nullptr);
+    roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
+    DCHECK(roots->back() != nullptr);
+    DCHECK(roots->back()->IsClass());
     entry.second = index;
     ++index;
   }
@@ -1394,37 +1396,12 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
 }
 
 bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
-  HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
-
-  return (first_next_not_move != nullptr)
-      && first_next_not_move->CanDoImplicitNullCheckOn(null_check->InputAt(0));
+  return null_check->IsEmittedAtUseSite();
 }
 
 void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
-  if (!compiler_options_.GetImplicitNullChecks()) {
-    return;
-  }
-
-  // If we are from a static path don't record the pc as we can't throw NPE.
-  // NB: having the checks here makes the code much less verbose in the arch
-  // specific code generators.
-  if (instr->IsStaticFieldSet() || instr->IsStaticFieldGet()) {
-    return;
-  }
-
-  if (!instr->CanDoImplicitNullCheckOn(instr->InputAt(0))) {
-    return;
-  }
-
-  // Find the first previous instruction which is not a move.
-  HInstruction* first_prev_not_move = instr->GetPreviousDisregardingMoves();
-
-  // If the instruction is a null check it means that `instr` is the first user
-  // and needs to record the pc.
-  if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
-    HNullCheck* null_check = first_prev_not_move->AsNullCheck();
-    // TODO: The parallel moves modify the environment. Their changes need to be
-    // reverted otherwise the stack maps at the throw point will not be correct.
+  HNullCheck* null_check = instr->GetImplicitNullCheck();
+  if (null_check != nullptr) {
     RecordPcInfo(null_check, null_check->GetDexPc());
   }
 }
@@ -1514,7 +1491,12 @@ void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
           << " instruction->GetSideEffects().ToString()="
           << instruction->GetSideEffects().ToString();
     } else {
-      DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
+      // 'CanTriggerGC' side effect is used to restrict optimization of instructions which depend
+      // on GC (e.g. IntermediateAddress) - to ensure they are not alive across GC points. However
+      // if execution never returns to the compiled code from a GC point this restriction is
+      // unnecessary - in particular for fatal slow paths which might trigger GC.
+      DCHECK((slow_path->IsFatal() && !instruction->GetLocations()->WillCall()) ||
+             instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
              // When (non-Baker) read barriers are enabled, some instructions
              // use a slow path to emit a read barrier, which does not trigger
              // GC.
@@ -1665,8 +1647,8 @@ void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
 }
 
 void CodeGenerator::EmitJitRoots(uint8_t* code,
-                                 Handle<mirror::ObjectArray<mirror::Object>> roots,
-                                 const uint8_t* roots_data) {
+                                 const uint8_t* roots_data,
+                                 /*out*/std::vector<Handle<mirror::Object>>* roots) {
   code_generation_data_->EmitJitRoots(roots);
   EmitJitRootPatches(code, roots_data);
 }
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index e77d621b58..4e73e0bdcb 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -356,8 +356,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   // Fills the `literals` array with literals collected during code generation.
   // Also emits literal patches.
   void EmitJitRoots(uint8_t* code,
-                    Handle<mirror::ObjectArray<mirror::Object>> roots,
-                    const uint8_t* roots_data)
+                    const uint8_t* roots_data,
+                    /*out*/std::vector<Handle<mirror::Object>>* roots)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool IsLeafMethod() const {
@@ -622,7 +622,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   // otherwise return a fall-back info that should be used instead.
   virtual HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) = 0;
+      ArtMethod* method) = 0;
 
   // Generate a call to a static or direct method.
   virtual void GenerateStaticOrDirectCall(
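The code_generator.cc/.h hunks above change EmitJitRoots from writing into a preallocated mirror::ObjectArray to appending into an out-parameter vector of handles, rewriting each root table entry's temporarily stored address to the root's index. A self-contained sketch of that pattern, using plain std types in place of ART's Handle and root tables (all names here are illustrative, not ART's):

  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <map>
  #include <string>
  #include <vector>

  // Stand-in root table: maps each root to the address it was temporarily
  // recorded under during code generation.
  std::map<std::string, uint64_t> jit_string_roots = {
      {"hello", 0x7f00}, {"world", 0x7f08}};

  // Mirrors the new out-parameter shape: the caller passes an empty vector,
  // the emitter appends every root and replaces the stored address with the
  // root's index in the emitted table.
  void EmitJitRoots(/*out*/ std::vector<std::string>* roots) {
    assert(roots->empty());
    roots->reserve(jit_string_roots.size());
    size_t index = 0;
    for (auto& entry : jit_string_roots) {
      roots->push_back(entry.first);  // Collect the root itself.
      entry.second = index;           // The address slot now holds the index.
      ++index;
    }
  }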
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 462225aafb..25eadcdc72 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -247,7 +247,7 @@ class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
   explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -273,9 +273,9 @@ class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
@@ -285,16 +285,16 @@ class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
   explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
     arm64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM64"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
@@ -308,7 +308,7 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -349,7 +349,7 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM64"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathARM64"; }
 
  private:
   // The class this slow path will load.
@@ -363,7 +363,7 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
 
   explicit LoadStringSlowPathARM64(HLoadString* instruction) : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -384,7 +384,7 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
@@ -394,7 +394,7 @@ class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
   explicit NullCheckSlowPathARM64(HNullCheck* instr) : SlowPathCodeARM64(instr) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -408,9 +408,9 @@ class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
@@ -421,7 +421,7 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
   SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCodeARM64(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -445,7 +445,7 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
     return successor_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM64"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathARM64"; }
 
  private:
   // If not null, the block to branch to after the suspend check.
@@ -462,7 +462,7 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
   TypeCheckSlowPathARM64(HInstruction* instruction, bool is_fatal)
       : SlowPathCodeARM64(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
 
     DCHECK(instruction_->IsCheckCast()
@@ -503,8 +503,8 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; }
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathARM64"; }
+  bool IsFatal() const override { return is_fatal_; }
 
  private:
   const bool is_fatal_;
@@ -517,7 +517,7 @@ class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
   explicit DeoptimizationSlowPathARM64(HDeoptimize* instruction)
       : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
     LocationSummary* locations = instruction_->GetLocations();
@@ -529,7 +529,7 @@ class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
@@ -539,7 +539,7 @@ class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
 public:
   explicit ArraySetSlowPathARM64(HInstruction* instruction) : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -570,7 +570,7 @@ class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM64"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM64);
@@ -628,7 +628,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     DataType::Type type = DataType::Type::kReference;
@@ -754,7 +754,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathARM64"; }
+  const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathARM64"; }
 
  private:
   Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
@@ -794,7 +794,7 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DataType::Type type = DataType::Type::kReference;
     DCHECK(locations->CanCall());
@@ -831,7 +831,7 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 {
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARM64"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathARM64"; }
 
  private:
   const Location out_;
@@ -4053,7 +4053,7 @@ static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codegen) {
 
 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStaticOrDirectDispatch(
     const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-    HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+    ArtMethod* method ATTRIBUTE_UNUSED) {
   // On ARM64 we support all dispatch types.
   return desired_dispatch_info;
 }
@@ -5065,7 +5065,7 @@ void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
     return;
   }
   {
-    // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+    // Ensure that between load and RecordPcInfo there are no pools emitted.
     EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
     Location obj = instruction->GetLocations()->InAt(0);
     __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 4f6a44fe4d..1ba58b1a9f 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -125,8 +125,8 @@ class SlowPathCodeARM64 : public SlowPathCode {
   vixl::aarch64::Label* GetEntryLabel() { return &entry_label_; }
   vixl::aarch64::Label* GetExitLabel() { return &exit_label_; }
 
-  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
-  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
+  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
 
  private:
   vixl::aarch64::Label entry_label_;
@@ -216,11 +216,11 @@ class InvokeDexCallingConventionVisitorARM64 : public InvokeDexCallingConventionVisitor {
   InvokeDexCallingConventionVisitorARM64() {}
   virtual ~InvokeDexCallingConventionVisitorARM64() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type return_type) const OVERRIDE {
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type return_type) const override {
     return calling_convention.GetReturnLocation(return_type);
   }
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetMethodLocation() const override;
 
  private:
   InvokeDexCallingConvention calling_convention;
@@ -232,22 +232,22 @@ class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention {
 public:
   FieldAccessCallingConventionARM64() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return helpers::LocationFrom(vixl::aarch64::x1);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return helpers::LocationFrom(vixl::aarch64::x0);
   }
-  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return helpers::LocationFrom(vixl::aarch64::x0);
   }
   Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
-                               bool is_instance) const OVERRIDE {
+                               bool is_instance) const override {
     return is_instance
         ? helpers::LocationFrom(vixl::aarch64::x2)
         : helpers::LocationFrom(vixl::aarch64::x1);
   }
-  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return helpers::LocationFrom(vixl::aarch64::d0);
   }
 
@@ -260,7 +260,7 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
   InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super) \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
@@ -268,7 +268,7 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -360,7 +360,7 @@ class LocationsBuilderARM64 : public HGraphVisitor {
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super) \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
@@ -368,7 +368,7 @@ class LocationsBuilderARM64 : public HGraphVisitor {
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -393,11 +393,11 @@ class ParallelMoveResolverARM64 : public ParallelMoveResolverNoSwap {
       : ParallelMoveResolverNoSwap(allocator), codegen_(codegen), vixl_temps_() {}
 
  protected:
-  void PrepareForEmitNativeCode() OVERRIDE;
-  void FinishEmitNativeCode() OVERRIDE;
-  Location AllocateScratchLocationFor(Location::Kind kind) OVERRIDE;
-  void FreeScratchLocation(Location loc) OVERRIDE;
-  void EmitMove(size_t index) OVERRIDE;
+  void PrepareForEmitNativeCode() override;
+  void FinishEmitNativeCode() override;
+  Location AllocateScratchLocationFor(Location::Kind kind) override;
+  void FreeScratchLocation(Location loc) override;
+  void EmitMove(size_t index) override;
 
  private:
   Arm64Assembler* GetAssembler() const;
@@ -418,39 +418,39 @@ class CodeGeneratorARM64 : public CodeGenerator {
                     OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorARM64() {}
 
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
 
   vixl::aarch64::CPURegList GetFramePreservedCoreRegisters() const;
   vixl::aarch64::CPURegList GetFramePreservedFPRegisters() const;
 
-  void Bind(HBasicBlock* block) OVERRIDE;
+  void Bind(HBasicBlock* block) override;
 
   vixl::aarch64::Label* GetLabelOf(HBasicBlock* block) {
     block = FirstNonEmptyBlock(block);
     return &(block_labels_[block->GetBlockId()]);
   }
 
-  size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const override {
     return kArm64WordSize;
   }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+  size_t GetFloatingPointSpillSlotSize() const override {
     return GetGraph()->HasSIMD()
        ? 2 * kArm64WordSize   // 16 bytes == 2 arm64 words for each spill
        : 1 * kArm64WordSize;  // 8 bytes == 1 arm64 words for each spill
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     vixl::aarch64::Label* block_entry_label = GetLabelOf(block);
     DCHECK(block_entry_label->IsBound());
     return block_entry_label->GetLocation();
   }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
-  Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
-  const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
+  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
+  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+  Arm64Assembler* GetAssembler() override { return &assembler_; }
+  const Arm64Assembler& GetAssembler() const override { return assembler_; }
 
   vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
 
   // Emit a write barrier.
@@ -462,12 +462,12 @@ class CodeGeneratorARM64 : public CodeGenerator {
 
   // Register allocation.
 
-  void SetupBlockedRegisters() const OVERRIDE;
+  void SetupBlockedRegisters() const override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
 
   // The number of registers that can be allocated. The register allocator may
   // decide to reserve and not use a few of them.
@@ -479,35 +479,35 @@ class CodeGeneratorARM64 : public CodeGenerator {
   static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfFPRegisters;
   static constexpr int kNumberOfAllocatableRegisterPairs = 0;
 
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kArm64;
   }
 
   const Arm64InstructionSetFeatures& GetInstructionSetFeatures() const;
 
-  void Initialize() OVERRIDE {
+  void Initialize() override {
     block_labels_.resize(GetGraph()->GetBlocks().size());
   }
 
   // We want to use the STP and LDP instructions to spill and restore registers for slow paths.
   // These instructions can only encode offsets that are multiples of the register size accessed.
-  uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return vixl::aarch64::kXRegSizeInBytes; }
+  uint32_t GetPreferredSlotsAlignment() const override { return vixl::aarch64::kXRegSizeInBytes; }
 
   JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
     jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr));
     return jump_tables_.back().get();
   }
 
-  void Finalize(CodeAllocator* allocator) OVERRIDE;
+  void Finalize(CodeAllocator* allocator) override;
 
   // Code generation helpers.
   void MoveConstant(vixl::aarch64::CPURegister destination, HConstant* constant);
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void MoveConstant(Location destination, int32_t value) override;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
   void Load(DataType::Type type,
             vixl::aarch64::CPURegister dst,
@@ -529,7 +529,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -537,35 +537,35 @@ class CodeGeneratorARM64 : public CodeGenerator {
                                            HInstruction* instruction,
                                            SlowPathCode* slow_path);
 
-  ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+  ParallelMoveResolverARM64* GetMoveResolver() override { return &move_resolver_; }
 
-  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return false;
   }
 
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+      HLoadString::LoadKind desired_string_load_kind) override;
 
   // Check if the desired_class_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+      HLoadClass::LoadKind desired_class_load_kind) override;
 
   // Check if the desired_dispatch_info is supported. If it is, return it,
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) OVERRIDE;
+      ArtMethod* method) override;
 
   void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
   void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
 
   void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
-                              DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+                              DataType::Type type ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL);
   }
 
@@ -652,13 +652,13 @@ class CodeGeneratorARM64 : public CodeGenerator {
   void LoadBootImageAddress(vixl::aarch64::Register reg, uint32_t boot_image_reference);
   void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
 
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
-  bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+  bool NeedsThunkCode(const linker::LinkerPatch& patch) const override;
   void EmitThunkCode(const linker::LinkerPatch& patch,
                      /*out*/ ArenaVector<uint8_t>* code,
-                     /*out*/ std::string* debug_name) OVERRIDE;
+                     /*out*/ std::string* debug_name) override;
 
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
 
   // Generate a GC root reference load:
   //
@@ -765,10 +765,10 @@ class CodeGeneratorARM64 : public CodeGenerator {
   // artReadBarrierForRootSlow.
   void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
 
-  void GenerateNop() OVERRIDE;
+  void GenerateNop() override;
 
-  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
  private:
   // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
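Two sizing rules in the arm64 header above are worth spelling out: graphs with SIMD spill 16 bytes (two 64-bit words) per FP register while scalar graphs spill 8, and spill slots stay 8-byte aligned so paired STP/LDP saves and restores can encode their offsets, which must be multiples of the register size accessed. A sketch of the same arithmetic, with the constant assumed to match vixl's kXRegSizeInBytes and a made-up helper name:

  #include <cstddef>

  constexpr size_t kArm64WordSize = 8;  // Bytes in one X register.

  // Mirrors GetFloatingPointSpillSlotSize(): SIMD code spills a full 128-bit
  // register (two words) per slot, scalar FP code spills one word.
  constexpr size_t FpSpillSlotSize(bool has_simd) {
    return has_simd ? 2 * kArm64WordSize : 1 * kArm64WordSize;
  }

  static_assert(FpSpillSlotSize(true) == 16, "two arm64 words per SIMD spill");
  static_assert(FpSpillSlotSize(false) == 8, "one arm64 word per FP spill");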
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 8c5eafd0bb..17d973653a 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -383,7 +383,7 @@ class NullCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
 public:
   explicit NullCheckSlowPathARMVIXL(HNullCheck* instruction) : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -397,9 +397,9 @@ class NullCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathARMVIXL"; }
 
 private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARMVIXL);
@@ -410,16 +410,16 @@ class DivZeroCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
   explicit DivZeroCheckSlowPathARMVIXL(HDivZeroCheck* instruction)
       : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     __ Bind(GetEntryLabel());
     arm_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathARMVIXL"; }
 
 private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARMVIXL);
@@ -430,7 +430,7 @@ class SuspendCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
   SuspendCheckSlowPathARMVIXL(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCodeARMVIXL(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     __ Bind(GetEntryLabel());
     arm_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
@@ -451,7 +451,7 @@ class SuspendCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     return successor_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathARMVIXL"; }
 
 private:
   // If not null, the block to branch to after the suspend check.
@@ -468,7 +468,7 @@ class BoundsCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
   explicit BoundsCheckSlowPathARMVIXL(HBoundsCheck* instruction)
       : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
 
@@ -495,9 +495,9 @@ class BoundsCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathARMVIXL"; }
 
 private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARMVIXL);
@@ -511,7 +511,7 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -549,7 +549,7 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathARMVIXL"; }
 
 private:
   // The class this slow path will load.
@@ -563,7 +563,7 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
   explicit LoadStringSlowPathARMVIXL(HLoadString* instruction)
       : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     DCHECK(instruction_->IsLoadString());
     DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
     LocationSummary* locations = instruction_->GetLocations();
@@ -585,7 +585,7 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathARMVIXL"; }
 
 private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARMVIXL);
@@ -596,7 +596,7 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
   TypeCheckSlowPathARMVIXL(HInstruction* instruction, bool is_fatal)
       : SlowPathCodeARMVIXL(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -640,9 +640,9 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathARMVIXL"; }
 
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  bool IsFatal() const override { return is_fatal_; }
 
 private:
   const bool is_fatal_;
@@ -655,7 +655,7 @@ class DeoptimizationSlowPathARMVIXL : public SlowPathCodeARMVIXL {
   explicit DeoptimizationSlowPathARMVIXL(HDeoptimize* instruction)
       : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     __ Bind(GetEntryLabel());
     LocationSummary* locations = instruction_->GetLocations();
@@ -668,7 +668,7 @@ class DeoptimizationSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathARMVIXL"; }
 
 private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARMVIXL);
@@ -678,7 +678,7 @@ class ArraySetSlowPathARMVIXL : public SlowPathCodeARMVIXL {
 public:
   explicit ArraySetSlowPathARMVIXL(HInstruction* instruction) : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -709,7 +709,7 @@ class ArraySetSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathARMVIXL"; }
 
 private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARMVIXL);
@@ -744,7 +744,7 @@ class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     vixl32::Register reg_out = RegisterFrom(out_);
@@ -868,7 +868,7 @@ class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierForHeapReferenceSlowPathARMVIXL";
   }
 
@@ -910,7 +910,7 @@ class ReadBarrierForRootSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     vixl32::Register reg_out = RegisterFrom(out_);
     DCHECK(locations->CanCall());
@@ -936,7 +936,7 @@ class ReadBarrierForRootSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathARMVIXL"; }
 
 private:
   const Location out_;
@@ -2677,6 +2677,18 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
   const Location first = locations->InAt(0);
   const Location out = locations->Out();
   const Location second = locations->InAt(1);
+
+  // In the unlucky case the output of this instruction overlaps
+  // with an input of an "emitted-at-use-site" condition, and
+  // the output of this instruction is not one of its inputs, we'll
+  // need to fallback to branches instead of conditional ARM instructions.
+  bool output_overlaps_with_condition_inputs =
+      !IsBooleanValueOrMaterializedCondition(condition) &&
+      !out.Equals(first) &&
+      !out.Equals(second) &&
+      (condition->GetLocations()->InAt(0).Equals(out) ||
+       condition->GetLocations()->InAt(1).Equals(out));
+  DCHECK(!output_overlaps_with_condition_inputs || condition->IsCondition());
   Location src;
 
   if (condition->IsIntConstant()) {
@@ -2690,7 +2702,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
     return;
   }
 
-  if (!DataType::IsFloatingPointType(type)) {
+  if (!DataType::IsFloatingPointType(type) && !output_overlaps_with_condition_inputs) {
     bool invert = false;
 
     if (out.Equals(second)) {
@@ -2762,6 +2774,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
   vixl32::Label* false_target = nullptr;
   vixl32::Label* true_target = nullptr;
   vixl32::Label select_end;
+  vixl32::Label other_case;
   vixl32::Label* const target = codegen_->GetFinalLabel(select, &select_end);
 
   if (out.Equals(second)) {
@@ -2772,12 +2785,21 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
     src = second;
 
     if (!out.Equals(first)) {
-      codegen_->MoveLocation(out, first, type);
+      if (output_overlaps_with_condition_inputs) {
+        false_target = &other_case;
+      } else {
+        codegen_->MoveLocation(out, first, type);
+      }
     }
   }
 
   GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target */ false);
   codegen_->MoveLocation(out, src, type);
+  if (output_overlaps_with_condition_inputs) {
+    __ B(target);
+    __ Bind(&other_case);
+    codegen_->MoveLocation(out, first, type);
+  }
 
   if (select_end.IsReferenced()) {
     __ Bind(&select_end);
@@ -2876,31 +2898,16 @@ void CodeGeneratorARMVIXL::GenerateConditionWithZero(IfCondition condition,
 
 void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
   LocationSummary* locations =
       new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
-  // Handle the long/FP comparisons made in instruction simplification.
-  switch (cond->InputAt(0)->GetType()) {
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
-      if (!cond->IsEmittedAtUseSite()) {
-        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      }
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
-      if (!cond->IsEmittedAtUseSite()) {
-        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      }
-      break;
-
-    default:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
-      if (!cond->IsEmittedAtUseSite()) {
-        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      }
+  const DataType::Type type = cond->InputAt(0)->GetType();
+  if (DataType::IsFloatingPointType(type)) {
+    locations->SetInAt(0, Location::RequiresFpuRegister());
+    locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
+  } else {
+    locations->SetInAt(0, Location::RequiresRegister());
+    locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+  }
+  if (!cond->IsEmittedAtUseSite()) {
+    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   }
 }
 
@@ -8650,7 +8657,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruction,
 // otherwise return a fall-back info that should be used instead.
 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
     const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-    HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+    ArtMethod* method ATTRIBUTE_UNUSED) {
  return desired_dispatch_info;
 }
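The VisitSelect/HandleCondition hunks above guard a register-allocation corner case: when the condition is emitted at its use site, the select's output register may coincide with one of the condition's inputs, and writing the output before evaluating the condition would clobber that operand. A hypothetical allocation and the predicate in isolation (register numbers invented for illustration):

  // Suppose the allocator produced:
  //   cond = (r0 == r1)            // emitted at use site, inputs r0 and r1
  //   out  = select(cond, r2, r3)  // out assigned to r0
  // Copying r2 into r0 ahead of the compare would destroy the compare's left
  // operand, so conditional moves cannot be used and the code must branch.
  bool OutputOverlapsConditionInputs(int out, int first, int second,
                                     int cond_in0, int cond_in1) {
    return out != first && out != second &&
           (out == cond_in0 || out == cond_in1);
  }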
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index cb131a7ac1..5edca87147 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -178,9 +178,9 @@ class InvokeDexCallingConventionVisitorARMVIXL : public InvokeDexCallingConventionVisitor {
   InvokeDexCallingConventionVisitorARMVIXL() {}
   virtual ~InvokeDexCallingConventionVisitorARMVIXL() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE;
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type type) const override;
+  Location GetMethodLocation() const override;
 
 private:
   InvokeDexCallingConventionARMVIXL calling_convention;
@@ -193,25 +193,25 @@ class FieldAccessCallingConventionARMVIXL : public FieldAccessCallingConvention {
 public:
   FieldAccessCallingConventionARMVIXL() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return helpers::LocationFrom(vixl::aarch32::r1);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return helpers::LocationFrom(vixl::aarch32::r0);
   }
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type) const override {
     return DataType::Is64BitType(type)
         ? helpers::LocationFrom(vixl::aarch32::r0, vixl::aarch32::r1)
        : helpers::LocationFrom(vixl::aarch32::r0);
  }
-  Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE {
+  Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
     return DataType::Is64BitType(type)
         ? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
        : (is_instance
            ? helpers::LocationFrom(vixl::aarch32::r2)
            : helpers::LocationFrom(vixl::aarch32::r1));
  }
-  Location GetFpuLocation(DataType::Type type) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type) const override {
     return DataType::Is64BitType(type)
        ? helpers::LocationFrom(vixl::aarch32::s0, vixl::aarch32::s1)
        : helpers::LocationFrom(vixl::aarch32::s0);
   }
 
@@ -229,8 +229,8 @@ class SlowPathCodeARMVIXL : public SlowPathCode {
   vixl::aarch32::Label* GetEntryLabel() { return &entry_label_; }
   vixl::aarch32::Label* GetExitLabel() { return &exit_label_; }
 
-  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
-  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
+  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
 
 private:
   vixl::aarch32::Label entry_label_;
@@ -244,10 +244,10 @@ class ParallelMoveResolverARMVIXL : public ParallelMoveResolverWithSwap {
   ParallelMoveResolverARMVIXL(ArenaAllocator* allocator, CodeGeneratorARMVIXL* codegen)
       : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
 
-  void EmitMove(size_t index) OVERRIDE;
-  void EmitSwap(size_t index) OVERRIDE;
-  void SpillScratch(int reg) OVERRIDE;
-  void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) override;
+  void EmitSwap(size_t index) override;
+  void SpillScratch(int reg) override;
+  void RestoreScratch(int reg) override;
 
   ArmVIXLAssembler* GetAssembler() const;
 
@@ -266,7 +266,7 @@ class LocationsBuilderARMVIXL : public HGraphVisitor {
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super) \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
@@ -274,7 +274,7 @@ class LocationsBuilderARMVIXL : public HGraphVisitor {
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -304,7 +304,7 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
   InstructionCodeGeneratorARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super) \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
@@ -312,7 +312,7 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -432,48 +432,48 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
                        OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorARMVIXL() {}
 
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
-  void Bind(HBasicBlock* block) OVERRIDE;
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
+  void Bind(HBasicBlock* block) override;
+  void MoveConstant(Location destination, int32_t value) override;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
 
-  size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const override {
     return static_cast<size_t>(kArmPointerSize);
   }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return vixl::aarch32::kRegSizeInBytes; }
+  size_t GetFloatingPointSpillSlotSize() const override { return vixl::aarch32::kRegSizeInBytes; }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
 
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
 
-  ArmVIXLAssembler* GetAssembler() OVERRIDE { return &assembler_; }
+  ArmVIXLAssembler* GetAssembler() override { return &assembler_; }
 
-  const ArmVIXLAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+  const ArmVIXLAssembler& GetAssembler() const override { return assembler_; }
 
   ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     vixl::aarch32::Label* block_entry_label = GetLabelOf(block);
     DCHECK(block_entry_label->IsBound());
     return block_entry_label->GetLocation();
   }
 
   void FixJumpTables();
 
-  void SetupBlockedRegisters() const OVERRIDE;
+  void SetupBlockedRegisters() const override;
 
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
 
-  ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
-  InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kThumb2; }
+  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
+  InstructionSet GetInstructionSet() const override { return InstructionSet::kThumb2; }
 
   const ArmInstructionSetFeatures& GetInstructionSetFeatures() const;
 
@@ -495,7 +495,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -519,42 +519,42 @@ class CodeGeneratorARMVIXL : public CodeGenerator { vixl32::Label* GetFinalLabel(HInstruction* instruction, vixl32::Label* final_label); - void Initialize() OVERRIDE { + void Initialize() override { block_labels_.resize(GetGraph()->GetBlocks().size()); } - void Finalize(CodeAllocator* allocator) OVERRIDE; + void Finalize(CodeAllocator* allocator) override; - bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE { + bool NeedsTwoRegisters(DataType::Type type) const override { return type == DataType::Type::kFloat64 || type == DataType::Type::kInt64; } - void ComputeSpillMask() OVERRIDE; + void ComputeSpillMask() override; vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; } // Check if the desired_string_load_kind is supported. If it is, return it, // otherwise return a fall-back kind that should be used instead. HLoadString::LoadKind GetSupportedLoadStringKind( - HLoadString::LoadKind desired_string_load_kind) OVERRIDE; + HLoadString::LoadKind desired_string_load_kind) override; // Check if the desired_class_load_kind is supported. If it is, return it, // otherwise return a fall-back kind that should be used instead. HLoadClass::LoadKind GetSupportedLoadClassKind( - HLoadClass::LoadKind desired_class_load_kind) OVERRIDE; + HLoadClass::LoadKind desired_class_load_kind) override; // Check if the desired_dispatch_info is supported. If it is, return it, // otherwise return a fall-back info that should be used instead. HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch( const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, - HInvokeStaticOrDirect* invoke) OVERRIDE; + ArtMethod* method) override; void GenerateStaticOrDirectCall( - HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; + HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; void GenerateVirtualCall( - HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; + HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; - void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE; + void MoveFromReturnRegister(Location trg, DataType::Type type) override; // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types, // whether through .data.bimg.rel.ro, .bss, or directly in the boot image. @@ -604,13 +604,13 @@ class CodeGeneratorARMVIXL : public CodeGenerator { void LoadBootImageAddress(vixl::aarch32::Register reg, uint32_t boot_image_reference); void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset); - void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE; - bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE; + void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override; + bool NeedsThunkCode(const linker::LinkerPatch& patch) const override; void EmitThunkCode(const linker::LinkerPatch& patch, /*out*/ ArenaVector<uint8_t>* code, - /*out*/ std::string* debug_name) OVERRIDE; + /*out*/ std::string* debug_name) override; - void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE; + void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override; // Generate a GC root reference load: // @@ -722,10 +722,10 @@ class CodeGeneratorARMVIXL : public CodeGenerator { // artReadBarrierForRootSlow. 
void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root); - void GenerateNop() OVERRIDE; + void GenerateNop() override; - void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE; - void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE; + void GenerateImplicitNullCheck(HNullCheck* instruction) override; + void GenerateExplicitNullCheck(HNullCheck* instruction) override; JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) { jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr)); diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index aed334b024..1cf55152bb 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -176,7 +176,7 @@ class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS { public: explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : SlowPathCodeMIPS(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen); __ Bind(GetEntryLabel()); @@ -201,9 +201,9 @@ class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS { CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>(); } - bool IsFatal() const OVERRIDE { return true; } + bool IsFatal() const override { return true; } - const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; } + const char* GetDescription() const override { return "BoundsCheckSlowPathMIPS"; } private: DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS); @@ -213,16 +213,16 @@ class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS { public: explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : SlowPathCodeMIPS(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen); __ Bind(GetEntryLabel()); mips_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes<kQuickThrowDivZero, void, void>(); } - bool IsFatal() const OVERRIDE { return true; } + bool IsFatal() const override { return true; } - const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; } + const char* GetDescription() const override { return "DivZeroCheckSlowPathMIPS"; } private: DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS); @@ -236,7 +236,7 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS { DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_); } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); Location out = locations->Out(); const uint32_t dex_pc = instruction_->GetDexPc(); @@ -280,7 +280,7 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS { __ B(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS"; } + const char* GetDescription() const override { return "LoadClassSlowPathMIPS"; } private: // The class this slow path will load. 
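Every slow path retouched in these MIPS hunks follows the same contract: EmitNativeCode() emits the out-of-line sequence, IsFatal() tells the framework whether control can return, and GetDescription() names the path for debug output. A condensed sketch of that contract (simplified; the real base classes are SlowPathCode and SlowPathCodeMIPS):

// Condensed sketch of the slow-path contract; not the literal ART classes.
class CodeGenerator;

class SlowPathSketch {
 public:
  virtual ~SlowPathSketch() {}

  // Bind the entry label, invoke the runtime entrypoint, and, when control
  // can come back, restore live registers and branch to the exit label.
  virtual void EmitNativeCode(CodeGenerator* codegen) = 0;

  // Fatal paths (ThrowNullPointer, ThrowDivZero, ThrowArrayBounds) never
  // return, so they emit no exit branch and no register restore.
  virtual bool IsFatal() const { return false; }

  // Debug-only name, e.g. "NullCheckSlowPathMIPS".
  virtual const char* GetDescription() const = 0;
};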
@@ -294,7 +294,7 @@ class LoadStringSlowPathMIPS : public SlowPathCodeMIPS { explicit LoadStringSlowPathMIPS(HLoadString* instruction) : SlowPathCodeMIPS(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { DCHECK(instruction_->IsLoadString()); DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry); LocationSummary* locations = instruction_->GetLocations(); @@ -318,7 +318,7 @@ class LoadStringSlowPathMIPS : public SlowPathCodeMIPS { __ B(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; } + const char* GetDescription() const override { return "LoadStringSlowPathMIPS"; } private: DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS); @@ -328,7 +328,7 @@ class NullCheckSlowPathMIPS : public SlowPathCodeMIPS { public: explicit NullCheckSlowPathMIPS(HNullCheck* instr) : SlowPathCodeMIPS(instr) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen); __ Bind(GetEntryLabel()); if (instruction_->CanThrowIntoCatchBlock()) { @@ -342,9 +342,9 @@ class NullCheckSlowPathMIPS : public SlowPathCodeMIPS { CheckEntrypointTypes<kQuickThrowNullPointer, void, void>(); } - bool IsFatal() const OVERRIDE { return true; } + bool IsFatal() const override { return true; } - const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; } + const char* GetDescription() const override { return "NullCheckSlowPathMIPS"; } private: DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS); @@ -355,7 +355,7 @@ class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS { SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor) : SlowPathCodeMIPS(instruction), successor_(successor) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen); __ Bind(GetEntryLabel()); @@ -375,7 +375,7 @@ class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS { return &return_label_; } - const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; } + const char* GetDescription() const override { return "SuspendCheckSlowPathMIPS"; } HBasicBlock* GetSuccessor() const { return successor_; @@ -396,7 +396,7 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS { explicit TypeCheckSlowPathMIPS(HInstruction* instruction, bool is_fatal) : SlowPathCodeMIPS(instruction), is_fatal_(is_fatal) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); uint32_t dex_pc = instruction_->GetDexPc(); DCHECK(instruction_->IsCheckCast() @@ -435,9 +435,9 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS { } } - const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; } + const char* GetDescription() const override { return "TypeCheckSlowPathMIPS"; } - bool IsFatal() const OVERRIDE { return is_fatal_; } + bool IsFatal() const override { return is_fatal_; } private: const bool is_fatal_; @@ -450,7 +450,7 @@ class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS { explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction) : SlowPathCodeMIPS(instruction) {} - void EmitNativeCode(CodeGenerator* 
codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen); __ Bind(GetEntryLabel()); LocationSummary* locations = instruction_->GetLocations(); @@ -462,7 +462,7 @@ class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS { CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>(); } - const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; } + const char* GetDescription() const override { return "DeoptimizationSlowPathMIPS"; } private: DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS); @@ -472,7 +472,7 @@ class ArraySetSlowPathMIPS : public SlowPathCodeMIPS { public: explicit ArraySetSlowPathMIPS(HInstruction* instruction) : SlowPathCodeMIPS(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); @@ -503,7 +503,7 @@ class ArraySetSlowPathMIPS : public SlowPathCodeMIPS { __ B(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS"; } + const char* GetDescription() const override { return "ArraySetSlowPathMIPS"; } private: DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS); @@ -533,9 +533,9 @@ class ReadBarrierMarkSlowPathMIPS : public SlowPathCodeMIPS { DCHECK(kEmitCompilerReadBarrier); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; } + const char* GetDescription() const override { return "ReadBarrierMarkSlowPathMIPS"; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); Register ref_reg = ref_.AsRegister<Register>(); DCHECK(locations->CanCall()); @@ -627,11 +627,11 @@ class ReadBarrierMarkAndUpdateFieldSlowPathMIPS : public SlowPathCodeMIPS { DCHECK(kEmitCompilerReadBarrier); } - const char* GetDescription() const OVERRIDE { + const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS"; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); Register ref_reg = ref_.AsRegister<Register>(); DCHECK(locations->CanCall()); @@ -798,7 +798,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS { DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen); LocationSummary* locations = instruction_->GetLocations(); Register reg_out = out_.AsRegister<Register>(); @@ -922,7 +922,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS { __ B(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathMIPS"; } + const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathMIPS"; } private: Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) { @@ -965,7 +965,7 @@ class ReadBarrierForRootSlowPathMIPS : public SlowPathCodeMIPS { DCHECK(kEmitCompilerReadBarrier); } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = 
instruction_->GetLocations(); Register reg_out = out_.AsRegister<Register>(); DCHECK(locations->CanCall()); @@ -995,7 +995,7 @@ class ReadBarrierForRootSlowPathMIPS : public SlowPathCodeMIPS { __ B(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS"; } + const char* GetDescription() const override { return "ReadBarrierForRootSlowPathMIPS"; } private: const Location out_; @@ -7964,7 +7964,7 @@ Register CodeGeneratorMIPS::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticO HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch( const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, - HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) { + ArtMethod* method ATTRIBUTE_UNUSED) { return desired_dispatch_info; } diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h index 4830ac9bc6..50807310b6 100644 --- a/compiler/optimizing/code_generator_mips.h +++ b/compiler/optimizing/code_generator_mips.h @@ -81,9 +81,9 @@ class InvokeDexCallingConventionVisitorMIPS : public InvokeDexCallingConventionV InvokeDexCallingConventionVisitorMIPS() {} virtual ~InvokeDexCallingConventionVisitorMIPS() {} - Location GetNextLocation(DataType::Type type) OVERRIDE; - Location GetReturnLocation(DataType::Type type) const OVERRIDE; - Location GetMethodLocation() const OVERRIDE; + Location GetNextLocation(DataType::Type type) override; + Location GetReturnLocation(DataType::Type type) const override; + Location GetMethodLocation() const override; private: InvokeDexCallingConvention calling_convention; @@ -110,23 +110,23 @@ class FieldAccessCallingConventionMIPS : public FieldAccessCallingConvention { public: FieldAccessCallingConventionMIPS() {} - Location GetObjectLocation() const OVERRIDE { + Location GetObjectLocation() const override { return Location::RegisterLocation(A1); } - Location GetFieldIndexLocation() const OVERRIDE { + Location GetFieldIndexLocation() const override { return Location::RegisterLocation(A0); } - Location GetReturnLocation(DataType::Type type) const OVERRIDE { + Location GetReturnLocation(DataType::Type type) const override { return DataType::Is64BitType(type) ? Location::RegisterPairLocation(V0, V1) : Location::RegisterLocation(V0); } - Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE { + Location GetSetValueLocation(DataType::Type type, bool is_instance) const override { return DataType::Is64BitType(type) ? Location::RegisterPairLocation(A2, A3) : (is_instance ? 
Location::RegisterLocation(A2) : Location::RegisterLocation(A1)); } - Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override { return Location::FpuRegisterLocation(F0); } @@ -139,10 +139,10 @@ class ParallelMoveResolverMIPS : public ParallelMoveResolverWithSwap { ParallelMoveResolverMIPS(ArenaAllocator* allocator, CodeGeneratorMIPS* codegen) : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {} - void EmitMove(size_t index) OVERRIDE; - void EmitSwap(size_t index) OVERRIDE; - void SpillScratch(int reg) OVERRIDE; - void RestoreScratch(int reg) OVERRIDE; + void EmitMove(size_t index) override; + void EmitSwap(size_t index) override; + void SpillScratch(int reg) override; + void RestoreScratch(int reg) override; void Exchange(int index1, int index2, bool double_slot); void ExchangeQuadSlots(int index1, int index2); @@ -176,14 +176,14 @@ class LocationsBuilderMIPS : public HGraphVisitor { : HGraphVisitor(graph), codegen_(codegen) {} #define DECLARE_VISIT_INSTRUCTION(name, super) \ - void Visit##name(H##name* instr) OVERRIDE; + void Visit##name(H##name* instr) override; FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION) FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION) #undef DECLARE_VISIT_INSTRUCTION - void VisitInstruction(HInstruction* instruction) OVERRIDE { + void VisitInstruction(HInstruction* instruction) override { LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() << " (id " << instruction->GetId() << ")"; } @@ -210,14 +210,14 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator { InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen); #define DECLARE_VISIT_INSTRUCTION(name, super) \ - void Visit##name(H##name* instr) OVERRIDE; + void Visit##name(H##name* instr) override; FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION) FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION) #undef DECLARE_VISIT_INSTRUCTION - void VisitInstruction(HInstruction* instruction) OVERRIDE { + void VisitInstruction(HInstruction* instruction) override { LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() << " (id " << instruction->GetId() << ")"; } @@ -374,35 +374,35 @@ class CodeGeneratorMIPS : public CodeGenerator { OptimizingCompilerStats* stats = nullptr); virtual ~CodeGeneratorMIPS() {} - void ComputeSpillMask() OVERRIDE; - bool HasAllocatedCalleeSaveRegisters() const OVERRIDE; - void GenerateFrameEntry() OVERRIDE; - void GenerateFrameExit() OVERRIDE; + void ComputeSpillMask() override; + bool HasAllocatedCalleeSaveRegisters() const override; + void GenerateFrameEntry() override; + void GenerateFrameExit() override; - void Bind(HBasicBlock* block) OVERRIDE; + void Bind(HBasicBlock* block) override; void MoveConstant(Location location, HConstant* c); - size_t GetWordSize() const OVERRIDE { return kMipsWordSize; } + size_t GetWordSize() const override { return kMipsWordSize; } - size_t GetFloatingPointSpillSlotSize() const OVERRIDE { + size_t GetFloatingPointSpillSlotSize() const override { return GetGraph()->HasSIMD() ? 2 * kMipsDoublewordSize // 16 bytes for each spill. : 1 * kMipsDoublewordSize; // 8 bytes for each spill. 
} - uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE { + uintptr_t GetAddressOf(HBasicBlock* block) override { return assembler_.GetLabelLocation(GetLabelOf(block)); } - HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; } - HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; } - MipsAssembler* GetAssembler() OVERRIDE { return &assembler_; } - const MipsAssembler& GetAssembler() const OVERRIDE { return assembler_; } + HGraphVisitor* GetLocationBuilder() override { return &location_builder_; } + HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; } + MipsAssembler* GetAssembler() override { return &assembler_; } + const MipsAssembler& GetAssembler() const override { return assembler_; } // Emit linker patches. - void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE; - void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE; + void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override; + void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override; // Fast path implementation of ReadBarrier::Barrier for a heap // reference field load when Baker's read barriers are used. @@ -493,20 +493,20 @@ class CodeGeneratorMIPS : public CodeGenerator { // Register allocation. - void SetupBlockedRegisters() const OVERRIDE; + void SetupBlockedRegisters() const override; - size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; - size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; - size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; - size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override; + size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override; + size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override; + size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override; void ClobberRA() { clobbered_ra_ = true; } - void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE; - void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE; + void DumpCoreRegister(std::ostream& stream, int reg) const override; + void DumpFloatingPointRegister(std::ostream& stream, int reg) const override; - InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips; } + InstructionSet GetInstructionSet() const override { return InstructionSet::kMips; } const MipsInstructionSetFeatures& GetInstructionSetFeatures() const; @@ -514,25 +514,25 @@ class CodeGeneratorMIPS : public CodeGenerator { return CommonGetLabelOf<MipsLabel>(block_labels_, block); } - void Initialize() OVERRIDE { + void Initialize() override { block_labels_ = CommonInitializeLabels<MipsLabel>(); } - void Finalize(CodeAllocator* allocator) OVERRIDE; + void Finalize(CodeAllocator* allocator) override; // Code generation helpers. - void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE; + void MoveLocation(Location dst, Location src, DataType::Type dst_type) override; - void MoveConstant(Location destination, int32_t value) OVERRIDE; + void MoveConstant(Location destination, int32_t value) override; - void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE; + void AddLocationAsTemp(Location location, LocationSummary* locations) override; // Generate code to invoke a runtime entry point. 
void InvokeRuntime(QuickEntrypointEnum entrypoint, HInstruction* instruction, uint32_t dex_pc, - SlowPathCode* slow_path = nullptr) OVERRIDE; + SlowPathCode* slow_path = nullptr) override; // Generate code to invoke a runtime entry point, but do not record // PC-related information in a stack map. @@ -543,41 +543,41 @@ class CodeGeneratorMIPS : public CodeGenerator { void GenerateInvokeRuntime(int32_t entry_point_offset, bool direct); - ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; } + ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; } - bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE { + bool NeedsTwoRegisters(DataType::Type type) const override { return type == DataType::Type::kInt64; } // Check if the desired_string_load_kind is supported. If it is, return it, // otherwise return a fall-back kind that should be used instead. HLoadString::LoadKind GetSupportedLoadStringKind( - HLoadString::LoadKind desired_string_load_kind) OVERRIDE; + HLoadString::LoadKind desired_string_load_kind) override; // Check if the desired_class_load_kind is supported. If it is, return it, // otherwise return a fall-back kind that should be used instead. HLoadClass::LoadKind GetSupportedLoadClassKind( - HLoadClass::LoadKind desired_class_load_kind) OVERRIDE; + HLoadClass::LoadKind desired_class_load_kind) override; // Check if the desired_dispatch_info is supported. If it is, return it, // otherwise return a fall-back info that should be used instead. HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch( const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, - HInvokeStaticOrDirect* invoke) OVERRIDE; + ArtMethod* method) override; void GenerateStaticOrDirectCall( - HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; + HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; void GenerateVirtualCall( - HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; + HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED, - DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE { + DataType::Type type ATTRIBUTE_UNUSED) override { UNIMPLEMENTED(FATAL) << "Not implemented on MIPS"; } - void GenerateNop() OVERRIDE; - void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE; - void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE; + void GenerateNop() override; + void GenerateImplicitNullCheck(HNullCheck* instruction) override; + void GenerateExplicitNullCheck(HNullCheck* instruction) override; // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types, // whether through .data.bimg.rel.ro, .bss, or directly in the boot image. 
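Besides the keyword swap, the one interface change in these hunks narrows GetSupportedInvokeStaticOrDirectDispatch from taking the whole HInvokeStaticOrDirect node to taking just the resolved ArtMethod*. On MIPS the parameter is unused either way; the implementation hunk above, reproduced with a comment for context:

// From code_generator_mips.cc after this change: MIPS supports every
// dispatch kind, so the query simply echoes the caller's desired info back.
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch(
    const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
    ArtMethod* method ATTRIBUTE_UNUSED) {  // narrowed from HInvokeStaticOrDirect* invoke
  return desired_dispatch_info;
}

The narrower parameter means a caller only needs the resolved callee, not the HIR invoke instruction, to ask whether a dispatch kind is supported.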
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 72318e98b0..27534b004a 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -128,7 +128,7 @@ class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { public: explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : SlowPathCodeMIPS64(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen); __ Bind(GetEntryLabel()); @@ -153,9 +153,9 @@ class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>(); } - bool IsFatal() const OVERRIDE { return true; } + bool IsFatal() const override { return true; } - const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; } + const char* GetDescription() const override { return "BoundsCheckSlowPathMIPS64"; } private: DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64); @@ -166,16 +166,16 @@ class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : SlowPathCodeMIPS64(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen); __ Bind(GetEntryLabel()); mips64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes<kQuickThrowDivZero, void, void>(); } - bool IsFatal() const OVERRIDE { return true; } + bool IsFatal() const override { return true; } - const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; } + const char* GetDescription() const override { return "DivZeroCheckSlowPathMIPS64"; } private: DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64); @@ -189,7 +189,7 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 { DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_); } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); Location out = locations->Out(); const uint32_t dex_pc = instruction_->GetDexPc(); @@ -233,7 +233,7 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 { __ Bc(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; } + const char* GetDescription() const override { return "LoadClassSlowPathMIPS64"; } private: // The class this slow path will load. 
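The CheckEntrypointTypes<kQuickThrowDivZero, void, void>() calls sprinkled through these slow paths are compile-time assertions that the slow path invokes the runtime entrypoint with the signature the runtime registered. An illustrative analogue of that mechanism (hypothetical names throughout; this is not ART's actual template machinery):

#include <type_traits>

// Illustrative analogue only; ART's real CheckEntrypointTypes<> is keyed by
// QuickEntrypointEnum and lives alongside the entrypoint tables.
enum class EntrypointSketch { kThrowDivZero };

template <EntrypointSketch kEntry> struct EntrypointSig;
template <> struct EntrypointSig<EntrypointSketch::kThrowDivZero> {
  using type = void();  // the throw-div-zero entrypoint takes no arguments
};

template <EntrypointSketch kEntry, typename ExpectedSig>
constexpr void CheckEntrypointSketch() {
  // Purely a compile-time assertion; emits no code. A slow path that assumes
  // the wrong signature fails to build instead of corrupting the call.
  static_assert(std::is_same<typename EntrypointSig<kEntry>::type, ExpectedSig>::value,
                "entrypoint signature mismatch");
}

// Usage, mirroring CheckEntrypointTypes<kQuickThrowDivZero, void, void>():
//   CheckEntrypointSketch<EntrypointSketch::kThrowDivZero, void()>();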
@@ -247,7 +247,7 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 { explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : SlowPathCodeMIPS64(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { DCHECK(instruction_->IsLoadString()); DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry); LocationSummary* locations = instruction_->GetLocations(); @@ -274,7 +274,7 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 { __ Bc(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; } + const char* GetDescription() const override { return "LoadStringSlowPathMIPS64"; } private: DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64); @@ -284,7 +284,7 @@ class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { public: explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : SlowPathCodeMIPS64(instr) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen); __ Bind(GetEntryLabel()); if (instruction_->CanThrowIntoCatchBlock()) { @@ -298,9 +298,9 @@ class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { CheckEntrypointTypes<kQuickThrowNullPointer, void, void>(); } - bool IsFatal() const OVERRIDE { return true; } + bool IsFatal() const override { return true; } - const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; } + const char* GetDescription() const override { return "NullCheckSlowPathMIPS64"; } private: DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64); @@ -311,7 +311,7 @@ class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor) : SlowPathCodeMIPS64(instruction), successor_(successor) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen); __ Bind(GetEntryLabel()); @@ -331,7 +331,7 @@ class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { return &return_label_; } - const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; } + const char* GetDescription() const override { return "SuspendCheckSlowPathMIPS64"; } HBasicBlock* GetSuccessor() const { return successor_; @@ -352,7 +352,7 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { explicit TypeCheckSlowPathMIPS64(HInstruction* instruction, bool is_fatal) : SlowPathCodeMIPS64(instruction), is_fatal_(is_fatal) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); uint32_t dex_pc = instruction_->GetDexPc(); @@ -392,9 +392,9 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { } } - const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; } + const char* GetDescription() const override { return "TypeCheckSlowPathMIPS64"; } - bool IsFatal() const OVERRIDE { return is_fatal_; } + bool IsFatal() const override { return is_fatal_; } private: const bool is_fatal_; @@ -407,7 +407,7 @@ class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 { explicit DeoptimizationSlowPathMIPS64(HDeoptimize* instruction) : 
SlowPathCodeMIPS64(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen); __ Bind(GetEntryLabel()); LocationSummary* locations = instruction_->GetLocations(); @@ -419,7 +419,7 @@ class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 { CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>(); } - const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; } + const char* GetDescription() const override { return "DeoptimizationSlowPathMIPS64"; } private: DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64); @@ -429,7 +429,7 @@ class ArraySetSlowPathMIPS64 : public SlowPathCodeMIPS64 { public: explicit ArraySetSlowPathMIPS64(HInstruction* instruction) : SlowPathCodeMIPS64(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); @@ -460,7 +460,7 @@ class ArraySetSlowPathMIPS64 : public SlowPathCodeMIPS64 { __ Bc(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS64"; } + const char* GetDescription() const override { return "ArraySetSlowPathMIPS64"; } private: DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS64); @@ -490,9 +490,9 @@ class ReadBarrierMarkSlowPathMIPS64 : public SlowPathCodeMIPS64 { DCHECK(kEmitCompilerReadBarrier); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; } + const char* GetDescription() const override { return "ReadBarrierMarkSlowPathMIPS"; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); GpuRegister ref_reg = ref_.AsRegister<GpuRegister>(); DCHECK(locations->CanCall()); @@ -583,11 +583,11 @@ class ReadBarrierMarkAndUpdateFieldSlowPathMIPS64 : public SlowPathCodeMIPS64 { DCHECK(kEmitCompilerReadBarrier); } - const char* GetDescription() const OVERRIDE { + const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS64"; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); GpuRegister ref_reg = ref_.AsRegister<GpuRegister>(); DCHECK(locations->CanCall()); @@ -744,7 +744,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS64 : public SlowPathCodeMIPS64 { DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen); LocationSummary* locations = instruction_->GetLocations(); DataType::Type type = DataType::Type::kReference; @@ -864,7 +864,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS64 : public SlowPathCodeMIPS64 { __ Bc(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { + const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathMIPS64"; } @@ -909,7 +909,7 @@ class ReadBarrierForRootSlowPathMIPS64 : public SlowPathCodeMIPS64 { DCHECK(kEmitCompilerReadBarrier); } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { 
LocationSummary* locations = instruction_->GetLocations(); DataType::Type type = DataType::Type::kReference; GpuRegister reg_out = out_.AsRegister<GpuRegister>(); @@ -938,7 +938,7 @@ class ReadBarrierForRootSlowPathMIPS64 : public SlowPathCodeMIPS64 { __ Bc(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS64"; } + const char* GetDescription() const override { return "ReadBarrierForRootSlowPathMIPS64"; } private: const Location out_; @@ -6059,7 +6059,7 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind( HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch( const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, - HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) { + ArtMethod* method ATTRIBUTE_UNUSED) { // On MIPS64 we support all dispatch types. return desired_dispatch_info; } diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h index fc0908b2cb..52f3a62f33 100644 --- a/compiler/optimizing/code_generator_mips64.h +++ b/compiler/optimizing/code_generator_mips64.h @@ -79,9 +79,9 @@ class InvokeDexCallingConventionVisitorMIPS64 : public InvokeDexCallingConventio InvokeDexCallingConventionVisitorMIPS64() {} virtual ~InvokeDexCallingConventionVisitorMIPS64() {} - Location GetNextLocation(DataType::Type type) OVERRIDE; - Location GetReturnLocation(DataType::Type type) const OVERRIDE; - Location GetMethodLocation() const OVERRIDE; + Location GetNextLocation(DataType::Type type) override; + Location GetReturnLocation(DataType::Type type) const override; + Location GetMethodLocation() const override; private: InvokeDexCallingConvention calling_convention; @@ -108,22 +108,22 @@ class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention { public: FieldAccessCallingConventionMIPS64() {} - Location GetObjectLocation() const OVERRIDE { + Location GetObjectLocation() const override { return Location::RegisterLocation(A1); } - Location GetFieldIndexLocation() const OVERRIDE { + Location GetFieldIndexLocation() const override { return Location::RegisterLocation(A0); } - Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override { return Location::RegisterLocation(V0); } Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED, - bool is_instance) const OVERRIDE { + bool is_instance) const override { return is_instance ? 
Location::RegisterLocation(A2) : Location::RegisterLocation(A1); } - Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override { return Location::FpuRegisterLocation(F0); } @@ -136,10 +136,10 @@ class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap { ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen) : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {} - void EmitMove(size_t index) OVERRIDE; - void EmitSwap(size_t index) OVERRIDE; - void SpillScratch(int reg) OVERRIDE; - void RestoreScratch(int reg) OVERRIDE; + void EmitMove(size_t index) override; + void EmitSwap(size_t index) override; + void SpillScratch(int reg) override; + void RestoreScratch(int reg) override; void Exchange(int index1, int index2, bool double_slot); void ExchangeQuadSlots(int index1, int index2); @@ -173,14 +173,14 @@ class LocationsBuilderMIPS64 : public HGraphVisitor { : HGraphVisitor(graph), codegen_(codegen) {} #define DECLARE_VISIT_INSTRUCTION(name, super) \ - void Visit##name(H##name* instr) OVERRIDE; + void Visit##name(H##name* instr) override; FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION) FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION) #undef DECLARE_VISIT_INSTRUCTION - void VisitInstruction(HInstruction* instruction) OVERRIDE { + void VisitInstruction(HInstruction* instruction) override { LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() << " (id " << instruction->GetId() << ")"; } @@ -207,14 +207,14 @@ class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator { InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen); #define DECLARE_VISIT_INSTRUCTION(name, super) \ - void Visit##name(H##name* instr) OVERRIDE; + void Visit##name(H##name* instr) override; FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION) FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION) #undef DECLARE_VISIT_INSTRUCTION - void VisitInstruction(HInstruction* instruction) OVERRIDE { + void VisitInstruction(HInstruction* instruction) override { LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() << " (id " << instruction->GetId() << ")"; } @@ -356,31 +356,31 @@ class CodeGeneratorMIPS64 : public CodeGenerator { OptimizingCompilerStats* stats = nullptr); virtual ~CodeGeneratorMIPS64() {} - void GenerateFrameEntry() OVERRIDE; - void GenerateFrameExit() OVERRIDE; + void GenerateFrameEntry() override; + void GenerateFrameExit() override; - void Bind(HBasicBlock* block) OVERRIDE; + void Bind(HBasicBlock* block) override; - size_t GetWordSize() const OVERRIDE { return kMips64DoublewordSize; } + size_t GetWordSize() const override { return kMips64DoublewordSize; } - size_t GetFloatingPointSpillSlotSize() const OVERRIDE { + size_t GetFloatingPointSpillSlotSize() const override { return GetGraph()->HasSIMD() ? 2 * kMips64DoublewordSize // 16 bytes for each spill. : 1 * kMips64DoublewordSize; // 8 bytes for each spill. 
} - uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE { + uintptr_t GetAddressOf(HBasicBlock* block) override { return assembler_.GetLabelLocation(GetLabelOf(block)); } - HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; } - HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; } - Mips64Assembler* GetAssembler() OVERRIDE { return &assembler_; } - const Mips64Assembler& GetAssembler() const OVERRIDE { return assembler_; } + HGraphVisitor* GetLocationBuilder() override { return &location_builder_; } + HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; } + Mips64Assembler* GetAssembler() override { return &assembler_; } + const Mips64Assembler& GetAssembler() const override { return assembler_; } // Emit linker patches. - void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE; - void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE; + void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override; + void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override; // Fast path implementation of ReadBarrier::Barrier for a heap // reference field load when Baker's read barriers are used. @@ -471,17 +471,17 @@ class CodeGeneratorMIPS64 : public CodeGenerator { // Register allocation. - void SetupBlockedRegisters() const OVERRIDE; + void SetupBlockedRegisters() const override; - size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; - size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; - size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; - size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override; + size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override; + size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override; + size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override; - void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE; - void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE; + void DumpCoreRegister(std::ostream& stream, int reg) const override; + void DumpFloatingPointRegister(std::ostream& stream, int reg) const override; - InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips64; } + InstructionSet GetInstructionSet() const override { return InstructionSet::kMips64; } const Mips64InstructionSetFeatures& GetInstructionSetFeatures() const; @@ -489,22 +489,22 @@ class CodeGeneratorMIPS64 : public CodeGenerator { return CommonGetLabelOf<Mips64Label>(block_labels_, block); } - void Initialize() OVERRIDE { + void Initialize() override { block_labels_ = CommonInitializeLabels<Mips64Label>(); } // We prefer aligned loads and stores (less code), so spill and restore registers in slow paths // at aligned locations. - uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return kMips64DoublewordSize; } + uint32_t GetPreferredSlotsAlignment() const override { return kMips64DoublewordSize; } - void Finalize(CodeAllocator* allocator) OVERRIDE; + void Finalize(CodeAllocator* allocator) override; // Code generation helpers. 
- void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE; + void MoveLocation(Location dst, Location src, DataType::Type dst_type) override; - void MoveConstant(Location destination, int32_t value) OVERRIDE; + void MoveConstant(Location destination, int32_t value) override; - void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE; + void AddLocationAsTemp(Location location, LocationSummary* locations) override; void SwapLocations(Location loc1, Location loc2, DataType::Type type); @@ -513,7 +513,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator { void InvokeRuntime(QuickEntrypointEnum entrypoint, HInstruction* instruction, uint32_t dex_pc, - SlowPathCode* slow_path = nullptr) OVERRIDE; + SlowPathCode* slow_path = nullptr) override; // Generate code to invoke a runtime entry point, but do not record // PC-related information in a stack map. @@ -523,39 +523,39 @@ class CodeGeneratorMIPS64 : public CodeGenerator { void GenerateInvokeRuntime(int32_t entry_point_offset); - ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; } + ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; } - bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; } + bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override { return false; } // Check if the desired_string_load_kind is supported. If it is, return it, // otherwise return a fall-back kind that should be used instead. HLoadString::LoadKind GetSupportedLoadStringKind( - HLoadString::LoadKind desired_string_load_kind) OVERRIDE; + HLoadString::LoadKind desired_string_load_kind) override; // Check if the desired_class_load_kind is supported. If it is, return it, // otherwise return a fall-back kind that should be used instead. HLoadClass::LoadKind GetSupportedLoadClassKind( - HLoadClass::LoadKind desired_class_load_kind) OVERRIDE; + HLoadClass::LoadKind desired_class_load_kind) override; // Check if the desired_dispatch_info is supported. If it is, return it, // otherwise return a fall-back info that should be used instead. HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch( const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, - HInvokeStaticOrDirect* invoke) OVERRIDE; + ArtMethod* method) override; void GenerateStaticOrDirectCall( - HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; + HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; void GenerateVirtualCall( - HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; + HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED, - DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE { + DataType::Type type ATTRIBUTE_UNUSED) override { UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64"; } - void GenerateNop() OVERRIDE; - void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE; - void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE; + void GenerateNop() override; + void GenerateImplicitNullCheck(HNullCheck* instruction) override; + void GenerateExplicitNullCheck(HNullCheck* instruction) override; // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types, // whether through .data.bimg.rel.ro, .bss, or directly in the boot image. 
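A detail worth noting across the two MIPS headers: NeedsTwoRegisters() returns true for kInt64 on 32-bit MIPS but false unconditionally on MIPS64, and the field-access calling conventions differ to match. A stand-in sketch of that contrast (simplified types; the real code uses DataType::Type and Location):

// Condensed from the conventions above: on a 32-bit ISA a 64-bit value
// occupies a register pair, on a 64-bit ISA a single register.
enum class Ty { kInt32, kInt64 };

struct Mips32Sketch {
  static bool NeedsTwoRegisters(Ty type) { return type == Ty::kInt64; }
};

struct Mips64Sketch {
  static bool NeedsTwoRegisters(Ty /*type*/) { return false; }
};

// Correspondingly, FieldAccessCallingConventionMIPS returns 64-bit results in
// the Location::RegisterPairLocation(V0, V1) pair, while the MIPS64 variant
// returns the single Location::RegisterLocation(V0).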
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc index 6d135a9bfb..e79a96bc2a 100644 --- a/compiler/optimizing/code_generator_vector_arm64.cc +++ b/compiler/optimizing/code_generator_vector_arm64.cc @@ -1277,6 +1277,74 @@ void InstructionCodeGeneratorARM64::VisitVecSADAccumulate(HVecSADAccumulate* ins } } +void LocationsBuilderARM64::VisitVecDotProd(HVecDotProd* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + DCHECK(instruction->GetPackedType() == DataType::Type::kInt32); + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetInAt(2, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); + + // For Int8 and Uint8 we need a temp register. + if (DataType::Size(instruction->InputAt(1)->AsVecOperation()->GetPackedType()) == 1) { + locations->AddTemp(Location::RequiresFpuRegister()); + } +} + +void InstructionCodeGeneratorARM64::VisitVecDotProd(HVecDotProd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + VRegister acc = VRegisterFrom(locations->InAt(0)); + VRegister left = VRegisterFrom(locations->InAt(1)); + VRegister right = VRegisterFrom(locations->InAt(2)); + HVecOperation* a = instruction->InputAt(1)->AsVecOperation(); + HVecOperation* b = instruction->InputAt(2)->AsVecOperation(); + DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()), + HVecOperation::ToSignedType(b->GetPackedType())); + DCHECK_EQ(instruction->GetPackedType(), DataType::Type::kInt32); + DCHECK_EQ(4u, instruction->GetVectorLength()); + + size_t inputs_data_size = DataType::Size(a->GetPackedType()); + switch (inputs_data_size) { + case 1u: { + DCHECK_EQ(16u, a->GetVectorLength()); + VRegister tmp = VRegisterFrom(locations->GetTemp(0)); + if (instruction->IsZeroExtending()) { + // TODO: Use Armv8.4-A UDOT instruction when it is available. + __ Umull(tmp.V8H(), left.V8B(), right.V8B()); + __ Uaddw(acc.V4S(), acc.V4S(), tmp.V4H()); + __ Uaddw2(acc.V4S(), acc.V4S(), tmp.V8H()); + + __ Umull2(tmp.V8H(), left.V16B(), right.V16B()); + __ Uaddw(acc.V4S(), acc.V4S(), tmp.V4H()); + __ Uaddw2(acc.V4S(), acc.V4S(), tmp.V8H()); + } else { + // TODO: Use Armv8.4-A SDOT instruction when it is available. + __ Smull(tmp.V8H(), left.V8B(), right.V8B()); + __ Saddw(acc.V4S(), acc.V4S(), tmp.V4H()); + __ Saddw2(acc.V4S(), acc.V4S(), tmp.V8H()); + + __ Smull2(tmp.V8H(), left.V16B(), right.V16B()); + __ Saddw(acc.V4S(), acc.V4S(), tmp.V4H()); + __ Saddw2(acc.V4S(), acc.V4S(), tmp.V8H()); + } + break; + } + case 2u: + DCHECK_EQ(8u, a->GetVectorLength()); + if (instruction->IsZeroExtending()) { + __ Umlal(acc.V4S(), left.V4H(), right.V4H()); + __ Umlal2(acc.V4S(), left.V8H(), right.V8H()); + } else { + __ Smlal(acc.V4S(), left.V4H(), right.V4H()); + __ Smlal2(acc.V4S(), left.V8H(), right.V8H()); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type size: " << inputs_data_size; + } +} + // Helper to set up locations for vector memory operations. static void CreateVecMemLocations(ArenaAllocator* allocator, HVecMemoryOperation* instruction, @@ -1354,6 +1422,7 @@ void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) { Register scratch; switch (instruction->GetPackedType()) { + case DataType::Type::kInt16: // (short) s.charAt(.) can yield HVecLoad/Int16/StringCharAt. 
case DataType::Type::kUint16: DCHECK_EQ(8u, instruction->GetVectorLength()); // Special handling of compressed/uncompressed string load. @@ -1385,7 +1454,6 @@ void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) { case DataType::Type::kBool: case DataType::Type::kUint8: case DataType::Type::kInt8: - case DataType::Type::kInt16: case DataType::Type::kInt32: case DataType::Type::kFloat32: case DataType::Type::kInt64: diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc index 7b66b17983..62b6c4ea01 100644 --- a/compiler/optimizing/code_generator_vector_arm_vixl.cc +++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc @@ -854,6 +854,14 @@ void InstructionCodeGeneratorARMVIXL::VisitVecSADAccumulate(HVecSADAccumulate* i } } +void LocationsBuilderARMVIXL::VisitVecDotProd(HVecDotProd* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecDotProd(HVecDotProd* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + // Return whether the vector memory access operation is guaranteed to be word-aligned (ARM word // size equals to 4). static bool IsWordAligned(HVecMemoryOperation* instruction) { diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc index df0e1485d6..24f4fb2d7b 100644 --- a/compiler/optimizing/code_generator_vector_mips.cc +++ b/compiler/optimizing/code_generator_vector_mips.cc @@ -1274,6 +1274,14 @@ void InstructionCodeGeneratorMIPS::VisitVecSADAccumulate(HVecSADAccumulate* inst } } +void LocationsBuilderMIPS::VisitVecDotProd(HVecDotProd* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void InstructionCodeGeneratorMIPS::VisitVecDotProd(HVecDotProd* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + // Helper to set up locations for vector memory operations. static void CreateVecMemLocations(ArenaAllocator* allocator, HVecMemoryOperation* instruction, diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc index de354b63a1..972c49ebb1 100644 --- a/compiler/optimizing/code_generator_vector_mips64.cc +++ b/compiler/optimizing/code_generator_vector_mips64.cc @@ -1272,6 +1272,14 @@ void InstructionCodeGeneratorMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* in } } +void LocationsBuilderMIPS64::VisitVecDotProd(HVecDotProd* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void InstructionCodeGeneratorMIPS64::VisitVecDotProd(HVecDotProd* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + // Helper to set up locations for vector memory operations. 
static void CreateVecMemLocations(ArenaAllocator* allocator, HVecMemoryOperation* instruction, diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc index 086ae07a06..c52ecc77c5 100644 --- a/compiler/optimizing/code_generator_vector_x86.cc +++ b/compiler/optimizing/code_generator_vector_x86.cc @@ -1143,6 +1143,14 @@ void InstructionCodeGeneratorX86::VisitVecSADAccumulate(HVecSADAccumulate* instr LOG(FATAL) << "No SIMD for " << instruction->GetId(); } +void LocationsBuilderX86::VisitVecDotProd(HVecDotProd* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void InstructionCodeGeneratorX86::VisitVecDotProd(HVecDotProd* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + // Helper to set up locations for vector memory operations. static void CreateVecMemLocations(ArenaAllocator* allocator, HVecMemoryOperation* instruction, @@ -1205,6 +1213,7 @@ void InstructionCodeGeneratorX86::VisitVecLoad(HVecLoad* instruction) { XmmRegister reg = locations->Out().AsFpuRegister<XmmRegister>(); bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16); switch (instruction->GetPackedType()) { + case DataType::Type::kInt16: // (short) s.charAt(.) can yield HVecLoad/Int16/StringCharAt. case DataType::Type::kUint16: DCHECK_EQ(8u, instruction->GetVectorLength()); // Special handling of compressed/uncompressed string load. @@ -1232,7 +1241,6 @@ void InstructionCodeGeneratorX86::VisitVecLoad(HVecLoad* instruction) { case DataType::Type::kBool: case DataType::Type::kUint8: case DataType::Type::kInt8: - case DataType::Type::kInt16: case DataType::Type::kInt32: case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc index 4d31ab68d1..87d0106c3e 100644 --- a/compiler/optimizing/code_generator_vector_x86_64.cc +++ b/compiler/optimizing/code_generator_vector_x86_64.cc @@ -1116,6 +1116,14 @@ void InstructionCodeGeneratorX86_64::VisitVecSADAccumulate(HVecSADAccumulate* in LOG(FATAL) << "No SIMD for " << instruction->GetId(); } +void LocationsBuilderX86_64::VisitVecDotProd(HVecDotProd* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void InstructionCodeGeneratorX86_64::VisitVecDotProd(HVecDotProd* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + // Helper to set up locations for vector memory operations. static void CreateVecMemLocations(ArenaAllocator* allocator, HVecMemoryOperation* instruction, @@ -1178,6 +1186,7 @@ void InstructionCodeGeneratorX86_64::VisitVecLoad(HVecLoad* instruction) { XmmRegister reg = locations->Out().AsFpuRegister<XmmRegister>(); bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16); switch (instruction->GetPackedType()) { + case DataType::Type::kInt16: // (short) s.charAt(.) can yield HVecLoad/Int16/StringCharAt. case DataType::Type::kUint16: DCHECK_EQ(8u, instruction->GetVectorLength()); // Special handling of compressed/uncompressed string load. 
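Stepping back to the ARM64 VisitVecDotProd lowering introduced above: with the Armv8.4-A SDOT/UDOT instructions not yet available (hence the TODOs), it emulates the dot product with widening multiplies (Smull/Umull and their "2" high-half forms) and widening adds (Saddw/Uaddw). A scalar model of the signed 16 x int8 case, under the lane mapping those instructions imply:

#include <cstddef>
#include <cstdint>

// Scalar model of the signed byte path: Smull/Smull2 widen the byte products
// to 16 bits, then Saddw/Saddw2 widen-add them into four 32-bit accumulator
// lanes. The sequence folds products with a stride of four into each lane, so
// acc[j] gathers left[j + 4k] * right[j + 4k]; the grand total across
// acc[0..3] is the full 16-element dot product, which is all a horizontal
// reduction consumer sees.
void DotProdAccumulateS8(int32_t acc[4], const int8_t left[16], const int8_t right[16]) {
  for (std::size_t j = 0; j < 4; ++j) {
    for (std::size_t k = 0; k < 4; ++k) {
      int16_t product = static_cast<int16_t>(static_cast<int16_t>(left[j + 4 * k]) *
                                             static_cast<int16_t>(right[j + 4 * k]));
      acc[j] += product;  // every int8*int8 product fits in 16 bits
    }
  }
}

The int16 path needs no temporary because Umlal/Smlal already multiply-accumulate four widened halfword products per instruction; the other backends simply reject HVecDotProd for now, which is why their visitors LOG(FATAL).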
@@ -1205,7 +1214,6 @@ void InstructionCodeGeneratorX86_64::VisitVecLoad(HVecLoad* instruction) { case DataType::Type::kBool: case DataType::Type::kUint8: case DataType::Type::kInt8: - case DataType::Type::kInt16: case DataType::Type::kInt32: case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index df00ec7d30..39cbe5e850 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -72,7 +72,7 @@ class NullCheckSlowPathX86 : public SlowPathCode { public: explicit NullCheckSlowPathX86(HNullCheck* instruction) : SlowPathCode(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); __ Bind(GetEntryLabel()); if (instruction_->CanThrowIntoCatchBlock()) { @@ -86,9 +86,9 @@ class NullCheckSlowPathX86 : public SlowPathCode { CheckEntrypointTypes<kQuickThrowNullPointer, void, void>(); } - bool IsFatal() const OVERRIDE { return true; } + bool IsFatal() const override { return true; } - const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86"; } + const char* GetDescription() const override { return "NullCheckSlowPathX86"; } private: DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86); @@ -98,16 +98,16 @@ class DivZeroCheckSlowPathX86 : public SlowPathCode { public: explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : SlowPathCode(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); __ Bind(GetEntryLabel()); x86_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes<kQuickThrowDivZero, void, void>(); } - bool IsFatal() const OVERRIDE { return true; } + bool IsFatal() const override { return true; } - const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86"; } + const char* GetDescription() const override { return "DivZeroCheckSlowPathX86"; } private: DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86); @@ -118,7 +118,7 @@ class DivRemMinusOneSlowPathX86 : public SlowPathCode { DivRemMinusOneSlowPathX86(HInstruction* instruction, Register reg, bool is_div) : SlowPathCode(instruction), reg_(reg), is_div_(is_div) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { __ Bind(GetEntryLabel()); if (is_div_) { __ negl(reg_); @@ -128,7 +128,7 @@ class DivRemMinusOneSlowPathX86 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "DivRemMinusOneSlowPathX86"; } + const char* GetDescription() const override { return "DivRemMinusOneSlowPathX86"; } private: Register reg_; @@ -140,7 +140,7 @@ class BoundsCheckSlowPathX86 : public SlowPathCode { public: explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : SlowPathCode(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); __ Bind(GetEntryLabel()); @@ -187,9 +187,9 @@ class BoundsCheckSlowPathX86 : public SlowPathCode { CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>(); } - 
bool IsFatal() const OVERRIDE { return true; } + bool IsFatal() const override { return true; } - const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86"; } + const char* GetDescription() const override { return "BoundsCheckSlowPathX86"; } private: DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86); @@ -200,7 +200,7 @@ class SuspendCheckSlowPathX86 : public SlowPathCode { SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor) : SlowPathCode(instruction), successor_(successor) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); __ Bind(GetEntryLabel()); @@ -224,7 +224,7 @@ class SuspendCheckSlowPathX86 : public SlowPathCode { return successor_; } - const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86"; } + const char* GetDescription() const override { return "SuspendCheckSlowPathX86"; } private: HBasicBlock* const successor_; @@ -237,7 +237,7 @@ class LoadStringSlowPathX86 : public SlowPathCode { public: explicit LoadStringSlowPathX86(HLoadString* instruction): SlowPathCode(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); @@ -256,7 +256,7 @@ class LoadStringSlowPathX86 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86"; } + const char* GetDescription() const override { return "LoadStringSlowPathX86"; } private: DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86); @@ -270,7 +270,7 @@ class LoadClassSlowPathX86 : public SlowPathCode { DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_); } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); Location out = locations->Out(); const uint32_t dex_pc = instruction_->GetDexPc(); @@ -308,7 +308,7 @@ class LoadClassSlowPathX86 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathX86"; } + const char* GetDescription() const override { return "LoadClassSlowPathX86"; } private: // The class this slow path will load. 
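Most hunks in the remainder of this change are one mechanical substitution: ART's OVERRIDE macro (presumably just #define OVERRIDE override left over from pre-C++11 tooling; the macro definition is an assumption, as it is not part of this diff) gives way to the plain C++11 keyword. The generated code is identical; what the keyword buys is a compile-time check of the virtual contract, as in this standalone sketch:

  struct SlowPathBase {
    virtual const char* GetDescription() const { return "SlowPathBase"; }
    virtual ~SlowPathBase() {}
  };

  struct BoundsCheckSlowPath : SlowPathBase {
    // With 'override', dropping the trailing 'const' (or any other signature
    // mismatch) is a compile error instead of a silently added new virtual.
    const char* GetDescription() const override { return "BoundsCheckSlowPath"; }
  };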
@@ -322,7 +322,7 @@ class TypeCheckSlowPathX86 : public SlowPathCode { TypeCheckSlowPathX86(HInstruction* instruction, bool is_fatal) : SlowPathCode(instruction), is_fatal_(is_fatal) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); DCHECK(instruction_->IsCheckCast() || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); @@ -375,8 +375,8 @@ class TypeCheckSlowPathX86 : public SlowPathCode { } } - const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86"; } - bool IsFatal() const OVERRIDE { return is_fatal_; } + const char* GetDescription() const override { return "TypeCheckSlowPathX86"; } + bool IsFatal() const override { return is_fatal_; } private: const bool is_fatal_; @@ -389,7 +389,7 @@ class DeoptimizationSlowPathX86 : public SlowPathCode { explicit DeoptimizationSlowPathX86(HDeoptimize* instruction) : SlowPathCode(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); __ Bind(GetEntryLabel()); LocationSummary* locations = instruction_->GetLocations(); @@ -402,7 +402,7 @@ class DeoptimizationSlowPathX86 : public SlowPathCode { CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>(); } - const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86"; } + const char* GetDescription() const override { return "DeoptimizationSlowPathX86"; } private: DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86); @@ -412,7 +412,7 @@ class ArraySetSlowPathX86 : public SlowPathCode { public: explicit ArraySetSlowPathX86(HInstruction* instruction) : SlowPathCode(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); @@ -443,7 +443,7 @@ class ArraySetSlowPathX86 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86"; } + const char* GetDescription() const override { return "ArraySetSlowPathX86"; } private: DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86); @@ -471,9 +471,9 @@ class ReadBarrierMarkSlowPathX86 : public SlowPathCode { DCHECK(kEmitCompilerReadBarrier); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathX86"; } + const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86"; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); Register ref_reg = ref_.AsRegister<Register>(); DCHECK(locations->CanCall()); @@ -558,9 +558,9 @@ class ReadBarrierMarkAndUpdateFieldSlowPathX86 : public SlowPathCode { DCHECK(kEmitCompilerReadBarrier); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; } + const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); Register ref_reg = ref_.AsRegister<Register>(); DCHECK(locations->CanCall()); @@ -724,7 +724,7 @@ class 
ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode { DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); LocationSummary* locations = instruction_->GetLocations(); Register reg_out = out_.AsRegister<Register>(); @@ -843,7 +843,7 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathX86"; } + const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathX86"; } private: Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) { @@ -883,7 +883,7 @@ class ReadBarrierForRootSlowPathX86 : public SlowPathCode { DCHECK(kEmitCompilerReadBarrier); } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); Register reg_out = out_.AsRegister<Register>(); DCHECK(locations->CanCall()); @@ -909,7 +909,7 @@ class ReadBarrierForRootSlowPathX86 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86"; } + const char* GetDescription() const override { return "ReadBarrierForRootSlowPathX86"; } private: const Location out_; @@ -4785,7 +4785,7 @@ void CodeGeneratorX86::GenerateMemoryBarrier(MemBarrierKind kind) { HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOrDirectDispatch( const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, - HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) { + ArtMethod* method ATTRIBUTE_UNUSED) { return desired_dispatch_info; } @@ -8100,7 +8100,7 @@ class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenera HX86ComputeBaseMethodAddress* base_method_address_; private: - void Process(const MemoryRegion& region, int pos) OVERRIDE { + void Process(const MemoryRegion& region, int pos) override { // Patch the correct offset for the instruction. The place to patch is the // last 4 bytes of the instruction. 
// The value to patch is the distance from the offset in the constant area @@ -8301,7 +8301,7 @@ void CodeGeneratorX86::PatchJitRootUse(uint8_t* code, uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment; uintptr_t address = reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>); - typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t; + using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t; reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] = dchecked_integral_cast<uint32_t>(address); } diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h index cb58e920ea..93b0461975 100644 --- a/compiler/optimizing/code_generator_x86.h +++ b/compiler/optimizing/code_generator_x86.h @@ -83,9 +83,9 @@ class InvokeDexCallingConventionVisitorX86 : public InvokeDexCallingConventionVi InvokeDexCallingConventionVisitorX86() {} virtual ~InvokeDexCallingConventionVisitorX86() {} - Location GetNextLocation(DataType::Type type) OVERRIDE; - Location GetReturnLocation(DataType::Type type) const OVERRIDE; - Location GetMethodLocation() const OVERRIDE; + Location GetNextLocation(DataType::Type type) override; + Location GetReturnLocation(DataType::Type type) const override; + Location GetMethodLocation() const override; private: InvokeDexCallingConvention calling_convention; @@ -97,18 +97,18 @@ class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention { public: FieldAccessCallingConventionX86() {} - Location GetObjectLocation() const OVERRIDE { + Location GetObjectLocation() const override { return Location::RegisterLocation(ECX); } - Location GetFieldIndexLocation() const OVERRIDE { + Location GetFieldIndexLocation() const override { return Location::RegisterLocation(EAX); } - Location GetReturnLocation(DataType::Type type) const OVERRIDE { + Location GetReturnLocation(DataType::Type type) const override { return DataType::Is64BitType(type) ? Location::RegisterPairLocation(EAX, EDX) : Location::RegisterLocation(EAX); } - Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE { + Location GetSetValueLocation(DataType::Type type, bool is_instance) const override { return DataType::Is64BitType(type) ? (is_instance ? Location::RegisterPairLocation(EDX, EBX) @@ -117,7 +117,7 @@ class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention { ? 
Location::RegisterLocation(EDX) : Location::RegisterLocation(ECX)); } - Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override { return Location::FpuRegisterLocation(XMM0); } @@ -130,10 +130,10 @@ class ParallelMoveResolverX86 : public ParallelMoveResolverWithSwap { ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen) : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {} - void EmitMove(size_t index) OVERRIDE; - void EmitSwap(size_t index) OVERRIDE; - void SpillScratch(int reg) OVERRIDE; - void RestoreScratch(int reg) OVERRIDE; + void EmitMove(size_t index) override; + void EmitSwap(size_t index) override; + void SpillScratch(int reg) override; + void RestoreScratch(int reg) override; X86Assembler* GetAssembler() const; @@ -155,14 +155,14 @@ class LocationsBuilderX86 : public HGraphVisitor { : HGraphVisitor(graph), codegen_(codegen) {} #define DECLARE_VISIT_INSTRUCTION(name, super) \ - void Visit##name(H##name* instr) OVERRIDE; + void Visit##name(H##name* instr) override; FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION) FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION) #undef DECLARE_VISIT_INSTRUCTION - void VisitInstruction(HInstruction* instruction) OVERRIDE { + void VisitInstruction(HInstruction* instruction) override { LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() << " (id " << instruction->GetId() << ")"; } @@ -186,14 +186,14 @@ class InstructionCodeGeneratorX86 : public InstructionCodeGenerator { InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen); #define DECLARE_VISIT_INSTRUCTION(name, super) \ - void Visit##name(H##name* instr) OVERRIDE; + void Visit##name(H##name* instr) override; FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION) FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION) #undef DECLARE_VISIT_INSTRUCTION - void VisitInstruction(HInstruction* instruction) OVERRIDE { + void VisitInstruction(HInstruction* instruction) override { LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() << " (id " << instruction->GetId() << ")"; } @@ -320,23 +320,23 @@ class CodeGeneratorX86 : public CodeGenerator { OptimizingCompilerStats* stats = nullptr); virtual ~CodeGeneratorX86() {} - void GenerateFrameEntry() OVERRIDE; - void GenerateFrameExit() OVERRIDE; - void Bind(HBasicBlock* block) OVERRIDE; - void MoveConstant(Location destination, int32_t value) OVERRIDE; - void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE; - void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE; + void GenerateFrameEntry() override; + void GenerateFrameExit() override; + void Bind(HBasicBlock* block) override; + void MoveConstant(Location destination, int32_t value) override; + void MoveLocation(Location dst, Location src, DataType::Type dst_type) override; + void AddLocationAsTemp(Location location, LocationSummary* locations) override; - size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; - size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; - size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; - size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override; + size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override; + size_t 
SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override; + size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override; // Generate code to invoke a runtime entry point. void InvokeRuntime(QuickEntrypointEnum entrypoint, HInstruction* instruction, uint32_t dex_pc, - SlowPathCode* slow_path = nullptr) OVERRIDE; + SlowPathCode* slow_path = nullptr) override; // Generate code to invoke a runtime entry point, but do not record // PC-related information in a stack map. @@ -346,46 +346,46 @@ class CodeGeneratorX86 : public CodeGenerator { void GenerateInvokeRuntime(int32_t entry_point_offset); - size_t GetWordSize() const OVERRIDE { + size_t GetWordSize() const override { return kX86WordSize; } - size_t GetFloatingPointSpillSlotSize() const OVERRIDE { + size_t GetFloatingPointSpillSlotSize() const override { return GetGraph()->HasSIMD() ? 4 * kX86WordSize // 16 bytes == 4 words for each spill : 2 * kX86WordSize; // 8 bytes == 2 words for each spill } - HGraphVisitor* GetLocationBuilder() OVERRIDE { + HGraphVisitor* GetLocationBuilder() override { return &location_builder_; } - HGraphVisitor* GetInstructionVisitor() OVERRIDE { + HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; } - X86Assembler* GetAssembler() OVERRIDE { + X86Assembler* GetAssembler() override { return &assembler_; } - const X86Assembler& GetAssembler() const OVERRIDE { + const X86Assembler& GetAssembler() const override { return assembler_; } - uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE { + uintptr_t GetAddressOf(HBasicBlock* block) override { return GetLabelOf(block)->Position(); } - void SetupBlockedRegisters() const OVERRIDE; + void SetupBlockedRegisters() const override; - void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE; - void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE; + void DumpCoreRegister(std::ostream& stream, int reg) const override; + void DumpFloatingPointRegister(std::ostream& stream, int reg) const override; - ParallelMoveResolverX86* GetMoveResolver() OVERRIDE { + ParallelMoveResolverX86* GetMoveResolver() override { return &move_resolver_; } - InstructionSet GetInstructionSet() const OVERRIDE { + InstructionSet GetInstructionSet() const override { return InstructionSet::kX86; } @@ -399,25 +399,25 @@ class CodeGeneratorX86 : public CodeGenerator { // Check if the desired_string_load_kind is supported. If it is, return it, // otherwise return a fall-back kind that should be used instead. HLoadString::LoadKind GetSupportedLoadStringKind( - HLoadString::LoadKind desired_string_load_kind) OVERRIDE; + HLoadString::LoadKind desired_string_load_kind) override; // Check if the desired_class_load_kind is supported. If it is, return it, // otherwise return a fall-back kind that should be used instead. HLoadClass::LoadKind GetSupportedLoadClassKind( - HLoadClass::LoadKind desired_class_load_kind) OVERRIDE; + HLoadClass::LoadKind desired_class_load_kind) override; // Check if the desired_dispatch_info is supported. If it is, return it, // otherwise return a fall-back info that should be used instead. HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch( const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, - HInvokeStaticOrDirect* invoke) OVERRIDE; + ArtMethod* method) override; // Generate a call to a static or direct method. 
void GenerateStaticOrDirectCall( - HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; + HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; // Generate a call to a virtual method. void GenerateVirtualCall( - HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; + HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; void RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address, uint32_t intrinsic_data); @@ -442,16 +442,16 @@ class CodeGeneratorX86 : public CodeGenerator { dex::TypeIndex type_index, Handle<mirror::Class> handle); - void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE; + void MoveFromReturnRegister(Location trg, DataType::Type type) override; // Emit linker patches. - void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE; + void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override; void PatchJitRootUse(uint8_t* code, const uint8_t* roots_data, const PatchInfo<Label>& info, uint64_t index_in_table) const; - void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE; + void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override; // Emit a write barrier. void MarkGCCard(Register temp, @@ -466,15 +466,15 @@ class CodeGeneratorX86 : public CodeGenerator { return CommonGetLabelOf<Label>(block_labels_, block); } - void Initialize() OVERRIDE { + void Initialize() override { block_labels_ = CommonInitializeLabels<Label>(); } - bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE { + bool NeedsTwoRegisters(DataType::Type type) const override { return type == DataType::Type::kInt64; } - bool ShouldSplitLongMoves() const OVERRIDE { return true; } + bool ShouldSplitLongMoves() const override { return true; } Label* GetFrameEntryLabel() { return &frame_entry_label_; } @@ -513,7 +513,7 @@ class CodeGeneratorX86 : public CodeGenerator { Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value); - void Finalize(CodeAllocator* allocator) OVERRIDE; + void Finalize(CodeAllocator* allocator) override; // Fast path implementation of ReadBarrier::Barrier for a heap // reference field load when Baker's read barriers are used. @@ -609,9 +609,9 @@ class CodeGeneratorX86 : public CodeGenerator { } } - void GenerateNop() OVERRIDE; - void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE; - void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE; + void GenerateNop() override; + void GenerateImplicitNullCheck(HNullCheck* instruction) override; + void GenerateExplicitNullCheck(HNullCheck* instruction) override; // When we don't know the proper offset for the value, we use kDummy32BitOffset. // The correct value will be inserted when processing Assembler fixups. 
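Besides the keyword substitution, GetSupportedInvokeStaticOrDirectDispatch on the x86 code generator (and on x86_64 below) now receives the resolved ArtMethod* instead of the whole HInvokeStaticOrDirect* node; on these back-ends the parameter remains unused and every desired dispatch kind is still returned unchanged. A hedged sketch of the presumed caller-side adjustment; the accessor name is an assumption, not shown in this diff:

  // Hypothetical call site: the dispatch query now needs only the method.
  HInvokeStaticOrDirect::DispatchInfo dispatch_info =
      codegen->GetSupportedInvokeStaticOrDirectDispatch(
          desired_dispatch_info,
          invoke->GetResolvedMethod());  // assumed accessor, not in this diff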
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index ae2a000d07..e458dfffb4 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -71,7 +71,7 @@ class NullCheckSlowPathX86_64 : public SlowPathCode { public: explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : SlowPathCode(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); __ Bind(GetEntryLabel()); if (instruction_->CanThrowIntoCatchBlock()) { @@ -85,9 +85,9 @@ class NullCheckSlowPathX86_64 : public SlowPathCode { CheckEntrypointTypes<kQuickThrowNullPointer, void, void>(); } - bool IsFatal() const OVERRIDE { return true; } + bool IsFatal() const override { return true; } - const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86_64"; } + const char* GetDescription() const override { return "NullCheckSlowPathX86_64"; } private: DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64); @@ -97,16 +97,16 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCode { public: explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : SlowPathCode(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); __ Bind(GetEntryLabel()); x86_64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes<kQuickThrowDivZero, void, void>(); } - bool IsFatal() const OVERRIDE { return true; } + bool IsFatal() const override { return true; } - const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86_64"; } + const char* GetDescription() const override { return "DivZeroCheckSlowPathX86_64"; } private: DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86_64); @@ -117,7 +117,7 @@ class DivRemMinusOneSlowPathX86_64 : public SlowPathCode { DivRemMinusOneSlowPathX86_64(HInstruction* at, Register reg, DataType::Type type, bool is_div) : SlowPathCode(at), cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { __ Bind(GetEntryLabel()); if (type_ == DataType::Type::kInt32) { if (is_div_) { @@ -137,7 +137,7 @@ class DivRemMinusOneSlowPathX86_64 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "DivRemMinusOneSlowPathX86_64"; } + const char* GetDescription() const override { return "DivRemMinusOneSlowPathX86_64"; } private: const CpuRegister cpu_reg_; @@ -151,7 +151,7 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCode { SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor) : SlowPathCode(instruction), successor_(successor) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); __ Bind(GetEntryLabel()); @@ -175,7 +175,7 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCode { return successor_; } - const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86_64"; } + const char* GetDescription() const override { return "SuspendCheckSlowPathX86_64"; 
} private: HBasicBlock* const successor_; @@ -189,7 +189,7 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode { explicit BoundsCheckSlowPathX86_64(HBoundsCheck* instruction) : SlowPathCode(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); __ Bind(GetEntryLabel()); @@ -236,9 +236,9 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode { CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>(); } - bool IsFatal() const OVERRIDE { return true; } + bool IsFatal() const override { return true; } - const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86_64"; } + const char* GetDescription() const override { return "BoundsCheckSlowPathX86_64"; } private: DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64); @@ -252,7 +252,7 @@ class LoadClassSlowPathX86_64 : public SlowPathCode { DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_); } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); Location out = locations->Out(); const uint32_t dex_pc = instruction_->GetDexPc(); @@ -291,7 +291,7 @@ class LoadClassSlowPathX86_64 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathX86_64"; } + const char* GetDescription() const override { return "LoadClassSlowPathX86_64"; } private: // The class this slow path will load. @@ -304,7 +304,7 @@ class LoadStringSlowPathX86_64 : public SlowPathCode { public: explicit LoadStringSlowPathX86_64(HLoadString* instruction) : SlowPathCode(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); @@ -326,7 +326,7 @@ class LoadStringSlowPathX86_64 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86_64"; } + const char* GetDescription() const override { return "LoadStringSlowPathX86_64"; } private: DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64); @@ -337,7 +337,7 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode { TypeCheckSlowPathX86_64(HInstruction* instruction, bool is_fatal) : SlowPathCode(instruction), is_fatal_(is_fatal) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); uint32_t dex_pc = instruction_->GetDexPc(); DCHECK(instruction_->IsCheckCast() @@ -385,9 +385,9 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode { } } - const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86_64"; } + const char* GetDescription() const override { return "TypeCheckSlowPathX86_64"; } - bool IsFatal() const OVERRIDE { return is_fatal_; } + bool IsFatal() const override { return is_fatal_; } private: const bool is_fatal_; @@ -400,7 +400,7 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCode { explicit DeoptimizationSlowPathX86_64(HDeoptimize* instruction) : SlowPathCode(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) 
override { CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); __ Bind(GetEntryLabel()); LocationSummary* locations = instruction_->GetLocations(); @@ -413,7 +413,7 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCode { CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>(); } - const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; } + const char* GetDescription() const override { return "DeoptimizationSlowPathX86_64"; } private: DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86_64); @@ -423,7 +423,7 @@ class ArraySetSlowPathX86_64 : public SlowPathCode { public: explicit ArraySetSlowPathX86_64(HInstruction* instruction) : SlowPathCode(instruction) {} - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); @@ -454,7 +454,7 @@ class ArraySetSlowPathX86_64 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86_64"; } + const char* GetDescription() const override { return "ArraySetSlowPathX86_64"; } private: DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64); @@ -482,9 +482,9 @@ class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode { DCHECK(kEmitCompilerReadBarrier); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathX86_64"; } + const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86_64"; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>(); Register ref_reg = ref_cpu_reg.AsRegister(); @@ -573,11 +573,11 @@ class ReadBarrierMarkAndUpdateFieldSlowPathX86_64 : public SlowPathCode { DCHECK(kEmitCompilerReadBarrier); } - const char* GetDescription() const OVERRIDE { + const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathX86_64"; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>(); Register ref_reg = ref_cpu_reg.AsRegister(); @@ -745,7 +745,7 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode { DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); LocationSummary* locations = instruction_->GetLocations(); CpuRegister reg_out = out_.AsRegister<CpuRegister>(); @@ -864,7 +864,7 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { + const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathX86_64"; } @@ -906,7 +906,7 @@ class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode { DCHECK(kEmitCompilerReadBarrier); } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); DCHECK(locations->CanCall()); 
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg())); @@ -931,7 +931,7 @@ class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86_64"; } + const char* GetDescription() const override { return "ReadBarrierForRootSlowPathX86_64"; } private: const Location out_; @@ -978,7 +978,7 @@ inline Condition X86_64FPCondition(IfCondition cond) { HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStaticOrDirectDispatch( const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, - HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) { + ArtMethod* method ATTRIBUTE_UNUSED) { return desired_dispatch_info; } @@ -7395,7 +7395,7 @@ class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenera CodeGeneratorX86_64* codegen_; private: - void Process(const MemoryRegion& region, int pos) OVERRIDE { + void Process(const MemoryRegion& region, int pos) override { // Patch the correct offset for the instruction. We use the address of the // 'next' instruction, which is 'pos' (patch the 4 bytes before). int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_; @@ -7542,7 +7542,7 @@ void CodeGeneratorX86_64::PatchJitRootUse(uint8_t* code, uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment; uintptr_t address = reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>); - typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t; + using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t; reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] = dchecked_integral_cast<uint32_t>(address); } diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h index 5ba7f9cb71..1e7139718b 100644 --- a/compiler/optimizing/code_generator_x86_64.h +++ b/compiler/optimizing/code_generator_x86_64.h @@ -83,22 +83,22 @@ class FieldAccessCallingConventionX86_64 : public FieldAccessCallingConvention { public: FieldAccessCallingConventionX86_64() {} - Location GetObjectLocation() const OVERRIDE { + Location GetObjectLocation() const override { return Location::RegisterLocation(RSI); } - Location GetFieldIndexLocation() const OVERRIDE { + Location GetFieldIndexLocation() const override { return Location::RegisterLocation(RDI); } - Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override { return Location::RegisterLocation(RAX); } Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED, bool is_instance) - const OVERRIDE { + const override { return is_instance ? 
Location::RegisterLocation(RDX) : Location::RegisterLocation(RSI); } - Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override { return Location::FpuRegisterLocation(XMM0); } @@ -112,9 +112,9 @@ class InvokeDexCallingConventionVisitorX86_64 : public InvokeDexCallingConventio InvokeDexCallingConventionVisitorX86_64() {} virtual ~InvokeDexCallingConventionVisitorX86_64() {} - Location GetNextLocation(DataType::Type type) OVERRIDE; - Location GetReturnLocation(DataType::Type type) const OVERRIDE; - Location GetMethodLocation() const OVERRIDE; + Location GetNextLocation(DataType::Type type) override; + Location GetReturnLocation(DataType::Type type) const override; + Location GetMethodLocation() const override; private: InvokeDexCallingConvention calling_convention; @@ -129,10 +129,10 @@ class ParallelMoveResolverX86_64 : public ParallelMoveResolverWithSwap { ParallelMoveResolverX86_64(ArenaAllocator* allocator, CodeGeneratorX86_64* codegen) : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {} - void EmitMove(size_t index) OVERRIDE; - void EmitSwap(size_t index) OVERRIDE; - void SpillScratch(int reg) OVERRIDE; - void RestoreScratch(int reg) OVERRIDE; + void EmitMove(size_t index) override; + void EmitSwap(size_t index) override; + void SpillScratch(int reg) override; + void RestoreScratch(int reg) override; X86_64Assembler* GetAssembler() const; @@ -157,14 +157,14 @@ class LocationsBuilderX86_64 : public HGraphVisitor { : HGraphVisitor(graph), codegen_(codegen) {} #define DECLARE_VISIT_INSTRUCTION(name, super) \ - void Visit##name(H##name* instr) OVERRIDE; + void Visit##name(H##name* instr) override; FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION) FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION) #undef DECLARE_VISIT_INSTRUCTION - void VisitInstruction(HInstruction* instruction) OVERRIDE { + void VisitInstruction(HInstruction* instruction) override { LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() << " (id " << instruction->GetId() << ")"; } @@ -188,14 +188,14 @@ class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator { InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen); #define DECLARE_VISIT_INSTRUCTION(name, super) \ - void Visit##name(H##name* instr) OVERRIDE; + void Visit##name(H##name* instr) override; FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION) FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION) #undef DECLARE_VISIT_INSTRUCTION - void VisitInstruction(HInstruction* instruction) OVERRIDE { + void VisitInstruction(HInstruction* instruction) override { LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() << " (id " << instruction->GetId() << ")"; } @@ -300,23 +300,23 @@ class CodeGeneratorX86_64 : public CodeGenerator { OptimizingCompilerStats* stats = nullptr); virtual ~CodeGeneratorX86_64() {} - void GenerateFrameEntry() OVERRIDE; - void GenerateFrameExit() OVERRIDE; - void Bind(HBasicBlock* block) OVERRIDE; - void MoveConstant(Location destination, int32_t value) OVERRIDE; - void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE; - void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE; + void GenerateFrameEntry() override; + void GenerateFrameExit() override; + void Bind(HBasicBlock* block) override; + void MoveConstant(Location destination, int32_t value) override; + void 
MoveLocation(Location dst, Location src, DataType::Type dst_type) override; + void AddLocationAsTemp(Location location, LocationSummary* locations) override; - size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; - size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; - size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; - size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override; + size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override; + size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override; + size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override; // Generate code to invoke a runtime entry point. void InvokeRuntime(QuickEntrypointEnum entrypoint, HInstruction* instruction, uint32_t dex_pc, - SlowPathCode* slow_path = nullptr) OVERRIDE; + SlowPathCode* slow_path = nullptr) override; // Generate code to invoke a runtime entry point, but do not record // PC-related information in a stack map. @@ -326,46 +326,46 @@ class CodeGeneratorX86_64 : public CodeGenerator { void GenerateInvokeRuntime(int32_t entry_point_offset); - size_t GetWordSize() const OVERRIDE { + size_t GetWordSize() const override { return kX86_64WordSize; } - size_t GetFloatingPointSpillSlotSize() const OVERRIDE { + size_t GetFloatingPointSpillSlotSize() const override { return GetGraph()->HasSIMD() ? 2 * kX86_64WordSize // 16 bytes == 2 x86_64 words for each spill : 1 * kX86_64WordSize; // 8 bytes == 1 x86_64 words for each spill } - HGraphVisitor* GetLocationBuilder() OVERRIDE { + HGraphVisitor* GetLocationBuilder() override { return &location_builder_; } - HGraphVisitor* GetInstructionVisitor() OVERRIDE { + HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; } - X86_64Assembler* GetAssembler() OVERRIDE { + X86_64Assembler* GetAssembler() override { return &assembler_; } - const X86_64Assembler& GetAssembler() const OVERRIDE { + const X86_64Assembler& GetAssembler() const override { return assembler_; } - ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE { + ParallelMoveResolverX86_64* GetMoveResolver() override { return &move_resolver_; } - uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE { + uintptr_t GetAddressOf(HBasicBlock* block) override { return GetLabelOf(block)->Position(); } - void SetupBlockedRegisters() const OVERRIDE; - void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE; - void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE; - void Finalize(CodeAllocator* allocator) OVERRIDE; + void SetupBlockedRegisters() const override; + void DumpCoreRegister(std::ostream& stream, int reg) const override; + void DumpFloatingPointRegister(std::ostream& stream, int reg) const override; + void Finalize(CodeAllocator* allocator) override; - InstructionSet GetInstructionSet() const OVERRIDE { + InstructionSet GetInstructionSet() const override { return InstructionSet::kX86_64; } @@ -387,34 +387,34 @@ class CodeGeneratorX86_64 : public CodeGenerator { return CommonGetLabelOf<Label>(block_labels_, block); } - void Initialize() OVERRIDE { + void Initialize() override { block_labels_ = CommonInitializeLabels<Label>(); } - bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override { return false; } // Check if the 
desired_string_load_kind is supported. If it is, return it, // otherwise return a fall-back kind that should be used instead. HLoadString::LoadKind GetSupportedLoadStringKind( - HLoadString::LoadKind desired_string_load_kind) OVERRIDE; + HLoadString::LoadKind desired_string_load_kind) override; // Check if the desired_class_load_kind is supported. If it is, return it, // otherwise return a fall-back kind that should be used instead. HLoadClass::LoadKind GetSupportedLoadClassKind( - HLoadClass::LoadKind desired_class_load_kind) OVERRIDE; + HLoadClass::LoadKind desired_class_load_kind) override; // Check if the desired_dispatch_info is supported. If it is, return it, // otherwise return a fall-back info that should be used instead. HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch( const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, - HInvokeStaticOrDirect* invoke) OVERRIDE; + ArtMethod* method) override; void GenerateStaticOrDirectCall( - HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; + HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; void GenerateVirtualCall( - HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; + HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; void RecordBootImageIntrinsicPatch(uint32_t intrinsic_data); void RecordBootImageRelRoPatch(uint32_t boot_image_offset); @@ -434,14 +434,14 @@ class CodeGeneratorX86_64 : public CodeGenerator { void LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_reference); void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset); - void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE; + void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override; void PatchJitRootUse(uint8_t* code, const uint8_t* roots_data, const PatchInfo<Label>& info, uint64_t index_in_table) const; - void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE; + void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override; // Fast path implementation of ReadBarrier::Barrier for a heap // reference field load when Baker's read barriers are used. @@ -565,7 +565,7 @@ class CodeGeneratorX86_64 : public CodeGenerator { // Store a 64 bit value into a DoubleStackSlot in the most efficient manner. void Store64BitValueToStack(Location dest, int64_t value); - void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE; + void MoveFromReturnRegister(Location trg, DataType::Type type) override; // Assign a 64 bit constant to an address. void MoveInt64ToAddress(const Address& addr_low, @@ -585,9 +585,9 @@ class CodeGeneratorX86_64 : public CodeGenerator { } } - void GenerateNop() OVERRIDE; - void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE; - void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE; + void GenerateNop() override; + void GenerateImplicitNullCheck(HNullCheck* instruction) override; + void GenerateExplicitNullCheck(HNullCheck* instruction) override; // When we don't know the proper offset for the value, we use kDummy32BitOffset. // We will fix this up in the linker later to have the right value. 
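The PatchJitRootUse hunks in both code_generator_x86.cc and code_generator_x86_64.cc above also modernize the byte-aligned alias from a typedef to an alias-declaration. The two spellings are equivalent under GCC/Clang; the alias exists so a 32-bit root address can be stored at an arbitrary offset inside emitted code without unaligned-access undefined behavior. A self-contained sketch of both spellings and the store they enable:

  #include <cstdint>

  // Old and new spellings of the same 1-byte-aligned alias.
  typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t_old;
  using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;

  void PatchWord(uint8_t* code, uint32_t code_offset, uint32_t address) {
    // Write 4 bytes at a possibly odd offset inside the code buffer.
    reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] = address;
  }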
diff --git a/compiler/optimizing/code_sinking.h b/compiler/optimizing/code_sinking.h index 5db0b6dcc5..8eb3a520c3 100644 --- a/compiler/optimizing/code_sinking.h +++ b/compiler/optimizing/code_sinking.h @@ -33,7 +33,7 @@ class CodeSinking : public HOptimization { const char* name = kCodeSinkingPassName) : HOptimization(graph, name, stats) {} - bool Run() OVERRIDE; + bool Run() override; static constexpr const char* kCodeSinkingPassName = "code_sinking"; diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc index 86687e60a9..f186191a0f 100644 --- a/compiler/optimizing/codegen_test.cc +++ b/compiler/optimizing/codegen_test.cc @@ -453,7 +453,7 @@ TEST_F(CodegenTest, NonMaterializedCondition) { ASSERT_FALSE(equal->IsEmittedAtUseSite()); graph->BuildDominatorTree(); - PrepareForRegisterAllocation(graph).Run(); + PrepareForRegisterAllocation(graph, *compiler_options_).Run(); ASSERT_TRUE(equal->IsEmittedAtUseSite()); auto hook_before_codegen = [](HGraph* graph_in) { diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h index 91811262de..0289e9c4a7 100644 --- a/compiler/optimizing/codegen_test_utils.h +++ b/compiler/optimizing/codegen_test_utils.h @@ -101,7 +101,7 @@ class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL { AddAllocatedRegister(Location::RegisterLocation(arm::R7)); } - void SetupBlockedRegisters() const OVERRIDE { + void SetupBlockedRegisters() const override { arm::CodeGeneratorARMVIXL::SetupBlockedRegisters(); blocked_core_registers_[arm::R4] = true; blocked_core_registers_[arm::R6] = false; @@ -109,7 +109,7 @@ class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL { } void MaybeGenerateMarkingRegisterCheck(int code ATTRIBUTE_UNUSED, - Location temp_loc ATTRIBUTE_UNUSED) OVERRIDE { + Location temp_loc ATTRIBUTE_UNUSED) override { // When turned on, the marking register checks in // CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck expects the // Thread Register and the Marking Register to be set to @@ -141,7 +141,7 @@ class TestCodeGeneratorARM64 : public arm64::CodeGeneratorARM64 { : arm64::CodeGeneratorARM64(graph, compiler_options) {} void MaybeGenerateMarkingRegisterCheck(int codem ATTRIBUTE_UNUSED, - Location temp_loc ATTRIBUTE_UNUSED) OVERRIDE { + Location temp_loc ATTRIBUTE_UNUSED) override { // When turned on, the marking register checks in // CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck expect the // Thread Register and the Marking Register to be set to @@ -161,7 +161,7 @@ class TestCodeGeneratorX86 : public x86::CodeGeneratorX86 { AddAllocatedRegister(Location::RegisterLocation(x86::EDI)); } - void SetupBlockedRegisters() const OVERRIDE { + void SetupBlockedRegisters() const override { x86::CodeGeneratorX86::SetupBlockedRegisters(); // ebx is a callee-save register in C, but caller-save for ART. 
blocked_core_registers_[x86::EBX] = true; @@ -183,7 +183,7 @@ class InternalCodeAllocator : public CodeAllocator { } size_t GetSize() const { return size_; } - ArrayRef<const uint8_t> GetMemory() const OVERRIDE { + ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_.get(), size_); } @@ -288,7 +288,7 @@ static void RunCodeNoCheck(CodeGenerator* codegen, { ScopedArenaAllocator local_allocator(graph->GetArenaStack()); SsaLivenessAnalysis liveness(graph, codegen, &local_allocator); - PrepareForRegisterAllocation(graph).Run(); + PrepareForRegisterAllocation(graph, codegen->GetCompilerOptions()).Run(); liveness.Analyze(); std::unique_ptr<RegisterAllocator> register_allocator = RegisterAllocator::Create(&local_allocator, codegen, liveness); diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc index bb78c2357e..09e7cabfa4 100644 --- a/compiler/optimizing/constant_folding.cc +++ b/compiler/optimizing/constant_folding.cc @@ -26,13 +26,13 @@ class HConstantFoldingVisitor : public HGraphDelegateVisitor { : HGraphDelegateVisitor(graph) {} private: - void VisitBasicBlock(HBasicBlock* block) OVERRIDE; + void VisitBasicBlock(HBasicBlock* block) override; - void VisitUnaryOperation(HUnaryOperation* inst) OVERRIDE; - void VisitBinaryOperation(HBinaryOperation* inst) OVERRIDE; + void VisitUnaryOperation(HUnaryOperation* inst) override; + void VisitBinaryOperation(HBinaryOperation* inst) override; - void VisitTypeConversion(HTypeConversion* inst) OVERRIDE; - void VisitDivZeroCheck(HDivZeroCheck* inst) OVERRIDE; + void VisitTypeConversion(HTypeConversion* inst) override; + void VisitDivZeroCheck(HDivZeroCheck* inst) override; DISALLOW_COPY_AND_ASSIGN(HConstantFoldingVisitor); }; @@ -47,24 +47,24 @@ class InstructionWithAbsorbingInputSimplifier : public HGraphVisitor { private: void VisitShift(HBinaryOperation* shift); - void VisitEqual(HEqual* instruction) OVERRIDE; - void VisitNotEqual(HNotEqual* instruction) OVERRIDE; - - void VisitAbove(HAbove* instruction) OVERRIDE; - void VisitAboveOrEqual(HAboveOrEqual* instruction) OVERRIDE; - void VisitBelow(HBelow* instruction) OVERRIDE; - void VisitBelowOrEqual(HBelowOrEqual* instruction) OVERRIDE; - - void VisitAnd(HAnd* instruction) OVERRIDE; - void VisitCompare(HCompare* instruction) OVERRIDE; - void VisitMul(HMul* instruction) OVERRIDE; - void VisitOr(HOr* instruction) OVERRIDE; - void VisitRem(HRem* instruction) OVERRIDE; - void VisitShl(HShl* instruction) OVERRIDE; - void VisitShr(HShr* instruction) OVERRIDE; - void VisitSub(HSub* instruction) OVERRIDE; - void VisitUShr(HUShr* instruction) OVERRIDE; - void VisitXor(HXor* instruction) OVERRIDE; + void VisitEqual(HEqual* instruction) override; + void VisitNotEqual(HNotEqual* instruction) override; + + void VisitAbove(HAbove* instruction) override; + void VisitAboveOrEqual(HAboveOrEqual* instruction) override; + void VisitBelow(HBelow* instruction) override; + void VisitBelowOrEqual(HBelowOrEqual* instruction) override; + + void VisitAnd(HAnd* instruction) override; + void VisitCompare(HCompare* instruction) override; + void VisitMul(HMul* instruction) override; + void VisitOr(HOr* instruction) override; + void VisitRem(HRem* instruction) override; + void VisitShl(HShl* instruction) override; + void VisitShr(HShr* instruction) override; + void VisitSub(HSub* instruction) override; + void VisitUShr(HUShr* instruction) override; + void VisitXor(HXor* instruction) override; }; diff --git a/compiler/optimizing/constant_folding.h 
b/compiler/optimizing/constant_folding.h index f4dbc805c4..72bd95b3cb 100644 --- a/compiler/optimizing/constant_folding.h +++ b/compiler/optimizing/constant_folding.h @@ -41,7 +41,7 @@ class HConstantFolding : public HOptimization { public: HConstantFolding(HGraph* graph, const char* name) : HOptimization(graph, name) {} - bool Run() OVERRIDE; + bool Run() override; static constexpr const char* kConstantFoldingPassName = "constant_folding"; diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc index 54bff22e98..3a1a9e023d 100644 --- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc +++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc @@ -34,7 +34,7 @@ class CFREVisitor : public HGraphVisitor { candidate_fence_targets_(scoped_allocator_.Adapter(kArenaAllocCFRE)), stats_(stats) {} - void VisitBasicBlock(HBasicBlock* block) OVERRIDE { + void VisitBasicBlock(HBasicBlock* block) override { // Visit all instructions in block. HGraphVisitor::VisitBasicBlock(block); @@ -43,7 +43,7 @@ class CFREVisitor : public HGraphVisitor { MergeCandidateFences(); } - void VisitConstructorFence(HConstructorFence* constructor_fence) OVERRIDE { + void VisitConstructorFence(HConstructorFence* constructor_fence) override { candidate_fences_.push_back(constructor_fence); for (size_t input_idx = 0; input_idx < constructor_fence->InputCount(); ++input_idx) { @@ -51,78 +51,78 @@ class CFREVisitor : public HGraphVisitor { } } - void VisitBoundType(HBoundType* bound_type) OVERRIDE { + void VisitBoundType(HBoundType* bound_type) override { VisitAlias(bound_type); } - void VisitNullCheck(HNullCheck* null_check) OVERRIDE { + void VisitNullCheck(HNullCheck* null_check) override { VisitAlias(null_check); } - void VisitSelect(HSelect* select) OVERRIDE { + void VisitSelect(HSelect* select) override { VisitAlias(select); } - void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE { + void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override { HInstruction* value = instruction->InputAt(1); VisitSetLocation(instruction, value); } - void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE { + void VisitStaticFieldSet(HStaticFieldSet* instruction) override { HInstruction* value = instruction->InputAt(1); VisitSetLocation(instruction, value); } - void VisitArraySet(HArraySet* instruction) OVERRIDE { + void VisitArraySet(HArraySet* instruction) override { HInstruction* value = instruction->InputAt(2); VisitSetLocation(instruction, value); } - void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) { + void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) override { // Pessimize: Merge all fences. 
MergeCandidateFences(); } - void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE { + void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override { HandleInvoke(invoke); } - void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE { + void VisitInvokeVirtual(HInvokeVirtual* invoke) override { HandleInvoke(invoke); } - void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE { + void VisitInvokeInterface(HInvokeInterface* invoke) override { HandleInvoke(invoke); } - void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE { + void VisitInvokeUnresolved(HInvokeUnresolved* invoke) override { HandleInvoke(invoke); } - void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE { + void VisitInvokePolymorphic(HInvokePolymorphic* invoke) override { HandleInvoke(invoke); } - void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE { + void VisitClinitCheck(HClinitCheck* clinit) override { HandleInvoke(clinit); } - void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) OVERRIDE { + void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) override { // Conservatively treat it as an invocation. HandleInvoke(instruction); } - void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) OVERRIDE { + void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) override { // Conservatively treat it as an invocation. HandleInvoke(instruction); } - void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) OVERRIDE { + void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) override { // Conservatively treat it as an invocation. HandleInvoke(instruction); } - void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) OVERRIDE { + void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) override { // Conservatively treat it as an invocation. 
HandleInvoke(instruction); } diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.h b/compiler/optimizing/constructor_fence_redundancy_elimination.h index 367d9f21a0..014b342258 100644 --- a/compiler/optimizing/constructor_fence_redundancy_elimination.h +++ b/compiler/optimizing/constructor_fence_redundancy_elimination.h @@ -52,7 +52,7 @@ class ConstructorFenceRedundancyElimination : public HOptimization { const char* name = kCFREPassName) : HOptimization(graph, name, stats) {} - bool Run() OVERRIDE; + bool Run() override; static constexpr const char* kCFREPassName = "constructor_fence_redundancy_elimination"; diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h index 5ac6e46003..3cbcc9e0c3 100644 --- a/compiler/optimizing/data_type.h +++ b/compiler/optimizing/data_type.h @@ -231,6 +231,21 @@ class DataType { } } + static Type ToUnsigned(Type type) { + switch (type) { + case Type::kInt8: + return Type::kUint8; + case Type::kInt16: + return Type::kUint16; + case Type::kInt32: + return Type::kUint32; + case Type::kInt64: + return Type::kUint64; + default: + return type; + } + } + static const char* PrettyDescriptor(Type type); private: diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h index 90caa53764..799721acf2 100644 --- a/compiler/optimizing/dead_code_elimination.h +++ b/compiler/optimizing/dead_code_elimination.h @@ -32,7 +32,7 @@ class HDeadCodeElimination : public HOptimization { HDeadCodeElimination(HGraph* graph, OptimizingCompilerStats* stats, const char* name) : HOptimization(graph, name, stats) {} - bool Run() OVERRIDE; + bool Run() override; static constexpr const char* kDeadCodeEliminationPassName = "dead_code_elimination"; diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc index 293c1ab3f3..63a370a47b 100644 --- a/compiler/optimizing/emit_swap_mips_test.cc +++ b/compiler/optimizing/emit_swap_mips_test.cc @@ -27,7 +27,7 @@ namespace art { class EmitSwapMipsTest : public OptimizingUnitTest { public: - void SetUp() OVERRIDE { + void SetUp() override { instruction_set_ = InstructionSet::kMips; instruction_set_features_ = MipsInstructionSetFeatures::FromCppDefines(); OptimizingUnitTest::SetUp(); @@ -46,7 +46,7 @@ class EmitSwapMipsTest : public OptimizingUnitTest { GetAssemblyHeader())); } - void TearDown() OVERRIDE { + void TearDown() override { test_helper_.reset(); codegen_.reset(); graph_ = nullptr; diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h index 3a2bb7a00c..d085609197 100644 --- a/compiler/optimizing/graph_checker.h +++ b/compiler/optimizing/graph_checker.h @@ -44,30 +44,30 @@ class GraphChecker : public HGraphDelegateVisitor { // and return value pass along the observed graph sizes. 
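The DataType::ToUnsigned helper added above is a pure mapping from each signed integral type to its unsigned counterpart, with every other type passing through unchanged; it is used later in this diff by the new HVecDotProd visualizer attribute. A standalone sketch of the same shape, with a stand-in enum replacing DataType::Type:

#include <cassert>

// Hypothetical stand-in for DataType::Type; the real enum lives in data_type.h.
enum class Type { kBool, kInt8, kUint8, kInt16, kUint16,
                  kInt32, kUint32, kInt64, kUint64, kFloat32 };

// Mirrors the shape of the new DataType::ToUnsigned(): signed integral types
// map to their unsigned counterparts, everything else passes through.
constexpr Type ToUnsigned(Type type) {
  switch (type) {
    case Type::kInt8:  return Type::kUint8;
    case Type::kInt16: return Type::kUint16;
    case Type::kInt32: return Type::kUint32;
    case Type::kInt64: return Type::kUint64;
    default:           return type;
  }
}

int main() {
  static_assert(ToUnsigned(Type::kInt16) == Type::kUint16, "signed maps to unsigned");
  static_assert(ToUnsigned(Type::kFloat32) == Type::kFloat32, "non-integral passes through");
  assert(ToUnsigned(Type::kUint8) == Type::kUint8);  // Already unsigned: unchanged.
}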
size_t Run(bool pass_change = true, size_t last_size = 0); - void VisitBasicBlock(HBasicBlock* block) OVERRIDE; - - void VisitInstruction(HInstruction* instruction) OVERRIDE; - void VisitPhi(HPhi* phi) OVERRIDE; - - void VisitBinaryOperation(HBinaryOperation* op) OVERRIDE; - void VisitBooleanNot(HBooleanNot* instruction) OVERRIDE; - void VisitBoundType(HBoundType* instruction) OVERRIDE; - void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE; - void VisitCheckCast(HCheckCast* check) OVERRIDE; - void VisitCondition(HCondition* op) OVERRIDE; - void VisitConstant(HConstant* instruction) OVERRIDE; - void VisitDeoptimize(HDeoptimize* instruction) OVERRIDE; - void VisitIf(HIf* instruction) OVERRIDE; - void VisitInstanceOf(HInstanceOf* check) OVERRIDE; - void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE; - void VisitLoadException(HLoadException* load) OVERRIDE; - void VisitNeg(HNeg* instruction) OVERRIDE; - void VisitPackedSwitch(HPackedSwitch* instruction) OVERRIDE; - void VisitReturn(HReturn* ret) OVERRIDE; - void VisitReturnVoid(HReturnVoid* ret) OVERRIDE; - void VisitSelect(HSelect* instruction) OVERRIDE; - void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE; - void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE; + void VisitBasicBlock(HBasicBlock* block) override; + + void VisitInstruction(HInstruction* instruction) override; + void VisitPhi(HPhi* phi) override; + + void VisitBinaryOperation(HBinaryOperation* op) override; + void VisitBooleanNot(HBooleanNot* instruction) override; + void VisitBoundType(HBoundType* instruction) override; + void VisitBoundsCheck(HBoundsCheck* check) override; + void VisitCheckCast(HCheckCast* check) override; + void VisitCondition(HCondition* op) override; + void VisitConstant(HConstant* instruction) override; + void VisitDeoptimize(HDeoptimize* instruction) override; + void VisitIf(HIf* instruction) override; + void VisitInstanceOf(HInstanceOf* check) override; + void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override; + void VisitLoadException(HLoadException* load) override; + void VisitNeg(HNeg* instruction) override; + void VisitPackedSwitch(HPackedSwitch* instruction) override; + void VisitReturn(HReturn* ret) override; + void VisitReturnVoid(HReturnVoid* ret) override; + void VisitSelect(HSelect* instruction) override; + void VisitTryBoundary(HTryBoundary* try_boundary) override; + void VisitTypeConversion(HTypeConversion* instruction) override; void CheckTypeCheckBitstringInput(HTypeCheckInstruction* check, size_t input_pos, diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc index d65ad40565..a1af2be9de 100644 --- a/compiler/optimizing/graph_visualizer.cc +++ b/compiler/optimizing/graph_visualizer.cc @@ -106,8 +106,7 @@ std::ostream& operator<<(std::ostream& os, const StringList& list) { } } -typedef Disassembler* create_disasm_prototype(InstructionSet instruction_set, - DisassemblerOptions* options); +using create_disasm_prototype = Disassembler*(InstructionSet, DisassemblerOptions*); class HGraphVisualizerDisassembler { public: HGraphVisualizerDisassembler(InstructionSet instruction_set, @@ -333,7 +332,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { return output_; } - void VisitParallelMove(HParallelMove* instruction) OVERRIDE { + void VisitParallelMove(HParallelMove* instruction) override { StartAttributeStream("liveness") << instruction->GetLifetimePosition(); StringList moves; for (size_t i = 0, e = instruction->NumMoves(); i < e; 
++i) { @@ -346,36 +345,36 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { StartAttributeStream("moves") << moves; } - void VisitIntConstant(HIntConstant* instruction) OVERRIDE { + void VisitIntConstant(HIntConstant* instruction) override { StartAttributeStream() << instruction->GetValue(); } - void VisitLongConstant(HLongConstant* instruction) OVERRIDE { + void VisitLongConstant(HLongConstant* instruction) override { StartAttributeStream() << instruction->GetValue(); } - void VisitFloatConstant(HFloatConstant* instruction) OVERRIDE { + void VisitFloatConstant(HFloatConstant* instruction) override { StartAttributeStream() << instruction->GetValue(); } - void VisitDoubleConstant(HDoubleConstant* instruction) OVERRIDE { + void VisitDoubleConstant(HDoubleConstant* instruction) override { StartAttributeStream() << instruction->GetValue(); } - void VisitPhi(HPhi* phi) OVERRIDE { + void VisitPhi(HPhi* phi) override { StartAttributeStream("reg") << phi->GetRegNumber(); StartAttributeStream("is_catch_phi") << std::boolalpha << phi->IsCatchPhi() << std::noboolalpha; } - void VisitMemoryBarrier(HMemoryBarrier* barrier) OVERRIDE { + void VisitMemoryBarrier(HMemoryBarrier* barrier) override { StartAttributeStream("kind") << barrier->GetBarrierKind(); } - void VisitMonitorOperation(HMonitorOperation* monitor) OVERRIDE { + void VisitMonitorOperation(HMonitorOperation* monitor) override { StartAttributeStream("kind") << (monitor->IsEnter() ? "enter" : "exit"); } - void VisitLoadClass(HLoadClass* load_class) OVERRIDE { + void VisitLoadClass(HLoadClass* load_class) override { StartAttributeStream("load_kind") << load_class->GetLoadKind(); const char* descriptor = load_class->GetDexFile().GetTypeDescriptor( load_class->GetDexFile().GetTypeId(load_class->GetTypeIndex())); @@ -386,19 +385,19 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { << load_class->NeedsAccessCheck() << std::noboolalpha; } - void VisitLoadMethodHandle(HLoadMethodHandle* load_method_handle) OVERRIDE { + void VisitLoadMethodHandle(HLoadMethodHandle* load_method_handle) override { StartAttributeStream("load_kind") << "RuntimeCall"; StartAttributeStream("method_handle_index") << load_method_handle->GetMethodHandleIndex(); } - void VisitLoadMethodType(HLoadMethodType* load_method_type) OVERRIDE { + void VisitLoadMethodType(HLoadMethodType* load_method_type) override { StartAttributeStream("load_kind") << "RuntimeCall"; const DexFile& dex_file = load_method_type->GetDexFile(); const DexFile::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex()); StartAttributeStream("method_type") << dex_file.GetProtoSignature(proto_id); } - void VisitLoadString(HLoadString* load_string) OVERRIDE { + void VisitLoadString(HLoadString* load_string) override { StartAttributeStream("load_kind") << load_string->GetLoadKind(); } @@ -413,15 +412,15 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { } } - void VisitCheckCast(HCheckCast* check_cast) OVERRIDE { + void VisitCheckCast(HCheckCast* check_cast) override { HandleTypeCheckInstruction(check_cast); } - void VisitInstanceOf(HInstanceOf* instance_of) OVERRIDE { + void VisitInstanceOf(HInstanceOf* instance_of) override { HandleTypeCheckInstruction(instance_of); } - void VisitArrayLength(HArrayLength* array_length) OVERRIDE { + void VisitArrayLength(HArrayLength* array_length) override { StartAttributeStream("is_string_length") << std::boolalpha << array_length->IsStringLength() << std::noboolalpha; if (array_length->IsEmittedAtUseSite()) { @@ 
-429,31 +428,31 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { } } - void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE { + void VisitBoundsCheck(HBoundsCheck* bounds_check) override { StartAttributeStream("is_string_char_at") << std::boolalpha << bounds_check->IsStringCharAt() << std::noboolalpha; } - void VisitArrayGet(HArrayGet* array_get) OVERRIDE { + void VisitArrayGet(HArrayGet* array_get) override { StartAttributeStream("is_string_char_at") << std::boolalpha << array_get->IsStringCharAt() << std::noboolalpha; } - void VisitArraySet(HArraySet* array_set) OVERRIDE { + void VisitArraySet(HArraySet* array_set) override { StartAttributeStream("value_can_be_null") << std::boolalpha << array_set->GetValueCanBeNull() << std::noboolalpha; StartAttributeStream("needs_type_check") << std::boolalpha << array_set->NeedsTypeCheck() << std::noboolalpha; } - void VisitCompare(HCompare* compare) OVERRIDE { + void VisitCompare(HCompare* compare) override { ComparisonBias bias = compare->GetBias(); StartAttributeStream("bias") << (bias == ComparisonBias::kGtBias ? "gt" : (bias == ComparisonBias::kLtBias ? "lt" : "none")); } - void VisitInvoke(HInvoke* invoke) OVERRIDE { + void VisitInvoke(HInvoke* invoke) override { StartAttributeStream("dex_file_index") << invoke->GetDexMethodIndex(); ArtMethod* method = invoke->GetResolvedMethod(); // We don't print signatures, which conflict with c1visualizer format. @@ -470,12 +469,12 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { << std::noboolalpha; } - void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE { + void VisitInvokeUnresolved(HInvokeUnresolved* invoke) override { VisitInvoke(invoke); StartAttributeStream("invoke_type") << invoke->GetInvokeType(); } - void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE { + void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override { VisitInvoke(invoke); StartAttributeStream("method_load_kind") << invoke->GetMethodLoadKind(); StartAttributeStream("intrinsic") << invoke->GetIntrinsic(); @@ -484,96 +483,104 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { } } - void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE { + void VisitInvokeVirtual(HInvokeVirtual* invoke) override { VisitInvoke(invoke); StartAttributeStream("intrinsic") << invoke->GetIntrinsic(); } - void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE { + void VisitInvokePolymorphic(HInvokePolymorphic* invoke) override { VisitInvoke(invoke); StartAttributeStream("invoke_type") << "InvokePolymorphic"; } - void VisitInstanceFieldGet(HInstanceFieldGet* iget) OVERRIDE { + void VisitInstanceFieldGet(HInstanceFieldGet* iget) override { StartAttributeStream("field_name") << iget->GetFieldInfo().GetDexFile().PrettyField(iget->GetFieldInfo().GetFieldIndex(), /* with type */ false); StartAttributeStream("field_type") << iget->GetFieldType(); } - void VisitInstanceFieldSet(HInstanceFieldSet* iset) OVERRIDE { + void VisitInstanceFieldSet(HInstanceFieldSet* iset) override { StartAttributeStream("field_name") << iset->GetFieldInfo().GetDexFile().PrettyField(iset->GetFieldInfo().GetFieldIndex(), /* with type */ false); StartAttributeStream("field_type") << iset->GetFieldType(); } - void VisitStaticFieldGet(HStaticFieldGet* sget) OVERRIDE { + void VisitStaticFieldGet(HStaticFieldGet* sget) override { StartAttributeStream("field_name") << sget->GetFieldInfo().GetDexFile().PrettyField(sget->GetFieldInfo().GetFieldIndex(), /* with type */ false); 
StartAttributeStream("field_type") << sget->GetFieldType(); } - void VisitStaticFieldSet(HStaticFieldSet* sset) OVERRIDE { + void VisitStaticFieldSet(HStaticFieldSet* sset) override { StartAttributeStream("field_name") << sset->GetFieldInfo().GetDexFile().PrettyField(sset->GetFieldInfo().GetFieldIndex(), /* with type */ false); StartAttributeStream("field_type") << sset->GetFieldType(); } - void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) OVERRIDE { + void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) override { StartAttributeStream("field_type") << field_access->GetFieldType(); } - void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) OVERRIDE { + void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) override { StartAttributeStream("field_type") << field_access->GetFieldType(); } - void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) OVERRIDE { + void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) override { StartAttributeStream("field_type") << field_access->GetFieldType(); } - void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) OVERRIDE { + void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) override { StartAttributeStream("field_type") << field_access->GetFieldType(); } - void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE { + void VisitTryBoundary(HTryBoundary* try_boundary) override { StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit"); } - void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE { + void VisitDeoptimize(HDeoptimize* deoptimize) override { StartAttributeStream("kind") << deoptimize->GetKind(); } - void VisitVecOperation(HVecOperation* vec_operation) OVERRIDE { + void VisitVecOperation(HVecOperation* vec_operation) override { StartAttributeStream("packed_type") << vec_operation->GetPackedType(); } - void VisitVecMemoryOperation(HVecMemoryOperation* vec_mem_operation) OVERRIDE { + void VisitVecMemoryOperation(HVecMemoryOperation* vec_mem_operation) override { StartAttributeStream("alignment") << vec_mem_operation->GetAlignment().ToString(); } - void VisitVecHalvingAdd(HVecHalvingAdd* hadd) OVERRIDE { + void VisitVecHalvingAdd(HVecHalvingAdd* hadd) override { VisitVecBinaryOperation(hadd); StartAttributeStream("rounded") << std::boolalpha << hadd->IsRounded() << std::noboolalpha; } - void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) OVERRIDE { + void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) override { VisitVecOperation(instruction); StartAttributeStream("kind") << instruction->GetOpKind(); } + void VisitVecDotProd(HVecDotProd* instruction) override { + VisitVecOperation(instruction); + DataType::Type arg_type = instruction->InputAt(1)->AsVecOperation()->GetPackedType(); + StartAttributeStream("type") << (instruction->IsZeroExtending() ? 
+ DataType::ToUnsigned(arg_type) : + DataType::ToSigned(arg_type)); + } + #if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64) - void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) OVERRIDE { + void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) override { StartAttributeStream("kind") << instruction->GetOpKind(); } - void VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) OVERRIDE { + void VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) override { StartAttributeStream("kind") << instruction->GetOpKind(); } - void VisitDataProcWithShifterOp(HDataProcWithShifterOp* instruction) OVERRIDE { + void VisitDataProcWithShifterOp(HDataProcWithShifterOp* instruction) override { StartAttributeStream("kind") << instruction->GetInstrKind() << "+" << instruction->GetOpKind(); if (HDataProcWithShifterOp::IsShiftOp(instruction->GetOpKind())) { StartAttributeStream("shift") << instruction->GetShiftAmount(); @@ -814,7 +821,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { Flush(); } - void VisitBasicBlock(HBasicBlock* block) OVERRIDE { + void VisitBasicBlock(HBasicBlock* block) override { StartTag("block"); PrintProperty("name", "B", block->GetBlockId()); if (block->GetLifetimeStart() != kNoLifetime) { diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h index 75cfff2140..bbf2265e98 100644 --- a/compiler/optimizing/gvn.h +++ b/compiler/optimizing/gvn.h @@ -31,7 +31,7 @@ class GVNOptimization : public HOptimization { const char* pass_name = kGlobalValueNumberingPassName) : HOptimization(graph, pass_name), side_effects_(side_effects) {} - bool Run() OVERRIDE; + bool Run() override; static constexpr const char* kGlobalValueNumberingPassName = "GVN"; diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h index 89fed2ec64..a48aa90059 100644 --- a/compiler/optimizing/induction_var_analysis.h +++ b/compiler/optimizing/induction_var_analysis.h @@ -37,7 +37,7 @@ class HInductionVarAnalysis : public HOptimization { public: explicit HInductionVarAnalysis(HGraph* graph, const char* name = kInductionPassName); - bool Run() OVERRIDE; + bool Run() override; static constexpr const char* kInductionPassName = "induction_var_analysis"; diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 3ba741472e..7f94a298eb 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -1296,9 +1296,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction, // If invoke_instruction is devirtualized to a different method, give intrinsics // another chance before we try to inline it. - bool wrong_invoke_type = false; - if (invoke_instruction->GetResolvedMethod() != method && - IntrinsicsRecognizer::Recognize(invoke_instruction, method, &wrong_invoke_type)) { + if (invoke_instruction->GetResolvedMethod() != method && method->IsIntrinsic()) { MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized); if (invoke_instruction->IsInvokeInterface()) { // We don't intrinsify an invoke-interface directly. @@ -1311,6 +1309,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction, invoke_instruction->GetDexMethodIndex(), // Use interface method's dex method index. 
method, method->GetMethodIndex()); + DCHECK_NE(new_invoke->GetIntrinsic(), Intrinsics::kNone); HInputsRef inputs = invoke_instruction->GetInputs(); for (size_t index = 0; index != inputs.size(); ++index) { new_invoke->SetArgumentAt(index, inputs[index]); @@ -1320,14 +1319,11 @@ if (invoke_instruction->GetType() == DataType::Type::kReference) { new_invoke->SetReferenceTypeInfo(invoke_instruction->GetReferenceTypeInfo()); } - // Run intrinsic recognizer again to set new_invoke's intrinsic. - IntrinsicsRecognizer::Recognize(new_invoke, method, &wrong_invoke_type); - DCHECK_NE(new_invoke->GetIntrinsic(), Intrinsics::kNone); return_replacement = new_invoke; // invoke_instruction is replaced with new_invoke. should_remove_invoke_instruction = true; } else { - // invoke_instruction is intrinsified and stays. + invoke_instruction->SetResolvedMethod(method); } } else if (!TryBuildAndInline(invoke_instruction, method, receiver_type, &return_replacement)) { if (invoke_instruction->IsInvokeInterface()) { @@ -2022,13 +2018,9 @@ void HInliner::RunOptimizations(HGraph* callee_graph, // optimization that could lead to a HDeoptimize. The following optimizations do not. HDeadCodeElimination dce(callee_graph, inline_stats_, "dead_code_elimination$inliner"); HConstantFolding fold(callee_graph, "constant_folding$inliner"); - HSharpening sharpening(callee_graph, codegen_); InstructionSimplifier simplify(callee_graph, codegen_, inline_stats_); - IntrinsicsRecognizer intrinsics(callee_graph, inline_stats_); HOptimization* optimizations[] = { - &intrinsics, - &sharpening, &simplify, &fold, &dce, diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h index 2fdf6a1306..6fd0c204b2 100644 --- a/compiler/optimizing/inliner.h +++ b/compiler/optimizing/inliner.h @@ -60,7 +60,7 @@ class HInliner : public HOptimization { handles_(handles), inline_stats_(nullptr) {} - bool Run() OVERRIDE; + bool Run() override; static constexpr const char* kInlinerPassName = "inliner"; diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc index e555d0d890..b576f8399d 100644 --- a/compiler/optimizing/instruction_builder.cc +++ b/compiler/optimizing/instruction_builder.cc @@ -466,22 +466,17 @@ void HInstructionBuilder::BuildIntrinsic(ArtMethod* method) { } ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() { - // The callback gets called when the line number changes. - // In other words, it marks the start of a new Java statement. - struct Callback { - static bool Position(void* ctx, const DexFile::PositionInfo& entry) { - static_cast<ArenaBitVector*>(ctx)->SetBit(entry.address_); - return false; - } - }; ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_, code_item_accessor_.InsnsSizeInCodeUnits(), /* expandable */ false, kArenaAllocGraphBuilder); locations->ClearAllBits(); - dex_file_->DecodeDebugPositionInfo(code_item_accessor_.DebugInfoOffset(), - Callback::Position, - locations); + // The visitor gets called when the line number changes. + // In other words, it marks the start of a new Java statement. + code_item_accessor_.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) { + locations->SetBit(entry.address_); + return false; + }); // Instruction-specific tweaks.
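The FindNativeDebugInfoLocations hunk just above replaces a C-style callback (a static member function plus a void* context pointer) with a capturing lambda handed to the code item accessor. A reduced sketch of that refactor pattern, with hypothetical names standing in for the dex APIs:

#include <cstdint>
#include <vector>

struct PositionInfo { uint32_t address_; };

// Hypothetical decoder: invokes the visitor per entry until it returns true.
template <typename Visitor>
void DecodePositions(const std::vector<PositionInfo>& entries, Visitor&& visit) {
  for (const PositionInfo& entry : entries) {
    if (visit(entry)) break;  // Returning true stops the iteration early.
  }
}

int main() {
  std::vector<PositionInfo> entries = {{0u}, {4u}, {9u}};
  std::vector<bool> locations(16, false);
  // The capturing lambda replaces the old static Callback::Position plus
  // void* ctx: state is captured by reference instead of being smuggled
  // through an opaque pointer.
  DecodePositions(entries, [&](const PositionInfo& entry) {
    locations[entry.address_] = true;
    return false;  // false == keep iterating, matching the diff's convention.
  });
}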
for (const DexInstructionPcPair& inst : code_item_accessor_) { switch (inst->Opcode()) { @@ -983,11 +978,8 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction, } } - HInvokeStaticOrDirect::DispatchInfo dispatch_info = { - HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall, - HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod, - 0u - }; + HInvokeStaticOrDirect::DispatchInfo dispatch_info = + HSharpening::SharpenInvokeStaticOrDirect(resolved_method, code_generator_); MethodReference target_method(resolved_method->GetDexFile(), resolved_method->GetDexMethodIndex()); invoke = new (allocator_) HInvokeStaticOrDirect(allocator_, @@ -1505,21 +1497,22 @@ bool HInstructionBuilder::HandleStringInit(HInvoke* invoke, // to be visited once it is clear whether it has remaining uses. if (arg_this->IsNewInstance()) { ssa_builder_->AddUninitializedString(arg_this->AsNewInstance()); - // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`. - for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) { - if ((*current_locals_)[vreg] == arg_this) { - (*current_locals_)[vreg] = invoke; - } - } } else { DCHECK(arg_this->IsPhi()); // We can get a phi as input of a String.<init> if there is a loop between the // allocation and the String.<init> call. As we don't know which other phis might alias - // with `arg_this`, we keep a record of these phis and will analyze their inputs and - // uses once the inputs and users are populated (in ssa_builder.cc). - // Note: we only do this for phis, as it is a somewhat more expensive operation than - // what we're doing above when the input is the `HNewInstance`. - ssa_builder_->AddUninitializedStringPhi(arg_this->AsPhi(), invoke); + // with `arg_this`, we keep a record of those invocations so we can later replace + // the allocation with the invocation. + // Add the actual 'this' input so the analysis knows which instruction is the allocation. + // The input will be removed during the analysis. + invoke->AddInput(arg_this); + ssa_builder_->AddUninitializedStringPhi(invoke); + } + // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
+ for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) { + if ((*current_locals_)[vreg] == arg_this) { + (*current_locals_)[vreg] = invoke; + } } return true; } diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index f493b66cfd..ad50bb877e 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -66,44 +66,44 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor { bool TryCombineVecMultiplyAccumulate(HVecMul* mul); void VisitShift(HBinaryOperation* shift); - void VisitEqual(HEqual* equal) OVERRIDE; - void VisitNotEqual(HNotEqual* equal) OVERRIDE; - void VisitBooleanNot(HBooleanNot* bool_not) OVERRIDE; - void VisitInstanceFieldSet(HInstanceFieldSet* equal) OVERRIDE; - void VisitStaticFieldSet(HStaticFieldSet* equal) OVERRIDE; - void VisitArraySet(HArraySet* equal) OVERRIDE; - void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE; - void VisitNullCheck(HNullCheck* instruction) OVERRIDE; - void VisitArrayLength(HArrayLength* instruction) OVERRIDE; - void VisitCheckCast(HCheckCast* instruction) OVERRIDE; - void VisitAbs(HAbs* instruction) OVERRIDE; - void VisitAdd(HAdd* instruction) OVERRIDE; - void VisitAnd(HAnd* instruction) OVERRIDE; - void VisitCondition(HCondition* instruction) OVERRIDE; - void VisitGreaterThan(HGreaterThan* condition) OVERRIDE; - void VisitGreaterThanOrEqual(HGreaterThanOrEqual* condition) OVERRIDE; - void VisitLessThan(HLessThan* condition) OVERRIDE; - void VisitLessThanOrEqual(HLessThanOrEqual* condition) OVERRIDE; - void VisitBelow(HBelow* condition) OVERRIDE; - void VisitBelowOrEqual(HBelowOrEqual* condition) OVERRIDE; - void VisitAbove(HAbove* condition) OVERRIDE; - void VisitAboveOrEqual(HAboveOrEqual* condition) OVERRIDE; - void VisitDiv(HDiv* instruction) OVERRIDE; - void VisitMul(HMul* instruction) OVERRIDE; - void VisitNeg(HNeg* instruction) OVERRIDE; - void VisitNot(HNot* instruction) OVERRIDE; - void VisitOr(HOr* instruction) OVERRIDE; - void VisitShl(HShl* instruction) OVERRIDE; - void VisitShr(HShr* instruction) OVERRIDE; - void VisitSub(HSub* instruction) OVERRIDE; - void VisitUShr(HUShr* instruction) OVERRIDE; - void VisitXor(HXor* instruction) OVERRIDE; - void VisitSelect(HSelect* select) OVERRIDE; - void VisitIf(HIf* instruction) OVERRIDE; - void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE; - void VisitInvoke(HInvoke* invoke) OVERRIDE; - void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE; - void VisitVecMul(HVecMul* instruction) OVERRIDE; + void VisitEqual(HEqual* equal) override; + void VisitNotEqual(HNotEqual* equal) override; + void VisitBooleanNot(HBooleanNot* bool_not) override; + void VisitInstanceFieldSet(HInstanceFieldSet* equal) override; + void VisitStaticFieldSet(HStaticFieldSet* equal) override; + void VisitArraySet(HArraySet* equal) override; + void VisitTypeConversion(HTypeConversion* instruction) override; + void VisitNullCheck(HNullCheck* instruction) override; + void VisitArrayLength(HArrayLength* instruction) override; + void VisitCheckCast(HCheckCast* instruction) override; + void VisitAbs(HAbs* instruction) override; + void VisitAdd(HAdd* instruction) override; + void VisitAnd(HAnd* instruction) override; + void VisitCondition(HCondition* instruction) override; + void VisitGreaterThan(HGreaterThan* condition) override; + void VisitGreaterThanOrEqual(HGreaterThanOrEqual* condition) override; + void VisitLessThan(HLessThan* condition) override; + void 
VisitLessThanOrEqual(HLessThanOrEqual* condition) override; + void VisitBelow(HBelow* condition) override; + void VisitBelowOrEqual(HBelowOrEqual* condition) override; + void VisitAbove(HAbove* condition) override; + void VisitAboveOrEqual(HAboveOrEqual* condition) override; + void VisitDiv(HDiv* instruction) override; + void VisitMul(HMul* instruction) override; + void VisitNeg(HNeg* instruction) override; + void VisitNot(HNot* instruction) override; + void VisitOr(HOr* instruction) override; + void VisitShl(HShl* instruction) override; + void VisitShr(HShr* instruction) override; + void VisitSub(HSub* instruction) override; + void VisitUShr(HUShr* instruction) override; + void VisitXor(HXor* instruction) override; + void VisitSelect(HSelect* select) override; + void VisitIf(HIf* instruction) override; + void VisitInstanceOf(HInstanceOf* instruction) override; + void VisitInvoke(HInvoke* invoke) override; + void VisitDeoptimize(HDeoptimize* deoptimize) override; + void VisitVecMul(HVecMul* instruction) override; bool CanEnsureNotNullAt(HInstruction* instr, HInstruction* at) const; @@ -1317,7 +1317,7 @@ void InstructionSimplifierVisitor::VisitAdd(HAdd* instruction) { } HNeg* neg = left_is_neg ? left->AsNeg() : right->AsNeg(); - if ((left_is_neg ^ right_is_neg) && neg->HasOnlyOneNonEnvironmentUse()) { + if (left_is_neg != right_is_neg && neg->HasOnlyOneNonEnvironmentUse()) { // Replace code looking like // NEG tmp, b // ADD dst, a, tmp @@ -2146,22 +2146,6 @@ void InstructionSimplifierVisitor::SimplifyStringEquals(HInvoke* instruction) { ReferenceTypeInfo argument_rti = argument->GetReferenceTypeInfo(); if (argument_rti.IsValid() && argument_rti.IsStringClass()) { optimizations.SetArgumentIsString(); - } else if (kUseReadBarrier) { - DCHECK(instruction->GetResolvedMethod() != nullptr); - DCHECK(instruction->GetResolvedMethod()->GetDeclaringClass()->IsStringClass() || - // Object.equals() can be devirtualized to String.equals(). - instruction->GetResolvedMethod()->GetDeclaringClass()->IsObjectClass()); - Runtime* runtime = Runtime::Current(); - // For AOT, we always assume that the boot image shall contain the String.class and - // we do not need a read barrier for boot image classes as they are non-moveable. - // For JIT, check if we actually have a boot image; if we do, the String.class - // should also be non-moveable. - if (runtime->IsAotCompiler() || runtime->GetHeap()->HasBootImageSpace()) { - DCHECK(runtime->IsAotCompiler() || - !runtime->GetHeap()->IsMovableObject( - instruction->GetResolvedMethod()->GetDeclaringClass())); - optimizations.SetNoReadBarrierForStringClass(); - } } } } @@ -2306,7 +2290,7 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction) // the invoke, as we would need to look it up in the current dex file, and it // is unlikely that it exists. The most usual situation for such typed // arraycopy methods is a direct pointer to the boot image. 
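The change immediately below completes a theme of this diff: HSharpening::SharpenInvokeStaticOrDirect now computes and returns a DispatchInfo from the resolved method instead of mutating the invoke in place, and callers (the instruction builder seen earlier, the simplifier here) apply it via SetDispatchInfo. A sketch of that value-returning shape, with hypothetical stand-ins for the ART types:

#include <cstdint>

// Hypothetical stand-ins, not the real ART declarations.
enum class MethodLoadKind { kRuntimeCall, kBootImageRelRo };
struct DispatchInfo { MethodLoadKind load_kind; uint64_t method_load_data; };
struct ArtMethodStub { bool in_boot_image; };

struct Invoke {
  DispatchInfo dispatch_info{MethodLoadKind::kRuntimeCall, 0u};
  void SetDispatchInfo(const DispatchInfo& info) { dispatch_info = info; }
};

// Value-returning sharpening: the dispatch info is computed from the method
// alone, so one helper serves both graph construction and later retargeting
// of an existing invoke.
DispatchInfo SharpenInvoke(const ArtMethodStub& method) {
  if (method.in_boot_image) {
    return {MethodLoadKind::kBootImageRelRo, 0x1000u};  // Illustrative data only.
  }
  return {MethodLoadKind::kRuntimeCall, 0u};
}

int main() {
  Invoke invoke;
  ArtMethodStub method{/*in_boot_image=*/true};
  invoke.SetDispatchInfo(SharpenInvoke(method));  // Caller applies the result.
}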
- HSharpening::SharpenInvokeStaticOrDirect(invoke, codegen_); + invoke->SetDispatchInfo(HSharpening::SharpenInvokeStaticOrDirect(method, codegen_)); } } } diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h index 2d134e0067..982a24a6f0 100644 --- a/compiler/optimizing/instruction_simplifier.h +++ b/compiler/optimizing/instruction_simplifier.h @@ -46,7 +46,7 @@ class InstructionSimplifier : public HOptimization { static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier"; - bool Run() OVERRIDE; + bool Run() override; private: CodeGenerator* codegen_; diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc index 37fcdb9d5c..24fbb6cb4c 100644 --- a/compiler/optimizing/instruction_simplifier_arm.cc +++ b/compiler/optimizing/instruction_simplifier_arm.cc @@ -56,7 +56,7 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor { * (2) Since statements can be removed in a "forward" fashion, * the visitor should test if each statement is still there. */ - void VisitBasicBlock(HBasicBlock* block) OVERRIDE { + void VisitBasicBlock(HBasicBlock* block) override { // TODO: fragile iteration, provide more robust iterators? for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { HInstruction* instruction = it.Current(); @@ -66,15 +66,15 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor { } } - void VisitAnd(HAnd* instruction) OVERRIDE; - void VisitArrayGet(HArrayGet* instruction) OVERRIDE; - void VisitArraySet(HArraySet* instruction) OVERRIDE; - void VisitMul(HMul* instruction) OVERRIDE; - void VisitOr(HOr* instruction) OVERRIDE; - void VisitShl(HShl* instruction) OVERRIDE; - void VisitShr(HShr* instruction) OVERRIDE; - void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE; - void VisitUShr(HUShr* instruction) OVERRIDE; + void VisitAnd(HAnd* instruction) override; + void VisitArrayGet(HArrayGet* instruction) override; + void VisitArraySet(HArraySet* instruction) override; + void VisitMul(HMul* instruction) override; + void VisitOr(HOr* instruction) override; + void VisitShl(HShl* instruction) override; + void VisitShr(HShr* instruction) override; + void VisitTypeConversion(HTypeConversion* instruction) override; + void VisitUShr(HUShr* instruction) override; OptimizingCompilerStats* stats_; }; diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h index f1a16efc61..fca9341d59 100644 --- a/compiler/optimizing/instruction_simplifier_arm.h +++ b/compiler/optimizing/instruction_simplifier_arm.h @@ -30,7 +30,7 @@ class InstructionSimplifierArm : public HOptimization { static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm"; - bool Run() OVERRIDE; + bool Run() override; }; } // namespace arm diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc index e0a627994d..b536cb4dc4 100644 --- a/compiler/optimizing/instruction_simplifier_arm64.cc +++ b/compiler/optimizing/instruction_simplifier_arm64.cc @@ -58,7 +58,7 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor { * (2) Since statements can be removed in a "forward" fashion, * the visitor should test if each statement is still there. 
*/ - void VisitBasicBlock(HBasicBlock* block) OVERRIDE { + void VisitBasicBlock(HBasicBlock* block) override { // TODO: fragile iteration, provide more robust iterators? for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { HInstruction* instruction = it.Current(); @@ -69,18 +69,18 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor { } // HInstruction visitors, sorted alphabetically. - void VisitAnd(HAnd* instruction) OVERRIDE; - void VisitArrayGet(HArrayGet* instruction) OVERRIDE; - void VisitArraySet(HArraySet* instruction) OVERRIDE; - void VisitMul(HMul* instruction) OVERRIDE; - void VisitOr(HOr* instruction) OVERRIDE; - void VisitShl(HShl* instruction) OVERRIDE; - void VisitShr(HShr* instruction) OVERRIDE; - void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE; - void VisitUShr(HUShr* instruction) OVERRIDE; - void VisitXor(HXor* instruction) OVERRIDE; - void VisitVecLoad(HVecLoad* instruction) OVERRIDE; - void VisitVecStore(HVecStore* instruction) OVERRIDE; + void VisitAnd(HAnd* instruction) override; + void VisitArrayGet(HArrayGet* instruction) override; + void VisitArraySet(HArraySet* instruction) override; + void VisitMul(HMul* instruction) override; + void VisitOr(HOr* instruction) override; + void VisitShl(HShl* instruction) override; + void VisitShr(HShr* instruction) override; + void VisitTypeConversion(HTypeConversion* instruction) override; + void VisitUShr(HUShr* instruction) override; + void VisitXor(HXor* instruction) override; + void VisitVecLoad(HVecLoad* instruction) override; + void VisitVecStore(HVecStore* instruction) override; OptimizingCompilerStats* stats_; }; diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h index 8659c1f5f4..8d93c01ebf 100644 --- a/compiler/optimizing/instruction_simplifier_arm64.h +++ b/compiler/optimizing/instruction_simplifier_arm64.h @@ -30,7 +30,7 @@ class InstructionSimplifierArm64 : public HOptimization { static constexpr const char* kInstructionSimplifierArm64PassName = "instruction_simplifier_arm64"; - bool Run() OVERRIDE; + bool Run() override; }; } // namespace arm64 diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc index 3bdf90f652..5d0c63b76b 100644 --- a/compiler/optimizing/instruction_simplifier_mips.cc +++ b/compiler/optimizing/instruction_simplifier_mips.cc @@ -39,8 +39,8 @@ class InstructionSimplifierMipsVisitor : public HGraphVisitor { bool TryExtractArrayAccessIndex(HInstruction* access, HInstruction* index, DataType::Type packed_type); - void VisitArrayGet(HArrayGet* instruction) OVERRIDE; - void VisitArraySet(HArraySet* instruction) OVERRIDE; + void VisitArrayGet(HArrayGet* instruction) override; + void VisitArraySet(HArraySet* instruction) override; OptimizingCompilerStats* stats_; CodeGeneratorMIPS* codegen_; diff --git a/compiler/optimizing/instruction_simplifier_mips.h b/compiler/optimizing/instruction_simplifier_mips.h index 94ef73d425..b431334811 100644 --- a/compiler/optimizing/instruction_simplifier_mips.h +++ b/compiler/optimizing/instruction_simplifier_mips.h @@ -35,7 +35,7 @@ class InstructionSimplifierMips : public HOptimization { static constexpr const char* kInstructionSimplifierMipsPassName = "instruction_simplifier_mips"; - bool Run() OVERRIDE; + bool Run() override; private: CodeGeneratorMIPS* codegen_; diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc index 21efe11f31..619cd8ed41 100644 
--- a/compiler/optimizing/intrinsics.cc +++ b/compiler/optimizing/intrinsics.cc @@ -32,179 +32,6 @@ namespace art { -// Check that intrinsic enum values fit within space set aside in ArtMethod modifier flags. -#define CHECK_INTRINSICS_ENUM_VALUES(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - static_assert( \ - static_cast<uint32_t>(Intrinsics::k ## Name) <= (kAccIntrinsicBits >> CTZ(kAccIntrinsicBits)), \ - "Instrinsics enumeration space overflow."); -#include "intrinsics_list.h" - INTRINSICS_LIST(CHECK_INTRINSICS_ENUM_VALUES) -#undef INTRINSICS_LIST -#undef CHECK_INTRINSICS_ENUM_VALUES - -// Function that returns whether an intrinsic is static/direct or virtual. -static inline InvokeType GetIntrinsicInvokeType(Intrinsics i) { - switch (i) { - case Intrinsics::kNone: - return kInterface; // Non-sensical for intrinsic. -#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - case Intrinsics::k ## Name: \ - return IsStatic; -#include "intrinsics_list.h" - INTRINSICS_LIST(OPTIMIZING_INTRINSICS) -#undef INTRINSICS_LIST -#undef OPTIMIZING_INTRINSICS - } - return kInterface; -} - -// Function that returns whether an intrinsic needs an environment or not. -static inline IntrinsicNeedsEnvironmentOrCache NeedsEnvironmentOrCache(Intrinsics i) { - switch (i) { - case Intrinsics::kNone: - return kNeedsEnvironmentOrCache; // Non-sensical for intrinsic. -#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - case Intrinsics::k ## Name: \ - return NeedsEnvironmentOrCache; -#include "intrinsics_list.h" - INTRINSICS_LIST(OPTIMIZING_INTRINSICS) -#undef INTRINSICS_LIST -#undef OPTIMIZING_INTRINSICS - } - return kNeedsEnvironmentOrCache; -} - -// Function that returns whether an intrinsic has side effects. -static inline IntrinsicSideEffects GetSideEffects(Intrinsics i) { - switch (i) { - case Intrinsics::kNone: - return kAllSideEffects; -#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - case Intrinsics::k ## Name: \ - return SideEffects; -#include "intrinsics_list.h" - INTRINSICS_LIST(OPTIMIZING_INTRINSICS) -#undef INTRINSICS_LIST -#undef OPTIMIZING_INTRINSICS - } - return kAllSideEffects; -} - -// Function that returns whether an intrinsic can throw exceptions. -static inline IntrinsicExceptions GetExceptions(Intrinsics i) { - switch (i) { - case Intrinsics::kNone: - return kCanThrow; -#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - case Intrinsics::k ## Name: \ - return Exceptions; -#include "intrinsics_list.h" - INTRINSICS_LIST(OPTIMIZING_INTRINSICS) -#undef INTRINSICS_LIST -#undef OPTIMIZING_INTRINSICS - } - return kCanThrow; -} - -static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke) - REQUIRES_SHARED(Locks::mutator_lock_) { - // Whenever the intrinsic is marked as static, report an error if we find an InvokeVirtual. - // - // Whenever the intrinsic is marked as direct and we find an InvokeVirtual, a devirtualization - // failure occured. We might be in a situation where we have inlined a method that calls an - // intrinsic, but that method is in a different dex file on which we do not have a - // verified_method that would have helped the compiler driver sharpen the call. In that case, - // make sure that the intrinsic is actually for some final method (or in a final class), as - // otherwise the intrinsics setup is broken. 
- // - // For the last direction, we have intrinsics for virtual functions that will perform a check - // inline. If the precise type is known, however, the instruction will be sharpened to an - // InvokeStaticOrDirect. - InvokeType intrinsic_type = GetIntrinsicInvokeType(intrinsic); - InvokeType invoke_type = invoke->GetInvokeType(); - - switch (intrinsic_type) { - case kStatic: - return (invoke_type == kStatic); - - case kDirect: - if (invoke_type == kDirect) { - return true; - } - if (invoke_type == kVirtual) { - ArtMethod* art_method = invoke->GetResolvedMethod(); - return (art_method->IsFinal() || art_method->GetDeclaringClass()->IsFinal()); - } - return false; - - case kVirtual: - // Call might be devirtualized. - return (invoke_type == kVirtual || invoke_type == kDirect || invoke_type == kInterface); - - case kSuper: - case kInterface: - case kPolymorphic: - case kCustom: - return false; - } - LOG(FATAL) << "Unknown intrinsic invoke type: " << intrinsic_type; - UNREACHABLE(); -} - -bool IntrinsicsRecognizer::Recognize(HInvoke* invoke, - ArtMethod* art_method, - /*out*/ bool* wrong_invoke_type) { - if (art_method == nullptr) { - art_method = invoke->GetResolvedMethod(); - } - *wrong_invoke_type = false; - if (art_method == nullptr || !art_method->IsIntrinsic()) { - return false; - } - - // TODO: b/65872996 The intent is that polymorphic signature methods should - // be compiler intrinsics. At present, they are only interpreter intrinsics. - if (art_method->IsPolymorphicSignature()) { - return false; - } - - Intrinsics intrinsic = static_cast<Intrinsics>(art_method->GetIntrinsic()); - if (CheckInvokeType(intrinsic, invoke) == false) { - *wrong_invoke_type = true; - return false; - } - - invoke->SetIntrinsic(intrinsic, - NeedsEnvironmentOrCache(intrinsic), - GetSideEffects(intrinsic), - GetExceptions(intrinsic)); - return true; -} - -bool IntrinsicsRecognizer::Run() { - bool didRecognize = false; - ScopedObjectAccess soa(Thread::Current()); - for (HBasicBlock* block : graph_->GetReversePostOrder()) { - for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done(); - inst_it.Advance()) { - HInstruction* inst = inst_it.Current(); - if (inst->IsInvoke()) { - bool wrong_invoke_type = false; - if (Recognize(inst->AsInvoke(), /* art_method */ nullptr, &wrong_invoke_type)) { - didRecognize = true; - MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized); - } else if (wrong_invoke_type) { - LOG(WARNING) - << "Found an intrinsic with unexpected invoke type: " - << inst->AsInvoke()->GetResolvedMethod()->PrettyMethod() << " " - << inst->DebugName(); - } - } - } - } - return didRecognize; -} - std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic) { switch (intrinsic) { case Intrinsics::kNone: diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h index 993648f765..59012faea7 100644 --- a/compiler/optimizing/intrinsics.h +++ b/compiler/optimizing/intrinsics.h @@ -34,28 +34,6 @@ static constexpr uint64_t kPositiveInfinityDouble = UINT64_C(0x7ff0000000000000) static constexpr uint32_t kNanFloat = 0x7fc00000U; static constexpr uint64_t kNanDouble = 0x7ff8000000000000; -// Recognize intrinsics from HInvoke nodes. -class IntrinsicsRecognizer : public HOptimization { - public: - IntrinsicsRecognizer(HGraph* graph, - OptimizingCompilerStats* stats, - const char* name = kIntrinsicsRecognizerPassName) - : HOptimization(graph, name, stats) {} - - bool Run() OVERRIDE; - - // Static helper that recognizes intrinsic call. 
Returns true on success. - // If it fails due to invoke type mismatch, wrong_invoke_type is set. - // Useful to recognize intrinsics on individual calls outside this full pass. - static bool Recognize(HInvoke* invoke, ArtMethod* method, /*out*/ bool* wrong_invoke_type) - REQUIRES_SHARED(Locks::mutator_lock_); - - static constexpr const char* kIntrinsicsRecognizerPassName = "intrinsics_recognition"; - - private: - DISALLOW_COPY_AND_ASSIGN(IntrinsicsRecognizer); -}; - class IntrinsicVisitor : public ValueObject { public: virtual ~IntrinsicVisitor() {} @@ -219,7 +197,6 @@ class StringEqualsOptimizations : public IntrinsicOptimizations { INTRINSIC_OPTIMIZATION(ArgumentNotNull, 0); INTRINSIC_OPTIMIZATION(ArgumentIsString, 1); - INTRINSIC_OPTIMIZATION(NoReadBarrierForStringClass, 2); private: DISALLOW_COPY_AND_ASSIGN(StringEqualsOptimizations); diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index a657b5818f..7684dc79f2 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -112,7 +112,7 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 { explicit IntrinsicSlowPathARM64(HInvoke* invoke) : SlowPathCodeARM64(invoke), invoke_(invoke) { } - void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen_in) override { CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in); __ Bind(GetEntryLabel()); @@ -145,7 +145,7 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 { __ B(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathARM64"; } + const char* GetDescription() const override { return "IntrinsicSlowPathARM64"; } private: // The instruction where this slow path is happening. 
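The intrinsic slow paths converted in these files all follow the same two-method contract: EmitNativeCode emits the out-of-line fallback and GetDescription names the path for debugging. A reduced sketch of that contract (hypothetical stub types, not the real SlowPathCode hierarchy):

#include <iostream>

class CodeGeneratorStub;  // Only ever used through a pointer here.

class SlowPathStub {
 public:
  virtual ~SlowPathStub() = default;
  virtual void EmitNativeCode(CodeGeneratorStub* codegen) = 0;
  virtual const char* GetDescription() const = 0;
};

class IntrinsicSlowPathStub final : public SlowPathStub {
 public:
  // Both virtuals carry `override` rather than the old OVERRIDE macro,
  // matching the conversions in the surrounding hunks.
  void EmitNativeCode(CodeGeneratorStub* /*codegen*/) override {
    std::cout << "emit out-of-line fallback\n";
  }
  const char* GetDescription() const override { return "IntrinsicSlowPathStub"; }
};

int main() {
  IntrinsicSlowPathStub path;
  path.EmitNativeCode(nullptr);
  std::cout << path.GetDescription() << '\n';
}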
@@ -163,7 +163,7 @@ class ReadBarrierSystemArrayCopySlowPathARM64 : public SlowPathCodeARM64 { DCHECK(kUseBakerReadBarrier); } - void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen_in) override { CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in); LocationSummary* locations = instruction_->GetLocations(); DCHECK(locations->CanCall()); @@ -216,7 +216,7 @@ class ReadBarrierSystemArrayCopySlowPathARM64 : public SlowPathCodeARM64 { __ B(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathARM64"; } + const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathARM64"; } private: Location tmp_; @@ -1006,9 +1006,9 @@ class BakerReadBarrierCasSlowPathARM64 : public SlowPathCodeARM64 { explicit BakerReadBarrierCasSlowPathARM64(HInvoke* invoke) : SlowPathCodeARM64(invoke) {} - const char* GetDescription() const OVERRIDE { return "BakerReadBarrierCasSlowPathARM64"; } + const char* GetDescription() const override { return "BakerReadBarrierCasSlowPathARM64"; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen); Arm64Assembler* assembler = arm64_codegen->GetAssembler(); MacroAssembler* masm = assembler->GetVIXLAssembler(); @@ -1398,13 +1398,6 @@ static const char* GetConstString(HInstruction* candidate, uint32_t* utf16_lengt } void IntrinsicLocationsBuilderARM64::VisitStringEquals(HInvoke* invoke) { - if (kEmitCompilerReadBarrier && - !StringEqualsOptimizations(invoke).GetArgumentIsString() && - !StringEqualsOptimizations(invoke).GetNoReadBarrierForStringClass()) { - // No support for this odd case (String class is moveable, not in the boot image). - return; - } - LocationSummary* locations = new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h index 033a644f34..9c46efddec 100644 --- a/compiler/optimizing/intrinsics_arm64.h +++ b/compiler/optimizing/intrinsics_arm64.h @@ -37,7 +37,7 @@ namespace arm64 { class CodeGeneratorARM64; -class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor { +class IntrinsicLocationsBuilderARM64 final : public IntrinsicVisitor { public: explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen) : allocator_(allocator), codegen_(codegen) {} @@ -45,7 +45,7 @@ class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor { // Define visitor methods. #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - void Visit ## Name(HInvoke* invoke) OVERRIDE; + void Visit ## Name(HInvoke* invoke) override; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) #undef INTRINSICS_LIST @@ -63,14 +63,14 @@ class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor { DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64); }; -class IntrinsicCodeGeneratorARM64 FINAL : public IntrinsicVisitor { +class IntrinsicCodeGeneratorARM64 final : public IntrinsicVisitor { public: explicit IntrinsicCodeGeneratorARM64(CodeGeneratorARM64* codegen) : codegen_(codegen) {} // Define visitor methods. #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) 
\ - void Visit ## Name(HInvoke* invoke) OVERRIDE; + void Visit ## Name(HInvoke* invoke) override; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) #undef INTRINSICS_LIST diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc index 74a779d9e2..38e4c8968a 100644 --- a/compiler/optimizing/intrinsics_arm_vixl.cc +++ b/compiler/optimizing/intrinsics_arm_vixl.cc @@ -85,7 +85,7 @@ class IntrinsicSlowPathARMVIXL : public SlowPathCodeARMVIXL { return calling_convention_visitor.GetMethodLocation(); } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { ArmVIXLAssembler* assembler = down_cast<ArmVIXLAssembler*>(codegen->GetAssembler()); __ Bind(GetEntryLabel()); @@ -111,7 +111,7 @@ class IntrinsicSlowPathARMVIXL : public SlowPathCodeARMVIXL { __ B(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPath"; } + const char* GetDescription() const override { return "IntrinsicSlowPath"; } private: // The instruction where this slow path is happening. @@ -173,7 +173,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL { DCHECK(kUseBakerReadBarrier); } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen); ArmVIXLAssembler* assembler = arm_codegen->GetAssembler(); LocationSummary* locations = instruction_->GetLocations(); @@ -233,7 +233,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL { __ B(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { + const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathARMVIXL"; } @@ -969,9 +969,9 @@ class BakerReadBarrierCasSlowPathARMVIXL : public SlowPathCodeARMVIXL { explicit BakerReadBarrierCasSlowPathARMVIXL(HInvoke* invoke) : SlowPathCodeARMVIXL(invoke) {} - const char* GetDescription() const OVERRIDE { return "BakerReadBarrierCasSlowPathARMVIXL"; } + const char* GetDescription() const override { return "BakerReadBarrierCasSlowPathARMVIXL"; } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen); ArmVIXLAssembler* assembler = arm_codegen->GetAssembler(); __ Bind(GetEntryLabel()); @@ -1092,7 +1092,8 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* c assembler->MaybeUnpoisonHeapReference(tmp); } __ Subs(tmp, tmp, expected); - __ B(ne, failure, (failure == loop_exit) ? kNear : kBranchWithoutHint); + static_cast<vixl32::MacroAssembler*>(assembler->GetVIXLAssembler())-> + B(ne, failure, /* hint= */ (failure == loop_exit) ? kNear : kBranchWithoutHint); if (type == DataType::Type::kReference) { assembler->MaybePoisonHeapReference(value); } @@ -1458,13 +1459,6 @@ static const char* GetConstString(HInstruction* candidate, uint32_t* utf16_lengt } void IntrinsicLocationsBuilderARMVIXL::VisitStringEquals(HInvoke* invoke) { - if (kEmitCompilerReadBarrier && - !StringEqualsOptimizations(invoke).GetArgumentIsString() && - !StringEqualsOptimizations(invoke).GetNoReadBarrierForStringClass()) { - // No support for this odd case (String class is moveable, not in the boot image). 
- return; - } - LocationSummary* locations = new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); InvokeRuntimeCallingConventionARMVIXL calling_convention; diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h index 9c02d0a4ad..1fea776f0d 100644 --- a/compiler/optimizing/intrinsics_arm_vixl.h +++ b/compiler/optimizing/intrinsics_arm_vixl.h @@ -27,14 +27,14 @@ namespace arm { class ArmVIXLAssembler; class CodeGeneratorARMVIXL; -class IntrinsicLocationsBuilderARMVIXL FINAL : public IntrinsicVisitor { +class IntrinsicLocationsBuilderARMVIXL final : public IntrinsicVisitor { public: explicit IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen); // Define visitor methods. #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - void Visit ## Name(HInvoke* invoke) OVERRIDE; + void Visit ## Name(HInvoke* invoke) override; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) #undef INTRINSICS_LIST @@ -54,14 +54,14 @@ class IntrinsicLocationsBuilderARMVIXL FINAL : public IntrinsicVisitor { DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARMVIXL); }; -class IntrinsicCodeGeneratorARMVIXL FINAL : public IntrinsicVisitor { +class IntrinsicCodeGeneratorARMVIXL final : public IntrinsicVisitor { public: explicit IntrinsicCodeGeneratorARMVIXL(CodeGeneratorARMVIXL* codegen) : codegen_(codegen) {} // Define visitor methods. #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - void Visit ## Name(HInvoke* invoke) OVERRIDE; + void Visit ## Name(HInvoke* invoke) override; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) #undef INTRINSICS_LIST diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc index 01d9f962f2..6f7f5e49c1 100644 --- a/compiler/optimizing/intrinsics_mips.cc +++ b/compiler/optimizing/intrinsics_mips.cc @@ -108,7 +108,7 @@ class IntrinsicSlowPathMIPS : public SlowPathCodeMIPS { public: explicit IntrinsicSlowPathMIPS(HInvoke* invoke) : SlowPathCodeMIPS(invoke), invoke_(invoke) { } - void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen_in) override { CodeGeneratorMIPS* codegen = down_cast<CodeGeneratorMIPS*>(codegen_in); __ Bind(GetEntryLabel()); @@ -137,7 +137,7 @@ class IntrinsicSlowPathMIPS : public SlowPathCodeMIPS { __ B(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS"; } + const char* GetDescription() const override { return "IntrinsicSlowPathMIPS"; } private: // The instruction where this slow path is happening. @@ -1516,13 +1516,6 @@ void IntrinsicCodeGeneratorMIPS::VisitStringCompareTo(HInvoke* invoke) { // boolean java.lang.String.equals(Object anObject) void IntrinsicLocationsBuilderMIPS::VisitStringEquals(HInvoke* invoke) { - if (kEmitCompilerReadBarrier && - !StringEqualsOptimizations(invoke).GetArgumentIsString() && - !StringEqualsOptimizations(invoke).GetNoReadBarrierForStringClass()) { - // No support for this odd case (String class is moveable, not in the boot image). 
- return; - } - LocationSummary* locations = new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h index 1c1ba40132..08d4e82139 100644 --- a/compiler/optimizing/intrinsics_mips.h +++ b/compiler/optimizing/intrinsics_mips.h @@ -30,14 +30,14 @@ namespace mips { class CodeGeneratorMIPS; class MipsAssembler; -class IntrinsicLocationsBuilderMIPS FINAL : public IntrinsicVisitor { +class IntrinsicLocationsBuilderMIPS final : public IntrinsicVisitor { public: explicit IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen); // Define visitor methods. #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - void Visit ## Name(HInvoke* invoke) OVERRIDE; + void Visit ## Name(HInvoke* invoke) override; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) #undef INTRINSICS_LIST @@ -55,14 +55,14 @@ class IntrinsicLocationsBuilderMIPS FINAL : public IntrinsicVisitor { DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS); }; -class IntrinsicCodeGeneratorMIPS FINAL : public IntrinsicVisitor { +class IntrinsicCodeGeneratorMIPS final : public IntrinsicVisitor { public: explicit IntrinsicCodeGeneratorMIPS(CodeGeneratorMIPS* codegen) : codegen_(codegen) {} // Define visitor methods. #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - void Visit ## Name(HInvoke* invoke) OVERRIDE; + void Visit ## Name(HInvoke* invoke) override; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) #undef INTRINSICS_LIST diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc index 0bd69c6ec8..2eb252908c 100644 --- a/compiler/optimizing/intrinsics_mips64.cc +++ b/compiler/optimizing/intrinsics_mips64.cc @@ -97,7 +97,7 @@ class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 { explicit IntrinsicSlowPathMIPS64(HInvoke* invoke) : SlowPathCodeMIPS64(invoke), invoke_(invoke) { } - void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen_in) override { CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in); __ Bind(GetEntryLabel()); @@ -126,7 +126,7 @@ class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 { __ Bc(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS64"; } + const char* GetDescription() const override { return "IntrinsicSlowPathMIPS64"; } private: // The instruction where this slow path is happening. @@ -1369,13 +1369,6 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringCompareTo(HInvoke* invoke) { // boolean java.lang.String.equals(Object anObject) void IntrinsicLocationsBuilderMIPS64::VisitStringEquals(HInvoke* invoke) { - if (kEmitCompilerReadBarrier && - !StringEqualsOptimizations(invoke).GetArgumentIsString() && - !StringEqualsOptimizations(invoke).GetNoReadBarrierForStringClass()) { - // No support for this odd case (String class is moveable, not in the boot image). 
- return; - } - LocationSummary* locations = new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h index 748b0b02b2..ca8bc8f55a 100644 --- a/compiler/optimizing/intrinsics_mips64.h +++ b/compiler/optimizing/intrinsics_mips64.h @@ -30,14 +30,14 @@ namespace mips64 { class CodeGeneratorMIPS64; class Mips64Assembler; -class IntrinsicLocationsBuilderMIPS64 FINAL : public IntrinsicVisitor { +class IntrinsicLocationsBuilderMIPS64 final : public IntrinsicVisitor { public: explicit IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen); // Define visitor methods. #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - void Visit ## Name(HInvoke* invoke) OVERRIDE; + void Visit ## Name(HInvoke* invoke) override; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) #undef INTRINSICS_LIST @@ -55,14 +55,14 @@ class IntrinsicLocationsBuilderMIPS64 FINAL : public IntrinsicVisitor { DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64); }; -class IntrinsicCodeGeneratorMIPS64 FINAL : public IntrinsicVisitor { +class IntrinsicCodeGeneratorMIPS64 final : public IntrinsicVisitor { public: explicit IntrinsicCodeGeneratorMIPS64(CodeGeneratorMIPS64* codegen) : codegen_(codegen) {} // Define visitor methods. #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - void Visit ## Name(HInvoke* invoke) OVERRIDE; + void Visit ## Name(HInvoke* invoke) override; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) #undef INTRINSICS_LIST diff --git a/compiler/optimizing/intrinsics_utils.h b/compiler/optimizing/intrinsics_utils.h index 8c69d9b643..41947f1ccd 100644 --- a/compiler/optimizing/intrinsics_utils.h +++ b/compiler/optimizing/intrinsics_utils.h @@ -47,7 +47,7 @@ class IntrinsicSlowPath : public SlowPathCode { return calling_convention_visitor.GetMethodLocation(); } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { Assembler* assembler = codegen->GetAssembler(); assembler->Bind(GetEntryLabel()); @@ -73,7 +73,7 @@ class IntrinsicSlowPath : public SlowPathCode { assembler->Jump(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPath"; } + const char* GetDescription() const override { return "IntrinsicSlowPath"; } private: // The instruction where this slow path is happening. 
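// The intrinsics headers in this change all declare their visitor methods through the same
// OPTIMIZING_INTRINSICS X-macro, so the OVERRIDE -> override swap only has to touch the one
// macro body per class. A minimal, self-contained sketch of that expansion pattern follows;
// the three-entry DEMO_INTRINSICS_LIST is hypothetical and stands in for the much larger
// intrinsics_list.h.
#include <iostream>

struct HInvoke {};  // Stand-in for the real ART IR node.

#define DEMO_INTRINSICS_LIST(V) \
  V(FloatFloatToRawIntBits)     \
  V(MathAbsDouble)              \
  V(StringEquals)

struct DemoIntrinsicVisitor {
  // Declares one virtual Visit method per list entry.
#define DEMO_VISIT(Name) virtual void Visit##Name(HInvoke* invoke) = 0;
  DEMO_INTRINSICS_LIST(DEMO_VISIT)
#undef DEMO_VISIT
  virtual ~DemoIntrinsicVisitor() = default;
};

struct PrintingVisitor final : public DemoIntrinsicVisitor {
  // Each expansion is now marked with the `override` keyword, not the old OVERRIDE macro.
#define DEMO_VISIT(Name) \
  void Visit##Name(HInvoke* /* invoke */) override { std::cout << #Name << '\n'; }
  DEMO_INTRINSICS_LIST(DEMO_VISIT)
#undef DEMO_VISIT
};

int main() {
  PrintingVisitor visitor;
  visitor.VisitStringEquals(nullptr);  // Prints "StringEquals".
  return 0;
}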
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index 5c7be54037..3504d7a6f8 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -82,7 +82,7 @@ class ReadBarrierSystemArrayCopySlowPathX86 : public SlowPathCode { DCHECK(kUseBakerReadBarrier); } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); LocationSummary* locations = instruction_->GetLocations(); DCHECK(locations->CanCall()); @@ -160,7 +160,7 @@ class ReadBarrierSystemArrayCopySlowPathX86 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathX86"; } + const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathX86"; } private: DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86); @@ -922,13 +922,6 @@ void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitStringEquals(HInvoke* invoke) { - if (kEmitCompilerReadBarrier && - !StringEqualsOptimizations(invoke).GetArgumentIsString() && - !StringEqualsOptimizations(invoke).GetNoReadBarrierForStringClass()) { - // No support for this odd case (String class is moveable, not in the boot image). - return; - } - LocationSummary* locations = new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h index e3555e78fc..ae150dad43 100644 --- a/compiler/optimizing/intrinsics_x86.h +++ b/compiler/optimizing/intrinsics_x86.h @@ -30,14 +30,14 @@ namespace x86 { class CodeGeneratorX86; class X86Assembler; -class IntrinsicLocationsBuilderX86 FINAL : public IntrinsicVisitor { +class IntrinsicLocationsBuilderX86 final : public IntrinsicVisitor { public: explicit IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen); // Define visitor methods. #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - void Visit ## Name(HInvoke* invoke) OVERRIDE; + void Visit ## Name(HInvoke* invoke) override; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) #undef INTRINSICS_LIST @@ -55,14 +55,14 @@ class IntrinsicLocationsBuilderX86 FINAL : public IntrinsicVisitor { DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86); }; -class IntrinsicCodeGeneratorX86 FINAL : public IntrinsicVisitor { +class IntrinsicCodeGeneratorX86 final : public IntrinsicVisitor { public: explicit IntrinsicCodeGeneratorX86(CodeGeneratorX86* codegen) : codegen_(codegen) {} // Define visitor methods. #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) 
\ - void Visit ## Name(HInvoke* invoke) OVERRIDE; + void Visit ## Name(HInvoke* invoke) override; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) #undef INTRINSICS_LIST diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index b5afe931ff..96f6eaaf33 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -80,7 +80,7 @@ class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode { DCHECK(kUseBakerReadBarrier); } - void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); LocationSummary* locations = instruction_->GetLocations(); DCHECK(locations->CanCall()); @@ -118,7 +118,7 @@ class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode { __ jmp(GetExitLabel()); } - const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathX86_64"; } + const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathX86_64"; } private: DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86_64); @@ -1230,13 +1230,6 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCompareTo(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitStringEquals(HInvoke* invoke) { - if (kEmitCompilerReadBarrier && - !StringEqualsOptimizations(invoke).GetArgumentIsString() && - !StringEqualsOptimizations(invoke).GetNoReadBarrierForStringClass()) { - // No support for this odd case (String class is moveable, not in the boot image). - return; - } - LocationSummary* locations = new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h index 5cb601edfe..199cfede1a 100644 --- a/compiler/optimizing/intrinsics_x86_64.h +++ b/compiler/optimizing/intrinsics_x86_64.h @@ -30,14 +30,14 @@ namespace x86_64 { class CodeGeneratorX86_64; class X86_64Assembler; -class IntrinsicLocationsBuilderX86_64 FINAL : public IntrinsicVisitor { +class IntrinsicLocationsBuilderX86_64 final : public IntrinsicVisitor { public: explicit IntrinsicLocationsBuilderX86_64(CodeGeneratorX86_64* codegen); // Define visitor methods. #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \ - void Visit ## Name(HInvoke* invoke) OVERRIDE; + void Visit ## Name(HInvoke* invoke) override; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) #undef INTRINSICS_LIST @@ -55,14 +55,14 @@ class IntrinsicLocationsBuilderX86_64 FINAL : public IntrinsicVisitor { DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86_64); }; -class IntrinsicCodeGeneratorX86_64 FINAL : public IntrinsicVisitor { +class IntrinsicCodeGeneratorX86_64 final : public IntrinsicVisitor { public: explicit IntrinsicCodeGeneratorX86_64(CodeGeneratorX86_64* codegen) : codegen_(codegen) {} // Define visitor methods. #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) 
\ - void Visit ## Name(HInvoke* invoke) OVERRIDE; + void Visit ## Name(HInvoke* invoke) override; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) #undef INTRINSICS_LIST diff --git a/compiler/optimizing/licm.h b/compiler/optimizing/licm.h index f72d195ab2..9cafddb05a 100644 --- a/compiler/optimizing/licm.h +++ b/compiler/optimizing/licm.h @@ -33,7 +33,7 @@ class LICM : public HOptimization { : HOptimization(graph, name, stats), side_effects_(side_effects) {} - bool Run() OVERRIDE; + bool Run() override; static constexpr const char* kLoopInvariantCodeMotionPassName = "licm"; diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc index 0fb90fb370..60f513ca48 100644 --- a/compiler/optimizing/live_ranges_test.cc +++ b/compiler/optimizing/live_ranges_test.cc @@ -38,7 +38,7 @@ HGraph* LiveRangesTest::BuildGraph(const std::vector<uint16_t>& data) { // on how instructions are ordered. RemoveSuspendChecks(graph); // `Inline` conditions into ifs. - PrepareForRegisterAllocation(graph).Run(); + PrepareForRegisterAllocation(graph, *compiler_options_).Run(); return graph; } diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc index 72f995e773..f11f7a9779 100644 --- a/compiler/optimizing/liveness_test.cc +++ b/compiler/optimizing/liveness_test.cc @@ -47,7 +47,7 @@ static void DumpBitVector(BitVector* vector, void LivenessTest::TestCode(const std::vector<uint16_t>& data, const char* expected) { HGraph* graph = CreateCFG(data); // `Inline` conditions into ifs. - PrepareForRegisterAllocation(graph).Run(); + PrepareForRegisterAllocation(graph, *compiler_options_).Run(); std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_); SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator()); liveness.Analyze(); diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h index 769a3f1b59..08d9309a3e 100644 --- a/compiler/optimizing/load_store_analysis.h +++ b/compiler/optimizing/load_store_analysis.h @@ -492,12 +492,12 @@ class HeapLocationCollector : public HGraphVisitor { HeapLocation::kDeclaringClassDefIndexForArrays); } - void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE { + void VisitInstanceFieldGet(HInstanceFieldGet* instruction) override { VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo()); CreateReferenceInfoForReferenceType(instruction); } - void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE { + void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override { HeapLocation* location = VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo()); has_heap_stores_ = true; if (location->GetReferenceInfo()->IsSingleton()) { @@ -523,12 +523,12 @@ class HeapLocationCollector : public HGraphVisitor { } } - void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE { + void VisitStaticFieldGet(HStaticFieldGet* instruction) override { VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo()); CreateReferenceInfoForReferenceType(instruction); } - void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE { + void VisitStaticFieldSet(HStaticFieldSet* instruction) override { VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo()); has_heap_stores_ = true; } @@ -536,7 +536,7 @@ class HeapLocationCollector : public HGraphVisitor { // We intentionally don't collect HUnresolvedInstanceField/HUnresolvedStaticField accesses // since we 
cannot accurately track the fields. - void VisitArrayGet(HArrayGet* instruction) OVERRIDE { + void VisitArrayGet(HArrayGet* instruction) override { HInstruction* array = instruction->InputAt(0); HInstruction* index = instruction->InputAt(1); DataType::Type type = instruction->GetType(); @@ -544,7 +544,7 @@ class HeapLocationCollector : public HGraphVisitor { CreateReferenceInfoForReferenceType(instruction); } - void VisitArraySet(HArraySet* instruction) OVERRIDE { + void VisitArraySet(HArraySet* instruction) override { HInstruction* array = instruction->InputAt(0); HInstruction* index = instruction->InputAt(1); DataType::Type type = instruction->GetComponentType(); @@ -552,7 +552,7 @@ class HeapLocationCollector : public HGraphVisitor { has_heap_stores_ = true; } - void VisitVecLoad(HVecLoad* instruction) OVERRIDE { + void VisitVecLoad(HVecLoad* instruction) override { HInstruction* array = instruction->InputAt(0); HInstruction* index = instruction->InputAt(1); DataType::Type type = instruction->GetPackedType(); @@ -560,7 +560,7 @@ class HeapLocationCollector : public HGraphVisitor { CreateReferenceInfoForReferenceType(instruction); } - void VisitVecStore(HVecStore* instruction) OVERRIDE { + void VisitVecStore(HVecStore* instruction) override { HInstruction* array = instruction->InputAt(0); HInstruction* index = instruction->InputAt(1); DataType::Type type = instruction->GetPackedType(); @@ -568,7 +568,7 @@ class HeapLocationCollector : public HGraphVisitor { has_heap_stores_ = true; } - void VisitInstruction(HInstruction* instruction) OVERRIDE { + void VisitInstruction(HInstruction* instruction) override { // Any new-instance or new-array cannot alias with references that // pre-exist the new-instance/new-array. We append entries into // ref_info_array_ which keeps track of the order of creation @@ -580,7 +580,7 @@ class HeapLocationCollector : public HGraphVisitor { CreateReferenceInfoForReferenceType(instruction); } - void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) OVERRIDE { + void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) override { has_monitor_operations_ = true; } @@ -605,7 +605,7 @@ class LoadStoreAnalysis : public HOptimization { return heap_location_collector_; } - bool Run() OVERRIDE; + bool Run() override; static constexpr const char* kLoadStoreAnalysisPassName = "load_store_analysis"; diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc index 28ac94273c..b33d0f488e 100644 --- a/compiler/optimizing/load_store_elimination.cc +++ b/compiler/optimizing/load_store_elimination.cc @@ -107,7 +107,7 @@ class LSEVisitor : public HGraphDelegateVisitor { singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)) { } - void VisitBasicBlock(HBasicBlock* block) OVERRIDE { + void VisitBasicBlock(HBasicBlock* block) override { // Populate the heap_values array for this block. // TODO: try to reuse the heap_values array from one predecessor if possible. 
if (block->IsLoopHeader()) { @@ -656,13 +656,13 @@ class LSEVisitor : public HGraphDelegateVisitor { } } - void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE { + void VisitInstanceFieldGet(HInstanceFieldGet* instruction) override { HInstruction* object = instruction->InputAt(0); const FieldInfo& field = instruction->GetFieldInfo(); VisitGetLocation(instruction, heap_location_collector_.GetFieldHeapLocation(object, &field)); } - void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE { + void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override { HInstruction* object = instruction->InputAt(0); const FieldInfo& field = instruction->GetFieldInfo(); HInstruction* value = instruction->InputAt(1); @@ -670,29 +670,29 @@ class LSEVisitor : public HGraphDelegateVisitor { VisitSetLocation(instruction, idx, value); } - void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE { + void VisitStaticFieldGet(HStaticFieldGet* instruction) override { HInstruction* cls = instruction->InputAt(0); const FieldInfo& field = instruction->GetFieldInfo(); VisitGetLocation(instruction, heap_location_collector_.GetFieldHeapLocation(cls, &field)); } - void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE { + void VisitStaticFieldSet(HStaticFieldSet* instruction) override { HInstruction* cls = instruction->InputAt(0); const FieldInfo& field = instruction->GetFieldInfo(); size_t idx = heap_location_collector_.GetFieldHeapLocation(cls, &field); VisitSetLocation(instruction, idx, instruction->InputAt(1)); } - void VisitArrayGet(HArrayGet* instruction) OVERRIDE { + void VisitArrayGet(HArrayGet* instruction) override { VisitGetLocation(instruction, heap_location_collector_.GetArrayHeapLocation(instruction)); } - void VisitArraySet(HArraySet* instruction) OVERRIDE { + void VisitArraySet(HArraySet* instruction) override { size_t idx = heap_location_collector_.GetArrayHeapLocation(instruction); VisitSetLocation(instruction, idx, instruction->InputAt(2)); } - void VisitDeoptimize(HDeoptimize* instruction) { + void VisitDeoptimize(HDeoptimize* instruction) override { const ScopedArenaVector<HInstruction*>& heap_values = heap_values_for_[instruction->GetBlock()->GetBlockId()]; for (HInstruction* heap_value : heap_values) { @@ -743,15 +743,15 @@ class LSEVisitor : public HGraphDelegateVisitor { } } - void VisitReturn(HReturn* instruction) OVERRIDE { + void VisitReturn(HReturn* instruction) override { HandleExit(instruction->GetBlock()); } - void VisitReturnVoid(HReturnVoid* return_void) OVERRIDE { + void VisitReturnVoid(HReturnVoid* return_void) override { HandleExit(return_void->GetBlock()); } - void VisitThrow(HThrow* throw_instruction) OVERRIDE { + void VisitThrow(HThrow* throw_instruction) override { HandleExit(throw_instruction->GetBlock()); } @@ -777,35 +777,35 @@ class LSEVisitor : public HGraphDelegateVisitor { } } - void VisitInvoke(HInvoke* invoke) OVERRIDE { + void VisitInvoke(HInvoke* invoke) override { HandleInvoke(invoke); } - void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE { + void VisitClinitCheck(HClinitCheck* clinit) override { HandleInvoke(clinit); } - void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) OVERRIDE { + void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) override { // Conservatively treat it as an invocation. 
HandleInvoke(instruction); } - void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) OVERRIDE { + void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) override { // Conservatively treat it as an invocation. HandleInvoke(instruction); } - void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) OVERRIDE { + void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) override { // Conservatively treat it as an invocation. HandleInvoke(instruction); } - void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) OVERRIDE { + void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) override { // Conservatively treat it as an invocation. HandleInvoke(instruction); } - void VisitNewInstance(HNewInstance* new_instance) OVERRIDE { + void VisitNewInstance(HNewInstance* new_instance) override { ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_instance); if (ref_info == nullptr) { // new_instance isn't used for field accesses. No need to process it. @@ -829,7 +829,7 @@ class LSEVisitor : public HGraphDelegateVisitor { } } - void VisitNewArray(HNewArray* new_array) OVERRIDE { + void VisitNewArray(HNewArray* new_array) override { ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_array); if (ref_info == nullptr) { // new_array isn't used for array accesses. No need to process it. diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h index 408386bd82..f7ba41a1af 100644 --- a/compiler/optimizing/load_store_elimination.h +++ b/compiler/optimizing/load_store_elimination.h @@ -35,7 +35,7 @@ class LoadStoreElimination : public HOptimization { side_effects_(side_effects), lsa_(lsa) {} - bool Run() OVERRIDE; + bool Run() override; static constexpr const char* kLoadStoreEliminationPassName = "load_store_elimination"; diff --git a/compiler/optimizing/loop_analysis.cc b/compiler/optimizing/loop_analysis.cc index d355cedb35..2ae3683ffa 100644 --- a/compiler/optimizing/loop_analysis.cc +++ b/compiler/optimizing/loop_analysis.cc @@ -87,14 +87,14 @@ class ArchDefaultLoopHelper : public ArchNoOptsLoopHelper { // Maximum number of instructions to be created as a result of full unrolling. static constexpr uint32_t kScalarHeuristicFullyUnrolledMaxInstrThreshold = 35; - bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const OVERRIDE { + bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const override { return analysis_info->HasLongTypeInstructions() || IsLoopTooBig(analysis_info, kScalarHeuristicMaxBodySizeInstr, kScalarHeuristicMaxBodySizeBlocks); } - uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const OVERRIDE { + uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const override { int64_t trip_count = analysis_info->GetTripCount(); // Unroll only loops with known trip count. 
    if (trip_count == LoopAnalysisInfo::kUnknownTripCount) {
@@ -108,9 +108,9 @@ class ArchDefaultLoopHelper : public ArchNoOptsLoopHelper {
     return desired_unrolling_factor;
   }
-  bool IsLoopPeelingEnabled() const OVERRIDE { return true; }
+  bool IsLoopPeelingEnabled() const override { return true; }
-  bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info) const OVERRIDE {
+  bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info) const override {
     int64_t trip_count = analysis_info->GetTripCount();
     // We assume that trip count is known.
     DCHECK_NE(trip_count, LoopAnalysisInfo::kUnknownTripCount);
@@ -144,7 +144,7 @@ class Arm64LoopHelper : public ArchDefaultLoopHelper {
   // Loop's maximum basic block count. Loops with higher count will not be peeled/unrolled.
   static constexpr uint32_t kArm64ScalarHeuristicMaxBodySizeBlocks = 8;
-  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const OVERRIDE {
+  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const override {
     return IsLoopTooBig(loop_analysis_info,
                         kArm64ScalarHeuristicMaxBodySizeInstr,
                         kArm64ScalarHeuristicMaxBodySizeBlocks);
@@ -153,7 +153,7 @@ class Arm64LoopHelper : public ArchDefaultLoopHelper {
   uint32_t GetSIMDUnrollingFactor(HBasicBlock* block,
                                   int64_t trip_count,
                                   uint32_t max_peel,
-                                  uint32_t vector_length) const OVERRIDE {
+                                  uint32_t vector_length) const override {
     // Don't unroll with insufficient iterations.
     // TODO: Unroll loops with unknown trip count.
     DCHECK_NE(vector_length, 0u);
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 7d66155b39..12b180d5ff 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -351,7 +351,10 @@ static bool HasReductionFormat(HInstruction* reduction, HInstruction* phi) {
 // Translates vector operation to reduction kind.
 static HVecReduce::ReductionKind GetReductionKind(HVecOperation* reduction) {
-  if (reduction->IsVecAdd() || reduction->IsVecSub() || reduction->IsVecSADAccumulate()) {
+  if (reduction->IsVecAdd() ||
+      reduction->IsVecSub() ||
+      reduction->IsVecSADAccumulate() ||
+      reduction->IsVecDotProd()) {
     return HVecReduce::kSum;
   }
   LOG(FATAL) << "Unsupported SIMD reduction " << reduction->GetId();
@@ -431,6 +434,23 @@ static void PeelByCount(HLoopInformation* loop_info, int count) {
   }
 }
+// Returns the narrower of the types of instructions a and b, looking through type conversions.
+static DataType::Type GetNarrowerType(HInstruction* a, HInstruction* b) {
+  DataType::Type type = a->GetType();
+  if (DataType::Size(b->GetType()) < DataType::Size(type)) {
+    type = b->GetType();
+  }
+  if (a->IsTypeConversion() &&
+      DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(type)) {
+    type = a->InputAt(0)->GetType();
+  }
+  if (b->IsTypeConversion() &&
+      DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(type)) {
+    type = b->InputAt(0)->GetType();
+  }
+  return type;
+}
+
 //
 // Public methods.
 //
@@ -1289,6 +1309,7 @@ bool HLoopOptimization::VectorizeDef(LoopNode* node,
   DataType::Type type = instruction->GetType();
   // Recognize SAD idiom or direct reduction.
  if (VectorizeSADIdiom(node, instruction, generate_code, type, restrictions) ||
+      VectorizeDotProdIdiom(node, instruction, generate_code, type, restrictions) ||
       (TrySetVectorType(type, &restrictions) &&
        VectorizeUse(node, instruction, generate_code, type, restrictions))) {
     if (generate_code) {
@@ -1531,11 +1552,11 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
     case DataType::Type::kInt8:
-      *restrictions |= kNoDiv | kNoReduction;
+      *restrictions |= kNoDiv | kNoReduction | kNoDotProd;
       return TrySetVectorLength(8);
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
-      *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction;
+      *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoDotProd;
       return TrySetVectorLength(4);
     case DataType::Type::kInt32:
       *restrictions |= kNoDiv | kNoWideSAD;
@@ -1580,12 +1601,23 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
     case DataType::Type::kInt8:
-      *restrictions |=
-          kNoMul | kNoDiv | kNoShift | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD;
+      *restrictions |= kNoMul |
+                       kNoDiv |
+                       kNoShift |
+                       kNoAbs |
+                       kNoSignedHAdd |
+                       kNoUnroundedHAdd |
+                       kNoSAD |
+                       kNoDotProd;
       return TrySetVectorLength(16);
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
-      *restrictions |= kNoDiv | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD;
+      *restrictions |= kNoDiv |
+                       kNoAbs |
+                       kNoSignedHAdd |
+                       kNoUnroundedHAdd |
+                       kNoSAD |
+                       kNoDotProd;
       return TrySetVectorLength(8);
     case DataType::Type::kInt32:
       *restrictions |= kNoDiv | kNoSAD;
@@ -1610,11 +1642,11 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
     case DataType::Type::kInt8:
-      *restrictions |= kNoDiv;
+      *restrictions |= kNoDiv | kNoDotProd;
       return TrySetVectorLength(16);
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
-      *restrictions |= kNoDiv | kNoStringCharAt;
+      *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
       return TrySetVectorLength(8);
     case DataType::Type::kInt32:
       *restrictions |= kNoDiv;
@@ -1639,11 +1671,11 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
     case DataType::Type::kInt8:
-      *restrictions |= kNoDiv;
+      *restrictions |= kNoDiv | kNoDotProd;
       return TrySetVectorLength(16);
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
-      *restrictions |= kNoDiv | kNoStringCharAt;
+      *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
       return TrySetVectorLength(8);
     case DataType::Type::kInt32:
       *restrictions |= kNoDiv;
@@ -2071,18 +2103,7 @@ bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
   HInstruction* r = a;
   HInstruction* s = b;
   bool is_unsigned = false;
-  DataType::Type sub_type = a->GetType();
-  if (DataType::Size(b->GetType()) < DataType::Size(sub_type)) {
-    sub_type = b->GetType();
-  }
-  if (a->IsTypeConversion() &&
-      DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
-    sub_type = a->InputAt(0)->GetType();
-  }
-  if (b->IsTypeConversion() &&
-      DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
-    sub_type = b->InputAt(0)->GetType();
-  }
+  DataType::Type sub_type = GetNarrowerType(a, b);
   if (reduction_type != sub_type &&
       (!IsNarrowerOperands(a, b, sub_type, &r, &s, &is_unsigned) || is_unsigned)) {
     return false;
@@ -2123,6 +2144,75 @@ bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
   return false;
 }
 
+// Recognizes the following dot product idiom:
+//   q += a * b for operands a, b whose type is narrower than the reduction one,
+// provided that the operands either have the same type or are promoted to a wider form.
+// Since this may involve a vector length change, the idiom is handled by going directly
+// to a dot product node (rather than relying on combining finer-grained nodes later).
+bool HLoopOptimization::VectorizeDotProdIdiom(LoopNode* node,
+                                              HInstruction* instruction,
+                                              bool generate_code,
+                                              DataType::Type reduction_type,
+                                              uint64_t restrictions) {
+  if (!instruction->IsAdd() || (reduction_type != DataType::Type::kInt32)) {
+    return false;
+  }
+
+  HInstruction* q = instruction->InputAt(0);
+  HInstruction* v = instruction->InputAt(1);
+  if (!v->IsMul() || v->GetType() != reduction_type) {
+    return false;
+  }
+
+  HInstruction* a = v->InputAt(0);
+  HInstruction* b = v->InputAt(1);
+  HInstruction* r = a;
+  HInstruction* s = b;
+  DataType::Type op_type = GetNarrowerType(a, b);
+  bool is_unsigned = false;
+
+  if (!IsNarrowerOperands(a, b, op_type, &r, &s, &is_unsigned)) {
+    return false;
+  }
+  op_type = HVecOperation::ToProperType(op_type, is_unsigned);
+
+  if (!TrySetVectorType(op_type, &restrictions) ||
+      HasVectorRestrictions(restrictions, kNoDotProd)) {
+    return false;
+  }
+
+  DCHECK(r != nullptr && s != nullptr);
+  // Accept dot product idiom for vectorizable operands. Vectorized code uses the shorthand
+  // idiomatic operation. Sequential code uses the original scalar expressions.
+  if (generate_code && vector_mode_ != kVector) {  // de-idiom
+    r = a;
+    s = b;
+  }
+  if (VectorizeUse(node, q, generate_code, op_type, restrictions) &&
+      VectorizeUse(node, r, generate_code, op_type, restrictions) &&
+      VectorizeUse(node, s, generate_code, op_type, restrictions)) {
+    if (generate_code) {
+      if (vector_mode_ == kVector) {
+        vector_map_->Put(instruction, new (global_allocator_) HVecDotProd(
+            global_allocator_,
+            vector_map_->Get(q),
+            vector_map_->Get(r),
+            vector_map_->Get(s),
+            reduction_type,
+            is_unsigned,
+            GetOtherVL(reduction_type, op_type, vector_length_),
+            kNoDexPc));
+        MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
+      } else {
+        GenerateVecOp(v, vector_map_->Get(r), vector_map_->Get(s), reduction_type);
+        GenerateVecOp(instruction, vector_map_->Get(q), vector_map_->Get(v), reduction_type);
+      }
+    }
+    return true;
+  }
+  return false;
+}
+
 //
 // Vectorization heuristics.
 //
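// For reference, the scalar shape that VectorizeDotProdIdiom matches is a reduction like the
// following (a hypothetical C++ rendering of the Java original; int8_t stands in for byte).
// The accumulator q has the 32-bit reduction type while both multiplicands are narrower, so a
// single vector dot product step can fold several narrow lanes into each 32-bit accumulator lane.
#include <cstddef>
#include <cstdint>

int32_t DotProd(const int8_t* a, const int8_t* b, size_t n) {
  int32_t q = 0;
  for (size_t i = 0; i < n; ++i) {
    // Seen by the matcher as Add(q, Mul(conv(a[i]), conv(b[i]))), where both
    // conversions widen from int8 to the kInt32 reduction type.
    q += a[i] * b[i];
  }
  return q;
}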
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 644b740ed4..1a842c4bf3 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -43,7 +43,7 @@ class HLoopOptimization : public HOptimization {
                     OptimizingCompilerStats* stats,
                     const char* name = kLoopOptimizationPassName);
-  bool Run() OVERRIDE;
+  bool Run() override;
   static constexpr const char* kLoopOptimizationPassName = "loop_optimization";
@@ -82,6 +82,7 @@ class HLoopOptimization : public HOptimization {
     kNoReduction = 1 << 9,   // no reduction
     kNoSAD       = 1 << 10,  // no sum of absolute differences (SAD)
     kNoWideSAD   = 1 << 11,  // no sum of absolute differences (SAD) with operand widening
+    kNoDotProd   = 1 << 12,  // no dot product
   };
   /*
@@ -217,6 +218,11 @@ class HLoopOptimization : public HOptimization {
                          bool generate_code,
                          DataType::Type type,
                          uint64_t restrictions);
+  bool VectorizeDotProdIdiom(LoopNode* node,
+                             HInstruction* instruction,
+                             bool generate_code,
+                             DataType::Type type,
+                             uint64_t restrictions);
   // Vectorization heuristics.
   Alignment ComputeAlignment(HInstruction* offset,
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 79a7e2c858..aad06b91b6 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -3180,4 +3180,77 @@ std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind) {
   }
 }
+// Check that intrinsic enum values fit within space set aside in ArtMethod modifier flags.
+#define CHECK_INTRINSICS_ENUM_VALUES(Name, InvokeType, _, SideEffects, Exceptions, ...) \
+  static_assert( \
+      static_cast<uint32_t>(Intrinsics::k ## Name) <= (kAccIntrinsicBits >> CTZ(kAccIntrinsicBits)), \
+      "Intrinsics enumeration space overflow.");
+#include "intrinsics_list.h"
+  INTRINSICS_LIST(CHECK_INTRINSICS_ENUM_VALUES)
+#undef INTRINSICS_LIST
+#undef CHECK_INTRINSICS_ENUM_VALUES
+
+// Function that returns whether an intrinsic needs an environment or not.
+static inline IntrinsicNeedsEnvironmentOrCache NeedsEnvironmentOrCacheIntrinsic(Intrinsics i) {
+  switch (i) {
+    case Intrinsics::kNone:
+      return kNeedsEnvironmentOrCache;  // Nonsensical for an intrinsic.
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+    case Intrinsics::k ## Name: \
+      return NeedsEnvOrCache;
+#include "intrinsics_list.h"
+      INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+  }
+  return kNeedsEnvironmentOrCache;
+}
+
+// Function that returns whether an intrinsic has side effects.
+static inline IntrinsicSideEffects GetSideEffectsIntrinsic(Intrinsics i) {
+  switch (i) {
+    case Intrinsics::kNone:
+      return kAllSideEffects;
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+    case Intrinsics::k ## Name: \
+      return SideEffects;
+#include "intrinsics_list.h"
+      INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+  }
+  return kAllSideEffects;
+}
+
+// Function that returns whether an intrinsic can throw exceptions.
+static inline IntrinsicExceptions GetExceptionsIntrinsic(Intrinsics i) {
+  switch (i) {
+    case Intrinsics::kNone:
+      return kCanThrow;
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+    case Intrinsics::k ## Name: \
+      return Exceptions;
+#include "intrinsics_list.h"
+      INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+  }
+  return kCanThrow;
+}
+
+void HInvoke::SetResolvedMethod(ArtMethod* method) {
+  // TODO: b/65872996 The intent is that polymorphic signature methods should
+  // be compiler intrinsics. At present, they are only interpreter intrinsics.
+  if (method != nullptr &&
+      method->IsIntrinsic() &&
+      !method->IsPolymorphicSignature()) {
+    Intrinsics intrinsic = static_cast<Intrinsics>(method->GetIntrinsic());
+    SetIntrinsic(intrinsic,
+                 NeedsEnvironmentOrCacheIntrinsic(intrinsic),
+                 GetSideEffectsIntrinsic(intrinsic),
+                 GetExceptionsIntrinsic(intrinsic));
+  }
+  resolved_method_ = method;
+}
+
 }  // namespace art
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 8b9e1da0d3..7921061326 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -29,6 +29,7 @@
 #include "base/quasi_atomic.h"
 #include "base/stl_util.h"
 #include "base/transform_array_ref.h"
+#include "art_method.h"
 #include "data_type.h"
 #include "deoptimization_kind.h"
 #include "dex/dex_file.h"
@@ -128,6 +129,7 @@ enum GraphAnalysisResult {
   kAnalysisInvalidBytecode,
   kAnalysisFailThrowCatchLoop,
   kAnalysisFailAmbiguousArrayOp,
+  kAnalysisFailIrreducibleLoopAndStringInit,
   kAnalysisSuccess,
 };
@@ -1453,6 +1455,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
   M(VecSetScalars, VecOperation)                    \
   M(VecMultiplyAccumulate, VecOperation)            \
   M(VecSADAccumulate, VecOperation)                 \
+  M(VecDotProd, VecOperation)                       \
   M(VecLoad, VecMemoryOperation)                    \
   M(VecStore, VecMemoryOperation)                   \
@@ -1529,12 +1532,12 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
   private:                                                              \
   H##type& operator=(const H##type&) = delete;                          \
   public:                                                               \
-  const char* DebugName() const OVERRIDE { return #type; }              \
-  HInstruction* Clone(ArenaAllocator* arena) const OVERRIDE {           \
+  const char* DebugName() const override { return #type; }              \
+  HInstruction* Clone(ArenaAllocator* arena) const override {           \
    DCHECK(IsClonable());                                               \
     return new (arena) H##type(*this->As##type());                      \
   }                                                                     \
-  void Accept(HGraphVisitor* visitor) OVERRIDE
+  void Accept(HGraphVisitor* visitor) override
 #define DECLARE_ABSTRACT_INSTRUCTION(type)                              \
   private:                                                              \
@@ -1626,6 +1629,21 @@ using HConstInputsRef = TransformArrayRef<const HUserRecord<HInstruction*>, HInp
  * the same, and any reference read depends on any reference read without
  * further regard of its type).
  *
+ * kDependsOnGCBit is defined in the following way: instructions with kDependsOnGCBit must
+ * not be live across a point where garbage collection might happen.
+ *
+ * Note: Instructions with kCanTriggerGCBit do not depend on each other.
+ *
+ * kCanTriggerGCBit must be used for instructions where, from the compiler's perspective, GC
+ * might happen on the path across them (i.e. between the instruction and the next one in
+ * the IR).
+ *
+ * Note: Instructions which can cause GC only on a fatal slow path do not need
+ * kCanTriggerGCBit, as execution never returns to the instruction following the exceptional
+ * one. However, execution may return to compiled code if there is a catch block in the
+ * current method; for this purpose the TryBoundary exit instruction has kCanTriggerGCBit
+ * set.
+ *
 * The internal representation uses 38-bit and is described in the table below.
 * The first line indicates the side effect, and for field/array accesses the
 * second line indicates the type of the access (in the order of the
@@ -1698,10 +1716,17 @@ class SideEffects : public ValueObject {
     return SideEffects(TypeFlag(type, kArrayReadOffset));
   }
+  // Returns whether GC might happen across this instruction, from the compiler's perspective,
+  // such that the next instruction in the IR would observe it.
+  //
+  // See the SideEffects class comments.
   static SideEffects CanTriggerGC() {
     return SideEffects(1ULL << kCanTriggerGCBit);
   }
+  // Returns whether the instruction must not be live across a GC point.
+  //
+  // See the SideEffects class comments.
   static SideEffects DependsOnGC() {
     return SideEffects(1ULL << kDependsOnGCBit);
   }
@@ -2079,6 +2104,19 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
     return false;
   }
+  // If this instruction will do an implicit null check, return the `HNullCheck` associated
+  // with it. Otherwise return null.
+  HNullCheck* GetImplicitNullCheck() const {
+    // Find the first previous instruction which is not a move.
+    HInstruction* first_prev_not_move = GetPreviousDisregardingMoves();
+    if (first_prev_not_move != nullptr &&
+        first_prev_not_move->IsNullCheck() &&
+        first_prev_not_move->IsEmittedAtUseSite()) {
+      return first_prev_not_move->AsNullCheck();
+    }
+    return nullptr;
+  }
+
   virtual bool IsActualObject() const {
     return GetType() == DataType::Type::kReference;
   }
@@ -2582,7 +2620,7 @@ class HBackwardInstructionIterator : public ValueObject {
 class HVariableInputSizeInstruction : public HInstruction {
  public:
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override {
     return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
   }
@@ -2632,7 +2670,7 @@ class HExpression : public HInstruction {
   virtual ~HExpression() {}
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
     return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
   }
@@ -2654,7 +2692,7 @@ class HExpression<0> : public HInstruction {
   virtual ~HExpression() {}
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
     return ArrayRef<HUserRecord<HInstruction*>>();
   }
@@ -2667,13 +2705,13 @@ class HExpression<0> : public HInstruction {
 // Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow
 // instruction that branches to the exit block.
-class HReturnVoid FINAL : public HExpression<0> {
+class HReturnVoid final : public HExpression<0> {
  public:
   explicit HReturnVoid(uint32_t dex_pc = kNoDexPc)
       : HExpression(kReturnVoid, SideEffects::None(), dex_pc) {
   }
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
   DECLARE_INSTRUCTION(ReturnVoid);
@@ -2683,14 +2721,14 @@
 // Represents dex's RETURN opcodes. A HReturn is a control flow
 // instruction that branches to the exit block.
-class HReturn FINAL : public HExpression<1> { +class HReturn final : public HExpression<1> { public: explicit HReturn(HInstruction* value, uint32_t dex_pc = kNoDexPc) : HExpression(kReturn, SideEffects::None(), dex_pc) { SetRawInputAt(0, value); } - bool IsControlFlow() const OVERRIDE { return true; } + bool IsControlFlow() const override { return true; } DECLARE_INSTRUCTION(Return); @@ -2698,7 +2736,7 @@ class HReturn FINAL : public HExpression<1> { DEFAULT_COPY_CONSTRUCTOR(Return); }; -class HPhi FINAL : public HVariableInputSizeInstruction { +class HPhi final : public HVariableInputSizeInstruction { public: HPhi(ArenaAllocator* allocator, uint32_t reg_number, @@ -2722,7 +2760,7 @@ class HPhi FINAL : public HVariableInputSizeInstruction { SetPackedFlag<kFlagCanBeNull>(true); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } // Returns a type equivalent to the given `type`, but that a `HPhi` can hold. static DataType::Type ToPhiType(DataType::Type type) { @@ -2742,7 +2780,7 @@ class HPhi FINAL : public HVariableInputSizeInstruction { SetPackedField<TypeField>(new_type); } - bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); } + bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); } void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); } uint32_t GetRegNumber() const { return reg_number_; } @@ -2800,13 +2838,13 @@ class HPhi FINAL : public HVariableInputSizeInstruction { // The exit instruction is the only instruction of the exit block. // Instructions aborting the method (HThrow and HReturn) must branch to the // exit block. -class HExit FINAL : public HExpression<0> { +class HExit final : public HExpression<0> { public: explicit HExit(uint32_t dex_pc = kNoDexPc) : HExpression(kExit, SideEffects::None(), dex_pc) { } - bool IsControlFlow() const OVERRIDE { return true; } + bool IsControlFlow() const override { return true; } DECLARE_INSTRUCTION(Exit); @@ -2815,14 +2853,14 @@ class HExit FINAL : public HExpression<0> { }; // Jumps from one block to another. -class HGoto FINAL : public HExpression<0> { +class HGoto final : public HExpression<0> { public: explicit HGoto(uint32_t dex_pc = kNoDexPc) : HExpression(kGoto, SideEffects::None(), dex_pc) { } - bool IsClonable() const OVERRIDE { return true; } - bool IsControlFlow() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } + bool IsControlFlow() const override { return true; } HBasicBlock* GetSuccessor() const { return GetBlock()->GetSingleSuccessor(); @@ -2840,7 +2878,7 @@ class HConstant : public HExpression<0> { : HExpression(kind, type, SideEffects::None(), dex_pc) { } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } // Is this constant -1 in the arithmetic sense? 
virtual bool IsMinusOne() const { return false; } @@ -2859,15 +2897,15 @@ class HConstant : public HExpression<0> { DEFAULT_COPY_CONSTRUCTOR(Constant); }; -class HNullConstant FINAL : public HConstant { +class HNullConstant final : public HConstant { public: - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } - uint64_t GetValueAsUint64() const OVERRIDE { return 0; } + uint64_t GetValueAsUint64() const override { return 0; } - size_t ComputeHashCode() const OVERRIDE { return 0; } + size_t ComputeHashCode() const override { return 0; } // The null constant representation is a 0-bit pattern. virtual bool IsZeroBitPattern() const { return true; } @@ -2887,25 +2925,25 @@ class HNullConstant FINAL : public HConstant { // Constants of the type int. Those can be from Dex instructions, or // synthesized (for example with the if-eqz instruction). -class HIntConstant FINAL : public HConstant { +class HIntConstant final : public HConstant { public: int32_t GetValue() const { return value_; } - uint64_t GetValueAsUint64() const OVERRIDE { + uint64_t GetValueAsUint64() const override { return static_cast<uint64_t>(static_cast<uint32_t>(value_)); } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { DCHECK(other->IsIntConstant()) << other->DebugName(); return other->AsIntConstant()->value_ == value_; } - size_t ComputeHashCode() const OVERRIDE { return GetValue(); } + size_t ComputeHashCode() const override { return GetValue(); } - bool IsMinusOne() const OVERRIDE { return GetValue() == -1; } - bool IsArithmeticZero() const OVERRIDE { return GetValue() == 0; } - bool IsZeroBitPattern() const OVERRIDE { return GetValue() == 0; } - bool IsOne() const OVERRIDE { return GetValue() == 1; } + bool IsMinusOne() const override { return GetValue() == -1; } + bool IsArithmeticZero() const override { return GetValue() == 0; } + bool IsZeroBitPattern() const override { return GetValue() == 0; } + bool IsOne() const override { return GetValue() == 1; } // Integer constants are used to encode Boolean values as well, // where 1 means true and 0 means false. 
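// The bulk of this change is mechanical: the OVERRIDE and FINAL portability macros give way to
// the plain C++11 keywords. The shims were presumably defined along the lines of the sketch
// below (assumed definitions, not the exact base/macros.h contents), which is why the
// replacement is purely textual and has no behavioral effect.
#define OVERRIDE override  // Old shim: expands to the keyword on C++11 toolchains.
#define FINAL final

class HConstantDemo {  // Hypothetical demo classes, mirroring the HIntConstant pattern above.
 public:
  virtual ~HConstantDemo() {}
  virtual bool IsOne() const { return false; }
};

class HIntConstantDemo final : public HConstantDemo {  // Was: HIntConstantDemo FINAL.
 public:
  bool IsOne() const override { return true; }  // Was: OVERRIDE.
};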
@@ -2933,23 +2971,23 @@ class HIntConstant FINAL : public HConstant { ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast); }; -class HLongConstant FINAL : public HConstant { +class HLongConstant final : public HConstant { public: int64_t GetValue() const { return value_; } - uint64_t GetValueAsUint64() const OVERRIDE { return value_; } + uint64_t GetValueAsUint64() const override { return value_; } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { DCHECK(other->IsLongConstant()) << other->DebugName(); return other->AsLongConstant()->value_ == value_; } - size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); } + size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); } - bool IsMinusOne() const OVERRIDE { return GetValue() == -1; } - bool IsArithmeticZero() const OVERRIDE { return GetValue() == 0; } - bool IsZeroBitPattern() const OVERRIDE { return GetValue() == 0; } - bool IsOne() const OVERRIDE { return GetValue() == 1; } + bool IsMinusOne() const override { return GetValue() == -1; } + bool IsArithmeticZero() const override { return GetValue() == 0; } + bool IsZeroBitPattern() const override { return GetValue() == 0; } + bool IsOne() const override { return GetValue() == 1; } DECLARE_INSTRUCTION(LongConstant); @@ -2967,25 +3005,25 @@ class HLongConstant FINAL : public HConstant { friend class HGraph; }; -class HFloatConstant FINAL : public HConstant { +class HFloatConstant final : public HConstant { public: float GetValue() const { return value_; } - uint64_t GetValueAsUint64() const OVERRIDE { + uint64_t GetValueAsUint64() const override { return static_cast<uint64_t>(bit_cast<uint32_t, float>(value_)); } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { DCHECK(other->IsFloatConstant()) << other->DebugName(); return other->AsFloatConstant()->GetValueAsUint64() == GetValueAsUint64(); } - size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); } + size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); } - bool IsMinusOne() const OVERRIDE { + bool IsMinusOne() const override { return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>((-1.0f)); } - bool IsArithmeticZero() const OVERRIDE { + bool IsArithmeticZero() const override { return std::fpclassify(value_) == FP_ZERO; } bool IsArithmeticPositiveZero() const { @@ -2994,10 +3032,10 @@ class HFloatConstant FINAL : public HConstant { bool IsArithmeticNegativeZero() const { return IsArithmeticZero() && std::signbit(value_); } - bool IsZeroBitPattern() const OVERRIDE { + bool IsZeroBitPattern() const override { return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(0.0f); } - bool IsOne() const OVERRIDE { + bool IsOne() const override { return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(1.0f); } bool IsNaN() const { @@ -3026,23 +3064,23 @@ class HFloatConstant FINAL : public HConstant { friend class HGraph; }; -class HDoubleConstant FINAL : public HConstant { +class HDoubleConstant final : public HConstant { public: double GetValue() const { return value_; } - uint64_t GetValueAsUint64() const OVERRIDE { return bit_cast<uint64_t, double>(value_); } + uint64_t GetValueAsUint64() const override { return bit_cast<uint64_t, double>(value_); } - bool InstructionDataEquals(const HInstruction* other) const 
OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsDoubleConstant()) << other->DebugName();
     return other->AsDoubleConstant()->GetValueAsUint64() == GetValueAsUint64();
   }
-  size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+  size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
-  bool IsMinusOne() const OVERRIDE {
+  bool IsMinusOne() const override {
     return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((-1.0));
   }
-  bool IsArithmeticZero() const OVERRIDE {
+  bool IsArithmeticZero() const override {
     return std::fpclassify(value_) == FP_ZERO;
   }
   bool IsArithmeticPositiveZero() const {
@@ -3051,10 +3089,10 @@ class HDoubleConstant FINAL : public HConstant {
   bool IsArithmeticNegativeZero() const {
     return IsArithmeticZero() && std::signbit(value_);
   }
-  bool IsZeroBitPattern() const OVERRIDE {
+  bool IsZeroBitPattern() const override {
     return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((0.0));
   }
-  bool IsOne() const OVERRIDE {
+  bool IsOne() const override {
     return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>(1.0);
   }
   bool IsNaN() const {
@@ -3085,15 +3123,15 @@
 // Conditional branch. A block ending with an HIf instruction must have
 // two successors.
-class HIf FINAL : public HExpression<1> {
+class HIf final : public HExpression<1> {
  public:
   explicit HIf(HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HExpression(kIf, SideEffects::None(), dex_pc) {
     SetRawInputAt(0, input);
   }
-  bool IsClonable() const OVERRIDE { return true; }
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool IsControlFlow() const override { return true; }
   HBasicBlock* IfTrueSuccessor() const {
     return GetBlock()->GetSuccessors()[0];
@@ -3115,7 +3153,7 @@
 // non-exceptional control flow.
 // Normal-flow successor is stored at index zero, exception handlers under
 // higher indices in no particular order.
-class HTryBoundary FINAL : public HExpression<0> {
+class HTryBoundary final : public HExpression<0> {
  public:
   enum class BoundaryKind {
     kEntry,
@@ -3123,12 +3161,19 @@
     kLast = kExit
   };
+  // SideEffects::CanTriggerGC prevents instructions with SideEffects::DependsOnGC from being
+  // live across the edges entering a catch block, as GC might happen while an exception is
+  // being thrown. A TryBoundary with BoundaryKind::kExit is used for this conservatively, as
+  // there is no dedicated HInstruction which a catch block must start from.
   explicit HTryBoundary(BoundaryKind kind, uint32_t dex_pc = kNoDexPc)
-      : HExpression(kTryBoundary, SideEffects::None(), dex_pc) {
+      : HExpression(kTryBoundary,
+                    (kind == BoundaryKind::kExit) ? SideEffects::CanTriggerGC()
+                                                  : SideEffects::None(),
+                    dex_pc) {
     SetPackedField<BoundaryKindField>(kind);
   }
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
   // Returns the block's non-exceptional successor (index zero).
   HBasicBlock* GetNormalFlowSuccessor() const { return GetBlock()->GetSuccessors()[0]; }
@@ -3174,7 +3219,7 @@ class HTryBoundary FINAL : public HExpression<0> {
 };
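// A self-contained model of why the kExit boundary above now reports CanTriggerGC. This is an
// assumed simplification: only the two GC bits of the real 38-bit SideEffects encoding are
// modeled, and MayDependOn mirrors just the GC-dependency part of the real check.
#include <cassert>
#include <cstdint>

struct GcEffects {
  static constexpr uint64_t kCanTriggerGC = 1u << 0;
  static constexpr uint64_t kDependsOnGC = 1u << 1;

  uint64_t flags = 0;

  // A value that depends on GC may not stay live across anything that can trigger GC.
  bool MayDependOn(const GcEffects& other) const {
    return (flags & kDependsOnGC) != 0 && (other.flags & kCanTriggerGC) != 0;
  }
};

int main() {
  GcEffects intermediate_address{GcEffects::kDependsOnGC};  // E.g. a raw pointer into the heap.
  GcEffects try_exit{GcEffects::kCanTriggerGC};             // TryBoundary kExit after this change.
  // Liveness/scheduling must keep intermediate_address from spanning try_exit, because an
  // exceptional edge into a catch block may run GC and move the underlying object.
  assert(intermediate_address.MayDependOn(try_exit));
  return 0;
}

 // Deoptimize to interpreter, upon checking a condition.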
-class HDeoptimize FINAL : public HVariableInputSizeInstruction { +class HDeoptimize final : public HVariableInputSizeInstruction { public: // Use this constructor when the `HDeoptimize` acts as a barrier, where no code can move // across. @@ -3194,7 +3239,7 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction { SetRawInputAt(0, cond); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } // Use this constructor when the `HDeoptimize` guards an instruction, and any user // that relies on the deoptimization to pass should have its input be the `HDeoptimize` @@ -3220,15 +3265,15 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction { SetRawInputAt(1, guard); } - bool CanBeMoved() const OVERRIDE { return GetPackedFlag<kFieldCanBeMoved>(); } + bool CanBeMoved() const override { return GetPackedFlag<kFieldCanBeMoved>(); } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { return (other->CanBeMoved() == CanBeMoved()) && (other->AsDeoptimize()->GetKind() == GetKind()); } - bool NeedsEnvironment() const OVERRIDE { return true; } + bool NeedsEnvironment() const override { return true; } - bool CanThrow() const OVERRIDE { return true; } + bool CanThrow() const override { return true; } DeoptimizationKind GetDeoptimizationKind() const { return GetPackedField<DeoptimizeKindField>(); } @@ -3268,7 +3313,7 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction { // if it's true, starts to do deoptimization. // It has a 4-byte slot on stack. // TODO: allocate a register for this flag. -class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction { +class HShouldDeoptimizeFlag final : public HVariableInputSizeInstruction { public: // CHA guards are only optimized in a separate pass and it has no side effects // with regard to other passes. @@ -3286,7 +3331,7 @@ class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction { // further guard elimination/motion since a guard might have been used for justification // of the elimination of another guard. Therefore, we pretend this guard cannot be moved // to avoid other optimizations trying to move it. - bool CanBeMoved() const OVERRIDE { return false; } + bool CanBeMoved() const override { return false; } DECLARE_INSTRUCTION(ShouldDeoptimizeFlag); @@ -3297,7 +3342,7 @@ class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction { // Represents the ArtMethod that was passed as a first argument to // the method. It is used by instructions that depend on it, like // instructions that work with the dex cache. -class HCurrentMethod FINAL : public HExpression<0> { +class HCurrentMethod final : public HExpression<0> { public: explicit HCurrentMethod(DataType::Type type, uint32_t dex_pc = kNoDexPc) : HExpression(kCurrentMethod, type, SideEffects::None(), dex_pc) { @@ -3311,7 +3356,7 @@ class HCurrentMethod FINAL : public HExpression<0> { // Fetches an ArtMethod from the virtual table or the interface method table // of a class. 
-class HClassTableGet FINAL : public HExpression<1> { +class HClassTableGet final : public HExpression<1> { public: enum class TableKind { kVTable, @@ -3329,9 +3374,9 @@ class HClassTableGet FINAL : public HExpression<1> { SetRawInputAt(0, cls); } - bool IsClonable() const OVERRIDE { return true; } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool IsClonable() const override { return true; } + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other) const override { return other->AsClassTableGet()->GetIndex() == index_ && other->AsClassTableGet()->GetPackedFields() == GetPackedFields(); } @@ -3360,7 +3405,7 @@ class HClassTableGet FINAL : public HExpression<1> { // PackedSwitch (jump table). A block ending with a PackedSwitch instruction will // have one successor for each entry in the switch table, and the final successor // will be the block containing the next Dex opcode. -class HPackedSwitch FINAL : public HExpression<1> { +class HPackedSwitch final : public HExpression<1> { public: HPackedSwitch(int32_t start_value, uint32_t num_entries, @@ -3372,9 +3417,9 @@ class HPackedSwitch FINAL : public HExpression<1> { SetRawInputAt(0, input); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } - bool IsControlFlow() const OVERRIDE { return true; } + bool IsControlFlow() const override { return true; } int32_t GetStartValue() const { return start_value_; } @@ -3405,13 +3450,13 @@ class HUnaryOperation : public HExpression<1> { } // All of the UnaryOperation instructions are clonable. - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } HInstruction* GetInput() const { return InputAt(0); } DataType::Type GetResultType() const { return GetType(); } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } @@ -3446,7 +3491,7 @@ class HBinaryOperation : public HExpression<2> { } // All of the BinaryOperation instructions are clonable. - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } HInstruction* GetLeft() const { return InputAt(0); } HInstruction* GetRight() const { return InputAt(1); } @@ -3486,8 +3531,8 @@ class HBinaryOperation : public HExpression<2> { } } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } @@ -3568,7 +3613,7 @@ class HCondition : public HBinaryOperation { ComparisonBias GetBias() const { return GetPackedField<ComparisonBiasField>(); } void SetBias(ComparisonBias bias) { SetPackedField<ComparisonBiasField>(bias); } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { return GetPackedFields() == other->AsCondition()->GetPackedFields(); } @@ -3625,42 +3670,42 @@ class HCondition : public HBinaryOperation { }; // Instruction to check if two inputs are equal to each other. 
-class HEqual FINAL : public HCondition { +class HEqual final : public HCondition { public: HEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(kEqual, first, second, dex_pc) { } - bool IsCommutative() const OVERRIDE { return true; } + bool IsCommutative() const override { return true; } HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED, - HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HNullConstant* y ATTRIBUTE_UNUSED) const override { return MakeConstantCondition(true, GetDexPc()); } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } // In the following Evaluate methods, a HCompare instruction has // been merged into this HEqual instruction; evaluate it as // `Compare(x, y) == 0`. - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override { return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override { return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc()); } DECLARE_INSTRUCTION(Equal); - IfCondition GetCondition() const OVERRIDE { + IfCondition GetCondition() const override { return kCondEQ; } - IfCondition GetOppositeCondition() const OVERRIDE { + IfCondition GetOppositeCondition() const override { return kCondNE; } @@ -3671,42 +3716,42 @@ class HEqual FINAL : public HCondition { template <typename T> static bool Compute(T x, T y) { return x == y; } }; -class HNotEqual FINAL : public HCondition { +class HNotEqual final : public HCondition { public: HNotEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(kNotEqual, first, second, dex_pc) { } - bool IsCommutative() const OVERRIDE { return true; } + bool IsCommutative() const override { return true; } HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED, - HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HNullConstant* y ATTRIBUTE_UNUSED) const override { return MakeConstantCondition(false, GetDexPc()); } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } // In the following Evaluate methods, a HCompare instruction has // been merged into this HNotEqual instruction; evaluate it as // `Compare(x, y) != 0`. 
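The comments in these condition hunks describe folding a merged HCompare as `Compare(x, y) OP 0`. A short standalone illustration of that shape, with a plain function standing in for the HCondition helpers:

#include <cstdint>
#include <iostream>

// Three-way compare returning -1/0/1, as the merged HCompare would.
template <typename T>
int32_t Compare(T x, T y) { return x < y ? -1 : (x == y ? 0 : 1); }

int main() {
  int64_t x = 7, y = 9;
  std::cout << (Compare(x, y) == 0) << "\n";  // HEqual(x, y):    0
  std::cout << (Compare(x, y) != 0) << "\n";  // HNotEqual(x, y): 1
  std::cout << (Compare(x, y) < 0) << "\n";   // HLessThan(x, y): 1
}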
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override { return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override { return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc()); } DECLARE_INSTRUCTION(NotEqual); - IfCondition GetCondition() const OVERRIDE { + IfCondition GetCondition() const override { return kCondNE; } - IfCondition GetOppositeCondition() const OVERRIDE { + IfCondition GetOppositeCondition() const override { return kCondEQ; } @@ -3717,36 +3762,36 @@ class HNotEqual FINAL : public HCondition { template <typename T> static bool Compute(T x, T y) { return x != y; } }; -class HLessThan FINAL : public HCondition { +class HLessThan final : public HCondition { public: HLessThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(kLessThan, first, second, dex_pc) { } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } // In the following Evaluate methods, a HCompare instruction has // been merged into this HLessThan instruction; evaluate it as // `Compare(x, y) < 0`. - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override { return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override { return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc()); } DECLARE_INSTRUCTION(LessThan); - IfCondition GetCondition() const OVERRIDE { + IfCondition GetCondition() const override { return kCondLT; } - IfCondition GetOppositeCondition() const OVERRIDE { + IfCondition GetOppositeCondition() const override { return kCondGE; } @@ -3757,36 +3802,36 @@ class HLessThan FINAL : public HCondition { template <typename T> static bool Compute(T x, T y) { return x < y; } }; -class HLessThanOrEqual FINAL : public HCondition { +class HLessThanOrEqual final : public HCondition { public: HLessThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(kLessThanOrEqual, first, second, dex_pc) { } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } // In the following Evaluate methods, a HCompare instruction has // been merged into this HLessThanOrEqual instruction; evaluate it as // 
`Compare(x, y) <= 0`. - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override { return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override { return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc()); } DECLARE_INSTRUCTION(LessThanOrEqual); - IfCondition GetCondition() const OVERRIDE { + IfCondition GetCondition() const override { return kCondLE; } - IfCondition GetOppositeCondition() const OVERRIDE { + IfCondition GetOppositeCondition() const override { return kCondGT; } @@ -3797,35 +3842,35 @@ class HLessThanOrEqual FINAL : public HCondition { template <typename T> static bool Compute(T x, T y) { return x <= y; } }; -class HGreaterThan FINAL : public HCondition { +class HGreaterThan final : public HCondition { public: HGreaterThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(kGreaterThan, first, second, dex_pc) { } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } // In the following Evaluate methods, a HCompare instruction has // been merged into this HGreaterThan instruction; evaluate it as // `Compare(x, y) > 0`. 
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override { return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override { return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc()); } DECLARE_INSTRUCTION(GreaterThan); - IfCondition GetCondition() const OVERRIDE { + IfCondition GetCondition() const override { return kCondGT; } - IfCondition GetOppositeCondition() const OVERRIDE { + IfCondition GetOppositeCondition() const override { return kCondLE; } @@ -3836,35 +3881,35 @@ class HGreaterThan FINAL : public HCondition { template <typename T> static bool Compute(T x, T y) { return x > y; } }; -class HGreaterThanOrEqual FINAL : public HCondition { +class HGreaterThanOrEqual final : public HCondition { public: HGreaterThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(kGreaterThanOrEqual, first, second, dex_pc) { } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } // In the following Evaluate methods, a HCompare instruction has // been merged into this HGreaterThanOrEqual instruction; evaluate it as // `Compare(x, y) >= 0`. 
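The float/double overloads above go through CompareFP, which must decide what an unordered (NaN) comparison folds to. Assuming it mirrors HCompare's ComputeFP further down (NaN resolves to +1 under gt bias and -1 under lt bias, matching Java's fcmpg/fcmpl bytecodes), a standalone sketch with the bias made explicit:

#include <cmath>
#include <cstdint>
#include <iostream>

int32_t CompareFP(double x, double y, bool gt_bias) {
  if (std::isunordered(x, y)) {
    return gt_bias ? 1 : -1;  // NaN operands fold toward the bias.
  }
  return x < y ? -1 : (x == y ? 0 : 1);
}

int main() {
  double nan = std::nan("");
  // HLessThan(nan, 1.0) folds as CompareFP(nan, 1.0) < 0:
  std::cout << (CompareFP(nan, 1.0, /*gt_bias=*/true) < 0) << "\n";   // 0 (fcmpg view)
  std::cout << (CompareFP(nan, 1.0, /*gt_bias=*/false) < 0) << "\n";  // 1 (fcmpl view)
}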
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override { return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override { return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc()); } DECLARE_INSTRUCTION(GreaterThanOrEqual); - IfCondition GetCondition() const OVERRIDE { + IfCondition GetCondition() const override { return kCondGE; } - IfCondition GetOppositeCondition() const OVERRIDE { + IfCondition GetOppositeCondition() const override { return kCondLT; } @@ -3875,36 +3920,36 @@ class HGreaterThanOrEqual FINAL : public HCondition { template <typename T> static bool Compute(T x, T y) { return x >= y; } }; -class HBelow FINAL : public HCondition { +class HBelow final : public HCondition { public: HBelow(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(kBelow, first, second, dex_pc) { } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED, - HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HFloatConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for float values"; UNREACHABLE(); } HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED, - HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HDoubleConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } DECLARE_INSTRUCTION(Below); - IfCondition GetCondition() const OVERRIDE { + IfCondition GetCondition() const override { return kCondB; } - IfCondition GetOppositeCondition() const OVERRIDE { + IfCondition GetOppositeCondition() const override { return kCondAE; } @@ -3917,36 +3962,36 @@ class HBelow FINAL : public HCondition { } }; -class HBelowOrEqual FINAL : public HCondition { +class HBelowOrEqual final : public HCondition { public: HBelowOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(kBelowOrEqual, first, second, dex_pc) { } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED, - HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HFloatConstant* y ATTRIBUTE_UNUSED) 
const override { LOG(FATAL) << DebugName() << " is not defined for float values"; UNREACHABLE(); } HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED, - HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HDoubleConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } DECLARE_INSTRUCTION(BelowOrEqual); - IfCondition GetCondition() const OVERRIDE { + IfCondition GetCondition() const override { return kCondBE; } - IfCondition GetOppositeCondition() const OVERRIDE { + IfCondition GetOppositeCondition() const override { return kCondA; } @@ -3959,36 +4004,36 @@ class HBelowOrEqual FINAL : public HCondition { } }; -class HAbove FINAL : public HCondition { +class HAbove final : public HCondition { public: HAbove(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(kAbove, first, second, dex_pc) { } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED, - HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HFloatConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for float values"; UNREACHABLE(); } HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED, - HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HDoubleConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } DECLARE_INSTRUCTION(Above); - IfCondition GetCondition() const OVERRIDE { + IfCondition GetCondition() const override { return kCondA; } - IfCondition GetOppositeCondition() const OVERRIDE { + IfCondition GetOppositeCondition() const override { return kCondBE; } @@ -4001,36 +4046,36 @@ class HAbove FINAL : public HCondition { } }; -class HAboveOrEqual FINAL : public HCondition { +class HAboveOrEqual final : public HCondition { public: HAboveOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(kAboveOrEqual, first, second, dex_pc) { } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED, - HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HFloatConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for float values"; UNREACHABLE(); } HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED, - HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HDoubleConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } DECLARE_INSTRUCTION(AboveOrEqual); - IfCondition GetCondition() const OVERRIDE { + IfCondition GetCondition() const override { return 
kCondAE; } - IfCondition GetOppositeCondition() const OVERRIDE { + IfCondition GetOppositeCondition() const override { return kCondB; } @@ -4045,7 +4090,7 @@ class HAboveOrEqual FINAL : public HCondition { // Instruction to check how two inputs compare to each other. // Result is 0 if input0 == input1, 1 if input0 > input1, or -1 if input0 < input1. -class HCompare FINAL : public HBinaryOperation { +class HCompare final : public HBinaryOperation { public: // Note that `comparison_type` is the type of comparison performed // between the comparison's inputs, not the type of the instantiated @@ -4077,7 +4122,7 @@ class HCompare FINAL : public HBinaryOperation { return std::isunordered(x, y) ? (IsGtBias() ? 1 : -1) : Compute(x, y); } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { // Note that there is no "cmp-int" Dex instruction so we shouldn't // reach this code path when processing a freshly built HIR // graph. However HCompare integer instructions can be synthesized @@ -4085,17 +4130,17 @@ class HCompare FINAL : public HBinaryOperation { // IntegerSignum intrinsics, so we have to handle this case. return MakeConstantComparison(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return MakeConstantComparison(Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override { return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override { return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()), GetDexPc()); } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { return GetPackedFields() == other->AsCompare()->GetPackedFields(); } @@ -4134,7 +4179,7 @@ class HCompare FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(Compare); }; -class HNewInstance FINAL : public HExpression<1> { +class HNewInstance final : public HExpression<1> { public: HNewInstance(HInstruction* cls, uint32_t dex_pc, @@ -4153,16 +4198,16 @@ class HNewInstance FINAL : public HExpression<1> { SetRawInputAt(0, cls); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } dex::TypeIndex GetTypeIndex() const { return type_index_; } const DexFile& GetDexFile() const { return dex_file_; } // Calls runtime so needs an environment. - bool NeedsEnvironment() const OVERRIDE { return true; } + bool NeedsEnvironment() const override { return true; } // Can throw errors when out-of-memory or if it's not instantiable/accessible. 
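HBelow, HBelowOrEqual, HAbove and HAboveOrEqual above are the unsigned counterparts of the signed conditions (kCondB/kCondBE/kCondA/kCondAE). Their Compute() bodies are elided by the hunks, but by construction they order the operands' unsigned reinterpretation, and the float/double overloads LOG(FATAL) because unsigned ordering has no FP meaning. A sketch of the integral rule:

#include <cstdint>
#include <iostream>
#include <type_traits>

template <typename T>
bool Below(T x, T y) {
  static_assert(std::is_integral<T>::value, "not defined for FP values");
  using U = typename std::make_unsigned<T>::type;
  return static_cast<U>(x) < static_cast<U>(y);
}

int main() {
  std::cout << (-1 < 1) << "\n";               // 1: the signed HLessThan view
  std::cout << Below<int32_t>(-1, 1) << "\n";  // 0: -1 reinterprets as 0xFFFFFFFF
}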
- bool CanThrow() const OVERRIDE { return true; } + bool CanThrow() const override { return true; } bool NeedsChecks() const { return entrypoint_ == kQuickAllocObjectWithChecks; @@ -4170,7 +4215,7 @@ bool IsFinalizable() const { return GetPackedFlag<kFlagFinalizable>(); } - bool CanBeNull() const OVERRIDE { return false; } + bool CanBeNull() const override { return false; } QuickEntrypointEnum GetEntrypoint() const { return entrypoint_; } @@ -4224,7 +4269,7 @@ enum IntrinsicExceptions { class HInvoke : public HVariableInputSizeInstruction { public: - bool NeedsEnvironment() const OVERRIDE; + bool NeedsEnvironment() const override; void SetArgumentAt(size_t index, HInstruction* argument) { SetRawInputAt(index, argument); @@ -4257,15 +4302,15 @@ class HInvoke : public HVariableInputSizeInstruction { void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); } - bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>(); } + bool CanThrow() const override { return GetPackedFlag<kFlagCanThrow>(); } void SetAlwaysThrows(bool always_throws) { SetPackedFlag<kFlagAlwaysThrows>(always_throws); } - bool AlwaysThrows() const OVERRIDE { return GetPackedFlag<kFlagAlwaysThrows>(); } + bool AlwaysThrows() const override { return GetPackedFlag<kFlagAlwaysThrows>(); } - bool CanBeMoved() const OVERRIDE { return IsIntrinsic() && !DoesAnyWrite(); } + bool CanBeMoved() const override { return IsIntrinsic() && !DoesAnyWrite(); } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { return intrinsic_ != Intrinsics::kNone && intrinsic_ == other->AsInvoke()->intrinsic_; } @@ -4280,7 +4325,7 @@ class HInvoke : public HVariableInputSizeInstruction { bool IsIntrinsic() const { return intrinsic_ != Intrinsics::kNone; } ArtMethod* GetResolvedMethod() const { return resolved_method_; } - void SetResolvedMethod(ArtMethod* method) { resolved_method_ = method; } + void SetResolvedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_); DECLARE_ABSTRACT_INSTRUCTION(Invoke); @@ -4312,12 +4357,14 @@ class HInvoke : public HVariableInputSizeInstruction { number_of_arguments + number_of_other_inputs, kArenaAllocInvokeInputs), number_of_arguments_(number_of_arguments), - resolved_method_(resolved_method), dex_method_index_(dex_method_index), intrinsic_(Intrinsics::kNone), intrinsic_optimizations_(0) { SetPackedField<InvokeTypeField>(invoke_type); SetPackedFlag<kFlagCanThrow>(true); + // Check the mutator lock here; constructors lack annotalysis support.
+ Locks::mutator_lock_->AssertNotExclusiveHeld(Thread::Current()); + SetResolvedMethod(resolved_method); } DEFAULT_COPY_CONSTRUCTOR(Invoke); @@ -4331,7 +4378,7 @@ class HInvoke : public HVariableInputSizeInstruction { uint32_t intrinsic_optimizations_; }; -class HInvokeUnresolved FINAL : public HInvoke { +class HInvokeUnresolved final : public HInvoke { public: HInvokeUnresolved(ArenaAllocator* allocator, uint32_t number_of_arguments, @@ -4350,7 +4397,7 @@ class HInvokeUnresolved FINAL : public HInvoke { invoke_type) { } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } DECLARE_INSTRUCTION(InvokeUnresolved); @@ -4358,7 +4405,7 @@ class HInvokeUnresolved FINAL : public HInvoke { DEFAULT_COPY_CONSTRUCTOR(InvokeUnresolved); }; -class HInvokePolymorphic FINAL : public HInvoke { +class HInvokePolymorphic final : public HInvoke { public: HInvokePolymorphic(ArenaAllocator* allocator, uint32_t number_of_arguments, @@ -4376,7 +4423,7 @@ class HInvokePolymorphic FINAL : public HInvoke { kVirtual) { } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } DECLARE_INSTRUCTION(InvokePolymorphic); @@ -4384,7 +4431,7 @@ class HInvokePolymorphic FINAL : public HInvoke { DEFAULT_COPY_CONSTRUCTOR(InvokePolymorphic); }; -class HInvokeCustom FINAL : public HInvoke { +class HInvokeCustom final : public HInvoke { public: HInvokeCustom(ArenaAllocator* allocator, uint32_t number_of_arguments, @@ -4405,7 +4452,7 @@ class HInvokeCustom FINAL : public HInvoke { uint32_t GetCallSiteIndex() const { return call_site_index_; } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } DECLARE_INSTRUCTION(InvokeCustom); @@ -4416,7 +4463,7 @@ class HInvokeCustom FINAL : public HInvoke { uint32_t call_site_index_; }; -class HInvokeStaticOrDirect FINAL : public HInvoke { +class HInvokeStaticOrDirect final : public HInvoke { public: // Requirements of this method call regarding the class // initialization (clinit) check of its declaring class. @@ -4491,8 +4538,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke { allocator, number_of_arguments, // There is potentially one extra argument for the HCurrentMethod node, and - // potentially one other if the clinit check is explicit, and potentially - // one other if the method is a string factory. + // potentially one other if the clinit check is explicit. (NeedsCurrentMethodInput(dispatch_info.method_load_kind) ? 1u : 0u) + (clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u), return_type, @@ -4505,7 +4551,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke { SetPackedField<ClinitCheckRequirementField>(clinit_check_requirement); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } void SetDispatchInfo(const DispatchInfo& dispatch_info) { bool had_current_method_input = HasCurrentMethodInput(); @@ -4535,7 +4581,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke { } using HInstruction::GetInputRecords; // Keep the const version visible. 
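SetResolvedMethod() now carries REQUIRES_SHARED(Locks::mutator_lock_), and the HInvoke constructor falls back to a runtime assert because, as the new comment says, annotalysis does not check constructors. A hedged sketch of what the annotation buys at compile time, written with the raw Clang thread-safety attributes that ART's macros wrap (compile with -Wthread-safety; all names here are illustrative):

class __attribute__((capability("mutex"))) MutatorLock {
 public:
  void SharedLock() __attribute__((acquire_shared_capability())) {}
  void SharedUnlock() __attribute__((release_shared_capability())) {}
};

MutatorLock mutator_lock;

// Analogue of: void SetResolvedMethod(ArtMethod*) REQUIRES_SHARED(Locks::mutator_lock_);
void SetResolvedMethod() __attribute__((requires_shared_capability(mutator_lock))) {}

int main() {
  mutator_lock.SharedLock();
  SetResolvedMethod();  // OK: the shared capability is held here.
  mutator_lock.SharedUnlock();
  // SetResolvedMethod();  // -Wthread-safety: calling without holding mutator_lock.
}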
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE { + ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override { ArrayRef<HUserRecord<HInstruction*>> input_records = HInvoke::GetInputRecords(); if (kIsDebugBuild && IsStaticWithExplicitClinitCheck()) { DCHECK(!input_records.empty()); @@ -4553,13 +4599,13 @@ class HInvokeStaticOrDirect FINAL : public HInvoke { return input_records; } - bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE { + bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override { // We access the method via the dex cache so we can't do an implicit null check. // TODO: for intrinsics we can generate implicit null checks. return false; } - bool CanBeNull() const OVERRIDE { + bool CanBeNull() const override { return GetType() == DataType::Type::kReference && !IsStringInit(); } @@ -4574,7 +4620,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke { MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; } CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; } bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; } - bool NeedsDexCacheOfDeclaringClass() const OVERRIDE; + bool NeedsDexCacheOfDeclaringClass() const override; bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; } bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kJitDirectAddress; } bool HasPcRelativeMethodLoadKind() const { @@ -4675,7 +4721,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke { std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind rhs); std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckRequirement rhs); -class HInvokeVirtual FINAL : public HInvoke { +class HInvokeVirtual final : public HInvoke { public: HInvokeVirtual(ArenaAllocator* allocator, uint32_t number_of_arguments, @@ -4696,9 +4742,9 @@ class HInvokeVirtual FINAL : public HInvoke { vtable_index_(vtable_index) { } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } - bool CanBeNull() const OVERRIDE { + bool CanBeNull() const override { switch (GetIntrinsic()) { case Intrinsics::kThreadCurrentThread: case Intrinsics::kStringBufferAppend: @@ -4711,9 +4757,9 @@ class HInvokeVirtual FINAL : public HInvoke { } } - bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { + bool CanDoImplicitNullCheckOn(HInstruction* obj) const override { // TODO: Add implicit null checks in intrinsics. - return (obj == InputAt(0)) && !GetLocations()->Intrinsified(); + return (obj == InputAt(0)) && !IsIntrinsic(); } uint32_t GetVTableIndex() const { return vtable_index_; } @@ -4728,7 +4774,7 @@ class HInvokeVirtual FINAL : public HInvoke { const uint32_t vtable_index_; }; -class HInvokeInterface FINAL : public HInvoke { +class HInvokeInterface final : public HInvoke { public: HInvokeInterface(ArenaAllocator* allocator, uint32_t number_of_arguments, @@ -4749,14 +4795,14 @@ class HInvokeInterface FINAL : public HInvoke { imt_index_(imt_index) { } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } - bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { + bool CanDoImplicitNullCheckOn(HInstruction* obj) const override { // TODO: Add implicit null checks in intrinsics. 
- return (obj == InputAt(0)) && !GetLocations()->Intrinsified(); + return (obj == InputAt(0)) && !IsIntrinsic(); } - bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { + bool NeedsDexCacheOfDeclaringClass() const override { // The assembly stub currently needs it. return true; } @@ -4773,7 +4819,7 @@ class HInvokeInterface FINAL : public HInvoke { const uint32_t imt_index_; }; -class HNeg FINAL : public HUnaryOperation { +class HNeg final : public HUnaryOperation { public: HNeg(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc) : HUnaryOperation(kNeg, result_type, input, dex_pc) { @@ -4782,16 +4828,16 @@ class HNeg FINAL : public HUnaryOperation { template <typename T> static T Compute(T x) { return -x; } - HConstant* Evaluate(HIntConstant* x) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x) const override { return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x) const override { return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x) const override { return GetBlock()->GetGraph()->GetFloatConstant(Compute(x->GetValue()), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x) const override { return GetBlock()->GetGraph()->GetDoubleConstant(Compute(x->GetValue()), GetDexPc()); } @@ -4801,7 +4847,7 @@ class HNeg FINAL : public HUnaryOperation { DEFAULT_COPY_CONSTRUCTOR(Neg); }; -class HNewArray FINAL : public HExpression<2> { +class HNewArray final : public HExpression<2> { public: HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc) : HExpression(kNewArray, DataType::Type::kReference, SideEffects::CanTriggerGC(), dex_pc) { @@ -4809,15 +4855,15 @@ class HNewArray FINAL : public HExpression<2> { SetRawInputAt(1, length); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } // Calls runtime so needs an environment. - bool NeedsEnvironment() const OVERRIDE { return true; } + bool NeedsEnvironment() const override { return true; } // May throw NegativeArraySizeException, OutOfMemoryError, etc. 
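Another behavior-facing tweak above: CanDoImplicitNullCheckOn() for virtual and interface invokes now asks the node-level IsIntrinsic() instead of the codegen-time GetLocations()->Intrinsified(), so the answer no longer depends on a LocationSummary having been built. A toy model of the predicate, with stand-in types rather than ART's:

#include <iostream>

struct HInstruction {};

struct InvokeSketch {
  HInstruction* receiver = nullptr;
  bool is_intrinsic = false;

  HInstruction* InputAt(int i) const { return i == 0 ? receiver : nullptr; }
  bool IsIntrinsic() const { return is_intrinsic; }

  // An implicit null check lets the receiver load fault instead of emitting
  // an explicit test; intrinsics may not touch the receiver first, so they
  // are excluded (see the TODO in the diff).
  bool CanDoImplicitNullCheckOn(const HInstruction* obj) const {
    return obj == InputAt(0) && !IsIntrinsic();
  }
};

int main() {
  HInstruction recv;
  InvokeSketch plain{&recv, /*is_intrinsic=*/false};
  InvokeSketch intrinsified{&recv, /*is_intrinsic=*/true};
  std::cout << plain.CanDoImplicitNullCheckOn(&recv) << "\n";        // 1
  std::cout << intrinsified.CanDoImplicitNullCheckOn(&recv) << "\n"; // 0
}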
- bool CanThrow() const OVERRIDE { return true; } + bool CanThrow() const override { return true; } - bool CanBeNull() const OVERRIDE { return false; } + bool CanBeNull() const override { return false; } HLoadClass* GetLoadClass() const { DCHECK(InputAt(0)->IsLoadClass()); @@ -4834,7 +4880,7 @@ class HNewArray FINAL : public HExpression<2> { DEFAULT_COPY_CONSTRUCTOR(NewArray); }; -class HAdd FINAL : public HBinaryOperation { +class HAdd final : public HBinaryOperation { public: HAdd(DataType::Type result_type, HInstruction* left, @@ -4843,23 +4889,23 @@ class HAdd FINAL : public HBinaryOperation { : HBinaryOperation(kAdd, result_type, left, right, SideEffects::None(), dex_pc) { } - bool IsCommutative() const OVERRIDE { return true; } + bool IsCommutative() const override { return true; } template <typename T> static T Compute(T x, T y) { return x + y; } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return GetBlock()->GetGraph()->GetIntConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return GetBlock()->GetGraph()->GetLongConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override { return GetBlock()->GetGraph()->GetFloatConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override { return GetBlock()->GetGraph()->GetDoubleConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } @@ -4870,7 +4916,7 @@ class HAdd FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(Add); }; -class HSub FINAL : public HBinaryOperation { +class HSub final : public HBinaryOperation { public: HSub(DataType::Type result_type, HInstruction* left, @@ -4881,19 +4927,19 @@ class HSub FINAL : public HBinaryOperation { template <typename T> static T Compute(T x, T y) { return x - y; } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return GetBlock()->GetGraph()->GetIntConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return GetBlock()->GetGraph()->GetLongConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override { return GetBlock()->GetGraph()->GetFloatConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override { return GetBlock()->GetGraph()->GetDoubleConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } @@ -4904,7 +4950,7 @@ class HSub FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(Sub); }; -class HMul FINAL : public HBinaryOperation { +class HMul final : public HBinaryOperation { public: HMul(DataType::Type result_type, HInstruction* left, @@ 
-4913,23 +4959,23 @@ class HMul FINAL : public HBinaryOperation { : HBinaryOperation(kMul, result_type, left, right, SideEffects::None(), dex_pc) { } - bool IsCommutative() const OVERRIDE { return true; } + bool IsCommutative() const override { return true; } template <typename T> static T Compute(T x, T y) { return x * y; } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return GetBlock()->GetGraph()->GetIntConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return GetBlock()->GetGraph()->GetLongConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override { return GetBlock()->GetGraph()->GetFloatConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override { return GetBlock()->GetGraph()->GetDoubleConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } @@ -4940,7 +4986,7 @@ class HMul FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(Mul); }; -class HDiv FINAL : public HBinaryOperation { +class HDiv final : public HBinaryOperation { public: HDiv(DataType::Type result_type, HInstruction* left, @@ -4965,19 +5011,19 @@ class HDiv FINAL : public HBinaryOperation { return x / y; } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return GetBlock()->GetGraph()->GetIntConstant( ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return GetBlock()->GetGraph()->GetLongConstant( ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override { return GetBlock()->GetGraph()->GetFloatConstant( ComputeFP(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override { return GetBlock()->GetGraph()->GetDoubleConstant( ComputeFP(x->GetValue(), y->GetValue()), GetDexPc()); } @@ -4988,7 +5034,7 @@ class HDiv FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(Div); }; -class HRem FINAL : public HBinaryOperation { +class HRem final : public HBinaryOperation { public: HRem(DataType::Type result_type, HInstruction* left, @@ -5013,19 +5059,19 @@ class HRem FINAL : public HBinaryOperation { return std::fmod(x, y); } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return GetBlock()->GetGraph()->GetIntConstant( ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return GetBlock()->GetGraph()->GetLongConstant( ComputeIntegral(x->GetValue(), 
y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override { return GetBlock()->GetGraph()->GetFloatConstant( ComputeFP(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override { return GetBlock()->GetGraph()->GetDoubleConstant( ComputeFP(x->GetValue(), y->GetValue()), GetDexPc()); } @@ -5036,7 +5082,7 @@ class HRem FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(Rem); }; -class HMin FINAL : public HBinaryOperation { +class HMin final : public HBinaryOperation { public: HMin(DataType::Type result_type, HInstruction* left, @@ -5044,26 +5090,26 @@ class HMin FINAL : public HBinaryOperation { uint32_t dex_pc) : HBinaryOperation(kMin, result_type, left, right, SideEffects::None(), dex_pc) {} - bool IsCommutative() const OVERRIDE { return true; } + bool IsCommutative() const override { return true; } // Evaluation for integral values. template <typename T> static T ComputeIntegral(T x, T y) { return (x <= y) ? x : y; } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return GetBlock()->GetGraph()->GetIntConstant( ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return GetBlock()->GetGraph()->GetLongConstant( ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc()); } // TODO: Evaluation for floating-point values. HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED, - HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; } + HFloatConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; } HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED, - HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; } + HDoubleConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; } DECLARE_INSTRUCTION(Min); @@ -5071,7 +5117,7 @@ class HMin FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(Min); }; -class HMax FINAL : public HBinaryOperation { +class HMax final : public HBinaryOperation { public: HMax(DataType::Type result_type, HInstruction* left, @@ -5079,26 +5125,26 @@ class HMax FINAL : public HBinaryOperation { uint32_t dex_pc) : HBinaryOperation(kMax, result_type, left, right, SideEffects::None(), dex_pc) {} - bool IsCommutative() const OVERRIDE { return true; } + bool IsCommutative() const override { return true; } // Evaluation for integral values. template <typename T> static T ComputeIntegral(T x, T y) { return (x >= y) ? x : y; } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return GetBlock()->GetGraph()->GetIntConstant( ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return GetBlock()->GetGraph()->GetLongConstant( ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc()); } // TODO: Evaluation for floating-point values. 
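The Min/Max hunks fold integral constants with `(x <= y) ? x : y` / `(x >= y) ? x : y` but deliberately return nullptr (fold nothing) for float and double, per the TODO. The integral rule is unsound for FP once -0.0 and NaN are involved, which this standalone snippet demonstrates against Java's Math.min semantics:

#include <cmath>
#include <cstdint>
#include <iostream>

// The integral folding rule from the diff, applied generically.
template <typename T> T NaiveMin(T x, T y) { return (x <= y) ? x : y; }

int main() {
  std::cout << NaiveMin<int64_t>(-3, 5) << "\n";  // -3: safe for integers.

  // Java's Math.min(0.0, -0.0) must be -0.0, but 0.0 <= -0.0 compares true,
  // so the naive rule keeps +0.0.
  std::cout << std::signbit(NaiveMin(0.0, -0.0)) << "\n";  // 0: sign bit lost.

  // Java's Math.min(NaN, 1.0) must be NaN, but NaN <= 1.0 is false,
  // so the naive rule returns 1.0.
  std::cout << std::isnan(NaiveMin(std::nan(""), 1.0)) << "\n";  // 0
}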
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED, - HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; } + HFloatConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; } HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED, - HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; } + HDoubleConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; } DECLARE_INSTRUCTION(Max); @@ -5106,7 +5152,7 @@ class HMax FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(Max); }; -class HAbs FINAL : public HUnaryOperation { +class HAbs final : public HUnaryOperation { public: HAbs(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc) : HUnaryOperation(kAbs, result_type, input, dex_pc) {} @@ -5126,17 +5172,17 @@ class HAbs FINAL : public HUnaryOperation { return bit_cast<T, S>(bits & std::numeric_limits<S>::max()); } - HConstant* Evaluate(HIntConstant* x) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x) const override { return GetBlock()->GetGraph()->GetIntConstant(ComputeIntegral(x->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x) const override { return GetBlock()->GetGraph()->GetLongConstant(ComputeIntegral(x->GetValue()), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x) const override { return GetBlock()->GetGraph()->GetFloatConstant( ComputeFP<float, int32_t>(x->GetValue()), GetDexPc()); } - HConstant* Evaluate(HDoubleConstant* x) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x) const override { return GetBlock()->GetGraph()->GetDoubleConstant( ComputeFP<double, int64_t>(x->GetValue()), GetDexPc()); } @@ -5147,24 +5193,25 @@ class HAbs FINAL : public HUnaryOperation { DEFAULT_COPY_CONSTRUCTOR(Abs); }; -class HDivZeroCheck FINAL : public HExpression<1> { +class HDivZeroCheck final : public HExpression<1> { public: // `HDivZeroCheck` can trigger GC, as it may call the `ArithmeticException` - // constructor. + // constructor. However, it can only do so on a fatal slow path, so execution never returns to + // the instruction following the current one; thus `SideEffects::None()` is used.
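For the HShl/HShr/HUShr hunks just below: their Compute() helpers mask the shift distance with kMaxIntShiftDistance (31) or kMaxLongShiftDistance (63), which is exactly Java's rule that only the low 5 (int) or 6 (long) bits of the distance count. A standalone check:

#include <cstdint>
#include <iostream>

constexpr int32_t kMaxIntShiftDistance = 31;
constexpr int32_t kMaxLongShiftDistance = 63;

template <typename T>
T Shl(T value, int32_t distance, int32_t max_shift_distance) {
  // Masking first keeps the C++ shift in range (an unmasked 33-bit shift of
  // a 32-bit value would be undefined behavior).
  return value << (distance & max_shift_distance);
}

int main() {
  std::cout << Shl<int32_t>(1, 33, kMaxIntShiftDistance) << "\n";   // 2, as in Java: 1 << 33
  std::cout << Shl<int64_t>(1, 33, kMaxLongShiftDistance) << "\n";  // 8589934592 (1L << 33)
}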
HDivZeroCheck(HInstruction* value, uint32_t dex_pc) - : HExpression(kDivZeroCheck, value->GetType(), SideEffects::CanTriggerGC(), dex_pc) { + : HExpression(kDivZeroCheck, value->GetType(), SideEffects::None(), dex_pc) { SetRawInputAt(0, value); } - bool IsClonable() const OVERRIDE { return true; } - bool CanBeMoved() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } + bool CanBeMoved() const override { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } - bool NeedsEnvironment() const OVERRIDE { return true; } - bool CanThrow() const OVERRIDE { return true; } + bool NeedsEnvironment() const override { return true; } + bool CanThrow() const override { return true; } DECLARE_INSTRUCTION(DivZeroCheck); @@ -5172,7 +5219,7 @@ class HDivZeroCheck FINAL : public HExpression<1> { DEFAULT_COPY_CONSTRUCTOR(DivZeroCheck); }; -class HShl FINAL : public HBinaryOperation { +class HShl final : public HBinaryOperation { public: HShl(DataType::Type result_type, HInstruction* value, @@ -5188,26 +5235,26 @@ class HShl FINAL : public HBinaryOperation { return value << (distance & max_shift_distance); } - HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE { + HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override { return GetBlock()->GetGraph()->GetIntConstant( Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc()); } - HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE { + HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override { return GetBlock()->GetGraph()->GetLongConstant( Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc()); } HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED, - HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE { + HLongConstant* distance ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for the (long, long) case."; UNREACHABLE(); } HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED, - HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE { + HFloatConstant* distance ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for float values"; UNREACHABLE(); } HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED, - HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE { + HDoubleConstant* distance ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } @@ -5218,7 +5265,7 @@ class HShl FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(Shl); }; -class HShr FINAL : public HBinaryOperation { +class HShr final : public HBinaryOperation { public: HShr(DataType::Type result_type, HInstruction* value, @@ -5234,26 +5281,26 @@ class HShr FINAL : public HBinaryOperation { return value >> (distance & max_shift_distance); } - HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE { + HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override { return GetBlock()->GetGraph()->GetIntConstant( Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc()); } - HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE { + HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override 
{ return GetBlock()->GetGraph()->GetLongConstant( Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc()); } HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED, - HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE { + HLongConstant* distance ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for the (long, long) case."; UNREACHABLE(); } HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED, - HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE { + HFloatConstant* distance ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for float values"; UNREACHABLE(); } HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED, - HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE { + HDoubleConstant* distance ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } @@ -5264,7 +5311,7 @@ class HShr FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(Shr); }; -class HUShr FINAL : public HBinaryOperation { +class HUShr final : public HBinaryOperation { public: HUShr(DataType::Type result_type, HInstruction* value, @@ -5282,26 +5329,26 @@ class HUShr FINAL : public HBinaryOperation { return static_cast<T>(ux >> (distance & max_shift_distance)); } - HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE { + HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override { return GetBlock()->GetGraph()->GetIntConstant( Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc()); } - HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE { + HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override { return GetBlock()->GetGraph()->GetLongConstant( Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc()); } HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED, - HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE { + HLongConstant* distance ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for the (long, long) case."; UNREACHABLE(); } HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED, - HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE { + HFloatConstant* distance ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for float values"; UNREACHABLE(); } HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED, - HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE { + HDoubleConstant* distance ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } @@ -5312,7 +5359,7 @@ class HUShr FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(UShr); }; -class HAnd FINAL : public HBinaryOperation { +class HAnd final : public HBinaryOperation { public: HAnd(DataType::Type result_type, HInstruction* left, @@ -5321,25 +5368,25 @@ class HAnd FINAL : public HBinaryOperation { : HBinaryOperation(kAnd, result_type, left, right, SideEffects::None(), dex_pc) { } - bool IsCommutative() const OVERRIDE { return true; } + bool IsCommutative() const override { return true; } template <typename T> static T Compute(T x, T y) { return x & y; } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return GetBlock()->GetGraph()->GetIntConstant( 
Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return GetBlock()->GetGraph()->GetLongConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED, - HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HFloatConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for float values"; UNREACHABLE(); } HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED, - HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HDoubleConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } @@ -5350,7 +5397,7 @@ class HAnd FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(And); }; -class HOr FINAL : public HBinaryOperation { +class HOr final : public HBinaryOperation { public: HOr(DataType::Type result_type, HInstruction* left, @@ -5359,25 +5406,25 @@ class HOr FINAL : public HBinaryOperation { : HBinaryOperation(kOr, result_type, left, right, SideEffects::None(), dex_pc) { } - bool IsCommutative() const OVERRIDE { return true; } + bool IsCommutative() const override { return true; } template <typename T> static T Compute(T x, T y) { return x | y; } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return GetBlock()->GetGraph()->GetIntConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return GetBlock()->GetGraph()->GetLongConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED, - HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HFloatConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for float values"; UNREACHABLE(); } HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED, - HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HDoubleConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } @@ -5388,7 +5435,7 @@ class HOr FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(Or); }; -class HXor FINAL : public HBinaryOperation { +class HXor final : public HBinaryOperation { public: HXor(DataType::Type result_type, HInstruction* left, @@ -5397,25 +5444,25 @@ class HXor FINAL : public HBinaryOperation { : HBinaryOperation(kXor, result_type, left, right, SideEffects::None(), dex_pc) { } - bool IsCommutative() const OVERRIDE { return true; } + bool IsCommutative() const override { return true; } template <typename T> static T Compute(T x, T y) { return x ^ y; } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return GetBlock()->GetGraph()->GetIntConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return GetBlock()->GetGraph()->GetLongConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED, - HFloatConstant* y 
ATTRIBUTE_UNUSED) const OVERRIDE { + HFloatConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for float values"; UNREACHABLE(); } HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED, - HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HDoubleConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } @@ -5426,7 +5473,7 @@ class HXor FINAL : public HBinaryOperation { DEFAULT_COPY_CONSTRUCTOR(Xor); }; -class HRor FINAL : public HBinaryOperation { +class HRor final : public HBinaryOperation { public: HRor(DataType::Type result_type, HInstruction* value, HInstruction* distance) : HBinaryOperation(kRor, result_type, value, distance) { @@ -5447,26 +5494,26 @@ class HRor FINAL : public HBinaryOperation { } } - HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE { + HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override { return GetBlock()->GetGraph()->GetIntConstant( Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc()); } - HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE { + HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override { return GetBlock()->GetGraph()->GetLongConstant( Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc()); } HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED, - HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE { + HLongConstant* distance ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for the (long, long) case."; UNREACHABLE(); } HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED, - HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE { + HFloatConstant* distance ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for float values"; UNREACHABLE(); } HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED, - HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE { + HDoubleConstant* distance ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } @@ -5479,7 +5526,7 @@ class HRor FINAL : public HBinaryOperation { // The value of a parameter in this method. Its location depends on // the calling convention. 
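For context on the substitution running through every hunk here: OVERRIDE and FINAL were compatibility macros over the C++11 keywords, kept from the days when pre-C++11 toolchains had to be supported. A sketch of the assumed pre-change definitions (their exact home, art's base/macros.h, is an assumption of this note):

    // Presumed old definitions; with C++11 as the minimum language level they
    // are pure indirection, so the keywords can be spelled directly, as this
    // change does throughout.
    #define OVERRIDE override
    #define FINAL final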
-class HParameterValue FINAL : public HExpression<0> { +class HParameterValue final : public HExpression<0> { public: HParameterValue(const DexFile& dex_file, dex::TypeIndex type_index, @@ -5499,7 +5546,7 @@ class HParameterValue FINAL : public HExpression<0> { uint8_t GetIndex() const { return index_; } bool IsThis() const { return GetPackedFlag<kFlagIsThis>(); } - bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); } + bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); } void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); } DECLARE_INSTRUCTION(ParameterValue); @@ -5522,30 +5569,30 @@ class HParameterValue FINAL : public HExpression<0> { const uint8_t index_; }; -class HNot FINAL : public HUnaryOperation { +class HNot final : public HUnaryOperation { public: HNot(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc) : HUnaryOperation(kNot, result_type, input, dex_pc) { } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } template <typename T> static T Compute(T x) { return ~x; } - HConstant* Evaluate(HIntConstant* x) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x) const override { return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x) const override { return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc()); } - HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for float values"; UNREACHABLE(); } - HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } @@ -5556,14 +5603,14 @@ class HNot FINAL : public HUnaryOperation { DEFAULT_COPY_CONSTRUCTOR(Not); }; -class HBooleanNot FINAL : public HUnaryOperation { +class HBooleanNot final : public HUnaryOperation { public: explicit HBooleanNot(HInstruction* input, uint32_t dex_pc = kNoDexPc) : HUnaryOperation(kBooleanNot, DataType::Type::kBool, input, dex_pc) { } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } @@ -5572,18 +5619,18 @@ class HBooleanNot FINAL : public HUnaryOperation { return !x; } - HConstant* Evaluate(HIntConstant* x) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x) const override { return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for long values"; UNREACHABLE(); } - HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const OVERRIDE { + HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << 
" is not defined for float values"; UNREACHABLE(); } - HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const OVERRIDE { + HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } @@ -5594,7 +5641,7 @@ class HBooleanNot FINAL : public HUnaryOperation { DEFAULT_COPY_CONSTRUCTOR(BooleanNot); }; -class HTypeConversion FINAL : public HExpression<1> { +class HTypeConversion final : public HExpression<1> { public: // Instantiate a type conversion of `input` to `result_type`. HTypeConversion(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc) @@ -5608,9 +5655,9 @@ class HTypeConversion FINAL : public HExpression<1> { DataType::Type GetInputType() const { return GetInput()->GetType(); } DataType::Type GetResultType() const { return GetType(); } - bool IsClonable() const OVERRIDE { return true; } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool IsClonable() const override { return true; } + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } @@ -5626,26 +5673,27 @@ class HTypeConversion FINAL : public HExpression<1> { static constexpr uint32_t kNoRegNumber = -1; -class HNullCheck FINAL : public HExpression<1> { +class HNullCheck final : public HExpression<1> { public: // `HNullCheck` can trigger GC, as it may call the `NullPointerException` - // constructor. + // constructor. However it can only do it on a fatal slow path so execution never returns to the + // instruction following the current one; thus 'SideEffects::None()' is used. 
HNullCheck(HInstruction* value, uint32_t dex_pc) - : HExpression(kNullCheck, value->GetType(), SideEffects::CanTriggerGC(), dex_pc) { + : HExpression(kNullCheck, value->GetType(), SideEffects::None(), dex_pc) { SetRawInputAt(0, value); } - bool IsClonable() const OVERRIDE { return true; } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool IsClonable() const override { return true; } + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } - bool NeedsEnvironment() const OVERRIDE { return true; } + bool NeedsEnvironment() const override { return true; } - bool CanThrow() const OVERRIDE { return true; } + bool CanThrow() const override { return true; } - bool CanBeNull() const OVERRIDE { return false; } + bool CanBeNull() const override { return false; } DECLARE_INSTRUCTION(NullCheck); @@ -5690,7 +5738,7 @@ class FieldInfo : public ValueObject { const DexFile& dex_file_; }; -class HInstanceFieldGet FINAL : public HExpression<1> { +class HInstanceFieldGet final : public HExpression<1> { public: HInstanceFieldGet(HInstruction* value, ArtField* field, @@ -5715,19 +5763,19 @@ class HInstanceFieldGet FINAL : public HExpression<1> { SetRawInputAt(0, value); } - bool IsClonable() const OVERRIDE { return true; } - bool CanBeMoved() const OVERRIDE { return !IsVolatile(); } + bool IsClonable() const override { return true; } + bool CanBeMoved() const override { return !IsVolatile(); } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { const HInstanceFieldGet* other_get = other->AsInstanceFieldGet(); return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue(); } - bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { + bool CanDoImplicitNullCheckOn(HInstruction* obj) const override { return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value()); } - size_t ComputeHashCode() const OVERRIDE { + size_t ComputeHashCode() const override { return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue(); } @@ -5752,7 +5800,7 @@ class HInstanceFieldGet FINAL : public HExpression<1> { const FieldInfo field_info_; }; -class HInstanceFieldSet FINAL : public HExpression<2> { +class HInstanceFieldSet final : public HExpression<2> { public: HInstanceFieldSet(HInstruction* object, HInstruction* value, @@ -5779,9 +5827,9 @@ class HInstanceFieldSet FINAL : public HExpression<2> { SetRawInputAt(1, value); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } - bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { + bool CanDoImplicitNullCheckOn(HInstruction* obj) const override { return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value()); } @@ -5807,7 +5855,7 @@ class HInstanceFieldSet FINAL : public HExpression<2> { const FieldInfo field_info_; }; -class HArrayGet FINAL : public HExpression<2> { +class HArrayGet final : public HExpression<2> { public: HArrayGet(HInstruction* array, HInstruction* index, @@ -5833,12 +5881,12 @@ class HArrayGet FINAL : public HExpression<2> { SetRawInputAt(1, index); } - bool IsClonable() const OVERRIDE { return true; } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other 
ATTRIBUTE_UNUSED) const OVERRIDE { + bool IsClonable() const override { return true; } + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } - bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE { + bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override { // TODO: We can be smarter here. // Currently, unless the array is the result of NewArray, the array access is always // preceded by some form of null check necessary for the bounds check, usually @@ -5898,7 +5946,7 @@ class HArrayGet FINAL : public HExpression<2> { "Too many packed fields."); }; -class HArraySet FINAL : public HExpression<3> { +class HArraySet final : public HExpression<3> { public: HArraySet(HInstruction* array, HInstruction* index, @@ -5930,17 +5978,17 @@ class HArraySet FINAL : public HExpression<3> { SetRawInputAt(2, value); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } - bool NeedsEnvironment() const OVERRIDE { + bool NeedsEnvironment() const override { // We call a runtime method to throw ArrayStoreException. return NeedsTypeCheck(); } // Can throw ArrayStoreException. - bool CanThrow() const OVERRIDE { return NeedsTypeCheck(); } + bool CanThrow() const override { return NeedsTypeCheck(); } - bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE { + bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override { // TODO: Same as for ArrayGet. return false; } @@ -6017,7 +6065,7 @@ class HArraySet FINAL : public HExpression<3> { BitField<DataType::Type, kFieldExpectedComponentType, kFieldExpectedComponentTypeSize>; }; -class HArrayLength FINAL : public HExpression<1> { +class HArrayLength final : public HExpression<1> { public: HArrayLength(HInstruction* array, uint32_t dex_pc, bool is_string_length = false) : HExpression(kArrayLength, DataType::Type::kInt32, SideEffects::None(), dex_pc) { @@ -6027,12 +6075,12 @@ class HArrayLength FINAL : public HExpression<1> { SetRawInputAt(0, array); } - bool IsClonable() const OVERRIDE { return true; } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool IsClonable() const override { return true; } + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } - bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { + bool CanDoImplicitNullCheckOn(HInstruction* obj) const override { return obj == InputAt(0); } @@ -6055,30 +6103,31 @@ class HArrayLength FINAL : public HExpression<1> { "Too many packed fields."); }; -class HBoundsCheck FINAL : public HExpression<2> { +class HBoundsCheck final : public HExpression<2> { public: // `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException` - // constructor. + // constructor. However, it can only do so on a fatal slow path, so execution never returns to the + // instruction following the current one; thus 'SideEffects::None()' is used.
HBoundsCheck(HInstruction* index, HInstruction* length, uint32_t dex_pc, bool is_string_char_at = false) - : HExpression(kBoundsCheck, index->GetType(), SideEffects::CanTriggerGC(), dex_pc) { + : HExpression(kBoundsCheck, index->GetType(), SideEffects::None(), dex_pc) { DCHECK_EQ(DataType::Type::kInt32, DataType::Kind(index->GetType())); SetPackedFlag<kFlagIsStringCharAt>(is_string_char_at); SetRawInputAt(0, index); SetRawInputAt(1, length); } - bool IsClonable() const OVERRIDE { return true; } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool IsClonable() const override { return true; } + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } - bool NeedsEnvironment() const OVERRIDE { return true; } + bool NeedsEnvironment() const override { return true; } - bool CanThrow() const OVERRIDE { return true; } + bool CanThrow() const override { return true; } bool IsStringCharAt() const { return GetPackedFlag<kFlagIsStringCharAt>(); } @@ -6091,18 +6140,21 @@ class HBoundsCheck FINAL : public HExpression<2> { private: static constexpr size_t kFlagIsStringCharAt = kNumberOfGenericPackedBits; + static constexpr size_t kNumberOfBoundsCheckPackedBits = kFlagIsStringCharAt + 1; + static_assert(kNumberOfBoundsCheckPackedBits <= HInstruction::kMaxNumberOfPackedBits, + "Too many packed fields."); }; -class HSuspendCheck FINAL : public HExpression<0> { +class HSuspendCheck final : public HExpression<0> { public: explicit HSuspendCheck(uint32_t dex_pc = kNoDexPc) : HExpression(kSuspendCheck, SideEffects::CanTriggerGC(), dex_pc), slow_path_(nullptr) { } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } - bool NeedsEnvironment() const OVERRIDE { + bool NeedsEnvironment() const override { return true; } @@ -6128,7 +6180,7 @@ class HNativeDebugInfo : public HExpression<0> { : HExpression<0>(kNativeDebugInfo, SideEffects::None(), dex_pc) { } - bool NeedsEnvironment() const OVERRIDE { + bool NeedsEnvironment() const override { return true; } @@ -6141,7 +6193,7 @@ class HNativeDebugInfo : public HExpression<0> { /** * Instruction to load a Class object. */ -class HLoadClass FINAL : public HInstruction { +class HLoadClass final : public HInstruction { public: // Determines how to load the Class. 
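An aside on the static_assert added to HBoundsCheck above (the HLoadClass::LoadKind enum continues below): subclasses append their flags after the generic packed bits, so each addition should re-check the budget. A self-contained sketch of the pattern, with simplified stand-ins for the packed-field helpers:

    #include <cstddef>
    #include <cstdint>

    // Toy packed-field scheme: flags live in a fixed-width integer; derived
    // classes append bits after those the base class already uses, and a
    // static_assert guards the total against the backing field's width.
    struct Packed {
      static constexpr std::size_t kNumberOfGenericPackedBits = 3;
      static constexpr std::size_t kMaxNumberOfPackedBits = 8 * sizeof(uint32_t);
      uint32_t packed_fields_ = 0u;
      void SetFlag(std::size_t flag, bool value) {
        packed_fields_ = (packed_fields_ & ~(1u << flag)) |
                         (static_cast<uint32_t>(value) << flag);
      }
      bool GetFlag(std::size_t flag) const { return (packed_fields_ >> flag) & 1u; }
    };

    struct BoundsCheckLike : Packed {
      static constexpr std::size_t kFlagIsStringCharAt = kNumberOfGenericPackedBits;
      static constexpr std::size_t kNumberOfPackedBits = kFlagIsStringCharAt + 1;
      static_assert(kNumberOfPackedBits <= kMaxNumberOfPackedBits,
                    "Too many packed fields.");
    };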
enum class LoadKind { @@ -6204,7 +6256,7 @@ class HLoadClass FINAL : public HInstruction { SetPackedFlag<kFlagValidLoadedClassRTI>(false); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } void SetLoadKind(LoadKind load_kind); @@ -6218,15 +6270,15 @@ class HLoadClass FINAL : public HInstruction { GetLoadKind() == LoadKind::kBssEntry; } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } bool InstructionDataEquals(const HInstruction* other) const; - size_t ComputeHashCode() const OVERRIDE { return type_index_.index_; } + size_t ComputeHashCode() const override { return type_index_.index_; } - bool CanBeNull() const OVERRIDE { return false; } + bool CanBeNull() const override { return false; } - bool NeedsEnvironment() const OVERRIDE { + bool NeedsEnvironment() const override { return CanCallRuntime(); } @@ -6244,7 +6296,7 @@ class HLoadClass FINAL : public HInstruction { GetLoadKind() == LoadKind::kBssEntry; } - bool CanThrow() const OVERRIDE { + bool CanThrow() const override { return NeedsAccessCheck() || MustGenerateClinitCheck() || // If the class is in the boot image, the lookup in the runtime call cannot throw. @@ -6271,7 +6323,7 @@ class HLoadClass FINAL : public HInstruction { dex::TypeIndex GetTypeIndex() const { return type_index_; } const DexFile& GetDexFile() const { return dex_file_; } - bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { + bool NeedsDexCacheOfDeclaringClass() const override { return GetLoadKind() == LoadKind::kRuntimeCall; } @@ -6298,7 +6350,7 @@ class HLoadClass FINAL : public HInstruction { void AddSpecialInput(HInstruction* special_input); using HInstruction::GetInputRecords; // Keep the const version visible. - ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL { + ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final { return ArrayRef<HUserRecord<HInstruction*>>( &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u); } @@ -6379,7 +6431,7 @@ inline void HLoadClass::AddSpecialInput(HInstruction* special_input) { special_input->AddUseAt(this, 0); } -class HLoadString FINAL : public HInstruction { +class HLoadString final : public HInstruction { public: // Determines how to load the String. enum class LoadKind { @@ -6423,7 +6475,7 @@ class HLoadString FINAL : public HInstruction { SetPackedField<LoadKindField>(LoadKind::kRuntimeCall); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } void SetLoadKind(LoadKind load_kind); @@ -6453,15 +6505,15 @@ class HLoadString FINAL : public HInstruction { string_ = str; } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE; + bool InstructionDataEquals(const HInstruction* other) const override; - size_t ComputeHashCode() const OVERRIDE { return string_index_.index_; } + size_t ComputeHashCode() const override { return string_index_.index_; } // Will call the runtime if we need to load the string through // the dex cache and the string is not guaranteed to be there yet. 
- bool NeedsEnvironment() const OVERRIDE { + bool NeedsEnvironment() const override { LoadKind load_kind = GetLoadKind(); if (load_kind == LoadKind::kBootImageLinkTimePcRelative || load_kind == LoadKind::kBootImageRelRo || @@ -6472,12 +6524,12 @@ class HLoadString FINAL : public HInstruction { return true; } - bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { + bool NeedsDexCacheOfDeclaringClass() const override { return GetLoadKind() == LoadKind::kRuntimeCall; } - bool CanBeNull() const OVERRIDE { return false; } - bool CanThrow() const OVERRIDE { return NeedsEnvironment(); } + bool CanBeNull() const override { return false; } + bool CanThrow() const override { return NeedsEnvironment(); } static SideEffects SideEffectsForArchRuntimeCalls() { return SideEffects::CanTriggerGC(); @@ -6486,7 +6538,7 @@ class HLoadString FINAL : public HInstruction { void AddSpecialInput(HInstruction* special_input); using HInstruction::GetInputRecords; // Keep the const version visible. - ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL { + ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final { return ArrayRef<HUserRecord<HInstruction*>>( &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u); } @@ -6548,7 +6600,7 @@ inline void HLoadString::AddSpecialInput(HInstruction* special_input) { special_input->AddUseAt(this, 0); } -class HLoadMethodHandle FINAL : public HInstruction { +class HLoadMethodHandle final : public HInstruction { public: HLoadMethodHandle(HCurrentMethod* current_method, uint16_t method_handle_idx, @@ -6564,12 +6616,12 @@ class HLoadMethodHandle FINAL : public HInstruction { } using HInstruction::GetInputRecords; // Keep the const version visible. - ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL { + ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final { return ArrayRef<HUserRecord<HInstruction*>>( &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } uint16_t GetMethodHandleIndex() const { return method_handle_idx_; } @@ -6592,7 +6644,7 @@ class HLoadMethodHandle FINAL : public HInstruction { const DexFile& dex_file_; }; -class HLoadMethodType FINAL : public HInstruction { +class HLoadMethodType final : public HInstruction { public: HLoadMethodType(HCurrentMethod* current_method, dex::ProtoIndex proto_index, @@ -6608,12 +6660,12 @@ class HLoadMethodType FINAL : public HInstruction { } using HInstruction::GetInputRecords; // Keep the const version visible. - ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL { + ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final { return ArrayRef<HUserRecord<HInstruction*>>( &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } dex::ProtoIndex GetProtoIndex() const { return proto_index_; } @@ -6639,7 +6691,7 @@ class HLoadMethodType FINAL : public HInstruction { /** * Performs an initialization check on its Class object input. */ -class HClinitCheck FINAL : public HExpression<1> { +class HClinitCheck final : public HExpression<1> { public: HClinitCheck(HLoadClass* constant, uint32_t dex_pc) : HExpression( @@ -6650,17 +6702,17 @@ class HClinitCheck FINAL : public HExpression<1> { SetRawInputAt(0, constant); } // TODO: Make ClinitCheck clonable. 
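Stepping back to the GetInputRecords() overrides above (HLoadClass, HLoadString, HLoadMethodHandle, HLoadMethodType), now spelled `final` instead of `OVERRIDE FINAL`: they expose an optional "special input" as a zero- or one-element array, so callers iterate inputs uniformly whether or not the input is present. A self-contained sketch of the idea (Span is a hypothetical stand-in for ArrayRef):

    #include <cstddef>

    template <typename T>
    struct Span {  // simplified stand-in for ArrayRef<T>
      T* data;
      std::size_t size;
    };

    struct LoadLike {
      int* special_input_ = nullptr;  // absent while null
      // Present the optional input as an array of length 0 or 1.
      Span<int*> GetInputs() {
        return Span<int*>{&special_input_, (special_input_ != nullptr) ? 1u : 0u};
      }
    };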
- bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } - bool NeedsEnvironment() const OVERRIDE { + bool NeedsEnvironment() const override { // May call runtime to initialize the class. return true; } - bool CanThrow() const OVERRIDE { return true; } + bool CanThrow() const override { return true; } HLoadClass* GetLoadClass() const { DCHECK(InputAt(0)->IsLoadClass()); @@ -6674,7 +6726,7 @@ class HClinitCheck FINAL : public HExpression<1> { DEFAULT_COPY_CONSTRUCTOR(ClinitCheck); }; -class HStaticFieldGet FINAL : public HExpression<1> { +class HStaticFieldGet final : public HExpression<1> { public: HStaticFieldGet(HInstruction* cls, ArtField* field, @@ -6700,15 +6752,15 @@ class HStaticFieldGet FINAL : public HExpression<1> { } - bool IsClonable() const OVERRIDE { return true; } - bool CanBeMoved() const OVERRIDE { return !IsVolatile(); } + bool IsClonable() const override { return true; } + bool CanBeMoved() const override { return !IsVolatile(); } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { const HStaticFieldGet* other_get = other->AsStaticFieldGet(); return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue(); } - size_t ComputeHashCode() const OVERRIDE { + size_t ComputeHashCode() const override { return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue(); } @@ -6733,7 +6785,7 @@ class HStaticFieldGet FINAL : public HExpression<1> { const FieldInfo field_info_; }; -class HStaticFieldSet FINAL : public HExpression<2> { +class HStaticFieldSet final : public HExpression<2> { public: HStaticFieldSet(HInstruction* cls, HInstruction* value, @@ -6760,7 +6812,7 @@ class HStaticFieldSet FINAL : public HExpression<2> { SetRawInputAt(1, value); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } const FieldInfo& GetFieldInfo() const { return field_info_; } MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); } DataType::Type GetFieldType() const { return field_info_.GetFieldType(); } @@ -6784,7 +6836,7 @@ class HStaticFieldSet FINAL : public HExpression<2> { const FieldInfo field_info_; }; -class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> { +class HUnresolvedInstanceFieldGet final : public HExpression<1> { public: HUnresolvedInstanceFieldGet(HInstruction* obj, DataType::Type field_type, @@ -6798,9 +6850,9 @@ class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> { SetRawInputAt(0, obj); } - bool IsClonable() const OVERRIDE { return true; } - bool NeedsEnvironment() const OVERRIDE { return true; } - bool CanThrow() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } + bool NeedsEnvironment() const override { return true; } + bool CanThrow() const override { return true; } DataType::Type GetFieldType() const { return GetType(); } uint32_t GetFieldIndex() const { return field_index_; } @@ -6814,7 +6866,7 @@ class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> { const uint32_t field_index_; }; -class HUnresolvedInstanceFieldSet FINAL : public HExpression<2> { +class HUnresolvedInstanceFieldSet final : public HExpression<2> { public: HUnresolvedInstanceFieldSet(HInstruction* obj, 
HInstruction* value, @@ -6829,9 +6881,9 @@ class HUnresolvedInstanceFieldSet FINAL : public HExpression<2> { SetRawInputAt(1, value); } - bool IsClonable() const OVERRIDE { return true; } - bool NeedsEnvironment() const OVERRIDE { return true; } - bool CanThrow() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } + bool NeedsEnvironment() const override { return true; } + bool CanThrow() const override { return true; } DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); } uint32_t GetFieldIndex() const { return field_index_; } @@ -6854,7 +6906,7 @@ class HUnresolvedInstanceFieldSet FINAL : public HExpression<2> { const uint32_t field_index_; }; -class HUnresolvedStaticFieldGet FINAL : public HExpression<0> { +class HUnresolvedStaticFieldGet final : public HExpression<0> { public: HUnresolvedStaticFieldGet(DataType::Type field_type, uint32_t field_index, @@ -6866,9 +6918,9 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> { field_index_(field_index) { } - bool IsClonable() const OVERRIDE { return true; } - bool NeedsEnvironment() const OVERRIDE { return true; } - bool CanThrow() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } + bool NeedsEnvironment() const override { return true; } + bool CanThrow() const override { return true; } DataType::Type GetFieldType() const { return GetType(); } uint32_t GetFieldIndex() const { return field_index_; } @@ -6882,7 +6934,7 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> { const uint32_t field_index_; }; -class HUnresolvedStaticFieldSet FINAL : public HExpression<1> { +class HUnresolvedStaticFieldSet final : public HExpression<1> { public: HUnresolvedStaticFieldSet(HInstruction* value, DataType::Type field_type, @@ -6895,9 +6947,9 @@ class HUnresolvedStaticFieldSet FINAL : public HExpression<1> { SetRawInputAt(0, value); } - bool IsClonable() const OVERRIDE { return true; } - bool NeedsEnvironment() const OVERRIDE { return true; } - bool CanThrow() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } + bool NeedsEnvironment() const override { return true; } + bool CanThrow() const override { return true; } DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); } uint32_t GetFieldIndex() const { return field_index_; } @@ -6921,13 +6973,13 @@ class HUnresolvedStaticFieldSet FINAL : public HExpression<1> { }; // Implement the move-exception DEX instruction. -class HLoadException FINAL : public HExpression<0> { +class HLoadException final : public HExpression<0> { public: explicit HLoadException(uint32_t dex_pc = kNoDexPc) : HExpression(kLoadException, DataType::Type::kReference, SideEffects::None(), dex_pc) { } - bool CanBeNull() const OVERRIDE { return false; } + bool CanBeNull() const override { return false; } DECLARE_INSTRUCTION(LoadException); @@ -6937,7 +6989,7 @@ class HLoadException FINAL : public HExpression<0> { // Implicit part of move-exception which clears thread-local exception storage. // Must not be removed because the runtime expects the TLS to get cleared. 
-class HClearException FINAL : public HExpression<0> { +class HClearException final : public HExpression<0> { public: explicit HClearException(uint32_t dex_pc = kNoDexPc) : HExpression(kClearException, SideEffects::AllWrites(), dex_pc) { @@ -6949,20 +7001,20 @@ class HClearException FINAL : public HExpression<0> { DEFAULT_COPY_CONSTRUCTOR(ClearException); }; -class HThrow FINAL : public HExpression<1> { +class HThrow final : public HExpression<1> { public: HThrow(HInstruction* exception, uint32_t dex_pc) : HExpression(kThrow, SideEffects::CanTriggerGC(), dex_pc) { SetRawInputAt(0, exception); } - bool IsControlFlow() const OVERRIDE { return true; } + bool IsControlFlow() const override { return true; } - bool NeedsEnvironment() const OVERRIDE { return true; } + bool NeedsEnvironment() const override { return true; } - bool CanThrow() const OVERRIDE { return true; } + bool CanThrow() const override { return true; } - bool AlwaysThrows() const OVERRIDE { return true; } + bool AlwaysThrows() const override { return true; } DECLARE_INSTRUCTION(Throw); @@ -7049,10 +7101,10 @@ class HTypeCheckInstruction : public HVariableInputSizeInstruction { return static_cast<uint32_t>(mask->AsIntConstant()->GetValue()); } - bool IsClonable() const OVERRIDE { return true; } - bool CanBeMoved() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } + bool CanBeMoved() const override { return true; } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { DCHECK(other->IsInstanceOf() || other->IsCheckCast()) << other->DebugName(); return GetPackedFields() == down_cast<const HTypeCheckInstruction*>(other)->GetPackedFields(); } @@ -7097,7 +7149,7 @@ class HTypeCheckInstruction : public HVariableInputSizeInstruction { Handle<mirror::Class> klass_; }; -class HInstanceOf FINAL : public HTypeCheckInstruction { +class HInstanceOf final : public HTypeCheckInstruction { public: HInstanceOf(HInstruction* object, HInstruction* target_class_or_null, @@ -7119,9 +7171,9 @@ class HInstanceOf FINAL : public HTypeCheckInstruction { bitstring_mask, SideEffectsForArchRuntimeCalls(check_kind)) {} - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } - bool NeedsEnvironment() const OVERRIDE { + bool NeedsEnvironment() const override { return CanCallRuntime(GetTypeCheckKind()); } @@ -7140,7 +7192,7 @@ class HInstanceOf FINAL : public HTypeCheckInstruction { DEFAULT_COPY_CONSTRUCTOR(InstanceOf); }; -class HBoundType FINAL : public HExpression<1> { +class HBoundType final : public HExpression<1> { public: explicit HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc) : HExpression(kBoundType, DataType::Type::kReference, SideEffects::None(), dex_pc), @@ -7151,8 +7203,8 @@ class HBoundType FINAL : public HExpression<1> { SetRawInputAt(0, input); } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE; - bool IsClonable() const OVERRIDE { return true; } + bool InstructionDataEquals(const HInstruction* other) const override; + bool IsClonable() const override { return true; } // {Get,Set}Upper* should only be used in reference type propagation. 
const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; } @@ -7164,7 +7216,7 @@ class HBoundType FINAL : public HExpression<1> { SetPackedFlag<kFlagCanBeNull>(can_be_null); } - bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); } + bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); } DECLARE_INSTRUCTION(BoundType); @@ -7188,7 +7240,7 @@ class HBoundType FINAL : public HExpression<1> { ReferenceTypeInfo upper_bound_; }; -class HCheckCast FINAL : public HTypeCheckInstruction { +class HCheckCast final : public HTypeCheckInstruction { public: HCheckCast(HInstruction* object, HInstruction* target_class_or_null, @@ -7210,13 +7262,13 @@ class HCheckCast FINAL : public HTypeCheckInstruction { bitstring_mask, SideEffects::CanTriggerGC()) {} - bool IsClonable() const OVERRIDE { return true; } - bool NeedsEnvironment() const OVERRIDE { + bool IsClonable() const override { return true; } + bool NeedsEnvironment() const override { // Instruction may throw a CheckCastError. return true; } - bool CanThrow() const OVERRIDE { return true; } + bool CanThrow() const override { return true; } DECLARE_INSTRUCTION(CheckCast); @@ -7250,7 +7302,7 @@ enum MemBarrierKind { }; std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind); -class HMemoryBarrier FINAL : public HExpression<0> { +class HMemoryBarrier final : public HExpression<0> { public: explicit HMemoryBarrier(MemBarrierKind barrier_kind, uint32_t dex_pc = kNoDexPc) : HExpression(kMemoryBarrier, @@ -7259,7 +7311,7 @@ class HMemoryBarrier FINAL : public HExpression<0> { SetPackedField<BarrierKindField>(barrier_kind); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } MemBarrierKind GetBarrierKind() { return GetPackedField<BarrierKindField>(); } @@ -7335,7 +7387,7 @@ class HMemoryBarrier FINAL : public HExpression<0> { // * CompilerDriver::RequiresConstructorBarrier // * QuasiAtomic::ThreadFenceForConstructor // -class HConstructorFence FINAL : public HVariableInputSizeInstruction { +class HConstructorFence final : public HVariableInputSizeInstruction { // A fence has variable inputs because the inputs can be removed // after prepare_for_register_allocation phase. // (TODO: In the future a fence could freeze multiple objects @@ -7432,7 +7484,7 @@ class HConstructorFence FINAL : public HVariableInputSizeInstruction { DEFAULT_COPY_CONSTRUCTOR(ConstructorFence); }; -class HMonitorOperation FINAL : public HExpression<1> { +class HMonitorOperation final : public HExpression<1> { public: enum class OperationKind { kEnter, @@ -7449,9 +7501,9 @@ class HMonitorOperation FINAL : public HExpression<1> { } // Instruction may go into runtime, so we need an environment. - bool NeedsEnvironment() const OVERRIDE { return true; } + bool NeedsEnvironment() const override { return true; } - bool CanThrow() const OVERRIDE { + bool CanThrow() const override { // Verifier guarantees that monitor-exit cannot throw. // This is important because it allows the HGraphBuilder to remove // a dead throw-catch loop generated for `synchronized` blocks/methods. 
@@ -7477,7 +7529,7 @@ class HMonitorOperation FINAL : public HExpression<1> { using OperationKindField = BitField<OperationKind, kFieldOperationKind, kFieldOperationKindSize>; }; -class HSelect FINAL : public HExpression<3> { +class HSelect final : public HExpression<3> { public: HSelect(HInstruction* condition, HInstruction* true_value, @@ -7495,17 +7547,17 @@ class HSelect FINAL : public HExpression<3> { SetRawInputAt(2, condition); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } HInstruction* GetFalseValue() const { return InputAt(0); } HInstruction* GetTrueValue() const { return InputAt(1); } HInstruction* GetCondition() const { return InputAt(2); } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } - bool CanBeNull() const OVERRIDE { + bool CanBeNull() const override { return GetTrueValue()->CanBeNull() || GetFalseValue()->CanBeNull(); } @@ -7593,7 +7645,7 @@ std::ostream& operator<<(std::ostream& os, const MoveOperands& rhs); static constexpr size_t kDefaultNumberOfMoves = 4; -class HParallelMove FINAL : public HExpression<0> { +class HParallelMove final : public HExpression<0> { public: explicit HParallelMove(ArenaAllocator* allocator, uint32_t dex_pc = kNoDexPc) : HExpression(kParallelMove, SideEffects::None(), dex_pc), @@ -7655,7 +7707,7 @@ class HParallelMove FINAL : public HExpression<0> { // never used across anything that can trigger GC. // The result of this instruction is not a pointer in the sense of `DataType::Type::kReference`. // So we represent it by the type `DataType::Type::kInt32`. -class HIntermediateAddress FINAL : public HExpression<2> { +class HIntermediateAddress final : public HExpression<2> { public: HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc) : HExpression(kIntermediateAddress, @@ -7669,12 +7721,12 @@ class HIntermediateAddress FINAL : public HExpression<2> { SetRawInputAt(1, offset); } - bool IsClonable() const OVERRIDE { return true; } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool IsClonable() const override { return true; } + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } - bool IsActualObject() const OVERRIDE { return false; } + bool IsActualObject() const override { return false; } HInstruction* GetBaseAddress() const { return InputAt(0); } HInstruction* GetOffset() const { return InputAt(1); } @@ -7747,7 +7799,7 @@ class HGraphDelegateVisitor : public HGraphVisitor { // Visit functions that delegate to the super class.
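The DECLARE_VISIT_INSTRUCTION macro just below generates one forwarding method per instruction kind; each Visit##name delegates to the handler of its super class, so a visitor can override a single level (say, binary operations) and catch every concrete kind beneath it. A toy expansion of the pattern (hypothetical simplified types):

    struct HInstr;  // stand-in for the concrete H##name instruction types

    struct DelegateVisitor {
      virtual ~DelegateVisitor() = default;
      virtual void VisitInstruction(HInstr*) {}
      virtual void VisitBinaryOperation(HInstr* instr) { VisitInstruction(instr); }
      // What DECLARE_VISIT_INSTRUCTION(Add, BinaryOperation) would expand to:
      virtual void VisitAdd(HInstr* instr) { VisitBinaryOperation(instr); }
      // What DECLARE_VISIT_INSTRUCTION(Sub, BinaryOperation) would expand to:
      virtual void VisitSub(HInstr* instr) { VisitBinaryOperation(instr); }
    };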
#define DECLARE_VISIT_INSTRUCTION(name, super) \ - void Visit##name(H##name* instr) OVERRIDE { Visit##super(instr); } + void Visit##name(H##name* instr) override { Visit##super(instr); } FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION) @@ -7769,7 +7821,7 @@ class CloneAndReplaceInstructionVisitor : public HGraphDelegateVisitor { explicit CloneAndReplaceInstructionVisitor(HGraph* graph) : HGraphDelegateVisitor(graph), instr_replaced_by_clones_count_(0) {} - void VisitInstruction(HInstruction* instruction) OVERRIDE { + void VisitInstruction(HInstruction* instruction) override { if (instruction->IsClonable()) { ReplaceInstrOrPhiByClone(instruction); instr_replaced_by_clones_count_++; diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h index 05b27a7810..4993f5737e 100644 --- a/compiler/optimizing/nodes_mips.h +++ b/compiler/optimizing/nodes_mips.h @@ -30,7 +30,7 @@ class HMipsComputeBaseMethodAddress : public HExpression<0> { kNoDexPc) { } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(MipsComputeBaseMethodAddress); @@ -39,7 +39,7 @@ }; // Mips version of HPackedSwitch that holds a pointer to the base method address. -class HMipsPackedSwitch FINAL : public HExpression<2> { +class HMipsPackedSwitch final : public HExpression<2> { public: HMipsPackedSwitch(int32_t start_value, int32_t num_entries, @@ -53,7 +53,7 @@ class HMipsPackedSwitch FINAL : public HExpression<2> { SetRawInputAt(1, method_base); } - bool IsControlFlow() const OVERRIDE { return true; } + bool IsControlFlow() const override { return true; } int32_t GetStartValue() const { return start_value_; } @@ -91,7 +91,7 @@ class HMipsPackedSwitch FINAL : public HExpression<2> { // // Note: as the instruction doesn't involve the base array address in its computations, it has no side // effects.
-class HIntermediateArrayAddressIndex FINAL : public HExpression<2> { +class HIntermediateArrayAddressIndex final : public HExpression<2> { public: HIntermediateArrayAddressIndex(HInstruction* index, HInstruction* shift, uint32_t dex_pc) : HExpression(kIntermediateArrayAddressIndex, @@ -102,11 +102,11 @@ class HIntermediateArrayAddressIndex FINAL : public HExpression<2> { SetRawInputAt(1, shift); } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } - bool IsActualObject() const OVERRIDE { return false; } + bool IsActualObject() const override { return false; } HInstruction* GetIndex() const { return InputAt(0); } HInstruction* GetShift() const { return InputAt(1); } diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h index 29358e1141..7dcac1787e 100644 --- a/compiler/optimizing/nodes_shared.h +++ b/compiler/optimizing/nodes_shared.h @@ -24,7 +24,7 @@ namespace art { -class HMultiplyAccumulate FINAL : public HExpression<3> { +class HMultiplyAccumulate final : public HExpression<3> { public: HMultiplyAccumulate(DataType::Type type, InstructionKind op, @@ -39,14 +39,14 @@ class HMultiplyAccumulate FINAL : public HExpression<3> { SetRawInputAt(kInputMulRightIndex, mul_right); } - bool IsClonable() const OVERRIDE { return true; } + bool IsClonable() const override { return true; } static constexpr int kInputAccumulatorIndex = 0; static constexpr int kInputMulLeftIndex = 1; static constexpr int kInputMulRightIndex = 2; - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other) const override { return op_kind_ == other->AsMultiplyAccumulate()->op_kind_; } @@ -62,7 +62,7 @@ class HMultiplyAccumulate FINAL : public HExpression<3> { const InstructionKind op_kind_; }; -class HBitwiseNegatedRight FINAL : public HBinaryOperation { +class HBitwiseNegatedRight final : public HBinaryOperation { public: HBitwiseNegatedRight(DataType::Type result_type, InstructionKind op, @@ -97,21 +97,21 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation { } } - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override { return GetBlock()->GetGraph()->GetIntConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override { return GetBlock()->GetGraph()->GetLongConstant( Compute(x->GetValue(), y->GetValue()), GetDexPc()); } HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED, - HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HFloatConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for float values"; UNREACHABLE(); } HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED, - HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + HDoubleConstant* y ATTRIBUTE_UNUSED) const override { LOG(FATAL) << DebugName() << " is not defined for double values"; UNREACHABLE(); } @@ -145,7 +145,7 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation { // // Note: as the instruction doesn't involve 
the base array address in its computations, it has no side // effects (in contrast to HIntermediateAddress). -class HIntermediateAddressIndex FINAL : public HExpression<3> { +class HIntermediateAddressIndex final : public HExpression<3> { public: HIntermediateAddressIndex( HInstruction* index, HInstruction* offset, HInstruction* shift, uint32_t dex_pc) @@ -158,12 +158,12 @@ SetRawInputAt(2, shift); } - bool IsClonable() const OVERRIDE { return true; } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + bool IsClonable() const override { return true; } + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } - bool IsActualObject() const OVERRIDE { return false; } + bool IsActualObject() const override { return false; } HInstruction* GetIndex() const { return InputAt(0); } HInstruction* GetOffset() const { return InputAt(1); } @@ -175,7 +175,7 @@ DEFAULT_COPY_CONSTRUCTOR(IntermediateAddressIndex); }; -class HDataProcWithShifterOp FINAL : public HExpression<2> { +class HDataProcWithShifterOp final : public HExpression<2> { public: enum OpKind { kLSL, // Logical shift left. @@ -212,9 +212,9 @@ SetRawInputAt(1, right); } - bool IsClonable() const OVERRIDE { return true; } - bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(const HInstruction* other_instr) const OVERRIDE { + bool IsClonable() const override { return true; } + bool CanBeMoved() const override { return true; } + bool InstructionDataEquals(const HInstruction* other_instr) const override { const HDataProcWithShifterOp* other = other_instr->AsDataProcWithShifterOp(); return instr_kind_ == other->instr_kind_ && op_kind_ == other->op_kind_ && diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h index 95fb5ab76a..597e399dd1 100644 --- a/compiler/optimizing/nodes_vector.h +++ b/compiler/optimizing/nodes_vector.h @@ -117,12 +117,12 @@ class HVecOperation : public HVariableInputSizeInstruction { // Note: For newly introduced vector instructions HScheduler${ARCH}::IsSchedulingBarrier must be // altered to return true if the instruction might reside outside the SIMD loop body since SIMD // registers are not kept alive across vector loop boundaries (yet). - bool CanBeMoved() const OVERRIDE { return false; } + bool CanBeMoved() const override { return false; } // Tests if all data of a vector node (vector length and packed type) is equal. // Each concrete implementation that adds more fields should test equality of // those fields in its own method *and* call all super methods.
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { DCHECK(other->IsVecOperation()); const HVecOperation* o = other->AsVecOperation(); return GetVectorLength() == o->GetVectorLength() && GetPackedType() == o->GetPackedType(); @@ -280,7 +280,7 @@ class HVecMemoryOperation : public HVecOperation { HInstruction* GetArray() const { return InputAt(0); } HInstruction* GetIndex() const { return InputAt(1); } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { DCHECK(other->IsVecMemoryOperation()); const HVecMemoryOperation* o = other->AsVecMemoryOperation(); return HVecOperation::InstructionDataEquals(o) && GetAlignment() == o->GetAlignment(); @@ -315,7 +315,7 @@ inline static bool HasConsistentPackedTypes(HInstruction* input, DataType::Type // Replicates the given scalar into a vector, // viz. replicate(x) = [ x, .. , x ]. -class HVecReplicateScalar FINAL : public HVecUnaryOperation { +class HVecReplicateScalar final : public HVecUnaryOperation { public: HVecReplicateScalar(ArenaAllocator* allocator, HInstruction* scalar, @@ -329,7 +329,7 @@ class HVecReplicateScalar FINAL : public HVecUnaryOperation { // A replicate needs to stay in place, since SIMD registers are not // kept alive across vector loop boundaries (yet). - bool CanBeMoved() const OVERRIDE { return false; } + bool CanBeMoved() const override { return false; } DECLARE_INSTRUCTION(VecReplicateScalar); @@ -341,7 +341,7 @@ class HVecReplicateScalar FINAL : public HVecUnaryOperation { // viz. extract[ x1, .. , xn ] = x_i. // // TODO: for now only i == 1 case supported. -class HVecExtractScalar FINAL : public HVecUnaryOperation { +class HVecExtractScalar final : public HVecUnaryOperation { public: HVecExtractScalar(ArenaAllocator* allocator, HInstruction* input, @@ -361,7 +361,7 @@ class HVecExtractScalar FINAL : public HVecUnaryOperation { // An extract needs to stay in place, since SIMD registers are not // kept alive across vector loop boundaries (yet). - bool CanBeMoved() const OVERRIDE { return false; } + bool CanBeMoved() const override { return false; } DECLARE_INSTRUCTION(VecExtractScalar); @@ -372,7 +372,7 @@ class HVecExtractScalar FINAL : public HVecUnaryOperation { // Reduces the given vector into the first element as sum/min/max, // viz. sum-reduce[ x1, .. , xn ] = [ y, ---- ], where y = sum xi // and the "-" denotes "don't care" (implementation dependent). -class HVecReduce FINAL : public HVecUnaryOperation { +class HVecReduce final : public HVecUnaryOperation { public: enum ReductionKind { kSum = 1, @@ -393,9 +393,9 @@ class HVecReduce FINAL : public HVecUnaryOperation { ReductionKind GetKind() const { return kind_; } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { DCHECK(other->IsVecReduce()); const HVecReduce* o = other->AsVecReduce(); return HVecOperation::InstructionDataEquals(o) && GetKind() == o->GetKind(); @@ -412,7 +412,7 @@ class HVecReduce FINAL : public HVecUnaryOperation { // Converts every component in the vector, // viz. cnv[ x1, .. , xn ] = [ cnv(x1), .. , cnv(xn) ]. 
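The equality contract spelled out above (each concrete vector node compares the fields it adds *and* calls its super class) is what HVecMemoryOperation::InstructionDataEquals does with alignment. A self-contained sketch of the layering:

    #include <cstddef>

    struct VecOpLike {
      std::size_t vector_length = 0;
      virtual ~VecOpLike() = default;
      virtual bool DataEquals(const VecOpLike& other) const {
        return vector_length == other.vector_length;
      }
    };

    struct VecMemOpLike : VecOpLike {
      std::size_t alignment = 0;
      bool DataEquals(const VecOpLike& other) const override {
        // Kind equality is the caller's job (the DCHECKs above), so the
        // downcast is assumed safe in this sketch.
        const VecMemOpLike& o = static_cast<const VecMemOpLike&>(other);
        return VecOpLike::DataEquals(other) && alignment == o.alignment;
      }
    };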
-class HVecCnv FINAL : public HVecUnaryOperation { +class HVecCnv final : public HVecUnaryOperation { public: HVecCnv(ArenaAllocator* allocator, HInstruction* input, @@ -427,7 +427,7 @@ class HVecCnv FINAL : public HVecUnaryOperation { DataType::Type GetInputType() const { return InputAt(0)->AsVecOperation()->GetPackedType(); } DataType::Type GetResultType() const { return GetPackedType(); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecCnv); @@ -437,7 +437,7 @@ class HVecCnv FINAL : public HVecUnaryOperation { // Negates every component in the vector, // viz. neg[ x1, .. , xn ] = [ -x1, .. , -xn ]. -class HVecNeg FINAL : public HVecUnaryOperation { +class HVecNeg final : public HVecUnaryOperation { public: HVecNeg(ArenaAllocator* allocator, HInstruction* input, @@ -448,7 +448,7 @@ class HVecNeg FINAL : public HVecUnaryOperation { DCHECK(HasConsistentPackedTypes(input, packed_type)); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecNeg); @@ -459,7 +459,7 @@ class HVecNeg FINAL : public HVecUnaryOperation { // Takes absolute value of every component in the vector, // viz. abs[ x1, .. , xn ] = [ |x1|, .. , |xn| ] // for signed operand x. -class HVecAbs FINAL : public HVecUnaryOperation { +class HVecAbs final : public HVecUnaryOperation { public: HVecAbs(ArenaAllocator* allocator, HInstruction* input, @@ -470,7 +470,7 @@ class HVecAbs FINAL : public HVecUnaryOperation { DCHECK(HasConsistentPackedTypes(input, packed_type)); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecAbs); @@ -481,7 +481,7 @@ class HVecAbs FINAL : public HVecUnaryOperation { // Bitwise- or boolean-nots every component in the vector, // viz. not[ x1, .. , xn ] = [ ~x1, .. , ~xn ], or // not[ x1, .. , xn ] = [ !x1, .. , !xn ] for boolean. -class HVecNot FINAL : public HVecUnaryOperation { +class HVecNot final : public HVecUnaryOperation { public: HVecNot(ArenaAllocator* allocator, HInstruction* input, @@ -492,7 +492,7 @@ class HVecNot FINAL : public HVecUnaryOperation { DCHECK(input->IsVecOperation()); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecNot); @@ -506,7 +506,7 @@ class HVecNot FINAL : public HVecUnaryOperation { // Adds every component in the two vectors, // viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 + y1, .. , xn + yn ]. -class HVecAdd FINAL : public HVecBinaryOperation { +class HVecAdd final : public HVecBinaryOperation { public: HVecAdd(ArenaAllocator* allocator, HInstruction* left, @@ -519,7 +519,7 @@ class HVecAdd FINAL : public HVecBinaryOperation { DCHECK(HasConsistentPackedTypes(right, packed_type)); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecAdd); @@ -530,7 +530,7 @@ class HVecAdd FINAL : public HVecBinaryOperation { // Adds every component in the two vectors using saturation arithmetic, // viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 +_sat y1, .. , xn +_sat yn ] // for either both signed or both unsigned operands x, y (reflected in packed_type). 
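Before the class itself, a scalar model of the `+_sat` notation used in the comment above, for signed 8-bit lanes (the vector instruction applies this per lane):

    #include <algorithm>
    #include <cstdint>

    // Saturating lane add: the mathematical sum is clamped to the lane
    // type's range instead of wrapping around.
    int8_t SaturatingAdd(int8_t x, int8_t y) {
      int32_t sum = static_cast<int32_t>(x) + y;
      return static_cast<int8_t>(std::clamp(sum, -128, 127));
    }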
-class HVecSaturationAdd FINAL : public HVecBinaryOperation { +class HVecSaturationAdd final : public HVecBinaryOperation { public: HVecSaturationAdd(ArenaAllocator* allocator, HInstruction* left, @@ -544,7 +544,7 @@ class HVecSaturationAdd FINAL : public HVecBinaryOperation { DCHECK(HasConsistentPackedTypes(right, packed_type)); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecSaturationAdd); @@ -556,7 +556,7 @@ class HVecSaturationAdd FINAL : public HVecBinaryOperation { // rounded [ x1, .. , xn ] hradd [ y1, .. , yn ] = [ (x1 + y1 + 1) >> 1, .. , (xn + yn + 1) >> 1 ] // truncated [ x1, .. , xn ] hadd [ y1, .. , yn ] = [ (x1 + y1) >> 1, .. , (xn + yn ) >> 1 ] // for either both signed or both unsigned operands x, y (reflected in packed_type). -class HVecHalvingAdd FINAL : public HVecBinaryOperation { +class HVecHalvingAdd final : public HVecBinaryOperation { public: HVecHalvingAdd(ArenaAllocator* allocator, HInstruction* left, @@ -574,9 +574,9 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation { bool IsRounded() const { return GetPackedFlag<kFieldHAddIsRounded>(); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { DCHECK(other->IsVecHalvingAdd()); const HVecHalvingAdd* o = other->AsVecHalvingAdd(); return HVecOperation::InstructionDataEquals(o) && IsRounded() == o->IsRounded(); @@ -596,7 +596,7 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation { // Subtracts every component in the two vectors, // viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 - y1, .. , xn - yn ]. -class HVecSub FINAL : public HVecBinaryOperation { +class HVecSub final : public HVecBinaryOperation { public: HVecSub(ArenaAllocator* allocator, HInstruction* left, @@ -609,7 +609,7 @@ class HVecSub FINAL : public HVecBinaryOperation { DCHECK(HasConsistentPackedTypes(right, packed_type)); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecSub); @@ -620,7 +620,7 @@ class HVecSub FINAL : public HVecBinaryOperation { // Subtracts every component in the two vectors using saturation arithmetic, // viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 -_sat y1, .. , xn -_sat yn ] // for either both signed or both unsigned operands x, y (reflected in packed_type). -class HVecSaturationSub FINAL : public HVecBinaryOperation { +class HVecSaturationSub final : public HVecBinaryOperation { public: HVecSaturationSub(ArenaAllocator* allocator, HInstruction* left, @@ -634,7 +634,7 @@ class HVecSaturationSub FINAL : public HVecBinaryOperation { DCHECK(HasConsistentPackedTypes(right, packed_type)); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecSaturationSub); @@ -644,7 +644,7 @@ class HVecSaturationSub FINAL : public HVecBinaryOperation { // Multiplies every component in the two vectors, // viz. [ x1, .. , xn ] * [ y1, .. , yn ] = [ x1 * y1, .. , xn * yn ]. 
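Likewise, a scalar model of the rounded vs. truncated halving add documented for HVecHalvingAdd above; widening to 64 bits keeps the intermediate x + y (+ 1) from overflowing:

    #include <cstdint>

    int32_t HalvingAdd(int32_t x, int32_t y, bool rounded) {
      int64_t sum = static_cast<int64_t>(x) + y + (rounded ? 1 : 0);
      return static_cast<int32_t>(sum >> 1);  // rounded: (x + y + 1) >> 1
    }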
-class HVecMul FINAL : public HVecBinaryOperation { +class HVecMul final : public HVecBinaryOperation { public: HVecMul(ArenaAllocator* allocator, HInstruction* left, @@ -657,7 +657,7 @@ class HVecMul FINAL : public HVecBinaryOperation { DCHECK(HasConsistentPackedTypes(right, packed_type)); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecMul); @@ -667,7 +667,7 @@ class HVecMul FINAL : public HVecBinaryOperation { // Divides every component in the two vectors, // viz. [ x1, .. , xn ] / [ y1, .. , yn ] = [ x1 / y1, .. , xn / yn ]. -class HVecDiv FINAL : public HVecBinaryOperation { +class HVecDiv final : public HVecBinaryOperation { public: HVecDiv(ArenaAllocator* allocator, HInstruction* left, @@ -680,7 +680,7 @@ class HVecDiv FINAL : public HVecBinaryOperation { DCHECK(HasConsistentPackedTypes(right, packed_type)); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecDiv); @@ -691,7 +691,7 @@ class HVecDiv FINAL : public HVecBinaryOperation { // Takes minimum of every component in the two vectors, // viz. MIN( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ min(x1, y1), .. , min(xn, yn) ] // for either both signed or both unsigned operands x, y (reflected in packed_type). -class HVecMin FINAL : public HVecBinaryOperation { +class HVecMin final : public HVecBinaryOperation { public: HVecMin(ArenaAllocator* allocator, HInstruction* left, @@ -704,7 +704,7 @@ class HVecMin FINAL : public HVecBinaryOperation { DCHECK(HasConsistentPackedTypes(right, packed_type)); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecMin); @@ -715,7 +715,7 @@ class HVecMin FINAL : public HVecBinaryOperation { // Takes maximum of every component in the two vectors, // viz. MAX( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ max(x1, y1), .. , max(xn, yn) ] // for either both signed or both unsigned operands x, y (reflected in packed_type). -class HVecMax FINAL : public HVecBinaryOperation { +class HVecMax final : public HVecBinaryOperation { public: HVecMax(ArenaAllocator* allocator, HInstruction* left, @@ -728,7 +728,7 @@ class HVecMax FINAL : public HVecBinaryOperation { DCHECK(HasConsistentPackedTypes(right, packed_type)); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecMax); @@ -738,7 +738,7 @@ class HVecMax FINAL : public HVecBinaryOperation { // Bitwise-ands every component in the two vectors, // viz. [ x1, .. , xn ] & [ y1, .. , yn ] = [ x1 & y1, .. , xn & yn ]. -class HVecAnd FINAL : public HVecBinaryOperation { +class HVecAnd final : public HVecBinaryOperation { public: HVecAnd(ArenaAllocator* allocator, HInstruction* left, @@ -750,7 +750,7 @@ class HVecAnd FINAL : public HVecBinaryOperation { DCHECK(left->IsVecOperation() && right->IsVecOperation()); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecAnd); @@ -760,7 +760,7 @@ class HVecAnd FINAL : public HVecBinaryOperation { // Bitwise-and-nots every component in the two vectors, // viz. [ x1, .. , xn ] and-not [ y1, .. , yn ] = [ ~x1 & y1, .. , ~xn & yn ]. 
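The and-not semantics just stated complement only the left operand, which is easy to get backwards; a per-lane check ahead of the HVecAndNot definition below (plain C++, 32-bit lanes assumed):

#include <cstdint>

// One lane of HVecAndNot: the left operand is complemented, then ANDed with
// the right one, i.e. ~x & y, exactly as the comment above states.
int32_t AndNotLane(int32_t x, int32_t y) {
  return ~x & y;
}
// E.g. AndNotLane(0b1100, 0b1010) == 0b0010: a bit survives only where the
// left input has 0 and the right input has 1.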
-class HVecAndNot FINAL : public HVecBinaryOperation { +class HVecAndNot final : public HVecBinaryOperation { public: HVecAndNot(ArenaAllocator* allocator, HInstruction* left, @@ -773,7 +773,7 @@ class HVecAndNot FINAL : public HVecBinaryOperation { DCHECK(left->IsVecOperation() && right->IsVecOperation()); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecAndNot); @@ -783,7 +783,7 @@ class HVecAndNot FINAL : public HVecBinaryOperation { // Bitwise-ors every component in the two vectors, // viz. [ x1, .. , xn ] | [ y1, .. , yn ] = [ x1 | y1, .. , xn | yn ]. -class HVecOr FINAL : public HVecBinaryOperation { +class HVecOr final : public HVecBinaryOperation { public: HVecOr(ArenaAllocator* allocator, HInstruction* left, @@ -795,7 +795,7 @@ class HVecOr FINAL : public HVecBinaryOperation { DCHECK(left->IsVecOperation() && right->IsVecOperation()); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecOr); @@ -805,7 +805,7 @@ class HVecOr FINAL : public HVecBinaryOperation { // Bitwise-xors every component in the two vectors, // viz. [ x1, .. , xn ] ^ [ y1, .. , yn ] = [ x1 ^ y1, .. , xn ^ yn ]. -class HVecXor FINAL : public HVecBinaryOperation { +class HVecXor final : public HVecBinaryOperation { public: HVecXor(ArenaAllocator* allocator, HInstruction* left, @@ -817,7 +817,7 @@ class HVecXor FINAL : public HVecBinaryOperation { DCHECK(left->IsVecOperation() && right->IsVecOperation()); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecXor); @@ -827,7 +827,7 @@ class HVecXor FINAL : public HVecBinaryOperation { // Logically shifts every component in the vector left by the given distance, // viz. [ x1, .. , xn ] << d = [ x1 << d, .. , xn << d ]. -class HVecShl FINAL : public HVecBinaryOperation { +class HVecShl final : public HVecBinaryOperation { public: HVecShl(ArenaAllocator* allocator, HInstruction* left, @@ -839,7 +839,7 @@ class HVecShl FINAL : public HVecBinaryOperation { DCHECK(HasConsistentPackedTypes(left, packed_type)); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecShl); @@ -849,7 +849,7 @@ class HVecShl FINAL : public HVecBinaryOperation { // Arithmetically shifts every component in the vector right by the given distance, // viz. [ x1, .. , xn ] >> d = [ x1 >> d, .. , xn >> d ]. -class HVecShr FINAL : public HVecBinaryOperation { +class HVecShr final : public HVecBinaryOperation { public: HVecShr(ArenaAllocator* allocator, HInstruction* left, @@ -861,7 +861,7 @@ class HVecShr FINAL : public HVecBinaryOperation { DCHECK(HasConsistentPackedTypes(left, packed_type)); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecShr); @@ -871,7 +871,7 @@ class HVecShr FINAL : public HVecBinaryOperation { // Logically shifts every component in the vector right by the given distance, // viz. [ x1, .. , xn ] >>> d = [ x1 >>> d, .. , xn >>> d ]. 
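Before the HVecUShr definition that follows, the difference between the arithmetic (>>) and logical (>>>) right shifts described above, modeled per lane in plain C++ (illustrative only, 32-bit lanes assumed):

#include <cstdint>

// Arithmetic shift (HVecShr): the sign bit is replicated into vacated bits.
// (Well-defined for negative values since C++20; in practice arithmetic on
// mainstream compilers before that.)
int32_t ShrLane(int32_t x, int distance) {
  return x >> distance;
}

// Logical shift (HVecUShr, Java's >>>): the bit pattern is shifted as
// unsigned, so vacated bits are filled with zero.
int32_t UShrLane(int32_t x, int distance) {
  return static_cast<int32_t>(static_cast<uint32_t>(x) >> distance);
}
// E.g. ShrLane(-8, 1) == -4, while UShrLane(-8, 1) == 0x7FFFFFFC.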
-class HVecUShr FINAL : public HVecBinaryOperation { +class HVecUShr final : public HVecBinaryOperation { public: HVecUShr(ArenaAllocator* allocator, HInstruction* left, @@ -883,7 +883,7 @@ class HVecUShr FINAL : public HVecBinaryOperation { DCHECK(HasConsistentPackedTypes(left, packed_type)); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(VecUShr); @@ -898,7 +898,7 @@ class HVecUShr FINAL : public HVecBinaryOperation { // Assigns the given scalar elements to a vector, // viz. set( array(x1, .. , xn) ) = [ x1, .. , xn ] if n == m, // set( array(x1, .. , xm) ) = [ x1, .. , xm, 0, .. , 0 ] if m < n. -class HVecSetScalars FINAL : public HVecOperation { +class HVecSetScalars final : public HVecOperation { public: HVecSetScalars(ArenaAllocator* allocator, HInstruction* scalars[], @@ -921,7 +921,7 @@ class HVecSetScalars FINAL : public HVecOperation { // Setting scalars needs to stay in place, since SIMD registers are not // kept alive across vector loop boundaries (yet). - bool CanBeMoved() const OVERRIDE { return false; } + bool CanBeMoved() const override { return false; } DECLARE_INSTRUCTION(VecSetScalars); @@ -934,7 +934,7 @@ class HVecSetScalars FINAL : public HVecOperation { // For floating point types, Java rounding behavior must be preserved; the products are rounded to // the proper precision before being added. "Fused" multiply-add operations available on several // architectures are not usable since they would violate Java language rules. -class HVecMultiplyAccumulate FINAL : public HVecOperation { +class HVecMultiplyAccumulate final : public HVecOperation { public: HVecMultiplyAccumulate(ArenaAllocator* allocator, InstructionKind op, @@ -964,9 +964,9 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation { SetRawInputAt(2, mul_right); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { DCHECK(other->IsVecMultiplyAccumulate()); const HVecMultiplyAccumulate* o = other->AsVecMultiplyAccumulate(); return HVecOperation::InstructionDataEquals(o) && GetOpKind() == o->GetOpKind(); @@ -989,7 +989,7 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation { // viz. SAD([ a1, .. , am ], [ x1, .. , xn ], [ y1, .. , yn ]) = // [ a1 + sum abs(xi-yi), .. , am + sum abs(xj-yj) ], // for m <= n, non-overlapping sums, and signed operands x, y. -class HVecSADAccumulate FINAL : public HVecOperation { +class HVecSADAccumulate final : public HVecOperation { public: HVecSADAccumulate(ArenaAllocator* allocator, HInstruction* accumulator, @@ -1021,9 +1021,69 @@ class HVecSADAccumulate FINAL : public HVecOperation { DEFAULT_COPY_CONSTRUCTOR(VecSADAccumulate); }; +// Performs dot product of two vectors and adds the result to wider precision components in +// the accumulator. +// +// viz. DOT_PRODUCT([ a1, .. , am], [ x1, .. , xn ], [ y1, .. , yn ]) = +// [ a1 + sum(xi * yi), .. , am + sum(xj * yj) ], +// for m <= n, non-overlapping sums, +// for either both signed or both unsigned operands x, y. +// +// Notes: +// - packed type reflects the type of sum reduction, not the type of the operands. +// - IsZeroExtending() is used to determine the kind of signed/zero extension to be +// performed for the operands. +// +// TODO: Support types other than kInt32 for packed type. 
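A scalar reference for the dot-product semantics just described, ahead of the class definition below (a sketch, not ART code: two kInt32 accumulator lanes over eight 8-bit operand lanes are assumed, so four products fold into each sum lane):

#include <cstddef>
#include <cstdint>

// Reference model of one HVecDotProd step: each int32 accumulator lane
// receives the sum of products of a non-overlapping group of operand lanes.
// is_zero_extending selects unsigned vs signed widening of the 8-bit operands.
void DotProdReference(int32_t acc[2], const uint8_t x[8], const uint8_t y[8],
                      bool is_zero_extending) {
  for (size_t i = 0; i < 8; ++i) {
    int32_t xi = is_zero_extending ? x[i] : static_cast<int8_t>(x[i]);
    int32_t yi = is_zero_extending ? y[i] : static_cast<int8_t>(y[i]);
    acc[i / 4] += xi * yi;  // lanes 0..3 feed acc[0], lanes 4..7 feed acc[1]
  }
}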
+class HVecDotProd final : public HVecOperation { + public: + HVecDotProd(ArenaAllocator* allocator, + HInstruction* accumulator, + HInstruction* left, + HInstruction* right, + DataType::Type packed_type, + bool is_zero_extending, + size_t vector_length, + uint32_t dex_pc) + : HVecOperation(kVecDotProd, + allocator, + packed_type, + SideEffects::None(), + /* number_of_inputs */ 3, + vector_length, + dex_pc) { + DCHECK(HasConsistentPackedTypes(accumulator, packed_type)); + DCHECK(DataType::IsIntegralType(packed_type)); + DCHECK(left->IsVecOperation()); + DCHECK(right->IsVecOperation()); + DCHECK_EQ(ToSignedType(left->AsVecOperation()->GetPackedType()), + ToSignedType(right->AsVecOperation()->GetPackedType())); + SetRawInputAt(0, accumulator); + SetRawInputAt(1, left); + SetRawInputAt(2, right); + SetPackedFlag<kFieldHDotProdIsZeroExtending>(is_zero_extending); + } + + bool IsZeroExtending() const { return GetPackedFlag<kFieldHDotProdIsZeroExtending>(); } + + bool CanBeMoved() const override { return true; } + + DECLARE_INSTRUCTION(VecDotProd); + + protected: + DEFAULT_COPY_CONSTRUCTOR(VecDotProd); + + private: + // Additional packed bits. + static constexpr size_t kFieldHDotProdIsZeroExtending = + HVecOperation::kNumberOfVectorOpPackedBits; + static constexpr size_t kNumberOfHDotProdPackedBits = kFieldHDotProdIsZeroExtending + 1; + static_assert(kNumberOfHDotProdPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); +}; + // Loads a vector from memory, viz. load(mem, 1) // yield the vector [ mem(1), .. , mem(n) ]. -class HVecLoad FINAL : public HVecMemoryOperation { +class HVecLoad final : public HVecMemoryOperation { public: HVecLoad(ArenaAllocator* allocator, HInstruction* base, @@ -1047,9 +1107,9 @@ class HVecLoad FINAL : public HVecMemoryOperation { bool IsStringCharAt() const { return GetPackedFlag<kFieldIsStringCharAt>(); } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } - bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { + bool InstructionDataEquals(const HInstruction* other) const override { DCHECK(other->IsVecLoad()); const HVecLoad* o = other->AsVecLoad(); return HVecMemoryOperation::InstructionDataEquals(o) && IsStringCharAt() == o->IsStringCharAt(); @@ -1069,7 +1129,7 @@ class HVecLoad FINAL : public HVecMemoryOperation { // Stores a vector to memory, viz. store(m, 1, [x1, .. , xn] ) // sets mem(1) = x1, .. , mem(n) = xn. -class HVecStore FINAL : public HVecMemoryOperation { +class HVecStore final : public HVecMemoryOperation { public: HVecStore(ArenaAllocator* allocator, HInstruction* base, @@ -1093,7 +1153,7 @@ class HVecStore FINAL : public HVecMemoryOperation { } // A store needs to stay in place. - bool CanBeMoved() const OVERRIDE { return false; } + bool CanBeMoved() const override { return false; } DECLARE_INSTRUCTION(VecStore); diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h index d1e7f68edb..a55110426b 100644 --- a/compiler/optimizing/nodes_x86.h +++ b/compiler/optimizing/nodes_x86.h @@ -20,7 +20,7 @@ namespace art { // Compute the address of the method for X86 Constant area support. -class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> { +class HX86ComputeBaseMethodAddress final : public HExpression<0> { public: // Treat the value as an int32_t, but it is really a 32 bit native pointer. 
HX86ComputeBaseMethodAddress() @@ -30,7 +30,7 @@ class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> { kNoDexPc) { } - bool CanBeMoved() const OVERRIDE { return true; } + bool CanBeMoved() const override { return true; } DECLARE_INSTRUCTION(X86ComputeBaseMethodAddress); @@ -39,7 +39,7 @@ class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> { }; // Load a constant value from the constant table. -class HX86LoadFromConstantTable FINAL : public HExpression<2> { +class HX86LoadFromConstantTable final : public HExpression<2> { public: HX86LoadFromConstantTable(HX86ComputeBaseMethodAddress* method_base, HConstant* constant) @@ -66,7 +66,7 @@ class HX86LoadFromConstantTable FINAL : public HExpression<2> { }; // Version of HNeg with access to the constant table for FP types. -class HX86FPNeg FINAL : public HExpression<2> { +class HX86FPNeg final : public HExpression<2> { public: HX86FPNeg(DataType::Type result_type, HInstruction* input, @@ -89,7 +89,7 @@ class HX86FPNeg FINAL : public HExpression<2> { }; // X86 version of HPackedSwitch that holds a pointer to the base method address. -class HX86PackedSwitch FINAL : public HExpression<2> { +class HX86PackedSwitch final : public HExpression<2> { public: HX86PackedSwitch(int32_t start_value, int32_t num_entries, @@ -103,7 +103,7 @@ class HX86PackedSwitch FINAL : public HExpression<2> { SetRawInputAt(1, method_base); } - bool IsControlFlow() const OVERRIDE { return true; } + bool IsControlFlow() const override { return true; } int32_t GetStartValue() const { return start_value_; } diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc index 142ddb5fbb..4b0941b4ff 100644 --- a/compiler/optimizing/optimization.cc +++ b/compiler/optimizing/optimization.cc @@ -84,14 +84,10 @@ const char* OptimizationPassName(OptimizationPass pass) { return HDeadCodeElimination::kDeadCodeEliminationPassName; case OptimizationPass::kInliner: return HInliner::kInlinerPassName; - case OptimizationPass::kSharpening: - return HSharpening::kSharpeningPassName; case OptimizationPass::kSelectGenerator: return HSelectGenerator::kSelectGeneratorPassName; case OptimizationPass::kInstructionSimplifier: return InstructionSimplifier::kInstructionSimplifierPassName; - case OptimizationPass::kIntrinsicsRecognizer: - return IntrinsicsRecognizer::kIntrinsicsRecognizerPassName; case OptimizationPass::kCHAGuardOptimization: return CHAGuardOptimization::kCHAGuardOptimizationPassName; case OptimizationPass::kCodeSinking: @@ -141,14 +137,12 @@ OptimizationPass OptimizationPassByName(const std::string& pass_name) { X(OptimizationPass::kInductionVarAnalysis); X(OptimizationPass::kInliner); X(OptimizationPass::kInstructionSimplifier); - X(OptimizationPass::kIntrinsicsRecognizer); X(OptimizationPass::kInvariantCodeMotion); X(OptimizationPass::kLoadStoreAnalysis); X(OptimizationPass::kLoadStoreElimination); X(OptimizationPass::kLoopOptimization); X(OptimizationPass::kScheduling); X(OptimizationPass::kSelectGenerator); - X(OptimizationPass::kSharpening); X(OptimizationPass::kSideEffectsAnalysis); #ifdef ART_ENABLE_CODEGEN_arm X(OptimizationPass::kInstructionSimplifierArm); @@ -264,18 +258,12 @@ ArenaVector<HOptimization*> ConstructOptimizations( pass_name); break; } - case OptimizationPass::kSharpening: - opt = new (allocator) HSharpening(graph, codegen, pass_name); - break; case OptimizationPass::kSelectGenerator: opt = new (allocator) HSelectGenerator(graph, handles, stats, pass_name); break; case OptimizationPass::kInstructionSimplifier: opt = 
new (allocator) InstructionSimplifier(graph, codegen, stats, pass_name); break; - case OptimizationPass::kIntrinsicsRecognizer: - opt = new (allocator) IntrinsicsRecognizer(graph, stats, pass_name); - break; case OptimizationPass::kCHAGuardOptimization: opt = new (allocator) CHAGuardOptimization(graph, pass_name); break; diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h index 88b283cebf..ced383fdd6 100644 --- a/compiler/optimizing/optimization.h +++ b/compiler/optimizing/optimization.h @@ -77,14 +77,12 @@ enum class OptimizationPass { kInductionVarAnalysis, kInliner, kInstructionSimplifier, - kIntrinsicsRecognizer, kInvariantCodeMotion, kLoadStoreAnalysis, kLoadStoreElimination, kLoopOptimization, kScheduling, kSelectGenerator, - kSharpening, kSideEffectsAnalysis, #ifdef ART_ENABLE_CODEGEN_arm kInstructionSimplifierArm, diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc index 04301f5366..a52031cced 100644 --- a/compiler/optimizing/optimizing_cfi_test.cc +++ b/compiler/optimizing/optimizing_cfi_test.cc @@ -128,12 +128,12 @@ class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper { public: InternalCodeAllocator() {} - virtual uint8_t* Allocate(size_t size) { + uint8_t* Allocate(size_t size) override { memory_.resize(size); return memory_.data(); } - ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); } + ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); } private: std::vector<uint8_t> memory_; diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index c40cbcf52a..46754fe33f 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -74,17 +74,17 @@ static constexpr const char* kPassNameSeparator = "$"; /** * Used by the code generator, to allocate the code in a vector. 
*/ -class CodeVectorAllocator FINAL : public CodeAllocator { +class CodeVectorAllocator final : public CodeAllocator { public: explicit CodeVectorAllocator(ArenaAllocator* allocator) : memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {} - virtual uint8_t* Allocate(size_t size) { + uint8_t* Allocate(size_t size) override { memory_.resize(size); return &memory_[0]; } - ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); } + ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); } uint8_t* GetData() { return memory_.data(); } private: @@ -264,12 +264,12 @@ class PassScope : public ValueObject { PassObserver* const pass_observer_; }; -class OptimizingCompiler FINAL : public Compiler { +class OptimizingCompiler final : public Compiler { public: explicit OptimizingCompiler(CompilerDriver* driver); - ~OptimizingCompiler() OVERRIDE; + ~OptimizingCompiler() override; - bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const OVERRIDE; + bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override; CompiledMethod* Compile(const DexFile::CodeItem* code_item, uint32_t access_flags, @@ -278,29 +278,29 @@ class OptimizingCompiler FINAL : public Compiler { uint32_t method_idx, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file, - Handle<mirror::DexCache> dex_cache) const OVERRIDE; + Handle<mirror::DexCache> dex_cache) const override; CompiledMethod* JniCompile(uint32_t access_flags, uint32_t method_idx, const DexFile& dex_file, - Handle<mirror::DexCache> dex_cache) const OVERRIDE; + Handle<mirror::DexCache> dex_cache) const override; - uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE + uintptr_t GetEntryPointOf(ArtMethod* method) const override REQUIRES_SHARED(Locks::mutator_lock_) { return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize( InstructionSetPointerSize(GetCompilerDriver()->GetCompilerOptions().GetInstructionSet()))); } - void Init() OVERRIDE; + void Init() override; - void UnInit() const OVERRIDE; + void UnInit() const override; bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, bool osr, jit::JitLogger* jit_logger) - OVERRIDE + override REQUIRES_SHARED(Locks::mutator_lock_); private: @@ -399,7 +399,8 @@ class OptimizingCompiler FINAL : public Compiler { PassObserver* pass_observer, VariableSizedHandleScope* handles) const; - void GenerateJitDebugInfo(ArtMethod* method, debug::MethodDebugInfo method_debug_info) + void GenerateJitDebugInfo(ArtMethod* method, + const debug::MethodDebugInfo& method_debug_info) REQUIRES_SHARED(Locks::mutator_lock_); std::unique_ptr<OptimizingCompilerStats> compilation_stats_; @@ -570,7 +571,7 @@ static void AllocateRegisters(HGraph* graph, { PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName, pass_observer); - PrepareForRegisterAllocation(graph, stats).Run(); + PrepareForRegisterAllocation(graph, codegen->GetCompilerOptions(), stats).Run(); } // Use local allocator shared by SSA liveness analysis and register allocator. // (Register allocator creates new objects in the liveness data.) @@ -623,8 +624,6 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph, OptimizationDef optimizations[] = { // Initial optimizations. 
- OptDef(OptimizationPass::kIntrinsicsRecognizer), - OptDef(OptimizationPass::kSharpening), OptDef(OptimizationPass::kConstantFolding), OptDef(OptimizationPass::kInstructionSimplifier), OptDef(OptimizationPass::kDeadCodeElimination, @@ -848,6 +847,11 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator, MethodCompilationStat::kNotCompiledAmbiguousArrayOp); break; } + case kAnalysisFailIrreducibleLoopAndStringInit: { + MaybeRecordStat(compilation_stats_.get(), + MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit); + break; + } case kAnalysisSuccess: UNREACHABLE(); } @@ -945,9 +949,8 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic( } OptimizationDef optimizations[] = { - OptDef(OptimizationPass::kIntrinsicsRecognizer), - // Some intrinsics are converted to HIR by the simplifier and the codegen also - // has a few assumptions that only the instruction simplifier can satisfy. + // The codegen has a few assumptions that only the instruction simplifier + // can satisfy. OptDef(OptimizationPass::kInstructionSimplifier), }; RunOptimizations(graph, @@ -1219,7 +1222,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions(); JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod( compiler_options, access_flags, method_idx, *dex_file); - ScopedNullHandle<mirror::ObjectArray<mirror::Object>> roots; + std::vector<Handle<mirror::Object>> roots; ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list( allocator.Adapter(kArenaAllocCHA)); ArenaStack arena_stack(runtime->GetJitArenaPool()); @@ -1321,19 +1324,6 @@ bool OptimizingCompiler::JitCompile(Thread* self, ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item); size_t number_of_roots = codegen->GetNumberOfJitRoots(); - // We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots - // will be visible by the GC between EmitLiterals and CommitCode. Once CommitCode is - // executed, this array is not needed. - Handle<mirror::ObjectArray<mirror::Object>> roots( - hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc( - self, GetClassRoot<mirror::ObjectArray<mirror::Object>>(), number_of_roots))); - if (roots == nullptr) { - // Out of memory, just clear the exception to avoid any Java exception uncaught problems. - MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit); - DCHECK(self->IsExceptionPending()); - self->ClearException(); - return false; - } uint8_t* stack_map_data = nullptr; uint8_t* roots_data = nullptr; uint32_t data_size = code_cache->ReserveData(self, @@ -1347,7 +1337,14 @@ bool OptimizingCompiler::JitCompile(Thread* self, return false; } memcpy(stack_map_data, stack_map.data(), stack_map.size()); - codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data); + std::vector<Handle<mirror::Object>> roots; + codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots); + // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope. 
+ DCHECK(std::all_of(roots.begin(), + roots.end(), + [&handles](Handle<mirror::Object> root){ + return handles.Contains(root.GetReference()); + })); const void* code = code_cache->CommitCode( self, @@ -1413,7 +1410,8 @@ bool OptimizingCompiler::JitCompile(Thread* self, return true; } -void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method, debug::MethodDebugInfo info) { +void OptimizingCompiler::GenerateJitDebugInfo( + ArtMethod* method, const debug::MethodDebugInfo& info) { const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions(); DCHECK(compiler_options.GenerateAnyDebugInfo()); diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h index 9a26f2f6c4..1f4f6d56be 100644 --- a/compiler/optimizing/optimizing_compiler_stats.h +++ b/compiler/optimizing/optimizing_compiler_stats.h @@ -59,6 +59,7 @@ enum class MethodCompilationStat { kNotCompiledUnsupportedIsa, kNotCompiledVerificationError, kNotCompiledVerifyAtRuntime, + kNotCompiledIrreducibleLoopAndStringInit, kInlinedMonomorphicCall, kInlinedPolymorphicCall, kMonomorphicCall, diff --git a/compiler/optimizing/parallel_move_resolver.h b/compiler/optimizing/parallel_move_resolver.h index e6e069f96e..5fadcab402 100644 --- a/compiler/optimizing/parallel_move_resolver.h +++ b/compiler/optimizing/parallel_move_resolver.h @@ -58,7 +58,7 @@ class ParallelMoveResolverWithSwap : public ParallelMoveResolver { virtual ~ParallelMoveResolverWithSwap() {} // Resolve a set of parallel moves, emitting assembler instructions. - void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE; + void EmitNativeCode(HParallelMove* parallel_move) override; protected: class ScratchRegisterScope : public ValueObject { @@ -133,7 +133,7 @@ class ParallelMoveResolverNoSwap : public ParallelMoveResolver { virtual ~ParallelMoveResolverNoSwap() {} // Resolve a set of parallel moves, emitting assembler instructions. - void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE; + void EmitNativeCode(HParallelMove* parallel_move) override; protected: // Called at the beginning of EmitNativeCode(). 
A subclass may put some architecture dependent diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc index be35201166..a8ab6cdd0c 100644 --- a/compiler/optimizing/parallel_move_test.cc +++ b/compiler/optimizing/parallel_move_test.cc @@ -56,7 +56,7 @@ class TestParallelMoveResolverWithSwap : public ParallelMoveResolverWithSwap { explicit TestParallelMoveResolverWithSwap(ArenaAllocator* allocator) : ParallelMoveResolverWithSwap(allocator) {} - void EmitMove(size_t index) OVERRIDE { + void EmitMove(size_t index) override { MoveOperands* move = moves_[index]; if (!message_.str().empty()) { message_ << " "; @@ -68,7 +68,7 @@ class TestParallelMoveResolverWithSwap : public ParallelMoveResolverWithSwap { message_ << ")"; } - void EmitSwap(size_t index) OVERRIDE { + void EmitSwap(size_t index) override { MoveOperands* move = moves_[index]; if (!message_.str().empty()) { message_ << " "; @@ -80,8 +80,8 @@ class TestParallelMoveResolverWithSwap : public ParallelMoveResolverWithSwap { message_ << ")"; } - void SpillScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {} - void RestoreScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {} + void SpillScratch(int reg ATTRIBUTE_UNUSED) override {} + void RestoreScratch(int reg ATTRIBUTE_UNUSED) override {} std::string GetMessage() const { return message_.str(); @@ -99,13 +99,13 @@ class TestParallelMoveResolverNoSwap : public ParallelMoveResolverNoSwap { explicit TestParallelMoveResolverNoSwap(ArenaAllocator* allocator) : ParallelMoveResolverNoSwap(allocator), scratch_index_(kScratchRegisterStartIndexForTest) {} - void PrepareForEmitNativeCode() OVERRIDE { + void PrepareForEmitNativeCode() override { scratch_index_ = kScratchRegisterStartIndexForTest; } - void FinishEmitNativeCode() OVERRIDE {} + void FinishEmitNativeCode() override {} - Location AllocateScratchLocationFor(Location::Kind kind) OVERRIDE { + Location AllocateScratchLocationFor(Location::Kind kind) override { if (kind == Location::kStackSlot || kind == Location::kFpuRegister || kind == Location::kRegister) { kind = Location::kRegister; @@ -125,9 +125,9 @@ class TestParallelMoveResolverNoSwap : public ParallelMoveResolverNoSwap { return scratch; } - void FreeScratchLocation(Location loc ATTRIBUTE_UNUSED) OVERRIDE {} + void FreeScratchLocation(Location loc ATTRIBUTE_UNUSED) override {} - void EmitMove(size_t index) OVERRIDE { + void EmitMove(size_t index) override { MoveOperands* move = moves_[index]; if (!message_.str().empty()) { message_ << " "; @@ -174,8 +174,8 @@ class ParallelMoveTest : public ::testing::Test { template<> const bool ParallelMoveTest<TestParallelMoveResolverWithSwap>::has_swap = true; template<> const bool ParallelMoveTest<TestParallelMoveResolverNoSwap>::has_swap = false; -typedef ::testing::Types<TestParallelMoveResolverWithSwap, TestParallelMoveResolverNoSwap> - ParallelMoveResolverTestTypes; +using ParallelMoveResolverTestTypes = + ::testing::Types<TestParallelMoveResolverWithSwap, TestParallelMoveResolverNoSwap>; TYPED_TEST_CASE(ParallelMoveTest, ParallelMoveResolverTestTypes); diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc index a7e97a1ce5..05208ff65c 100644 --- a/compiler/optimizing/pc_relative_fixups_mips.cc +++ b/compiler/optimizing/pc_relative_fixups_mips.cc @@ -58,7 +58,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { DCHECK(base_ != nullptr); } - void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE { + void 
VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override { // If this is an invoke with PC-relative load kind, // we need to add the base as the special input. if (invoke->HasPcRelativeMethodLoadKind() && @@ -70,7 +70,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { } } - void VisitLoadClass(HLoadClass* load_class) OVERRIDE { + void VisitLoadClass(HLoadClass* load_class) override { HLoadClass::LoadKind load_kind = load_class->GetLoadKind(); switch (load_kind) { case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: @@ -86,7 +86,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { } } - void VisitLoadString(HLoadString* load_string) OVERRIDE { + void VisitLoadString(HLoadString* load_string) override { HLoadString::LoadKind load_kind = load_string->GetLoadKind(); switch (load_kind) { case HLoadString::LoadKind::kBootImageLinkTimePcRelative: @@ -102,7 +102,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { } } - void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE { + void VisitPackedSwitch(HPackedSwitch* switch_insn) override { if (switch_insn->GetNumEntries() <= InstructionCodeGeneratorMIPS::kPackedSwitchJumpTableThreshold) { return; diff --git a/compiler/optimizing/pc_relative_fixups_mips.h b/compiler/optimizing/pc_relative_fixups_mips.h index 6dd1ee0db2..872370bcb7 100644 --- a/compiler/optimizing/pc_relative_fixups_mips.h +++ b/compiler/optimizing/pc_relative_fixups_mips.h @@ -34,7 +34,7 @@ class PcRelativeFixups : public HOptimization { static constexpr const char* kPcRelativeFixupsMipsPassName = "pc_relative_fixups_mips"; - bool Run() OVERRIDE; + bool Run() override; private: CodeGenerator* codegen_; diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc index 41f2f776fc..4b07d5b621 100644 --- a/compiler/optimizing/pc_relative_fixups_x86.cc +++ b/compiler/optimizing/pc_relative_fixups_x86.cc @@ -42,53 +42,53 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { } private: - void VisitAdd(HAdd* add) OVERRIDE { + void VisitAdd(HAdd* add) override { BinaryFP(add); } - void VisitSub(HSub* sub) OVERRIDE { + void VisitSub(HSub* sub) override { BinaryFP(sub); } - void VisitMul(HMul* mul) OVERRIDE { + void VisitMul(HMul* mul) override { BinaryFP(mul); } - void VisitDiv(HDiv* div) OVERRIDE { + void VisitDiv(HDiv* div) override { BinaryFP(div); } - void VisitCompare(HCompare* compare) OVERRIDE { + void VisitCompare(HCompare* compare) override { BinaryFP(compare); } - void VisitReturn(HReturn* ret) OVERRIDE { + void VisitReturn(HReturn* ret) override { HConstant* value = ret->InputAt(0)->AsConstant(); if ((value != nullptr && DataType::IsFloatingPointType(value->GetType()))) { ReplaceInput(ret, value, 0, true); } } - void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE { + void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override { HandleInvoke(invoke); } - void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE { + void VisitInvokeVirtual(HInvokeVirtual* invoke) override { HandleInvoke(invoke); } - void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE { + void VisitInvokeInterface(HInvokeInterface* invoke) override { HandleInvoke(invoke); } - void VisitLoadClass(HLoadClass* load_class) OVERRIDE { + void VisitLoadClass(HLoadClass* load_class) override { if (load_class->HasPcRelativeLoadKind()) { HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(load_class); load_class->AddSpecialInput(method_address); } } - void 
VisitLoadString(HLoadString* load_string) OVERRIDE { + void VisitLoadString(HLoadString* load_string) override { if (load_string->HasPcRelativeLoadKind()) { HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(load_string); load_string->AddSpecialInput(method_address); @@ -102,31 +102,31 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { } } - void VisitEqual(HEqual* cond) OVERRIDE { + void VisitEqual(HEqual* cond) override { BinaryFP(cond); } - void VisitNotEqual(HNotEqual* cond) OVERRIDE { + void VisitNotEqual(HNotEqual* cond) override { BinaryFP(cond); } - void VisitLessThan(HLessThan* cond) OVERRIDE { + void VisitLessThan(HLessThan* cond) override { BinaryFP(cond); } - void VisitLessThanOrEqual(HLessThanOrEqual* cond) OVERRIDE { + void VisitLessThanOrEqual(HLessThanOrEqual* cond) override { BinaryFP(cond); } - void VisitGreaterThan(HGreaterThan* cond) OVERRIDE { + void VisitGreaterThan(HGreaterThan* cond) override { BinaryFP(cond); } - void VisitGreaterThanOrEqual(HGreaterThanOrEqual* cond) OVERRIDE { + void VisitGreaterThanOrEqual(HGreaterThanOrEqual* cond) override { BinaryFP(cond); } - void VisitNeg(HNeg* neg) OVERRIDE { + void VisitNeg(HNeg* neg) override { if (DataType::IsFloatingPointType(neg->GetType())) { // We need to replace the HNeg with a HX86FPNeg in order to address the constant area. HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(neg); @@ -141,7 +141,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { } } - void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE { + void VisitPackedSwitch(HPackedSwitch* switch_insn) override { if (switch_insn->GetNumEntries() <= InstructionCodeGeneratorX86::kPackedSwitchJumpTableThreshold) { return; diff --git a/compiler/optimizing/pc_relative_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h index db56b7f053..3b470a6502 100644 --- a/compiler/optimizing/pc_relative_fixups_x86.h +++ b/compiler/optimizing/pc_relative_fixups_x86.h @@ -34,7 +34,7 @@ class PcRelativeFixups : public HOptimization { static constexpr const char* kPcRelativeFixupsX86PassName = "pc_relative_fixups_x86"; - bool Run() OVERRIDE; + bool Run() override; private: CodeGenerator* codegen_; diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc index 060613d349..fc81740013 100644 --- a/compiler/optimizing/prepare_for_register_allocation.cc +++ b/compiler/optimizing/prepare_for_register_allocation.cc @@ -17,6 +17,7 @@ #include "prepare_for_register_allocation.h" #include "dex/dex_file_types.h" +#include "driver/compiler_options.h" #include "jni/jni_internal.h" #include "optimizing_compiler_stats.h" #include "well_known_classes.h" @@ -27,7 +28,7 @@ void PrepareForRegisterAllocation::Run() { // Order does not matter. for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) { // No need to visit the phis. 
- for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done(); + for (HInstructionIteratorHandleChanges inst_it(block->GetInstructions()); !inst_it.Done(); inst_it.Advance()) { inst_it.Current()->Accept(this); } @@ -50,6 +51,19 @@ void PrepareForRegisterAllocation::VisitInstanceOf(HInstanceOf* instance_of) { void PrepareForRegisterAllocation::VisitNullCheck(HNullCheck* check) { check->ReplaceWith(check->InputAt(0)); + if (compiler_options_.GetImplicitNullChecks()) { + HInstruction* next = check->GetNext(); + + // The `PrepareForRegisterAllocation` pass removes `HBoundType` from the graph, + // so do it ourselves now to not prevent optimizations. + while (next->IsBoundType()) { + next = next->GetNext(); + VisitBoundType(next->GetPrevious()->AsBoundType()); + } + if (next->CanDoImplicitNullCheckOn(check->InputAt(0))) { + check->MarkEmittedAtUseSite(); + } + } } void PrepareForRegisterAllocation::VisitDivZeroCheck(HDivZeroCheck* check) { diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h index f6e4d3ef99..a8ab256e27 100644 --- a/compiler/optimizing/prepare_for_register_allocation.h +++ b/compiler/optimizing/prepare_for_register_allocation.h @@ -21,6 +21,7 @@ namespace art { +class CompilerOptions; class OptimizingCompilerStats; /** @@ -30,9 +31,11 @@ class OptimizingCompilerStats; */ class PrepareForRegisterAllocation : public HGraphDelegateVisitor { public: - explicit PrepareForRegisterAllocation(HGraph* graph, - OptimizingCompilerStats* stats = nullptr) - : HGraphDelegateVisitor(graph, stats) {} + PrepareForRegisterAllocation(HGraph* graph, + const CompilerOptions& compiler_options, + OptimizingCompilerStats* stats = nullptr) + : HGraphDelegateVisitor(graph, stats), + compiler_options_(compiler_options) {} void Run(); @@ -40,22 +43,24 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor { "prepare_for_register_allocation"; private: - void VisitCheckCast(HCheckCast* check_cast) OVERRIDE; - void VisitInstanceOf(HInstanceOf* instance_of) OVERRIDE; - void VisitNullCheck(HNullCheck* check) OVERRIDE; - void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE; - void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE; - void VisitBoundType(HBoundType* bound_type) OVERRIDE; - void VisitArraySet(HArraySet* instruction) OVERRIDE; - void VisitClinitCheck(HClinitCheck* check) OVERRIDE; - void VisitCondition(HCondition* condition) OVERRIDE; - void VisitConstructorFence(HConstructorFence* constructor_fence) OVERRIDE; - void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE; - void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE; + void VisitCheckCast(HCheckCast* check_cast) override; + void VisitInstanceOf(HInstanceOf* instance_of) override; + void VisitNullCheck(HNullCheck* check) override; + void VisitDivZeroCheck(HDivZeroCheck* check) override; + void VisitBoundsCheck(HBoundsCheck* check) override; + void VisitBoundType(HBoundType* bound_type) override; + void VisitArraySet(HArraySet* instruction) override; + void VisitClinitCheck(HClinitCheck* check) override; + void VisitCondition(HCondition* condition) override; + void VisitConstructorFence(HConstructorFence* constructor_fence) override; + void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override; + void VisitDeoptimize(HDeoptimize* deoptimize) override; bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const; bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const; + const CompilerOptions& 
compiler_options_; + DISALLOW_COPY_AND_ASSIGN(PrepareForRegisterAllocation); }; diff --git a/compiler/optimizing/pretty_printer.h b/compiler/optimizing/pretty_printer.h index c6579dc5e0..8ef9ce4e8b 100644 --- a/compiler/optimizing/pretty_printer.h +++ b/compiler/optimizing/pretty_printer.h @@ -33,7 +33,7 @@ class HPrettyPrinter : public HGraphVisitor { PrintString(": "); } - void VisitInstruction(HInstruction* instruction) OVERRIDE { + void VisitInstruction(HInstruction* instruction) override { PrintPreInstruction(instruction); PrintString(instruction->DebugName()); PrintPostInstruction(instruction); @@ -70,7 +70,7 @@ class HPrettyPrinter : public HGraphVisitor { PrintNewLine(); } - void VisitBasicBlock(HBasicBlock* block) OVERRIDE { + void VisitBasicBlock(HBasicBlock* block) override { PrintString("BasicBlock "); PrintInt(block->GetBlockId()); const ArenaVector<HBasicBlock*>& predecessors = block->GetPredecessors(); @@ -108,15 +108,15 @@ class StringPrettyPrinter : public HPrettyPrinter { explicit StringPrettyPrinter(HGraph* graph) : HPrettyPrinter(graph), str_(""), current_block_(nullptr) { } - void PrintInt(int value) OVERRIDE { + void PrintInt(int value) override { str_ += android::base::StringPrintf("%d", value); } - void PrintString(const char* value) OVERRIDE { + void PrintString(const char* value) override { str_ += value; } - void PrintNewLine() OVERRIDE { + void PrintNewLine() override { str_ += '\n'; } @@ -124,12 +124,12 @@ class StringPrettyPrinter : public HPrettyPrinter { std::string str() const { return str_; } - void VisitBasicBlock(HBasicBlock* block) OVERRIDE { + void VisitBasicBlock(HBasicBlock* block) override { current_block_ = block; HPrettyPrinter::VisitBasicBlock(block); } - void VisitGoto(HGoto* gota) OVERRIDE { + void VisitGoto(HGoto* gota) override { PrintString(" "); PrintInt(gota->GetId()); PrintString(": Goto "); diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index 0d622484ee..9079658e1c 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -94,29 +94,29 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor { worklist_.reserve(kDefaultWorklistSize); } - void VisitDeoptimize(HDeoptimize* deopt) OVERRIDE; - void VisitNewInstance(HNewInstance* new_instance) OVERRIDE; - void VisitLoadClass(HLoadClass* load_class) OVERRIDE; - void VisitInstanceOf(HInstanceOf* load_class) OVERRIDE; - void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE; - void VisitLoadMethodHandle(HLoadMethodHandle* instr) OVERRIDE; - void VisitLoadMethodType(HLoadMethodType* instr) OVERRIDE; - void VisitLoadString(HLoadString* instr) OVERRIDE; - void VisitLoadException(HLoadException* instr) OVERRIDE; - void VisitNewArray(HNewArray* instr) OVERRIDE; - void VisitParameterValue(HParameterValue* instr) OVERRIDE; - void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE; - void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE; - void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE; - void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) OVERRIDE; - void VisitInvoke(HInvoke* instr) OVERRIDE; - void VisitArrayGet(HArrayGet* instr) OVERRIDE; - void VisitCheckCast(HCheckCast* instr) OVERRIDE; - void VisitBoundType(HBoundType* instr) OVERRIDE; - void VisitNullCheck(HNullCheck* instr) OVERRIDE; - void VisitPhi(HPhi* phi); - - void VisitBasicBlock(HBasicBlock* block); + void 
VisitDeoptimize(HDeoptimize* deopt) override; + void VisitNewInstance(HNewInstance* new_instance) override; + void VisitLoadClass(HLoadClass* load_class) override; + void VisitInstanceOf(HInstanceOf* load_class) override; + void VisitClinitCheck(HClinitCheck* clinit_check) override; + void VisitLoadMethodHandle(HLoadMethodHandle* instr) override; + void VisitLoadMethodType(HLoadMethodType* instr) override; + void VisitLoadString(HLoadString* instr) override; + void VisitLoadException(HLoadException* instr) override; + void VisitNewArray(HNewArray* instr) override; + void VisitParameterValue(HParameterValue* instr) override; + void VisitInstanceFieldGet(HInstanceFieldGet* instr) override; + void VisitStaticFieldGet(HStaticFieldGet* instr) override; + void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) override; + void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) override; + void VisitInvoke(HInvoke* instr) override; + void VisitArrayGet(HArrayGet* instr) override; + void VisitCheckCast(HCheckCast* instr) override; + void VisitBoundType(HBoundType* instr) override; + void VisitNullCheck(HNullCheck* instr) override; + void VisitPhi(HPhi* phi) override; + + void VisitBasicBlock(HBasicBlock* block) override; void ProcessWorklist(); private: diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h index d36d592708..7c6a048444 100644 --- a/compiler/optimizing/reference_type_propagation.h +++ b/compiler/optimizing/reference_type_propagation.h @@ -40,7 +40,7 @@ class ReferenceTypePropagation : public HOptimization { // Visit a single instruction. void Visit(HInstruction* instruction); - bool Run() OVERRIDE; + bool Run() override; // Returns true if klass is admissible to the propagation: non-null and resolved. // For an array type, we also check if the component type is admissible. diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h index 3072c92e0f..16131e1c71 100644 --- a/compiler/optimizing/register_allocator_graph_color.h +++ b/compiler/optimizing/register_allocator_graph_color.h @@ -90,9 +90,9 @@ class RegisterAllocatorGraphColor : public RegisterAllocator { CodeGenerator* codegen, const SsaLivenessAnalysis& analysis, bool iterative_move_coalescing = true); - ~RegisterAllocatorGraphColor() OVERRIDE; + ~RegisterAllocatorGraphColor() override; - void AllocateRegisters() OVERRIDE; + void AllocateRegisters() override; bool Validate(bool log_fatal_on_failure); diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc index 216fb57a96..1e00003701 100644 --- a/compiler/optimizing/register_allocator_linear_scan.cc +++ b/compiler/optimizing/register_allocator_linear_scan.cc @@ -312,7 +312,7 @@ void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction) for (size_t safepoint_index = safepoints_.size(); safepoint_index > 0; --safepoint_index) { HInstruction* safepoint = safepoints_[safepoint_index - 1u]; - size_t safepoint_position = safepoint->GetLifetimePosition(); + size_t safepoint_position = SafepointPosition::ComputePosition(safepoint); // Test that safepoints are ordered in the optimal way. 
DCHECK(safepoint_index == safepoints_.size() || diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h index 36788b7c3c..4d445c7ff7 100644 --- a/compiler/optimizing/register_allocator_linear_scan.h +++ b/compiler/optimizing/register_allocator_linear_scan.h @@ -42,11 +42,11 @@ class RegisterAllocatorLinearScan : public RegisterAllocator { RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator, CodeGenerator* codegen, const SsaLivenessAnalysis& analysis); - ~RegisterAllocatorLinearScan() OVERRIDE; + ~RegisterAllocatorLinearScan() override; - void AllocateRegisters() OVERRIDE; + void AllocateRegisters() override; - bool Validate(bool log_fatal_on_failure) OVERRIDE { + bool Validate(bool log_fatal_on_failure) override { processing_core_registers_ = true; if (!ValidateInternal(log_fatal_on_failure)) { return false; diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc index 7144775c2b..be5304c7c3 100644 --- a/compiler/optimizing/register_allocator_test.cc +++ b/compiler/optimizing/register_allocator_test.cc @@ -40,7 +40,7 @@ using Strategy = RegisterAllocator::Strategy; class RegisterAllocatorTest : public OptimizingUnitTest { protected: - void SetUp() OVERRIDE { + void SetUp() override { // This test is using the x86 ISA. OverrideInstructionSetFeatures(InstructionSet::kX86, "default"); OptimizingUnitTest::SetUp(); @@ -872,9 +872,9 @@ TEST_F(RegisterAllocatorTest, SpillInactive) { // Create an interval with lifetime holes. static constexpr size_t ranges1[][2] = {{0, 2}, {4, 6}, {8, 10}}; LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), -1, one); - first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8)); - first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 7)); - first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 6)); + first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 8)); + first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 7)); + first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 6)); locations = new (GetAllocator()) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); @@ -895,9 +895,9 @@ TEST_F(RegisterAllocatorTest, SpillInactive) { // before lifetime position 6 yet. 
static constexpr size_t ranges3[][2] = {{2, 4}, {8, 10}}; LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), GetScopedAllocator(), -1, three); - third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8)); - third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 4)); - third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 3)); + third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 8)); + third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 4)); + third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 3)); locations = new (GetAllocator()) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); third = third->SplitAt(3); diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc index 1aa16f45bc..df897a4904 100644 --- a/compiler/optimizing/scheduler.cc +++ b/compiler/optimizing/scheduler.cc @@ -280,6 +280,23 @@ bool SchedulingGraph::HasSideEffectDependency(HInstruction* node, return false; } +// Check if the specified instruction is a better candidate which more likely will +// have other instructions depending on it. +static bool IsBetterCandidateWithMoreLikelyDependencies(HInstruction* new_candidate, + HInstruction* old_candidate) { + if (!new_candidate->GetSideEffects().Includes(old_candidate->GetSideEffects())) { + // Weaker side effects. + return false; + } + if (old_candidate->GetSideEffects().Includes(new_candidate->GetSideEffects())) { + // Same side effects, check if `new_candidate` has stronger `CanThrow()`. + return new_candidate->CanThrow() && !old_candidate->CanThrow(); + } else { + // Stronger side effects, check if `new_candidate` has at least as strong `CanThrow()`. + return new_candidate->CanThrow() || !old_candidate->CanThrow(); + } +} + void SchedulingGraph::AddDependencies(HInstruction* instruction, bool is_scheduling_barrier) { SchedulingNode* instruction_node = GetNode(instruction); @@ -331,6 +348,7 @@ void SchedulingGraph::AddDependencies(HInstruction* instruction, bool is_schedul // Side effect dependencies. if (!instruction->GetSideEffects().DoesNothing() || instruction->CanThrow()) { + HInstruction* dep_chain_candidate = nullptr; for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) { SchedulingNode* other_node = GetNode(other); if (other_node->IsSchedulingBarrier()) { @@ -340,7 +358,18 @@ void SchedulingGraph::AddDependencies(HInstruction* instruction, bool is_schedul break; } if (HasSideEffectDependency(other, instruction)) { - AddOtherDependency(other_node, instruction_node); + if (dep_chain_candidate != nullptr && + HasSideEffectDependency(other, dep_chain_candidate)) { + // Skip an explicit dependency to reduce memory usage, rely on the transitive dependency. + } else { + AddOtherDependency(other_node, instruction_node); + } + // Check if `other` is a better candidate which more likely will have other instructions + // depending on it. 
+ if (dep_chain_candidate == nullptr || + IsBetterCandidateWithMoreLikelyDependencies(other, dep_chain_candidate)) { + dep_chain_candidate = other; + } } } } diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h index fd48d844e6..48e80f5f8b 100644 --- a/compiler/optimizing/scheduler.h +++ b/compiler/optimizing/scheduler.h @@ -339,7 +339,7 @@ class SchedulingLatencyVisitor : public HGraphDelegateVisitor { last_visited_latency_(0), last_visited_internal_latency_(0) {} - void VisitInstruction(HInstruction* instruction) OVERRIDE { + void VisitInstruction(HInstruction* instruction) override { LOG(FATAL) << "Error visiting " << instruction->DebugName() << ". " "Architecture-specific scheduling latency visitors must handle all instructions" " (potentially by overriding the generic `VisitInstruction()`."; @@ -392,7 +392,7 @@ class RandomSchedulingNodeSelector : public SchedulingNodeSelector { } SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes, - const SchedulingGraph& graph) OVERRIDE { + const SchedulingGraph& graph) override { UNUSED(graph); DCHECK(!nodes->empty()); size_t select = rand_r(&seed_) % nodes->size(); @@ -412,9 +412,9 @@ class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector { public: CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {} - void Reset() OVERRIDE { prev_select_ = nullptr; } + void Reset() override { prev_select_ = nullptr; } SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes, - const SchedulingGraph& graph) OVERRIDE; + const SchedulingGraph& graph) override; protected: SchedulingNode* GetHigherPrioritySchedulingNode(SchedulingNode* candidate, @@ -492,7 +492,7 @@ class HInstructionScheduling : public HOptimization { codegen_(cg), instruction_set_(instruction_set) {} - bool Run() OVERRIDE { + bool Run() override { return Run(/*only_optimize_loop_blocks*/ true, /*schedule_randomly*/ false); } diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h index 2f369486b3..875593bbf0 100644 --- a/compiler/optimizing/scheduler_arm.h +++ b/compiler/optimizing/scheduler_arm.h @@ -100,7 +100,7 @@ class SchedulingLatencyVisitorARM : public SchedulingLatencyVisitor { M(DataProcWithShifterOp, unused) #define DECLARE_VISIT_INSTRUCTION(type, unused) \ - void Visit##type(H##type* instruction) OVERRIDE; + void Visit##type(H##type* instruction) override; FOR_EACH_SCHEDULED_ARM_INSTRUCTION(DECLARE_VISIT_INSTRUCTION) FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(DECLARE_VISIT_INSTRUCTION) @@ -140,9 +140,9 @@ class HSchedulerARM : public HScheduler { HSchedulerARM(SchedulingNodeSelector* selector, SchedulingLatencyVisitorARM* arm_latency_visitor) : HScheduler(arm_latency_visitor, selector) {} - ~HSchedulerARM() OVERRIDE {} + ~HSchedulerARM() override {} - bool IsSchedulable(const HInstruction* instruction) const OVERRIDE { + bool IsSchedulable(const HInstruction* instruction) const override { #define CASE_INSTRUCTION_KIND(type, unused) case \ HInstruction::InstructionKind::k##type: switch (instruction->GetKind()) { diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h index 0d2f8d9fa0..7f6549dcfe 100644 --- a/compiler/optimizing/scheduler_arm64.h +++ b/compiler/optimizing/scheduler_arm64.h @@ -118,7 +118,7 @@ class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor { M(DataProcWithShifterOp, unused) #define DECLARE_VISIT_INSTRUCTION(type, unused) \ - void Visit##type(H##type* instruction) OVERRIDE; + void 
Visit##type(H##type* instruction) override; FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(DECLARE_VISIT_INSTRUCTION) FOR_EACH_SCHEDULED_ABSTRACT_INSTRUCTION(DECLARE_VISIT_INSTRUCTION) @@ -136,9 +136,9 @@ class HSchedulerARM64 : public HScheduler { public: explicit HSchedulerARM64(SchedulingNodeSelector* selector) : HScheduler(&arm64_latency_visitor_, selector) {} - ~HSchedulerARM64() OVERRIDE {} + ~HSchedulerARM64() override {} - bool IsSchedulable(const HInstruction* instruction) const OVERRIDE { + bool IsSchedulable(const HInstruction* instruction) const override { #define CASE_INSTRUCTION_KIND(type, unused) case \ HInstruction::InstructionKind::k##type: switch (instruction->GetKind()) { @@ -160,7 +160,7 @@ class HSchedulerARM64 : public HScheduler { // SIMD&FP registers are callee saved) so don't reorder such vector instructions. // // TODO: remove this when proper support of SIMD registers is introduced to the compiler. - bool IsSchedulingBarrier(const HInstruction* instr) const OVERRIDE { + bool IsSchedulingBarrier(const HInstruction* instr) const override { return HScheduler::IsSchedulingBarrier(instr) || instr->IsVecReduce() || instr->IsVecExtractScalar() || diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc index fe23fb4cff..981fcc42a7 100644 --- a/compiler/optimizing/scheduler_test.cc +++ b/compiler/optimizing/scheduler_test.cc @@ -171,7 +171,9 @@ class SchedulerTest : public OptimizingUnitTest { ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set1, array_get1)); ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set2, array_get2)); ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_get2, array_set1)); - ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set2, array_set1)); + // Unnecessary dependency is not stored; we rely on transitive dependencies. + // The array_set2 -> array_get2 -> array_set1 dependencies are tested above. + ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(array_set2, array_set1)); // Env dependency. ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(div_check, mul)); @@ -308,7 +310,9 @@ class SchedulerTest : public OptimizingUnitTest { loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i); loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_j); ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2)); - ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_i)); + // Unnecessary dependency is not stored; we rely on transitive dependencies. + // The arr_set_j -> arr_set_sub0 -> arr_set_add0 -> arr_set_i dependencies are tested below. + ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_i)); // Test side effect dependency based on LSA analysis: array[i] and array[i+0] loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i); @@ -320,7 +324,10 @@ class SchedulerTest : public OptimizingUnitTest { loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i); loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_sub0); ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2)); - ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub0, arr_set_i)); + // Unnecessary dependency is not stored; we rely on transitive dependencies. + ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub0, arr_set_i)); + // Instead, we rely on arr_set_sub0 -> arr_set_add0 -> arr_set_i; the latter is tested above.
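// [Editor's sketch, not part of this change.] The rewritten assertions lean on
// transitivity: with only the chain edges stored, array_set2 is still ordered
// after array_set1 via array_set2 -> array_get2 -> array_set1. A standalone
// reachability walk over an adjacency list (names are illustrative; this is not
// the test's real API):
#include <map>
#include <set>
#include <string>
#include <vector>

static bool Reaches(const std::map<std::string, std::vector<std::string>>& deps,
                    const std::string& from,
                    const std::string& to,
                    std::set<std::string>* seen) {
  if (from == to) return true;
  if (!seen->insert(from).second) return false;  // Already visited.
  auto it = deps.find(from);
  if (it == deps.end()) return false;
  for (const std::string& next : it->second) {
    if (Reaches(deps, next, to, seen)) return true;
  }
  return false;
}

int main() {
  // The immediate "other" dependencies the pruned graph keeps.
  std::map<std::string, std::vector<std::string>> deps = {
      {"array_set2", {"array_get2"}},
      {"array_get2", {"array_set1"}},
  };
  std::set<std::string> seen;
  return Reaches(deps, "array_set2", "array_set1", &seen) ? 0 : 1;  // 0: reachable
}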
+ ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub0, arr_set_add0)); // Test side effect dependency based on LSA analysis: array[i] and array[i+1] loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i); @@ -335,11 +342,12 @@ class SchedulerTest : public OptimizingUnitTest { ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub1, arr_set_add1)); // Test side effect dependency based on LSA analysis: array[j] and all other array accesses - ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_i)); - ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_add0)); ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_sub0)); ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_add1)); ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_sub1)); + // Unnecessary dependencies are not stored; we rely on transitive dependencies. + ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_i)); + ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_add0)); // Test that ArraySet and FieldSet should not have a side effect dependency ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_i, set_field10)); diff --git a/compiler/optimizing/select_generator.h b/compiler/optimizing/select_generator.h index d24d2264b2..2889166f60 100644 --- a/compiler/optimizing/select_generator.h +++ b/compiler/optimizing/select_generator.h @@ -68,7 +68,7 @@ class HSelectGenerator : public HOptimization { OptimizingCompilerStats* stats, const char* name = kSelectGeneratorPassName); - bool Run() OVERRIDE; + bool Run() override; static constexpr const char* kSelectGeneratorPassName = "select_generator"; diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc index 5c2f57e314..c864951a47 100644 --- a/compiler/optimizing/sharpening.cc +++ b/compiler/optimizing/sharpening.cc @@ -35,22 +35,6 @@ namespace art { -bool HSharpening::Run() { - // We don't care about the order of the blocks here. - for (HBasicBlock* block : graph_->GetReversePostOrder()) { - for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { - HInstruction* instruction = it.Current(); - if (instruction->IsInvokeStaticOrDirect()) { - SharpenInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect(), codegen_); - } - // TODO: Move the sharpening of invoke-virtual/-interface/-super from HGraphBuilder - // here. Rewrite it to avoid the CompilerDriver's reliance on verifier data - // because we know the type better when inlining. - } - } - return true; -} - static bool IsInBootImage(ArtMethod* method) { const std::vector<gc::space::ImageSpace*>& image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces(); @@ -72,17 +56,14 @@ static bool BootImageAOTCanEmbedMethod(ArtMethod* method, const CompilerOptions& return compiler_options.IsImageClass(dex_file.StringByTypeIdx(klass->GetDexTypeIndex())); } -void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke, - CodeGenerator* codegen) { - if (invoke->IsStringInit()) { - // Not using the dex cache arrays. But we could still try to use a better dispatch... - // TODO: Use direct_method and direct_code for the appropriate StringFactory method.
- return; +HInvokeStaticOrDirect::DispatchInfo HSharpening::SharpenInvokeStaticOrDirect( + ArtMethod* callee, CodeGenerator* codegen) { + if (kIsDebugBuild) { + ScopedObjectAccess soa(Thread::Current()); // Required for GetDeclaringClass below. + DCHECK(callee != nullptr); + DCHECK(!(callee->IsConstructor() && callee->GetDeclaringClass()->IsStringClass())); } - ArtMethod* callee = invoke->GetResolvedMethod(); - DCHECK(callee != nullptr); - HInvokeStaticOrDirect::MethodLoadKind method_load_kind; HInvokeStaticOrDirect::CodePtrLocation code_ptr_location; uint64_t method_load_data = 0u; @@ -141,9 +122,7 @@ void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke, HInvokeStaticOrDirect::DispatchInfo desired_dispatch_info = { method_load_kind, code_ptr_location, method_load_data }; - HInvokeStaticOrDirect::DispatchInfo dispatch_info = - codegen->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, invoke); - invoke->SetDispatchInfo(dispatch_info); + return codegen->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, callee); } HLoadClass::LoadKind HSharpening::ComputeLoadClassKind( diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h index cbac361891..b81867201f 100644 --- a/compiler/optimizing/sharpening.h +++ b/compiler/optimizing/sharpening.h @@ -25,24 +25,13 @@ namespace art { class CodeGenerator; class DexCompilationUnit; -// Optimization that tries to improve the way we dispatch methods and access types, -// fields, etc. Besides actual method sharpening based on receiver type (for example -// virtual->direct), this includes selecting the best available dispatch for -// invoke-static/-direct based on code generator support. -class HSharpening : public HOptimization { +// Utility methods that try to improve the way we dispatch methods, and access +// types and strings. +class HSharpening { public: - HSharpening(HGraph* graph, - CodeGenerator* codegen, - const char* name = kSharpeningPassName) - : HOptimization(graph, name), - codegen_(codegen) { } - - bool Run() OVERRIDE; - - static constexpr const char* kSharpeningPassName = "sharpening"; - - // Used by Sharpening and InstructionSimplifier. - static void SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke, CodeGenerator* codegen); + // Used by the builder and InstructionSimplifier. + static HInvokeStaticOrDirect::DispatchInfo SharpenInvokeStaticOrDirect( + ArtMethod* callee, CodeGenerator* codegen); // Used by the builder and the inliner. 
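// [Editor's sketch, not part of this change.] With HSharpening demoted from an
// HOptimization pass to a utility, callers now apply the returned DispatchInfo
// themselves. A hypothetical call site, mirroring the code removed from
// sharpening.cc above:
//
//   ArtMethod* callee = invoke->GetResolvedMethod();
//   invoke->SetDispatchInfo(
//       HSharpening::SharpenInvokeStaticOrDirect(callee, codegen));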
static HLoadClass::LoadKind ComputeLoadClassKind(HLoadClass* load_class, @@ -61,9 +50,6 @@ class HSharpening : public HOptimization { CodeGenerator* codegen, const DexCompilationUnit& dex_compilation_unit, VariableSizedHandleScope* handles); - - private: - CodeGenerator* codegen_; }; } // namespace art diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc index 97317124ef..4b0be07f3b 100644 --- a/compiler/optimizing/side_effects_test.cc +++ b/compiler/optimizing/side_effects_test.cc @@ -202,6 +202,7 @@ TEST(SideEffectsTest, GC) { EXPECT_TRUE(depends_on_gc.MayDependOn(all_changes)); EXPECT_TRUE(depends_on_gc.Union(can_trigger_gc).MayDependOn(all_changes)); EXPECT_FALSE(can_trigger_gc.MayDependOn(all_changes)); + EXPECT_FALSE(can_trigger_gc.MayDependOn(can_trigger_gc)); EXPECT_TRUE(all_changes.Includes(can_trigger_gc)); EXPECT_FALSE(all_changes.Includes(depends_on_gc)); diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc index dda29a1b4b..16c23c8df5 100644 --- a/compiler/optimizing/ssa_builder.cc +++ b/compiler/optimizing/ssa_builder.cc @@ -16,6 +16,8 @@ #include "ssa_builder.h" +#include "base/arena_bit_vector.h" +#include "base/bit_vector-inl.h" #include "data_type-inl.h" #include "dex/bytecode_utils.h" #include "mirror/class-inl.h" @@ -415,85 +417,36 @@ bool SsaBuilder::FixAmbiguousArrayOps() { return true; } -static bool HasAliasInEnvironments(HInstruction* instruction) { - HEnvironment* last_user = nullptr; +bool SsaBuilder::HasAliasInEnvironments(HInstruction* instruction) { + ScopedArenaHashSet<size_t> seen_users( + local_allocator_->Adapter(kArenaAllocGraphBuilder)); for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) { DCHECK(use.GetUser() != nullptr); - // Note: The first comparison (== null) always fails. - if (use.GetUser() == last_user) { + size_t id = use.GetUser()->GetHolder()->GetId(); + if (seen_users.find(id) != seen_users.end()) { return true; } - last_user = use.GetUser(); - } - - if (kIsDebugBuild) { - // Do a quadratic search to ensure same environment uses are next - // to each other. - const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses(); - for (auto current = env_uses.begin(), end = env_uses.end(); current != end; ++current) { - auto next = current; - for (++next; next != end; ++next) { - DCHECK(next->GetUser() != current->GetUser()); - } - } + seen_users.insert(id); } return false; } -void SsaBuilder::ReplaceUninitializedStringPhis() { - ScopedArenaHashSet<HInstruction*> seen_instructions( - local_allocator_->Adapter(kArenaAllocGraphBuilder)); - ScopedArenaVector<HInstruction*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder)); - - // Iterate over all inputs and uses of the phi, recursively, until all related instructions - // have been visited. - for (const auto& pair : uninitialized_string_phis_) { - HPhi* string_phi = pair.first; - HInvoke* invoke = pair.second; - worklist.push_back(string_phi); - HNewInstance* found_instance = nullptr; - do { - HInstruction* current = worklist.back(); - worklist.pop_back(); - if (seen_instructions.find(current) != seen_instructions.end()) { - continue; - } - seen_instructions.insert(current); - if (current->IsNewInstance()) { - // If it is the first time we see the allocation, replace its uses. We don't register - // it through `RemoveRedundantUninitializedStrings`, as that method makes assumption about - // aliasing and environment uses that don't hold when the string escapes to phis. 
- // Note that this also means we will keep the (useless) allocation. - if (found_instance == nullptr) { - found_instance = current->AsNewInstance(); - } else { - DCHECK(found_instance == current); - } - } else if (current->IsPhi()) { - // Push all inputs to the worklist. Those should be Phis or NewInstance. - for (HInstruction* input : current->GetInputs()) { - DCHECK(input->IsPhi() || input->IsNewInstance()) << input->DebugName(); - worklist.push_back(input); - } - } else { - // The verifier prevents any other DEX uses of the uninitialized string. - DCHECK(current->IsEqual() || current->IsNotEqual()); - continue; - } - current->ReplaceUsesDominatedBy(invoke, invoke); - current->ReplaceEnvUsesDominatedBy(invoke, invoke); - // Push all users to the worklist. Now that we have replaced - // the uses dominated by the invokes, the remaining users should only - // be Phi, or Equal/NotEqual. - for (const HUseListNode<HInstruction*>& use : current->GetUses()) { - HInstruction* user = use.GetUser(); - DCHECK(user->IsPhi() || user->IsEqual() || user->IsNotEqual()) << user->DebugName(); - worklist.push_back(user); - } - } while (!worklist.empty()); - seen_instructions.clear(); - DCHECK(found_instance != nullptr); +bool SsaBuilder::ReplaceUninitializedStringPhis() { + for (HInvoke* invoke : uninitialized_string_phis_) { + HInstruction* str = invoke->InputAt(invoke->InputCount() - 1); + if (str->IsPhi()) { + // If, after redundant phi and dead phi elimination, it's still a phi that feeds + // the invoke, then we must be compiling a method with irreducible loops. Just bail. + DCHECK(graph_->HasIrreducibleLoops()); + return false; + } + DCHECK(str->IsNewInstance()); + AddUninitializedString(str->AsNewInstance()); + str->ReplaceUsesDominatedBy(invoke, invoke); + str->ReplaceEnvUsesDominatedBy(invoke, invoke); + invoke->RemoveInputAt(invoke->InputCount() - 1); } + return true; } void SsaBuilder::RemoveRedundantUninitializedStrings() { @@ -508,8 +461,9 @@ void SsaBuilder::RemoveRedundantUninitializedStrings() { DCHECK(new_instance->IsStringAlloc()); // Replace NewInstance of String with NullConstant if not used prior to - // calling StringFactory. In case of deoptimization, the interpreter is - // expected to skip null check on the `this` argument of the StringFactory call. + // calling StringFactory. We check for aliases in environments in case of deoptimization. + // The interpreter is expected to skip the null check on the `this` argument of the + // StringFactory call. if (!new_instance->HasNonEnvironmentUses() && !HasAliasInEnvironments(new_instance)) { new_instance->ReplaceWith(graph_->GetNullConstant()); new_instance->GetBlock()->RemoveInstruction(new_instance); @@ -544,11 +498,6 @@ void SsaBuilder::RemoveRedundantUninitializedStrings() { GraphAnalysisResult SsaBuilder::BuildSsa() { DCHECK(!graph_->IsInSsaForm()); - // Replace Phis that feed in a String.<init>, as well as their aliases, with - // the actual String allocation invocation. We do this first, as the phis stored in - // the data structure might get removed from the graph in later stages during `BuildSsa`. - ReplaceUninitializedStringPhis(); - // Propagate types of phis. At this point, phis are typed void in the general // case, or float/double/reference if we created an equivalent phi. So we need // to propagate the types across phis to give them a correct type. If a type @@ -607,6 +556,14 @@ GraphAnalysisResult SsaBuilder::BuildSsa() { // input types.
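// [Editor's sketch, not part of this change.] The new HasAliasInEnvironments()
// reduces to duplicate detection over the ids of the environments' holders. The
// same shape in miniature, with plain size_t ids standing in for the holders:
#include <cstddef>
#include <unordered_set>
#include <vector>

static bool HasDuplicateHolder(const std::vector<size_t>& holder_ids) {
  std::unordered_set<size_t> seen;
  for (size_t id : holder_ids) {
    if (!seen.insert(id).second) {
      return true;  // A second environment use by the same holder: an alias.
    }
  }
  return false;
}

int main() {
  return (HasDuplicateHolder({1u, 2u, 2u}) && !HasDuplicateHolder({1u, 2u, 3u})) ? 0 : 1;
}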
dead_phi_elimimation.EliminateDeadPhis(); + // Replace Phis that feed in a String.<init> during instruction building. We + // run this after redundant and dead phi elimination to make sure the phi will have + // been replaced by the actual allocation. Only with an irreducible loop + // can a phi still be the input, in which case we bail. + if (!ReplaceUninitializedStringPhis()) { + return kAnalysisFailIrreducibleLoopAndStringInit; + } + // HInstructionBuilder replaced uses of NewInstances of String with the // results of their corresponding StringFactory calls. Unless the String // objects are used before they are initialized, they can be replaced with diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h index 765544508e..bb892c9304 100644 --- a/compiler/optimizing/ssa_builder.h +++ b/compiler/optimizing/ssa_builder.h @@ -97,8 +97,8 @@ class SsaBuilder : public ValueObject { } } - void AddUninitializedStringPhi(HPhi* phi, HInvoke* invoke) { - uninitialized_string_phis_.push_back(std::make_pair(phi, invoke)); + void AddUninitializedStringPhi(HInvoke* invoke) { + uninitialized_string_phis_.push_back(invoke); } private: @@ -123,7 +123,8 @@ class SsaBuilder : public ValueObject { HArrayGet* GetFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget); void RemoveRedundantUninitializedStrings(); - void ReplaceUninitializedStringPhis(); + bool ReplaceUninitializedStringPhis(); + bool HasAliasInEnvironments(HInstruction* instruction); HGraph* const graph_; Handle<mirror::ClassLoader> class_loader_; @@ -137,7 +138,7 @@ class SsaBuilder : public ValueObject { ScopedArenaVector<HArrayGet*> ambiguous_agets_; ScopedArenaVector<HArraySet*> ambiguous_asets_; ScopedArenaVector<HNewInstance*> uninitialized_strings_; - ScopedArenaVector<std::pair<HPhi*, HInvoke*>> uninitialized_string_phis_; + ScopedArenaVector<HInvoke*> uninitialized_string_phis_; DISALLOW_COPY_AND_ASSIGN(SsaBuilder); }; diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc index 2f782f39fc..62a70d6b12 100644 --- a/compiler/optimizing/ssa_liveness_analysis.cc +++ b/compiler/optimizing/ssa_liveness_analysis.cc @@ -103,9 +103,9 @@ void SsaLivenessAnalysis::ComputeLiveness() { ComputeLiveInAndLiveOutSets(); } -static void RecursivelyProcessInputs(HInstruction* current, - HInstruction* actual_user, - BitVector* live_in) { +void SsaLivenessAnalysis::RecursivelyProcessInputs(HInstruction* current, + HInstruction* actual_user, + BitVector* live_in) { HInputsRef inputs = current->GetInputs(); for (size_t i = 0; i < inputs.size(); ++i) { HInstruction* input = inputs[i]; @@ -131,11 +131,40 @@ static void RecursivelyProcessInputs(HInstruction* current, // Check that the inlined input is not a phi. Recursing on loop phis could // lead to an infinite loop. DCHECK(!input->IsPhi()); + DCHECK(!input->HasEnvironment()); RecursivelyProcessInputs(input, actual_user, live_in); } } } +void SsaLivenessAnalysis::ProcessEnvironment(HInstruction* current, + HInstruction* actual_user, + BitVector* live_in) { + for (HEnvironment* environment = current->GetEnvironment(); + environment != nullptr; + environment = environment->GetParent()) { + // Handle environment uses. See statements (b) and (c) of the + // SsaLivenessAnalysis.
+ for (size_t i = 0, e = environment->Size(); i < e; ++i) { + HInstruction* instruction = environment->GetInstructionAt(i); + if (instruction == nullptr) { + continue; + } + bool should_be_live = ShouldBeLiveForEnvironment(current, instruction); + // If this environment use does not keep the instruction live, it does not + // affect the live range of that instruction. + if (should_be_live) { + CHECK(instruction->HasSsaIndex()) << instruction->DebugName(); + live_in->SetBit(instruction->GetSsaIndex()); + instruction->GetLiveInterval()->AddUse(current, + environment, + i, + actual_user); + } + } + } +} + void SsaLivenessAnalysis::ComputeLiveRanges() { // Do a post order visit, adding inputs of instructions live in the block where // that instruction is defined, and killing instructions that are being visited. @@ -186,32 +215,6 @@ void SsaLivenessAnalysis::ComputeLiveRanges() { current->GetLiveInterval()->SetFrom(current->GetLifetimePosition()); } - // Process the environment first, because we know their uses come after - // or at the same liveness position of inputs. - for (HEnvironment* environment = current->GetEnvironment(); - environment != nullptr; - environment = environment->GetParent()) { - // Handle environment uses. See statements (b) and (c) of the - // SsaLivenessAnalysis. - for (size_t i = 0, e = environment->Size(); i < e; ++i) { - HInstruction* instruction = environment->GetInstructionAt(i); - if (instruction == nullptr) { - continue; - } - bool should_be_live = ShouldBeLiveForEnvironment(current, instruction); - // If this environment use does not keep the instruction live, it does not - // affect the live range of that instruction. - if (should_be_live) { - CHECK(instruction->HasSsaIndex()) << instruction->DebugName(); - live_in->SetBit(instruction->GetSsaIndex()); - instruction->GetLiveInterval()->AddUse(current, - environment, - i, - /* actual_user */ nullptr); - } - } - } - // Process inputs of instructions. if (current->IsEmittedAtUseSite()) { if (kIsDebugBuild) { @@ -224,6 +227,16 @@ void SsaLivenessAnalysis::ComputeLiveRanges() { DCHECK(!current->HasEnvironmentUses()); } } else { + // Process the environment first, because we know its uses come after + // or at the same liveness position as the inputs. + ProcessEnvironment(current, current, live_in); + + // Special case implicit null checks. We want their environment uses to be + // emitted at the instruction doing the actual null check. + HNullCheck* check = current->GetImplicitNullCheck(); + if (check != nullptr) { + ProcessEnvironment(check, current, live_in); + } RecursivelyProcessInputs(current, current, live_in); } } diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h index 83ca5bd5fa..92d0b08301 100644 --- a/compiler/optimizing/ssa_liveness_analysis.h +++ b/compiler/optimizing/ssa_liveness_analysis.h @@ -60,7 +60,7 @@ class BlockInfo : public ArenaObject<kArenaAllocSsaLiveness> { /** * A live range contains the start and end of a range where an instruction or a temporary * is live.
*/ -class LiveRange FINAL : public ArenaObject<kArenaAllocSsaLiveness> { +class LiveRange final : public ArenaObject<kArenaAllocSsaLiveness> { public: LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) { DCHECK_LT(start, end); @@ -230,12 +230,25 @@ class SafepointPosition : public ArenaObject<kArenaAllocSsaLiveness> { : instruction_(instruction), next_(nullptr) {} + static size_t ComputePosition(HInstruction* instruction) { + // We special case instructions emitted at use site, as their + // safepoint position needs to be at their use. + if (instruction->IsEmittedAtUseSite()) { + // Currently only applies to implicit null checks, which are emitted + // at the next instruction. + DCHECK(instruction->IsNullCheck()) << instruction->DebugName(); + return instruction->GetLifetimePosition() + 2; + } else { + return instruction->GetLifetimePosition(); + } + } + void SetNext(SafepointPosition* next) { next_ = next; } size_t GetPosition() const { - return instruction_->GetLifetimePosition(); + return ComputePosition(instruction_); } SafepointPosition* GetNext() const { @@ -922,7 +935,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { if (first_safepoint_ == nullptr) { first_safepoint_ = last_safepoint_ = safepoint; } else { - DCHECK_LT(last_safepoint_->GetPosition(), safepoint->GetPosition()); + DCHECK_LE(last_safepoint_->GetPosition(), safepoint->GetPosition()); last_safepoint_->SetNext(safepoint); last_safepoint_ = safepoint; } @@ -1252,6 +1265,13 @@ class SsaLivenessAnalysis : public ValueObject { // Update the live_out set of the block and returns whether it has changed. bool UpdateLiveOut(const HBasicBlock& block); + static void ProcessEnvironment(HInstruction* instruction, + HInstruction* actual_user, + BitVector* live_in); + static void RecursivelyProcessInputs(HInstruction* instruction, + HInstruction* actual_user, + BitVector* live_in); + // Returns whether `instruction` in an HEnvironment held by `env_holder` // should be kept live by the HEnvironment. 
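// [Editor's worked example, not part of this change.] Lifetime positions advance
// by two per instruction, so for an implicit HNullCheck at position p whose user
// sits at p + 2:
//
//   check->GetLifetimePosition()                 == p      (the check emits no code of its own)
//   SafepointPosition::ComputePosition(check)    == p + 2  (reported at the user's position)
//
// Two safepoints can therefore share a position, which is why AddSafepoint()
// above was relaxed from DCHECK_LT to DCHECK_LE.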
static bool ShouldBeLiveForEnvironment(HInstruction* env_holder, HInstruction* instruction) { diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc index a683c698d9..4b525531da 100644 --- a/compiler/optimizing/ssa_liveness_analysis_test.cc +++ b/compiler/optimizing/ssa_liveness_analysis_test.cc @@ -29,7 +29,7 @@ namespace art { class SsaLivenessAnalysisTest : public OptimizingUnitTest { protected: - void SetUp() OVERRIDE { + void SetUp() override { OptimizingUnitTest::SetUp(); graph_ = CreateGraph(); codegen_ = CodeGenerator::Create(graph_, *compiler_options_); diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h index ee859e834c..c5cc752ffc 100644 --- a/compiler/optimizing/ssa_phi_elimination.h +++ b/compiler/optimizing/ssa_phi_elimination.h @@ -31,7 +31,7 @@ class SsaDeadPhiElimination : public HOptimization { explicit SsaDeadPhiElimination(HGraph* graph) : HOptimization(graph, kSsaDeadPhiEliminationPassName) {} - bool Run() OVERRIDE; + bool Run() override; void MarkDeadPhis(); void EliminateDeadPhis(); @@ -53,7 +53,7 @@ class SsaRedundantPhiElimination : public HOptimization { explicit SsaRedundantPhiElimination(HGraph* graph) : HOptimization(graph, kSsaRedundantPhiEliminationPassName) {} - bool Run() OVERRIDE; + bool Run() override; static constexpr const char* kSsaRedundantPhiEliminationPassName = "redundant_phi_elimination"; diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc index 85ed06eb9b..e679893af2 100644 --- a/compiler/optimizing/ssa_test.cc +++ b/compiler/optimizing/ssa_test.cc @@ -38,15 +38,15 @@ class SsaPrettyPrinter : public HPrettyPrinter { public: explicit SsaPrettyPrinter(HGraph* graph) : HPrettyPrinter(graph), str_("") {} - void PrintInt(int value) OVERRIDE { + void PrintInt(int value) override { str_ += android::base::StringPrintf("%d", value); } - void PrintString(const char* value) OVERRIDE { + void PrintString(const char* value) override { str_ += value; } - void PrintNewLine() OVERRIDE { + void PrintNewLine() override { str_ += '\n'; } @@ -54,7 +54,7 @@ class SsaPrettyPrinter : public HPrettyPrinter { std::string str() const { return str_; } - void VisitIntConstant(HIntConstant* constant) OVERRIDE { + void VisitIntConstant(HIntConstant* constant) override { PrintPreInstruction(constant); str_ += constant->DebugName(); str_ += " "; diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc index f0069c0e09..b1abcf6747 100644 --- a/compiler/optimizing/x86_memory_gen.cc +++ b/compiler/optimizing/x86_memory_gen.cc @@ -31,7 +31,7 @@ class MemoryOperandVisitor : public HGraphVisitor { do_implicit_null_checks_(do_implicit_null_checks) {} private: - void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE { + void VisitBoundsCheck(HBoundsCheck* check) override { // Replace the length by the array itself, so that we can do compares to memory. HArrayLength* array_len = check->InputAt(1)->AsArrayLength(); diff --git a/compiler/optimizing/x86_memory_gen.h b/compiler/optimizing/x86_memory_gen.h index b254000f28..3f4178d58a 100644 --- a/compiler/optimizing/x86_memory_gen.h +++ b/compiler/optimizing/x86_memory_gen.h @@ -31,7 +31,7 @@ class X86MemoryOperandGeneration : public HOptimization { CodeGenerator* codegen, OptimizingCompilerStats* stats); - bool Run() OVERRIDE; + bool Run() override; static constexpr const char* kX86MemoryOperandGenerationPassName = "x86_memory_operand_generation";
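The x86_memory_gen hunks above are mechanical OVERRIDE/override renames, but the comment in VisitBoundsCheck() points at the pass's actual trick: once the bounds check's length input is replaced by the array reference itself, the backend can compare the index straight against the length field in memory instead of loading it into a register first. A hedged sketch of the resulting code shape, given as C++ comments (the length offset and registers are illustrative, not taken from this diff):

  // Before: the length is materialized, then compared.
  //   mov eax, [rdi + kLengthOffset]
  //   cmp esi, eax
  // After: a single compare with a memory operand.
  //   cmp esi, [rdi + kLengthOffset]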