56 files changed, 190 insertions, 176 deletions
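The change below converts every HOptimization::Run() in the optimizing compiler from void to bool, so each pass reports whether it transformed the graph or computed anything useful, and the pass driver can aggregate those results. As a rough, illustrative sketch of that contract (plain C++, invented names, not ART code), the idea is the one RunOptimizations uses further down with `change |= optimizations[i]->Run();`:

// Sketch only: a pass interface whose Run() reports whether it did anything,
// and a driver that aggregates the results. Names are hypothetical.
#include <memory>
#include <vector>

class Pass {
 public:
  virtual ~Pass() = default;
  // Returns false when the pass made no change and computed nothing useful.
  // Returning true is always safe; the contract is best effort.
  virtual bool Run() = 0;
};

class NoOpPass : public Pass {
 public:
  bool Run() override { return false; }  // bails out: nothing to do
};

// Runs every pass unconditionally and remembers whether any reported a change.
bool RunAll(const std::vector<std::unique_ptr<Pass>>& passes) {
  bool change = false;
  for (const std::unique_ptr<Pass>& pass : passes) {
    change |= pass->Run();
  }
  return change;
}

int main() {
  std::vector<std::unique_ptr<Pass>> passes;
  passes.push_back(std::make_unique<NoOpPass>());
  return RunAll(passes) ? 0 : 1;
}
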
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index d893cc88c4..dfefa524bf 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -1938,9 +1938,9 @@ class BCEVisitor : public HGraphVisitor {
   DISALLOW_COPY_AND_ASSIGN(BCEVisitor);
 };
-void BoundsCheckElimination::Run() {
+bool BoundsCheckElimination::Run() {
   if (!graph_->HasBoundsChecks()) {
-    return;
+    return false;
   }
   // Reverse post order guarantees a node's dominators are visited first.
@@ -1968,6 +1968,8 @@ void BoundsCheckElimination::Run() {
   // Perform cleanup.
   visitor.Finish();
+
+  return true;
 }
 }  // namespace art
diff --git a/compiler/optimizing/bounds_check_elimination.h b/compiler/optimizing/bounds_check_elimination.h
index 79c67a8c7a..92ab7984c8 100644
--- a/compiler/optimizing/bounds_check_elimination.h
+++ b/compiler/optimizing/bounds_check_elimination.h
@@ -34,7 +34,7 @@ class BoundsCheckElimination : public HOptimization {
         side_effects_(side_effects),
         induction_analysis_(induction_analysis) {}
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kBoundsCheckEliminationPassName = "BCE";
diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc
index 3addaeecd9..bdc395b52d 100644
--- a/compiler/optimizing/cha_guard_optimization.cc
+++ b/compiler/optimizing/cha_guard_optimization.cc
@@ -241,14 +241,15 @@ void CHAGuardVisitor::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
   GetGraph()->IncrementNumberOfCHAGuards();
 }
-void CHAGuardOptimization::Run() {
+bool CHAGuardOptimization::Run() {
   if (graph_->GetNumberOfCHAGuards() == 0) {
-    return;
+    return false;
   }
   CHAGuardVisitor visitor(graph_);
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     visitor.VisitBasicBlock(block);
   }
+  return true;
 }
 }  // namespace art
diff --git a/compiler/optimizing/cha_guard_optimization.h b/compiler/optimizing/cha_guard_optimization.h
index f14e07bd6c..d2c5a344b7 100644
--- a/compiler/optimizing/cha_guard_optimization.h
+++ b/compiler/optimizing/cha_guard_optimization.h
@@ -30,7 +30,7 @@ class CHAGuardOptimization : public HOptimization {
                       const char* name = kCHAGuardOptimizationPassName)
       : HOptimization(graph, name) {}
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kCHAGuardOptimizationPassName = "cha_guard_optimization";
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index 2e31d35584..d6c97552dc 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -25,11 +25,11 @@ namespace art {
-void CodeSinking::Run() {
+bool CodeSinking::Run() {
   HBasicBlock* exit = graph_->GetExitBlock();
   if (exit == nullptr) {
     // Infinite loop, just bail.
-    return;
+    return false;
   }
   // TODO(ngeoffray): we do not profile branches yet, so use throw instructions
   // as an indicator of an uncommon branch.
@@ -40,6 +40,7 @@ void CodeSinking::Run() {
       SinkCodeToUncommonBranch(exit_predecessor);
     }
   }
+  return true;
 }
 static bool IsInterestingInstruction(HInstruction* instruction) {
diff --git a/compiler/optimizing/code_sinking.h b/compiler/optimizing/code_sinking.h
index 836d9d4f67..5db0b6dcc5 100644
--- a/compiler/optimizing/code_sinking.h
+++ b/compiler/optimizing/code_sinking.h
@@ -33,7 +33,7 @@ class CodeSinking : public HOptimization {
               const char* name = kCodeSinkingPassName)
       : HOptimization(graph, name, stats) {}
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kCodeSinkingPassName = "code_sinking";
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index 6f11e628ee..bb78c2357e 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -68,13 +68,14 @@ class InstructionWithAbsorbingInputSimplifier : public HGraphVisitor {
 };
-void HConstantFolding::Run() {
+bool HConstantFolding::Run() {
   HConstantFoldingVisitor visitor(graph_);
   // Process basic blocks in reverse post-order in the dominator tree,
   // so that an instruction turned into a constant, used as input of
   // another instruction, may possibly be used to turn that second
   // instruction into a constant as well.
   visitor.VisitReversePostOrder();
+  return true;
 }
diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h
index 05c6df4a93..f4dbc805c4 100644
--- a/compiler/optimizing/constant_folding.h
+++ b/compiler/optimizing/constant_folding.h
@@ -41,7 +41,7 @@ class HConstantFolding : public HOptimization {
  public:
   HConstantFolding(HGraph* graph, const char* name) : HOptimization(graph, name) {}
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kConstantFoldingPassName = "constant_folding";
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 4a66cd2265..1a7f9266e9 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -250,13 +250,14 @@ class CFREVisitor : public HGraphVisitor {
   DISALLOW_COPY_AND_ASSIGN(CFREVisitor);
 };
-void ConstructorFenceRedundancyElimination::Run() {
+bool ConstructorFenceRedundancyElimination::Run() {
   CFREVisitor cfre_visitor(graph_, stats_);
   // Arbitrarily visit in reverse-post order.
   // The exact block visit order does not matter, as the algorithm
   // only operates on a single block at a time.
   cfre_visitor.VisitReversePostOrder();
+  return true;
 }
 }  // namespace art
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.h b/compiler/optimizing/constructor_fence_redundancy_elimination.h
index f4b06d5544..367d9f21a0 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.h
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.h
@@ -52,7 +52,7 @@ class ConstructorFenceRedundancyElimination : public HOptimization {
                                         const char* name = kCFREPassName)
       : HOptimization(graph, name, stats) {}
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kCFREPassName = "constructor_fence_redundancy_elimination";
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 9fa0f72e80..1dc10948cc 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -508,7 +508,7 @@ void HDeadCodeElimination::RemoveDeadInstructions() {
   }
 }
-void HDeadCodeElimination::Run() {
+bool HDeadCodeElimination::Run() {
   // Do not eliminate dead blocks if the graph has irreducible loops. We could
   // support it, but that would require changes in our loop representation to handle
   // multiple entry points. We decided it was not worth the complexity.
@@ -526,6 +526,7 @@ void HDeadCodeElimination::Run() {
   }
   SsaRedundantPhiElimination(graph_).Run();
   RemoveDeadInstructions();
+  return true;
 }
 }  // namespace art
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 92a7f562e1..90caa53764 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -32,7 +32,8 @@ class HDeadCodeElimination : public HOptimization {
   HDeadCodeElimination(HGraph* graph, OptimizingCompilerStats* stats, const char* name)
       : HOptimization(graph, name, stats) {}
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
+
   static constexpr const char* kDeadCodeEliminationPassName = "dead_code_elimination";
 private:
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index f05159b735..4863718518 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -352,7 +352,7 @@ class GlobalValueNumberer : public ValueObject {
     visited_blocks_.ClearAllBits();
   }
-  void Run();
+  bool Run();
 private:
   // Per-block GVN. Will also update the ValueSet of the dominated and
@@ -397,7 +397,7 @@ class GlobalValueNumberer : public ValueObject {
   DISALLOW_COPY_AND_ASSIGN(GlobalValueNumberer);
 };
-void GlobalValueNumberer::Run() {
+bool GlobalValueNumberer::Run() {
   DCHECK(side_effects_.HasRun());
   sets_[graph_->GetEntryBlock()->GetBlockId()] = new (&allocator_) ValueSet(&allocator_);
@@ -406,6 +406,7 @@
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     VisitBasicBlock(block);
   }
+  return true;
 }
 void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
@@ -557,9 +558,9 @@ HBasicBlock* GlobalValueNumberer::FindVisitedBlockWithRecyclableSet(
   return secondary_match;
 }
-void GVNOptimization::Run() {
+bool GVNOptimization::Run() {
   GlobalValueNumberer gvn(graph_, side_effects_);
-  gvn.Run();
+  return gvn.Run();
 }
 }  // namespace art
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index 4fdba26ebd..75cfff2140 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -31,7 +31,7 @@ class GVNOptimization : public HOptimization {
                   const char* pass_name = kGlobalValueNumberingPassName)
       : HOptimization(graph, pass_name), side_effects_(side_effects) {}
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kGlobalValueNumberingPassName = "GVN";
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index d270c6a28e..a4d638f4c6 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -243,7 +243,7 @@ HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph, const char* name)
       graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)) {
 }
-void HInductionVarAnalysis::Run() {
+bool HInductionVarAnalysis::Run() {
   // Detects sequence variables (generalized induction variables) during an outer to inner
   // traversal of all loops using Gerlek's algorithm. The order is important to enable
   // range analysis on outer loop while visiting inner loops.
@@ -253,6 +253,7 @@
       VisitLoop(graph_block->GetLoopInformation());
     }
   }
+  return !induction_.empty();
 }
 void HInductionVarAnalysis::VisitLoop(HLoopInformation* loop) {
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index acad77d35f..89fed2ec64 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -37,7 +37,7 @@ class HInductionVarAnalysis : public HOptimization {
  public:
   explicit HInductionVarAnalysis(HGraph* graph, const char* name = kInductionPassName);
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kInductionPassName = "induction_var_analysis";
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 8b10a78212..3800c96937 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -124,13 +124,18 @@ void HInliner::UpdateInliningBudget() {
   }
 }
-void HInliner::Run() {
-  if (graph_->IsDebuggable()) {
+bool HInliner::Run() {
+  if (compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits() == 0) {
+    // Inlining effectively disabled.
+    return false;
+  } else if (graph_->IsDebuggable()) {
     // For simplicity, we currently never inline when the graph is debuggable. This avoids
     // doing some logic in the runtime to discover if a method could have been inlined.
-    return;
+    return false;
   }
+  bool didInline = false;
+
   // Initialize the number of instructions for the method being compiled. Recursive calls
   // to HInliner::Run have already updated the instruction count.
   if (outermost_graph_ == graph_) {
@@ -171,7 +176,9 @@ void HInliner::Run() {
           call->GetDexMethodIndex(), /* with_signature */ false);
       // Tests prevent inlining by having $noinline$ in their method names.
       if (callee_name.find("$noinline$") == std::string::npos) {
-        if (!TryInline(call) && honor_inline_directives) {
+        if (TryInline(call)) {
+          didInline = true;
+        } else {
           bool should_have_inlined = (callee_name.find("$inline$") != std::string::npos);
           CHECK(!should_have_inlined) << "Could not inline " << callee_name;
         }
@@ -179,12 +186,16 @@ void HInliner::Run() {
       } else {
         DCHECK(!honor_inline_directives);
         // Normal case: try to inline.
-        TryInline(call);
+        if (TryInline(call)) {
+          didInline = true;
+        }
       }
     }
     instruction = next;
   }
 }
+
+  return didInline;
 }
 static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 02465d37ba..fb1c9af896 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -60,7 +60,7 @@ class HInliner : public HOptimization {
         handles_(handles),
         inline_stats_(nullptr) {}
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kInlinerPassName = "inliner";
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index d3cf9568c2..0fe16725f3 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -42,7 +42,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
         compiler_driver_(compiler_driver),
         stats_(stats) {}
-  void Run();
+  bool Run();
 private:
   void RecordSimplification() {
@@ -136,17 +136,18 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
   static constexpr int kMaxSamePositionSimplifications = 50;
 };
-void InstructionSimplifier::Run() {
+bool InstructionSimplifier::Run() {
   if (kTestInstructionClonerExhaustively) {
     CloneAndReplaceInstructionVisitor visitor(graph_);
     visitor.VisitReversePostOrder();
   }
   InstructionSimplifierVisitor visitor(graph_, codegen_, compiler_driver_, stats_);
-  visitor.Run();
+  return visitor.Run();
 }
-void InstructionSimplifierVisitor::Run() {
+bool InstructionSimplifierVisitor::Run() {
+  bool didSimplify = false;
   // Iterate in reverse post order to open up more simplifications to users
   // of instructions that got simplified.
   for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
@@ -156,10 +157,14 @@ void InstructionSimplifierVisitor::Run() {
     do {
       simplification_occurred_ = false;
       VisitBasicBlock(block);
+      if (simplification_occurred_) {
+        didSimplify = true;
+      }
     } while (simplification_occurred_ && (simplifications_at_current_position_ < kMaxSamePositionSimplifications));
     simplifications_at_current_position_ = 0;
   }
+  return didSimplify;
 }
 namespace {
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index 5e2045580b..f409e873de 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -49,7 +49,7 @@ class InstructionSimplifier : public HOptimization {
   static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 private:
   CodeGenerator* codegen_;
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 92081e30b1..37fcdb9d5c 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -283,9 +283,10 @@ void InstructionSimplifierArmVisitor::VisitUShr(HUShr* instruction) {
   }
 }
-void InstructionSimplifierArm::Run() {
+bool InstructionSimplifierArm::Run() {
   InstructionSimplifierArmVisitor visitor(graph_, stats_);
   visitor.VisitReversePostOrder();
+  return true;
 }
 }  // namespace arm
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
index 2f6572931f..f1a16efc61 100644
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -30,7 +30,7 @@ class InstructionSimplifierArm : public HOptimization {
   static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm";
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 };
 }  // namespace arm
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 1c44e5ac49..e0a627994d 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -278,9 +278,10 @@ void InstructionSimplifierArm64Visitor::VisitVecStore(HVecStore* instruction) {
   }
 }
-void InstructionSimplifierArm64::Run() {
+bool InstructionSimplifierArm64::Run() {
   InstructionSimplifierArm64Visitor visitor(graph_, stats_);
   visitor.VisitReversePostOrder();
+  return true;
 }
 }  // namespace arm64
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index d180a8dc46..8659c1f5f4 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -30,7 +30,7 @@ class InstructionSimplifierArm64 : public HOptimization {
   static constexpr const char* kInstructionSimplifierArm64PassName = "instruction_simplifier_arm64";
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 };
 }  // namespace arm64
diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc
index fa97401a0c..3bdf90f652 100644
--- a/compiler/optimizing/instruction_simplifier_mips.cc
+++ b/compiler/optimizing/instruction_simplifier_mips.cc
@@ -131,9 +131,10 @@ void InstructionSimplifierMipsVisitor::VisitArraySet(HArraySet* instruction) {
   }
 }
-void InstructionSimplifierMips::Run() {
+bool InstructionSimplifierMips::Run() {
   InstructionSimplifierMipsVisitor visitor(graph_, codegen_, stats_);
   visitor.VisitReversePostOrder();
+  return true;
 }
 }  // namespace mips
diff --git a/compiler/optimizing/instruction_simplifier_mips.h b/compiler/optimizing/instruction_simplifier_mips.h
index 6cb8affe85..94ef73d425 100644
--- a/compiler/optimizing/instruction_simplifier_mips.h
+++ b/compiler/optimizing/instruction_simplifier_mips.h
@@ -35,7 +35,7 @@ class InstructionSimplifierMips : public HOptimization {
   static constexpr const char* kInstructionSimplifierMipsPassName = "instruction_simplifier_mips";
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 private:
   CodeGeneratorMIPS* codegen_;
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index f8dc316e45..dfe6d791c6 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -178,7 +178,8 @@ bool IntrinsicsRecognizer::Recognize(HInvoke* invoke,
   return true;
 }
-void IntrinsicsRecognizer::Run() {
+bool IntrinsicsRecognizer::Run() {
+  bool didRecognize = false;
   ScopedObjectAccess soa(Thread::Current());
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
@@ -187,6 +188,7 @@
       if (inst->IsInvoke()) {
         bool wrong_invoke_type = false;
         if (Recognize(inst->AsInvoke(), /* art_method */ nullptr, &wrong_invoke_type)) {
+          didRecognize = true;
           MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
         } else if (wrong_invoke_type) {
           LOG(WARNING)
@@ -197,6 +199,7 @@
       }
     }
   }
+  return didRecognize;
 }
 std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic) {
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 1035cbc2c4..30cffac015 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -42,7 +42,7 @@ class IntrinsicsRecognizer : public HOptimization {
                        const char* name = kIntrinsicsRecognizerPassName)
       : HOptimization(graph, name, stats) {}
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   // Static helper that recognizes intrinsic call. Returns true on success.
   // If it fails due to invoke type mismatch, wrong_invoke_type is set.
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index d3a0376e9c..0edb23b857 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -78,7 +78,8 @@ static void UpdateLoopPhisIn(HEnvironment* environment, HLoopInformation* info)
   }
 }
-void LICM::Run() {
+bool LICM::Run() {
+  bool didLICM = false;
   DCHECK(side_effects_.HasRun());
   // Only used during debug.
@@ -157,6 +158,7 @@ void LICM::Run() {
         }
         instruction->MoveBefore(pre_header->GetLastInstruction());
         MaybeRecordStat(stats_, MethodCompilationStat::kLoopInvariantMoved);
+        didLICM = true;
       }
       if (!can_move && (instruction->CanThrow() || instruction->DoesAnyWrite())) {
@@ -167,6 +169,7 @@ void LICM::Run() {
       }
     }
   }
+  return didLICM;
 }
 }  // namespace art
diff --git a/compiler/optimizing/licm.h b/compiler/optimizing/licm.h
index ee567aeb20..f72d195ab2 100644
--- a/compiler/optimizing/licm.h
+++ b/compiler/optimizing/licm.h
@@ -33,7 +33,7 @@ class LICM : public HOptimization {
       : HOptimization(graph, name, stats),
         side_effects_(side_effects) {}
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kLoopInvariantCodeMotionPassName = "licm";
diff --git a/compiler/optimizing/load_store_analysis.cc b/compiler/optimizing/load_store_analysis.cc
index 8b1812a6de..7d7bb94933 100644
--- a/compiler/optimizing/load_store_analysis.cc
+++ b/compiler/optimizing/load_store_analysis.cc
@@ -152,7 +152,7 @@ bool HeapLocationCollector::CanArrayElementsAlias(const HInstruction* idx1,
   return true;
 }
-void LoadStoreAnalysis::Run() {
+bool LoadStoreAnalysis::Run() {
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     heap_location_collector_.VisitBasicBlock(block);
   }
@@ -160,22 +160,23 @@
   if (heap_location_collector_.GetNumberOfHeapLocations() > kMaxNumberOfHeapLocations) {
     // Bail out if there are too many heap locations to deal with.
     heap_location_collector_.CleanUp();
-    return;
+    return false;
   }
   if (!heap_location_collector_.HasHeapStores()) {
     // Without heap stores, this pass would act mostly as GVN on heap accesses.
     heap_location_collector_.CleanUp();
-    return;
+    return false;
   }
   if (heap_location_collector_.HasVolatile() || heap_location_collector_.HasMonitorOps()) {
     // Don't do load/store elimination if the method has volatile field accesses or
     // monitor operations, for now.
    // TODO: do it right.
     heap_location_collector_.CleanUp();
-    return;
+    return false;
   }
   heap_location_collector_.BuildAliasingMatrix();
+  return true;
 }
 }  // namespace art
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index 437e6be418..f84846d1b0 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -572,7 +572,7 @@ class LoadStoreAnalysis : public HOptimization {
     return heap_location_collector_;
   }
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kLoadStoreAnalysisPassName = "load_store_analysis";
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 237ecd3c10..d598ff592d 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -948,22 +948,22 @@ class LSEVisitor : public HGraphDelegateVisitor {
   DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
 };
-void LoadStoreElimination::Run() {
+bool LoadStoreElimination::Run() {
   if (graph_->IsDebuggable() || graph_->HasTryCatch()) {
     // Debugger may set heap values or trigger deoptimization of callers.
     // Try/catch support not implemented yet.
     // Skip this optimization.
-    return;
+    return false;
   }
   const HeapLocationCollector& heap_location_collector = lsa_.GetHeapLocationCollector();
   if (heap_location_collector.GetNumberOfHeapLocations() == 0) {
     // No HeapLocation information from LSA, skip this optimization.
-    return;
+    return false;
   }
   // TODO: analyze VecLoad/VecStore better.
   if (graph_->HasSIMD()) {
-    return;
+    return false;
   }
   LSEVisitor lse_visitor(graph_, heap_location_collector, side_effects_, stats_);
@@ -971,6 +971,7 @@
     lse_visitor.VisitBasicBlock(block);
   }
   lse_visitor.RemoveInstructions();
+  return true;
 }
 }  // namespace art
diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h
index 7153541baf..408386bd82 100644
--- a/compiler/optimizing/load_store_elimination.h
+++ b/compiler/optimizing/load_store_elimination.h
@@ -35,7 +35,7 @@ class LoadStoreElimination : public HOptimization {
         side_effects_(side_effects),
         lsa_(lsa) {}
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kLoadStoreEliminationPassName = "load_store_elimination";
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 1462404932..7f1b319c12 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -608,11 +608,11 @@ HLoopOptimization::HLoopOptimization(HGraph* graph,
           global_allocator_)) {
 }
-void HLoopOptimization::Run() {
+bool HLoopOptimization::Run() {
   // Skip if there is no loop or the graph has try-catch/irreducible loops.
   // TODO: make this less of a sledgehammer.
   if (!graph_->HasLoops() || graph_->HasTryCatch() || graph_->HasIrreducibleLoops()) {
-    return;
+    return false;
   }
   // Phase-local allocator.
@@ -620,7 +620,7 @@
   loop_allocator_ = &allocator;
   // Perform loop optimizations.
-  LocalRun();
+  bool didLoopOpt = LocalRun();
   if (top_loop_ == nullptr) {
     graph_->SetHasLoops(false);  // no more loops
   }
@@ -628,13 +628,16 @@
   // Detach.
   loop_allocator_ = nullptr;
   last_loop_ = top_loop_ = nullptr;
+
+  return didLoopOpt;
 }
 //
 // Loop setup and traversal.
 //
-void HLoopOptimization::LocalRun() {
+bool HLoopOptimization::LocalRun() {
+  bool didLoopOpt = false;
   // Build the linear order using the phase-local allocator. This step enables building
   // a loop hierarchy that properly reflects the outer-inner and previous-next relation.
   ScopedArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
@@ -666,7 +669,7 @@
     vector_map_ = &map;
     vector_permanent_map_ = &perm;
     // Traverse.
-    TraverseLoopsInnerToOuter(top_loop_);
+    didLoopOpt = TraverseLoopsInnerToOuter(top_loop_);
     // Detach.
     iset_ = nullptr;
     reductions_ = nullptr;
@@ -674,6 +677,7 @@
     vector_map_ = nullptr;
     vector_permanent_map_ = nullptr;
   }
+  return didLoopOpt;
 }
 void HLoopOptimization::AddLoop(HLoopInformation* loop_info) {
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index f9a31a34d4..11e969875e 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -43,7 +43,7 @@ class HLoopOptimization : public HOptimization {
                     OptimizingCompilerStats* stats,
                     const char* name = kLoopOptimizationPassName);
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kLoopOptimizationPassName = "loop_optimization";
@@ -123,7 +123,7 @@
   // Loop setup and traversal.
   //
-  void LocalRun();
+  bool LocalRun();
   void AddLoop(HLoopInformation* loop_info);
   void RemoveLoop(LoopNode* node);
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index c170f155fa..b00d686e5f 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -47,8 +47,9 @@ class HOptimization : public ArenaObject<kArenaAllocOptimization> {
   // 'instruction_simplifier$before_codegen'.
   const char* GetPassName() const { return pass_name_; }
-  // Perform the analysis itself.
-  virtual void Run() = 0;
+  // Perform the pass or analysis. Returns false if no optimizations occurred or no useful
+  // information was computed (this is best effort, returning true is always ok).
+  virtual bool Run() = 0;
 protected:
   HGraph* const graph_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index cadefc3b01..f68bcbe59f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -294,7 +294,7 @@ class OptimizingCompiler FINAL : public Compiler {
       REQUIRES_SHARED(Locks::mutator_lock_);
 private:
-  void RunOptimizations(HGraph* graph,
+  bool RunOptimizations(HGraph* graph,
                         CodeGenerator* codegen,
                         const DexCompilationUnit& dex_compilation_unit,
                         PassObserver* pass_observer,
@@ -314,20 +314,22 @@ class OptimizingCompiler FINAL : public Compiler {
                      handles);
     DCHECK_EQ(length, optimizations.size());
     // Run the optimization passes one by one.
+    bool change = false;
     for (size_t i = 0; i < length; ++i) {
       PassScope scope(optimizations[i]->GetPassName(), pass_observer);
-      optimizations[i]->Run();
+      change |= optimizations[i]->Run();
     }
+    return change;
   }
-  template <size_t length> void RunOptimizations(
+  template <size_t length> bool RunOptimizations(
       HGraph* graph,
       CodeGenerator* codegen,
       const DexCompilationUnit& dex_compilation_unit,
       PassObserver* pass_observer,
      VariableSizedHandleScope* handles,
       const OptimizationDef (&definitions)[length]) const {
-    RunOptimizations(
+    return RunOptimizations(
         graph, codegen, dex_compilation_unit, pass_observer, handles, definitions, length);
   }
@@ -366,13 +368,7 @@ class OptimizingCompiler FINAL : public Compiler {
                               ArtMethod* method,
                               VariableSizedHandleScope* handles) const;
-  void MaybeRunInliner(HGraph* graph,
-                       CodeGenerator* codegen,
-                       const DexCompilationUnit& dex_compilation_unit,
-                       PassObserver* pass_observer,
-                       VariableSizedHandleScope* handles) const;
-
-  void RunArchOptimizations(HGraph* graph,
+  bool RunArchOptimizations(HGraph* graph,
                             CodeGenerator* codegen,
                             const DexCompilationUnit& dex_compilation_unit,
                             PassObserver* pass_observer,
@@ -435,28 +431,7 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) {
       || instruction_set == InstructionSet::kX86_64;
 }
-void OptimizingCompiler::MaybeRunInliner(HGraph* graph,
-                                         CodeGenerator* codegen,
-                                         const DexCompilationUnit& dex_compilation_unit,
-                                         PassObserver* pass_observer,
-                                         VariableSizedHandleScope* handles) const {
-  const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
-  bool should_inline = (compiler_options.GetInlineMaxCodeUnits() > 0);
-  if (!should_inline) {
-    return;
-  }
-  OptimizationDef optimizations[] = {
-    OptDef(OptimizationPass::kInliner)
-  };
-  RunOptimizations(graph,
-                   codegen,
-                   dex_compilation_unit,
-                   pass_observer,
-                   handles,
-                   optimizations);
-}
-
-void OptimizingCompiler::RunArchOptimizations(HGraph* graph,
+bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
                                               CodeGenerator* codegen,
                                               const DexCompilationUnit& dex_compilation_unit,
                                               PassObserver* pass_observer,
@@ -471,13 +446,12 @@ void OptimizingCompiler::RunArchOptimizations(HGraph* graph,
         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
         OptDef(OptimizationPass::kScheduling)
       };
-      RunOptimizations(graph,
-                       codegen,
-                       dex_compilation_unit,
-                       pass_observer,
-                       handles,
-                       arm_optimizations);
-      break;
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              arm_optimizations);
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
@@ -488,13 +462,12 @@ void OptimizingCompiler::RunArchOptimizations(HGraph* graph,
        OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
         OptDef(OptimizationPass::kScheduling)
       };
-      RunOptimizations(graph,
-                       codegen,
-                       dex_compilation_unit,
-                       pass_observer,
-                       handles,
-                       arm64_optimizations);
-      break;
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              arm64_optimizations);
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
@@ -505,13 +478,12 @@ void OptimizingCompiler::RunArchOptimizations(HGraph* graph,
         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
         OptDef(OptimizationPass::kPcRelativeFixupsMips)
       };
-      RunOptimizations(graph,
-                       codegen,
-                       dex_compilation_unit,
-                       pass_observer,
-                       handles,
-                       mips_optimizations);
-      break;
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              mips_optimizations);
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
@@ -520,13 +492,12 @@
         OptDef(OptimizationPass::kSideEffectsAnalysis),
         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch")
       };
-      RunOptimizations(graph,
-                       codegen,
-                       dex_compilation_unit,
-                       pass_observer,
-                       handles,
-                       mips64_optimizations);
-      break;
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              mips64_optimizations);
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
@@ -537,13 +508,12 @@ void OptimizingCompiler::RunArchOptimizations(HGraph* graph,
         OptDef(OptimizationPass::kPcRelativeFixupsX86),
         OptDef(OptimizationPass::kX86MemoryOperandGeneration)
       };
-      RunOptimizations(graph,
-                       codegen,
-                       dex_compilation_unit,
-                       pass_observer,
-                       handles,
-                       x86_optimizations);
-      break;
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              x86_optimizations);
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
@@ -553,17 +523,16 @@
         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
         OptDef(OptimizationPass::kX86MemoryOperandGeneration)
       };
-      RunOptimizations(graph,
-                       codegen,
-                       dex_compilation_unit,
-                       pass_observer,
-                       handles,
-                       x86_64_optimizations);
-      break;
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              x86_64_optimizations);
     }
 #endif
     default:
-      break;
+      return false;
   }
 }
@@ -626,23 +595,13 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
     return;
   }
-  OptimizationDef optimizations1[] = {
+  OptimizationDef optimizations[] = {
     OptDef(OptimizationPass::kIntrinsicsRecognizer),
     OptDef(OptimizationPass::kSharpening),
     OptDef(OptimizationPass::kConstantFolding),
     OptDef(OptimizationPass::kInstructionSimplifier),
-    OptDef(OptimizationPass::kDeadCodeElimination, "dead_code_elimination$initial")
-  };
-  RunOptimizations(graph,
-                   codegen,
-                   dex_compilation_unit,
-                   pass_observer,
-                   handles,
-                   optimizations1);
-
-  MaybeRunInliner(graph, codegen, dex_compilation_unit, pass_observer, handles);
-
-  OptimizationDef optimizations2[] = {
+    OptDef(OptimizationPass::kDeadCodeElimination, "dead_code_elimination$initial"),
+    OptDef(OptimizationPass::kInliner),
     OptDef(OptimizationPass::kSideEffectsAnalysis, "side_effects$before_gvn"),
     OptDef(OptimizationPass::kGlobalValueNumbering),
     OptDef(OptimizationPass::kSelectGenerator),
@@ -676,7 +635,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
                    dex_compilation_unit,
                    pass_observer,
                    handles,
-                   optimizations2);
+                   optimizations);
   RunArchOptimizations(graph, codegen, dex_compilation_unit, pass_observer, handles);
 }
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index 0102254206..f18ecc1458 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -128,20 +128,21 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
   HMipsComputeBaseMethodAddress* base_;
 };
-void PcRelativeFixups::Run() {
+bool PcRelativeFixups::Run() {
   CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen_);
   if (mips_codegen->GetInstructionSetFeatures().IsR6()) {
     // Do nothing for R6 because it has PC-relative addressing.
-    return;
+    return false;
   }
   if (graph_->HasIrreducibleLoops()) {
     // Do not run this optimization, as irreducible loops do not work with an instruction
     // that can be live-in at the irreducible loop header.
-    return;
+    return false;
   }
   PCRelativeHandlerVisitor visitor(graph_, codegen_);
   visitor.VisitInsertionOrder();
   visitor.MoveBaseIfNeeded();
+  return true;
 }
 }  // namespace mips
diff --git a/compiler/optimizing/pc_relative_fixups_mips.h b/compiler/optimizing/pc_relative_fixups_mips.h
index ec2c711f8d..6dd1ee0db2 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.h
+++ b/compiler/optimizing/pc_relative_fixups_mips.h
@@ -34,7 +34,7 @@ class PcRelativeFixups : public HOptimization {
   static constexpr const char* kPcRelativeFixupsMipsPassName = "pc_relative_fixups_mips";
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 private:
   CodeGenerator* codegen_;
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 647336b6b9..9049457da5 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -256,10 +256,11 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
   HX86ComputeBaseMethodAddress* base_;
 };
-void PcRelativeFixups::Run() {
+bool PcRelativeFixups::Run() {
   PCRelativeHandlerVisitor visitor(graph_, codegen_);
   visitor.VisitInsertionOrder();
   visitor.MoveBaseIfNeeded();
+  return true;
 }
 }  // namespace x86
diff --git a/compiler/optimizing/pc_relative_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h
index 72fa71ea94..db56b7f053 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.h
+++ b/compiler/optimizing/pc_relative_fixups_x86.h
@@ -34,7 +34,7 @@ class PcRelativeFixups : public HOptimization {
   static constexpr const char* kPcRelativeFixupsX86PassName = "pc_relative_fixups_x86";
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 private:
   CodeGenerator* codegen_;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 4030883a57..c47c69af67 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -348,7 +348,7 @@ static void BoundTypeForClassCheck(HInstruction* check) {
   }
 }
-void ReferenceTypePropagation::Run() {
+bool ReferenceTypePropagation::Run() {
   RTPVisitor visitor(graph_, class_loader_, hint_dex_cache_, &handle_cache_, is_first_run_);
   // To properly propagate type info we need to visit in the dominator-based order.
@@ -360,6 +360,7 @@
   visitor.ProcessWorklist();
   ValidateTypes();
+  return true;
 }
 void ReferenceTypePropagation::RTPVisitor::VisitBasicBlock(HBasicBlock* block) {
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index fd4dad2b45..400852f4dc 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -40,7 +40,7 @@ class ReferenceTypePropagation : public HOptimization {
   // Visit a single instruction.
   void Visit(HInstruction* instruction);
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   // Returns true if klass is admissible to the propagation: non-null and resolved.
   // For an array type, we also check if the component type is admissible.
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index bca538fb17..e014efaf5c 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -774,7 +774,7 @@ bool HScheduler::IsSchedulingBarrier(const HInstruction* instr) const {
          instr->IsSuspendCheck();
 }
-void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
+bool HInstructionScheduling::Run(bool only_optimize_loop_blocks,
                                  bool schedule_randomly) {
 #if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
   // Phase-local allocator that allocates scheduler internal data structures like
@@ -814,6 +814,7 @@ void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
     default:
       break;
   }
+  return true;
 }
 }  // namespace art
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index dfa077f7de..51cd20aea9 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -508,10 +508,11 @@ class HInstructionScheduling : public HOptimization {
         codegen_(cg),
         instruction_set_(instruction_set) {}
-  void Run() {
-    Run(/*only_optimize_loop_blocks*/ true, /*schedule_randomly*/ false);
+  bool Run() OVERRIDE {
+    return Run(/*only_optimize_loop_blocks*/ true, /*schedule_randomly*/ false);
   }
-  void Run(bool only_optimize_loop_blocks, bool schedule_randomly);
+
+  bool Run(bool only_optimize_loop_blocks, bool schedule_randomly);
   static constexpr const char* kInstructionSchedulingPassName = "scheduler";
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index f9acf5aa9a..0d0f7cc748 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -90,7 +90,8 @@ static HPhi* GetSingleChangedPhi(HBasicBlock* block, size_t index1, size_t index
   return select_phi;
 }
-void HSelectGenerator::Run() {
+bool HSelectGenerator::Run() {
+  bool didSelect = false;
   // Select cache with local allocator.
   ScopedArenaAllocator allocator(graph_->GetArenaStack());
   ScopedArenaSafeMap<HInstruction*, HSelect*> cache(
@@ -211,7 +212,9 @@ void HSelectGenerator::Run() {
     // entry block. Any following blocks would have had the join block
     // as a dominator, and `MergeWith` handles changing that to the
     // entry block.
+    didSelect = true;
   }
+  return didSelect;
 }
 }  // namespace art
diff --git a/compiler/optimizing/select_generator.h b/compiler/optimizing/select_generator.h
index bda57fd5c8..d24d2264b2 100644
--- a/compiler/optimizing/select_generator.h
+++ b/compiler/optimizing/select_generator.h
@@ -68,7 +68,7 @@ class HSelectGenerator : public HOptimization {
                    OptimizingCompilerStats* stats,
                    const char* name = kSelectGeneratorPassName);
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kSelectGeneratorPassName = "select_generator";
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 70b45763af..6541043046 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -36,7 +36,7 @@ namespace art {
-void HSharpening::Run() {
+bool HSharpening::Run() {
   // We don't care about the order of the blocks here.
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
@@ -51,6 +51,7 @@ void HSharpening::Run() {
       // because we know the type better when inlining.
     }
   }
+  return true;
 }
 static bool IsInBootImage(ArtMethod* method) {
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index fa3e948eeb..9ccbcaf220 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -40,7 +40,7 @@ class HSharpening : public HOptimization {
         codegen_(codegen),
         compiler_driver_(compiler_driver) { }
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kSharpeningPassName = "sharpening";
diff --git a/compiler/optimizing/side_effects_analysis.cc b/compiler/optimizing/side_effects_analysis.cc
index 6d82e8e06d..ba97b43de9 100644
--- a/compiler/optimizing/side_effects_analysis.cc
+++ b/compiler/optimizing/side_effects_analysis.cc
@@ -18,7 +18,7 @@ namespace art {
-void SideEffectsAnalysis::Run() {
+bool SideEffectsAnalysis::Run() {
   // Inlining might have created more blocks, so we need to increase the size
   // if needed.
   block_effects_.resize(graph_->GetBlocks().size());
@@ -69,6 +69,7 @@ void SideEffectsAnalysis::Run() {
     }
   }
   has_run_ = true;
+  return true;
 }
 SideEffects SideEffectsAnalysis::GetLoopEffects(HBasicBlock* block) const {
diff --git a/compiler/optimizing/side_effects_analysis.h b/compiler/optimizing/side_effects_analysis.h
index c0f81a9c54..56a01e63f1 100644
--- a/compiler/optimizing/side_effects_analysis.h
+++ b/compiler/optimizing/side_effects_analysis.h
@@ -37,7 +37,7 @@ class SideEffectsAnalysis : public HOptimization {
   SideEffects GetBlockEffects(HBasicBlock* block) const;
   // Compute side effects of individual blocks and loops.
-  void Run();
+  bool Run();
   bool HasRun() const { return has_run_; }
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index cb27ded17a..5370f43b4f 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -23,9 +23,10 @@ namespace art {
-void SsaDeadPhiElimination::Run() {
+bool SsaDeadPhiElimination::Run() {
   MarkDeadPhis();
   EliminateDeadPhis();
+  return true;
 }
 void SsaDeadPhiElimination::MarkDeadPhis() {
@@ -122,7 +123,7 @@ void SsaDeadPhiElimination::EliminateDeadPhis() {
   }
 }
-void SsaRedundantPhiElimination::Run() {
+bool SsaRedundantPhiElimination::Run() {
   // Use local allocator for allocating memory used by this optimization.
   ScopedArenaAllocator allocator(graph_->GetArenaStack());
@@ -255,6 +256,7 @@
       current->GetBlock()->RemovePhi(current);
     }
   }
+  return true;
 }
 }  // namespace art
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index 11d5837eb5..ee859e834c 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -31,7 +31,7 @@ class SsaDeadPhiElimination : public HOptimization {
   explicit SsaDeadPhiElimination(HGraph* graph)
       : HOptimization(graph, kSsaDeadPhiEliminationPassName) {}
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   void MarkDeadPhis();
   void EliminateDeadPhis();
@@ -53,7 +53,7 @@ class SsaRedundantPhiElimination : public HOptimization {
   explicit SsaRedundantPhiElimination(HGraph* graph)
       : HOptimization(graph, kSsaRedundantPhiEliminationPassName) {}
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kSsaRedundantPhiEliminationPassName = "redundant_phi_elimination";
diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc
index 0271850f29..f0069c0e09 100644
--- a/compiler/optimizing/x86_memory_gen.cc
+++ b/compiler/optimizing/x86_memory_gen.cc
@@ -76,9 +76,10 @@ X86MemoryOperandGeneration::X86MemoryOperandGeneration(HGraph* graph,
       do_implicit_null_checks_(codegen->GetCompilerOptions().GetImplicitNullChecks()) {
 }
-void X86MemoryOperandGeneration::Run() {
+bool X86MemoryOperandGeneration::Run() {
   MemoryOperandVisitor visitor(graph_, do_implicit_null_checks_);
   visitor.VisitInsertionOrder();
+  return true;
 }
 }  // namespace x86
diff --git a/compiler/optimizing/x86_memory_gen.h b/compiler/optimizing/x86_memory_gen.h
index 5f15d9f1e6..b254000f28 100644
--- a/compiler/optimizing/x86_memory_gen.h
+++ b/compiler/optimizing/x86_memory_gen.h
@@ -31,7 +31,7 @@ class X86MemoryOperandGeneration : public HOptimization {
                              CodeGenerator* codegen,
                              OptimizingCompilerStats* stats);
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
   static constexpr const char* kX86MemoryOperandGenerationPassName = "x86_memory_operand_generation";
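A closing note on the two return conventions visible throughout the diff, sketched with invented types (not ART code): passes that bail out before doing any work return false immediately, while passes that transform incrementally accumulate a flag and return it, as LICM, the inliner, and the instruction simplifier do above.

// Sketch only: hypothetical types and helper, illustrating bail-out vs. accumulation.
#include <vector>

struct Loop {};
struct Graph {
  std::vector<Loop*> loops;
};

// Stand-in for the real work; a real pass would move invariant instructions here.
bool HoistInvariantsFrom(Loop* /*loop*/) { return false; }

bool RunHoistingPass(Graph* graph) {
  if (graph->loops.empty()) {
    return false;  // early bail-out: the pass cannot possibly change anything
  }
  bool did_hoist = false;
  for (Loop* loop : graph->loops) {
    did_hoist |= HoistInvariantsFrom(loop);  // accumulate, never short-circuit
  }
  return did_hoist;
}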