From 0eca098c263e3f93d9996dbd6070a53d09d4a6c6 Mon Sep 17 00:00:00 2001 From: Santiago Aboy Solanes Date: Fri, 8 Apr 2022 18:00:48 +0100 Subject: Enable LoopOptimization for graphs with try catch blocks We can enable the optimization when the graph has try catch blocks, but we do not perform the optimizations if the loops themselves have try catch blocks. Bug: 227283906 Change-Id: I8889d2ed7a5a260d5c2dcbbc5184cb7eaf3a8f9f --- compiler/optimizing/loop_optimization.cc | 107 ++++++++++++++++++++++--------- 1 file changed, 75 insertions(+), 32 deletions(-) (limited to 'compiler/optimizing/loop_optimization.cc') diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc index 77dfe68bf6..2d7c20825c 100644 --- a/compiler/optimizing/loop_optimization.cc +++ b/compiler/optimizing/loop_optimization.cc @@ -491,9 +491,9 @@ HLoopOptimization::HLoopOptimization(HGraph* graph, } bool HLoopOptimization::Run() { - // Skip if there is no loop or the graph has try-catch/irreducible loops. + // Skip if there is no loop or the graph has irreducible loops. // TODO: make this less of a sledgehammer. - if (!graph_->HasLoops() || graph_->HasTryCatch() || graph_->HasIrreducibleLoops()) { + if (!graph_->HasLoops() || graph_->HasIrreducibleLoops()) { return false; } @@ -502,7 +502,7 @@ bool HLoopOptimization::Run() { loop_allocator_ = &allocator; // Perform loop optimizations. - bool didLoopOpt = LocalRun(); + const bool did_loop_opt = LocalRun(); if (top_loop_ == nullptr) { graph_->SetHasLoops(false); // no more loops } @@ -511,7 +511,7 @@ bool HLoopOptimization::Run() { loop_allocator_ = nullptr; last_loop_ = top_loop_ = nullptr; - return didLoopOpt; + return did_loop_opt; } // @@ -519,7 +519,6 @@ bool HLoopOptimization::Run() { // bool HLoopOptimization::LocalRun() { - bool didLoopOpt = false; // Build the linear order using the phase-local allocator. 
This step enables building // a loop hierarchy that properly reflects the outer-inner and previous-next relation. ScopedArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder)); @@ -532,34 +531,37 @@ bool HLoopOptimization::LocalRun() { } } + // TODO(solanes): How can `top_loop_` be null if `graph_->HasLoops()` is true? + if (top_loop_ == nullptr) { + return false; + } + // Traverse the loop hierarchy inner-to-outer and optimize. Traversal can use // temporary data structures using the phase-local allocator. All new HIR // should use the global allocator. - if (top_loop_ != nullptr) { - ScopedArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization)); - ScopedArenaSafeMap<HInstruction*, HInstruction*> reds( - std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization)); - ScopedArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization)); - ScopedArenaSafeMap<HInstruction*, HInstruction*> map( - std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization)); - ScopedArenaSafeMap<HInstruction*, HInstruction*> perm( - std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization)); - // Attach. - iset_ = &iset; - reductions_ = &reds; - vector_refs_ = &refs; - vector_map_ = &map; - vector_permanent_map_ = &perm; - // Traverse. - didLoopOpt = TraverseLoopsInnerToOuter(top_loop_); - // Detach. - iset_ = nullptr; - reductions_ = nullptr; - vector_refs_ = nullptr; - vector_map_ = nullptr; - vector_permanent_map_ = nullptr; - } - return didLoopOpt; + ScopedArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization)); + ScopedArenaSafeMap<HInstruction*, HInstruction*> reds( + std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization)); + ScopedArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization)); + ScopedArenaSafeMap<HInstruction*, HInstruction*> map( + std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization)); + ScopedArenaSafeMap<HInstruction*, HInstruction*> perm( + std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization)); + // Attach. 
+ iset_ = &iset; + reductions_ = &reds; + vector_refs_ = &refs; + vector_map_ = &map; + vector_permanent_map_ = &perm; + // Traverse. + const bool did_loop_opt = TraverseLoopsInnerToOuter(top_loop_); + // Detach. + iset_ = nullptr; + reductions_ = nullptr; + vector_refs_ = nullptr; + vector_map_ = nullptr; + vector_permanent_map_ = nullptr; + return did_loop_opt; } void HLoopOptimization::AddLoop(HLoopInformation* loop_info) { @@ -618,6 +620,18 @@ bool HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) { induction_range_.ReVisit(node->loop_info); changed = true; } + + CalculateAndSetTryCatchKind(node); + if (node->try_catch_kind == LoopNode::TryCatchKind::kHasTryCatch) { + // The current optimizations assume that the loops do not contain try/catches. + // TODO(solanes, 227283906): Assess if we can modify them to work with try/catches. + continue; + } + + DCHECK(node->try_catch_kind == LoopNode::TryCatchKind::kNoTryCatch) + << "kind: " << static_cast<int>(node->try_catch_kind) + << ". LoopOptimization requires the loops to not have try catches."; + // Repeat simplifications in the loop-body until no more changes occur. // Note that since each simplification consists of eliminating code (without // introducing new code), this process is always finite. @@ -635,6 +649,37 @@ bool HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) { return changed; } +void HLoopOptimization::CalculateAndSetTryCatchKind(LoopNode* node) { + DCHECK(node != nullptr); + DCHECK(node->try_catch_kind == LoopNode::TryCatchKind::kUnknown) + << "kind: " << static_cast<int>(node->try_catch_kind) + << ". SetTryCatchKind should be called only once per LoopNode."; + + // If a inner loop has a try catch, then the outer loop has one too (as it contains `inner`). + // Knowing this, we could skip iterating through all of the outer loop's parents with a simple + // check. 
+ for (LoopNode* inner = node->inner; inner != nullptr; inner = inner->next) { + DCHECK(inner->try_catch_kind != LoopNode::TryCatchKind::kUnknown) + << "kind: " << static_cast<int>(inner->try_catch_kind) + << ". Should have updated the inner loop before the outer loop."; + + if (inner->try_catch_kind == LoopNode::TryCatchKind::kHasTryCatch) { + node->try_catch_kind = LoopNode::TryCatchKind::kHasTryCatch; + return; + } + } + + for (HBlocksInLoopIterator it_loop(*node->loop_info); !it_loop.Done(); it_loop.Advance()) { + HBasicBlock* block = it_loop.Current(); + if (block->GetTryCatchInformation() != nullptr) { + node->try_catch_kind = LoopNode::TryCatchKind::kHasTryCatch; + return; + } + } + + node->try_catch_kind = LoopNode::TryCatchKind::kNoTryCatch; +} + // // Optimization. // @@ -782,8 +827,6 @@ bool HLoopOptimization::OptimizeInnerLoop(LoopNode* node) { return TryOptimizeInnerLoopFinite(node) || TryPeelingAndUnrolling(node); } - - // // Scalar loop peeling and unrolling: generic part methods. // -- cgit v1.2.3-59-g8ed1b