summaryrefslogtreecommitdiff
path: root/compiler/optimizing
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--compiler/optimizing/loop_optimization.cc107
-rw-r--r--compiler/optimizing/loop_optimization.h18
-rw-r--r--compiler/optimizing/superblock_cloner.cc13
3 files changed, 103 insertions, 35 deletions
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 77dfe68bf6..2d7c20825c 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -491,9 +491,9 @@ HLoopOptimization::HLoopOptimization(HGraph* graph,
}
bool HLoopOptimization::Run() {
- // Skip if there is no loop or the graph has try-catch/irreducible loops.
+ // Skip if there is no loop or the graph has irreducible loops.
// TODO: make this less of a sledgehammer.
- if (!graph_->HasLoops() || graph_->HasTryCatch() || graph_->HasIrreducibleLoops()) {
+ if (!graph_->HasLoops() || graph_->HasIrreducibleLoops()) {
return false;
}
@@ -502,7 +502,7 @@ bool HLoopOptimization::Run() {
loop_allocator_ = &allocator;
// Perform loop optimizations.
- bool didLoopOpt = LocalRun();
+ const bool did_loop_opt = LocalRun();
if (top_loop_ == nullptr) {
graph_->SetHasLoops(false); // no more loops
}
@@ -511,7 +511,7 @@ bool HLoopOptimization::Run() {
loop_allocator_ = nullptr;
last_loop_ = top_loop_ = nullptr;
- return didLoopOpt;
+ return did_loop_opt;
}
//
@@ -519,7 +519,6 @@ bool HLoopOptimization::Run() {
//
bool HLoopOptimization::LocalRun() {
- bool didLoopOpt = false;
// Build the linear order using the phase-local allocator. This step enables building
// a loop hierarchy that properly reflects the outer-inner and previous-next relation.
ScopedArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
@@ -532,34 +531,37 @@ bool HLoopOptimization::LocalRun() {
}
}
+ // TODO(solanes): How can `top_loop_` be null if `graph_->HasLoops()` is true?
+ if (top_loop_ == nullptr) {
+ return false;
+ }
+
// Traverse the loop hierarchy inner-to-outer and optimize. Traversal can use
// temporary data structures using the phase-local allocator. All new HIR
// should use the global allocator.
- if (top_loop_ != nullptr) {
- ScopedArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ScopedArenaSafeMap<HInstruction*, HInstruction*> reds(
- std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ScopedArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ScopedArenaSafeMap<HInstruction*, HInstruction*> map(
- std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ScopedArenaSafeMap<HInstruction*, HInstruction*> perm(
- std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- // Attach.
- iset_ = &iset;
- reductions_ = &reds;
- vector_refs_ = &refs;
- vector_map_ = &map;
- vector_permanent_map_ = &perm;
- // Traverse.
- didLoopOpt = TraverseLoopsInnerToOuter(top_loop_);
- // Detach.
- iset_ = nullptr;
- reductions_ = nullptr;
- vector_refs_ = nullptr;
- vector_map_ = nullptr;
- vector_permanent_map_ = nullptr;
- }
- return didLoopOpt;
+ ScopedArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ ScopedArenaSafeMap<HInstruction*, HInstruction*> reds(
+ std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ ScopedArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ ScopedArenaSafeMap<HInstruction*, HInstruction*> map(
+ std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ ScopedArenaSafeMap<HInstruction*, HInstruction*> perm(
+ std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ // Attach.
+ iset_ = &iset;
+ reductions_ = &reds;
+ vector_refs_ = &refs;
+ vector_map_ = &map;
+ vector_permanent_map_ = &perm;
+ // Traverse.
+ const bool did_loop_opt = TraverseLoopsInnerToOuter(top_loop_);
+ // Detach.
+ iset_ = nullptr;
+ reductions_ = nullptr;
+ vector_refs_ = nullptr;
+ vector_map_ = nullptr;
+ vector_permanent_map_ = nullptr;
+ return did_loop_opt;
}
void HLoopOptimization::AddLoop(HLoopInformation* loop_info) {
@@ -618,6 +620,18 @@ bool HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) {
induction_range_.ReVisit(node->loop_info);
changed = true;
}
+
+ CalculateAndSetTryCatchKind(node);
+ if (node->try_catch_kind == LoopNode::TryCatchKind::kHasTryCatch) {
+ // The current optimizations assume that the loops do not contain try/catches.
+ // TODO(solanes, 227283906): Assess if we can modify them to work with try/catches.
+ continue;
+ }
+
+ DCHECK(node->try_catch_kind == LoopNode::TryCatchKind::kNoTryCatch)
+ << "kind: " << static_cast<int>(node->try_catch_kind)
+ << ". LoopOptimization requires the loops to not have try catches.";
+
// Repeat simplifications in the loop-body until no more changes occur.
// Note that since each simplification consists of eliminating code (without
// introducing new code), this process is always finite.
@@ -635,6 +649,37 @@ bool HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) {
return changed;
}
+void HLoopOptimization::CalculateAndSetTryCatchKind(LoopNode* node) {
+ DCHECK(node != nullptr);
+ DCHECK(node->try_catch_kind == LoopNode::TryCatchKind::kUnknown)
+ << "kind: " << static_cast<int>(node->try_catch_kind)
+ << ". SetTryCatchKind should be called only once per LoopNode.";
+
+ // If an inner loop has a try catch, then the outer loop has one too (as it contains `inner`).
+ // Knowing this, we could skip iterating through all of the outer loop's parents with a simple
+ // check.
+ for (LoopNode* inner = node->inner; inner != nullptr; inner = inner->next) {
+ DCHECK(inner->try_catch_kind != LoopNode::TryCatchKind::kUnknown)
+ << "kind: " << static_cast<int>(inner->try_catch_kind)
+ << ". Should have updated the inner loop before the outer loop.";
+
+ if (inner->try_catch_kind == LoopNode::TryCatchKind::kHasTryCatch) {
+ node->try_catch_kind = LoopNode::TryCatchKind::kHasTryCatch;
+ return;
+ }
+ }
+
+ for (HBlocksInLoopIterator it_loop(*node->loop_info); !it_loop.Done(); it_loop.Advance()) {
+ HBasicBlock* block = it_loop.Current();
+ if (block->GetTryCatchInformation() != nullptr) {
+ node->try_catch_kind = LoopNode::TryCatchKind::kHasTryCatch;
+ return;
+ }
+ }
+
+ node->try_catch_kind = LoopNode::TryCatchKind::kNoTryCatch;
+}
+
//
// Optimization.
//
@@ -782,8 +827,6 @@ bool HLoopOptimization::OptimizeInnerLoop(LoopNode* node) {
return TryOptimizeInnerLoopFinite(node) || TryPeelingAndUnrolling(node);
}
-
-
//
// Scalar loop peeling and unrolling: generic part methods.
//
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 3acd5b191b..b17861648f 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -57,12 +57,23 @@ class HLoopOptimization : public HOptimization {
outer(nullptr),
inner(nullptr),
previous(nullptr),
- next(nullptr) {}
+ next(nullptr),
+ try_catch_kind(TryCatchKind::kUnknown) {}
+
+ enum class TryCatchKind {
+ kUnknown,
+ // We set `kHasTryCatch` if the loop itself contains a try catch, or if the loop
+ // is inside of an outer try catch.
+ kHasTryCatch,
+ kNoTryCatch
+ };
+
HLoopInformation* loop_info;
LoopNode* outer;
LoopNode* inner;
LoopNode* previous;
LoopNode* next;
+ TryCatchKind try_catch_kind;
};
/*
@@ -131,6 +142,11 @@ class HLoopOptimization : public HOptimization {
// Returns true if loops nested inside current loop (node) have changed.
bool TraverseLoopsInnerToOuter(LoopNode* node);
+ // Calculates `node`'s `try_catch_kind` and sets it to:
+ // 1) kHasTryCatch if it has try catches (or if it's inside of an outer try catch)
+ // 2) kNoTryCatch otherwise.
+ void CalculateAndSetTryCatchKind(LoopNode* node);
+
//
// Optimization.
//
diff --git a/compiler/optimizing/superblock_cloner.cc b/compiler/optimizing/superblock_cloner.cc
index b46d193541..a5f919c31c 100644
--- a/compiler/optimizing/superblock_cloner.cc
+++ b/compiler/optimizing/superblock_cloner.cc
@@ -855,11 +855,20 @@ void SuperblockCloner::SetSuccessorRemappingInfo(const HEdgeSet* remap_orig_inte
}
bool SuperblockCloner::IsSubgraphClonable() const {
- // TODO: Support irreducible graphs and graphs with try-catch.
- if (graph_->HasIrreducibleLoops() || graph_->HasTryCatch()) {
+ // TODO: Support irreducible graphs and subgraphs with try-catch.
+ if (graph_->HasIrreducibleLoops()) {
return false;
}
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ if (!IsInOrigBBSet(block)) {
+ continue;
+ }
+ if (block->GetTryCatchInformation() != nullptr) {
+ return false;
+ }
+ }
+
HInstructionMap live_outs(
std::less<HInstruction*>(), graph_->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));