author Nicolas Geoffray <ngeoffray@google.com> 2016-01-05 15:55:41 +0000
committer Nicolas Geoffray <ngeoffray@google.com> 2016-01-14 15:00:20 +0000
commit 15bd22849ee6a1ffb3fb3630f686c2870bdf1bbc
tree a261601589163faa4538bcf1c9d156e8ec4a42b3 /compiler/optimizing/register_allocator.cc
parent 5b7b5ddb515828c93f0c2aec67aa513c32d0de22
Implement irreducible loop support in optimizing.
So we don't fall back to the interpreter in the presence of irreducible loops.

Implications:
- A loop pre-header does not necessarily dominate a loop header.
- Non-constant redundant phis will be kept in loop headers, to satisfy our
  linear scan register allocation algorithm.
- Whole-graph optimizations, such as gvn, licm, lse, and dce, need to know
  when they are dealing with irreducible loops.

Change-Id: I2cea8934ce0b40162d215353497c7f77d6c9137e
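For context, an irreducible loop is a cycle in the control-flow graph that can be entered at more than one block, so no single header dominates the whole loop (which is why the pre-header and dominance assumptions above break down). The C++ sketch below is purely illustrative and not part of this patch; the function and label names are invented:

// Illustrative only: the cycle between `body` and `latch` can be entered
// either by falling through from the top or via `goto latch`, so it has two
// entry points and neither block dominates the other, making it irreducible.
static int IrreducibleExample(bool enter_at_latch, int n) {
  int sum = 0;
  if (enter_at_latch) goto latch;  // Second entry into the cycle.
 body:
  sum += n;
  if (sum > 100) return sum;
 latch:
  --n;
  if (n > 0) goto body;
  return sum;
}

Structured Java source cannot express this directly, but dex bytecode (for example, output of obfuscators or hand-written assemblers) can, which is why the optimizing compiler handles it here instead of falling back to the interpreter.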
Diffstat (limited to 'compiler/optimizing/register_allocator.cc')
-rw-r--r--  compiler/optimizing/register_allocator.cc  | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index d399bc2d7a..5ab4547e22 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -179,9 +179,11 @@ void RegisterAllocator::AllocateRegistersInternal() {
ProcessInstruction(inst_it.Current());
}
- if (block->IsCatchBlock()) {
- // By blocking all registers at the top of each catch block, we force
- // intervals used after catch to spill.
+ if (block->IsCatchBlock() ||
+ (block->GetLoopInformation() != nullptr && block->GetLoopInformation()->IsIrreducible())) {
+ // By blocking all registers at the top of each catch block or irreducible loop, we force
+ // intervals belonging to the live-in set of the catch/header block to be spilled.
+ // TODO(ngeoffray): Phis in this block could be allocated in register.
size_t position = block->GetLifetimeStart();
BlockRegisters(position, position + 1);
}
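As a rough sketch of the mechanism the hunk above relies on (the types and helper below are invented for illustration and are not ART's API): in linear scan, blocking every physical register for the one-position range [start, start + 1) at the catch/header block's lifetime start makes each register conflict with any interval that is live across that position, so those live-in intervals cannot be kept in registers and are forced onto spill slots.

#include <cstddef>
#include <vector>

// Invented types: one fixed [from, to) interval per physical register,
// pinned at the block's lifetime start position.
struct FixedInterval {
  std::size_t from;  // Inclusive start of the blocked range.
  std::size_t to;    // Exclusive end of the blocked range.
};

// Hypothetical helper mirroring the idea of BlockRegisters(position, position + 1):
// produce a fixed interval for each of the `num_regs` physical registers.
std::vector<FixedInterval> BlockAllRegistersAt(std::size_t start, std::size_t num_regs) {
  return std::vector<FixedInterval>(num_regs, FixedInterval{start, start + 1});
}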
@@ -1864,8 +1866,10 @@ void RegisterAllocator::Resolve() {
// Resolve non-linear control flow across branches. Order does not matter.
for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- if (block->IsCatchBlock()) {
- // Instructions live at the top of catch blocks were forced to spill.
+ if (block->IsCatchBlock() ||
+ (block->GetLoopInformation() != nullptr && block->GetLoopInformation()->IsIrreducible())) {
+ // Instructions live at the top of catch blocks or irreducible loop header
+ // were forced to spill.
if (kIsDebugBuild) {
BitVector* live = liveness_.GetLiveInSet(*block);
for (uint32_t idx : live->Indexes()) {