Merge "Some improvement to reg alloc."
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 5b693dd..aae7f9b 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -46,7 +46,7 @@
size_t number_of_ranges,
ArenaAllocator* allocator,
int reg = -1) {
- LiveInterval* interval = new (allocator) LiveInterval(allocator, Primitive::kPrimInt);
+ LiveInterval* interval = LiveInterval::MakeInterval(allocator, Primitive::kPrimInt);
for (size_t i = number_of_ranges; i > 0; --i) {
interval->AddRange(ranges[i - 1][0], ranges[i - 1][1]);
}
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index df63530..c98b82a 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -141,6 +141,10 @@
for (size_t i = 0, e = physical_core_register_intervals_.Size(); i < e; ++i) {
LiveInterval* fixed = physical_core_register_intervals_.Get(i);
if (fixed != nullptr) {
+ // Fixed interval is added to inactive_ instead of unhandled_.
+ // It's also the only type of inactive interval whose start position
+ // can be after the current interval during linear scan.
+ // Fixed interval is never split and never moves to unhandled_.
inactive_.Add(fixed);
}
}
@@ -160,6 +164,10 @@
for (size_t i = 0, e = physical_fp_register_intervals_.Size(); i < e; ++i) {
LiveInterval* fixed = physical_fp_register_intervals_.Get(i);
if (fixed != nullptr) {
+ // Fixed interval is added to inactive_ instead of unhandled_.
+ // It's also the only type of inactive interval whose start position
+ // can be after the current interval during linear scan.
+ // Fixed interval is never split and never moves to unhandled_.
inactive_.Add(fixed);
}
}
@@ -431,6 +439,27 @@
stream << std::endl;
}
+void RegisterAllocator::DumpAllIntervals(std::ostream& stream) const {
+ stream << "inactive: " << std::endl;
+ for (size_t i = 0; i < inactive_.Size(); i ++) {
+ DumpInterval(stream, inactive_.Get(i));
+ }
+ stream << "active: " << std::endl;
+ for (size_t i = 0; i < active_.Size(); i ++) {
+ DumpInterval(stream, active_.Get(i));
+ }
+ stream << "unhandled: " << std::endl;
+ auto unhandled = (unhandled_ != nullptr) ?
+ unhandled_ : &unhandled_core_intervals_;
+ for (size_t i = 0; i < unhandled->Size(); i ++) {
+ DumpInterval(stream, unhandled->Get(i));
+ }
+ stream << "handled: " << std::endl;
+ for (size_t i = 0; i < handled_.Size(); i ++) {
+ DumpInterval(stream, handled_.Get(i));
+ }
+}
+
// By the book implementation of a linear scan register allocator.
void RegisterAllocator::LinearScan() {
while (!unhandled_->IsEmpty()) {
@@ -440,6 +469,10 @@
DCHECK(unhandled_->IsEmpty() || unhandled_->Peek()->GetStart() >= current->GetStart());
size_t position = current->GetStart();
+ // Remember the inactive_ size here since the ones moved to inactive_ from
+ // active_ below shouldn't need to be re-checked.
+ size_t inactive_intervals_to_handle = inactive_.Size();
+
// (2) Remove currently active intervals that are dead at this position.
// Move active intervals that have a lifetime hole at this position
// to inactive.
@@ -458,15 +491,18 @@
// (3) Remove currently inactive intervals that are dead at this position.
// Move inactive intervals that cover this position to active.
- for (size_t i = 0; i < inactive_.Size(); ++i) {
+ for (size_t i = 0; i < inactive_intervals_to_handle; ++i) {
LiveInterval* interval = inactive_.Get(i);
+ DCHECK(interval->GetStart() < position || interval->IsFixed());
if (interval->IsDeadAt(position)) {
inactive_.Delete(interval);
--i;
+ --inactive_intervals_to_handle;
handled_.Add(interval);
} else if (interval->Covers(position)) {
inactive_.Delete(interval);
--i;
+ --inactive_intervals_to_handle;
active_.Add(interval);
}
}
@@ -505,20 +541,6 @@
free_until[i] = kMaxLifetimePosition;
}
- // For each inactive interval, set its register to be free until
- // the next intersection with `current`.
- // Thanks to SSA, this should only be needed for intervals
- // that are the result of a split.
- for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
- LiveInterval* inactive = inactive_.Get(i);
- DCHECK(inactive->HasRegister());
- size_t next_intersection = inactive->FirstIntersectionWith(current);
- if (next_intersection != kNoLifetime) {
- free_until[inactive->GetRegister()] =
- std::min(free_until[inactive->GetRegister()], next_intersection);
- }
- }
-
// For each active interval, set its register to not free.
for (size_t i = 0, e = active_.Size(); i < e; ++i) {
LiveInterval* interval = active_.Get(i);
@@ -526,6 +548,33 @@
free_until[interval->GetRegister()] = 0;
}
+ // For each inactive interval, set its register to be free until
+ // the next intersection with `current`.
+ for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
+ LiveInterval* inactive = inactive_.Get(i);
+ // Temp/Slow-path-safepoint interval has no holes.
+ DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
+ if (!current->IsSplit() && !inactive->IsFixed()) {
+ // Neither current nor inactive are fixed.
+ // Thanks to SSA, a non-split interval starting in a hole of an
+ // inactive interval should never intersect with that inactive interval.
+ // Only if it's not fixed though, because fixed intervals don't come from SSA.
+ DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
+ continue;
+ }
+
+ DCHECK(inactive->HasRegister());
+ if (free_until[inactive->GetRegister()] == 0) {
+ // Already used by some active interval. No need to intersect.
+ continue;
+ }
+ size_t next_intersection = inactive->FirstIntersectionWith(current);
+ if (next_intersection != kNoLifetime) {
+ free_until[inactive->GetRegister()] =
+ std::min(free_until[inactive->GetRegister()], next_intersection);
+ }
+ }
+
int reg = -1;
if (current->HasRegister()) {
// Some instructions have a fixed register output.
@@ -604,10 +653,18 @@
// For each inactive interval, find the next use of its register after the
// start of current.
- // Thanks to SSA, this should only be needed for intervals
- // that are the result of a split.
for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
LiveInterval* inactive = inactive_.Get(i);
+ // Temp/Slow-path-safepoint interval has no holes.
+ DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
+ if (!current->IsSplit() && !inactive->IsFixed()) {
+ // Neither current nor inactive are fixed.
+ // Thanks to SSA, a non-split interval starting in a hole of an
+ // inactive interval should never intersect with that inactive interval.
+ // Only if it's not fixed though, because fixed intervals don't come from SSA.
+ DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
+ continue;
+ }
DCHECK(inactive->HasRegister());
size_t next_intersection = inactive->FirstIntersectionWith(current);
if (next_intersection != kNoLifetime) {
@@ -659,20 +716,29 @@
}
}
- for (size_t i = 0; i < inactive_.Size(); ++i) {
+ for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
LiveInterval* inactive = inactive_.Get(i);
if (inactive->GetRegister() == reg) {
+ if (!current->IsSplit() && !inactive->IsFixed()) {
+ // Neither current nor inactive are fixed.
+ // Thanks to SSA, a non-split interval starting in a hole of an
+ // inactive interval should never intersect with that inactive interval.
+ // Only if it's not fixed though, because fixed intervals don't come from SSA.
+ DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
+ continue;
+ }
size_t next_intersection = inactive->FirstIntersectionWith(current);
if (next_intersection != kNoLifetime) {
if (inactive->IsFixed()) {
LiveInterval* split = Split(current, next_intersection);
AddSorted(unhandled_, split);
} else {
- LiveInterval* split = Split(inactive, current->GetStart());
+ LiveInterval* split = Split(inactive, next_intersection);
inactive_.DeleteAt(i);
+ --i;
+ --e;
handled_.Add(inactive);
AddSorted(unhandled_, split);
- --i;
}
}
}
@@ -811,7 +877,7 @@
HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
if (at == nullptr) {
- // Block boundary, don't no anything the connection of split siblings will handle it.
+ // Block boundary, don't do anything; the connection of split siblings will handle it.
return;
}
HParallelMove* move;
@@ -1022,7 +1088,7 @@
}
size_t from_position = from->GetLifetimeEnd() - 1;
- // When an instructions dies at entry of another, and the latter is the beginning
+ // When an instruction dies at entry of another, and the latter is the beginning
// of a block, the register allocator ensures the former has a register
at block->GetLifetimeStart() + 1. Since this is at a block boundary, it
must be handled in this method.
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index b881539..976ee39 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -126,6 +126,7 @@
void ProcessInstruction(HInstruction* instruction);
bool ValidateInternal(bool log_fatal_on_failure) const;
void DumpInterval(std::ostream& stream, LiveInterval* interval) const;
+ void DumpAllIntervals(std::ostream& stream) const;
ArenaAllocator* const allocator_;
CodeGenerator* const codegen_;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 2d84a9d..6845dea 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -414,21 +414,24 @@
// Add an artificial range to cover the temps that will be put in the unhandled list.
LiveInterval* unhandled = graph->GetEntryBlock()->GetFirstInstruction()->GetLiveInterval();
unhandled->AddLoopRange(0, 60);
+ // For SSA value intervals, only an interval resulting from a split may intersect
+ // with inactive intervals.
+ unhandled = register_allocator.Split(unhandled, 5);
// Add three temps holding the same register, and starting at different positions.
// Put the one that should be picked in the middle of the inactive list to ensure
// we do not depend on an order.
- LiveInterval* interval = LiveInterval::MakeTempInterval(&allocator, Primitive::kPrimInt);
+ LiveInterval* interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
interval->SetRegister(0);
interval->AddRange(40, 50);
register_allocator.inactive_.Add(interval);
- interval = LiveInterval::MakeTempInterval(&allocator, Primitive::kPrimInt);
+ interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
interval->SetRegister(0);
interval->AddRange(20, 30);
register_allocator.inactive_.Add(interval);
- interval = LiveInterval::MakeTempInterval(&allocator, Primitive::kPrimInt);
+ interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
interval->SetRegister(0);
interval->AddRange(60, 70);
register_allocator.inactive_.Add(interval);
@@ -438,7 +441,7 @@
register_allocator.processing_core_registers_ = true;
register_allocator.unhandled_ = &register_allocator.unhandled_core_intervals_;
- register_allocator.TryAllocateFreeReg(unhandled);
+ ASSERT_TRUE(register_allocator.TryAllocateFreeReg(unhandled));
// Check that we have split the interval.
ASSERT_EQ(1u, register_allocator.unhandled_->Size());
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 1e34670..97bc7f3 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -115,7 +115,7 @@
instructions_from_ssa_index_.Add(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- new (graph_.GetArena()) LiveInterval(graph_.GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(graph_.GetArena(), current->GetType(), current));
}
current->SetLifetimePosition(lifetime_position);
}
@@ -132,7 +132,7 @@
instructions_from_ssa_index_.Add(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- new (graph_.GetArena()) LiveInterval(graph_.GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(graph_.GetArena(), current->GetType(), current));
}
instructions_from_lifetime_position_.Add(current);
current->SetLifetimePosition(lifetime_position);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 7dda4f6..8811ac8 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -139,26 +139,11 @@
*/
class LiveInterval : public ArenaObject {
public:
- LiveInterval(ArenaAllocator* allocator,
- Primitive::Type type,
- HInstruction* defined_by = nullptr,
- bool is_fixed = false,
- int reg = kNoRegister,
- bool is_temp = false,
- bool is_slow_path_safepoint = false)
- : allocator_(allocator),
- first_range_(nullptr),
- last_range_(nullptr),
- first_use_(nullptr),
- type_(type),
- next_sibling_(nullptr),
- parent_(this),
- register_(reg),
- spill_slot_(kNoSpillSlot),
- is_fixed_(is_fixed),
- is_temp_(is_temp),
- is_slow_path_safepoint_(is_slow_path_safepoint),
- defined_by_(defined_by) {}
+ static LiveInterval* MakeInterval(ArenaAllocator* allocator,
+ Primitive::Type type,
+ HInstruction* instruction = nullptr) {
+ return new (allocator) LiveInterval(allocator, type, instruction);
+ }
static LiveInterval* MakeSlowPathInterval(ArenaAllocator* allocator, HInstruction* instruction) {
return new (allocator) LiveInterval(
@@ -174,7 +159,10 @@
}
bool IsFixed() const { return is_fixed_; }
+ bool IsTemp() const { return is_temp_; }
bool IsSlowPathSafepoint() const { return is_slow_path_safepoint_; }
+ // This interval is the result of a split.
+ bool IsSplit() const { return parent_ != this; }
void AddUse(HInstruction* instruction, size_t input_index, bool is_environment) {
// Set the use within the instruction.
@@ -489,6 +477,7 @@
} while ((use = use->GetNext()) != nullptr);
}
stream << "}";
+ stream << " is_fixed: " << is_fixed_ << ", is_split: " << IsSplit();
}
LiveInterval* GetNextSibling() const { return next_sibling_; }
@@ -520,12 +509,31 @@
// Finds the interval that covers `position`.
const LiveInterval& GetIntervalAt(size_t position) const;
- bool IsTemp() const { return is_temp_; }
-
// Returns whether `other` and `this` share the same kind of register.
bool SameRegisterKind(Location other) const;
private:
+ LiveInterval(ArenaAllocator* allocator,
+ Primitive::Type type,
+ HInstruction* defined_by = nullptr,
+ bool is_fixed = false,
+ int reg = kNoRegister,
+ bool is_temp = false,
+ bool is_slow_path_safepoint = false)
+ : allocator_(allocator),
+ first_range_(nullptr),
+ last_range_(nullptr),
+ first_use_(nullptr),
+ type_(type),
+ next_sibling_(nullptr),
+ parent_(this),
+ register_(reg),
+ spill_slot_(kNoSpillSlot),
+ is_fixed_(is_fixed),
+ is_temp_(is_temp),
+ is_slow_path_safepoint_(is_slow_path_safepoint),
+ defined_by_(defined_by) {}
+
ArenaAllocator* const allocator_;
// Ranges of this interval. We need a quick access to the last range to test