Refactor register allocation to be pluggable

Allow alternate register allocation strategies to be implemented
in subclasses of a common register allocation base class.
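
As a sketch of the intended extension point (the graph-color names
below are illustrative only, not part of this change): a new allocator
subclasses RegisterAllocator, adds a value to RegisterAllocator::Strategy,
and gets a case in RegisterAllocator::Create():

    case kRegisterAllocatorGraphColor:  // hypothetical strategy
      return new (allocator) RegisterAllocatorGraphColor(
          allocator, codegen, analysis);

Call sites no longer name a concrete allocator; they go through the
factory with the default strategy:

    RegisterAllocator::Create(graph->GetArena(), codegen, liveness)
        ->AllocateRegisters();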

Test: m test-art-host

Change-Id: I7c5866aa9ddff8f53fcaf721bad47654ab221b4f
diff --git a/compiler/Android.mk b/compiler/Android.mk
index f310565..e3f8a5c 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -67,6 +67,7 @@
 	optimizing/parallel_move_resolver.cc \
 	optimizing/prepare_for_register_allocation.cc \
 	optimizing/reference_type_propagation.cc \
 	optimizing/register_allocation_resolver.cc \
+	optimizing/register_allocator.cc \
 	optimizing/register_allocator_linear_scan.cc \
 	optimizing/select_generator.cc \
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 6f487c5..fe9a7af 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -219,7 +219,7 @@
 
   PrepareForRegisterAllocation(graph).Run();
   liveness.Analyze();
-  RegisterAllocator(graph->GetArena(), codegen, liveness).AllocateRegisters();
+  RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters();
   hook_before_codegen(graph);
 
   InternalCodeAllocator allocator;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 80affc3..77ae10a 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -531,7 +531,7 @@
   }
   {
     PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
-    RegisterAllocator(graph->GetArena(), codegen, liveness).AllocateRegisters();
+    RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters();
   }
 }
 
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
new file mode 100644
index 0000000..2367ce1
--- /dev/null
+++ b/compiler/optimizing/register_allocator.cc
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "register_allocator.h"
+
+#include <iostream>
+#include <sstream>
+
+#include "base/bit_vector-inl.h"
+#include "code_generator.h"
+#include "register_allocator_linear_scan.h"
+#include "ssa_liveness_analysis.h"
+
+
+namespace art {
+
+RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
+                                     CodeGenerator* codegen,
+                                     const SsaLivenessAnalysis& liveness)
+    : allocator_(allocator),
+      codegen_(codegen),
+      liveness_(liveness) {}
+
+RegisterAllocator* RegisterAllocator::Create(ArenaAllocator* allocator,
+                                             CodeGenerator* codegen,
+                                             const SsaLivenessAnalysis& analysis,
+                                             Strategy strategy) {
+  switch (strategy) {
+    case kRegisterAllocatorLinearScan:
+      return new (allocator) RegisterAllocatorLinearScan(allocator, codegen, analysis);
+    default:
+      LOG(FATAL) << "Invalid register allocation strategy: " << strategy;
+      UNREACHABLE();
+  }
+}
+
+bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
+                                                InstructionSet instruction_set) {
+  return instruction_set == kArm
+      || instruction_set == kArm64
+      || instruction_set == kMips
+      || instruction_set == kMips64
+      || instruction_set == kThumb2
+      || instruction_set == kX86
+      || instruction_set == kX86_64;
+}
+
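+// Iterates over every live range of an interval and of all its split siblings.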
+class AllRangesIterator : public ValueObject {
+ public:
+  explicit AllRangesIterator(LiveInterval* interval)
+      : current_interval_(interval),
+        current_range_(interval->GetFirstRange()) {}
+
+  bool Done() const { return current_interval_ == nullptr; }
+  LiveRange* CurrentRange() const { return current_range_; }
+  LiveInterval* CurrentInterval() const { return current_interval_; }
+
+  void Advance() {
+    current_range_ = current_range_->GetNext();
+    if (current_range_ == nullptr) {
+      current_interval_ = current_interval_->GetNextSibling();
+      if (current_interval_ != nullptr) {
+        current_range_ = current_interval_->GetFirstRange();
+      }
+    }
+  }
+
+ private:
+  LiveInterval* current_interval_;
+  LiveRange* current_range_;
+
+  DISALLOW_COPY_AND_ASSIGN(AllRangesIterator);
+};
+
+bool RegisterAllocator::ValidateIntervals(const ArenaVector<LiveInterval*>& intervals,
+                                          size_t number_of_spill_slots,
+                                          size_t number_of_out_slots,
+                                          const CodeGenerator& codegen,
+                                          ArenaAllocator* allocator,
+                                          bool processing_core_registers,
+                                          bool log_fatal_on_failure) {
+  size_t number_of_registers = processing_core_registers
+      ? codegen.GetNumberOfCoreRegisters()
+      : codegen.GetNumberOfFloatingPointRegisters();
+  ArenaVector<ArenaBitVector*> liveness_of_values(
+      allocator->Adapter(kArenaAllocRegisterAllocatorValidate));
+  liveness_of_values.reserve(number_of_registers + number_of_spill_slots);
+
+  size_t max_end = 0u;
+  for (LiveInterval* start_interval : intervals) {
+    for (AllRangesIterator it(start_interval); !it.Done(); it.Advance()) {
+      max_end = std::max(max_end, it.CurrentRange()->GetEnd());
+    }
+  }
+
+  // Allocate a bit vector per register. A live interval that has a register
+  // allocated will populate the associated bit vector based on its live ranges.
+  for (size_t i = 0; i < number_of_registers + number_of_spill_slots; ++i) {
+    liveness_of_values.push_back(
+        ArenaBitVector::Create(allocator, max_end, false, kArenaAllocRegisterAllocatorValidate));
+  }
+
+  for (LiveInterval* start_interval : intervals) {
+    for (AllRangesIterator it(start_interval); !it.Done(); it.Advance()) {
+      LiveInterval* current = it.CurrentInterval();
+      HInstruction* defined_by = current->GetParent()->GetDefinedBy();
+      if (current->GetParent()->HasSpillSlot()
+           // Parameters and the current method have their own stack slot.
+           && !(defined_by != nullptr && (defined_by->IsParameterValue()
+                                          || defined_by->IsCurrentMethod()))) {
+        BitVector* liveness_of_spill_slot = liveness_of_values[number_of_registers
+            + current->GetParent()->GetSpillSlot() / kVRegSize
+            - number_of_out_slots];
+        for (size_t j = it.CurrentRange()->GetStart(); j < it.CurrentRange()->GetEnd(); ++j) {
+          if (liveness_of_spill_slot->IsBitSet(j)) {
+            if (log_fatal_on_failure) {
+              std::ostringstream message;
+              message << "Spill slot conflict at " << j;
+              LOG(FATAL) << message.str();
+            } else {
+              return false;
+            }
+          } else {
+            liveness_of_spill_slot->SetBit(j);
+          }
+        }
+      }
+
+      if (current->HasRegister()) {
+        if (kIsDebugBuild && log_fatal_on_failure && !current->IsFixed()) {
+          // Only check when an error is fatal. Only test code asks for non-fatal failures,
+          // and test code may not properly fill in the right information to the code generator.
+          CHECK(codegen.HasAllocatedRegister(processing_core_registers, current->GetRegister()));
+        }
+        BitVector* liveness_of_register = liveness_of_values[current->GetRegister()];
+        for (size_t j = it.CurrentRange()->GetStart(); j < it.CurrentRange()->GetEnd(); ++j) {
+          if (liveness_of_register->IsBitSet(j)) {
+            if (current->IsUsingInputRegister() && current->CanUseInputRegister()) {
+              continue;
+            }
+            if (log_fatal_on_failure) {
+              std::ostringstream message;
+              message << "Register conflict at " << j << " ";
+              if (defined_by != nullptr) {
+                message << "(" << defined_by->DebugName() << ")";
+              }
+              message << "for ";
+              if (processing_core_registers) {
+                codegen.DumpCoreRegister(message, current->GetRegister());
+              } else {
+                codegen.DumpFloatingPointRegister(message, current->GetRegister());
+              }
+              LOG(FATAL) << message.str();
+            } else {
+              return false;
+            }
+          } else {
+            liveness_of_register->SetBit(j);
+          }
+        }
+      }
+    }
+  }
+  return true;
+}
+
+LiveInterval* RegisterAllocator::Split(LiveInterval* interval, size_t position) {
+  DCHECK_GE(position, interval->GetStart());
+  DCHECK(!interval->IsDeadAt(position));
+  if (position == interval->GetStart()) {
+    // Spill slot will be allocated when handling `interval` again.
+    interval->ClearRegister();
+    if (interval->HasHighInterval()) {
+      interval->GetHighInterval()->ClearRegister();
+    } else if (interval->HasLowInterval()) {
+      interval->GetLowInterval()->ClearRegister();
+    }
+    return interval;
+  } else {
+    LiveInterval* new_interval = interval->SplitAt(position);
+    if (interval->HasHighInterval()) {
+      LiveInterval* high = interval->GetHighInterval()->SplitAt(position);
+      new_interval->SetHighInterval(high);
+      high->SetLowInterval(new_interval);
+    } else if (interval->HasLowInterval()) {
+      LiveInterval* low = interval->GetLowInterval()->SplitAt(position);
+      new_interval->SetLowInterval(low);
+      low->SetHighInterval(new_interval);
+    }
+    return new_interval;
+  }
+}
+
+LiveInterval* RegisterAllocator::SplitBetween(LiveInterval* interval, size_t from, size_t to) {
+  HBasicBlock* block_from = liveness_.GetBlockFromPosition(from / 2);
+  HBasicBlock* block_to = liveness_.GetBlockFromPosition(to / 2);
+  DCHECK(block_from != nullptr);
+  DCHECK(block_to != nullptr);
+
+  // Both locations are in the same block. We split at the given location.
+  if (block_from == block_to) {
+    return Split(interval, to);
+  }
+
+  /*
+   * Non-linear control flow will force moves at every branch instruction to the new location.
+   * To avoid having all branches do the moves, we find the next non-linear position and
+   * split the interval at this position. Take the following example (block numbers are
+   * linear order positions):
+   *
+   *     B1
+   *    /  \
+   *   B2  B3
+   *    \  /
+   *     B4
+   *
+   * B2 needs to split an interval whose next use is in B4. If we were to split at the
+   * beginning of B4, the edge between B3 and B4 would also need a move, to ensure the
+   * interval is in the correct location. This makes performance worse if the interval
+   * is spilled and both B2 and B3 need to reload it before entering B4.
+   *
+   * By splitting at B3, we give the register allocator a chance to allocate the
+   * interval to the same register as in B1, and therefore avoid doing any
+   * moves in B3.
+   */
+  if (block_from->GetDominator() != nullptr) {
+    for (HBasicBlock* dominated : block_from->GetDominator()->GetDominatedBlocks()) {
+      size_t position = dominated->GetLifetimeStart();
+      if ((position > from) && (block_to->GetLifetimeStart() > position)) {
+        // Even if we found a better block, we continue iterating in case
+        // a dominated block is closer.
+        // Note that dominated blocks are not sorted in liveness order.
+        block_to = dominated;
+        DCHECK_NE(block_to, block_from);
+      }
+    }
+  }
+
+  // If `to` is in a loop, find the outermost loop header which does not contain `from`.
+  for (HLoopInformationOutwardIterator it(*block_to); !it.Done(); it.Advance()) {
+    HBasicBlock* header = it.Current()->GetHeader();
+    if (block_from->GetLifetimeStart() >= header->GetLifetimeStart()) {
+      break;
+    }
+    block_to = header;
+  }
+
+  // Split at the start of the found block, to piggyback on existing moves
+  // due to the resolution of non-linear control flow (see `ConnectSplitSiblings`).
+  return Split(interval, block_to->GetLifetimeStart());
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
new file mode 100644
index 0000000..729eede
--- /dev/null
+++ b/compiler/optimizing/register_allocator.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
+#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
+
+#include "arch/instruction_set.h"
+#include "base/arena_containers.h"
+#include "base/arena_object.h"
+#include "base/macros.h"
+#include "primitive.h"
+
+namespace art {
+
+class CodeGenerator;
+class HBasicBlock;
+class HGraph;
+class HInstruction;
+class HParallelMove;
+class LiveInterval;
+class Location;
+class SsaLivenessAnalysis;
+
+/**
+ * Base class for any register allocator.
+ */
+class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> {
+ public:
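+  // Supported register allocation strategies; Create() dispatches on this value.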
+  enum Strategy {
+    kRegisterAllocatorLinearScan
+  };
+
+  static constexpr Strategy kRegisterAllocatorDefault = kRegisterAllocatorLinearScan;
+
+  static RegisterAllocator* Create(ArenaAllocator* allocator,
+                                   CodeGenerator* codegen,
+                                   const SsaLivenessAnalysis& analysis,
+                                   Strategy strategy = kRegisterAllocatorDefault);
+
+  virtual ~RegisterAllocator() = default;
+
+  // Main entry point for the register allocator. Given the liveness analysis,
+  // allocates registers to live intervals.
+  virtual void AllocateRegisters() = 0;
+
+  // Validate that the register allocator did not allocate the same register to
+  // intervals that intersect each other. Returns false if validation failed.
+  virtual bool Validate(bool log_fatal_on_failure) = 0;
+
+  static bool CanAllocateRegistersFor(const HGraph& graph,
+                                      InstructionSet instruction_set);
+
+  // Verifies that live intervals do not conflict. Used by unit testing.
+  static bool ValidateIntervals(const ArenaVector<LiveInterval*>& intervals,
+                                size_t number_of_spill_slots,
+                                size_t number_of_out_slots,
+                                const CodeGenerator& codegen,
+                                ArenaAllocator* allocator,
+                                bool processing_core_registers,
+                                bool log_fatal_on_failure);
+
+  static constexpr const char* kRegisterAllocatorPassName = "register";
+
+ protected:
+  RegisterAllocator(ArenaAllocator* allocator,
+                    CodeGenerator* codegen,
+                    const SsaLivenessAnalysis& analysis);
+
+  // Split `interval` at the position `position`. The new interval starts at `position`.
+  // If `position` is at the start of `interval`, returns `interval` with its
+  // register location(s) cleared.
+  static LiveInterval* Split(LiveInterval* interval, size_t position);
+
+  // Split `interval` at a position between `from` and `to`. The method will try
+  // to find an optimal split position.
+  LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to);
+
+  ArenaAllocator* const allocator_;
+  CodeGenerator* const codegen_;
+  const SsaLivenessAnalysis& liveness_;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index c1797b0..a9151ba 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -38,12 +38,10 @@
   return GetHighForLowRegister(low->GetRegister()) != low->GetHighInterval()->GetRegister();
 }
 
-RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
-                                     CodeGenerator* codegen,
-                                     const SsaLivenessAnalysis& liveness)
-      : allocator_(allocator),
-        codegen_(codegen),
-        liveness_(liveness),
+RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ArenaAllocator* allocator,
+                                                         CodeGenerator* codegen,
+                                                         const SsaLivenessAnalysis& liveness)
+      : RegisterAllocator(allocator, codegen, liveness),
         unhandled_core_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
         unhandled_fp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
         unhandled_(nullptr),
@@ -83,17 +81,6 @@
       codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
 }
 
-bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
-                                                InstructionSet instruction_set) {
-  return instruction_set == kArm
-      || instruction_set == kArm64
-      || instruction_set == kMips
-      || instruction_set == kMips64
-      || instruction_set == kThumb2
-      || instruction_set == kX86
-      || instruction_set == kX86_64;
-}
-
 static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
   if (interval == nullptr) return false;
   bool is_core_register = (interval->GetType() != Primitive::kPrimDouble)
@@ -101,7 +88,7 @@
   return processing_core_registers == is_core_register;
 }
 
-void RegisterAllocator::AllocateRegisters() {
+void RegisterAllocatorLinearScan::AllocateRegisters() {
   AllocateRegistersInternal();
   RegisterAllocationResolver(allocator_, codegen_, liveness_)
       .Resolve(maximum_number_of_live_core_registers_,
@@ -141,7 +128,7 @@
   }
 }
 
-void RegisterAllocator::BlockRegister(Location location, size_t start, size_t end) {
+void RegisterAllocatorLinearScan::BlockRegister(Location location, size_t start, size_t end) {
   int reg = location.reg();
   DCHECK(location.IsRegister() || location.IsFpuRegister());
   LiveInterval* interval = location.IsRegister()
@@ -162,7 +149,7 @@
   interval->AddRange(start, end);
 }
 
-void RegisterAllocator::BlockRegisters(size_t start, size_t end, bool caller_save_only) {
+void RegisterAllocatorLinearScan::BlockRegisters(size_t start, size_t end, bool caller_save_only) {
   for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
     if (!caller_save_only || !codegen_->IsCoreCalleeSaveRegister(i)) {
       BlockRegister(Location::RegisterLocation(i), start, end);
@@ -175,7 +162,7 @@
   }
 }
 
-void RegisterAllocator::AllocateRegistersInternal() {
+void RegisterAllocatorLinearScan::AllocateRegistersInternal() {
   // Iterate post-order, to ensure the list is sorted, and the last added interval
   // is the one with the lowest start position.
   for (HLinearPostOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
@@ -235,7 +222,7 @@
   LinearScan();
 }
 
-void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
+void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   size_t position = instruction->GetLifetimePosition();
 
@@ -452,7 +439,7 @@
   DISALLOW_COPY_AND_ASSIGN(AllRangesIterator);
 };
 
-bool RegisterAllocator::ValidateInternal(bool log_fatal_on_failure) const {
+bool RegisterAllocatorLinearScan::ValidateInternal(bool log_fatal_on_failure) const {
   // To simplify unit testing, we eagerly create the array of intervals, and
   // call the helper method.
   ArenaVector<LiveInterval*> intervals(allocator_->Adapter(kArenaAllocRegisterAllocatorValidate));
@@ -482,99 +469,7 @@
                            allocator_, processing_core_registers_, log_fatal_on_failure);
 }
 
-bool RegisterAllocator::ValidateIntervals(const ArenaVector<LiveInterval*>& intervals,
-                                          size_t number_of_spill_slots,
-                                          size_t number_of_out_slots,
-                                          const CodeGenerator& codegen,
-                                          ArenaAllocator* allocator,
-                                          bool processing_core_registers,
-                                          bool log_fatal_on_failure) {
-  size_t number_of_registers = processing_core_registers
-      ? codegen.GetNumberOfCoreRegisters()
-      : codegen.GetNumberOfFloatingPointRegisters();
-  ArenaVector<ArenaBitVector*> liveness_of_values(
-      allocator->Adapter(kArenaAllocRegisterAllocatorValidate));
-  liveness_of_values.reserve(number_of_registers + number_of_spill_slots);
-
-  size_t max_end = 0u;
-  for (LiveInterval* start_interval : intervals) {
-    for (AllRangesIterator it(start_interval); !it.Done(); it.Advance()) {
-      max_end = std::max(max_end, it.CurrentRange()->GetEnd());
-    }
-  }
-
-  // Allocate a bit vector per register. A live interval that has a register
-  // allocated will populate the associated bit vector based on its live ranges.
-  for (size_t i = 0; i < number_of_registers + number_of_spill_slots; ++i) {
-    liveness_of_values.push_back(
-        ArenaBitVector::Create(allocator, max_end, false, kArenaAllocRegisterAllocatorValidate));
-  }
-
-  for (LiveInterval* start_interval : intervals) {
-    for (AllRangesIterator it(start_interval); !it.Done(); it.Advance()) {
-      LiveInterval* current = it.CurrentInterval();
-      HInstruction* defined_by = current->GetParent()->GetDefinedBy();
-      if (current->GetParent()->HasSpillSlot()
-           // Parameters and current method have their own stack slot.
-           && !(defined_by != nullptr && (defined_by->IsParameterValue()
-                                          || defined_by->IsCurrentMethod()))) {
-        BitVector* liveness_of_spill_slot = liveness_of_values[number_of_registers
-            + current->GetParent()->GetSpillSlot() / kVRegSize
-            - number_of_out_slots];
-        for (size_t j = it.CurrentRange()->GetStart(); j < it.CurrentRange()->GetEnd(); ++j) {
-          if (liveness_of_spill_slot->IsBitSet(j)) {
-            if (log_fatal_on_failure) {
-              std::ostringstream message;
-              message << "Spill slot conflict at " << j;
-              LOG(FATAL) << message.str();
-            } else {
-              return false;
-            }
-          } else {
-            liveness_of_spill_slot->SetBit(j);
-          }
-        }
-      }
-
-      if (current->HasRegister()) {
-        if (kIsDebugBuild && log_fatal_on_failure && !current->IsFixed()) {
-          // Only check when an error is fatal. Only tests code ask for non-fatal failures
-          // and test code may not properly fill the right information to the code generator.
-          CHECK(codegen.HasAllocatedRegister(processing_core_registers, current->GetRegister()));
-        }
-        BitVector* liveness_of_register = liveness_of_values[current->GetRegister()];
-        for (size_t j = it.CurrentRange()->GetStart(); j < it.CurrentRange()->GetEnd(); ++j) {
-          if (liveness_of_register->IsBitSet(j)) {
-            if (current->IsUsingInputRegister() && current->CanUseInputRegister()) {
-              continue;
-            }
-            if (log_fatal_on_failure) {
-              std::ostringstream message;
-              message << "Register conflict at " << j << " ";
-              if (defined_by != nullptr) {
-                message << "(" << defined_by->DebugName() << ")";
-              }
-              message << "for ";
-              if (processing_core_registers) {
-                codegen.DumpCoreRegister(message, current->GetRegister());
-              } else {
-                codegen.DumpFloatingPointRegister(message, current->GetRegister());
-              }
-              LOG(FATAL) << message.str();
-            } else {
-              return false;
-            }
-          } else {
-            liveness_of_register->SetBit(j);
-          }
-        }
-      }
-    }
-  }
-  return true;
-}
-
-void RegisterAllocator::DumpInterval(std::ostream& stream, LiveInterval* interval) const {
+void RegisterAllocatorLinearScan::DumpInterval(std::ostream& stream, LiveInterval* interval) const {
   interval->Dump(stream);
   stream << ": ";
   if (interval->HasRegister()) {
@@ -589,7 +484,7 @@
   stream << std::endl;
 }
 
-void RegisterAllocator::DumpAllIntervals(std::ostream& stream) const {
+void RegisterAllocatorLinearScan::DumpAllIntervals(std::ostream& stream) const {
   stream << "inactive: " << std::endl;
   for (LiveInterval* inactive_interval : inactive_) {
     DumpInterval(stream, inactive_interval);
@@ -611,7 +506,7 @@
 }
 
 // By the book implementation of a linear scan register allocator.
-void RegisterAllocator::LinearScan() {
+void RegisterAllocatorLinearScan::LinearScan() {
   while (!unhandled_->empty()) {
     // (1) Remove interval with the lowest start position from unhandled.
     LiveInterval* current = unhandled_->back();
@@ -742,7 +637,7 @@
 
 // Find a free register. If multiple are found, pick the register that
 // is free the longest.
-bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) {
+bool RegisterAllocatorLinearScan::TryAllocateFreeReg(LiveInterval* current) {
   size_t* free_until = registers_array_;
 
   // First set all registers to be free.
@@ -865,13 +760,13 @@
   return true;
 }
 
-bool RegisterAllocator::IsBlocked(int reg) const {
+bool RegisterAllocatorLinearScan::IsBlocked(int reg) const {
   return processing_core_registers_
       ? blocked_core_registers_[reg]
       : blocked_fp_registers_[reg];
 }
 
-int RegisterAllocator::FindAvailableRegisterPair(size_t* next_use, size_t starting_at) const {
+int RegisterAllocatorLinearScan::FindAvailableRegisterPair(size_t* next_use, size_t starting_at) const {
   int reg = kNoRegister;
   // Pick the register pair that is used the last.
   for (size_t i = 0; i < number_of_registers_; ++i) {
@@ -896,13 +791,13 @@
   return reg;
 }
 
-bool RegisterAllocator::IsCallerSaveRegister(int reg) const {
+bool RegisterAllocatorLinearScan::IsCallerSaveRegister(int reg) const {
   return processing_core_registers_
       ? !codegen_->IsCoreCalleeSaveRegister(reg)
       : !codegen_->IsFloatingPointCalleeSaveRegister(reg);
 }
 
-int RegisterAllocator::FindAvailableRegister(size_t* next_use, LiveInterval* current) const {
+int RegisterAllocatorLinearScan::FindAvailableRegister(size_t* next_use, LiveInterval* current) const {
   // We special case intervals that do not span a safepoint to try to find a caller-save
   // register if one is available. We iterate from 0 to the number of registers,
   // so if there are caller-save registers available at the end, we continue the iteration.
@@ -965,9 +860,9 @@
   }
 }
 
-bool RegisterAllocator::TrySplitNonPairOrUnalignedPairIntervalAt(size_t position,
-                                                                 size_t first_register_use,
-                                                                 size_t* next_use) {
+bool RegisterAllocatorLinearScan::TrySplitNonPairOrUnalignedPairIntervalAt(size_t position,
+                                                                           size_t first_register_use,
+                                                                           size_t* next_use) {
   for (auto it = active_.begin(), end = active_.end(); it != end; ++it) {
     LiveInterval* active = *it;
     DCHECK(active->HasRegister());
@@ -997,7 +892,7 @@
 // Find the register that is used the last, and spill the interval
 // that holds it. If the first use of `current` is after that register
 // we spill `current` instead.
-bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
+bool RegisterAllocatorLinearScan::AllocateBlockedReg(LiveInterval* current) {
   size_t first_register_use = current->FirstRegisterUse();
   if (current->HasRegister()) {
     DCHECK(current->IsHighInterval());
@@ -1180,7 +1075,7 @@
   }
 }
 
-void RegisterAllocator::AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval) {
+void RegisterAllocatorLinearScan::AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval) {
   DCHECK(!interval->IsFixed() && !interval->HasSpillSlot());
   size_t insert_at = 0;
   for (size_t i = array->size(); i > 0; --i) {
@@ -1209,93 +1104,7 @@
   }
 }
 
-LiveInterval* RegisterAllocator::SplitBetween(LiveInterval* interval, size_t from, size_t to) {
-  HBasicBlock* block_from = liveness_.GetBlockFromPosition(from / 2);
-  HBasicBlock* block_to = liveness_.GetBlockFromPosition(to / 2);
-  DCHECK(block_from != nullptr);
-  DCHECK(block_to != nullptr);
-
-  // Both locations are in the same block. We split at the given location.
-  if (block_from == block_to) {
-    return Split(interval, to);
-  }
-
-  /*
-   * Non-linear control flow will force moves at every branch instruction to the new location.
-   * To avoid having all branches doing the moves, we find the next non-linear position and
-   * split the interval at this position. Take the following example (block number is the linear
-   * order position):
-   *
-   *     B1
-   *    /  \
-   *   B2  B3
-   *    \  /
-   *     B4
-   *
-   * B2 needs to split an interval, whose next use is in B4. If we were to split at the
-   * beginning of B4, B3 would need to do a move between B3 and B4 to ensure the interval
-   * is now in the correct location. It makes performance worst if the interval is spilled
-   * and both B2 and B3 need to reload it before entering B4.
-   *
-   * By splitting at B3, we give a chance to the register allocator to allocate the
-   * interval to the same register as in B1, and therefore avoid doing any
-   * moves in B3.
-   */
-  if (block_from->GetDominator() != nullptr) {
-    for (HBasicBlock* dominated : block_from->GetDominator()->GetDominatedBlocks()) {
-      size_t position = dominated->GetLifetimeStart();
-      if ((position > from) && (block_to->GetLifetimeStart() > position)) {
-        // Even if we found a better block, we continue iterating in case
-        // a dominated block is closer.
-        // Note that dominated blocks are not sorted in liveness order.
-        block_to = dominated;
-        DCHECK_NE(block_to, block_from);
-      }
-    }
-  }
-
-  // If `to` is in a loop, find the outermost loop header which does not contain `from`.
-  for (HLoopInformationOutwardIterator it(*block_to); !it.Done(); it.Advance()) {
-    HBasicBlock* header = it.Current()->GetHeader();
-    if (block_from->GetLifetimeStart() >= header->GetLifetimeStart()) {
-      break;
-    }
-    block_to = header;
-  }
-
-  // Split at the start of the found block, to piggy back on existing moves
-  // due to resolution if non-linear control flow (see `ConnectSplitSiblings`).
-  return Split(interval, block_to->GetLifetimeStart());
-}
-
-LiveInterval* RegisterAllocator::Split(LiveInterval* interval, size_t position) {
-  DCHECK_GE(position, interval->GetStart());
-  DCHECK(!interval->IsDeadAt(position));
-  if (position == interval->GetStart()) {
-    // Spill slot will be allocated when handling `interval` again.
-    interval->ClearRegister();
-    if (interval->HasHighInterval()) {
-      interval->GetHighInterval()->ClearRegister();
-    } else if (interval->HasLowInterval()) {
-      interval->GetLowInterval()->ClearRegister();
-    }
-    return interval;
-  } else {
-    LiveInterval* new_interval = interval->SplitAt(position);
-    if (interval->HasHighInterval()) {
-      LiveInterval* high = interval->GetHighInterval()->SplitAt(position);
-      new_interval->SetHighInterval(high);
-      high->SetLowInterval(new_interval);
-    } else if (interval->HasLowInterval()) {
-      LiveInterval* low = interval->GetLowInterval()->SplitAt(position);
-      new_interval->SetLowInterval(low);
-      low->SetHighInterval(new_interval);
-    }
-    return new_interval;
-  }
-}
-
-void RegisterAllocator::AllocateSpillSlotFor(LiveInterval* interval) {
+void RegisterAllocatorLinearScan::AllocateSpillSlotFor(LiveInterval* interval) {
   if (interval->IsHighInterval()) {
     // The low interval already took care of allocating the spill slot.
     DCHECK(!interval->GetLowInterval()->HasRegister());
@@ -1390,7 +1199,7 @@
   parent->SetSpillSlot(slot);
 }
 
-void RegisterAllocator::AllocateSpillSlotForCatchPhi(HPhi* phi) {
+void RegisterAllocatorLinearScan::AllocateSpillSlotForCatchPhi(HPhi* phi) {
   LiveInterval* interval = phi->GetLiveInterval();
 
   HInstruction* previous_phi = phi->GetPrevious();
diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h
index f32a4db..b6e4f92 100644
--- a/compiler/optimizing/register_allocator_linear_scan.h
+++ b/compiler/optimizing/register_allocator_linear_scan.h
@@ -21,6 +21,7 @@
 #include "base/arena_containers.h"
 #include "base/macros.h"
 #include "primitive.h"
+#include "register_allocator.h"
 
 namespace art {
 
@@ -37,19 +38,15 @@
 /**
  * An implementation of a linear scan register allocator on an `HGraph` with SSA form.
  */
-class RegisterAllocator {
+class RegisterAllocatorLinearScan : public RegisterAllocator {
  public:
-  RegisterAllocator(ArenaAllocator* allocator,
-                    CodeGenerator* codegen,
-                    const SsaLivenessAnalysis& analysis);
+  RegisterAllocatorLinearScan(ArenaAllocator* allocator,
+                              CodeGenerator* codegen,
+                              const SsaLivenessAnalysis& analysis);
 
-  // Main entry point for the register allocator. Given the liveness analysis,
-  // allocates registers to live intervals.
-  void AllocateRegisters();
+  void AllocateRegisters() OVERRIDE;
 
-  // Validate that the register allocator did not allocate the same register to
-  // intervals that intersect each other. Returns false if it did not.
-  bool Validate(bool log_fatal_on_failure) {
+  bool Validate(bool log_fatal_on_failure) OVERRIDE {
     processing_core_registers_ = true;
     if (!ValidateInternal(log_fatal_on_failure)) {
       return false;
@@ -58,17 +55,6 @@
     return ValidateInternal(log_fatal_on_failure);
   }
 
-  // Helper method for validation. Used by unit testing.
-  static bool ValidateIntervals(const ArenaVector<LiveInterval*>& intervals,
-                                size_t number_of_spill_slots,
-                                size_t number_of_out_slots,
-                                const CodeGenerator& codegen,
-                                ArenaAllocator* allocator,
-                                bool processing_core_registers,
-                                bool log_fatal_on_failure);
-
-  static bool CanAllocateRegistersFor(const HGraph& graph, InstructionSet instruction_set);
-
   size_t GetNumberOfSpillSlots() const {
     return int_spill_slots_.size()
         + long_spill_slots_.size()
@@ -77,8 +63,6 @@
         + catch_phi_spill_slots_;
   }
 
-  static constexpr const char* kRegisterAllocatorPassName = "register";
-
  private:
   // Main methods of the allocator.
   void LinearScan();
@@ -88,13 +72,6 @@
   // Add `interval` in the given sorted list.
   static void AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval);
 
-  // Split `interval` at the position `position`. The new interval starts at `position`.
-  LiveInterval* Split(LiveInterval* interval, size_t position);
-
-  // Split `interval` at a position between `from` and `to`. The method will try
-  // to find an optimal split position.
-  LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to);
-
   // Returns whether `reg` is blocked by the code generator.
   bool IsBlocked(int reg) const;
 
@@ -127,10 +104,6 @@
                                                 size_t first_register_use,
                                                 size_t* next_use);
 
-  ArenaAllocator* const allocator_;
-  CodeGenerator* const codegen_;
-  const SsaLivenessAnalysis& liveness_;
-
   // List of intervals for core registers that must be processed, ordered by start
   // position. Last entry is the interval that has the lowest start position.
   // This list is initially populated before doing the linear scan.
@@ -206,7 +179,7 @@
   ART_FRIEND_TEST(RegisterAllocatorTest, FreeUntil);
   ART_FRIEND_TEST(RegisterAllocatorTest, SpillInactive);
 
-  DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
+  DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorLinearScan);
 };
 
 }  // namespace art
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 602a14c..cbb7b2f 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -24,6 +24,7 @@
 #include "driver/compiler_options.h"
 #include "nodes.h"
 #include "optimizing_unit_test.h"
+#include "register_allocator.h"
 #include "register_allocator_linear_scan.h"
 #include "ssa_liveness_analysis.h"
 #include "ssa_phi_elimination.h"
@@ -44,9 +45,9 @@
   x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
   SsaLivenessAnalysis liveness(graph, &codegen);
   liveness.Analyze();
-  RegisterAllocator register_allocator(&allocator, &codegen, liveness);
-  register_allocator.AllocateRegisters();
-  return register_allocator.Validate(false);
+  RegisterAllocator* register_allocator = RegisterAllocator::Create(&allocator, &codegen, liveness);
+  register_allocator->AllocateRegisters();
+  return register_allocator->Validate(false);
 }
 
 /**
@@ -295,9 +296,9 @@
   x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
   SsaLivenessAnalysis liveness(graph, &codegen);
   liveness.Analyze();
-  RegisterAllocator register_allocator(&allocator, &codegen, liveness);
-  register_allocator.AllocateRegisters();
-  ASSERT_TRUE(register_allocator.Validate(false));
+  RegisterAllocator* register_allocator = RegisterAllocator::Create(&allocator, &codegen, liveness);
+  register_allocator->AllocateRegisters();
+  ASSERT_TRUE(register_allocator->Validate(false));
 
   HBasicBlock* loop_header = graph->GetBlocks()[2];
   HPhi* phi = loop_header->GetFirstPhi()->AsPhi();
@@ -384,9 +385,9 @@
   x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
   SsaLivenessAnalysis liveness(graph, &codegen);
   liveness.Analyze();
-  RegisterAllocator register_allocator(&allocator, &codegen, liveness);
-  register_allocator.AllocateRegisters();
-  ASSERT_TRUE(register_allocator.Validate(false));
+  RegisterAllocator* register_allocator = RegisterAllocator::Create(&allocator, &codegen, liveness);
+  register_allocator->AllocateRegisters();
+  ASSERT_TRUE(register_allocator->Validate(false));
 }
 
 /**
@@ -408,7 +409,7 @@
   x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
   SsaLivenessAnalysis liveness(graph, &codegen);
   liveness.Analyze();
-  RegisterAllocator register_allocator(&allocator, &codegen, liveness);
+  RegisterAllocatorLinearScan register_allocator(&allocator, &codegen, liveness);
 
   // Add an artificial range to cover the temps that will be put in the unhandled list.
   LiveInterval* unhandled = graph->GetEntryBlock()->GetFirstInstruction()->GetLiveInterval();
@@ -541,8 +542,9 @@
     liveness.Analyze();
 
     // Check that the register allocator is deterministic.
-    RegisterAllocator register_allocator(&allocator, &codegen, liveness);
-    register_allocator.AllocateRegisters();
+    RegisterAllocator* register_allocator =
+        RegisterAllocator::Create(&allocator, &codegen, liveness);
+    register_allocator->AllocateRegisters();
 
     ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 0);
     ASSERT_EQ(input2->GetLiveInterval()->GetRegister(), 0);
@@ -560,8 +562,9 @@
     // Set the phi to a specific register, and check that the inputs get allocated
     // the same register.
     phi->GetLocations()->UpdateOut(Location::RegisterLocation(2));
-    RegisterAllocator register_allocator(&allocator, &codegen, liveness);
-    register_allocator.AllocateRegisters();
+    RegisterAllocator* register_allocator =
+        RegisterAllocator::Create(&allocator, &codegen, liveness);
+    register_allocator->AllocateRegisters();
 
     ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
     ASSERT_EQ(input2->GetLiveInterval()->GetRegister(), 2);
@@ -579,8 +582,9 @@
     // Set input1 to a specific register, and check that the phi and other input get allocated
     // the same register.
     input1->GetLocations()->UpdateOut(Location::RegisterLocation(2));
-    RegisterAllocator register_allocator(&allocator, &codegen, liveness);
-    register_allocator.AllocateRegisters();
+    RegisterAllocator* register_allocator =
+        RegisterAllocator::Create(&allocator, &codegen, liveness);
+    register_allocator->AllocateRegisters();
 
     ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
     ASSERT_EQ(input2->GetLiveInterval()->GetRegister(), 2);
@@ -598,8 +602,9 @@
     // Set input2 to a specific register, and check that the phi and other input get allocated
     // the same register.
     input2->GetLocations()->UpdateOut(Location::RegisterLocation(2));
-    RegisterAllocator register_allocator(&allocator, &codegen, liveness);
-    register_allocator.AllocateRegisters();
+    RegisterAllocator* register_allocator =
+        RegisterAllocator::Create(&allocator, &codegen, liveness);
+    register_allocator->AllocateRegisters();
 
     ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
     ASSERT_EQ(input2->GetLiveInterval()->GetRegister(), 2);
@@ -658,8 +663,9 @@
     SsaLivenessAnalysis liveness(graph, &codegen);
     liveness.Analyze();
 
-    RegisterAllocator register_allocator(&allocator, &codegen, liveness);
-    register_allocator.AllocateRegisters();
+    RegisterAllocator* register_allocator =
+        RegisterAllocator::Create(&allocator, &codegen, liveness);
+    register_allocator->AllocateRegisters();
 
     // Sanity check that in normal conditions, the register should be hinted to 0 (EAX).
     ASSERT_EQ(field->GetLiveInterval()->GetRegister(), 0);
@@ -677,8 +683,9 @@
     // Don't use SetInAt because we are overriding an already allocated location.
     ret->GetLocations()->inputs_[0] = Location::RegisterLocation(2);
 
-    RegisterAllocator register_allocator(&allocator, &codegen, liveness);
-    register_allocator.AllocateRegisters();
+    RegisterAllocator* register_allocator =
+        RegisterAllocator::Create(&allocator, &codegen, liveness);
+    register_allocator->AllocateRegisters();
 
     ASSERT_EQ(field->GetLiveInterval()->GetRegister(), 2);
   }
@@ -726,8 +733,9 @@
     SsaLivenessAnalysis liveness(graph, &codegen);
     liveness.Analyze();
 
-    RegisterAllocator register_allocator(&allocator, &codegen, liveness);
-    register_allocator.AllocateRegisters();
+    RegisterAllocator* register_allocator =
+        RegisterAllocator::Create(&allocator, &codegen, liveness);
+    register_allocator->AllocateRegisters();
 
     // Sanity check that in normal conditions, the registers are the same.
     ASSERT_EQ(first_sub->GetLiveInterval()->GetRegister(), 1);
@@ -748,8 +756,9 @@
     ASSERT_EQ(first_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
     ASSERT_EQ(second_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
 
-    RegisterAllocator register_allocator(&allocator, &codegen, liveness);
-    register_allocator.AllocateRegisters();
+    RegisterAllocator* register_allocator =
+        RegisterAllocator::Create(&allocator, &codegen, liveness);
+    register_allocator->AllocateRegisters();
 
     ASSERT_EQ(first_sub->GetLiveInterval()->GetRegister(), 2);
     ASSERT_EQ(second_sub->GetLiveInterval()->GetRegister(), 2);
@@ -795,8 +804,9 @@
     SsaLivenessAnalysis liveness(graph, &codegen);
     liveness.Analyze();
 
-    RegisterAllocator register_allocator(&allocator, &codegen, liveness);
-    register_allocator.AllocateRegisters();
+    RegisterAllocator* register_allocator =
+        RegisterAllocator::Create(&allocator, &codegen, liveness);
+    register_allocator->AllocateRegisters();
 
     // div on x86 requires its first input in eax and the output be the same as the first input.
     ASSERT_EQ(div->GetLiveInterval()->GetRegister(), 0);
@@ -892,7 +902,7 @@
     liveness.instructions_from_lifetime_position_.push_back(user);
   }
 
-  RegisterAllocator register_allocator(&allocator, &codegen, liveness);
+  RegisterAllocatorLinearScan register_allocator(&allocator, &codegen, liveness);
   register_allocator.unhandled_core_intervals_.push_back(fourth);
   register_allocator.unhandled_core_intervals_.push_back(third);
   register_allocator.unhandled_core_intervals_.push_back(second);