Final CL to enable register allocation on x86.

This CL implements:
1) Resolution after allocation: connecting the locations
   allocated to an interval, both within a block and across
   block boundaries.
2) Handling of fixed registers: some instructions require
   their inputs/outputs to be at specific locations, and the
   allocator needs to handle them specially.
3) ParallelMoveResolver::EmitNativeCode for x86 (illustrated
   below).
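
As an illustration of 3), a register/register swap in a parallel move is
now emitted as a single exchange instruction (a sketch based on this CL's
ParallelMoveResolverX86::EmitSwap; the registers are illustrative):

  // Pending moves (EAX -> ECX, ECX -> EAX) form a swap cycle, which
  // EmitSwap resolves without needing a temporary register:
  __ xchgl(ECX, EAX);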

Change-Id: I0da6bd7eb66877987148b87c3be6a983b4e3f858
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index beafbcc..f05cb66 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -29,30 +29,60 @@
 
 namespace art {
 
-void CodeGenerator::Compile(CodeAllocator* allocator) {
+void CodeGenerator::CompileBaseline(CodeAllocator* allocator) {
   const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
   DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
   DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), blocks.Get(1)));
+  block_labels_.SetSize(blocks.Size());
+
+  DCHECK_EQ(frame_size_, kUninitializedFrameSize);
+  ComputeFrameSize(GetGraph()->GetMaximumNumberOfOutVRegs()
+                   + GetGraph()->GetNumberOfVRegs()
+                   + 1 /* filler */);
   GenerateFrameEntry();
+
   for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
-    CompileBlock(blocks.Get(i));
+    HBasicBlock* block = blocks.Get(i);
+    Bind(GetLabelOf(block));
+    HGraphVisitor* location_builder = GetLocationBuilder();
+    HGraphVisitor* instruction_visitor = GetInstructionVisitor();
+    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+      HInstruction* current = it.Current();
+      current->Accept(location_builder);
+      InitLocations(current);
+      current->Accept(instruction_visitor);
+    }
   }
+
   size_t code_size = GetAssembler()->CodeSize();
   uint8_t* buffer = allocator->Allocate(code_size);
   MemoryRegion code(buffer, code_size);
   GetAssembler()->FinalizeInstructions(code);
 }
 
-void CodeGenerator::CompileBlock(HBasicBlock* block) {
-  Bind(GetLabelOf(block));
-  HGraphVisitor* location_builder = GetLocationBuilder();
-  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
-  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
-    HInstruction* current = it.Current();
-    current->Accept(location_builder);
-    InitLocations(current);
-    current->Accept(instruction_visitor);
+void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
+  // The frame size has already been computed during register allocation.
+  DCHECK_NE(frame_size_, kUninitializedFrameSize);
+  const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
+  DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
+  DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), blocks.Get(1)));
+  block_labels_.SetSize(blocks.Size());
+
+  GenerateFrameEntry();
+  for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
+    HBasicBlock* block = blocks.Get(i);
+    Bind(GetLabelOf(block));
+    HGraphVisitor* instruction_visitor = GetInstructionVisitor();
+    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+      HInstruction* current = it.Current();
+      current->Accept(instruction_visitor);
+    }
   }
+
+  size_t code_size = GetAssembler()->CodeSize();
+  uint8_t* buffer = allocator->Allocate(code_size);
+  MemoryRegion code(buffer, code_size);
+  GetAssembler()->FinalizeInstructions(code);
 }
 
 size_t CodeGenerator::AllocateFreeRegisterInternal(
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index e197ccd..82fa639 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -28,6 +28,7 @@
 namespace art {
 
 static size_t constexpr kVRegSize = 4;
+static size_t constexpr kUninitializedFrameSize = 0;
 
 class DexCompilationUnit;
 
@@ -51,7 +52,8 @@
  public:
   // Compiles the graph to executable instructions. Returns whether the compilation
   // succeeded.
-  void Compile(CodeAllocator* allocator);
+  void CompileBaseline(CodeAllocator* allocator);
+  void CompileOptimized(CodeAllocator* allocator);
   static CodeGenerator* Create(ArenaAllocator* allocator,
                                HGraph* graph,
                                InstructionSet instruction_set);
@@ -61,6 +63,14 @@
   Label* GetLabelOf(HBasicBlock* block) const;
   bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;
 
+  size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
+    // Note that this follows the current calling convention.
+    return GetFrameSize()
+        + kVRegSize  // Art method
+        + (parameter->GetIndex() - graph_->GetNumberOfVRegs() + graph_->GetNumberOfInVRegs())
+          * kVRegSize;
+  }
+
   virtual void GenerateFrameEntry() = 0;
   virtual void GenerateFrameExit() = 0;
   virtual void Bind(Label* label) = 0;
@@ -69,6 +79,7 @@
   virtual HGraphVisitor* GetInstructionVisitor() = 0;
   virtual Assembler* GetAssembler() = 0;
   virtual size_t GetWordSize() const = 0;
+  virtual void ComputeFrameSize(size_t number_of_spill_slots) = 0;
 
   uint32_t GetFrameSize() const { return frame_size_; }
   void SetFrameSize(uint32_t size) { frame_size_ = size; }
@@ -95,14 +106,12 @@
 
  protected:
   CodeGenerator(HGraph* graph, size_t number_of_registers)
-      : frame_size_(0),
+      : frame_size_(kUninitializedFrameSize),
         graph_(graph),
         block_labels_(graph->GetArena(), 0),
         pc_infos_(graph->GetArena(), 32),
-        blocked_registers_(graph->GetArena()->AllocArray<bool>(number_of_registers)) {
-    block_labels_.SetSize(graph->GetBlocks().Size());
-  }
-  ~CodeGenerator() { }
+        blocked_registers_(graph->GetArena()->AllocArray<bool>(number_of_registers)) {}
+  ~CodeGenerator() {}
 
   // Register allocation logic.
   void AllocateRegistersLocally(HInstruction* instruction) const;
@@ -123,7 +132,6 @@
 
  private:
   void InitLocations(HInstruction* instruction);
-  void CompileBlock(HBasicBlock* block);
 
   HGraph* const graph_;
 
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e888cc1..d61df36 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -129,16 +129,18 @@
         assembler_(codegen->GetAssembler()),
         codegen_(codegen) {}
 
+void CodeGeneratorARM::ComputeFrameSize(size_t number_of_spill_slots) {
+  SetFrameSize(RoundUp(
+      number_of_spill_slots * kVRegSize
+      + kVRegSize  // Art method
+      + kNumberOfPushedRegistersAtEntry * kArmWordSize,
+      kStackAlignment));
+}
+
 void CodeGeneratorARM::GenerateFrameEntry() {
   core_spill_mask_ |= (1 << LR);
   __ PushList((1 << LR));
 
-  SetFrameSize(RoundUp(
-      (GetGraph()->GetMaximumNumberOfOutVRegs() + GetGraph()->GetNumberOfVRegs()) * kVRegSize
-      + kVRegSize  // filler
-      + kArmWordSize  // Art method
-      + kNumberOfPushedRegistersAtEntry * kArmWordSize,
-      kStackAlignment));
   // The return PC has already been pushed on the stack.
   __ AddConstant(SP, -(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kArmWordSize));
   __ str(R0, Address(SP, 0));
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index c945a06..ac5ef21 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -104,6 +104,7 @@
   explicit CodeGeneratorARM(HGraph* graph);
   virtual ~CodeGeneratorARM() { }
 
+  virtual void ComputeFrameSize(size_t number_of_spill_slots) OVERRIDE;
   virtual void GenerateFrameEntry() OVERRIDE;
   virtual void GenerateFrameExit() OVERRIDE;
   virtual void Bind(Label* label) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 72c697f..c7dca86 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -48,7 +48,16 @@
 CodeGeneratorX86::CodeGeneratorX86(HGraph* graph)
     : CodeGenerator(graph, kNumberOfRegIds),
       location_builder_(graph, this),
-      instruction_visitor_(graph, this) {}
+      instruction_visitor_(graph, this),
+      move_resolver_(graph->GetArena(), this) {}
+
+void CodeGeneratorX86::ComputeFrameSize(size_t number_of_spill_slots) {
+  SetFrameSize(RoundUp(
+      number_of_spill_slots * kVRegSize
+      + kVRegSize  // Art method
+      + kNumberOfPushedRegistersAtEntry * kX86WordSize,
+      kStackAlignment));
+}
 
 static bool* GetBlockedRegisterPairs(bool* blocked_registers) {
   return blocked_registers + kNumberOfAllocIds;
@@ -125,13 +134,6 @@
   static const int kFakeReturnRegister = 8;
   core_spill_mask_ |= (1 << kFakeReturnRegister);
 
-  SetFrameSize(RoundUp(
-      (GetGraph()->GetMaximumNumberOfOutVRegs() + GetGraph()->GetNumberOfVRegs()) * kVRegSize
-      + kVRegSize  // filler
-      + kX86WordSize  // Art method
-      + kNumberOfPushedRegistersAtEntry * kX86WordSize,
-      kStackAlignment));
-
   // The return PC has already been pushed on the stack.
   __ subl(ESP, Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize));
   __ movl(Address(ESP, kCurrentMethodStackOffset), EAX);
@@ -264,8 +266,8 @@
       __ movl(Address(ESP, destination.GetStackIndex()), source.AsX86().AsCpuRegister());
     } else {
       DCHECK(source.IsStackSlot());
-      __ movl(EAX, Address(ESP, source.GetStackIndex()));
-      __ movl(Address(ESP, destination.GetStackIndex()), EAX);
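+      // Move between two stack slots with push/pop so that no
+      // general-purpose register (previously EAX) is clobbered.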
+      __ pushl(Address(ESP, source.GetStackIndex()));
+      __ popl(Address(ESP, destination.GetStackIndex()));
     }
   }
 }
@@ -302,8 +304,8 @@
       DCHECK(source.IsDoubleStackSlot());
       __ movl(calling_convention.GetRegisterAt(argument_index),
               Address(ESP, source.GetStackIndex()));
-      __ movl(EAX, Address(ESP, source.GetHighStackIndex(kX86WordSize)));
-      __ movl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1, kX86WordSize)), EAX);
+      __ pushl(Address(ESP, source.GetHighStackIndex(kX86WordSize)));
+      __ popl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1, kX86WordSize)));
     }
   } else {
     if (source.IsRegister()) {
@@ -315,15 +317,15 @@
       uint32_t argument_index = source.GetQuickParameterIndex();
       __ movl(Address(ESP, destination.GetStackIndex()),
               calling_convention.GetRegisterAt(argument_index));
-      __ movl(EAX, Address(ESP,
+      __ pushl(Address(ESP,
           calling_convention.GetStackOffsetOf(argument_index + 1, kX86WordSize) + GetFrameSize()));
-      __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), EAX);
+      __ popl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)));
     } else {
       DCHECK(source.IsDoubleStackSlot());
-      __ movl(EAX, Address(ESP, source.GetStackIndex()));
-      __ movl(Address(ESP, destination.GetStackIndex()), EAX);
-      __ movl(EAX, Address(ESP, source.GetHighStackIndex(kX86WordSize)));
-      __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), EAX);
+      __ pushl(Address(ESP, source.GetStackIndex()));
+      __ popl(Address(ESP, destination.GetStackIndex()));
+      __ pushl(Address(ESP, source.GetHighStackIndex(kX86WordSize)));
+      __ popl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)));
     }
   }
 }
@@ -501,7 +503,7 @@
 }
 
 void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
-  // Will be generated at use site.
+  codegen_->Move(constant, constant->GetLocations()->Out(), nullptr);
 }
 
 void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
@@ -573,7 +575,7 @@
 
 void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
-  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(X86CpuLocation(EAX));
 
   InvokeDexCallingConventionVisitor calling_convention_visitor;
   for (size_t i = 0; i < invoke->InputCount(); i++) {
@@ -802,7 +804,6 @@
 }
 
 void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instruction) {
-  // Nothing to do, the parameter is already at its location.
 }
 
 void LocationsBuilderX86::VisitNot(HNot* instruction) {
@@ -829,15 +830,100 @@
 }
 
 void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction) {
-  LOG(FATAL) << "Unimplemented";
+  LOG(FATAL) << "Unreachable";
 }
 
 void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction) {
-  LOG(FATAL) << "Unimplemented";
+  LOG(FATAL) << "Unreachable";
 }
 
 void InstructionCodeGeneratorX86::VisitParallelMove(HParallelMove* instruction) {
-  LOG(FATAL) << "Unimplemented";
+  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
+}
+
+X86Assembler* ParallelMoveResolverX86::GetAssembler() const {
+  return codegen_->GetAssembler();
+}
+
+void ParallelMoveResolverX86::MoveMemoryToMemory(int dst, int src) {
+  ScratchRegisterScope ensure_scratch(
+      this, kNoRegister, codegen_->GetNumberOfCoreRegisters());
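+  // If the scratch register had to be spilled, the push moved ESP down one
+  // word, so the incoming ESP-relative offsets must be adjusted.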
+  int stack_offset = ensure_scratch.IsSpilled() ? kX86WordSize : 0;
+  __ movl(static_cast<Register>(ensure_scratch.GetRegister()), Address(ESP, src + stack_offset));
+  __ movl(Address(ESP, dst + stack_offset), static_cast<Register>(ensure_scratch.GetRegister()));
+}
+
+void ParallelMoveResolverX86::EmitMove(size_t index) {
+  MoveOperands* move = moves_.Get(index);
+  Location source = move->GetSource();
+  Location destination = move->GetDestination();
+
+  if (source.IsRegister()) {
+    if (destination.IsRegister()) {
+      __ movl(destination.AsX86().AsCpuRegister(), source.AsX86().AsCpuRegister());
+    } else {
+      DCHECK(destination.IsStackSlot());
+      __ movl(Address(ESP, destination.GetStackIndex()), source.AsX86().AsCpuRegister());
+    }
+  } else if (source.IsStackSlot()) {
+    if (destination.IsRegister()) {
+      __ movl(destination.AsX86().AsCpuRegister(), Address(ESP, source.GetStackIndex()));
+    } else {
+      DCHECK(destination.IsStackSlot());
+      MoveMemoryToMemory(destination.GetStackIndex(),
+                         source.GetStackIndex());
+    }
+  } else {
+    LOG(FATAL) << "Unimplemented";
+  }
+}
+
+void ParallelMoveResolverX86::Exchange(Register reg, int mem) {
+  ScratchRegisterScope ensure_scratch(this, reg, codegen_->GetNumberOfCoreRegisters());
+  int stack_offset = ensure_scratch.IsSpilled() ? kX86WordSize : 0;
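+  // Swap through the scratch register: mem -> scratch, reg -> mem, scratch -> reg.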
+  __ movl(static_cast<Register>(ensure_scratch.GetRegister()), Address(ESP, mem + stack_offset));
+  __ movl(Address(ESP, mem + stack_offset), reg);
+  __ movl(reg, static_cast<Register>(ensure_scratch.GetRegister()));
+}
+
+
+void ParallelMoveResolverX86::Exchange(int mem1, int mem2) {
+  ScratchRegisterScope ensure_scratch1(
+      this, kNoRegister, codegen_->GetNumberOfCoreRegisters());
+  ScratchRegisterScope ensure_scratch2(
+      this, ensure_scratch1.GetRegister(), codegen_->GetNumberOfCoreRegisters());
+  int stack_offset = ensure_scratch1.IsSpilled() ? kX86WordSize : 0;
+  stack_offset += ensure_scratch2.IsSpilled() ? kX86WordSize : 0;
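+  // Load both slots into the scratch registers, then store them back crosswise.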
+  __ movl(static_cast<Register>(ensure_scratch1.GetRegister()), Address(ESP, mem1 + stack_offset));
+  __ movl(static_cast<Register>(ensure_scratch2.GetRegister()), Address(ESP, mem2 + stack_offset));
+  __ movl(Address(ESP, mem2 + stack_offset), static_cast<Register>(ensure_scratch1.GetRegister()));
+  __ movl(Address(ESP, mem1 + stack_offset), static_cast<Register>(ensure_scratch2.GetRegister()));
+}
+
+void ParallelMoveResolverX86::EmitSwap(size_t index) {
+  MoveOperands* move = moves_.Get(index);
+  Location source = move->GetSource();
+  Location destination = move->GetDestination();
+
+  if (source.IsRegister() && destination.IsRegister()) {
+    __ xchgl(destination.AsX86().AsCpuRegister(), source.AsX86().AsCpuRegister());
+  } else if (source.IsRegister() && destination.IsStackSlot()) {
+    Exchange(source.AsX86().AsCpuRegister(), destination.GetStackIndex());
+  } else if (source.IsStackSlot() && destination.IsRegister()) {
+    Exchange(destination.AsX86().AsCpuRegister(), source.GetStackIndex());
+  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
+    Exchange(destination.GetStackIndex(), source.GetStackIndex());
+  } else {
+    LOG(FATAL) << "Unimplemented";
+  }
+}
+
+void ParallelMoveResolverX86::SpillScratch(int reg) {
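+  // Scratch registers are saved with a push and restored with a pop around
+  // the moves that use them (see ScratchRegisterScope).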
+  __ pushl(static_cast<Register>(reg));
+}
+
+void ParallelMoveResolverX86::RestoreScratch(int reg) {
+  __ popl(static_cast<Register>(reg));
 }
 
 }  // namespace x86
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 4a70636..acc670e 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -19,6 +19,7 @@
 
 #include "code_generator.h"
 #include "nodes.h"
+#include "parallel_move_resolver.h"
 #include "utils/x86/assembler_x86.h"
 
 namespace art {
@@ -59,6 +60,28 @@
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
 };
 
+class ParallelMoveResolverX86 : public ParallelMoveResolver {
+ public:
+  ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
+      : ParallelMoveResolver(allocator), codegen_(codegen) {}
+
+  virtual void EmitMove(size_t index) OVERRIDE;
+  virtual void EmitSwap(size_t index) OVERRIDE;
+  virtual void SpillScratch(int reg) OVERRIDE;
+  virtual void RestoreScratch(int reg) OVERRIDE;
+
+  X86Assembler* GetAssembler() const;
+
+ private:
+  void Exchange(Register reg, int mem);
+  void Exchange(int mem1, int mem2);
+  void MoveMemoryToMemory(int dst, int src);
+
+  CodeGeneratorX86* const codegen_;
+
+  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverX86);
+};
+
 class LocationsBuilderX86 : public HGraphVisitor {
  public:
   LocationsBuilderX86(HGraph* graph, CodeGeneratorX86* codegen)
@@ -105,6 +128,7 @@
   explicit CodeGeneratorX86(HGraph* graph);
   virtual ~CodeGeneratorX86() { }
 
+  virtual void ComputeFrameSize(size_t number_of_spill_slots) OVERRIDE;
   virtual void GenerateFrameEntry() OVERRIDE;
   virtual void GenerateFrameExit() OVERRIDE;
   virtual void Bind(Label* label) OVERRIDE;
@@ -145,6 +169,10 @@
   virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
   virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
 
+  ParallelMoveResolverX86* GetMoveResolver() {
+    return &move_resolver_;
+  }
+
  private:
   // Helper method to move a 32bits value between two locations.
   void Move32(Location destination, Location source);
@@ -153,6 +181,7 @@
 
   LocationsBuilderX86 location_builder_;
   InstructionCodeGeneratorX86 instruction_visitor_;
+  ParallelMoveResolverX86 move_resolver_;
   X86Assembler assembler_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86);
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 7684bb1..8ee775c 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -56,7 +56,7 @@
   ASSERT_NE(graph, nullptr);
   InternalCodeAllocator allocator;
   CodeGenerator* codegen = CodeGenerator::Create(&arena, graph, kX86);
-  codegen->Compile(&allocator);
+  codegen->CompileBaseline(&allocator);
   typedef int32_t (*fptr)();
 #if defined(__i386__)
   CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
@@ -66,7 +66,7 @@
   }
 #endif
   codegen = CodeGenerator::Create(&arena, graph, kArm);
-  codegen->Compile(&allocator);
+  codegen->CompileBaseline(&allocator);
 #if defined(__arm__)
   CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
   int32_t result = reinterpret_cast<fptr>(allocator.GetMemory())();
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 5c5042e..a49ce64 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -28,8 +28,15 @@
  */
 class HGraphVisualizerPrinter : public HGraphVisitor {
  public:
-  HGraphVisualizerPrinter(HGraph* graph, std::ostream& output, const CodeGenerator& codegen)
-      : HGraphVisitor(graph), output_(output), codegen_(codegen), indent_(0) {}
+  HGraphVisualizerPrinter(HGraph* graph,
+                          std::ostream& output,
+                          const char* pass_name,
+                          const CodeGenerator& codegen)
+      : HGraphVisitor(graph),
+        output_(output),
+        pass_name_(pass_name),
+        codegen_(codegen),
+        indent_(0) {}
 
   void StartTag(const char* name) {
     AddIndent();
@@ -94,6 +101,33 @@
     output_<< std::endl;
   }
 
+  void DumpLocation(Location location, Primitive::Type type) {
+    if (location.IsRegister()) {
+      if (type == Primitive::kPrimDouble || type == Primitive::kPrimFloat) {
+        codegen_.DumpFloatingPointRegister(output_, location.reg().RegId());
+      } else {
+        codegen_.DumpCoreRegister(output_, location.reg().RegId());
+      }
+    } else {
+      DCHECK(location.IsStackSlot());
+      output_ << location.GetStackIndex() << "(sp)";
+    }
+  }
+
+  void VisitParallelMove(HParallelMove* instruction) {
+    output_ << instruction->DebugName();
+    output_ << " (";
+    for (size_t i = 0, e = instruction->NumMoves(); i < e; ++i) {
+      MoveOperands* move = instruction->MoveOperandsAt(i);
+      DumpLocation(move->GetSource(), Primitive::kPrimInt);
+      output_ << " -> ";
+      DumpLocation(move->GetDestination(), Primitive::kPrimInt);
+      if (i + 1 != e) {
+        output_ << ", ";
+      }
+    }
+    output_ << ")";
+  }
 
   void VisitInstruction(HInstruction* instruction) {
     output_ << instruction->DebugName();
@@ -104,24 +138,28 @@
       }
       output_ << "]";
     }
-    if (instruction->GetLifetimePosition() != kNoLifetime) {
+    if (pass_name_ == kLivenessPassName && instruction->GetLifetimePosition() != kNoLifetime) {
       output_ << " (liveness: " << instruction->GetLifetimePosition();
       if (instruction->HasLiveInterval()) {
         output_ << " ";
         const LiveInterval& interval = *instruction->GetLiveInterval();
         interval.Dump(output_);
-        if (interval.HasRegister()) {
-          int reg = interval.GetRegister();
-          output_ << " ";
-          if (instruction->GetType() == Primitive::kPrimFloat
-              || instruction->GetType() == Primitive::kPrimDouble) {
-            codegen_.DumpFloatingPointRegister(output_, reg);
-          } else {
-            codegen_.DumpCoreRegister(output_, reg);
-          }
-        }
       }
       output_ << ")";
+    } else if (pass_name_ == kRegisterAllocatorPassName) {
+      LocationSummary* locations = instruction->GetLocations();
+      if (locations != nullptr) {
+        output_ << " ( ";
+        for (size_t i = 0; i < instruction->InputCount(); ++i) {
+          DumpLocation(locations->InAt(i), instruction->InputAt(i)->GetType());
+          output_ << " ";
+        }
+        output_ << ")";
+        if (locations->Out().IsValid()) {
+          output_ << " -> ";
+          DumpLocation(locations->Out(), instruction->GetType());
+        }
+      }
     }
   }
 
@@ -137,9 +175,9 @@
     }
   }
 
-  void Run(const char* pass_name) {
+  void Run() {
     StartTag("cfg");
-    PrintProperty("name", pass_name);
+    PrintProperty("name", pass_name_);
     VisitInsertionOrder();
     EndTag("cfg");
   }
@@ -188,6 +226,7 @@
 
  private:
   std::ostream& output_;
+  const char* pass_name_;
   const CodeGenerator& codegen_;
   size_t indent_;
 
@@ -209,7 +248,7 @@
   }
 
   is_enabled_ = true;
-  HGraphVisualizerPrinter printer(graph, *output_, codegen_);
+  HGraphVisualizerPrinter printer(graph, *output_, "", codegen_);
   printer.StartTag("compilation");
   printer.PrintProperty("name", pretty_name.c_str());
   printer.PrintProperty("method", pretty_name.c_str());
@@ -227,7 +266,7 @@
   }
 
   is_enabled_ = true;
-  HGraphVisualizerPrinter printer(graph, *output_, codegen_);
+  HGraphVisualizerPrinter printer(graph, *output_, "", codegen_);
   printer.StartTag("compilation");
   printer.PrintProperty("name", name);
   printer.PrintProperty("method", name);
@@ -239,8 +278,8 @@
   if (!is_enabled_) {
     return;
   }
-  HGraphVisualizerPrinter printer(graph_, *output_, codegen_);
-  printer.Run(pass_name);
+  HGraphVisualizerPrinter printer(graph_, *output_, pass_name, codegen_);
+  printer.Run();
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index 2638cf5..7cd74e9 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -25,6 +25,9 @@
 class DexCompilationUnit;
 class HGraph;
 
+static const char* kLivenessPassName = "liveness";
+static const char* kRegisterAllocatorPassName = "register";
+
 /**
  * If enabled, emits compilation information suitable for the c1visualizer tool
  * and IRHydra.
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 68848de..143d5c9 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -508,6 +508,7 @@
   void ReplaceWith(HInstruction* instruction);
 
 #define INSTRUCTION_TYPE_CHECK(type)                                           \
+  bool Is##type() { return (As##type() != nullptr); }                          \
   virtual H##type* As##type() { return nullptr; }
 
   FOR_EACH_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 3dc0928..ccacbef 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -85,6 +85,8 @@
   // For testing purposes, we put a special marker on method names that should be compiled
   // with this compiler. This makes sure we're not regressing.
   bool shouldCompile = dex_compilation_unit.GetSymbol().find("00024opt_00024") != std::string::npos;
+  bool shouldOptimize =
+      dex_compilation_unit.GetSymbol().find("00024reg_00024") != std::string::npos;
 
   ArenaPool pool;
   ArenaAllocator arena(&pool);
@@ -116,7 +118,36 @@
   visualizer.DumpGraph("builder");
 
   CodeVectorAllocator allocator;
-  codegen->Compile(&allocator);
+
+  if (RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+    graph->BuildDominatorTree();
+    graph->TransformToSSA();
+    visualizer.DumpGraph("ssa");
+
+    graph->FindNaturalLoops();
+    SsaLivenessAnalysis liveness(*graph, codegen);
+    liveness.Analyze();
+    visualizer.DumpGraph(kLivenessPassName);
+
+    RegisterAllocator register_allocator(graph->GetArena(), codegen, liveness);
+    register_allocator.AllocateRegisters();
+
+    visualizer.DumpGraph(kRegisterAllocatorPassName);
+    codegen->CompileOptimized(&allocator);
+  } else if (shouldOptimize && RegisterAllocator::Supports(instruction_set)) {
+    LOG(FATAL) << "Could not allocate registers in optimizing compiler";
+  } else {
+    codegen->CompileBaseline(&allocator);
+
+    // Run these phases to get some test coverage.
+    graph->BuildDominatorTree();
+    graph->TransformToSSA();
+    visualizer.DumpGraph("ssa");
+    graph->FindNaturalLoops();
+    SsaLivenessAnalysis liveness(*graph, codegen);
+    liveness.Analyze();
+    visualizer.DumpGraph(kLivenessPassName);
+  }
 
   std::vector<uint8_t> mapping_table;
   codegen->BuildMappingTable(&mapping_table);
@@ -125,19 +156,6 @@
   std::vector<uint8_t> gc_map;
   codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);
 
-  // Run these phases to get some test coverage.
-  graph->BuildDominatorTree();
-  graph->TransformToSSA();
-  visualizer.DumpGraph("ssa");
-
-  graph->FindNaturalLoops();
-  SsaLivenessAnalysis liveness(*graph, codegen);
-  liveness.Analyze();
-  visualizer.DumpGraph("liveness");
-
-  RegisterAllocator(graph->GetArena(), *codegen).AllocateRegisters(liveness);
-  visualizer.DumpGraph("register");
-
   return new CompiledMethod(GetCompilerDriver(),
                             instruction_set,
                             allocator.GetMemory(),
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index 3d2d136..4a1b6ce 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -147,4 +147,64 @@
   }
 }
 
+bool ParallelMoveResolver::IsScratchLocation(Location loc) {
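+  // `loc` can serve as a scratch location if no pending move still reads from
+  // it (it blocks no move) and some pending move writes to it, meaning its
+  // current value is about to be overwritten anyway.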
+  for (size_t i = 0; i < moves_.Size(); ++i) {
+    if (moves_.Get(i)->Blocks(loc)) {
+      return false;
+    }
+  }
+
+  for (size_t i = 0; i < moves_.Size(); ++i) {
+    if (moves_.Get(i)->GetDestination().Equals(loc)) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+int ParallelMoveResolver::AllocateScratchRegister(int blocked, int register_count, bool* spilled) {
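+  // Prefer a register whose value is dead (a scratch location); if none is
+  // available, fall back to any non-blocked register and report that it must
+  // be spilled and restored around the moves.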
+  int scratch = -1;
+  for (int reg = 0; reg < register_count; ++reg) {
+    if ((blocked != reg) &&
+        IsScratchLocation(Location::RegisterLocation(ManagedRegister(reg)))) {
+      scratch = reg;
+      break;
+    }
+  }
+
+  if (scratch == -1) {
+    *spilled = true;
+    for (int reg = 0; reg < register_count; ++reg) {
+      if (blocked != reg) {
+        scratch = reg;
+      }
+    }
+  } else {
+    *spilled = false;
+  }
+
+  return scratch;
+}
+
+
+ParallelMoveResolver::ScratchRegisterScope::ScratchRegisterScope(
+    ParallelMoveResolver* resolver, int blocked, int number_of_registers)
+    : resolver_(resolver),
+      reg_(kNoRegister),
+      spilled_(false) {
+  reg_ = resolver_->AllocateScratchRegister(blocked, number_of_registers, &spilled_);
+
+  if (spilled_) {
+    resolver->SpillScratch(reg_);
+  }
+}
+
+
+ParallelMoveResolver::ScratchRegisterScope::~ScratchRegisterScope() {
+  if (spilled_) {
+    resolver_->RestoreScratch(reg_);
+  }
+}
+
 }  // namespace art
diff --git a/compiler/optimizing/parallel_move_resolver.h b/compiler/optimizing/parallel_move_resolver.h
index ff20cb0..e1189d8 100644
--- a/compiler/optimizing/parallel_move_resolver.h
+++ b/compiler/optimizing/parallel_move_resolver.h
@@ -23,6 +23,7 @@
 namespace art {
 
 class HParallelMove;
+class Location;
 class MoveOperands;
 
 /**
@@ -39,15 +40,37 @@
   void EmitNativeCode(HParallelMove* parallel_move);
 
  protected:
+  class ScratchRegisterScope : public ValueObject {
+   public:
+    ScratchRegisterScope(ParallelMoveResolver* resolver, int blocked, int number_of_registers);
+    ~ScratchRegisterScope();
+
+    int GetRegister() const { return reg_; }
+    bool IsSpilled() const { return spilled_; }
+
+   private:
+    ParallelMoveResolver* resolver_;
+    int reg_;
+    bool spilled_;
+  };
+
+  bool IsScratchLocation(Location loc);
+  int AllocateScratchRegister(int blocked, int register_count, bool* spilled);
+
   // Emit a move.
   virtual void EmitMove(size_t index) = 0;
 
   // Execute a move by emitting a swap of two operands.
   virtual void EmitSwap(size_t index) = 0;
 
+  virtual void SpillScratch(int reg) = 0;
+  virtual void RestoreScratch(int reg) = 0;
+
   // List of moves not yet resolved.
   GrowableArray<MoveOperands*> moves_;
 
+  static constexpr int kNoRegister = -1;
+
  private:
   // Build the initial list of moves.
   void BuildInitialMoveList(HParallelMove* parallel_move);
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 88df24d..093856d 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -50,6 +50,9 @@
              << ")";
   }
 
+  virtual void SpillScratch(int reg) {}
+  virtual void RestoreScratch(int reg) {}
+
   std::string GetMessage() const {
     return  message_.str();
   }
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 8c6eb2a..c2a4769 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -24,64 +24,151 @@
 static constexpr size_t kMaxLifetimePosition = -1;
 static constexpr size_t kDefaultNumberOfSpillSlots = 4;
 
-RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator, const CodeGenerator& codegen)
+RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
+                                     CodeGenerator* codegen,
+                                     const SsaLivenessAnalysis& liveness)
       : allocator_(allocator),
         codegen_(codegen),
+        liveness_(liveness),
         unhandled_(allocator, 0),
         handled_(allocator, 0),
         active_(allocator, 0),
         inactive_(allocator, 0),
+        physical_register_intervals_(allocator, codegen->GetNumberOfRegisters()),
         spill_slots_(allocator, kDefaultNumberOfSpillSlots),
         processing_core_registers_(false),
         number_of_registers_(-1),
         registers_array_(nullptr),
-        blocked_registers_(allocator->AllocArray<bool>(codegen.GetNumberOfRegisters())) {
-  codegen.SetupBlockedRegisters(blocked_registers_);
+        blocked_registers_(allocator->AllocArray<bool>(codegen->GetNumberOfRegisters())) {
+  codegen->SetupBlockedRegisters(blocked_registers_);
+  physical_register_intervals_.SetSize(codegen->GetNumberOfRegisters());
 }
 
-static bool ShouldProcess(bool processing_core_registers, HInstruction* instruction) {
-  bool is_core_register = (instruction->GetType() != Primitive::kPrimDouble)
-      && (instruction->GetType() != Primitive::kPrimFloat);
+bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph,
+                                                InstructionSet instruction_set) {
+  if (!Supports(instruction_set)) {
+    return false;
+  }
+  for (size_t i = 0, e = graph.GetBlocks().Size(); i < e; ++i) {
+    for (HInstructionIterator it(graph.GetBlocks().Get(i)->GetInstructions());
+         !it.Done();
+         it.Advance()) {
+      HInstruction* current = it.Current();
+      if (current->NeedsEnvironment()) return false;
+      if (current->GetType() == Primitive::kPrimLong) return false;
+      if (current->GetType() == Primitive::kPrimFloat) return false;
+      if (current->GetType() == Primitive::kPrimDouble) return false;
+    }
+  }
+  return true;
+}
+
+static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
+  bool is_core_register = (interval->GetType() != Primitive::kPrimDouble)
+      && (interval->GetType() != Primitive::kPrimFloat);
   return processing_core_registers == is_core_register;
 }
 
-void RegisterAllocator::AllocateRegistersInternal(const SsaLivenessAnalysis& liveness) {
+void RegisterAllocator::AllocateRegisters() {
+  processing_core_registers_ = true;
+  AllocateRegistersInternal();
+  processing_core_registers_ = false;
+  AllocateRegistersInternal();
+
+  Resolve();
+
+  if (kIsDebugBuild) {
+    processing_core_registers_ = true;
+    ValidateInternal(true);
+    processing_core_registers_ = false;
+    ValidateInternal(true);
+  }
+}
+
+void RegisterAllocator::BlockRegister(Location location,
+                                      size_t start,
+                                      size_t end,
+                                      Primitive::Type type) {
+  int reg = location.reg().RegId();
+  LiveInterval* interval = physical_register_intervals_.Get(reg);
+  if (interval == nullptr) {
+    interval = LiveInterval::MakeFixedInterval(allocator_, reg, type);
+    physical_register_intervals_.Put(reg, interval);
+    inactive_.Add(interval);
+  }
+  DCHECK(interval->GetRegister() == reg);
+  interval->AddRange(start, end);
+}
+
+void RegisterAllocator::AllocateRegistersInternal() {
   number_of_registers_ = processing_core_registers_
-      ? codegen_.GetNumberOfCoreRegisters()
-      : codegen_.GetNumberOfFloatingPointRegisters();
+      ? codegen_->GetNumberOfCoreRegisters()
+      : codegen_->GetNumberOfFloatingPointRegisters();
 
   registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_);
 
   // Iterate post-order, to ensure the list is sorted, and the last added interval
   // is the one with the lowest start position.
-  for (size_t i = liveness.GetNumberOfSsaValues(); i > 0; --i) {
-    HInstruction* instruction = liveness.GetInstructionFromSsaIndex(i - 1);
-    if (ShouldProcess(processing_core_registers_, instruction)) {
-      LiveInterval* current = instruction->GetLiveInterval();
+  for (size_t i = liveness_.GetNumberOfSsaValues(); i > 0; --i) {
+    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i - 1);
+    LiveInterval* current = instruction->GetLiveInterval();
+    if (ShouldProcess(processing_core_registers_, current)) {
       DCHECK(unhandled_.IsEmpty() || current->StartsBefore(unhandled_.Peek()));
-      unhandled_.Add(current);
+
+      LocationSummary* locations = instruction->GetLocations();
+      if (locations->GetTempCount() != 0) {
+        // Note that we already filtered out instructions requiring temporaries in
+        // RegisterAllocator::CanAllocateRegistersFor.
+        LOG(FATAL) << "Unimplemented";
+      }
+
+      // Some instructions define their output in a fixed register or stack slot. We
+      // need to know these locations before doing register allocation. For a given
+      // register, we create an interval that covers the positions where it is used;
+      // the register will then be unavailable at these positions when trying to
+      // allocate a register for another interval.
+      //
+      // The backwards walking ensures the ranges are ordered on increasing start positions.
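+      // For example, on x86 the inputs of an HInvokeStatic are expected at the
+      // locations (registers or stack slots) fixed by the dex calling convention.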
+      Location output = locations->Out();
+      size_t position = instruction->GetLifetimePosition();
+      if (output.IsRegister()) {
+        // Shift the interval's start by one to account for the blocked register.
+        current->SetFrom(position + 1);
+        current->SetRegister(output.reg().RegId());
+        BlockRegister(output, position, position + 1, instruction->GetType());
+      } else if (output.IsStackSlot()) {
+        current->SetSpillSlot(output.GetStackIndex());
+      }
+      for (size_t i = 0; i < instruction->InputCount(); ++i) {
+        Location input = locations->InAt(i);
+        if (input.IsRegister()) {
+          BlockRegister(input, position, position + 1, instruction->InputAt(i)->GetType());
+        }
+      }
+
+      // Add the interval to the correct list.
+      if (current->HasRegister()) {
+        DCHECK(instruction->IsParameterValue());
+        inactive_.Add(current);
+      } else if (current->HasSpillSlot()) {
+        DCHECK(instruction->IsParameterValue());
+        // Split before first register use.
+        size_t first_register_use = current->FirstRegisterUse();
+        if (first_register_use != kNoLifetime) {
+          LiveInterval* split = Split(current, first_register_use - 1);
+          // The new interval may start at a later position than the other
+          // unhandled intervals; AddToUnhandled inserts it in sorted order.
+          AddToUnhandled(split);
+        } else {
+          // Nothing to do, we won't allocate a register for this value.
+        }
+      } else {
+        DCHECK(unhandled_.IsEmpty() || current->StartsBefore(unhandled_.Peek()));
+        unhandled_.Add(current);
+      }
     }
   }
 
   LinearScan();
-  if (kIsDebugBuild) {
-    ValidateInternal(liveness, true);
-  }
-}
-
-bool RegisterAllocator::ValidateInternal(const SsaLivenessAnalysis& liveness,
-                                         bool log_fatal_on_failure) const {
-  // To simplify unit testing, we eagerly create the array of intervals, and
-  // call the helper method.
-  GrowableArray<LiveInterval*> intervals(allocator_, 0);
-  for (size_t i = 0; i < liveness.GetNumberOfSsaValues(); ++i) {
-    HInstruction* instruction = liveness.GetInstructionFromSsaIndex(i);
-    if (ShouldProcess(processing_core_registers_, instruction)) {
-      intervals.Add(instruction->GetLiveInterval());
-    }
-  }
-  return ValidateIntervals(intervals, spill_slots_.Size(), codegen_, allocator_,
-                           processing_core_registers_, log_fatal_on_failure);
 }
 
 class AllRangesIterator : public ValueObject {
@@ -111,6 +198,28 @@
   DISALLOW_COPY_AND_ASSIGN(AllRangesIterator);
 };
 
+bool RegisterAllocator::ValidateInternal(bool log_fatal_on_failure) const {
+  // To simplify unit testing, we eagerly create the array of intervals, and
+  // call the helper method.
+  GrowableArray<LiveInterval*> intervals(allocator_, 0);
+  for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
+    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
+    if (ShouldProcess(processing_core_registers_, instruction->GetLiveInterval())) {
+      intervals.Add(instruction->GetLiveInterval());
+    }
+  }
+
+  for (size_t i = 0, e = physical_register_intervals_.Size(); i < e; ++i) {
+    LiveInterval* fixed = physical_register_intervals_.Get(i);
+    if (fixed != nullptr && ShouldProcess(processing_core_registers_, fixed)) {
+      intervals.Add(fixed);
+    }
+  }
+
+  return ValidateIntervals(intervals, spill_slots_.Size(), *codegen_, allocator_,
+                           processing_core_registers_, log_fatal_on_failure);
+}
+
 bool RegisterAllocator::ValidateIntervals(const GrowableArray<LiveInterval*>& intervals,
                                           size_t number_of_spill_slots,
                                           const CodeGenerator& codegen,
@@ -132,7 +241,10 @@
   for (size_t i = 0, e = intervals.Size(); i < e; ++i) {
     for (AllRangesIterator it(intervals.Get(i)); !it.Done(); it.Advance()) {
       LiveInterval* current = it.CurrentInterval();
-      if (current->GetParent()->HasSpillSlot()) {
+      HInstruction* defined_by = current->GetParent()->GetDefinedBy();
+      if (current->GetParent()->HasSpillSlot()
+           // Parameters have their own stack slot.
+           && !(defined_by != nullptr && defined_by->IsParameterValue())) {
         BitVector* liveness_of_spill_slot = liveness_of_values.Get(
             number_of_registers + current->GetParent()->GetSpillSlot() / kVRegSize);
         for (size_t j = it.CurrentRange()->GetStart(); j < it.CurrentRange()->GetEnd(); ++j) {
@@ -176,14 +288,14 @@
   return true;
 }
 
-void RegisterAllocator::DumpInterval(std::ostream& stream, LiveInterval* interval) {
+void RegisterAllocator::DumpInterval(std::ostream& stream, LiveInterval* interval) const {
   interval->Dump(stream);
   stream << ": ";
   if (interval->HasRegister()) {
     if (processing_core_registers_) {
-      codegen_.DumpCoreRegister(stream, interval->GetRegister());
+      codegen_->DumpCoreRegister(stream, interval->GetRegister());
     } else {
-      codegen_.DumpFloatingPointRegister(stream, interval->GetRegister());
+      codegen_->DumpFloatingPointRegister(stream, interval->GetRegister());
     }
   } else {
     stream << "spilled";
@@ -196,6 +308,7 @@
   while (!unhandled_.IsEmpty()) {
     // (1) Remove interval with the lowest start position from unhandled.
     LiveInterval* current = unhandled_.Pop();
+    DCHECK(!current->IsFixed() && !current->HasRegister() && !current->HasSpillSlot());
     size_t position = current->GetStart();
 
     // (2) Remove currently active intervals that are dead at this position.
@@ -255,13 +368,6 @@
     free_until[i] = kMaxLifetimePosition;
   }
 
-  // For each active interval, set its register to not free.
-  for (size_t i = 0, e = active_.Size(); i < e; ++i) {
-    LiveInterval* interval = active_.Get(i);
-    DCHECK(interval->HasRegister());
-    free_until[interval->GetRegister()] = 0;
-  }
-
   // For each inactive interval, set its register to be free until
   // the next intersection with `current`.
   // Thanks to SSA, this should only be needed for intervals
@@ -275,6 +381,13 @@
     }
   }
 
+  // For each active interval, set its register to not free.
+  for (size_t i = 0, e = active_.Size(); i < e; ++i) {
+    LiveInterval* interval = active_.Get(i);
+    DCHECK(interval->HasRegister());
+    free_until[interval->GetRegister()] = 0;
+  }
+
   // Pick the register that is free the longest.
   int reg = -1;
   for (size_t i = 0; i < number_of_registers_; ++i) {
@@ -330,9 +443,13 @@
   for (size_t i = 0, e = active_.Size(); i < e; ++i) {
     LiveInterval* active = active_.Get(i);
     DCHECK(active->HasRegister());
-    size_t use = active->FirstRegisterUseAfter(current->GetStart());
-    if (use != kNoLifetime) {
-      next_use[active->GetRegister()] = use;
+    if (active->IsFixed()) {
+      next_use[active->GetRegister()] = current->GetStart();
+    } else {
+      size_t use = active->FirstRegisterUseAfter(current->GetStart());
+      if (use != kNoLifetime) {
+        next_use[active->GetRegister()] = use;
+      }
     }
   }
 
@@ -343,9 +460,17 @@
   for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
     LiveInterval* inactive = inactive_.Get(i);
     DCHECK(inactive->HasRegister());
-    size_t use = inactive->FirstRegisterUseAfter(current->GetStart());
-    if (use != kNoLifetime) {
-      next_use[inactive->GetRegister()] = use;
+    size_t next_intersection = inactive->FirstIntersectionWith(current);
+    if (next_intersection != kNoLifetime) {
+      if (inactive->IsFixed()) {
+        next_use[inactive->GetRegister()] =
+            std::min(next_intersection, next_use[inactive->GetRegister()]);
+      } else {
+        size_t use = inactive->FirstRegisterUseAfter(current->GetStart());
+        if (use != kNoLifetime) {
+          next_use[inactive->GetRegister()] = std::min(use, next_use[inactive->GetRegister()]);
+        }
+      }
     }
   }
 
@@ -374,6 +499,7 @@
     for (size_t i = 0, e = active_.Size(); i < e; ++i) {
       LiveInterval* active = active_.Get(i);
       if (active->GetRegister() == reg) {
+        DCHECK(!active->IsFixed());
         LiveInterval* split = Split(active, current->GetStart());
         active_.DeleteAt(i);
         handled_.Add(active);
@@ -385,11 +511,19 @@
     for (size_t i = 0; i < inactive_.Size(); ++i) {
       LiveInterval* inactive = inactive_.Get(i);
       if (inactive->GetRegister() == reg) {
-        LiveInterval* split = Split(inactive, current->GetStart());
-        inactive_.DeleteAt(i);
-        handled_.Add(inactive);
-        AddToUnhandled(split);
-        --i;
+        size_t next_intersection = inactive->FirstIntersectionWith(current);
+        if (next_intersection != kNoLifetime) {
+          if (inactive->IsFixed()) {
+            LiveInterval* split = Split(current, next_intersection);
+            AddToUnhandled(split);
+          } else {
+            LiveInterval* split = Split(inactive, current->GetStart());
+            inactive_.DeleteAt(i);
+            handled_.Add(inactive);
+            AddToUnhandled(split);
+            --i;
+          }
+        }
       }
     }
 
@@ -398,13 +532,15 @@
 }
 
 void RegisterAllocator::AddToUnhandled(LiveInterval* interval) {
+  size_t insert_at = 0;
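+  // `unhandled_` is sorted with the lowest start position last. If no interval
+  // starts after `interval`, then `interval` has the highest start position
+  // and belongs at the front, hence the default of 0.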
   for (size_t i = unhandled_.Size(); i > 0; --i) {
     LiveInterval* current = unhandled_.Get(i - 1);
     if (current->StartsAfter(interval)) {
-      unhandled_.InsertAt(i, interval);
+      insert_at = i;
       break;
     }
   }
+  unhandled_.InsertAt(insert_at, interval);
 }
 
 LiveInterval* RegisterAllocator::Split(LiveInterval* interval, size_t position) {
@@ -429,7 +565,13 @@
     return;
   }
 
-  // Find when this instruction dies.
+  HInstruction* defined_by = parent->GetDefinedBy();
+  if (defined_by->IsParameterValue()) {
+    // Parameters have their own stack slot.
+    parent->SetSpillSlot(codegen_->GetStackSlotOfParameter(defined_by->AsParameterValue()));
+    return;
+  }
+
   LiveInterval* last_sibling = interval;
   while (last_sibling->GetNextSibling() != nullptr) {
     last_sibling = last_sibling->GetNextSibling();
@@ -451,7 +593,315 @@
     spill_slots_.Put(slot, end);
   }
 
-  interval->GetParent()->SetSpillSlot(slot * kVRegSize);
+  parent->SetSpillSlot(slot * kVRegSize);
+}
+
+static Location ConvertToLocation(LiveInterval* interval) {
+  if (interval->HasRegister()) {
+    return Location::RegisterLocation(ManagedRegister(interval->GetRegister()));
+  } else {
+    DCHECK(interval->GetParent()->HasSpillSlot());
+    return Location::StackSlot(interval->GetParent()->GetSpillSlot());
+  }
+}
+
+// We create a special marker for input moves to differentiate them from
+// moves created during resolution. They must be different instructions
+// because the input moves work on the assumption that the interval moves
+// have been executed.
+static constexpr size_t kInputMoveLifetimePosition = 0;
+static bool IsInputMove(HInstruction* instruction) {
+  return instruction->GetLifetimePosition() == kInputMoveLifetimePosition;
+}
+
+void RegisterAllocator::AddInputMoveFor(HInstruction* instruction,
+                                        Location source,
+                                        Location destination) const {
+  if (source.Equals(destination)) return;
+
+  DCHECK(instruction->AsPhi() == nullptr);
+
+  HInstruction* previous = instruction->GetPrevious();
+  HParallelMove* move = nullptr;
+  if (previous == nullptr
+      || previous->AsParallelMove() == nullptr
+      || !IsInputMove(previous)) {
+    move = new (allocator_) HParallelMove(allocator_);
+    move->SetLifetimePosition(kInputMoveLifetimePosition);
+    instruction->GetBlock()->InsertInstructionBefore(move, instruction);
+  } else {
+    move = previous->AsParallelMove();
+  }
+  DCHECK(IsInputMove(move));
+  move->AddMove(new (allocator_) MoveOperands(source, destination));
+}
+
+void RegisterAllocator::InsertParallelMoveAt(size_t position,
+                                             Location source,
+                                             Location destination) const {
+  if (source.Equals(destination)) return;
+
+  HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
+  if (at == nullptr) {
+    // Block boundary: don't do anything, the connection of split siblings
+    // will handle it.
+    return;
+  }
+  HParallelMove* move;
+  if ((position & 1) == 1) {
+    // Move must happen after the instruction.
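+    // (Lifetime positions are even at instructions; an odd position falls in
+    // the gap between an instruction and the next one.)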
+    DCHECK(!at->IsControlFlow());
+    move = at->GetNext()->AsParallelMove();
+    if (move == nullptr || IsInputMove(move)) {
+      move = new (allocator_) HParallelMove(allocator_);
+      move->SetLifetimePosition(position);
+      at->GetBlock()->InsertInstructionBefore(move, at->GetNext());
+    }
+  } else {
+    // Move must happen before the instruction.
+    HInstruction* previous = at->GetPrevious();
+    if (previous != nullptr && previous->AsParallelMove() != nullptr) {
+      if (IsInputMove(previous)) {
+        previous = previous->GetPrevious();
+      }
+    }
+    if (previous == nullptr || previous->AsParallelMove() == nullptr) {
+      move = new (allocator_) HParallelMove(allocator_);
+      move->SetLifetimePosition(position);
+      at->GetBlock()->InsertInstructionBefore(move, at);
+    } else {
+      move = previous->AsParallelMove();
+    }
+  }
+  move->AddMove(new (allocator_) MoveOperands(source, destination));
+}
+
+void RegisterAllocator::InsertParallelMoveAtExitOf(HBasicBlock* block,
+                                                   Location source,
+                                                   Location destination) const {
+  if (source.Equals(destination)) return;
+
+  DCHECK_EQ(block->GetSuccessors().Size(), 1u);
+  HInstruction* last = block->GetLastInstruction();
+  HInstruction* previous = last->GetPrevious();
+  HParallelMove* move;
+  if (previous == nullptr || previous->AsParallelMove() == nullptr) {
+    move = new (allocator_) HParallelMove(allocator_);
+    block->InsertInstructionBefore(move, last);
+  } else {
+    move = previous->AsParallelMove();
+  }
+  move->AddMove(new (allocator_) MoveOperands(source, destination));
+}
+
+void RegisterAllocator::InsertParallelMoveAtEntryOf(HBasicBlock* block,
+                                                    Location source,
+                                                    Location destination) const {
+  if (source.Equals(destination)) return;
+
+  HInstruction* first = block->GetFirstInstruction();
+  HParallelMove* move = first->AsParallelMove();
+  if (move == nullptr || IsInputMove(move)) {
+    move = new (allocator_) HParallelMove(allocator_);
+    move->SetLifetimePosition(block->GetLifetimeStart());
+    block->InsertInstructionBefore(move, first);
+  }
+  move->AddMove(new (allocator_) MoveOperands(source, destination));
+}
+
+void RegisterAllocator::InsertMoveAfter(HInstruction* instruction,
+                                        Location source,
+                                        Location destination) const {
+  if (source.Equals(destination)) return;
+
+  if (instruction->AsPhi() != nullptr) {
+    InsertParallelMoveAtEntryOf(instruction->GetBlock(), source, destination);
+    return;
+  }
+
+  HParallelMove* move = instruction->GetNext()->AsParallelMove();
+  if (move == nullptr || IsInputMove(move)) {
+    move = new (allocator_) HParallelMove(allocator_);
+    instruction->GetBlock()->InsertInstructionBefore(move, instruction->GetNext());
+  }
+  move->AddMove(new (allocator_) MoveOperands(source, destination));
+}
+
+void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
+  LiveInterval* current = interval;
+  if (current->HasSpillSlot() && current->HasRegister()) {
+    // We spill eagerly, so move must be at definition.
+    InsertMoveAfter(interval->GetDefinedBy(),
+                    Location::RegisterLocation(ManagedRegister(interval->GetRegister())),
+                    Location::StackSlot(interval->GetParent()->GetSpillSlot()));
+  }
+  UsePosition* use = current->GetFirstUse();
+
+  // Walk over all siblings, updating locations of use positions, and
+  // connecting them when they are adjacent.
+  do {
+    Location source = ConvertToLocation(current);
+
+    // Walk over all uses covered by this interval, and update the location
+    // information.
+    while (use != nullptr && use->GetPosition() <= current->GetEnd()) {
+      if (!use->GetIsEnvironment()) {
+        LocationSummary* locations = use->GetUser()->GetLocations();
+        Location expected_location = locations->InAt(use->GetInputIndex());
+        if (expected_location.IsUnallocated()) {
+          locations->SetInAt(use->GetInputIndex(), source);
+        } else {
+          AddInputMoveFor(use->GetUser(), source, expected_location);
+        }
+      }
+      use = use->GetNext();
+    }
+
+    // If the next interval starts just after this one, and has a register,
+    // insert a move.
+    LiveInterval* next_sibling = current->GetNextSibling();
+    if (next_sibling != nullptr
+        && next_sibling->HasRegister()
+        && current->GetEnd() == next_sibling->GetStart()) {
+      Location destination = ConvertToLocation(next_sibling);
+      InsertParallelMoveAt(current->GetEnd(), source, destination);
+    }
+    current = next_sibling;
+  } while (current != nullptr);
+  DCHECK(use == nullptr);
+}
+
+void RegisterAllocator::ConnectSplitSiblings(LiveInterval* interval,
+                                             HBasicBlock* from,
+                                             HBasicBlock* to) const {
+  if (interval->GetNextSibling() == nullptr) {
+    // Nothing to connect. The whole range was allocated to the same location.
+    return;
+  }
+
+  size_t from_position = from->GetLifetimeEnd() - 1;
+  size_t to_position = to->GetLifetimeStart();
+
+  LiveInterval* destination = nullptr;
+  LiveInterval* source = nullptr;
+
+  LiveInterval* current = interval;
+
+  // Check the intervals that cover `from` and `to`.
+  while ((current != nullptr) && (source == nullptr || destination == nullptr)) {
+    if (current->Covers(from_position)) {
+      DCHECK(source == nullptr);
+      source = current;
+    }
+    if (current->Covers(to_position)) {
+      DCHECK(destination == nullptr);
+      destination = current;
+    }
+
+    current = current->GetNextSibling();
+  }
+
+  if (destination == source) {
+    // Interval was not split.
+    return;
+  }
+
+  if (!destination->HasRegister()) {
+    // Values are eagerly spilled. Spill slot already contains appropriate value.
+    return;
+  }
+
+  // If `from` has only one successor, we can put the moves at the exit of it. Otherwise
+  // we need to put the moves at the entry of `to`.
+  if (from->GetSuccessors().Size() == 1) {
+    InsertParallelMoveAtExitOf(from, ConvertToLocation(source), ConvertToLocation(destination));
+  } else {
+    DCHECK_EQ(to->GetPredecessors().Size(), 1u);
+    InsertParallelMoveAtEntryOf(to, ConvertToLocation(source), ConvertToLocation(destination));
+  }
+}
+
+// Returns the location of `interval`, or siblings of `interval`, at `position`.
+static Location FindLocationAt(LiveInterval* interval, size_t position) {
+  LiveInterval* current = interval;
+  while (!current->Covers(position)) {
+    current = current->GetNextSibling();
+    DCHECK(current != nullptr);
+  }
+  return ConvertToLocation(current);
+}
+
+void RegisterAllocator::Resolve() {
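+  // Allocation is complete, so the number of spill slots is now known and the
+  // frame size can be computed before locations are resolved.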
+  codegen_->ComputeFrameSize(spill_slots_.Size());
+
+  // Adjust the `Out()` location of each instruction.
+  // TODO: Use pointers of Location inside LiveInterval to avoid doing another iteration.
+  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
+    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
+    LiveInterval* current = instruction->GetLiveInterval();
+    LocationSummary* locations = instruction->GetLocations();
+    Location location = locations->Out();
+    if (instruction->AsParameterValue() != nullptr) {
+      // Now that we know the frame size, adjust the parameter's location.
+      if (location.IsStackSlot()) {
+        location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+        current->SetSpillSlot(location.GetStackIndex());
+        locations->SetOut(location);
+      } else if (location.IsDoubleStackSlot()) {
+        location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+        current->SetSpillSlot(location.GetStackIndex());
+        locations->SetOut(location);
+      } else if (current->HasSpillSlot()) {
+        current->SetSpillSlot(current->GetSpillSlot() + codegen_->GetFrameSize());
+      }
+    }
+
+    Location source = ConvertToLocation(current);
+
+    if (location.IsUnallocated()) {
+      if (location.GetPolicy() == Location::kSameAsFirstInput) {
+        locations->SetInAt(0, source);
+      }
+      locations->SetOut(source);
+    } else {
+      DCHECK(source.Equals(location));
+    }
+  }
+
+  // Connect siblings.
+  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
+    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
+    ConnectSiblings(instruction->GetLiveInterval());
+  }
+
+  // Resolve non-linear control flow across branches. Order does not matter.
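+  // A value live at the entry of `block` may have been split in a predecessor,
+  // e.g. allocated to a register at the predecessor's exit but to a stack slot
+  // at the entry of `block`; insert the move connecting the two siblings.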
+  for (HLinearOrderIterator it(liveness_); !it.Done(); it.Advance()) {
+    HBasicBlock* block = it.Current();
+    BitVector* live = liveness_.GetLiveInSet(*block);
+    for (uint32_t idx : live->Indexes()) {
+      HInstruction* current = liveness_.GetInstructionFromSsaIndex(idx);
+      LiveInterval* interval = current->GetLiveInterval();
+      for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) {
+        ConnectSplitSiblings(interval, block->GetPredecessors().Get(i), block);
+      }
+    }
+  }
+
+  // Resolve phi inputs. Order does not matter.
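+  // The move for each phi input is inserted at the exit of the corresponding
+  // predecessor. The DCHECK below checks that the predecessor has a single
+  // successor, i.e. that the incoming edge is not critical.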
+  for (HLinearOrderIterator it(liveness_); !it.Done(); it.Advance()) {
+    HBasicBlock* current = it.Current();
+    for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+      HInstruction* phi = inst_it.Current();
+      for (size_t i = 0, e = current->GetPredecessors().Size(); i < e; ++i) {
+        HBasicBlock* predecessor = current->GetPredecessors().Get(i);
+        DCHECK_EQ(predecessor->GetSuccessors().Size(), 1u);
+        HInstruction* input = phi->InputAt(i);
+        Location source = FindLocationAt(input->GetLiveInterval(),
+                                         predecessor->GetLastInstruction()->GetLifetimePosition());
+        Location destination = ConvertToLocation(phi->GetLiveInterval());
+        InsertParallelMoveAtExitOf(predecessor, source, destination);
+      }
+    }
+  }
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 3393a04..1b5585f 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -23,7 +23,12 @@
 namespace art {
 
 class CodeGenerator;
+class HBasicBlock;
+class HGraph;
+class HInstruction;
+class HParallelMove;
 class LiveInterval;
+class Location;
 class SsaLivenessAnalysis;
 
 /**
@@ -31,26 +36,23 @@
  */
 class RegisterAllocator {
  public:
-  RegisterAllocator(ArenaAllocator* allocator, const CodeGenerator& codegen);
+  RegisterAllocator(ArenaAllocator* allocator,
+                    CodeGenerator* codegen,
+                    const SsaLivenessAnalysis& analysis);
 
   // Main entry point for the register allocator. Given the liveness analysis,
   // allocates registers to live intervals.
-  void AllocateRegisters(const SsaLivenessAnalysis& liveness) {
-    processing_core_registers_ = true;
-    AllocateRegistersInternal(liveness);
-    processing_core_registers_ = false;
-    AllocateRegistersInternal(liveness);
-  }
+  void AllocateRegisters();
 
   // Validate that the register allocator did not allocate the same register to
   // intervals that intersect each other. Returns false if it did not.
-  bool Validate(const SsaLivenessAnalysis& liveness, bool log_fatal_on_failure) {
+  bool Validate(bool log_fatal_on_failure) {
     processing_core_registers_ = true;
-    if (!ValidateInternal(liveness, log_fatal_on_failure)) {
+    if (!ValidateInternal(log_fatal_on_failure)) {
       return false;
     }
     processing_core_registers_ = false;
-    return ValidateInternal(liveness, log_fatal_on_failure);
+    return ValidateInternal(log_fatal_on_failure);
   }
 
   // Helper method for validation. Used by unit testing.
@@ -61,11 +63,21 @@
                                 bool processing_core_registers,
                                 bool log_fatal_on_failure);
 
+  static bool CanAllocateRegistersFor(const HGraph& graph, InstructionSet instruction_set);
+  static bool Supports(InstructionSet instruction_set) {
+    return instruction_set == kX86;
+  }
+
+  size_t GetNumberOfSpillSlots() const {
+    return spill_slots_.Size();
+  }
+
  private:
   // Main methods of the allocator.
   void LinearScan();
   bool TryAllocateFreeReg(LiveInterval* interval);
   bool AllocateBlockedReg(LiveInterval* interval);
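+  // Post-allocation pass: computes the frame size, updates instruction
+  // locations, and inserts the moves connecting split siblings and phi inputs.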
+  void Resolve();
 
   // Add `interval` in the sorted list of unhandled intervals.
   void AddToUnhandled(LiveInterval* interval);
@@ -76,16 +88,33 @@
   // Returns whether `reg` is blocked by the code generator.
   bool IsBlocked(int reg) const;
 
+  // Update the interval for the register in `location` to cover [start, end).
+  void BlockRegister(Location location, size_t start, size_t end, Primitive::Type type);
+
   // Allocate a spill slot for the given interval.
   void AllocateSpillSlotFor(LiveInterval* interval);
 
+  // Connect adjacent siblings within blocks.
+  void ConnectSiblings(LiveInterval* interval);
+
+  // Connect siblings between block entries and exits.
+  void ConnectSplitSiblings(LiveInterval* interval, HBasicBlock* from, HBasicBlock* to) const;
+
+  // Helper methods to insert parallel moves in the graph.
+  void InsertParallelMoveAtExitOf(HBasicBlock* block, Location source, Location destination) const;
+  void InsertParallelMoveAtEntryOf(HBasicBlock* block, Location source, Location destination) const;
+  void InsertMoveAfter(HInstruction* instruction, Location source, Location destination) const;
+  void AddInputMoveFor(HInstruction* instruction, Location source, Location destination) const;
+  void InsertParallelMoveAt(size_t position, Location source, Location destination) const;
+
   // Helper methods.
-  void AllocateRegistersInternal(const SsaLivenessAnalysis& liveness);
-  bool ValidateInternal(const SsaLivenessAnalysis& liveness, bool log_fatal_on_failure) const;
-  void DumpInterval(std::ostream& stream, LiveInterval* interval);
+  void AllocateRegistersInternal();
+  bool ValidateInternal(bool log_fatal_on_failure) const;
+  void DumpInterval(std::ostream& stream, LiveInterval* interval) const;
 
   ArenaAllocator* const allocator_;
-  const CodeGenerator& codegen_;
+  CodeGenerator* const codegen_;
+  const SsaLivenessAnalysis& liveness_;
 
   // List of intervals that must be processed, ordered by start position. Last entry
   // is the interval that has the lowest start position.
@@ -102,6 +131,10 @@
   // That is, they have a lifetime hole that spans the start of the new interval.
   GrowableArray<LiveInterval*> inactive_;
 
+  // Fixed intervals for physical registers. Such an interval covers the positions
+  // where an instruction requires a specific register.
+  GrowableArray<LiveInterval*> physical_register_intervals_;
+
   // The spill slots allocated for live intervals.
   GrowableArray<size_t> spill_slots_;
 
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index ff9b9be..bfabc5a 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -43,9 +43,9 @@
   CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
   SsaLivenessAnalysis liveness(*graph, codegen);
   liveness.Analyze();
-  RegisterAllocator register_allocator(&allocator, *codegen);
-  register_allocator.AllocateRegisters(liveness);
-  return register_allocator.Validate(liveness, false);
+  RegisterAllocator register_allocator(&allocator, codegen, liveness);
+  register_allocator.AllocateRegisters();
+  return register_allocator.Validate(false);
 }
 
 /**
@@ -300,9 +300,9 @@
   CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
   SsaLivenessAnalysis liveness(*graph, codegen);
   liveness.Analyze();
-  RegisterAllocator register_allocator(&allocator, *codegen);
-  register_allocator.AllocateRegisters(liveness);
-  ASSERT_TRUE(register_allocator.Validate(liveness, false));
+  RegisterAllocator register_allocator(&allocator, codegen, liveness);
+  register_allocator.AllocateRegisters();
+  ASSERT_TRUE(register_allocator.Validate(false));
 
   HBasicBlock* loop_header = graph->GetBlocks().Get(2);
   HPhi* phi = loop_header->GetFirstPhi()->AsPhi();
@@ -314,7 +314,7 @@
   ASSERT_NE(phi_interval->GetRegister(), loop_update->GetRegister());
 
   HBasicBlock* return_block = graph->GetBlocks().Get(3);
-  HReturn* ret = return_block->GetFirstInstruction()->AsReturn();
+  HReturn* ret = return_block->GetLastInstruction()->AsReturn();
   ASSERT_EQ(phi_interval->GetRegister(), ret->InputAt(0)->GetLiveInterval()->GetRegister());
 }
 
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 7903ad6..fc3eb66 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -172,6 +172,7 @@
       // Last use is in the following block.
       first_range_->start_ = start_block_position;
     } else {
+      DCHECK_GT(first_range_->GetStart(), position);
       // There is a hole in the interval. Create a new range.
       first_range_ = new (allocator_) LiveRange(start_block_position, position, first_range_);
     }
@@ -192,6 +193,7 @@
       // There is a use in the following block.
       first_range_->start_ = start;
     } else {
+      DCHECK_GT(first_range_->GetStart(), end);
       // There is a hole in the interval. Create a new range.
       first_range_ = new (allocator_) LiveRange(start, end, first_range_);
     }
diff --git a/test/404-optimizing-allocator/expected.txt b/test/404-optimizing-allocator/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/404-optimizing-allocator/expected.txt
diff --git a/test/404-optimizing-allocator/info.txt b/test/404-optimizing-allocator/info.txt
new file mode 100644
index 0000000..930d42f
--- /dev/null
+++ b/test/404-optimizing-allocator/info.txt
@@ -0,0 +1 @@
+Initial tests for the optimizing compiler's register allocator.
diff --git a/test/404-optimizing-allocator/src/Main.java b/test/404-optimizing-allocator/src/Main.java
new file mode 100644
index 0000000..60477f9
--- /dev/null
+++ b/test/404-optimizing-allocator/src/Main.java
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$reg$ is a marker for the optimizing compiler to ensure
+// that it uses its register allocator.
+
+public class Main {
+  public static void main(String[] args) {
+
+    expectEquals(4, $opt$reg$TestLostCopy());
+    expectEquals(-10, $opt$reg$TestTwoLive());
+    expectEquals(-20, $opt$reg$TestThreeLive());
+    expectEquals(5, $opt$reg$TestFourLive());
+    expectEquals(10, $opt$reg$TestMultipleLive());
+    expectEquals(1, $opt$reg$TestWithBreakAndContinue());
+    expectEquals(-15, $opt$reg$TestSpillInIf(5, 6, 7));
+    expectEquals(-567, $opt$reg$TestAggressiveLive(1, 2, 3, 4, 5, 6, 7));
+  }
+
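+  // The classic "lost copy" problem: the copy `b = a` must happen before `a`
+  // is incremented on every iteration.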
+  public static int $opt$reg$TestLostCopy() {
+    int a = 0;
+    int b = 0;
+    do {
+      b = a;
+      a++;
+    } while (a != 5);
+    return b;
+  }
+
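+  // The following loops keep an increasing number of values live simultaneously.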
+  public static int $opt$reg$TestTwoLive() {
+    int a = 0;
+    int b = 0;
+    do {
+      a++;
+      b += 3;
+    } while (a != 5);
+    return a - b;
+  }
+
+  public static int $opt$reg$TestThreeLive() {
+    int a = 0;
+    int b = 0;
+    int c = 0;
+    do {
+      a++;
+      b += 3;
+      c += 2;
+    } while (a != 5);
+    return a - b - c;
+  }
+
+  public static int $opt$reg$TestFourLive() {
+    int a = 0;
+    int b = 0;
+    int c = 0;
+    int d = 0;
+    do {
+      a++;
+      b += 3;
+      c += 2;
+      d++;
+    } while (a != 5);
+    return d;
+  }
+
+  public static int $opt$reg$TestMultipleLive() {
+    int a = 0;
+    int b = 0;
+    int c = 0;
+    int d = 0;
+    int e = 0;
+    int f = 0;
+    int g = 0;
+    do {
+      a++;
+      b++;
+      c++;
+      d++;
+      e += 3;
+      f += 2;
+      g += 2;
+    } while (a != 5);
+    return f;
+  }
+
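+  // The break and continue add extra control-flow edges inside the loop.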
+  public static int $opt$reg$TestWithBreakAndContinue() {
+    int a = 0;
+    int b = 0;
+    do {
+      a++;
+      if (a == 2) {
+        continue;
+      }
+      b++;
+      if (a == 5) {
+        break;
+      }
+    } while (true);
+    return a - b;
+  }
+
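+  // Keeps enough values live across the `if` that some are expected to spill.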
+  public static int $opt$reg$TestSpillInIf(int a, int b, int c) {
+    int d = 0;
+    int e = 0;
+    if (a == 5) {
+      b++;
+      c++;
+      d += 2;
+      e += 3;
+    }
+
+    return a - b - c - d - e;
+  }
+
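+  // Stresses the allocator by keeping many values live across nested loops.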
+  public static int $opt$reg$TestAggressiveLive(int a, int b, int c, int d, int e, int f, int g) {
+    int h = a - b;
+    int i = c - d;
+    int j = e - f;
+    int k = 42 + g - a;
+    do {
+      b++;
+      while (k != 1) {
+        --k;
+        ++i;
+        if (i == 9) {
+          ++i;
+        }
+        j += 5;
+      }
+      k = 9;
+      h++;
+    } while (h != 5);
+    return a - b - c - d - e - f - g - h - i - j - k;
+  }
+
+  public static void expectEquals(int expected, int value) {
+    if (expected != value) {
+      throw new Error("Expected: " + expected + ", got: " + value);
+    }
+  }
+}