Move assembler out of runtime into compiler/utils.

Also clean up other bits of the directory layout. There is still work to do to
separate quick and portable in some files (e.g. argument visitor, proxy, ...).

Change-Id: If8fecffda8ba5c4c47a035f0c622c538c6b58351
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
new file mode 100644
index 0000000..0778cd3
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -0,0 +1,1895 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_arm.h"
+
+#include "base/logging.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "offsets.h"
+#include "thread.h"
+#include "utils.h"
+
+namespace art {
+namespace arm {
+
+// Instruction encoding bits.
+enum {
+  H   = 1 << 5,   // halfword (or byte)
+  L   = 1 << 20,  // load (or store)
+  S   = 1 << 20,  // set condition code (or leave unchanged)
+  W   = 1 << 21,  // writeback base register (or leave unchanged)
+  A   = 1 << 21,  // accumulate in multiply instruction (or not)
+  B   = 1 << 22,  // unsigned byte (or word)
+  N   = 1 << 22,  // long (or short)
+  U   = 1 << 23,  // positive (or negative) offset/index
+  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
+  I   = 1 << 25,  // immediate shifter operand (or not)
+
+  B0 = 1,
+  B1 = 1 << 1,
+  B2 = 1 << 2,
+  B3 = 1 << 3,
+  B4 = 1 << 4,
+  B5 = 1 << 5,
+  B6 = 1 << 6,
+  B7 = 1 << 7,
+  B8 = 1 << 8,
+  B9 = 1 << 9,
+  B10 = 1 << 10,
+  B11 = 1 << 11,
+  B12 = 1 << 12,
+  B16 = 1 << 16,
+  B17 = 1 << 17,
+  B18 = 1 << 18,
+  B19 = 1 << 19,
+  B20 = 1 << 20,
+  B21 = 1 << 21,
+  B22 = 1 << 22,
+  B23 = 1 << 23,
+  B24 = 1 << 24,
+  B25 = 1 << 25,
+  B26 = 1 << 26,
+  B27 = 1 << 27,
+
+  // Instruction bit masks.
+  RdMask = 15 << 12,  // in str instruction
+  CondMask = 15 << 28,
+  CoprocessorMask = 15 << 8,
+  OpCodeMask = 15 << 21,  // in data-processing instructions
+  Imm24Mask = (1 << 24) - 1,
+  Off12Mask = (1 << 12) - 1,
+
+  // ldrex/strex register field encodings.
+  kLdExRnShift = 16,
+  kLdExRtShift = 12,
+  kStrExRnShift = 16,
+  kStrExRdShift = 12,
+  kStrExRtShift = 0,
+};
+
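+// As an illustration of how these bits combine (assuming the standard ARM
+// data-processing opcode values, e.g. ADD = 4), "add r0, r1, #1" assembles
+// to 0xe2810001:
+//   AL (0xe) << kConditionShift   -> 0xe0000000
+//   I (immediate shifter operand) -> 0x02000000
+//   ADD << kOpcodeShift           -> 0x00800000
+//   R1 << kRnShift                -> 0x00010000
+//   R0 << kRdShift                -> 0x00000000
+//   rotate 0, imm8 = 1            -> 0x00000001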
+
+static const char* kRegisterNames[] = {
+  "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+  "fp", "ip", "sp", "lr", "pc"
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs) {
+  if (rhs >= R0 && rhs <= PC) {
+    os << kRegisterNames[rhs];
+  } else {
+    os << "Register[" << static_cast<int>(rhs) << "]";
+  }
+  return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
+  if (rhs >= S0 && rhs < kNumberOfSRegisters) {
+    os << "s" << static_cast<int>(rhs);
+  } else {
+    os << "SRegister[" << static_cast<int>(rhs) << "]";
+  }
+  return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
+  if (rhs >= D0 && rhs < kNumberOfDRegisters) {
+    os << "d" << static_cast<int>(rhs);
+  } else {
+    os << "DRegister[" << static_cast<int>(rhs) << "]";
+  }
+  return os;
+}
+
+
+static const char* kConditionNames[] = {
+  "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT",
+  "LE", "AL",
+};
+std::ostream& operator<<(std::ostream& os, const Condition& rhs) {
+  if (rhs >= EQ && rhs <= AL) {
+    os << kConditionNames[rhs];
+  } else {
+    os << "Condition[" << static_cast<int>(rhs) << "]";
+  }
+  return os;
+}
+
+void ArmAssembler::Emit(int32_t value) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  buffer_.Emit<int32_t>(value);
+}
+
+
+void ArmAssembler::EmitType01(Condition cond,
+                              int type,
+                              Opcode opcode,
+                              int set_cc,
+                              Register rn,
+                              Register rd,
+                              ShifterOperand so) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+                     type << kTypeShift |
+                     static_cast<int32_t>(opcode) << kOpcodeShift |
+                     set_cc << kSShift |
+                     static_cast<int32_t>(rn) << kRnShift |
+                     static_cast<int32_t>(rd) << kRdShift |
+                     so.encoding();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitType5(Condition cond, int offset, bool link) {
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+                     5 << kTypeShift |
+                     (link ? 1 : 0) << kLinkShift;
+  Emit(ArmAssembler::EncodeBranchOffset(offset, encoding));
+}
+
+
+void ArmAssembler::EmitMemOp(Condition cond,
+                             bool load,
+                             bool byte,
+                             Register rd,
+                             Address ad) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B26 |
+                     (load ? L : 0) |
+                     (byte ? B : 0) |
+                     (static_cast<int32_t>(rd) << kRdShift) |
+                     ad.encoding();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitMemOpAddressMode3(Condition cond,
+                                         int32_t mode,
+                                         Register rd,
+                                         Address ad) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B22  |
+                     mode |
+                     (static_cast<int32_t>(rd) << kRdShift) |
+                     ad.encoding3();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitMultiMemOp(Condition cond,
+                                  BlockAddressMode am,
+                                  bool load,
+                                  Register base,
+                                  RegList regs) {
+  CHECK_NE(base, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 |
+                     am |
+                     (load ? L : 0) |
+                     (static_cast<int32_t>(base) << kRnShift) |
+                     regs;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitShiftImmediate(Condition cond,
+                                      Shift opcode,
+                                      Register rd,
+                                      Register rm,
+                                      ShifterOperand so) {
+  CHECK_NE(cond, kNoCondition);
+  CHECK_EQ(so.type(), 1U);
+  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+                     static_cast<int32_t>(MOV) << kOpcodeShift |
+                     static_cast<int32_t>(rd) << kRdShift |
+                     so.encoding() << kShiftImmShift |
+                     static_cast<int32_t>(opcode) << kShiftShift |
+                     static_cast<int32_t>(rm);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitShiftRegister(Condition cond,
+                                     Shift opcode,
+                                     Register rd,
+                                     Register rm,
+                                     ShifterOperand so) {
+  CHECK_NE(cond, kNoCondition);
+  CHECK_EQ(so.type(), 0U);
+  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+                     static_cast<int32_t>(MOV) << kOpcodeShift |
+                     static_cast<int32_t>(rd) << kRdShift |
+                     so.encoding() << kShiftRegisterShift |
+                     static_cast<int32_t>(opcode) << kShiftShift |
+                     B4 |
+                     static_cast<int32_t>(rm);
+  Emit(encoding);
+}
+
+
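+// Unresolved branches to the same label form a singly linked list threaded
+// through their offset fields: each newly linked branch stores the buffer
+// position of the previous one (label->position_), and Bind() later walks
+// that chain, patching in the real offsets as it goes.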
+void ArmAssembler::EmitBranch(Condition cond, Label* label, bool link) {
+  if (label->IsBound()) {
+    EmitType5(cond, label->Position() - buffer_.Size(), link);
+  } else {
+    int position = buffer_.Size();
+    // Use the offset field of the branch instruction for linking the sites.
+    EmitType5(cond, label->position_, link);
+    label->LinkTo(position);
+  }
+}
+
+void ArmAssembler::and_(Register rd, Register rn, ShifterOperand so,
+                        Condition cond) {
+  EmitType01(cond, so.type(), AND, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::eor(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), EOR, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::sub(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), SUB, 0, rn, rd, so);
+}
+
+void ArmAssembler::rsb(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), RSB, 0, rn, rd, so);
+}
+
+void ArmAssembler::rsbs(Register rd, Register rn, ShifterOperand so,
+                        Condition cond) {
+  EmitType01(cond, so.type(), RSB, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::add(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), ADD, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::adds(Register rd, Register rn, ShifterOperand so,
+                        Condition cond) {
+  EmitType01(cond, so.type(), ADD, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::subs(Register rd, Register rn, ShifterOperand so,
+                        Condition cond) {
+  EmitType01(cond, so.type(), SUB, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::adc(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), ADC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::sbc(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), SBC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::rsc(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), RSC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::tst(Register rn, ShifterOperand so, Condition cond) {
+  CHECK_NE(rn, PC);  // Reserve tst pc instruction for exception handler marker.
+  EmitType01(cond, so.type(), TST, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::teq(Register rn, ShifterOperand so, Condition cond) {
+  CHECK_NE(rn, PC);  // Reserve teq pc instruction for exception handler marker.
+  EmitType01(cond, so.type(), TEQ, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::cmp(Register rn, ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), CMP, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::cmn(Register rn, ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), CMN, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::orr(Register rd, Register rn,
+                       ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), ORR, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::orrs(Register rd, Register rn,
+                        ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), ORR, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::mov(Register rd, ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), MOV, 0, R0, rd, so);
+}
+
+
+void ArmAssembler::movs(Register rd, ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), MOV, 1, R0, rd, so);
+}
+
+
+void ArmAssembler::bic(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), BIC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::mvn(Register rd, ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), MVN, 0, R0, rd, so);
+}
+
+
+void ArmAssembler::mvns(Register rd, ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), MVN, 1, R0, rd, so);
+}
+
+
+void ArmAssembler::clz(Register rd, Register rm, Condition cond) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(rm, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  CHECK_NE(rd, PC);
+  CHECK_NE(rm, PC);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B24 | B22 | B21 | (0xf << 16) |
+                     (static_cast<int32_t>(rd) << kRdShift) |
+                     (0xf << 8) | B4 | static_cast<int32_t>(rm);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::movw(Register rd, uint16_t imm16, Condition cond) {
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+                     B25 | B24 | ((imm16 >> 12) << 16) |
+                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::movt(Register rd, uint16_t imm16, Condition cond) {
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+                     B25 | B24 | B22 | ((imm16 >> 12) << 16) |
+                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitMulOp(Condition cond, int32_t opcode,
+                             Register rd, Register rn,
+                             Register rm, Register rs) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(rn, kNoRegister);
+  CHECK_NE(rm, kNoRegister);
+  CHECK_NE(rs, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = opcode |
+      (static_cast<int32_t>(cond) << kConditionShift) |
+      (static_cast<int32_t>(rn) << kRnShift) |
+      (static_cast<int32_t>(rd) << kRdShift) |
+      (static_cast<int32_t>(rs) << kRsShift) |
+      B7 | B4 |
+      (static_cast<int32_t>(rm) << kRmShift);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::mul(Register rd, Register rn, Register rm, Condition cond) {
+  // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
+  EmitMulOp(cond, 0, R0, rd, rn, rm);
+}
+
+
+void ArmAssembler::mla(Register rd, Register rn, Register rm, Register ra,
+                       Condition cond) {
+  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
+  EmitMulOp(cond, B21, ra, rd, rn, rm);
+}
+
+
+void ArmAssembler::mls(Register rd, Register rn, Register rm, Register ra,
+                       Condition cond) {
+  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
+  EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
+}
+
+
+void ArmAssembler::umull(Register rd_lo, Register rd_hi, Register rn,
+                         Register rm, Condition cond) {
+  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
+  EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
+}
+
+
+void ArmAssembler::ldr(Register rd, Address ad, Condition cond) {
+  EmitMemOp(cond, true, false, rd, ad);
+}
+
+
+void ArmAssembler::str(Register rd, Address ad, Condition cond) {
+  EmitMemOp(cond, false, false, rd, ad);
+}
+
+
+void ArmAssembler::ldrb(Register rd, Address ad, Condition cond) {
+  EmitMemOp(cond, true, true, rd, ad);
+}
+
+
+void ArmAssembler::strb(Register rd, Address ad, Condition cond) {
+  EmitMemOp(cond, false, true, rd, ad);
+}
+
+
+void ArmAssembler::ldrh(Register rd, Address ad, Condition cond) {
+  EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
+}
+
+
+void ArmAssembler::strh(Register rd, Address ad, Condition cond) {
+  EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldrsb(Register rd, Address ad, Condition cond) {
+  EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldrsh(Register rd, Address ad, Condition cond) {
+  EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldrd(Register rd, Address ad, Condition cond) {
+  CHECK_EQ(rd % 2, 0);
+  EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, ad);
+}
+
+
+void ArmAssembler::strd(Register rd, Address ad, Condition cond) {
+  CHECK_EQ(rd % 2, 0);
+  EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldm(BlockAddressMode am,
+                       Register base,
+                       RegList regs,
+                       Condition cond) {
+  EmitMultiMemOp(cond, am, true, base, regs);
+}
+
+
+void ArmAssembler::stm(BlockAddressMode am,
+                       Register base,
+                       RegList regs,
+                       Condition cond) {
+  EmitMultiMemOp(cond, am, false, base, regs);
+}
+
+
+void ArmAssembler::ldrex(Register rt, Register rn, Condition cond) {
+  CHECK_NE(rn, kNoRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B24 |
+                     B23 |
+                     L   |
+                     (static_cast<int32_t>(rn) << kLdExRnShift) |
+                     (static_cast<int32_t>(rt) << kLdExRtShift) |
+                     B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::strex(Register rd,
+                         Register rt,
+                         Register rn,
+                         Condition cond) {
+  CHECK_NE(rn, kNoRegister);
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B24 |
+                     B23 |
+                     (static_cast<int32_t>(rn) << kStrExRnShift) |
+                     (static_cast<int32_t>(rd) << kStrExRdShift) |
+                     B11 | B10 | B9 | B8 | B7 | B4 |
+                     (static_cast<int32_t>(rt) << kStrExRtShift);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::clrex() {
+  int32_t encoding = (kSpecialCondition << kConditionShift) |
+                     B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::nop(Condition cond) {
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B25 | B24 | B21 | (0xf << 12);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovsr(SRegister sn, Register rt, Condition cond) {
+  CHECK_NE(sn, kNoSRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt, SP);
+  CHECK_NE(rt, PC);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 |
+                     ((static_cast<int32_t>(sn) >> 1)*B16) |
+                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovrs(Register rt, SRegister sn, Condition cond) {
+  CHECK_NE(sn, kNoSRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt, SP);
+  CHECK_NE(rt, PC);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 | B20 |
+                     ((static_cast<int32_t>(sn) >> 1)*B16) |
+                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovsrr(SRegister sm, Register rt, Register rt2,
+                           Condition cond) {
+  CHECK_NE(sm, kNoSRegister);
+  CHECK_NE(sm, S31);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt, SP);
+  CHECK_NE(rt, PC);
+  CHECK_NE(rt2, kNoRegister);
+  CHECK_NE(rt2, SP);
+  CHECK_NE(rt2, PC);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B22 |
+                     (static_cast<int32_t>(rt2)*B16) |
+                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
+                     (static_cast<int32_t>(sm) >> 1);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovrrs(Register rt, Register rt2, SRegister sm,
+                           Condition cond) {
+  CHECK_NE(sm, kNoSRegister);
+  CHECK_NE(sm, S31);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt, SP);
+  CHECK_NE(rt, PC);
+  CHECK_NE(rt2, kNoRegister);
+  CHECK_NE(rt2, SP);
+  CHECK_NE(rt2, PC);
+  CHECK_NE(rt, rt2);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B22 | B20 |
+                     (static_cast<int32_t>(rt2)*B16) |
+                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
+                     (static_cast<int32_t>(sm) >> 1);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovdrr(DRegister dm, Register rt, Register rt2,
+                           Condition cond) {
+  CHECK_NE(dm, kNoDRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt, SP);
+  CHECK_NE(rt, PC);
+  CHECK_NE(rt2, kNoRegister);
+  CHECK_NE(rt2, SP);
+  CHECK_NE(rt2, PC);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B22 |
+                     (static_cast<int32_t>(rt2)*B16) |
+                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
+                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
+                     (static_cast<int32_t>(dm) & 0xf);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovrrd(Register rt, Register rt2, DRegister dm,
+                           Condition cond) {
+  CHECK_NE(dm, kNoDRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt, SP);
+  CHECK_NE(rt, PC);
+  CHECK_NE(rt2, kNoRegister);
+  CHECK_NE(rt2, SP);
+  CHECK_NE(rt2, PC);
+  CHECK_NE(rt, rt2);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B22 | B20 |
+                     (static_cast<int32_t>(rt2)*B16) |
+                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
+                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
+                     (static_cast<int32_t>(dm) & 0xf);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vldrs(SRegister sd, Address ad, Condition cond) {
+  CHECK_NE(sd, kNoSRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B24 | B20 |
+                     ((static_cast<int32_t>(sd) & 1)*B22) |
+                     ((static_cast<int32_t>(sd) >> 1)*B12) |
+                     B11 | B9 | ad.vencoding();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vstrs(SRegister sd, Address ad, Condition cond) {
+  CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
+  CHECK_NE(sd, kNoSRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B24 |
+                     ((static_cast<int32_t>(sd) & 1)*B22) |
+                     ((static_cast<int32_t>(sd) >> 1)*B12) |
+                     B11 | B9 | ad.vencoding();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vldrd(DRegister dd, Address ad, Condition cond) {
+  CHECK_NE(dd, kNoDRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B24 | B20 |
+                     ((static_cast<int32_t>(dd) >> 4)*B22) |
+                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
+                     B11 | B9 | B8 | ad.vencoding();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vstrd(DRegister dd, Address ad, Condition cond) {
+  CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
+  CHECK_NE(dd, kNoDRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B24 |
+                     ((static_cast<int32_t>(dd) >> 4)*B22) |
+                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
+                     B11 | B9 | B8 | ad.vencoding();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitVFPsss(Condition cond, int32_t opcode,
+                              SRegister sd, SRegister sn, SRegister sm) {
+  CHECK_NE(sd, kNoSRegister);
+  CHECK_NE(sn, kNoSRegister);
+  CHECK_NE(sm, kNoSRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 | B11 | B9 | opcode |
+                     ((static_cast<int32_t>(sd) & 1)*B22) |
+                     ((static_cast<int32_t>(sn) >> 1)*B16) |
+                     ((static_cast<int32_t>(sd) >> 1)*B12) |
+                     ((static_cast<int32_t>(sn) & 1)*B7) |
+                     ((static_cast<int32_t>(sm) & 1)*B5) |
+                     (static_cast<int32_t>(sm) >> 1);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitVFPddd(Condition cond, int32_t opcode,
+                              DRegister dd, DRegister dn, DRegister dm) {
+  CHECK_NE(dd, kNoDRegister);
+  CHECK_NE(dn, kNoDRegister);
+  CHECK_NE(dm, kNoDRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 | B11 | B9 | B8 | opcode |
+                     ((static_cast<int32_t>(dd) >> 4)*B22) |
+                     ((static_cast<int32_t>(dn) & 0xf)*B16) |
+                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
+                     ((static_cast<int32_t>(dn) >> 4)*B7) |
+                     ((static_cast<int32_t>(dm) >> 4)*B5) |
+                     (static_cast<int32_t>(dm) & 0xf);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
+  EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
+}
+
+
+bool ArmAssembler::vmovs(SRegister sd, float s_imm, Condition cond) {
+  uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
+  if (((imm32 & ((1 << 19) - 1)) == 0) &&
+      ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
+       (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) - 1)))) {
+    uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
+        ((imm32 >> 19) & ((1 << 6) - 1));
+    EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf),
+               sd, S0, S0);
+    return true;
+  }
+  return false;
+}
+
+
+bool ArmAssembler::vmovd(DRegister dd, double d_imm, Condition cond) {
+  uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
+  if (((imm64 & ((1LL << 48) - 1)) == 0) &&
+      ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
+       (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) - 1)))) {
+    uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
+        ((imm64 >> 48) & ((1 << 6) - 1));
+    EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf),
+               dd, D0, D0);
+    return true;
+  }
+  return false;
+}
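+
+// Only values representable in VFP's 8-bit immediate format (a sign bit, a
+// 3-bit exponent with inverted leading bit, and a 4-bit fraction) take the
+// fast path above: 1.0f is 0x3f800000, which passes the checks and yields
+// imm8 = 0x70, while 0.1f (0x3dcccccd) fails them and the caller must load
+// the constant some other way.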
+
+
+void ArmAssembler::vadds(SRegister sd, SRegister sn, SRegister sm,
+                         Condition cond) {
+  EmitVFPsss(cond, B21 | B20, sd, sn, sm);
+}
+
+
+void ArmAssembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
+                         Condition cond) {
+  EmitVFPddd(cond, B21 | B20, dd, dn, dm);
+}
+
+
+void ArmAssembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
+                         Condition cond) {
+  EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
+}
+
+
+void ArmAssembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
+                         Condition cond) {
+  EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
+}
+
+
+void ArmAssembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
+                         Condition cond) {
+  EmitVFPsss(cond, B21, sd, sn, sm);
+}
+
+
+void ArmAssembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
+                         Condition cond) {
+  EmitVFPddd(cond, B21, dd, dn, dm);
+}
+
+
+void ArmAssembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
+                         Condition cond) {
+  EmitVFPsss(cond, 0, sd, sn, sm);
+}
+
+
+void ArmAssembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
+                         Condition cond) {
+  EmitVFPddd(cond, 0, dd, dn, dm);
+}
+
+
+void ArmAssembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
+                         Condition cond) {
+  EmitVFPsss(cond, B6, sd, sn, sm);
+}
+
+
+void ArmAssembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
+                         Condition cond) {
+  EmitVFPddd(cond, B6, dd, dn, dm);
+}
+
+
+void ArmAssembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
+                         Condition cond) {
+  EmitVFPsss(cond, B23, sd, sn, sm);
+}
+
+
+void ArmAssembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
+                         Condition cond) {
+  EmitVFPddd(cond, B23, dd, dn, dm);
+}
+
+
+void ArmAssembler::vabss(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
+  EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
+  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
+}
+
+void ArmAssembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
+  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::EmitVFPsd(Condition cond, int32_t opcode,
+                             SRegister sd, DRegister dm) {
+  CHECK_NE(sd, kNoSRegister);
+  CHECK_NE(dm, kNoDRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 | B11 | B9 | opcode |
+                     ((static_cast<int32_t>(sd) & 1)*B22) |
+                     ((static_cast<int32_t>(sd) >> 1)*B12) |
+                     ((static_cast<int32_t>(dm) >> 4)*B5) |
+                     (static_cast<int32_t>(dm) & 0xf);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitVFPds(Condition cond, int32_t opcode,
+                             DRegister dd, SRegister sm) {
+  CHECK_NE(dd, kNoDRegister);
+  CHECK_NE(sm, kNoSRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 | B11 | B9 | opcode |
+                     ((static_cast<int32_t>(dd) >> 4)*B22) |
+                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
+                     ((static_cast<int32_t>(sm) & 1)*B5) |
+                     (static_cast<int32_t>(sm) >> 1);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
+  EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
+}
+
+
+void ArmAssembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
+  EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
+}
+
+
+void ArmAssembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
+  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
+}
+
+
+void ArmAssembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
+  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
+}
+
+
+void ArmAssembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
+  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
+}
+
+
+void ArmAssembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
+  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
+}
+
+
+void ArmAssembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
+  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::vcmpsz(SRegister sd, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
+}
+
+
+void ArmAssembler::vcmpdz(DRegister dd, Condition cond) {
+  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
+}
+
+
+void ArmAssembler::vmstat(Condition cond) {  // VMRS APSR_nzcv, FPSCR
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
+                     (static_cast<int32_t>(PC)*B12) |
+                     B11 | B9 | B4;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::svc(uint32_t imm24) {
+  CHECK(IsUint(24, imm24)) << imm24;
+  int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::bkpt(uint16_t imm16) {
+  int32_t encoding = (AL << kConditionShift) | B24 | B21 |
+                     ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::b(Label* label, Condition cond) {
+  EmitBranch(cond, label, false);
+}
+
+
+void ArmAssembler::bl(Label* label, Condition cond) {
+  EmitBranch(cond, label, true);
+}
+
+
+void ArmAssembler::blx(Register rm, Condition cond) {
+  CHECK_NE(rm, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B24 | B21 | (0xfff << 8) | B5 | B4 |
+                     (static_cast<int32_t>(rm) << kRmShift);
+  Emit(encoding);
+}
+
+void ArmAssembler::bx(Register rm, Condition cond) {
+  CHECK_NE(rm, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B24 | B21 | (0xfff << 8) | B4 |
+                     (static_cast<int32_t>(rm) << kRmShift);
+  Emit(encoding);
+}
+
+void ArmAssembler::MarkExceptionHandler(Label* label) {
+  EmitType01(AL, 1, TST, 1, PC, R0, ShifterOperand(0));
+  Label l;
+  b(&l);
+  EmitBranch(AL, label, false);
+  Bind(&l);
+}
+
+
+void ArmAssembler::Bind(Label* label) {
+  CHECK(!label->IsBound());
+  int bound_pc = buffer_.Size();
+  while (label->IsLinked()) {
+    int32_t position = label->Position();
+    int32_t next = buffer_.Load<int32_t>(position);
+    int32_t encoded = ArmAssembler::EncodeBranchOffset(bound_pc - position, next);
+    buffer_.Store<int32_t>(position, encoded);
+    label->position_ = ArmAssembler::DecodeBranchOffset(next);
+  }
+  label->BindTo(bound_pc);
+}
+
+
+void ArmAssembler::EncodeUint32InTstInstructions(uint32_t data) {
+  // TODO: Consider using movw ip, <16 bits>.
+  while (!IsUint(8, data)) {
+    tst(R0, ShifterOperand(data & 0xFF), VS);
+    data >>= 8;
+  }
+  tst(R0, ShifterOperand(data), MI);
+}
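+
+// For example, encoding 0x12345678 emits, low byte first:
+//   tstvs r0, #0x78
+//   tstvs r0, #0x56
+//   tstvs r0, #0x34
+//   tstmi r0, #0x12
+// with the MI-predicated instruction marking the final, most significant byte.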
+
+
+int32_t ArmAssembler::EncodeBranchOffset(int offset, int32_t inst) {
+  // The offset is off by 8 due to the way the ARM CPUs read PC.
+  offset -= 8;
+  CHECK_ALIGNED(offset, 4);
+  CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset;
+
+  // Properly preserve only the bits supported in the instruction.
+  offset >>= 2;
+  offset &= kBranchOffsetMask;
+  return (inst & ~kBranchOffsetMask) | offset;
+}
+
+
+int ArmAssembler::DecodeBranchOffset(int32_t inst) {
+  // Sign-extend, left-shift by 2, then add 8.
+  return ((((inst & kBranchOffsetMask) << 8) >> 6) + 8);
+}
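+
+// Round-trip example: a branch to the very next instruction has offset 4;
+// EncodeBranchOffset stores (4 - 8) >> 2 = -1 (imm24 = 0xffffff), and
+// DecodeBranchOffset sign-extends that back to -4 and re-adds 8, giving 4.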
+
+void ArmAssembler::AddConstant(Register rd, int32_t value, Condition cond) {
+  AddConstant(rd, rd, value, cond);
+}
+
+
+void ArmAssembler::AddConstant(Register rd, Register rn, int32_t value,
+                               Condition cond) {
+  if (value == 0) {
+    if (rd != rn) {
+      mov(rd, ShifterOperand(rn), cond);
+    }
+    return;
+  }
+  // We prefer the shorter code sequence over using add for positive values
+  // and sub for negative ones, even though the latter would slightly improve
+  // the readability of the generated code for some constants.
+  ShifterOperand shifter_op;
+  if (ShifterOperand::CanHold(value, &shifter_op)) {
+    add(rd, rn, shifter_op, cond);
+  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
+    sub(rd, rn, shifter_op, cond);
+  } else {
+    CHECK(rn != IP);
+    if (ShifterOperand::CanHold(~value, &shifter_op)) {
+      mvn(IP, shifter_op, cond);
+      add(rd, rn, ShifterOperand(IP), cond);
+    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
+      mvn(IP, shifter_op, cond);
+      sub(rd, rn, ShifterOperand(IP), cond);
+    } else {
+      movw(IP, Low16Bits(value), cond);
+      uint16_t value_high = High16Bits(value);
+      if (value_high != 0) {
+        movt(IP, value_high, cond);
+      }
+      add(rd, rn, ShifterOperand(IP), cond);
+    }
+  }
+}
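+
+// For instance (assuming the usual ARM rotated-immediate rules):
+//   AddConstant(R0, R1, 0x100)      emits  add r0, r1, #0x100
+//   AddConstant(R0, R1, -0x100)     emits  sub r0, r1, #0x100
+//   AddConstant(R0, R1, 0x12345678) emits  movw ip, #0x5678
+//                                          movt ip, #0x1234
+//                                          add r0, r1, ip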
+
+
+void ArmAssembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
+                                       Condition cond) {
+  ShifterOperand shifter_op;
+  if (ShifterOperand::CanHold(value, &shifter_op)) {
+    adds(rd, rn, shifter_op, cond);
+  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
+    subs(rd, rn, shifter_op, cond);
+  } else {
+    CHECK(rn != IP);
+    if (ShifterOperand::CanHold(~value, &shifter_op)) {
+      mvn(IP, shifter_op, cond);
+      adds(rd, rn, ShifterOperand(IP), cond);
+    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
+      mvn(IP, shifter_op, cond);
+      subs(rd, rn, ShifterOperand(IP), cond);
+    } else {
+      movw(IP, Low16Bits(value), cond);
+      uint16_t value_high = High16Bits(value);
+      if (value_high != 0) {
+        movt(IP, value_high, cond);
+      }
+      adds(rd, rn, ShifterOperand(IP), cond);
+    }
+  }
+}
+
+
+void ArmAssembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
+  ShifterOperand shifter_op;
+  if (ShifterOperand::CanHold(value, &shifter_op)) {
+    mov(rd, shifter_op, cond);
+  } else if (ShifterOperand::CanHold(~value, &shifter_op)) {
+    mvn(rd, shifter_op, cond);
+  } else {
+    movw(rd, Low16Bits(value), cond);
+    uint16_t value_high = High16Bits(value);
+    if (value_high != 0) {
+      movt(rd, value_high, cond);
+    }
+  }
+}
+
+
+bool Address::CanHoldLoadOffset(LoadOperandType type, int offset) {
+  switch (type) {
+    case kLoadSignedByte:
+    case kLoadSignedHalfword:
+    case kLoadUnsignedHalfword:
+    case kLoadWordPair:
+      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
+    case kLoadUnsignedByte:
+    case kLoadWord:
+      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
+    case kLoadSWord:
+    case kLoadDWord:
+      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+      return false;
+  }
+}
+
+
+bool Address::CanHoldStoreOffset(StoreOperandType type, int offset) {
+  switch (type) {
+    case kStoreHalfword:
+    case kStoreWordPair:
+      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
+    case kStoreByte:
+    case kStoreWord:
+      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
+    case kStoreSWord:
+    case kStoreDWord:
+      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+      return false;
+  }
+}
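+
+// In concrete terms: addressing mode 3 carries a split 8-bit immediate, so
+// |offset| <= 255; addressing mode 2 carries a 12-bit immediate, so
+// |offset| <= 4095. The VFP modes encode the offset as an 8-bit word count
+// (at most 1020 bytes, word-aligned); the 10-bit check above is a close
+// approximation, with vencoding() assumed to enforce the exact constraints.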
+
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset.
+void ArmAssembler::LoadFromOffset(LoadOperandType type,
+                                  Register reg,
+                                  Register base,
+                                  int32_t offset,
+                                  Condition cond) {
+  if (!Address::CanHoldLoadOffset(type, offset)) {
+    CHECK(base != IP);
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldLoadOffset(type, offset));
+  switch (type) {
+    case kLoadSignedByte:
+      ldrsb(reg, Address(base, offset), cond);
+      break;
+    case kLoadUnsignedByte:
+      ldrb(reg, Address(base, offset), cond);
+      break;
+    case kLoadSignedHalfword:
+      ldrsh(reg, Address(base, offset), cond);
+      break;
+    case kLoadUnsignedHalfword:
+      ldrh(reg, Address(base, offset), cond);
+      break;
+    case kLoadWord:
+      ldr(reg, Address(base, offset), cond);
+      break;
+    case kLoadWordPair:
+      ldrd(reg, Address(base, offset), cond);
+      break;
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+  }
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
+void ArmAssembler::LoadSFromOffset(SRegister reg,
+                                   Register base,
+                                   int32_t offset,
+                                   Condition cond) {
+  if (!Address::CanHoldLoadOffset(kLoadSWord, offset)) {
+    CHECK_NE(base, IP);
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldLoadOffset(kLoadSWord, offset));
+  vldrs(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
+void ArmAssembler::LoadDFromOffset(DRegister reg,
+                                   Register base,
+                                   int32_t offset,
+                                   Condition cond) {
+  if (!Address::CanHoldLoadOffset(kLoadDWord, offset)) {
+    CHECK_NE(base, IP);
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldLoadOffset(kLoadDWord, offset));
+  vldrd(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset.
+void ArmAssembler::StoreToOffset(StoreOperandType type,
+                                 Register reg,
+                                 Register base,
+                                 int32_t offset,
+                                 Condition cond) {
+  if (!Address::CanHoldStoreOffset(type, offset)) {
+    CHECK(reg != IP);
+    CHECK(base != IP);
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldStoreOffset(type, offset));
+  switch (type) {
+    case kStoreByte:
+      strb(reg, Address(base, offset), cond);
+      break;
+    case kStoreHalfword:
+      strh(reg, Address(base, offset), cond);
+      break;
+    case kStoreWord:
+      str(reg, Address(base, offset), cond);
+      break;
+    case kStoreWordPair:
+      strd(reg, Address(base, offset), cond);
+      break;
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+  }
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreToOffset.
+void ArmAssembler::StoreSToOffset(SRegister reg,
+                                  Register base,
+                                  int32_t offset,
+                                  Condition cond) {
+  if (!Address::CanHoldStoreOffset(kStoreSWord, offset)) {
+    CHECK_NE(base, IP);
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldStoreOffset(kStoreSWord, offset));
+  vstrs(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreSToOffset.
+void ArmAssembler::StoreDToOffset(DRegister reg,
+                                  Register base,
+                                  int32_t offset,
+                                  Condition cond) {
+  if (!Address::CanHoldStoreOffset(kStoreDWord, offset)) {
+    CHECK_NE(base, IP);
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldStoreOffset(kStoreDWord, offset));
+  vstrd(reg, Address(base, offset), cond);
+}
+
+void ArmAssembler::Push(Register rd, Condition cond) {
+  str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond);
+}
+
+void ArmAssembler::Pop(Register rd, Condition cond) {
+  ldr(rd, Address(SP, kRegisterSize, Address::PostIndex), cond);
+}
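+
+// These single-register forms are the pre-indexed str and post-indexed ldr
+// encodings that "push {rd}" and "pop {rd}" assemble to.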
+
+void ArmAssembler::PushList(RegList regs, Condition cond) {
+  stm(DB_W, SP, regs, cond);
+}
+
+void ArmAssembler::PopList(RegList regs, Condition cond) {
+  ldm(IA_W, SP, regs, cond);
+}
+
+void ArmAssembler::Mov(Register rd, Register rm, Condition cond) {
+  if (rd != rm) {
+    mov(rd, ShifterOperand(rm), cond);
+  }
+}
+
+void ArmAssembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
+                       Condition cond) {
+  CHECK_NE(shift_imm, 0u);  // Do not use Lsl if no shift is wanted.
+  mov(rd, ShifterOperand(rm, LSL, shift_imm), cond);
+}
+
+void ArmAssembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
+                       Condition cond) {
+  CHECK_NE(shift_imm, 0u);  // Do not use Lsr if no shift is wanted.
+  if (shift_imm == 32) shift_imm = 0;  // Comply with UAL syntax.
+  mov(rd, ShifterOperand(rm, LSR, shift_imm), cond);
+}
+
+void ArmAssembler::Asr(Register rd, Register rm, uint32_t shift_imm,
+                       Condition cond) {
+  CHECK_NE(shift_imm, 0u);  // Do not use Asr if no shift is wanted.
+  if (shift_imm == 32) shift_imm = 0;  // Comply with UAL syntax.
+  mov(rd, ShifterOperand(rm, ASR, shift_imm), cond);
+}
+
+void ArmAssembler::Ror(Register rd, Register rm, uint32_t shift_imm,
+                       Condition cond) {
+  CHECK_NE(shift_imm, 0u);  // Use the Rrx instruction for a rotate of zero.
+  mov(rd, ShifterOperand(rm, ROR, shift_imm), cond);
+}
+
+void ArmAssembler::Rrx(Register rd, Register rm, Condition cond) {
+  mov(rd, ShifterOperand(rm, ROR, 0), cond);
+}
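+
+// In the ARM encoding a rotate amount of zero in the ROR position means RRX
+// (rotate right by one through the carry flag), which is why Ror above
+// rejects shift_imm == 0 and Rrx encodes it as ROR with a zero immediate.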
+
+void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
+                              const std::vector<ManagedRegister>& callee_save_regs,
+                              const std::vector<ManagedRegister>& entry_spills) {
+  CHECK_ALIGNED(frame_size, kStackAlignment);
+  CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
+
+  // Push callee saves and link register.
+  RegList push_list = 1 << LR;
+  size_t pushed_values = 1;
+  for (size_t i = 0; i < callee_save_regs.size(); i++) {
+    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
+    push_list |= 1 << reg;
+    pushed_values++;
+  }
+  PushList(push_list);
+
+  // Increase frame to required size.
+  CHECK_GT(frame_size, pushed_values * kPointerSize);  // Must be at least enough space to push the Method*.
+  size_t adjust = frame_size - (pushed_values * kPointerSize);
+  IncreaseFrameSize(adjust);
+
+  // Write out Method*.
+  StoreToOffset(kStoreWord, R0, SP, 0);
+
+  // Write out entry spills.
+  for (size_t i = 0; i < entry_spills.size(); ++i) {
+    Register reg = entry_spills.at(i).AsArm().AsCoreRegister();
+    StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize));
+  }
+}
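+
+// After BuildFrame the frame looks roughly like this (the stack grows down,
+// offsets relative to the new SP):
+//   frame_size + kPointerSize ...  incoming argument (entry) spills
+//   frame_size - kPointerSize ...  LR, with the callee saves below it
+//   ...                            rest of the frame
+//   0                              Method*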
+
+void ArmAssembler::RemoveFrame(size_t frame_size,
+                               const std::vector<ManagedRegister>& callee_save_regs) {
+  CHECK_ALIGNED(frame_size, kStackAlignment);
+  // Compute callee saves to pop and PC.
+  RegList pop_list = 1 << PC;
+  size_t pop_values = 1;
+  for (size_t i = 0; i < callee_save_regs.size(); i++) {
+    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
+    pop_list |= 1 << reg;
+    pop_values++;
+  }
+
+  // Decrease frame to start of callee saves.
+  CHECK_GT(frame_size, pop_values * kPointerSize);
+  size_t adjust = frame_size - (pop_values * kPointerSize);
+  DecreaseFrameSize(adjust);
+
+  // Pop callee saves and PC.
+  PopList(pop_list);
+}
+
+void ArmAssembler::IncreaseFrameSize(size_t adjust) {
+  AddConstant(SP, -adjust);
+}
+
+void ArmAssembler::DecreaseFrameSize(size_t adjust) {
+  AddConstant(SP, adjust);
+}
+
+void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
+  ArmManagedRegister src = msrc.AsArm();
+  if (src.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (src.IsCoreRegister()) {
+    CHECK_EQ(4u, size);
+    StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+  } else if (src.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
+    StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
+                  SP, dest.Int32Value() + 4);
+  } else if (src.IsSRegister()) {
+    StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
+  } else {
+    CHECK(src.IsDRegister()) << src;
+    StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
+  }
+}
+
+void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+  ArmManagedRegister src = msrc.AsArm();
+  CHECK(src.IsCoreRegister()) << src;
+  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+  ArmManagedRegister src = msrc.AsArm();
+  CHECK(src.IsCoreRegister()) << src;
+  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
+                                 FrameOffset in_off, ManagedRegister mscratch) {
+  ArmManagedRegister src = msrc.AsArm();
+  ArmManagedRegister scratch = mscratch.AsArm();
+  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+}
+
+void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
+                           ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
+                           MemberOffset offs) {
+  ArmManagedRegister dst = mdest.AsArm();
+  CHECK(dst.IsCoreRegister()) << dst;
+  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
+                 base.AsArm().AsCoreRegister(), offs.Int32Value());
+}
+
+void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+  ArmManagedRegister dst = mdest.AsArm();
+  CHECK(dst.IsCoreRegister()) << dst;
+  LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
+}
+
+void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+                              Offset offs) {
+  ArmManagedRegister dst = mdest.AsArm();
+  CHECK(dst.IsCoreRegister()) << dst;
+  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
+                 base.AsArm().AsCoreRegister(), offs.Int32Value());
+}
+
+void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+                                         ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  LoadImmediate(scratch.AsCoreRegister(), imm);
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+                                          ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  LoadImmediate(scratch.AsCoreRegister(), imm);
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
+}
+
+static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst,
+                     Register src_register, int32_t src_offset, size_t size) {
+  ArmManagedRegister dst = m_dst.AsArm();
+  if (dst.IsNoRegister()) {
+    CHECK_EQ(0u, size) << dst;
+  } else if (dst.IsCoreRegister()) {
+    CHECK_EQ(4u, size) << dst;
+    assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
+  } else if (dst.IsRegisterPair()) {
+    CHECK_EQ(8u, size) << dst;
+    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
+    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
+  } else if (dst.IsSRegister()) {
+    assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
+  } else {
+    CHECK(dst.IsDRegister()) << dst;
+    assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
+  }
+}
+
+void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
+  return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
+}
+
+void ArmAssembler::Load(ManagedRegister m_dst, ThreadOffset src, size_t size) {
+  return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
+}
+
+void ArmAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset offs) {
+  ArmManagedRegister dst = m_dst.AsArm();
+  CHECK(dst.IsCoreRegister()) << dst;
+  LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
+}
+
+void ArmAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+                                        ThreadOffset thr_offs,
+                                        ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 TR, thr_offs.Int32Value());
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+                SP, fr_offs.Int32Value());
+}
+
+void ArmAssembler::CopyRawPtrToThread(ThreadOffset thr_offs,
+                                      FrameOffset fr_offs,
+                                      ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 SP, fr_offs.Int32Value());
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+                TR, thr_offs.Int32Value());
+}
+
+void ArmAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
+                                            FrameOffset fr_offs,
+                                            ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+                TR, thr_offs.Int32Value());
+}
+
+void ArmAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
+  StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
+}
+
+void ArmAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
+}
+
+void ArmAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
+}
+
+void ArmAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
+  ArmManagedRegister dst = m_dst.AsArm();
+  ArmManagedRegister src = m_src.AsArm();
+  if (!dst.Equals(src)) {
+    if (dst.IsCoreRegister()) {
+      CHECK(src.IsCoreRegister()) << src;
+      mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
+    } else if (dst.IsDRegister()) {
+      CHECK(src.IsDRegister()) << src;
+      vmovd(dst.AsDRegister(), src.AsDRegister());
+    } else if (dst.IsSRegister()) {
+      CHECK(src.IsSRegister()) << src;
+      vmovs(dst.AsSRegister(), src.AsSRegister());
+    } else {
+      CHECK(dst.IsRegisterPair()) << dst;
+      CHECK(src.IsRegisterPair()) << src;
+      // Ensure that the first move doesn't clobber the input of the second.
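+      // For example, moving R0_R1 into R1_R2 must move the high word first,
+      // since moving the low word first ("mov r1, r0") would clobber R1, the
+      // source's high word.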
+      if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
+        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+      } else {
+        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+      }
+    }
+  }
+}
+
+void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  CHECK(size == 4 || size == 8) << size;
+  if (size == 4) {
+    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+  } else if (size == 8) {
+    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
+    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+  }
+}
+
+void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+                        ManagedRegister mscratch, size_t size) {
+  Register scratch = mscratch.AsArm().AsCoreRegister();
+  CHECK_EQ(size, 4u);
+  LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
+  StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
+}
+
+void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+                        ManagedRegister mscratch, size_t size) {
+  Register scratch = mscratch.AsArm().AsCoreRegister();
+  CHECK_EQ(size, 4u);
+  LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
+  StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
+                        ManagedRegister /*mscratch*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL);
+}
+
+void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
+                        ManagedRegister src, Offset src_offset,
+                        ManagedRegister mscratch, size_t size) {
+  CHECK_EQ(size, 4u);
+  Register scratch = mscratch.AsArm().AsCoreRegister();
+  LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
+  StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
+                        ManagedRegister /*scratch*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL);
+}
+
+
+void ArmAssembler::MemoryBarrier(ManagedRegister mscratch) {
+  CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
+#if ANDROID_SMP != 0
+#if defined(__ARM_HAVE_DMB)
+  int32_t encoding = 0xf57ff05f;  // dmb
+  Emit(encoding);
+#elif  defined(__ARM_HAVE_LDREX_STREX)
+  LoadImmediate(R12, 0);
+  int32_t encoding = 0xee07cfba;  // mcr p15, 0, r12, c7, c10, 5
+  Emit(encoding);
+#else
+  LoadImmediate(R12, 0xffff0fa0);  // kuser_memory_barrier
+  blx(R12);
+#endif
+#endif
+}
+
+void ArmAssembler::CreateSirtEntry(ManagedRegister mout_reg,
+                                   FrameOffset sirt_offset,
+                                   ManagedRegister min_reg, bool null_allowed) {
+  ArmManagedRegister out_reg = mout_reg.AsArm();
+  ArmManagedRegister in_reg = min_reg.AsArm();
+  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
+  CHECK(out_reg.IsCoreRegister()) << out_reg;
+  if (null_allowed) {
+    // Null values get a SIRT entry value of 0.  Otherwise, the SIRT entry is
+    // the address in the SIRT holding the reference.
+    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+    if (in_reg.IsNoRegister()) {
+      LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
+                     SP, sirt_offset.Int32Value());
+      in_reg = out_reg;
+    }
+    cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+    if (!out_reg.Equals(in_reg)) {
+      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+    }
+    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+  } else {
+    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+  }
+}
+
+void ArmAssembler::CreateSirtEntry(FrameOffset out_off,
+                                   FrameOffset sirt_offset,
+                                   ManagedRegister mscratch,
+                                   bool null_allowed) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  if (null_allowed) {
+    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
+                   sirt_offset.Int32Value());
+    // Null values get a SIRT entry value of 0.  Otherwise, the SIRT entry is
+    // the address in the SIRT holding the reference.
+    // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+    cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+  } else {
+    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+  }
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
+}
+
+void ArmAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+                                         ManagedRegister min_reg) {
+  ArmManagedRegister out_reg = mout_reg.AsArm();
+  ArmManagedRegister in_reg = min_reg.AsArm();
+  CHECK(out_reg.IsCoreRegister()) << out_reg;
+  CHECK(in_reg.IsCoreRegister()) << in_reg;
+  // Set the flags first so that both conditional instructions below are
+  // predicated on whether in_reg is null.
+  cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+  if (!out_reg.Equals(in_reg)) {
+    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+  }
+  LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
+                 in_reg.AsCoreRegister(), 0, NE);
+}
+
+void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references
+}
+
+void ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references
+}
+
+void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
+                        ManagedRegister mscratch) {
+  ArmManagedRegister base = mbase.AsArm();
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(base.IsCoreRegister()) << base;
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 base.AsCoreRegister(), offset.Int32Value());
+  blx(scratch.AsCoreRegister());
+  // TODO: place reference map on call
+}
+
+void ArmAssembler::Call(FrameOffset base, Offset offset,
+                        ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  // Call *(*(SP + base) + offset)
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 SP, base.Int32Value());
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 scratch.AsCoreRegister(), offset.Int32Value());
+  blx(scratch.AsCoreRegister());
+  // TODO: place reference map on call
+}
+
+void ArmAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*scratch*/) {
+  UNIMPLEMENTED(FATAL);
+}
+
+void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
+  mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
+}
+
+void ArmAssembler::GetCurrentThread(FrameOffset offset,
+                                    ManagedRegister /*scratch*/) {
+  StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
+}
+
+void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch, stack_adjust);
+  buffer_.EnqueueSlowPath(slow);
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 TR, Thread::ExceptionOffset().Int32Value());
+  cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+  b(slow->Entry(), NE);
+}
+
+void ArmExceptionSlowPath::Emit(Assembler* sasm) {
+  ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
+#define __ sp_asm->
+  __ Bind(&entry_);
+  if (stack_adjust_ != 0) {  // Fix up the frame.
+    __ DecreaseFrameSize(stack_adjust_);
+  }
+  // Pass exception object as argument
+  // Don't care about preserving R0 as this call won't return
+  __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
+  // Set up call to Thread::Current()->pDeliverException
+  __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pDeliverException));
+  __ blx(R12);
+  // Call never returns
+  __ bkpt(0);
+#undef __
+}
+
+}  // namespace arm
+}  // namespace art
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
new file mode 100644
index 0000000..757a8a2
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm.h
@@ -0,0 +1,659 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_
+#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_
+
+#include <vector>
+
+#include "base/logging.h"
+#include "constants_arm.h"
+#include "utils/arm/managed_register_arm.h"
+#include "utils/assembler.h"
+#include "offsets.h"
+#include "utils.h"
+
+namespace art {
+namespace arm {
+
+// Encodes Addressing Mode 1 - Data-processing operands defined in Section 5.1.
+class ShifterOperand {
+ public:
+  // Data-processing operands - Uninitialized
+  ShifterOperand() {
+    type_ = -1;
+  }
+
+  // Data-processing operands - Immediate
+  explicit ShifterOperand(uint32_t immediate) {
+    CHECK(immediate < (1 << kImmed8Bits));
+    type_ = 1;
+    encoding_ = immediate;
+  }
+
+  // Data-processing operands - Rotated immediate
+  ShifterOperand(uint32_t rotate, uint32_t immed8) {
+    CHECK((rotate < (1 << kRotateBits)) && (immed8 < (1 << kImmed8Bits)));
+    type_ = 1;
+    encoding_ = (rotate << kRotateShift) | (immed8 << kImmed8Shift);
+  }
+
+  // Data-processing operands - Register
+  explicit ShifterOperand(Register rm) {
+    type_ = 0;
+    encoding_ = static_cast<uint32_t>(rm);
+  }
+
+  // Data-processing operands - Logical shift/rotate by immediate
+  ShifterOperand(Register rm, Shift shift, uint32_t shift_imm) {
+    CHECK(shift_imm < (1 << kShiftImmBits));
+    type_ = 0;
+    encoding_ = shift_imm << kShiftImmShift |
+                static_cast<uint32_t>(shift) << kShiftShift |
+                static_cast<uint32_t>(rm);
+  }
+
+  // Data-processing operands - Logical shift/rotate by register
+  ShifterOperand(Register rm, Shift shift, Register rs) {
+    type_ = 0;
+    encoding_ = static_cast<uint32_t>(rs) << kShiftRegisterShift |
+                static_cast<uint32_t>(shift) << kShiftShift | (1 << 4) |
+                static_cast<uint32_t>(rm);
+  }
+
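+  // Worked example (illustrative, not part of the encoding itself):
+  // 0x00ff0000 is encodable as imm8 = 0xff with rotate = 8, since 0xff
+  // rotated right by 2*8 = 16 bits gives 0x00ff0000, while 0x00ff00ff has
+  // no such encoding. A hedged usage sketch, assuming an ArmAssembler*:
+  //   ShifterOperand shifter_op;
+  //   if (ShifterOperand::CanHold(0x00ff0000, &shifter_op)) {
+  //     assembler->mov(R0, shifter_op);
+  //   }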
+  static bool CanHold(uint32_t immediate, ShifterOperand* shifter_op) {
+    // Avoid the more expensive test for frequent small immediate values.
+    if (immediate < (1 << kImmed8Bits)) {
+      shifter_op->type_ = 1;
+      shifter_op->encoding_ = (0 << kRotateShift) | (immediate << kImmed8Shift);
+      return true;
+    }
+    // Note that immediate must be unsigned for the test to work correctly.
+    // Start at rot = 1: rot = 0 is the unrotated case already handled above,
+    // and would shift by 32 bits below, which is undefined.
+    for (int rot = 1; rot < 16; rot++) {
+      uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot));
+      if (imm8 < (1 << kImmed8Bits)) {
+        shifter_op->type_ = 1;
+        shifter_op->encoding_ = (rot << kRotateShift) | (imm8 << kImmed8Shift);
+        return true;
+      }
+    }
+    return false;
+  }
+
+ private:
+  bool is_valid() const { return (type_ == 0) || (type_ == 1); }
+
+  uint32_t type() const {
+    CHECK(is_valid());
+    return type_;
+  }
+
+  uint32_t encoding() const {
+    CHECK(is_valid());
+    return encoding_;
+  }
+
+  uint32_t type_;  // Encodes the type field (bits 27-25) in the instruction.
+  uint32_t encoding_;
+
+  friend class ArmAssembler;
+#ifdef SOURCE_ASSEMBLER_SUPPORT
+  friend class BinaryAssembler;
+#endif
+};
+
+
+enum LoadOperandType {
+  kLoadSignedByte,
+  kLoadUnsignedByte,
+  kLoadSignedHalfword,
+  kLoadUnsignedHalfword,
+  kLoadWord,
+  kLoadWordPair,
+  kLoadSWord,
+  kLoadDWord
+};
+
+
+enum StoreOperandType {
+  kStoreByte,
+  kStoreHalfword,
+  kStoreWord,
+  kStoreWordPair,
+  kStoreSWord,
+  kStoreDWord
+};
+
+
+// Load/store multiple addressing mode.
+enum BlockAddressMode {
+  // bit encoding P U W
+  DA           = (0|0|0) << 21,  // decrement after
+  IA           = (0|4|0) << 21,  // increment after
+  DB           = (8|0|0) << 21,  // decrement before
+  IB           = (8|4|0) << 21,  // increment before
+  DA_W         = (0|0|1) << 21,  // decrement after with writeback to base
+  IA_W         = (0|4|1) << 21,  // increment after with writeback to base
+  DB_W         = (8|0|1) << 21,  // decrement before with writeback to base
+  IB_W         = (8|4|1) << 21   // increment before with writeback to base
+};
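+
+// For example, the canonical ARM "push" alias is stmdb sp!, {...}, i.e. DB_W
+// (decrement before, with writeback), and "pop" is ldmia sp!, {...}, i.e. IA_W.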
+
+
+class Address {
+ public:
+  // Memory operand addressing mode
+  enum Mode {
+    // bit encoding P U W
+    Offset       = (8|4|0) << 21,  // offset (w/o writeback to base)
+    PreIndex     = (8|4|1) << 21,  // pre-indexed addressing with writeback
+    PostIndex    = (0|4|0) << 21,  // post-indexed addressing with writeback
+    NegOffset    = (8|0|0) << 21,  // negative offset (w/o writeback to base)
+    NegPreIndex  = (8|0|1) << 21,  // negative pre-indexed with writeback
+    NegPostIndex = (0|0|0) << 21   // negative post-indexed with writeback
+  };
+
+  explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) {
+    CHECK(IsAbsoluteUint(12, offset));
+    if (offset < 0) {
+      encoding_ = (am ^ (1 << kUShift)) | -offset;  // Flip U to adjust sign.
+    } else {
+      encoding_ = am | offset;
+    }
+    encoding_ |= static_cast<uint32_t>(rn) << kRnShift;
+  }
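+  // Illustrative example: Address(R0, -4) flips the U bit and stores the
+  // magnitude of the offset, encoding a NegOffset access at [r0, #-4].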
+
+  static bool CanHoldLoadOffset(LoadOperandType type, int offset);
+  static bool CanHoldStoreOffset(StoreOperandType type, int offset);
+
+ private:
+  uint32_t encoding() const { return encoding_; }
+
+  // Encoding for addressing mode 3.
+  uint32_t encoding3() const {
+    const uint32_t offset_mask = (1 << 12) - 1;
+    uint32_t offset = encoding_ & offset_mask;
+    CHECK_LT(offset, 256u);
+    return (encoding_ & ~offset_mask) | ((offset & 0xf0) << 4) | (offset & 0xf);
+  }
+
+  // Encoding for vfp load/store addressing.
+  uint32_t vencoding() const {
+    const uint32_t offset_mask = (1 << 12) - 1;
+    uint32_t offset = encoding_ & offset_mask;
+    CHECK(IsAbsoluteUint(10, offset));  // In the range -1020 to +1020.
+    CHECK_ALIGNED(offset, 4);  // Multiple of 4.
+    int mode = encoding_ & ((8|4|1) << 21);
+    CHECK((mode == Offset) || (mode == NegOffset));
+    uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2);
+    if (mode == Offset) {
+      vencoding |= 1 << 23;
+    }
+    return vencoding;
+  }
+
+  uint32_t encoding_;
+
+  friend class ArmAssembler;
+};
+
+
+class ArmAssembler : public Assembler {
+ public:
+  ArmAssembler() {}
+  virtual ~ArmAssembler() {}
+
+  // Data-processing instructions.
+  void and_(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void eor(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void sub(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+  void subs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void rsb(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+  void rsbs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void add(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void adds(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void adc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void sbc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void rsc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void tst(Register rn, ShifterOperand so, Condition cond = AL);
+
+  void teq(Register rn, ShifterOperand so, Condition cond = AL);
+
+  void cmp(Register rn, ShifterOperand so, Condition cond = AL);
+
+  void cmn(Register rn, ShifterOperand so, Condition cond = AL);
+
+  void orr(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+  void orrs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void mov(Register rd, ShifterOperand so, Condition cond = AL);
+  void movs(Register rd, ShifterOperand so, Condition cond = AL);
+
+  void bic(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void mvn(Register rd, ShifterOperand so, Condition cond = AL);
+  void mvns(Register rd, ShifterOperand so, Condition cond = AL);
+
+  // Miscellaneous data-processing instructions.
+  void clz(Register rd, Register rm, Condition cond = AL);
+  void movw(Register rd, uint16_t imm16, Condition cond = AL);
+  void movt(Register rd, uint16_t imm16, Condition cond = AL);
+
+  // Multiply instructions.
+  void mul(Register rd, Register rn, Register rm, Condition cond = AL);
+  void mla(Register rd, Register rn, Register rm, Register ra,
+           Condition cond = AL);
+  void mls(Register rd, Register rn, Register rm, Register ra,
+           Condition cond = AL);
+  void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
+             Condition cond = AL);
+
+  // Load/store instructions.
+  void ldr(Register rd, Address ad, Condition cond = AL);
+  void str(Register rd, Address ad, Condition cond = AL);
+
+  void ldrb(Register rd, Address ad, Condition cond = AL);
+  void strb(Register rd, Address ad, Condition cond = AL);
+
+  void ldrh(Register rd, Address ad, Condition cond = AL);
+  void strh(Register rd, Address ad, Condition cond = AL);
+
+  void ldrsb(Register rd, Address ad, Condition cond = AL);
+  void ldrsh(Register rd, Address ad, Condition cond = AL);
+
+  void ldrd(Register rd, Address ad, Condition cond = AL);
+  void strd(Register rd, Address ad, Condition cond = AL);
+
+  void ldm(BlockAddressMode am, Register base,
+           RegList regs, Condition cond = AL);
+  void stm(BlockAddressMode am, Register base,
+           RegList regs, Condition cond = AL);
+
+  void ldrex(Register rd, Register rn, Condition cond = AL);
+  void strex(Register rd, Register rt, Register rn, Condition cond = AL);
+
+  // Miscellaneous instructions.
+  void clrex();
+  void nop(Condition cond = AL);
+
+  // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0.
+  void bkpt(uint16_t imm16);
+  void svc(uint32_t imm24);
+
+  // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).
+  void vmovsr(SRegister sn, Register rt, Condition cond = AL);
+  void vmovrs(Register rt, SRegister sn, Condition cond = AL);
+  void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL);
+  void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL);
+  void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL);
+  void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL);
+  void vmovs(SRegister sd, SRegister sm, Condition cond = AL);
+  void vmovd(DRegister dd, DRegister dm, Condition cond = AL);
+
+  // Returns false if the immediate cannot be encoded.
+  bool vmovs(SRegister sd, float s_imm, Condition cond = AL);
+  bool vmovd(DRegister dd, double d_imm, Condition cond = AL);
+
+  void vldrs(SRegister sd, Address ad, Condition cond = AL);
+  void vstrs(SRegister sd, Address ad, Condition cond = AL);
+  void vldrd(DRegister dd, Address ad, Condition cond = AL);
+  void vstrd(DRegister dd, Address ad, Condition cond = AL);
+
+  void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+  void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+  void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+  void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+  void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+  void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+  void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+  void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+  void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+  void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+  void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+  void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+
+  void vabss(SRegister sd, SRegister sm, Condition cond = AL);
+  void vabsd(DRegister dd, DRegister dm, Condition cond = AL);
+  void vnegs(SRegister sd, SRegister sm, Condition cond = AL);
+  void vnegd(DRegister dd, DRegister dm, Condition cond = AL);
+  void vsqrts(SRegister sd, SRegister sm, Condition cond = AL);
+  void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL);
+
+  void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL);
+  void vcvtds(DRegister dd, SRegister sm, Condition cond = AL);
+  void vcvtis(SRegister sd, SRegister sm, Condition cond = AL);
+  void vcvtid(SRegister sd, DRegister dm, Condition cond = AL);
+  void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL);
+  void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL);
+  void vcvtus(SRegister sd, SRegister sm, Condition cond = AL);
+  void vcvtud(SRegister sd, DRegister dm, Condition cond = AL);
+  void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL);
+  void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL);
+
+  void vcmps(SRegister sd, SRegister sm, Condition cond = AL);
+  void vcmpd(DRegister dd, DRegister dm, Condition cond = AL);
+  void vcmpsz(SRegister sd, Condition cond = AL);
+  void vcmpdz(DRegister dd, Condition cond = AL);
+  void vmstat(Condition cond = AL);  // VMRS APSR_nzcv, FPSCR
+
+  // Branch instructions.
+  void b(Label* label, Condition cond = AL);
+  void bl(Label* label, Condition cond = AL);
+  void blx(Register rm, Condition cond = AL);
+  void bx(Register rm, Condition cond = AL);
+
+  // Macros.
+  // Add signed constant value to rd. May clobber IP.
+  void AddConstant(Register rd, int32_t value, Condition cond = AL);
+  void AddConstant(Register rd, Register rn, int32_t value,
+                   Condition cond = AL);
+  void AddConstantSetFlags(Register rd, Register rn, int32_t value,
+                           Condition cond = AL);
+  void AddConstantWithCarry(Register rd, Register rn, int32_t value,
+                            Condition cond = AL);
+
+  // Load and Store. May clobber IP.
+  void LoadImmediate(Register rd, int32_t value, Condition cond = AL);
+  void LoadSImmediate(SRegister sd, float value, Condition cond = AL);
+  void LoadDImmediate(DRegister dd, double value,
+                      Register scratch, Condition cond = AL);
+  void MarkExceptionHandler(Label* label);
+  void LoadFromOffset(LoadOperandType type,
+                      Register reg,
+                      Register base,
+                      int32_t offset,
+                      Condition cond = AL);
+  void StoreToOffset(StoreOperandType type,
+                     Register reg,
+                     Register base,
+                     int32_t offset,
+                     Condition cond = AL);
+  void LoadSFromOffset(SRegister reg,
+                       Register base,
+                       int32_t offset,
+                       Condition cond = AL);
+  void StoreSToOffset(SRegister reg,
+                      Register base,
+                      int32_t offset,
+                      Condition cond = AL);
+  void LoadDFromOffset(DRegister reg,
+                       Register base,
+                       int32_t offset,
+                       Condition cond = AL);
+  void StoreDToOffset(DRegister reg,
+                      Register base,
+                      int32_t offset,
+                      Condition cond = AL);
+
+  void Push(Register rd, Condition cond = AL);
+  void Pop(Register rd, Condition cond = AL);
+
+  void PushList(RegList regs, Condition cond = AL);
+  void PopList(RegList regs, Condition cond = AL);
+
+  void Mov(Register rd, Register rm, Condition cond = AL);
+
+  // Convenience shift instructions. Use mov instruction with shifter operand
+  // for variants setting the status flags or using a register shift count.
+  void Lsl(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+  void Lsr(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+  void Asr(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+  void Ror(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+  void Rrx(Register rd, Register rm, Condition cond = AL);
+
+  // Encode a signed constant in tst instructions, only affecting the flags.
+  void EncodeUint32InTstInstructions(uint32_t data);
+  // ... and decode from a pc pointing to the start of encoding instructions.
+  static uint32_t DecodeUint32FromTstInstructions(uword pc);
+  static bool IsInstructionForExceptionHandling(uword pc);
+
+  // Emit data (e.g. encoded instruction or immediate) to the
+  // instruction stream.
+  void Emit(int32_t value);
+  void Bind(Label* label);
+
+  //
+  // Overridden common assembler high-level functionality
+  //
+
+  // Emit code that will create an activation on the stack
+  virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+                          const std::vector<ManagedRegister>& callee_save_regs,
+                          const std::vector<ManagedRegister>& entry_spills);
+
+  // Emit code that will remove an activation from the stack
+  virtual void RemoveFrame(size_t frame_size,
+                           const std::vector<ManagedRegister>& callee_save_regs);
+
+  virtual void IncreaseFrameSize(size_t adjust);
+  virtual void DecreaseFrameSize(size_t adjust);
+
+  // Store routines
+  virtual void Store(FrameOffset offs, ManagedRegister src, size_t size);
+  virtual void StoreRef(FrameOffset dest, ManagedRegister src);
+  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src);
+
+  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+                                     ManagedRegister scratch);
+
+  virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+                                      ManagedRegister scratch);
+
+  virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
+                                        FrameOffset fr_offs,
+                                        ManagedRegister scratch);
+
+  virtual void StoreStackPointerToThread(ThreadOffset thr_offs);
+
+  virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
+                             FrameOffset in_off, ManagedRegister scratch);
+
+  // Load routines
+  virtual void Load(ManagedRegister dest, FrameOffset src, size_t size);
+
+  virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size);
+
+  virtual void LoadRef(ManagedRegister dest, FrameOffset src);
+
+  virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
+                       MemberOffset offs);
+
+  virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
+                          Offset offs);
+
+  virtual void LoadRawPtrFromThread(ManagedRegister dest,
+                                    ThreadOffset offs);
+
+  // Copying routines
+  virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size);
+
+  virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+                                    ManagedRegister scratch);
+
+  virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+                                  ManagedRegister scratch);
+
+  virtual void CopyRef(FrameOffset dest, FrameOffset src,
+                       ManagedRegister scratch);
+
+  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(ManagedRegister dest, Offset dest_offset,
+                    ManagedRegister src, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void MemoryBarrier(ManagedRegister scratch);
+
+  // Sign extension
+  virtual void SignExtend(ManagedRegister mreg, size_t size);
+
+  // Zero extension
+  virtual void ZeroExtend(ManagedRegister mreg, size_t size);
+
+  // Exploit fast access in managed code to Thread::Current()
+  virtual void GetCurrentThread(ManagedRegister tr);
+  virtual void GetCurrentThread(FrameOffset dest_offset,
+                                ManagedRegister scratch);
+
+  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed. in_reg holds a possibly stale reference
+  // that can be used to avoid loading the SIRT entry to see if the value is
+  // NULL.
+  virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+                               ManagedRegister in_reg, bool null_allowed);
+
+  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed.
+  virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+                               ManagedRegister scratch, bool null_allowed);
+
+  // src holds a SIRT entry (Object**); load this into dst.
+  virtual void LoadReferenceFromSirt(ManagedRegister dst,
+                                     ManagedRegister src);
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+  virtual void VerifyObject(ManagedRegister src, bool could_be_null);
+  virtual void VerifyObject(FrameOffset src, bool could_be_null);
+
+  // Call to address held at [base+offset]
+  virtual void Call(ManagedRegister base, Offset offset,
+                    ManagedRegister scratch);
+  virtual void Call(FrameOffset base, Offset offset,
+                    ManagedRegister scratch);
+  virtual void Call(ThreadOffset offset, ManagedRegister scratch);
+
+  // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+  virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+
+ private:
+  void EmitType01(Condition cond,
+                  int type,
+                  Opcode opcode,
+                  int set_cc,
+                  Register rn,
+                  Register rd,
+                  ShifterOperand so);
+
+  void EmitType5(Condition cond, int offset, bool link);
+
+  void EmitMemOp(Condition cond,
+                 bool load,
+                 bool byte,
+                 Register rd,
+                 Address ad);
+
+  void EmitMemOpAddressMode3(Condition cond,
+                             int32_t mode,
+                             Register rd,
+                             Address ad);
+
+  void EmitMultiMemOp(Condition cond,
+                      BlockAddressMode am,
+                      bool load,
+                      Register base,
+                      RegList regs);
+
+  void EmitShiftImmediate(Condition cond,
+                          Shift opcode,
+                          Register rd,
+                          Register rm,
+                          ShifterOperand so);
+
+  void EmitShiftRegister(Condition cond,
+                         Shift opcode,
+                         Register rd,
+                         Register rm,
+                         ShifterOperand so);
+
+  void EmitMulOp(Condition cond,
+                 int32_t opcode,
+                 Register rd,
+                 Register rn,
+                 Register rm,
+                 Register rs);
+
+  void EmitVFPsss(Condition cond,
+                  int32_t opcode,
+                  SRegister sd,
+                  SRegister sn,
+                  SRegister sm);
+
+  void EmitVFPddd(Condition cond,
+                  int32_t opcode,
+                  DRegister dd,
+                  DRegister dn,
+                  DRegister dm);
+
+  void EmitVFPsd(Condition cond,
+                 int32_t opcode,
+                 SRegister sd,
+                 DRegister dm);
+
+  void EmitVFPds(Condition cond,
+                 int32_t opcode,
+                 DRegister dd,
+                 SRegister sm);
+
+  void EmitBranch(Condition cond, Label* label, bool link);
+  static int32_t EncodeBranchOffset(int offset, int32_t inst);
+  static int DecodeBranchOffset(int32_t inst);
+  int32_t EncodeTstOffset(int offset, int32_t inst);
+  int DecodeTstOffset(int32_t inst);
+
+  // Three-way comparison of two registers, for sorting register lists.
+  static int RegisterCompare(const Register* reg1, const Register* reg2) {
+    return *reg1 - *reg2;
+  }
+};
+
+// Slow path entered when Thread::Current()->exception_ is non-null.
+class ArmExceptionSlowPath : public SlowPath {
+ public:
+  explicit ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust)
+      : scratch_(scratch), stack_adjust_(stack_adjust) {
+  }
+  virtual void Emit(Assembler* sp_asm);
+ private:
+  const ArmManagedRegister scratch_;
+  const size_t stack_adjust_;
+};
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
new file mode 100644
index 0000000..cc795b1
--- /dev/null
+++ b/compiler/utils/arm/constants_arm.h
@@ -0,0 +1,449 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_CONSTANTS_ARM_H_
+#define ART_COMPILER_UTILS_ARM_CONSTANTS_ARM_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+
+#include "arch/arm/registers_arm.h"
+#include "base/casts.h"
+#include "base/logging.h"
+#include "globals.h"
+
+namespace art {
+namespace arm {
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate ARM instructions.
+//
+// Section references in the code refer to the "ARM Architecture Reference
+// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf)
+//
+// Constants for specific fields are defined in their respective named enums.
+// General constants are in an anonymous enum in class Instr.
+
+
+// We support both VFPv3-D16 and VFPv3-D32 profiles, but currently only one at
+// a time, so that compile-time optimizations can be applied.
+// Warning: VFPv3-D32 is untested.
+#define VFPv3_D16
+#if defined(VFPv3_D16) == defined(VFPv3_D32)
+#error "Exactly one of VFPv3_D16 or VFPv3_D32 can be defined at a time."
+#endif
+
+
+enum ScaleFactor {
+  TIMES_1 = 0,
+  TIMES_2 = 1,
+  TIMES_4 = 2,
+  TIMES_8 = 3
+};
+
+// Values for double-precision floating point registers.
+enum DRegister {
+  D0  =  0,
+  D1  =  1,
+  D2  =  2,
+  D3  =  3,
+  D4  =  4,
+  D5  =  5,
+  D6  =  6,
+  D7  =  7,
+  D8  =  8,
+  D9  =  9,
+  D10 = 10,
+  D11 = 11,
+  D12 = 12,
+  D13 = 13,
+  D14 = 14,
+  D15 = 15,
+#ifdef VFPv3_D16
+  kNumberOfDRegisters = 16,
+#else
+  D16 = 16,
+  D17 = 17,
+  D18 = 18,
+  D19 = 19,
+  D20 = 20,
+  D21 = 21,
+  D22 = 22,
+  D23 = 23,
+  D24 = 24,
+  D25 = 25,
+  D26 = 26,
+  D27 = 27,
+  D28 = 28,
+  D29 = 29,
+  D30 = 30,
+  D31 = 31,
+  kNumberOfDRegisters = 32,
+#endif
+  kNumberOfOverlappingDRegisters = 16,
+  kNoDRegister = -1,
+};
+std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
+
+
+// Values for the condition field as defined in section A3.2.
+enum Condition {
+  kNoCondition = -1,
+  EQ =  0,  // equal
+  NE =  1,  // not equal
+  CS =  2,  // carry set/unsigned higher or same
+  CC =  3,  // carry clear/unsigned lower
+  MI =  4,  // minus/negative
+  PL =  5,  // plus/positive or zero
+  VS =  6,  // overflow
+  VC =  7,  // no overflow
+  HI =  8,  // unsigned higher
+  LS =  9,  // unsigned lower or same
+  GE = 10,  // signed greater than or equal
+  LT = 11,  // signed less than
+  GT = 12,  // signed greater than
+  LE = 13,  // signed less than or equal
+  AL = 14,  // always (unconditional)
+  kSpecialCondition = 15,  // special condition (refer to section A3.2.1)
+  kMaxCondition = 16,
+};
+std::ostream& operator<<(std::ostream& os, const Condition& rhs);
+
+
+// Opcodes for Data-processing instructions (instructions with a type 0 and 1)
+// as defined in section A3.4
+enum Opcode {
+  kNoOperand = -1,
+  AND =  0,  // Logical AND
+  EOR =  1,  // Logical Exclusive OR
+  SUB =  2,  // Subtract
+  RSB =  3,  // Reverse Subtract
+  ADD =  4,  // Add
+  ADC =  5,  // Add with Carry
+  SBC =  6,  // Subtract with Carry
+  RSC =  7,  // Reverse Subtract with Carry
+  TST =  8,  // Test
+  TEQ =  9,  // Test Equivalence
+  CMP = 10,  // Compare
+  CMN = 11,  // Compare Negated
+  ORR = 12,  // Logical (inclusive) OR
+  MOV = 13,  // Move
+  BIC = 14,  // Bit Clear
+  MVN = 15,  // Move Not
+  kMaxOperand = 16
+};
+
+
+// Shifter types for Data-processing operands as defined in section A5.1.2.
+enum Shift {
+  kNoShift = -1,
+  LSL = 0,  // Logical shift left
+  LSR = 1,  // Logical shift right
+  ASR = 2,  // Arithmetic shift right
+  ROR = 3,  // Rotate right
+  kMaxShift = 4
+};
+
+
+// Constants used for the decoding or encoding of the individual fields of
+// instructions. Based on the "Figure 3-1 ARM instruction set summary".
+enum InstructionFields {
+  kConditionShift = 28,
+  kConditionBits = 4,
+  kTypeShift = 25,
+  kTypeBits = 3,
+  kLinkShift = 24,
+  kLinkBits = 1,
+  kUShift = 23,
+  kUBits = 1,
+  kOpcodeShift = 21,
+  kOpcodeBits = 4,
+  kSShift = 20,
+  kSBits = 1,
+  kRnShift = 16,
+  kRnBits = 4,
+  kRdShift = 12,
+  kRdBits = 4,
+  kRsShift = 8,
+  kRsBits = 4,
+  kRmShift = 0,
+  kRmBits = 4,
+
+  // Immediate instruction field encodings.
+  kRotateShift = 8,
+  kRotateBits = 4,
+  kImmed8Shift = 0,
+  kImmed8Bits = 8,
+
+  // Shift instruction register field encodings.
+  kShiftImmShift = 7,
+  kShiftRegisterShift = 8,
+  kShiftImmBits = 5,
+  kShiftShift = 5,
+  kShiftBits = 2,
+
+  // Load/store instruction offset field encoding.
+  kOffset12Shift = 0,
+  kOffset12Bits = 12,
+  kOffset12Mask = 0x00000fff,
+
+  // Mul instruction register field encodings.
+  kMulRdShift = 16,
+  kMulRdBits = 4,
+  kMulRnShift = 12,
+  kMulRnBits = 4,
+
+  kBranchOffsetMask = 0x00ffffff
+};
+
+
+// Size (in bytes) of registers.
+const int kRegisterSize = 4;
+
+// List of registers used in load/store multiple.
+typedef uint16_t RegList;
+
+// The class Instr enables access to individual fields defined in the ARM
+// architecture instruction set encoding as described in figure A3-1.
+//
+// Example: Test whether the instruction at ptr does set the condition code
+// bits.
+//
+// bool InstructionSetsConditionCodes(byte* ptr) {
+//   Instr* instr = Instr::At(ptr);
+//   int type = instr->TypeField();
+//   return ((type == 0) || (type == 1)) && instr->HasS();
+// }
+//
+class Instr {
+ public:
+  enum {
+    kInstrSize = 4,
+    kInstrSizeLog2 = 2,
+    kPCReadOffset = 8
+  };
+
+  bool IsBreakPoint() {
+    return IsBkpt();
+  }
+
+  // Get the raw instruction bits.
+  inline int32_t InstructionBits() const {
+    return *reinterpret_cast<const int32_t*>(this);
+  }
+
+  // Set the raw instruction bits to value.
+  inline void SetInstructionBits(int32_t value) {
+    *reinterpret_cast<int32_t*>(this) = value;
+  }
+
+  // Read one particular bit out of the instruction bits.
+  inline int Bit(int nr) const {
+    return (InstructionBits() >> nr) & 1;
+  }
+
+  // Read a bit field out of the instruction bits.
+  inline int Bits(int shift, int count) const {
+    return (InstructionBits() >> shift) & ((1 << count) - 1);
+  }
+
+
+  // Accessors for the different named fields used in the ARM encoding.
+  // The naming of these accessors corresponds to figure A3-1.
+  // Generally applicable fields
+  inline Condition ConditionField() const {
+    return static_cast<Condition>(Bits(kConditionShift, kConditionBits));
+  }
+  inline int TypeField() const { return Bits(kTypeShift, kTypeBits); }
+
+  inline Register RnField() const {
+    return static_cast<Register>(Bits(kRnShift, kRnBits));
+  }
+  inline Register RdField() const {
+    return static_cast<Register>(Bits(kRdShift, kRdBits));
+  }
+
+  // Fields used in Data processing instructions
+  inline Opcode OpcodeField() const {
+    return static_cast<Opcode>(Bits(kOpcodeShift, kOpcodeBits));
+  }
+  inline int SField() const { return Bits(kSShift, kSBits); }
+  // with register
+  inline Register RmField() const {
+    return static_cast<Register>(Bits(kRmShift, kRmBits));
+  }
+  inline Shift ShiftField() const {
+    return static_cast<Shift>(Bits(kShiftShift, kShiftBits));
+  }
+  inline int RegShiftField() const { return Bit(4); }
+  inline Register RsField() const {
+    return static_cast<Register>(Bits(kRsShift, kRsBits));
+  }
+  inline int ShiftAmountField() const {
+    return Bits(kShiftImmShift, kShiftImmBits);
+  }
+  // with immediate
+  inline int RotateField() const { return Bits(kRotateShift, kRotateBits); }
+  inline int Immed8Field() const { return Bits(kImmed8Shift, kImmed8Bits); }
+
+  // Fields used in Load/Store instructions
+  inline int PUField() const { return Bits(23, 2); }
+  inline int  BField() const { return Bit(22); }
+  inline int  WField() const { return Bit(21); }
+  inline int  LField() const { return Bit(20); }
+  // with register uses same fields as Data processing instructions above
+  // with immediate
+  inline int Offset12Field() const {
+    return Bits(kOffset12Shift, kOffset12Bits);
+  }
+  // multiple
+  inline int RlistField() const { return Bits(0, 16); }
+  // extra loads and stores
+  inline int SignField() const { return Bit(6); }
+  inline int HField() const { return Bit(5); }
+  inline int ImmedHField() const { return Bits(8, 4); }
+  inline int ImmedLField() const { return Bits(0, 4); }
+
+  // Fields used in Branch instructions
+  inline int LinkField() const { return Bits(kLinkShift, kLinkBits); }
+  inline int SImmed24Field() const { return ((InstructionBits() << 8) >> 8); }
+
+  // Fields used in Supervisor Call instructions
+  inline uint32_t SvcField() const { return Bits(0, 24); }
+
+  // Field used in Breakpoint instruction
+  inline uint16_t BkptField() const {
+    return ((Bits(8, 12) << 4) | Bits(0, 4));
+  }
+
+  // Field used in 16-bit immediate move instructions
+  inline uint16_t MovwField() const {
+    return ((Bits(16, 4) << 12) | Bits(0, 12));
+  }
+
+  // Field used in VFP float immediate move instruction
+  inline float ImmFloatField() const {
+    uint32_t imm32 = (Bit(19) << 31) | (((1 << 5) - Bit(18)) << 25) |
+                     (Bits(16, 2) << 23) | (Bits(0, 4) << 19);
+    return bit_cast<float, uint32_t>(imm32);
+  }
+
+  // Field used in VFP double immediate move instruction
+  inline double ImmDoubleField() const {
+    uint64_t imm64 = (Bit(19)*(1LL << 63)) | (((1LL << 8) - Bit(18)) << 54) |
+                     (Bits(16, 2)*(1LL << 52)) | (Bits(0, 4)*(1LL << 48));
+    return bit_cast<double, uint64_t>(imm64);
+  }
+
+  // Test for data processing instructions of type 0 or 1.
+  // See "ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition",
+  // section A5.1 "ARM instruction set encoding".
+  inline bool IsDataProcessing() const {
+    CHECK_NE(ConditionField(), kSpecialCondition);
+    CHECK_EQ(Bits(26, 2), 0);  // Type 0 or 1.
+    return ((Bits(20, 5) & 0x19) != 0x10) &&
+      ((Bit(25) == 1) ||  // Data processing immediate.
+       (Bit(4) == 0) ||  // Data processing register.
+       (Bit(7) == 0));  // Data processing register-shifted register.
+  }
+
+  // Tests for special encodings of type 0 instructions (extra loads and stores,
+  // as well as multiplications, synchronization primitives, and miscellaneous
+  // instructions).
+  // Can only be called for a type 0 or 1 instruction.
+  inline bool IsMiscellaneous() const {
+    CHECK_EQ(Bits(26, 2), 0);  // Type 0 or 1.
+    return ((Bit(25) == 0) && ((Bits(20, 5) & 0x19) == 0x10) && (Bit(7) == 0));
+  }
+  inline bool IsMultiplyOrSyncPrimitive() const {
+    CHECK_EQ(Bits(26, 2), 0);  // Type 0 or 1.
+    return ((Bit(25) == 0) && (Bits(4, 4) == 9));
+  }
+
+  // Test for Supervisor Call instruction.
+  inline bool IsSvc() const {
+    return ((InstructionBits() & 0xff000000) == 0xef000000);
+  }
+
+  // Test for Breakpoint instruction.
+  inline bool IsBkpt() const {
+    return ((InstructionBits() & 0xfff000f0) == 0xe1200070);
+  }
+
+  // VFP register fields.
+  inline SRegister SnField() const {
+    return static_cast<SRegister>((Bits(kRnShift, kRnBits) << 1) + Bit(7));
+  }
+  inline SRegister SdField() const {
+    return static_cast<SRegister>((Bits(kRdShift, kRdBits) << 1) + Bit(22));
+  }
+  inline SRegister SmField() const {
+    return static_cast<SRegister>((Bits(kRmShift, kRmBits) << 1) + Bit(5));
+  }
+  inline DRegister DnField() const {
+    return static_cast<DRegister>(Bits(kRnShift, kRnBits) + (Bit(7) << 4));
+  }
+  inline DRegister DdField() const {
+    return static_cast<DRegister>(Bits(kRdShift, kRdBits) + (Bit(22) << 4));
+  }
+  inline DRegister DmField() const {
+    return static_cast<DRegister>(Bits(kRmShift, kRmBits) + (Bit(5) << 4));
+  }
+
+  // Test for VFP data processing or single transfer instructions of type 7.
+  inline bool IsVFPDataProcessingOrSingleTransfer() const {
+    CHECK_NE(ConditionField(), kSpecialCondition);
+    CHECK_EQ(TypeField(), 7);
+    return ((Bit(24) == 0) && (Bits(9, 3) == 5));
+    // Bit(4) == 0: Data Processing
+    // Bit(4) == 1: 8, 16, or 32-bit Transfer between ARM Core and VFP
+  }
+
+  // Test for VFP 64-bit transfer instructions of type 6.
+  inline bool IsVFPDoubleTransfer() const {
+    CHECK_NE(ConditionField(), kSpecialCondition);
+    CHECK_EQ(TypeField(), 6);
+    return ((Bits(21, 4) == 2) && (Bits(9, 3) == 5) &&
+            ((Bits(4, 4) & 0xd) == 1));
+  }
+
+  // Test for VFP load and store instructions of type 6.
+  inline bool IsVFPLoadStore() const {
+    CHECK_NE(ConditionField(), kSpecialCondition);
+    CHECK_EQ(TypeField(), 6);
+    return ((Bits(20, 5) & 0x12) == 0x10) && (Bits(9, 3) == 5);
+  }
+
+  // Special accessors that test for existence of a value.
+  inline bool HasS() const { return SField() == 1; }
+  inline bool HasB() const { return BField() == 1; }
+  inline bool HasW() const { return WField() == 1; }
+  inline bool HasL() const { return LField() == 1; }
+  inline bool HasSign() const { return SignField() == 1; }
+  inline bool HasH() const { return HField() == 1; }
+  inline bool HasLink() const { return LinkField() == 1; }
+
+  // Instructions are read out of a code stream. The only way to get a
+  // reference to an instruction is to convert a pointer. There is no way
+  // to allocate or create instances of class Instr.
+  // Use the At(pc) function to create references to Instr.
+  static Instr* At(uword pc) { return reinterpret_cast<Instr*>(pc); }
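+  // Note: Next() below relies on the empty Instr class having size 1, so
+  // that "this + kInstrSize" advances the pointer by kInstrSize bytes.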
+  Instr* Next() { return this + kInstrSize; }
+
+ private:
+  // We need to prevent the creation of instances of class Instr.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
+};
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_ARM_CONSTANTS_ARM_H_
diff --git a/compiler/utils/arm/managed_register_arm.cc b/compiler/utils/arm/managed_register_arm.cc
new file mode 100644
index 0000000..57c2305
--- /dev/null
+++ b/compiler/utils/arm/managed_register_arm.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "managed_register_arm.h"
+
+#include "globals.h"
+
+namespace art {
+namespace arm {
+
+// We need all registers for caching of locals.
+// Registers R9 .. R15 are reserved.
+static const int kNumberOfAvailableCoreRegisters = (R8 - R0) + 1;
+static const int kNumberOfAvailableSRegisters = kNumberOfSRegisters;
+static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters;
+static const int kNumberOfAvailableOverlappingDRegisters =
+    kNumberOfOverlappingDRegisters;
+static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
+
+
+// Returns true if this managed-register overlaps the other managed-register.
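+// For example, D0 overlaps S0 and S1 (the low D registers alias pairs of
+// S registers in VFP), and the pair R0_R1 overlaps both R0 and R1.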
+bool ArmManagedRegister::Overlaps(const ArmManagedRegister& other) const {
+  if (IsNoRegister() || other.IsNoRegister()) return false;
+  if (Equals(other)) return true;
+  if (IsRegisterPair()) {
+    Register low = AsRegisterPairLow();
+    Register high = AsRegisterPairHigh();
+    return ArmManagedRegister::FromCoreRegister(low).Overlaps(other) ||
+        ArmManagedRegister::FromCoreRegister(high).Overlaps(other);
+  }
+  if (IsOverlappingDRegister()) {
+    if (other.IsDRegister()) return Equals(other);
+    if (other.IsSRegister()) {
+      SRegister low = AsOverlappingDRegisterLow();
+      SRegister high = AsOverlappingDRegisterHigh();
+      SRegister other_sreg = other.AsSRegister();
+      return (low == other_sreg) || (high == other_sreg);
+    }
+    return false;
+  }
+  if (other.IsRegisterPair() || other.IsOverlappingDRegister()) {
+    return other.Overlaps(*this);
+  }
+  return false;
+}
+
+
+int ArmManagedRegister::AllocIdLow() const {
+  CHECK(IsOverlappingDRegister() || IsRegisterPair());
+  const int r = RegId() - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+  int low;
+  if (r < kNumberOfOverlappingDRegIds) {
+    CHECK(IsOverlappingDRegister());
+    low = (r * 2) + kNumberOfCoreRegIds;  // Return an SRegister.
+  } else {
+    CHECK(IsRegisterPair());
+    low = (r - kNumberOfDRegIds) * 2;  // Return a Register.
+    if (low > 6) {
+      // We didn't get a pair higher than R6_R7; it must be the Dalvik
+      // special case (R1_R2).
+      low = 1;
+    }
+  }
+  return low;
+}
+
+
+int ArmManagedRegister::AllocIdHigh() const {
+  return AllocIdLow() + 1;
+}
+
+
+void ArmManagedRegister::Print(std::ostream& os) const {
+  if (!IsValidManagedRegister()) {
+    os << "No Register";
+  } else if (IsCoreRegister()) {
+    os << "Core: " << static_cast<int>(AsCoreRegister());
+  } else if (IsRegisterPair()) {
+    os << "Pair: " << static_cast<int>(AsRegisterPairLow()) << ", "
+       << static_cast<int>(AsRegisterPairHigh());
+  } else if (IsSRegister()) {
+    os << "SRegister: " << static_cast<int>(AsSRegister());
+  } else if (IsDRegister()) {
+    os << "DRegister: " << static_cast<int>(AsDRegister());
+  } else {
+    os << "??: " << RegId();
+  }
+}
+
+std::ostream& operator<<(std::ostream& os, const ArmManagedRegister& reg) {
+  reg.Print(os);
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& r) {
+  os << ArmManagedRegister::FromRegisterPair(r);
+  return os;
+}
+
+}  // namespace arm
+}  // namespace art
diff --git a/compiler/utils/arm/managed_register_arm.h b/compiler/utils/arm/managed_register_arm.h
new file mode 100644
index 0000000..a496c87
--- /dev/null
+++ b/compiler/utils/arm/managed_register_arm.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_MANAGED_REGISTER_ARM_H_
+#define ART_COMPILER_UTILS_ARM_MANAGED_REGISTER_ARM_H_
+
+#include "base/logging.h"
+#include "constants_arm.h"
+#include "utils/managed_register.h"
+
+namespace art {
+namespace arm {
+
+// Values for register pairs.
+enum RegisterPair {
+  R0_R1 = 0,
+  R2_R3 = 1,
+  R4_R5 = 2,
+  R6_R7 = 3,
+  R1_R2 = 4,  // Dalvik style passing
+  kNumberOfRegisterPairs = 5,
+  kNoRegisterPair = -1,
+};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg);
+
+const int kNumberOfCoreRegIds = kNumberOfCoreRegisters;
+const int kNumberOfCoreAllocIds = kNumberOfCoreRegisters;
+
+const int kNumberOfSRegIds = kNumberOfSRegisters;
+const int kNumberOfSAllocIds = kNumberOfSRegisters;
+
+const int kNumberOfDRegIds = kNumberOfDRegisters;
+const int kNumberOfOverlappingDRegIds = kNumberOfOverlappingDRegisters;
+const int kNumberOfDAllocIds = kNumberOfDRegIds - kNumberOfOverlappingDRegIds;
+
+const int kNumberOfPairRegIds = kNumberOfRegisterPairs;
+
+const int kNumberOfRegIds = kNumberOfCoreRegIds + kNumberOfSRegIds +
+    kNumberOfDRegIds + kNumberOfPairRegIds;
+const int kNumberOfAllocIds =
+    kNumberOfCoreAllocIds + kNumberOfSAllocIds + kNumberOfDAllocIds;
+
+// Register ids map:
+//   [0..R[  core registers (enum Register)
+//   [R..S[  single precision VFP registers (enum SRegister)
+//   [S..D[  double precision VFP registers (enum DRegister)
+//   [D..P[  core register pairs (enum RegisterPair)
+// where
+//   R = kNumberOfCoreRegIds
+//   S = R + kNumberOfSRegIds
+//   D = S + kNumberOfDRegIds
+//   P = D + kNumberOfRegisterPairs
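+//
+// For example (illustrative), with VFPv3-D16 this gives R = 16, S = 48 and
+// D = 64, so S0 has register id 16, D0 has id 48 and R0_R1 has id 64.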
+
+// Allocation ids map:
+//   [0..R[  core registers (enum Register)
+//   [R..S[  single precision VFP registers (enum SRegister)
+//   [S..N[  non-overlapping double precision VFP registers (16-31 in enum
+//           DRegister, VFPv3-D32 only)
+// where
+//   R = kNumberOfCoreAllocIds
+//   S = R + kNumberOfSAllocIds
+//   N = S + kNumberOfDAllocIds
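+//
+// Note that with VFPv3-D16 every DRegister overlaps two SRegisters, so
+// kNumberOfDAllocIds is 0 and the allocation ids reduce to the core and
+// S register ids.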
+
+
+// An instance of class 'ArmManagedRegister' represents a single ARM register
+// or a pair of core ARM registers (enum RegisterPair). A single register is
+// either a core register (enum Register), a VFP single precision register
+// (enum SRegister), or a VFP double precision register (enum DRegister).
+// 'ManagedRegister::NoRegister()' returns an invalid ManagedRegister.
+// There is a one-to-one mapping between ManagedRegister and register id.
+class ArmManagedRegister : public ManagedRegister {
+ public:
+  Register AsCoreRegister() const {
+    CHECK(IsCoreRegister());
+    return static_cast<Register>(id_);
+  }
+
+  SRegister AsSRegister() const {
+    CHECK(IsSRegister());
+    return static_cast<SRegister>(id_ - kNumberOfCoreRegIds);
+  }
+
+  DRegister AsDRegister() const {
+    CHECK(IsDRegister());
+    return static_cast<DRegister>(id_ - kNumberOfCoreRegIds - kNumberOfSRegIds);
+  }
+
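+  // A DRegister Dn in the overlapping range aliases SRegisters S(2n) and
+  // S(2n+1); e.g. D7 overlaps S14 (low half) and S15 (high half).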
+  SRegister AsOverlappingDRegisterLow() const {
+    CHECK(IsOverlappingDRegister());
+    DRegister d_reg = AsDRegister();
+    return static_cast<SRegister>(d_reg * 2);
+  }
+
+  SRegister AsOverlappingDRegisterHigh() const {
+    CHECK(IsOverlappingDRegister());
+    DRegister d_reg = AsDRegister();
+    return static_cast<SRegister>(d_reg * 2 + 1);
+  }
+
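+  // Even-aligned pairs map directly to their enum values (R0_R1 = 0,
+  // R2_R3 = 1, R4_R5 = 2, R6_R7 = 3); the odd R1_R2 pair used for Dalvik
+  // argument passing is handled as a special case.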
+  RegisterPair AsRegisterPair() const {
+    CHECK(IsRegisterPair());
+    Register reg_low = AsRegisterPairLow();
+    if (reg_low == R1) {
+      return R1_R2;
+    } else {
+      return static_cast<RegisterPair>(reg_low / 2);
+    }
+  }
+
+  Register AsRegisterPairLow() const {
+    CHECK(IsRegisterPair());
+    // The register id mapping is arranged so that AllocIdLow() can be used here.
+    return FromRegId(AllocIdLow()).AsCoreRegister();
+  }
+
+  Register AsRegisterPairHigh() const {
+    CHECK(IsRegisterPair());
+    // The register id mapping is arranged so that AllocIdHigh() can be used here.
+    return FromRegId(AllocIdHigh()).AsCoreRegister();
+  }
+
+  bool IsCoreRegister() const {
+    CHECK(IsValidManagedRegister());
+    return (0 <= id_) && (id_ < kNumberOfCoreRegIds);
+  }
+
+  bool IsSRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - kNumberOfCoreRegIds;
+    return (0 <= test) && (test < kNumberOfSRegIds);
+  }
+
+  bool IsDRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+    return (0 <= test) && (test < kNumberOfDRegIds);
+  }
+
+  // Returns true if this is a DRegister that overlaps SRegisters (D0-D15).
+  bool IsOverlappingDRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+    return (0 <= test) && (test < kNumberOfOverlappingDRegIds);
+  }
+
+  bool IsRegisterPair() const {
+    CHECK(IsValidManagedRegister());
+    const int test =
+        id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds + kNumberOfDRegIds);
+    return (0 <= test) && (test < kNumberOfPairRegIds);
+  }
+
+  bool IsSameType(ArmManagedRegister test) const {
+    CHECK(IsValidManagedRegister() && test.IsValidManagedRegister());
+    return
+      (IsCoreRegister() && test.IsCoreRegister()) ||
+      (IsSRegister() && test.IsSRegister()) ||
+      (IsDRegister() && test.IsDRegister()) ||
+      (IsRegisterPair() && test.IsRegisterPair());
+  }
+
+
+  // Returns true if the two managed registers ('this' and 'other') overlap.
+  // Either managed register may be NoRegister; if both are NoRegister, false
+  // is returned.
+  bool Overlaps(const ArmManagedRegister& other) const;
+
+  void Print(std::ostream& os) const;
+
+  static ArmManagedRegister FromCoreRegister(Register r) {
+    CHECK_NE(r, kNoRegister);
+    return FromRegId(r);
+  }
+
+  static ArmManagedRegister FromSRegister(SRegister r) {
+    CHECK_NE(r, kNoSRegister);
+    return FromRegId(r + kNumberOfCoreRegIds);
+  }
+
+  static ArmManagedRegister FromDRegister(DRegister r) {
+    CHECK_NE(r, kNoDRegister);
+    return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfSRegIds));
+  }
+
+  static ArmManagedRegister FromRegisterPair(RegisterPair r) {
+    CHECK_NE(r, kNoRegisterPair);
+    return FromRegId(r + (kNumberOfCoreRegIds +
+                          kNumberOfSRegIds + kNumberOfDRegIds));
+  }
+
+  // Return a RegisterPair consisting of Register r_low and r_low + 1.
+  static ArmManagedRegister FromCoreRegisterPair(Register r_low) {
+    if (r_low != R1) {  // not the dalvik special case
+      CHECK_NE(r_low, kNoRegister);
+      CHECK_EQ(0, (r_low % 2));
+      const int r = r_low / 2;
+      CHECK_LT(r, kNumberOfPairRegIds);
+      return FromRegisterPair(static_cast<RegisterPair>(r));
+    } else {
+      return FromRegisterPair(R1_R2);
+    }
+  }
+
+  // Return a DRegister overlapping SRegister r_low and r_low + 1.
+  static ArmManagedRegister FromSRegisterPair(SRegister r_low) {
+    CHECK_NE(r_low, kNoSRegister);
+    CHECK_EQ(0, (r_low % 2));
+    const int r = r_low / 2;
+    CHECK_LT(r, kNumberOfOverlappingDRegIds);
+    return FromDRegister(static_cast<DRegister>(r));
+  }
+
+ private:
+  bool IsValidManagedRegister() const {
+    return (0 <= id_) && (id_ < kNumberOfRegIds);
+  }
+
+  int RegId() const {
+    CHECK(!IsNoRegister());
+    return id_;
+  }
+
+  int AllocId() const {
+    CHECK(IsValidManagedRegister() &&
+           !IsOverlappingDRegister() && !IsRegisterPair());
+    int r = id_;
+    if ((kNumberOfDAllocIds > 0) && IsDRegister()) {  // VFPv3-D32 only.
+      r -= kNumberOfOverlappingDRegIds;
+    }
+    CHECK_LT(r, kNumberOfAllocIds);
+    return r;
+  }
+
+  int AllocIdLow() const;
+  int AllocIdHigh() const;
+
+  friend class ManagedRegister;
+
+  explicit ArmManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
+
+  static ArmManagedRegister FromRegId(int reg_id) {
+    ArmManagedRegister reg(reg_id);
+    CHECK(reg.IsValidManagedRegister());
+    return reg;
+  }
+};
+
+std::ostream& operator<<(std::ostream& os, const ArmManagedRegister& reg);
+
+}  // namespace arm
+
+inline arm::ArmManagedRegister ManagedRegister::AsArm() const {
+  arm::ArmManagedRegister reg(id_);
+  CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
+  return reg;
+}
+
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_ARM_MANAGED_REGISTER_ARM_H_
diff --git a/compiler/utils/arm/managed_register_arm_test.cc b/compiler/utils/arm/managed_register_arm_test.cc
new file mode 100644
index 0000000..f5d4cc0
--- /dev/null
+++ b/compiler/utils/arm/managed_register_arm_test.cc
@@ -0,0 +1,767 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "globals.h"
+#include "managed_register_arm.h"
+#include "gtest/gtest.h"
+
+namespace art {
+namespace arm {
+
+TEST(ArmManagedRegister, NoRegister) {
+  ArmManagedRegister reg = ManagedRegister::NoRegister().AsArm();
+  EXPECT_TRUE(reg.IsNoRegister());
+  EXPECT_TRUE(!reg.Overlaps(reg));
+}
+
+TEST(ArmManagedRegister, CoreRegister) {
+  ArmManagedRegister reg = ArmManagedRegister::FromCoreRegister(R0);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(R0, reg.AsCoreRegister());
+
+  reg = ArmManagedRegister::FromCoreRegister(R1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(R1, reg.AsCoreRegister());
+
+  reg = ArmManagedRegister::FromCoreRegister(R8);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(R8, reg.AsCoreRegister());
+
+  reg = ArmManagedRegister::FromCoreRegister(R15);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(R15, reg.AsCoreRegister());
+}
+
+
+TEST(ArmManagedRegister, SRegister) {
+  ArmManagedRegister reg = ArmManagedRegister::FromSRegister(S0);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S0, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S1, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S3);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S3, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S15);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S15, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S30);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S30, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S31);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S31, reg.AsSRegister());
+}
+
+
+TEST(ArmManagedRegister, DRegister) {
+  ArmManagedRegister reg = ArmManagedRegister::FromDRegister(D0);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D0, reg.AsDRegister());
+  EXPECT_EQ(S0, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S1, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S0)));
+
+  reg = ArmManagedRegister::FromDRegister(D1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D1, reg.AsDRegister());
+  EXPECT_EQ(S2, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S3, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S2)));
+
+  reg = ArmManagedRegister::FromDRegister(D6);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D6, reg.AsDRegister());
+  EXPECT_EQ(S12, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S13, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S12)));
+
+  reg = ArmManagedRegister::FromDRegister(D14);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D14, reg.AsDRegister());
+  EXPECT_EQ(S28, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S29, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S28)));
+
+  reg = ArmManagedRegister::FromDRegister(D15);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D15, reg.AsDRegister());
+  EXPECT_EQ(S30, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S31, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S30)));
+
+#ifdef VFPv3_D32
+  reg = ArmManagedRegister::FromDRegister(D16);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D16, reg.AsDRegister());
+
+  reg = ArmManagedRegister::FromDRegister(D18);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D18, reg.AsDRegister());
+
+  reg = ArmManagedRegister::FromDRegister(D30);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D30, reg.AsDRegister());
+
+  reg = ArmManagedRegister::FromDRegister(D31);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D31, reg.AsDRegister());
+#endif  // VFPv3_D32
+}
+
+
+TEST(ArmManagedRegister, Pair) {
+  ArmManagedRegister reg = ArmManagedRegister::FromRegisterPair(R0_R1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R0_R1, reg.AsRegisterPair());
+  EXPECT_EQ(R0, reg.AsRegisterPairLow());
+  EXPECT_EQ(R1, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R0)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R1_R2);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R1_R2, reg.AsRegisterPair());
+  EXPECT_EQ(R1, reg.AsRegisterPairLow());
+  EXPECT_EQ(R2, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R1)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R2_R3);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R2_R3, reg.AsRegisterPair());
+  EXPECT_EQ(R2, reg.AsRegisterPairLow());
+  EXPECT_EQ(R3, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R2)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R4_R5);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R4_R5, reg.AsRegisterPair());
+  EXPECT_EQ(R4, reg.AsRegisterPairLow());
+  EXPECT_EQ(R5, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R4)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R6_R7);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R6_R7, reg.AsRegisterPair());
+  EXPECT_EQ(R6, reg.AsRegisterPairLow());
+  EXPECT_EQ(R7, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R6)));
+}
+
+
+TEST(ArmManagedRegister, Equals) {
+  ManagedRegister no_reg = ManagedRegister::NoRegister();
+  EXPECT_TRUE(no_reg.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_R0 = ArmManagedRegister::FromCoreRegister(R0);
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(reg_R0.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_R1 = ArmManagedRegister::FromCoreRegister(R1);
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(reg_R1.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_R8 = ArmManagedRegister::FromCoreRegister(R8);
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(reg_R8.Equals(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_S0 = ArmManagedRegister::FromSRegister(S0);
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(reg_S0.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_S1 = ArmManagedRegister::FromSRegister(S1);
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(reg_S1.Equals(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_S31 = ArmManagedRegister::FromSRegister(S31);
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(reg_S31.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_D0 = ArmManagedRegister::FromDRegister(D0);
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(reg_D0.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_D15 = ArmManagedRegister::FromDRegister(D15);
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(reg_D15.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+#ifdef VFPv3_D32
+  ArmManagedRegister reg_D16 = ArmManagedRegister::FromDRegister(D16);
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(reg_D16.Equals(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_D30 = ArmManagedRegister::FromDRegister(D30);
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(reg_D30.Equals(ArmManagedRegister::FromDRegister(D30)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_D31 = ArmManagedRegister::FromDRegister(D31);
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D30)));
+  EXPECT_TRUE(reg_D31.Equals(ArmManagedRegister::FromDRegister(D31)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+#endif  // VFPv3_D32
+
+  ArmManagedRegister reg_R0R1 = ArmManagedRegister::FromRegisterPair(R0_R1);
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(reg_R0R1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromRegisterPair(R2_R3)));
+
+  ArmManagedRegister reg_R4R5 = ArmManagedRegister::FromRegisterPair(R4_R5);
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R4_R5)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R6_R7)));
+
+  ArmManagedRegister reg_R6R7 = ArmManagedRegister::FromRegisterPair(R6_R7);
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R4_R5)));
+  EXPECT_TRUE(reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R6_R7)));
+}
+
+
+TEST(ArmManagedRegister, Overlaps) {
+  ArmManagedRegister reg = ArmManagedRegister::FromCoreRegister(R0);
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromCoreRegister(R1);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromCoreRegister(R7);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromSRegister(S0);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromSRegister(S1);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromSRegister(S15);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromSRegister(S31);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromDRegister(D0);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromDRegister(D7);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromDRegister(D15);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+#ifdef VFPv3_D32
+  reg = ArmManagedRegister::FromDRegister(D16);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromDRegister(D31);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+#endif  // VFPv3_D32
+
+  reg = ArmManagedRegister::FromRegisterPair(R0_R1);
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R4_R5);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+}
+
+}  // namespace arm
+}  // namespace art
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
new file mode 100644
index 0000000..92ce0b8
--- /dev/null
+++ b/compiler/utils/assembler.cc
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "arm/assembler_arm.h"
+#include "mips/assembler_mips.h"
+#include "x86/assembler_x86.h"
+#include "globals.h"
+#include "memory_region.h"
+
+namespace art {
+
+static byte* NewContents(size_t capacity) {
+  return new byte[capacity];
+}
+
+
+AssemblerBuffer::AssemblerBuffer() {
+  static const size_t kInitialBufferCapacity = 4 * KB;
+  contents_ = NewContents(kInitialBufferCapacity);
+  cursor_ = contents_;
+  limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
+  fixup_ = NULL;
+  slow_path_ = NULL;
+#ifndef NDEBUG
+  has_ensured_capacity_ = false;
+  fixups_processed_ = false;
+#endif
+
+  // Verify internal state.
+  CHECK_EQ(Capacity(), kInitialBufferCapacity);
+  CHECK_EQ(Size(), 0U);
+}
+
+
+AssemblerBuffer::~AssemblerBuffer() {
+  delete[] contents_;
+}
+
+
+void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) {
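+  // Walk the chain from the most recently emitted fixup backwards, letting
+  // each fixup patch its recorded position within the final region.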
+  AssemblerFixup* fixup = fixup_;
+  while (fixup != NULL) {
+    fixup->Process(region, fixup->position());
+    fixup = fixup->previous();
+  }
+}
+
+
+void AssemblerBuffer::FinalizeInstructions(const MemoryRegion& instructions) {
+  // Copy the instructions from the buffer.
+  MemoryRegion from(reinterpret_cast<void*>(contents()), Size());
+  instructions.CopyFrom(0, from);
+  // Process fixups in the instructions.
+  ProcessFixups(instructions);
+#ifndef NDEBUG
+  fixups_processed_ = true;
+#endif
+}
+
+
+void AssemblerBuffer::ExtendCapacity() {
+  size_t old_size = Size();
+  size_t old_capacity = Capacity();
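+  // Grow geometrically by doubling, but cap each step at 1 MB to bound the
+  // cost of any single copy.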
+  size_t new_capacity = std::min(old_capacity * 2, old_capacity + 1 * MB);
+
+  // Allocate the new data area and copy contents of the old one to it.
+  byte* new_contents = NewContents(new_capacity);
+  memmove(reinterpret_cast<void*>(new_contents),
+          reinterpret_cast<void*>(contents_),
+          old_size);
+
+  // Compute the relocation delta and switch to the new contents area.
+  ptrdiff_t delta = new_contents - contents_;
+  contents_ = new_contents;
+
+  // Update the cursor and recompute the limit.
+  cursor_ += delta;
+  limit_ = ComputeLimit(new_contents, new_capacity);
+
+  // Verify internal state.
+  CHECK_EQ(Capacity(), new_capacity);
+  CHECK_EQ(Size(), old_size);
+}
+
+
+Assembler* Assembler::Create(InstructionSet instruction_set) {
+  switch (instruction_set) {
+    case kArm:
+    case kThumb2:
+      return new arm::ArmAssembler();
+    case kMips:
+      return new mips::MipsAssembler();
+    case kX86:
+      return new x86::X86Assembler();
+    default:
+      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
+      return NULL;
+  }
+}
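+
+// For example (a sketch), a code generator selects a backend with
+//   Assembler* assembler = Assembler::Create(kThumb2);
+// and owns the returned assembler, deleting it once code is finalized.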
+
+}  // namespace art
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
new file mode 100644
index 0000000..9d79002
--- /dev/null
+++ b/compiler/utils/assembler.h
@@ -0,0 +1,459 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ASSEMBLER_H_
+#define ART_COMPILER_UTILS_ASSEMBLER_H_
+
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "arm/constants_arm.h"
+#include "mips/constants_mips.h"
+#include "x86/constants_x86.h"
+#include "instruction_set.h"
+#include "managed_register.h"
+#include "memory_region.h"
+#include "offsets.h"
+
+namespace art {
+
+class Assembler;
+class AssemblerBuffer;
+class AssemblerFixup;
+
+namespace arm {
+  class ArmAssembler;
+}
+namespace mips {
+  class MipsAssembler;
+}
+namespace x86 {
+  class X86Assembler;
+}
+
+class Label {
+ public:
+  Label() : position_(0) {}
+
+  ~Label() {
+    // Assert if label is being destroyed with unresolved branches pending.
+    CHECK(!IsLinked());
+  }
+
+  // Returns the position for bound and linked labels. Cannot be used
+  // for unused labels.
+  int Position() const {
+    CHECK(!IsUnused());
+    return IsBound() ? -position_ - kPointerSize : position_ - kPointerSize;
+  }
+
+  int LinkPosition() const {
+    CHECK(IsLinked());
+    return position_ - kWordSize;
+  }
+
+  bool IsBound() const { return position_ < 0; }
+  bool IsUnused() const { return position_ == 0; }
+  bool IsLinked() const { return position_ > 0; }
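+
+  // Encoding sketch for the predicates above: position_ == 0 means unused;
+  // binding at byte offset p stores -(p + kPointerSize) (negative), while
+  // linking at p stores p + kPointerSize (positive). The kPointerSize bias
+  // keeps offset 0 distinguishable from the unused state.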
+
+ private:
+  int position_;
+
+  void Reinitialize() {
+    position_ = 0;
+  }
+
+  void BindTo(int position) {
+    CHECK(!IsBound());
+    position_ = -position - kPointerSize;
+    CHECK(IsBound());
+  }
+
+  void LinkTo(int position) {
+    CHECK(!IsBound());
+    position_ = position + kPointerSize;
+    CHECK(IsLinked());
+  }
+
+  friend class arm::ArmAssembler;
+  friend class mips::MipsAssembler;
+  friend class x86::X86Assembler;
+
+  DISALLOW_COPY_AND_ASSIGN(Label);
+};
+
+
+// Assembler fixups are positions in generated code that require processing
+// after the code has been copied to executable memory. This includes building
+// relocation information.
+class AssemblerFixup {
+ public:
+  virtual void Process(const MemoryRegion& region, int position) = 0;
+  virtual ~AssemblerFixup() {}
+
+ private:
+  AssemblerFixup* previous_;
+  int position_;
+
+  AssemblerFixup* previous() const { return previous_; }
+  void set_previous(AssemblerFixup* previous) { previous_ = previous; }
+
+  int position() const { return position_; }
+  void set_position(int position) { position_ = position; }
+
+  friend class AssemblerBuffer;
+};
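+
+// For example, a hypothetical subclass (shown only as a sketch) might
+// patch a recorded call site once the final code address is known:
+//
+//   class CallFixup : public AssemblerFixup {
+//    public:
+//     virtual void Process(const MemoryRegion& region, int position) {
+//       // Write the resolved call target into the code at 'position'.
+//     }
+//   };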
+
+// Parent of all queued slow paths, which are emitted during finalization.
+class SlowPath {
+ public:
+  SlowPath() : next_(NULL) {}
+  virtual ~SlowPath() {}
+
+  Label* Continuation() { return &continuation_; }
+  Label* Entry() { return &entry_; }
+  // Generate code for slow path
+  virtual void Emit(Assembler *sp_asm) = 0;
+
+ protected:
+  // Entry branched to by fast path
+  Label entry_;
+  // Optional continuation that is branched to at the end of the slow path
+  Label continuation_;
+  // Next in linked list of slow paths
+  SlowPath *next_;
+
+  friend class AssemblerBuffer;
+  DISALLOW_COPY_AND_ASSIGN(SlowPath);
+};
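+
+// Typical flow (a sketch): the fast path branches to Entry() of a queued
+// slow path, the slow path body is emitted later by
+// AssemblerBuffer::EmitSlowPaths(), and the emitted code branches back to
+// Continuation().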
+
+class AssemblerBuffer {
+ public:
+  AssemblerBuffer();
+  ~AssemblerBuffer();
+
+  // Basic support for emitting, loading, and storing.
+  template<typename T> void Emit(T value) {
+    CHECK(HasEnsuredCapacity());
+    *reinterpret_cast<T*>(cursor_) = value;
+    cursor_ += sizeof(T);
+  }
+
+  template<typename T> T Load(size_t position) {
+    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
+    return *reinterpret_cast<T*>(contents_ + position);
+  }
+
+  template<typename T> void Store(size_t position, T value) {
+    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
+    *reinterpret_cast<T*>(contents_ + position) = value;
+  }
+
+  // Emit a fixup at the current location.
+  void EmitFixup(AssemblerFixup* fixup) {
+    fixup->set_previous(fixup_);
+    fixup->set_position(Size());
+    fixup_ = fixup;
+  }
+
+  void EnqueueSlowPath(SlowPath* slowpath) {
+    if (slow_path_ == NULL) {
+      slow_path_ = slowpath;
+    } else {
+      SlowPath* cur = slow_path_;
+      for ( ; cur->next_ != NULL ; cur = cur->next_) {}
+      cur->next_ = slowpath;
+    }
+  }
+
+  void EmitSlowPaths(Assembler* sp_asm) {
+    SlowPath* cur = slow_path_;
+    SlowPath* next = NULL;
+    slow_path_ = NULL;
+    for ( ; cur != NULL ; cur = next) {
+      cur->Emit(sp_asm);
+      next = cur->next_;
+      delete cur;
+    }
+  }
+
+  // Get the size of the emitted code.
+  size_t Size() const {
+    CHECK_GE(cursor_, contents_);
+    return cursor_ - contents_;
+  }
+
+  byte* contents() const { return contents_; }
+
+  // Copy the assembled instructions into the specified memory block
+  // and apply all fixups.
+  void FinalizeInstructions(const MemoryRegion& region);
+
+  // To emit an instruction to the assembler buffer, the EnsureCapacity helper
+  // must be used to guarantee that the underlying data area is big enough to
+  // hold the emitted instruction. Usage:
+  //
+  //     AssemblerBuffer buffer;
+  //     AssemblerBuffer::EnsureCapacity ensured(&buffer);
+  //     ... emit bytes for single instruction ...
+
+#ifndef NDEBUG
+
+  class EnsureCapacity {
+   public:
+    explicit EnsureCapacity(AssemblerBuffer* buffer) {
+      if (buffer->cursor() >= buffer->limit()) {
+        buffer->ExtendCapacity();
+      }
+      // In debug mode, we save the assembler buffer along with the gap
+      // size before we start emitting to the buffer. This allows us to
+      // check that any single generated instruction doesn't overflow the
+      // limit implied by the minimum gap size.
+      buffer_ = buffer;
+      gap_ = ComputeGap();
+      // Make sure that extending the capacity leaves a big enough gap
+      // for any kind of instruction.
+      CHECK_GE(gap_, kMinimumGap);
+      // Mark the buffer as having ensured the capacity.
+      CHECK(!buffer->HasEnsuredCapacity());  // Cannot nest.
+      buffer->has_ensured_capacity_ = true;
+    }
+
+    ~EnsureCapacity() {
+      // Unmark the buffer, so we cannot emit after this.
+      buffer_->has_ensured_capacity_ = false;
+      // Make sure the generated instruction doesn't take up more
+      // space than the minimum gap.
+      int delta = gap_ - ComputeGap();
+      CHECK_LE(delta, kMinimumGap);
+    }
+
+   private:
+    AssemblerBuffer* buffer_;
+    int gap_;
+
+    int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); }
+  };
+
+  bool has_ensured_capacity_;
+  bool HasEnsuredCapacity() const { return has_ensured_capacity_; }
+
+#else
+
+  class EnsureCapacity {
+   public:
+    explicit EnsureCapacity(AssemblerBuffer* buffer) {
+      if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
+    }
+  };
+
+  // When building the C++ tests, assertion code is enabled. To allow
+  // asserting that the user of the assembler buffer has ensured the
+  // capacity needed for emitting, we add a dummy method in non-debug mode.
+  bool HasEnsuredCapacity() const { return true; }
+
+#endif
+
+  // Returns the position in the instruction stream.
+  int GetPosition() { return cursor_ - contents_; }
+
+ private:
+  // The limit is set to kMinimumGap bytes before the end of the data area.
+  // This leaves enough space for the longest possible instruction and allows
+  // for a single, fast space check per instruction.
+  static const int kMinimumGap = 32;
+
+  byte* contents_;
+  byte* cursor_;
+  byte* limit_;
+  AssemblerFixup* fixup_;
+  bool fixups_processed_;
+
+  // Head of linked list of slow paths
+  SlowPath* slow_path_;
+
+  byte* cursor() const { return cursor_; }
+  byte* limit() const { return limit_; }
+  size_t Capacity() const {
+    CHECK_GE(limit_, contents_);
+    return (limit_ - contents_) + kMinimumGap;
+  }
+
+  // Process the fixup chain against the final memory region that the code
+  // has been copied to.
+  void ProcessFixups(const MemoryRegion& region);
+
+  // Compute the limit based on the data area and the capacity. See
+  // description of kMinimumGap for the reasoning behind the value.
+  static byte* ComputeLimit(byte* data, size_t capacity) {
+    return data + capacity - kMinimumGap;
+  }
+
+  void ExtendCapacity();
+
+  friend class AssemblerFixup;
+};
+
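+// Typical lifecycle of the buffer, as a sketch: emit under an EnsureCapacity
+// scope, then copy the finished code out and resolve fixups (MemoryRegion
+// construction shown schematically):
+//
+//   AssemblerBuffer buffer;
+//   {
+//     AssemblerBuffer::EnsureCapacity ensured(&buffer);
+//     buffer.Emit<int32_t>(encoding);
+//   }
+//   MemoryRegion code(...);  // Final location, buffer.Size() bytes long.
+//   buffer.FinalizeInstructions(code);
+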
+class Assembler {
+ public:
+  static Assembler* Create(InstructionSet instruction_set);
+
+  // Emit slow paths queued during assembly
+  void EmitSlowPaths() { buffer_.EmitSlowPaths(this); }
+
+  // Size of generated code
+  size_t CodeSize() const { return buffer_.Size(); }
+
+  // Copy instructions out of assembly buffer into the given region of memory
+  void FinalizeInstructions(const MemoryRegion& region) {
+    buffer_.FinalizeInstructions(region);
+  }
+
+  // Emit code that will create an activation on the stack
+  virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+                          const std::vector<ManagedRegister>& callee_save_regs,
+                          const std::vector<ManagedRegister>& entry_spills) = 0;
+
+  // Emit code that will remove an activation from the stack
+  virtual void RemoveFrame(size_t frame_size,
+                           const std::vector<ManagedRegister>& callee_save_regs) = 0;
+
+  virtual void IncreaseFrameSize(size_t adjust) = 0;
+  virtual void DecreaseFrameSize(size_t adjust) = 0;
+
+  // Store routines
+  virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
+  virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
+  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;
+
+  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+                                     ManagedRegister scratch) = 0;
+
+  virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+                                      ManagedRegister scratch) = 0;
+
+  virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
+                                        FrameOffset fr_offs,
+                                        ManagedRegister scratch) = 0;
+
+  virtual void StoreStackPointerToThread(ThreadOffset thr_offs) = 0;
+
+  virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
+                             FrameOffset in_off, ManagedRegister scratch) = 0;
+
+  // Load routines
+  virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;
+
+  virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size) = 0;
+
+  virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
+
+  virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
+                       MemberOffset offs) = 0;
+
+  virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
+                          Offset offs) = 0;
+
+  virtual void LoadRawPtrFromThread(ManagedRegister dest,
+                                    ThreadOffset offs) = 0;
+
+  // Copying routines
+  virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;
+
+  virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+                                    ManagedRegister scratch) = 0;
+
+  virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+                                  ManagedRegister scratch) = 0;
+
+  virtual void CopyRef(FrameOffset dest, FrameOffset src,
+                       ManagedRegister scratch) = 0;
+
+  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;
+
+  virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+                    ManagedRegister scratch, size_t size) = 0;
+
+  virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+                    ManagedRegister scratch, size_t size) = 0;
+
+  virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+                    ManagedRegister scratch, size_t size) = 0;
+
+  virtual void Copy(ManagedRegister dest, Offset dest_offset,
+                    ManagedRegister src, Offset src_offset,
+                    ManagedRegister scratch, size_t size) = 0;
+
+  virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+                    ManagedRegister scratch, size_t size) = 0;
+
+  virtual void MemoryBarrier(ManagedRegister scratch) = 0;
+
+  // Sign extension
+  virtual void SignExtend(ManagedRegister mreg, size_t size) = 0;
+
+  // Zero extension
+  virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0;
+
+  // Exploit fast access in managed code to Thread::Current()
+  virtual void GetCurrentThread(ManagedRegister tr) = 0;
+  virtual void GetCurrentThread(FrameOffset dest_offset,
+                                ManagedRegister scratch) = 0;
+
+  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed. in_reg holds a possibly stale reference
+  // that can be used to avoid loading the SIRT entry to see if the value is
+  // NULL.
+  virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+                               ManagedRegister in_reg, bool null_allowed) = 0;
+
+  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed.
+  virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+                               ManagedRegister scratch, bool null_allowed) = 0;
+
+  // src holds a SIRT entry (Object**); load this into dst.
+  virtual void LoadReferenceFromSirt(ManagedRegister dst,
+                                     ManagedRegister src) = 0;
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+  virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
+  virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;
+
+  // Call to address held at [base+offset]
+  virtual void Call(ManagedRegister base, Offset offset,
+                    ManagedRegister scratch) = 0;
+  virtual void Call(FrameOffset base, Offset offset,
+                    ManagedRegister scratch) = 0;
+  virtual void Call(ThreadOffset offset, ManagedRegister scratch) = 0;
+
+  // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+  virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;
+
+  virtual ~Assembler() {}
+
+ protected:
+  Assembler() : buffer_() {}
+
+  AssemblerBuffer buffer_;
+};
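+
+// Sketch of driving a concrete assembler through this interface; the
+// instruction set is chosen at runtime via the factory (the enum value name
+// is illustrative):
+//
+//   Assembler* assembler = Assembler::Create(kMips);
+//   ... emit code through the virtual methods above ...
+//   assembler->EmitSlowPaths();
+//   assembler->FinalizeInstructions(region);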
+
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_ASSEMBLER_H_
diff --git a/compiler/utils/managed_register.h b/compiler/utils/managed_register.h
new file mode 100644
index 0000000..4ad1763
--- /dev/null
+++ b/compiler/utils/managed_register.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_MANAGED_REGISTER_H_
+#define ART_COMPILER_UTILS_MANAGED_REGISTER_H_
+
+namespace art {
+
+namespace arm {
+class ArmManagedRegister;
+}
+namespace mips {
+class MipsManagedRegister;
+}
+namespace x86 {
+class X86ManagedRegister;
+}
+
+class ManagedRegister {
+ public:
+  // ManagedRegister is a value class. There exists no method to change the
+  // internal state. We therefore allow a copy constructor and an
+  // assignment operator.
+  ManagedRegister(const ManagedRegister& other) : id_(other.id_) { }
+
+  ManagedRegister& operator=(const ManagedRegister& other) {
+    id_ = other.id_;
+    return *this;
+  }
+
+  arm::ArmManagedRegister AsArm() const;
+  mips::MipsManagedRegister AsMips() const;
+  x86::X86ManagedRegister AsX86() const;
+
+  // It is valid to invoke Equals on and with a NoRegister.
+  bool Equals(const ManagedRegister& other) const {
+    return id_ == other.id_;
+  }
+
+  bool IsNoRegister() const {
+    return id_ == kNoRegister;
+  }
+
+  static ManagedRegister NoRegister() {
+    return ManagedRegister();
+  }
+
+ protected:
+  static const int kNoRegister = -1;
+
+  ManagedRegister() : id_(kNoRegister) { }
+  explicit ManagedRegister(int reg_id) : id_(reg_id) { }
+
+  int id_;
+};
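+
+// Being a value class, ManagedRegister is passed and stored by value; callers
+// narrow it to an architecture-specific view when needed, e.g. (sketch):
+//
+//   void Use(ManagedRegister reg) {
+//     if (!reg.IsNoRegister()) {
+//       mips::MipsManagedRegister m = reg.AsMips();
+//     }
+//   }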
+
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_MANAGED_REGISTER_H_
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
new file mode 100644
index 0000000..58815da
--- /dev/null
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -0,0 +1,999 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_mips.h"
+
+#include "base/casts.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "memory_region.h"
+#include "thread.h"
+
+namespace art {
+namespace mips {
+#if 0
+class DirectCallRelocation : public AssemblerFixup {
+ public:
+  void Process(const MemoryRegion& region, int position) {
+    // Direct calls are relative to the following instruction on mips.
+    int32_t pointer = region.Load<int32_t>(position);
+    int32_t start = reinterpret_cast<int32_t>(region.start());
+    int32_t delta = start + position + sizeof(int32_t);
+    region.Store<int32_t>(position, pointer - delta);
+  }
+};
+#endif
+
+std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
+  if (rhs >= D0 && rhs < kNumberOfDRegisters) {
+    os << "d" << static_cast<int>(rhs);
+  } else {
+    os << "DRegister[" << static_cast<int>(rhs) << "]";
+  }
+  return os;
+}
+
+void MipsAssembler::Emit(int32_t value) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  buffer_.Emit<int32_t>(value);
+}
+
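+// Field layout of the core MIPS instruction formats emitted below, from the
+// most significant bits down:
+//   R-type: opcode(6) rs(5) rt(5) rd(5) shamt(5) funct(6)
+//   I-type: opcode(6) rs(5) rt(5) imm(16)
+//   J-type: opcode(6) target(26)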
+void MipsAssembler::EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct) {
+  CHECK_NE(rs, kNoRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rd, kNoRegister);
+  int32_t encoding = opcode << kOpcodeShift |
+                     static_cast<int32_t>(rs) << kRsShift |
+                     static_cast<int32_t>(rt) << kRtShift |
+                     static_cast<int32_t>(rd) << kRdShift |
+                     shamt << kShamtShift |
+                     funct;
+  Emit(encoding);
+}
+
+void MipsAssembler::EmitI(int opcode, Register rs, Register rt, uint16_t imm) {
+  CHECK_NE(rs, kNoRegister);
+  CHECK_NE(rt, kNoRegister);
+  int32_t encoding = opcode << kOpcodeShift |
+                     static_cast<int32_t>(rs) << kRsShift |
+                     static_cast<int32_t>(rt) << kRtShift |
+                     imm;
+  Emit(encoding);
+}
+
+void MipsAssembler::EmitJ(int opcode, int address) {
+  int32_t encoding = opcode << kOpcodeShift |
+                     address;
+  Emit(encoding);
+}
+
+void MipsAssembler::EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct) {
+  CHECK_NE(ft, kNoFRegister);
+  CHECK_NE(fs, kNoFRegister);
+  CHECK_NE(fd, kNoFRegister);
+  int32_t encoding = opcode << kOpcodeShift |
+                     fmt << kFmtShift |
+                     static_cast<int32_t>(ft) << kFtShift |
+                     static_cast<int32_t>(fs) << kFsShift |
+                     static_cast<int32_t>(fd) << kFdShift |
+                     funct;
+  Emit(encoding);
+}
+
+void MipsAssembler::EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm) {
+  CHECK_NE(rt, kNoFRegister);
+  int32_t encoding = opcode << kOpcodeShift |
+                     fmt << kFmtShift |
+                     static_cast<int32_t>(rt) << kRtShift |
+                     imm;
+  Emit(encoding);
+}
+
+void MipsAssembler::EmitBranch(Register rt, Register rs, Label* label, bool equal) {
+  int offset;
+  if (label->IsBound()) {
+    offset = label->Position() - buffer_.Size();
+  } else {
+    // Use the offset field of the branch instruction for linking the sites.
+    offset = label->position_;
+    label->LinkTo(buffer_.Size());
+  }
+  if (equal) {
+    Beq(rt, rs, (offset >> 2) & kBranchOffsetMask);
+  } else {
+    Bne(rt, rs, (offset >> 2) & kBranchOffsetMask);
+  }
+}
+
+void MipsAssembler::EmitJump(Label* label, bool link) {
+  int offset;
+  if (label->IsBound()) {
+    offset = label->Position() - buffer_.Size();
+  } else {
+    // Use the offset field of the jump instruction for linking the sites.
+    offset = label->position_;
+    label->LinkTo(buffer_.Size());
+  }
+  if (link) {
+    Jal((offset >> 2) & kJumpOffsetMask);
+  } else {
+    J((offset >> 2) & kJumpOffsetMask);
+  }
+}
+
+int32_t MipsAssembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) {
+  CHECK_ALIGNED(offset, 4);
+  CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset;
+
+  // Properly preserve only the bits supported in the instruction.
+  offset >>= 2;
+  if (is_jump) {
+    offset &= kJumpOffsetMask;
+    return (inst & ~kJumpOffsetMask) | offset;
+  } else {
+    offset &= kBranchOffsetMask;
+    return (inst & ~kBranchOffsetMask) | offset;
+  }
+}
+
+int MipsAssembler::DecodeBranchOffset(int32_t inst, bool is_jump) {
+  // Sign-extend, then left-shift by 2: the offset field is shifted up so its
+  // top bit lands in bit 31, then shifted arithmetically back down, which
+  // sign-extends the offset and leaves a net left shift of 2.
+  if (is_jump) {
+    return (((inst & kJumpOffsetMask) << 6) >> 4);
+  } else {
+    return (((inst & kBranchOffsetMask) << 16) >> 14);
+  }
+}
+
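+// Unresolved sites for a label form a chain threaded through the offset
+// fields of the branch and jump instructions themselves; Bind() walks that
+// chain, patching each site with the now-known target.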
+void MipsAssembler::Bind(Label* label, bool is_jump) {
+  CHECK(!label->IsBound());
+  int bound_pc = buffer_.Size();
+  while (label->IsLinked()) {
+    int32_t position = label->Position();
+    int32_t next = buffer_.Load<int32_t>(position);
+    int32_t offset = is_jump ? bound_pc - position : bound_pc - position - 4;
+    int32_t encoded = MipsAssembler::EncodeBranchOffset(offset, next, is_jump);
+    buffer_.Store<int32_t>(position, encoded);
+    label->position_ = MipsAssembler::DecodeBranchOffset(next, is_jump);
+  }
+  label->BindTo(bound_pc);
+}
+
+void MipsAssembler::Add(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x20);
+}
+
+void MipsAssembler::Addu(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x21);
+}
+
+void MipsAssembler::Addi(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x8, rs, rt, imm16);
+}
+
+void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x9, rs, rt, imm16);
+}
+
+void MipsAssembler::Sub(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x22);
+}
+
+void MipsAssembler::Subu(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x23);
+}
+
+void MipsAssembler::Mult(Register rs, Register rt) {
+  EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x18);
+}
+
+void MipsAssembler::Multu(Register rs, Register rt) {
+  EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x19);
+}
+
+void MipsAssembler::Div(Register rs, Register rt) {
+  EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1a);
+}
+
+void MipsAssembler::Divu(Register rs, Register rt) {
+  EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1b);
+}
+
+void MipsAssembler::And(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x24);
+}
+
+void MipsAssembler::Andi(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0xc, rs, rt, imm16);
+}
+
+void MipsAssembler::Or(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x25);
+}
+
+void MipsAssembler::Ori(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0xd, rs, rt, imm16);
+}
+
+void MipsAssembler::Xor(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x26);
+}
+
+void MipsAssembler::Xori(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0xe, rs, rt, imm16);
+}
+
+void MipsAssembler::Nor(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x27);
+}
+
+void MipsAssembler::Sll(Register rd, Register rs, int shamt) {
+  // The value to shift goes in the rt field; the rs field must be zero.
+  EmitR(0, static_cast<Register>(0), rs, rd, shamt, 0x00);
+}
+
+void MipsAssembler::Srl(Register rd, Register rs, int shamt) {
+  EmitR(0, static_cast<Register>(0), rs, rd, shamt, 0x02);
+}
+
+void MipsAssembler::Sra(Register rd, Register rs, int shamt) {
+  EmitR(0, static_cast<Register>(0), rs, rd, shamt, 0x03);
+}
+
+void MipsAssembler::Sllv(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x04);
+}
+
+void MipsAssembler::Srlv(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x06);
+}
+
+void MipsAssembler::Srav(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x07);
+}
+
+void MipsAssembler::Lb(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x20, rs, rt, imm16);
+}
+
+void MipsAssembler::Lh(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x21, rs, rt, imm16);
+}
+
+void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x23, rs, rt, imm16);
+}
+
+void MipsAssembler::Lbu(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x24, rs, rt, imm16);
+}
+
+void MipsAssembler::Lhu(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x25, rs, rt, imm16);
+}
+
+void MipsAssembler::Lui(Register rt, uint16_t imm16) {
+  EmitI(0xf, static_cast<Register>(0), rt, imm16);
+}
+
+void MipsAssembler::Mfhi(Register rd) {
+  EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rd, 0, 0x10);
+}
+
+void MipsAssembler::Mflo(Register rd) {
+  EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rd, 0, 0x12);
+}
+
+void MipsAssembler::Sb(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x28, rs, rt, imm16);
+}
+
+void MipsAssembler::Sh(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x29, rs, rt, imm16);
+}
+
+void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x2b, rs, rt, imm16);
+}
+
+void MipsAssembler::Slt(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x2a);
+}
+
+void MipsAssembler::Sltu(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x2b);
+}
+
+void MipsAssembler::Slti(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0xa, rs, rt, imm16);
+}
+
+void MipsAssembler::Sltiu(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0xb, rs, rt, imm16);
+}
+
+void MipsAssembler::Beq(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x4, rs, rt, imm16);
+  Nop();  // Fill the branch delay slot.
+}
+
+void MipsAssembler::Bne(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x5, rs, rt, imm16);
+  Nop();  // Fill the branch delay slot.
+}
+
+void MipsAssembler::J(uint32_t address) {
+  EmitJ(0x2, address);
+  Nop();  // Fill the branch delay slot.
+}
+
+void MipsAssembler::Jal(uint32_t address) {
+  EmitJ(0x3, address);  // jal is opcode 0x3; j is 0x2.
+  Nop();  // Fill the branch delay slot.
+}
+
+void MipsAssembler::Jr(Register rs) {
+  EmitR(0, rs, static_cast<Register>(0), static_cast<Register>(0), 0, 0x08);
+  Nop();  // Fill the branch delay slot.
+}
+
+void MipsAssembler::Jalr(Register rs) {
+  EmitR(0, rs, static_cast<Register>(0), RA, 0, 0x09);
+  Nop();  // Fill the branch delay slot.
+}
+
+void MipsAssembler::AddS(FRegister fd, FRegister fs, FRegister ft) {
+  EmitFR(0x11, 0x10, ft, fs, fd, 0x0);
+}
+
+void MipsAssembler::SubS(FRegister fd, FRegister fs, FRegister ft) {
+  EmitFR(0x11, 0x10, ft, fs, fd, 0x1);
+}
+
+void MipsAssembler::MulS(FRegister fd, FRegister fs, FRegister ft) {
+  EmitFR(0x11, 0x10, ft, fs, fd, 0x2);
+}
+
+void MipsAssembler::DivS(FRegister fd, FRegister fs, FRegister ft) {
+  EmitFR(0x11, 0x10, ft, fs, fd, 0x3);
+}
+
+void MipsAssembler::AddD(DRegister fd, DRegister fs, DRegister ft) {
+  EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
+         static_cast<FRegister>(fd), 0x0);
+}
+
+void MipsAssembler::SubD(DRegister fd, DRegister fs, DRegister ft) {
+  EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
+         static_cast<FRegister>(fd), 0x1);
+}
+
+void MipsAssembler::MulD(DRegister fd, DRegister fs, DRegister ft) {
+  EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
+         static_cast<FRegister>(fd), 0x2);
+}
+
+void MipsAssembler::DivD(DRegister fd, DRegister fs, DRegister ft) {
+  EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
+         static_cast<FRegister>(fd), 0x3);
+}
+
+void MipsAssembler::MovS(FRegister fd, FRegister fs) {
+  EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x6);
+}
+
+void MipsAssembler::MovD(DRegister fd, DRegister fs) {
+  EmitFR(0x11, 0x11, static_cast<FRegister>(0), static_cast<FRegister>(fs),
+         static_cast<FRegister>(fd), 0x6);
+}
+
+void MipsAssembler::Mfc1(Register rt, FRegister fs) {
+  EmitFR(0x11, 0x00, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
+}
+
+void MipsAssembler::Mtc1(Register rt, FRegister fs) {
+  // Moves GPR rt (rt field) into FPR fs (fs field).
+  EmitFR(0x11, 0x04, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
+}
+
+void MipsAssembler::Lwc1(FRegister ft, Register rs, uint16_t imm16) {
+  EmitI(0x31, rs, static_cast<Register>(ft), imm16);
+}
+
+void MipsAssembler::Ldc1(DRegister ft, Register rs, uint16_t imm16) {
+  EmitI(0x35, rs, static_cast<Register>(ft), imm16);
+}
+
+void MipsAssembler::Swc1(FRegister ft, Register rs, uint16_t imm16) {
+  EmitI(0x39, rs, static_cast<Register>(ft), imm16);
+}
+
+void MipsAssembler::Sdc1(DRegister ft, Register rs, uint16_t imm16) {
+  EmitI(0x3d, rs, static_cast<Register>(ft), imm16);
+}
+
+void MipsAssembler::Break() {
+  EmitR(0, static_cast<Register>(0), static_cast<Register>(0),
+        static_cast<Register>(0), 0, 0xD);
+}
+
+void MipsAssembler::Nop() {
+  EmitR(0x0, static_cast<Register>(0), static_cast<Register>(0), static_cast<Register>(0), 0, 0x0);
+}
+
+void MipsAssembler::Move(Register rt, Register rs) {
+  EmitI(0x8, rs, rt, 0);
+}
+
+void MipsAssembler::Clear(Register rt) {
+  EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rt, 0, 0x20);
+}
+
+void MipsAssembler::Not(Register rt, Register rs) {
+  EmitR(0, static_cast<Register>(0), rs, rt, 0, 0x27);
+}
+
+void MipsAssembler::Mul(Register rd, Register rs, Register rt) {
+  Mult(rs, rt);
+  Mflo(rd);  // Low 32 bits of the product.
+}
+
+void MipsAssembler::Div(Register rd, Register rs, Register rt) {
+  Div(rs, rt);
+  Mflo(rd);  // The quotient is left in LO.
+}
+
+void MipsAssembler::Rem(Register rd, Register rs, Register rt) {
+  Div(rs, rt);
+  Mfhi(rd);  // The remainder is left in HI.
+}
+
+void MipsAssembler::AddConstant(Register rt, Register rs, int32_t value) {
+  // Only correct for values that fit in a signed 16-bit immediate.
+  Addi(rt, rs, value);
+}
+
+void MipsAssembler::LoadImmediate(Register rt, int32_t value) {
+  // Only correct for values that fit in a signed 16-bit immediate; larger
+  // constants would need a lui/ori pair.
+  Addi(rt, ZERO, value);
+}
+
+void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset,
+                             size_t size) {
+  MipsManagedRegister dst = m_dst.AsMips();
+  if (dst.IsNoRegister()) {
+    CHECK_EQ(0u, size) << dst;
+  } else if (dst.IsCoreRegister()) {
+    CHECK_EQ(4u, size) << dst;
+    LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
+  } else if (dst.IsRegisterPair()) {
+    CHECK_EQ(8u, size) << dst;
+    LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
+    LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
+  } else if (dst.IsFRegister()) {
+    LoadSFromOffset(dst.AsFRegister(), src_register, src_offset);
+  } else {
+    CHECK(dst.IsDRegister()) << dst;
+    LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
+  }
+}
+
+void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register base,
+                                   int32_t offset) {
+  switch (type) {
+    case kLoadSignedByte:
+      Lb(reg, base, offset);
+      break;
+    case kLoadUnsignedByte:
+      Lbu(reg, base, offset);
+      break;
+    case kLoadSignedHalfword:
+      Lh(reg, base, offset);
+      break;
+    case kLoadUnsignedHalfword:
+      Lhu(reg, base, offset);
+      break;
+    case kLoadWord:
+      Lw(reg, base, offset);
+      break;
+    case kLoadWordPair:
+      LOG(FATAL) << "UNREACHABLE";
+      break;
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+  }
+}
+
+void MipsAssembler::LoadSFromOffset(FRegister reg, Register base, int32_t offset) {
+  Lwc1(reg, base, offset);
+}
+
+void MipsAssembler::LoadDFromOffset(DRegister reg, Register base, int32_t offset) {
+  Ldc1(reg, base, offset);
+}
+
+void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register base,
+                                  int32_t offset) {
+  switch (type) {
+    case kStoreByte:
+      Sb(reg, base, offset);
+      break;
+    case kStoreHalfword:
+      Sh(reg, base, offset);
+      break;
+    case kStoreWord:
+      Sw(reg, base, offset);
+      break;
+    case kStoreWordPair:
+      LOG(FATAL) << "UNREACHABLE";
+      break;
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+  }
+}
+
+void MipsAssembler::StoreFToOffset(FRegister reg, Register base, int32_t offset) {
+  Swc1(reg, base, offset);
+}
+
+void MipsAssembler::StoreDToOffset(DRegister reg, Register base, int32_t offset) {
+  Sdc1(reg, base, offset);
+}
+
+void MipsAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
+                               const std::vector<ManagedRegister>& callee_save_regs,
+                               const std::vector<ManagedRegister>& entry_spills) {
+  CHECK_ALIGNED(frame_size, kStackAlignment);
+
+  // Increase frame to required size.
+  IncreaseFrameSize(frame_size);
+
+  // Push callee saves and return address
+  int stack_offset = frame_size - kPointerSize;
+  StoreToOffset(kStoreWord, RA, SP, stack_offset);
+  for (int i = callee_save_regs.size() - 1; i >= 0; --i) {
+    stack_offset -= kPointerSize;
+    Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister();
+    StoreToOffset(kStoreWord, reg, SP, stack_offset);
+  }
+
+  // Write out Method*.
+  StoreToOffset(kStoreWord, method_reg.AsMips().AsCoreRegister(), SP, 0);
+
+  // Write out entry spills.
+  for (size_t i = 0; i < entry_spills.size(); ++i) {
+    Register reg = entry_spills.at(i).AsMips().AsCoreRegister();
+    StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize));
+  }
+}
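+
+// Resulting frame layout, as a sketch (offsets relative to the new SP):
+//   SP + frame_size + 4 + 4*i : entry spill i (in the caller's area)
+//   SP + frame_size - 4       : RA
+//   ...                       : callee saves, at descending offsets
+//   SP + 0                    : Method*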
+
+void MipsAssembler::RemoveFrame(size_t frame_size,
+                                const std::vector<ManagedRegister>& callee_save_regs) {
+  CHECK_ALIGNED(frame_size, kStackAlignment);
+
+  // Pop callee saves and return address
+  int stack_offset = frame_size - (callee_save_regs.size() * kPointerSize) - kPointerSize;
+  for (size_t i = 0; i < callee_save_regs.size(); ++i) {
+    Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister();
+    LoadFromOffset(kLoadWord, reg, SP, stack_offset);
+    stack_offset += kPointerSize;
+  }
+  LoadFromOffset(kLoadWord, RA, SP, stack_offset);
+
+  // Decrease frame to required size.
+  DecreaseFrameSize(frame_size);
+
+  // Then jump to the return address.
+  Jr(RA);
+}
+
+void MipsAssembler::IncreaseFrameSize(size_t adjust) {
+  CHECK_ALIGNED(adjust, kStackAlignment);
+  AddConstant(SP, SP, -adjust);
+}
+
+void MipsAssembler::DecreaseFrameSize(size_t adjust) {
+  CHECK_ALIGNED(adjust, kStackAlignment);
+  AddConstant(SP, SP, adjust);
+}
+
+void MipsAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
+  MipsManagedRegister src = msrc.AsMips();
+  if (src.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (src.IsCoreRegister()) {
+    CHECK_EQ(4u, size);
+    StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+  } else if (src.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
+    StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
+                  SP, dest.Int32Value() + 4);
+  } else if (src.IsFRegister()) {
+    StoreFToOffset(src.AsFRegister(), SP, dest.Int32Value());
+  } else {
+    CHECK(src.IsDRegister());
+    StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
+  }
+}
+
+void MipsAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+  MipsManagedRegister src = msrc.AsMips();
+  CHECK(src.IsCoreRegister());
+  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void MipsAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+  MipsManagedRegister src = msrc.AsMips();
+  CHECK(src.IsCoreRegister());
+  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void MipsAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+                                          ManagedRegister mscratch) {
+  MipsManagedRegister scratch = mscratch.AsMips();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  LoadImmediate(scratch.AsCoreRegister(), imm);
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void MipsAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+                                           ManagedRegister mscratch) {
+  MipsManagedRegister scratch = mscratch.AsMips();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  LoadImmediate(scratch.AsCoreRegister(), imm);
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value());
+}
+
+void MipsAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
+                                             FrameOffset fr_offs,
+                                             ManagedRegister mscratch) {
+  MipsManagedRegister scratch = mscratch.AsMips();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+                S1, thr_offs.Int32Value());
+}
+
+void MipsAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
+  StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value());
+}
+
+void MipsAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
+                                  FrameOffset in_off, ManagedRegister mscratch) {
+  MipsManagedRegister src = msrc.AsMips();
+  MipsManagedRegister scratch = mscratch.AsMips();
+  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+}
+
+void MipsAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+  EmitLoad(mdest, SP, src.Int32Value(), size);
+}
+
+void MipsAssembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) {
+  EmitLoad(mdest, S1, src.Int32Value(), size);
+}
+
+void MipsAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+  MipsManagedRegister dest = mdest.AsMips();
+  CHECK(dest.IsCoreRegister());
+  LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value());
+}
+
+void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
+                            MemberOffset offs) {
+  MipsManagedRegister dest = mdest.AsMips();
+  CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister());
+  LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
+                 base.AsMips().AsCoreRegister(), offs.Int32Value());
+}
+
+void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+                               Offset offs) {
+  MipsManagedRegister dest = mdest.AsMips();
+  CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister()) << dest;
+  LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
+                 base.AsMips().AsCoreRegister(), offs.Int32Value());
+}
+
+void MipsAssembler::LoadRawPtrFromThread(ManagedRegister mdest,
+                                         ThreadOffset offs) {
+  MipsManagedRegister dest = mdest.AsMips();
+  CHECK(dest.IsCoreRegister());
+  LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value());
+}
+
+void MipsAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL) << "no sign extension necessary for mips";
+}
+
+void MipsAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips";
+}
+
+void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t /*size*/) {
+  MipsManagedRegister dest = mdest.AsMips();
+  MipsManagedRegister src = msrc.AsMips();
+  if (!dest.Equals(src)) {
+    if (dest.IsCoreRegister()) {
+      CHECK(src.IsCoreRegister()) << src;
+      Move(dest.AsCoreRegister(), src.AsCoreRegister());
+    } else if (dest.IsFRegister()) {
+      CHECK(src.IsFRegister()) << src;
+      MovS(dest.AsFRegister(), src.AsFRegister());
+    } else if (dest.IsDRegister()) {
+      CHECK(src.IsDRegister()) << src;
+      MovD(dest.AsDRegister(), src.AsDRegister());
+    } else {
+      CHECK(dest.IsRegisterPair()) << dest;
+      CHECK(src.IsRegisterPair()) << src;
+      // Ensure that the first move doesn't clobber the input of the second
+      if (src.AsRegisterPairHigh() != dest.AsRegisterPairLow()) {
+        Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow());
+        Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh());
+      } else {
+        Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh());
+        Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow());
+      }
+    }
+  }
+}
+
+void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src,
+                            ManagedRegister mscratch) {
+  MipsManagedRegister scratch = mscratch.AsMips();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void MipsAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+                                         ThreadOffset thr_offs,
+                                         ManagedRegister mscratch) {
+  MipsManagedRegister scratch = mscratch.AsMips();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 S1, thr_offs.Int32Value());
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+                SP, fr_offs.Int32Value());
+}
+
+void MipsAssembler::CopyRawPtrToThread(ThreadOffset thr_offs,
+                                       FrameOffset fr_offs,
+                                       ManagedRegister mscratch) {
+  MipsManagedRegister scratch = mscratch.AsMips();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 SP, fr_offs.Int32Value());
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+                S1, thr_offs.Int32Value());
+}
+
+void MipsAssembler::Copy(FrameOffset dest, FrameOffset src,
+                         ManagedRegister mscratch, size_t size) {
+  MipsManagedRegister scratch = mscratch.AsMips();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  CHECK(size == 4 || size == 8) << size;
+  if (size == 4) {
+    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+  } else if (size == 8) {
+    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
+    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+  }
+}
+
+void MipsAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+                         ManagedRegister mscratch, size_t size) {
+  Register scratch = mscratch.AsMips().AsCoreRegister();
+  CHECK_EQ(size, 4u);
+  LoadFromOffset(kLoadWord, scratch, src_base.AsMips().AsCoreRegister(), src_offset.Int32Value());
+  StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
+}
+
+void MipsAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+                         ManagedRegister mscratch, size_t size) {
+  Register scratch = mscratch.AsMips().AsCoreRegister();
+  CHECK_EQ(size, 4u);
+  LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
+  StoreToOffset(kStoreWord, scratch, dest_base.AsMips().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+void MipsAssembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
+                         ManagedRegister /*mscratch*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
+#if 0
+  Register scratch = mscratch.AsMips().AsCoreRegister();
+  CHECK_EQ(size, 4u);
+  movl(scratch, Address(ESP, src_base));
+  movl(scratch, Address(scratch, src_offset));
+  movl(Address(ESP, dest), scratch);
+#endif
+}
+
+void MipsAssembler::Copy(ManagedRegister dest, Offset dest_offset,
+                         ManagedRegister src, Offset src_offset,
+                         ManagedRegister mscratch, size_t size) {
+  CHECK_EQ(size, 4u);
+  Register scratch = mscratch.AsMips().AsCoreRegister();
+  LoadFromOffset(kLoadWord, scratch, src.AsMips().AsCoreRegister(), src_offset.Int32Value());
+  StoreToOffset(kStoreWord, scratch, dest.AsMips().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+void MipsAssembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
+                         ManagedRegister /*mscratch*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
+#if 0
+  Register scratch = mscratch.AsMips().AsCoreRegister();
+  CHECK_EQ(size, 4u);
+  CHECK_EQ(dest.Int32Value(), src.Int32Value());
+  movl(scratch, Address(ESP, src));
+  pushl(Address(scratch, src_offset));
+  popl(Address(scratch, dest_offset));
+#endif
+}
+
+void MipsAssembler::MemoryBarrier(ManagedRegister) {
+  UNIMPLEMENTED(FATAL) << "NEEDS TO BE IMPLEMENTED";
+#if 0
+#if ANDROID_SMP != 0
+  mfence();
+#endif
+#endif
+}
+
+void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg,
+                                    FrameOffset sirt_offset,
+                                    ManagedRegister min_reg, bool null_allowed) {
+  MipsManagedRegister out_reg = mout_reg.AsMips();
+  MipsManagedRegister in_reg = min_reg.AsMips();
+  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
+  CHECK(out_reg.IsCoreRegister()) << out_reg;
+  if (null_allowed) {
+    Label null_arg;
+    // Null values get a SIRT entry value of 0.  Otherwise, the SIRT entry is
+    // the address in the SIRT holding the reference.
+    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+    if (in_reg.IsNoRegister()) {
+      LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
+                     SP, sirt_offset.Int32Value());
+      in_reg = out_reg;
+    }
+    if (!out_reg.Equals(in_reg)) {
+      LoadImmediate(out_reg.AsCoreRegister(), 0);
+    }
+    EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true);
+    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value());
+    Bind(&null_arg, false);
+  } else {
+    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value());
+  }
+}
+
+void MipsAssembler::CreateSirtEntry(FrameOffset out_off,
+                                    FrameOffset sirt_offset,
+                                    ManagedRegister mscratch,
+                                    bool null_allowed) {
+  MipsManagedRegister scratch = mscratch.AsMips();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  if (null_allowed) {
+    Label null_arg;
+    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
+                   sirt_offset.Int32Value());
+    // Null values get a SIRT entry value of 0.  Otherwise, the SIRT entry is
+    // the address in the SIRT holding the reference.
+    // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+    EmitBranch(scratch.AsCoreRegister(), ZERO, &null_arg, true);
+    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
+    Bind(&null_arg, false);
+  } else {
+    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
+  }
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
+}
+
+// Given a SIRT entry, load the associated reference.
+void MipsAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+                                          ManagedRegister min_reg) {
+  MipsManagedRegister out_reg = mout_reg.AsMips();
+  MipsManagedRegister in_reg = min_reg.AsMips();
+  CHECK(out_reg.IsCoreRegister()) << out_reg;
+  CHECK(in_reg.IsCoreRegister()) << in_reg;
+  Label null_arg;
+  if (!out_reg.Equals(in_reg)) {
+    LoadImmediate(out_reg.AsCoreRegister(), 0);
+  }
+  EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true);
+  LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
+                 in_reg.AsCoreRegister(), 0);
+  Bind(&null_arg, false);
+}
+
+void MipsAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references
+}
+
+void MipsAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references
+}
+
+void MipsAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
+  MipsManagedRegister base = mbase.AsMips();
+  MipsManagedRegister scratch = mscratch.AsMips();
+  CHECK(base.IsCoreRegister()) << base;
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 base.AsCoreRegister(), offset.Int32Value());
+  Jalr(scratch.AsCoreRegister());
+  // TODO: place reference map on call
+}
+
+void MipsAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+  MipsManagedRegister scratch = mscratch.AsMips();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  // Call *(*(SP + base) + offset)
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 SP, base.Int32Value());
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 scratch.AsCoreRegister(), offset.Int32Value());
+  Jalr(scratch.AsCoreRegister());
+  // TODO: place reference map on call
+}
+
+void MipsAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*mscratch*/) {
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
+#if 0
+  fs()->call(Address::Absolute(offset));
+#endif
+}
+
+void MipsAssembler::GetCurrentThread(ManagedRegister tr) {
+  Move(tr.AsMips().AsCoreRegister(), S1);
+}
+
+void MipsAssembler::GetCurrentThread(FrameOffset offset,
+                                     ManagedRegister /*mscratch*/) {
+  StoreToOffset(kStoreWord, S1, SP, offset.Int32Value());
+}
+
+void MipsAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
+  MipsManagedRegister scratch = mscratch.AsMips();
+  MipsExceptionSlowPath* slow = new MipsExceptionSlowPath(scratch, stack_adjust);
+  buffer_.EnqueueSlowPath(slow);
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 S1, Thread::ExceptionOffset().Int32Value());
+  EmitBranch(scratch.AsCoreRegister(), ZERO, slow->Entry(), false);
+}
+
+void MipsExceptionSlowPath::Emit(Assembler* sasm) {
+  MipsAssembler* sp_asm = down_cast<MipsAssembler*>(sasm);
+#define __ sp_asm->
+  __ Bind(&entry_, false);
+  if (stack_adjust_ != 0) {  // Fix up the frame.
+    __ DecreaseFrameSize(stack_adjust_);
+  }
+  // Pass exception object as argument
+  // Don't care about preserving A0 as this call won't return
+  __ Move(A0, scratch_.AsCoreRegister());
+  // Set up call to Thread::Current()->pDeliverException
+  __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pDeliverException));
+  __ Jr(T9);
+  // Call never returns
+  __ Break();
+#undef __
+}
+
+}  // namespace mips
+}  // namespace art
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
new file mode 100644
index 0000000..0f5f2fe
--- /dev/null
+++ b/compiler/utils/mips/assembler_mips.h
@@ -0,0 +1,507 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
+#define ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
+
+#include <vector>
+
+#include "base/macros.h"
+#include "constants_mips.h"
+#include "globals.h"
+#include "managed_register_mips.h"
+#include "utils/assembler.h"
+#include "offsets.h"
+#include "utils.h"
+
+namespace art {
+namespace mips {
+#if 0  // Disabled leftover x86 Operand/Address code (ModRM/SIB, ESP/EBP).
+class Operand {
+ public:
+  uint8_t mod() const {
+    return (encoding_at(0) >> 6) & 3;
+  }
+
+  Register rm() const {
+    return static_cast<Register>(encoding_at(0) & 7);
+  }
+
+  ScaleFactor scale() const {
+    return static_cast<ScaleFactor>((encoding_at(1) >> 6) & 3);
+  }
+
+  Register index() const {
+    return static_cast<Register>((encoding_at(1) >> 3) & 7);
+  }
+
+  Register base() const {
+    return static_cast<Register>(encoding_at(1) & 7);
+  }
+
+  int8_t disp8() const {
+    CHECK_GE(length_, 2);
+    return static_cast<int8_t>(encoding_[length_ - 1]);
+  }
+
+  int32_t disp32() const {
+    CHECK_GE(length_, 5);
+    int32_t value;
+    memcpy(&value, &encoding_[length_ - 4], sizeof(value));
+    return value;
+  }
+
+  bool IsRegister(Register reg) const {
+    return ((encoding_[0] & 0xF8) == 0xC0)  // Addressing mode is register only.
+        && ((encoding_[0] & 0x07) == reg);  // Register codes match.
+  }
+
+ protected:
+  // Operand can be sub classed (e.g: Address).
+  Operand() : length_(0) { }
+
+  void SetModRM(int mod, Register rm) {
+    CHECK_EQ(mod & ~3, 0);
+    encoding_[0] = (mod << 6) | rm;
+    length_ = 1;
+  }
+
+  void SetSIB(ScaleFactor scale, Register index, Register base) {
+    CHECK_EQ(length_, 1);
+    CHECK_EQ(scale & ~3, 0);
+    encoding_[1] = (scale << 6) | (index << 3) | base;
+    length_ = 2;
+  }
+
+  void SetDisp8(int8_t disp) {
+    CHECK(length_ == 1 || length_ == 2);
+    encoding_[length_++] = static_cast<uint8_t>(disp);
+  }
+
+  void SetDisp32(int32_t disp) {
+    CHECK(length_ == 1 || length_ == 2);
+    int disp_size = sizeof(disp);
+    memmove(&encoding_[length_], &disp, disp_size);
+    length_ += disp_size;
+  }
+
+ private:
+  byte length_;
+  byte encoding_[6];
+  byte padding_;
+
+  explicit Operand(Register reg) { SetModRM(3, reg); }
+
+  // Get the operand encoding byte at the given index.
+  uint8_t encoding_at(int index) const {
+    CHECK_GE(index, 0);
+    CHECK_LT(index, length_);
+    return encoding_[index];
+  }
+
+  friend class MipsAssembler;
+
+  DISALLOW_COPY_AND_ASSIGN(Operand);
+};
+
+
+class Address : public Operand {
+ public:
+  Address(Register base, int32_t disp) {
+    Init(base, disp);
+  }
+
+  Address(Register base, Offset disp) {
+    Init(base, disp.Int32Value());
+  }
+
+  Address(Register base, FrameOffset disp) {
+    CHECK_EQ(base, ESP);
+    Init(ESP, disp.Int32Value());
+  }
+
+  Address(Register base, MemberOffset disp) {
+    Init(base, disp.Int32Value());
+  }
+
+  void Init(Register base, int32_t disp) {
+    if (disp == 0 && base != EBP) {
+      SetModRM(0, base);
+      if (base == ESP) SetSIB(TIMES_1, ESP, base);
+    } else if (disp >= -128 && disp <= 127) {
+      SetModRM(1, base);
+      if (base == ESP) SetSIB(TIMES_1, ESP, base);
+      SetDisp8(disp);
+    } else {
+      SetModRM(2, base);
+      if (base == ESP) SetSIB(TIMES_1, ESP, base);
+      SetDisp32(disp);
+    }
+  }
+
+
+  Address(Register index, ScaleFactor scale, int32_t disp) {
+    CHECK_NE(index, ESP);  // Illegal addressing mode.
+    SetModRM(0, ESP);
+    SetSIB(scale, index, EBP);
+    SetDisp32(disp);
+  }
+
+  Address(Register base, Register index, ScaleFactor scale, int32_t disp) {
+    CHECK_NE(index, ESP);  // Illegal addressing mode.
+    if (disp == 0 && base != EBP) {
+      SetModRM(0, ESP);
+      SetSIB(scale, index, base);
+    } else if (disp >= -128 && disp <= 127) {
+      SetModRM(1, ESP);
+      SetSIB(scale, index, base);
+      SetDisp8(disp);
+    } else {
+      SetModRM(2, ESP);
+      SetSIB(scale, index, base);
+      SetDisp32(disp);
+    }
+  }
+
+  static Address Absolute(uword addr) {
+    Address result;
+    result.SetModRM(0, EBP);
+    result.SetDisp32(addr);
+    return result;
+  }
+
+  static Address Absolute(ThreadOffset addr) {
+    return Absolute(addr.Int32Value());
+  }
+
+ private:
+  Address() {}
+
+  DISALLOW_COPY_AND_ASSIGN(Address);
+};
+
+#endif
+
+enum LoadOperandType {
+  kLoadSignedByte,
+  kLoadUnsignedByte,
+  kLoadSignedHalfword,
+  kLoadUnsignedHalfword,
+  kLoadWord,
+  kLoadWordPair,
+  kLoadSWord,
+  kLoadDWord
+};
+
+enum StoreOperandType {
+  kStoreByte,
+  kStoreHalfword,
+  kStoreWord,
+  kStoreWordPair,
+  kStoreSWord,
+  kStoreDWord
+};
+
+class MipsAssembler : public Assembler {
+ public:
+  MipsAssembler() {}
+  virtual ~MipsAssembler() {}
+
+  // Emit Machine Instructions.
+  void Add(Register rd, Register rs, Register rt);
+  void Addu(Register rd, Register rs, Register rt);
+  void Addi(Register rt, Register rs, uint16_t imm16);
+  void Addiu(Register rt, Register rs, uint16_t imm16);
+  void Sub(Register rd, Register rs, Register rt);
+  void Subu(Register rd, Register rs, Register rt);
+  void Mult(Register rs, Register rt);
+  void Multu(Register rs, Register rt);
+  void Div(Register rs, Register rt);
+  void Divu(Register rs, Register rt);
+
+  void And(Register rd, Register rs, Register rt);
+  void Andi(Register rt, Register rs, uint16_t imm16);
+  void Or(Register rd, Register rs, Register rt);
+  void Ori(Register rt, Register rs, uint16_t imm16);
+  void Xor(Register rd, Register rs, Register rt);
+  void Xori(Register rt, Register rs, uint16_t imm16);
+  void Nor(Register rd, Register rs, Register rt);
+
+  void Sll(Register rd, Register rs, int shamt);
+  void Srl(Register rd, Register rs, int shamt);
+  void Sra(Register rd, Register rs, int shamt);
+  void Sllv(Register rd, Register rs, Register rt);
+  void Srlv(Register rd, Register rs, Register rt);
+  void Srav(Register rd, Register rs, Register rt);
+
+  void Lb(Register rt, Register rs, uint16_t imm16);
+  void Lh(Register rt, Register rs, uint16_t imm16);
+  void Lw(Register rt, Register rs, uint16_t imm16);
+  void Lbu(Register rt, Register rs, uint16_t imm16);
+  void Lhu(Register rt, Register rs, uint16_t imm16);
+  void Lui(Register rt, uint16_t imm16);
+  void Mfhi(Register rd);
+  void Mflo(Register rd);
+
+  void Sb(Register rt, Register rs, uint16_t imm16);
+  void Sh(Register rt, Register rs, uint16_t imm16);
+  void Sw(Register rt, Register rs, uint16_t imm16);
+
+  void Slt(Register rd, Register rs, Register rt);
+  void Sltu(Register rd, Register rs, Register rt);
+  void Slti(Register rt, Register rs, uint16_t imm16);
+  void Sltiu(Register rt, Register rs, uint16_t imm16);
+
+  void Beq(Register rt, Register rs, uint16_t imm16);
+  void Bne(Register rt, Register rs, uint16_t imm16);
+  void J(uint32_t address);
+  void Jal(uint32_t address);
+  void Jr(Register rs);
+  void Jalr(Register rs);
+
+  void AddS(FRegister fd, FRegister fs, FRegister ft);
+  void SubS(FRegister fd, FRegister fs, FRegister ft);
+  void MulS(FRegister fd, FRegister fs, FRegister ft);
+  void DivS(FRegister fd, FRegister fs, FRegister ft);
+  void AddD(DRegister fd, DRegister fs, DRegister ft);
+  void SubD(DRegister fd, DRegister fs, DRegister ft);
+  void MulD(DRegister fd, DRegister fs, DRegister ft);
+  void DivD(DRegister fd, DRegister fs, DRegister ft);
+  void MovS(FRegister fd, FRegister fs);
+  void MovD(DRegister fd, DRegister fs);
+
+  void Mfc1(Register rt, FRegister fs);
+  void Mtc1(Register rt, FRegister fs);
+  void Lwc1(FRegister ft, Register rs, uint16_t imm16);
+  void Ldc1(DRegister ft, Register rs, uint16_t imm16);
+  void Swc1(FRegister ft, Register rs, uint16_t imm16);
+  void Sdc1(DRegister ft, Register rs, uint16_t imm16);
+
+  void Break();
+  void Nop();
+  void Move(Register rt, Register rs);
+  void Clear(Register rt);
+  void Not(Register rt, Register rs);
+  void Mul(Register rd, Register rs, Register rt);
+  void Div(Register rd, Register rs, Register rt);
+  void Rem(Register rd, Register rs, Register rt);
+
+  void AddConstant(Register rt, Register rs, int32_t value);
+  void LoadImmediate(Register rt, int32_t value);
+
+  void EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset, size_t size);
+  void LoadFromOffset(LoadOperandType type, Register reg, Register base, int32_t offset);
+  void LoadSFromOffset(FRegister reg, Register base, int32_t offset);
+  void LoadDFromOffset(DRegister reg, Register base, int32_t offset);
+  void StoreToOffset(StoreOperandType type, Register reg, Register base, int32_t offset);
+  void StoreFToOffset(FRegister reg, Register base, int32_t offset);
+  void StoreDToOffset(DRegister reg, Register base, int32_t offset);
+
+#if 0  // Disabled leftover x86 helpers (lock/mfence/XmmRegister).
+  MipsAssembler* lock();
+
+  void mfence();
+
+  MipsAssembler* fs();
+
+  //
+  // Macros for High-level operations.
+  //
+
+  void AddImmediate(Register reg, const Immediate& imm);
+
+  void LoadDoubleConstant(XmmRegister dst, double value);
+
+  void DoubleNegate(XmmRegister d);
+  void FloatNegate(XmmRegister f);
+
+  void DoubleAbs(XmmRegister reg);
+
+  void LockCmpxchgl(const Address& address, Register reg) {
+    lock()->cmpxchgl(address, reg);
+  }
+
+  //
+  // Misc. functionality
+  //
+  int PreferredLoopAlignment() { return 16; }
+  void Align(int alignment, int offset);
+
+  // Debugging and bringup support.
+  void Stop(const char* message);
+#endif
+
+  // Emit data (e.g. encoded instruction or immediate) to the instruction stream.
+  void Emit(int32_t value);
+  void EmitBranch(Register rt, Register rs, Label* label, bool equal);
+  void EmitJump(Label* label, bool link);
+  void Bind(Label* label, bool is_jump);
+
+  //
+  // Overridden common assembler high-level functionality
+  //
+
+  // Emit code that will create an activation on the stack
+  virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+                          const std::vector<ManagedRegister>& callee_save_regs,
+                          const std::vector<ManagedRegister>& entry_spills);
+
+  // Emit code that will remove an activation from the stack
+  virtual void RemoveFrame(size_t frame_size,
+                           const std::vector<ManagedRegister>& callee_save_regs);
+
+  virtual void IncreaseFrameSize(size_t adjust);
+  virtual void DecreaseFrameSize(size_t adjust);
+
+  // Store routines
+  virtual void Store(FrameOffset offs, ManagedRegister msrc, size_t size);
+  virtual void StoreRef(FrameOffset dest, ManagedRegister msrc);
+  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister msrc);
+
+  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+                                     ManagedRegister mscratch);
+
+  virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+                                      ManagedRegister mscratch);
+
+  virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
+                                        FrameOffset fr_offs,
+                                        ManagedRegister mscratch);
+
+  virtual void StoreStackPointerToThread(ThreadOffset thr_offs);
+
+  virtual void StoreSpanning(FrameOffset dest, ManagedRegister msrc,
+                             FrameOffset in_off, ManagedRegister mscratch);
+
+  // Load routines
+  virtual void Load(ManagedRegister mdest, FrameOffset src, size_t size);
+
+  virtual void Load(ManagedRegister mdest, ThreadOffset src, size_t size);
+
+  virtual void LoadRef(ManagedRegister dest, FrameOffset src);
+
+  virtual void LoadRef(ManagedRegister mdest, ManagedRegister base,
+                       MemberOffset offs);
+
+  virtual void LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+                          Offset offs);
+
+  virtual void LoadRawPtrFromThread(ManagedRegister mdest,
+                                    ThreadOffset offs);
+
+  // Copying routines
+  virtual void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size);
+
+  virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+                                    ManagedRegister mscratch);
+
+  virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+                                  ManagedRegister mscratch);
+
+  virtual void CopyRef(FrameOffset dest, FrameOffset src,
+                       ManagedRegister mscratch);
+
+  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+                    ManagedRegister mscratch, size_t size);
+
+  virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+                    ManagedRegister mscratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+                    ManagedRegister mscratch, size_t size);
+
+  virtual void Copy(ManagedRegister dest, Offset dest_offset,
+                    ManagedRegister src, Offset src_offset,
+                    ManagedRegister mscratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+                    ManagedRegister mscratch, size_t size);
+
+  virtual void MemoryBarrier(ManagedRegister);
+
+  // Sign extension
+  virtual void SignExtend(ManagedRegister mreg, size_t size);
+
+  // Zero extension
+  virtual void ZeroExtend(ManagedRegister mreg, size_t size);
+
+  // Exploit fast access in managed code to Thread::Current()
+  virtual void GetCurrentThread(ManagedRegister tr);
+  virtual void GetCurrentThread(FrameOffset dest_offset,
+                                ManagedRegister mscratch);
+
+  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed is set. in_reg holds a possibly stale
+  // reference that can be used to avoid loading the SIRT entry to see if the
+  // value is NULL.
+  virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+                               ManagedRegister in_reg, bool null_allowed);
+
+  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed is set.
+  virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+                               ManagedRegister mscratch, bool null_allowed);
+
+  // src holds a SIRT entry (Object**); load it into dst.
+  virtual void LoadReferenceFromSirt(ManagedRegister dst,
+                                     ManagedRegister src);
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to 'this')
+  // we know that src cannot be null.
+  virtual void VerifyObject(ManagedRegister src, bool could_be_null);
+  virtual void VerifyObject(FrameOffset src, bool could_be_null);
+
+  // Call to address held at [base+offset]
+  virtual void Call(ManagedRegister base, Offset offset,
+                    ManagedRegister mscratch);
+  virtual void Call(FrameOffset base, Offset offset,
+                    ManagedRegister mscratch);
+  virtual void Call(ThreadOffset offset, ManagedRegister mscratch);
+
+  // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+  virtual void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust);
+
+ private:
+  void EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct);
+  void EmitI(int opcode, Register rs, Register rt, uint16_t imm);
+  void EmitJ(int opcode, int address);
+  void EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct);
+  void EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm);
+
+  int32_t EncodeBranchOffset(int offset, int32_t inst, bool is_jump);
+  int DecodeBranchOffset(int32_t inst, bool is_jump);
+
+  DISALLOW_COPY_AND_ASSIGN(MipsAssembler);
+};
+
+// Slow path entered when Thread::Current()->exception_ is non-null.
+class MipsExceptionSlowPath : public SlowPath {
+ public:
+  explicit MipsExceptionSlowPath(MipsManagedRegister scratch, size_t stack_adjust)
+      : scratch_(scratch), stack_adjust_(stack_adjust) {}
+  virtual void Emit(Assembler* sp_asm);
+ private:
+  const MipsManagedRegister scratch_;
+  const size_t stack_adjust_;
+};
+
+}  // namespace mips
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
diff --git a/compiler/utils/mips/constants_mips.h b/compiler/utils/mips/constants_mips.h
new file mode 100644
index 0000000..44ed5cc
--- /dev/null
+++ b/compiler/utils/mips/constants_mips.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_MIPS_CONSTANTS_MIPS_H_
+#define ART_COMPILER_UTILS_MIPS_CONSTANTS_MIPS_H_
+
+#include <iosfwd>
+
+#include "arch/mips/registers_mips.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "globals.h"
+
+namespace art {
+namespace mips {
+
+// Values for double-precision floating point registers.
+enum DRegister {
+  D0  =  0,
+  D1  =  1,
+  D2  =  2,
+  D3  =  3,
+  D4  =  4,
+  D5  =  5,
+  D6  =  6,
+  D7  =  7,
+  D8  =  8,
+  D9  =  9,
+  D10 = 10,
+  D11 = 11,
+  D12 = 12,
+  D13 = 13,
+  D14 = 14,
+  D15 = 15,
+  kNumberOfDRegisters = 16,
+  kNumberOfOverlappingDRegisters = 16,
+  kNoDRegister = -1,
+};
+std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
+
+// Constants used for encoding and decoding the individual fields of instructions.
+enum InstructionFields {
+  kOpcodeShift = 26,
+  kOpcodeBits = 6,
+  kRsShift = 21,
+  kRsBits = 5,
+  kRtShift = 16,
+  kRtBits = 5,
+  kRdShift = 11,
+  kRdBits = 5,
+  kShamtShift = 6,
+  kShamtBits = 5,
+  kFunctShift = 0,
+  kFunctBits = 6,
+
+  kFmtShift = 21,
+  kFmtBits = 5,
+  kFtShift = 16,
+  kFtBits = 5,
+  kFsShift = 11,
+  kFsBits = 5,
+  kFdShift = 6,
+  kFdBits = 5,
+
+  kBranchOffsetMask = 0x0000ffff,
+  kJumpOffsetMask = 0x03ffffff,
+};
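+
+// For reference, these fields pack into the three standard MIPS32 instruction
+// formats (most significant bits first):
+//   R-type: opcode(6) rs(5) rt(5) rd(5) shamt(5) funct(6)
+//   I-type: opcode(6) rs(5) rt(5) imm(16)
+//   J-type: opcode(6) target(26)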
+
+enum ScaleFactor {
+  TIMES_1 = 0,
+  TIMES_2 = 1,
+  TIMES_4 = 2,
+  TIMES_8 = 3
+};
+
+class Instr {
+ public:
+  static const uint32_t kBreakPointInstruction = 0x0000000D;
+
+  bool IsBreakPoint() {
+    return ((*reinterpret_cast<const uint32_t*>(this)) & 0xFC0000CF) == kBreakPointInstruction;
+  }
+
+  // Instructions are read out of a code stream. The only way to get a
+  // reference to an instruction is to convert a pointer. There is no way
+  // to allocate or create instances of class Instr.
+  // Use the At(pc) function to create references to Instr.
+  static Instr* At(uintptr_t pc) { return reinterpret_cast<Instr*>(pc); }
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
+};
+
+}  // namespace mips
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_MIPS_CONSTANTS_MIPS_H_
diff --git a/compiler/utils/mips/managed_register_mips.cc b/compiler/utils/mips/managed_register_mips.cc
new file mode 100644
index 0000000..195dafb
--- /dev/null
+++ b/compiler/utils/mips/managed_register_mips.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "managed_register_mips.h"
+
+#include "globals.h"
+
+namespace art {
+namespace mips {
+
+// These core registers are never available for allocation.
+static const Register kReservedCoreRegistersArray[] = { S0, S1 };
+
+// We need all registers for caching.
+static const int kNumberOfAvailableCoreRegisters = (S7 - T0) + 1;
+static const int kNumberOfAvailableFRegisters = kNumberOfFRegisters;
+static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters;
+static const int kNumberOfAvailableOverlappingDRegisters =
+    kNumberOfOverlappingDRegisters;
+static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
+
+bool MipsManagedRegister::Overlaps(const MipsManagedRegister& other) const {
+  if (IsNoRegister() || other.IsNoRegister()) return false;
+  CHECK(IsValidManagedRegister());
+  CHECK(other.IsValidManagedRegister());
+  if (Equals(other)) return true;
+  if (IsRegisterPair()) {
+    Register low = AsRegisterPairLow();
+    Register high = AsRegisterPairHigh();
+    return MipsManagedRegister::FromCoreRegister(low).Overlaps(other) ||
+        MipsManagedRegister::FromCoreRegister(high).Overlaps(other);
+  }
+  if (IsOverlappingDRegister()) {
+    if (other.IsDRegister()) return Equals(other);
+    if (other.IsFRegister()) {
+      FRegister low = AsOverlappingDRegisterLow();
+      FRegister high = AsOverlappingDRegisterHigh();
+      FRegister other_freg = other.AsFRegister();
+      return (low == other_freg) || (high == other_freg);
+    }
+    return false;
+  }
+  if (other.IsRegisterPair() || other.IsOverlappingDRegister()) {
+    return other.Overlaps(*this);
+  }
+  return false;
+}
+
+
+int MipsManagedRegister::AllocIdLow() const {
+  CHECK(IsOverlappingDRegister() || IsRegisterPair());
+  const int r = RegId() - (kNumberOfCoreRegIds + kNumberOfFRegIds);
+  int low;
+  if (r < kNumberOfOverlappingDRegIds) {
+    CHECK(IsOverlappingDRegister());
+    low = (r * 2) + kNumberOfCoreRegIds;  // Return an FRegister.
+  } else {
+    CHECK(IsRegisterPair());
+    low = (r - kNumberOfDRegIds) * 2 + 2;  // Return a Register.
+    if (low >= 24) {
+      // We got a pair higher than S6_S7; it must be the Dalvik special case
+      // A1_A2, whose low register is A1 (register 5).
+      low = 5;
+    }
+  }
+  return low;
+}
+
+
+int MipsManagedRegister::AllocIdHigh() const {
+  return AllocIdLow() + 1;
+}
+
+
+void MipsManagedRegister::Print(std::ostream& os) const {
+  if (!IsValidManagedRegister()) {
+    os << "No Register";
+  } else if (IsCoreRegister()) {
+    os << "Core: " << static_cast<int>(AsCoreRegister());
+  } else if (IsRegisterPair()) {
+    os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh();
+  } else if (IsFRegister()) {
+    os << "FRegister: " << static_cast<int>(AsFRegister());
+  } else if (IsDRegister()) {
+    os << "DRegister: " << static_cast<int>(AsDRegister());
+  } else {
+    os << "??: " << RegId();
+  }
+}
+
+std::ostream& operator<<(std::ostream& os, const MipsManagedRegister& reg) {
+  reg.Print(os);
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg) {
+  os << MipsManagedRegister::FromRegisterPair(reg);
+  return os;
+}
+
+}  // namespace mips
+}  // namespace art
diff --git a/compiler/utils/mips/managed_register_mips.h b/compiler/utils/mips/managed_register_mips.h
new file mode 100644
index 0000000..dd55cc4
--- /dev/null
+++ b/compiler/utils/mips/managed_register_mips.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
+#define ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
+
+#include "constants_mips.h"
+#include "utils/managed_register.h"
+
+namespace art {
+namespace mips {
+
+// Values for register pairs.
+enum RegisterPair {
+  V0_V1 = 0,
+  A0_A1 = 1,
+  A2_A3 = 2,
+  T0_T1 = 3,
+  T2_T3 = 4,
+  T4_T5 = 5,
+  T6_T7 = 6,
+  S0_S1 = 7,
+  S2_S3 = 8,
+  S4_S5 = 9,
+  S6_S7 = 10,
+  A1_A2 = 11,  // Dalvik style passing
+  kNumberOfRegisterPairs = 12,
+  kNoRegisterPair = -1,
+};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg);
+
+const int kNumberOfCoreRegIds = kNumberOfCoreRegisters;
+const int kNumberOfCoreAllocIds = kNumberOfCoreRegisters;
+
+const int kNumberOfFRegIds = kNumberOfFRegisters;
+const int kNumberOfFAllocIds = kNumberOfFRegisters;
+
+const int kNumberOfDRegIds = kNumberOfDRegisters;
+const int kNumberOfOverlappingDRegIds = kNumberOfOverlappingDRegisters;
+const int kNumberOfDAllocIds = kNumberOfDRegisters;
+
+const int kNumberOfPairRegIds = kNumberOfRegisterPairs;
+
+const int kNumberOfRegIds = kNumberOfCoreRegIds + kNumberOfFRegIds +
+    kNumberOfDRegIds + kNumberOfPairRegIds;
+const int kNumberOfAllocIds =
+    kNumberOfCoreAllocIds + kNumberOfFAllocIds + kNumberOfDAllocIds;
+
+// Register ids map:
+//   [0..R[  core registers (enum Register)
+//   [R..F[  single precision FP registers (enum FRegister)
+//   [F..D[  double precision FP registers (enum DRegister)
+//   [D..P[  core register pairs (enum RegisterPair)
+// where
+//   R = kNumberOfCoreRegIds
+//   F = R + kNumberOfFRegIds
+//   D = F + kNumberOfDRegIds
+//   P = D + kNumberOfRegisterPairs
+
+// Allocation ids map:
+//   [0..R[  core registers (enum Register)
+//   [R..F[  single precision FP registers (enum FRegister)
+// where
+//   R = kNumberOfCoreRegIds
+//   F = R + kNumberOfFRegIds
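+//
+// For example (illustrative; the concrete counts come from registers_mips.h,
+// which is not part of this file, assuming the usual MIPS32 counts of 32 core
+// registers and 32 FRegisters): F0 gets register id 32, D0 gets id 64, and
+// the pair V0_V1 gets id 80. Overlapping D registers and register pairs have
+// no allocation id of their own and are handled through
+// AllocIdLow()/AllocIdHigh().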
+
+
+// An instance of class 'ManagedRegister' represents a single core register (enum
+// Register), a single-precision FP register (enum FRegister), a double-precision
+// FP register (enum DRegister), or a pair of core registers (enum RegisterPair).
+// 'ManagedRegister::NoRegister()' provides an invalid register.
+// There is a one-to-one mapping between ManagedRegister and register id.
+class MipsManagedRegister : public ManagedRegister {
+ public:
+  Register AsCoreRegister() const {
+    CHECK(IsCoreRegister());
+    return static_cast<Register>(id_);
+  }
+
+  FRegister AsFRegister() const {
+    CHECK(IsFRegister());
+    return static_cast<FRegister>(id_ - kNumberOfCoreRegIds);
+  }
+
+  DRegister AsDRegister() const {
+    CHECK(IsDRegister());
+    return static_cast<DRegister>(id_ - kNumberOfCoreRegIds - kNumberOfFRegIds);
+  }
+
+  FRegister AsOverlappingDRegisterLow() const {
+    CHECK(IsOverlappingDRegister());
+    DRegister d_reg = AsDRegister();
+    return static_cast<FRegister>(d_reg * 2);
+  }
+
+  FRegister AsOverlappingDRegisterHigh() const {
+    CHECK(IsOverlappingDRegister());
+    DRegister d_reg = AsDRegister();
+    return static_cast<FRegister>(d_reg * 2 + 1);
+  }
+
+  Register AsRegisterPairLow() const {
+    CHECK(IsRegisterPair());
+    // Appropriate mapping of register ids allows us to use AllocIdLow().
+    return FromRegId(AllocIdLow()).AsCoreRegister();
+  }
+
+  Register AsRegisterPairHigh() const {
+    CHECK(IsRegisterPair());
+    // Appropriate mapping of register ids allows us to use AllocIdHigh().
+    return FromRegId(AllocIdHigh()).AsCoreRegister();
+  }
+
+  bool IsCoreRegister() const {
+    CHECK(IsValidManagedRegister());
+    return (0 <= id_) && (id_ < kNumberOfCoreRegIds);
+  }
+
+  bool IsFRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - kNumberOfCoreRegIds;
+    return (0 <= test) && (test < kNumberOfFRegIds);
+  }
+
+  bool IsDRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds);
+    return (0 <= test) && (test < kNumberOfDRegIds);
+  }
+
+  // Returns true if this DRegister overlaps FRegisters.
+  bool IsOverlappingDRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds);
+    return (0 <= test) && (test < kNumberOfOverlappingDRegIds);
+  }
+
+  bool IsRegisterPair() const {
+    CHECK(IsValidManagedRegister());
+    const int test =
+        id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds + kNumberOfDRegIds);
+    return (0 <= test) && (test < kNumberOfPairRegIds);
+  }
+
+  void Print(std::ostream& os) const;
+
+  // Returns true if the two managed registers ('this' and 'other') overlap.
+  // Either managed register may be the NoRegister; if both are the NoRegister,
+  // false is returned.
+  bool Overlaps(const MipsManagedRegister& other) const;
+
+  static MipsManagedRegister FromCoreRegister(Register r) {
+    CHECK_NE(r, kNoRegister);
+    return FromRegId(r);
+  }
+
+  static MipsManagedRegister FromFRegister(FRegister r) {
+    CHECK_NE(r, kNoFRegister);
+    return FromRegId(r + kNumberOfCoreRegIds);
+  }
+
+  static MipsManagedRegister FromDRegister(DRegister r) {
+    CHECK_NE(r, kNoDRegister);
+    return FromRegId(r + kNumberOfCoreRegIds + kNumberOfFRegIds);
+  }
+
+  static MipsManagedRegister FromRegisterPair(RegisterPair r) {
+    CHECK_NE(r, kNoRegisterPair);
+    return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfFRegIds + kNumberOfDRegIds));
+  }
+
+ private:
+  bool IsValidManagedRegister() const {
+    return (0 <= id_) && (id_ < kNumberOfRegIds);
+  }
+
+  int RegId() const {
+    CHECK(!IsNoRegister());
+    return id_;
+  }
+
+  int AllocId() const {
+    CHECK(IsValidManagedRegister() && !IsOverlappingDRegister() && !IsRegisterPair());
+    CHECK_LT(id_, kNumberOfAllocIds);
+    return id_;
+  }
+
+  int AllocIdLow() const;
+  int AllocIdHigh() const;
+
+  friend class ManagedRegister;
+
+  explicit MipsManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
+
+  static MipsManagedRegister FromRegId(int reg_id) {
+    MipsManagedRegister reg(reg_id);
+    CHECK(reg.IsValidManagedRegister());
+    return reg;
+  }
+};
+
+std::ostream& operator<<(std::ostream& os, const MipsManagedRegister& reg);
+
+}  // namespace mips
+
+inline mips::MipsManagedRegister ManagedRegister::AsMips() const {
+  mips::MipsManagedRegister reg(id_);
+  CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
+  return reg;
+}
+
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
new file mode 100644
index 0000000..89bfeb5
--- /dev/null
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -0,0 +1,1847 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_x86.h"
+
+#include "base/casts.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "memory_region.h"
+#include "thread.h"
+
+namespace art {
+namespace x86 {
+
+class DirectCallRelocation : public AssemblerFixup {
+ public:
+  void Process(const MemoryRegion& region, int position) {
+    // Direct calls are relative to the following instruction on x86.
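+    // Illustrative example: if the region starts at 0x1000 and position 0x20
+    // holds the absolute target 0x2000, the stored value becomes
+    // 0x2000 - (0x1000 + 0x20 + 4) = 0xFDC, i.e. the target minus the address
+    // of the instruction following the call.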
+    int32_t pointer = region.Load<int32_t>(position);
+    int32_t start = reinterpret_cast<int32_t>(region.start());
+    int32_t delta = start + position + sizeof(int32_t);
+    region.Store<int32_t>(position, pointer - delta);
+  }
+};
+
+std::ostream& operator<<(std::ostream& os, const XmmRegister& reg) {
+  return os << "XMM" << static_cast<int>(reg);
+}
+
+std::ostream& operator<<(std::ostream& os, const X87Register& reg) {
+  return os << "ST" << static_cast<int>(reg);
+}
+
+void X86Assembler::call(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xFF);
+  EmitRegisterOperand(2, reg);
+}
+
+
+void X86Assembler::call(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xFF);
+  EmitOperand(2, address);
+}
+
+
+void X86Assembler::call(Label* label) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xE8);
+  static const int kSize = 5;
+  EmitLabel(label, kSize);
+}
+
+
+void X86Assembler::pushl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x50 + reg);
+}
+
+
+void X86Assembler::pushl(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xFF);
+  EmitOperand(6, address);
+}
+
+
+void X86Assembler::pushl(const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  if (imm.is_int8()) {
+    EmitUint8(0x6A);
+    EmitUint8(imm.value() & 0xFF);
+  } else {
+    EmitUint8(0x68);
+    EmitImmediate(imm);
+  }
+}
+
+
+void X86Assembler::popl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x58 + reg);
+}
+
+
+void X86Assembler::popl(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x8F);
+  EmitOperand(0, address);
+}
+
+
+void X86Assembler::movl(Register dst, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xB8 + dst);
+  EmitImmediate(imm);
+}
+
+
+void X86Assembler::movl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x89);
+  EmitRegisterOperand(src, dst);
+}
+
+
+void X86Assembler::movl(Register dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x8B);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movl(const Address& dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x89);
+  EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movl(const Address& dst, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC7);
+  EmitOperand(0, dst);
+  EmitImmediate(imm);
+}
+
+void X86Assembler::movl(const Address& dst, Label* lbl) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC7);
+  EmitOperand(0, dst);
+  EmitLabel(lbl, dst.length_ + 5);
+}
+
+void X86Assembler::movzxb(Register dst, ByteRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xB6);
+  EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movzxb(Register dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xB6);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movsxb(Register dst, ByteRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xBE);
+  EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movsxb(Register dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xBE);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movb(Register /*dst*/, const Address& /*src*/) {
+  LOG(FATAL) << "Use movzxb or movsxb instead.";
+}
+
+
+void X86Assembler::movb(const Address& dst, ByteRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x88);
+  EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movb(const Address& dst, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC6);
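+  // EAX is used here as the /0 opcode extension, not as an operand register.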
+  EmitOperand(EAX, dst);
+  CHECK(imm.is_int8());
+  EmitUint8(imm.value() & 0xFF);
+}
+
+
+void X86Assembler::movzxw(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xB7);
+  EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movzxw(Register dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xB7);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movsxw(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xBF);
+  EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movsxw(Register dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xBF);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movw(Register /*dst*/, const Address& /*src*/) {
+  LOG(FATAL) << "Use movzxw or movsxw instead.";
+}
+
+
+void X86Assembler::movw(const Address& dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitOperandSizeOverride();
+  EmitUint8(0x89);
+  EmitOperand(src, dst);
+}
+
+
+void X86Assembler::leal(Register dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x8D);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::cmovl(Condition condition, Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0x40 + condition);
+  EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::setb(Condition condition, Register dst) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0x90 + condition);
+  EmitOperand(0, Operand(dst));
+}
+
+
+void X86Assembler::movss(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x10);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movss(const Address& dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x11);
+  EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
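+  // The store form (0x11) is emitted with the operands reversed; for a
+  // register-to-register move this is equivalent to the load form (0x10).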
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x11);
+  EmitXmmRegisterOperand(src, dst);
+}
+
+
+void X86Assembler::movd(XmmRegister dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x6E);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::movd(Register dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x7E);
+  EmitOperand(src, Operand(dst));
+}
+
+
+void X86Assembler::addss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x58);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::addss(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x58);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::subss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x5C);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::subss(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x5C);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::mulss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x59);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::mulss(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x59);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::divss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x5E);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::divss(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x5E);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::flds(const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitOperand(0, src);
+}
+
+
+void X86Assembler::fstps(const Address& dst) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitOperand(3, dst);
+}
+
+
+void X86Assembler::movsd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x10);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movsd(const Address& dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x11);
+  EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movsd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x11);
+  EmitXmmRegisterOperand(src, dst);
+}
+
+
+void X86Assembler::addsd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x58);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::addsd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x58);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::subsd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x5C);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::subsd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x5C);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::mulsd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x59);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::mulsd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x59);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::divsd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x5E);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::divsd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x5E);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::cvtsi2ss(XmmRegister dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x2A);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::cvtsi2sd(XmmRegister dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x2A);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::cvtss2si(Register dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x2D);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvtss2sd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x5A);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvtsd2si(Register dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x2D);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvttss2si(Register dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x2C);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvttsd2si(Register dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x2C);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvtsd2ss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x5A);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvtdq2pd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0xE6);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::comiss(XmmRegister a, XmmRegister b) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0x2F);
+  EmitXmmRegisterOperand(a, b);
+}
+
+
+void X86Assembler::comisd(XmmRegister a, XmmRegister b) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x2F);
+  EmitXmmRegisterOperand(a, b);
+}
+
+
+void X86Assembler::sqrtsd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x51);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::sqrtss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x51);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::xorpd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x57);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::xorpd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x57);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::xorps(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0x57);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::xorps(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0x57);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::andpd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x54);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::fldl(const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xDD);
+  EmitOperand(0, src);
+}
+
+
+void X86Assembler::fstpl(const Address& dst) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xDD);
+  EmitOperand(3, dst);
+}
+
+
+void X86Assembler::fnstcw(const Address& dst) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitOperand(7, dst);
+}
+
+
+void X86Assembler::fldcw(const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitOperand(5, src);
+}
+
+
+void X86Assembler::fistpl(const Address& dst) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xDF);
+  EmitOperand(7, dst);
+}
+
+
+void X86Assembler::fistps(const Address& dst) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xDB);
+  EmitOperand(3, dst);
+}
+
+
+void X86Assembler::fildl(const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xDF);
+  EmitOperand(5, src);
+}
+
+
+void X86Assembler::fincstp() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitUint8(0xF7);
+}
+
+
+void X86Assembler::ffree(const Immediate& index) {
+  CHECK_LT(index.value(), 7);
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xDD);
+  EmitUint8(0xC0 + index.value());
+}
+
+
+void X86Assembler::fsin() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitUint8(0xFE);
+}
+
+
+void X86Assembler::fcos() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitUint8(0xFF);
+}
+
+
+void X86Assembler::fptan() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitUint8(0xF2);
+}
+
+
+void X86Assembler::xchgl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x87);
+  EmitRegisterOperand(dst, src);
+}
+
+void X86Assembler::xchgl(Register reg, const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x87);
+  EmitOperand(reg, address);
+}
+
+
+void X86Assembler::cmpl(Register reg, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(7, Operand(reg), imm);
+}
+
+
+void X86Assembler::cmpl(Register reg0, Register reg1) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x3B);
+  EmitOperand(reg0, Operand(reg1));
+}
+
+
+void X86Assembler::cmpl(Register reg, const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x3B);
+  EmitOperand(reg, address);
+}
+
+
+void X86Assembler::addl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x03);
+  EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::addl(Register reg, const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x03);
+  EmitOperand(reg, address);
+}
+
+
+void X86Assembler::cmpl(const Address& address, Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x39);
+  EmitOperand(reg, address);
+}
+
+
+void X86Assembler::cmpl(const Address& address, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(7, address, imm);
+}
+
+
+void X86Assembler::testl(Register reg1, Register reg2) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x85);
+  EmitRegisterOperand(reg1, reg2);
+}
+
+
+void X86Assembler::testl(Register reg, const Immediate& immediate) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  // For registers that have a byte variant (EAX, EBX, ECX, and EDX)
+  // we only test the byte register to keep the encoding short.
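+  // For example (illustrative): testl(ECX, Immediate(0x10)) emits F6 C1 10
+  // and tests only CL, while testl(ESI, Immediate(0x10)) has no byte form and
+  // emits the full F7 C6 10 00 00 00.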
+  if (immediate.is_uint8() && reg < 4) {
+    // Use zero-extended 8-bit immediate.
+    if (reg == EAX) {
+      EmitUint8(0xA8);
+    } else {
+      EmitUint8(0xF6);
+      EmitUint8(0xC0 + reg);
+    }
+    EmitUint8(immediate.value() & 0xFF);
+  } else if (reg == EAX) {
+    // Use short form if the destination is EAX.
+    EmitUint8(0xA9);
+    EmitImmediate(immediate);
+  } else {
+    EmitUint8(0xF7);
+    EmitOperand(0, Operand(reg));
+    EmitImmediate(immediate);
+  }
+}
+
+
+void X86Assembler::andl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x23);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::andl(Register dst, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(4, Operand(dst), imm);
+}
+
+
+void X86Assembler::orl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0B);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::orl(Register dst, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(1, Operand(dst), imm);
+}
+
+
+void X86Assembler::xorl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x33);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::addl(Register reg, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(0, Operand(reg), imm);
+}
+
+
+void X86Assembler::addl(const Address& address, Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x01);
+  EmitOperand(reg, address);
+}
+
+
+void X86Assembler::addl(const Address& address, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(0, address, imm);
+}
+
+
+void X86Assembler::adcl(Register reg, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(2, Operand(reg), imm);
+}
+
+
+void X86Assembler::adcl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x13);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::adcl(Register dst, const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x13);
+  EmitOperand(dst, address);
+}
+
+
+void X86Assembler::subl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x2B);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::subl(Register reg, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(5, Operand(reg), imm);
+}
+
+
+void X86Assembler::subl(Register reg, const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x2B);
+  EmitOperand(reg, address);
+}
+
+
+void X86Assembler::cdq() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x99);
+}
+
+
+void X86Assembler::idivl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitUint8(0xF8 | reg);
+}
+
+
+void X86Assembler::imull(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xAF);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::imull(Register reg, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x69);
+  EmitOperand(reg, Operand(reg));
+  EmitImmediate(imm);
+}
+
+
+void X86Assembler::imull(Register reg, const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xAF);
+  EmitOperand(reg, address);
+}
+
+
+void X86Assembler::imull(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitOperand(5, Operand(reg));
+}
+
+
+void X86Assembler::imull(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitOperand(5, address);
+}
+
+
+void X86Assembler::mull(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitOperand(4, Operand(reg));
+}
+
+
+void X86Assembler::mull(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitOperand(4, address);
+}
+
+
+void X86Assembler::sbbl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x1B);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::sbbl(Register reg, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(3, Operand(reg), imm);
+}
+
+
+void X86Assembler::sbbl(Register dst, const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x1B);
+  EmitOperand(dst, address);
+}
+
+
+void X86Assembler::incl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x40 + reg);
+}
+
+
+void X86Assembler::incl(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xFF);
+  EmitOperand(0, address);
+}
+
+
+void X86Assembler::decl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x48 + reg);
+}
+
+
+void X86Assembler::decl(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xFF);
+  EmitOperand(1, address);
+}
+
+
+void X86Assembler::shll(Register reg, const Immediate& imm) {
+  EmitGenericShift(4, reg, imm);
+}
+
+
+void X86Assembler::shll(Register operand, Register shifter) {
+  EmitGenericShift(4, operand, shifter);
+}
+
+
+void X86Assembler::shrl(Register reg, const Immediate& imm) {
+  EmitGenericShift(5, reg, imm);
+}
+
+
+void X86Assembler::shrl(Register operand, Register shifter) {
+  EmitGenericShift(5, operand, shifter);
+}
+
+
+void X86Assembler::sarl(Register reg, const Immediate& imm) {
+  EmitGenericShift(7, reg, imm);
+}
+
+
+void X86Assembler::sarl(Register operand, Register shifter) {
+  EmitGenericShift(7, operand, shifter);
+}
+
+
+void X86Assembler::shld(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
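+  // 0x0F 0xA5 is the CL-count form: shld dst, src, CL.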
+  EmitUint8(0x0F);
+  EmitUint8(0xA5);
+  EmitRegisterOperand(src, dst);
+}
+
+
+void X86Assembler::negl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitOperand(3, Operand(reg));
+}
+
+
+void X86Assembler::notl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitUint8(0xD0 | reg);
+}
+
+
+void X86Assembler::enter(const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC8);
+  CHECK(imm.is_uint16());
+  EmitUint8(imm.value() & 0xFF);
+  EmitUint8((imm.value() >> 8) & 0xFF);
+  EmitUint8(0x00);
+}
+
+
+void X86Assembler::leave() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC9);
+}
+
+
+void X86Assembler::ret() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC3);
+}
+
+
+void X86Assembler::ret(const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC2);
+  CHECK(imm.is_uint16());
+  EmitUint8(imm.value() & 0xFF);
+  EmitUint8((imm.value() >> 8) & 0xFF);
+}
+
+
+void X86Assembler::nop() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x90);
+}
+
+
+void X86Assembler::int3() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xCC);
+}
+
+
+void X86Assembler::hlt() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF4);
+}
+
+
+void X86Assembler::j(Condition condition, Label* label) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
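+  // Bound (backward) targets use the 2-byte short form (0x70+cc rel8) when
+  // the displacement fits in 8 bits and the 6-byte form (0x0F 0x80+cc rel32)
+  // otherwise; unbound (forward) targets always use the long form so the
+  // 32-bit slot can hold the label link.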
+  if (label->IsBound()) {
+    static const int kShortSize = 2;
+    static const int kLongSize = 6;
+    int offset = label->Position() - buffer_.Size();
+    CHECK_LE(offset, 0);
+    if (IsInt(8, offset - kShortSize)) {
+      EmitUint8(0x70 + condition);
+      EmitUint8((offset - kShortSize) & 0xFF);
+    } else {
+      EmitUint8(0x0F);
+      EmitUint8(0x80 + condition);
+      EmitInt32(offset - kLongSize);
+    }
+  } else {
+    EmitUint8(0x0F);
+    EmitUint8(0x80 + condition);
+    EmitLabelLink(label);
+  }
+}
+
+
+void X86Assembler::jmp(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xFF);
+  EmitRegisterOperand(4, reg);
+}
+
+void X86Assembler::jmp(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xFF);
+  EmitOperand(4, address);
+}
+
+void X86Assembler::jmp(Label* label) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  if (label->IsBound()) {
+    static const int kShortSize = 2;
+    static const int kLongSize = 5;
+    int offset = label->Position() - buffer_.Size();
+    CHECK_LE(offset, 0);
+    if (IsInt(8, offset - kShortSize)) {
+      EmitUint8(0xEB);
+      EmitUint8((offset - kShortSize) & 0xFF);
+    } else {
+      EmitUint8(0xE9);
+      EmitInt32(offset - kLongSize);
+    }
+  } else {
+    EmitUint8(0xE9);
+    EmitLabelLink(label);
+  }
+}
+
+
+X86Assembler* X86Assembler::lock() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF0);
+  return this;
+}
+
+
+void X86Assembler::cmpxchgl(const Address& address, Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xB1);
+  EmitOperand(reg, address);
+}
+
+void X86Assembler::mfence() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xAE);
+  EmitUint8(0xF0);
+}
+
+X86Assembler* X86Assembler::fs() {
+  // TODO: fs is a prefix and not an instruction
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x64);
+  return this;
+}
+
+void X86Assembler::AddImmediate(Register reg, const Immediate& imm) {
+  int value = imm.value();
+  if (value > 0) {
+    if (value == 1) {
+      incl(reg);
+    } else if (value != 0) {
+      addl(reg, imm);
+    }
+  } else if (value < 0) {
+    value = -value;
+    if (value == 1) {
+      decl(reg);
+    } else if (value != 0) {
+      subl(reg, Immediate(value));
+    }
+  }
+}
+
+
+void X86Assembler::LoadDoubleConstant(XmmRegister dst, double value) {
+  // TODO: Need to have a code constants table.
+  int64_t constant = bit_cast<int64_t, double>(value);
+  pushl(Immediate(High32Bits(constant)));
+  pushl(Immediate(Low32Bits(constant)));
+  movsd(dst, Address(ESP, 0));
+  addl(ESP, Immediate(2 * kWordSize));
+}
+
+
+void X86Assembler::FloatNegate(XmmRegister f) {
+  static const struct {
+    uint32_t a;
+    uint32_t b;
+    uint32_t c;
+    uint32_t d;
+  } float_negate_constant __attribute__((aligned(16))) =
+      { 0x80000000, 0x00000000, 0x80000000, 0x00000000 };
+  xorps(f, Address::Absolute(reinterpret_cast<uword>(&float_negate_constant)));
+}
+
+
+void X86Assembler::DoubleNegate(XmmRegister d) {
+  static const struct {
+    uint64_t a;
+    uint64_t b;
+  } double_negate_constant __attribute__((aligned(16))) =
+      {0x8000000000000000LL, 0x8000000000000000LL};
+  xorpd(d, Address::Absolute(reinterpret_cast<uword>(&double_negate_constant)));
+}
+
+
+void X86Assembler::DoubleAbs(XmmRegister reg) {
+  static const struct {
+    uint64_t a;
+    uint64_t b;
+  } double_abs_constant __attribute__((aligned(16))) =
+      {0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL};
+  andpd(reg, Address::Absolute(reinterpret_cast<uword>(&double_abs_constant)));
+}
+
+
+void X86Assembler::Align(int alignment, int offset) {
+  CHECK(IsPowerOfTwo(alignment));
+  // Emit nop instructions until the real position is aligned.
+  while (((offset + buffer_.GetPosition()) & (alignment-1)) != 0) {
+    nop();
+  }
+}
+
+
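+// Unbound labels thread a singly linked list through the code itself: each
+// forward reference stores the position of the previous reference in its
+// 4-byte displacement slot (see EmitLabelLink). Bind() walks that chain and
+// patches every slot with the final pc-relative offset.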
+void X86Assembler::Bind(Label* label) {
+  int bound = buffer_.Size();
+  CHECK(!label->IsBound());  // Labels can only be bound once.
+  while (label->IsLinked()) {
+    int position = label->LinkPosition();
+    int next = buffer_.Load<int32_t>(position);
+    buffer_.Store<int32_t>(position, bound - (position + 4));
+    label->position_ = next;
+  }
+  label->BindTo(bound);
+}
+
+
+void X86Assembler::Stop(const char* message) {
+  // Emit the message address as the immediate operand of a test eax
+  // instruction, followed by an int3 instruction.
+  // Execution can be resumed with the 'cont' command in gdb.
+  testl(EAX, Immediate(reinterpret_cast<int32_t>(message)));
+  int3();
+}
+
+
+void X86Assembler::EmitOperand(int reg_or_opcode, const Operand& operand) {
+  CHECK_GE(reg_or_opcode, 0);
+  CHECK_LT(reg_or_opcode, 8);
+  const int length = operand.length_;
+  CHECK_GT(length, 0);
+  // Emit the ModRM byte updated with the given reg value.
+  CHECK_EQ(operand.encoding_[0] & 0x38, 0);
+  EmitUint8(operand.encoding_[0] + (reg_or_opcode << 3));
+  // Emit the rest of the encoded operand.
+  for (int i = 1; i < length; i++) {
+    EmitUint8(operand.encoding_[i]);
+  }
+}
+
+
+void X86Assembler::EmitImmediate(const Immediate& imm) {
+  EmitInt32(imm.value());
+}
+
+
+void X86Assembler::EmitComplex(int reg_or_opcode,
+                               const Operand& operand,
+                               const Immediate& immediate) {
+  CHECK_GE(reg_or_opcode, 0);
+  CHECK_LT(reg_or_opcode, 8);
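+  // Illustrative encodings: addl(EBX, Immediate(1)) takes the sign-extended
+  // imm8 form 83 C3 01; addl(EAX, Immediate(0x1000)) takes the short EAX form
+  // 05 00 10 00 00; addl(EBX, Immediate(0x1000)) falls back to
+  // 81 C3 00 10 00 00.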
+  if (immediate.is_int8()) {
+    // Use sign-extended 8-bit immediate.
+    EmitUint8(0x83);
+    EmitOperand(reg_or_opcode, operand);
+    EmitUint8(immediate.value() & 0xFF);
+  } else if (operand.IsRegister(EAX)) {
+    // Use short form if the destination is EAX.
+    EmitUint8(0x05 + (reg_or_opcode << 3));
+    EmitImmediate(immediate);
+  } else {
+    EmitUint8(0x81);
+    EmitOperand(reg_or_opcode, operand);
+    EmitImmediate(immediate);
+  }
+}
+
+
+void X86Assembler::EmitLabel(Label* label, int instruction_size) {
+  if (label->IsBound()) {
+    int offset = label->Position() - buffer_.Size();
+    CHECK_LE(offset, 0);
+    EmitInt32(offset - instruction_size);
+  } else {
+    EmitLabelLink(label);
+  }
+}
+
+
+void X86Assembler::EmitLabelLink(Label* label) {
+  CHECK(!label->IsBound());
+  int position = buffer_.Size();
+  EmitInt32(label->position_);
+  label->LinkTo(position);
+}
+
+
+void X86Assembler::EmitGenericShift(int reg_or_opcode,
+                                    Register reg,
+                                    const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  CHECK(imm.is_int8());
+  if (imm.value() == 1) {
+    EmitUint8(0xD1);
+    EmitOperand(reg_or_opcode, Operand(reg));
+  } else {
+    EmitUint8(0xC1);
+    EmitOperand(reg_or_opcode, Operand(reg));
+    EmitUint8(imm.value() & 0xFF);
+  }
+}
+
+
+void X86Assembler::EmitGenericShift(int reg_or_opcode,
+                                    Register operand,
+                                    Register shifter) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  CHECK_EQ(shifter, ECX);
+  EmitUint8(0xD3);
+  EmitOperand(reg_or_opcode, Operand(operand));
+}
+
+void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
+                              const std::vector<ManagedRegister>& spill_regs,
+                              const std::vector<ManagedRegister>& entry_spills) {
+  CHECK_ALIGNED(frame_size, kStackAlignment);
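+  // Frame layout as built here (stack grows down): callee-save spills are
+  // pushed first, the remainder of the frame is allocated in one addl, and
+  // the method pointer is pushed last so it sits at the new ESP. The entry
+  // spills are then written to the incoming argument slots above the caller's
+  // return address (our reading of the offsets below).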
+  for (int i = spill_regs.size() - 1; i >= 0; --i) {
+    pushl(spill_regs.at(i).AsX86().AsCpuRegister());
+  }
+  // Reserve the rest of the frame; frame_size already accounts for the
+  // return address, the spilled registers and the method slot pushed below.
+  addl(ESP, Immediate(-frame_size + (spill_regs.size() * kPointerSize) +
+                      kPointerSize /*method*/ + kPointerSize /*return address*/));
+  pushl(method_reg.AsX86().AsCpuRegister());
+  for (size_t i = 0; i < entry_spills.size(); ++i) {
+    movl(Address(ESP, frame_size + kPointerSize + (i * kPointerSize)),
+         entry_spills.at(i).AsX86().AsCpuRegister());
+  }
+}
+
+void X86Assembler::RemoveFrame(size_t frame_size,
+                            const std::vector<ManagedRegister>& spill_regs) {
+  CHECK_ALIGNED(frame_size, kStackAlignment);
+  addl(ESP, Immediate(frame_size - (spill_regs.size() * kPointerSize) - kPointerSize));
+  for (size_t i = 0; i < spill_regs.size(); ++i) {
+    popl(spill_regs.at(i).AsX86().AsCpuRegister());
+  }
+  ret();
+}
+
+void X86Assembler::IncreaseFrameSize(size_t adjust) {
+  CHECK_ALIGNED(adjust, kStackAlignment);
+  addl(ESP, Immediate(-adjust));
+}
+
+void X86Assembler::DecreaseFrameSize(size_t adjust) {
+  CHECK_ALIGNED(adjust, kStackAlignment);
+  addl(ESP, Immediate(adjust));
+}
+
+void X86Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
+  X86ManagedRegister src = msrc.AsX86();
+  if (src.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (src.IsCpuRegister()) {
+    CHECK_EQ(4u, size);
+    movl(Address(ESP, offs), src.AsCpuRegister());
+  } else if (src.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    movl(Address(ESP, offs), src.AsRegisterPairLow());
+    movl(Address(ESP, FrameOffset(offs.Int32Value()+4)),
+         src.AsRegisterPairHigh());
+  } else if (src.IsX87Register()) {
+    if (size == 4) {
+      fstps(Address(ESP, offs));
+    } else {
+      fstpl(Address(ESP, offs));
+    }
+  } else {
+    CHECK(src.IsXmmRegister());
+    if (size == 4) {
+      movss(Address(ESP, offs), src.AsXmmRegister());
+    } else {
+      movsd(Address(ESP, offs), src.AsXmmRegister());
+    }
+  }
+}
+
+void X86Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+  X86ManagedRegister src = msrc.AsX86();
+  CHECK(src.IsCpuRegister());
+  movl(Address(ESP, dest), src.AsCpuRegister());
+}
+
+void X86Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+  X86ManagedRegister src = msrc.AsX86();
+  CHECK(src.IsCpuRegister());
+  movl(Address(ESP, dest), src.AsCpuRegister());
+}
+
+void X86Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+                                         ManagedRegister) {
+  movl(Address(ESP, dest), Immediate(imm));
+}
+
+void X86Assembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+                                          ManagedRegister) {
+  fs()->movl(Address::Absolute(dest), Immediate(imm));
+}
+
+void X86Assembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
+                                            FrameOffset fr_offs,
+                                            ManagedRegister mscratch) {
+  X86ManagedRegister scratch = mscratch.AsX86();
+  CHECK(scratch.IsCpuRegister());
+  leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
+  fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
+void X86Assembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
+  fs()->movl(Address::Absolute(thr_offs), ESP);
+}
+
+void X86Assembler::StoreLabelToThread(ThreadOffset thr_offs, Label* lbl) {
+  fs()->movl(Address::Absolute(thr_offs), lbl);
+}
+
+void X86Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/,
+                                 FrameOffset /*in_off*/, ManagedRegister /*scratch*/) {
+  UNIMPLEMENTED(FATAL);  // This case currently only exists for ARM.
+}
+
+void X86Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+  X86ManagedRegister dest = mdest.AsX86();
+  if (dest.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (dest.IsCpuRegister()) {
+    CHECK_EQ(4u, size);
+    movl(dest.AsCpuRegister(), Address(ESP, src));
+  } else if (dest.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    movl(dest.AsRegisterPairLow(), Address(ESP, src));
+    movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4)));
+  } else if (dest.IsX87Register()) {
+    if (size == 4) {
+      flds(Address(ESP, src));
+    } else {
+      fldl(Address(ESP, src));
+    }
+  } else {
+    CHECK(dest.IsXmmRegister());
+    if (size == 4) {
+      movss(dest.AsXmmRegister(), Address(ESP, src));
+    } else {
+      movsd(dest.AsXmmRegister(), Address(ESP, src));
+    }
+  }
+}
+
+void X86Assembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) {
+  X86ManagedRegister dest = mdest.AsX86();
+  if (dest.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (dest.IsCpuRegister()) {
+    CHECK_EQ(4u, size);
+    fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
+  } else if (dest.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
+    fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset(src.Int32Value()+4)));
+  } else if (dest.IsX87Register()) {
+    if (size == 4) {
+      fs()->flds(Address::Absolute(src));
+    } else {
+      fs()->fldl(Address::Absolute(src));
+    }
+  } else {
+    CHECK(dest.IsXmmRegister());
+    if (size == 4) {
+      fs()->movss(dest.AsXmmRegister(), Address::Absolute(src));
+    } else {
+      fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src));
+    }
+  }
+}
+
+void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset  src) {
+  X86ManagedRegister dest = mdest.AsX86();
+  CHECK(dest.IsCpuRegister());
+  movl(dest.AsCpuRegister(), Address(ESP, src));
+}
+
+void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
+                           MemberOffset offs) {
+  X86ManagedRegister dest = mdest.AsX86();
+  CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
+  movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
+}
+
+void X86Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+                              Offset offs) {
+  X86ManagedRegister dest = mdest.AsX86();
+  CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
+  movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
+}
+
+void X86Assembler::LoadRawPtrFromThread(ManagedRegister mdest,
+                                        ThreadOffset offs) {
+  X86ManagedRegister dest = mdest.AsX86();
+  CHECK(dest.IsCpuRegister());
+  fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
+}
+
+void X86Assembler::SignExtend(ManagedRegister mreg, size_t size) {
+  X86ManagedRegister reg = mreg.AsX86();
+  CHECK(size == 1 || size == 2) << size;
+  CHECK(reg.IsCpuRegister()) << reg;
+  if (size == 1) {
+    movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
+  } else {
+    movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+  }
+}
+
+void X86Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+  X86ManagedRegister reg = mreg.AsX86();
+  CHECK(size == 1 || size == 2) << size;
+  CHECK(reg.IsCpuRegister()) << reg;
+  if (size == 1) {
+    movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
+  } else {
+    movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+  }
+}
+
+void X86Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
+  X86ManagedRegister dest = mdest.AsX86();
+  X86ManagedRegister src = msrc.AsX86();
+  if (!dest.Equals(src)) {
+    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
+      movl(dest.AsCpuRegister(), src.AsCpuRegister());
+    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
+      // Pass via stack and pop X87 register
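+      // Reserving 16 bytes is more than the 8 needed, presumably to keep the
+      // stack pointer aligned across the transfer.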
+      subl(ESP, Immediate(16));
+      if (size == 4) {
+        CHECK_EQ(src.AsX87Register(), ST0);
+        fstps(Address(ESP, 0));
+        movss(dest.AsXmmRegister(), Address(ESP, 0));
+      } else {
+        CHECK_EQ(src.AsX87Register(), ST0);
+        fstpl(Address(ESP, 0));
+        movsd(dest.AsXmmRegister(), Address(ESP, 0));
+      }
+      addl(ESP, Immediate(16));
+    } else {
+      // TODO: x87, SSE
+      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
+    }
+  }
+}
+
+void X86Assembler::CopyRef(FrameOffset dest, FrameOffset src,
+                           ManagedRegister mscratch) {
+  X86ManagedRegister scratch = mscratch.AsX86();
+  CHECK(scratch.IsCpuRegister());
+  movl(scratch.AsCpuRegister(), Address(ESP, src));
+  movl(Address(ESP, dest), scratch.AsCpuRegister());
+}
+
+void X86Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+                                        ThreadOffset thr_offs,
+                                        ManagedRegister mscratch) {
+  X86ManagedRegister scratch = mscratch.AsX86();
+  CHECK(scratch.IsCpuRegister());
+  fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
+  Store(fr_offs, scratch, 4);
+}
+
+void X86Assembler::CopyRawPtrToThread(ThreadOffset thr_offs,
+                                      FrameOffset fr_offs,
+                                      ManagedRegister mscratch) {
+  X86ManagedRegister scratch = mscratch.AsX86();
+  CHECK(scratch.IsCpuRegister());
+  Load(scratch, fr_offs, 4);
+  fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
+void X86Assembler::Copy(FrameOffset dest, FrameOffset src,
+                        ManagedRegister mscratch,
+                        size_t size) {
+  X86ManagedRegister scratch = mscratch.AsX86();
+  if (scratch.IsCpuRegister() && size == 8) {
+    Load(scratch, src, 4);
+    Store(dest, scratch, 4);
+    Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
+    Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
+  } else {
+    Load(scratch, src, size);
+    Store(dest, scratch, size);
+  }
+}
+
+void X86Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/,
+                        ManagedRegister /*scratch*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL);
+}
+
+void X86Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+                        ManagedRegister scratch, size_t size) {
+  CHECK(scratch.IsNoRegister());
+  CHECK_EQ(size, 4u);
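+  // A pushl/popl pair copies memory to memory without needing a scratch
+  // register.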
+  pushl(Address(ESP, src));
+  popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset));
+}
+
+void X86Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+                        ManagedRegister mscratch, size_t size) {
+  Register scratch = mscratch.AsX86().AsCpuRegister();
+  CHECK_EQ(size, 4u);
+  movl(scratch, Address(ESP, src_base));
+  movl(scratch, Address(scratch, src_offset));
+  movl(Address(ESP, dest), scratch);
+}
+
+void X86Assembler::Copy(ManagedRegister dest, Offset dest_offset,
+                        ManagedRegister src, Offset src_offset,
+                        ManagedRegister scratch, size_t size) {
+  CHECK_EQ(size, 4u);
+  CHECK(scratch.IsNoRegister());
+  pushl(Address(src.AsX86().AsCpuRegister(), src_offset));
+  popl(Address(dest.AsX86().AsCpuRegister(), dest_offset));
+}
+
+void X86Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+                        ManagedRegister mscratch, size_t size) {
+  Register scratch = mscratch.AsX86().AsCpuRegister();
+  CHECK_EQ(size, 4u);
+  CHECK_EQ(dest.Int32Value(), src.Int32Value());
+  movl(scratch, Address(ESP, src));
+  pushl(Address(scratch, src_offset));
+  popl(Address(scratch, dest_offset));
+}
+
+void X86Assembler::MemoryBarrier(ManagedRegister) {
+#if ANDROID_SMP != 0
+  mfence();
+#endif
+}
+
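+// When null_allowed, out_reg is zeroed first and only receives the address of
+// the SIRT slot if the incoming reference is non-null, so a null reference
+// stays null rather than becoming a pointer to a null slot.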
+void X86Assembler::CreateSirtEntry(ManagedRegister mout_reg,
+                                   FrameOffset sirt_offset,
+                                   ManagedRegister min_reg, bool null_allowed) {
+  X86ManagedRegister out_reg = mout_reg.AsX86();
+  X86ManagedRegister in_reg = min_reg.AsX86();
+  CHECK(in_reg.IsCpuRegister());
+  CHECK(out_reg.IsCpuRegister());
+  VerifyObject(in_reg, null_allowed);
+  if (null_allowed) {
+    Label null_arg;
+    if (!out_reg.Equals(in_reg)) {
+      xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+    }
+    testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+    j(kZero, &null_arg);
+    leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+    Bind(&null_arg);
+  } else {
+    leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+  }
+}
+
+void X86Assembler::CreateSirtEntry(FrameOffset out_off,
+                                   FrameOffset sirt_offset,
+                                   ManagedRegister mscratch,
+                                   bool null_allowed) {
+  X86ManagedRegister scratch = mscratch.AsX86();
+  CHECK(scratch.IsCpuRegister());
+  if (null_allowed) {
+    Label null_arg;
+    movl(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+    testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
+    j(kZero, &null_arg);
+    leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+    Bind(&null_arg);
+  } else {
+    leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+  }
+  Store(out_off, scratch, 4);
+}
+
+// Given a SIRT entry, load the associated reference.
+void X86Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+                                         ManagedRegister min_reg) {
+  X86ManagedRegister out_reg = mout_reg.AsX86();
+  X86ManagedRegister in_reg = min_reg.AsX86();
+  CHECK(out_reg.IsCpuRegister());
+  CHECK(in_reg.IsCpuRegister());
+  Label null_arg;
+  if (!out_reg.Equals(in_reg)) {
+    xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+  }
+  testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+  j(kZero, &null_arg);
+  movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
+  Bind(&null_arg);
+}
+
+void X86Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references
+}
+
+void X86Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references
+}
+
+void X86Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
+  X86ManagedRegister base = mbase.AsX86();
+  CHECK(base.IsCpuRegister());
+  call(Address(base.AsCpuRegister(), offset.Int32Value()));
+  // TODO: place reference map on call
+}
+
+void X86Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+  Register scratch = mscratch.AsX86().AsCpuRegister();
+  movl(scratch, Address(ESP, base));
+  call(Address(scratch, offset));
+}
+
+void X86Assembler::Call(ThreadOffset offset, ManagedRegister /*mscratch*/) {
+  fs()->call(Address::Absolute(offset));
+}
+
+void X86Assembler::GetCurrentThread(ManagedRegister tr) {
+  fs()->movl(tr.AsX86().AsCpuRegister(),
+             Address::Absolute(Thread::SelfOffset()));
+}
+
+void X86Assembler::GetCurrentThread(FrameOffset offset,
+                                    ManagedRegister mscratch) {
+  X86ManagedRegister scratch = mscratch.AsX86();
+  fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset()));
+  movl(Address(ESP, offset), scratch.AsCpuRegister());
+}
+
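+// Poll Thread::Current()->exception_ through its fs-relative absolute
+// address; a non-null value branches to a slow path that delivers the
+// exception.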
+void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
+  X86ExceptionSlowPath* slow = new X86ExceptionSlowPath(stack_adjust);
+  buffer_.EnqueueSlowPath(slow);
+  fs()->cmpl(Address::Absolute(Thread::ExceptionOffset()), Immediate(0));
+  j(kNotEqual, slow->Entry());
+}
+
+void X86ExceptionSlowPath::Emit(Assembler *sasm) {
+  X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm);
+#define __ sp_asm->
+  __ Bind(&entry_);
+  // Note: the return value is dead
+  if (stack_adjust_ != 0) {  // Fix up the frame.
+    __ DecreaseFrameSize(stack_adjust_);
+  }
+  // Pass exception as argument in EAX
+  __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset()));
+  __ fs()->call(Address::Absolute(ENTRYPOINT_OFFSET(pDeliverException)));
+  // This call should never return.
+  __ int3();
+#undef __
+}
+
+}  // namespace x86
+}  // namespace art
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
new file mode 100644
index 0000000..4ba03d1
--- /dev/null
+++ b/compiler/utils/x86/assembler_x86.h
@@ -0,0 +1,646 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_X86_ASSEMBLER_X86_H_
+#define ART_COMPILER_UTILS_X86_ASSEMBLER_X86_H_
+
+#include <vector>
+#include "base/macros.h"
+#include "constants_x86.h"
+#include "globals.h"
+#include "managed_register_x86.h"
+#include "offsets.h"
+#include "utils/assembler.h"
+#include "utils.h"
+
+namespace art {
+namespace x86 {
+
+class Immediate {
+ public:
+  explicit Immediate(int32_t value) : value_(value) {}
+
+  int32_t value() const { return value_; }
+
+  bool is_int8() const { return IsInt(8, value_); }
+  bool is_uint8() const { return IsUint(8, value_); }
+  bool is_uint16() const { return IsUint(16, value_); }
+
+ private:
+  const int32_t value_;
+
+  DISALLOW_COPY_AND_ASSIGN(Immediate);
+};
+
+
+class Operand {
+ public:
+  uint8_t mod() const {
+    return (encoding_at(0) >> 6) & 3;
+  }
+
+  Register rm() const {
+    return static_cast<Register>(encoding_at(0) & 7);
+  }
+
+  ScaleFactor scale() const {
+    return static_cast<ScaleFactor>((encoding_at(1) >> 6) & 3);
+  }
+
+  Register index() const {
+    return static_cast<Register>((encoding_at(1) >> 3) & 7);
+  }
+
+  Register base() const {
+    return static_cast<Register>(encoding_at(1) & 7);
+  }
+
+  int8_t disp8() const {
+    CHECK_GE(length_, 2);
+    return static_cast<int8_t>(encoding_[length_ - 1]);
+  }
+
+  int32_t disp32() const {
+    CHECK_GE(length_, 5);
+    int32_t value;
+    memcpy(&value, &encoding_[length_ - 4], sizeof(value));
+    return value;
+  }
+
+  bool IsRegister(Register reg) const {
+    return ((encoding_[0] & 0xF8) == 0xC0)  // Addressing mode is register only.
+        && ((encoding_[0] & 0x07) == reg);  // Register codes match.
+  }
+
+ protected:
+  // Operand can be subclassed (e.g. Address).
+  Operand() : length_(0) { }
+
+  void SetModRM(int mod, Register rm) {
+    CHECK_EQ(mod & ~3, 0);
+    encoding_[0] = (mod << 6) | rm;
+    length_ = 1;
+  }
+
+  void SetSIB(ScaleFactor scale, Register index, Register base) {
+    CHECK_EQ(length_, 1);
+    CHECK_EQ(scale & ~3, 0);
+    encoding_[1] = (scale << 6) | (index << 3) | base;
+    length_ = 2;
+  }
+
+  void SetDisp8(int8_t disp) {
+    CHECK(length_ == 1 || length_ == 2);
+    encoding_[length_++] = static_cast<uint8_t>(disp);
+  }
+
+  void SetDisp32(int32_t disp) {
+    CHECK(length_ == 1 || length_ == 2);
+    int disp_size = sizeof(disp);
+    memmove(&encoding_[length_], &disp, disp_size);
+    length_ += disp_size;
+  }
+
+ private:
+  byte length_;
+  byte encoding_[6];
+  byte padding_;
+
+  explicit Operand(Register reg) { SetModRM(3, reg); }
+
+  // Get the operand encoding byte at the given index.
+  uint8_t encoding_at(int index) const {
+    CHECK_GE(index, 0);
+    CHECK_LT(index, length_);
+    return encoding_[index];
+  }
+
+  friend class X86Assembler;
+
+  DISALLOW_COPY_AND_ASSIGN(Operand);
+};
+
+
+class Address : public Operand {
+ public:
+  Address(Register base, int32_t disp) {
+    Init(base, disp);
+  }
+
+  Address(Register base, Offset disp) {
+    Init(base, disp.Int32Value());
+  }
+
+  Address(Register base, FrameOffset disp) {
+    CHECK_EQ(base, ESP);
+    Init(ESP, disp.Int32Value());
+  }
+
+  Address(Register base, MemberOffset disp) {
+    Init(base, disp.Int32Value());
+  }
+
+  void Init(Register base, int32_t disp) {
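+    // ModRM quirks: mod=0 with base EBP actually encodes disp32-with-no-base,
+    // so EBP always takes an explicit displacement, and ESP as a base always
+    // requires a SIB byte.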
+    if (disp == 0 && base != EBP) {
+      SetModRM(0, base);
+      if (base == ESP) SetSIB(TIMES_1, ESP, base);
+    } else if (disp >= -128 && disp <= 127) {
+      SetModRM(1, base);
+      if (base == ESP) SetSIB(TIMES_1, ESP, base);
+      SetDisp8(disp);
+    } else {
+      SetModRM(2, base);
+      if (base == ESP) SetSIB(TIMES_1, ESP, base);
+      SetDisp32(disp);
+    }
+  }
+
+
+  Address(Register index, ScaleFactor scale, int32_t disp) {
+    CHECK_NE(index, ESP);  // Illegal addressing mode.
+    SetModRM(0, ESP);
+    SetSIB(scale, index, EBP);
+    SetDisp32(disp);
+  }
+
+  Address(Register base, Register index, ScaleFactor scale, int32_t disp) {
+    CHECK_NE(index, ESP);  // Illegal addressing mode.
+    if (disp == 0 && base != EBP) {
+      SetModRM(0, ESP);
+      SetSIB(scale, index, base);
+    } else if (disp >= -128 && disp <= 127) {
+      SetModRM(1, ESP);
+      SetSIB(scale, index, base);
+      SetDisp8(disp);
+    } else {
+      SetModRM(2, ESP);
+      SetSIB(scale, index, base);
+      SetDisp32(disp);
+    }
+  }
+
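+  // mod=0 with r/m=EBP and no SIB encodes a bare disp32, i.e. an absolute
+  // address (typically used with the fs segment override to reach
+  // thread-local state).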
+  static Address Absolute(uword addr) {
+    Address result;
+    result.SetModRM(0, EBP);
+    result.SetDisp32(addr);
+    return result;
+  }
+
+  static Address Absolute(ThreadOffset addr) {
+    return Absolute(addr.Int32Value());
+  }
+
+ private:
+  Address() {}
+
+  DISALLOW_COPY_AND_ASSIGN(Address);
+};
+
+
+class X86Assembler : public Assembler {
+ public:
+  X86Assembler() {}
+  virtual ~X86Assembler() {}
+
+  /*
+   * Emit Machine Instructions.
+   */
+  void call(Register reg);
+  void call(const Address& address);
+  void call(Label* label);
+
+  void pushl(Register reg);
+  void pushl(const Address& address);
+  void pushl(const Immediate& imm);
+
+  void popl(Register reg);
+  void popl(const Address& address);
+
+  void movl(Register dst, const Immediate& src);
+  void movl(Register dst, Register src);
+
+  void movl(Register dst, const Address& src);
+  void movl(const Address& dst, Register src);
+  void movl(const Address& dst, const Immediate& imm);
+  void movl(const Address& dst, Label* lbl);
+
+  void movzxb(Register dst, ByteRegister src);
+  void movzxb(Register dst, const Address& src);
+  void movsxb(Register dst, ByteRegister src);
+  void movsxb(Register dst, const Address& src);
+  void movb(Register dst, const Address& src);
+  void movb(const Address& dst, ByteRegister src);
+  void movb(const Address& dst, const Immediate& imm);
+
+  void movzxw(Register dst, Register src);
+  void movzxw(Register dst, const Address& src);
+  void movsxw(Register dst, Register src);
+  void movsxw(Register dst, const Address& src);
+  void movw(Register dst, const Address& src);
+  void movw(const Address& dst, Register src);
+
+  void leal(Register dst, const Address& src);
+
+  void cmovl(Condition condition, Register dst, Register src);
+
+  void setb(Condition condition, Register dst);
+
+  void movss(XmmRegister dst, const Address& src);
+  void movss(const Address& dst, XmmRegister src);
+  void movss(XmmRegister dst, XmmRegister src);
+
+  void movd(XmmRegister dst, Register src);
+  void movd(Register dst, XmmRegister src);
+
+  void addss(XmmRegister dst, XmmRegister src);
+  void addss(XmmRegister dst, const Address& src);
+  void subss(XmmRegister dst, XmmRegister src);
+  void subss(XmmRegister dst, const Address& src);
+  void mulss(XmmRegister dst, XmmRegister src);
+  void mulss(XmmRegister dst, const Address& src);
+  void divss(XmmRegister dst, XmmRegister src);
+  void divss(XmmRegister dst, const Address& src);
+
+  void movsd(XmmRegister dst, const Address& src);
+  void movsd(const Address& dst, XmmRegister src);
+  void movsd(XmmRegister dst, XmmRegister src);
+
+  void addsd(XmmRegister dst, XmmRegister src);
+  void addsd(XmmRegister dst, const Address& src);
+  void subsd(XmmRegister dst, XmmRegister src);
+  void subsd(XmmRegister dst, const Address& src);
+  void mulsd(XmmRegister dst, XmmRegister src);
+  void mulsd(XmmRegister dst, const Address& src);
+  void divsd(XmmRegister dst, XmmRegister src);
+  void divsd(XmmRegister dst, const Address& src);
+
+  void cvtsi2ss(XmmRegister dst, Register src);
+  void cvtsi2sd(XmmRegister dst, Register src);
+
+  void cvtss2si(Register dst, XmmRegister src);
+  void cvtss2sd(XmmRegister dst, XmmRegister src);
+
+  void cvtsd2si(Register dst, XmmRegister src);
+  void cvtsd2ss(XmmRegister dst, XmmRegister src);
+
+  void cvttss2si(Register dst, XmmRegister src);
+  void cvttsd2si(Register dst, XmmRegister src);
+
+  void cvtdq2pd(XmmRegister dst, XmmRegister src);
+
+  void comiss(XmmRegister a, XmmRegister b);
+  void comisd(XmmRegister a, XmmRegister b);
+
+  void sqrtsd(XmmRegister dst, XmmRegister src);
+  void sqrtss(XmmRegister dst, XmmRegister src);
+
+  void xorpd(XmmRegister dst, const Address& src);
+  void xorpd(XmmRegister dst, XmmRegister src);
+  void xorps(XmmRegister dst, const Address& src);
+  void xorps(XmmRegister dst, XmmRegister src);
+
+  void andpd(XmmRegister dst, const Address& src);
+
+  void flds(const Address& src);
+  void fstps(const Address& dst);
+
+  void fldl(const Address& src);
+  void fstpl(const Address& dst);
+
+  void fnstcw(const Address& dst);
+  void fldcw(const Address& src);
+
+  void fistpl(const Address& dst);
+  void fistps(const Address& dst);
+  void fildl(const Address& src);
+
+  void fincstp();
+  void ffree(const Immediate& index);
+
+  void fsin();
+  void fcos();
+  void fptan();
+
+  void xchgl(Register dst, Register src);
+  void xchgl(Register reg, const Address& address);
+
+  void cmpl(Register reg, const Immediate& imm);
+  void cmpl(Register reg0, Register reg1);
+  void cmpl(Register reg, const Address& address);
+
+  void cmpl(const Address& address, Register reg);
+  void cmpl(const Address& address, const Immediate& imm);
+
+  void testl(Register reg1, Register reg2);
+  void testl(Register reg, const Immediate& imm);
+
+  void andl(Register dst, const Immediate& imm);
+  void andl(Register dst, Register src);
+
+  void orl(Register dst, const Immediate& imm);
+  void orl(Register dst, Register src);
+
+  void xorl(Register dst, Register src);
+
+  void addl(Register dst, Register src);
+  void addl(Register reg, const Immediate& imm);
+  void addl(Register reg, const Address& address);
+
+  void addl(const Address& address, Register reg);
+  void addl(const Address& address, const Immediate& imm);
+
+  void adcl(Register dst, Register src);
+  void adcl(Register reg, const Immediate& imm);
+  void adcl(Register dst, const Address& address);
+
+  void subl(Register dst, Register src);
+  void subl(Register reg, const Immediate& imm);
+  void subl(Register reg, const Address& address);
+
+  void cdq();
+
+  void idivl(Register reg);
+
+  void imull(Register dst, Register src);
+  void imull(Register reg, const Immediate& imm);
+  void imull(Register reg, const Address& address);
+
+  void imull(Register reg);
+  void imull(const Address& address);
+
+  void mull(Register reg);
+  void mull(const Address& address);
+
+  void sbbl(Register dst, Register src);
+  void sbbl(Register reg, const Immediate& imm);
+  void sbbl(Register reg, const Address& address);
+
+  void incl(Register reg);
+  void incl(const Address& address);
+
+  void decl(Register reg);
+  void decl(const Address& address);
+
+  void shll(Register reg, const Immediate& imm);
+  void shll(Register operand, Register shifter);
+  void shrl(Register reg, const Immediate& imm);
+  void shrl(Register operand, Register shifter);
+  void sarl(Register reg, const Immediate& imm);
+  void sarl(Register operand, Register shifter);
+  void shld(Register dst, Register src);
+
+  void negl(Register reg);
+  void notl(Register reg);
+
+  void enter(const Immediate& imm);
+  void leave();
+
+  void ret();
+  void ret(const Immediate& imm);
+
+  void nop();
+  void int3();
+  void hlt();
+
+  void j(Condition condition, Label* label);
+
+  void jmp(Register reg);
+  void jmp(const Address& address);
+  void jmp(Label* label);
+
+  X86Assembler* lock();
+  void cmpxchgl(const Address& address, Register reg);
+
+  void mfence();
+
+  X86Assembler* fs();
+
+  //
+  // Macros for high-level operations.
+  //
+
+  void AddImmediate(Register reg, const Immediate& imm);
+
+  void LoadDoubleConstant(XmmRegister dst, double value);
+
+  void DoubleNegate(XmmRegister d);
+  void FloatNegate(XmmRegister f);
+
+  void DoubleAbs(XmmRegister reg);
+
+  void LockCmpxchgl(const Address& address, Register reg) {
+    lock()->cmpxchgl(address, reg);
+  }
+
+  //
+  // Misc. functionality
+  //
+  int PreferredLoopAlignment() { return 16; }
+  void Align(int alignment, int offset);
+  void Bind(Label* label);
+
+  // Debugging and bringup support.
+  void Stop(const char* message);
+
+  //
+  // Overridden common assembler high-level functionality
+  //
+
+  // Emit code that will create an activation on the stack
+  virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+                          const std::vector<ManagedRegister>& callee_save_regs,
+                          const std::vector<ManagedRegister>& entry_spills);
+
+  // Emit code that will remove an activation from the stack
+  virtual void RemoveFrame(size_t frame_size,
+                           const std::vector<ManagedRegister>& callee_save_regs);
+
+  virtual void IncreaseFrameSize(size_t adjust);
+  virtual void DecreaseFrameSize(size_t adjust);
+
+  // Store routines
+  virtual void Store(FrameOffset offs, ManagedRegister src, size_t size);
+  virtual void StoreRef(FrameOffset dest, ManagedRegister src);
+  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src);
+
+  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+                                     ManagedRegister scratch);
+
+  virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+                                      ManagedRegister scratch);
+
+  virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
+                                        FrameOffset fr_offs,
+                                        ManagedRegister scratch);
+
+  virtual void StoreStackPointerToThread(ThreadOffset thr_offs);
+
+  void StoreLabelToThread(ThreadOffset thr_offs, Label* lbl);
+
+  virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
+                             FrameOffset in_off, ManagedRegister scratch);
+
+  // Load routines
+  virtual void Load(ManagedRegister dest, FrameOffset src, size_t size);
+
+  virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size);
+
+  virtual void LoadRef(ManagedRegister dest, FrameOffset  src);
+
+  virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
+                       MemberOffset offs);
+
+  virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
+                          Offset offs);
+
+  virtual void LoadRawPtrFromThread(ManagedRegister dest,
+                                    ThreadOffset offs);
+
+  // Copying routines
+  virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size);
+
+  virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+                                    ManagedRegister scratch);
+
+  virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+                                  ManagedRegister scratch);
+
+  virtual void CopyRef(FrameOffset dest, FrameOffset src,
+                       ManagedRegister scratch);
+
+  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(ManagedRegister dest, Offset dest_offset,
+                    ManagedRegister src, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void MemoryBarrier(ManagedRegister);
+
+  // Sign extension
+  virtual void SignExtend(ManagedRegister mreg, size_t size);
+
+  // Zero extension
+  virtual void ZeroExtend(ManagedRegister mreg, size_t size);
+
+  // Exploit fast access in managed code to Thread::Current()
+  virtual void GetCurrentThread(ManagedRegister tr);
+  virtual void GetCurrentThread(FrameOffset dest_offset,
+                                ManagedRegister scratch);
+
+  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed. in_reg holds a possibly stale reference
+  // that can be used to avoid loading the SIRT entry to see if the value is
+  // NULL.
+  virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+                               ManagedRegister in_reg, bool null_allowed);
+
+  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed.
+  virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+                               ManagedRegister scratch, bool null_allowed);
+
+  // src holds a SIRT entry (Object**); load this into dst.
+  virtual void LoadReferenceFromSirt(ManagedRegister dst,
+                                     ManagedRegister src);
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+  virtual void VerifyObject(ManagedRegister src, bool could_be_null);
+  virtual void VerifyObject(FrameOffset src, bool could_be_null);
+
+  // Call to address held at [base+offset]
+  virtual void Call(ManagedRegister base, Offset offset,
+                    ManagedRegister scratch);
+  virtual void Call(FrameOffset base, Offset offset,
+                    ManagedRegister scratch);
+  virtual void Call(ThreadOffset offset, ManagedRegister scratch);
+
+  // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to a ExceptionSlowPath if it is.
+  virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+
+ private:
+  inline void EmitUint8(uint8_t value);
+  inline void EmitInt32(int32_t value);
+  inline void EmitRegisterOperand(int rm, int reg);
+  inline void EmitXmmRegisterOperand(int rm, XmmRegister reg);
+  inline void EmitFixup(AssemblerFixup* fixup);
+  inline void EmitOperandSizeOverride();
+
+  void EmitOperand(int rm, const Operand& operand);
+  void EmitImmediate(const Immediate& imm);
+  void EmitComplex(int rm, const Operand& operand, const Immediate& immediate);
+  void EmitLabel(Label* label, int instruction_size);
+  void EmitLabelLink(Label* label);
+  void EmitNearLabelLink(Label* label);
+
+  void EmitGenericShift(int rm, Register reg, const Immediate& imm);
+  void EmitGenericShift(int rm, Register operand, Register shifter);
+
+  DISALLOW_COPY_AND_ASSIGN(X86Assembler);
+};
+
+inline void X86Assembler::EmitUint8(uint8_t value) {
+  buffer_.Emit<uint8_t>(value);
+}
+
+inline void X86Assembler::EmitInt32(int32_t value) {
+  buffer_.Emit<int32_t>(value);
+}
+
+inline void X86Assembler::EmitRegisterOperand(int rm, int reg) {
+  CHECK_GE(rm, 0);
+  CHECK_LT(rm, 8);
+  buffer_.Emit<uint8_t>(0xC0 + (rm << 3) + reg);
+}
+
+inline void X86Assembler::EmitXmmRegisterOperand(int rm, XmmRegister reg) {
+  EmitRegisterOperand(rm, static_cast<Register>(reg));
+}
+
+inline void X86Assembler::EmitFixup(AssemblerFixup* fixup) {
+  buffer_.EmitFixup(fixup);
+}
+
+inline void X86Assembler::EmitOperandSizeOverride() {
+  EmitUint8(0x66);
+}
+
+// Slow path entered when Thread::Current()->exception_ is non-null.
+class X86ExceptionSlowPath : public SlowPath {
+ public:
+  explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
+  virtual void Emit(Assembler *sp_asm);
+ private:
+  const size_t stack_adjust_;
+};
+
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_X86_ASSEMBLER_X86_H_
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
new file mode 100644
index 0000000..5d8a3b1
--- /dev/null
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_x86.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(AssemblerX86, CreateBuffer) {
+  AssemblerBuffer buffer;
+  AssemblerBuffer::EnsureCapacity ensured(&buffer);
+  buffer.Emit<uint8_t>(0x42);
+  ASSERT_EQ(static_cast<size_t>(1), buffer.Size());
+  buffer.Emit<int32_t>(42);
+  ASSERT_EQ(static_cast<size_t>(5), buffer.Size());
+}
+
+}  // namespace art
diff --git a/compiler/utils/x86/constants_x86.h b/compiler/utils/x86/constants_x86.h
new file mode 100644
index 0000000..45c3834
--- /dev/null
+++ b/compiler/utils/x86/constants_x86.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_X86_CONSTANTS_X86_H_
+#define ART_COMPILER_UTILS_X86_CONSTANTS_X86_H_
+
+#include <iosfwd>
+
+#include "arch/x86/registers_x86.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "globals.h"
+
+namespace art {
+namespace x86 {
+
+enum ByteRegister {
+  AL = 0,
+  CL = 1,
+  DL = 2,
+  BL = 3,
+  AH = 4,
+  CH = 5,
+  DH = 6,
+  BH = 7,
+  kNoByteRegister = -1  // Signals an illegal register.
+};
+
+
+enum XmmRegister {
+  XMM0 = 0,
+  XMM1 = 1,
+  XMM2 = 2,
+  XMM3 = 3,
+  XMM4 = 4,
+  XMM5 = 5,
+  XMM6 = 6,
+  XMM7 = 7,
+  kNumberOfXmmRegisters = 8,
+  kNoXmmRegister = -1  // Signals an illegal register.
+};
+std::ostream& operator<<(std::ostream& os, const XmmRegister& reg);
+
+enum X87Register {
+  ST0 = 0,
+  ST1 = 1,
+  ST2 = 2,
+  ST3 = 3,
+  ST4 = 4,
+  ST5 = 5,
+  ST6 = 6,
+  ST7 = 7,
+  kNumberOfX87Registers = 8,
+  kNoX87Register = -1  // Signals an illegal register.
+};
+std::ostream& operator<<(std::ostream& os, const X87Register& reg);
+
+enum ScaleFactor {
+  TIMES_1 = 0,
+  TIMES_2 = 1,
+  TIMES_4 = 2,
+  TIMES_8 = 3
+};
+
+enum Condition {
+  kOverflow     =  0,
+  kNoOverflow   =  1,
+  kBelow        =  2,
+  kAboveEqual   =  3,
+  kEqual        =  4,
+  kNotEqual     =  5,
+  kBelowEqual   =  6,
+  kAbove        =  7,
+  kSign         =  8,
+  kNotSign      =  9,
+  kParityEven   = 10,
+  kParityOdd    = 11,
+  kLess         = 12,
+  kGreaterEqual = 13,
+  kLessEqual    = 14,
+  kGreater      = 15,
+
+  kZero         = kEqual,
+  kNotZero      = kNotEqual,
+  kNegative     = kSign,
+  kPositive     = kNotSign
+};
+
+
+class Instr {
+ public:
+  static const uint8_t kHltInstruction = 0xF4;
+  // We prefer not to use the int3 instruction since it conflicts with gdb.
+  static const uint8_t kBreakPointInstruction = kHltInstruction;
+
+  bool IsBreakPoint() {
+    return (*reinterpret_cast<const uint8_t*>(this)) == kBreakPointInstruction;
+  }
+
+  // Instructions are read out of a code stream. The only way to get a
+  // reference to an instruction is to convert a pointer. There is no way
+  // to allocate or create instances of class Instr.
+  // Use the At(pc) function to create references to Instr.
+  static Instr* At(uintptr_t pc) { return reinterpret_cast<Instr*>(pc); }
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
+};
+
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_X86_CONSTANTS_X86_H_
diff --git a/compiler/utils/x86/managed_register_x86.cc b/compiler/utils/x86/managed_register_x86.cc
new file mode 100644
index 0000000..4697d06
--- /dev/null
+++ b/compiler/utils/x86/managed_register_x86.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "managed_register_x86.h"
+
+#include "globals.h"
+
+namespace art {
+namespace x86 {
+
+// These cpu registers are never available for allocation.
+static const Register kReservedCpuRegistersArray[] = { ESP };
+
+
+// Some ports reduce the number of registers available for allocation in
+// debug builds to increase register pressure; here all registers are
+// available, since we need them all for caching.
+static const int kNumberOfAvailableCpuRegisters = kNumberOfCpuRegisters;
+static const int kNumberOfAvailableXmmRegisters = kNumberOfXmmRegisters;
+static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
+
+
+// Define register pairs.
+// This list must be kept in sync with the RegisterPair enum.
+#define REGISTER_PAIR_LIST(P) \
+  P(EAX, EDX)                 \
+  P(EAX, ECX)                 \
+  P(EAX, EBX)                 \
+  P(EAX, EDI)                 \
+  P(EDX, ECX)                 \
+  P(EDX, EBX)                 \
+  P(EDX, EDI)                 \
+  P(ECX, EBX)                 \
+  P(ECX, EDI)                 \
+  P(EBX, EDI)
+
+
+struct RegisterPairDescriptor {
+  RegisterPair reg;  // Used to verify that the enum is in sync.
+  Register low;
+  Register high;
+};
+
+
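+// REGISTER_PAIR_ENUMERATION expands each P(low, high) entry into an
+// initializer such as { EAX_EDX, EAX, EDX }, so the table below is indexed by
+// the RegisterPair enum.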
+static const RegisterPairDescriptor kRegisterPairs[] = {
+#define REGISTER_PAIR_ENUMERATION(low, high) { low##_##high, low, high },
+  REGISTER_PAIR_LIST(REGISTER_PAIR_ENUMERATION)
+#undef REGISTER_PAIR_ENUMERATION
+};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg) {
+  os << X86ManagedRegister::FromRegisterPair(reg);
+  return os;
+}
+
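+// For example, FromRegisterPair(EAX_EDX) overlaps FromCpuRegister(EDX), and
+// two pairs overlap whenever they share a constituent register.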
+bool X86ManagedRegister::Overlaps(const X86ManagedRegister& other) const {
+  if (IsNoRegister() || other.IsNoRegister()) return false;
+  CHECK(IsValidManagedRegister());
+  CHECK(other.IsValidManagedRegister());
+  if (Equals(other)) return true;
+  if (IsRegisterPair()) {
+    Register low = AsRegisterPairLow();
+    Register high = AsRegisterPairHigh();
+    return X86ManagedRegister::FromCpuRegister(low).Overlaps(other) ||
+        X86ManagedRegister::FromCpuRegister(high).Overlaps(other);
+  }
+  if (other.IsRegisterPair()) {
+    return other.Overlaps(*this);
+  }
+  return false;
+}
+
+
+int X86ManagedRegister::AllocIdLow() const {
+  CHECK(IsRegisterPair());
+  const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+                           kNumberOfX87RegIds);
+  CHECK_EQ(r, kRegisterPairs[r].reg);
+  return kRegisterPairs[r].low;
+}
+
+
+int X86ManagedRegister::AllocIdHigh() const {
+  CHECK(IsRegisterPair());
+  const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+                           kNumberOfX87RegIds);
+  CHECK_EQ(r, kRegisterPairs[r].reg);
+  return kRegisterPairs[r].high;
+}
+
+
+void X86ManagedRegister::Print(std::ostream& os) const {
+  if (!IsValidManagedRegister()) {
+    os << "No Register";
+  } else if (IsXmmRegister()) {
+    os << "XMM: " << static_cast<int>(AsXmmRegister());
+  } else if (IsX87Register()) {
+    os << "X87: " << static_cast<int>(AsX87Register());
+  } else if (IsCpuRegister()) {
+    os << "CPU: " << static_cast<int>(AsCpuRegister());
+  } else if (IsRegisterPair()) {
+    os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh();
+  } else {
+    os << "??: " << RegId();
+  }
+}
+
+std::ostream& operator<<(std::ostream& os, const X86ManagedRegister& reg) {
+  reg.Print(os);
+  return os;
+}
+
+}  // namespace x86
+}  // namespace art
diff --git a/compiler/utils/x86/managed_register_x86.h b/compiler/utils/x86/managed_register_x86.h
new file mode 100644
index 0000000..0201a96
--- /dev/null
+++ b/compiler/utils/x86/managed_register_x86.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_
+#define ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_
+
+#include "constants_x86.h"
+#include "utils/managed_register.h"
+
+namespace art {
+namespace x86 {
+
+// Values for register pairs.
+// The registers in kReservedCpuRegistersArray in managed_register_x86.cc are
+// not used in pairs.
+// The table kRegisterPairs in managed_register_x86.cc must be kept in sync
+// with this enum.
+enum RegisterPair {
+  EAX_EDX = 0,
+  EAX_ECX = 1,
+  EAX_EBX = 2,
+  EAX_EDI = 3,
+  EDX_ECX = 4,
+  EDX_EBX = 5,
+  EDX_EDI = 6,
+  ECX_EBX = 7,
+  ECX_EDI = 8,
+  EBX_EDI = 9,
+  kNumberOfRegisterPairs = 10,
+  kNoRegisterPair = -1,
+};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg);
+
+const int kNumberOfCpuRegIds = kNumberOfCpuRegisters;
+const int kNumberOfCpuAllocIds = kNumberOfCpuRegisters;
+
+const int kNumberOfXmmRegIds = kNumberOfXmmRegisters;
+const int kNumberOfXmmAllocIds = kNumberOfXmmRegisters;
+
+const int kNumberOfX87RegIds = kNumberOfX87Registers;
+const int kNumberOfX87AllocIds = kNumberOfX87Registers;
+
+const int kNumberOfPairRegIds = kNumberOfRegisterPairs;
+
+const int kNumberOfRegIds = kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+    kNumberOfX87RegIds + kNumberOfPairRegIds;
+const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds +
+    kNumberOfX87AllocIds;
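+
+// Register pairs have register ids but no allocation ids of their own; a pair
+// is allocated through the ids of its two cpu registers (see AllocIdLow and
+// AllocIdHigh below).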
+
+// Register ids map:
+//   [0..R[  cpu registers (enum Register)
+//   [R..X[  xmm registers (enum XmmRegister)
+//   [X..S[  x87 registers (enum X87Register)
+//   [S..P[  register pairs (enum RegisterPair)
+// where
+//   R = kNumberOfCpuRegIds
+//   X = R + kNumberOfXmmRegIds
+//   S = X + kNumberOfX87RegIds
+//   P = S + kNumberOfRegisterPairs
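+//
+// With 8 cpu, 8 xmm and 8 x87 registers this gives, for example, id 11 for
+// XMM3 (8 + 3), id 18 for ST2 (16 + 2) and id 25 for EAX_ECX (24 + 1).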
+
+// Allocation ids map:
+//   [0..R[  cpu registers (enum Register)
+//   [R..X[  xmm registers (enum XmmRegister)
+//   [X..S[  x87 registers (enum X87Register)
+// where
+//   R = kNumberOfCpuRegIds
+//   X = R + kNumberOfXmmRegIds
+//   S = X + kNumberOfX87RegIds
+
+
+// An instance of class 'ManagedRegister' represents a single cpu register
+// (enum Register), an xmm register (enum XmmRegister), an x87 register
+// (enum X87Register), or a pair of cpu registers (enum RegisterPair).
+// 'ManagedRegister::NoRegister()' provides an invalid register.
+// There is a one-to-one mapping between ManagedRegister and register id.
+class X86ManagedRegister : public ManagedRegister {
+ public:
+  ByteRegister AsByteRegister() const {
+    CHECK(IsCpuRegister());
+    CHECK_LT(AsCpuRegister(), ESP);  // ESP, EBP, ESI and EDI cannot be encoded as byte registers.
+    return static_cast<ByteRegister>(id_);
+  }
+
+  Register AsCpuRegister() const {
+    CHECK(IsCpuRegister());
+    return static_cast<Register>(id_);
+  }
+
+  XmmRegister AsXmmRegister() const {
+    CHECK(IsXmmRegister());
+    return static_cast<XmmRegister>(id_ - kNumberOfCpuRegIds);
+  }
+
+  X87Register AsX87Register() const {
+    CHECK(IsX87Register());
+    return static_cast<X87Register>(id_ -
+                                    (kNumberOfCpuRegIds + kNumberOfXmmRegIds));
+  }
+
+  Register AsRegisterPairLow() const {
+    CHECK(IsRegisterPair());
+    // The mapping of register ids allows the use of AllocIdLow() here.
+    return FromRegId(AllocIdLow()).AsCpuRegister();
+  }
+
+  Register AsRegisterPairHigh() const {
+    CHECK(IsRegisterPair());
+    // The mapping of register ids allows the use of AllocIdHigh() here.
+    return FromRegId(AllocIdHigh()).AsCpuRegister();
+  }
+
+  bool IsCpuRegister() const {
+    CHECK(IsValidManagedRegister());
+    return (0 <= id_) && (id_ < kNumberOfCpuRegIds);
+  }
+
+  bool IsXmmRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - kNumberOfCpuRegIds;
+    return (0 <= test) && (test < kNumberOfXmmRegIds);
+  }
+
+  bool IsX87Register() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - (kNumberOfCpuRegIds + kNumberOfXmmRegIds);
+    return (0 <= test) && (test < kNumberOfX87RegIds);
+  }
+
+  bool IsRegisterPair() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ -
+        (kNumberOfCpuRegIds + kNumberOfXmmRegIds + kNumberOfX87RegIds);
+    return (0 <= test) && (test < kNumberOfPairRegIds);
+  }
+
+  void Print(std::ostream& os) const;
+
+  // Returns true if the two managed-registers ('this' and 'other') overlap.
+  // Either managed-register may be the NoRegister. If both are the NoRegister
+  // then false is returned.
+  bool Overlaps(const X86ManagedRegister& other) const;
+
+  static X86ManagedRegister FromCpuRegister(Register r) {
+    CHECK_NE(r, kNoRegister);
+    return FromRegId(r);
+  }
+
+  static X86ManagedRegister FromXmmRegister(XmmRegister r) {
+    CHECK_NE(r, kNoXmmRegister);
+    return FromRegId(r + kNumberOfCpuRegIds);
+  }
+
+  static X86ManagedRegister FromX87Register(X87Register r) {
+    CHECK_NE(r, kNoX87Register);
+    return FromRegId(r + kNumberOfCpuRegIds + kNumberOfXmmRegIds);
+  }
+
+  static X86ManagedRegister FromRegisterPair(RegisterPair r) {
+    CHECK_NE(r, kNoRegisterPair);
+    return FromRegId(r + (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+                          kNumberOfX87RegIds));
+  }
+
+ private:
+  bool IsValidManagedRegister() const {
+    return (0 <= id_) && (id_ < kNumberOfRegIds);
+  }
+
+  int RegId() const {
+    CHECK(!IsNoRegister());
+    return id_;
+  }
+
+  int AllocId() const {
+    CHECK(IsValidManagedRegister() && !IsRegisterPair());
+    CHECK_LT(id_, kNumberOfAllocIds);
+    return id_;
+  }
+
+  int AllocIdLow() const;
+  int AllocIdHigh() const;
+
+  friend class ManagedRegister;
+
+  explicit X86ManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
+
+  static X86ManagedRegister FromRegId(int reg_id) {
+    X86ManagedRegister reg(reg_id);
+    CHECK(reg.IsValidManagedRegister());
+    return reg;
+  }
+};
+
+std::ostream& operator<<(std::ostream& os, const X86ManagedRegister& reg);
+
+}  // namespace x86
+
+inline x86::X86ManagedRegister ManagedRegister::AsX86() const {
+  x86::X86ManagedRegister reg(id_);
+  CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
+  return reg;
+}
+
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_
diff --git a/compiler/utils/x86/managed_register_x86_test.cc b/compiler/utils/x86/managed_register_x86_test.cc
new file mode 100644
index 0000000..4fbafda
--- /dev/null
+++ b/compiler/utils/x86/managed_register_x86_test.cc
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "globals.h"
+#include "managed_register_x86.h"
+#include "gtest/gtest.h"
+
+namespace art {
+namespace x86 {
+
+TEST(X86ManagedRegister, NoRegister) {
+  X86ManagedRegister reg = ManagedRegister::NoRegister().AsX86();
+  EXPECT_TRUE(reg.IsNoRegister());
+  EXPECT_TRUE(!reg.Overlaps(reg));
+}
+
+TEST(X86ManagedRegister, CpuRegister) {
+  X86ManagedRegister reg = X86ManagedRegister::FromCpuRegister(EAX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(EAX, reg.AsCpuRegister());
+
+  reg = X86ManagedRegister::FromCpuRegister(EBX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(EBX, reg.AsCpuRegister());
+
+  reg = X86ManagedRegister::FromCpuRegister(ECX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(ECX, reg.AsCpuRegister());
+
+  reg = X86ManagedRegister::FromCpuRegister(EDI);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(EDI, reg.AsCpuRegister());
+}
+
+TEST(X86ManagedRegister, XmmRegister) {
+  X86ManagedRegister reg = X86ManagedRegister::FromXmmRegister(XMM0);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(XMM0, reg.AsXmmRegister());
+
+  reg = X86ManagedRegister::FromXmmRegister(XMM1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(XMM1, reg.AsXmmRegister());
+
+  reg = X86ManagedRegister::FromXmmRegister(XMM7);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(XMM7, reg.AsXmmRegister());
+}
+
+TEST(X86ManagedRegister, X87Register) {
+  X86ManagedRegister reg = X86ManagedRegister::FromX87Register(ST0);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(ST0, reg.AsX87Register());
+
+  reg = X86ManagedRegister::FromX87Register(ST1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(ST1, reg.AsX87Register());
+
+  reg = X86ManagedRegister::FromX87Register(ST7);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(ST7, reg.AsX87Register());
+}
+
+TEST(X86ManagedRegister, RegisterPair) {
+  X86ManagedRegister reg = X86ManagedRegister::FromRegisterPair(EAX_EDX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EDX, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EAX_ECX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+  EXPECT_EQ(ECX, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EAX_EBX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EBX, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EAX_EDI);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EDX_ECX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EDX, reg.AsRegisterPairLow());
+  EXPECT_EQ(ECX, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EDX_EBX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EDX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EBX, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EDX_EDI);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EDX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(ECX_EBX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(ECX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EBX, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(ECX_EDI);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(ECX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EBX_EDI);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EBX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+}
+
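+// Equals is exact identity: registers of different kinds never compare
+// equal, and a CPU register does not equal a pair that contains it.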
+TEST(X86ManagedRegister, Equals) {
+  X86ManagedRegister reg_eax = X86ManagedRegister::FromCpuRegister(EAX);
+  EXPECT_TRUE(reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  X86ManagedRegister reg_xmm0 = X86ManagedRegister::FromXmmRegister(XMM0);
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(reg_xmm0.Equals(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  X86ManagedRegister reg_st0 = X86ManagedRegister::FromX87Register(ST0);
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(reg_st0.Equals(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  X86ManagedRegister reg_pair = X86ManagedRegister::FromRegisterPair(EAX_EDX);
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(reg_pair.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+}
+
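+// Overlaps is coarser than Equals: it holds whenever two managed
+// registers share an underlying machine register, in particular between
+// a pair and either of its constituent CPU registers.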
+TEST(X86ManagedRegister, Overlaps) {
+  X86ManagedRegister reg = X86ManagedRegister::FromCpuRegister(EAX);
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  reg = X86ManagedRegister::FromCpuRegister(EDX);
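+  // Mirrors the EAX/EDI/EBX cases: a register overlaps itself.
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDX)));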
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  reg = X86ManagedRegister::FromCpuRegister(EDI);
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  reg = X86ManagedRegister::FromCpuRegister(EBX);
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  reg = X86ManagedRegister::FromXmmRegister(XMM0);
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  reg = X86ManagedRegister::FromX87Register(ST0);
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
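+  // Pairs overlap their constituent registers and any pair sharing a
+  // constituent, e.g. EAX_EDX overlaps EDX_ECX through EDX.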
+  reg = X86ManagedRegister::FromRegisterPair(EAX_EDX);
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
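+  // EDX is the pair's high register and should overlap as well.
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDX)));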
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_ECX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  reg = X86ManagedRegister::FromRegisterPair(EBX_EDI);
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_EBX)));
+
+  reg = X86ManagedRegister::FromRegisterPair(EDX_ECX);
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
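+  // Mirrors the EBX_EDI case: the pair overlaps both of its constituents.
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(ECX)));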
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_EBX)));
+}
+
+}  // namespace x86
+}  // namespace art