Continuing register cleanup

Ready for review.

Continue the process of using RegStorage rather than
ints to hold register values in the top layers of codegen.
Given the huge number of changes in this CL, I've attempted
to minimize the number of actual logic changes.  With this
CL, the use of ints for registers has largely been eliminated
except in the lowest utility levels.  "Wide" utility routines
have been updated to take a single RegStorage rather than
a pair of ints representing low and high registers.

Upcoming CLs will be smaller and more targeted.  My expectations:
   o Allocate float double registers as a single double rather than
     a pair of float single registers.
   o Refactor to push code which assumes long and double Dalvik
     values are held in a pair of registers to the target-dependent
     layer.
   o Clean-up of the xxx_mir.h files to reduce the number of #defines
     for registers.  May also do a register renumbering to make all
     of our targets' register naming more consistent.  Possibly
     introduce a target-independent float/non-float test at the
     RegStorage level.

Change-Id: I646de7392bdec94595dd2c6f76e0f1c4331096ff
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index c218621..1784af3 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -203,19 +203,53 @@
   dr15 = fr30 + ARM_FP_DOUBLE,
 };
 
+// TODO: clean this up; reduce use of or eliminate macros
+
+const RegStorage rs_r0(RegStorage::k32BitSolo, r0);
+const RegStorage rs_r1(RegStorage::k32BitSolo, r1);
+const RegStorage rs_r2(RegStorage::k32BitSolo, r2);
+const RegStorage rs_r3(RegStorage::k32BitSolo, r3);
+const RegStorage rs_rARM_SUSPEND(RegStorage::k32BitSolo, rARM_SUSPEND);
+const RegStorage rs_r5(RegStorage::k32BitSolo, r5);
+const RegStorage rs_r6(RegStorage::k32BitSolo, r6);
+const RegStorage rs_r7(RegStorage::k32BitSolo, r7);
+const RegStorage rs_r8(RegStorage::k32BitSolo, r8);
+const RegStorage rs_rARM_SELF(RegStorage::k32BitSolo, rARM_SELF);
+const RegStorage rs_r10(RegStorage::k32BitSolo, r10);
+const RegStorage rs_r11(RegStorage::k32BitSolo, r11);
+const RegStorage rs_r12(RegStorage::k32BitSolo, r12);
+const RegStorage rs_r13sp(RegStorage::k32BitSolo, r13sp);
+const RegStorage rs_rARM_SP(RegStorage::k32BitSolo, rARM_SP);
+const RegStorage rs_r14lr(RegStorage::k32BitSolo, r14lr);
+const RegStorage rs_rARM_LR(RegStorage::k32BitSolo, rARM_LR);
+const RegStorage rs_r15pc(RegStorage::k32BitSolo, r15pc);
+const RegStorage rs_rARM_PC(RegStorage::k32BitSolo, rARM_PC);
+const RegStorage rs_invalid(RegStorage::kInvalid);
+
 // Target-independent aliases.
 #define rARM_ARG0 r0
+#define rs_rARM_ARG0 rs_r0
 #define rARM_ARG1 r1
+#define rs_rARM_ARG1 rs_r1
 #define rARM_ARG2 r2
+#define rs_rARM_ARG2 rs_r2
 #define rARM_ARG3 r3
+#define rs_rARM_ARG3 rs_r3
 #define rARM_FARG0 r0
+#define rs_ARM_FARG0 rs_r0
 #define rARM_FARG1 r1
+#define rs_rARM_FARG1 rs_r1
 #define rARM_FARG2 r2
+#define rs_rARM_FARG2 rs_r2
 #define rARM_FARG3 r3
+#define rs_rARM_FARG3 rs_r3
 #define rARM_RET0 r0
+#define rs_rARM_RET0 rs_r0
 #define rARM_RET1 r1
+#define rs_rARM_RET1 rs_r1
 #define rARM_INVOKE_TGT rARM_LR
-#define rARM_COUNT INVALID_REG
+#define rs_rARM_INVOKE_TGT rs_rARM_LR
+#define rARM_COUNT RegStorage::kInvalidRegVal
 
 // RegisterLocation templates return values (r0, or r0/r1).
 const RegLocation arm_loc_c_return
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 94f0ca4..175fc06 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -31,11 +31,11 @@
  *
  * The test loop will look something like:
  *
- *   adr   rBase, <table>
+ *   adr   r_base, <table>
  *   ldr   r_val, [rARM_SP, v_reg_off]
  *   mov   r_idx, #table_size
  * lp:
- *   ldmia rBase!, {r_key, r_disp}
+ *   ldmia r_base!, {r_key, r_disp}
  *   sub   r_idx, #1
  *   cmp   r_val, r_key
  *   ifeq
@@ -60,29 +60,29 @@
 
   // Get the switch value
   rl_src = LoadValue(rl_src, kCoreReg);
-  int rBase = AllocTemp();
+  RegStorage r_base = AllocTemp();
   /* Allocate key and disp temps */
-  int r_key = AllocTemp();
-  int r_disp = AllocTemp();
+  RegStorage r_key = AllocTemp();
+  RegStorage r_disp = AllocTemp();
   // Make sure r_key's register number is less than r_disp's number for ldmia
-  if (r_key > r_disp) {
-    int tmp = r_disp;
+  if (r_key.GetReg() > r_disp.GetReg()) {
+    RegStorage tmp = r_disp;
     r_disp = r_key;
     r_key = tmp;
   }
   // Materialize a pointer to the switch table
-  NewLIR3(kThumb2Adr, rBase, 0, WrapPointer(tab_rec));
+  NewLIR3(kThumb2Adr, r_base.GetReg(), 0, WrapPointer(tab_rec));
   // Set up r_idx
-  int r_idx = AllocTemp();
+  RegStorage r_idx = AllocTemp();
   LoadConstant(r_idx, size);
   // Establish loop branch target
   LIR* target = NewLIR0(kPseudoTargetLabel);
   // Load next key/disp
-  NewLIR2(kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
-  OpRegReg(kOpCmp, r_key, rl_src.reg.GetReg());
+  NewLIR2(kThumb2LdmiaWB, r_base.GetReg(), (1 << r_key.GetReg()) | (1 << r_disp.GetReg()));
+  OpRegReg(kOpCmp, r_key, rl_src.reg);
   // Go if match. NOTE: No instruction set switch here - must stay Thumb2
   OpIT(kCondEq, "");
-  LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp);
+  LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp.GetReg());
   tab_rec->anchor = switch_branch;
   // Needs to use setflags encoding here
   OpRegRegImm(kOpSub, r_idx, r_idx, 1);  // For value == 1, this should set flags.
@@ -109,28 +109,28 @@
 
   // Get the switch value
   rl_src = LoadValue(rl_src, kCoreReg);
-  int table_base = AllocTemp();
+  RegStorage table_base = AllocTemp();
   // Materialize a pointer to the switch table
-  NewLIR3(kThumb2Adr, table_base, 0, WrapPointer(tab_rec));
+  NewLIR3(kThumb2Adr, table_base.GetReg(), 0, WrapPointer(tab_rec));
   int low_key = s4FromSwitchData(&table[2]);
-  int keyReg;
+  RegStorage keyReg;
   // Remove the bias, if necessary
   if (low_key == 0) {
-    keyReg = rl_src.reg.GetReg();
+    keyReg = rl_src.reg;
   } else {
     keyReg = AllocTemp();
-    OpRegRegImm(kOpSub, keyReg, rl_src.reg.GetReg(), low_key);
+    OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
   }
   // Bounds check - if < 0 or >= size continue following switch
   OpRegImm(kOpCmp, keyReg, size-1);
   LIR* branch_over = OpCondBranch(kCondHi, NULL);
 
   // Load the displacement from the switch table
-  int disp_reg = AllocTemp();
+  RegStorage disp_reg = AllocTemp();
   LoadBaseIndexed(table_base, keyReg, disp_reg, 2, kWord);
 
   // ..and go! NOTE: No instruction set switch here - must stay Thumb2
-  LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg);
+  LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg.GetReg());
   tab_rec->anchor = switch_branch;
 
   /* branch_over target here */
@@ -163,13 +163,13 @@
 
   // Making a call - use explicit registers
   FlushAllRegs();   /* Everything to home location */
-  LoadValueDirectFixed(rl_src, r0);
-  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData).Int32Value(),
-               rARM_LR);
+  LoadValueDirectFixed(rl_src, rs_r0);
+  LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData).Int32Value(),
+               rs_rARM_LR);
   // Materialize a pointer to the fill data image
   NewLIR3(kThumb2Adr, r1, 0, WrapPointer(tab_rec));
   ClobberCallerSave();
-  LIR* call_inst = OpReg(kOpBlx, rARM_LR);
+  LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
   MarkSafepointPC(call_inst);
 }
 
@@ -179,7 +179,7 @@
  */
 void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
   FlushAllRegs();
-  LoadValueDirectFixed(rl_src, r0);  // Get obj
+  LoadValueDirectFixed(rl_src, rs_r0);  // Get obj
   LockCallTemps();  // Prepare for explicit register usage
   constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
   if (kArchVariantHasGoodBranchPredictor) {
@@ -188,13 +188,13 @@
       null_check_branch = nullptr;  // No null check.
     } else {
       // If the null-check fails its handled by the slow-path to reduce exception related meta-data.
-      null_check_branch = OpCmpImmBranch(kCondEq, r0, 0, NULL);
+      null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
     }
-    LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+    LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), rs_r2);
     NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
-    LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, r1, 0, NULL);
+    LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r1, 0, NULL);
     NewLIR4(kThumb2Strex, r1, r2, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
-    LIR* lock_success_branch = OpCmpImmBranch(kCondEq, r1, 0, NULL);
+    LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);
 
 
     LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -204,9 +204,9 @@
     }
     // TODO: move to a slow path.
     // Go expensive route - artLockObjectFromCode(obj);
-    LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rARM_LR);
+    LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rs_rARM_LR);
     ClobberCallerSave();
-    LIR* call_inst = OpReg(kOpBlx, rARM_LR);
+    LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
     MarkSafepointPC(call_inst);
 
     LIR* success_target = NewLIR0(kPseudoTargetLabel);
@@ -214,19 +214,19 @@
     GenMemBarrier(kLoadLoad);
   } else {
     // Explicit null-check as slow-path is entered using an IT.
-    GenNullCheck(r0, opt_flags);
-    LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+    GenNullCheck(rs_r0, opt_flags);
+    LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), rs_r2);
     MarkPossibleNullPointerException(opt_flags);
     NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
-    OpRegImm(kOpCmp, r1, 0);
+    OpRegImm(kOpCmp, rs_r1, 0);
     OpIT(kCondEq, "");
     NewLIR4(kThumb2Strex/*eq*/, r1, r2, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
-    OpRegImm(kOpCmp, r1, 0);
+    OpRegImm(kOpCmp, rs_r1, 0);
     OpIT(kCondNe, "T");
     // Go expensive route - artLockObjectFromCode(self, obj);
-    LoadWordDisp/*ne*/(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rARM_LR);
+    LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rs_rARM_LR);
     ClobberCallerSave();
-    LIR* call_inst = OpReg(kOpBlx/*ne*/, rARM_LR);
+    LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
     MarkSafepointPC(call_inst);
     GenMemBarrier(kLoadLoad);
   }
@@ -239,22 +239,22 @@
  */
 void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
   FlushAllRegs();
-  LoadValueDirectFixed(rl_src, r0);  // Get obj
+  LoadValueDirectFixed(rl_src, rs_r0);  // Get obj
   LockCallTemps();  // Prepare for explicit register usage
   LIR* null_check_branch;
-  LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+  LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), rs_r2);
   constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
   if (kArchVariantHasGoodBranchPredictor) {
     if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
       null_check_branch = nullptr;  // No null check.
     } else {
       // If the null-check fails its handled by the slow-path to reduce exception related meta-data.
-      null_check_branch = OpCmpImmBranch(kCondEq, r0, 0, NULL);
+      null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
     }
-    LoadWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r1);
-    LoadConstantNoClobber(r3, 0);
-    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, r1, r2, NULL);
-    StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
+    LoadWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
+    LoadConstantNoClobber(rs_r3, 0);
+    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r1, rs_r2, NULL);
+    StoreWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
     LIR* unlock_success_branch = OpUnconditionalBranch(NULL);
 
     LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -264,9 +264,9 @@
     }
     // TODO: move to a slow path.
     // Go expensive route - artUnlockObjectFromCode(obj);
-    LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rARM_LR);
+    LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rs_rARM_LR);
     ClobberCallerSave();
-    LIR* call_inst = OpReg(kOpBlx, rARM_LR);
+    LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
     MarkSafepointPC(call_inst);
 
     LIR* success_target = NewLIR0(kPseudoTargetLabel);
@@ -274,19 +274,20 @@
     GenMemBarrier(kStoreLoad);
   } else {
     // Explicit null-check as slow-path is entered using an IT.
-    GenNullCheck(r0, opt_flags);
-    LoadWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r1);  // Get lock
+    GenNullCheck(rs_r0, opt_flags);
+    LoadWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);  // Get lock
     MarkPossibleNullPointerException(opt_flags);
-    LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
-    LoadConstantNoClobber(r3, 0);
+    LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), rs_r2);
+    LoadConstantNoClobber(rs_r3, 0);
     // Is lock unheld on lock or held by us (==thread_id) on unlock?
-    OpRegReg(kOpCmp, r1, r2);
+    OpRegReg(kOpCmp, rs_r1, rs_r2);
     OpIT(kCondEq, "EE");
-    StoreWordDisp/*eq*/(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
+    StoreWordDisp/*eq*/(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
     // Go expensive route - UnlockObjectFromCode(obj);
-    LoadWordDisp/*ne*/(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rARM_LR);
+    LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(),
+                       rs_rARM_LR);
     ClobberCallerSave();
-    LIR* call_inst = OpReg(kOpBlx/*ne*/, rARM_LR);
+    LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
     MarkSafepointPC(call_inst);
     GenMemBarrier(kStoreLoad);
   }
@@ -295,10 +296,10 @@
 void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
   int ex_offset = Thread::ExceptionOffset().Int32Value();
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  int reset_reg = AllocTemp();
-  LoadWordDisp(rARM_SELF, ex_offset, rl_result.reg.GetReg());
+  RegStorage reset_reg = AllocTemp();
+  LoadWordDisp(rs_rARM_SELF, ex_offset, rl_result.reg);
   LoadConstant(reset_reg, 0);
-  StoreWordDisp(rARM_SELF, ex_offset, reset_reg);
+  StoreWordDisp(rs_rARM_SELF, ex_offset, reset_reg);
   FreeTemp(reset_reg);
   StoreValue(rl_dest, rl_result);
 }
@@ -306,14 +307,13 @@
 /*
  * Mark garbage collection card. Skip if the value we're storing is null.
  */
-void ArmMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) {
-  int reg_card_base = AllocTemp();
-  int reg_card_no = AllocTemp();
+void ArmMir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+  RegStorage reg_card_base = AllocTemp();
+  RegStorage reg_card_no = AllocTemp();
   LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
-  LoadWordDisp(rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
+  LoadWordDisp(rs_rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
   OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
-  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
-                   kUnsignedByte);
+  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
   LIR* target = NewLIR0(kPseudoTargetLabel);
   branch_over->target = target;
   FreeTemp(reg_card_base);
@@ -344,7 +344,7 @@
   if (!skip_overflow_check) {
     if (Runtime::Current()->ExplicitStackOverflowChecks()) {
       /* Load stack limit */
-      LoadWordDisp(rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
+      LoadWordDisp(rs_rARM_SELF, Thread::StackEndOffset().Int32Value(), rs_r12);
     }
   }
   /* Spill core callee saves */
@@ -374,14 +374,14 @@
           m2l_->ResetDefTracking();
           GenerateTargetLabel();
           if (restore_lr_) {
-            m2l_->LoadWordDisp(kArmRegSP, sp_displace_ - 4, kArmRegLR);
+            m2l_->LoadWordDisp(rs_rARM_SP, sp_displace_ - 4, rs_rARM_LR);
           }
-          m2l_->OpRegImm(kOpAdd, kArmRegSP, sp_displace_);
+          m2l_->OpRegImm(kOpAdd, rs_rARM_SP, sp_displace_);
           m2l_->ClobberCallerSave();
           ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflow);
           // Load the entrypoint directly into the pc instead of doing a load + branch. Assumes
           // codegen and target are in thumb2 mode.
-          m2l_->LoadWordDisp(rARM_SELF, func_offset.Int32Value(), rARM_PC);
+          m2l_->LoadWordDisp(rs_rARM_SELF, func_offset.Int32Value(), rs_rARM_PC);
         }
 
        private:
@@ -389,29 +389,29 @@
         const size_t sp_displace_;
       };
       if (static_cast<size_t>(frame_size_) > Thread::kStackOverflowReservedUsableBytes) {
-        OpRegRegImm(kOpSub, rARM_LR, rARM_SP, frame_size_without_spills);
-        LIR* branch = OpCmpBranch(kCondUlt, rARM_LR, r12, nullptr);
+        OpRegRegImm(kOpSub, rs_rARM_LR, rs_rARM_SP, frame_size_without_spills);
+        LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_LR, rs_r12, nullptr);
         // Need to restore LR since we used it as a temp.
         AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, true,
                                                      frame_size_without_spills));
-        OpRegCopy(rARM_SP, rARM_LR);     // Establish stack
+        OpRegCopy(rs_rARM_SP, rs_rARM_LR);     // Establish stack
       } else {
         // If the frame is small enough we are guaranteed to have enough space that remains to
         // handle signals on the user stack.
-        OpRegRegImm(kOpSub, rARM_SP, rARM_SP, frame_size_without_spills);
-        LIR* branch = OpCmpBranch(kCondUlt, rARM_SP, r12, nullptr);
+        OpRegRegImm(kOpSub, rs_rARM_SP, rs_rARM_SP, frame_size_without_spills);
+        LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_SP, rs_r12, nullptr);
         AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, false, frame_size_));
       }
     } else {
       // Implicit stack overflow check.
       // Generate a load from [sp, #-framesize].  If this is in the stack
       // redzone we will get a segmentation fault.
-      OpRegImm(kOpSub, rARM_SP, frame_size_without_spills);
-      LoadWordDisp(rARM_SP, 0, rARM_LR);
+      OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
+      LoadWordDisp(rs_rARM_SP, 0, rs_rARM_LR);
       MarkPossibleStackOverflowException();
     }
   } else {
-    OpRegImm(kOpSub, rARM_SP, frame_size_without_spills);
+    OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
   }
 
   FlushIns(ArgLocs, rl_method);
@@ -432,7 +432,7 @@
   LockTemp(r1);
 
   NewLIR0(kPseudoMethodExit);
-  OpRegImm(kOpAdd, rARM_SP, frame_size_ - (spill_count * 4));
+  OpRegImm(kOpAdd, rs_rARM_SP, frame_size_ - (spill_count * 4));
   /* Need to restore any FP callee saves? */
   if (num_fp_spills_) {
     NewLIR1(kThumb2VPopCS, num_fp_spills_);
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 6e72c80..0f1e171 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -28,32 +28,35 @@
 
     // Required for target - codegen helpers.
     bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
-                                    RegLocation rl_dest, int lit);
-    int LoadHelper(ThreadOffset offset);
+                            RegLocation rl_dest, int lit);
     LIR* CheckSuspendUsingLoad() OVERRIDE;
-    LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
-    LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
-                          int s_reg);
-    LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
-    LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
-                             int r_dest, int r_dest_hi, OpSize size, int s_reg);
-    LIR* LoadConstantNoClobber(int r_dest, int value);
-    LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
-    LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
-    LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
-    LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
-    LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
-                              int r_src, int r_src_hi, OpSize size, int s_reg);
-    void MarkGCCard(int val_reg, int tgt_addr_reg);
+    RegStorage LoadHelper(ThreadOffset offset);
+    LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+                      int s_reg);
+    LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
+    LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+                         OpSize size);
+    LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+                             RegStorage r_dest, RegStorage r_dest_hi, OpSize size, int s_reg);
+    LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
+    LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+    LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+    LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src);
+    LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+                          OpSize size);
+    LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+                              RegStorage r_src, RegStorage r_src_hi, OpSize size, int s_reg);
+    void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
 
     // Required for target - register utilities.
     bool IsFpReg(int reg);
+    bool IsFpReg(RegStorage reg);
     bool SameRegType(int reg1, int reg2);
-    int AllocTypedTemp(bool fp_hint, int reg_class);
+    RegStorage AllocTypedTemp(bool fp_hint, int reg_class);
     RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
     int S2d(int low_reg, int high_reg);
-    int TargetReg(SpecialTargetRegister reg);
-    int GetArgMappingToPhysicalReg(int arg_num);
+    RegStorage TargetReg(SpecialTargetRegister reg);
+    RegStorage GetArgMappingToPhysicalReg(int arg_num);
     RegLocation GetReturnAlt();
     RegLocation GetReturnWideAlt();
     RegLocation LocCReturn();
@@ -64,8 +67,8 @@
     uint64_t GetRegMaskCommon(int reg);
     void AdjustSpillMask();
     void ClobberCallerSave();
-    void FlushReg(int reg);
-    void FlushRegWide(int reg1, int reg2);
+    void FlushReg(RegStorage reg);
+    void FlushRegWide(RegStorage reg);
     void FreeCallTemps();
     void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
     void LockCallTemps();
@@ -97,13 +100,16 @@
                      RegLocation rl_src, int scale, bool card_mark);
     void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_shift);
-    void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
-                          RegLocation rl_src1, RegLocation rl_src2);
-    void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
-                         RegLocation rl_src1, RegLocation rl_src2);
+    void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                    RegLocation rl_src2);
+    void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                    RegLocation rl_src2);
+    void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                    RegLocation rl_src2);
+    void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                          RegLocation rl_src2);
+    void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                         RegLocation rl_src2);
     void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                   RegLocation rl_src2);
     void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
@@ -113,15 +119,18 @@
     bool GenInlinedPeek(CallInfo* info, OpSize size);
     bool GenInlinedPoke(CallInfo* info, OpSize size);
     void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
-                                ThrowKind kind);
-    RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
-    RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+    void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                   RegLocation rl_src2);
+    void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                    RegLocation rl_src2);
+    void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                    RegLocation rl_src2);
+    LIR* GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base, int offset,
+                        ThrowKind kind);
+    RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
+    RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
     void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    void GenDivZeroCheck(int reg_lo, int reg_hi);
+    void GenDivZeroCheck(RegStorage reg);
     void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
     void GenExitSequence();
     void GenSpecialExitSequence();
@@ -134,7 +143,7 @@
     void GenMonitorExit(int opt_flags, RegLocation rl_src);
     void GenMoveException(RegLocation rl_dest);
     void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
-                                               int first_bit, int second_bit);
+                                       int first_bit, int second_bit);
     void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
     void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
     void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
@@ -142,36 +151,36 @@
 
     // Required for target - single operation generators.
     LIR* OpUnconditionalBranch(LIR* target);
-    LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
-    LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+    LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
+    LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
     LIR* OpCondBranch(ConditionCode cc, LIR* target);
-    LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
-    LIR* OpFpRegCopy(int r_dest, int r_src);
+    LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
+    LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
     LIR* OpIT(ConditionCode cond, const char* guide);
-    LIR* OpMem(OpKind op, int rBase, int disp);
-    LIR* OpPcRelLoad(int reg, LIR* target);
-    LIR* OpReg(OpKind op, int r_dest_src);
-    LIR* OpRegCopy(int r_dest, int r_src);
-    LIR* OpRegCopyNoInsert(int r_dest, int r_src);
-    LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
-    LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
-    LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
-    LIR* OpMovRegMem(int r_dest, int r_base, int offset, MoveType move_type);
-    LIR* OpMovMemReg(int r_base, int offset, int r_src, MoveType move_type);
-    LIR* OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src);
-    LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
-    LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+    LIR* OpMem(OpKind op, RegStorage r_base, int disp);
+    LIR* OpPcRelLoad(RegStorage reg, LIR* target);
+    LIR* OpReg(OpKind op, RegStorage r_dest_src);
+    LIR* OpRegCopy(RegStorage r_dest, RegStorage r_src);
+    LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
+    LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
+    LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
+    LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
+    LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
+    LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
+    LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
+    LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
+    LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
     LIR* OpTestSuspend(LIR* target);
     LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset);
-    LIR* OpVldm(int rBase, int count);
-    LIR* OpVstm(int rBase, int count);
-    void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
-    void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+    LIR* OpVldm(RegStorage r_base, int count);
+    LIR* OpVstm(RegStorage r_base, int count);
+    void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
+    void OpRegCopyWide(RegStorage dest, RegStorage src);
     void OpTlsCmp(ThreadOffset offset, int val);
 
-    LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size,
+    LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
                           int s_reg);
-    LIR* StoreBaseDispBody(int rBase, int displacement, int r_src, int r_src_hi, OpSize size);
+    LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
     LIR* OpRegRegRegShift(OpKind op, int r_dest, int r_src1, int r_src2, int shift);
     LIR* OpRegRegShift(OpKind op, int r_dest_src1, int r_src2, int shift);
     static const ArmEncodingMap EncodingMap[kArmLast];
@@ -190,8 +199,8 @@
     void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
     void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
     void AssignDataOffsets();
-    RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
-                          RegLocation rl_src2, bool is_div, bool check_zero);
+    RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+                          bool is_div, bool check_zero);
     RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
 };
 
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 6868f6f..398bf96 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -111,8 +111,8 @@
   rl_result = EvalLoc(rl_dest, kFPReg, true);
   DCHECK(rl_dest.wide);
   DCHECK(rl_result.wide);
-  NewLIR3(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
-          S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+  NewLIR3(op, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg()),
+          S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg()));
   StoreValueWide(rl_dest, rl_result);
 }
 
@@ -143,17 +143,19 @@
       break;
     case Instruction::LONG_TO_DOUBLE: {
       rl_src = LoadValueWide(rl_src, kFPReg);
-      src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+      src_reg = S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg());
       rl_result = EvalLoc(rl_dest, kFPReg, true);
-      // TODO: clean up AllocTempDouble so that its result has the double bits set.
-      int tmp1 = AllocTempDouble();
-      int tmp2 = AllocTempDouble();
+      // TODO: fix AllocTempDouble to return a k64BitSolo double reg and lose the ARM_FP_DOUBLE.
+      RegStorage tmp1 = AllocTempDouble();
+      RegStorage tmp2 = AllocTempDouble();
 
-      NewLIR2(kThumb2VcvtF64S32, tmp1 | ARM_FP_DOUBLE, (src_reg & ~ARM_FP_DOUBLE) + 1);
-      NewLIR2(kThumb2VcvtF64U32, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), (src_reg & ~ARM_FP_DOUBLE));
-      LoadConstantWide(tmp2, tmp2 + 1, INT64_C(0x41f0000000000000));
-      NewLIR3(kThumb2VmlaF64, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), tmp1 | ARM_FP_DOUBLE,
-              tmp2 | ARM_FP_DOUBLE);
+      // FIXME: needs 64-bit register cleanup.
+      NewLIR2(kThumb2VcvtF64S32, tmp1.GetLowReg() | ARM_FP_DOUBLE, (src_reg & ~ARM_FP_DOUBLE) + 1);
+      NewLIR2(kThumb2VcvtF64U32, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
+              (src_reg & ~ARM_FP_DOUBLE));
+      LoadConstantWide(tmp2, INT64_C(0x41f0000000000000));
+      NewLIR3(kThumb2VmlaF64, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
+              tmp1.GetLowReg() | ARM_FP_DOUBLE, tmp2.GetLowReg() | ARM_FP_DOUBLE);
       FreeTemp(tmp1);
       FreeTemp(tmp2);
       StoreValueWide(rl_dest, rl_result);
@@ -164,20 +166,23 @@
       return;
     case Instruction::LONG_TO_FLOAT: {
       rl_src = LoadValueWide(rl_src, kFPReg);
-      src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+      src_reg = S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg());
       rl_result = EvalLoc(rl_dest, kFPReg, true);
       // Allocate temp registers.
-      int high_val = AllocTempDouble();
-      int low_val = AllocTempDouble();
-      int const_val = AllocTempDouble();
+      RegStorage high_val = AllocTempDouble();
+      RegStorage low_val = AllocTempDouble();
+      RegStorage const_val = AllocTempDouble();
       // Long to double.
-      NewLIR2(kThumb2VcvtF64S32, high_val | ARM_FP_DOUBLE, (src_reg & ~ARM_FP_DOUBLE) + 1);
-      NewLIR2(kThumb2VcvtF64U32, low_val | ARM_FP_DOUBLE, (src_reg & ~ARM_FP_DOUBLE));
-      LoadConstantWide(const_val, const_val + 1, INT64_C(0x41f0000000000000));
-      NewLIR3(kThumb2VmlaF64, low_val | ARM_FP_DOUBLE, high_val | ARM_FP_DOUBLE,
-          const_val | ARM_FP_DOUBLE);
+      NewLIR2(kThumb2VcvtF64S32, high_val.GetLowReg() | ARM_FP_DOUBLE,
+              (src_reg & ~ARM_FP_DOUBLE) + 1);
+      NewLIR2(kThumb2VcvtF64U32, low_val.GetLowReg() | ARM_FP_DOUBLE,
+              (src_reg & ~ARM_FP_DOUBLE));
+      LoadConstantWide(const_val, INT64_C(0x41f0000000000000));
+      NewLIR3(kThumb2VmlaF64, low_val.GetLowReg() | ARM_FP_DOUBLE,
+              high_val.GetLowReg() | ARM_FP_DOUBLE,
+              const_val.GetLowReg() | ARM_FP_DOUBLE);
       // Double to float.
-      NewLIR2(kThumb2VcvtDF, rl_result.reg.GetReg(), low_val | ARM_FP_DOUBLE);
+      NewLIR2(kThumb2VcvtDF, rl_result.reg.GetReg(), low_val.GetLowReg() | ARM_FP_DOUBLE);
       // Free temp registers.
       FreeTemp(high_val);
       FreeTemp(low_val);
@@ -194,14 +199,14 @@
   }
   if (rl_src.wide) {
     rl_src = LoadValueWide(rl_src, kFPReg);
-    src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+    src_reg = S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg());
   } else {
     rl_src = LoadValue(rl_src, kFPReg);
     src_reg = rl_src.reg.GetReg();
   }
   if (rl_dest.wide) {
     rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
+    NewLIR2(op, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), src_reg);
     StoreValueWide(rl_dest, rl_result);
   } else {
     rl_result = EvalLoc(rl_dest, kFPReg, true);
@@ -220,8 +225,8 @@
     rl_src2 = mir_graph_->GetSrcWide(mir, 2);
     rl_src1 = LoadValueWide(rl_src1, kFPReg);
     rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg()),
-            S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+    NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg()),
+            S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg()));
   } else {
     rl_src1 = mir_graph_->GetSrc(mir, 0);
     rl_src2 = mir_graph_->GetSrc(mir, 1);
@@ -294,16 +299,16 @@
     // In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
     ClobberSReg(rl_dest.s_reg_low);
     rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    LoadConstant(rl_result.reg.GetReg(), default_result);
-    NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg()),
-            S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+    LoadConstant(rl_result.reg, default_result);
+    NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg()),
+            S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg()));
   } else {
     rl_src1 = LoadValue(rl_src1, kFPReg);
     rl_src2 = LoadValue(rl_src2, kFPReg);
     // In case result vreg is also a srcvreg, break association to avoid useless copy by EvalLoc()
     ClobberSReg(rl_dest.s_reg_low);
     rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    LoadConstant(rl_result.reg.GetReg(), default_result);
+    LoadConstant(rl_result.reg, default_result);
     NewLIR2(kThumb2Vcmps, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   }
   DCHECK(!ARM_FPREG(rl_result.reg.GetReg()));
@@ -315,7 +320,7 @@
   GenBarrier();
 
   OpIT(kCondEq, "");
-  LoadConstant(rl_result.reg.GetReg(), 0);
+  LoadConstant(rl_result.reg, 0);
   GenBarrier();
 
   StoreValue(rl_dest, rl_result);
@@ -333,8 +338,8 @@
   RegLocation rl_result;
   rl_src = LoadValueWide(rl_src, kFPReg);
   rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(kThumb2Vnegd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
-          S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
+  NewLIR2(kThumb2Vnegd, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
+          S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg()));
   StoreValueWide(rl_dest, rl_result);
 }
 
@@ -345,18 +350,19 @@
   RegLocation rl_dest = InlineTargetWide(info);  // double place for result
   rl_src = LoadValueWide(rl_src, kFPReg);
   RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(kThumb2Vsqrtd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
-          S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
-  NewLIR2(kThumb2Vcmpd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
-          S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()));
+  // TODO: shouldn't need S2d once 64bitSolo has proper double tag bit.
+  NewLIR2(kThumb2Vsqrtd, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
+          S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg()));
+  NewLIR2(kThumb2Vcmpd, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
+          S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()));
   NewLIR0(kThumb2Fmstat);
   branch = NewLIR2(kThumbBCond, 0, kArmCondEq);
   ClobberCallerSave();
   LockCallTemps();  // Using fixed registers
-  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt));
-  NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
-  NewLIR1(kThumbBlxR, r_tgt);
-  NewLIR3(kThumb2Fmdrr, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), r0, r1);
+  RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt));
+  NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg()));
+  NewLIR1(kThumbBlxR, r_tgt.GetReg());
+  NewLIR3(kThumb2Fmdrr, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), r0, r1);
   branch->target = NewLIR0(kPseudoTargetLabel);
   StoreValueWide(rl_dest, rl_result);
   return true;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 93d48d6..46db466 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -24,7 +24,7 @@
 
 namespace art {
 
-LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target) {
+LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
   OpRegReg(kOpCmp, src1, src2);
   return OpCondBranch(cond, target);
 }
@@ -88,16 +88,16 @@
   LIR* target2;
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  int t_reg = AllocTemp();
+  RegStorage t_reg = AllocTemp();
   LoadConstant(t_reg, -1);
-  OpRegReg(kOpCmp, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+  OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
   LIR* branch1 = OpCondBranch(kCondLt, NULL);
   LIR* branch2 = OpCondBranch(kCondGt, NULL);
-  OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+  OpRegRegReg(kOpSub, t_reg, rl_src1.reg, rl_src2.reg);
   LIR* branch3 = OpCondBranch(kCondEq, NULL);
 
   OpIT(kCondHi, "E");
-  NewLIR2(kThumb2MovI8M, t_reg, ModifiedImmediate(-1));
+  NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
   LoadConstant(t_reg, 1);
   GenBarrier();
 
@@ -107,7 +107,7 @@
   target1 = NewLIR0(kPseudoTargetLabel);
 
   RegLocation rl_temp = LocCReturn();  // Just using as template, will change
-  rl_temp.reg.SetReg(t_reg);
+  rl_temp.reg.SetReg(t_reg.GetReg());
   StoreValue(rl_dest, rl_temp);
   FreeTemp(t_reg);
 
@@ -125,12 +125,12 @@
   LIR* taken = &block_label_list_[bb->taken];
   LIR* not_taken = &block_label_list_[bb->fall_through];
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  int32_t low_reg = rl_src1.reg.GetReg();
-  int32_t high_reg = rl_src1.reg.GetHighReg();
+  RegStorage low_reg = rl_src1.reg.GetLow();
+  RegStorage high_reg = rl_src1.reg.GetHigh();
 
   if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
-    int t_reg = AllocTemp();
-    NewLIR4(kThumb2OrrRRRs, t_reg, low_reg, high_reg, 0);
+    RegStorage t_reg = AllocTemp();
+    NewLIR4(kThumb2OrrRRRs, t_reg.GetReg(), low_reg.GetReg(), high_reg.GetReg(), 0);
     FreeTemp(t_reg);
     OpCondBranch(ccode, taken);
     return;
@@ -185,33 +185,33 @@
     }
     bool cheap_false_val = InexpensiveConstantInt(false_val);
     if (cheap_false_val && ccode == kCondEq && (true_val == 0 || true_val == -1)) {
-      OpRegRegImm(kOpSub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), -true_val);
+      OpRegRegImm(kOpSub, rl_result.reg, rl_src.reg, -true_val);
       DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
       OpIT(true_val == 0 ? kCondNe : kCondUge, "");
-      LoadConstant(rl_result.reg.GetReg(), false_val);
+      LoadConstant(rl_result.reg, false_val);
       GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
     } else if (cheap_false_val && ccode == kCondEq && true_val == 1) {
-      OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 1);
+      OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, 1);
       DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
       OpIT(kCondLs, "");
-      LoadConstant(rl_result.reg.GetReg(), false_val);
+      LoadConstant(rl_result.reg, false_val);
       GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
     } else if (cheap_false_val && InexpensiveConstantInt(true_val)) {
-      OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+      OpRegImm(kOpCmp, rl_src.reg, 0);
       OpIT(ccode, "E");
-      LoadConstant(rl_result.reg.GetReg(), true_val);
-      LoadConstant(rl_result.reg.GetReg(), false_val);
+      LoadConstant(rl_result.reg, true_val);
+      LoadConstant(rl_result.reg, false_val);
       GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
     } else {
       // Unlikely case - could be tuned.
-      int t_reg1 = AllocTemp();
-      int t_reg2 = AllocTemp();
+      RegStorage t_reg1 = AllocTemp();
+      RegStorage t_reg2 = AllocTemp();
       LoadConstant(t_reg1, true_val);
       LoadConstant(t_reg2, false_val);
-      OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+      OpRegImm(kOpCmp, rl_src.reg, 0);
       OpIT(ccode, "E");
-      OpRegCopy(rl_result.reg.GetReg(), t_reg1);
-      OpRegCopy(rl_result.reg.GetReg(), t_reg2);
+      OpRegCopy(rl_result.reg, t_reg1);
+      OpRegCopy(rl_result.reg, t_reg2);
       GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
     }
   } else {
@@ -221,17 +221,17 @@
     rl_true = LoadValue(rl_true, kCoreReg);
     rl_false = LoadValue(rl_false, kCoreReg);
     rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+    OpRegImm(kOpCmp, rl_src.reg, 0);
     if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {  // Is the "true" case already in place?
       OpIT(NegateComparison(ccode), "");
-      OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg());
+      OpRegCopy(rl_result.reg, rl_false.reg);
     } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {  // False case in place?
       OpIT(ccode, "");
-      OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
+      OpRegCopy(rl_result.reg, rl_true.reg);
     } else {  // Normal - select between the two.
       OpIT(ccode, "E");
-      OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
-      OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg());
+      OpRegCopy(rl_result.reg, rl_true.reg);
+      OpRegCopy(rl_result.reg, rl_false.reg);
     }
     GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
   }
@@ -261,7 +261,7 @@
   LIR* not_taken = &block_label_list_[bb->fall_through];
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  OpRegReg(kOpCmp, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+  OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
   switch (ccode) {
     case kCondEq:
       OpCondBranch(kCondNe, not_taken);
@@ -292,7 +292,7 @@
     default:
       LOG(FATAL) << "Unexpected ccode: " << ccode;
   }
-  OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+  OpRegReg(kOpCmp, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
   OpCondBranch(ccode, taken);
 }
 
@@ -300,8 +300,7 @@
  * Generate a register comparison to an immediate and branch.  Caller
  * is responsible for setting branch target field.
  */
-LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
-                                LIR* target) {
+LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
   LIR* branch;
   ArmConditionCode arm_cond = ArmConditionEncoding(cond);
   /*
@@ -315,10 +314,10 @@
    */
   bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
   skip &= ((cu_->code_item->insns_size_in_code_units_ - current_dalvik_offset_) > 64);
-  if (!skip && (ARM_LOWREG(reg)) && (check_value == 0) &&
+  if (!skip && (ARM_LOWREG(reg.GetReg())) && (check_value == 0) &&
      ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
     branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
-                     reg, 0);
+                     reg.GetReg(), 0);
   } else {
     OpRegImm(kOpCmp, reg, check_value);
     branch = NewLIR2(kThumbBCond, 0, arm_cond);
@@ -327,56 +326,64 @@
   return branch;
 }
 
-LIR* ArmMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) {
+LIR* ArmMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
   LIR* res;
   int opcode;
-  if (ARM_FPREG(r_dest) || ARM_FPREG(r_src))
+  // If src or dest is a pair, we'll be using low reg.
+  if (r_dest.IsPair()) {
+    r_dest = r_dest.GetLow();
+  }
+  if (r_src.IsPair()) {
+    r_src = r_src.GetLow();
+  }
+  if (ARM_FPREG(r_dest.GetReg()) || ARM_FPREG(r_src.GetReg()))
     return OpFpRegCopy(r_dest, r_src);
-  if (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src))
+  if (ARM_LOWREG(r_dest.GetReg()) && ARM_LOWREG(r_src.GetReg()))
     opcode = kThumbMovRR;
-  else if (!ARM_LOWREG(r_dest) && !ARM_LOWREG(r_src))
+  else if (!ARM_LOWREG(r_dest.GetReg()) && !ARM_LOWREG(r_src.GetReg()))
      opcode = kThumbMovRR_H2H;
-  else if (ARM_LOWREG(r_dest))
+  else if (ARM_LOWREG(r_dest.GetReg()))
      opcode = kThumbMovRR_H2L;
   else
      opcode = kThumbMovRR_L2H;
-  res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
+  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
   if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
     res->flags.is_nop = true;
   }
   return res;
 }
 
-LIR* ArmMir2Lir::OpRegCopy(int r_dest, int r_src) {
+LIR* ArmMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
   LIR* res = OpRegCopyNoInsert(r_dest, r_src);
   AppendLIR(res);
   return res;
 }
 
-void ArmMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
-                               int src_hi) {
-  bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi);
-  bool src_fp = ARM_FPREG(src_lo) && ARM_FPREG(src_hi);
-  DCHECK_EQ(ARM_FPREG(src_lo), ARM_FPREG(src_hi));
-  DCHECK_EQ(ARM_FPREG(dest_lo), ARM_FPREG(dest_hi));
+void ArmMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
+  bool dest_fp = ARM_FPREG(r_dest.GetLowReg());
+  bool src_fp = ARM_FPREG(r_src.GetLowReg());
   if (dest_fp) {
     if (src_fp) {
-      OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+      // FIXME: handle 64-bit solo's here.
+      OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
+                RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
     } else {
-      NewLIR3(kThumb2Fmdrr, S2d(dest_lo, dest_hi), src_lo, src_hi);
+      NewLIR3(kThumb2Fmdrr, S2d(r_dest.GetLowReg(), r_dest.GetHighReg()),
+              r_src.GetLowReg(), r_src.GetHighReg());
     }
   } else {
     if (src_fp) {
-      NewLIR3(kThumb2Fmrrd, dest_lo, dest_hi, S2d(src_lo, src_hi));
+      NewLIR3(kThumb2Fmrrd, r_dest.GetLowReg(), r_dest.GetHighReg(), S2d(r_src.GetLowReg(),
+              r_src.GetHighReg()));
     } else {
       // Handle overlap
-      if (src_hi == dest_lo) {
-        DCHECK_NE(src_lo, dest_hi);
-        OpRegCopy(dest_hi, src_hi);
-        OpRegCopy(dest_lo, src_lo);
+      if (r_src.GetHighReg() == r_dest.GetLowReg()) {
+        DCHECK_NE(r_src.GetLowReg(), r_dest.GetHighReg());
+        OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+        OpRegCopy(r_dest.GetLow(), r_src.GetLow());
       } else {
-        OpRegCopy(dest_lo, src_lo);
-        OpRegCopy(dest_hi, src_hi);
+        OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+        OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
       }
     }
   }
@@ -423,27 +430,27 @@
     return false;
   }
 
-  int r_magic = AllocTemp();
+  RegStorage r_magic = AllocTemp();
   LoadConstant(r_magic, magic_table[lit].magic);
   rl_src = LoadValue(rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  int r_hi = AllocTemp();
-  int r_lo = AllocTemp();
-  NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.reg.GetReg());
+  RegStorage r_hi = AllocTemp();
+  RegStorage r_lo = AllocTemp();
+  NewLIR4(kThumb2Smull, r_lo.GetReg(), r_hi.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
   switch (pattern) {
     case Divide3:
-      OpRegRegRegShift(kOpSub, rl_result.reg.GetReg(), r_hi,
+      OpRegRegRegShift(kOpSub, rl_result.reg.GetReg(), r_hi.GetReg(),
                rl_src.reg.GetReg(), EncodeShift(kArmAsr, 31));
       break;
     case Divide5:
-      OpRegRegImm(kOpAsr, r_lo, rl_src.reg.GetReg(), 31);
-      OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo, r_hi,
+      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
+      OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo.GetReg(), r_hi.GetReg(),
                EncodeShift(kArmAsr, magic_table[lit].shift));
       break;
     case Divide7:
-      OpRegReg(kOpAdd, r_hi, rl_src.reg.GetReg());
-      OpRegRegImm(kOpAsr, r_lo, rl_src.reg.GetReg(), 31);
-      OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo, r_hi,
+      OpRegReg(kOpAdd, r_hi, rl_src.reg);
+      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
+      OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo.GetReg(), r_hi.GetReg(),
                EncodeShift(kArmAsr, magic_table[lit].shift));
       break;
     default:
@@ -453,8 +460,8 @@
   return true;
 }
 
-LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code,
-                    int reg1, int base, int offset, ThrowKind kind) {
+LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base,
+                                int offset, ThrowKind kind) {
   LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
   return NULL;
 }
@@ -470,12 +477,11 @@
   return rl_dest;
 }
 
-RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit,
-                                     bool is_div) {
+RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
 
   // Put the literal in a temp.
-  int lit_temp = AllocTemp();
+  RegStorage lit_temp = AllocTemp();
   LoadConstant(lit_temp, lit);
   // Use the generic case for div/rem with arg2 in a register.
   // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
@@ -485,22 +491,22 @@
   return rl_result;
 }
 
-RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
+RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
                                   bool is_div) {
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (is_div) {
     // Simple case, use sdiv instruction.
-    OpRegRegReg(kOpDiv, rl_result.reg.GetReg(), reg1, reg2);
+    OpRegRegReg(kOpDiv, rl_result.reg, reg1, reg2);
   } else {
     // Remainder case, use the following code:
     // temp = reg1 / reg2      - integer division
     // temp = temp * reg2
     // dest = reg1 - temp
 
-    int temp = AllocTemp();
+    RegStorage temp = AllocTemp();
     OpRegRegReg(kOpDiv, temp, reg1, reg2);
     OpRegReg(kOpMul, temp, reg2);
-    OpRegRegReg(kOpSub, rl_result.reg.GetReg(), reg1, temp);
+    OpRegRegReg(kOpSub, rl_result.reg, reg1, temp);
     FreeTemp(temp);
   }
 
@@ -515,10 +521,10 @@
   rl_src2 = LoadValue(rl_src2, kCoreReg);
   RegLocation rl_dest = InlineTarget(info);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
   OpIT((is_min) ? kCondGt : kCondLt, "E");
-  OpRegReg(kOpMov, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
-  OpRegReg(kOpMov, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
+  OpRegReg(kOpMov, rl_result.reg, rl_src2.reg);
+  OpRegReg(kOpMov, rl_result.reg, rl_src1.reg);
   GenBarrier();
   StoreValue(rl_dest, rl_result);
   return true;
@@ -526,24 +532,24 @@
 
 bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
   RegLocation rl_src_address = info->args[0];  // long address
-  rl_src_address.wide = 0;  // ignore high half in info->args[1]
+  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
   RegLocation rl_dest = InlineTarget(info);
   RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (size == kLong) {
     // Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0.
-    if (rl_address.reg.GetReg() != rl_result.reg.GetReg()) {
-      LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), kWord, INVALID_SREG);
-      LoadBaseDisp(rl_address.reg.GetReg(), 4, rl_result.reg.GetHighReg(), kWord, INVALID_SREG);
+    if (rl_address.reg.GetReg() != rl_result.reg.GetLowReg()) {
+      LoadWordDisp(rl_address.reg, 0, rl_result.reg.GetLow());
+      LoadWordDisp(rl_address.reg, 4, rl_result.reg.GetHigh());
     } else {
-      LoadBaseDisp(rl_address.reg.GetReg(), 4, rl_result.reg.GetHighReg(), kWord, INVALID_SREG);
-      LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), kWord, INVALID_SREG);
+      LoadWordDisp(rl_address.reg, 4, rl_result.reg.GetHigh());
+      LoadWordDisp(rl_address.reg, 0, rl_result.reg.GetLow());
     }
     StoreValueWide(rl_dest, rl_result);
   } else {
     DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
     // Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
-    LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
+    LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG);
     StoreValue(rl_dest, rl_result);
   }
   return true;
@@ -551,24 +557,24 @@
 
 bool ArmMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
   RegLocation rl_src_address = info->args[0];  // long address
-  rl_src_address.wide = 0;  // ignore high half in info->args[1]
+  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
   RegLocation rl_src_value = info->args[2];  // [size] value
   RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
   if (size == kLong) {
     // Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
     RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
-    StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), kWord);
-    StoreBaseDisp(rl_address.reg.GetReg(), 4, rl_value.reg.GetHighReg(), kWord);
+    StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), kWord);
+    StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), kWord);
   } else {
     DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
     // Unaligned store with STR and STRSH is allowed on ARMv7 with SCTLR.A set to 0.
     RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
-    StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
+    StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
   }
   return true;
 }
 
-void ArmMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) {
+void ArmMir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
   LOG(FATAL) << "Unexpected use of OpLea for Arm";
 }
 
@@ -581,7 +587,7 @@
   // Unused - RegLocation rl_src_unsafe = info->args[0];
   RegLocation rl_src_obj = info->args[1];  // Object - known non-null
   RegLocation rl_src_offset = info->args[2];  // long low
-  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
+  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
   RegLocation rl_src_expected = info->args[4];  // int, long or Object
   // If is_long, high half is in info->args[5]
   RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
@@ -602,25 +608,25 @@
   LockTemp(rARM_LR);
   bool load_early = true;
   if (is_long) {
-    bool expected_is_core_reg =
-        rl_src_expected.location == kLocPhysReg && !IsFpReg(rl_src_expected.reg.GetReg());
-    bool new_value_is_core_reg =
-        rl_src_new_value.location == kLocPhysReg && !IsFpReg(rl_src_new_value.reg.GetReg());
-    bool expected_is_good_reg = expected_is_core_reg && !IsTemp(rl_src_expected.reg.GetReg());
-    bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(rl_src_new_value.reg.GetReg());
+    int expected_reg = rl_src_expected.reg.GetLowReg();
+    int new_val_reg = rl_src_new_value.reg.GetLowReg();
+    bool expected_is_core_reg = rl_src_expected.location == kLocPhysReg && !IsFpReg(expected_reg);
+    bool new_value_is_core_reg = rl_src_new_value.location == kLocPhysReg && !IsFpReg(new_val_reg);
+    bool expected_is_good_reg = expected_is_core_reg && !IsTemp(expected_reg);
+    bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(new_val_reg);
 
     if (!expected_is_good_reg && !new_value_is_good_reg) {
       // None of expected/new_value is non-temp reg, need to load both late
       load_early = false;
       // Make sure they are not in the temp regs and the load will not be skipped.
       if (expected_is_core_reg) {
-        FlushRegWide(rl_src_expected.reg.GetReg(), rl_src_expected.reg.GetHighReg());
+        FlushRegWide(rl_src_expected.reg);
         ClobberSReg(rl_src_expected.s_reg_low);
         ClobberSReg(GetSRegHi(rl_src_expected.s_reg_low));
         rl_src_expected.location = kLocDalvikFrame;
       }
       if (new_value_is_core_reg) {
-        FlushRegWide(rl_src_new_value.reg.GetReg(), rl_src_new_value.reg.GetHighReg());
+        FlushRegWide(rl_src_new_value.reg);
         ClobberSReg(rl_src_new_value.s_reg_low);
         ClobberSReg(GetSRegHi(rl_src_new_value.s_reg_low));
         rl_src_new_value.location = kLocDalvikFrame;
@@ -641,13 +647,13 @@
 
   if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
     // Mark card for object assuming new value is stored.
-    MarkGCCard(rl_new_value.reg.GetReg(), rl_object.reg.GetReg());
+    MarkGCCard(rl_new_value.reg, rl_object.reg);
   }
 
   RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
 
-  int r_ptr = rARM_LR;
-  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
+  RegStorage r_ptr = rs_rARM_LR;
+  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);
 
   // Free now unneeded rl_object and rl_offset to give more temps.
   ClobberSReg(rl_object.s_reg_low);
@@ -662,8 +668,8 @@
     rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
   } else {
     // NOTE: partially defined rl_expected & rl_new_value - but we just want the regs.
-    int low_reg = AllocTemp();
-    int high_reg = AllocTemp();
+    int low_reg = AllocTemp().GetReg();
+    int high_reg = AllocTemp().GetReg();
     rl_new_value.reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
     rl_expected = rl_new_value;
   }
@@ -673,38 +679,38 @@
   // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
   // result = tmp != 0;
 
-  int r_tmp = AllocTemp();
+  RegStorage r_tmp = AllocTemp();
   LIR* target = NewLIR0(kPseudoTargetLabel);
 
   if (is_long) {
-    int r_tmp_high = AllocTemp();
+    RegStorage r_tmp_high = AllocTemp();
     if (!load_early) {
-      LoadValueDirectWide(rl_src_expected, rl_expected.reg.GetReg(), rl_expected.reg.GetHighReg());
+      LoadValueDirectWide(rl_src_expected, rl_expected.reg);
     }
-    NewLIR3(kThumb2Ldrexd, r_tmp, r_tmp_high, r_ptr);
-    OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetReg());
-    OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHighReg());
+    NewLIR3(kThumb2Ldrexd, r_tmp.GetReg(), r_tmp_high.GetReg(), r_ptr.GetReg());
+    OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetLow());
+    OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHigh());
     if (!load_early) {
-      LoadValueDirectWide(rl_src_new_value, rl_new_value.reg.GetReg(), rl_new_value.reg.GetHighReg());
+      LoadValueDirectWide(rl_src_new_value, rl_new_value.reg);
     }
     // Make sure we use ORR that sets the ccode
-    if (ARM_LOWREG(r_tmp) && ARM_LOWREG(r_tmp_high)) {
-      NewLIR2(kThumbOrr, r_tmp, r_tmp_high);
+    if (ARM_LOWREG(r_tmp.GetReg()) && ARM_LOWREG(r_tmp_high.GetReg())) {
+      NewLIR2(kThumbOrr, r_tmp.GetReg(), r_tmp_high.GetReg());
     } else {
-      NewLIR4(kThumb2OrrRRRs, r_tmp, r_tmp, r_tmp_high, 0);
+      NewLIR4(kThumb2OrrRRRs, r_tmp.GetReg(), r_tmp.GetReg(), r_tmp_high.GetReg(), 0);
     }
     FreeTemp(r_tmp_high);  // Now unneeded
 
     DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
     OpIT(kCondEq, "T");
-    NewLIR4(kThumb2Strexd /* eq */, r_tmp, rl_new_value.reg.GetReg(), rl_new_value.reg.GetHighReg(), r_ptr);
+    NewLIR4(kThumb2Strexd /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetLowReg(), rl_new_value.reg.GetHighReg(), r_ptr.GetReg());
 
   } else {
-    NewLIR3(kThumb2Ldrex, r_tmp, r_ptr, 0);
-    OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetReg());
+    NewLIR3(kThumb2Ldrex, r_tmp.GetReg(), r_ptr.GetReg(), 0);
+    OpRegReg(kOpSub, r_tmp, rl_expected.reg);
     DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
     OpIT(kCondEq, "T");
-    NewLIR4(kThumb2Strex /* eq */, r_tmp, rl_new_value.reg.GetReg(), r_ptr, 0);
+    NewLIR4(kThumb2Strex /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetReg(), r_ptr.GetReg(), 0);
   }
 
   // Still one conditional left from OpIT(kCondEq, "T") from either branch
@@ -712,16 +718,15 @@
   OpCondBranch(kCondEq, target);
 
   if (!load_early) {
-    FreeTemp(rl_expected.reg.GetReg());  // Now unneeded.
-    FreeTemp(rl_expected.reg.GetHighReg());  // Now unneeded.
+    FreeTemp(rl_expected.reg);  // Now unneeded.
   }
 
   // result := (tmp1 != 0) ? 0 : 1;
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), r_tmp, 1);
+  OpRegRegImm(kOpRsub, rl_result.reg, r_tmp, 1);
   DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
   OpIT(kCondUlt, "");
-  LoadConstant(rl_result.reg.GetReg(), 0); /* cc */
+  LoadConstant(rl_result.reg, 0); /* cc */
   FreeTemp(r_tmp);  // Now unneeded.
 
   StoreValue(rl_dest, rl_result);
@@ -732,16 +737,16 @@
   return true;
 }
 
-LIR* ArmMir2Lir::OpPcRelLoad(int reg, LIR* target) {
-  return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
+LIR* ArmMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+  return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg.GetReg(), 0, 0, 0, 0, target);
 }
 
-LIR* ArmMir2Lir::OpVldm(int rBase, int count) {
-  return NewLIR3(kThumb2Vldms, rBase, fr0, count);
+LIR* ArmMir2Lir::OpVldm(RegStorage r_base, int count) {
+  return NewLIR3(kThumb2Vldms, r_base.GetReg(), fr0, count);
 }
 
-LIR* ArmMir2Lir::OpVstm(int rBase, int count) {
-  return NewLIR3(kThumb2Vstms, rBase, fr0, count);
+LIR* ArmMir2Lir::OpVstm(RegStorage r_base, int count) {
+  return NewLIR3(kThumb2Vstms, r_base.GetReg(), fr0, count);
 }
 
 void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
@@ -750,13 +755,14 @@
   OpRegRegRegShift(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
                    EncodeShift(kArmLsl, second_bit - first_bit));
   if (first_bit != 0) {
-    OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
+    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
   }
 }
 
-void ArmMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) {
-  int t_reg = AllocTemp();
-  NewLIR4(kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
+void ArmMir2Lir::GenDivZeroCheck(RegStorage reg) {
+  DCHECK(reg.IsPair());  // TODO: support k64BitSolo.
+  RegStorage t_reg = AllocTemp();
+  NewLIR4(kThumb2OrrRRRs, t_reg.GetReg(), reg.GetLowReg(), reg.GetHighReg(), 0);
   FreeTemp(t_reg);
   GenCheck(kCondEq, kThrowDivZero);
 }
@@ -768,7 +774,7 @@
 }
 
 // Decrement register and branch on condition
-LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) {
+LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
   // Combine sub & test using sub setflags encoding here
   OpRegRegImm(kOpSub, reg, reg, 1);  // For value == 1, this should set flags.
   DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
@@ -808,17 +814,17 @@
 void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
   rl_src = LoadValueWide(rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  int z_reg = AllocTemp();
+  RegStorage z_reg = AllocTemp();
   LoadConstantNoClobber(z_reg, 0);
   // Check for destructive overlap
-  if (rl_result.reg.GetReg() == rl_src.reg.GetHighReg()) {
-    int t_reg = AllocTemp();
-    OpRegRegReg(kOpSub, rl_result.reg.GetReg(), z_reg, rl_src.reg.GetReg());
-    OpRegRegReg(kOpSbc, rl_result.reg.GetHighReg(), z_reg, t_reg);
+  if (rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
+    RegStorage t_reg = AllocTemp();
+    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
+    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, t_reg);
     FreeTemp(t_reg);
   } else {
-    OpRegRegReg(kOpSub, rl_result.reg.GetReg(), z_reg, rl_src.reg.GetReg());
-    OpRegRegReg(kOpSbc, rl_result.reg.GetHighReg(), z_reg, rl_src.reg.GetHighReg());
+    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
+    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, rl_src.reg.GetHigh());
   }
   FreeTemp(z_reg);
   StoreValueWide(rl_dest, rl_result);
@@ -854,19 +860,19 @@
     rl_src2 = LoadValueWide(rl_src2, kCoreReg);
 
     int reg_status = 0;
-    int res_lo = INVALID_REG;
-    int res_hi = INVALID_REG;
-    bool dest_promoted = rl_dest.location == kLocPhysReg && !rl_dest.reg.IsInvalid() &&
-        !IsTemp(rl_dest.reg.GetReg()) && !IsTemp(rl_dest.reg.GetHighReg());
-    bool src1_promoted = !IsTemp(rl_src1.reg.GetReg()) && !IsTemp(rl_src1.reg.GetHighReg());
-    bool src2_promoted = !IsTemp(rl_src2.reg.GetReg()) && !IsTemp(rl_src2.reg.GetHighReg());
+    RegStorage res_lo;
+    RegStorage res_hi;
+    bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
+        !IsTemp(rl_dest.reg.GetLowReg()) && !IsTemp(rl_dest.reg.GetHighReg());
+    bool src1_promoted = !IsTemp(rl_src1.reg.GetLowReg()) && !IsTemp(rl_src1.reg.GetHighReg());
+    bool src2_promoted = !IsTemp(rl_src2.reg.GetLowReg()) && !IsTemp(rl_src2.reg.GetHighReg());
     // Check if rl_dest is *not* either operand and we have enough temp registers.
     if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
         (dest_promoted || src1_promoted || src2_promoted)) {
       // In this case, we do not need to manually allocate temp registers for result.
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      res_lo = rl_result.reg.GetReg();
-      res_hi = rl_result.reg.GetHighReg();
+      res_lo = rl_result.reg.GetLow();
+      res_hi = rl_result.reg.GetHigh();
     } else {
       res_lo = AllocTemp();
       if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
@@ -883,34 +889,37 @@
     // Temporarily add LR to the temp pool, and assign it to tmp1
     MarkTemp(rARM_LR);
     FreeTemp(rARM_LR);
-    int tmp1 = rARM_LR;
+    RegStorage tmp1 = rs_rARM_LR;
     LockTemp(rARM_LR);
 
-    if (rl_src1.reg.GetReg() == rl_src2.reg.GetReg()) {
-      DCHECK_NE(res_hi, INVALID_REG);
-      DCHECK_NE(res_lo, INVALID_REG);
-      NewLIR3(kThumb2MulRRR, tmp1, rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
-      NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.reg.GetReg(), rl_src1.reg.GetReg());
-      OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
+    if (rl_src1.reg == rl_src2.reg) {
+      DCHECK(res_hi.Valid());
+      DCHECK(res_lo.Valid());
+      NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
+      NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
+              rl_src1.reg.GetLowReg());
+      OpRegRegRegShift(kOpAdd, res_hi.GetReg(), res_hi.GetReg(), tmp1.GetReg(),
+                       EncodeShift(kArmLsl, 1));
     } else {
-      NewLIR3(kThumb2MulRRR, tmp1, rl_src2.reg.GetReg(), rl_src1.reg.GetHighReg());
+      NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
       if (reg_status == 2) {
-        DCHECK_EQ(res_hi, INVALID_REG);
-        DCHECK_NE(rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+        DCHECK(!res_hi.Valid());
+        DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
         DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
         FreeTemp(rl_src1.reg.GetHighReg());
         res_hi = AllocTemp();
       }
-      DCHECK_NE(res_hi, INVALID_REG);
-      DCHECK_NE(res_lo, INVALID_REG);
-      NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
-      NewLIR4(kThumb2Mla, tmp1, rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg(), tmp1);
-      NewLIR4(kThumb2AddRRR, res_hi, tmp1, res_hi, 0);
+      DCHECK(res_hi.Valid());
+      DCHECK(res_lo.Valid());
+      NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
+              rl_src1.reg.GetLowReg());
+      NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
+              tmp1.GetReg());
+      NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
       if (reg_status == 2) {
         // Clobber rl_src1 since it was corrupted.
-        FreeTemp(rl_src1.reg.GetReg());
-        Clobber(rl_src1.reg.GetReg());
-        Clobber(rl_src1.reg.GetHighReg());
+        FreeTemp(rl_src1.reg);
+        Clobber(rl_src1.reg);
       }
     }
 
@@ -923,8 +932,7 @@
       // We had manually allocated registers for rl_result.
       // Now construct a RegLocation.
       rl_result = GetReturnWide(false);  // Just using as a template.
-      rl_result.reg.SetReg(res_lo);
-      rl_result.reg.SetHighReg(res_hi);
+      rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
     }
 
     StoreValueWide(rl_dest, rl_result);
@@ -982,26 +990,26 @@
   }
 
   /* null object? */
-  GenNullCheck(rl_array.reg.GetReg(), opt_flags);
+  GenNullCheck(rl_array.reg, opt_flags);
 
   bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
-  int reg_len = INVALID_REG;
+  RegStorage reg_len;
   if (needs_range_check) {
     reg_len = AllocTemp();
     /* Get len */
-    LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+    LoadWordDisp(rl_array.reg, len_offset, reg_len);
     MarkPossibleNullPointerException(opt_flags);
   } else {
-    ForceImplicitNullCheck(rl_array.reg.GetReg(), opt_flags);
+    ForceImplicitNullCheck(rl_array.reg, opt_flags);
   }
   if (rl_dest.wide || rl_dest.fp || constant_index) {
-    int reg_ptr;
+    RegStorage reg_ptr;
     if (constant_index) {
-      reg_ptr = rl_array.reg.GetReg();  // NOTE: must not alter reg_ptr in constant case.
+      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
     } else {
       // No special indexed operation, lea + load w/ displacement
       reg_ptr = AllocTemp();
-      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg.GetReg(), rl_index.reg.GetReg(),
+      OpRegRegRegShift(kOpAdd, reg_ptr.GetReg(), rl_array.reg.GetReg(), rl_index.reg.GetReg(),
                        EncodeShift(kArmLsl, scale));
       FreeTemp(rl_index.reg.GetReg());
     }
@@ -1011,20 +1019,19 @@
       if (constant_index) {
         GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
       } else {
-        GenRegRegCheck(kCondLs, reg_len, rl_index.reg.GetReg(), kThrowArrayBounds);
+        GenRegRegCheck(kCondLs, reg_len, rl_index.reg, kThrowArrayBounds);
       }
       FreeTemp(reg_len);
     }
     if (rl_dest.wide) {
-      LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
-        INVALID_SREG);
+      LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg, INVALID_SREG);
       MarkPossibleNullPointerException(opt_flags);
       if (!constant_index) {
         FreeTemp(reg_ptr);
       }
       StoreValueWide(rl_dest, rl_result);
     } else {
-      LoadBaseDisp(reg_ptr, data_offset, rl_result.reg.GetReg(), size, INVALID_SREG);
+      LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, INVALID_SREG);
       MarkPossibleNullPointerException(opt_flags);
       if (!constant_index) {
         FreeTemp(reg_ptr);
@@ -1033,16 +1040,16 @@
     }
   } else {
     // Offset base, then use indexed load
-    int reg_ptr = AllocTemp();
-    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
+    RegStorage reg_ptr = AllocTemp();
+    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
     FreeTemp(rl_array.reg.GetReg());
     rl_result = EvalLoc(rl_dest, reg_class, true);
 
     if (needs_range_check) {
-      GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+      GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
       FreeTemp(reg_len);
     }
-    LoadBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_result.reg.GetReg(), scale, size);
+    LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
     MarkPossibleNullPointerException(opt_flags);
     FreeTemp(reg_ptr);
     StoreValue(rl_dest, rl_result);
@@ -1076,31 +1083,31 @@
     rl_index = LoadValue(rl_index, kCoreReg);
   }
 
-  int reg_ptr;
+  RegStorage reg_ptr;
   bool allocated_reg_ptr_temp = false;
   if (constant_index) {
-    reg_ptr = rl_array.reg.GetReg();
+    reg_ptr = rl_array.reg;
   } else if (IsTemp(rl_array.reg.GetReg()) && !card_mark) {
     Clobber(rl_array.reg.GetReg());
-    reg_ptr = rl_array.reg.GetReg();
+    reg_ptr = rl_array.reg;
   } else {
     allocated_reg_ptr_temp = true;
     reg_ptr = AllocTemp();
   }
 
   /* null object? */
-  GenNullCheck(rl_array.reg.GetReg(), opt_flags);
+  GenNullCheck(rl_array.reg, opt_flags);
 
   bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
-  int reg_len = INVALID_REG;
+  RegStorage reg_len;
   if (needs_range_check) {
     reg_len = AllocTemp();
     // NOTE: max live temps(4) here.
     /* Get len */
-    LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+    LoadWordDisp(rl_array.reg, len_offset, reg_len);
     MarkPossibleNullPointerException(opt_flags);
   } else {
-    ForceImplicitNullCheck(rl_array.reg.GetReg(), opt_flags);
+    ForceImplicitNullCheck(rl_array.reg, opt_flags);
   }
   /* at this point, reg_ptr points to array, 2 live temps */
   if (rl_src.wide || rl_src.fp || constant_index) {
@@ -1110,41 +1117,40 @@
       rl_src = LoadValue(rl_src, reg_class);
     }
     if (!constant_index) {
-      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg.GetReg(), rl_index.reg.GetReg(),
+      OpRegRegRegShift(kOpAdd, reg_ptr.GetReg(), rl_array.reg.GetReg(), rl_index.reg.GetReg(),
                        EncodeShift(kArmLsl, scale));
     }
     if (needs_range_check) {
       if (constant_index) {
         GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
       } else {
-        GenRegRegCheck(kCondLs, reg_len, rl_index.reg.GetReg(), kThrowArrayBounds);
+        GenRegRegCheck(kCondLs, reg_len, rl_index.reg, kThrowArrayBounds);
       }
       FreeTemp(reg_len);
     }
 
     if (rl_src.wide) {
-      StoreBaseDispWide(reg_ptr, data_offset, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+      StoreBaseDispWide(reg_ptr, data_offset, rl_src.reg);
     } else {
-      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg.GetReg(), size);
+      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
     }
     MarkPossibleNullPointerException(opt_flags);
   } else {
     /* reg_ptr -> array data */
-    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
+    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
     rl_src = LoadValue(rl_src, reg_class);
     if (needs_range_check) {
-      GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+      GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
       FreeTemp(reg_len);
     }
-    StoreBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_src.reg.GetReg(),
-                     scale, size);
+    StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
     MarkPossibleNullPointerException(opt_flags);
   }
   if (allocated_reg_ptr_temp) {
     FreeTemp(reg_ptr);
   }
   if (card_mark) {
-    MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
+    MarkGCCard(rl_src.reg, rl_array.reg);
   }
 }
 
@@ -1167,53 +1173,53 @@
     case Instruction::SHL_LONG:
     case Instruction::SHL_LONG_2ADDR:
       if (shift_amount == 1) {
-        OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg());
-        OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), rl_src.reg.GetHighReg());
+        OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), rl_src.reg.GetLow());
+        OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), rl_src.reg.GetHigh());
       } else if (shift_amount == 32) {
-        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
-        LoadConstant(rl_result.reg.GetReg(), 0);
+        OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg);
+        LoadConstant(rl_result.reg.GetLow(), 0);
       } else if (shift_amount > 31) {
-        OpRegRegImm(kOpLsl, rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), shift_amount - 32);
-        LoadConstant(rl_result.reg.GetReg(), 0);
+        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetLow(), shift_amount - 32);
+        LoadConstant(rl_result.reg.GetLow(), 0);
       } else {
-        OpRegRegImm(kOpLsl, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
-        OpRegRegRegShift(kOpOr, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(),
+        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
+        OpRegRegRegShift(kOpOr, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetLowReg(),
                          EncodeShift(kArmLsr, 32 - shift_amount));
-        OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_src.reg.GetReg(), shift_amount);
+        OpRegRegImm(kOpLsl, rl_result.reg.GetLow(), rl_src.reg.GetLow(), shift_amount);
       }
       break;
     case Instruction::SHR_LONG:
     case Instruction::SHR_LONG_2ADDR:
       if (shift_amount == 32) {
-        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
-        OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 31);
+        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
+        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
       } else if (shift_amount > 31) {
-        OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), rl_src.reg.GetHighReg(), shift_amount - 32);
-        OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 31);
+        OpRegRegImm(kOpAsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
+        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
       } else {
-        int t_reg = AllocTemp();
-        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), shift_amount);
-        OpRegRegRegShift(kOpOr, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetHighReg(),
+        RegStorage t_reg = AllocTemp();
+        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
+        OpRegRegRegShift(kOpOr, rl_result.reg.GetLowReg(), t_reg.GetReg(), rl_src.reg.GetHighReg(),
                          EncodeShift(kArmLsl, 32 - shift_amount));
         FreeTemp(t_reg);
-        OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
+        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
       }
       break;
     case Instruction::USHR_LONG:
     case Instruction::USHR_LONG_2ADDR:
       if (shift_amount == 32) {
-        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
-        LoadConstant(rl_result.reg.GetHighReg(), 0);
+        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
+        LoadConstant(rl_result.reg.GetHigh(), 0);
       } else if (shift_amount > 31) {
-        OpRegRegImm(kOpLsr, rl_result.reg.GetReg(), rl_src.reg.GetHighReg(), shift_amount - 32);
-        LoadConstant(rl_result.reg.GetHighReg(), 0);
+        OpRegRegImm(kOpLsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
+        LoadConstant(rl_result.reg.GetHigh(), 0);
       } else {
-        int t_reg = AllocTemp();
-        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), shift_amount);
-        OpRegRegRegShift(kOpOr, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetHighReg(),
+        RegStorage t_reg = AllocTemp();
+        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
+        OpRegRegRegShift(kOpOr, rl_result.reg.GetLowReg(), t_reg.GetReg(), rl_src.reg.GetHighReg(),
                          EncodeShift(kArmLsl, 32 - shift_amount));
         FreeTemp(t_reg);
-        OpRegRegImm(kOpLsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
+        OpRegRegImm(kOpLsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
       }
       break;
     default:
@@ -1268,35 +1274,35 @@
   switch (opcode) {
     case Instruction::ADD_LONG:
     case Instruction::ADD_LONG_2ADDR:
-      NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), mod_imm_lo);
+      NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
       NewLIR3(kThumb2AdcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
       break;
     case Instruction::OR_LONG:
     case Instruction::OR_LONG_2ADDR:
-      if ((val_lo != 0) || (rl_result.reg.GetReg() != rl_src1.reg.GetReg())) {
-        OpRegRegImm(kOpOr, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
+      if ((val_lo != 0) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
+        OpRegRegImm(kOpOr, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
       }
       if ((val_hi != 0) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
-        OpRegRegImm(kOpOr, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
+        OpRegRegImm(kOpOr, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
       }
       break;
     case Instruction::XOR_LONG:
     case Instruction::XOR_LONG_2ADDR:
-      OpRegRegImm(kOpXor, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
-      OpRegRegImm(kOpXor, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
+      OpRegRegImm(kOpXor, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
+      OpRegRegImm(kOpXor, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
       break;
     case Instruction::AND_LONG:
     case Instruction::AND_LONG_2ADDR:
-      if ((val_lo != 0xffffffff) || (rl_result.reg.GetReg() != rl_src1.reg.GetReg())) {
-        OpRegRegImm(kOpAnd, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
+      if ((val_lo != 0xffffffff) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
+        OpRegRegImm(kOpAnd, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
       }
       if ((val_hi != 0xffffffff) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
-        OpRegRegImm(kOpAnd, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
+        OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
       }
       break;
     case Instruction::SUB_LONG_2ADDR:
     case Instruction::SUB_LONG:
-      NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), mod_imm_lo);
+      NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
       NewLIR3(kThumb2SbcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
       break;
     default:
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 7f8656a..5bab0e3 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -53,43 +53,43 @@
 }
 
 // Return a target-dependent special register.
-int ArmMir2Lir::TargetReg(SpecialTargetRegister reg) {
-  int res = INVALID_REG;
+RegStorage ArmMir2Lir::TargetReg(SpecialTargetRegister reg) {
+  int res_reg = RegStorage::kInvalidRegVal;
   switch (reg) {
-    case kSelf: res = rARM_SELF; break;
-    case kSuspend: res =  rARM_SUSPEND; break;
-    case kLr: res =  rARM_LR; break;
-    case kPc: res =  rARM_PC; break;
-    case kSp: res =  rARM_SP; break;
-    case kArg0: res = rARM_ARG0; break;
-    case kArg1: res = rARM_ARG1; break;
-    case kArg2: res = rARM_ARG2; break;
-    case kArg3: res = rARM_ARG3; break;
-    case kFArg0: res = rARM_FARG0; break;
-    case kFArg1: res = rARM_FARG1; break;
-    case kFArg2: res = rARM_FARG2; break;
-    case kFArg3: res = rARM_FARG3; break;
-    case kRet0: res = rARM_RET0; break;
-    case kRet1: res = rARM_RET1; break;
-    case kInvokeTgt: res = rARM_INVOKE_TGT; break;
-    case kHiddenArg: res = r12; break;
-    case kHiddenFpArg: res = INVALID_REG; break;
-    case kCount: res = rARM_COUNT; break;
+    case kSelf: res_reg = rARM_SELF; break;
+    case kSuspend: res_reg = rARM_SUSPEND; break;
+    case kLr: res_reg = rARM_LR; break;
+    case kPc: res_reg = rARM_PC; break;
+    case kSp: res_reg = rARM_SP; break;
+    case kArg0: res_reg = rARM_ARG0; break;
+    case kArg1: res_reg = rARM_ARG1; break;
+    case kArg2: res_reg = rARM_ARG2; break;
+    case kArg3: res_reg = rARM_ARG3; break;
+    case kFArg0: res_reg = rARM_FARG0; break;
+    case kFArg1: res_reg = rARM_FARG1; break;
+    case kFArg2: res_reg = rARM_FARG2; break;
+    case kFArg3: res_reg = rARM_FARG3; break;
+    case kRet0: res_reg = rARM_RET0; break;
+    case kRet1: res_reg = rARM_RET1; break;
+    case kInvokeTgt: res_reg = rARM_INVOKE_TGT; break;
+    case kHiddenArg: res_reg = r12; break;
+    case kHiddenFpArg: res_reg = RegStorage::kInvalidRegVal; break;
+    case kCount: res_reg = rARM_COUNT; break;
   }
-  return res;
+  return RegStorage::Solo32(res_reg);
 }
 
-int ArmMir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
+RegStorage ArmMir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
   // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
   switch (arg_num) {
     case 0:
-      return rARM_ARG1;
+      return rs_rARM_ARG1;
     case 1:
-      return rARM_ARG2;
+      return rs_rARM_ARG2;
     case 2:
-      return rARM_ARG3;
+      return rs_rARM_ARG3;
     default:
-      return INVALID_REG;
+      return RegStorage::InvalidReg();
   }
 }
 
@@ -528,20 +528,16 @@
 
 // Alloc a pair of core registers, or a double.
 RegStorage ArmMir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
-  int high_reg;
-  int low_reg;
-
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
-    low_reg = AllocTempDouble();
-    high_reg = low_reg + 1;
+    return AllocTempDouble();
   } else {
-    low_reg = AllocTemp();
-    high_reg = AllocTemp();
+    RegStorage low_reg = AllocTemp();
+    RegStorage high_reg = AllocTemp();
+    return RegStorage::MakeRegPair(low_reg, high_reg);
   }
-  return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
 }
 
-int ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
+RegStorage ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg))
     return AllocTempFloat();
   return AllocTemp();
@@ -583,15 +579,18 @@
   reg_pool_->next_core_reg = r2;
 }
 
-void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep,
-                     RegLocation rl_free) {
-  if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
-    (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
-    // No overlap, free both
-    FreeTemp(rl_free.reg.GetReg());
-    FreeTemp(rl_free.reg.GetHighReg());
+void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
+  DCHECK(rl_keep.wide);
+  DCHECK(rl_free.wide);
+  if ((rl_free.reg.GetLowReg() != rl_keep.reg.GetLowReg()) &&
+      (rl_free.reg.GetLowReg() != rl_keep.reg.GetHighReg()) &&
+      (rl_free.reg.GetHighReg() != rl_keep.reg.GetLowReg()) &&
+      (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
+    // No overlap, free.
+    FreeTemp(rl_free.reg);
   }
 }
+
 /*
  * TUNING: is true leaf?  Can't just use METHOD_IS_LEAF to determine as some
  * instructions might call out to C/assembly helper functions.  Until
@@ -624,9 +623,9 @@
   fp_spill_mask_ = ((1 << num_fp_spills_) - 1) << ARM_FP_CALLEE_SAVE_BASE;
 }
 
-void ArmMir2Lir::FlushRegWide(int reg1, int reg2) {
-  RegisterInfo* info1 = GetRegInfo(reg1);
-  RegisterInfo* info2 = GetRegInfo(reg2);
+void ArmMir2Lir::FlushRegWide(RegStorage reg) {
+  RegisterInfo* info1 = GetRegInfo(reg.GetLowReg());
+  RegisterInfo* info2 = GetRegInfo(reg.GetHighReg());
   DCHECK(info1 && info2 && info1->pair && info2->pair &&
        (info1->partner == info2->reg) &&
        (info2->partner == info1->reg));
@@ -642,16 +641,18 @@
       mir_graph_->SRegToVReg(info1->s_reg))
       info1 = info2;
     int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
-    StoreBaseDispWide(rARM_SP, VRegOffset(v_reg), info1->reg, info1->partner);
+    StoreBaseDispWide(rs_rARM_SP, VRegOffset(v_reg),
+                      RegStorage(RegStorage::k64BitPair, info1->reg, info1->partner));
   }
 }
 
-void ArmMir2Lir::FlushReg(int reg) {
-  RegisterInfo* info = GetRegInfo(reg);
+void ArmMir2Lir::FlushReg(RegStorage reg) {
+  DCHECK(!reg.IsPair());
+  RegisterInfo* info = GetRegInfo(reg.GetReg());
   if (info->live && info->dirty) {
     info->dirty = false;
     int v_reg = mir_graph_->SRegToVReg(info->s_reg);
-    StoreBaseDisp(rARM_SP, VRegOffset(v_reg), reg, kWord);
+    StoreBaseDisp(rs_rARM_SP, VRegOffset(v_reg), reg, kWord);
   }
 }
 
@@ -660,6 +661,10 @@
   return ARM_FPREG(reg);
 }
 
+bool ArmMir2Lir::IsFpReg(RegStorage reg) {
+  return IsFpReg(reg.IsPair() ? reg.GetLowReg() : reg.GetReg());
+}
+
 /* Clobber all regs that might be used by an external C call */
 void ArmMir2Lir::ClobberCallerSave() {
   Clobber(r0);
@@ -694,7 +699,7 @@
   Clobber(r3);
   MarkInUse(r2);
   MarkInUse(r3);
-  MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
+  MarkPair(res.reg.GetLowReg(), res.reg.GetHighReg());
   return res;
 }
 
@@ -722,14 +727,14 @@
   FreeTemp(r3);
 }
 
-int ArmMir2Lir::LoadHelper(ThreadOffset offset) {
-  LoadWordDisp(rARM_SELF, offset.Int32Value(), rARM_LR);
-  return rARM_LR;
+RegStorage ArmMir2Lir::LoadHelper(ThreadOffset offset) {
+  LoadWordDisp(rs_rARM_SELF, offset.Int32Value(), rs_rARM_LR);
+  return rs_rARM_LR;
 }
 
 LIR* ArmMir2Lir::CheckSuspendUsingLoad() {
-  int tmp = r0;
-  LoadWordDisp(rARM_SELF, Thread::ThreadSuspendTriggerOffset().Int32Value(), tmp);
+  RegStorage tmp = rs_r0;
+  LoadWordDisp(rs_rARM_SELF, Thread::ThreadSuspendTriggerOffset().Int32Value(), tmp);
   LIR* load2 = LoadWordDisp(tmp, 0, tmp);
   return load2;
 }
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 1a7f2fc..1ec0a2c 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -169,37 +169,37 @@
  * 1) r_dest is freshly returned from AllocTemp or
  * 2) The codegen is under fixed register usage
  */
-LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value) {
+LIR* ArmMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
   LIR* res;
   int mod_imm;
 
-  if (ARM_FPREG(r_dest)) {
-    return LoadFPConstantValue(r_dest, value);
+  if (ARM_FPREG(r_dest.GetReg())) {
+    return LoadFPConstantValue(r_dest.GetReg(), value);
   }
 
   /* See if the value can be constructed cheaply */
-  if (ARM_LOWREG(r_dest) && (value >= 0) && (value <= 255)) {
-    return NewLIR2(kThumbMovImm, r_dest, value);
+  if (ARM_LOWREG(r_dest.GetReg()) && (value >= 0) && (value <= 255)) {
+    return NewLIR2(kThumbMovImm, r_dest.GetReg(), value);
   }
   /* Check Modified immediate special cases */
   mod_imm = ModifiedImmediate(value);
   if (mod_imm >= 0) {
-    res = NewLIR2(kThumb2MovI8M, r_dest, mod_imm);
+    res = NewLIR2(kThumb2MovI8M, r_dest.GetReg(), mod_imm);
     return res;
   }
   mod_imm = ModifiedImmediate(~value);
   if (mod_imm >= 0) {
-    res = NewLIR2(kThumb2MvnI8M, r_dest, mod_imm);
+    res = NewLIR2(kThumb2MvnI8M, r_dest.GetReg(), mod_imm);
     return res;
   }
   /* 16-bit immediate? */
   if ((value & 0xffff) == value) {
-    res = NewLIR2(kThumb2MovImm16, r_dest, value);
+    res = NewLIR2(kThumb2MovImm16, r_dest.GetReg(), value);
     return res;
   }
   /* Do a low/high pair */
-  res = NewLIR2(kThumb2MovImm16, r_dest, Low16Bits(value));
-  NewLIR2(kThumb2MovImm16H, r_dest, High16Bits(value));
+  res = NewLIR2(kThumb2MovImm16, r_dest.GetReg(), Low16Bits(value));
+  NewLIR2(kThumb2MovImm16H, r_dest.GetReg(), High16Bits(value));
   return res;
 }
 
@@ -219,7 +219,7 @@
   return branch;
 }
 
-LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src) {
+LIR* ArmMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
   ArmOpcode opcode = kThumbBkpt;
   switch (op) {
     case kOpBlx:
@@ -231,7 +231,7 @@
     default:
       LOG(FATAL) << "Bad opcode " << op;
   }
-  return NewLIR1(opcode, r_dest_src);
+  return NewLIR1(opcode, r_dest_src.GetReg());
 }
 
 LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int r_dest_src1, int r_src2,
@@ -366,21 +366,21 @@
   }
 }
 
-LIR* ArmMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) {
-  return OpRegRegShift(op, r_dest_src1, r_src2, 0);
+LIR* ArmMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
+  return OpRegRegShift(op, r_dest_src1.GetReg(), r_src2.GetReg(), 0);
 }
 
-LIR* ArmMir2Lir::OpMovRegMem(int r_dest, int r_base, int offset, MoveType move_type) {
+LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
   UNIMPLEMENTED(FATAL);
   return nullptr;
 }
 
-LIR* ArmMir2Lir::OpMovMemReg(int r_base, int offset, int r_src, MoveType move_type) {
+LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
   UNIMPLEMENTED(FATAL);
   return nullptr;
 }
 
-LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src) {
+LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
   LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm";
   return NULL;
 }
@@ -455,44 +455,44 @@
   }
 }
 
-LIR* ArmMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) {
-  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0);
+LIR* ArmMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
+  return OpRegRegRegShift(op, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), 0);
 }
 
-LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
+LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
   LIR* res;
   bool neg = (value < 0);
   int32_t abs_value = (neg) ? -value : value;
   ArmOpcode opcode = kThumbBkpt;
   ArmOpcode alt_opcode = kThumbBkpt;
-  bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
+  bool all_low_regs = (ARM_LOWREG(r_dest.GetReg()) && ARM_LOWREG(r_src1.GetReg()));
   int32_t mod_imm = ModifiedImmediate(value);
 
   switch (op) {
     case kOpLsl:
       if (all_low_regs)
-        return NewLIR3(kThumbLslRRI5, r_dest, r_src1, value);
+        return NewLIR3(kThumbLslRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
       else
-        return NewLIR3(kThumb2LslRRI5, r_dest, r_src1, value);
+        return NewLIR3(kThumb2LslRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
     case kOpLsr:
       if (all_low_regs)
-        return NewLIR3(kThumbLsrRRI5, r_dest, r_src1, value);
+        return NewLIR3(kThumbLsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
       else
-        return NewLIR3(kThumb2LsrRRI5, r_dest, r_src1, value);
+        return NewLIR3(kThumb2LsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
     case kOpAsr:
       if (all_low_regs)
-        return NewLIR3(kThumbAsrRRI5, r_dest, r_src1, value);
+        return NewLIR3(kThumbAsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
       else
-        return NewLIR3(kThumb2AsrRRI5, r_dest, r_src1, value);
+        return NewLIR3(kThumb2AsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
     case kOpRor:
-      return NewLIR3(kThumb2RorRRI5, r_dest, r_src1, value);
+      return NewLIR3(kThumb2RorRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
     case kOpAdd:
-      if (ARM_LOWREG(r_dest) && (r_src1 == r13sp) &&
+      if (ARM_LOWREG(r_dest.GetReg()) && (r_src1 == rs_r13sp) &&
         (value <= 1020) && ((value & 0x3) == 0)) {
-        return NewLIR3(kThumbAddSpRel, r_dest, r_src1, value >> 2);
-      } else if (ARM_LOWREG(r_dest) && (r_src1 == r15pc) &&
+        return NewLIR3(kThumbAddSpRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
+      } else if (ARM_LOWREG(r_dest.GetReg()) && (r_src1 == rs_r15pc) &&
           (value <= 1020) && ((value & 0x3) == 0)) {
-        return NewLIR3(kThumbAddPcRel, r_dest, r_src1, value >> 2);
+        return NewLIR3(kThumbAddPcRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
       }
       // Note: intentional fallthrough
     case kOpSub:
@@ -501,7 +501,7 @@
           opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
         else
           opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
-        return NewLIR3(opcode, r_dest, r_src1, abs_value);
+        return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), abs_value);
       }
       if (mod_imm < 0) {
         mod_imm = ModifiedImmediate(-value);
@@ -516,7 +516,7 @@
           opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
         else
           opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
-        return NewLIR3(opcode, r_dest, r_src1, abs_value);
+        return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), abs_value);
       }
       if (op == kOpSub) {
         opcode = kThumb2SubRRI8M;
@@ -546,7 +546,7 @@
       if (mod_imm < 0) {
         mod_imm = ModifiedImmediate(~value);
         if (mod_imm >= 0) {
-          return NewLIR3(kThumb2BicRRI8M, r_dest, r_src1, mod_imm);
+          return NewLIR3(kThumb2BicRRI8M, r_dest.GetReg(), r_src1.GetReg(), mod_imm);
         }
       }
       opcode = kThumb2AndRRI8M;
@@ -564,13 +564,13 @@
     case kOpCmp: {
       LIR* res;
       if (mod_imm >= 0) {
-        res = NewLIR2(kThumb2CmpRI8M, r_src1, mod_imm);
+        res = NewLIR2(kThumb2CmpRI8M, r_src1.GetReg(), mod_imm);
       } else {
         mod_imm = ModifiedImmediate(-value);
         if (mod_imm >= 0) {
-          res = NewLIR2(kThumb2CmnRI8M, r_src1, mod_imm);
+          res = NewLIR2(kThumb2CmnRI8M, r_src1.GetReg(), mod_imm);
         } else {
-          int r_tmp = AllocTemp();
+          RegStorage r_tmp = AllocTemp();
           res = LoadConstant(r_tmp, value);
           OpRegReg(kOpCmp, r_src1, r_tmp);
           FreeTemp(r_tmp);
@@ -583,28 +583,28 @@
   }
 
   if (mod_imm >= 0) {
-    return NewLIR3(opcode, r_dest, r_src1, mod_imm);
+    return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), mod_imm);
   } else {
-    int r_scratch = AllocTemp();
+    RegStorage r_scratch = AllocTemp();
     LoadConstant(r_scratch, value);
     if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
-      res = NewLIR4(alt_opcode, r_dest, r_src1, r_scratch, 0);
+      res = NewLIR4(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0);
     else
-      res = NewLIR3(alt_opcode, r_dest, r_src1, r_scratch);
+      res = NewLIR3(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
     FreeTemp(r_scratch);
     return res;
   }
 }
 
 /* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
-LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
+LIR* ArmMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
   bool neg = (value < 0);
   int32_t abs_value = (neg) ? -value : value;
-  bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
+  bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1.GetReg()));
   ArmOpcode opcode = kThumbBkpt;
   switch (op) {
     case kOpAdd:
-      if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
+      if (!neg && (r_dest_src1 == rs_r13sp) && (value <= 508)) { /* sp */
         DCHECK_EQ((value & 0x3), 0);
         return NewLIR1(kThumbAddSpI7, value >> 2);
       } else if (short_form) {
@@ -612,7 +612,7 @@
       }
       break;
     case kOpSub:
-      if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
+      if (!neg && (r_dest_src1 == rs_r13sp) && (value <= 508)) { /* sp */
         DCHECK_EQ((value & 0x3), 0);
         return NewLIR1(kThumbSubSpI7, value >> 2);
       } else if (short_form) {
@@ -632,18 +632,18 @@
       break;
   }
   if (short_form) {
-    return NewLIR2(opcode, r_dest_src1, abs_value);
+    return NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
   } else {
     return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
   }
 }
 
-LIR* ArmMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
+LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
   LIR* res = NULL;
   int32_t val_lo = Low32Bits(value);
   int32_t val_hi = High32Bits(value);
-  int target_reg = S2d(r_dest_lo, r_dest_hi);
-  if (ARM_FPREG(r_dest_lo)) {
+  int target_reg = S2d(r_dest.GetLowReg(), r_dest.GetHighReg());
+  if (ARM_FPREG(r_dest.GetLowReg())) {
     if ((val_lo == 0) && (val_hi == 0)) {
       // TODO: we need better info about the target CPU.  a vector exclusive or
       //       would probably be better here if we could rely on its existance.
@@ -659,8 +659,8 @@
     }
   } else {
     if ((InexpensiveConstantInt(val_lo) && (InexpensiveConstantInt(val_hi)))) {
-      res = LoadConstantNoClobber(r_dest_lo, val_lo);
-      LoadConstantNoClobber(r_dest_hi, val_hi);
+      res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
+      LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
     }
   }
   if (res == NULL) {
@@ -669,12 +669,13 @@
     if (data_target == NULL) {
       data_target = AddWideData(&literal_list_, val_lo, val_hi);
     }
-    if (ARM_FPREG(r_dest_lo)) {
+    if (ARM_FPREG(r_dest.GetLowReg())) {
       res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
                    target_reg, r15pc, 0, 0, 0, data_target);
     } else {
+      DCHECK(r_dest.IsPair());
       res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
-                   r_dest_lo, r_dest_hi, r15pc, 0, 0, data_target);
+                   r_dest.GetLowReg(), r_dest.GetHighReg(), r15pc, 0, 0, data_target);
     }
     SetMemRefType(res, true, kLiteral);
     AppendLIR(res);
@@ -686,23 +687,24 @@
   return ((amount & 0x1f) << 2) | code;
 }
 
-LIR* ArmMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest,
+LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                  int scale, OpSize size) {
-  bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest);
+  bool all_low_regs = ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_index.GetReg()) &&
+      ARM_LOWREG(r_dest.GetReg());
   LIR* load;
   ArmOpcode opcode = kThumbBkpt;
   bool thumb_form = (all_low_regs && (scale == 0));
-  int reg_ptr;
+  RegStorage reg_ptr;
 
-  if (ARM_FPREG(r_dest)) {
-    if (ARM_SINGLEREG(r_dest)) {
+  if (ARM_FPREG(r_dest.GetReg())) {
+    if (ARM_SINGLEREG(r_dest.GetReg())) {
       DCHECK((size == kWord) || (size == kSingle));
       opcode = kThumb2Vldrs;
       size = kSingle;
     } else {
-      DCHECK(ARM_DOUBLEREG(r_dest));
+      DCHECK(ARM_DOUBLEREG(r_dest.GetReg()));
       DCHECK((size == kLong) || (size == kDouble));
-      DCHECK_EQ((r_dest & 0x1), 0);
+      DCHECK_EQ((r_dest.GetReg() & 0x1), 0);
       opcode = kThumb2Vldrd;
       size = kDouble;
     }
@@ -716,12 +718,12 @@
     case kSingle:
       reg_ptr = AllocTemp();
       if (scale) {
-        NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
+        NewLIR4(kThumb2AddRRR, reg_ptr.GetReg(), r_base.GetReg(), r_index.GetReg(),
                 EncodeShift(kArmLsl, scale));
       } else {
-        OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
+        OpRegRegReg(kOpAdd, reg_ptr, r_base, r_index);
       }
-      load = NewLIR3(opcode, r_dest, reg_ptr, 0);
+      load = NewLIR3(opcode, r_dest.GetReg(), reg_ptr.GetReg(), 0);
       FreeTemp(reg_ptr);
       return load;
     case kWord:
@@ -743,30 +745,31 @@
       LOG(FATAL) << "Bad size: " << size;
   }
   if (thumb_form)
-    load = NewLIR3(opcode, r_dest, rBase, r_index);
+    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
   else
-    load = NewLIR4(opcode, r_dest, rBase, r_index, scale);
+    load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
 
   return load;
 }
 
-LIR* ArmMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
+LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                   int scale, OpSize size) {
-  bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src);
+  bool all_low_regs = ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_index.GetReg()) &&
+      ARM_LOWREG(r_src.GetReg());
   LIR* store = NULL;
   ArmOpcode opcode = kThumbBkpt;
   bool thumb_form = (all_low_regs && (scale == 0));
-  int reg_ptr;
+  RegStorage reg_ptr;
 
-  if (ARM_FPREG(r_src)) {
-    if (ARM_SINGLEREG(r_src)) {
+  if (ARM_FPREG(r_src.GetReg())) {
+    if (ARM_SINGLEREG(r_src.GetReg())) {
       DCHECK((size == kWord) || (size == kSingle));
       opcode = kThumb2Vstrs;
       size = kSingle;
     } else {
-      DCHECK(ARM_DOUBLEREG(r_src));
+      DCHECK(ARM_DOUBLEREG(r_src.GetReg()));
       DCHECK((size == kLong) || (size == kDouble));
-      DCHECK_EQ((r_src & 0x1), 0);
+      DCHECK_EQ((r_src.GetReg() & 0x1), 0);
       opcode = kThumb2Vstrd;
       size = kDouble;
     }
@@ -780,12 +783,12 @@
     case kSingle:
       reg_ptr = AllocTemp();
       if (scale) {
-        NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
+        NewLIR4(kThumb2AddRRR, reg_ptr.GetReg(), r_base.GetReg(), r_index.GetReg(),
                 EncodeShift(kArmLsl, scale));
       } else {
-        OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
+        OpRegRegReg(kOpAdd, reg_ptr, r_base, r_index);
       }
-      store = NewLIR3(opcode, r_src, reg_ptr, 0);
+      store = NewLIR3(opcode, r_src.GetReg(), reg_ptr.GetReg(), 0);
       FreeTemp(reg_ptr);
       return store;
     case kWord:
@@ -803,9 +806,9 @@
       LOG(FATAL) << "Bad size: " << size;
   }
   if (thumb_form)
-    store = NewLIR3(opcode, r_src, rBase, r_index);
+    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
   else
-    store = NewLIR4(opcode, r_src, rBase, r_index, scale);
+    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
 
   return store;
 }
@@ -815,45 +818,44 @@
  * on base (which must have an associated s_reg and MIR).  If not
  * performing null check, incoming MIR can be null.
  */
-LIR* ArmMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest,
-                                  int r_dest_hi, OpSize size, int s_reg) {
+LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
+                                  OpSize size, int s_reg) {
   LIR* load = NULL;
   ArmOpcode opcode = kThumbBkpt;
   bool short_form = false;
   bool thumb2Form = (displacement < 4092 && displacement >= 0);
-  bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_dest));
+  bool all_low = r_dest.Is32Bit() && (ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_dest.GetReg()));
   int encoded_disp = displacement;
-  bool is64bit = false;
   bool already_generated = false;
+  int dest_low_reg = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
   switch (size) {
     case kDouble:
     case kLong:
-      is64bit = true;
-      if (ARM_FPREG(r_dest)) {
-        if (ARM_SINGLEREG(r_dest)) {
-          DCHECK(ARM_FPREG(r_dest_hi));
-          r_dest = S2d(r_dest, r_dest_hi);
+      if (ARM_FPREG(dest_low_reg)) {
+        // Note: following change to avoid using pairs for doubles, replace conversion w/ DCHECK.
+        if (r_dest.IsPair()) {
+          DCHECK(ARM_FPREG(r_dest.GetHighReg()));
+          r_dest = RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg()));
         }
         opcode = kThumb2Vldrd;
         if (displacement <= 1020) {
           short_form = true;
           encoded_disp >>= 2;
         }
-        break;
       } else {
         if (displacement <= 1020) {
-          load = NewLIR4(kThumb2LdrdI8, r_dest, r_dest_hi, rBase, displacement >> 2);
+          load = NewLIR4(kThumb2LdrdI8, r_dest.GetLowReg(), r_dest.GetHighReg(), r_base.GetReg(),
+                         displacement >> 2);
         } else {
-          load = LoadBaseDispBody(rBase, displacement, r_dest,
-                                 -1, kWord, s_reg);
-          LoadBaseDispBody(rBase, displacement + 4, r_dest_hi,
-                           -1, kWord, INVALID_SREG);
+          load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), kWord, s_reg);
+          LoadBaseDispBody(r_base, displacement + 4, r_dest.GetHigh(), kWord, INVALID_SREG);
         }
         already_generated = true;
       }
+      break;
     case kSingle:
     case kWord:
-      if (ARM_FPREG(r_dest)) {
+      if (ARM_FPREG(r_dest.GetReg())) {
         opcode = kThumb2Vldrs;
         if (displacement <= 1020) {
           short_form = true;
@@ -861,17 +863,17 @@
         }
         break;
       }
-      if (ARM_LOWREG(r_dest) && (rBase == r15pc) &&
+      if (ARM_LOWREG(r_dest.GetReg()) && (r_base.GetReg() == r15pc) &&
           (displacement <= 1020) && (displacement >= 0)) {
         short_form = true;
         encoded_disp >>= 2;
         opcode = kThumbLdrPcRel;
-      } else if (ARM_LOWREG(r_dest) && (rBase == r13sp) &&
+      } else if (ARM_LOWREG(r_dest.GetReg()) && (r_base.GetReg() == r13sp) &&
           (displacement <= 1020) && (displacement >= 0)) {
         short_form = true;
         encoded_disp >>= 2;
         opcode = kThumbLdrSpRel;
-      } else if (all_low_regs && displacement < 128 && displacement >= 0) {
+      } else if (all_low && displacement < 128 && displacement >= 0) {
         DCHECK_EQ((displacement & 0x3), 0);
         short_form = true;
         encoded_disp >>= 2;
@@ -882,7 +884,7 @@
       }
       break;
     case kUnsignedHalf:
-      if (all_low_regs && displacement < 64 && displacement >= 0) {
+      if (all_low && displacement < 64 && displacement >= 0) {
         DCHECK_EQ((displacement & 0x1), 0);
         short_form = true;
         encoded_disp >>= 1;
@@ -899,7 +901,7 @@
       }
       break;
     case kUnsignedByte:
-      if (all_low_regs && displacement < 32 && displacement >= 0) {
+      if (all_low && displacement < 32 && displacement >= 0) {
         short_form = true;
         opcode = kThumbLdrbRRI5;
       } else if (thumb2Form) {
@@ -919,65 +921,67 @@
 
   if (!already_generated) {
     if (short_form) {
-      load = NewLIR3(opcode, r_dest, rBase, encoded_disp);
+      load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), encoded_disp);
     } else {
-      int reg_offset = AllocTemp();
+      RegStorage reg_offset = AllocTemp();
       LoadConstant(reg_offset, encoded_disp);
-      if (ARM_FPREG(r_dest)) {
+      if (ARM_FPREG(dest_low_reg)) {
         // No index ops - must use a long sequence.  Turn the offset into a direct pointer.
-        OpRegReg(kOpAdd, reg_offset, rBase);
-        load = LoadBaseDispBody(reg_offset, 0, r_dest, r_dest_hi, size, s_reg);
+        OpRegReg(kOpAdd, reg_offset, r_base);
+        load = LoadBaseDispBody(reg_offset, 0, r_dest, size, s_reg);
       } else {
-        load = LoadBaseIndexed(rBase, reg_offset, r_dest, 0, size);
+        load = LoadBaseIndexed(r_base, reg_offset, r_dest, 0, size);
       }
       FreeTemp(reg_offset);
     }
   }
 
   // TODO: in future may need to differentiate Dalvik accesses w/ spills
-  if (rBase == rARM_SP) {
-    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, is64bit);
+  if (r_base == rs_rARM_SP) {
+    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
   }
   return load;
 }
 
-LIR* ArmMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest,
-                              OpSize size, int s_reg) {
-  return LoadBaseDispBody(rBase, displacement, r_dest, -1, size, s_reg);
+LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+                              int s_reg) {
+  DCHECK(!((size == kLong) || (size == kDouble)));
+  return LoadBaseDispBody(r_base, displacement, r_dest, size, s_reg);
 }
 
-LIR* ArmMir2Lir::LoadBaseDispWide(int rBase, int displacement, int r_dest_lo,
-                                  int r_dest_hi, int s_reg) {
-  return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
+LIR* ArmMir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
+                                  int s_reg) {
+  return LoadBaseDispBody(r_base, displacement, r_dest, kLong, s_reg);
 }
 
 
-LIR* ArmMir2Lir::StoreBaseDispBody(int rBase, int displacement,
-                                   int r_src, int r_src_hi, OpSize size) {
+LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
+                                   OpSize size) {
   LIR* store = NULL;
   ArmOpcode opcode = kThumbBkpt;
   bool short_form = false;
   bool thumb2Form = (displacement < 4092 && displacement >= 0);
-  bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_src));
+  bool all_low = r_src.Is32Bit() && (ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_src.GetReg()));
   int encoded_disp = displacement;
-  bool is64bit = false;
   bool already_generated = false;
+  int src_low_reg = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();
   switch (size) {
     case kLong:
     case kDouble:
-      is64bit = true;
-      if (!ARM_FPREG(r_src)) {
+      if (!ARM_FPREG(src_low_reg)) {
         if (displacement <= 1020) {
-          store = NewLIR4(kThumb2StrdI8, r_src, r_src_hi, rBase, displacement >> 2);
+          store = NewLIR4(kThumb2StrdI8, r_src.GetLowReg(), r_src.GetHighReg(), r_base.GetReg(),
+                          displacement >> 2);
         } else {
-          store = StoreBaseDispBody(rBase, displacement, r_src, -1, kWord);
-          StoreBaseDispBody(rBase, displacement + 4, r_src_hi, -1, kWord);
+          store = StoreBaseDispBody(r_base, displacement, r_src.GetLow(), kWord);
+          StoreBaseDispBody(r_base, displacement + 4, r_src.GetHigh(), kWord);
         }
         already_generated = true;
       } else {
-        if (ARM_SINGLEREG(r_src)) {
-          DCHECK(ARM_FPREG(r_src_hi));
-          r_src = S2d(r_src, r_src_hi);
+        // Note: following change to avoid using pairs for doubles, replace conversion w/ DCHECK.
+        if (r_src.IsPair()) {
+          DCHECK(ARM_FPREG(r_src.GetHighReg()));
+          r_src = RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg()));
         }
         opcode = kThumb2Vstrd;
         if (displacement <= 1020) {
@@ -988,8 +992,8 @@
       break;
     case kSingle:
     case kWord:
-      if (ARM_FPREG(r_src)) {
-        DCHECK(ARM_SINGLEREG(r_src));
+      if (ARM_FPREG(r_src.GetReg())) {
+        DCHECK(ARM_SINGLEREG(r_src.GetReg()));
         opcode = kThumb2Vstrs;
         if (displacement <= 1020) {
           short_form = true;
@@ -997,12 +1001,12 @@
         }
         break;
       }
-      if (ARM_LOWREG(r_src) && (rBase == r13sp) &&
+      if (ARM_LOWREG(r_src.GetReg()) && (r_base == rs_r13sp) &&
           (displacement <= 1020) && (displacement >= 0)) {
         short_form = true;
         encoded_disp >>= 2;
         opcode = kThumbStrSpRel;
-      } else if (all_low_regs && displacement < 128 && displacement >= 0) {
+      } else if (all_low && displacement < 128 && displacement >= 0) {
         DCHECK_EQ((displacement & 0x3), 0);
         short_form = true;
         encoded_disp >>= 2;
@@ -1014,7 +1018,7 @@
       break;
     case kUnsignedHalf:
     case kSignedHalf:
-      if (all_low_regs && displacement < 64 && displacement >= 0) {
+      if (all_low && displacement < 64 && displacement >= 0) {
         DCHECK_EQ((displacement & 0x1), 0);
         short_form = true;
         encoded_disp >>= 1;
@@ -1026,7 +1030,7 @@
       break;
     case kUnsignedByte:
     case kSignedByte:
-      if (all_low_regs && displacement < 32 && displacement >= 0) {
+      if (all_low && displacement < 32 && displacement >= 0) {
         short_form = true;
         opcode = kThumbStrbRRI5;
       } else if (thumb2Form) {
@@ -1039,52 +1043,52 @@
   }
   if (!already_generated) {
     if (short_form) {
-      store = NewLIR3(opcode, r_src, rBase, encoded_disp);
+      store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), encoded_disp);
     } else {
-      int r_scratch = AllocTemp();
+      RegStorage r_scratch = AllocTemp();
       LoadConstant(r_scratch, encoded_disp);
-      if (ARM_FPREG(r_src)) {
+      if (ARM_FPREG(src_low_reg)) {
         // No index ops - must use a long sequence.  Turn the offset into a direct pointer.
-        OpRegReg(kOpAdd, r_scratch, rBase);
-        store = StoreBaseDispBody(r_scratch, 0, r_src, r_src_hi, size);
+        OpRegReg(kOpAdd, r_scratch, r_base);
+        store = StoreBaseDispBody(r_scratch, 0, r_src, size);
       } else {
-        store = StoreBaseIndexed(rBase, r_scratch, r_src, 0, size);
+        store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
       }
       FreeTemp(r_scratch);
     }
   }
 
   // TODO: In future, may need to differentiate Dalvik & spill accesses
-  if (rBase == rARM_SP) {
-    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, is64bit);
+  if (r_base == rs_rARM_SP) {
+    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
   }
   return store;
 }
 
-LIR* ArmMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src,
+LIR* ArmMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                OpSize size) {
-  return StoreBaseDispBody(rBase, displacement, r_src, -1, size);
+  DCHECK(!((size == kLong) || (size == kDouble)));
+  return StoreBaseDispBody(r_base, displacement, r_src, size);
 }
 
-LIR* ArmMir2Lir::StoreBaseDispWide(int rBase, int displacement,
-                                   int r_src_lo, int r_src_hi) {
-  return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong);
+LIR* ArmMir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
+  return StoreBaseDispBody(r_base, displacement, r_src, kLong);
 }
 
-LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src) {
+LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
   int opcode;
-  DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src));
-  if (ARM_DOUBLEREG(r_dest)) {
+  DCHECK_EQ(ARM_DOUBLEREG(r_dest.GetReg()), ARM_DOUBLEREG(r_src.GetReg()));
+  if (ARM_DOUBLEREG(r_dest.GetReg())) {
     opcode = kThumb2Vmovd;
   } else {
-    if (ARM_SINGLEREG(r_dest)) {
-      opcode = ARM_SINGLEREG(r_src) ? kThumb2Vmovs : kThumb2Fmsr;
+    if (ARM_SINGLEREG(r_dest.GetReg())) {
+      opcode = ARM_SINGLEREG(r_src.GetReg()) ? kThumb2Vmovs : kThumb2Fmsr;
     } else {
-      DCHECK(ARM_SINGLEREG(r_src));
+      DCHECK(ARM_SINGLEREG(r_src.GetReg()));
       opcode = kThumb2Fmrs;
     }
   }
-  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
+  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
   if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
     res->flags.is_nop = true;
   }
@@ -1096,26 +1100,26 @@
   return NULL;
 }
 
-LIR* ArmMir2Lir::OpMem(OpKind op, int rBase, int disp) {
+LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
   LOG(FATAL) << "Unexpected use of OpMem for Arm";
   return NULL;
 }
 
-LIR* ArmMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
-                                      int displacement, int r_src, int r_src_hi, OpSize size,
-                                      int s_reg) {
+LIR* ArmMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
+                                      int displacement, RegStorage r_src, RegStorage r_src_hi,
+                                      OpSize size, int s_reg) {
   LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
   return NULL;
 }
 
-LIR* ArmMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, int offset) {
+LIR* ArmMir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
   LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
   return NULL;
 }
 
-LIR* ArmMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
-                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
-                                     int s_reg) {
+LIR* ArmMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
+                                     int displacement, RegStorage r_dest, RegStorage r_dest_hi,
+                                     OpSize size, int s_reg) {
   LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
   return NULL;
 }