[MIPS] Refactoring code for quick compiler

The code from compiler/dex/quick/mips64 is merged into the
code in the mips folder.

Change-Id: I785983c21549141306484647da86a0bb4815daaa
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index ed72d67..936ff42 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -77,7 +77,7 @@
  *
  *  [!] escape.  To insert "!", use "!!"
  */
-/* NOTE: must be kept in sync with enum MipsOpcode from LIR.h */
+/* NOTE: must be kept in sync with enum MipsOpcode from mips_lir.h */
 /*
  * TUNING: We're currently punting on the branch delay slots.  All branch
  * instructions in this map are given a size of 8, which during assembly
@@ -85,6 +85,7 @@
  * an assembler pass to fill those slots when possible.
  */
 const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
+    // The following are common mips32r2, mips32r6 and mips64r6 instructions.
     ENCODING_MAP(kMips32BitData, 0x00000000,
                  kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_UNARY_OP,
@@ -117,7 +118,7 @@
                  kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
                  kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
                  NEEDS_FIXUP, "beq", "!0r,!1r,!2t!0N", 8),
-    ENCODING_MAP(kMipsBeqz, 0x10000000, /* same as beq above with t = $zero */
+    ENCODING_MAP(kMipsBeqz, 0x10000000,  // Same as beq above with t = $zero.
                  kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
                  NEEDS_FIXUP, "beqz", "!0r,!1t!0N", 8),
@@ -137,7 +138,7 @@
                  kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
                  NEEDS_FIXUP, "bltz", "!0r,!1t!0N", 8),
-    ENCODING_MAP(kMipsBnez, 0x14000000, /* same as bne below with t = $zero */
+    ENCODING_MAP(kMipsBnez, 0x14000000,  // Same as bne below with t = $zero.
                  kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
                  NEEDS_FIXUP, "bnez", "!0r,!1t!0N", 8),
@@ -145,14 +146,98 @@
                  kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
                  kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
                  NEEDS_FIXUP, "bne", "!0r,!1r,!2t!0N", 8),
-    ENCODING_MAP(kMipsDiv, 0x0000001a,
-                 kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF_HI | REG_DEF_LO | REG_USE01,
-                 "div", "!0r,!1r", 4),
     ENCODING_MAP(kMipsExt, 0x7c000000,
                  kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
                  kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
                  "ext", "!0r,!1r,!2d,!3D", 4),
+    ENCODING_MAP(kMipsFaddd, 0x46200000,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "add.d", "!0S,!1S,!2S", 4),
+    ENCODING_MAP(kMipsFadds, 0x46000000,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "add.s", "!0s,!1s,!2s", 4),
+    ENCODING_MAP(kMipsFsubd, 0x46200001,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sub.d", "!0S,!1S,!2S", 4),
+    ENCODING_MAP(kMipsFsubs, 0x46000001,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sub.s", "!0s,!1s,!2s", 4),
+    ENCODING_MAP(kMipsFdivd, 0x46200003,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "div.d", "!0S,!1S,!2S", 4),
+    ENCODING_MAP(kMipsFdivs, 0x46000003,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "div.s", "!0s,!1s,!2s", 4),
+    ENCODING_MAP(kMipsFmuld, 0x46200002,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "mul.d", "!0S,!1S,!2S", 4),
+    ENCODING_MAP(kMipsFmuls, 0x46000002,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "mul.s", "!0s,!1s,!2s", 4),
+    ENCODING_MAP(kMipsFcvtsd, 0x46200020,
+                 kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.s.d", "!0s,!1S", 4),
+    ENCODING_MAP(kMipsFcvtsw, 0x46800020,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.s.w", "!0s,!1s", 4),
+    ENCODING_MAP(kMipsFcvtds, 0x46000021,
+                 kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.d.s", "!0S,!1s", 4),
+    ENCODING_MAP(kMipsFcvtdw, 0x46800021,
+                 kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.d.w", "!0S,!1s", 4),
+    ENCODING_MAP(kMipsFcvtwd, 0x46200024,
+                 kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.w.d", "!0s,!1S", 4),
+    ENCODING_MAP(kMipsFcvtws, 0x46000024,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.w.s", "!0s,!1s", 4),
+    ENCODING_MAP(kMipsFmovd, 0x46200006,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mov.d", "!0S,!1S", 4),
+    ENCODING_MAP(kMipsFmovs, 0x46000006,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mov.s", "!0s,!1s", 4),
+    ENCODING_MAP(kMipsFnegd, 0x46200007,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "neg.d", "!0S,!1S", 4),
+    ENCODING_MAP(kMipsFnegs, 0x46000007,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "neg.s", "!0s,!1s", 4),
+    ENCODING_MAP(kMipsFldc1, 0xd4000000,
+                 kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "ldc1", "!0S,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsFlwc1, 0xc4000000,
+                 kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lwc1", "!0s,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsFsdc1, 0xf4000000,
+                 kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sdc1", "!0S,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsFswc1, 0xe4000000,
+                 kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "swc1", "!0s,!1d(!2r)", 4),
     ENCODING_MAP(kMipsJal, 0x0c000000,
                  kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
@@ -197,31 +282,31 @@
                  kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
                  "lw", "!0r,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsMfhi, 0x00000010,
-                 kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_HI,
-                 "mfhi", "!0r", 4),
-    ENCODING_MAP(kMipsMflo, 0x00000012,
-                 kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_LO,
-                 "mflo", "!0r", 4),
-    ENCODING_MAP(kMipsMove, 0x00000025, /* or using zero reg */
+    ENCODING_MAP(kMipsMove, 0x00000025,  // Or using zero reg.
                  kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "move", "!0r,!1r", 4),
-    ENCODING_MAP(kMipsMovz, 0x0000000a,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "movz", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsMul, 0x70000002,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "mul", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsMfc1, 0x44000000,
+                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mfc1", "!0r,!1s", 4),
+    ENCODING_MAP(kMipsMtc1, 0x44800000,
+                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+                 "mtc1", "!0r,!1s", 4),
+    ENCODING_MAP(kMipsMfhc1, 0x44600000,
+                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mfhc1", "!0r,!1s", 4),
+    ENCODING_MAP(kMipsMthc1, 0x44e00000,
+                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+                 "mthc1", "!0r,!1s", 4),
     ENCODING_MAP(kMipsNop, 0x00000000,
                  kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, NO_OPERAND,
                  "nop", ";", 4),
-    ENCODING_MAP(kMipsNor, 0x00000027, /* used for "not" too */
+    ENCODING_MAP(kMipsNor, 0x00000027,  // Used for "not" too.
                  kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
                  "nor", "!0r,!1r,!2r", 4),
@@ -289,7 +374,7 @@
                  kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
                  "srlv", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsSubu, 0x00000023, /* used for "neg" too */
+    ENCODING_MAP(kMipsSubu, 0x00000023,  // Used for "neg" too.
                  kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
                  "subu", "!0r,!1r,!2r", 4),
@@ -297,6 +382,10 @@
                  kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
                  "sw", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsSync, 0x0000000f,
+                 kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP,
+                 "sync", ";", 4),
     ENCODING_MAP(kMipsXor, 0x00000026,
                  kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
@@ -305,103 +394,143 @@
                  kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
                  "xori", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMipsFadds, 0x46000000,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+
+    // The following are mips32r2 instructions.
+    ENCODING_MAP(kMipsR2Div, 0x0000001a,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF_HI | REG_DEF_LO | REG_USE01,
+                 "div", "!0r,!1r", 4),
+    ENCODING_MAP(kMipsR2Mul, 0x70000002,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "add.s", "!0s,!1s,!2s", 4),
-    ENCODING_MAP(kMipsFsubs, 0x46000001,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 "mul", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsR2Mfhi, 0x00000010,
+                 kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_HI,
+                 "mfhi", "!0r", 4),
+    ENCODING_MAP(kMipsR2Mflo, 0x00000012,
+                 kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_LO,
+                 "mflo", "!0r", 4),
+    ENCODING_MAP(kMipsR2Movz, 0x0000000a,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "sub.s", "!0s,!1s,!2s", 4),
-    ENCODING_MAP(kMipsFmuls, 0x46000002,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 "movz", "!0r,!1r,!2r", 4),
+
+    // The following are mips32r6 and mips64r6 instructions.
+    ENCODING_MAP(kMipsR6Div, 0x0000009a,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "mul.s", "!0s,!1s,!2s", 4),
-    ENCODING_MAP(kMipsFdivs, 0x46000003,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 "div", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsR6Mod, 0x000000da,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "div.s", "!0s,!1s,!2s", 4),
-    ENCODING_MAP(kMipsFaddd, 0x46200000,
-                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 "mod", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsR6Mul, 0x00000098,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "add.d", "!0S,!1S,!2S", 4),
-    ENCODING_MAP(kMipsFsubd, 0x46200001,
-                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 "mul", "!0r,!1r,!2r", 4),
+
+    // The following are mips64r6 instructions.
+    ENCODING_MAP(kMips64Daddiu, 0x64000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "daddiu", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Daddu, 0x0000002d,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "sub.d", "!0S,!1S,!2S", 4),
-    ENCODING_MAP(kMipsFmuld, 0x46200002,
-                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 "daddu", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dahi, 0x04060000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
+                 "dahi", "!0r,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMips64Dati, 0x041E0000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
+                 "dati", "!0r,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMips64Daui, 0x74000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "daui", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Ddiv, 0x0000009e,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "mul.d", "!0S,!1S,!2S", 4),
-    ENCODING_MAP(kMipsFdivd, 0x46200003,
-                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 "ddiv", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dmod, 0x000000de,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "div.d", "!0S,!1S,!2S", 4),
-    ENCODING_MAP(kMipsFcvtsd, 0x46200020,
-                 kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 "dmod", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dmul, 0x0000009c,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "dmul", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dmfc1, 0x44200000,
+                 kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "cvt.s.d", "!0s,!1S", 4),
-    ENCODING_MAP(kMipsFcvtsw, 0x46800020,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "cvt.s.w", "!0s,!1s", 4),
-    ENCODING_MAP(kMipsFcvtds, 0x46000021,
-                 kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "cvt.d.s", "!0S,!1s", 4),
-    ENCODING_MAP(kMipsFcvtdw, 0x46800021,
-                 kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "cvt.d.w", "!0S,!1s", 4),
-    ENCODING_MAP(kMipsFcvtws, 0x46000024,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "cvt.w.s", "!0s,!1s", 4),
-    ENCODING_MAP(kMipsFcvtwd, 0x46200024,
-                 kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "cvt.w.d", "!0s,!1S", 4),
-    ENCODING_MAP(kMipsFmovs, 0x46000006,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "mov.s", "!0s,!1s", 4),
-    ENCODING_MAP(kMipsFmovd, 0x46200006,
-                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "mov.d", "!0S,!1S", 4),
-    ENCODING_MAP(kMipsFlwc1, 0xC4000000,
-                 kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
-                 "lwc1", "!0s,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsFldc1, 0xD4000000,
-                 kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
-                 "ldc1", "!0S,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsFswc1, 0xE4000000,
-                 kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
-                 "swc1", "!0s,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsFsdc1, 0xF4000000,
-                 kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
-                 "sdc1", "!0S,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsMfc1, 0x44000000,
-                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "mfc1", "!0r,!1s", 4),
-    ENCODING_MAP(kMipsMtc1, 0x44800000,
-                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 "dmfc1", "!0r,!1s", 4),
+    ENCODING_MAP(kMips64Dmtc1, 0x44a00000,
+                 kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
-                 "mtc1", "!0r,!1s", 4),
-    ENCODING_MAP(kMipsMfhc1, 0x44600000,
-                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "mfhc1", "!0r,!1s", 4),
-    ENCODING_MAP(kMipsMthc1, 0x44e00000,
-                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
-                 "mthc1", "!0r,!1s", 4),
-    ENCODING_MAP(kMipsDelta, 0x27e00000,
+                 "dmtc1", "!0r,!1s", 4),
+    ENCODING_MAP(kMips64Drotr32, 0x0000003e | (1 << 21),
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "drotr32", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsll, 0x00000038,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "dsll", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsll32, 0x0000003c,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "dsll32", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsrl, 0x0000003a,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "dsrl", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsrl32, 0x0000003e,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "dsrl32", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsra, 0x0000003b,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "dsra", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsra32, 0x0000003f,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "dsra32", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsllv, 0x00000014,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "dsllv", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dsrlv, 0x00000016,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "dsrlv", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dsrav, 0x00000017,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "dsrav", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dsubu, 0x0000002f,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "dsubu", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Ld, 0xdc000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "ld", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Lwu, 0x9c000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lwu", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Sd, 0xfc000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sd", "!0r,!1d(!2r)", 4),
+
+    // The following are pseudoinstructions.
+    ENCODING_MAP(kMipsDelta, 0x27e00000,  // It is implemented as daddiu for mips64.
                  kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, 15, 0,
                  kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | REG_USE_LR |
                  NEEDS_FIXUP, "addiu", "!0r,ra,0x!1h(!1d)", 4),
@@ -417,25 +546,6 @@
                  kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH | REG_DEF_LR,
                  "addiu", "ra,pc,8", 4),
-    ENCODING_MAP(kMipsSync, 0x0000000f,
-                 kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP,
-                 "sync", ";", 4),
-
-    // The following are mips32r6 instructions.
-    ENCODING_MAP(kMipsR6Div, 0x0000009a,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "div", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsR6Mod, 0x000000da,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "mod", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsR6Mul, 0x00000098,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "mul", "!0r,!1r,!2r", 4),
-
     ENCODING_MAP(kMipsUndefined, 0x64000000,
                  kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, NO_OPERAND,
@@ -538,14 +648,13 @@
  */
 AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
   LIR *lir;
-  AssemblerStatus res = kSuccess;  // Assume success
+  AssemblerStatus res = kSuccess;  // Assume success.
 
   for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
     if (lir->opcode < 0) {
       continue;
     }
 
-
     if (lir->flags.is_nop) {
       continue;
     }
@@ -567,23 +676,31 @@
         int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
         int delta = offset2 - offset1;
         if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
-          // Fits
+          // Fits.
           lir->operands[1] = delta;
+          if (cu_->target64) {
+            LIR *new_addiu = RawLIR(lir->dalvik_offset, kMips64Daddiu, lir->operands[0], rRAd,
+                                    delta);
+            InsertLIRBefore(lir, new_addiu);
+            NopLIR(lir);
+            res = kRetryAll;
+          }
         } else {
-          // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair
-          LIR *new_delta_hi =
-              RawLIR(lir->dalvik_offset, kMipsDeltaHi,
-                     lir->operands[0], 0, lir->operands[2],
-                     lir->operands[3], 0, lir->target);
+          // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair.
+          LIR *new_delta_hi = RawLIR(lir->dalvik_offset, kMipsDeltaHi, lir->operands[0], 0,
+                                     lir->operands[2], lir->operands[3], 0, lir->target);
           InsertLIRBefore(lir, new_delta_hi);
-          LIR *new_delta_lo =
-              RawLIR(lir->dalvik_offset, kMipsDeltaLo,
-                     lir->operands[0], 0, lir->operands[2],
-                     lir->operands[3], 0, lir->target);
+          LIR *new_delta_lo = RawLIR(lir->dalvik_offset, kMipsDeltaLo, lir->operands[0], 0,
+                                     lir->operands[2], lir->operands[3], 0, lir->target);
           InsertLIRBefore(lir, new_delta_lo);
-          LIR *new_addu =
-              RawLIR(lir->dalvik_offset, kMipsAddu,
-                     lir->operands[0], lir->operands[0], rRA);
+          LIR *new_addu;
+          if (cu_->target64) {
+            new_addu = RawLIR(lir->dalvik_offset, kMips64Daddu, lir->operands[0], lir->operands[0],
+                              rRAd);
+          } else {
+            new_addu = RawLIR(lir->dalvik_offset, kMipsAddu, lir->operands[0], lir->operands[0],
+                              rRA);
+          }
           InsertLIRBefore(lir, new_addu);
           NopLIR(lir);
           res = kRetryAll;
@@ -698,7 +815,9 @@
         case kFmtDfp: {
           // TODO: do we need to adjust now that we're using 64BitSolo?
           DCHECK(RegStorage::IsDouble(operand)) << ", Operand = 0x" << std::hex << operand;
-          DCHECK_EQ((operand & 0x1), 0U);
+          if (!cu_->target64) {
+            DCHECK_EQ((operand & 0x1), 0U);  // May only use even numbered registers for mips32.
+          }
           value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
               ((1 << (encoder->field_loc[i].end + 1)) - 1);
           bits |= value;
@@ -719,7 +838,7 @@
     code_buffer_.push_back((bits >> 8) & 0xff);
     code_buffer_.push_back((bits >> 16) & 0xff);
     code_buffer_.push_back((bits >> 24) & 0xff);
-    // TUNING: replace with proper delay slot handling
+    // TUNING: replace with proper delay slot handling.
     if (encoder->size == 8) {
       DCHECK(!IsPseudoLirOp(lir->opcode));
       const MipsEncodingMap *encoder2 = &EncodingMap[kMipsNop];
@@ -758,7 +877,7 @@
         lir->operands[0] = 0;
       }
     }
-    /* Pseudo opcodes don't consume space */
+    // Pseudo opcodes don't consume space.
   }
   return offset;
 }
@@ -771,10 +890,10 @@
 void MipsMir2Lir::AssignOffsets() {
   int offset = AssignInsnOffsets();
 
-  /* Const values have to be word aligned */
+  // Const values have to be word aligned.
   offset = RoundUp(offset, 4);
 
-  /* Set up offsets for literals */
+  // Set up offsets for literals.
   data_offset_ = offset;
 
   offset = AssignLiteralOffset(offset);
@@ -811,19 +930,19 @@
         CodegenDump();
         LOG(FATAL) << "Assembler error - too many retries";
       }
-      // Redo offsets and try again
+      // Redo offsets and try again.
       AssignOffsets();
       code_buffer_.clear();
     }
   }
 
-  // Install literals
+  // Install literals.
   InstallLiteralPools();
 
-  // Install switch tables
+  // Install switch tables.
   InstallSwitchTables();
 
-  // Install fill array data
+  // Install fill array data.
   InstallFillArrayData();
 
   // Create the mapping table and native offset to reference map.
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index b067221..de66b35 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -68,7 +68,7 @@
  */
 void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
   const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  // Add the table to the list - we'll process it later
+  // Add the table to the list - we'll process it later.
   SwitchTable* tab_rec =
       static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
   tab_rec->switch_mir = mir;
@@ -77,39 +77,39 @@
   int elements = table[1];
   switch_tables_.push_back(tab_rec);
 
-  // The table is composed of 8-byte key/disp pairs
+  // The table is composed of 8-byte key/disp pairs.
   int byte_size = elements * 8;
 
   int size_hi = byte_size >> 16;
   int size_lo = byte_size & 0xffff;
 
-  RegStorage r_end = AllocTemp();
+  RegStorage r_end = AllocPtrSizeTemp();
   if (size_hi) {
     NewLIR2(kMipsLui, r_end.GetReg(), size_hi);
   }
-  // Must prevent code motion for the curr pc pair
+  // Must prevent code motion for the curr pc pair.
   GenBarrier();  // Scheduling barrier
-  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
-  // Now, fill the branch delay slot
+  NewLIR0(kMipsCurrPC);  // Really a jal to .+8.
+  // Now, fill the branch delay slot.
   if (size_hi) {
     NewLIR3(kMipsOri, r_end.GetReg(), r_end.GetReg(), size_lo);
   } else {
     NewLIR3(kMipsOri, r_end.GetReg(), rZERO, size_lo);
   }
-  GenBarrier();  // Scheduling barrier
+  GenBarrier();  // Scheduling barrier.
 
-  // Construct BaseLabel and set up table base register
+  // Construct BaseLabel and set up table base register.
   LIR* base_label = NewLIR0(kPseudoTargetLabel);
-  // Remember base label so offsets can be computed later
+  // Remember base label so offsets can be computed later.
   tab_rec->anchor = base_label;
-  RegStorage r_base = AllocTemp();
+  RegStorage r_base = AllocPtrSizeTemp();
   NewLIR4(kMipsDelta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
   OpRegRegReg(kOpAdd, r_end, r_end, r_base);
 
-  // Grab switch test value
+  // Grab switch test value.
   rl_src = LoadValue(rl_src, kCoreReg);
 
-  // Test loop
+  // Test loop.
   RegStorage r_key = AllocTemp();
   LIR* loop_label = NewLIR0(kPseudoTargetLabel);
   LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
@@ -118,10 +118,10 @@
   OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
   RegStorage r_disp = AllocTemp();
   Load32Disp(r_base, -4, r_disp);
-  OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
-  OpReg(kOpBx, rs_rRA);
-
-  // Loop exit
+  const RegStorage rs_ra = TargetPtrReg(kLr);
+  OpRegRegReg(kOpAdd, rs_ra, rs_ra, r_disp);
+  OpReg(kOpBx, rs_ra);
+  // Loop exit.
   LIR* exit_label = NewLIR0(kPseudoTargetLabel);
   exit_branch->target = exit_label;
 }
@@ -141,7 +141,7 @@
  */
 void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
   const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  // Add the table to the list - we'll process it later
+  // Add the table to the list - we'll process it later.
   SwitchTable* tab_rec =
       static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
   tab_rec->switch_mir = mir;
@@ -150,10 +150,10 @@
   int size = table[1];
   switch_tables_.push_back(tab_rec);
 
-  // Get the switch value
+  // Get the switch value.
   rl_src = LoadValue(rl_src, kCoreReg);
 
-  // Prepare the bias.  If too big, handle 1st stage here
+  // Prepare the bias.  If too big, handle 1st stage here.
   int low_key = s4FromSwitchData(&table[2]);
   bool large_bias = false;
   RegStorage r_key;
@@ -167,10 +167,10 @@
     r_key = AllocTemp();
   }
 
-  // Must prevent code motion for the curr pc pair
+  // Must prevent code motion for the curr pc pair.
   GenBarrier();
-  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
-  // Now, fill the branch delay slot with bias strip
+  NewLIR0(kMipsCurrPC);  // Really a jal to .+8.
+  // Now, fill the branch delay slot with bias strip.
   if (low_key == 0) {
     NewLIR0(kMipsNop);
   } else {
@@ -180,51 +180,60 @@
       OpRegRegImm(kOpSub, r_key, rl_src.reg, low_key);
     }
   }
-  GenBarrier();  // Scheduling barrier
+  GenBarrier();  // Scheduling barrier.
 
-  // Construct BaseLabel and set up table base register
+  // Construct BaseLabel and set up table base register.
   LIR* base_label = NewLIR0(kPseudoTargetLabel);
-  // Remember base label so offsets can be computed later
+  // Remember base label so offsets can be computed later.
   tab_rec->anchor = base_label;
 
-  // Bounds check - if < 0 or >= size continue following switch
+  // Bounds check - if < 0 or >= size continue following switch.
   LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
 
-  // Materialize the table base pointer
-  RegStorage r_base = AllocTemp();
+  // Materialize the table base pointer.
+  RegStorage r_base = AllocPtrSizeTemp();
   NewLIR4(kMipsDelta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
 
-  // Load the displacement from the switch table
+  // Load the displacement from the switch table.
   RegStorage r_disp = AllocTemp();
   LoadBaseIndexed(r_base, r_key, r_disp, 2, k32);
 
-  // Add to rAP and go
-  OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
-  OpReg(kOpBx, rs_rRA);
+  // Add to rRA and go.
+  const RegStorage rs_ra = TargetPtrReg(kLr);
+  OpRegRegReg(kOpAdd, rs_ra, rs_ra, r_disp);
+  OpReg(kOpBx, rs_ra);
 
-  /* branch_over target here */
+  // branch_over target here.
   LIR* target = NewLIR0(kPseudoTargetLabel);
   branch_over->target = target;
 }
 
 void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
-  int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
+  int ex_offset = cu_->target64 ? Thread::ExceptionOffset<8>().Int32Value() :
+      Thread::ExceptionOffset<4>().Int32Value();
   RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
   RegStorage reset_reg = AllocTempRef();
-  LoadRefDisp(rs_rMIPS_SELF, ex_offset, rl_result.reg, kNotVolatile);
+  LoadRefDisp(TargetPtrReg(kSelf), ex_offset, rl_result.reg, kNotVolatile);
   LoadConstant(reset_reg, 0);
-  StoreRefDisp(rs_rMIPS_SELF, ex_offset, reset_reg, kNotVolatile);
+  StoreRefDisp(TargetPtrReg(kSelf), ex_offset, reset_reg, kNotVolatile);
   FreeTemp(reset_reg);
   StoreValue(rl_dest, rl_result);
 }
 
 void MipsMir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
-  RegStorage reg_card_base = AllocTemp();
-  RegStorage reg_card_no = AllocTemp();
-  // NOTE: native pointer.
-  LoadWordDisp(rs_rMIPS_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
-  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
-  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
+  RegStorage reg_card_base = AllocPtrSizeTemp();
+  RegStorage reg_card_no = AllocPtrSizeTemp();
+  if (cu_->target64) {
+    // NOTE: native pointer.
+    LoadWordDisp(TargetPtrReg(kSelf), Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
+    OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
+    StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base), 0, kUnsignedByte);
+  } else {
+    // NOTE: native pointer.
+    LoadWordDisp(TargetPtrReg(kSelf), Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
+    OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
+    StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
+  }
   FreeTemp(reg_card_base);
   FreeTemp(reg_card_no);
 }
@@ -232,33 +241,57 @@
 void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
   int spill_count = num_core_spills_ + num_fp_spills_;
   /*
-   * On entry, rMIPS_ARG0, rMIPS_ARG1, rMIPS_ARG2 & rMIPS_ARG3 are live.  Let the register
-   * allocation mechanism know so it doesn't try to use any of them when
-   * expanding the frame or flushing.  This leaves the utility
-   * code with a single temp: r12.  This should be enough.
+   * On entry, A0, A1, A2 & A3 are live. On Mips64, A4, A5, A6 & A7 are also live.
+   * Let the register allocation mechanism know so it doesn't try to use any of them when
+   * expanding the frame or flushing.
    */
-  LockTemp(rs_rMIPS_ARG0);
-  LockTemp(rs_rMIPS_ARG1);
-  LockTemp(rs_rMIPS_ARG2);
-  LockTemp(rs_rMIPS_ARG3);
+  const RegStorage arg0 = TargetReg(kArg0);
+  const RegStorage arg1 = TargetReg(kArg1);
+  const RegStorage arg2 = TargetReg(kArg2);
+  const RegStorage arg3 = TargetReg(kArg3);
+  const RegStorage arg4 = TargetReg(kArg4);
+  const RegStorage arg5 = TargetReg(kArg5);
+  const RegStorage arg6 = TargetReg(kArg6);
+  const RegStorage arg7 = TargetReg(kArg7);
+
+  LockTemp(arg0);
+  LockTemp(arg1);
+  LockTemp(arg2);
+  LockTemp(arg3);
+  if (cu_->target64) {
+    LockTemp(arg4);
+    LockTemp(arg5);
+    LockTemp(arg6);
+    LockTemp(arg7);
+  }
+
+  bool skip_overflow_check;
+  InstructionSet target = (cu_->target64) ? kMips64 : kMips;
+  int ptr_size = cu_->target64 ? 8 : 4;
 
   /*
    * We can safely skip the stack overflow check if we're
    * a leaf *and* our frame size < fudge factor.
    */
-  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kMips);
+
+  skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, target);
   NewLIR0(kPseudoMethodEntry);
-  RegStorage check_reg = AllocTemp();
-  RegStorage new_sp = AllocTemp();
+  RegStorage check_reg = AllocPtrSizeTemp();
+  RegStorage new_sp = AllocPtrSizeTemp();
+  const RegStorage rs_sp = TargetPtrReg(kSp);
   if (!skip_overflow_check) {
-    /* Load stack limit */
-    Load32Disp(rs_rMIPS_SELF, Thread::StackEndOffset<4>().Int32Value(), check_reg);
+    // Load stack limit.
+    if (cu_->target64) {
+      LoadWordDisp(TargetPtrReg(kSelf), Thread::StackEndOffset<8>().Int32Value(), check_reg);
+    } else {
+      Load32Disp(TargetPtrReg(kSelf), Thread::StackEndOffset<4>().Int32Value(), check_reg);
+    }
   }
-  /* Spill core callee saves */
+  // Spill core callee saves.
   SpillCoreRegs();
-  /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
+  // NOTE: promotion of FP regs currently unsupported, thus no FP spill.
   DCHECK_EQ(num_fp_spills_, 0);
-  const int frame_sub = frame_size_ - spill_count * 4;
+  const int frame_sub = frame_size_ - spill_count * ptr_size;
   if (!skip_overflow_check) {
     class StackOverflowSlowPath : public LIRSlowPath {
      public:
@@ -269,9 +302,9 @@
         m2l_->ResetRegPool();
         m2l_->ResetDefTracking();
         GenerateTargetLabel(kPseudoThrowTarget);
-        // LR is offset 0 since we push in reverse order.
-        m2l_->Load32Disp(rs_rMIPS_SP, 0, rs_rRA);
-        m2l_->OpRegImm(kOpAdd, rs_rMIPS_SP, sp_displace_);
+        // RA is offset 0 since we push in reverse order.
+        m2l_->LoadWordDisp(m2l_->TargetPtrReg(kSp), 0, m2l_->TargetPtrReg(kLr));
+        m2l_->OpRegImm(kOpAdd, m2l_->TargetPtrReg(kSp), sp_displace_);
         m2l_->ClobberCallerSave();
         RegStorage r_tgt = m2l_->CallHelperSetup(kQuickThrowStackOverflow);  // Doesn't clobber LR.
         m2l_->CallHelper(r_tgt, kQuickThrowStackOverflow, false /* MarkSafepointPC */,
@@ -281,21 +314,27 @@
      private:
       const size_t sp_displace_;
     };
-    OpRegRegImm(kOpSub, new_sp, rs_rMIPS_SP, frame_sub);
+    OpRegRegImm(kOpSub, new_sp, rs_sp, frame_sub);
     LIR* branch = OpCmpBranch(kCondUlt, new_sp, check_reg, nullptr);
-    AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * 4));
+    AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * ptr_size));
     // TODO: avoid copy for small frame sizes.
-    OpRegCopy(rs_rMIPS_SP, new_sp);     // Establish stack
+    OpRegCopy(rs_sp, new_sp);  // Establish stack.
   } else {
-    OpRegImm(kOpSub, rs_rMIPS_SP, frame_sub);
+    OpRegImm(kOpSub, rs_sp, frame_sub);
   }
 
   FlushIns(ArgLocs, rl_method);
 
-  FreeTemp(rs_rMIPS_ARG0);
-  FreeTemp(rs_rMIPS_ARG1);
-  FreeTemp(rs_rMIPS_ARG2);
-  FreeTemp(rs_rMIPS_ARG3);
+  FreeTemp(arg0);
+  FreeTemp(arg1);
+  FreeTemp(arg2);
+  FreeTemp(arg3);
+  if (cu_->target64) {
+    FreeTemp(arg4);
+    FreeTemp(arg5);
+    FreeTemp(arg6);
+    FreeTemp(arg7);
+  }
 }
 
 void MipsMir2Lir::GenExitSequence() {
@@ -303,58 +342,67 @@
    * In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't
    * allocated by the register utilities as temps.
    */
-  LockTemp(rs_rMIPS_RET0);
-  LockTemp(rs_rMIPS_RET1);
+  LockTemp(TargetPtrReg(kRet0));
+  LockTemp(TargetPtrReg(kRet1));
 
   NewLIR0(kPseudoMethodExit);
   UnSpillCoreRegs();
-  OpReg(kOpBx, rs_rRA);
+  OpReg(kOpBx, TargetPtrReg(kLr));
 }
 
 void MipsMir2Lir::GenSpecialExitSequence() {
-  OpReg(kOpBx, rs_rRA);
+  OpReg(kOpBx, TargetPtrReg(kLr));
 }
 
 void MipsMir2Lir::GenSpecialEntryForSuspend() {
-  // Keep 16-byte stack alignment - push A0, i.e. ArtMethod*, 2 filler words and RA.
-  core_spill_mask_ = (1u << rs_rRA.GetRegNum());
+  // Keep 16-byte stack alignment: for mips32, push A0 (i.e. ArtMethod*), 2 filler words and RA;
+  // for mips64, push A0 and RA.
+  core_spill_mask_ = (1u << TargetPtrReg(kLr).GetRegNum());
   num_core_spills_ = 1u;
   fp_spill_mask_ = 0u;
   num_fp_spills_ = 0u;
   frame_size_ = 16u;
   core_vmap_table_.clear();
   fp_vmap_table_.clear();
-  OpRegImm(kOpSub, rs_rMIPS_SP, frame_size_);
-  Store32Disp(rs_rMIPS_SP, frame_size_ - 4, rs_rRA);
-  Store32Disp(rs_rMIPS_SP, 0, rs_rA0);
+  const RegStorage rs_sp = TargetPtrReg(kSp);
+  OpRegImm(kOpSub, rs_sp, frame_size_);
+  StoreWordDisp(rs_sp, frame_size_ - (cu_->target64 ? 8 : 4), TargetPtrReg(kLr));
+  StoreWordDisp(rs_sp, 0, TargetPtrReg(kArg0));
 }
 
 void MipsMir2Lir::GenSpecialExitForSuspend() {
   // Pop the frame. Don't pop ArtMethod*, it's no longer needed.
-  Load32Disp(rs_rMIPS_SP, frame_size_ - 4, rs_rRA);
-  OpRegImm(kOpAdd, rs_rMIPS_SP, frame_size_);
+  const RegStorage rs_sp = TargetPtrReg(kSp);
+  LoadWordDisp(rs_sp, frame_size_ - (cu_->target64 ? 8 : 4), TargetPtrReg(kLr));
+  OpRegImm(kOpAdd, rs_sp, frame_size_);
 }
 
 /*
  * Bit of a hack here - in the absence of a real scheduling pass,
  * emit the next instruction in static & direct invoke sequences.
  */
-static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
-                          int state, const MethodReference& target_method,
-                          uint32_t,
-                          uintptr_t direct_code, uintptr_t direct_method,
-                          InvokeType type) {
+static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED, int state,
+                          const MethodReference& target_method, uint32_t, uintptr_t direct_code,
+                          uintptr_t direct_method, InvokeType type) {
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
   if (direct_code != 0 && direct_method != 0) {
     switch (state) {
     case 0:  // Get the current Method* [sets kArg0]
       if (direct_code != static_cast<uintptr_t>(-1)) {
-        cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+        if (cu->target64) {
+          cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
+        } else {
+          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+        }
       } else {
         cg->LoadCodeAddress(target_method, type, kInvokeTgt);
       }
       if (direct_method != static_cast<uintptr_t>(-1)) {
-        cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
+        if (cu->target64) {
+          cg->LoadConstantWide(cg->TargetReg(kArg0, kRef), direct_method);
+        } else {
+          cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
+        }
       } else {
         cg->LoadMethodAddress(target_method, type, kArg0);
       }
@@ -377,7 +425,11 @@
       // Set up direct code if known.
       if (direct_code != 0) {
         if (direct_code != static_cast<uintptr_t>(-1)) {
-          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+          if (cu->target64) {
+            cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
+          } else {
+            cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+          }
         } else {
           CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
           cg->LoadCodeAddress(target_method, type, kInvokeTgt);
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 649b6c9..713264e 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_DEX_QUICK_MIPS_CODEGEN_MIPS_H_
 #define ART_COMPILER_DEX_QUICK_MIPS_CODEGEN_MIPS_H_
 
+#include "dex/compiler_ir.h"
 #include "dex/quick/mir_to_lir.h"
 #include "mips_lir.h"
 
@@ -39,215 +40,303 @@
     size_t cur_core_reg_;
   };
 
+  class InToRegStorageMips64Mapper : public InToRegStorageMapper {
+   public:
+    explicit InToRegStorageMips64Mapper(Mir2Lir* m2l) : m2l_(m2l), cur_arg_reg_(0) {}
+    virtual RegStorage GetNextReg(ShortyArg arg);
+    virtual void Reset() OVERRIDE {
+      cur_arg_reg_ = 0;
+    }
+   protected:
+    Mir2Lir* m2l_;
+   private:
+    size_t cur_arg_reg_;
+  };
+
+  InToRegStorageMips64Mapper in_to_reg_storage_mips64_mapper_;
   InToRegStorageMipsMapper in_to_reg_storage_mips_mapper_;
   InToRegStorageMapper* GetResetedInToRegStorageMapper() OVERRIDE {
-    in_to_reg_storage_mips_mapper_.Reset();
-    return &in_to_reg_storage_mips_mapper_;
+    InToRegStorageMapper* res;
+    if (cu_->target64) {
+      res = &in_to_reg_storage_mips64_mapper_;
+    } else {
+      res = &in_to_reg_storage_mips_mapper_;
+    }
+    res->Reset();
+    return res;
   }
 
-  public:
-    MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+ public:
+  MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
 
-    // Required for target - codegen utilities.
-    bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
-                            RegLocation rl_dest, int lit);
-    bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
-    void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
-                                    int32_t constant) OVERRIDE;
-    void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
-                                     int64_t constant) OVERRIDE;
-    LIR* CheckSuspendUsingLoad() OVERRIDE;
-    RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
-    LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                      OpSize size, VolatileKind is_volatile) OVERRIDE;
-    LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
-                         OpSize size) OVERRIDE;
-    LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
-    LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
-    LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
-                       OpSize size, VolatileKind is_volatile) OVERRIDE;
-    LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
-                          OpSize size) OVERRIDE;
-    LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
-    LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
+  // Required for target - codegen utilities.
+  bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+                          RegLocation rl_dest, int lit);
+  bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+  void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1, int32_t constant)
+  OVERRIDE;
+  void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1, int64_t constant)
+  OVERRIDE;
+  LIR* CheckSuspendUsingLoad() OVERRIDE;
+  RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
+  LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+                    VolatileKind is_volatile) OVERRIDE;
+  LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+                       OpSize size) OVERRIDE;
+  LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
+  LIR* LoadConstantWideNoClobber(RegStorage r_dest, int64_t value);
+  LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+  LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+                     VolatileKind is_volatile) OVERRIDE;
+  LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+                        OpSize size) OVERRIDE;
+  LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
+  LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
 
-    /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
-    void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
+  /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+  void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
 
-    // Required for target - register utilities.
-    RegStorage Solo64ToPair64(RegStorage reg);
-    RegStorage Fp64ToSolo32(RegStorage reg);
-    RegStorage TargetReg(SpecialTargetRegister reg);
-    RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE;
-    RegLocation GetReturnAlt();
-    RegLocation GetReturnWideAlt();
-    RegLocation LocCReturn();
-    RegLocation LocCReturnRef();
-    RegLocation LocCReturnDouble();
-    RegLocation LocCReturnFloat();
-    RegLocation LocCReturnWide();
-    ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
-    void AdjustSpillMask();
-    void ClobberCallerSave();
-    void FreeCallTemps();
-    void LockCallTemps();
-    void CompilerInitializeRegAlloc();
+  // Required for target - register utilities.
+  RegStorage Solo64ToPair64(RegStorage reg);
+  RegStorage Fp64ToSolo32(RegStorage reg);
+  RegStorage TargetReg(SpecialTargetRegister reg);
+  RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE;
+  RegStorage TargetPtrReg(SpecialTargetRegister reg) OVERRIDE {
+    return TargetReg(reg, cu_->target64 ? kWide : kNotWide);
+  }
+  RegLocation GetReturnAlt();
+  RegLocation GetReturnWideAlt();
+  RegLocation LocCReturn();
+  RegLocation LocCReturnRef();
+  RegLocation LocCReturnDouble();
+  RegLocation LocCReturnFloat();
+  RegLocation LocCReturnWide();
+  ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
+  void AdjustSpillMask();
+  void ClobberCallerSave();
+  void FreeCallTemps();
+  void LockCallTemps();
+  void CompilerInitializeRegAlloc();
 
-    // Required for target - miscellaneous.
-    void AssembleLIR();
-    int AssignInsnOffsets();
-    void AssignOffsets();
-    AssemblerStatus AssembleInstructions(CodeOffset start_addr);
-    void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
-    void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
-                                  ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
-    const char* GetTargetInstFmt(int opcode);
-    const char* GetTargetInstName(int opcode);
-    std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
-    ResourceMask GetPCUseDefEncoding() const OVERRIDE;
-    uint64_t GetTargetInstFlags(int opcode);
-    size_t GetInsnSize(LIR* lir) OVERRIDE;
-    bool IsUnconditionalBranch(LIR* lir);
+  // Required for target - miscellaneous.
+  void AssembleLIR();
+  int AssignInsnOffsets();
+  void AssignOffsets();
+  AssemblerStatus AssembleInstructions(CodeOffset start_addr);
+  void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+  void SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
+                                ResourceMask* def_mask) OVERRIDE;
+  const char* GetTargetInstFmt(int opcode);
+  const char* GetTargetInstName(int opcode);
+  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+  ResourceMask GetPCUseDefEncoding() const OVERRIDE;
+  uint64_t GetTargetInstFlags(int opcode);
+  size_t GetInsnSize(LIR* lir) OVERRIDE;
+  bool IsUnconditionalBranch(LIR* lir);
 
-    // Get the register class for load/store of a field.
-    RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
+  // Get the register class for load/store of a field.
+  RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
 
-    // Required for target - Dalvik-level generators.
-    void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                           RegLocation rl_src1, RegLocation rl_src2, int flags);
-    void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
-                     RegLocation rl_index, RegLocation rl_dest, int scale);
-    void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
-                     RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
-    void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_shift, int flags);
-    void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                          RegLocation rl_src2);
-    void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                         RegLocation rl_src2);
-    void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
-    void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
-    bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
-    bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
-    bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
-    bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
-    bool GenInlinedSqrt(CallInfo* info);
-    bool GenInlinedPeek(CallInfo* info, OpSize size);
-    bool GenInlinedPoke(CallInfo* info, OpSize size);
-    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                        RegLocation rl_src2, int flags) OVERRIDE;
-    RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
-    RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
-    void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    void GenDivZeroCheckWide(RegStorage reg);
-    void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
-    void GenExitSequence();
-    void GenSpecialExitSequence() OVERRIDE;
-    void GenSpecialEntryForSuspend() OVERRIDE;
-    void GenSpecialExitForSuspend() OVERRIDE;
-    void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
-    void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
-    void GenSelect(BasicBlock* bb, MIR* mir);
-    void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                          int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                          RegisterClass dest_reg_class) OVERRIDE;
-    bool GenMemBarrier(MemBarrierKind barrier_kind);
-    void GenMoveException(RegLocation rl_dest);
-    void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
-                                       int first_bit, int second_bit);
-    void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
-    void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
-    void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
-    void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
-    bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
+  // Required for target - Dalvik-level generators.
+  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                      RegLocation lr_shift);
+  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                         RegLocation rl_src2, int flags);
+  void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+                   RegLocation rl_dest, int scale);
+  void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+                   RegLocation rl_src, int scale, bool card_mark);
+  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                         RegLocation rl_shift, int flags);
+  void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                        RegLocation rl_src2);
+  void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                       RegLocation rl_src2);
+  void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                RegLocation rl_src2);
+  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+  bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
+  bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
+  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
+  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
+  bool GenInlinedSqrt(CallInfo* info);
+  bool GenInlinedPeek(CallInfo* info, OpSize size);
+  bool GenInlinedPoke(CallInfo* info, OpSize size);
+  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                      RegLocation rl_src2, int flags) OVERRIDE;
+  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
+  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
+  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+  void GenDivZeroCheckWide(RegStorage reg);
+  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+  void GenExitSequence();
+  void GenSpecialExitSequence() OVERRIDE;
+  void GenSpecialEntryForSuspend() OVERRIDE;
+  void GenSpecialExitForSuspend() OVERRIDE;
+  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+  void GenSelect(BasicBlock* bb, MIR* mir);
+  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
+                        RegisterClass dest_reg_class) OVERRIDE;
+  bool GenMemBarrier(MemBarrierKind barrier_kind);
+  void GenMoveException(RegLocation rl_dest);
+  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+                                     int first_bit, int second_bit);
+  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+  void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+  void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+  bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
 
-    // Required for target - single operation generators.
-    LIR* OpUnconditionalBranch(LIR* target);
-    LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
-    LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
-    LIR* OpCondBranch(ConditionCode cc, LIR* target);
-    LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
-    LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
-    LIR* OpIT(ConditionCode cond, const char* guide);
-    void OpEndIT(LIR* it);
-    LIR* OpMem(OpKind op, RegStorage r_base, int disp);
-    void OpPcRelLoad(RegStorage reg, LIR* target);
-    LIR* OpReg(OpKind op, RegStorage r_dest_src);
-    void OpRegCopy(RegStorage r_dest, RegStorage r_src);
-    LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
-    LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
-    LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
-    LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
-    LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
-    LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
-    LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
-    LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
-    LIR* OpTestSuspend(LIR* target);
-    LIR* OpVldm(RegStorage r_base, int count);
-    LIR* OpVstm(RegStorage r_base, int count);
-    void OpRegCopyWide(RegStorage dest, RegStorage src);
+  // Required for target - single operation generators.
+  LIR* OpUnconditionalBranch(LIR* target);
+  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
+  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
+  LIR* OpCondBranch(ConditionCode cc, LIR* target);
+  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
+  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
+  LIR* OpIT(ConditionCode cond, const char* guide);
+  void OpEndIT(LIR* it);
+  LIR* OpMem(OpKind op, RegStorage r_base, int disp);
+  void OpPcRelLoad(RegStorage reg, LIR* target);
+  LIR* OpReg(OpKind op, RegStorage r_dest_src);
+  void OpRegCopy(RegStorage r_dest, RegStorage r_src);
+  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
+  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
+  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
+  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
+  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
+  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
+  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
+  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
+  LIR* OpTestSuspend(LIR* target);
+  LIR* OpVldm(RegStorage r_base, int count);
+  LIR* OpVstm(RegStorage r_base, int count);
+  void OpRegCopyWide(RegStorage dest, RegStorage src);
 
-    // TODO: collapse r_dest.
-    LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
-                          OpSize size);
-    // TODO: collapse r_src.
-    LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
-                           OpSize size);
-    void SpillCoreRegs();
-    void UnSpillCoreRegs();
-    static const MipsEncodingMap EncodingMap[kMipsLast];
-    bool InexpensiveConstantInt(int32_t value);
-    bool InexpensiveConstantFloat(int32_t value);
-    bool InexpensiveConstantLong(int64_t value);
-    bool InexpensiveConstantDouble(int64_t value);
+  // TODO: collapse r_dest.
+  LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
+  // TODO: collapse r_src.
+  LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+  void SpillCoreRegs();
+  void UnSpillCoreRegs();
+  static const MipsEncodingMap EncodingMap[kMipsLast];
+  bool InexpensiveConstantInt(int32_t value);
+  bool InexpensiveConstantFloat(int32_t value);
+  bool InexpensiveConstantLong(int64_t value);
+  bool InexpensiveConstantDouble(int64_t value);
 
-    bool WideGPRsAreAliases() const OVERRIDE {
-      return false;  // Wide GPRs are formed by pairing.
+  bool WideGPRsAreAliases() const OVERRIDE {
+    return cu_->target64;  // Wide GPRs are formed by pairing on mips32.
+  }
+  bool WideFPRsAreAliases() const OVERRIDE {
+    return cu_->target64;  // Wide FPRs are formed by pairing on mips32.
+  }
+
+  LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+
+  RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_div,
+                        int flags) OVERRIDE;
+  RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) OVERRIDE;
+  NextCallInsn GetNextSDCallInsn() OVERRIDE;
+  LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
+
+  // Unimplemented intrinsics.
+  bool GenInlinedCharAt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+    return false;
+  }
+  bool GenInlinedAbsInt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+    return false;
+  }
+  bool GenInlinedAbsLong(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+    return false;
+  }
+  bool GenInlinedIndexOf(CallInfo* info ATTRIBUTE_UNUSED, bool zero_based ATTRIBUTE_UNUSED)
+  OVERRIDE {
+    return false;
+  }
+
+  // True if isa is rev R6.
+  const bool isaIsR6_;
+
+  // True if floating point unit is 32bits.
+  const bool fpuIs32Bit_;
+
+ private:
+  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+  void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+  void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+
+  void ConvertShortToLongBranch(LIR* lir);
+
+  // Mips64 specific long gen methods:
+  void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
+  void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+  void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                     RegLocation rl_src2, bool is_div, int flags);
+  void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
+                         RegisterClass reg_class);
+  RegStorage AllocPtrSizeTemp(bool required = true);
+
+  /**
+   * @param reg #RegStorage containing a Solo64 input register (e.g. @c a1 or @c d0).
+   * @return A Solo32 with the same register number as the @p reg (e.g. @c a1 or @c f0).
+   * @see As64BitReg
+   */
+  RegStorage As32BitReg(RegStorage reg) {
+    DCHECK(!reg.IsPair());
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Expected 64b register";
+      } else {
+        LOG(WARNING) << "Expected 64b register";
+        return reg;
+      }
     }
-    bool WideFPRsAreAliases() const OVERRIDE {
-      return false;  // Wide FPRs are formed by pairing.
+    RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
+                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
+    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
+              ->GetReg().GetReg(),
+              ret_val.GetReg());
+    return ret_val;
+  }
+
+  /**
+   * @param reg #RegStorage containing a Solo32 input register (e.g. @c a1 or @c f0).
+   * @return A Solo64 with the same register number as the @p reg (e.g. @c a1 or @c d0).
+   */
+  RegStorage As64BitReg(RegStorage reg) {
+    DCHECK(!reg.IsPair());
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Expected 32b register";
+      } else {
+        LOG(WARNING) << "Expected 32b register";
+        return reg;
+      }
     }
+    RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
+                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
+    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
+              ->GetReg().GetReg(),
+              ret_val.GetReg());
+    return ret_val;
+  }
 
-    LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
-
-    RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
-                          RegLocation rl_src2, bool is_div, int flags) OVERRIDE;
-    RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div)
-        OVERRIDE;
-
-    NextCallInsn GetNextSDCallInsn() OVERRIDE;
-    LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
-
-    // Unimplemented intrinsics.
-    bool GenInlinedCharAt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
-      return false;
+  RegStorage Check64BitReg(RegStorage reg) {
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Checked for 64b register";
+      } else {
+        LOG(WARNING) << "Checked for 64b register";
+        return As64BitReg(reg);
+      }
     }
-    bool GenInlinedAbsInt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
-      return false;
-    }
-    bool GenInlinedAbsLong(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
-      return false;
-    }
-    bool GenInlinedIndexOf(CallInfo* info ATTRIBUTE_UNUSED, bool zero_based ATTRIBUTE_UNUSED)
-        OVERRIDE {
-      return false;
-    }
-
-    // True if isa is rev R6.
-    const bool isaIsR6_;
-
-    // True if floating point unit is 32bits.
-    const bool fpuIs32Bit_;
-
-  private:
-    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-
-    void ConvertShortToLongBranch(LIR* lir);
+    return reg;
+  }
 };
 
 }  // namespace art
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 37bf1a6..45fd1a9 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -23,8 +23,8 @@
 
 namespace art {
 
-void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode,
-                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+                                  RegLocation rl_src1, RegLocation rl_src2) {
   int op = kMipsNop;
   RegLocation rl_result;
 
@@ -51,7 +51,7 @@
       break;
     case Instruction::REM_FLOAT_2ADDR:
     case Instruction::REM_FLOAT:
-      FlushAllRegs();   // Send everything to home location
+      FlushAllRegs();   // Send everything to home location.
       CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
       rl_result = GetReturn(kFPReg);
       StoreValue(rl_dest, rl_result);
@@ -69,8 +69,8 @@
   StoreValue(rl_dest, rl_result);
 }
 
-void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode,
-                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_src2) {
   int op = kMipsNop;
   RegLocation rl_result;
 
@@ -93,7 +93,7 @@
       break;
     case Instruction::REM_DOUBLE_2ADDR:
     case Instruction::REM_DOUBLE:
-      FlushAllRegs();   // Send everything to home location
+      FlushAllRegs();   // Send everything to home location.
       CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
       rl_result = GetReturnWide(kFPReg);
       StoreValueWide(rl_dest, rl_result);
@@ -147,22 +147,22 @@
       op = kMipsFcvtdw;
       break;
     case Instruction::FLOAT_TO_INT:
-      GenConversionCall(kQuickF2iz, rl_dest, rl_src);
+      GenConversionCall(kQuickF2iz, rl_dest, rl_src, kCoreReg);
       return;
     case Instruction::DOUBLE_TO_INT:
-      GenConversionCall(kQuickD2iz, rl_dest, rl_src);
+      GenConversionCall(kQuickD2iz, rl_dest, rl_src, kCoreReg);
       return;
     case Instruction::LONG_TO_DOUBLE:
-      GenConversionCall(kQuickL2d, rl_dest, rl_src);
+      GenConversionCall(kQuickL2d, rl_dest, rl_src, kFPReg);
       return;
     case Instruction::FLOAT_TO_LONG:
-      GenConversionCall(kQuickF2l, rl_dest, rl_src);
+      GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
       return;
     case Instruction::LONG_TO_FLOAT:
-      GenConversionCall(kQuickL2f, rl_dest, rl_src);
+      GenConversionCall(kQuickL2f, rl_dest, rl_src, kFPReg);
       return;
     case Instruction::DOUBLE_TO_LONG:
-      GenConversionCall(kQuickD2l, rl_dest, rl_src);
+      GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
       return;
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
@@ -189,24 +189,24 @@
   if (fpuIs32Bit) {
     switch (base) {
       case 0:
-        return RegStorage(RegStorage::k64BitPair, rMIPS_FARG0, rMIPS_FARG1);
+        return RegStorage(RegStorage::k64BitPair, rFARG0, rFARG1);
       case 2:
-        return RegStorage(RegStorage::k64BitPair, rMIPS_FARG2, rMIPS_FARG3);
+        return RegStorage(RegStorage::k64BitPair, rFARG2, rFARG3);
     }
   } else {
     switch (base) {
       case 0:
-        return RegStorage(RegStorage::k64BitSolo, rMIPS_FARG0);
+        return RegStorage(RegStorage::k64BitSolo, rFARG0);
       case 2:
-        return RegStorage(RegStorage::k64BitSolo, rMIPS_FARG2);
+        return RegStorage(RegStorage::k64BitSolo, rFARG2);
     }
   }
   LOG(FATAL) << "Unsupported Mips.GetWideFP: " << fpuIs32Bit << " " << base;
   UNREACHABLE();
 }
 
-void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
-                           RegLocation rl_src1, RegLocation rl_src2) {
+void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                           RegLocation rl_src2) {
   bool wide = true;
   QuickEntrypointEnum target;
 
@@ -232,16 +232,23 @@
   FlushAllRegs();
   LockCallTemps();
   if (wide) {
-    RegStorage r_tmp1 = GetWideArgFP(fpuIs32Bit_, 0);
-    RegStorage r_tmp2 = GetWideArgFP(fpuIs32Bit_, 2);
+    RegStorage r_tmp1;
+    RegStorage r_tmp2;
+    if (cu_->target64) {
+      r_tmp1 = RegStorage(RegStorage::k64BitSolo, rFARG0);
+      r_tmp2 = RegStorage(RegStorage::k64BitSolo, rFARG1);
+    } else {
+      r_tmp1 = GetWideArgFP(fpuIs32Bit_, 0);
+      r_tmp2 = GetWideArgFP(fpuIs32Bit_, 2);
+    }
     LoadValueDirectWideFixed(rl_src1, r_tmp1);
     LoadValueDirectWideFixed(rl_src2, r_tmp2);
   } else {
-    LoadValueDirectFixed(rl_src1, rs_rMIPS_FARG0);
-    LoadValueDirectFixed(rl_src2, rs_rMIPS_FARG2);
+    LoadValueDirectFixed(rl_src1, rs_rFARG0);
+    LoadValueDirectFixed(rl_src2, cu_->target64 ? rs_rFARG1 : rs_rFARG2);
   }
   RegStorage r_tgt = LoadHelper(target);
-  // NOTE: not a safepoint
+  // NOTE: not a safepoint.
   OpReg(kOpBlx, r_tgt);
   RegLocation rl_result = GetReturn(kCoreReg);
   StoreValue(rl_dest, rl_result);
@@ -254,18 +261,30 @@
 
 void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
   RegLocation rl_result;
-  rl_src = LoadValue(rl_src, kCoreReg);
-  rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  OpRegRegImm(kOpAdd, rl_result.reg, rl_src.reg, 0x80000000);
+  if (cu_->target64) {
+    rl_src = LoadValue(rl_src, kFPReg);
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(kMipsFnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  } else {
+    rl_src = LoadValue(rl_src, kCoreReg);
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    OpRegRegImm(kOpAdd, rl_result.reg, rl_src.reg, 0x80000000);
+  }
   StoreValue(rl_dest, rl_result);
 }
 
 void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
   RegLocation rl_result;
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x80000000);
-  OpRegCopy(rl_result.reg, rl_src.reg);
+  if (cu_->target64) {
+    rl_src = LoadValueWide(rl_src, kFPReg);
+    rl_result = EvalLocWide(rl_dest, kFPReg, true);
+    NewLIR2(kMipsFnegd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  } else {
+    rl_src = LoadValueWide(rl_src, kCoreReg);
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x80000000);
+    OpRegCopy(rl_result.reg, rl_src.reg);
+  }
   StoreValueWide(rl_dest, rl_result);
 }
 
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 8093c97..626b36e 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -34,6 +34,7 @@
  *    x < y     return -1
  *    x > y     return  1
  *
+ * Mips32 implementation
  *    slt   t0,  x.hi, y.hi;        # (x.hi < y.hi) ? 1:0
  *    sgt   t1,  x.hi, y.hi;        # (y.hi > x.hi) ? 1:0
  *    subu  res, t0, t1             # res = -1:1:0 for [ < > = ]
@@ -43,26 +44,40 @@
  *    subu  res, t0, t1
  * finish:
  *
+ * Mips64 implementation
+ *    slt   temp, x, y;             # (x < y) ? 1:0
+ *    slt   res, y, x;              # (x > y) ? 1:0
+ *    subu  res, res, temp;         # res = -1:1:0 for [ < > = ]
+ *
  */
-void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
-                             RegLocation rl_src2) {
+void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  RegStorage t0 = AllocTemp();
-  RegStorage t1 = AllocTemp();
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
-  NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
-  NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
-  LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
-  NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
-  NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
-  NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
-  FreeTemp(t0);
-  FreeTemp(t1);
-  LIR* target = NewLIR0(kPseudoTargetLabel);
-  branch->target = target;
-  StoreValue(rl_dest, rl_result);
+  if (cu_->target64) {
+    RegStorage temp = AllocTempWide();
+    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    NewLIR3(kMipsSlt, temp.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+    NewLIR3(kMipsSlt, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
+    NewLIR3(kMipsSubu, rl_result.reg.GetReg(), rl_result.reg.GetReg(), temp.GetReg());
+    FreeTemp(temp);
+    StoreValue(rl_dest, rl_result);
+  } else {
+    RegStorage t0 = AllocTemp();
+    RegStorage t1 = AllocTemp();
+    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+    NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
+    NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
+    LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
+    NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+    NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
+    NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
+    FreeTemp(t0);
+    FreeTemp(t1);
+    LIR* target = NewLIR0(kPseudoTargetLabel);
+    branch->target = target;
+    StoreValue(rl_dest, rl_result);
+  }
 }
 
 LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
@@ -134,7 +149,7 @@
 LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
   LIR* branch;
   if (check_value != 0) {
-    // TUNING: handle s16 & kCondLt/Mi case using slti
+    // TUNING: handle s16 & kCondLt/Mi case using slti.
     RegStorage t_reg = AllocTemp();
     LoadConstant(t_reg, check_value);
     branch = OpCmpBranch(cond, reg, t_reg, target);
@@ -164,17 +179,34 @@
 }
 
 LIR* MipsMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
-  // If src or dest is a pair, we'll be using low reg.
-  if (r_dest.IsPair()) {
-    r_dest = r_dest.GetLow();
+  LIR* res;
+  MipsOpCode opcode;
+
+  if (!cu_->target64) {
+    // If src or dest is a pair, we'll be using low reg.
+    if (r_dest.IsPair()) {
+      r_dest = r_dest.GetLow();
+    }
+    if (r_src.IsPair()) {
+      r_src = r_src.GetLow();
+    }
+  } else {
+    DCHECK(!r_dest.IsPair() && !r_src.IsPair());
   }
-  if (r_src.IsPair()) {
-    r_src = r_src.GetLow();
-  }
+
   if (r_dest.IsFloat() || r_src.IsFloat())
     return OpFpRegCopy(r_dest, r_src);
-  LIR* res = RawLIR(current_dalvik_offset_, kMipsMove,
-                    r_dest.GetReg(), r_src.GetReg());
+  if (cu_->target64) {
+    // TODO: Check that r_src and r_dest are both 32 or both 64 bits length on Mips64.
+    if (r_dest.Is64Bit() || r_src.Is64Bit()) {
+      opcode = kMipsMove;
+    } else {
+      opcode = kMipsSll;
+    }
+  } else {
+    opcode = kMipsMove;
+  }
+  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
   if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
     res->flags.is_nop = true;
   }
@@ -189,6 +221,10 @@
 }
 
 void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
+  if (cu_->target64) {
+    OpRegCopy(r_dest, r_src);
+    return;
+  }
   if (r_dest != r_src) {
     bool dest_fp = r_dest.IsFloat();
     bool src_fp = r_src.IsFloat();
@@ -213,16 +249,16 @@
       if (src_fp) {
         // Here if dest is core reg and src is fp reg.
         if (fpuIs32Bit_) {
-            NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetLowReg());
-            NewLIR2(kMipsMfc1, r_dest.GetHighReg(), r_src.GetHighReg());
+          NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetLowReg());
+          NewLIR2(kMipsMfc1, r_dest.GetHighReg(), r_src.GetHighReg());
         } else {
-            r_src = Fp64ToSolo32(r_src);
-            NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetReg());
-            NewLIR2(kMipsMfhc1, r_dest.GetHighReg(), r_src.GetReg());
+          r_src = Fp64ToSolo32(r_src);
+          NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetReg());
+          NewLIR2(kMipsMfhc1, r_dest.GetHighReg(), r_src.GetReg());
         }
       } else {
         // Here if both src and dest are core registers.
-        // Handle overlap
+        // Handle overlap.
         if (r_src.GetHighReg() == r_dest.GetLowReg()) {
           OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
           OpRegCopy(r_dest.GetLow(), r_src.GetLow());
@@ -263,17 +299,15 @@
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
 
   if (isaIsR6_) {
-      NewLIR3(is_div ? kMipsR6Div : kMipsR6Mod,
-          rl_result.reg.GetReg(), reg1.GetReg(), reg2.GetReg());
+    NewLIR3(is_div ? kMipsR6Div : kMipsR6Mod, rl_result.reg.GetReg(), reg1.GetReg(), reg2.GetReg());
   } else {
-      NewLIR2(kMipsDiv, reg1.GetReg(), reg2.GetReg());
-      NewLIR1(is_div ? kMipsMflo : kMipsMfhi, rl_result.reg.GetReg());
+    NewLIR2(kMipsR2Div, reg1.GetReg(), reg2.GetReg());
+    NewLIR1(is_div ? kMipsR2Mflo : kMipsR2Mfhi, rl_result.reg.GetReg());
   }
   return rl_result;
 }
 
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit,
-                                      bool is_div) {
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
   RegStorage t_reg = AllocTemp();
   NewLIR3(kMipsAddiu, t_reg.GetReg(), rZERO, lit);
   RegLocation rl_result = GenDivRem(rl_dest, reg1, t_reg, is_div);
@@ -322,10 +356,17 @@
     // MIPS supports only aligned access. Defer unaligned access to JNI implementation.
     return false;
   }
-  RegLocation rl_src_address = info->args[0];  // long address
-  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
+  RegLocation rl_src_address = info->args[0];       // Long address.
+  if (!cu_->target64) {
+    rl_src_address = NarrowRegLoc(rl_src_address);  // Ignore high half in info->args[1].
+  }
   RegLocation rl_dest = InlineTarget(info);
-  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
+  RegLocation rl_address;
+  if (cu_->target64) {
+    rl_address = LoadValueWide(rl_src_address, kCoreReg);
+  } else {
+    rl_address = LoadValue(rl_src_address, kCoreReg);
+  }
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   DCHECK(size == kSignedByte);
   LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
@@ -338,10 +379,17 @@
     // MIPS supports only aligned access. Defer unaligned access to JNI implementation.
     return false;
   }
-  RegLocation rl_src_address = info->args[0];  // long address
-  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
-  RegLocation rl_src_value = info->args[2];  // [size] value
-  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
+  RegLocation rl_src_address = info->args[0];       // Long address.
+  if (!cu_->target64) {
+    rl_src_address = NarrowRegLoc(rl_src_address);  // Ignore high half in info->args[1].
+  }
+  RegLocation rl_src_value = info->args[2];         // [size] value.
+  RegLocation rl_address;
+  if (cu_->target64) {
+    rl_address = LoadValueWide(rl_src_address, kCoreReg);
+  } else {
+    rl_address = LoadValue(rl_src_address, kCoreReg);
+  }
   DCHECK(size == kSignedByte);
   RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
   StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
@@ -366,8 +414,7 @@
   UNREACHABLE();
 }
 
-void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
-                                                RegLocation rl_result, int lit,
+void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
                                                 int first_bit, int second_bit) {
   UNUSED(lit);
   RegStorage t_reg = AllocTemp();
@@ -380,20 +427,24 @@
 }
 
 void MipsMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
-  DCHECK(reg.IsPair());   // TODO: support k64BitSolo.
-  RegStorage t_reg = AllocTemp();
-  OpRegRegReg(kOpOr, t_reg, reg.GetLow(), reg.GetHigh());
-  GenDivZeroCheck(t_reg);
-  FreeTemp(t_reg);
+  if (cu_->target64) {
+    GenDivZeroCheck(reg);
+  } else {
+    DCHECK(reg.IsPair());   // TODO: support k64BitSolo.
+    RegStorage t_reg = AllocTemp();
+    OpRegRegReg(kOpOr, t_reg, reg.GetLow(), reg.GetHigh());
+    GenDivZeroCheck(t_reg);
+    FreeTemp(t_reg);
+  }
 }
 
-// Test suspend flag, return target of taken suspend branch
+// Test suspend flag, return target of taken suspend branch.
 LIR* MipsMir2Lir::OpTestSuspend(LIR* target) {
-  OpRegImm(kOpSub, rs_rMIPS_SUSPEND, 1);
-  return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, rs_rMIPS_SUSPEND, 0, target);
+  OpRegImm(kOpSub, TargetPtrReg(kSuspend), 1);
+  return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
 }
 
-// Decrement register and branch on condition
+// Decrement register and branch on condition.
 LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
   OpRegImm(kOpSub, reg, 1);
   return OpCmpImmBranch(c_code, reg, 0, target);
@@ -423,9 +474,7 @@
   LOG(FATAL) << "Unexpected use of OpEndIT in Mips";
 }
 
-void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
-                             RegLocation rl_src1, RegLocation rl_src2) {
-  UNUSED(opcode);
+void MipsMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -440,15 +489,14 @@
   OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src2.reg.GetLow(), rl_src1.reg.GetLow());
   RegStorage t_reg = AllocTemp();
   OpRegRegReg(kOpAdd, t_reg, rl_src2.reg.GetHigh(), rl_src1.reg.GetHigh());
-  NewLIR3(kMipsSltu, rl_result.reg.GetHighReg(), rl_result.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+  NewLIR3(kMipsSltu, rl_result.reg.GetHighReg(), rl_result.reg.GetLowReg(),
+          rl_src2.reg.GetLowReg());
   OpRegRegReg(kOpAdd, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
   FreeTemp(t_reg);
   StoreValueWide(rl_dest, rl_result);
 }
 
-void MipsMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
-                             RegLocation rl_src1, RegLocation rl_src2) {
-  UNUSED(opcode);
+void MipsMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -471,45 +519,134 @@
 
 void MipsMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2, int flags) {
-  switch (opcode) {
-    case Instruction::ADD_LONG:
-    case Instruction::ADD_LONG_2ADDR:
-      GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
-      return;
-    case Instruction::SUB_LONG:
-    case Instruction::SUB_LONG_2ADDR:
-      GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
-      return;
-    case Instruction::NEG_LONG:
-      GenNegLong(rl_dest, rl_src2);
-      return;
+  if (cu_->target64) {
+    switch (opcode) {
+      case Instruction::NOT_LONG:
+        GenNotLong(rl_dest, rl_src2);
+        return;
+      case Instruction::ADD_LONG:
+      case Instruction::ADD_LONG_2ADDR:
+        GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
+        return;
+      case Instruction::SUB_LONG:
+      case Instruction::SUB_LONG_2ADDR:
+        GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
+        return;
+      case Instruction::MUL_LONG:
+      case Instruction::MUL_LONG_2ADDR:
+        GenMulLong(rl_dest, rl_src1, rl_src2);
+        return;
+      case Instruction::DIV_LONG:
+      case Instruction::DIV_LONG_2ADDR:
+        GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
+        return;
+      case Instruction::REM_LONG:
+      case Instruction::REM_LONG_2ADDR:
+        GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
+        return;
+      case Instruction::AND_LONG:
+      case Instruction::AND_LONG_2ADDR:
+        GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
+        return;
+      case Instruction::OR_LONG:
+      case Instruction::OR_LONG_2ADDR:
+        GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
+        return;
+      case Instruction::XOR_LONG:
+      case Instruction::XOR_LONG_2ADDR:
+        GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
+        return;
+      case Instruction::NEG_LONG:
+        GenNegLong(rl_dest, rl_src2);
+        return;
 
-    default:
-      break;
+      default:
+        LOG(FATAL) << "Invalid long arith op";
+        return;
+    }
+  } else {
+    switch (opcode) {
+      case Instruction::ADD_LONG:
+      case Instruction::ADD_LONG_2ADDR:
+        GenAddLong(rl_dest, rl_src1, rl_src2);
+        return;
+      case Instruction::SUB_LONG:
+      case Instruction::SUB_LONG_2ADDR:
+        GenSubLong(rl_dest, rl_src1, rl_src2);
+        return;
+      case Instruction::NEG_LONG:
+        GenNegLong(rl_dest, rl_src2);
+        return;
+      default:
+        break;
+    }
+    // Fallback for all other ops.
+    Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
   }
+}
 
-  // Fallback for all other ops.
-  Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
+void MipsMir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) {
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  OpRegReg(kOpMvn, rl_result.reg, rl_src.reg);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  NewLIR3(kMips64Dmul, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                                RegLocation rl_src2, bool is_div, int flags) {
+  UNUSED(opcode);
+  // TODO: Implement easy div/rem?
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
+    GenDivZeroCheckWide(rl_src2.reg);
+  }
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  NewLIR3(is_div ? kMips64Ddiv : kMips64Dmod, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
+          rl_src2.reg.GetReg());
+  StoreValueWide(rl_dest, rl_result);
 }
 
 void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
   rl_src = LoadValueWide(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  /*
-   *  [v1 v0] =  -[a1 a0]
-   *  negu  v0,a0
-   *  negu  v1,a1
-   *  sltu  t1,r_zero
-   *  subu  v1,v1,t1
-   */
+  RegLocation rl_result;
 
-  OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_src.reg.GetLow());
-  OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
-  RegStorage t_reg = AllocTemp();
-  NewLIR3(kMipsSltu, t_reg.GetReg(), rZERO, rl_result.reg.GetLowReg());
-  OpRegRegReg(kOpSub, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
-  FreeTemp(t_reg);
-  StoreValueWide(rl_dest, rl_result);
+  if (cu_->target64) {
+    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+    OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    //  [v1 v0] =  -[a1 a0]
+    //  negu  v0,a0
+    //  negu  v1,a1
+    //  sltu  t1,zero,v0
+    //  subu  v1,v1,t1
+    OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_src.reg.GetLow());
+    OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
+    RegStorage t_reg = AllocTemp();
+    NewLIR3(kMipsSltu, t_reg.GetReg(), rZERO, rl_result.reg.GetLowReg());
+    OpRegRegReg(kOpSub, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
+    FreeTemp(t_reg);
+    StoreValueWide(rl_dest, rl_result);
+  }
 }
 
 /*
@@ -532,18 +669,18 @@
     data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
   }
 
-  /* null object? */
+  // Null object?
   GenNullCheck(rl_array.reg, opt_flags);
 
-  RegStorage reg_ptr = AllocTemp();
+  RegStorage reg_ptr = (cu_->target64) ? AllocTempRef() : AllocTemp();
   bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
   RegStorage reg_len;
   if (needs_range_check) {
     reg_len = AllocTemp();
-    /* Get len */
+    // Get len.
     Load32Disp(rl_array.reg, len_offset, reg_len);
   }
-  /* reg_ptr -> array data */
+  // reg_ptr -> array data.
   OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
   FreeTemp(rl_array.reg);
   if ((size == k64) || (size == kDouble)) {
@@ -573,7 +710,17 @@
       GenArrayBoundsCheck(rl_index.reg, reg_len);
       FreeTemp(reg_len);
     }
-    LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
+
+    if (cu_->target64) {
+      if (rl_result.ref) {
+        LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), As32BitReg(rl_result.reg), scale,
+                        kReference);
+      } else {
+        LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
+      }
+    } else {
+      LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
+    }
 
     FreeTemp(reg_ptr);
     StoreValue(rl_dest, rl_result);
@@ -612,7 +759,7 @@
     allocated_reg_ptr_temp = true;
   }
 
-  /* null object? */
+  // Null object?
   GenNullCheck(rl_array.reg, opt_flags);
 
   bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
@@ -620,14 +767,14 @@
   if (needs_range_check) {
     reg_len = AllocTemp();
     // NOTE: max live temps(4) here.
-    /* Get len */
+    // Get len.
     Load32Disp(rl_array.reg, len_offset, reg_len);
   }
-  /* reg_ptr -> array data */
+  // reg_ptr -> array data.
   OpRegImm(kOpAdd, reg_ptr, data_offset);
-  /* at this point, reg_ptr points to array, 2 live temps */
+  // At this point, reg_ptr points to array, 2 live temps.
   if ((size == k64) || (size == kDouble)) {
-    // TUNING: specific wide routine that can handle fp regs
+    // TUNING: specific wide routine that can handle fp regs.
     if (scale) {
       RegStorage r_new_index = AllocTemp();
       OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
@@ -660,18 +807,104 @@
   }
 }
 
+void MipsMir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                                 RegLocation rl_shift) {
+  if (!cu_->target64) {
+    Mir2Lir::GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+    return;
+  }
+  OpKind op = kOpBkpt;
+  switch (opcode) {
+  case Instruction::SHL_LONG:
+  case Instruction::SHL_LONG_2ADDR:
+    op = kOpLsl;
+    break;
+  case Instruction::SHR_LONG:
+  case Instruction::SHR_LONG_2ADDR:
+    op = kOpAsr;
+    break;
+  case Instruction::USHR_LONG:
+  case Instruction::USHR_LONG_2ADDR:
+    op = kOpLsr;
+    break;
+  default:
+    LOG(FATAL) << "Unexpected case: " << opcode;
+  }
+  rl_shift = LoadValue(rl_shift, kCoreReg);
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
+  StoreValueWide(rl_dest, rl_result);
+}
+
 void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_shift, int flags) {
   UNUSED(flags);
-  // Default implementation is just to ignore the constant case.
-  GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+  if (!cu_->target64) {
+    // Default implementation is just to ignore the constant case.
+    GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+    return;
+  }
+  OpKind op = kOpBkpt;
+  // Per spec, we only care about low 6 bits of shift amount.
+  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  if (shift_amount == 0) {
+    StoreValueWide(rl_dest, rl_src1);
+    return;
+  }
+
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  switch (opcode) {
+    case Instruction::SHL_LONG:
+    case Instruction::SHL_LONG_2ADDR:
+      op = kOpLsl;
+      break;
+    case Instruction::SHR_LONG:
+    case Instruction::SHR_LONG_2ADDR:
+      op = kOpAsr;
+      break;
+    case Instruction::USHR_LONG:
+    case Instruction::USHR_LONG_2ADDR:
+      op = kOpLsr;
+      break;
+    default:
+      LOG(FATAL) << "Unexpected case";
+  }
+  OpRegRegImm(op, rl_result.reg, rl_src1.reg, shift_amount);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void MipsMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
-                                    RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                                    int flags) {
+void MipsMir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                    RegLocation rl_src1, RegLocation rl_src2, int flags) {
   // Default - bail to non-const handler.
   GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
 }
 
+void MipsMir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
+  if (!cu_->target64) {
+    Mir2Lir::GenIntToLong(rl_dest, rl_src);
+    return;
+  }
+  rl_src = LoadValue(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  NewLIR3(kMipsSll, rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
+                                    RegLocation rl_src, RegisterClass reg_class) {
+  FlushAllRegs();   // Send everything to home location.
+  CallRuntimeHelperRegLocation(trampoline, rl_src, false);
+  if (rl_dest.wide) {
+    RegLocation rl_result;
+    rl_result = GetReturnWide(reg_class);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    RegLocation rl_result;
+    rl_result = GetReturn(reg_class);
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
 }  // namespace art
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 7037055..078ac0a 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -25,25 +25,29 @@
 /*
  * Runtime register conventions.
  *
- * zero is always the value 0
- * at is scratch (normally used as temp reg by assembler)
- * v0, v1 are scratch (normally hold subroutine return values)
- * a0-a3 are scratch (normally hold subroutine arguments)
- * t0-t8 are scratch
- * t9 is scratch (normally used for function calls)
- * s0 (rMIPS_SUSPEND) is reserved [holds suspend-check counter]
- * s1 (rMIPS_SELF) is reserved [holds current &Thread]
- * s2-s7 are callee save (promotion target)
- * k0, k1 are reserved for use by interrupt handlers
- * gp is reserved for global pointer
- * sp is reserved
- * s8 is callee save (promotion target)
- * ra is scratch (normally holds the return addr)
+ *          mips32            | mips64
+ * $0:      zero is always the value 0
+ * $1:      at is scratch (normally used as temp reg by assembler)
+ * $2,$3:   v0, v1 are scratch (normally hold subroutine return values)
+ * $4-$7:   a0-a3 are scratch (normally hold subroutine arguments)
+ * $8-$11:  t0-t3 are scratch | a4-a7 are scratch (normally hold subroutine arguments)
+ * $12-$15: t4-t7 are scratch | t0-t3 are scratch
+ * $16:     s0 (rSUSPEND) is reserved [holds suspend-check counter]
+ * $17:     s1 (rSELF) is reserved [holds current &Thread]
+ * $18-$23: s2-s7 are callee save (promotion target)
+ * $24:     t8 is scratch
+ * $25:     t9 is scratch (normally used for function calls)
+ * $26,$27: k0, k1 are reserved for use by interrupt handlers
+ * $28:     gp is reserved for global pointer
+ * $29:     sp is reserved
+ * $30:     s8 is callee save (promotion target)
+ * $31:     ra is scratch (normally holds the return addr)
  *
  * Preserved across C calls: s0-s8
- * Trashed across C calls: at, v0-v1, a0-a3, t0-t9, gp, ra
+ * Trashed across C calls (mips32): at, v0-v1, a0-a3, t0-t9, gp, ra
+ * Trashed across C calls (mips64): at, v0-v1, a0-a7, t0-t3, t8, t9, gp, ra
  *
- * Floating pointer registers
+ * Floating point registers (mips32)
  * NOTE: there are 32 fp registers (16 df pairs), but currently
  *       only support 16 fp registers (8 df pairs).
  * f0-f15
@@ -51,14 +55,23 @@
  *
  * f0-f15 (df0-df7) trashed across C calls
  *
+ * Floating point registers (mips64)
+ * NOTE: there are 32 fp registers.
+ * f0-f31
+ *
  * For mips32 code use:
  *      a0-a3 to hold operands
  *      v0-v1 to hold results
  *      t0-t9 for temps
  *
+ * For mips64 code use:
+ *      a0-a7 to hold operands
+ *      v0-v1 to hold results
+ *      t0-t3, t8-t9 for temps
+ *
  * All jump/branch instructions have a delay slot after it.
  *
- *  Stack frame diagram (stack grows down, higher addresses at top):
+ * Stack frame diagram (stack grows down, higher addresses at top):
  *
  * +------------------------+
  * | IN[ins-1]              |  {Note: resides in caller's frame}
@@ -90,18 +103,6 @@
 
 #define LOWORD_OFFSET 0
 #define HIWORD_OFFSET 4
-#define rARG0 rA0
-#define rs_rARG0 rs_rA0
-#define rARG1 rA1
-#define rs_rARG1 rs_rA1
-#define rARG2 rA2
-#define rs_rARG2 rs_rA2
-#define rARG3 rA3
-#define rs_rARG3 rs_rA3
-#define rRESULT0 rV0
-#define rs_rRESULT0 rs_rV0
-#define rRESULT1 rV1
-#define rs_rRESULT1 rs_rV1
 
 #define rFARG0 rF12
 #define rs_rFARG0 rs_rF12
@@ -111,14 +112,6 @@
 #define rs_rFARG2 rs_rF14
 #define rFARG3 rF15
 #define rs_rFARG3 rs_rF15
-#define rFRESULT0 rF0
-#define rs_rFRESULT0 rs_rF0
-#define rFRESULT1 rF1
-#define rs_rFRESULT1 rs_rF1
-
-// Regs not used for Mips.
-#define rMIPS_LR RegStorage::kInvalidRegVal
-#define rMIPS_PC RegStorage::kInvalidRegVal
 
 enum MipsResourceEncodingPos {
   kMipsGPReg0   = 0,
@@ -130,6 +123,10 @@
   kMipsRegLO,
   kMipsRegPC,
   kMipsRegEnd   = 51,
+  // Mips64 related:
+  kMips64FPRegEnd = 64,
+  kMips64RegPC    = kMips64FPRegEnd,
+  kMips64RegEnd   = 65,
 };
 
 #define ENCODE_MIPS_REG_LIST(N)      (static_cast<uint64_t>(N))
@@ -144,38 +141,78 @@
 #define FR_BIT   0
 
 enum MipsNativeRegisterPool {  // private marker to avoid generate-operator-out.py from processing.
-  rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  0,
-  rAT   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  1,
-  rV0   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  2,
-  rV1   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  3,
-  rA0   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  4,
-  rA1   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  5,
-  rA2   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  6,
-  rA3   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  7,
-  rT0   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  8,
-  rT1   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  9,
-  rT2   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
-  rT3   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
-  rT4   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
-  rT5   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
-  rT6   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
-  rT7   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
-  rS0   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
-  rS1   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 17,
-  rS2   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 18,
-  rS3   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 19,
-  rS4   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 20,
-  rS5   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 21,
-  rS6   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 22,
-  rS7   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 23,
-  rT8   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 24,
-  rT9   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 25,
-  rK0   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 26,
-  rK1   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 27,
-  rGP   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 28,
-  rSP   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 29,
-  rFP   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 30,
-  rRA   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 31,
+  rZERO  = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  0,
+  rZEROd = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  0,
+  rAT    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  1,
+  rATd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  1,
+  rV0    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  2,
+  rV0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  2,
+  rV1    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  3,
+  rV1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  3,
+  rA0    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  4,
+  rA0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  4,
+  rA1    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  5,
+  rA1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  5,
+  rA2    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  6,
+  rA2d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  6,
+  rA3    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  7,
+  rA3d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  7,
+  rT0_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  8,
+  rA4    = rT0_32,
+  rA4d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  8,
+  rT1_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  9,
+  rA5    = rT1_32,
+  rA5d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  9,
+  rT2_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
+  rA6    = rT2_32,
+  rA6d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
+  rT3_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
+  rA7    = rT3_32,
+  rA7d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
+  rT4_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
+  rT0    = rT4_32,
+  rT0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
+  rT5_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
+  rT1    = rT5_32,
+  rT1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
+  rT6_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
+  rT2    = rT6_32,
+  rT2d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
+  rT7_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
+  rT3    = rT7_32,
+  rT3d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
+  rS0    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
+  rS0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 16,
+  rS1    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 17,
+  rS1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 17,
+  rS2    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 18,
+  rS2d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 18,
+  rS3    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 19,
+  rS3d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 19,
+  rS4    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 20,
+  rS4d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 20,
+  rS5    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 21,
+  rS5d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 21,
+  rS6    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 22,
+  rS6d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 22,
+  rS7    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 23,
+  rS7d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 23,
+  rT8    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 24,
+  rT8d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 24,
+  rT9    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 25,
+  rT9d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 25,
+  rK0    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 26,
+  rK0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 26,
+  rK1    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 27,
+  rK1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 27,
+  rGP    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 28,
+  rGPd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 28,
+  rSP    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 29,
+  rSPd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 29,
+  rFP    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 30,
+  rFPd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 30,
+  rRA    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 31,
+  rRAd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 31,
 
   rF0  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  0,
   rF1  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  1,
@@ -193,6 +230,24 @@
   rF13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
   rF14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
   rF15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
+
+  rF16 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 16,
+  rF17 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 17,
+  rF18 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 18,
+  rF19 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 19,
+  rF20 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 20,
+  rF21 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 21,
+  rF22 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 22,
+  rF23 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 23,
+  rF24 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 24,
+  rF25 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 25,
+  rF26 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 26,
+  rF27 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 27,
+  rF28 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 28,
+  rF29 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 29,
+  rF30 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
+  rF31 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
+
 #if 0
   /*
    * TODO: The shared resource mask doesn't have enough bit positions to describe all
@@ -253,6 +308,39 @@
   rD14_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
   rD15_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
 #endif
+
+  rD0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  0,
+  rD1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  1,
+  rD2  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  2,
+  rD3  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  3,
+  rD4  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  4,
+  rD5  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  5,
+  rD6  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  6,
+  rD7  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  7,
+  rD8  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  8,
+  rD9  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  9,
+  rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+  rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
+  rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+  rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
+  rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
+  rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
+  rD16 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
+  rD17 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 17,
+  rD18 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
+  rD19 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 19,
+  rD20 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
+  rD21 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 21,
+  rD22 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
+  rD23 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 23,
+  rD24 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
+  rD25 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 25,
+  rD26 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
+  rD27 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 27,
+  rD28 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
+  rD29 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 29,
+  rD30 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
+  rD31 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 31,
 };
 
 constexpr RegStorage rs_rZERO(RegStorage::kValid | rZERO);
@@ -263,14 +351,22 @@
 constexpr RegStorage rs_rA1(RegStorage::kValid | rA1);
 constexpr RegStorage rs_rA2(RegStorage::kValid | rA2);
 constexpr RegStorage rs_rA3(RegStorage::kValid | rA3);
-constexpr RegStorage rs_rT0(RegStorage::kValid | rT0);
-constexpr RegStorage rs_rT1(RegStorage::kValid | rT1);
-constexpr RegStorage rs_rT2(RegStorage::kValid | rT2);
-constexpr RegStorage rs_rT3(RegStorage::kValid | rT3);
-constexpr RegStorage rs_rT4(RegStorage::kValid | rT4);
-constexpr RegStorage rs_rT5(RegStorage::kValid | rT5);
-constexpr RegStorage rs_rT6(RegStorage::kValid | rT6);
-constexpr RegStorage rs_rT7(RegStorage::kValid | rT7);
+constexpr RegStorage rs_rT0_32(RegStorage::kValid | rT0_32);
+constexpr RegStorage rs_rA4 = rs_rT0_32;
+constexpr RegStorage rs_rT1_32(RegStorage::kValid | rT1_32);
+constexpr RegStorage rs_rA5 = rs_rT1_32;
+constexpr RegStorage rs_rT2_32(RegStorage::kValid | rT2_32);
+constexpr RegStorage rs_rA6 = rs_rT2_32;
+constexpr RegStorage rs_rT3_32(RegStorage::kValid | rT3_32);
+constexpr RegStorage rs_rA7 = rs_rT3_32;
+constexpr RegStorage rs_rT4_32(RegStorage::kValid | rT4_32);
+constexpr RegStorage rs_rT0 = rs_rT4_32;
+constexpr RegStorage rs_rT5_32(RegStorage::kValid | rT5_32);
+constexpr RegStorage rs_rT1 = rs_rT5_32;
+constexpr RegStorage rs_rT6_32(RegStorage::kValid | rT6_32);
+constexpr RegStorage rs_rT2 = rs_rT6_32;
+constexpr RegStorage rs_rT7_32(RegStorage::kValid | rT7_32);
+constexpr RegStorage rs_rT3 = rs_rT7_32;
 constexpr RegStorage rs_rS0(RegStorage::kValid | rS0);
 constexpr RegStorage rs_rS1(RegStorage::kValid | rS1);
 constexpr RegStorage rs_rS2(RegStorage::kValid | rS2);
@@ -288,9 +384,38 @@
 constexpr RegStorage rs_rFP(RegStorage::kValid | rFP);
 constexpr RegStorage rs_rRA(RegStorage::kValid | rRA);
 
-constexpr RegStorage rs_rMIPS_LR(RegStorage::kInvalid);     // Not used for MIPS.
-constexpr RegStorage rs_rMIPS_PC(RegStorage::kInvalid);     // Not used for MIPS.
-constexpr RegStorage rs_rMIPS_COUNT(RegStorage::kInvalid);  // Not used for MIPS.
+constexpr RegStorage rs_rZEROd(RegStorage::kValid | rZEROd);
+constexpr RegStorage rs_rATd(RegStorage::kValid | rATd);
+constexpr RegStorage rs_rV0d(RegStorage::kValid | rV0d);
+constexpr RegStorage rs_rV1d(RegStorage::kValid | rV1d);
+constexpr RegStorage rs_rA0d(RegStorage::kValid | rA0d);
+constexpr RegStorage rs_rA1d(RegStorage::kValid | rA1d);
+constexpr RegStorage rs_rA2d(RegStorage::kValid | rA2d);
+constexpr RegStorage rs_rA3d(RegStorage::kValid | rA3d);
+constexpr RegStorage rs_rA4d(RegStorage::kValid | rA4d);
+constexpr RegStorage rs_rA5d(RegStorage::kValid | rA5d);
+constexpr RegStorage rs_rA6d(RegStorage::kValid | rA6d);
+constexpr RegStorage rs_rA7d(RegStorage::kValid | rA7d);
+constexpr RegStorage rs_rT0d(RegStorage::kValid | rT0d);
+constexpr RegStorage rs_rT1d(RegStorage::kValid | rT1d);
+constexpr RegStorage rs_rT2d(RegStorage::kValid | rT2d);
+constexpr RegStorage rs_rT3d(RegStorage::kValid | rT3d);
+constexpr RegStorage rs_rS0d(RegStorage::kValid | rS0d);
+constexpr RegStorage rs_rS1d(RegStorage::kValid | rS1d);
+constexpr RegStorage rs_rS2d(RegStorage::kValid | rS2d);
+constexpr RegStorage rs_rS3d(RegStorage::kValid | rS3d);
+constexpr RegStorage rs_rS4d(RegStorage::kValid | rS4d);
+constexpr RegStorage rs_rS5d(RegStorage::kValid | rS5d);
+constexpr RegStorage rs_rS6d(RegStorage::kValid | rS6d);
+constexpr RegStorage rs_rS7d(RegStorage::kValid | rS7d);
+constexpr RegStorage rs_rT8d(RegStorage::kValid | rT8d);
+constexpr RegStorage rs_rT9d(RegStorage::kValid | rT9d);
+constexpr RegStorage rs_rK0d(RegStorage::kValid | rK0d);
+constexpr RegStorage rs_rK1d(RegStorage::kValid | rK1d);
+constexpr RegStorage rs_rGPd(RegStorage::kValid | rGPd);
+constexpr RegStorage rs_rSPd(RegStorage::kValid | rSPd);
+constexpr RegStorage rs_rFPd(RegStorage::kValid | rFPd);
+constexpr RegStorage rs_rRAd(RegStorage::kValid | rRAd);
 
 constexpr RegStorage rs_rF0(RegStorage::kValid | rF0);
 constexpr RegStorage rs_rF1(RegStorage::kValid | rF1);
@@ -309,6 +434,23 @@
 constexpr RegStorage rs_rF14(RegStorage::kValid | rF14);
 constexpr RegStorage rs_rF15(RegStorage::kValid | rF15);
 
+constexpr RegStorage rs_rF16(RegStorage::kValid | rF16);
+constexpr RegStorage rs_rF17(RegStorage::kValid | rF17);
+constexpr RegStorage rs_rF18(RegStorage::kValid | rF18);
+constexpr RegStorage rs_rF19(RegStorage::kValid | rF19);
+constexpr RegStorage rs_rF20(RegStorage::kValid | rF20);
+constexpr RegStorage rs_rF21(RegStorage::kValid | rF21);
+constexpr RegStorage rs_rF22(RegStorage::kValid | rF22);
+constexpr RegStorage rs_rF23(RegStorage::kValid | rF23);
+constexpr RegStorage rs_rF24(RegStorage::kValid | rF24);
+constexpr RegStorage rs_rF25(RegStorage::kValid | rF25);
+constexpr RegStorage rs_rF26(RegStorage::kValid | rF26);
+constexpr RegStorage rs_rF27(RegStorage::kValid | rF27);
+constexpr RegStorage rs_rF28(RegStorage::kValid | rF28);
+constexpr RegStorage rs_rF29(RegStorage::kValid | rF29);
+constexpr RegStorage rs_rF30(RegStorage::kValid | rF30);
+constexpr RegStorage rs_rF31(RegStorage::kValid | rF31);
+
 constexpr RegStorage rs_rD0_fr0(RegStorage::kValid | rD0_fr0);
 constexpr RegStorage rs_rD1_fr0(RegStorage::kValid | rD1_fr0);
 constexpr RegStorage rs_rD2_fr0(RegStorage::kValid | rD2_fr0);
@@ -327,53 +469,65 @@
 constexpr RegStorage rs_rD6_fr1(RegStorage::kValid | rD6_fr1);
 constexpr RegStorage rs_rD7_fr1(RegStorage::kValid | rD7_fr1);
 
-// TODO: reduce/eliminate use of these.
-#define rMIPS_SUSPEND rS0
-#define rs_rMIPS_SUSPEND rs_rS0
-#define rMIPS_SELF rS1
-#define rs_rMIPS_SELF rs_rS1
-#define rMIPS_SP rSP
-#define rs_rMIPS_SP rs_rSP
-#define rMIPS_ARG0 rARG0
-#define rs_rMIPS_ARG0 rs_rARG0
-#define rMIPS_ARG1 rARG1
-#define rs_rMIPS_ARG1 rs_rARG1
-#define rMIPS_ARG2 rARG2
-#define rs_rMIPS_ARG2 rs_rARG2
-#define rMIPS_ARG3 rARG3
-#define rs_rMIPS_ARG3 rs_rARG3
-#define rMIPS_FARG0 rFARG0
-#define rs_rMIPS_FARG0 rs_rFARG0
-#define rMIPS_FARG1 rFARG1
-#define rs_rMIPS_FARG1 rs_rFARG1
-#define rMIPS_FARG2 rFARG2
-#define rs_rMIPS_FARG2 rs_rFARG2
-#define rMIPS_FARG3 rFARG3
-#define rs_rMIPS_FARG3 rs_rFARG3
-#define rMIPS_RET0 rRESULT0
-#define rs_rMIPS_RET0 rs_rRESULT0
-#define rMIPS_RET1 rRESULT1
-#define rs_rMIPS_RET1 rs_rRESULT1
-#define rMIPS_INVOKE_TGT rT9
-#define rs_rMIPS_INVOKE_TGT rs_rT9
-#define rMIPS_COUNT RegStorage::kInvalidRegVal
+constexpr RegStorage rs_rD0(RegStorage::kValid | rD0);
+constexpr RegStorage rs_rD1(RegStorage::kValid | rD1);
+constexpr RegStorage rs_rD2(RegStorage::kValid | rD2);
+constexpr RegStorage rs_rD3(RegStorage::kValid | rD3);
+constexpr RegStorage rs_rD4(RegStorage::kValid | rD4);
+constexpr RegStorage rs_rD5(RegStorage::kValid | rD5);
+constexpr RegStorage rs_rD6(RegStorage::kValid | rD6);
+constexpr RegStorage rs_rD7(RegStorage::kValid | rD7);
+constexpr RegStorage rs_rD8(RegStorage::kValid | rD8);
+constexpr RegStorage rs_rD9(RegStorage::kValid | rD9);
+constexpr RegStorage rs_rD10(RegStorage::kValid | rD10);
+constexpr RegStorage rs_rD11(RegStorage::kValid | rD11);
+constexpr RegStorage rs_rD12(RegStorage::kValid | rD12);
+constexpr RegStorage rs_rD13(RegStorage::kValid | rD13);
+constexpr RegStorage rs_rD14(RegStorage::kValid | rD14);
+constexpr RegStorage rs_rD15(RegStorage::kValid | rD15);
+constexpr RegStorage rs_rD16(RegStorage::kValid | rD16);
+constexpr RegStorage rs_rD17(RegStorage::kValid | rD17);
+constexpr RegStorage rs_rD18(RegStorage::kValid | rD18);
+constexpr RegStorage rs_rD19(RegStorage::kValid | rD19);
+constexpr RegStorage rs_rD20(RegStorage::kValid | rD20);
+constexpr RegStorage rs_rD21(RegStorage::kValid | rD21);
+constexpr RegStorage rs_rD22(RegStorage::kValid | rD22);
+constexpr RegStorage rs_rD23(RegStorage::kValid | rD23);
+constexpr RegStorage rs_rD24(RegStorage::kValid | rD24);
+constexpr RegStorage rs_rD25(RegStorage::kValid | rD25);
+constexpr RegStorage rs_rD26(RegStorage::kValid | rD26);
+constexpr RegStorage rs_rD27(RegStorage::kValid | rD27);
+constexpr RegStorage rs_rD28(RegStorage::kValid | rD28);
+constexpr RegStorage rs_rD29(RegStorage::kValid | rD29);
+constexpr RegStorage rs_rD30(RegStorage::kValid | rD30);
+constexpr RegStorage rs_rD31(RegStorage::kValid | rD31);
 
 // RegisterLocation templates return values (r_V0, or r_V0/r_V1).
 const RegLocation mips_loc_c_return
     {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
      RegStorage(RegStorage::k32BitSolo, rV0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_ref
+    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
+     RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
 const RegLocation mips_loc_c_return_wide
     {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
      RegStorage(RegStorage::k64BitPair, rV0, rV1), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_wide
+    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
+     RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
 const RegLocation mips_loc_c_return_float
     {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
      RegStorage(RegStorage::k32BitSolo, rF0), INVALID_SREG, INVALID_SREG};
+// FIXME: move MIPS to k64BitSolo for doubles.
 const RegLocation mips_loc_c_return_double_fr0
     {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
      RegStorage(RegStorage::k64BitPair, rF0, rF1), INVALID_SREG, INVALID_SREG};
 const RegLocation mips_loc_c_return_double_fr1
     {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
      RegStorage(RegStorage::k64BitSolo, rF0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_double
+    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
+     RegStorage(RegStorage::k64BitSolo, rD0), INVALID_SREG, INVALID_SREG};
 
 enum MipsShiftEncodings {
   kMipsLsl = 0x0,
@@ -395,104 +549,136 @@
 #define kSY kSYNC0
 
 /*
- * The following enum defines the list of supported Thumb instructions by the
+ * The following enum defines the list of supported mips instructions by the
  * assembler. Their corresponding EncodingMap positions will be defined in
- * Assemble.cc.
+ * assemble_mips.cc.
  */
 enum MipsOpCode {
   kMipsFirst = 0,
+  // The following are common mips32r2, mips32r6 and mips64r6 instructions.
   kMips32BitData = kMipsFirst,  // data [31..0].
-  kMipsAddiu,  // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
-  kMipsAddu,  // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
-  kMipsAnd,   // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
-  kMipsAndi,  // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
-  kMipsB,     // b o   [0001000000000000] o[15..0].
-  kMipsBal,   // bal o [0000010000010001] o[15..0].
-  // NOTE: the code tests the range kMipsBeq thru kMipsBne, so adding an instruction in this
-  //       range may require updates.
-  kMipsBeq,   // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
-  kMipsBeqz,  // beqz s,o [000100] s[25..21] [00000] o[15..0].
-  kMipsBgez,  // bgez s,o [000001] s[25..21] [00001] o[15..0].
-  kMipsBgtz,  // bgtz s,o [000111] s[25..21] [00000] o[15..0].
-  kMipsBlez,  // blez s,o [000110] s[25..21] [00000] o[15..0].
-  kMipsBltz,  // bltz s,o [000001] s[25..21] [00000] o[15..0].
-  kMipsBnez,  // bnez s,o [000101] s[25..21] [00000] o[15..0].
-  kMipsBne,   // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
-  kMipsDiv,   // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
-  kMipsExt,   // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
-  kMipsJal,   // jal t [000011] t[25..0].
-  kMipsJalr,  // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
-  kMipsJr,    // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
-  kMipsLahi,  // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
-  kMipsLalo,  // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo.
-  kMipsLui,   // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
-  kMipsLb,    // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
-  kMipsLbu,   // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
-  kMipsLh,    // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
-  kMipsLhu,   // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
-  kMipsLw,    // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
-  kMipsMfhi,  // mfhi d [0000000000000000] d[15..11] [00000010000].
-  kMipsMflo,  // mflo d [0000000000000000] d[15..11] [00000010010].
-  kMipsMove,  // move d,s [000000] s[25..21] [00000] d[15..11] [00000100101].
-  kMipsMovz,  // movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010].
-  kMipsMul,   // mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010].
-  kMipsNop,   // nop [00000000000000000000000000000000].
-  kMipsNor,   // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
-  kMipsOr,    // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
-  kMipsOri,   // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
-  kMipsPref,  // pref h,o(b) [101011] b[25..21] h[20..16] o[15..0].
-  kMipsSb,    // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
-  kMipsSeb,   // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
-  kMipsSeh,   // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
-  kMipsSh,    // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
-  kMipsSll,   // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
-  kMipsSllv,  // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
-  kMipsSlt,   // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
-  kMipsSlti,  // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
-  kMipsSltu,  // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
-  kMipsSra,   // sra d,s,imm5 [00000000000] t[20..16] d[15..11] imm5[10..6] [000011].
-  kMipsSrav,  // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
-  kMipsSrl,   // srl d,t,a [00000000000] t[20..16] d[20..16] a[10..6] [000010].
-  kMipsSrlv,  // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
-  kMipsSubu,  // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
-  kMipsSw,    // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
-  kMipsXor,   // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
-  kMipsXori,  // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
-  kMipsFadds,  // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
-  kMipsFsubs,  // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
-  kMipsFmuls,  // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
-  kMipsFdivs,  // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
-  kMipsFaddd,  // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
-  kMipsFsubd,  // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
-  kMipsFmuld,  // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
-  kMipsFdivd,  // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
-  kMipsFcvtsd,  // cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
-  kMipsFcvtsw,  // cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
-  kMipsFcvtds,  // cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
-  kMipsFcvtdw,  // cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
-  kMipsFcvtws,  // cvt.w.d d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
-  kMipsFcvtwd,  // cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
-  kMipsFmovs,  // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
-  kMipsFmovd,  // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
-  kMipsFlwc1,  // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
-  kMipsFldc1,  // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
-  kMipsFswc1,  // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
-  kMipsFsdc1,  // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
-  kMipsMfc1,   // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
-  kMipsMtc1,   // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
-  kMipsMfhc1,  // mfhc1 t,s [01000100011] t[20..16] s[15..11] [00000000000].
-  kMipsMthc1,  // mthc1 t,s [01000100111] t[20..16] s[15..11] [00000000000].
-  kMipsDelta,  // Psuedo for ori t, s, <label>-<label>.
-  kMipsDeltaHi,  // Pseudo for lui t, high16(<label>-<label>).
-  kMipsDeltaLo,  // Pseudo for ori t, s, low16(<label>-<label>).
-  kMipsCurrPC,  // jal to .+8 to materialize pc.
-  kMipsSync,    // sync kind [000000] [0000000000000000] s[10..6] [001111].
+  kMipsAddiu,      // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+  kMipsAddu,       // addu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
+  kMipsAnd,        // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
+  kMipsAndi,       // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
+  kMipsB,          // b o   [0001000000000000] o[15..0].
+  kMipsBal,        // bal o [0000010000010001] o[15..0].
+  // NOTE: the code tests the range kMipsBeq thru kMipsBne, so adding an instruction in this
+  // range may require updates.
+  kMipsBeq,        // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
+  kMipsBeqz,       // beqz s,o [000100] s[25..21] [00000] o[15..0].
+  kMipsBgez,       // bgez s,o [000001] s[25..21] [00001] o[15..0].
+  kMipsBgtz,       // bgtz s,o [000111] s[25..21] [00000] o[15..0].
+  kMipsBlez,       // blez s,o [000110] s[25..21] [00000] o[15..0].
+  kMipsBltz,       // bltz s,o [000001] s[25..21] [00000] o[15..0].
+  kMipsBnez,       // bnez s,o [000101] s[25..21] [00000] o[15..0].
+  kMipsBne,        // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
+  kMipsExt,        // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
+  kMipsFaddd,      // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
+  kMipsFadds,      // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
+  kMipsFsubd,      // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
+  kMipsFsubs,      // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
+  kMipsFdivd,      // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
+  kMipsFdivs,      // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
+  kMipsFmuld,      // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
+  kMipsFmuls,      // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
+  kMipsFcvtsd,     // cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
+  kMipsFcvtsw,     // cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
+  kMipsFcvtds,     // cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
+  kMipsFcvtdw,     // cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
+  kMipsFcvtwd,     // cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
+  kMipsFcvtws,     // cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
+  kMipsFmovd,      // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
+  kMipsFmovs,      // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
+  kMipsFnegd,      // neg.d d,s [01000110001] [00000] s[15..11] d[10..6] [000111].
+  kMipsFnegs,      // neg.s d,s [01000110000] [00000] s[15..11] d[10..6] [000111].
+  kMipsFldc1,      // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
+  kMipsFlwc1,      // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
+  kMipsFsdc1,      // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
+  kMipsFswc1,      // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
+  kMipsJal,        // jal t [000011] t[25..0].
+  kMipsJalr,       // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
+  kMipsJr,         // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
+  kMipsLahi,       // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
+  kMipsLalo,       // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo.
+  kMipsLui,        // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
+  kMipsLb,         // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
+  kMipsLbu,        // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
+  kMipsLh,         // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
+  kMipsLhu,        // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
+  kMipsLw,         // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
+  kMipsMove,       // move d,s [000000] s[25..21] [00000] d[15..11] [00000100101].
+  kMipsMfc1,       // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
+  kMipsMtc1,       // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
+  kMipsMfhc1,      // mfhc1 t,s [01000100011] t[20..16] s[15..11] [00000000000].
+  kMipsMthc1,      // mthc1 t,s [01000100111] t[20..16] s[15..11] [00000000000].
+  kMipsNop,        // nop [00000000000000000000000000000000].
+  kMipsNor,        // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
+  kMipsOr,         // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
+  kMipsOri,        // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+  kMipsPref,       // pref h,o(b) [101011] b[25..21] h[20..16] o[15..0].
+  kMipsSb,         // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
+  kMipsSeb,        // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
+  kMipsSeh,        // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
+  kMipsSh,         // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
+  kMipsSll,        // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
+  kMipsSllv,       // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
+  kMipsSlt,        // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
+  kMipsSlti,       // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
+  kMipsSltu,       // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
+  kMipsSra,        // sra d,s,imm5 [00000000000] t[20..16] d[15..11] imm5[10..6] [000011].
+  kMipsSrav,       // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
+  kMipsSrl,        // srl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000010].
+  kMipsSrlv,       // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
+  kMipsSubu,       // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
+  kMipsSw,         // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
+  kMipsSync,       // sync kind [000000] [0000000000000000] s[10..6] [001111].
+  kMipsXor,        // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
+  kMipsXori,       // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
 
-  // The following are mips32r6 instructions.
-  kMipsR6Div,   // div d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011010].
-  kMipsR6Mod,   // mod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011010].
-  kMipsR6Mul,   // mul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011000].
+  // The following are mips32r2 instructions.
+  kMipsR2Div,      // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
+  kMipsR2Mul,      // mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010].
+  kMipsR2Mfhi,     // mfhi d [0000000000000000] d[15..11] [00000010000].
+  kMipsR2Mflo,     // mflo d [0000000000000000] d[15..11] [00000010010].
+  kMipsR2Movz,     // movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010].
 
+  // The following are mips32r6 and mips64r6 instructions.
+  kMipsR6Div,      // div d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011010].
+  kMipsR6Mod,      // mod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011010].
+  kMipsR6Mul,      // mul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011000].
+
+  // The following are mips64r6 instructions.
+  kMips64Daddiu,   // daddiu t,s,imm16 [011001] s[25..21] t[20..16] imm16[15..0].
+  kMips64Daddu,    // daddu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101101].
+  kMips64Dahi,     // dahi s,imm16 [000001] s[25..21] [00110] imm16[15..0].
+  kMips64Dati,     // dati s,imm16 [000001] s[25..21] [11110] imm16[15..0].
+  kMips64Daui,     // daui t,s,imm16 [011101] s[25..21] t[20..16] imm16[15..0].
+  kMips64Ddiv,     // ddiv  d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011110].
+  kMips64Dmod,     // dmod  d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011110].
+  kMips64Dmul,     // dmul  d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011100].
+  kMips64Dmfc1,    // dmfc1 t,s [01000100001] t[20..16] s[15..11] [00000000000].
+  kMips64Dmtc1,    // dmtc1 t,s [01000100101] t[20..16] s[15..11] [00000000000].
+  kMips64Drotr32,  // drotr32 d,t,a [00000000001] t[20..16] d[15..11] a[10..6] [111110].
+  kMips64Dsll,     // dsll    d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111000].
+  kMips64Dsll32,   // dsll32  d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111100].
+  kMips64Dsrl,     // dsrl    d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111010].
+  kMips64Dsrl32,   // dsrl32  d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111110].
+  kMips64Dsra,     // dsra    d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111011].
+  kMips64Dsra32,   // dsra32  d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111111].
+  kMips64Dsllv,    // dsllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010100].
+  kMips64Dsrlv,    // dsrlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010110].
+  kMips64Dsrav,    // dsrav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010111].
+  kMips64Dsubu,    // dsubu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101111].
+  kMips64Ld,       // ld  t,o(b) [110111] b[25..21] t[20..16] o[15..0].
+  kMips64Lwu,      // lwu t,o(b) [100111] b[25..21] t[20..16] o[15..0].
+  kMips64Sd,       // sd t,o(b) [111111] b[25..21] t[20..16] o[15..0].
+
+  // The following are pseudoinstructions.
+  kMipsDelta,      // Pseudo for ori t, s, <label>-<label>.
+  kMipsDeltaHi,    // Pseudo for lui t, high16(<label>-<label>).
+  kMipsDeltaLo,    // Pseudo for ori t, s, low16(<label>-<label>).
+  kMipsCurrPC,     // jal to .+8 to materialize pc.
   kMipsUndefined,  // undefined [011001xxxxxxxxxxxxxxxx].
   kMipsLast
 };
@@ -503,7 +689,7 @@
   kFmtUnused,
   kFmtBitBlt,    // Bit string using end/start.
   kFmtDfp,       // Double FP reg.
-  kFmtSfp,       // Single FP reg
+  kFmtSfp,       // Single FP reg.
   kFmtBlt5_2,    // Same 5-bit field to 2 locations.
 };
 std::ostream& operator<<(std::ostream& os, const MipsEncodingKind& rhs);
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 830f63a..a94fad7 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -30,55 +30,131 @@
 
 namespace art {
 
-static constexpr RegStorage core_regs_arr[] =
-    {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0, rs_rT1, rs_rT2,
-     rs_rT3, rs_rT4, rs_rT5, rs_rT6, rs_rT7, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5,
-     rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
-static constexpr RegStorage sp_regs_arr[] =
+static constexpr RegStorage core_regs_arr_32[] =
+    {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0_32, rs_rT1_32,
+     rs_rT2_32, rs_rT3_32, rs_rT4_32, rs_rT5_32, rs_rT6_32, rs_rT7_32, rs_rS0, rs_rS1, rs_rS2,
+     rs_rS3, rs_rS4, rs_rS5, rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP,
+     rs_rRA};
+static constexpr RegStorage sp_regs_arr_32[] =
     {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
      rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
-static constexpr RegStorage dp_fr0_regs_arr[] =
+static constexpr RegStorage dp_fr0_regs_arr_32[] =
     {rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
      rs_rD7_fr0};
-static constexpr RegStorage dp_fr1_regs_arr[] =
+static constexpr RegStorage dp_fr1_regs_arr_32[] =
     {rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
      rs_rD7_fr1};
-static constexpr RegStorage reserved_regs_arr[] =
+static constexpr RegStorage reserved_regs_arr_32[] =
     {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
-static constexpr RegStorage core_temps_arr[] =
-    {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rT4,
-     rs_rT5, rs_rT6, rs_rT7, rs_rT8};
-static constexpr RegStorage sp_temps_arr[] =
+static constexpr RegStorage core_temps_arr_32[] =
+    {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0_32, rs_rT1_32, rs_rT2_32, rs_rT3_32,
+     rs_rT4_32, rs_rT5_32, rs_rT6_32, rs_rT7_32, rs_rT8};
+static constexpr RegStorage sp_temps_arr_32[] =
     {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
      rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
-static constexpr RegStorage dp_fr0_temps_arr[] =
+static constexpr RegStorage dp_fr0_temps_arr_32[] =
     {rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
      rs_rD7_fr0};
-static constexpr RegStorage dp_fr1_temps_arr[] =
+static constexpr RegStorage dp_fr1_temps_arr_32[] =
     {rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
      rs_rD7_fr1};
 
+static constexpr RegStorage core_regs_arr_64[] =
+    {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6,
+     rs_rA7, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5, rs_rS6,
+     rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
+static constexpr RegStorage core_regs_arr_64d[] =
+    {rs_rZEROd, rs_rATd, rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d,
+     rs_rA6d, rs_rA7d, rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rS0d, rs_rS1d, rs_rS2d, rs_rS3d,
+     rs_rS4d, rs_rS5d, rs_rS6d, rs_rS7d, rs_rT8d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd,
+     rs_rFPd, rs_rRAd};
+#if 0
+// TODO: f24-f31 must be saved before calls and restored after.
+static constexpr RegStorage sp_regs_arr_64[] =
+    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+     rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
+     rs_rF31};
+static constexpr RegStorage dp_regs_arr_64[] =
+    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+     rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+     rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
+     rs_rD31};
+#else
+static constexpr RegStorage sp_regs_arr_64[] =
+    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+     rs_rF21, rs_rF22, rs_rF23};
+static constexpr RegStorage dp_regs_arr_64[] =
+    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+     rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+     rs_rD21, rs_rD22, rs_rD23};
+#endif
+static constexpr RegStorage reserved_regs_arr_64[] =
+    {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
+static constexpr RegStorage reserved_regs_arr_64d[] =
+    {rs_rZEROd, rs_rATd, rs_rS0d, rs_rS1d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd, rs_rRAd};
+static constexpr RegStorage core_temps_arr_64[] =
+    {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6, rs_rA7, rs_rT0, rs_rT1,
+     rs_rT2, rs_rT3, rs_rT8};
+static constexpr RegStorage core_temps_arr_64d[] =
+    {rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d, rs_rA6d, rs_rA7d,
+     rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rT8d};
+#if 0
+// TODO: f24-f31 must be saved before calls and restored after.
+static constexpr RegStorage sp_temps_arr_64[] =
+    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+     rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
+     rs_rF31};
+static constexpr RegStorage dp_temps_arr_64[] =
+    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+     rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+     rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
+     rs_rD31};
+#else
+static constexpr RegStorage sp_temps_arr_64[] =
+    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+     rs_rF21, rs_rF22, rs_rF23};
+static constexpr RegStorage dp_temps_arr_64[] =
+    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+     rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+     rs_rD21, rs_rD22, rs_rD23};
+#endif
+
 static constexpr ArrayRef<const RegStorage> empty_pool;
-static constexpr ArrayRef<const RegStorage> core_regs(core_regs_arr);
-static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
-static constexpr ArrayRef<const RegStorage> dp_fr0_regs(dp_fr0_regs_arr);
-static constexpr ArrayRef<const RegStorage> dp_fr1_regs(dp_fr1_regs_arr);
-static constexpr ArrayRef<const RegStorage> reserved_regs(reserved_regs_arr);
-static constexpr ArrayRef<const RegStorage> core_temps(core_temps_arr);
-static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
-static constexpr ArrayRef<const RegStorage> dp_fr0_temps(dp_fr0_temps_arr);
-static constexpr ArrayRef<const RegStorage> dp_fr1_temps(dp_fr1_temps_arr);
+static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> dp_fr0_regs_32(dp_fr0_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> dp_fr1_regs_32(dp_fr1_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
+static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
+static constexpr ArrayRef<const RegStorage> dp_fr0_temps_32(dp_fr0_temps_arr_32);
+static constexpr ArrayRef<const RegStorage> dp_fr1_temps_32(dp_fr1_temps_arr_32);
+
+static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
+static constexpr ArrayRef<const RegStorage> core_regs_64d(core_regs_arr_64d);
+static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
+static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
+static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
+static constexpr ArrayRef<const RegStorage> reserved_regs_64d(reserved_regs_arr_64d);
+static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
+static constexpr ArrayRef<const RegStorage> core_temps_64d(core_temps_arr_64d);
+static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
+static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);
 
 RegLocation MipsMir2Lir::LocCReturn() {
   return mips_loc_c_return;
 }
 
 RegLocation MipsMir2Lir::LocCReturnRef() {
-  return mips_loc_c_return;
+  return cu_->target64 ? mips64_loc_c_return_ref : mips_loc_c_return;
 }
 
 RegLocation MipsMir2Lir::LocCReturnWide() {
-  return mips_loc_c_return_wide;
+  return cu_->target64 ? mips64_loc_c_return_wide : mips_loc_c_return_wide;
 }
 
 RegLocation MipsMir2Lir::LocCReturnFloat() {
@@ -86,14 +162,16 @@
 }
 
 RegLocation MipsMir2Lir::LocCReturnDouble() {
-  if (fpuIs32Bit_) {
-      return mips_loc_c_return_double_fr0;
+  if (cu_->target64) {
+    return mips64_loc_c_return_double;
+  } else if (fpuIs32Bit_) {
+    return mips_loc_c_return_double_fr0;
   } else {
-      return mips_loc_c_return_double_fr1;
+    return mips_loc_c_return_double_fr1;
   }
 }
 
-// Convert k64BitSolo into k64BitPair
+// Convert k64BitSolo into k64BitPair.
 RegStorage MipsMir2Lir::Solo64ToPair64(RegStorage reg) {
   DCHECK(reg.IsDouble());
   DCHECK_EQ(reg.GetRegNum() & 1, 0);
@@ -113,16 +191,18 @@
 
 // Return a target-dependent special register.
 RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
-  if (wide_kind == kWide) {
-      DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
-      RegStorage ret_reg = RegStorage::MakeRegPair(TargetReg(reg),
-                                       TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
-      if (!fpuIs32Bit_ && ret_reg.IsFloat()) {
-        // convert 64BitPair to 64BitSolo for 64bit FPUs.
-        RegStorage low = ret_reg.GetLow();
-        ret_reg = RegStorage::FloatSolo64(low.GetRegNum());
-      }
-      return ret_reg;
+  if (!cu_->target64 && wide_kind == kWide) {
+    DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
+    RegStorage ret_reg = RegStorage::MakeRegPair(TargetReg(reg),
+                                     TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
+    if (!fpuIs32Bit_ && ret_reg.IsFloat()) {
+      // Convert 64BitPair to 64BitSolo for 64bit FPUs.
+      RegStorage low = ret_reg.GetLow();
+      ret_reg = RegStorage::FloatSolo64(low.GetRegNum());
+    }
+    return ret_reg;
+  } else if (cu_->target64 && (wide_kind == kWide || wide_kind == kRef)) {
+    return As64BitReg(TargetReg(reg));
   } else {
     return TargetReg(reg);
   }
@@ -132,25 +212,33 @@
 RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg) {
   RegStorage res_reg;
   switch (reg) {
-    case kSelf: res_reg = rs_rMIPS_SELF; break;
-    case kSuspend: res_reg =  rs_rMIPS_SUSPEND; break;
-    case kLr: res_reg =  rs_rMIPS_LR; break;
-    case kPc: res_reg =  rs_rMIPS_PC; break;
-    case kSp: res_reg =  rs_rMIPS_SP; break;
-    case kArg0: res_reg = rs_rMIPS_ARG0; break;
-    case kArg1: res_reg = rs_rMIPS_ARG1; break;
-    case kArg2: res_reg = rs_rMIPS_ARG2; break;
-    case kArg3: res_reg = rs_rMIPS_ARG3; break;
-    case kFArg0: res_reg = rs_rMIPS_FARG0; break;
-    case kFArg1: res_reg = rs_rMIPS_FARG1; break;
-    case kFArg2: res_reg = rs_rMIPS_FARG2; break;
-    case kFArg3: res_reg = rs_rMIPS_FARG3; break;
-    case kRet0: res_reg = rs_rMIPS_RET0; break;
-    case kRet1: res_reg = rs_rMIPS_RET1; break;
-    case kInvokeTgt: res_reg = rs_rMIPS_INVOKE_TGT; break;
-    case kHiddenArg: res_reg = rs_rT0; break;
+    case kSelf: res_reg = rs_rS1; break;
+    case kSuspend: res_reg =  rs_rS0; break;
+    case kLr: res_reg =  rs_rRA; break;
+    case kPc: res_reg = RegStorage::InvalidReg(); break;
+    case kSp: res_reg =  rs_rSP; break;
+    case kArg0: res_reg = rs_rA0; break;
+    case kArg1: res_reg = rs_rA1; break;
+    case kArg2: res_reg = rs_rA2; break;
+    case kArg3: res_reg = rs_rA3; break;
+    case kArg4: res_reg = cu_->target64 ? rs_rA4 : RegStorage::InvalidReg(); break;
+    case kArg5: res_reg = cu_->target64 ? rs_rA5 : RegStorage::InvalidReg(); break;
+    case kArg6: res_reg = cu_->target64 ? rs_rA6 : RegStorage::InvalidReg(); break;
+    case kArg7: res_reg = cu_->target64 ? rs_rA7 : RegStorage::InvalidReg(); break;
+    case kFArg0: res_reg = rs_rF12; break;
+    case kFArg1: res_reg = rs_rF13; break;
+    case kFArg2: res_reg = rs_rF14; break;
+    case kFArg3: res_reg = rs_rF15; break;
+    case kFArg4: res_reg = cu_->target64 ? rs_rF16 : RegStorage::InvalidReg(); break;
+    case kFArg5: res_reg = cu_->target64 ? rs_rF17 : RegStorage::InvalidReg(); break;
+    case kFArg6: res_reg = cu_->target64 ? rs_rF18 : RegStorage::InvalidReg(); break;
+    case kFArg7: res_reg = cu_->target64 ? rs_rF19 : RegStorage::InvalidReg(); break;
+    case kRet0: res_reg = rs_rV0; break;
+    case kRet1: res_reg = rs_rV1; break;
+    case kInvokeTgt: res_reg = rs_rT9; break;
+    case kHiddenArg: res_reg = cu_->target64 ? rs_rT0 : rs_rT0_32; break;
     case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
-    case kCount: res_reg = rs_rMIPS_COUNT; break;
+    case kCount: res_reg = RegStorage::InvalidReg(); break;
     default: res_reg = RegStorage::InvalidReg();
   }
   return res_reg;
@@ -172,27 +260,54 @@
   return result;
 }
 
+RegStorage MipsMir2Lir::InToRegStorageMips64Mapper::GetNextReg(ShortyArg arg) {
+  const SpecialTargetRegister coreArgMappingToPhysicalReg[] =
+      {kArg1, kArg2, kArg3, kArg4, kArg5, kArg6, kArg7};
+  const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
+  const SpecialTargetRegister fpArgMappingToPhysicalReg[] =
+      {kFArg1, kFArg2, kFArg3, kFArg4, kFArg5, kFArg6, kFArg7};
+  const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
+
+  RegStorage result = RegStorage::InvalidReg();
+  if (arg.IsFP()) {
+    if (cur_arg_reg_ < fpArgMappingToPhysicalRegSize) {
+      DCHECK(!arg.IsRef());
+      result = m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_arg_reg_++],
+                               arg.IsWide() ? kWide : kNotWide);
+    }
+  } else {
+    if (cur_arg_reg_ < coreArgMappingToPhysicalRegSize) {
+      DCHECK(!(arg.IsWide() && arg.IsRef()));
+      result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_arg_reg_++],
+                               arg.IsRef() ? kRef : (arg.IsWide() ? kWide : kNotWide));
+    }
+  }
+  return result;
+}
+
 /*
  * Decode the register id.
  */
 ResourceMask MipsMir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
-  if (reg.IsDouble()) {
-    return ResourceMask::TwoBits((reg.GetRegNum() & ~1) + kMipsFPReg0);
-  } else if (reg.IsSingle()) {
-    return ResourceMask::Bit(reg.GetRegNum() + kMipsFPReg0);
+  if (cu_->target64) {
+    return ResourceMask::Bit((reg.IsFloat() ? kMipsFPReg0 : 0) + reg.GetRegNum());
   } else {
-    return ResourceMask::Bit(reg.GetRegNum());
+    if (reg.IsDouble()) {
+      return ResourceMask::TwoBits((reg.GetRegNum() & ~1) + kMipsFPReg0);
+    } else if (reg.IsSingle()) {
+      return ResourceMask::Bit(reg.GetRegNum() + kMipsFPReg0);
+    } else {
+      return ResourceMask::Bit(reg.GetRegNum());
+    }
   }
 }
 
 ResourceMask MipsMir2Lir::GetPCUseDefEncoding() const {
-  return ResourceMask::Bit(kMipsRegPC);
+  return cu_->target64 ? ResourceMask::Bit(kMips64RegPC) : ResourceMask::Bit(kMipsRegPC);
 }
 
-
-void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
-                                           ResourceMask* use_mask, ResourceMask* def_mask) {
-  DCHECK_EQ(cu_->instruction_set, kMips);
+void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
+                                           ResourceMask* def_mask) {
   DCHECK(!lir->flags.use_def_invalid);
 
   // Mips-specific resource map setup here.
@@ -208,20 +323,22 @@
     def_mask->SetBit(kMipsRegLR);
   }
 
-  if (flags & REG_DEF_HI) {
-    def_mask->SetBit(kMipsRegHI);
-  }
+  if (!cu_->target64) {
+    if (flags & REG_DEF_HI) {
+      def_mask->SetBit(kMipsRegHI);
+    }
 
-  if (flags & REG_DEF_LO) {
-    def_mask->SetBit(kMipsRegLO);
-  }
+    if (flags & REG_DEF_LO) {
+      def_mask->SetBit(kMipsRegLO);
+    }
 
-  if (flags & REG_USE_HI) {
-    use_mask->SetBit(kMipsRegHI);
-  }
+    if (flags & REG_USE_HI) {
+      use_mask->SetBit(kMipsRegHI);
+    }
 
-  if (flags & REG_USE_LO) {
-    use_mask->SetBit(kMipsRegLO);
+    if (flags & REG_USE_LO) {
+      use_mask->SetBit(kMipsRegLO);
+    }
   }
 }
 
@@ -234,9 +351,16 @@
   "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
 };
 
+static const char *mips64_reg_name[MIPS_REG_COUNT] = {
+  "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+  "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
+  "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+  "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
+};
+
 /*
  * Interpret a format string and build a string no longer than size
- * See format key in Assemble.c.
+ * See format key in assemble_mips.cc.
  */
 std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
   std::string buf;
@@ -311,7 +435,11 @@
             break;
           case 'r':
             DCHECK(operand >= 0 && operand < MIPS_REG_COUNT);
-            strcpy(tbuf, mips_reg_name[operand]);
+            if (cu_->target64) {
+              strcpy(tbuf, mips64_reg_name[operand]);
+            } else {
+              strcpy(tbuf, mips_reg_name[operand]);
+            }
             break;
           case 'N':
             // Placeholder for delay slot handling
@@ -330,7 +458,7 @@
   return buf;
 }
 
-// FIXME: need to redo resource maps for MIPS - fix this at that time
+// FIXME: need to redo resource maps for MIPS - fix this at that time.
 void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, const ResourceMask& mask, const char *prefix) {
   char buf[256];
   buf[0] = 0;
@@ -341,7 +469,7 @@
     char num[8];
     int i;
 
-    for (i = 0; i < kMipsRegEnd; i++) {
+    for (i = 0; i < (cu_->target64 ? kMips64RegEnd : kMipsRegEnd); i++) {
       if (mask.HasBit(i)) {
         snprintf(num, arraysize(num), "%d ", i);
         strcat(buf, num);
@@ -354,7 +482,7 @@
     if (mask.HasBit(ResourceMask::kFPStatus)) {
       strcat(buf, "fpcc ");
     }
-    /* Memory bits */
+    // Memory bits.
     if (mips_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
       snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
                DECODE_ALIAS_INFO_REG(mips_lir->flags.alias_info),
@@ -389,63 +517,114 @@
 
 /* Clobber all regs that might be used by an external C call */
 void MipsMir2Lir::ClobberCallerSave() {
-  Clobber(rs_rZERO);
-  Clobber(rs_rAT);
-  Clobber(rs_rV0);
-  Clobber(rs_rV1);
-  Clobber(rs_rA0);
-  Clobber(rs_rA1);
-  Clobber(rs_rA2);
-  Clobber(rs_rA3);
-  Clobber(rs_rT0);
-  Clobber(rs_rT1);
-  Clobber(rs_rT2);
-  Clobber(rs_rT3);
-  Clobber(rs_rT4);
-  Clobber(rs_rT5);
-  Clobber(rs_rT6);
-  Clobber(rs_rT7);
-  Clobber(rs_rT8);
-  Clobber(rs_rT9);
-  Clobber(rs_rK0);
-  Clobber(rs_rK1);
-  Clobber(rs_rGP);
-  Clobber(rs_rFP);
-  Clobber(rs_rRA);
-  Clobber(rs_rF0);
-  Clobber(rs_rF1);
-  Clobber(rs_rF2);
-  Clobber(rs_rF3);
-  Clobber(rs_rF4);
-  Clobber(rs_rF5);
-  Clobber(rs_rF6);
-  Clobber(rs_rF7);
-  Clobber(rs_rF8);
-  Clobber(rs_rF9);
-  Clobber(rs_rF10);
-  Clobber(rs_rF11);
-  Clobber(rs_rF12);
-  Clobber(rs_rF13);
-  Clobber(rs_rF14);
-  Clobber(rs_rF15);
-  if (fpuIs32Bit_) {
-    Clobber(rs_rD0_fr0);
-    Clobber(rs_rD1_fr0);
-    Clobber(rs_rD2_fr0);
-    Clobber(rs_rD3_fr0);
-    Clobber(rs_rD4_fr0);
-    Clobber(rs_rD5_fr0);
-    Clobber(rs_rD6_fr0);
-    Clobber(rs_rD7_fr0);
+  if (cu_->target64) {
+    Clobber(rs_rZEROd);
+    Clobber(rs_rATd);
+    Clobber(rs_rV0d);
+    Clobber(rs_rV1d);
+    Clobber(rs_rA0d);
+    Clobber(rs_rA1d);
+    Clobber(rs_rA2d);
+    Clobber(rs_rA3d);
+    Clobber(rs_rA4d);
+    Clobber(rs_rA5d);
+    Clobber(rs_rA6d);
+    Clobber(rs_rA7d);
+    Clobber(rs_rT0d);
+    Clobber(rs_rT1d);
+    Clobber(rs_rT2d);
+    Clobber(rs_rT3d);
+    Clobber(rs_rT8d);
+    Clobber(rs_rT9d);
+    Clobber(rs_rK0d);
+    Clobber(rs_rK1d);
+    Clobber(rs_rGPd);
+    Clobber(rs_rFPd);
+    Clobber(rs_rRAd);
+
+    Clobber(rs_rF0);
+    Clobber(rs_rF1);
+    Clobber(rs_rF2);
+    Clobber(rs_rF3);
+    Clobber(rs_rF4);
+    Clobber(rs_rF5);
+    Clobber(rs_rF6);
+    Clobber(rs_rF7);
+    Clobber(rs_rF8);
+    Clobber(rs_rF9);
+    Clobber(rs_rF10);
+    Clobber(rs_rF11);
+    Clobber(rs_rF12);
+    Clobber(rs_rF13);
+    Clobber(rs_rF14);
+    Clobber(rs_rF15);
+    Clobber(rs_rD0);
+    Clobber(rs_rD1);
+    Clobber(rs_rD2);
+    Clobber(rs_rD3);
+    Clobber(rs_rD4);
+    Clobber(rs_rD5);
+    Clobber(rs_rD6);
+    Clobber(rs_rD7);
   } else {
-    Clobber(rs_rD0_fr1);
-    Clobber(rs_rD1_fr1);
-    Clobber(rs_rD2_fr1);
-    Clobber(rs_rD3_fr1);
-    Clobber(rs_rD4_fr1);
-    Clobber(rs_rD5_fr1);
-    Clobber(rs_rD6_fr1);
-    Clobber(rs_rD7_fr1);
+    Clobber(rs_rZERO);
+    Clobber(rs_rAT);
+    Clobber(rs_rV0);
+    Clobber(rs_rV1);
+    Clobber(rs_rA0);
+    Clobber(rs_rA1);
+    Clobber(rs_rA2);
+    Clobber(rs_rA3);
+    Clobber(rs_rT0_32);
+    Clobber(rs_rT1_32);
+    Clobber(rs_rT2_32);
+    Clobber(rs_rT3_32);
+    Clobber(rs_rT4_32);
+    Clobber(rs_rT5_32);
+    Clobber(rs_rT6_32);
+    Clobber(rs_rT7_32);
+    Clobber(rs_rT8);
+    Clobber(rs_rT9);
+    Clobber(rs_rK0);
+    Clobber(rs_rK1);
+    Clobber(rs_rGP);
+    Clobber(rs_rFP);
+    Clobber(rs_rRA);
+    Clobber(rs_rF0);
+    Clobber(rs_rF1);
+    Clobber(rs_rF2);
+    Clobber(rs_rF3);
+    Clobber(rs_rF4);
+    Clobber(rs_rF5);
+    Clobber(rs_rF6);
+    Clobber(rs_rF7);
+    Clobber(rs_rF8);
+    Clobber(rs_rF9);
+    Clobber(rs_rF10);
+    Clobber(rs_rF11);
+    Clobber(rs_rF12);
+    Clobber(rs_rF13);
+    Clobber(rs_rF14);
+    Clobber(rs_rF15);
+    if (fpuIs32Bit_) {
+      Clobber(rs_rD0_fr0);
+      Clobber(rs_rD1_fr0);
+      Clobber(rs_rD2_fr0);
+      Clobber(rs_rD3_fr0);
+      Clobber(rs_rD4_fr0);
+      Clobber(rs_rD5_fr0);
+      Clobber(rs_rD6_fr0);
+      Clobber(rs_rD7_fr0);
+    } else {
+      Clobber(rs_rD0_fr1);
+      Clobber(rs_rD1_fr1);
+      Clobber(rs_rD2_fr1);
+      Clobber(rs_rD3_fr1);
+      Clobber(rs_rD4_fr1);
+      Clobber(rs_rD5_fr1);
+      Clobber(rs_rD6_fr1);
+      Clobber(rs_rD7_fr1);
+    }
   }
 }
 
@@ -463,18 +642,30 @@
 
 /* To be used when explicitly managing register use */
 void MipsMir2Lir::LockCallTemps() {
-  LockTemp(rs_rMIPS_ARG0);
-  LockTemp(rs_rMIPS_ARG1);
-  LockTemp(rs_rMIPS_ARG2);
-  LockTemp(rs_rMIPS_ARG3);
+  LockTemp(TargetReg(kArg0));
+  LockTemp(TargetReg(kArg1));
+  LockTemp(TargetReg(kArg2));
+  LockTemp(TargetReg(kArg3));
+  if (cu_->target64) {
+    LockTemp(TargetReg(kArg4));
+    LockTemp(TargetReg(kArg5));
+    LockTemp(TargetReg(kArg6));
+    LockTemp(TargetReg(kArg7));
+  }
 }
 
 /* To be used when explicitly managing register use */
 void MipsMir2Lir::FreeCallTemps() {
-  FreeTemp(rs_rMIPS_ARG0);
-  FreeTemp(rs_rMIPS_ARG1);
-  FreeTemp(rs_rMIPS_ARG2);
-  FreeTemp(rs_rMIPS_ARG3);
+  FreeTemp(TargetReg(kArg0));
+  FreeTemp(TargetReg(kArg1));
+  FreeTemp(TargetReg(kArg2));
+  FreeTemp(TargetReg(kArg3));
+  if (cu_->target64) {
+    FreeTemp(TargetReg(kArg4));
+    FreeTemp(TargetReg(kArg5));
+    FreeTemp(TargetReg(kArg6));
+    FreeTemp(TargetReg(kArg7));
+  }
   FreeTemp(TargetReg(kHiddenArg));
 }
 
@@ -488,31 +679,63 @@
 }
 
 void MipsMir2Lir::CompilerInitializeRegAlloc() {
-  reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */,
-                                            sp_regs,
-                                            fpuIs32Bit_ ? dp_fr0_regs : dp_fr1_regs,
-                                            reserved_regs, empty_pool /* reserved64 */,
-                                            core_temps, empty_pool /* core64_temps */,
-                                            sp_temps,
-                                            fpuIs32Bit_ ? dp_fr0_temps : dp_fr1_temps));
+  if (cu_->target64) {
+    reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64d, sp_regs_64,
+                                              dp_regs_64, reserved_regs_64, reserved_regs_64d,
+                                              core_temps_64, core_temps_64d, sp_temps_64,
+                                              dp_temps_64));
 
-  // Target-specific adjustments.
+    // Alias single precision floats to appropriate half of overlapping double.
+    for (RegisterInfo* info : reg_pool_->sp_regs_) {
+      int sp_reg_num = info->GetReg().GetRegNum();
+      int dp_reg_num = sp_reg_num;
+      RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+      RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+      // Double precision register's master storage should refer to itself.
+      DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+      // Redirect single precision's master storage to master.
+      info->SetMaster(dp_reg_info);
+      // Singles should show a single 32-bit mask bit, at first referring to the low half.
+      DCHECK_EQ(info->StorageMask(), 0x1U);
+    }
 
-  // Alias single precision floats to appropriate half of overlapping double.
-  for (RegisterInfo* info : reg_pool_->sp_regs_) {
-    int sp_reg_num = info->GetReg().GetRegNum();
-    int dp_reg_num = sp_reg_num & ~1;
-    RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
-    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
-    // Double precision register's master storage should refer to itself.
-    DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
-    // Redirect single precision's master storage to master.
-    info->SetMaster(dp_reg_info);
-    // Singles should show a single 32-bit mask bit, at first referring to the low half.
-    DCHECK_EQ(info->StorageMask(), 0x1U);
-    if (sp_reg_num & 1) {
-      // For odd singles, change to user the high word of the backing double.
-      info->SetStorageMask(0x2);
+    // Alias 32bit W registers to corresponding 64bit X registers.
+    for (RegisterInfo* info : reg_pool_->core_regs_) {
+      int d_reg_num = info->GetReg().GetRegNum();
+      RegStorage d_reg = RegStorage::Solo64(d_reg_num);
+      RegisterInfo* d_reg_info = GetRegInfo(d_reg);
+      // 64bit D register's master storage should refer to itself.
+      DCHECK_EQ(d_reg_info, d_reg_info->Master());
+      // Redirect 32bit master storage to 64bit D.
+      info->SetMaster(d_reg_info);
+      // 32bit should show a single 32-bit mask bit, at first referring to the low half.
+      DCHECK_EQ(info->StorageMask(), 0x1U);
+    }
+  } else {
+    reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool,  // core64
+                                              sp_regs_32,
+                                              fpuIs32Bit_ ? dp_fr0_regs_32 : dp_fr1_regs_32,
+                                              reserved_regs_32, empty_pool,  // reserved64
+                                              core_temps_32, empty_pool,  // core64_temps
+                                              sp_temps_32,
+                                              fpuIs32Bit_ ? dp_fr0_temps_32 : dp_fr1_temps_32));
+
+    // Alias single precision floats to appropriate half of overlapping double.
+    for (RegisterInfo* info : reg_pool_->sp_regs_) {
+      int sp_reg_num = info->GetReg().GetRegNum();
+      int dp_reg_num = sp_reg_num & ~1;
+      RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+      RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+      // Double precision register's master storage should refer to itself.
+      DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+      // Redirect single precision's master storage to master.
+      info->SetMaster(dp_reg_info);
+      // Singles should show a single 32-bit mask bit, at first referring to the low half.
+      DCHECK_EQ(info->StorageMask(), 0x1U);
+      if (sp_reg_num & 1) {
+        // For odd singles, change to use the high word of the backing double.
+        info->SetStorageMask(0x2);
+      }
     }
   }
 
@@ -520,7 +743,11 @@
   // TODO: adjust when we roll to hard float calling convention.
   reg_pool_->next_core_reg_ = 2;
   reg_pool_->next_sp_reg_ = 2;
-  reg_pool_->next_dp_reg_ = 2;
+  if (cu_->target64) {
+    reg_pool_->next_dp_reg_ = 1;
+  } else {
+    reg_pool_->next_dp_reg_ = 2;
+  }
 }
 
 /*
@@ -531,14 +758,24 @@
  */
 RegStorage MipsMir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
   // NOTE: native pointer.
-  LoadWordDisp(rs_rMIPS_SELF, GetThreadOffset<4>(trampoline).Int32Value(), rs_rT9);
-  return rs_rT9;
+  if (cu_->target64) {
+    LoadWordDisp(TargetPtrReg(kSelf), GetThreadOffset<8>(trampoline).Int32Value(),
+                 TargetPtrReg(kInvokeTgt));
+  } else {
+    LoadWordDisp(TargetPtrReg(kSelf), GetThreadOffset<4>(trampoline).Int32Value(),
+                 TargetPtrReg(kInvokeTgt));
+  }
+  return TargetPtrReg(kInvokeTgt);
 }
 
 LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
   RegStorage tmp = AllocTemp();
   // NOTE: native pointer.
-  LoadWordDisp(rs_rMIPS_SELF, Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
+  if (cu_->target64) {
+    LoadWordDisp(TargetPtrReg(kSelf), Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
+  } else {
+    LoadWordDisp(TargetPtrReg(kSelf), Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
+  }
   LIR *inst = LoadWordDisp(tmp, 0, tmp);
   FreeTemp(tmp);
   return inst;
@@ -546,31 +783,47 @@
 
 LIR* MipsMir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
   DCHECK(!r_dest.IsFloat());  // See RegClassForFieldLoadStore().
-  DCHECK(r_dest.IsPair());
+  if (!cu_->target64) {
+    DCHECK(r_dest.IsPair());
+  }
   ClobberCallerSave();
-  LockCallTemps();  // Using fixed registers
+  LockCallTemps();  // Using fixed registers.
   RegStorage reg_ptr = TargetReg(kArg0);
   OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
   RegStorage r_tgt = LoadHelper(kQuickA64Load);
   LIR *ret = OpReg(kOpBlx, r_tgt);
-  RegStorage reg_ret = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
-  OpRegCopyWide(r_dest, reg_ret);
+  RegStorage reg_ret;
+  if (cu_->target64) {
+    OpRegCopy(r_dest, TargetReg(kRet0));
+  } else {
+    reg_ret = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
+    OpRegCopyWide(r_dest, reg_ret);
+  }
   return ret;
 }
 
 LIR* MipsMir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
   DCHECK(!r_src.IsFloat());  // See RegClassForFieldLoadStore().
-  DCHECK(r_src.IsPair());
+  if (cu_->target64) {
+    DCHECK(!r_src.IsPair());
+  } else {
+    DCHECK(r_src.IsPair());
+  }
   ClobberCallerSave();
-  LockCallTemps();  // Using fixed registers
+  LockCallTemps();  // Using fixed registers.
   RegStorage temp_ptr = AllocTemp();
   OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
   RegStorage temp_value = AllocTempWide();
   OpRegCopyWide(temp_value, r_src);
-  RegStorage reg_ptr = TargetReg(kArg0);
-  OpRegCopy(reg_ptr, temp_ptr);
-  RegStorage reg_value = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
-  OpRegCopyWide(reg_value, temp_value);
+  if (cu_->target64) {
+    OpRegCopyWide(TargetReg(kArg0, kWide), temp_ptr);
+    OpRegCopyWide(TargetReg(kArg1, kWide), temp_value);
+  } else {
+    RegStorage reg_ptr = TargetReg(kArg0);
+    OpRegCopy(reg_ptr, temp_ptr);
+    RegStorage reg_value = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+    OpRegCopyWide(reg_value, temp_value);
+  }
   FreeTemp(temp_ptr);
   FreeTemp(temp_value);
   RegStorage r_tgt = LoadHelper(kQuickA64Store);
@@ -582,12 +835,15 @@
     return;
   }
   uint32_t mask = core_spill_mask_;
-  int offset = num_core_spills_ * 4;
-  OpRegImm(kOpSub, rs_rSP, offset);
+  int ptr_size = cu_->target64 ? 8 : 4;
+  int offset = num_core_spills_ * ptr_size;
+  const RegStorage rs_sp = TargetPtrReg(kSp);
+  OpRegImm(kOpSub, rs_sp, offset);
   for (int reg = 0; mask; mask >>= 1, reg++) {
     if (mask & 0x1) {
-      offset -= 4;
-      Store32Disp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
+      offset -= ptr_size;
+      StoreWordDisp(rs_sp, offset,
+                    cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg));
     }
   }
 }
@@ -597,14 +853,17 @@
     return;
   }
   uint32_t mask = core_spill_mask_;
-  int offset = frame_size_;
+  int offset  = frame_size_;
+  int ptr_size = cu_->target64 ? 8 : 4;
+  const RegStorage rs_sp = TargetPtrReg(kSp);
   for (int reg = 0; mask; mask >>= 1, reg++) {
     if (mask & 0x1) {
-      offset -= 4;
-      Load32Disp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
+      offset -= ptr_size;
+      LoadWordDisp(rs_sp, offset,
+                   cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg));
     }
   }
-  OpRegImm(kOpAdd, rs_rSP, frame_size_);
+  OpRegImm(kOpAdd, rs_sp, frame_size_);
 }
 
 bool MipsMir2Lir::IsUnconditionalBranch(LIR* lir) {
@@ -624,11 +883,12 @@
 }
 
 MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
-    : Mir2Lir(cu, mir_graph, arena), in_to_reg_storage_mips_mapper_(this),
-      isaIsR6_(cu->compiler_driver->GetInstructionSetFeatures()
-                 ->AsMipsInstructionSetFeatures()->IsR6()),
-      fpuIs32Bit_(cu->compiler_driver->GetInstructionSetFeatures()
-                    ->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint()) {
+    : Mir2Lir(cu, mir_graph, arena), in_to_reg_storage_mips64_mapper_(this),
+    in_to_reg_storage_mips_mapper_(this),
+    isaIsR6_(cu_->target64 ? true : cu->compiler_driver->GetInstructionSetFeatures()
+                ->AsMipsInstructionSetFeatures()->IsR6()),
+    fpuIs32Bit_(cu_->target64 ? false : cu->compiler_driver->GetInstructionSetFeatures()
+                   ->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint()) {
   for (int i = 0; i < kMipsLast; i++) {
     DCHECK_EQ(MipsMir2Lir::EncodingMap[i].opcode, i)
         << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 3b7e0ed..bf0e0fc 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -26,30 +26,70 @@
 
 namespace art {
 
-/* This file contains codegen for the MIPS32 ISA. */
+/* This file contains codegen for the Mips ISA */
 LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
   int opcode;
-  /* must be both DOUBLE or both not DOUBLE */
-  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
-  if (r_dest.IsDouble()) {
-    opcode = kMipsFmovd;
-  } else {
-    if (r_dest.IsSingle()) {
-      if (r_src.IsSingle()) {
-        opcode = kMipsFmovs;
+  if (cu_->target64) {
+    DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
+    if (r_dest.Is64Bit()) {
+      if (r_dest.IsDouble()) {
+        if (r_src.IsDouble()) {
+          opcode = kMipsFmovd;
+        } else {
+          // Note the operands are swapped for the dmtc1 instr.
+          RegStorage t_opnd = r_src;
+          r_src = r_dest;
+          r_dest = t_opnd;
+          opcode = kMips64Dmtc1;
+        }
       } else {
-        /* note the operands are swapped for the mtc1 instr */
-        RegStorage t_opnd = r_src;
-        r_src = r_dest;
-        r_dest = t_opnd;
-        opcode = kMipsMtc1;
+        DCHECK(r_src.IsDouble());
+        opcode = kMips64Dmfc1;
       }
     } else {
-      DCHECK(r_src.IsSingle());
-      opcode = kMipsMfc1;
+      if (r_dest.IsSingle()) {
+        if (r_src.IsSingle()) {
+          opcode = kMipsFmovs;
+        } else {
+          // Note the operands are swapped for the mtc1 instr.
+          RegStorage t_opnd = r_src;
+          r_src = r_dest;
+          r_dest = t_opnd;
+          opcode = kMipsMtc1;
+        }
+      } else {
+        DCHECK(r_src.IsSingle());
+        opcode = kMipsMfc1;
+      }
+    }
+  } else {
+    // Must be both DOUBLE or both not DOUBLE.
+    DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
+    if (r_dest.IsDouble()) {
+      opcode = kMipsFmovd;
+    } else {
+      if (r_dest.IsSingle()) {
+        if (r_src.IsSingle()) {
+          opcode = kMipsFmovs;
+        } else {
+          // Note the operands are swapped for the mtc1 instr.
+          RegStorage t_opnd = r_src;
+          r_src = r_dest;
+          r_dest = t_opnd;
+          opcode = kMipsMtc1;
+        }
+      } else {
+        DCHECK(r_src.IsSingle());
+        opcode = kMipsMfc1;
+      }
     }
   }
-  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_src.GetReg(), r_dest.GetReg());
+  LIR* res;
+  if (cu_->target64) {
+    res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
+  } else {
+    res = RawLIR(current_dalvik_offset_, opcode, r_src.GetReg(), r_dest.GetReg());
+  }
   if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
     res->flags.is_nop = true;
   }
@@ -95,7 +135,7 @@
     r_dest = AllocTemp();
   }
 
-  /* See if the value can be constructed cheaply */
+  // See if the value can be constructed cheaply.
   if (value == 0) {
     res = NewLIR2(kMipsMove, r_dest.GetReg(), rZERO);
   } else if (IsUint<16>(value)) {
@@ -118,6 +158,117 @@
   return res;
 }
 
+LIR* MipsMir2Lir::LoadConstantWideNoClobber(RegStorage r_dest, int64_t value) {
+  LIR* res = nullptr;
+  DCHECK(r_dest.Is64Bit());
+  RegStorage r_dest_save = r_dest;
+  int is_fp_reg = r_dest.IsFloat();
+  if (is_fp_reg) {
+    DCHECK(r_dest.IsDouble());
+    r_dest = AllocTemp();
+  }
+
+  int bit31 = (value & UINT64_C(0x80000000)) != 0;
+
+  // Loads with 1 instruction.
+  if (IsUint<16>(value)) {
+    res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
+  } else if (IsInt<16>(value)) {
+    res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, value);
+  } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
+    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
+  } else if (IsInt<32>(value)) {
+    // Loads with 2 instructions.
+    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
+    NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
+  } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
+    res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
+    NewLIR2(kMips64Dahi, r_dest.GetReg(), value >> 32);
+  } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
+    res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
+    NewLIR2(kMips64Dati, r_dest.GetReg(), value >> 48);
+  } else if ((value & 0xFFFF) == 0 && (value >> 32) >= (-32768 - bit31) &&
+             (value >> 32) <= (32767 - bit31)) {
+    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
+    NewLIR2(kMips64Dahi, r_dest.GetReg(), (value >> 32) + bit31);
+  } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
+    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
+    NewLIR2(kMips64Dati, r_dest.GetReg(), (value >> 48) + bit31);
+  } else {
+    int64_t tmp = value;
+    int shift_cnt = 0;
+    while ((tmp & 1) == 0) {
+      tmp >>= 1;
+      shift_cnt++;
+    }
+
+    if (IsUint<16>(tmp)) {
+      res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp);
+      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+              shift_cnt & 0x1F);
+    } else if (IsInt<16>(tmp)) {
+      res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
+      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+              shift_cnt & 0x1F);
+    } else if (IsInt<32>(tmp)) {
+      // Loads with 3 instructions.
+      res = NewLIR2(kMipsLui, r_dest.GetReg(), tmp >> 16);
+      NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), tmp);
+      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+              shift_cnt & 0x1F);
+    } else {
+      tmp = value >> 16;
+      shift_cnt = 16;
+      while ((tmp & 1) == 0) {
+        tmp >>= 1;
+        shift_cnt++;
+      }
+
+      if (IsUint<16>(tmp)) {
+        res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp);
+        NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+                shift_cnt & 0x1F);
+        NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
+      } else if (IsInt<16>(tmp)) {
+        res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
+        NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+                shift_cnt & 0x1F);
+        NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
+      } else {
+        // Loads with 3-4 instructions.
+        uint64_t tmp2 = value;
+        if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
+          res = NewLIR2(kMipsLui, r_dest.GetReg(), tmp2 >> 16);
+        }
+        if ((tmp2 & 0xFFFF) != 0) {
+          if (res)
+            NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), tmp2);
+          else
+            res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp2);
+        }
+        if (bit31) {
+          tmp2 += UINT64_C(0x100000000);
+        }
+        if (((tmp2 >> 32) & 0xFFFF) != 0) {
+          NewLIR2(kMips64Dahi, r_dest.GetReg(), tmp2 >> 32);
+        }
+        if (tmp2 & UINT64_C(0x800000000000)) {
+          tmp2 += UINT64_C(0x1000000000000);
+        }
+        if ((tmp2 >> 48) != 0) {
+          NewLIR2(kMips64Dati, r_dest.GetReg(), tmp2 >> 48);
+        }
+      }
+    }
+  }
+
+  if (is_fp_reg) {
+    NewLIR2(kMips64Dmtc1, r_dest.GetReg(), r_dest_save.GetReg());
+    FreeTemp(r_dest);
+  }
+  return res;
+}
+
 LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) {
   LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/);
   res->target = target;
@@ -136,57 +287,33 @@
     default:
       LOG(FATAL) << "Bad case in OpReg";
   }
-  return NewLIR2(opcode, rRA, r_dest_src.GetReg());
+  return NewLIR2(opcode, cu_->target64 ? rRAd : rRA, r_dest_src.GetReg());
 }
 
 LIR* MipsMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
-  LIR *res;
-  bool neg = (value < 0);
-  int abs_value = (neg) ? -value : value;
-  bool short_form = (abs_value & 0xff) == abs_value;
-  MipsOpCode opcode = kMipsNop;
-  switch (op) {
-    case kOpAdd:
-      return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
-      break;
-    case kOpSub:
-      return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
-      break;
-    default:
-      LOG(FATAL) << "Bad case in OpRegImm";
-      break;
-  }
-  if (short_form) {
-    res = NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
+  if ((op == kOpAdd) || (op == kOpSub)) {
+    return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
   } else {
-    RegStorage r_scratch = AllocTemp();
-    res = LoadConstant(r_scratch, value);
-    if (op == kOpCmp)
-      NewLIR2(opcode, r_dest_src1.GetReg(), r_scratch.GetReg());
-    else
-      NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_scratch.GetReg());
+    LOG(FATAL) << "Bad case in OpRegImm";
   }
-  return res;
+  UNREACHABLE();
 }
 
 LIR* MipsMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
   MipsOpCode opcode = kMipsNop;
+  bool is64bit = cu_->target64 && (r_dest.Is64Bit() || r_src1.Is64Bit() || r_src2.Is64Bit());
   switch (op) {
     case kOpAdd:
-      opcode = kMipsAddu;
+      opcode = is64bit ? kMips64Daddu : kMipsAddu;
       break;
     case kOpSub:
-      opcode = kMipsSubu;
+      opcode = is64bit ? kMips64Dsubu : kMipsSubu;
       break;
     case kOpAnd:
       opcode = kMipsAnd;
       break;
     case kOpMul:
-      if (isaIsR6_) {
-          opcode = kMipsR6Mul;
-      } else {
-          opcode = kMipsMul;
-      }
+      opcode = isaIsR6_ ? kMipsR6Mul : kMipsR2Mul;
       break;
     case kOpOr:
       opcode = kMipsOr;
@@ -195,20 +322,20 @@
       opcode = kMipsXor;
       break;
     case kOpLsl:
-      opcode = kMipsSllv;
+      opcode = is64bit ? kMips64Dsllv : kMipsSllv;
       break;
     case kOpLsr:
-      opcode = kMipsSrlv;
+      opcode = is64bit ? kMips64Dsrlv : kMipsSrlv;
       break;
     case kOpAsr:
-      opcode = kMipsSrav;
+      opcode = is64bit ? kMips64Dsrav : kMipsSrav;
       break;
     case kOpAdc:
     case kOpSbc:
       LOG(FATAL) << "No carry bit on MIPS";
       break;
     default:
-      LOG(FATAL) << "bad case in OpRegRegReg";
+      LOG(FATAL) << "Bad case in OpRegRegReg";
       break;
   }
   return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
@@ -218,36 +345,67 @@
   LIR *res;
   MipsOpCode opcode = kMipsNop;
   bool short_form = true;
+  bool is64bit = cu_->target64 && (r_dest.Is64Bit() || r_src1.Is64Bit());
 
   switch (op) {
     case kOpAdd:
       if (IS_SIMM16(value)) {
-        opcode = kMipsAddiu;
+        opcode = is64bit ? kMips64Daddiu : kMipsAddiu;
       } else {
         short_form = false;
-        opcode = kMipsAddu;
+        opcode = is64bit ? kMips64Daddu : kMipsAddu;
       }
       break;
     case kOpSub:
       if (IS_SIMM16((-value))) {
         value = -value;
-        opcode = kMipsAddiu;
+        opcode = is64bit ? kMips64Daddiu : kMipsAddiu;
       } else {
         short_form = false;
-        opcode = kMipsSubu;
+        opcode = is64bit ? kMips64Dsubu : kMipsSubu;
       }
       break;
     case kOpLsl:
-      DCHECK(value >= 0 && value <= 31);
-      opcode = kMipsSll;
+      if (is64bit) {
+        DCHECK(value >= 0 && value <= 63);
+        if (value >= 0 && value <= 31) {
+          opcode = kMips64Dsll;
+        } else {
+          opcode = kMips64Dsll32;
+          value = value - 32;
+        }
+      } else {
+        DCHECK(value >= 0 && value <= 31);
+        opcode = kMipsSll;
+      }
       break;
     case kOpLsr:
-      DCHECK(value >= 0 && value <= 31);
-      opcode = kMipsSrl;
+      if (is64bit) {
+        DCHECK(value >= 0 && value <= 63);
+        if (value >= 0 && value <= 31) {
+          opcode = kMips64Dsrl;
+        } else {
+          opcode = kMips64Dsrl32;
+          value = value - 32;
+        }
+      } else {
+        DCHECK(value >= 0 && value <= 31);
+        opcode = kMipsSrl;
+      }
       break;
     case kOpAsr:
-      DCHECK(value >= 0 && value <= 31);
-      opcode = kMipsSra;
+      if (is64bit) {
+        DCHECK(value >= 0 && value <= 63);
+        if (value >= 0 && value <= 31) {
+          opcode = kMips64Dsra;
+        } else {
+          opcode = kMips64Dsra32;
+          value = value - 32;
+        }
+      } else {
+        DCHECK(value >= 0 && value <= 31);
+        opcode = kMipsSra;
+      }
       break;
     case kOpAnd:
       if (IS_UIMM16((value))) {
@@ -275,11 +433,7 @@
       break;
     case kOpMul:
       short_form = false;
-      if (isaIsR6_) {
-          opcode = kMipsR6Mul;
-      } else {
-          opcode = kMipsMul;
-      }
+      opcode = isaIsR6_ ? kMipsR6Mul : kMipsR2Mul;
       break;
     default:
       LOG(FATAL) << "Bad case in OpRegRegImm";
@@ -293,8 +447,14 @@
       res = LoadConstant(r_dest, value);
       NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
     } else {
-      RegStorage r_scratch = AllocTemp();
-      res = LoadConstant(r_scratch, value);
+      RegStorage r_scratch;
+      if (is64bit) {
+        r_scratch = AllocTempWide();
+        res = LoadConstantWide(r_scratch, value);
+      } else {
+        r_scratch = AllocTemp();
+        res = LoadConstant(r_scratch, value);
+      }
       NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
     }
   }
@@ -311,7 +471,11 @@
     case kOpMvn:
       return NewLIR3(kMipsNor, r_dest_src1.GetReg(), r_src2.GetReg(), rZERO);
     case kOpNeg:
-      return NewLIR3(kMipsSubu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
+      if (cu_->target64 && r_dest_src1.Is64Bit()) {
+        return NewLIR3(kMips64Dsubu, r_dest_src1.GetReg(), rZEROd, r_src2.GetReg());
+      } else {
+        return NewLIR3(kMipsSubu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
+      }
     case kOpAdd:
     case kOpAnd:
     case kOpMul:
@@ -320,21 +484,29 @@
     case kOpXor:
       return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
     case kOp2Byte:
-      if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
-          ->IsMipsIsaRevGreaterThanEqual2()) {
+      if (cu_->target64) {
         res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
       } else {
-        res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
-        OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
+        if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
+            ->IsMipsIsaRevGreaterThanEqual2()) {
+          res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
+        } else {
+          res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
+          OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
+        }
       }
       return res;
     case kOp2Short:
-      if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
-          ->IsMipsIsaRevGreaterThanEqual2()) {
+      if (cu_->target64) {
         res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
       } else {
-        res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
-        OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
+        if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
+            ->IsMipsIsaRevGreaterThanEqual2()) {
+          res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
+        } else {
+          res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
+          OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
+        }
       }
       return res;
     case kOp2Char:
@@ -367,10 +539,14 @@
 
 LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
   LIR *res;
+  if (cu_->target64) {
+    res = LoadConstantWideNoClobber(r_dest, value);
+    return res;
+  }
   if (fpuIs32Bit_ || !r_dest.IsFloat()) {
     // 32bit FPU (pairs) or loading into GPR.
     if (!r_dest.IsPair()) {
-      // Form 64-bit pair
+      // Form 64-bit pair.
       r_dest = Solo64ToPair64(r_dest);
     }
     res = LoadConstantNoClobber(r_dest.GetLow(), Low32Bits(value));
@@ -393,7 +569,8 @@
   LIR *first = NULL;
   LIR *res;
   MipsOpCode opcode = kMipsNop;
-  RegStorage t_reg = AllocTemp();
+  bool is64bit = cu_->target64 && r_dest.Is64Bit();
+  RegStorage t_reg = is64bit ? AllocTempWide() : AllocTemp();
 
   if (r_dest.IsFloat()) {
     DCHECK(r_dest.IsSingle());
@@ -404,14 +581,34 @@
       size = k32;
   }
 
-  if (!scale) {
-    first = NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+  if (cu_->target64) {
+    if (!scale) {
+      if (is64bit) {
+        first = NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+      } else {
+        first = NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+      }
+    } else {
+      first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+      NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
+    }
   } else {
-    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
-    NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
+    if (!scale) {
+      first = NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+    } else {
+      first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+      NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
+    }
   }
 
   switch (size) {
+    case k64:
+      if (cu_->target64) {
+        opcode = kMips64Ld;
+      } else {
+        LOG(FATAL) << "Bad case in LoadBaseIndexed";
+      }
+      break;
     case kSingle:
       opcode = kMipsFlwc1;
       break;
@@ -440,7 +637,7 @@
   return (first) ? first : res;
 }
 
-/* store value base base + scaled index. */
+// Store value at base + scaled index.
 LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                    int scale, OpSize size) {
   LIR *first = NULL;
@@ -456,11 +653,12 @@
       size = k32;
   }
 
+  MipsOpCode add_opcode = cu_->target64 ? kMips64Daddu : kMipsAddu;
   if (!scale) {
-    first = NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+    first = NewLIR3(add_opcode, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
   } else {
     first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
-    NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
+    NewLIR3(add_opcode, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
   }
 
   switch (size) {
@@ -507,9 +705,19 @@
   switch (size) {
     case k64:
     case kDouble:
+      if (cu_->target64) {
+        r_dest = Check64BitReg(r_dest);
+        if (!r_dest.IsFloat()) {
+          opcode = kMips64Ld;
+        } else {
+          opcode = kMipsFldc1;
+        }
+        DCHECK_EQ((displacement & 0x3), 0);
+        break;
+      }
       is64bit = true;
       if (fpuIs32Bit_ && !r_dest.IsPair()) {
-        // Form 64-bit pair
+        // Form 64-bit pair.
         r_dest = Solo64ToPair64(r_dest);
       }
       short_form = IS_SIMM16_2WORD(displacement);
@@ -546,20 +754,40 @@
       LOG(FATAL) << "Bad case in LoadBaseIndexedBody";
   }
 
+  if (cu_->target64) {
+    if (short_form) {
+      load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
+    } else {
+      RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
+      res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
+      load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
+      if (r_tmp != r_dest)
+        FreeTemp(r_tmp);
+    }
+
+    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+      DCHECK_EQ(r_base, TargetPtrReg(kSp));
+      AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
+    }
+    return res;
+  }
+
   if (short_form) {
     if (!is64bit) {
       load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
     } else {
       if (fpuIs32Bit_ || !r_dest.IsFloat()) {
         DCHECK(r_dest.IsPair());
-        load = res = NewLIR3(opcode, r_dest.GetLowReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
+        load = res = NewLIR3(opcode, r_dest.GetLowReg(), displacement + LOWORD_OFFSET,
+                             r_base.GetReg());
         load2 = NewLIR3(opcode, r_dest.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
       } else {
         // Here if 64bit fpu and r_dest is a 64bit fp register.
         RegStorage r_tmp = AllocTemp();
         // FIXME: why is r_dest a 64BitPair here???
         r_dest = Fp64ToSolo32(r_dest);
-        load = res = NewLIR3(kMipsFlwc1, r_dest.GetReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
+        load = res = NewLIR3(kMipsFlwc1, r_dest.GetReg(), displacement + LOWORD_OFFSET,
+                             r_base.GetReg());
         load2 = NewLIR3(kMipsLw, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
         NewLIR2(kMipsMthc1, r_tmp.GetReg(), r_dest.GetReg());
         FreeTemp(r_tmp);
@@ -591,7 +819,7 @@
   }
 
   if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-    DCHECK_EQ(r_base, rs_rMIPS_SP);
+    DCHECK_EQ(r_base, TargetPtrReg(kSp));
     AnnotateDalvikRegAccess(load, (displacement + (is64bit ? LOWORD_OFFSET : 0)) >> 2,
                             true /* is_load */, is64bit /* is64bit */);
     if (is64bit) {
@@ -599,19 +827,21 @@
                               true /* is_load */, is64bit /* is64bit */);
     }
   }
-  return load;
+  return res;
 }
 
-LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                               OpSize size, VolatileKind is_volatile) {
-  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+                               VolatileKind is_volatile) {
+  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))
+      && (!cu_->target64 || displacement & 0x7)) {
+    // TODO: use lld/scd instructions for Mips64.
     // Do atomic 64-bit load.
     return GenAtomic64Load(r_base, displacement, r_dest);
   }
 
   // TODO: base this on target.
   if (size == kWord) {
-    size = k32;
+    size = cu_->target64 ? k64 : k32;
   }
   LIR* load;
   load = LoadBaseDispBody(r_base, displacement, r_dest, size);
@@ -624,8 +854,8 @@
 }
 
 // FIXME: don't split r_dest into 2 containers.
-LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
-                                    RegStorage r_src, OpSize size) {
+LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
+                                    OpSize size) {
   LIR *res;
   LIR *store = NULL;
   LIR *store2 = NULL;
@@ -636,9 +866,19 @@
   switch (size) {
     case k64:
     case kDouble:
+      if (cu_->target64) {
+        r_src = Check64BitReg(r_src);
+        if (!r_src.IsFloat()) {
+          opcode = kMips64Sd;
+        } else {
+          opcode = kMipsFsdc1;
+        }
+        DCHECK_EQ((displacement & 0x3), 0);
+        break;
+      }
       is64bit = true;
       if (fpuIs32Bit_ && !r_src.IsPair()) {
-        // Form 64-bit pair
+        // Form 64-bit pair.
         r_src = Solo64ToPair64(r_src);
       }
       short_form = IS_SIMM16_2WORD(displacement);
@@ -670,19 +910,38 @@
       LOG(FATAL) << "Bad case in StoreBaseDispBody";
   }
 
+  if (cu_->target64) {
+    if (short_form) {
+      store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
+    } else {
+      RegStorage r_scratch = AllocTemp();
+      res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
+      store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
+      FreeTemp(r_scratch);
+    }
+
+    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+      DCHECK_EQ(r_base, TargetPtrReg(kSp));
+      AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
+    }
+    return res;
+  }
+
   if (short_form) {
     if (!is64bit) {
       store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
     } else {
       if (fpuIs32Bit_ || !r_src.IsFloat()) {
         DCHECK(r_src.IsPair());
-        store = res = NewLIR3(opcode, r_src.GetLowReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
+        store = res = NewLIR3(opcode, r_src.GetLowReg(), displacement + LOWORD_OFFSET,
+                              r_base.GetReg());
         store2 = NewLIR3(opcode, r_src.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
       } else {
         // Here if 64bit fpu and r_src is a 64bit fp register
         RegStorage r_tmp = AllocTemp();
         r_src = Fp64ToSolo32(r_src);
-        store = res = NewLIR3(kMipsFswc1, r_src.GetReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
+        store = res = NewLIR3(kMipsFswc1, r_src.GetReg(), displacement + LOWORD_OFFSET,
+                              r_base.GetReg());
         NewLIR2(kMipsMfhc1, r_tmp.GetReg(), r_src.GetReg());
         store2 = NewLIR3(kMipsSw, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
         FreeTemp(r_tmp);
@@ -712,7 +971,7 @@
   }
 
   if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-    DCHECK_EQ(r_base, rs_rMIPS_SP);
+    DCHECK_EQ(r_base, TargetPtrReg(kSp));
     AnnotateDalvikRegAccess(store, (displacement + (is64bit ? LOWORD_OFFSET : 0)) >> 2,
                             false /* is_load */, is64bit /* is64bit */);
     if (is64bit) {
@@ -724,21 +983,23 @@
   return res;
 }
 
-LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
-                                OpSize size, VolatileKind is_volatile) {
+LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+                                VolatileKind is_volatile) {
   if (is_volatile == kVolatile) {
     // Ensure that prior accesses become visible to other threads first.
     GenMemBarrier(kAnyStore);
   }
 
   LIR* store;
-  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
+      (!cu_->target64 || displacement & 0x7))) {
+    // TODO: use lld/scd instructions for Mips64.
     // Do atomic 64-bit load.
     store = GenAtomic64Store(r_base, displacement, r_src);
   } else {
     // TODO: base this on target.
     if (size == kWord) {
-      size = k32;
+      size = cu_->target64 ? k64 : k32;
     }
     store = StoreBaseDispBody(r_base, displacement, r_src, size);
   }
@@ -765,7 +1026,7 @@
 }
 
 LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
-  if (IsDirectEntrypoint(trampoline)) {
+  if (!cu_->target64 && IsDirectEntrypoint(trampoline)) {
     // Reserve argument space on stack (for $a0-$a3) for
     // entrypoints that directly reference native implementations.
     // This is not safe in general, as it violates the frame size
@@ -780,4 +1041,8 @@
   return OpReg(op, r_tgt);
 }
 
+RegStorage MipsMir2Lir::AllocPtrSizeTemp(bool required) {
+  return cu_->target64 ? AllocTempWide(required) : AllocTemp(required);
+}
+
 }  // namespace art