x86 source code hack and slash

Made a pass over the compiler source to get it into a compilable
state for the x86 target.  Lots of temporary #ifdefs, but it
compiles and makes it to oatArchInit().
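
The x86 codegen sources are only pulled into the build when an
art/X86_TARGET marker file exists (see the Android.common.mk hunk
below).  A minimal sketch of the temporary #ifdef pattern used
throughout this change follows; genExample and genExistingTargetCode
are illustrative placeholders, not functions in this patch:

    // Sketch only: guard target-specific codegen behind TARGET_X86 and
    // warn instead of generating code, so compilation proceeds far
    // enough to reach oatArchInit().
    void genExample(CompilationUnit* cUnit, MIR* mir)
    {
    #if defined(TARGET_X86)
        UNIMPLEMENTED(WARNING) << "genExample";
    #else
        // Existing ARM/MIPS codegen remains in place on this path.
        genExistingTargetCode(cUnit, mir);  // hypothetical stand-in
    #endif
    }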

Change-Id: Ib8bcd2a032e47dcb83430dbc479a29758e084359
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 7680f06..0dcfd82 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -28,6 +28,13 @@
 ART_MIPS_TARGET := false
 endif
 
+# Build for x86 target (temporary)
+ifneq ($(wildcard art/X86_TARGET),)
+ART_X86_TARGET := true
+else
+ART_X86_TARGET := false
+endif
+
 ifeq ($(ART_USE_LLVM_COMPILER),true)
 LLVM_ROOT_PATH := external/llvm
 include $(LLVM_ROOT_PATH)/llvm.mk
@@ -248,6 +255,13 @@
 	src/compiler/codegen/mips/Assemble.cc \
 	src/compiler/codegen/mips/mips/Codegen.cc
 else
+ifeq ($(ART_X86_TARGET),true)
+LIBART_COMMON_SRC_FILES += \
+	src/compiler/codegen/x86/ArchUtility.cc \
+	src/compiler/codegen/x86/X86RallocUtil.cc \
+	src/compiler/codegen/x86/Assemble.cc \
+	src/compiler/codegen/x86/x86/Codegen.cc
+else
 LIBART_COMMON_SRC_FILES += \
 	src/compiler/codegen/arm/ArchUtility.cc \
 	src/compiler/codegen/arm/ArmRallocUtil.cc \
@@ -255,6 +269,7 @@
 	src/compiler/codegen/arm/armv7-a/Codegen.cc
 endif
 endif
+endif
 
 LIBART_TARGET_SRC_FILES := \
 	$(LIBART_COMMON_SRC_FILES) \
diff --git a/src/compiler/codegen/CodegenFactory.cc b/src/compiler/codegen/CodegenFactory.cc
index e467ea0..b27efe0 100644
--- a/src/compiler/codegen/CodegenFactory.cc
+++ b/src/compiler/codegen/CodegenFactory.cc
@@ -247,6 +247,9 @@
  */
 void markGCCard(CompilationUnit* cUnit, int valReg, int tgtAddrReg)
 {
+#if defined(TARGET_X86)
+    UNIMPLEMENTED(WARNING) << "markGCCard";
+#else
     int regCardBase = oatAllocTemp(cUnit);
     int regCardNo = oatAllocTemp(cUnit);
     LIR* branchOver = opCmpImmBranch(cUnit, kCondEq, valReg, 0, NULL);
@@ -259,6 +262,7 @@
     branchOver->target = (LIR*)target;
     oatFreeTemp(cUnit, regCardBase);
     oatFreeTemp(cUnit, regCardNo);
+#endif
 }
 
 /*
diff --git a/src/compiler/codegen/CodegenUtil.cc b/src/compiler/codegen/CodegenUtil.cc
index 7f80311..9186e23 100644
--- a/src/compiler/codegen/CodegenUtil.cc
+++ b/src/compiler/codegen/CodegenUtil.cc
@@ -145,9 +145,11 @@
         lir->defMask |= ENCODE_REG_SP;
     }
 
+#if !defined(TARGET_X86)
     if (flags & REG_DEF_LR) {
         lir->defMask |= ENCODE_REG_LR;
     }
+#endif
 
     if (flags & REG_DEF_LIST0) {
         lir->defMask |= ENCODE_REG_LIST(lir->operands[0]);
@@ -190,9 +192,11 @@
         }
     }
 
+#if defined(TARGET_ARM)
     if (flags & REG_USE_PC) {
         lir->useMask |= ENCODE_REG_PC;
     }
+#endif
 
     if (flags & REG_USE_SP) {
         lir->useMask |= ENCODE_REG_SP;
diff --git a/src/compiler/codegen/GenCommon.cc b/src/compiler/codegen/GenCommon.cc
index f33b374..1c4e6c8 100644
--- a/src/compiler/codegen/GenCommon.cc
+++ b/src/compiler/codegen/GenCommon.cc
@@ -365,6 +365,9 @@
 void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc,
              bool isLongOrDouble, bool isObject)
 {
+#if defined(TARGET_X86)
+    UNIMPLEMENTED(WARNING) << "genSput";
+#else
     int fieldOffset;
     int ssbIndex;
     bool isVolatile;
@@ -464,6 +467,7 @@
         }
         callRuntimeHelper(cUnit, rTgt);
     }
+#endif
 }
 
 void genSget(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
@@ -572,11 +576,15 @@
 // Debugging routine - if null target, branch to DebugMe
 void genShowTarget(CompilationUnit* cUnit)
 {
+#if defined(TARGET_X86)
+    UNIMPLEMENTED(WARNING) << "genShowTarget";
+#else
     LIR* branchOver = opCmpImmBranch(cUnit, kCondNe, rINVOKE_TGT, 0, NULL);
     loadWordDisp(cUnit, rSELF,
                  OFFSETOF_MEMBER(Thread, pDebugMe), rINVOKE_TGT);
     LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
     branchOver->target = (LIR*)target;
+#endif
 }
 
 void genThrowVerificationError(CompilationUnit* cUnit, MIR* mir)
@@ -590,6 +598,9 @@
 
 void handleSuspendLaunchpads(CompilationUnit *cUnit)
 {
+#if defined(TARGET_X86)
+    UNIMPLEMENTED(WARNING);
+#else
     LIR** suspendLabel =
         (LIR **) cUnit->suspendLaunchpads.elemList;
     int numElems = cUnit->suspendLaunchpads.numUsed;
@@ -617,6 +628,7 @@
         }
         opUnconditionalBranch(cUnit, resumeLab);
     }
+#endif
 }
 
 void handleThrowLaunchpads(CompilationUnit *cUnit)
@@ -763,6 +775,9 @@
 void genIPut(CompilationUnit* cUnit, MIR* mir, OpSize size, RegLocation rlSrc,
              RegLocation rlObj, bool isLongOrDouble, bool isObject)
 {
+#if defined(TARGET_X86)
+    UNIMPLEMENTED(WARNING);
+#else
     int fieldOffset;
     bool isVolatile;
     uint32_t fieldIdx = mir->dalvikInsn.vC;
@@ -817,6 +832,7 @@
         loadConstant(cUnit, rARG0, fieldIdx);
         callRuntimeHelper(cUnit, rTgt);
     }
+#endif
 }
 
 void genConstClass(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
@@ -1778,6 +1794,10 @@
 bool genArithOpLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                     RegLocation rlSrc1, RegLocation rlSrc2)
 {
+#if defined(TARGET_X86)
+    UNIMPLEMENTED(WARNING) << "genArithOpLong";
+    return false;
+#else
     RegLocation rlResult;
     OpKind firstOp = kOpBkpt;
     OpKind secondOp = kOpBkpt;
@@ -1898,6 +1918,7 @@
         storeValueWide(cUnit, rlDest, rlResult);
     }
     return false;
+#endif
 }
 
 bool genConversionCall(CompilationUnit* cUnit, MIR* mir, int funcOffset,
@@ -1984,6 +2005,11 @@
                               RegLocation rlDest, RegLocation rlSrc1,
                               RegLocation rlSrc2)
 {
+#if defined(TARGET_X86)
+    // NOTE: probably don't need the portable versions for x86
+    UNIMPLEMENTED(WARNING) << "genArithOpDoublePortable";
+    return false;
+#else
     RegLocation rlResult;
     int funcOffset;
 
@@ -2023,6 +2049,7 @@
     rlResult = oatGetReturnWide(cUnit);
     storeValueWide(cUnit, rlDest, rlResult);
     return false;
+#endif
 }
 
 bool genConversionPortable(CompilationUnit* cUnit, MIR* mir)
@@ -2077,6 +2104,9 @@
  */
 void genDebuggerUpdate(CompilationUnit* cUnit, int32_t offset)
 {
+#if defined(TARGET_X86)
+    UNIMPLEMENTED(WARNING);
+#else
     // Following DCHECK verifies that dPC is in range of single load immediate
     DCHECK((offset == DEBUGGER_METHOD_ENTRY) ||
            (offset == DEBUGGER_METHOD_EXIT) || ((offset & 0xffff) == offset));
@@ -2094,11 +2124,15 @@
     branch->target = (LIR*)target;
 #endif
     oatFreeTemp(cUnit, rARG2);
+#endif
 }
 
 /* Check if we need to check for pending suspend request */
 void genSuspendTest(CompilationUnit* cUnit, MIR* mir)
 {
+#if defined(TARGET_X86)
+    UNIMPLEMENTED(WARNING) << "genSuspendTest";
+#else
     if (NO_SUSPEND || (mir->optimizationFlags & MIR_IGNORE_SUSPEND_CHECK)) {
         return;
     }
@@ -2122,6 +2156,7 @@
                          kPseudoSuspendTarget, (intptr_t)retLab, mir->offset);
     branch->target = (LIR*)target;
     oatInsertGrowableList(cUnit, &cUnit->suspendLaunchpads, (intptr_t)target);
+#endif
 }
 
 }  // namespace art
diff --git a/src/compiler/codegen/LocalOptimizations.cc b/src/compiler/codegen/LocalOptimizations.cc
index 19b41e7..3bed72f 100644
--- a/src/compiler/codegen/LocalOptimizations.cc
+++ b/src/compiler/codegen/LocalOptimizations.cc
@@ -102,6 +102,10 @@
          */
         if (!(thisMemMask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
 
+// FIXME: make sure we have a branch barrier for x86
+#if defined(TARGET_X86)
+        u8 stopUseRegMask = (thisLIR->useMask) & ~ENCODE_MEM;
+#else
         /*
          * Add r15 (pc) to the resource mask to prevent this instruction
          * from sinking past branch instructions. Also take out the memory
@@ -110,6 +114,7 @@
          */
         u8 stopUseRegMask = (ENCODE_REG_PC | thisLIR->useMask) &
                             ~ENCODE_MEM;
+#endif
         u8 stopDefRegMask = thisLIR->defMask & ~ENCODE_MEM;
 
         for (checkLIR = NEXT_LIR(thisLIR);
@@ -280,6 +285,7 @@
 
         u8 stopUseAllMask = thisLIR->useMask;
 
+#if !defined(TARGET_X86)
         /*
          * Branches for null/range checks are marked with the true resource
          * bits, and loads to Dalvik registers, constant pools, and non-alias
@@ -289,6 +295,7 @@
         if (stopUseAllMask & ENCODE_HEAP_REF) {
             stopUseAllMask |= ENCODE_REG_PC;
         }
+#endif
 
         /* Similar as above, but just check for pure register dependency */
         u8 stopUseRegMask = stopUseAllMask & ~ENCODE_MEM;
diff --git a/src/compiler/codegen/MethodCodegenDriver.cc b/src/compiler/codegen/MethodCodegenDriver.cc
index 42dae0f..f25c7e5 100644
--- a/src/compiler/codegen/MethodCodegenDriver.cc
+++ b/src/compiler/codegen/MethodCodegenDriver.cc
@@ -43,6 +43,9 @@
 
 void genInvoke(CompilationUnit* cUnit, MIR* mir, InvokeType type, bool isRange)
 {
+#if defined(TARGET_X86)
+    UNIMPLEMENTED(WARNING) << "genInvoke";
+#else
     DecodedInstruction* dInsn = &mir->dalvikInsn;
     int callState = 0;
     LIR* nullCk;
@@ -104,6 +107,7 @@
     }
     opReg(cUnit, kOpBlx, rINVOKE_TGT);
     oatClobberCalleeSave(cUnit);
+#endif
 }
 
 /*
@@ -158,6 +162,9 @@
             break;
 
         case OP_MOVE_EXCEPTION:
+#if defined(TARGET_X86)
+            UNIMPLEMENTED(WARNING) << "OP_MOVE_EXCEPTION";
+#else
             int exOffset;
             int resetReg;
             exOffset = Thread::ExceptionOffset().Int32Value();
@@ -167,6 +174,7 @@
             loadConstant(cUnit, resetReg, 0);
             storeWordDisp(cUnit, rSELF, exOffset, resetReg);
             storeValue(cUnit, rlDest, rlResult);
+#endif
             break;
 
         case OP_RETURN_VOID:
diff --git a/src/compiler/codegen/x86/ArchFactory.cc b/src/compiler/codegen/x86/ArchFactory.cc
index 76f7c4a..1bf0a72 100644
--- a/src/compiler/codegen/x86/ArchFactory.cc
+++ b/src/compiler/codegen/x86/ArchFactory.cc
@@ -43,7 +43,7 @@
     opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc2.lowReg, rlSrc1.lowReg);
     int tReg = oatAllocTemp(cUnit);
     opRegRegReg(cUnit, kOpAdd, tReg, rlSrc2.highReg, rlSrc1.highReg);
-    newLIR3(cUnit, kMipsSltu, rlResult.highReg, rlResult.lowReg, rlSrc2.lowReg);
+    newLIR3(cUnit, kX86Sltu, rlResult.highReg, rlResult.lowReg, rlSrc2.lowReg);
     opRegRegReg(cUnit, kOpAdd, rlResult.highReg, rlResult.highReg, tReg);
     oatFreeTemp(cUnit, tReg);
     storeValueWide(cUnit, rlDest, rlResult);
@@ -70,7 +70,7 @@
     opRegRegReg(cUnit, kOpSub, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
     opRegRegReg(cUnit, kOpSub, rlResult.highReg, rlSrc1.highReg, rlSrc2.highReg);
     int tReg = oatAllocTemp(cUnit);
-    newLIR3(cUnit, kMipsSltu, tReg, rlSrc1.lowReg, rlResult.lowReg);
+    newLIR3(cUnit, kX86Sltu, tReg, rlSrc1.lowReg, rlResult.lowReg);
     opRegRegReg(cUnit, kOpSub, rlResult.highReg, rlResult.highReg, tReg);
     oatFreeTemp(cUnit, tReg);
     storeValueWide(cUnit, rlDest, rlResult);
@@ -96,7 +96,7 @@
     opRegReg(cUnit, kOpNeg, rlResult.lowReg, rlSrc.lowReg);
     opRegReg(cUnit, kOpNeg, rlResult.highReg, rlSrc.highReg);
     int tReg = oatAllocTemp(cUnit);
-    newLIR3(cUnit, kMipsSltu, tReg, r_ZERO, rlResult.lowReg);
+    newLIR3(cUnit, kX86Sltu, tReg, r_ZERO, rlResult.lowReg);
     opRegRegReg(cUnit, kOpSub, rlResult.highReg, rlResult.highReg, tReg);
     oatFreeTemp(cUnit, tReg);
     storeValueWide(cUnit, rlDest, rlResult);
@@ -108,15 +108,19 @@
 
 /*
 * In the Arm code it is typical to use the link register
- * to hold the target address.  However, for Mips we must
+ * to hold the target address.  However, for X86 we must
  * ensure that all branch instructions can be restarted if
  * there is a trap in the shadow.  Allocate a temp register.
  */
 int loadHelper(CompilationUnit* cUnit, int offset)
 {
+    UNIMPLEMENTED(WARNING);
+    return 0;
+#if 0
     int tReg = oatAllocTemp(cUnit);
     loadWordDisp(cUnit, rSELF, offset, tReg);
     return tReg;
+#endif
 }
 
 void spillCoreRegs(CompilationUnit* cUnit)
@@ -256,7 +260,7 @@
          thisLIR = NEXT_LIR(thisLIR)) {
 
         /* Branch to the next instruction */
-        if (thisLIR->opcode == kMipsB) {
+        if (thisLIR->opcode == kX86B) {
             LIR* nextLIR = thisLIR;
 
             while (true) {
diff --git a/src/compiler/codegen/x86/ArchUtility.cc b/src/compiler/codegen/x86/ArchUtility.cc
index 18aa9f4..23cee14 100644
--- a/src/compiler/codegen/x86/ArchUtility.cc
+++ b/src/compiler/codegen/x86/ArchUtility.cc
@@ -35,9 +35,6 @@
  */
 std::string buildInsnString(const char *fmt, LIR *lir, unsigned char* baseAddr)
 {
-    UNIMPLEMENTED(WARNING) << "buildInsnString";
-    return NULL;
-#if 0
     std::string buf;
     int i;
     const char *fmtEnd = &fmt[strlen(fmt)];
@@ -112,8 +109,8 @@
                        strcpy(tbuf, "see above");
                        break;
                    case 'r':
-                       DCHECK(operand >= 0 && operand < MIPS_REG_COUNT);
-                       strcpy(tbuf, mipsRegName[operand]);
+                       DCHECK(operand >= 0 && operand < X86_REG_COUNT);
+                       strcpy(tbuf, x86RegName[operand]);
                        break;
                    case 'N':
                        // Placeholder for delay slot handling
@@ -130,16 +127,13 @@
         }
     }
     return buf;
-#endif
 }
 
 void oatDumpResourceMask(LIR *lir, u8 mask, const char *prefix)
 {
-    UNIMPLEMENTED(WARNING) << "oatDumpResourceMasks";
-#if 0
     char buf[256];
     buf[0] = 0;
-    LIR *mipsLIR = (LIR *) lir;
+    LIR *x86LIR = (LIR *) lir;
 
     if (mask == ENCODE_ALL) {
         strcpy(buf, "all");
@@ -157,13 +151,10 @@
         if (mask & ENCODE_CCODE) {
             strcat(buf, "cc ");
         }
-        if (mask & ENCODE_FP_STATUS) {
-            strcat(buf, "fpcc ");
-        }
         /* Memory bits */
-        if (mipsLIR && (mask & ENCODE_DALVIK_REG)) {
-            sprintf(buf + strlen(buf), "dr%d%s", mipsLIR->aliasInfo & 0xffff,
-                    (mipsLIR->aliasInfo & 0x80000000) ? "(+1)" : "");
+        if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
+            sprintf(buf + strlen(buf), "dr%d%s", x86LIR->aliasInfo & 0xffff,
+                    (x86LIR->aliasInfo & 0x80000000) ? "(+1)" : "");
         }
         if (mask & ENCODE_LITERAL) {
             strcat(buf, "lit ");
@@ -179,7 +170,6 @@
     if (buf[0]) {
         LOG(INFO) << prefix << ": " <<  buf;
     }
-#endif
 }
 
 } // namespace art
diff --git a/src/compiler/codegen/x86/Assemble.cc b/src/compiler/codegen/x86/Assemble.cc
index 3614dce..f098e66 100644
--- a/src/compiler/codegen/x86/Assemble.cc
+++ b/src/compiler/codegen/x86/Assemble.cc
@@ -25,7 +25,7 @@
 #define MAX_ASSEMBLER_RETRIES 50
 
 /*
- * opcode: MipsOpCode enum
+ * opcode: X86OpCode enum
  * skeleton: pre-designated bit-pattern for this opcode
  * k0: key to applying ds/de
  * ds: dest start bit position
@@ -76,14 +76,14 @@
  *
  *  [!] escape.  To insert "!", use "!!"
  */
-/* NOTE: must be kept in sync with enum MipsOpcode from LIR.h */
+/* NOTE: must be kept in sync with enum X86Opcode from LIR.h */
 /*
  * TUNING: We're currently punting on the branch delay slots.  All branch
  * instructions in this map are given a size of 8, which during assembly
  * is expanded to include a nop.  This scheme should be replaced with
  * an assembler pass to fill those slots when possible.
  */
-MipsEncodingMap EncodingMap[kX86Last] = {
+X86EncodingMap EncodingMap[kX86Last] = {
 };
 
 
@@ -113,7 +113,7 @@
         }
 
         if (lir->flags.pcRelFixup) {
-            if (lir->opcode == kMipsDelta) {
+            if (lir->opcode == kX86Delta) {
                 /*
                  * The "Delta" pseudo-ops load the difference between
                  * two pc-relative locations into the target register
@@ -132,33 +132,33 @@
                     // Fits
                     lir->operands[1] = delta;
                 } else {
-                    // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair
+                    // Doesn't fit - must expand to kX86Delta[Hi|Lo] pair
                     LIR *newDeltaHi =
-                          rawLIR(cUnit, lir->dalvikOffset, kMipsDeltaHi,
+                          rawLIR(cUnit, lir->dalvikOffset, kX86DeltaHi,
                                  lir->operands[0], 0, lir->operands[2],
                                  lir->operands[3], lir->target);
                     oatInsertLIRBefore((LIR*)lir, (LIR*)newDeltaHi);
                     LIR *newDeltaLo =
-                          rawLIR(cUnit, lir->dalvikOffset, kMipsDeltaLo,
+                          rawLIR(cUnit, lir->dalvikOffset, kX86DeltaLo,
                                  lir->operands[0], 0, lir->operands[2],
                                  lir->operands[3], lir->target);
                     oatInsertLIRBefore((LIR*)lir, (LIR*)newDeltaLo);
                     lir->flags.isNop = true;
                     res = kRetryAll;
                 }
-            } else if (lir->opcode == kMipsDeltaLo) {
+            } else if (lir->opcode == kX86DeltaLo) {
                 int offset1 = ((LIR*)lir->operands[2])->offset;
                 SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
                 int offset2 = tabRec ? tabRec->offset : lir->target->offset;
                 int delta = offset2 - offset1;
                 lir->operands[1] = delta & 0xffff;
-            } else if (lir->opcode == kMipsDeltaHi) {
+            } else if (lir->opcode == kX86DeltaHi) {
                 int offset1 = ((LIR*)lir->operands[2])->offset;
                 SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
                 int offset2 = tabRec ? tabRec->offset : lir->target->offset;
                 int delta = offset2 - offset1;
                 lir->operands[1] = (delta >> 16) & 0xffff;
-            } else if (lir->opcode == kMipsB || lir->opcode == kMipsBal) {
+            } else if (lir->opcode == kX86B || lir->opcode == kX86Bal) {
                 LIR *targetLIR = (LIR *) lir->target;
                 intptr_t pc = lir->offset + 4;
                 intptr_t target = targetLIR->offset;
@@ -172,7 +172,7 @@
                 } else {
                     lir->operands[0] = delta >> 2;
                 }
-            } else if (lir->opcode >= kMipsBeqz && lir->opcode <= kMipsBnez) {
+            } else if (lir->opcode >= kX86Beqz && lir->opcode <= kX86Bnez) {
                 LIR *targetLIR = (LIR *) lir->target;
                 intptr_t pc = lir->offset + 4;
                 intptr_t target = targetLIR->offset;
@@ -186,7 +186,7 @@
                 } else {
                     lir->operands[1] = delta >> 2;
                 }
-            } else if (lir->opcode == kMipsBeq || lir->opcode == kMipsBne) {
+            } else if (lir->opcode == kX86Beq || lir->opcode == kX86Bne) {
                 LIR *targetLIR = (LIR *) lir->target;
                 intptr_t pc = lir->offset + 4;
                 intptr_t target = targetLIR->offset;
@@ -200,7 +200,7 @@
                 } else {
                     lir->operands[2] = delta >> 2;
                 }
-            } else if (lir->opcode == kMipsJal) {
+            } else if (lir->opcode == kX86Jal) {
                 intptr_t curPC = (startAddr + lir->offset + 4) & ~3;
                 intptr_t target = lir->operands[0];
                 /* ensure PC-region branch can be used */
@@ -209,11 +209,11 @@
                     LOG(FATAL) << "Jump target not multiple of 4: " << target;
                 }
                 lir->operands[0] =  target >> 2;
-            } else if (lir->opcode == kMipsLahi) { /* ld address hi (via lui) */
+            } else if (lir->opcode == kX86Lahi) { /* ld address hi (via lui) */
                 LIR *targetLIR = (LIR *) lir->target;
                 intptr_t target = startAddr + targetLIR->offset;
                 lir->operands[1] = target >> 16;
-            } else if (lir->opcode == kMipsLalo) { /* ld address lo (via ori) */
+            } else if (lir->opcode == kX86Lalo) { /* ld address lo (via ori) */
                 LIR *targetLIR = (LIR *) lir->target;
                 intptr_t target = startAddr + targetLIR->offset;
                 lir->operands[2] = lir->operands[2] + target;
@@ -228,7 +228,7 @@
         if (res != kSuccess) {
             continue;
         }
-        const MipsEncodingMap *encoder = &EncodingMap[lir->opcode];
+        const X86EncodingMap *encoder = &EncodingMap[lir->opcode];
         u4 bits = encoder->skeleton;
         int i;
         for (i = 0; i < 4; i++) {
@@ -276,7 +276,7 @@
         cUnit->codeBuffer.push_back(bits & 0xffff);
         // TUNING: replace with proper delay slot handling
         if (encoder->size == 8) {
-            const MipsEncodingMap *encoder = &EncodingMap[kMipsNop];
+            const X86EncodingMap *encoder = &EncodingMap[kX86Nop];
             u4 bits = encoder->skeleton;
             cUnit->codeBuffer.push_back((bits >> 16) & 0xffff);
             cUnit->codeBuffer.push_back(bits & 0xffff);
diff --git a/src/compiler/codegen/x86/FP/X86FP.cc b/src/compiler/codegen/x86/FP/X86FP.cc
index db3b928..57cde49 100644
--- a/src/compiler/codegen/x86/FP/X86FP.cc
+++ b/src/compiler/codegen/x86/FP/X86FP.cc
@@ -22,8 +22,7 @@
     UNIMPLEMENTED(WARNING) << "genArithOpFloat";
     return false;
 #if 0
-#ifdef __mips_hard_float
-    int op = kMipsNop;
+    int op = kX86Nop;
     RegLocation rlResult;
 
     /*
@@ -33,19 +32,19 @@
     switch (mir->dalvikInsn.opcode) {
         case OP_ADD_FLOAT_2ADDR:
         case OP_ADD_FLOAT:
-            op = kMipsFadds;
+            op = kX86Fadds;
             break;
         case OP_SUB_FLOAT_2ADDR:
         case OP_SUB_FLOAT:
-            op = kMipsFsubs;
+            op = kX86Fsubs;
             break;
         case OP_DIV_FLOAT_2ADDR:
         case OP_DIV_FLOAT:
-            op = kMipsFdivs;
+            op = kX86Fdivs;
             break;
         case OP_MUL_FLOAT_2ADDR:
         case OP_MUL_FLOAT:
-            op = kMipsFmuls;
+            op = kX86Fmuls;
             break;
         case OP_REM_FLOAT_2ADDR:
         case OP_REM_FLOAT:
@@ -58,14 +57,11 @@
     rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
     rlSrc2 = loadValue(cUnit, rlSrc2, kFPReg);
     rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
-    newLIR3(cUnit, (MipsOpCode)op, rlResult.lowReg, rlSrc1.lowReg,
+    newLIR3(cUnit, (X86OpCode)op, rlResult.lowReg, rlSrc1.lowReg,
                     rlSrc2.lowReg);
     storeValue(cUnit, rlDest, rlResult);
 
     return false;
-#else
-    return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
-#endif
 #endif
 }
 
@@ -76,26 +72,25 @@
     UNIMPLEMENTED(WARNING) << "genArithOpDouble";
     return false;
 #if 0
-#ifdef __mips_hard_float
-    int op = kMipsNop;
+    int op = kX86Nop;
     RegLocation rlResult;
 
     switch (mir->dalvikInsn.opcode) {
         case OP_ADD_DOUBLE_2ADDR:
         case OP_ADD_DOUBLE:
-            op = kMipsFaddd;
+            op = kX86Faddd;
             break;
         case OP_SUB_DOUBLE_2ADDR:
         case OP_SUB_DOUBLE:
-            op = kMipsFsubd;
+            op = kX86Fsubd;
             break;
         case OP_DIV_DOUBLE_2ADDR:
         case OP_DIV_DOUBLE:
-            op = kMipsFdivd;
+            op = kX86Fdivd;
             break;
         case OP_MUL_DOUBLE_2ADDR:
         case OP_MUL_DOUBLE:
-            op = kMipsFmuld;
+            op = kX86Fmuld;
             break;
         case OP_REM_DOUBLE_2ADDR:
         case OP_REM_DOUBLE:
@@ -112,14 +107,11 @@
     rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
     DCHECK(rlDest.wide);
     DCHECK(rlResult.wide);
-    newLIR3(cUnit, (MipsOpCode)op, S2D(rlResult.lowReg, rlResult.highReg),
+    newLIR3(cUnit, (X86OpCode)op, S2D(rlResult.lowReg, rlResult.highReg),
             S2D(rlSrc1.lowReg, rlSrc1.highReg),
             S2D(rlSrc2.lowReg, rlSrc2.highReg));
     storeValueWide(cUnit, rlDest, rlResult);
     return false;
-#else
-    return genArithOpDoublePortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
-#endif
 #endif
 }
 
@@ -128,35 +120,34 @@
     UNIMPLEMENTED(WARNING) << "genConversion";
     return false;
 #if 0
-#ifdef __mips_hard_float
     Opcode opcode = mir->dalvikInsn.opcode;
     bool longSrc = false;
     bool longDest = false;
     RegLocation rlSrc;
     RegLocation rlDest;
-    int op = kMipsNop;
+    int op = kX86Nop;
     int srcReg;
     RegLocation rlResult;
     switch (opcode) {
         case OP_INT_TO_FLOAT:
             longSrc = false;
             longDest = false;
-            op = kMipsFcvtsw;
+            op = kX86Fcvtsw;
             break;
         case OP_DOUBLE_TO_FLOAT:
             longSrc = true;
             longDest = false;
-            op = kMipsFcvtsd;
+            op = kX86Fcvtsd;
             break;
         case OP_FLOAT_TO_DOUBLE:
             longSrc = false;
             longDest = true;
-            op = kMipsFcvtds;
+            op = kX86Fcvtds;
             break;
         case OP_INT_TO_DOUBLE:
             longSrc = false;
             longDest = true;
-            op = kMipsFcvtdw;
+            op = kX86Fcvtdw;
             break;
         case OP_FLOAT_TO_INT:
         case OP_DOUBLE_TO_INT:
@@ -180,18 +171,15 @@
     if (longDest) {
         rlDest = oatGetDestWide(cUnit, mir, 0, 1);
         rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
-        newLIR2(cUnit, (MipsOpCode)op, S2D(rlResult.lowReg, rlResult.highReg), srcReg);
+        newLIR2(cUnit, (X86OpCode)op, S2D(rlResult.lowReg, rlResult.highReg), srcReg);
         storeValueWide(cUnit, rlDest, rlResult);
     } else {
         rlDest = oatGetDest(cUnit, mir, 0);
         rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
-        newLIR2(cUnit, (MipsOpCode)op, rlResult.lowReg, srcReg);
+        newLIR2(cUnit, (X86OpCode)op, rlResult.lowReg, srcReg);
         storeValue(cUnit, rlDest, rlResult);
     }
     return false;
-#else
-    return genConversionPortable(cUnit, mir);
-#endif
 #endif
 }
 
diff --git a/src/compiler/codegen/x86/GenInvoke.cc b/src/compiler/codegen/x86/GenInvoke.cc
new file mode 100644
index 0000000..2f095f1
--- /dev/null
+++ b/src/compiler/codegen/x86/GenInvoke.cc
@@ -0,0 +1,533 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace art {
+
+/*
+ * This source file contains "gen" codegen routines that should
+ * be applicable to most targets.  Only mid-level support utilities
+ * and "op" calls may be used here.
+ */
+
+
+/*
+ * x86 targets will likely be different enough to need their own
+ * invoke gen routines.
+ */
+typedef int (*NextCallInsn)(CompilationUnit*, MIR*, int, uint32_t dexIdx,
+                            uint32_t methodIdx);
+/*
+ * If there are any ins passed in registers that have not been promoted
+ * to a callee-save register, flush them to the frame.  Perform initial
+ * assignment of promoted arguments.
+ */
+void flushIns(CompilationUnit* cUnit)
+{
+    UNIMPLEMENTED(WARNING) << "flushIns";
+#if 0
+    if (cUnit->numIns == 0)
+        return;
+    int firstArgReg = rARG1;
+    int lastArgReg = rARG3;
+    int startVReg = cUnit->numDalvikRegisters - cUnit->numIns;
+    /*
+     * Arguments passed in registers should be flushed
+     * to their backing locations in the frame for now.
+     * Also, we need to do initial assignment for promoted
+     * arguments.  NOTE: an older version of dx had an issue
+     * in which it would reuse static method argument registers.
+     * This could result in the same Dalvik virtual register
+     * being promoted to both core and fp regs.  In those
+     * cases, copy argument to both.  This will be uncommon
+     * enough that it isn't worth attempting to optimize.
+     */
+    for (int i = 0; i < cUnit->numIns; i++) {
+        PromotionMap vMap = cUnit->promotionMap[startVReg + i];
+        if (i <= (lastArgReg - firstArgReg)) {
+            // If arriving in register
+            if (vMap.coreLocation == kLocPhysReg) {
+                opRegCopy(cUnit, vMap.coreReg, firstArgReg + i);
+            }
+            if (vMap.fpLocation == kLocPhysReg) {
+                opRegCopy(cUnit, vMap.fpReg, firstArgReg + i);
+            }
+            // Also put a copy in memory in case we're partially promoted
+            storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
+                          firstArgReg + i, kWord);
+        } else {
+            // If arriving in frame & promoted
+            if (vMap.coreLocation == kLocPhysReg) {
+                loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
+                             vMap.coreReg);
+            }
+            if (vMap.fpLocation == kLocPhysReg) {
+                loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
+                             vMap.fpReg);
+            }
+        }
+    }
+#endif
+}
+
+/*
+ * Bit of a hack here - in lieu of a real scheduling pass,
+ * emit the next instruction in static & direct invoke sequences.
+ */
+int nextSDCallInsn(CompilationUnit* cUnit, MIR* mir,
+                   int state, uint32_t dexIdx, uint32_t unused)
+{
+    UNIMPLEMENTED(WARNING) << "nextSDCallInsn";
+    return 0;
+#if 0
+    switch(state) {
+        case 0:  // Get the current Method* [sets rARG0]
+            loadCurrMethodDirect(cUnit, rARG0);
+            break;
+        case 1:  // Get method->dex_cache_resolved_methods_
+            loadWordDisp(cUnit, rARG0,
+                Method::DexCacheResolvedMethodsOffset().Int32Value(),
+                rARG0);
+            break;
+        case 2:  // Grab target method*
+            loadWordDisp(cUnit, rARG0,
+                Array::DataOffset(sizeof(Object*)).Int32Value() + dexIdx * 4,
+                rARG0);
+            break;
+        case 3:  // Grab the code from the method*
+            loadWordDisp(cUnit, rARG0, Method::GetCodeOffset().Int32Value(),
+                         rINVOKE_TGT);
+            break;
+        default:
+            return -1;
+    }
+    return state + 1;
+#endif
+}
+
+/*
+ * Bit of a hack here - in lieu of a real scheduling pass,
+ * emit the next instruction in a virtual invoke sequence.
+ * We can use rLR as a temp prior to target address loading.
+ * Note also that we'll load the first argument ("this") into
+ * rARG1 here rather than the standard loadArgRegs.
+ */
+int nextVCallInsn(CompilationUnit* cUnit, MIR* mir,
+                  int state, uint32_t dexIdx, uint32_t methodIdx)
+{
+    UNIMPLEMENTED(WARNING) << "nextVCallInsn";
+    return 0;
+#if 0
+    RegLocation rlArg;
+    /*
+     * This is the fast path in which the target virtual method is
+     * fully resolved at compile time.
+     */
+    switch(state) {
+        case 0:  // Get "this" [set rARG1]
+            rlArg = oatGetSrc(cUnit, mir, 0);
+            loadValueDirectFixed(cUnit, rlArg, rARG1);
+            break;
+        case 1: // Is "this" null? [use rARG1]
+            genNullCheck(cUnit, oatSSASrc(mir,0), rARG1, mir);
+            // get this->klass_ [use rARG1, set rINVOKE_TGT]
+            loadWordDisp(cUnit, rARG1, Object::ClassOffset().Int32Value(),
+                         rINVOKE_TGT);
+            break;
+        case 2: // Get this->klass_->vtable [use rINVOKE_TGT, set rINVOKE_TGT]
+            loadWordDisp(cUnit, rINVOKE_TGT, Class::VTableOffset().Int32Value(),
+                         rINVOKE_TGT);
+            break;
+        case 3: // Get target method [use rINVOKE_TGT, set rARG0]
+            loadWordDisp(cUnit, rINVOKE_TGT, (methodIdx * 4) +
+                         Array::DataOffset(sizeof(Object*)).Int32Value(),
+                         rARG0);
+            break;
+        case 4: // Get the compiled code address [uses rARG0, sets rINVOKE_TGT]
+            loadWordDisp(cUnit, rARG0, Method::GetCodeOffset().Int32Value(),
+                         rINVOKE_TGT);
+            break;
+        default:
+            return -1;
+    }
+    return state + 1;
+#endif
+}
+
+/*
+ * Interleave launch code for INVOKE_SUPER.  See comments
+ * for nextVCallInsn.
+ */
+int nextSuperCallInsn(CompilationUnit* cUnit, MIR* mir,
+                      int state, uint32_t dexIdx, uint32_t methodIdx)
+{
+    UNIMPLEMENTED(WARNING) << "nextSuperCallInsn";
+    return 0;
+#if 0
+    /*
+     * This is the fast path in which the target virtual method is
+     * fully resolved at compile time.  Note also that this path assumes
+     * that the check to verify that the target method index falls
+     * within the size of the super's vtable has been done at compile-time.
+     */
+    RegLocation rlArg;
+    switch(state) {
+        case 0: // Get current Method* [set rARG0]
+            loadCurrMethodDirect(cUnit, rARG0);
+            // Load "this" [set rARG1]
+            rlArg = oatGetSrc(cUnit, mir, 0);
+            loadValueDirectFixed(cUnit, rlArg, rARG1);
+            // Get method->declaring_class_ [use rARG0, set rINVOKE_TGT]
+            loadWordDisp(cUnit, rARG0,
+                         Method::DeclaringClassOffset().Int32Value(),
+                         rINVOKE_TGT);
+            // Is "this" null? [use rARG1]
+            genNullCheck(cUnit, oatSSASrc(mir,0), rARG1, mir);
+            break;
+        case 1: // method->declaring_class_->super_class [use/set rINVOKE_TGT]
+            loadWordDisp(cUnit, rINVOKE_TGT,
+                         Class::SuperClassOffset().Int32Value(), rINVOKE_TGT);
+            break;
+        case 2: // Get ...->super_class_->vtable [u/s rINVOKE_TGT]
+            loadWordDisp(cUnit, rINVOKE_TGT,
+                         Class::VTableOffset().Int32Value(), rINVOKE_TGT);
+            break;
+        case 3: // Get target method [use rINVOKE_TGT, set rARG0]
+            loadWordDisp(cUnit, rINVOKE_TGT, (methodIdx * 4) +
+                         Array::DataOffset(sizeof(Object*)).Int32Value(),
+                         rARG0);
+            break;
+        case 4: // target compiled code address [uses rARG0, sets rINVOKE_TGT]
+            loadWordDisp(cUnit, rARG0, Method::GetCodeOffset().Int32Value(),
+                         rINVOKE_TGT);
+            break;
+        default:
+            return -1;
+    }
+    return state + 1;
+#endif
+}
+
+int nextInvokeInsnSP(CompilationUnit* cUnit, MIR* mir, int trampoline,
+                     int state, uint32_t dexIdx, uint32_t methodIdx)
+{
+    UNIMPLEMENTED(WARNING) << "nextInvokeInsnSP";
+    return 0;
+#if 0
+    /*
+     * This handles the case in which the base method is not fully
+     * resolved at compile time; we bail to a runtime helper.
+     */
+    if (state == 0) {
+        // Load trampoline target
+        loadWordDisp(cUnit, rSELF, trampoline, rINVOKE_TGT);
+        // Load rARG0 with method index
+        loadConstant(cUnit, rARG0, dexIdx);
+        return 1;
+    }
+    return -1;
+#endif
+}
+
+int nextStaticCallInsnSP(CompilationUnit* cUnit, MIR* mir,
+                         int state, uint32_t dexIdx, uint32_t methodIdx)
+{
+  int trampoline = OFFSETOF_MEMBER(Thread, pInvokeStaticTrampolineWithAccessCheck);
+  return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
+}
+
+int nextDirectCallInsnSP(CompilationUnit* cUnit, MIR* mir, int state,
+                         uint32_t dexIdx, uint32_t methodIdx)
+{
+  int trampoline = OFFSETOF_MEMBER(Thread, pInvokeDirectTrampolineWithAccessCheck);
+  return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
+}
+
+int nextSuperCallInsnSP(CompilationUnit* cUnit, MIR* mir, int state,
+                        uint32_t dexIdx, uint32_t methodIdx)
+{
+  int trampoline = OFFSETOF_MEMBER(Thread, pInvokeSuperTrampolineWithAccessCheck);
+  return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
+}
+
+int nextVCallInsnSP(CompilationUnit* cUnit, MIR* mir, int state,
+                    uint32_t dexIdx, uint32_t methodIdx)
+{
+  int trampoline = OFFSETOF_MEMBER(Thread, pInvokeVirtualTrampolineWithAccessCheck);
+  return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
+}
+
+/*
+ * All invoke-interface calls bounce off of art_invoke_interface_trampoline,
+ * which will locate the target and continue on via a tail call.
+ */
+int nextInterfaceCallInsn(CompilationUnit* cUnit, MIR* mir, int state,
+                          uint32_t dexIdx, uint32_t unused)
+{
+  int trampoline = OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampoline);
+  return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
+}
+
+int nextInterfaceCallInsnWithAccessCheck(CompilationUnit* cUnit, MIR* mir,
+                                         int state, uint32_t dexIdx,
+                                         uint32_t unused)
+{
+  int trampoline = OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampolineWithAccessCheck);
+  return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
+}
+
+int loadArgRegs(CompilationUnit* cUnit, MIR* mir, DecodedInstruction* dInsn,
+                int callState, NextCallInsn nextCallInsn, uint32_t dexIdx,
+                uint32_t methodIdx, bool skipThis)
+{
+    UNIMPLEMENTED(WARNING) << "loadArgRegs";
+    return 0;
+#if 0
+    int nextReg = rARG1;
+    int nextArg = 0;
+    if (skipThis) {
+        nextReg++;
+        nextArg++;
+    }
+    for (; (nextReg <= rARG3) && (nextArg < mir->ssaRep->numUses); nextReg++) {
+        RegLocation rlArg = oatGetRawSrc(cUnit, mir, nextArg++);
+        rlArg = oatUpdateRawLoc(cUnit, rlArg);
+        if (rlArg.wide && (nextReg <= rARG2)) {
+            loadValueDirectWideFixed(cUnit, rlArg, nextReg, nextReg + 1);
+            nextReg++;
+            nextArg++;
+        } else {
+            rlArg.wide = false;
+            loadValueDirectFixed(cUnit, rlArg, nextReg);
+        }
+        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
+    }
+    return callState;
+#endif
+}
+
+/*
+ * Load up to 5 arguments, the first three of which will be in
+ * rARG1 .. rARG3.  On entry rARG0 contains the current method pointer,
+ * and as part of the load sequence, it must be replaced with
+ * the target method pointer.  Note, this may also be called
+ * for "range" variants if the number of arguments is 5 or fewer.
+ */
+int genDalvikArgsNoRange(CompilationUnit* cUnit, MIR* mir,
+                         DecodedInstruction* dInsn, int callState,
+                         LIR** pcrLabel, NextCallInsn nextCallInsn,
+                         uint32_t dexIdx, uint32_t methodIdx, bool skipThis)
+{
+    UNIMPLEMENTED(WARNING) << "genDalvikArgsNoRange";
+    return 0;
+#if 0
+    RegLocation rlArg;
+
+    /* If no arguments, just return */
+    if (dInsn->vA == 0)
+        return callState;
+
+    callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
+
+    DCHECK_LE(dInsn->vA, 5U);
+    if (dInsn->vA > 3) {
+        uint32_t nextUse = 3;
+        //Detect special case of wide arg spanning arg3/arg4
+        RegLocation rlUse0 = oatGetRawSrc(cUnit, mir, 0);
+        RegLocation rlUse1 = oatGetRawSrc(cUnit, mir, 1);
+        RegLocation rlUse2 = oatGetRawSrc(cUnit, mir, 2);
+        if (((!rlUse0.wide && !rlUse1.wide) || rlUse0.wide) &&
+            rlUse2.wide) {
+            int reg;
+            // Wide spans, we need the 2nd half of uses[2].
+            rlArg = oatUpdateLocWide(cUnit, rlUse2);
+            if (rlArg.location == kLocPhysReg) {
+                reg = rlArg.highReg;
+            } else {
+                // rARG2 & rARG3 can safely be used here
+                reg = rARG3;
+                loadWordDisp(cUnit, rSP,
+                             oatSRegOffset(cUnit, rlArg.sRegLow) + 4, reg);
+                callState = nextCallInsn(cUnit, mir, callState, dexIdx,
+                                         methodIdx);
+            }
+            storeBaseDisp(cUnit, rSP, (nextUse + 1) * 4, reg, kWord);
+            storeBaseDisp(cUnit, rSP, 16 /* (3+1)*4 */, reg, kWord);
+            callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
+            nextUse++;
+        }
+        // Loop through the rest
+        while (nextUse < dInsn->vA) {
+            int lowReg;
+            int highReg;
+            rlArg = oatGetRawSrc(cUnit, mir, nextUse);
+            rlArg = oatUpdateRawLoc(cUnit, rlArg);
+            if (rlArg.location == kLocPhysReg) {
+                lowReg = rlArg.lowReg;
+                highReg = rlArg.highReg;
+            } else {
+                lowReg = rARG2;
+                highReg = rARG3;
+                if (rlArg.wide) {
+                    loadValueDirectWideFixed(cUnit, rlArg, lowReg, highReg);
+                } else {
+                    loadValueDirectFixed(cUnit, rlArg, lowReg);
+                }
+                callState = nextCallInsn(cUnit, mir, callState, dexIdx,
+                                         methodIdx);
+            }
+            int outsOffset = (nextUse + 1) * 4;
+            if (rlArg.wide) {
+                storeBaseDispWide(cUnit, rSP, outsOffset, lowReg, highReg);
+                nextUse += 2;
+            } else {
+                storeWordDisp(cUnit, rSP, outsOffset, lowReg);
+                nextUse++;
+            }
+            callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
+        }
+    }
+
+    callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
+                            dexIdx, methodIdx, skipThis);
+
+    if (pcrLabel) {
+        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), rARG1, mir);
+    }
+    return callState;
+#endif
+}
+
+/*
+ * May have 0+ arguments (also used for jumbo).  Note that
+ * source virtual registers may be in physical registers, so may
+ * need to be flushed to home location before copying.  This
+ * applies to arg3 and above (see below).
+ *
+ * Two general strategies:
+ *    If < 20 arguments
+ *       Pass args 3-18 using vldm/vstm block copy
+ *       Pass arg0, arg1 & arg2 in rARG1-rARG3
+ *    If 20+ arguments
+ *       Pass args arg19+ using memcpy block copy
+ *       Pass arg0, arg1 & arg2 in rARG1-rARG3
+ *
+ */
+int genDalvikArgsRange(CompilationUnit* cUnit, MIR* mir,
+                       DecodedInstruction* dInsn, int callState,
+                       LIR** pcrLabel, NextCallInsn nextCallInsn,
+                       uint32_t dexIdx, uint32_t methodIdx, bool skipThis)
+{
+    UNIMPLEMENTED(WARNING) << "genDalvikArgsRange";
+    return 0;
+#if 0
+    int firstArg = dInsn->vC;
+    int numArgs = dInsn->vA;
+
+    // If we can treat it as non-range (Jumbo ops will use range form)
+    if (numArgs <= 5)
+        return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
+                                    nextCallInsn, dexIdx, methodIdx,
+                                    skipThis);
+    /*
+     * Make sure range list doesn't span the break between normal
+     * Dalvik vRegs and the ins.
+     */
+    int highestArg = oatGetSrc(cUnit, mir, numArgs-1).sRegLow;
+    int boundaryReg = cUnit->numDalvikRegisters - cUnit->numIns;
+    if ((firstArg < boundaryReg) && (highestArg >= boundaryReg)) {
+        LOG(FATAL) << "Argument list spanned locals & args";
+    }
+
+    /*
+     * First load the non-register arguments.  Both forms expect all
+     * of the source arguments to be in their home frame location, so
+     * scan the sReg names and flush any that have been promoted to
+     * frame backing storage.
+     */
+    // Scan the rest of the args - if in physReg flush to memory
+    for (int nextArg = 0; nextArg < numArgs;) {
+        RegLocation loc = oatGetRawSrc(cUnit, mir, nextArg);
+        if (loc.wide) {
+            loc = oatUpdateLocWide(cUnit, loc);
+            if ((nextArg >= 2) && (loc.location == kLocPhysReg)) {
+                storeBaseDispWide(cUnit, rSP,
+                                  oatSRegOffset(cUnit, loc.sRegLow),
+                                  loc.lowReg, loc.highReg);
+            }
+            nextArg += 2;
+        } else {
+            loc = oatUpdateLoc(cUnit, loc);
+            if ((nextArg >= 3) && (loc.location == kLocPhysReg)) {
+                storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
+                              loc.lowReg, kWord);
+            }
+            nextArg++;
+        }
+    }
+
+    int startOffset = oatSRegOffset(cUnit,
+        cUnit->regLocation[mir->ssaRep->uses[3]].sRegLow);
+    int outsOffset = 4 /* Method* */ + (3 * 4);
+#if defined(TARGET_MIPS)
+    // Generate memcpy
+    opRegRegImm(cUnit, kOpAdd, rARG0, rSP, outsOffset);
+    opRegRegImm(cUnit, kOpAdd, rARG1, rSP, startOffset);
+    int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread, pMemcpy));
+    loadConstant(cUnit, rARG2, (numArgs - 3) * 4);
+    callRuntimeHelper(cUnit, rTgt);
+    // Restore Method*
+    loadCurrMethodDirect(cUnit, rARG0);
+#else
+    if (numArgs >= 20) {
+        // Generate memcpy
+        opRegRegImm(cUnit, kOpAdd, rARG0, rSP, outsOffset);
+        opRegRegImm(cUnit, kOpAdd, rARG1, rSP, startOffset);
+        int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread, pMemcpy));
+        loadConstant(cUnit, rARG2, (numArgs - 3) * 4);
+        callRuntimeHelper(cUnit, rTgt);
+        // Restore Method*
+        loadCurrMethodDirect(cUnit, rARG0);
+    } else {
+        // Use vldm/vstm pair using rARG3 as a temp
+        int regsLeft = std::min(numArgs - 3, 16);
+        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
+        opRegRegImm(cUnit, kOpAdd, rARG3, rSP, startOffset);
+        LIR* ld = newLIR3(cUnit, kThumb2Vldms, rARG3, fr0, regsLeft);
+        //TUNING: loosen barrier
+        ld->defMask = ENCODE_ALL;
+        setMemRefType(ld, true /* isLoad */, kDalvikReg);
+        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
+        opRegRegImm(cUnit, kOpAdd, rARG3, rSP, 4 /* Method* */ + (3 * 4));
+        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
+        LIR* st = newLIR3(cUnit, kThumb2Vstms, rARG3, fr0, regsLeft);
+        setMemRefType(st, false /* isLoad */, kDalvikReg);
+        st->defMask = ENCODE_ALL;
+        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
+    }
+#endif
+
+    callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
+                            dexIdx, methodIdx, skipThis);
+
+    callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
+    if (pcrLabel) {
+        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), rARG1, mir);
+    }
+    return callState;
+#endif
+}
+
+}  // namespace art
diff --git a/src/compiler/codegen/x86/X86/Factory.cc b/src/compiler/codegen/x86/X86/Factory.cc
index 9330021..cf8ef81 100644
--- a/src/compiler/codegen/x86/X86/Factory.cc
+++ b/src/compiler/codegen/x86/X86/Factory.cc
@@ -17,20 +17,21 @@
 namespace art {
 
 /*
- * This file contains codegen for the MIPS32 ISA and is intended to be
+ * This file contains codegen for the X86 ISA and is intended to be
  * included by:
  *
  *        Codegen-$(TARGET_ARCH_VARIANT).c
  *
  */
 
-static int coreRegs[] = {rAX, rCX, rDX, rBX, rSP, rBP, rSI, rDI}
-static int reservedRegs[] = {rSP};
-static int coreTemps[] = {rAX, rCX, rDX}
-static int fpRegs[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7, fr8, fr9,
-                       fr10, fr11, fr12, fr13, fr14, fr15}
-static int fpTemps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7, fr8, fr9,
-                        fr10, fr11, fr12, fr13, fr14, fr15}
+// FIXME: restore "static" when usage is uncovered
+/*static*/ int coreRegs[] = {rAX, rCX, rDX, rBX, rSP, rBP, rSI, rDI};
+/*static*/ int reservedRegs[] = {rSP};
+/*static*/ int coreTemps[] = {rAX, rCX, rDX};
+/*static*/ int fpRegs[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7, fr8, fr9,
+                       fr10, fr11, fr12, fr13, fr14, fr15};
+/*static*/ int fpTemps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7, fr8, fr9,
+                        fr10, fr11, fr12, fr13, fr14, fr15};
 
 void genBarrier(CompilationUnit *cUnit);
 void storePair(CompilationUnit *cUnit, int base, int lowReg,
@@ -51,21 +52,21 @@
     /* must be both DOUBLE or both not DOUBLE */
     DCHECK_EQ(DOUBLEREG(rDest),DOUBLEREG(rSrc));
     if (DOUBLEREG(rDest)) {
-        opcode = kMipsFmovd;
+        opcode = kX86Fmovd;
     } else {
         if (SINGLEREG(rDest)) {
             if (SINGLEREG(rSrc)) {
-                opcode = kMipsFmovs;
+                opcode = kX86Fmovs;
             } else {
                 /* note the operands are swapped for the mtc1 instr */
                 int tOpnd = rSrc;
                 rSrc = rDest;
                 rDest = tOpnd;
-                opcode = kMipsMtc1;
+                opcode = kX86Mtc1;
             }
         } else {
             DCHECK(SINGLEREG(rSrc));
-            opcode = kMipsMfc1;
+            opcode = kX86Mfc1;
         }
     }
     LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rSrc, rDest);
@@ -102,19 +103,19 @@
 
     /* See if the value can be constructed cheaply */
     if (value == 0) {
-        res = newLIR2(cUnit, kMipsMove, rDest, r_ZERO);
+        res = newLIR2(cUnit, kX86Move, rDest, r_ZERO);
     } else if ((value > 0) && (value <= 65535)) {
-        res = newLIR3(cUnit, kMipsOri, rDest, r_ZERO, value);
+        res = newLIR3(cUnit, kX86Ori, rDest, r_ZERO, value);
     } else if ((value < 0) && (value >= -32768)) {
-        res = newLIR3(cUnit, kMipsAddiu, rDest, r_ZERO, value);
+        res = newLIR3(cUnit, kX86Addiu, rDest, r_ZERO, value);
     } else {
-        res = newLIR2(cUnit, kMipsLui, rDest, value>>16);
+        res = newLIR2(cUnit, kX86Lui, rDest, value>>16);
         if (value & 0xffff)
-            newLIR3(cUnit, kMipsOri, rDest, rDest, value);
+            newLIR3(cUnit, kX86Ori, rDest, rDest, value);
     }
 
     if (isFpReg) {
-        newLIR2(cUnit, kMipsMtc1, rDest, rDestSave);
+        newLIR2(cUnit, kX86Mtc1, rDest, rDestSave);
         oatFreeTemp(cUnit, rDest);
     }
 
@@ -128,10 +129,10 @@
     return NULL;
 #if 0
     LIR *res;
-    MipsOpCode opcode = kMipsNop;
+    X86OpCode opcode = kX86Nop;
     switch (op) {
         case kOpUncondBr:
-            opcode = kMipsB;
+            opcode = kX86B;
             break;
         default:
             LOG(FATAL) << "Bad case in opNone";
@@ -143,18 +144,24 @@
 
 LIR *loadMultiple(CompilationUnit *cUnit, int rBase, int rMask);
 
+LIR* opCondBranch(CompilationUnit* cUnit, ConditionCode cc, LIR* target)
+{
+    UNIMPLEMENTED(WARNING) << "opCondBranch";
+    return NULL;
+}
+
 LIR *opReg(CompilationUnit *cUnit, OpKind op, int rDestSrc)
 {
     UNIMPLEMENTED(WARNING) << "opReg";
     return NULL;
 #if 0
-    MipsOpCode opcode = kMipsNop;
+    X86OpCode opcode = kX86Nop;
     switch (op) {
         case kOpBlx:
-            opcode = kMipsJalr;
+            opcode = kX86Jalr;
             break;
         case kOpBx:
-            return newLIR1(cUnit, kMipsJr, rDestSrc);
+            return newLIR1(cUnit, kX86Jr, rDestSrc);
             break;
         default:
             LOG(FATAL) << "Bad case in opReg";
@@ -175,7 +182,7 @@
     bool neg = (value < 0);
     int absValue = (neg) ? -value : value;
     bool shortForm = (absValue & 0xff) == absValue;
-    MipsOpCode opcode = kMipsNop;
+    X86OpCode opcode = kX86Nop;
     switch (op) {
         case kOpAdd:
             return opRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
@@ -207,34 +214,34 @@
     UNIMPLEMENTED(WARNING) << "opRegRegReg";
     return NULL;
 #if 0
-    MipsOpCode opcode = kMipsNop;
+    X86OpCode opcode = kX86Nop;
     switch (op) {
         case kOpAdd:
-            opcode = kMipsAddu;
+            opcode = kX86Addu;
             break;
         case kOpSub:
-            opcode = kMipsSubu;
+            opcode = kX86Subu;
             break;
         case kOpAnd:
-            opcode = kMipsAnd;
+            opcode = kX86And;
             break;
         case kOpMul:
-            opcode = kMipsMul;
+            opcode = kX86Mul;
             break;
         case kOpOr:
-            opcode = kMipsOr;
+            opcode = kX86Or;
             break;
         case kOpXor:
-            opcode = kMipsXor;
+            opcode = kX86Xor;
             break;
         case kOpLsl:
-            opcode = kMipsSllv;
+            opcode = kX86Sllv;
             break;
         case kOpLsr:
-            opcode = kMipsSrlv;
+            opcode = kX86Srlv;
             break;
         case kOpAsr:
-            opcode = kMipsSrav;
+            opcode = kX86Srav;
             break;
         case kOpAdc:
         case kOpSbc:
@@ -255,71 +262,71 @@
     return NULL;
 #if 0
     LIR *res;
-    MipsOpCode opcode = kMipsNop;
+    X86OpCode opcode = kX86Nop;
     bool shortForm = true;
 
     switch(op) {
         case kOpAdd:
             if (IS_SIMM16(value)) {
-                opcode = kMipsAddiu;
+                opcode = kX86Addiu;
             }
             else {
                 shortForm = false;
-                opcode = kMipsAddu;
+                opcode = kX86Addu;
             }
             break;
         case kOpSub:
             if (IS_SIMM16((-value))) {
                 value = -value;
-                opcode = kMipsAddiu;
+                opcode = kX86Addiu;
             }
             else {
                 shortForm = false;
-                opcode = kMipsSubu;
+                opcode = kX86Subu;
             }
             break;
         case kOpLsl:
                 DCHECK(value >= 0 && value <= 31);
-                opcode = kMipsSll;
+                opcode = kX86Sll;
                 break;
         case kOpLsr:
                 DCHECK(value >= 0 && value <= 31);
-                opcode = kMipsSrl;
+                opcode = kX86Srl;
                 break;
         case kOpAsr:
                 DCHECK(value >= 0 && value <= 31);
-                opcode = kMipsSra;
+                opcode = kX86Sra;
                 break;
         case kOpAnd:
             if (IS_UIMM16((value))) {
-                opcode = kMipsAndi;
+                opcode = kX86Andi;
             }
             else {
                 shortForm = false;
-                opcode = kMipsAnd;
+                opcode = kX86And;
             }
             break;
         case kOpOr:
             if (IS_UIMM16((value))) {
-                opcode = kMipsOri;
+                opcode = kX86Ori;
             }
             else {
                 shortForm = false;
-                opcode = kMipsOr;
+                opcode = kX86Or;
             }
             break;
         case kOpXor:
             if (IS_UIMM16((value))) {
-                opcode = kMipsXori;
+                opcode = kX86Xori;
             }
             else {
                 shortForm = false;
-                opcode = kMipsXor;
+                opcode = kX86Xor;
             }
             break;
         case kOpMul:
             shortForm = false;
-            opcode = kMipsMul;
+            opcode = kX86Mul;
             break;
         default:
             LOG(FATAL) << "Bad case in opRegRegImm";
@@ -348,16 +355,16 @@
     UNIMPLEMENTED(WARNING) << "opRegReg";
     return NULL;
 #if 0
-    MipsOpCode opcode = kMipsNop;
+    X86OpCode opcode = kX86Nop;
     LIR *res;
     switch (op) {
         case kOpMov:
-            opcode = kMipsMove;
+            opcode = kX86Move;
             break;
         case kOpMvn:
-            return newLIR3(cUnit, kMipsNor, rDestSrc1, rSrc2, r_ZERO);
+            return newLIR3(cUnit, kX86Nor, rDestSrc1, rSrc2, r_ZERO);
         case kOpNeg:
-            return newLIR3(cUnit, kMipsSubu, rDestSrc1, r_ZERO, rSrc2);
+            return newLIR3(cUnit, kX86Subu, rDestSrc1, r_ZERO, rSrc2);
         case kOpAdd:
         case kOpAnd:
         case kOpMul:
@@ -366,23 +373,15 @@
         case kOpXor:
             return opRegRegReg(cUnit, op, rDestSrc1, rDestSrc1, rSrc2);
         case kOp2Byte:
-#if __mips_isa_rev>=2
-            res = newLIR2(cUnit, kMipsSeb, rDestSrc1, rSrc2);
-#else
             res = opRegRegImm(cUnit, kOpLsl, rDestSrc1, rSrc2, 24);
             opRegRegImm(cUnit, kOpAsr, rDestSrc1, rDestSrc1, 24);
-#endif
             return res;
         case kOp2Short:
-#if __mips_isa_rev>=2
-            res = newLIR2(cUnit, kMipsSeh, rDestSrc1, rSrc2);
-#else
             res = opRegRegImm(cUnit, kOpLsl, rDestSrc1, rSrc2, 16);
             opRegRegImm(cUnit, kOpAsr, rDestSrc1, rDestSrc1, 16);
-#endif
             return res;
         case kOp2Char:
-             return newLIR3(cUnit, kMipsAndi, rDestSrc1, rSrc2, 0xFFFF);
+             return newLIR3(cUnit, kX86Andi, rDestSrc1, rSrc2, 0xFFFF);
         default:
             LOG(FATAL) << "Bad case in opRegReg";
             break;
@@ -409,7 +408,7 @@
 #if 0
     LIR *first = NULL;
     LIR *res;
-    MipsOpCode opcode = kMipsNop;
+    X86OpCode opcode = kX86Nop;
     int tReg = oatAllocTemp(cUnit);
 
     if (FPREG(rDest)) {
@@ -422,30 +421,30 @@
     }
 
     if (!scale) {
-        first = newLIR3(cUnit, kMipsAddu, tReg , rBase, rIndex);
+        first = newLIR3(cUnit, kX86Addu, tReg , rBase, rIndex);
     } else {
         first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
-        newLIR3(cUnit, kMipsAddu, tReg , rBase, tReg);
+        newLIR3(cUnit, kX86Addu, tReg , rBase, tReg);
     }
 
     switch (size) {
         case kSingle:
-            opcode = kMipsFlwc1;
+            opcode = kX86Flwc1;
             break;
         case kWord:
-            opcode = kMipsLw;
+            opcode = kX86Lw;
             break;
         case kUnsignedHalf:
-            opcode = kMipsLhu;
+            opcode = kX86Lhu;
             break;
         case kSignedHalf:
-            opcode = kMipsLh;
+            opcode = kX86Lh;
             break;
         case kUnsignedByte:
-            opcode = kMipsLbu;
+            opcode = kX86Lbu;
             break;
         case kSignedByte:
-            opcode = kMipsLb;
+            opcode = kX86Lb;
             break;
         default:
             LOG(FATAL) << "Bad case in loadBaseIndexed";
@@ -466,11 +465,10 @@
 #if 0
     LIR *first = NULL;
     LIR *res;
-    MipsOpCode opcode = kMipsNop;
+    X86OpCode opcode = kX86Nop;
     int rNewIndex = rIndex;
     int tReg = oatAllocTemp(cUnit);
 
-#ifdef __mips_hard_float
     if (FPREG(rSrc)) {
         DCHECK(SINGLEREG(rSrc));
         DCHECK((size == kWord) || (size == kSingle));
@@ -479,31 +477,28 @@
         if (size == kSingle)
             size = kWord;
     }
-#endif
 
     if (!scale) {
-        first = newLIR3(cUnit, kMipsAddu, tReg , rBase, rIndex);
+        first = newLIR3(cUnit, kX86Addu, tReg , rBase, rIndex);
     } else {
         first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
-        newLIR3(cUnit, kMipsAddu, tReg , rBase, tReg);
+        newLIR3(cUnit, kX86Addu, tReg , rBase, tReg);
     }
 
     switch (size) {
-#ifdef __mips_hard_float
         case kSingle:
-            opcode = kMipsFswc1;
+            opcode = kX86Fswc1;
             break;
-#endif
         case kWord:
-            opcode = kMipsSw;
+            opcode = kX86Sw;
             break;
         case kUnsignedHalf:
         case kSignedHalf:
-            opcode = kMipsSh;
+            opcode = kX86Sh;
             break;
         case kUnsignedByte:
         case kSignedByte:
-            opcode = kMipsSb;
+            opcode = kX86Sb;
             break;
         default:
             LOG(FATAL) << "Bad case in storeBaseIndexed";
@@ -525,14 +520,14 @@
     genBarrier(cUnit);
 
     for (i = 0; i < 8; i++, rMask >>= 1) {
-        if (rMask & 0x1) { /* map r0 to MIPS r_A0 */
-            newLIR3(cUnit, kMipsLw, i+r_A0, loadCnt*4, rBase);
+        if (rMask & 0x1) {
+            newLIR3(cUnit, kX86Lw, i+r_A0, loadCnt*4, rBase);
             loadCnt++;
         }
     }
 
     if (loadCnt) {/* increment after */
-        newLIR3(cUnit, kMipsAddiu, rBase, rBase, loadCnt*4);
+        newLIR3(cUnit, kX86Addiu, rBase, rBase, loadCnt*4);
     }
 
     genBarrier(cUnit);
@@ -544,20 +539,21 @@
 {
     UNIMPLEMENTED(WARNING) << "storeMultiple";
     return NULL;
+#if 0
     int i;
     int storeCnt = 0;
     LIR *res = NULL ;
     genBarrier(cUnit);
 
     for (i = 0; i < 8; i++, rMask >>= 1) {
-        if (rMask & 0x1) { /* map r0 to MIPS r_A0 */
-            newLIR3(cUnit, kMipsSw, i+r_A0, storeCnt*4, rBase);
+        if (rMask & 0x1) {
+            newLIR3(cUnit, kX86Sw, i+r_A0, storeCnt*4, rBase);
             storeCnt++;
         }
     }
 
     if (storeCnt) { /* increment after */
-        newLIR3(cUnit, kMipsAddiu, rBase, rBase, storeCnt*4);
+        newLIR3(cUnit, kX86Addiu, rBase, rBase, storeCnt*4);
     }
 
     genBarrier(cUnit);
@@ -583,7 +579,7 @@
     LIR *res;
     LIR *load = NULL;
     LIR *load2 = NULL;
-    MipsOpCode opcode = kMipsNop;
+    X86OpCode opcode = kX86Nop;
     bool shortForm = IS_SIMM16(displacement);
     bool pair = false;
 
@@ -591,9 +587,9 @@
         case kLong:
         case kDouble:
             pair = true;
-            opcode = kMipsLw;
+            opcode = kX86Lw;
             if (FPREG(rDest)) {
-                opcode = kMipsFlwc1;
+                opcode = kX86Flwc1;
                 if (DOUBLEREG(rDest)) {
                     rDest = rDest - FP_DOUBLE;
                 } else {
@@ -607,26 +603,26 @@
             break;
         case kWord:
         case kSingle:
-            opcode = kMipsLw;
+            opcode = kX86Lw;
             if (FPREG(rDest)) {
-                opcode = kMipsFlwc1;
+                opcode = kX86Flwc1;
                 DCHECK(SINGLEREG(rDest));
             }
             DCHECK_EQ((displacement & 0x3), 0);
             break;
         case kUnsignedHalf:
-            opcode = kMipsLhu;
+            opcode = kX86Lhu;
             DCHECK_EQ((displacement & 0x1), 0);
             break;
         case kSignedHalf:
-            opcode = kMipsLh;
+            opcode = kX86Lh;
             DCHECK_EQ((displacement & 0x1), 0);
             break;
         case kUnsignedByte:
-            opcode = kMipsLbu;
+            opcode = kX86Lbu;
             break;
         case kSignedByte:
-            opcode = kMipsLb;
+            opcode = kX86Lb;
             break;
         default:
             LOG(FATAL) << "Bad case in loadBaseIndexedBody";
@@ -694,7 +690,7 @@
     LIR *res;
     LIR *store = NULL;
     LIR *store2 = NULL;
-    MipsOpCode opcode = kMipsNop;
+    X86OpCode opcode = kX86Nop;
     bool shortForm = IS_SIMM16(displacement);
     bool pair = false;
 
@@ -702,10 +698,9 @@
         case kLong:
         case kDouble:
             pair = true;
-            opcode = kMipsSw;
-#ifdef __mips_hard_float
+            opcode = kX86Sw;
             if (FPREG(rSrc)) {
-                opcode = kMipsFswc1;
+                opcode = kX86Fswc1;
                 if (DOUBLEREG(rSrc)) {
                     rSrc = rSrc - FP_DOUBLE;
                 } else {
@@ -714,29 +709,26 @@
                 }
                 rSrcHi = rSrc + 1;
             }
-#endif
             shortForm = IS_SIMM16_2WORD(displacement);
             DCHECK_EQ((displacement & 0x3), 0);
             break;
         case kWord:
         case kSingle:
-            opcode = kMipsSw;
-#ifdef __mips_hard_float
+            opcode = kX86Sw;
             if (FPREG(rSrc)) {
-                opcode = kMipsFswc1;
+                opcode = kX86Fswc1;
                 DCHECK(SINGLEREG(rSrc));
             }
-#endif
             DCHECK_EQ((displacement & 0x3), 0);
             break;
         case kUnsignedHalf:
         case kSignedHalf:
-            opcode = kMipsSh;
+            opcode = kX86Sh;
             DCHECK_EQ((displacement & 0x1), 0);
             break;
         case kUnsignedByte:
         case kSignedByte:
-            opcode = kMipsSb;
+            opcode = kX86Sb;
             break;
         default:
             LOG(FATAL) << "Bad case in storeBaseIndexedBody";
@@ -788,14 +780,14 @@
 
 void storePair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
 {
-    storeWordDisp(cUnit, base, LOWORD_OFFSET, lowReg);
-    storeWordDisp(cUnit, base, HIWORD_OFFSET, highReg);
+    storeWordDisp(cUnit, base, 0, lowReg);
+    storeWordDisp(cUnit, base, 4, highReg);
 }
 
 void loadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
 {
-    loadWordDisp(cUnit, base, LOWORD_OFFSET , lowReg);
-    loadWordDisp(cUnit, base, HIWORD_OFFSET , highReg);
+    loadWordDisp(cUnit, base, 0, lowReg);
+    loadWordDisp(cUnit, base, 4, highReg);
 }
 
 }  // namespace art
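
A note on the storePair()/loadPair() change above: the MIPS LOWORD_OFFSET/HIWORD_OFFSET macros are replaced with literal displacements 0 and 4, i.e. the low word of a 64-bit pair is assumed to sit at the base address and the high word four bytes above it. That matches a little-endian 32-bit layout such as x86. A minimal stand-alone sketch (plain C++, not part of the compiler sources) of the layout being assumed:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
        uint64_t wide = 0x1122334455667788ULL;
        uint8_t mem[8];
        std::memcpy(mem, &wide, sizeof(wide));   // what a storePair leaves in memory

        uint32_t lo, hi;
        std::memcpy(&lo, mem + 0, sizeof(lo));   // displacement 0: low word
        std::memcpy(&hi, mem + 4, sizeof(hi));   // displacement 4: high word
        // On a little-endian target such as x86 this prints
        // lo=0x55667788 hi=0x11223344.
        std::printf("lo=0x%08x hi=0x%08x\n",
                    static_cast<unsigned>(lo), static_cast<unsigned>(hi));
        return 0;
    }
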
diff --git a/src/compiler/codegen/x86/X86/Gen.cc b/src/compiler/codegen/x86/X86/Gen.cc
index 1ea94c9..152574a 100644
--- a/src/compiler/codegen/x86/X86/Gen.cc
+++ b/src/compiler/codegen/x86/X86/Gen.cc
@@ -15,7 +15,7 @@
  */
 
 /*
- * This file contains codegen for the Mips ISA and is intended to be
+ * This file contains codegen for the X86 ISA and is intended to be
  * includes by:
  *
  *        Codegen-$(TARGET_ARCH_VARIANT).c
@@ -25,7 +25,7 @@
 namespace art {
 
 /*
- * The lack of pc-relative loads on Mips presents somewhat of a challenge
+ * The lack of pc-relative loads on X86 presents somewhat of a challenge
  * for our PIC switch table strategy.  To materialize the current location
  * we'll do a dummy JAL and reference our tables using r_RA as the
  * base register.  Note that r_RA will be used both as the base to
@@ -58,7 +58,7 @@
 void genSparseSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
 {
     UNIMPLEMENTED(WARNING) << "genSparseSwitch";
-    return NULL;
+    return;
 #if 0
     const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
     if (cUnit->printMe) {
@@ -82,16 +82,16 @@
 
     int rEnd = oatAllocTemp(cUnit);
     if (sizeHi) {
-        newLIR2(cUnit, kMipsLui, rEnd, sizeHi);
+        newLIR2(cUnit, kX86Lui, rEnd, sizeHi);
     }
     // Must prevent code motion for the curr pc pair
     genBarrier(cUnit);  // Scheduling barrier
-    newLIR0(cUnit, kMipsCurrPC);  // Really a jal to .+8
+    newLIR0(cUnit, kX86CurrPC);  // Really a jal to .+8
     // Now, fill the branch delay slot
     if (sizeHi) {
-        newLIR3(cUnit, kMipsOri, rEnd, rEnd, sizeLo);
+        newLIR3(cUnit, kX86Ori, rEnd, rEnd, sizeLo);
     } else {
-        newLIR3(cUnit, kMipsOri, rEnd, r_ZERO, sizeLo);
+        newLIR3(cUnit, kX86Ori, rEnd, r_ZERO, sizeLo);
     }
     genBarrier(cUnit);  // Scheduling barrier
 
@@ -100,7 +100,7 @@
     // Remember base label so offsets can be computed later
     tabRec->anchor = baseLabel;
     int rBase = oatAllocTemp(cUnit);
-    newLIR4(cUnit, kMipsDelta, rBase, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
+    newLIR4(cUnit, kX86Delta, rBase, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
     opRegRegReg(cUnit, kOpAdd, rEnd, rEnd, rBase);
 
     // Grab switch test value
@@ -174,10 +174,10 @@
 
     // Must prevent code motion for the curr pc pair
     genBarrier(cUnit);
-    newLIR0(cUnit, kMipsCurrPC);  // Really a jal to .+8
+    newLIR0(cUnit, kX86CurrPC);  // Really a jal to .+8
     // Now, fill the branch delay slot with bias strip
     if (lowKey == 0) {
-        newLIR0(cUnit, kMipsNop);
+        newLIR0(cUnit, kX86Nop);
     } else {
         if (largeBias) {
             opRegRegReg(cUnit, kOpSub, rKey, rlSrc.lowReg, rKey);
@@ -197,7 +197,7 @@
 
     // Materialize the table base pointer
     int rBase = oatAllocTemp(cUnit);
-    newLIR4(cUnit, kMipsDelta, rBase, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
+    newLIR4(cUnit, kX86Delta, rBase, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
 
     // Load the displacement from the switch table
     int rDisp = oatAllocTemp(cUnit);
@@ -246,7 +246,7 @@
 
     // Must prevent code motion for the curr pc pair
     genBarrier(cUnit);
-    newLIR0(cUnit, kMipsCurrPC);  // Really a jal to .+8
+    newLIR0(cUnit, kX86CurrPC);  // Really a jal to .+8
     // Now, fill the branch delay slot with the helper load
     int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                           pHandleFillArrayDataFromCode));
@@ -256,14 +256,17 @@
     LIR* baseLabel = newLIR0(cUnit, kPseudoTargetLabel);
 
     // Materialize a pointer to the fill data image
-    newLIR4(cUnit, kMipsDelta, rARG1, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
+    newLIR4(cUnit, kX86Delta, rARG1, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
 
     // And go...
     callRuntimeHelper(cUnit, rTgt);  // ( array*, fill_data* )
+#endif
 }
 
 void genNegFloat(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
 {
+    UNIMPLEMENTED(WARNING) << "genNegFloat";
+#if 0
     RegLocation rlResult;
     rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
     rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
@@ -347,13 +350,13 @@
     int t0 = oatAllocTemp(cUnit);
     int t1 = oatAllocTemp(cUnit);
     RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
-    newLIR3(cUnit, kMipsSlt, t0, rlSrc1.highReg, rlSrc2.highReg);
-    newLIR3(cUnit, kMipsSlt, t1, rlSrc2.highReg, rlSrc1.highReg);
-    newLIR3(cUnit, kMipsSubu, rlResult.lowReg, t1, t0);
+    newLIR3(cUnit, kX86Slt, t0, rlSrc1.highReg, rlSrc2.highReg);
+    newLIR3(cUnit, kX86Slt, t1, rlSrc2.highReg, rlSrc1.highReg);
+    newLIR3(cUnit, kX86Subu, rlResult.lowReg, t1, t0);
     LIR* branch = opCmpImmBranch(cUnit, kCondNe, rlResult.lowReg, 0, NULL);
-    newLIR3(cUnit, kMipsSltu, t0, rlSrc1.lowReg, rlSrc2.lowReg);
-    newLIR3(cUnit, kMipsSltu, t1, rlSrc2.lowReg, rlSrc1.lowReg);
-    newLIR3(cUnit, kMipsSubu, rlResult.lowReg, t1, t0);
+    newLIR3(cUnit, kX86Sltu, t0, rlSrc1.lowReg, rlSrc2.lowReg);
+    newLIR3(cUnit, kX86Sltu, t1, rlSrc2.lowReg, rlSrc1.lowReg);
+    newLIR3(cUnit, kX86Subu, rlResult.lowReg, t1, t0);
     oatFreeTemp(cUnit, t0);
     oatFreeTemp(cUnit, t1);
     LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
@@ -369,48 +372,48 @@
     return NULL;
 #if 0
     LIR* branch;
-    MipsOpCode sltOp;
-    MipsOpCode brOp;
+    X86OpCode sltOp;
+    X86OpCode brOp;
     bool cmpZero = false;
     bool swapped = false;
     switch(cond) {
         case kCondEq:
-            brOp = kMipsBeq;
+            brOp = kX86Beq;
             cmpZero = true;
             break;
         case kCondNe:
-            brOp = kMipsBne;
+            brOp = kX86Bne;
             cmpZero = true;
             break;
         case kCondCc:
-            sltOp = kMipsSltu;
-            brOp = kMipsBnez;
+            sltOp = kX86Sltu;
+            brOp = kX86Bnez;
             break;
         case kCondCs:
-            sltOp = kMipsSltu;
-            brOp = kMipsBeqz;
+            sltOp = kX86Sltu;
+            brOp = kX86Beqz;
             break;
         case kCondGe:
-            sltOp = kMipsSlt;
-            brOp = kMipsBeqz;
+            sltOp = kX86Slt;
+            brOp = kX86Beqz;
             break;
         case kCondGt:
-            sltOp = kMipsSlt;
-            brOp = kMipsBnez;
+            sltOp = kX86Slt;
+            brOp = kX86Bnez;
             swapped = true;
             break;
         case kCondLe:
-            sltOp = kMipsSlt;
-            brOp = kMipsBeqz;
+            sltOp = kX86Slt;
+            brOp = kX86Beqz;
             swapped = true;
             break;
         case kCondLt:
-            sltOp = kMipsSlt;
-            brOp = kMipsBnez;
+            sltOp = kX86Slt;
+            brOp = kX86Bnez;
             break;
         case kCondHi:  // Gtu
-            sltOp = kMipsSltu;
-            brOp = kMipsBnez;
+            sltOp = kX86Sltu;
+            brOp = kX86Bnez;
             swapped = true;
             break;
         default:
@@ -437,7 +440,7 @@
 LIR* opCmpImmBranch(CompilationUnit* cUnit, ConditionCode cond, int reg,
                     int checkValue, LIR* target)
 {
-    UNIMPLEMENTED(WARNING) >> "opCmpImmBranch";
+    UNIMPLEMENTED(WARNING) << "opCmpImmBranch";
     return NULL;
 #if 0
     LIR* branch;
@@ -449,15 +452,15 @@
         oatFreeTemp(cUnit, tReg);
         return branch;
     }
-    MipsOpCode opc;
+    X86OpCode opc;
     switch(cond) {
-        case kCondEq: opc = kMipsBeqz; break;
-        case kCondGe: opc = kMipsBgez; break;
-        case kCondGt: opc = kMipsBgtz; break;
-        case kCondLe: opc = kMipsBlez; break;
+        case kCondEq: opc = kX86Beqz; break;
+        case kCondGe: opc = kX86Bgez; break;
+        case kCondGt: opc = kX86Bgtz; break;
+        case kCondLe: opc = kX86Blez; break;
         //case KCondMi:
-        case kCondLt: opc = kMipsBltz; break;
-        case kCondNe: opc = kMipsBnez; break;
+        case kCondLt: opc = kX86Bltz; break;
+        case kCondNe: opc = kX86Bnez; break;
         default:
             // Tuning: use slti when applicable
             int tReg = oatAllocTemp(cUnit);
@@ -479,7 +482,7 @@
 #if 0
     if (FPREG(rDest) || FPREG(rSrc))
         return fpRegCopy(cUnit, rDest, rSrc);
-    LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, kMipsMove,
+    LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, kX86Move,
                       rDest, rSrc);
     if (rDest == rSrc) {
         res->flags.isNop = true;
@@ -509,13 +512,13 @@
             opRegCopy(cUnit, S2D(destLo, destHi), S2D(srcLo, srcHi));
         } else {
            /* note the operands are swapped for the mtc1 instr */
-            newLIR2(cUnit, kMipsMtc1, srcLo, destLo);
-            newLIR2(cUnit, kMipsMtc1, srcHi, destHi);
+            newLIR2(cUnit, kX86Mtc1, srcLo, destLo);
+            newLIR2(cUnit, kX86Mtc1, srcHi, destHi);
         }
     } else {
         if (srcFP) {
-            newLIR2(cUnit, kMipsMfc1, destLo, srcLo);
-            newLIR2(cUnit, kMipsMfc1, destHi, srcHi);
+            newLIR2(cUnit, kX86Mfc1, destLo, srcLo);
+            newLIR2(cUnit, kX86Mfc1, destHi, srcHi);
         } else {
             // Handle overlap
             if (srcHi == destLo) {
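
The comment near the top of this file about the lack of pc-relative loads carries over from MIPS, where the current location is materialized with a dummy JAL. The 32-bit x86 equivalent is usually the call/pop idiom, since IA-32 has no direct way to read EIP. This is only an illustrative stand-alone sketch, not code from this change:

    #include <cstdio>

    // Materialize the current program counter: the call pushes its return
    // address (the address of the pop), and the pop moves it into a register.
    // x86-only, GCC/Clang inline-asm syntax.
    static void* CurrentPC() {
        void* pc;
        __asm__ volatile(
            "call 1f\n\t"
            "1: pop %0"
            : "=r"(pc));
        return pc;
    }

    int main() {
        std::printf("materialized pc: %p\n", CurrentPC());
        return 0;
    }

Whatever kX86CurrPC ends up encoding, something along these lines is the likely basis for the PIC switch-table and fill-array-data paths stubbed out above.
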
diff --git a/src/compiler/codegen/x86/X86/Ralloc.cc b/src/compiler/codegen/x86/X86/Ralloc.cc
index 2e0bb94..86625c6 100644
--- a/src/compiler/codegen/x86/X86/Ralloc.cc
+++ b/src/compiler/codegen/x86/X86/Ralloc.cc
@@ -17,7 +17,7 @@
 namespace art {
 
 /*
- * This file contains codegen for the Mips ISA and is intended to be
+ * This file contains codegen for the X86 ISA and is intended to be
  * includes by:
  *
  *        Codegen-$(TARGET_ARCH_VARIANT).c
diff --git a/src/compiler/codegen/x86/X86LIR.h b/src/compiler/codegen/x86/X86LIR.h
index 3681cdb..31f86af 100644
--- a/src/compiler/codegen/x86/X86LIR.h
+++ b/src/compiler/codegen/x86/X86LIR.h
@@ -133,6 +133,10 @@
 /* non-existant physical register */
 #define rNone   (-1)
 
+/* RegisterLocation templates return values (rAX, or rAX/rDX) */
+#define LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 1, rAX, INVALID_REG,\
+                      INVALID_SREG}
+#define LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 1, rAX, rDX, INVALID_SREG}
 
 typedef enum ResourceEncodingPos {
     kGPReg0     = 0,
@@ -259,6 +263,10 @@
     kX86Last
 } X86OpCode;
 
+// FIXME: mem barrier type - what do we do for x86?
+#define kSY 0
+#define kST 0
+
 /* Bit flags describing the behavior of each native opcode */
 typedef enum X86OpFeatureFlags {
     kIsBranch = 0,
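
On the new LOC_C_RETURN / LOC_C_RETURN_WIDE templates: pairing rAX with rDX follows the 32-bit x86 calling convention, where a 64-bit result is returned in EDX:EAX (low half in EAX, high half in EDX). A small stand-alone illustration; the function name is made up, and building with -m32 shows the register pair in use:

    #include <cstdint>
    #include <cstdio>

    // When built for 32-bit x86, the 64-bit return value leaves this function
    // in EDX:EAX, which is the rAX/rDX pairing LOC_C_RETURN_WIDE describes.
    uint64_t MakeWide(uint32_t lo, uint32_t hi) {
        return (static_cast<uint64_t>(hi) << 32) | lo;
    }

    int main() {
        std::printf("%llx\n",
                    static_cast<unsigned long long>(MakeWide(0xdeadbeefu, 0x0badf00du)));
        return 0;
    }
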
diff --git a/src/compiler/codegen/x86/X86RallocUtil.cc b/src/compiler/codegen/x86/X86RallocUtil.cc
index 3073258..ec9b4b1 100644
--- a/src/compiler/codegen/x86/X86RallocUtil.cc
+++ b/src/compiler/codegen/x86/X86RallocUtil.cc
@@ -15,13 +15,13 @@
  */
 
 /*
- * This file contains Mips-specific register allocation support.
+ * This file contains X86-specific register allocation support.
  */
 
 #include "../../CompilerUtility.h"
 #include "../../CompilerIR.h"
 #include "../..//Dataflow.h"
-#include "MipsLIR.h"
+#include "X86LIR.h"
 #include "Codegen.h"
 #include "../Ralloc.h"
 
@@ -162,6 +162,11 @@
 
 extern RegLocation oatGetReturnWideAlt(CompilationUnit* cUnit)
 {
+    UNIMPLEMENTED(WARNING) << "oatGetReturnWideAlt";
+    return oatGetReturnWide(cUnit);
+#if 0
+    // May not need this - it is used when calling a combined
+    // div/rem helper.  Quotient in regular return, remainder in alt regs
     RegLocation res = LOC_C_RETURN_WIDE_ALT;
     oatClobber(cUnit, res.lowReg);
     oatClobber(cUnit, res.highReg);
@@ -169,6 +174,7 @@
     oatMarkInUse(cUnit, res.highReg);
     oatMarkPair(cUnit, res.lowReg, res.highReg);
     return res;
+#endif
 }
 
 extern RegLocation oatGetReturn(CompilationUnit* cUnit)
@@ -181,10 +187,16 @@
 
 extern RegLocation oatGetReturnAlt(CompilationUnit* cUnit)
 {
+    UNIMPLEMENTED(WARNING) << "oatGetReturnAlt";
+    return oatGetReturn(cUnit);
+#if 0
+    // May not need this - it is used when calling a combined
+    // div/rem helper.  Quotient in regular return, remainder in alt regs
     RegLocation res = LOC_C_RETURN_ALT;
     oatClobber(cUnit, res.lowReg);
     oatMarkInUse(cUnit, res.lowReg);
     return res;
+#endif
 }
 
 extern RegisterInfo* oatGetRegInfo(CompilationUnit* cUnit, int reg)
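
The #if 0 comments above note that the "alt" return locations exist for a combined div/rem helper: quotient in the regular return registers, remainder in the alternates. On x86 a single IDIV already produces both (quotient in EAX, remainder in EDX), so the split may turn out to be unnecessary. A minimal sketch of getting both results from one operation:

    #include <cstdlib>
    #include <cstdio>

    int main() {
        // std::div returns quotient and remainder together; on x86 both fall
        // out of one IDIV (quotient in EAX, remainder in EDX), which is why a
        // combined div/rem helper wants two distinct return registers.
        std::div_t r = std::div(17, 5);
        std::printf("quot=%d rem=%d\n", r.quot, r.rem);   // quot=3 rem=2
        return 0;
    }
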
diff --git a/src/compiler/codegen/x86/x86/ArchVariant.cc b/src/compiler/codegen/x86/x86/ArchVariant.cc
index 41dbe1b..93b17e1 100644
--- a/src/compiler/codegen/x86/x86/ArchVariant.cc
+++ b/src/compiler/codegen/x86/x86/ArchVariant.cc
@@ -17,7 +17,7 @@
 namespace art {
 
 /*
- * This file is included by Codegen-mips.c, and implements architecture
+ * This file is included by Codegen-x86.c, and implements architecture
  * variant-specific code.
  */
 
@@ -54,7 +54,7 @@
 #if ANDROID_SMP != 0
     UNIMPLEMENTED(WARNING) << "oatGenMemBarrier";
 #if 0
-    newLIR1(cUnit, kMipsSync, barrierKind);
+    newLIR1(cUnit, kX86Sync, barrierKind);
 #endif
 #endif
 }
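
oatGenMemBarrier() is stubbed here, and the kSY/kST placeholders added to X86LIR.h leave the x86 barrier question open. A hedged sketch of what the strongest barrier kind could lower to; this is only an assumption about an eventual implementation, not part of this change:

    #include <atomic>

    // The strongest case: a sequentially consistent fence, which compilers
    // typically lower to MFENCE or an equivalent lock-prefixed instruction.
    // Given x86's strong (TSO-like) memory ordering, the finer-grained MIPS
    // SYNC variants behind kSY/kST may collapse to this, or to nothing.
    inline void GenFullMemBarrier() {
        std::atomic_thread_fence(std::memory_order_seq_cst);
    }
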
diff --git a/src/compiler/codegen/x86/x86/Codegen.cc b/src/compiler/codegen/x86/x86/Codegen.cc
index b07ecc7..ba66b54 100644
--- a/src/compiler/codegen/x86/x86/Codegen.cc
+++ b/src/compiler/codegen/x86/x86/Codegen.cc
@@ -15,7 +15,7 @@
  */
 
 #define _CODEGEN_C
-#define TARGET_MIPS
+#define TARGET_X86
 
 #include "../../../Dalvik.h"
 #include "../../../CompilerInternals.h"
@@ -23,24 +23,24 @@
 #include "../../Ralloc.h"
 #include "../Codegen.h"
 
-/* Mips codegen building blocks */
+/* X86 codegen building blocks */
 #include "../../CodegenUtil.cc"
 
-/* Mips-specific factory utilities */
+/* X86-specific factory utilities */
 #include "../X86/Factory.cc"
 /* Target independent factory utilities */
 #include "../../CodegenFactory.cc"
 /* Target independent gen routines */
 #include "../../GenCommon.cc"
 /* Shared invoke gen routines */
-#include "../../GenInvoke.cc"
-/* Mips-specific factory utilities */
+#include "../GenInvoke.cc"
+/* X86-specific factory utilities */
 #include "../ArchFactory.cc"
 
 /* X86-specific codegen routines */
 #include "../X86/Gen.cc"
 /* FP codegen routines */
-#include "../FP/MipsFP.cc"
+#include "../FP/X86FP.cc"
 
 /* X86-specific register allocation */
 #include "../X86/Ralloc.cc"