Add x86_64 code generation support

Utilizes r0..r7 in the register allocator and implements spill/unspill
of core registers, as well as operations on the stack pointer.
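
For example, together with the kWord cases and the 64-bit kOpAdd/kOpSub
handling below, frame setup and spill/unspill can be expressed roughly as
follows (a sketch only: frame_size, disp and r_core are illustrative
placeholders, and rs_rX86_SP is assumed to be 64-bit on a 64-bit target):

  // Prologue: reserve the frame, then spill a core register.
  OpRegImm(kOpSub, rs_rX86_SP, frame_size);        // kX86Sub64RI8 / kX86Sub64RI
  StoreBaseDisp(rs_rX86_SP, disp, r_core, kWord);  // kWord -> kX86Mov64MR

  // Epilogue: unspill the register, then release the frame.
  LoadBaseDisp(rs_rX86_SP, disp, r_core, kWord);   // kWord -> kX86Mov64RM
  OpRegImm(kOpAdd, rs_rX86_SP, frame_size);        // kX86Add64RI8 / kX86Add64RI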

Change-Id: I973d5a1acb9aa735f6832df3d440185d9e896c67
Signed-off-by: Dmitry Petrochenko <dmitry.petrochenko@intel.com>
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 1da4f17..fb85318 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -130,32 +130,42 @@
   X86OpCode opcode = kX86Bkpt;
   bool byte_imm = IS_SIMM8(value);
   DCHECK(!r_dest_src1.IsFloat());
-  switch (op) {
-    case kOpLsl: opcode = kX86Sal32RI; break;
-    case kOpLsr: opcode = kX86Shr32RI; break;
-    case kOpAsr: opcode = kX86Sar32RI; break;
-    case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
-    case kOpOr:  opcode = byte_imm ? kX86Or32RI8  : kX86Or32RI;  break;
-    case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
-    // case kOpSbb: opcode = kX86Sbb32RI; break;
-    case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
-    case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
-    case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
-    case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
-    case kOpMov:
-      /*
-       * Moving the constant zero into register can be specialized as an xor of the register.
-       * However, that sets eflags while the move does not. For that reason here, always do
-       * the move and if caller is flexible, they should be calling LoadConstantNoClobber instead.
-       */
-      opcode = kX86Mov32RI;
-      break;
-    case kOpMul:
-      opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
-      return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), value);
-    default:
-      LOG(FATAL) << "Bad case in OpRegImm " << op;
+  if (r_dest_src1.Is64Bit()) {
+    switch (op) {
+      case kOpAdd: opcode = byte_imm ? kX86Add64RI8 : kX86Add64RI; break;
+      case kOpSub: opcode = byte_imm ? kX86Sub64RI8 : kX86Sub64RI; break;
+      default:
+        LOG(FATAL) << "Bad case in OpRegImm (64-bit) " << op;
+    }
+  } else {
+    switch (op) {
+      case kOpLsl: opcode = kX86Sal32RI; break;
+      case kOpLsr: opcode = kX86Shr32RI; break;
+      case kOpAsr: opcode = kX86Sar32RI; break;
+      case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
+      case kOpOr:  opcode = byte_imm ? kX86Or32RI8  : kX86Or32RI;  break;
+      case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
+      // case kOpSbb: opcode = kX86Sbb32RI; break;
+      case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
+      case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
+      case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
+      case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
+      case kOpMov:
+        /*
+         * Moving the constant zero into a register can be specialized as an xor of the register.
+         * However, that sets eflags while the move does not. For that reason, always do the
+         * move here; callers that can tolerate clobbered flags should call LoadConstantNoClobber instead.
+         */
+        opcode = kX86Mov32RI;
+        break;
+      case kOpMul:
+        opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
+        return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), value);
+      default:
+        LOG(FATAL) << "Bad case in OpRegImm " << op;
+    }
   }
+  CHECK(!r_dest_src1.Is64Bit() || X86Mir2Lir::EncodingMap[opcode].kind == kReg64Imm) << "OpRegImm(" << op << ")";
   return NewLIR2(opcode, r_dest_src1.GetReg(), value);
 }
 
@@ -464,7 +474,7 @@
                      r_src.GetReg() /* index */, value /* scale */, 0 /* disp */);
     } else if (op == kOpAdd) {  // lea add special case
       return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r_src.GetReg() /* base */,
-                     r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
+                     rs_rX86_SP.GetReg() /* index: r4sib_no_index */, 0 /* scale */, value /* disp */);
     }
     OpRegCopy(r_dest, r_src);
   }
@@ -578,6 +588,13 @@
       // TODO: double store is to unaligned address
       DCHECK_EQ((displacement & 0x3), 0);
       break;
+    case kWord:
+      if (Gen64Bit()) {
+        opcode = is_array ? kX86Mov64RA  : kX86Mov64RM;
+        CHECK_EQ(is_array, false);
+        CHECK_EQ(r_dest.IsFloat(), false);
+        break;
+      }  // else fall-through to k32 case
     case k32:
     case kSingle:
     case kReference:  // TODO: update for reference decompression on 64-bit targets.
@@ -689,10 +706,6 @@
 
 LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                               OpSize size) {
-  // TODO: base this on target.
-  if (size == kWord) {
-    size = k32;
-  }
   return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
                              size);
 }
@@ -711,11 +724,23 @@
       if (r_src.IsFloat()) {
         opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
       } else {
-        opcode = is_array ? kX86Mov32AR  : kX86Mov32MR;
+        if (Gen64Bit()) {
+          opcode = is_array ? kX86Mov64AR  : kX86Mov64MR;
+        } else {
+          // TODO(64): pair = true;
+          opcode = is_array ? kX86Mov32AR  : kX86Mov32MR;
+        }
       }
       // TODO: double store is to unaligned address
       DCHECK_EQ((displacement & 0x3), 0);
       break;
+    case kWord:
+      if (Gen64Bit()) {
+        opcode = is_array ? kX86Mov64AR  : kX86Mov64MR;
+        CHECK_EQ(is_array, false);
+        CHECK_EQ(r_src.IsFloat(), false);
+        break;
+      }  // else fall-through to k32 case
     case k32:
     case kSingle:
     case kReference:
@@ -785,10 +810,6 @@
 
 LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement,
                                RegStorage r_src, OpSize size) {
-  // TODO: base this on target.
-  if (size == kWord) {
-    size = k32;
-  }
   return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);
 }