Revert "Revert "Better support for x86 XMM registers""

This reverts commit 8ff67e3338952c70ccf3b609559bf8cc0f379cfd.

This relands the original change with a fix applied to the loc.fp usage.

Change-Id: I1eb3005392544fcf30c595923ed25bcee2dc4859
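
As a reading aid (not part of the patch): the hunks below add a
kVectorNotUsed entry to every RegLocation aggregate initializer. A
minimal C++ sketch of the assumed field order, with a hypothetical
vec_len member between the home flag and low_reg, shows why one more
positional initializer is needed:

  // Sketch only; field names beyond those visible in the diff are
  // assumptions inferred from the initializer order, not the real struct.
  struct RegLocationSketch {
    int location;    // kLocPhysReg in the hunks below
    int wide, defined, is_const, fp, core, ref, high_word, home;
    int vec_len;     // new slot: kVectorNotUsed for plain scalar values
    int low_reg;     // r0
    int high_reg;    // r1
    int s_reg_low;   // INVALID_SREG
    int orig_sreg;   // INVALID_SREG
  };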
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 11ccd4b..01479a9 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -155,9 +155,11 @@
       // TODO: Prevent this from happening in the code. The result is often
       // unused or could have been loaded more easily from memory.
       NewLIR2(kX86MovdxrRR, dest_lo, src_lo);
+      dest_hi = AllocTempDouble();
       NewLIR2(kX86MovdxrRR, dest_hi, src_hi);
       NewLIR2(kX86PsllqRI, dest_hi, 32);
       NewLIR2(kX86OrpsRR, dest_lo, dest_hi);
+      FreeTemp(dest_hi);
     }
   } else {
     if (src_fp) {
@@ -525,7 +527,7 @@
   // Compute (r1:r0) = (r1:r0) + (r2:r3)
   OpRegReg(kOpAdd, r0, r2);  // r0 = r0 + r2
   OpRegReg(kOpAdc, r1, r3);  // r1 = r1 + r3 + CF
-  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r1,
                           INVALID_SREG, INVALID_SREG};
   StoreValueWide(rl_dest, rl_result);
 }
@@ -541,7 +543,7 @@
   // Compute (r1:r0) = (r1:r0) - (r2:r3)
   OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
   OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
-  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r1,
                           INVALID_SREG, INVALID_SREG};
   StoreValueWide(rl_dest, rl_result);
 }
@@ -557,7 +559,7 @@
   // Compute (r1:r0) = (r1:r0) & (r2:r3)
   OpRegReg(kOpAnd, r0, r2);  // r0 = r0 & r2
   OpRegReg(kOpAnd, r1, r3);  // r1 = r1 & r3
-  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r1,
                           INVALID_SREG, INVALID_SREG};
   StoreValueWide(rl_dest, rl_result);
 }
@@ -573,7 +575,7 @@
   // Compute (r1:r0) = (r1:r0) | (r2:r3)
   OpRegReg(kOpOr, r0, r2);  // r0 = r0 | r2
   OpRegReg(kOpOr, r1, r3);  // r1 = r1 | r3
-  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r1,
                           INVALID_SREG, INVALID_SREG};
   StoreValueWide(rl_dest, rl_result);
 }
@@ -589,7 +591,7 @@
   // Compute (r1:r0) = (r1:r0) ^ (r2:r3)
   OpRegReg(kOpXor, r0, r2);  // r0 = r0 ^ r2
   OpRegReg(kOpXor, r1, r3);  // r1 = r1 ^ r3
-  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r1,
                           INVALID_SREG, INVALID_SREG};
   StoreValueWide(rl_dest, rl_result);
 }
@@ -602,7 +604,7 @@
   OpRegReg(kOpNeg, r0, r0);  // r0 = -r0
   OpRegImm(kOpAdc, r1, 0);   // r1 = r1 + CF
   OpRegReg(kOpNeg, r1, r1);  // r1 = -r1
-  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r1,
                           INVALID_SREG, INVALID_SREG};
   StoreValueWide(rl_dest, rl_result);
 }
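
For reference, the first hunk's movd/psllq/orps sequence packs two 32-bit
GPRs into one 64-bit XMM value; dest_hi is now a freshly allocated
temporary XMM register (AllocTempDouble()) that holds the shifted high
half and is released with FreeTemp() once the halves are combined. A
small stand-alone C++ sketch (hypothetical helper, not part of the patch)
of the equivalent integer arithmetic:

  #include <cassert>
  #include <cstdint>

  // Emulates the emitted sequence on plain integers: the value the
  // destination XMM register holds afterwards is (src_hi << 32) | src_lo.
  static uint64_t PackHalves(uint32_t src_lo, uint32_t src_hi) {
    uint64_t dest_lo = src_lo;   // kX86MovdxrRR dest_lo, src_lo
    uint64_t dest_hi = src_hi;   // kX86MovdxrRR dest_hi, src_hi (temp XMM)
    dest_hi <<= 32;              // kX86PsllqRI  dest_hi, 32
    dest_lo |= dest_hi;          // kX86OrpsRR   dest_lo, dest_hi
    return dest_lo;              // FreeTemp(dest_hi) releases the temp
  }

  int main() {
    assert(PackHalves(0xdeadbeefu, 0x12345678u) == 0x12345678deadbeefULL);
    return 0;
  }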