x86_64: GenInlinedCas must use wide rl_src_offset under 64-bit targets

This patch fixes GenInlinedCas to use a wide rl_src_offset for int and
long types on 64-bit targets, and fixes movzx8 and movsx8 to apply
r8_form only to the second register.

Change-Id: Ib8c0756609100f9bc5c228f1eb391421416f3af6
Signed-off-by: Chao-ying Fu <chao-ying.fu@intel.com>
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index b416a7b..f1166f6 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -836,14 +836,12 @@
 
 bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
   DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
-  if (cu_->instruction_set == kX86_64) {
-    return false;  // TODO: Verify working on x86-64.
-  }
-
   // Unused - RegLocation rl_src_unsafe = info->args[0];
   RegLocation rl_src_obj = info->args[1];  // Object - known non-null
   RegLocation rl_src_offset = info->args[2];  // long low
-  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
+  if (!cu_->target64) {
+    rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
+  }
   RegLocation rl_src_expected = info->args[4];  // int, long or Object
   // If is_long, high half is in info->args[5]
   RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
@@ -851,21 +849,21 @@
 
   if (is_long && cu_->target64) {
     // RAX must hold expected for CMPXCHG. Neither rl_new_value, nor r_ptr may be in RAX.
-    FlushReg(rs_r0);
-    Clobber(rs_r0);
-    LockTemp(rs_r0);
+    FlushReg(rs_r0q);
+    Clobber(rs_r0q);
+    LockTemp(rs_r0q);
 
     RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
     RegLocation rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
-    RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
-    LoadValueDirectWide(rl_src_expected, rs_r0);
+    RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);
+    LoadValueDirectWide(rl_src_expected, rs_r0q);
     NewLIR5(kX86LockCmpxchg64AR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());
 
     // After a store we need to insert barrier in case of potential load. Since the
     // locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated.
     GenMemBarrier(kStoreLoad);
 
-    FreeTemp(rs_r0);
+    FreeTemp(rs_r0q);
   } else if (is_long) {
     // TODO: avoid unnecessary loads of SI and DI when the values are in registers.
     // TODO: CFI support.
@@ -947,7 +945,12 @@
       LockTemp(rs_r0);
     }
 
-    RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
+    RegLocation rl_offset;
+    if (cu_->target64) {
+      rl_offset = LoadValueWide(rl_src_offset, kCoreReg);
+    } else {
+      rl_offset = LoadValue(rl_src_offset, kCoreReg);
+    }
     LoadValueDirect(rl_src_expected, rs_r0);
     NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());