Revert "Revert "X86: Use locked add rather than mfence""

This reverts commit 0da3b9117706760e8722029f407da6d0297cc943.

Fix a compilation failure that slipped into the original change.
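
For reference, a rough standalone sketch of the two barrier flavors the
back ends now choose between (not part of this change; the function
names are illustrative and it assumes a GCC/Clang x86 target with
inline asm). The locked add gets its full-barrier effect from a locked
read-modify-write that touches, but does not change, the top of the
stack:

    // Illustrative only; not ART code.
    // Old path: MFENCE is a full StoreLoad barrier, but it also orders
    // weakly-ordered (non-temporal) stores and is typically more
    // expensive than a locked RMW.
    inline void FenceWithMfence() {
      __asm__ __volatile__("mfence" ::: "memory");
    }

    // New preferred path: a locked add of 0 to the top of the stack.
    // The locked RMW provides the same full barrier for ordinary
    // loads/stores, touches but does not change (%esp)/(%rsp), and
    // avoids serializing device memory; the MemoryFence() helpers in
    // this change still allow forcing mfence when non-temporal stores
    // must be ordered.
    inline void FenceWithLockedAdd() {
    #if defined(__x86_64__)
      __asm__ __volatile__("lock addl $0, (%%rsp)" ::: "memory", "cc");
    #else
      __asm__ __volatile__("lock addl $0, (%%esp)" ::: "memory", "cc");
    #endif
    }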

Change-Id: Ide8681cdc921febb296ea47aa282cc195f154049
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index e5d3841..1c2a619 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -508,6 +508,7 @@
   { kX86Lfence, kReg,                 NO_OPERAND,     { 0, 0, 0x0F, 0xAE, 0, 5, 0, 0, false }, "Lfence", "" },
   { kX86Mfence, kReg,                 NO_OPERAND,     { 0, 0, 0x0F, 0xAE, 0, 6, 0, 0, false }, "Mfence", "" },
   { kX86Sfence, kReg,                 NO_OPERAND,     { 0, 0, 0x0F, 0xAE, 0, 7, 0, 0, false }, "Sfence", "" },
+  { kX86LockAdd32MI8, kMemImm,        IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0xF0, 0, 0x83, 0x0, 0x0, 0, 0, 1, false }, "LockAdd32MI8", "[!0r+!1d],!2d" },
 
   EXT_0F_ENCODING_MAP(Imul16,  0x66, 0xAF, REG_USE0 | REG_DEF0 | SETS_CCODES),
   EXT_0F_ENCODING_MAP(Imul32,  0x00, 0xAF, REG_USE0 | REG_DEF0 | SETS_CCODES),
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 75f3fef..4ff7993 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -20,7 +20,7 @@
 #include <inttypes.h>
 #include <string>
 
-#include "arch/instruction_set_features.h"
+#include "arch/x86/instruction_set_features_x86.h"
 #include "art_method.h"
 #include "backend_x86.h"
 #include "base/logging.h"
@@ -585,6 +585,8 @@
       case kX86LockCmpxchgAR:
       case kX86LockCmpxchg64M:
       case kX86LockCmpxchg64A:
+      case kX86LockCmpxchg64AR:
+      case kX86LockAdd32MI8:
       case kX86XchgMR:
       case kX86Mfence:
         // Atomic memory instructions provide full barrier.
@@ -598,7 +600,9 @@
 }
 
 bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
-  if (!cu_->compiler_driver->GetInstructionSetFeatures()->IsSmp()) {
+  const X86InstructionSetFeatures* features =
+    cu_->compiler_driver->GetInstructionSetFeatures()->AsX86InstructionSetFeatures();
+  if (!features->IsSmp()) {
     return false;
   }
   // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
@@ -610,20 +614,34 @@
    * All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86 memory model.
    * For those cases, all we need to ensure is that there is a scheduling barrier in place.
    */
+  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+  bool use_locked_add = features->PrefersLockedAddSynchronization();
   if (barrier_kind == kAnyAny) {
-    // If no LIR exists already that can be used a barrier, then generate an mfence.
+    // If no LIR exists already that can be used as a barrier, then generate a barrier.
     if (mem_barrier == nullptr) {
-      mem_barrier = NewLIR0(kX86Mfence);
+      if (use_locked_add) {
+        mem_barrier = NewLIR3(kX86LockAdd32MI8, rs_rSP.GetReg(), 0, 0);
+      } else {
+        mem_barrier = NewLIR0(kX86Mfence);
+      }
       ret = true;
     }
 
-    // If last instruction does not provide full barrier, then insert an mfence.
+    // If the last instruction does not provide a full barrier, then insert a barrier.
     if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
-      mem_barrier = NewLIR0(kX86Mfence);
+      if (use_locked_add) {
+        mem_barrier = NewLIR3(kX86LockAdd32MI8, rs_rSP.GetReg(), 0, 0);
+      } else {
+        mem_barrier = NewLIR0(kX86Mfence);
+      }
       ret = true;
     }
   } else if (barrier_kind == kNTStoreStore) {
-      mem_barrier = NewLIR0(kX86Sfence);
+      if (use_locked_add) {
+        mem_barrier = NewLIR3(kX86LockAdd32MI8, rs_rSP.GetReg(), 0, 0);
+      } else {
+        mem_barrier = NewLIR0(kX86Sfence);
+      }
       ret = true;
   }
 
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index d6a6a60..8cd6574 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -606,6 +606,7 @@
                                 // load-from-memory and store-to-memory instructions
   kX86Sfence,                   // memory barrier to serialize all previous
                                 // store-to-memory instructions
+  kX86LockAdd32MI8,             // locked add used to serialize memory instructions
   Binary0fOpCode(kX86Imul16),   // 16bit multiply
   Binary0fOpCode(kX86Imul32),   // 32bit multiply
   Binary0fOpCode(kX86Imul64),   // 64bit multiply
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index bc3256e..e7f7d57 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4157,7 +4157,7 @@
    */
   switch (kind) {
     case MemBarrierKind::kAnyAny: {
-      __ mfence();
+      MemoryFence();
       break;
     }
     case MemBarrierKind::kAnyStore:
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 7c292fa..ebbe486 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_
 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_
 
+#include "arch/x86/instruction_set_features_x86.h"
 #include "code_generator.h"
 #include "dex/compiler_enums.h"
 #include "driver/compiler_options.h"
@@ -496,6 +497,18 @@
   // artReadBarrierForRootSlow.
   void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
 
+  // Ensure that prior stores complete to memory before subsequent loads.
+  // The locked add implementation will avoid serializing device memory, but will
+  // touch (but not change) the top of the stack.
+  // The 'non_temporal' parameter should be used to ensure ordering of non-temporal stores.
+  void MemoryFence(bool non_temporal = false) {
+    if (!non_temporal && isa_features_.PrefersLockedAddSynchronization()) {
+      assembler_.lock()->addl(Address(ESP, 0), Immediate(0));
+    } else {
+      assembler_.mfence();
+    }
+  }
+
  private:
   // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
   // and GenerateArrayLoadWithBakerReadBarrier.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 92cef5f..e0ad062 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -4029,7 +4029,7 @@
    */
   switch (kind) {
     case MemBarrierKind::kAnyAny: {
-      __ mfence();
+      MemoryFence();
       break;
     }
     case MemBarrierKind::kAnyStore:
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index dda9ea2..e5a487c 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_
 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_
 
+#include "arch/x86_64/instruction_set_features_x86_64.h"
 #include "code_generator.h"
 #include "dex/compiler_enums.h"
 #include "driver/compiler_options.h"
@@ -479,6 +480,18 @@
                           int64_t v,
                           HInstruction* instruction);
 
+  // Ensure that prior stores complete to memory before subsequent loads.
+  // The locked add implementation will avoid serializing device memory, but will
+  // touch (but not change) the top of the stack. The locked add should not be used for
+  // ordering non-temporal stores.
+  void MemoryFence(bool force_mfence = false) {
+    if (!force_mfence && isa_features_.PrefersLockedAddSynchronization()) {
+      assembler_.lock()->addl(Address(CpuRegister(RSP), 0), Immediate(0));
+    } else {
+      assembler_.mfence();
+    }
+  }
+
  private:
   // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
   // and GenerateArrayLoadWithBakerReadBarrier.
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index fd454d8..74ade7c 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2005,7 +2005,7 @@
   }
 
   if (is_volatile) {
-    __ mfence();
+    codegen->MemoryFence();
   }
 
   if (type == Primitive::kPrimNot) {
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index ce737e3..6e54dde 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2080,7 +2080,7 @@
   }
 
   if (is_volatile) {
-    __ mfence();
+    codegen->MemoryFence();
   }
 
   if (type == Primitive::kPrimNot) {