Relaxed memory barriers for x86

x86 provides stronger memory ordering guarantees than most architectures, so
its memory barriers can be relaxed. This patch treats all memory barriers on
x86 as compiler scheduling barriers only; in the one case where a hardware
fence is still required (the StoreLoad case), an mfence is emitted.
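
For illustration, a minimal sketch of the barrier lowering described above
(the names BarrierKind and EmitMemoryBarrier are hypothetical, not actual
ART APIs):

    // Sketch only: x86 already guarantees LoadLoad, LoadStore and
    // StoreStore ordering for normal memory, so those kinds only need a
    // compiler barrier; StoreLoad is the one kind that needs a real fence.
    enum class BarrierKind { kLoadLoad, kLoadStore, kStoreStore, kStoreLoad };

    inline void EmitMemoryBarrier(BarrierKind kind) {
      if (kind == BarrierKind::kStoreLoad) {
        asm volatile("mfence" ::: "memory");  // hardware fence
      } else {
        asm volatile("" ::: "memory");        // scheduling barrier only
      }
    }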

Change-Id: I13d02bf3f152083ba9f358052aedb583b0d48640
Signed-off-by: Razvan A Lupusoru <razvan.a.lupusoru@intel.com>
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 6df91e6..dd4af9c 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -92,7 +92,10 @@
         ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||  // Skip wide loads.
         ((target_flags & (REG_USE0 | REG_USE1 | REG_USE2)) ==
          (REG_USE0 | REG_USE1 | REG_USE2)) ||  // Skip wide stores.
-        !(target_flags & (IS_LOAD | IS_STORE))) {
+        // Skip instructions that are neither loads nor stores.
+        !(target_flags & (IS_LOAD | IS_STORE)) ||
+        // Skip instructions that both load and store.
+        ((target_flags & (IS_STORE | IS_LOAD)) == (IS_STORE | IS_LOAD))) {
       continue;
     }
 
@@ -293,7 +296,9 @@
     /* Skip non-interesting instructions */
     if (!(target_flags & IS_LOAD) ||
         (this_lir->flags.is_nop == true) ||
-        ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1))) {
+        ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||
+        // Skip instructions that both load and store.
+        ((target_flags & (IS_STORE | IS_LOAD)) == (IS_STORE | IS_LOAD))) {
       continue;
     }