Tidy up memory barriers.

Make the barrier placement around volatile field accesses explicit,
replace the ANDROID_MEMBAR_* macros with QuasiAtomic barrier helpers,
and switch CasField32 from android_atomic_release_cas to the
__sync_bool_compare_and_swap builtin.

Change-Id: I937ea93e6df1835ecfe2d4bb7d84c24fe7fc097b
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 7ac2c8c..9161bc5 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -253,11 +253,40 @@
   return result;
 }
 
+inline uint32_t Object::GetField32(MemberOffset field_offset, bool is_volatile) const {
+  VerifyObject(this);
+  const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
+  const int32_t* word_addr = reinterpret_cast<const int32_t*>(raw_addr);
+  if (UNLIKELY(is_volatile)) {
+    int32_t result = *(reinterpret_cast<volatile int32_t*>(const_cast<int32_t*>(word_addr)));
+    QuasiAtomic::MembarLoadLoad();  // Ensure this load completes before any subsequent loads.
+    return result;
+  } else {
+    return *word_addr;
+  }
+}
+
+inline void Object::SetField32(MemberOffset field_offset, uint32_t new_value, bool is_volatile,
+                               bool this_is_valid) {
+  if (this_is_valid) {
+    VerifyObject(this);
+  }
+  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  uint32_t* word_addr = reinterpret_cast<uint32_t*>(raw_addr);
+  if (UNLIKELY(is_volatile)) {
+    QuasiAtomic::MembarStoreStore();  // Ensure this store occurs after all prior stores.
+    *word_addr = new_value;
+    QuasiAtomic::MembarStoreLoad();  // Ensure this store completes before any subsequent loads.
+  } else {
+    *word_addr = new_value;
+  }
+}
+
 inline bool Object::CasField32(MemberOffset field_offset, uint32_t old_value, uint32_t new_value) {
   VerifyObject(this);
   byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
-  int32_t* addr = reinterpret_cast<int32_t*>(raw_addr);
-  return android_atomic_release_cas(old_value, new_value, addr) == 0;
+  volatile uint32_t* addr = reinterpret_cast<volatile uint32_t*>(raw_addr);
+  return __sync_bool_compare_and_swap(addr, old_value, new_value);
 }
 
 inline uint64_t Object::GetField64(MemberOffset field_offset, bool is_volatile) const {
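
As a self-contained illustration of the volatile field protocol above, here is a
minimal sketch. The Membar* helpers and the sketch namespace are hypothetical
stand-ins that conservatively compile to full fences via the GCC
__sync_synchronize() builtin; the real QuasiAtomic implementations may use
cheaper barriers (e.g. dmb variants on ARM). The main() also exercises the
__sync_bool_compare_and_swap builtin that CasField32 now uses: it is a
full-barrier CAS that returns true on success, which is why the "== 0" test
needed by android_atomic_release_cas is gone.

    #include <cstdint>
    #include <cstdio>

    namespace sketch {

    // Stand-ins for QuasiAtomic::Membar*. Assumption: a full fence is a
    // correct, if over-strong, implementation of every barrier flavor.
    inline void MembarLoadLoad()   { __sync_synchronize(); }
    inline void MembarStoreStore() { __sync_synchronize(); }
    inline void MembarStoreLoad()  { __sync_synchronize(); }

    // Volatile read: raw load, then a LoadLoad barrier so later loads cannot
    // be reordered ahead of this one (mirrors GetField32 above).
    inline uint32_t VolatileGet32(const volatile uint32_t* addr) {
      uint32_t result = *addr;
      MembarLoadLoad();
      return result;
    }

    // Volatile write: StoreStore before, StoreLoad after (mirrors SetField32).
    inline void VolatileSet32(volatile uint32_t* addr, uint32_t new_value) {
      MembarStoreStore();
      *addr = new_value;
      MembarStoreLoad();
    }

    }  // namespace sketch

    int main() {
      uint32_t field = 0;
      sketch::VolatileSet32(&field, 1);
      std::printf("get: %u\n", sketch::VolatileGet32(&field));
      // The builtin returns true on success, false if *addr != old_value.
      bool swapped = __sync_bool_compare_and_swap(&field, 1u, 2u);
      std::printf("swapped: %d, field: %u\n", swapped, field);
      return 0;
    }
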
@@ -266,7 +295,7 @@
   const int64_t* addr = reinterpret_cast<const int64_t*>(raw_addr);
   if (UNLIKELY(is_volatile)) {
     uint64_t result = QuasiAtomic::Read64(addr);
-    ANDROID_MEMBAR_FULL();
+    QuasiAtomic::MembarLoadLoad();  // Ensure this load completes before any subsequent loads.
     return result;
   } else {
     return *addr;
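
QuasiAtomic::Read64 must produce a single untorn 64-bit load even on 32-bit
targets, which is why the volatile path cannot simply dereference the pointer.
One portable way to get such a load, shown below purely as an illustrative
assumption (Read64Sketch is hypothetical; real implementations may instead use
ldrexd on ARM or a mutex, per LongAtomicsUseMutexes), is a compare-and-swap
with identical old and new values:

    #include <cstdint>

    // CAS(addr, 0, 0) modifies nothing observable (it stores 0 only when the
    // value was already 0) and returns the old value as one atomic access.
    inline int64_t Read64Sketch(volatile int64_t* addr) {
      return __sync_val_compare_and_swap(addr, static_cast<int64_t>(0),
                                         static_cast<int64_t>(0));
    }
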
@@ -278,9 +307,13 @@
   byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
   int64_t* addr = reinterpret_cast<int64_t*>(raw_addr);
   if (UNLIKELY(is_volatile)) {
-    ANDROID_MEMBAR_STORE();
+    QuasiAtomic::MembarStoreStore();  // Ensure this store occurs after all prior stores.
     QuasiAtomic::Write64(addr, new_value);
-    // Post-store barrier not required due to use of atomic op or mutex.
+    if (!QuasiAtomic::LongAtomicsUseMutexes()) {
+      QuasiAtomic::MembarStoreLoad();  // Ensure this store completes before any subsequent loads.
+    } else {
+      // The fence from the mutex is enough.
+    }
   } else {
     *addr = new_value;
   }
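
The new branch in SetField64 mirrors the two ways QuasiAtomic::Write64 can be
implemented. A rough sketch of that split, using a hypothetical kHasAtomic64
flag and a plain std::mutex in place of ART's actual dispatch and lock:

    #include <cstdint>
    #include <mutex>

    static std::mutex g_quasi_atomic_mutex;  // stand-in for ART's lock
    static const bool kHasAtomic64 = false;  // assumption: 32-bit target

    inline void Write64Sketch(volatile int64_t* addr, int64_t value) {
      if (kHasAtomic64) {
        // Lock-free path: a CAS loop yields one atomic 64-bit store; the
        // caller still issues MembarStoreLoad() afterwards, as SetField64
        // does above. A torn read of old_value is harmless because the CAS
        // would then fail and retry.
        int64_t old_value;
        do {
          old_value = *addr;
        } while (!__sync_bool_compare_and_swap(addr, old_value, value));
      } else {
        // Mutex path (LongAtomicsUseMutexes() == true): the lock and unlock
        // already act as fences, so no explicit MembarStoreLoad() is needed.
        std::lock_guard<std::mutex> lock(g_quasi_atomic_mutex);
        *addr = value;
      }
    }
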