ART: Simplify quasi_atomic.h

Remove the QuasiAtomic fence wrappers that merely forward to
std::atomic_thread_fence() and call the standard function directly at
each call site. ThreadFenceForConstructor() is kept, since its AArch64
implementation is a bespoke store-store barrier rather than a plain
forward.

Bug: 71621075
Test: art/test.py --host -j32
Test: art/test.py --target --64 -j4
Change-Id: I008de4d242d1a3cf4d3f50ce171abbbda647bdaa
diff --git a/runtime/base/quasi_atomic.h b/runtime/base/quasi_atomic.h
index 067d01d..0012f64 100644
--- a/runtime/base/quasi_atomic.h
+++ b/runtime/base/quasi_atomic.h
@@ -152,14 +152,6 @@
     return NeedSwapMutexes(isa);
   }
 
-  static void ThreadFenceAcquire() {
-    std::atomic_thread_fence(std::memory_order_acquire);
-  }
-
-  static void ThreadFenceRelease() {
-    std::atomic_thread_fence(std::memory_order_release);
-  }
-
   static void ThreadFenceForConstructor() {
     #if defined(__aarch64__)
       __asm__ __volatile__("dmb ishst" : : : "memory");
@@ -168,10 +160,6 @@
     #endif
   }
 
-  static void ThreadFenceSequentiallyConsistent() {
-    std::atomic_thread_fence(std::memory_order_seq_cst);
-  }
-
  private:
   static Mutex* GetSwapMutex(const volatile int64_t* addr);
   static int64_t SwapMutexRead64(volatile const int64_t* addr);
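
Each deleted wrapper was a one-line forward to the standard fence, so the
substitutions at the call sites below are mechanical. ThreadFenceForConstructor()
is the only fence with behavior of its own, which is presumably why it survives.
A standalone sketch of what remains (not ART code; the non-AArch64 branch is
elided by the hunk boundary above, and the fallback shown is an assumption):

#include <atomic>

// Mechanical substitutions performed by this change:
//   QuasiAtomic::ThreadFenceAcquire()  -> std::atomic_thread_fence(std::memory_order_acquire)
//   QuasiAtomic::ThreadFenceRelease()  -> std::atomic_thread_fence(std::memory_order_release)
//   QuasiAtomic::ThreadFenceSequentiallyConsistent()
//                                      -> std::atomic_thread_fence(std::memory_order_seq_cst)

// Kept: on AArch64 this emits a store-store barrier (dmb ishst), which is
// weaker, and potentially cheaper, than the full dmb ish that a release
// fence typically compiles to there.
static void ThreadFenceForConstructor() {
#if defined(__aarch64__)
  __asm__ __volatile__("dmb ishst" : : : "memory");
#else
  // Assumption: the branch hidden between the two hunks above falls back
  // to a standard release fence.
  std::atomic_thread_fence(std::memory_order_release);
#endif
}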
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 412834c..3025818 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -6179,7 +6179,7 @@
   // Note that there is a race in the presence of multiple threads and we may leak
   // memory from the LinearAlloc, but that's a tradeoff compared to using
   // atomic operations.
-  QuasiAtomic::ThreadFenceRelease();
+  std::atomic_thread_fence(std::memory_order_release);
   new_conflict_method->SetImtConflictTable(new_table, image_pointer_size_);
   return new_conflict_method;
 }
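
The fence here is the writer half of fence-based publication: the conflict
table is fully initialized, a release fence is issued, and only then is the
table pointer stored. Any reader that acquire-loads the pointer is then
guaranteed to observe the initialized table. A reduced sketch of the pattern;
Table, g_table, and the function names are illustrations, not ART identifiers:

#include <atomic>

struct Table { int entries[8]; };

std::atomic<Table*> g_table{nullptr};

void Publish(Table* t) {
  t->entries[0] = 1;                                    // initialize the payload
  std::atomic_thread_fence(std::memory_order_release);  // order init before the store below
  g_table.store(t, std::memory_order_relaxed);          // publish the pointer
}

Table* Read() {
  // The acquire load pairs with the release fence ("fence-atomic"
  // synchronization in [atomics.fences]), so a non-null result implies the
  // entries written in Publish() are visible.
  return g_table.load(std::memory_order_acquire);
}

ART's actual store of the table pointer is a plain field write, which is the
race-versus-leak tradeoff the comment above acknowledges; the sketch uses an
atomic so it is well-defined standard C++.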
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index bb5167f..e1a9c08 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2471,7 +2471,7 @@
 
     // Do a fence to prevent the field CAS in ConcurrentCopying::Process from possibly reordering
     // before the object copy.
-    QuasiAtomic::ThreadFenceRelease();
+    std::atomic_thread_fence(std::memory_order_release);
 
     LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
 
@@ -2566,7 +2566,7 @@
 
 bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
   // TODO: Explain why this is here. What release operation does it pair with?
-  QuasiAtomic::ThreadFenceAcquire();
+  std::atomic_thread_fence(std::memory_order_acquire);
   accounting::ObjectStack* alloc_stack = GetAllocationStack();
   return alloc_stack->Contains(ref);
 }
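
The release fence before the lock-word CAS uses the same rule from the
writer's side: a release fence sequenced before a relaxed atomic write makes
that write act as a release for any reader that acquire-reads it, so the
object copy cannot be observed half-done through the forwarding address.
A reduced sketch under hypothetical names (Obj, forwarding):

#include <atomic>
#include <cstring>

struct Obj { char bytes[64]; };

std::atomic<Obj*> forwarding{nullptr};

void CopyAndPublish(Obj* from, Obj* to) {
  std::memcpy(to, from, sizeof(Obj));                   // the object copy
  std::atomic_thread_fence(std::memory_order_release);  // copy cannot reorder past the CAS
  Obj* expected = nullptr;
  // Relaxed CAS; the fence above supplies the release ordering.
  forwarding.compare_exchange_strong(expected, to, std::memory_order_relaxed);
}

bool SeesFullCopy() {
  Obj* to = forwarding.load(std::memory_order_acquire);  // pairs with the release fence
  return to != nullptr;  // non-null implies *to holds the complete copy
}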
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 4a2dd3b..4c7a97d 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1538,7 +1538,7 @@
   }
   int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
   mirror::Object* newValue = shadow_frame->GetVRegReference(arg_offset + 4);
-  QuasiAtomic::ThreadFenceRelease();
+  std::atomic_thread_fence(std::memory_order_release);
   if (Runtime::Current()->IsActiveTransaction()) {
     obj->SetFieldObject<true>(MemberOffset(offset), newValue);
   } else {
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 5618b6e..de64fdd 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1539,7 +1539,7 @@
 
   // Make sure other threads see the data in the profiling info object before the
   // store in the ArtMethod's ProfilingInfo pointer.
-  QuasiAtomic::ThreadFenceRelease();
+  std::atomic_thread_fence(std::memory_order_release);
 
   method->SetProfilingInfo(info);
   profiling_infos_.push_back(info);
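
Same publication shape as in class_linker.cc, shown here with the other legal
reader pairing: a relaxed load followed by a standalone acquire fence
synchronizes with the release fence just as an acquire load would. Names
(ProfilingData, info_ptr) are illustrative, not ART identifiers:

#include <atomic>

struct ProfilingData { long counters[4]; };

std::atomic<ProfilingData*> info_ptr{nullptr};

void Install(ProfilingData* d) {
  d->counters[0] = 1;                                   // fill in the profiling data
  std::atomic_thread_fence(std::memory_order_release);  // data before pointer
  info_ptr.store(d, std::memory_order_relaxed);
}

ProfilingData* Lookup() {
  ProfilingData* d = info_ptr.load(std::memory_order_relaxed);
  // Fence-to-fence pairing: the acquire fence after the relaxed load gives
  // the same guarantee as loading info_ptr with memory_order_acquire.
  std::atomic_thread_fence(std::memory_order_acquire);
  return d;  // non-null implies the counters written in Install() are visible
}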
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index e110763..f246d8b 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1101,7 +1101,7 @@
       case LockWord::kFatLocked: {
         // We should have done an acquire read of the lockword initially, to ensure
         // visibility of the monitor data structure. Use an explicit fence instead.
-        QuasiAtomic::ThreadFenceAcquire();
+        std::atomic_thread_fence(std::memory_order_acquire);
         Monitor* mon = lock_word.FatLockMonitor();
         if (trylock) {
           return mon->TryLock(self) ? h_obj.Get() : nullptr;
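
The comment spells out the pattern: the earlier lock-word read was not an
acquire read, so a standalone acquire fence is issued once kFatLocked is
observed, upgrading that read before the monitor is dereferenced. A reduced
sketch, with the lock word simplified to an atomic pointer (Monitor and
lock_word are illustrative):

#include <atomic>

struct Monitor { int owner; };

std::atomic<Monitor*> lock_word{nullptr};  // stand-in for ART's LockWord

Monitor* ReadMonitor() {
  Monitor* mon = lock_word.load(std::memory_order_relaxed);  // the earlier plain read
  if (mon != nullptr) {
    // The acquire fence pairs with whichever release operation installed
    // the monitor; after it, mon's fields are safe to read.
    std::atomic_thread_fence(std::memory_order_acquire);
    return mon;
  }
  return nullptr;
}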
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 25f984f..fb00ae3 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -116,7 +116,7 @@
   ScopedFastNativeObjectAccess soa(env);
   ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(javaObj);
   // TODO: A release store is likely to be faster on future processors.
-  QuasiAtomic::ThreadFenceRelease();
+  std::atomic_thread_fence(std::memory_order_release);
   // JNI must use non transactional mode.
   obj->SetField32<false>(MemberOffset(offset), newValue);
 }
@@ -152,7 +152,7 @@
                                   jlong newValue) {
   ScopedFastNativeObjectAccess soa(env);
   ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(javaObj);
-  QuasiAtomic::ThreadFenceRelease();
+  std::atomic_thread_fence(std::memory_order_release);
   // JNI must use non transactional mode.
   obj->SetField64<false>(MemberOffset(offset), newValue);
 }
@@ -194,7 +194,7 @@
   ScopedFastNativeObjectAccess soa(env);
   ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(javaObj);
   ObjPtr<mirror::Object> newValue = soa.Decode<mirror::Object>(javaNewValue);
-  QuasiAtomic::ThreadFenceRelease();
+  std::atomic_thread_fence(std::memory_order_release);
   // JNI must use non transactional mode.
   obj->SetFieldObject<false>(MemberOffset(offset), newValue);
 }
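
The TODO in the first Unsafe hunk above points at the alternative: on
architectures with cheap release stores (AArch64's stlr, for instance), a
single release store can replace the fence-plus-plain-store sequence for the
field being written. A sketch of the two spellings in standard C++ (field is
illustrative; ART's actual write is a plain field store rather than an atomic):

#include <atomic>

std::atomic<int> field{0};

// Current shape: standalone release fence, then the store. Informally, the
// fence orders earlier memory operations before every later atomic store, so
// this is at least as strong as a release store of this one field.
void OrderedPutFence(int v) {
  std::atomic_thread_fence(std::memory_order_release);
  field.store(v, std::memory_order_relaxed);
}

// The shape the TODO suggests: on AArch64 this typically compiles to a
// single stlr instead of dmb ish followed by str.
void OrderedPutReleaseStore(int v) {
  field.store(v, std::memory_order_release);
}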