Extract atomic pair code from DexCache

Move it to its own header so the code can be reused.
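
A quick sketch of the intended use (hypothetical caller, not part of
this change):

  std::atomic<AtomicPair<uintptr_t>> slot;
  AtomicPairStoreRelease(&slot, AtomicPair<uintptr_t>(1u, 2u));
  AtomicPair<uintptr_t> pair = AtomicPairLoadAcquire(&slot);  // {1, 2}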

Test: test.py -b -r --interpreter
Change-Id: I749a03c48df8b481efcf573a41feb67c9b15f393
diff --git a/runtime/base/atomic_pair.h b/runtime/base/atomic_pair.h
new file mode 100644
index 0000000..3e9e820
--- /dev/null
+++ b/runtime/base/atomic_pair.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_ATOMIC_PAIR_H_
+#define ART_RUNTIME_BASE_ATOMIC_PAIR_H_
+
+#include "base/macros.h"
+
+#include <type_traits>
+
+namespace art {
+
+// std::pair<> is not trivially copyable and as such it is unsuitable for atomic operations.
+template <typename IntType>
+struct PACKED(2 * sizeof(IntType)) AtomicPair {
+  static_assert(std::is_integral_v<IntType>);
+
+  constexpr AtomicPair() : first(0), second(0) { }
+  AtomicPair(IntType f, IntType s) : first(f), second(s) { }
+  AtomicPair(const AtomicPair&) = default;
+  AtomicPair& operator=(const AtomicPair&) = default;
+
+  IntType first;
+  IntType second;
+};
+
+template <typename IntType>
+ALWAYS_INLINE static inline AtomicPair<IntType> AtomicPairLoadAcquire(
+    std::atomic<AtomicPair<IntType>>* target) {
+  static_assert(std::atomic<AtomicPair<IntType>>::is_always_lock_free);
+  return target->load(std::memory_order_acquire);
+}
+
+template <typename IntType>
+ALWAYS_INLINE static inline void AtomicPairStoreRelease(
+    std::atomic<AtomicPair<IntType>>* target, AtomicPair<IntType> value) {
+  static_assert(std::atomic<AtomicPair<IntType>>::is_always_lock_free);
+  target->store(value, std::memory_order_release);
+}
+
+// LLVM does not implement lock-free 16-byte atomics on x86-64; hand-code them using cmpxchg16b.
+#if defined(__x86_64__)
+ALWAYS_INLINE static inline AtomicPair<uint64_t> AtomicPairLoadAcquire(
+    std::atomic<AtomicPair<uint64_t>>* target) {
+  uint64_t first, second;
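+  // cmpxchg16b compares rdx:rax to the destination and, on mismatch,
+  // loads the current value into rdx:rax instead. With zero comparand
+  // and zero new value this acts as a plain 16-byte atomic load.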
+  __asm__ __volatile__(
+      "lock cmpxchg16b (%2)"
+      : "=&a"(first), "=&d"(second)
+      : "r"(target), "a"(0), "d"(0), "b"(0), "c"(0)
+      : "cc");
+  return {first, second};
+}
+
+ALWAYS_INLINE static inline void AtomicPairStoreRelease(
+    std::atomic<AtomicPair<uint64_t>>* target, AtomicPair<uint64_t> value) {
+  uint64_t first, second;
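+  // Read the old value, then retry cmpxchg16b until it succeeds. On
+  // failure cmpxchg16b updates rdx:rax with the value it observed, so
+  // the loop does not need to reload memory before retrying.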
+  __asm__ __volatile__ (
+      "movq (%2), %%rax\n\t"
+      "movq 8(%2), %%rdx\n\t"
+      "1:\n\t"
+      "lock cmpxchg16b (%2)\n\t"
+      "jnz 1b"
+      : "=&a"(first), "=&d"(second)
+      : "r"(target), "b"(value.first), "c"(value.second)
+      : "cc");
+}
+#endif  // defined(__x86_64__)
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_BASE_ATOMIC_PAIR_H_
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 31f2bd2..e04aed5 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -23,6 +23,7 @@
 
 #include "art_field.h"
 #include "art_method.h"
+#include "base/atomic_pair.h"
 #include "base/casts.h"
 #include "base/enums.h"
 #include "class_linker.h"
@@ -357,32 +358,21 @@
 template <typename T>
 NativeDexCachePair<T> DexCache::GetNativePair(std::atomic<NativeDexCachePair<T>>* pair_array,
                                               size_t idx) {
-  if (kRuntimePointerSize == PointerSize::k64) {
-    auto* array = reinterpret_cast<std::atomic<ConversionPair64>*>(pair_array);
-    ConversionPair64 value = AtomicLoadRelaxed16B(&array[idx]);
-    return NativeDexCachePair<T>(reinterpret_cast64<T*>(value.first),
-                                 dchecked_integral_cast<size_t>(value.second));
-  } else {
-    auto* array = reinterpret_cast<std::atomic<ConversionPair32>*>(pair_array);
-    ConversionPair32 value = array[idx].load(std::memory_order_relaxed);
-    return NativeDexCachePair<T>(reinterpret_cast32<T*>(value.first), value.second);
-  }
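+  // The pair is loaded as two uintptr_t words; this relies on
+  // NativeDexCachePair<T> being laid out as {T* object, size_t index}.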
+  auto* array = reinterpret_cast<std::atomic<AtomicPair<uintptr_t>>*>(pair_array);
+  AtomicPair<uintptr_t> value = AtomicPairLoadAcquire(&array[idx]);
+  return NativeDexCachePair<T>(reinterpret_cast<T*>(value.first), value.second);
 }
 
 template <typename T>
 void DexCache::SetNativePair(std::atomic<NativeDexCachePair<T>>* pair_array,
                              size_t idx,
                              NativeDexCachePair<T> pair) {
-  if (kRuntimePointerSize == PointerSize::k64) {
-    auto* array = reinterpret_cast<std::atomic<ConversionPair64>*>(pair_array);
-    ConversionPair64 v(reinterpret_cast64<uint64_t>(pair.object), pair.index);
-    AtomicStoreRelease16B(&array[idx], v);
-  } else {
-    auto* array = reinterpret_cast<std::atomic<ConversionPair32>*>(pair_array);
-    ConversionPair32 v(reinterpret_cast32<uint32_t>(pair.object),
-                       dchecked_integral_cast<uint32_t>(pair.index));
-    array[idx].store(v, std::memory_order_release);
-  }
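+  // Store through the same two-word view; release ordering publishes the pair.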
+  auto* array = reinterpret_cast<std::atomic<AtomicPair<uintptr_t>>*>(pair_array);
+  AtomicPair<uintptr_t> v(reinterpret_cast<uintptr_t>(pair.object), pair.index);
+  AtomicPairStoreRelease(&array[idx], v);
 }
 
 template <typename T,
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index c80f9df..b7f8ee7 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -126,23 +126,5 @@
   return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(DexCache, class_loader_));
 }
 
-#if !defined(__aarch64__) && !defined(__x86_64__)
-static pthread_mutex_t dex_cache_slow_atomic_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-DexCache::ConversionPair64 DexCache::AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target) {
-  pthread_mutex_lock(&dex_cache_slow_atomic_mutex);
-  DexCache::ConversionPair64 value = *reinterpret_cast<ConversionPair64*>(target);
-  pthread_mutex_unlock(&dex_cache_slow_atomic_mutex);
-  return value;
-}
-
-void DexCache::AtomicStoreRelease16B(std::atomic<ConversionPair64>* target,
-                                     ConversionPair64 value) {
-  pthread_mutex_lock(&dex_cache_slow_atomic_mutex);
-  *reinterpret_cast<ConversionPair64*>(target) = value;
-  pthread_mutex_unlock(&dex_cache_slow_atomic_mutex);
-}
-#endif
-
 }  // namespace mirror
 }  // namespace art
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 26fc520c..12f1985 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -448,19 +448,6 @@
   T* AllocArray(MemberOffset obj_offset, MemberOffset num_offset, size_t num)
      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // std::pair<> is not trivially copyable and as such it is unsuitable for atomic operations,
-  // so we use a custom pair class for loading and storing the NativeDexCachePair<>.
-  template <typename IntType>
-  struct PACKED(2 * sizeof(IntType)) ConversionPair {
-    ConversionPair(IntType f, IntType s) : first(f), second(s) { }
-    ConversionPair(const ConversionPair&) = default;
-    ConversionPair& operator=(const ConversionPair&) = default;
-    IntType first;
-    IntType second;
-  };
-  using ConversionPair32 = ConversionPair<uint32_t>;
-  using ConversionPair64 = ConversionPair<uint64_t>;
-
   // Visit instance fields of the dex cache as well as its associated arrays.
   template <bool kVisitNativeRoots,
             VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -469,48 +456,6 @@
   void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
 
-  // Due to lack of 16-byte atomics support, we use hand-crafted routines.
-#if defined(__aarch64__)
-  // 16-byte atomics are supported on aarch64.
-  ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
-      std::atomic<ConversionPair64>* target) {
-    return target->load(std::memory_order_relaxed);
-  }
-
-  ALWAYS_INLINE static void AtomicStoreRelease16B(
-      std::atomic<ConversionPair64>* target, ConversionPair64 value) {
-    target->store(value, std::memory_order_release);
-  }
-#elif defined(__x86_64__)
-  ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
-      std::atomic<ConversionPair64>* target) {
-    uint64_t first, second;
-    __asm__ __volatile__(
-        "lock cmpxchg16b (%2)"
-        : "=&a"(first), "=&d"(second)
-        : "r"(target), "a"(0), "d"(0), "b"(0), "c"(0)
-        : "cc");
-    return ConversionPair64(first, second);
-  }
-
-  ALWAYS_INLINE static void AtomicStoreRelease16B(
-      std::atomic<ConversionPair64>* target, ConversionPair64 value) {
-    uint64_t first, second;
-    __asm__ __volatile__ (
-        "movq (%2), %%rax\n\t"
-        "movq 8(%2), %%rdx\n\t"
-        "1:\n\t"
-        "lock cmpxchg16b (%2)\n\t"
-        "jnz 1b"
-        : "=&a"(first), "=&d"(second)
-        : "r"(target), "b"(value.first), "c"(value.second)
-        : "cc");
-  }
-#else
-  static ConversionPair64 AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target);
-  static void AtomicStoreRelease16B(std::atomic<ConversionPair64>* target, ConversionPair64 value);
-#endif
-
   HeapReference<ClassLoader> class_loader_;
   HeapReference<String> location_;