summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author Hans Boehm <hboehm@google.com> 2025-01-17 15:35:52 -0800
committer Hans Boehm <hboehm@google.com> 2025-01-21 11:23:46 -0800
commit 38f9f08494d3f91857d88a598f7d03bd2d1818e2 (patch)
tree cf2bd573a9b646a48ae224e28df70e10015c3f30
parent b30c5a0d84cd03ae465bda61ee41ddaec326b9d5 (diff)
AtomicPair needs to occasionally sleep
Otherwise we can get stuck for a long time if a low-priority thread gets preempted while writing. With enough real-time-priority readers, we could even deadlock.

Bug: 390651377
Test: Treehugger
Change-Id: Iad30095a391b6e67ffe7299d790bd957b7c0c986
-rw-r--r--  runtime/base/atomic_pair.h  18
1 file changed, 15 insertions, 3 deletions
diff --git a/runtime/base/atomic_pair.h b/runtime/base/atomic_pair.h
index 8ba8faf021..21a561716c 100644
--- a/runtime/base/atomic_pair.h
+++ b/runtime/base/atomic_pair.h
@@ -23,6 +23,7 @@
#include <type_traits>
#include "base/macros.h"
+#include "base/time_utils.h"
namespace art HIDDEN {
@@ -39,6 +40,8 @@ namespace art HIDDEN {
static constexpr uint64_t kSeqMask = (0xFFFFFFFFull << 32);
static constexpr uint64_t kSeqLock = (0x80000000ull << 32);
static constexpr uint64_t kSeqIncr = (0x00000001ull << 32);
+static constexpr uint kAtomicPairMaxSpins = 10'000u;
+static constexpr uint kAtomicPairSleepNanos = 5'000u;
// std::pair<> is not trivially copyable and as such it is unsuitable for atomic operations.
template <typename IntType>
@@ -69,7 +72,7 @@ ALWAYS_INLINE static inline void AtomicPairStoreRelease(AtomicPair<IntType>* pai
ALWAYS_INLINE static inline AtomicPair<uint64_t> AtomicPairLoadAcquire(AtomicPair<uint64_t>* pair) {
auto* key_ptr = reinterpret_cast<std::atomic_uint64_t*>(&pair->key);
auto* val_ptr = reinterpret_cast<std::atomic_uint64_t*>(&pair->val);
- while (true) {
+ for (uint i = 0;; ++i) {
uint64_t key0 = key_ptr->load(std::memory_order_acquire);
uint64_t val = val_ptr->load(std::memory_order_acquire);
uint64_t key1 = key_ptr->load(std::memory_order_relaxed);
@@ -77,6 +80,9 @@ ALWAYS_INLINE static inline AtomicPair<uint64_t> AtomicPairLoadAcquire(AtomicPai
if (LIKELY((key0 & kSeqLock) == 0 && key0 == key1)) {
return {key, val};
}
+ if (UNLIKELY(i > kAtomicPairMaxSpins)) {
+ NanoSleep(kAtomicPairSleepNanos);
+ }
}
}
@@ -86,9 +92,15 @@ ALWAYS_INLINE static inline void AtomicPairStoreRelease(AtomicPair<uint64_t>* pa
auto* key_ptr = reinterpret_cast<std::atomic_uint64_t*>(&pair->key);
auto* val_ptr = reinterpret_cast<std::atomic_uint64_t*>(&pair->val);
uint64_t key = key_ptr->load(std::memory_order_relaxed);
- do {
+ for (uint i = 0;; ++i) {
key &= ~kSeqLock; // Ensure that the CAS below fails if the lock bit is already set.
- } while (!key_ptr->compare_exchange_weak(key, key | kSeqLock));
+ if (LIKELY(key_ptr->compare_exchange_weak(key, key | kSeqLock))) {
+ break;
+ }
+ if (UNLIKELY(i > kAtomicPairMaxSpins)) {
+ NanoSleep(kAtomicPairSleepNanos);
+ }
+ }
key = (((key & kSeqMask) + kSeqIncr) & ~kSeqLock) | (value.key & ~kSeqMask);
val_ptr->store(value.val, std::memory_order_release);
key_ptr->store(key, std::memory_order_release);