 src/atomic.cc | 49
 src/atomic.h  | 10
 2 files changed, 49 insertions(+), 10 deletions(-)
diff --git a/src/atomic.cc b/src/atomic.cc
index f03cbbc592..c373579ee2 100644
--- a/src/atomic.cc
+++ b/src/atomic.cc
@@ -46,12 +46,24 @@ int QuasiAtomicCas64(int64_t old_value, int64_t new_value, volatile int64_t* add
   return OSAtomicCompareAndSwap64Barrier(old_value, new_value, (int64_t*)addr) == 0;
 }
 
-int64_t QuasiAtomicSwap64(int64_t value, volatile int64_t* addr) {
-  int64_t oldValue;
+static inline int64_t QuasiAtomicSwap64Impl(int64_t value, volatile int64_t* addr) {
+  int64_t old_value;
   do {
-    oldValue = *addr;
-  } while (QuasiAtomicCas64(oldValue, value, addr));
-  return oldValue;
+    old_value = *addr;
+  } while (QuasiAtomicCas64(old_value, value, addr));
+  return old_value;
+}
+
+int64_t QuasiAtomicSwap64(int64_t value, volatile int64_t* addr) {
+  return QuasiAtomicSwap64Impl(value, addr);
+}
+
+int64_t QuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr) {
+  ANDROID_MEMBAR_STORE();
+  int64_t old_value = QuasiAtomicSwap64Impl(value, addr);
+  /* TUNING: barriers can be avoided on some architectures */
+  ANDROID_MEMBAR_FULL();
+  return old_value;
 }
 
 int64_t QuasiAtomicRead64(volatile const int64_t* addr) {
@@ -66,7 +78,7 @@ int64_t QuasiAtomicRead64(volatile const int64_t* addr) {
 #include <machine/cpu-features.h>
 
 #ifdef __ARM_HAVE_LDREXD
-int64_t QuasiAtomicSwap64(int64_t new_value, volatile int64_t* addr) {
+static inline int64_t QuasiAtomicSwap64Impl(int64_t new_value, volatile int64_t* addr) {
   int64_t prev;
   int status;
   do {
@@ -80,6 +92,17 @@ int64_t QuasiAtomicSwap64(int64_t new_value, volatile int64_t* addr) {
   return prev;
 }
 
+int64_t QuasiAtomicSwap64(int64_t new_value, volatile int64_t* addr) {
+  return QuasiAtomicSwap64Impl(new_value, addr);
+}
+
+int64_t QuasiAtomicSwap64Sync(int64_t new_value, volatile int64_t* addr) {
+  ANDROID_MEMBAR_STORE();
+  int64_t old_value = QuasiAtomicSwap64Impl(new_value, addr);
+  ANDROID_MEMBAR_FULL();
+  return old_value;
+}
+
 int QuasiAtomicCas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
   int64_t prev;
   int status;
@@ -135,11 +158,16 @@ int64_t QuasiAtomicSwap64(int64_t value, volatile int64_t* addr) {
 
   pthread_mutex_lock(lock);
 
-  int64_t oldValue = *addr;
+  int64_t old_value = *addr;
   *addr = value;
 
   pthread_mutex_unlock(lock);
-  return oldValue;
+  return old_value;
+}
+
+int64_t QuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr) {
+  // Same as QuasiAtomicSwap64 - mutex handles barrier.
+  return QuasiAtomicSwap64(value, addr);
 }
 
 int QuasiAtomicCas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
@@ -261,6 +289,11 @@ int64_t QuasiAtomicSwap64(int64_t value, volatile int64_t* addr) {
   return result;
 }
 
+int64_t QuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr) {
+  // Same as QuasiAtomicSwap64 - syscall handles barrier.
+  return QuasiAtomicSwap64(value, addr);
+}
+
 #endif /*NEED_QUASIATOMICS*/
 
 }  // namespace art
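For readers more familiar with C++11 atomics: the split introduced above (a barrier-free swap plus a fenced "Sync" wrapper) can be sketched with std::atomic. This is an illustration only, not part of the change; RelaxedSwap64 and SyncSwap64 are made-up names, and the fences only approximate the semantics of ANDROID_MEMBAR_STORE()/ANDROID_MEMBAR_FULL().

#include <atomic>
#include <cstdint>

// Illustration only (not part of this change). RelaxedSwap64 and SyncSwap64
// are made-up names; the fences are approximations of the Android barriers.

// Counterpart of QuasiAtomicSwap64: atomic, but no ordering guarantees.
int64_t RelaxedSwap64(int64_t value, std::atomic<int64_t>* addr) {
  return addr->exchange(value, std::memory_order_relaxed);
}

// Counterpart of QuasiAtomicSwap64Sync: fences on both sides of the swap.
int64_t SyncSwap64(int64_t value, std::atomic<int64_t>* addr) {
  std::atomic_thread_fence(std::memory_order_release);  // ~ANDROID_MEMBAR_STORE()
  int64_t old_value = addr->exchange(value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ANDROID_MEMBAR_FULL()
  return old_value;
}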
diff --git a/src/atomic.h b/src/atomic.h
index e3e4fc02cd..dab625e4b6 100644
--- a/src/atomic.h
+++ b/src/atomic.h
@@ -30,16 +30,22 @@ namespace art {
  * quasiatomic operations that are performed on partially-overlapping
  * memory.
  *
- * None of these provide a memory barrier.
+ * Only the "Sync" functions provide a memory barrier.
  */
 
 /*
  * Swap the 64-bit value at "addr" with "value". Returns the previous
- * value.
+ * value. No memory barriers.
  */
 int64_t QuasiAtomicSwap64(int64_t value, volatile int64_t* addr);
 
 /*
+ * Swap the 64-bit value at "addr" with "value". Returns the previous
+ * value. Provides memory barriers.
+ */
+int64_t QuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr);
+
+/*
  * Read the 64-bit value at "addr".
  */
 int64_t QuasiAtomicRead64(volatile const int64_t* addr);
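A hypothetical caller, to show when each variant from the updated header would be chosen. StoreField64 is not in the tree; it only illustrates the barrier-free vs. synchronized split that the new header comments describe.

#include <cstdint>

#include "atomic.h"  // QuasiAtomicSwap64 / QuasiAtomicSwap64Sync, as declared above

// Hypothetical helper (not in the tree): stores a 64-bit field that other
// threads may read concurrently. An ordinary store only needs the swap to
// be atomic; a synchronized ("volatile") store also needs the barriers.
void StoreField64(volatile int64_t* field, int64_t value, bool is_volatile) {
  if (is_volatile) {
    QuasiAtomicSwap64Sync(value, field);  // barriers before and after the swap
  } else {
    QuasiAtomicSwap64(value, field);      // atomic swap, no ordering
  }
}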