Merge "ART: Rename Atomic::CompareExchange methods"
diff --git a/compiler/utils/atomic_dex_ref_map-inl.h b/compiler/utils/atomic_dex_ref_map-inl.h
index 33d59f9..00db09a 100644
--- a/compiler/utils/atomic_dex_ref_map-inl.h
+++ b/compiler/utils/atomic_dex_ref_map-inl.h
@@ -58,7 +58,7 @@
return kInsertResultInvalidDexFile;
}
DCHECK_LT(ref.index, array->size());
- return (*array)[ref.index].CompareExchangeStrongSequentiallyConsistent(expected, desired)
+ return (*array)[ref.index].CompareAndSetStrongSequentiallyConsistent(expected, desired)
? kInsertResultSuccess
: kInsertResultCASFailure;
}
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index c6c4f4a..fc7920f 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -80,9 +80,8 @@
mirror::Class* desired_class) {
GcRoot<mirror::Class> expected_root(expected_class);
GcRoot<mirror::Class> desired_root(desired_class);
- return reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&declaring_class_)->
- CompareExchangeStrongSequentiallyConsistent(
- expected_root, desired_root);
+ auto atomic_root_class = reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&declaring_class_);
+ return atomic_root_class->CompareAndSetStrongSequentiallyConsistent(expected_root, desired_root);
}
inline uint16_t ArtMethod::GetMethodIndex() {
diff --git a/runtime/atomic.h b/runtime/atomic.h
index ec3eb6d..2d0290f 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -243,44 +243,44 @@
// Atomically replace the value with desired value if it matches the expected value.
// Participates in total ordering of atomic operations.
- bool CompareExchangeStrongSequentiallyConsistent(T expected_value, T desired_value) {
+ bool CompareAndSetStrongSequentiallyConsistent(T expected_value, T desired_value) {
return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_seq_cst);
}
// The same, except it may fail spuriously.
- bool CompareExchangeWeakSequentiallyConsistent(T expected_value, T desired_value) {
+ bool CompareAndSetWeakSequentiallyConsistent(T expected_value, T desired_value) {
return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_seq_cst);
}
// Atomically replace the value with desired value if it matches the expected value. Doesn't
// imply ordering or synchronization constraints.
- bool CompareExchangeStrongRelaxed(T expected_value, T desired_value) {
+ bool CompareAndSetStrongRelaxed(T expected_value, T desired_value) {
return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_relaxed);
}
// Atomically replace the value with desired value if it matches the expected value. Prior writes
// to other memory locations become visible to the threads that do a consume or an acquire on the
// same location.
- bool CompareExchangeStrongRelease(T expected_value, T desired_value) {
+ bool CompareAndSetStrongRelease(T expected_value, T desired_value) {
return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_release);
}
// The same, except it may fail spuriously.
- bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
+ bool CompareAndSetWeakRelaxed(T expected_value, T desired_value) {
return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed);
}
// Atomically replace the value with desired value if it matches the expected value. Prior writes
// made to other memory locations by the thread that did the release become visible in this
// thread.
- bool CompareExchangeWeakAcquire(T expected_value, T desired_value) {
+ bool CompareAndSetWeakAcquire(T expected_value, T desired_value) {
return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_acquire);
}
// Atomically replace the value with desired value if it matches the expected value. Prior writes
// to other memory locations become visible to the threads that do a consume or an acquire on the
// same location.
- bool CompareExchangeWeakRelease(T expected_value, T desired_value) {
+ bool CompareAndSetWeakRelease(T expected_value, T desired_value) {
return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release);
}
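Note: the weak/strong split above follows std::atomic. The weak forms may fail spuriously and belong in retry loops; the strong forms fail only on a genuine value mismatch, so a single attempt is enough when losing the race is acceptable. A minimal usage sketch with the new names (invented variables, not part of this change; the Atomic<bool> use mirrors concurrent_gc_pending_ further down):

    // Weak CAS: retry until it succeeds or the guard condition bails out.
    Atomic<int32_t> counter(0);
    int32_t cur;
    do {
      cur = counter.LoadRelaxed();
    } while (!counter.CompareAndSetWeakRelaxed(cur, cur + 1));

    // Strong CAS: one attempt decides who wins the race.
    Atomic<bool> claimed(false);
    bool won = claimed.CompareAndSetStrongSequentiallyConsistent(false, true);  // false if another thread got there first.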
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 587b092..01adbf1 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -164,7 +164,7 @@
int32_t cur_state = state_.LoadRelaxed();
if (LIKELY(cur_state >= 0)) {
// Add as an extra reader.
- done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
+ done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
} else {
HandleSharedLockContention(self, cur_state);
}
@@ -188,10 +188,10 @@
int32_t cur_state = state_.LoadRelaxed();
if (LIKELY(cur_state > 0)) {
// Reduce state by 1 and impose lock release load/store ordering.
- // Note, the relaxed loads below musn't reorder before the CompareExchange.
+    // Note, the relaxed loads below mustn't reorder before the CompareAndSet.
// TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
// a status bit into the state on contention.
- done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, cur_state - 1);
+ done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, cur_state - 1);
if (done && (cur_state - 1) == 0) { // Weak CAS may fail spuriously.
if (num_pending_writers_.LoadRelaxed() > 0 ||
num_pending_readers_.LoadRelaxed() > 0) {
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index e88ed68..9f17ad0 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -127,7 +127,7 @@
public:
explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
for (uint32_t i = 0;
- !gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex);
+ !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(0, mutex);
++i) {
BackOff(i);
}
@@ -146,7 +146,7 @@
public:
explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
for (uint32_t i = 0;
- !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareExchangeWeakAcquire(0, mutex);
+ !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(0, mutex);
++i) {
BackOff(i);
}
@@ -314,7 +314,7 @@
do {
slot = data->cur_content_log_entry.LoadRelaxed();
new_slot = (slot + 1) % kContentionLogSize;
- } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
+ } while (!data->cur_content_log_entry.CompareAndSetWeakRelaxed(slot, new_slot));
log[new_slot].blocked_tid = blocked_tid;
log[new_slot].owner_tid = owner_tid;
log[new_slot].count.StoreRelaxed(1);
@@ -438,7 +438,7 @@
int32_t cur_state = state_.LoadRelaxed();
if (LIKELY(cur_state == 0)) {
// Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
- done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
+ done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, 1 /* new state */);
} else {
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
@@ -484,7 +484,7 @@
int32_t cur_state = state_.LoadRelaxed();
if (cur_state == 0) {
// Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
- done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
+ done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, 1 /* new state */);
} else {
return false;
}
@@ -543,10 +543,10 @@
// We're no longer the owner.
exclusive_owner_.StoreRelaxed(0);
// Change state to 0 and impose load/store ordering appropriate for lock release.
- // Note, the relaxed loads below mustn't reorder before the CompareExchange.
+ // Note, the relaxed loads below mustn't reorder before the CompareAndSet.
// TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
// a status bit into the state on contention.
- done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
+ done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, 0 /* new state */);
if (LIKELY(done)) { // Spurious fail?
// Wake a contender.
if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
@@ -639,7 +639,7 @@
int32_t cur_state = state_.LoadRelaxed();
if (LIKELY(cur_state == 0)) {
// Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
- done = state_.CompareExchangeWeakAcquire(0 /* cur_state*/, -1 /* new state */);
+ done = state_.CompareAndSetWeakAcquire(0 /* cur_state*/, -1 /* new state */);
} else {
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
@@ -680,10 +680,10 @@
// We're no longer the owner.
exclusive_owner_.StoreRelaxed(0);
// Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
- // Note, the relaxed loads below musn't reorder before the CompareExchange.
+    // Note, the relaxed loads below mustn't reorder before the CompareAndSet.
// TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
// a status bit into the state on contention.
- done = state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */);
+ done = state_.CompareAndSetWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */);
if (LIKELY(done)) { // Weak CAS may fail spuriously.
// Wake any waiters.
if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
@@ -712,7 +712,7 @@
int32_t cur_state = state_.LoadRelaxed();
if (cur_state == 0) {
// Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
- done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
+ done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */);
} else {
// Failed to acquire, hang up.
timespec now_abs_ts;
@@ -784,7 +784,7 @@
int32_t cur_state = state_.LoadRelaxed();
if (cur_state >= 0) {
// Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
- done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
+ done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
} else {
// Owner holds it exclusively.
return false;
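Note: the ScopedAllMutexesLock and ScopedExpectedMutexesOnWeakRefAccessLock changes above show the spin-until-acquired shape used throughout this file. A condensed sketch in the same style (invented names; the release store in the destructor is an assumption, since the destructors are not part of these hunks):

    class SpinGuard {
     public:
      SpinGuard(Atomic<const void*>* guard, const void* owner) : guard_(guard) {
        // Acquire ordering on success keeps the guarded section from floating above the CAS.
        for (uint32_t i = 0; !guard_->CompareAndSetWeakAcquire(nullptr, owner); ++i) {
          BackOff(i);  // Same exponential back-off helper used by the code above.
        }
      }
      ~SpinGuard() {
        guard_->StoreRelease(nullptr);  // Publish the guarded section's writes on release.
      }
     private:
      Atomic<const void*>* const guard_;
    };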
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index 1280466..718e93a 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -95,7 +95,7 @@
if (kReadBarrierOption != kWithoutReadBarrier && before_ptr != after_ptr) {
// If another thread raced and updated the reference, do not store the read barrier updated
// one.
- data_.CompareExchangeStrongRelease(before, Encode(after_ptr, MaskHash(before)));
+ data_.CompareAndSetStrongRelease(before, Encode(after_ptr, MaskHash(before)));
}
return after_ptr.Ptr();
}
@@ -110,7 +110,7 @@
if (before_ptr != after_ptr) {
// If another thread raced and updated the reference, do not store the read barrier updated
// one.
- data_.CompareExchangeStrongRelease(before, Encode(after_ptr, MaskHash(before)));
+ data_.CompareAndSetStrongRelease(before, Encode(after_ptr, MaskHash(before)));
}
}
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index e5b5694..72eb827 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -109,7 +109,7 @@
// Stack overflow.
return false;
}
- } while (!back_index_.CompareExchangeWeakRelaxed(index, new_index));
+ } while (!back_index_.CompareAndSetWeakRelaxed(index, new_index));
*start_address = begin_ + index;
*end_address = begin_ + new_index;
if (kIsDebugBuild) {
@@ -241,7 +241,7 @@
// Stack overflow.
return false;
}
- } while (!back_index_.CompareExchangeWeakRelaxed(index, index + 1));
+ } while (!back_index_.CompareAndSetWeakRelaxed(index, index + 1));
begin_[index].Assign(value);
return true;
}
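Note: both hunks above reserve a slot by bumping back_index_ with a relaxed weak CAS. The pattern in isolation (a sketch with invented names, not the class's real interface):

    // Reserve one slot in a fixed-capacity array shared by many producers. Relaxed ordering
    // is enough for the index itself; any ordering needed for the stored element is the
    // caller's responsibility.
    bool TryReserveSlot(Atomic<int32_t>* back_index, int32_t capacity, int32_t* out_index) {
      int32_t index;
      do {
        index = back_index->LoadRelaxed();
        if (UNLIKELY(index >= capacity)) {
          return false;  // Stack overflow.
        }
      } while (!back_index->CompareAndSetWeakRelaxed(index, index + 1));
      *out_index = index;
      return true;
    }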
diff --git a/runtime/gc/accounting/bitmap-inl.h b/runtime/gc/accounting/bitmap-inl.h
index ca6b479..bf153f5 100644
--- a/runtime/gc/accounting/bitmap-inl.h
+++ b/runtime/gc/accounting/bitmap-inl.h
@@ -43,8 +43,7 @@
DCHECK(TestBit(bit_index));
return true;
}
- } while (!atomic_entry->CompareExchangeWeakSequentiallyConsistent(old_word,
- old_word | word_mask));
+ } while (!atomic_entry->CompareAndSetWeakSequentiallyConsistent(old_word, old_word | word_mask));
DCHECK(TestBit(bit_index));
return false;
}
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 5f2f2dd..adca5c8 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -33,7 +33,7 @@
static inline bool byte_cas(uint8_t old_value, uint8_t new_value, uint8_t* address) {
#if defined(__i386__) || defined(__x86_64__)
Atomic<uint8_t>* byte_atomic = reinterpret_cast<Atomic<uint8_t>*>(address);
- return byte_atomic->CompareExchangeWeakRelaxed(old_value, new_value);
+ return byte_atomic->CompareAndSetWeakRelaxed(old_value, new_value);
#else
// Little endian means most significant byte is on the left.
const size_t shift_in_bytes = reinterpret_cast<uintptr_t>(address) % sizeof(uintptr_t);
@@ -47,7 +47,7 @@
~(static_cast<uintptr_t>(0xFF) << shift_in_bits);
const uintptr_t old_word = cur_word | (static_cast<uintptr_t>(old_value) << shift_in_bits);
const uintptr_t new_word = cur_word | (static_cast<uintptr_t>(new_value) << shift_in_bits);
- return word_atomic->CompareExchangeWeakRelaxed(old_word, new_word);
+ return word_atomic->CompareAndSetWeakRelaxed(old_word, new_word);
#endif
}
@@ -195,7 +195,7 @@
new_bytes[i] = visitor(expected_bytes[i]);
}
Atomic<uintptr_t>* atomic_word = reinterpret_cast<Atomic<uintptr_t>*>(word_cur);
- if (LIKELY(atomic_word->CompareExchangeWeakRelaxed(expected_word, new_word))) {
+ if (LIKELY(atomic_word->CompareAndSetWeakRelaxed(expected_word, new_word))) {
for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
const uint8_t expected_byte = expected_bytes[i];
const uint8_t new_byte = new_bytes[i];
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index ba83369..df9ee8c 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -47,7 +47,7 @@
DCHECK(Test(obj));
return true;
}
- } while (!atomic_entry->CompareExchangeWeakRelaxed(old_word, old_word | mask));
+ } while (!atomic_entry->CompareAndSetWeakRelaxed(old_word, old_word | mask));
DCHECK(Test(obj));
return false;
}
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 70685bc..e1055f2 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2079,7 +2079,7 @@
// It was updated by the mutator.
break;
}
- } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
+ } while (!addr->CompareAndSetWeakRelaxed(expected_ref, new_ref));
}
}
@@ -2098,7 +2098,7 @@
// It was updated by the mutator.
break;
}
- } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
+ } while (!addr->CompareAndSetWeakRelaxed(expected_ref, new_ref));
}
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index dbaddaf..d2df833 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1127,7 +1127,7 @@
AllocationListener* old;
do {
old = storage->LoadSequentiallyConsistent();
- } while (!storage->CompareExchangeStrongSequentiallyConsistent(old, new_value));
+ } while (!storage->CompareAndSetStrongSequentiallyConsistent(old, new_value));
return old;
}
@@ -3601,7 +3601,7 @@
void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) {
if (CanAddHeapTask(self) &&
- concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
+ concurrent_gc_pending_.CompareAndSetStrongSequentiallyConsistent(false, true)) {
task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away.
cause,
force_full));
@@ -3846,7 +3846,7 @@
do {
allocated = new_native_bytes_allocated_.LoadRelaxed();
new_freed_bytes = std::min(allocated, bytes);
- } while (!new_native_bytes_allocated_.CompareExchangeWeakRelaxed(allocated,
+ } while (!new_native_bytes_allocated_.CompareAndSetWeakRelaxed(allocated,
allocated - new_freed_bytes));
if (new_freed_bytes < bytes) {
old_native_bytes_allocated_.FetchAndSubRelaxed(bytes - new_freed_bytes);
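Note: the allocation-listener hunk above (load, then strong CAS until it sticks) is an atomic exchange built out of compare-and-set; a weak CAS would also work there, since the loop already retries. As a standalone sketch (invented names):

    // Unconditionally install new_value and return whatever was stored before.
    template <typename T>
    T* ExchangePointer(Atomic<T*>* storage, T* new_value) {
      T* old;
      do {
        old = storage->LoadSequentiallyConsistent();
      } while (!storage->CompareAndSetStrongSequentiallyConsistent(old, new_value));
      return old;
    }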
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 1509bb0..9ebb131 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -74,7 +74,7 @@
if (UNLIKELY(new_end > growth_end_)) {
return nullptr;
}
- } while (!end_.CompareExchangeWeakSequentiallyConsistent(old_end, new_end));
+ } while (!end_.CompareAndSetWeakSequentiallyConsistent(old_end, new_end));
return reinterpret_cast<mirror::Object*>(old_end);
}
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index a3b53b4..ea2168f 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -101,7 +101,7 @@
if (UNLIKELY(new_top > end_)) {
return nullptr;
}
- } while (!top_.CompareExchangeWeakRelaxed(old_top, new_top));
+ } while (!top_.CompareAndSetWeakRelaxed(old_top, new_top));
objects_allocated_.FetchAndAddRelaxed(1);
DCHECK_LE(Top(), end_);
DCHECK_LT(old_top, end_);
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index e54a017..01481d1 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -94,8 +94,8 @@
// *after* this thread hits a suspend point.
GcRoot<mirror::Class> expected_root(existing);
GcRoot<mirror::Class> desired_root(cls);
- if (!reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&cache->classes_[i])->
- CompareExchangeStrongSequentiallyConsistent(expected_root, desired_root)) {
+ auto atomic_root = reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&cache->classes_[i]);
+ if (!atomic_root->CompareAndSetStrongSequentiallyConsistent(expected_root, desired_root)) {
// Some other thread put a class in the cache, continue iteration starting at this
// entry in case the entry contains `cls`.
--i;
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index e0a341d..10daebb 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -1129,7 +1129,7 @@
// Update the field atomically. This may fail if mutator updates before us, but it's ok.
auto* atomic_root =
reinterpret_cast<Atomic<CompressedReference<Object>>*>(root);
- atomic_root->CompareExchangeStrongSequentiallyConsistent(
+ atomic_root->CompareAndSetStrongSequentiallyConsistent(
CompressedReference<Object>::FromMirrorPtr(old_ref.Ptr()),
CompressedReference<Object>::FromMirrorPtr(new_ref.Ptr()));
}
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 8d4d44b..e1d04e2 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -168,7 +168,7 @@
// The first assignment for a given call site wins.
Atomic<GcRoot<mirror::CallSite>>& ref =
reinterpret_cast<Atomic<GcRoot<mirror::CallSite>>&>(target);
- if (ref.CompareExchangeStrongSequentiallyConsistent(null_call_site, candidate)) {
+ if (ref.CompareAndSetStrongSequentiallyConsistent(null_call_site, candidate)) {
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
return call_site;
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index f1a86e5..6e2a07c 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -579,7 +579,7 @@
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
- return atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_value, new_value);
+ return atomic_addr->CompareAndSetWeakSequentiallyConsistent(old_value, new_value);
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -597,7 +597,7 @@
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
- return atomic_addr->CompareExchangeWeakAcquire(old_value, new_value);
+ return atomic_addr->CompareAndSetWeakAcquire(old_value, new_value);
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -615,7 +615,7 @@
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
- return atomic_addr->CompareExchangeWeakRelease(old_value, new_value);
+ return atomic_addr->CompareAndSetWeakRelease(old_value, new_value);
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -633,7 +633,7 @@
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
- return atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_value, new_value);
+ return atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_value, new_value);
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
@@ -689,7 +689,7 @@
}
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<int64_t>* atomic_addr = reinterpret_cast<Atomic<int64_t>*>(raw_addr);
- return atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_value, new_value);
+ return atomic_addr->CompareAndSetWeakSequentiallyConsistent(old_value, new_value);
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -706,7 +706,7 @@
}
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<int64_t>* atomic_addr = reinterpret_cast<Atomic<int64_t>*>(raw_addr);
- return atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_value, new_value);
+ return atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_value, new_value);
}
template<class T, VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption,
@@ -832,7 +832,7 @@
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_ref, new_ref);
+ bool success = atomic_addr->CompareAndSetWeakSequentiallyConsistent(old_ref, new_ref);
return success;
}
@@ -873,7 +873,7 @@
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+ bool success = atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_ref, new_ref);
return success;
}
@@ -902,7 +902,7 @@
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeWeakRelaxed(old_ref, new_ref);
+ bool success = atomic_addr->CompareAndSetWeakRelaxed(old_ref, new_ref);
return success;
}
@@ -931,7 +931,7 @@
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeWeakRelease(old_ref, new_ref);
+ bool success = atomic_addr->CompareAndSetWeakRelease(old_ref, new_ref);
return success;
}
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index 0a95663..d81fff0 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -52,7 +52,7 @@
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
- return atomic_addr->CompareExchangeWeakRelaxed(old_value, new_value);
+ return atomic_addr->CompareAndSetWeakRelaxed(old_value, new_value);
}
inline bool Object::CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val) {
@@ -217,7 +217,7 @@
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeStrongRelaxed(old_ref, new_ref);
+ bool success = atomic_addr->CompareAndSetStrongRelaxed(old_ref, new_ref);
return success;
}
@@ -246,7 +246,7 @@
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeStrongRelease(old_ref, new_ref);
+ bool success = atomic_addr->CompareAndSetStrongRelease(old_ref, new_ref);
return success;
}
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 97fb793..52e6a7e 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -174,7 +174,7 @@
do {
expected_value = hash_code_seed.LoadRelaxed();
new_value = expected_value * 1103515245 + 12345;
- } while (!hash_code_seed.CompareExchangeWeakRelaxed(expected_value, new_value) ||
+ } while (!hash_code_seed.CompareAndSetWeakRelaxed(expected_value, new_value) ||
(expected_value & LockWord::kHashMask) == 0);
return expected_value & LockWord::kHashMask;
}
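Note: the hash-seed hunk above is a small shared linear congruential generator. Each caller advances the seed one step with a relaxed weak CAS and keeps the value it claimed, also retrying whenever the masked result would be 0, since 0 is reserved to mean "no hash code". The same idea in isolation (invented names; unsigned arithmetic in the sketch keeps the overflow well defined; the multiplier and increment are the ones used above):

    // Draw a pseudo-random, non-zero identifier from a shared seed.
    uint32_t NextNonZeroId(Atomic<uint32_t>* seed, uint32_t mask) {
      uint32_t expected;
      uint32_t next;
      do {
        expected = seed->LoadRelaxed();
        next = expected * 1103515245u + 12345u;
      } while (!seed->CompareAndSetWeakRelaxed(expected, next) || (expected & mask) == 0);
      return expected & mask;
    }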
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
index 60f3ce1..295b460 100644
--- a/runtime/mirror/object_reference-inl.h
+++ b/runtime/mirror/object_reference-inl.h
@@ -31,9 +31,8 @@
template<class MirrorType>
bool HeapReference<MirrorType>::CasWeakRelaxed(MirrorType* expected_ptr, MirrorType* new_ptr) {
- return reference_.CompareExchangeWeakRelaxed(
- Compression::Compress(expected_ptr),
- Compression::Compress(new_ptr));
+ return reference_.CompareAndSetWeakRelaxed(Compression::Compress(expected_ptr),
+ Compression::Compress(new_ptr));
}
} // namespace mirror
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 542692f..475ec21 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -134,7 +134,7 @@
int32_t Monitor::GetHashCode() {
while (!HasHashCode()) {
- if (hash_code_.CompareExchangeWeakRelaxed(0, mirror::Object::GenerateIdentityHashCode())) {
+ if (hash_code_.CompareAndSetWeakRelaxed(0, mirror::Object::GenerateIdentityHashCode())) {
break;
}
}
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 6424599..a77d100 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -131,7 +131,7 @@
// Update the field atomically. This may fail if mutator updates before us, but it's ok.
if (ref != old_ref) {
Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
- atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
+ atomic_root->CompareAndSetStrongRelaxed(old_ref, ref);
}
}
AssertToSpaceInvariant(gc_root_source, ref);
@@ -174,7 +174,7 @@
if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
auto* atomic_root =
reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
- atomic_root->CompareExchangeStrongRelaxed(old_ref, new_ref);
+ atomic_root->CompareAndSetStrongRelaxed(old_ref, new_ref);
}
}
AssertToSpaceInvariant(gc_root_source, ref);
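Note: both read-barrier hunks above use a fire-and-forget CAS. The updated to-space reference is published only if the slot still holds the old value, and failure is deliberately ignored because it means a mutator already stored something at least as up to date. A condensed sketch (invented names):

    // Publish new_ptr into *slot only if nobody raced us; losing the race is fine.
    template <typename T>
    void PublishIfUnchanged(Atomic<T*>* slot, T* old_ptr, T* new_ptr) {
      if (new_ptr != old_ptr) {
        slot->CompareAndSetStrongRelaxed(old_ptr, new_ptr);  // Result intentionally unused.
      }
    }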
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 7392dcf..2f6f50e 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -201,7 +201,7 @@
// CAS the value with a memory ordering.
bool done =
- tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakRelease(old_state_and_flags.as_int,
+ tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakRelease(old_state_and_flags.as_int,
new_state_and_flags.as_int);
if (LIKELY(done)) {
break;
@@ -252,7 +252,7 @@
new_state_and_flags.as_int = old_state_and_flags.as_int;
new_state_and_flags.as_struct.state = kRunnable;
// CAS the value with a memory barrier.
- if (LIKELY(tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakAcquire(
+ if (LIKELY(tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakAcquire(
old_state_and_flags.as_int,
new_state_and_flags.as_int))) {
// Mark the acquisition of a share of the mutator_lock_.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index e3a17c2..b539fb8 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1319,7 +1319,7 @@
int32_t cur_val = pending_threads->LoadRelaxed();
CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val;
// Reduce value by 1.
- done = pending_threads->CompareExchangeWeakRelaxed(cur_val, cur_val - 1);
+ done = pending_threads->CompareAndSetWeakRelaxed(cur_val, cur_val - 1);
#if ART_USE_FUTEXES
if (done && (cur_val - 1) == 0) { // Weak CAS may fail spuriously.
futex(pending_threads->Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
@@ -1390,7 +1390,7 @@
union StateAndFlags new_state_and_flags;
new_state_and_flags.as_int = old_state_and_flags.as_int;
new_state_and_flags.as_struct.flags |= kCheckpointRequest;
- bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
+ bool success = tls32_.state_and_flags.as_atomic_int.CompareAndSetStrongSequentiallyConsistent(
old_state_and_flags.as_int, new_state_and_flags.as_int);
if (success) {
// Succeeded setting checkpoint flag, now insert the actual checkpoint.
@@ -1419,7 +1419,7 @@
union StateAndFlags new_state_and_flags;
new_state_and_flags.as_int = old_state_and_flags.as_int;
new_state_and_flags.as_struct.flags |= kEmptyCheckpointRequest;
- bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
+ bool success = tls32_.state_and_flags.as_atomic_int.CompareAndSetStrongSequentiallyConsistent(
old_state_and_flags.as_int, new_state_and_flags.as_int);
if (success) {
TriggerSuspend();
@@ -1560,7 +1560,7 @@
if (func == nullptr) {
return nullptr;
}
- } while (!atomic_func->CompareExchangeWeakSequentiallyConsistent(func, nullptr));
+ } while (!atomic_func->CompareAndSetWeakSequentiallyConsistent(func, nullptr));
DCHECK(func != nullptr);
return func;
}
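Note: the checkpoint hunks above set a request flag inside the packed state_and_flags word with a single strong CAS and report success to the caller. Reduced to its essentials (invented names; the real code goes through the StateAndFlags union rather than a plain bit-or):

    // Try once to set flag_bit in a packed state+flags word; returns whether this thread won.
    bool TrySetFlag(Atomic<int32_t>* state_and_flags, int32_t flag_bit) {
      int32_t old_value = state_and_flags->LoadRelaxed();
      int32_t new_value = old_value | flag_bit;
      return state_and_flags->CompareAndSetStrongSequentiallyConsistent(old_value, new_value);
    }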
diff --git a/runtime/trace.cc b/runtime/trace.cc
index a113ab5..d9038b2 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -937,7 +937,7 @@
overflow_ = true;
return;
}
- } while (!cur_offset_.CompareExchangeWeakSequentiallyConsistent(old_offset, new_offset));
+ } while (!cur_offset_.CompareAndSetWeakSequentiallyConsistent(old_offset, new_offset));
}
TraceAction action = kTraceMethodEnter;