26 files changed, 71 insertions, 74 deletions
diff --git a/compiler/utils/atomic_dex_ref_map-inl.h b/compiler/utils/atomic_dex_ref_map-inl.h
index 33d59f9d42..00db09a735 100644
--- a/compiler/utils/atomic_dex_ref_map-inl.h
+++ b/compiler/utils/atomic_dex_ref_map-inl.h
@@ -58,7 +58,7 @@ inline typename AtomicDexRefMap<DexFileReferenceType, Value>::InsertResult
     return kInsertResultInvalidDexFile;
   }
   DCHECK_LT(ref.index, array->size());
-  return (*array)[ref.index].CompareExchangeStrongSequentiallyConsistent(expected, desired)
+  return (*array)[ref.index].CompareAndSetStrongSequentiallyConsistent(expected, desired)
       ? kInsertResultSuccess
       : kInsertResultCASFailure;
 }
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index c6c4f4ae41..fc7920f7b7 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -80,9 +80,8 @@ inline bool ArtMethod::CASDeclaringClass(mirror::Class* expected_class,
                                          mirror::Class* desired_class) {
   GcRoot<mirror::Class> expected_root(expected_class);
   GcRoot<mirror::Class> desired_root(desired_class);
-  return reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&declaring_class_)->
-      CompareExchangeStrongSequentiallyConsistent(
-          expected_root, desired_root);
+  auto atomic_root_class = reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&declaring_class_);
+  return atomic_root_class->CompareAndSetStrongSequentiallyConsistent(expected_root, desired_root);
 }
 
 inline uint16_t ArtMethod::GetMethodIndex() {
diff --git a/runtime/atomic.h b/runtime/atomic.h
index ec3eb6d609..2d0290f69e 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -243,44 +243,44 @@ class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
 
   // Atomically replace the value with desired value if it matches the expected value.
   // Participates in total ordering of atomic operations.
-  bool CompareExchangeStrongSequentiallyConsistent(T expected_value, T desired_value) {
+  bool CompareAndSetStrongSequentiallyConsistent(T expected_value, T desired_value) {
     return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_seq_cst);
   }
 
   // The same, except it may fail spuriously.
-  bool CompareExchangeWeakSequentiallyConsistent(T expected_value, T desired_value) {
+  bool CompareAndSetWeakSequentiallyConsistent(T expected_value, T desired_value) {
     return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_seq_cst);
   }
 
   // Atomically replace the value with desired value if it matches the expected value. Doesn't
   // imply ordering or synchronization constraints.
-  bool CompareExchangeStrongRelaxed(T expected_value, T desired_value) {
+  bool CompareAndSetStrongRelaxed(T expected_value, T desired_value) {
     return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_relaxed);
   }
 
   // Atomically replace the value with desired value if it matches the expected value. Prior writes
   // to other memory locations become visible to the threads that do a consume or an acquire on the
   // same location.
-  bool CompareExchangeStrongRelease(T expected_value, T desired_value) {
+  bool CompareAndSetStrongRelease(T expected_value, T desired_value) {
     return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_release);
   }
 
   // The same, except it may fail spuriously.
-  bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
+  bool CompareAndSetWeakRelaxed(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed);
   }
 
   // Atomically replace the value with desired value if it matches the expected value. Prior writes
   // made to other memory locations by the thread that did the release become visible in this
   // thread.
-  bool CompareExchangeWeakAcquire(T expected_value, T desired_value) {
+  bool CompareAndSetWeakAcquire(T expected_value, T desired_value) {
     return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_acquire);
   }
 
   // Atomically replace the value with desired value if it matches the expected value. prior writes
   // to other memory locations become visible to the threads that do a consume or an acquire on the
   // same location.
-  bool CompareExchangeWeakRelease(T expected_value, T desired_value) {
+  bool CompareAndSetWeakRelease(T expected_value, T desired_value) {
     return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release);
   }
 
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 587b092ab7..01adbf17e2 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -164,7 +164,7 @@ inline void ReaderWriterMutex::SharedLock(Thread* self) {
     int32_t cur_state = state_.LoadRelaxed();
     if (LIKELY(cur_state >= 0)) {
       // Add as an extra reader.
-      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
+      done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
     } else {
       HandleSharedLockContention(self, cur_state);
     }
@@ -188,10 +188,10 @@ inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
     int32_t cur_state = state_.LoadRelaxed();
     if (LIKELY(cur_state > 0)) {
       // Reduce state by 1 and impose lock release load/store ordering.
-      // Note, the relaxed loads below musn't reorder before the CompareExchange.
+      // Note, the relaxed loads below musn't reorder before the CompareAndSet.
       // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
       // a status bit into the state on contention.
-      done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, cur_state - 1);
+      done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, cur_state - 1);
       if (done && (cur_state - 1) == 0) {  // Weak CAS may fail spuriously.
         if (num_pending_writers_.LoadRelaxed() > 0 ||
             num_pending_readers_.LoadRelaxed() > 0) {
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index e88ed68ef1..9f17ad051c 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -127,7 +127,7 @@ class ScopedAllMutexesLock FINAL {
  public:
   explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
     for (uint32_t i = 0;
-         !gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex);
+         !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(0, mutex);
          ++i) {
       BackOff(i);
     }
@@ -146,7 +146,7 @@ class Locks::ScopedExpectedMutexesOnWeakRefAccessLock FINAL {
  public:
   explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
     for (uint32_t i = 0;
-         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareExchangeWeakAcquire(0, mutex);
+         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(0, mutex);
          ++i) {
       BackOff(i);
     }
@@ -314,7 +314,7 @@ void BaseMutex::RecordContention(uint64_t blocked_tid,
     do {
       slot = data->cur_content_log_entry.LoadRelaxed();
       new_slot = (slot + 1) % kContentionLogSize;
-    } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
+    } while (!data->cur_content_log_entry.CompareAndSetWeakRelaxed(slot, new_slot));
     log[new_slot].blocked_tid = blocked_tid;
     log[new_slot].owner_tid = owner_tid;
     log[new_slot].count.StoreRelaxed(1);
@@ -438,7 +438,7 @@ void Mutex::ExclusiveLock(Thread* self) {
       int32_t cur_state = state_.LoadRelaxed();
       if (LIKELY(cur_state == 0)) {
         // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
-        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
+        done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, 1 /* new state */);
       } else {
         // Failed to acquire, hang up.
         ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
@@ -484,7 +484,7 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
       int32_t cur_state = state_.LoadRelaxed();
       if (cur_state == 0) {
         // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
-        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
+        done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, 1 /* new state */);
       } else {
         return false;
       }
@@ -543,10 +543,10 @@ void Mutex::ExclusiveUnlock(Thread* self) {
       // We're no longer the owner.
       exclusive_owner_.StoreRelaxed(0);
       // Change state to 0 and impose load/store ordering appropriate for lock release.
-      // Note, the relaxed loads below mustn't reorder before the CompareExchange.
+      // Note, the relaxed loads below mustn't reorder before the CompareAndSet.
       // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
       // a status bit into the state on contention.
-      done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
+      done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, 0 /* new state */);
       if (LIKELY(done)) {  // Spurious fail?
         // Wake a contender.
         if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
@@ -639,7 +639,7 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) {
       int32_t cur_state = state_.LoadRelaxed();
       if (LIKELY(cur_state == 0)) {
         // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
-        done = state_.CompareExchangeWeakAcquire(0 /* cur_state*/, -1 /* new state */);
+        done = state_.CompareAndSetWeakAcquire(0 /* cur_state*/, -1 /* new state */);
       } else {
         // Failed to acquire, hang up.
         ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
@@ -680,10 +680,10 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
       // We're no longer the owner.
       exclusive_owner_.StoreRelaxed(0);
       // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
-      // Note, the relaxed loads below musn't reorder before the CompareExchange.
+      // Note, the relaxed loads below musn't reorder before the CompareAndSet.
       // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
       // a status bit into the state on contention.
-      done = state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */);
+      done = state_.CompareAndSetWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */);
       if (LIKELY(done)) {  // Weak CAS may fail spuriously.
         // Wake any waiters.
         if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
@@ -712,7 +712,7 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32
     int32_t cur_state = state_.LoadRelaxed();
     if (cur_state == 0) {
       // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
-      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
+      done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */);
     } else {
       // Failed to acquire, hang up.
       timespec now_abs_ts;
@@ -784,7 +784,7 @@ bool ReaderWriterMutex::SharedTryLock(Thread* self) {
     int32_t cur_state = state_.LoadRelaxed();
     if (cur_state >= 0) {
       // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
-      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
+      done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
     } else {
       // Owner holds it exclusively.
       return false;
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index 1280466a91..718e93a97d 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -95,7 +95,7 @@ inline mirror::Class* ClassTable::TableSlot::Read() const {
   if (kReadBarrierOption != kWithoutReadBarrier && before_ptr != after_ptr) {
     // If another thread raced and updated the reference, do not store the read barrier updated
     // one.
-    data_.CompareExchangeStrongRelease(before, Encode(after_ptr, MaskHash(before)));
+    data_.CompareAndSetStrongRelease(before, Encode(after_ptr, MaskHash(before)));
   }
   return after_ptr.Ptr();
 }
@@ -110,7 +110,7 @@ inline void ClassTable::TableSlot::VisitRoot(const Visitor& visitor) const {
   if (before_ptr != after_ptr) {
     // If another thread raced and updated the reference, do not store the read barrier updated
     // one.
-    data_.CompareExchangeStrongRelease(before, Encode(after_ptr, MaskHash(before)));
+    data_.CompareAndSetStrongRelease(before, Encode(after_ptr, MaskHash(before)));
   }
 }
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index e5b5694413..72eb8274c8 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -109,7 +109,7 @@ class AtomicStack {
         // Stack overflow.
         return false;
       }
-    } while (!back_index_.CompareExchangeWeakRelaxed(index, new_index));
+    } while (!back_index_.CompareAndSetWeakRelaxed(index, new_index));
     *start_address = begin_ + index;
     *end_address = begin_ + new_index;
     if (kIsDebugBuild) {
@@ -241,7 +241,7 @@ class AtomicStack {
        // Stack overflow.
        return false;
      }
-    } while (!back_index_.CompareExchangeWeakRelaxed(index, index + 1));
+    } while (!back_index_.CompareAndSetWeakRelaxed(index, index + 1));
     begin_[index].Assign(value);
     return true;
   }
diff --git a/runtime/gc/accounting/bitmap-inl.h b/runtime/gc/accounting/bitmap-inl.h
index ca6b4794de..bf153f56d8 100644
--- a/runtime/gc/accounting/bitmap-inl.h
+++ b/runtime/gc/accounting/bitmap-inl.h
@@ -43,8 +43,7 @@ inline bool Bitmap::AtomicTestAndSetBit(uintptr_t bit_index) {
       DCHECK(TestBit(bit_index));
       return true;
     }
-  } while (!atomic_entry->CompareExchangeWeakSequentiallyConsistent(old_word,
-                                                                    old_word | word_mask));
+  } while (!atomic_entry->CompareAndSetWeakSequentiallyConsistent(old_word, old_word | word_mask));
   DCHECK(TestBit(bit_index));
   return false;
 }
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 5f2f2dda42..adca5c835e 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -33,7 +33,7 @@ namespace accounting {
 static inline bool byte_cas(uint8_t old_value, uint8_t new_value, uint8_t* address) {
 #if defined(__i386__) || defined(__x86_64__)
   Atomic<uint8_t>* byte_atomic = reinterpret_cast<Atomic<uint8_t>*>(address);
-  return byte_atomic->CompareExchangeWeakRelaxed(old_value, new_value);
+  return byte_atomic->CompareAndSetWeakRelaxed(old_value, new_value);
 #else
   // Little endian means most significant byte is on the left.
   const size_t shift_in_bytes = reinterpret_cast<uintptr_t>(address) % sizeof(uintptr_t);
@@ -47,7 +47,7 @@ static inline bool byte_cas(uint8_t old_value, uint8_t new_value, uint8_t* addre
       ~(static_cast<uintptr_t>(0xFF) << shift_in_bits);
   const uintptr_t old_word = cur_word | (static_cast<uintptr_t>(old_value) << shift_in_bits);
   const uintptr_t new_word = cur_word | (static_cast<uintptr_t>(new_value) << shift_in_bits);
-  return word_atomic->CompareExchangeWeakRelaxed(old_word, new_word);
+  return word_atomic->CompareAndSetWeakRelaxed(old_word, new_word);
 #endif
 }
 
@@ -195,7 +195,7 @@ inline void CardTable::ModifyCardsAtomic(uint8_t* scan_begin,
       new_bytes[i] = visitor(expected_bytes[i]);
     }
     Atomic<uintptr_t>* atomic_word = reinterpret_cast<Atomic<uintptr_t>*>(word_cur);
-    if (LIKELY(atomic_word->CompareExchangeWeakRelaxed(expected_word, new_word))) {
+    if (LIKELY(atomic_word->CompareAndSetWeakRelaxed(expected_word, new_word))) {
       for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
         const uint8_t expected_byte = expected_bytes[i];
         const uint8_t new_byte = new_bytes[i];
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index ba833693f6..df9ee8c219 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -47,7 +47,7 @@ inline bool SpaceBitmap<kAlignment>::AtomicTestAndSet(const mirror::Object* obj)
       DCHECK(Test(obj));
       return true;
     }
-  } while (!atomic_entry->CompareExchangeWeakRelaxed(old_word, old_word | mask));
+  } while (!atomic_entry->CompareAndSetWeakRelaxed(old_word, old_word | mask));
   DCHECK(Test(obj));
   return false;
 }
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 70685bcbf7..e1055f22c2 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2079,7 +2079,7 @@ inline void ConcurrentCopying::VisitRoots(
         // It was updated by the mutator.
         break;
       }
-    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
+    } while (!addr->CompareAndSetWeakRelaxed(expected_ref, new_ref));
   }
 }
 
@@ -2098,7 +2098,7 @@ inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Obje
         // It was updated by the mutator.
         break;
       }
-    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
+    } while (!addr->CompareAndSetWeakRelaxed(expected_ref, new_ref));
   }
 }
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index dbaddaf8d4..d2df833c48 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1127,7 +1127,7 @@ static inline AllocationListener* GetAndOverwriteAllocationListener(
   AllocationListener* old;
   do {
     old = storage->LoadSequentiallyConsistent();
-  } while (!storage->CompareExchangeStrongSequentiallyConsistent(old, new_value));
+  } while (!storage->CompareAndSetStrongSequentiallyConsistent(old, new_value));
   return old;
 }
 
@@ -3601,7 +3601,7 @@ void Heap::ClearConcurrentGCRequest() {
 
 void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) {
   if (CanAddHeapTask(self) &&
-      concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
+      concurrent_gc_pending_.CompareAndSetStrongSequentiallyConsistent(false, true)) {
     task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(),  // Start straight away.
                                                         cause,
                                                         force_full));
@@ -3846,7 +3846,7 @@ void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
   do {
     allocated = new_native_bytes_allocated_.LoadRelaxed();
     new_freed_bytes = std::min(allocated, bytes);
-  } while (!new_native_bytes_allocated_.CompareExchangeWeakRelaxed(allocated,
+  } while (!new_native_bytes_allocated_.CompareAndSetWeakRelaxed(allocated,
                                                                    allocated - new_freed_bytes));
   if (new_freed_bytes < bytes) {
     old_native_bytes_allocated_.FetchAndSubRelaxed(bytes - new_freed_bytes);
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 1509bb027d..9ebb131ad1 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -74,7 +74,7 @@ inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t
     if (UNLIKELY(new_end > growth_end_)) {
       return nullptr;
     }
-  } while (!end_.CompareExchangeWeakSequentiallyConsistent(old_end, new_end));
+  } while (!end_.CompareAndSetWeakSequentiallyConsistent(old_end, new_end));
   return reinterpret_cast<mirror::Object*>(old_end);
 }
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index a3b53b4cad..ea2168fe9c 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -101,7 +101,7 @@ inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* byte
     if (UNLIKELY(new_top > end_)) {
       return nullptr;
     }
-  } while (!top_.CompareExchangeWeakRelaxed(old_top, new_top));
+  } while (!top_.CompareAndSetWeakRelaxed(old_top, new_top));
   objects_allocated_.FetchAndAddRelaxed(1);
   DCHECK_LE(Top(), end_);
   DCHECK_LT(old_top, end_);
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index e54a0179e1..01481d1a8d 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -94,8 +94,8 @@ void ProfilingInfo::AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls) {
     // *after* this thread hits a suspend point.
     GcRoot<mirror::Class> expected_root(existing);
     GcRoot<mirror::Class> desired_root(cls);
-    if (!reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&cache->classes_[i])->
-            CompareExchangeStrongSequentiallyConsistent(expected_root, desired_root)) {
+    auto atomic_root = reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&cache->classes_[i]);
+    if (!atomic_root->CompareAndSetStrongSequentiallyConsistent(expected_root, desired_root)) {
       // Some other thread put a class in the cache, continue iteration starting at this
       // entry in case the entry contains `cls`.
       --i;
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index e0a341da67..10daebbf5b 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -1129,7 +1129,7 @@ class ReadBarrierOnNativeRootsVisitor {
       // Update the field atomically. This may fail if mutator updates before us, but it's ok.
       auto* atomic_root =
           reinterpret_cast<Atomic<CompressedReference<Object>>*>(root);
-      atomic_root->CompareExchangeStrongSequentiallyConsistent(
+      atomic_root->CompareAndSetStrongSequentiallyConsistent(
           CompressedReference<Object>::FromMirrorPtr(old_ref.Ptr()),
          CompressedReference<Object>::FromMirrorPtr(new_ref.Ptr()));
     }
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 8d4d44b6f9..e1d04e291d 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -168,7 +168,7 @@ inline CallSite* DexCache::SetResolvedCallSite(uint32_t call_site_idx, CallSite*
   // The first assignment for a given call site wins.
   Atomic<GcRoot<mirror::CallSite>>& ref =
       reinterpret_cast<Atomic<GcRoot<mirror::CallSite>>&>(target);
-  if (ref.CompareExchangeStrongSequentiallyConsistent(null_call_site, candidate)) {
+  if (ref.CompareAndSetStrongSequentiallyConsistent(null_call_site, candidate)) {
     // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
     Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
     return call_site;
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index f1a86e5353..6e2a07c9e0 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -579,7 +579,7 @@ inline bool Object::CasFieldWeakSequentiallyConsistent32(MemberOffset field_offs
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
 
-  return atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_value, new_value);
+  return atomic_addr->CompareAndSetWeakSequentiallyConsistent(old_value, new_value);
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -597,7 +597,7 @@ inline bool Object::CasFieldWeakAcquire32(MemberOffset field_offset,
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
 
-  return atomic_addr->CompareExchangeWeakAcquire(old_value, new_value);
+  return atomic_addr->CompareAndSetWeakAcquire(old_value, new_value);
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -615,7 +615,7 @@ inline bool Object::CasFieldWeakRelease32(MemberOffset field_offset,
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
 
-  return atomic_addr->CompareExchangeWeakRelease(old_value, new_value);
+  return atomic_addr->CompareAndSetWeakRelease(old_value, new_value);
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -633,7 +633,7 @@ inline bool Object::CasFieldStrongSequentiallyConsistent32(MemberOffset field_of
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
 
-  return atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_value, new_value);
+  return atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_value, new_value);
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
@@ -689,7 +689,7 @@ inline bool Object::CasFieldWeakSequentiallyConsistent64(MemberOffset field_offs
   }
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<int64_t>* atomic_addr = reinterpret_cast<Atomic<int64_t>*>(raw_addr);
-  return atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_value, new_value);
+  return atomic_addr->CompareAndSetWeakSequentiallyConsistent(old_value, new_value);
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -706,7 +706,7 @@ inline bool Object::CasFieldStrongSequentiallyConsistent64(MemberOffset field_of
   }
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<int64_t>* atomic_addr = reinterpret_cast<Atomic<int64_t>*>(raw_addr);
-  return atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_value, new_value);
+  return atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_value, new_value);
 }
 
 template<class T, VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption,
@@ -832,7 +832,7 @@ inline bool Object::CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
 
-  bool success = atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_ref, new_ref);
+  bool success = atomic_addr->CompareAndSetWeakSequentiallyConsistent(old_ref, new_ref);
   return success;
 }
 
@@ -873,7 +873,7 @@ inline bool Object::CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrie
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
 
-  bool success = atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+  bool success = atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_ref, new_ref);
   return success;
 }
 
@@ -902,7 +902,7 @@ inline bool Object::CasFieldWeakRelaxedObjectWithoutWriteBarrier(
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
 
-  bool success = atomic_addr->CompareExchangeWeakRelaxed(old_ref, new_ref);
+  bool success = atomic_addr->CompareAndSetWeakRelaxed(old_ref, new_ref);
   return success;
 }
 
@@ -931,7 +931,7 @@ inline bool Object::CasFieldWeakReleaseObjectWithoutWriteBarrier(
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
 
-  bool success = atomic_addr->CompareExchangeWeakRelease(old_ref, new_ref);
+  bool success = atomic_addr->CompareAndSetWeakRelease(old_ref, new_ref);
   return success;
 }
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index 0a956633d4..d81fff0a22 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -52,7 +52,7 @@ inline bool Object::CasFieldWeakRelaxed32(MemberOffset field_offset,
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
 
-  return atomic_addr->CompareExchangeWeakRelaxed(old_value, new_value);
+  return atomic_addr->CompareAndSetWeakRelaxed(old_value, new_value);
 }
 
 inline bool Object::CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val) {
@@ -217,7 +217,7 @@ inline bool Object::CasFieldStrongRelaxedObjectWithoutWriteBarrier(
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
 
-  bool success = atomic_addr->CompareExchangeStrongRelaxed(old_ref, new_ref);
+  bool success = atomic_addr->CompareAndSetStrongRelaxed(old_ref, new_ref);
   return success;
 }
 
@@ -246,7 +246,7 @@ inline bool Object::CasFieldStrongReleaseObjectWithoutWriteBarrier(
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
 
-  bool success = atomic_addr->CompareExchangeStrongRelease(old_ref, new_ref);
+  bool success = atomic_addr->CompareAndSetStrongRelease(old_ref, new_ref);
   return success;
 }
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 97fb793530..52e6a7e878 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -174,7 +174,7 @@ uint32_t Object::GenerateIdentityHashCode() {
   do {
     expected_value = hash_code_seed.LoadRelaxed();
     new_value = expected_value * 1103515245 + 12345;
-  } while (!hash_code_seed.CompareExchangeWeakRelaxed(expected_value, new_value) ||
+  } while (!hash_code_seed.CompareAndSetWeakRelaxed(expected_value, new_value) ||
            (expected_value & LockWord::kHashMask) == 0);
   return expected_value & LockWord::kHashMask;
 }
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
index 60f3ce153f..295b460a01 100644
--- a/runtime/mirror/object_reference-inl.h
+++ b/runtime/mirror/object_reference-inl.h
@@ -31,9 +31,8 @@ void ObjectReference<kPoisonReferences, MirrorType>::Assign(ObjPtr<MirrorType> p
 
 template<class MirrorType>
 bool HeapReference<MirrorType>::CasWeakRelaxed(MirrorType* expected_ptr, MirrorType* new_ptr) {
-  return reference_.CompareExchangeWeakRelaxed(
-      Compression::Compress(expected_ptr),
-      Compression::Compress(new_ptr));
+  return reference_.CompareAndSetWeakRelaxed(Compression::Compress(expected_ptr),
+                                             Compression::Compress(new_ptr));
 }
 
 }  // namespace mirror
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 542692fe46..475ec21ed8 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -134,7 +134,7 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
 
 int32_t Monitor::GetHashCode() {
   while (!HasHashCode()) {
-    if (hash_code_.CompareExchangeWeakRelaxed(0, mirror::Object::GenerateIdentityHashCode())) {
+    if (hash_code_.CompareAndSetWeakRelaxed(0, mirror::Object::GenerateIdentityHashCode())) {
       break;
     }
   }
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 642459924e..a77d100b92 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -131,7 +131,7 @@ inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
     // Update the field atomically. This may fail if mutator updates before us, but it's ok.
     if (ref != old_ref) {
       Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
-      atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
+      atomic_root->CompareAndSetStrongRelaxed(old_ref, ref);
     }
   }
   AssertToSpaceInvariant(gc_root_source, ref);
@@ -174,7 +174,7 @@ inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<Mirro
     if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
       auto* atomic_root =
           reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
-      atomic_root->CompareExchangeStrongRelaxed(old_ref, new_ref);
+      atomic_root->CompareAndSetStrongRelaxed(old_ref, new_ref);
     }
   }
   AssertToSpaceInvariant(gc_root_source, ref);
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 7392dcf6a5..2f6f50e31e 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -201,7 +201,7 @@ inline void Thread::TransitionToSuspendedAndRunCheckpoints(ThreadState new_state
     // CAS the value with a memory ordering.
     bool done =
-        tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakRelease(old_state_and_flags.as_int,
+        tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakRelease(old_state_and_flags.as_int,
                                                                         new_state_and_flags.as_int);
     if (LIKELY(done)) {
       break;
@@ -252,7 +252,7 @@ inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
       new_state_and_flags.as_int = old_state_and_flags.as_int;
       new_state_and_flags.as_struct.state = kRunnable;
       // CAS the value with a memory barrier.
-      if (LIKELY(tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakAcquire(
+      if (LIKELY(tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakAcquire(
                      old_state_and_flags.as_int, new_state_and_flags.as_int))) {
         // Mark the acquisition of a share of the mutator_lock_.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index e3a17c2737..b539fb8cd9 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1319,7 +1319,7 @@ bool Thread::PassActiveSuspendBarriers(Thread* self) {
     int32_t cur_val = pending_threads->LoadRelaxed();
     CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val;
     // Reduce value by 1.
-    done = pending_threads->CompareExchangeWeakRelaxed(cur_val, cur_val - 1);
+    done = pending_threads->CompareAndSetWeakRelaxed(cur_val, cur_val - 1);
 #if ART_USE_FUTEXES
     if (done && (cur_val - 1) == 0) {  // Weak CAS may fail spuriously.
       futex(pending_threads->Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
@@ -1390,7 +1390,7 @@ bool Thread::RequestCheckpoint(Closure* function) {
   union StateAndFlags new_state_and_flags;
   new_state_and_flags.as_int = old_state_and_flags.as_int;
   new_state_and_flags.as_struct.flags |= kCheckpointRequest;
-  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
+  bool success = tls32_.state_and_flags.as_atomic_int.CompareAndSetStrongSequentiallyConsistent(
       old_state_and_flags.as_int, new_state_and_flags.as_int);
   if (success) {
     // Succeeded setting checkpoint flag, now insert the actual checkpoint.
@@ -1419,7 +1419,7 @@ bool Thread::RequestEmptyCheckpoint() {
   union StateAndFlags new_state_and_flags;
   new_state_and_flags.as_int = old_state_and_flags.as_int;
   new_state_and_flags.as_struct.flags |= kEmptyCheckpointRequest;
-  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
+  bool success = tls32_.state_and_flags.as_atomic_int.CompareAndSetStrongSequentiallyConsistent(
      old_state_and_flags.as_int, new_state_and_flags.as_int);
   if (success) {
     TriggerSuspend();
@@ -1560,7 +1560,7 @@ Closure* Thread::GetFlipFunction() {
     if (func == nullptr) {
       return nullptr;
     }
-  } while (!atomic_func->CompareExchangeWeakSequentiallyConsistent(func, nullptr));
+  } while (!atomic_func->CompareAndSetWeakSequentiallyConsistent(func, nullptr));
   DCHECK(func != nullptr);
   return func;
 }
diff --git a/runtime/trace.cc b/runtime/trace.cc
index a113ab5cc8..d9038b238e 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -937,7 +937,7 @@ void Trace::LogMethodTraceEvent(Thread* thread, ArtMethod* method,
       overflow_ = true;
       return;
     }
-    } while (!cur_offset_.CompareExchangeWeakSequentiallyConsistent(old_offset, new_offset));
+    } while (!cur_offset_.CompareAndSetWeakSequentiallyConsistent(old_offset, new_offset));
   }
 
   TraceAction action = kTraceMethodEnter;
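
The sketch below is not part of the commit; it is a minimal, self-contained illustration of what the rename means for callers. It reproduces, in simplified form and without ART's PACKED alignment attribute, two of the renamed Atomic<T> wrappers from the runtime/atomic.h hunk above, plus a hypothetical IncrementRelaxed() helper modeled on the do { ... } while (!...CompareAndSetWeak...(...)) loops touched in atomic_stack.h, heap.cc, and object.cc. The point is that only the names change: each CompareAndSet* method still returns a bool and never reports the witnessed value (unlike raw std::atomic::compare_exchange_*, which also updates its expected argument), and the weak variants may still fail spuriously, so call sites keep their retry loops.

// Illustrative sketch only -- not from the commit. A cut-down stand-in for ART's
// Atomic<T> wrapper showing the renamed CompareAndSet* methods and the typical
// reload-and-retry pattern used at the call sites in the diff above.
#include <atomic>
#include <cstdint>

template <typename T>
class Atomic : public std::atomic<T> {
 public:
  Atomic() : std::atomic<T>(T()) {}
  explicit Atomic(T value) : std::atomic<T>(value) {}

  T LoadRelaxed() const { return this->load(std::memory_order_relaxed); }

  // Strong CAS, sequentially consistent: swaps iff the current value equals
  // expected_value and reports whether the swap happened.
  bool CompareAndSetStrongSequentiallyConsistent(T expected_value, T desired_value) {
    return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_seq_cst);
  }

  // Weak CAS, relaxed ordering: may fail spuriously, so callers retry in a loop.
  bool CompareAndSetWeakRelaxed(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed);
  }
};

// Hypothetical helper (not in ART): the same shape as the CAS loops above --
// reload, compute the new value, retry while the weak CAS fails.
int32_t IncrementRelaxed(Atomic<int32_t>* counter) {
  int32_t old_value;
  do {
    old_value = counter->LoadRelaxed();
  } while (!counter->CompareAndSetWeakRelaxed(old_value, old_value + 1));
  return old_value;  // Value observed just before the successful CAS.
}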