Refactor CasField32
Remove excessive copy-paste by replacing the per-ordering CAS helpers
with a single entry point that takes the CAS strength (strong or weak)
and the memory order as arguments.
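
A minimal sketch of the new call shape, written against plain
std::atomic<int32_t> rather than ART's Atomic<T> (every name below is
illustrative only, not part of the patch):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    enum class CASMode { kStrong, kWeak };

    // Free-function analogue of the new Atomic<T>::CompareAndSet().
    bool CompareAndSet(std::atomic<int32_t>& a,
                       int32_t expected,
                       int32_t desired,
                       CASMode mode,
                       std::memory_order order) {
      return mode == CASMode::kStrong
          ? a.compare_exchange_strong(expected, desired, order)
          : a.compare_exchange_weak(expected, desired, order);
    }

    int main() {
      std::atomic<int32_t> v{1};
      // Before: v.CompareAndSetStrongSequentiallyConsistent(1, 2)
      // After:  strength and ordering are arguments, not method names.
      assert(CompareAndSet(v, 1, 2, CASMode::kStrong,
                           std::memory_order_seq_cst));
      assert(v.load() == 2);
      return 0;
    }
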
Test: test-art-host
Change-Id: I3f58a5f84156aa0491b9e5145f3891f16217e05c
diff --git a/libartbase/base/atomic.h b/libartbase/base/atomic.h
index b68f867..9de84cd 100644
--- a/libartbase/base/atomic.h
+++ b/libartbase/base/atomic.h
@@ -28,6 +28,11 @@
namespace art {
+enum class CASMode {
+ kStrong,
+ kWeak,
+};
+
template<typename T>
class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
public:
@@ -100,6 +105,15 @@
return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release);
}
+ bool CompareAndSet(T expected_value,
+ T desired_value,
+ CASMode mode,
+ std::memory_order memory_order) {
+ return mode == CASMode::kStrong
+ ? this->compare_exchange_strong(expected_value, desired_value, memory_order)
+ : this->compare_exchange_weak(expected_value, desired_value, memory_order);
+ }
+
// Returns the address of the current atomic variable. This is only used by futex() which is
// declared to take a volatile address (see base/mutex-inl.h).
volatile T* Address() {
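
Why the CASMode argument matters: compare_exchange_weak may fail
spuriously even when the value matches, so weak CAS belongs in a retry
loop, while strong CAS is the right choice for one-shot uses. An
illustrative (non-ART) retry loop over std::atomic:

    #include <atomic>

    std::atomic<int> counter{0};

    void Increment() {
      int old_value = counter.load(std::memory_order_relaxed);
      // On failure, compare_exchange_weak writes the observed value back
      // into old_value, so each iteration recomputes from fresh data.
      while (!counter.compare_exchange_weak(old_value,
                                            old_value + 1,
                                            std::memory_order_relaxed)) {
      }
    }
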
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index c4d2fdd..10fa8c5 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1551,7 +1551,7 @@
// above IsInToSpace() evaluates to true and we change the color from gray to white here in this
// else block.
if (kUseBakerReadBarrier) {
- bool success = to_ref->AtomicSetReadBarrierState</*kCasRelease*/true>(
+ bool success = to_ref->AtomicSetReadBarrierState<std::memory_order_release>(
ReadBarrier::GrayState(),
ReadBarrier::WhiteState());
DCHECK(success) << "Must succeed as we won the race.";
@@ -2490,7 +2490,10 @@
LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
// Try to atomically write the fwd ptr.
- bool success = from_ref->CasLockWordWeakRelaxed(old_lock_word, new_lock_word);
+ bool success = from_ref->CasLockWord(old_lock_word,
+ new_lock_word,
+ CASMode::kWeak,
+ std::memory_order_relaxed);
if (LIKELY(success)) {
// The CAS succeeded.
DCHECK(thread_running_gc_ != nullptr);
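
The forwarding-pointer install above keeps CASMode::kWeak with relaxed
ordering and handles the losing side of the race explicitly. A
standalone sketch of that install-or-observe pattern (not ART code; the
names and the 0-means-unforwarded encoding are made up for
illustration):

    #include <atomic>
    #include <cstdint>

    // 0 stands for "not forwarded yet"; any other value is the new address.
    std::atomic<uintptr_t> forwarding{0};

    uintptr_t Forward(uintptr_t to_address) {
      uintptr_t expected = 0;
      // Loop to absorb spurious weak-CAS failures; stop once we either
      // installed our address or observed another thread's address.
      while (!forwarding.compare_exchange_weak(expected, to_address,
                                               std::memory_order_relaxed)) {
        if (expected != 0) {
          return expected;  // another thread forwarded the object first
        }
      }
      return to_address;    // we won the race
    }
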
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 667bd03..6a4cf56 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1855,11 +1855,17 @@
jint newValue = args[4];
bool success;
if (Runtime::Current()->IsActiveTransaction()) {
- success = obj->CasFieldStrongSequentiallyConsistent32<true>(MemberOffset(offset),
- expectedValue, newValue);
+ success = obj->CasField32<true>(MemberOffset(offset),
+ expectedValue,
+ newValue,
+ CASMode::kStrong,
+ std::memory_order_seq_cst);
} else {
- success = obj->CasFieldStrongSequentiallyConsistent32<false>(MemberOffset(offset),
- expectedValue, newValue);
+ success = obj->CasField32<false>(MemberOffset(offset),
+ expectedValue,
+ newValue,
+ CASMode::kStrong,
+ std::memory_order_seq_cst);
}
result->SetZ(success ? JNI_TRUE : JNI_FALSE);
}
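
Note that kTransactionActive stays a compile-time template parameter
while the CAS mode and memory order become ordinary arguments. A hedged
sketch of that dispatch pattern (the counter merely stands in for ART's
transaction bookkeeping and is not real ART code):

    #include <atomic>
    #include <cstdint>

    int g_recorded_writes = 0;  // stand-in for transaction bookkeeping

    template <bool kTransactionActive>
    bool CasFieldSketch(std::atomic<int32_t>& field,
                        int32_t expected,
                        int32_t desired) {
      if (kTransactionActive) {
        ++g_recorded_writes;  // ART records the old field value here
      }
      return field.compare_exchange_strong(expected, desired,
                                           std::memory_order_seq_cst);
    }

    // Usage mirrors the call sites above:
    //   CasFieldSketch<true>(f, old_v, new_v);   // inside a transaction
    //   CasFieldSketch<false>(f, old_v, new_v);  // normal execution
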
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index ee4f53b..bb99c0c 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -78,18 +78,6 @@
}
}
-inline bool Object::CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val) {
- // Force use of non-transactional mode and do not check.
- return CasFieldWeakSequentiallyConsistent32<false, false>(
- OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
-}
-
-inline bool Object::CasLockWordWeakAcquire(LockWord old_val, LockWord new_val) {
- // Force use of non-transactional mode and do not check.
- return CasFieldWeakAcquire32<false, false>(
- OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
-}
-
inline uint32_t Object::GetLockOwnerThreadId() {
return Monitor::GetLockOwnerThreadId(this);
}
@@ -575,84 +563,6 @@
}
}
-// TODO: Pass memory_order_ and strong/weak as arguments to avoid code duplication?
-
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
-
- return atomic_addr->CompareAndSetWeakSequentiallyConsistent(old_value, new_value);
-}
-
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldWeakAcquire32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
-
- return atomic_addr->CompareAndSetWeakAcquire(old_value, new_value);
-}
-
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldWeakRelease32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
-
- return atomic_addr->CompareAndSetWeakRelease(old_value, new_value);
-}
-
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
-
- return atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_value, new_value);
-}
-
template<bool kTransactionActive,
bool kCheckTransaction,
VerifyObjectFlags kVerifyFlags,
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index 2988d06..597ba67 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -32,15 +32,17 @@
template<VerifyObjectFlags kVerifyFlags>
inline LockWord Object::GetLockWord(bool as_volatile) {
if (as_volatile) {
- return LockWord(GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
+ return LockWord(GetField32Volatile<kVerifyFlags>(MonitorOffset()));
}
- return LockWord(GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
+ return LockWord(GetField32<kVerifyFlags>(MonitorOffset()));
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldWeakRelaxed32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value) {
+inline bool Object::CasField32(MemberOffset field_offset,
+ int32_t old_value,
+ int32_t new_value,
+ CASMode mode,
+ std::memory_order memory_order) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -53,46 +55,19 @@
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
- return atomic_addr->CompareAndSetWeakRelaxed(old_value, new_value);
+ return atomic_addr->CompareAndSet(old_value, new_value, mode, memory_order);
}
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldStrongRelaxed32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
-
- return atomic_addr->CompareAndSetStrongRelaxed(old_value, new_value);
-}
-
-inline bool Object::CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val) {
+inline bool Object::CasLockWord(LockWord old_val,
+ LockWord new_val,
+ CASMode mode,
+ std::memory_order memory_order) {
// Force use of non-transactional mode and do not check.
- return CasFieldWeakRelaxed32<false, false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_),
- old_val.GetValue(),
- new_val.GetValue());
-}
-
-inline bool Object::CasLockWordStrongRelaxed(LockWord old_val, LockWord new_val) {
- // Force use of non-transactional mode and do not check.
- return CasFieldStrongRelaxed32<false, false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_),
- old_val.GetValue(),
- new_val.GetValue());
-}
-
-inline bool Object::CasLockWordWeakRelease(LockWord old_val, LockWord new_val) {
- // Force use of non-transactional mode and do not check.
- return CasFieldWeakRelease32<false, false>(
- OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
+ return CasField32<false, false>(MonitorOffset(),
+ old_val.GetValue(),
+ new_val.GetValue(),
+ mode,
+ memory_order);
}
inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency) {
@@ -173,7 +148,7 @@
return rb_state;
}
-template<bool kCasRelease>
+template<std::memory_order kMemoryOrder>
inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state) {
if (!kUseBakerReadBarrier) {
LOG(FATAL) << "Unreachable";
@@ -197,9 +172,7 @@
- // If kCasRelease == true, use a CAS release so that when GC updates all the fields of
- // an object and then changes the object from gray to black, the field updates (stores) will be
- // visible (won't be reordered after this CAS.)
+ // If kMemoryOrder == std::memory_order_release, use a CAS release so that when GC updates all
+ // the fields of an object and then changes the object from gray to black, the field updates
+ // (stores) will be visible (won't be reordered after this CAS.)
- } while (!(kCasRelease ?
- CasLockWordWeakRelease(expected_lw, new_lw) :
- CasLockWordWeakRelaxed(expected_lw, new_lw)));
+ } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, kMemoryOrder));
return true;
}
@@ -216,7 +189,7 @@
new_lw = lw;
new_lw.SetMarkBitState(mark_bit);
// Since this is only set from the mutator, we can use the non-release CAS.
- } while (!CasLockWordWeakRelaxed(expected_lw, new_lw));
+ } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, std::memory_order_relaxed));
return true;
}
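
The kMemoryOrder template argument selects the ordering of the
lock-word CAS. For the gray-to-white transition the GC asks for
std::memory_order_release so that its earlier field stores are
published before the color changes; a reader that loads the flag with
acquire ordering then sees those stores. A standalone illustration (not
ART code; the types and values are invented):

    #include <atomic>

    struct Node {
      int field = 0;
      std::atomic<int> color{0};  // 0 = gray, 1 = white (illustrative)
    };

    void Collector(Node& n) {
      n.field = 42;  // field updates performed before the color flip
      int expected = 0;
      // Release: the store to `field` cannot be reordered past this CAS.
      n.color.compare_exchange_strong(expected, 1, std::memory_order_release);
    }

    int Mutator(Node& n) {
      if (n.color.load(std::memory_order_acquire) == 1) {
        return n.field;  // guaranteed to observe 42
      }
      return -1;         // color not flipped yet
    }
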
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 200bc47..ce845bf 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -199,7 +199,7 @@
DCHECK_EQ(hash_word.GetState(), LockWord::kHashCode);
// Use a strong CAS to prevent spurious failures since these can make the boot image
// non-deterministic.
- if (current_this->CasLockWordStrongRelaxed(lw, hash_word)) {
+ if (current_this->CasLockWord(lw, hash_word, CASMode::kStrong, std::memory_order_relaxed)) {
return hash_word.GetHashCode();
}
break;
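
The hash-code path shows why strength and ordering are independent
arguments: the in-source comment wants a strong CAS (spurious failures
could make the boot image non-deterministic) but only relaxed ordering.
A simplified, non-ART sketch of a strong-but-relaxed install:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> word{0};  // 0 means "no hash installed yet"

    uint32_t InstallHash(uint32_t candidate) {
      uint32_t expected = 0;
      // Strong + relaxed: must not fail spuriously, needs no ordering.
      if (word.compare_exchange_strong(expected, candidate,
                                       std::memory_order_relaxed)) {
        return candidate;  // we installed our hash
      }
      return expected;     // another thread installed one first
    }
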
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index f92ff1e..654fe95 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -111,7 +111,7 @@
#endif
ALWAYS_INLINE void SetReadBarrierState(uint32_t rb_state) REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kCasRelease = false>
+ template<std::memory_order kMemoryOrder = std::memory_order_relaxed>
ALWAYS_INLINE bool AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -151,15 +151,7 @@
LockWord GetLockWord(bool as_volatile) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetLockWord(LockWord new_val, bool as_volatile) REQUIRES_SHARED(Locks::mutator_lock_);
- bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val)
- REQUIRES_SHARED(Locks::mutator_lock_);
- bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
- REQUIRES_SHARED(Locks::mutator_lock_);
- bool CasLockWordWeakAcquire(LockWord old_val, LockWord new_val)
- REQUIRES_SHARED(Locks::mutator_lock_);
- bool CasLockWordWeakRelease(LockWord old_val, LockWord new_val)
- REQUIRES_SHARED(Locks::mutator_lock_);
- bool CasLockWordStrongRelaxed(LockWord old_val, LockWord new_val)
+ bool CasLockWord(LockWord old_val, LockWord new_val, CASMode mode, std::memory_order memory_order)
REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
@@ -525,49 +517,11 @@
template<bool kTransactionActive,
bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CasFieldWeakRelaxed32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CasFieldStrongRelaxed32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CasFieldWeakAcquire32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CasFieldWeakRelease32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value)
+ ALWAYS_INLINE bool CasField32(MemberOffset field_offset,
+ int32_t old_value,
+ int32_t new_value,
+ CASMode mode,
+ std::memory_order memory_order)
REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index e723169..d47bc0d 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -175,7 +175,7 @@
}
LockWord fat(this, lw.GCState());
// Publish the updated lock word, which may race with other threads.
- bool success = GetObject()->CasLockWordWeakRelease(lw, fat);
+ bool success = GetObject()->CasLockWord(lw, fat, CASMode::kWeak, std::memory_order_release);
// Lock profiling.
if (success && owner_ != nullptr && lock_profiling_threshold_ != 0) {
// Do not abort on dex pc errors. This can easily happen when we want to dump a stack trace on
@@ -1041,7 +1041,7 @@
case LockWord::kUnlocked: {
// No ordering required for preceding lockword read, since we retest.
LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState()));
- if (h_obj->CasLockWordWeakAcquire(lock_word, thin_locked)) {
+ if (h_obj->CasLockWord(lock_word, thin_locked, CASMode::kWeak, std::memory_order_acquire)) {
AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
return h_obj.Get(); // Success!
}
@@ -1065,7 +1065,10 @@
return h_obj.Get(); // Success!
} else {
// Use CAS to preserve the read barrier state.
- if (h_obj->CasLockWordWeakRelaxed(lock_word, thin_locked)) {
+ if (h_obj->CasLockWord(lock_word,
+ thin_locked,
+ CASMode::kWeak,
+ std::memory_order_relaxed)) {
AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
return h_obj.Get(); // Success!
}
@@ -1167,7 +1170,7 @@
return true;
} else {
// Use CAS to preserve the read barrier state.
- if (h_obj->CasLockWordWeakRelease(lock_word, new_lw)) {
+ if (h_obj->CasLockWord(lock_word, new_lw, CASMode::kWeak, std::memory_order_release)) {
AtraceMonitorUnlock();
// Success!
return true;
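
The monitor code maps lock operations onto the new argument pair:
acquire ordering when taking a thin lock, release ordering when
publishing an unlocked or inflated lock word. The usual pairing,
sketched outside ART with invented names:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> lock_word{0};  // 0 = unlocked, else owner id
    int guarded_data = 0;

    bool TryLock(uint32_t thread_id) {
      uint32_t expected = 0;
      // Acquire pairs with the release in Unlock(): on success, writes
      // made by the previous owner are visible to this thread. Weak CAS,
      // so callers should be prepared to retry on spurious failure.
      return lock_word.compare_exchange_weak(expected, thread_id,
                                             std::memory_order_acquire);
    }

    void Unlock(int value) {
      guarded_data = value;  // write made while holding the lock
      lock_word.store(0, std::memory_order_release);
    }
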
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index d41a195..0f474d3 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -41,9 +41,11 @@
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(javaObj);
// JNI must use non transactional mode.
- bool success = obj->CasFieldStrongSequentiallyConsistent32<false>(MemberOffset(offset),
- expectedValue,
- newValue);
+ bool success = obj->CasField32<false>(MemberOffset(offset),
+ expectedValue,
+ newValue,
+ CASMode::kStrong,
+ std::memory_order_seq_cst);
return success ? JNI_TRUE : JNI_FALSE;
}
diff --git a/runtime/subtype_check.h b/runtime/subtype_check.h
index 1fe62e8..aac547e 100644
--- a/runtime/subtype_check.h
+++ b/runtime/subtype_check.h
@@ -542,15 +542,17 @@
int32_t new_value)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current() != nullptr && Runtime::Current()->IsActiveTransaction()) {
- return klass->template
- CasFieldWeakSequentiallyConsistent32</*kTransactionActive*/true>(offset,
- old_value,
- new_value);
+ return klass->template CasField32</*kTransactionActive*/true>(offset,
+ old_value,
+ new_value,
+ CASMode::kWeak,
+ std::memory_order_seq_cst);
} else {
- return klass->template
- CasFieldWeakSequentiallyConsistent32</*kTransactionActive*/false>(offset,
- old_value,
- new_value);
+ return klass->template CasField32</*kTransactionActive*/false>(offset,
+ old_value,
+ new_value,
+ CASMode::kWeak,
+ std::memory_order_seq_cst);
}
}
diff --git a/runtime/subtype_check_test.cc b/runtime/subtype_check_test.cc
index e297d0b..979fa42 100644
--- a/runtime/subtype_check_test.cc
+++ b/runtime/subtype_check_test.cc
@@ -86,9 +86,11 @@
}
template <bool kTransactionActive>
- bool CasFieldWeakSequentiallyConsistent32(art::MemberOffset offset,
- int32_t old_value,
- int32_t new_value)
+ bool CasField32(art::MemberOffset offset,
+ int32_t old_value,
+ int32_t new_value,
+ CASMode mode ATTRIBUTE_UNUSED,
+ std::memory_order memory_order ATTRIBUTE_UNUSED)
REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(offset);
if (old_value == GetField32Volatile(offset)) {