Fix a missed use of android_atomic on the thread state_and_flags field.
Move to using art::Atomic, and add the necessary FetchAndOr/FetchAndAnd operations to art::Atomic.
Change-Id: I32f1cdc4e0a2037b73f459bf4bb4d544f357f41b
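
For reference, the FetchAndOr/FetchAndAnd operations added below follow std::atomic fetch semantics: they apply the bitwise operation and return the value held before it. A minimal sketch of the set/clear-flag pattern this change adopts, written against plain std::atomic<int32_t> with an illustrative flag constant standing in for ART's ThreadFlag values:

    #include <atomic>
    #include <cstdint>

    // Illustrative flag bit; stands in for ART's ThreadFlag values.
    constexpr int32_t kSomeFlag = 1 << 0;

    std::atomic<int32_t> state_and_flags{0};

    void AtomicSetFlag(int32_t flag) {
      // fetch_or ORs the bit in and returns the pre-OR value.
      state_and_flags.fetch_or(flag, std::memory_order_seq_cst);
    }

    void AtomicClearFlag(int32_t flag) {
      // ANDing with the complement (-1 ^ flag == ~flag) clears the bit.
      state_and_flags.fetch_and(-1 ^ flag, std::memory_order_seq_cst);
    }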
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 5cfdbcc..c2abf56 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -343,6 +343,14 @@
return this->fetch_sub(value, std::memory_order_seq_cst); // Return old value.
}
+ T FetchAndOrSequentiallyConsistent(const T value) {
+ return this->fetch_or(value, std::memory_order_seq_cst); // Return old value.
+ }
+
+ T FetchAndAndSequentiallyConsistent(const T value) {
+ return this->fetch_and(value, std::memory_order_seq_cst); // Return old value.
+ }
+
volatile T* Address() {
return reinterpret_cast<T*>(this);
}
@@ -521,6 +529,30 @@
}
}
+ T FetchAndOrSequentiallyConsistent(const T value) {
+ if (sizeof(T) <= 4) {
+ return __sync_fetch_and_or(&value_, value); // Return old value.
+ } else {
+ T expected;
+ do {
+ expected = LoadRelaxed();
+ } while (!CompareExchangeWeakSequentiallyConsistent(expected, expected | value));
+ return expected;
+ }
+ }
+
+ T FetchAndAndSequentiallyConsistent(const T value) {
+ if (sizeof(T) <= 4) {
+ return __sync_fetch_and_and(&value_, value); // Return old value.
+ } else {
+ T expected;
+ do {
+ expected = LoadRelaxed();
+ } while (!CompareExchangeWeakSequentiallyConsistent(expected, expected & value));
+ return expected;
+ }
+ }
+
T operator++() { // Prefix operator.
if (sizeof(T) <= 4) {
return __sync_add_and_fetch(&value_, 1); // Return new value.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 4985583..7827dfb 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -589,14 +589,6 @@
#endif
}
-void Thread::AtomicSetFlag(ThreadFlag flag) {
- android_atomic_or(flag, &tls32_.state_and_flags.as_int);
-}
-
-void Thread::AtomicClearFlag(ThreadFlag flag) {
- android_atomic_and(-1 ^ flag, &tls32_.state_and_flags.as_int);
-}
-
// Attempt to rectify locks so that we dump thread list with required locks before exiting.
static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
LOG(ERROR) << *thread << " suspend count already zero.";
@@ -702,9 +694,10 @@
union StateAndFlags new_state_and_flags;
new_state_and_flags.as_int = old_state_and_flags.as_int;
new_state_and_flags.as_struct.flags |= kCheckpointRequest;
- int succeeded = android_atomic_acquire_cas(old_state_and_flags.as_int, new_state_and_flags.as_int,
- &tls32_.state_and_flags.as_int);
- if (UNLIKELY(succeeded != 0)) {
+ bool success =
+ tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(old_state_and_flags.as_int,
+ new_state_and_flags.as_int);
+ if (UNLIKELY(!success)) {
// The thread changed state before the checkpoint was installed.
CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
tlsPtr_.checkpoint_functions[available_checkpoint] = nullptr;
@@ -712,7 +705,7 @@
CHECK_EQ(ReadFlag(kCheckpointRequest), true);
TriggerSuspend();
}
- return succeeded == 0;
+ return success;
}
void Thread::FullSuspendCheck() {
diff --git a/runtime/thread.h b/runtime/thread.h
index 0640b38..4312741 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -739,9 +739,13 @@
return (tls32_.state_and_flags.as_struct.flags != 0);
}
- void AtomicSetFlag(ThreadFlag flag);
+ void AtomicSetFlag(ThreadFlag flag) {
+ tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flag);
+ }
- void AtomicClearFlag(ThreadFlag flag);
+ void AtomicClearFlag(ThreadFlag flag) {
+ tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
+ }
void ResetQuickAllocEntryPointsForThread();
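
A note on the atomic.h fallback above: for types wider than 4 bytes, where the __sync builtins are not used, fetch-or/fetch-and are emulated with a weak compare-exchange loop. A self-contained sketch of the same technique over std::atomic (the function name here is illustrative, not ART's):

    #include <atomic>
    #include <cstdint>

    // Emulate a 64-bit fetch_or with a weak CAS loop: retry until no other
    // thread modified the value between the load and the exchange.
    int64_t FetchOrViaCas(std::atomic<int64_t>& target, int64_t bits) {
      int64_t expected = target.load(std::memory_order_relaxed);
      while (!target.compare_exchange_weak(expected, expected | bits,
                                           std::memory_order_seq_cst)) {
        // On failure, compare_exchange_weak refreshes 'expected' with the
        // current value, so no explicit reload is needed.
      }
      return expected;  // The old value, matching fetch_or semantics.
    }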