Diffstat (limited to 'runtime/thread-inl.h')
-rw-r--r--  runtime/thread-inl.h | 22
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 432453e311..431d66d7ff 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -83,7 +83,11 @@ inline void Thread::AllowThreadSuspension() {
inline void Thread::CheckSuspend(bool implicit) {
DCHECK_EQ(Thread::Current(), this);
while (true) {
- StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
+ // memory_order_relaxed should be OK, since RunCheckpointFunction shares a lock with the
+ // requestor, and FullSuspendCheck() re-checks later. But we currently need memory_order_acquire
+ // for the empty checkpoint path.
+ // TODO (b/382722942): Revisit after we fix RunEmptyCheckpoint().
+ StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_acquire);
if (LIKELY(!state_and_flags.IsAnyOfFlagsSet(SuspendOrCheckpointRequestFlags()))) {
break;
} else if (state_and_flags.IsFlagSet(ThreadFlag::kCheckpointRequest)) {
@@ -110,7 +114,8 @@ inline void Thread::CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mu
Thread* self = Thread::Current();
DCHECK_EQ(self, this);
for (;;) {
- if (ReadFlag(ThreadFlag::kEmptyCheckpointRequest)) {
+ // TODO (b/382722942): Revisit memory ordering after we fix RunEmptyCheckpoint().
+ if (ReadFlag(ThreadFlag::kEmptyCheckpointRequest, std::memory_order_acquire)) {
RunEmptyCheckpoint();
// Check we hold only an expected mutex when accessing weak ref.
if (kIsDebugBuild) {
@@ -135,7 +140,8 @@ inline void Thread::CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mu
inline void Thread::CheckEmptyCheckpointFromMutex() {
DCHECK_EQ(Thread::Current(), this);
for (;;) {
- if (ReadFlag(ThreadFlag::kEmptyCheckpointRequest)) {
+ // TODO (b/382722942): Revisit memory ordering after we fix RunEmptyCheckpoint().
+ if (ReadFlag(ThreadFlag::kEmptyCheckpointRequest, std::memory_order_acquire)) {
RunEmptyCheckpoint();
} else {
break;
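
The acquire loads added in the hunks above are the standard way to pair with a requestor's release store when no lock orders the two threads: the requestor publishes data, then sets the flag, and the target that observes the flag with an acquire load is guaranteed to also see the published data. A minimal standalone sketch of that pairing (not ART code; all names here are hypothetical):

  // Sketch only: acquire/release flag hand-off, as assumed for the empty checkpoint path.
  #include <atomic>
  #include <cassert>
  #include <thread>

  std::atomic<bool> empty_checkpoint_requested{false};  // hypothetical flag
  int published_data = 0;                                // handed off via the flag

  void Requestor() {
    published_data = 42;                                                 // publish first
    empty_checkpoint_requested.store(true, std::memory_order_release);   // then set the flag
  }

  void Target() {
    // Acquire pairs with the release store above; a relaxed load would not
    // order the read of published_data after the requestor's write.
    while (!empty_checkpoint_requested.load(std::memory_order_acquire)) {
      std::this_thread::yield();
    }
    assert(published_data == 42);  // visible once the acquire load sees the flag
  }

  int main() {
    std::thread t1(Requestor), t2(Target);
    t1.join();
    t2.join();
  }
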
@@ -234,7 +240,11 @@ inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
inline void Thread::TransitionToSuspendedAndRunCheckpoints(ThreadState new_state) {
DCHECK_NE(new_state, ThreadState::kRunnable);
while (true) {
- StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
+ // memory_order_relaxed is OK for ordinary checkpoints, which enforce ordering via
+ // thread_suspend_count_lock_. It is not currently OK for empty checkpoints.
+ // TODO (b/382722942): Consider changing back to memory_order_relaxed after fixing empty
+ // checkpoints.
+ StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_acquire);
DCHECK_EQ(old_state_and_flags.GetState(), ThreadState::kRunnable);
if (UNLIKELY(old_state_and_flags.IsFlagSet(ThreadFlag::kCheckpointRequest))) {
IncrementStatsCounter(&checkpoint_count_);
@@ -265,6 +275,8 @@ inline void Thread::TransitionToSuspendedAndRunCheckpoints(ThreadState new_state
inline void Thread::CheckActiveSuspendBarriers() {
DCHECK_NE(GetState(), ThreadState::kRunnable);
while (true) {
+ // memory_order_relaxed is OK here, since PassActiveSuspendBarriers() re-checks under
+ // thread_suspend_count_lock_.
StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
if (LIKELY(!state_and_flags.IsFlagSet(ThreadFlag::kCheckpointRequest) &&
!state_and_flags.IsFlagSet(ThreadFlag::kEmptyCheckpointRequest) &&
@@ -540,7 +552,7 @@ inline void Thread::IncrementSuspendCount(Thread* self) {
}
inline void Thread::DecrementSuspendCount(Thread* self, bool for_user_code) {
- DCHECK(ReadFlag(ThreadFlag::kSuspendRequest));
+ DCHECK(ReadFlag(ThreadFlag::kSuspendRequest, std::memory_order_relaxed));
Locks::thread_suspend_count_lock_->AssertHeld(self);
if (UNLIKELY(tls32_.suspend_count <= 0)) {
UnsafeLogFatalForSuspendCount(self, this);
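
For contrast, the reads that stay (or become) memory_order_relaxed above, in CheckActiveSuspendBarriers() and DecrementSuspendCount(), lean on the slow path re-checking under thread_suspend_count_lock_, which the requestor also holds when it sets the flag. A minimal standalone sketch of that double-check pattern (not ART code; names are hypothetical):

  // Sketch only: relaxed fast-path read, re-checked under the same mutex the
  // requestor holds while setting the flag.
  #include <atomic>
  #include <mutex>
  #include <thread>

  std::mutex suspend_count_lock;               // stands in for thread_suspend_count_lock_
  std::atomic<bool> suspend_requested{false};  // hypothetical flag
  int suspend_count = 0;                       // guarded by suspend_count_lock

  void Requestor() {
    std::lock_guard<std::mutex> lock(suspend_count_lock);
    suspend_count++;
    suspend_requested.store(true, std::memory_order_relaxed);  // set while holding the lock
  }

  void TargetFastPath() {
    // Relaxed is enough: a stale "false" only delays noticing the request, and a
    // "true" sends us to the slow path, where the lock orders us after the requestor.
    if (suspend_requested.load(std::memory_order_relaxed)) {
      std::lock_guard<std::mutex> lock(suspend_count_lock);
      if (suspend_count > 0) {
        // ... act on the request under the lock ...
      }
    }
  }

  int main() {
    std::thread requestor(Requestor);
    requestor.join();  // in ART the two run concurrently; joined here for a simple demo
    TargetFastPath();
  }
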