author     2022-10-14 20:29:02 +0000
committer  2022-10-21 22:43:37 +0000
commit     a23d325152c7cd81ccb426a407f6da280797e61d (patch)
tree       c4c74bbbdeaeb97ebb622b5d8e4ca42589186ddf /runtime/base/mutex-inl.h
parent     3e1b1f8ff913d2fb811e3fe5714552fc98000d15 (diff)
Revert^4 "Thread suspension cleanup and deadlock fix"
This reverts commit ebd76406bf5fa74185998bc29f0f27c20fa2e683.
PS1 is identical to aosp/2216806.
PS2 in addition converts the RunCheckpoint call used from
StackUtil::GetAllStackTraces to RunCheckpointUnchecked to temporarily
work around another checkpoint Run() function lock ordering
issue.
PS3 is a nontrivial rebase.
Test: Build and boot AOSP, Treehugger
Bug: 240742796
Bug: 203363895
Bug: 238032384
Bug: 253671779
Change-Id: I38385e41392652cc30e5e74fd8b93e22088827a5
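The PS2 note above concerns lock-order checking applied to checkpoint Run() functions. As a rough, self-contained sketch of the class of problem being worked around (the types and names below are invented for illustration and are not ART's API), a per-thread list of held lock levels can assert that locks are only acquired in increasing level order, and a caller that knows a particular closure still violates that order can dispatch it with checking disabled, analogous to routing the StackUtil::GetAllStackTraces checkpoint through an unchecked path:

#include <cassert>
#include <vector>

// Toy lock levels, loosely modelled on ART's LockLevel idea: a thread may only
// acquire a lock whose level is strictly higher than every lock it already holds.
enum ToyLockLevel { kToyThreadListLock = 1, kToyTraceCollectionLock = 2 };

struct ToyThread {
  std::vector<ToyLockLevel> held_levels;

  void RegisterAsLocked(ToyLockLevel level, bool check) {
    if (check) {
      for (ToyLockLevel held : held_levels) {
        assert(level > held && "lock acquired out of level order");
      }
    }
    held_levels.push_back(level);
  }
  void RegisterAsUnlocked() { held_levels.pop_back(); }
};

// A checkpoint-style closure whose Run() takes a lower-level lock while the
// dispatching code already holds a higher-level one; with checking enabled the
// assertion above would fire.
struct ToyCheckpoint {
  void Run(ToyThread& self, bool check) {
    self.RegisterAsLocked(kToyThreadListLock, check);
    self.RegisterAsUnlocked();
  }
};

int main() {
  ToyThread self;
  ToyCheckpoint checkpoint;
  self.RegisterAsLocked(kToyTraceCollectionLock, /*check=*/true);
  // "Unchecked" dispatch: tolerate the known ordering violation for now,
  // analogous to running one checkpoint without lock-order validation.
  checkpoint.Run(self, /*check=*/false);
  self.RegisterAsUnlocked();
  return 0;
}

The point of the temporary workaround is the final dispatch: checking stays on everywhere else, while the one known-bad closure is exempted until its locking is fixed.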
Diffstat (limited to 'runtime/base/mutex-inl.h')
-rw-r--r--  runtime/base/mutex-inl.h  74
1 file changed, 38 insertions, 36 deletions
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index dba1e1299b..a8dc95a86b 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -60,43 +60,43 @@ static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALY
   // The check below enumerates the cases where we expect not to be able to check the validity of
   // locks on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock.
   // TODO: tighten this check.
-  if (kDebugLocking) {
-    CHECK(!Locks::IsSafeToCallAbortRacy() ||
-          // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
-          // yet established.
-          level == kRuntimeShutdownLock ||
-          // Thread Ids are allocated/released before threads are established.
-          level == kAllocatedThreadIdsLock ||
-          // Thread LDT's are initialized without Thread::Current established.
-          level == kModifyLdtLock ||
-          // Threads are unregistered while holding the thread list lock, during this process they
-          // no longer exist and so we expect an unlock with no self.
-          level == kThreadListLock ||
-          // Ignore logging which may or may not have set up thread data structures.
-          level == kLoggingLock ||
-          // When transitioning from suspended to runnable, a daemon thread might be in
-          // a situation where the runtime is shutting down. To not crash our debug locking
-          // mechanism we just pass null Thread* to the MutexLock during that transition
-          // (see Thread::TransitionFromSuspendedToRunnable).
-          level == kThreadSuspendCountLock ||
-          // Avoid recursive death.
-          level == kAbortLock ||
-          // Locks at the absolute top of the stack can be locked at any time.
-          level == kTopLockLevel ||
-          // The unexpected signal handler may be catching signals from any thread.
-          level == kUnexpectedSignalLock) << level;
-  }
+  CHECK(!Locks::IsSafeToCallAbortRacy() ||
+        // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
+        // yet established.
+        level == kRuntimeShutdownLock ||
+        // Thread Ids are allocated/released before threads are established.
+        level == kAllocatedThreadIdsLock ||
+        // Thread LDT's are initialized without Thread::Current established.
+        level == kModifyLdtLock ||
+        // Threads are unregistered while holding the thread list lock, during this process they
+        // no longer exist and so we expect an unlock with no self.
+        level == kThreadListLock ||
+        // Ignore logging which may or may not have set up thread data structures.
+        level == kLoggingLock ||
+        // When transitioning from suspended to runnable, a daemon thread might be in
+        // a situation where the runtime is shutting down. To not crash our debug locking
+        // mechanism we just pass null Thread* to the MutexLock during that transition
+        // (see Thread::TransitionFromSuspendedToRunnable).
+        level == kThreadSuspendCountLock ||
+        // Avoid recursive death.
+        level == kAbortLock ||
+        // Locks at the absolute top of the stack can be locked at any time.
+        level == kTopLockLevel ||
+        // The unexpected signal handler may be catching signals from any thread.
+        level == kUnexpectedSignalLock) << level;
 }
 
-inline void BaseMutex::RegisterAsLocked(Thread* self) {
+inline void BaseMutex::RegisterAsLocked(Thread* self, bool check) {
   if (UNLIKELY(self == nullptr)) {
-    CheckUnattachedThread(level_);
-    return;
+    if (check) {
+      CheckUnattachedThread(level_);
+    }
+  } else {
+    RegisterAsLockedImpl(self, level_, check);
   }
-  RegisterAsLockedImpl(self, level_);
 }
 
-inline void BaseMutex::RegisterAsLockedImpl(Thread* self, LockLevel level) {
+inline void BaseMutex::RegisterAsLockedImpl(Thread* self, LockLevel level, bool check) {
   DCHECK(self != nullptr);
   DCHECK_EQ(level_, level);
   // It would be nice to avoid this condition checking in the non-debug case,
@@ -107,7 +107,7 @@ inline void BaseMutex::RegisterAsLockedImpl(Thread* self, LockLevel level) {
   if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitLock) != nullptr) {
     level = kThreadWaitWakeLock;
   }
-  if (kDebugLocking) {
+  if (check) {
     // Check if a bad Mutex of this level or lower is held.
     bool bad_mutexes_held = false;
     // Specifically allow a kTopLockLevel lock to be gained when the current thread holds the
@@ -161,10 +161,12 @@ inline void BaseMutex::RegisterAsLockedImpl(Thread* self, LockLevel level) {
 
 inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
   if (UNLIKELY(self == nullptr)) {
-    CheckUnattachedThread(level_);
-    return;
+    if (kDebugLocking) {
+      CheckUnattachedThread(level_);
+    }
+  } else {
+    RegisterAsUnlockedImpl(self , level_);
   }
-  RegisterAsUnlockedImpl(self , level_);
 }
 
 inline void BaseMutex::RegisterAsUnlockedImpl(Thread* self, LockLevel level) {
@@ -306,7 +308,7 @@ inline void MutatorMutex::TransitionFromRunnableToSuspended(Thread* self) {
 }
 
 inline void MutatorMutex::TransitionFromSuspendedToRunnable(Thread* self) {
-  RegisterAsLockedImpl(self, kMutatorLock);
+  RegisterAsLockedImpl(self, kMutatorLock, kDebugLocking);
   AssertSharedHeld(self);
 }
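Read as a whole, the diff moves the kDebugLocking decision out of the registration helpers and into their callers: RegisterAsLocked and RegisterAsLockedImpl take an explicit check argument, the CHECK in CheckUnattachedThread is now unconditional but only reached when the caller asked for checking, and TransitionFromSuspendedToRunnable passes kDebugLocking to preserve the previous behaviour. A compressed stand-alone sketch of that control flow, using stub types rather than the real declarations in mutex.h:

#include <cstdio>

// Stand-ins for the real declarations; kDebugLocking is assumed to mirror the
// debug-build constant ART consults.
static constexpr bool kDebugLocking = true;
struct Thread {};
enum LockLevel { kMutatorLock };

static void CheckUnattachedThread(LockLevel level) {
  // Unconditional in the real code; only reached when the caller asked for checking.
  std::printf("checking unattached-thread lock use at level %d\n", static_cast<int>(level));
}

struct BaseMutexSketch {
  LockLevel level_ = kMutatorLock;

  void RegisterAsLockedImpl(Thread* /*self*/, LockLevel level, bool check) {
    if (check) {
      std::printf("validating held-lock levels before taking level %d\n", static_cast<int>(level));
    }
  }

  // Mirrors the new shape: the caller decides whether this acquisition is validated.
  void RegisterAsLocked(Thread* self, bool check) {
    if (self == nullptr) {
      if (check) {
        CheckUnattachedThread(level_);
      }
    } else {
      RegisterAsLockedImpl(self, level_, check);
    }
  }
};

int main() {
  BaseMutexSketch mu;
  Thread self;
  mu.RegisterAsLocked(&self, kDebugLocking);    // normal path, as TransitionFromSuspendedToRunnable now does
  mu.RegisterAsLocked(&self, /*check=*/false);  // per-acquisition opt-out introduced by the patch
  mu.RegisterAsLocked(nullptr, kDebugLocking);  // unattached-thread path
  return 0;
}

The design choice is that validation becomes a per-acquisition property rather than a translation-unit-wide constant, which is what lets specific suspension paths opt out without disabling debug lock checking globally.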