Diffstat (limited to 'runtime/base')
-rw-r--r--  runtime/base/locks.h      11
-rw-r--r--  runtime/base/mutex-inl.h  75
-rw-r--r--  runtime/base/mutex.cc     28
-rw-r--r--  runtime/base/mutex.h      28
4 files changed, 59 insertions, 83 deletions
diff --git a/runtime/base/locks.h b/runtime/base/locks.h
index e8c83fe2b2..c15e5dee71 100644
--- a/runtime/base/locks.h
+++ b/runtime/base/locks.h
@@ -108,6 +108,10 @@ enum LockLevel : uint8_t {
   kClassLinkerClassesLock,  // TODO rename.
   kSubtypeCheckLock,
   kBreakpointLock,
+  // This is a generic lock level for a lock meant to be gained after having a
+  // monitor lock.
+  kPostMonitorLock,
+  kMonitorLock,
   kMonitorListLock,
   kThreadListLock,
   kAllocTrackerLock,
@@ -121,10 +125,7 @@ enum LockLevel : uint8_t {
   kRuntimeShutdownLock,
   kTraceLock,
   kHeapBitmapLock,
-  // This is a generic lock level for a lock meant to be gained after having a
-  // monitor lock.
-  kPostMonitorLock,
-  kMonitorLock,
+
   // This is a generic lock level for a top-level lock meant to be gained after having the
   // mutator_lock_.
   kPostMutatorTopLockLevel,
@@ -137,7 +138,7 @@ enum LockLevel : uint8_t {
   kUserCodeSuspensionLock,
   kZygoteCreationLock,

-  // The highest valid lock level. Use this for locks that should only be acquired with no
+  // The highest valid lock level. Use this if there is code that should only be called with no
   // other locks held. Since this is the highest lock level we also allow it to be held even if the
   // runtime or current thread is not fully set-up yet (for example during thread attach). Note that
   // this lock also has special behavior around the mutator_lock_. Since the mutator_lock_ is not
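The hunks above move kPostMonitorLock and kMonitorLock earlier in the LockLevel enum, giving them a lower rank than kMonitorListLock and kThreadListLock. Under the convention visible in the mutex-inl.h hunks below (acquisition flags any held mutex "of this level or lower"), a lock may only be taken while every held lock ranks strictly higher, so the reordering appears intended to let monitor locks be taken while those bookkeeping locks are held. A minimal sketch of that style of checking, with hypothetical names and levels, not ART's actual bookkeeping:

#include <cassert>
#include <cstdint>

// Hypothetical levels mirroring the enum's ordering idea.
enum LockLevel : uint8_t { kMonitorLock, kMonitorListLock, kThreadListLock, kTopLockLevel };

class CheckedThread {
 public:
  // Acquiring `level` is a potential deadlock if we already hold a lock of
  // this level or lower: another thread could take the two in the opposite
  // order. (ART's real check carves out several exceptions.)
  void OnAcquire(LockLevel level) {
    for (int l = 0; l <= level; ++l) {
      assert(held_[l] == 0 && "lock level inversion");
    }
    held_[level]++;
  }
  void OnRelease(LockLevel level) { held_[level]--; }

 private:
  int held_[kTopLockLevel + 1] = {};
};

In this scheme a thread holding kThreadListLock may still take kMonitorLock, which is what the enum move enables.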
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 712b61d4ac..dba1e1299b 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -60,44 +60,43 @@ static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALY
   // The check below enumerates the cases where we expect not to be able to check the validity of
   // locks on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock.
   // TODO: tighten this check.
-  CHECK(!Locks::IsSafeToCallAbortRacy() ||
-        // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
-        // yet established.
-        level == kRuntimeShutdownLock ||
-        // Thread Ids are allocated/released before threads are established.
-        level == kAllocatedThreadIdsLock ||
-        // Thread LDT's are initialized without Thread::Current established.
-        level == kModifyLdtLock ||
-        // Threads are unregistered while holding the thread list lock, during this process they
-        // no longer exist and so we expect an unlock with no self.
-        level == kThreadListLock ||
-        // Ignore logging which may or may not have set up thread data structures.
-        level == kLoggingLock ||
-        // When transitioning from suspended to runnable, a daemon thread might be in
-        // a situation where the runtime is shutting down. To not crash our debug locking
-        // mechanism we just pass null Thread* to the MutexLock during that transition
-        // (see Thread::TransitionFromSuspendedToRunnable).
-        level == kThreadSuspendCountLock ||
-        // Avoid recursive death.
-        level == kAbortLock ||
-        // Locks at the absolute top of the stack can be locked at any time.
-        level == kTopLockLevel ||
-        // The unexpected signal handler may be catching signals from any thread.
-        level == kUnexpectedSignalLock)
-      << level;
+  if (kDebugLocking) {
+    CHECK(!Locks::IsSafeToCallAbortRacy() ||
+          // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
+          // yet established.
+          level == kRuntimeShutdownLock ||
+          // Thread Ids are allocated/released before threads are established.
+          level == kAllocatedThreadIdsLock ||
+          // Thread LDT's are initialized without Thread::Current established.
+          level == kModifyLdtLock ||
+          // Threads are unregistered while holding the thread list lock, during this process they
+          // no longer exist and so we expect an unlock with no self.
+          level == kThreadListLock ||
+          // Ignore logging which may or may not have set up thread data structures.
+          level == kLoggingLock ||
+          // When transitioning from suspended to runnable, a daemon thread might be in
+          // a situation where the runtime is shutting down. To not crash our debug locking
+          // mechanism we just pass null Thread* to the MutexLock during that transition
+          // (see Thread::TransitionFromSuspendedToRunnable).
+          level == kThreadSuspendCountLock ||
+          // Avoid recursive death.
+          level == kAbortLock ||
+          // Locks at the absolute top of the stack can be locked at any time.
+          level == kTopLockLevel ||
+          // The unexpected signal handler may be catching signals from any thread.
+          level == kUnexpectedSignalLock) << level;
+  }
 }

-inline void BaseMutex::RegisterAsLocked(Thread* self, bool check) {
+inline void BaseMutex::RegisterAsLocked(Thread* self) {
   if (UNLIKELY(self == nullptr)) {
-    if (check) {
-      CheckUnattachedThread(level_);
-    }
-  } else {
-    RegisterAsLockedImpl(self, level_, check);
+    CheckUnattachedThread(level_);
+    return;
   }
+  RegisterAsLockedImpl(self, level_);
 }

-inline void BaseMutex::RegisterAsLockedImpl(Thread* self, LockLevel level, bool check) {
+inline void BaseMutex::RegisterAsLockedImpl(Thread* self, LockLevel level) {
   DCHECK(self != nullptr);
   DCHECK_EQ(level_, level);
   // It would be nice to avoid this condition checking in the non-debug case,
@@ -108,7 +107,7 @@ inline void BaseMutex::RegisterAsLockedImpl(Thread* self, LockLevel level, bool
   if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitLock) != nullptr) {
     level = kThreadWaitWakeLock;
   }
-  if (check) {
+  if (kDebugLocking) {
     // Check if a bad Mutex of this level or lower is held.
     bool bad_mutexes_held = false;
     // Specifically allow a kTopLockLevel lock to be gained when the current thread holds the
@@ -162,12 +161,10 @@ inline void BaseMutex::RegisterAsLockedImpl(Thread* self, LockLevel level, bool

 inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
   if (UNLIKELY(self == nullptr)) {
-    if (kDebugLocking) {
-      CheckUnattachedThread(level_);
-    }
-  } else {
-    RegisterAsUnlockedImpl(self, level_);
+    CheckUnattachedThread(level_);
+    return;
   }
+  RegisterAsUnlockedImpl(self, level_);
 }

 inline void BaseMutex::RegisterAsUnlockedImpl(Thread* self, LockLevel level) {
@@ -309,7 +306,7 @@ inline void MutatorMutex::TransitionFromRunnableToSuspended(Thread* self) {
 }

 inline void MutatorMutex::TransitionFromSuspendedToRunnable(Thread* self) {
-  RegisterAsLockedImpl(self, kMutatorLock, kDebugLocking);
+  RegisterAsLockedImpl(self, kMutatorLock);
   AssertSharedHeld(self);
 }
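The mutex-inl.h changes above fold the old runtime `check` parameter into the compile-time constant kDebugLocking. Because the constant is known at compile time, a plain `if (kDebugLocking)` costs nothing in release builds: the branch is dead code and the compiler drops it, so the extra parameter bought nothing. A minimal standalone sketch of the pattern (names hypothetical, not ART's code):

#include <iostream>

// In ART this tracks the build type; here it is just a constant.
static constexpr bool kDebugLocking = false;

void RegisterAsLockedSketch(int level) {
  if (kDebugLocking) {
    // This entire block is compiled out when kDebugLocking is false.
    std::cout << "verifying lock order for level " << level << '\n';
  }
  // ... unconditional bookkeeping would go here ...
}

int main() {
  RegisterAsLockedSketch(3);  // prints nothing in a "release" configuration
}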
"recursive " : "non-recursive ") + << name_ + << " level=" << static_cast<int>(level_) + << " rec=" << recursion_count_ + << " owner=" << GetExclusiveOwnerTid() << " "; DumpContention(os); } @@ -929,7 +923,7 @@ void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_sta } #endif -bool ReaderWriterMutex::SharedTryLock(Thread* self, bool check) { +bool ReaderWriterMutex::SharedTryLock(Thread* self) { DCHECK(self == nullptr || self == Thread::Current()); #if ART_USE_FUTEXES bool done = false; @@ -953,7 +947,7 @@ bool ReaderWriterMutex::SharedTryLock(Thread* self, bool check) { PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_; } #endif - RegisterAsLocked(self, check); + RegisterAsLocked(self); AssertSharedHeld(self); return true; } diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index dc9b885bf7..87e9525557 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -103,11 +103,10 @@ class BaseMutex { BaseMutex(const char* name, LockLevel level); virtual ~BaseMutex(); - // Add this mutex to those owned by self, and optionally perform lock order checking. Caller - // may wish to disable checking for trylock calls that cannot result in deadlock. For this call - // only, self may also be another suspended thread. - void RegisterAsLocked(Thread* self, bool check = kDebugLocking); - void RegisterAsLockedImpl(Thread* self, LockLevel level, bool check); + // Add this mutex to those owned by self, and perform appropriate checking. + // For this call only, self may also be another suspended thread. + void RegisterAsLocked(Thread* self); + void RegisterAsLockedImpl(Thread* self, LockLevel level); void RegisterAsUnlocked(Thread* self); void RegisterAsUnlockedImpl(Thread* self, LockLevel level); @@ -184,10 +183,7 @@ class LOCKABLE Mutex : public BaseMutex { void ExclusiveLock(Thread* self) ACQUIRE(); void Lock(Thread* self) ACQUIRE() { ExclusiveLock(self); } - // Returns true if acquires exclusive access, false otherwise. The `check` argument specifies - // whether lock level checking should be performed. Should be defaulted unless we are using - // TryLock instead of Lock for deadlock avoidance. - template <bool kCheck = kDebugLocking> + // Returns true if acquires exclusive access, false otherwise. bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true); bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); } // Equivalent to ExclusiveTryLock, but retry for a short period before giving up. @@ -346,7 +342,7 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex { void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); } // Try to acquire share of ReaderWriterMutex. - bool SharedTryLock(Thread* self, bool check = kDebugLocking) SHARED_TRYLOCK_FUNCTION(true); + bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true); // Release a share of the access. void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE; @@ -524,18 +520,6 @@ class SCOPED_CAPABILITY MutexLock { DISALLOW_COPY_AND_ASSIGN(MutexLock); }; -// Pretend to acquire a mutex for checking purposes, without actually doing so. Use with -// extreme caution when it is known the condition that the mutex would guard against cannot arise. 
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index dc9b885bf7..87e9525557 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -103,11 +103,10 @@ class BaseMutex {
   BaseMutex(const char* name, LockLevel level);
   virtual ~BaseMutex();

-  // Add this mutex to those owned by self, and optionally perform lock order checking. Caller
-  // may wish to disable checking for trylock calls that cannot result in deadlock. For this call
-  // only, self may also be another suspended thread.
-  void RegisterAsLocked(Thread* self, bool check = kDebugLocking);
-  void RegisterAsLockedImpl(Thread* self, LockLevel level, bool check);
+  // Add this mutex to those owned by self, and perform appropriate checking.
+  // For this call only, self may also be another suspended thread.
+  void RegisterAsLocked(Thread* self);
+  void RegisterAsLockedImpl(Thread* self, LockLevel level);

   void RegisterAsUnlocked(Thread* self);
   void RegisterAsUnlockedImpl(Thread* self, LockLevel level);
@@ -184,10 +183,7 @@ class LOCKABLE Mutex : public BaseMutex {
   void ExclusiveLock(Thread* self) ACQUIRE();
   void Lock(Thread* self) ACQUIRE() { ExclusiveLock(self); }

-  // Returns true if acquires exclusive access, false otherwise. The `check` argument specifies
-  // whether lock level checking should be performed. Should be defaulted unless we are using
-  // TryLock instead of Lock for deadlock avoidance.
-  template <bool kCheck = kDebugLocking>
+  // Returns true if acquires exclusive access, false otherwise.
   bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
   bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }
   // Equivalent to ExclusiveTryLock, but retry for a short period before giving up.
@@ -346,7 +342,7 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
   void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }

   // Try to acquire share of ReaderWriterMutex.
-  bool SharedTryLock(Thread* self, bool check = kDebugLocking) SHARED_TRYLOCK_FUNCTION(true);
+  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

   // Release a share of the access.
   void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
@@ -524,18 +520,6 @@ class SCOPED_CAPABILITY MutexLock {
   DISALLOW_COPY_AND_ASSIGN(MutexLock);
 };

-// Pretend to acquire a mutex for checking purposes, without actually doing so. Use with
-// extreme caution when it is known the condition that the mutex would guard against cannot arise.
-class SCOPED_CAPABILITY FakeMutexLock {
- public:
-  explicit FakeMutexLock(Mutex& mu) ACQUIRE(mu) NO_THREAD_SAFETY_ANALYSIS {}
-
-  ~FakeMutexLock() RELEASE() NO_THREAD_SAFETY_ANALYSIS {}
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(FakeMutexLock);
-};
-
 // Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
 // construction and releases it upon destruction.
 class SCOPED_CAPABILITY ReaderMutexLock {
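The FakeMutexLock deleted above claimed a capability for clang's -Wthread-safety analysis without taking the real lock, which is why it needed NO_THREAD_SAFETY_ANALYSIS and the "extreme caution" warning. For context, here is a self-contained sketch of how a genuine scoped guard cooperates with that analysis, in the style of the MutexLock classes this header keeps (simplified macros, not ART's definitions; compile with clang and -Wthread-safety):

#include <mutex>

#if defined(__clang__)
#define TS(x) __attribute__((x))
#else
#define TS(x)  // thread-safety attributes are a clang extension
#endif

class TS(capability("mutex")) SimpleMutex {
 public:
  void Lock() TS(acquire_capability()) { mu_.lock(); }
  void Unlock() TS(release_capability()) { mu_.unlock(); }

 private:
  std::mutex mu_;
};

// Scoped guard: the analysis treats the capability as held for the
// lifetime of this object, so guarded data may be touched in scope.
class TS(scoped_lockable) SimpleMutexLock {
 public:
  explicit SimpleMutexLock(SimpleMutex& mu) TS(acquire_capability(mu)) : mu_(mu) { mu_.Lock(); }
  ~SimpleMutexLock() TS(release_capability()) { mu_.Unlock(); }
  SimpleMutexLock(const SimpleMutexLock&) = delete;
  SimpleMutexLock& operator=(const SimpleMutexLock&) = delete;

 private:
  SimpleMutex& mu_;
};

Unlike the sketch's guard, FakeMutexLock's constructor asserted ACQUIRE(mu) while its body did nothing, so the analyzer believed the lock was held when it was not; removing it eliminates that foot-gun once no callers remain.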