diff options
Diffstat (limited to 'runtime/base/mutex.h')
| -rw-r--r-- | runtime/base/mutex.h | 21 |
1 file changed, 13 insertions, 8 deletions
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index c59664b9cd..2414b5f937 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -62,10 +62,11 @@ enum LockLevel { kJdwpAdbStateLock, kJdwpSocketLock, kRegionSpaceRegionLock, + kMarkSweepMarkStackLock, kRosAllocGlobalLock, kRosAllocBracketLock, kRosAllocBulkFreeLock, - kMarkSweepMarkStackLock, + kTaggingLockLevel, kTransactionLogLock, kJniFunctionTableLock, kJniWeakGlobalsLock, @@ -516,12 +517,12 @@ class SCOPED_CAPABILITY MutexLock { // construction and releases it upon destruction. class SCOPED_CAPABILITY ReaderMutexLock { public: - ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) : + ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) ALWAYS_INLINE : self_(self), mu_(mu) { mu_.SharedLock(self_); } - ~ReaderMutexLock() RELEASE() { + ~ReaderMutexLock() RELEASE() ALWAYS_INLINE { mu_.SharedUnlock(self_); } @@ -583,6 +584,12 @@ class Locks { // Checks for whether it is safe to call Abort() without using locks. static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS; + // Add a mutex to expected_mutexes_on_weak_ref_access_. + static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true); + // Remove a mutex from expected_mutexes_on_weak_ref_access_. + static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true); + // Check if the given mutex is in expected_mutexes_on_weak_ref_access_. + static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex); // Guards allocation entrypoint instrumenting. static Mutex* instrument_entrypoints_lock_; @@ -630,12 +637,8 @@ class Locks { // Guards shutdown of the runtime. static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_); - static Mutex* jdwp_event_list_lock_ - ACQUIRED_AFTER(runtime_shutdown_lock_) - ACQUIRED_BEFORE(breakpoint_lock_); - // Guards background profiler global state. 
- static Mutex* profiler_lock_ ACQUIRED_AFTER(jdwp_event_list_lock_); + static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_); // Guards trace (ie traceview) requests. static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_); @@ -738,6 +741,8 @@ class Locks { // encounter an unexpected mutex on accessing weak refs, // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it. static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_; + static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_; + class ScopedExpectedMutexesOnWeakRefAccessLock; }; class Roles { |