author     2017-09-19 16:37:30 +0000
committer  2017-09-19 16:37:30 +0000
commit     4d159807a4854caa6396b708a38bbd6fa49d736f (patch)
tree       25c2ea8ce12881cf26c933c89a0a3096d260de46
parent     08601a494e87dfba9b06f9fdea37c9342e4896d1 (diff)
parent     0882af2e3ca253184b6ab56a8966a2f37407144e (diff)
Merge "Shrink ART Mutex exclusive_owner_ field to Atomic<pid_t>"
-rw-r--r--  runtime/base/mutex-inl.h | 24
-rw-r--r--  runtime/base/mutex.cc    | 51
-rw-r--r--  runtime/base/mutex.h     | 16
-rw-r--r--  runtime/thread.cc        |  6
-rw-r--r--  runtime/thread.h         |  2
5 files changed, 52 insertions, 47 deletions
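The diff below shrinks the 64-bit volatile exclusive_owner_ field to a 32-bit Atomic<pid_t>, routes reads through GetExclusiveOwnerTid() and writes through StoreRelaxed(). Relaxed ordering suffices because the lock's own state_ transitions provide the acquire/release ordering; the owner field is only consulted by the owner itself and by debug dumps. A minimal standalone sketch of that pattern, using std::atomic and a spin loop in place of ART's Atomic<> wrapper and futex wait path (SimpleMutex and GetTid here are illustrative stand-ins, not ART code):

// Minimal standalone sketch, not ART's implementation: an exclusive lock that
// tracks its owner in a lock-free 32-bit atomic, as the diff does with
// Atomic<pid_t>.
#include <atomic>
#include <cassert>
#include <unistd.h>       // pid_t, syscall()
#include <sys/syscall.h>  // SYS_gettid

// Mirrors the diff's static_assert: owner updates are expected to be lock-free.
static_assert(sizeof(pid_t) <= sizeof(int32_t), "pid_t should fit in 32 bits");

static pid_t GetTid() {
  return static_cast<pid_t>(syscall(SYS_gettid));
}

class SimpleMutex {  // Hypothetical name; stands in for art::Mutex.
 public:
  void ExclusiveLock() {
    // The acquire on state_ synchronizes with the release in ExclusiveUnlock,
    // so the owner field itself only needs relaxed accesses (LoadRelaxed /
    // StoreRelaxed in ART's Atomic<> wrapper).
    while (state_.exchange(1, std::memory_order_acquire) != 0) {
      // Spin. ART instead parks contenders on a futex and counts them.
    }
    assert(exclusive_owner_.load(std::memory_order_relaxed) == 0);
    exclusive_owner_.store(GetTid(), std::memory_order_relaxed);
  }

  void ExclusiveUnlock() {
    assert(GetExclusiveOwnerTid() == GetTid());
    // Clear the owner before the release store on state_, so the next locker
    // is guaranteed to observe a zero owner (matching the DCHECKs below).
    exclusive_owner_.store(0, std::memory_order_relaxed);
    state_.store(0, std::memory_order_release);
  }

  // As in the diff: no ordering guarantees for threads other than the owner.
  pid_t GetExclusiveOwnerTid() const {
    return exclusive_owner_.load(std::memory_order_relaxed);
  }

 private:
  std::atomic<int32_t> state_{0};          // 0 is unheld, 1 is held.
  std::atomic<pid_t> exclusive_owner_{0};  // Was volatile uint64_t before.
};

Because the relaxed store of 0 to the owner is ordered before the release store to state_, the next locker's acquire load cannot observe a stale owner tid, which is exactly what the DCHECK_EQ(GetExclusiveOwnerTid(), 0) assertions in the diff rely on.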
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 0ac2399a5d..c9d48ff7f7 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -44,11 +44,15 @@ static inline int futex(volatile int *uaddr, int op, int val, const struct times
 }
 #endif  // ART_USE_FUTEXES
 
-static inline uint64_t SafeGetTid(const Thread* self) {
+// The following isn't strictly necessary, but we want updates on Atomic<pid_t> to be lock-free.
+// TODO: Use std::atomic::is_always_lock_free after switching to C++17 atomics.
+static_assert(sizeof(pid_t) <= sizeof(int32_t), "pid_t should fit in 32 bits");
+
+static inline pid_t SafeGetTid(const Thread* self) {
   if (self != nullptr) {
-    return static_cast<uint64_t>(self->GetTid());
+    return self->GetTid();
   } else {
-    return static_cast<uint64_t>(GetTid());
+    return GetTid();
   }
 }
@@ -142,14 +146,14 @@ inline void ReaderWriterMutex::SharedLock(Thread* self) {
 #else
   CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
 #endif
-  DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
+  DCHECK(GetExclusiveOwnerTid() == 0 || GetExclusiveOwnerTid() == -1);
   RegisterAsLocked(self);
   AssertSharedHeld(self);
 }
 
 inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
   DCHECK(self == nullptr || self == Thread::Current());
-  DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
+  DCHECK(GetExclusiveOwnerTid() == 0 || GetExclusiveOwnerTid() == -1);
   AssertSharedHeld(self);
   RegisterAsUnlocked(self);
 #if ART_USE_FUTEXES
@@ -190,8 +194,8 @@ inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
   return result;
 }
 
-inline uint64_t Mutex::GetExclusiveOwnerTid() const {
-  return exclusive_owner_;
+inline pid_t Mutex::GetExclusiveOwnerTid() const {
+  return exclusive_owner_.LoadRelaxed();
 }
 
 inline void Mutex::AssertExclusiveHeld(const Thread* self) const {
@@ -216,7 +220,7 @@ inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
   return result;
 }
 
-inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
+inline pid_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
 #if ART_USE_FUTEXES
   int32_t state = state_.LoadRelaxed();
   if (state == 0) {
@@ -224,10 +228,10 @@ inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
   } else if (state > 0) {
     return -1;  // Shared.
   } else {
-    return exclusive_owner_;
+    return exclusive_owner_.LoadRelaxed();
   }
 #else
-  return exclusive_owner_;
+  return exclusive_owner_.LoadRelaxed();
 #endif
 }
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 6392198bdd..d1ea13ab18 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -361,14 +361,13 @@ void BaseMutex::DumpContention(std::ostream& os) const {
 }
 
 Mutex::Mutex(const char* name, LockLevel level, bool recursive)
-    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
+    : BaseMutex(name, level), exclusive_owner_(0), recursive_(recursive), recursion_count_(0) {
 #if ART_USE_FUTEXES
   DCHECK_EQ(0, state_.LoadRelaxed());
   DCHECK_EQ(0, num_contenders_.LoadRelaxed());
 #else
   CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
 #endif
-  exclusive_owner_ = 0;
 }
 
 // Helper to allow checking shutdown while locking for thread safety.
@@ -382,9 +381,9 @@ Mutex::~Mutex() {
 #if ART_USE_FUTEXES
   if (state_.LoadRelaxed() != 0) {
     LOG(safe_to_call_abort ? FATAL : WARNING)
-        << "destroying mutex with owner: " << exclusive_owner_;
+        << "destroying mutex with owner: " << GetExclusiveOwnerTid();
   } else {
-    if (exclusive_owner_ != 0) {
+    if (GetExclusiveOwnerTid() != 0) {
       LOG(safe_to_call_abort ? FATAL : WARNING)
           << "unexpectedly found an owner on unlocked mutex " << name_;
     }
@@ -439,8 +438,8 @@ void Mutex::ExclusiveLock(Thread* self) {
 #else
     CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
 #endif
-    DCHECK_EQ(exclusive_owner_, 0U);
-    exclusive_owner_ = SafeGetTid(self);
+    DCHECK_EQ(GetExclusiveOwnerTid(), 0);
+    exclusive_owner_.StoreRelaxed(SafeGetTid(self));
     RegisterAsLocked(self);
   }
   recursion_count_++;
@@ -479,8 +478,8 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
       PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
     }
 #endif
-    DCHECK_EQ(exclusive_owner_, 0U);
-    exclusive_owner_ = SafeGetTid(self);
+    DCHECK_EQ(GetExclusiveOwnerTid(), 0);
+    exclusive_owner_.StoreRelaxed(SafeGetTid(self));
     RegisterAsLocked(self);
   }
   recursion_count_++;
@@ -506,7 +505,7 @@ void Mutex::ExclusiveUnlock(Thread* self) {
                << " Thread::Current()=" << name2;
   }
   AssertHeld(self);
-  DCHECK_NE(exclusive_owner_, 0U);
+  DCHECK_NE(GetExclusiveOwnerTid(), 0);
   recursion_count_--;
   if (!recursive_ || recursion_count_ == 0) {
     if (kDebugLocking) {
@@ -520,9 +519,9 @@ void Mutex::ExclusiveUnlock(Thread* self) {
       int32_t cur_state = state_.LoadRelaxed();
       if (LIKELY(cur_state == 1)) {
         // We're no longer the owner.
-        exclusive_owner_ = 0;
+        exclusive_owner_.StoreRelaxed(0);
         // Change state to 0 and impose load/store ordering appropriate for lock release.
-        // Note, the relaxed loads below musn't reorder before the CompareExchange.
+        // Note, the relaxed loads below mustn't reorder before the CompareExchange.
         // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
         // a status bit into the state on contention.
         done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
@@ -547,7 +546,7 @@ void Mutex::ExclusiveUnlock(Thread* self) {
     }
   } while (!done);
 #else
-  exclusive_owner_ = 0;
+  exclusive_owner_.StoreRelaxed(0);
   CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
 #endif
 }
@@ -588,13 +587,13 @@ ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
 #if !ART_USE_FUTEXES
   CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
 #endif
-  exclusive_owner_ = 0;
+  exclusive_owner_.StoreRelaxed(0);
 }
 
 ReaderWriterMutex::~ReaderWriterMutex() {
 #if ART_USE_FUTEXES
   CHECK_EQ(state_.LoadRelaxed(), 0);
-  CHECK_EQ(exclusive_owner_, 0U);
+  CHECK_EQ(GetExclusiveOwnerTid(), 0);
   CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0);
   CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
 #else
@@ -640,8 +639,8 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) {
 #else
   CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
 #endif
-  DCHECK_EQ(exclusive_owner_, 0U);
-  exclusive_owner_ = SafeGetTid(self);
+  DCHECK_EQ(GetExclusiveOwnerTid(), 0);
+  exclusive_owner_.StoreRelaxed(SafeGetTid(self));
   RegisterAsLocked(self);
   AssertExclusiveHeld(self);
 }
@@ -650,14 +649,14 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
   DCHECK(self == nullptr || self == Thread::Current());
   AssertExclusiveHeld(self);
   RegisterAsUnlocked(self);
-  DCHECK_NE(exclusive_owner_, 0U);
+  DCHECK_NE(GetExclusiveOwnerTid(), 0);
 #if ART_USE_FUTEXES
   bool done = false;
   do {
     int32_t cur_state = state_.LoadRelaxed();
     if (LIKELY(cur_state == -1)) {
       // We're no longer the owner.
-      exclusive_owner_ = 0;
+      exclusive_owner_.StoreRelaxed(0);
       // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
       // Note, the relaxed loads below musn't reorder before the CompareExchange.
       // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
@@ -675,7 +674,7 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
     }
   } while (!done);
 #else
-  exclusive_owner_ = 0;
+  exclusive_owner_.StoreRelaxed(0);
   CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
 #endif
 }
@@ -731,7 +730,7 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32
     PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
   }
 #endif
-  exclusive_owner_ = SafeGetTid(self);
+  exclusive_owner_.StoreRelaxed(SafeGetTid(self));
   RegisterAsLocked(self);
   AssertSharedHeld(self);
   return true;
@@ -955,11 +954,11 @@ void ConditionVariable::WaitHoldingLocks(Thread* self) {
   CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
   guard_.num_contenders_--;
 #else
-  uint64_t old_owner = guard_.exclusive_owner_;
-  guard_.exclusive_owner_ = 0;
+  pid_t old_owner = guard_.GetExclusiveOwnerTid();
+  guard_.exclusive_owner_.StoreRelaxed(0);
   guard_.recursion_count_ = 0;
   CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
-  guard_.exclusive_owner_ = old_owner;
+  guard_.exclusive_owner_.StoreRelaxed(old_owner);
 #endif
   guard_.recursion_count_ = old_recursion_count;
 }
@@ -1001,8 +1000,8 @@ bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
 #else
   int clock = CLOCK_REALTIME;
 #endif
-  uint64_t old_owner = guard_.exclusive_owner_;
-  guard_.exclusive_owner_ = 0;
+  pid_t old_owner = guard_.GetExclusiveOwnerTid();
+  guard_.exclusive_owner_.StoreRelaxed(0);
   guard_.recursion_count_ = 0;
   timespec ts;
   InitTimeSpec(true, clock, ms, ns, &ts);
@@ -1013,7 +1012,7 @@ bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
     errno = rc;
     PLOG(FATAL) << "TimedWait failed for " << name_;
   }
-  guard_.exclusive_owner_ = old_owner;
+  guard_.exclusive_owner_.StoreRelaxed(old_owner);
 #endif
   guard_.recursion_count_ = old_recursion_count;
   return timed_out;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 7a472e741b..189c0d0030 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -19,6 +19,7 @@
 
 #include <pthread.h>
 #include <stdint.h>
+#include <unistd.h>  // for pid_t
 
 #include <iosfwd>
 #include <string>
@@ -263,7 +264,7 @@ class LOCKABLE Mutex : public BaseMutex {
 
   // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
   // than the owner.
-  uint64_t GetExclusiveOwnerTid() const;
+  pid_t GetExclusiveOwnerTid() const;
 
   // Returns how many times this Mutex has been locked, it is better to use AssertHeld/NotHeld.
   unsigned int GetDepth() const {
@@ -282,12 +283,12 @@
   // 0 is unheld, 1 is held.
   AtomicInteger state_;
   // Exclusive owner.
-  volatile uint64_t exclusive_owner_;
+  Atomic<pid_t> exclusive_owner_;
   // Number of waiting contenders.
   AtomicInteger num_contenders_;
 #else
   pthread_mutex_t mutex_;
-  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
+  Atomic<pid_t> exclusive_owner_;  // Guarded by mutex_. Asynchronous reads are OK.
 #endif
   const bool recursive_;  // Can the lock be recursively held?
   unsigned int recursion_count_;
@@ -385,8 +386,9 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
   }
 
   // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
-  // than the owner.
-  uint64_t GetExclusiveOwnerTid() const;
+  // than the owner. Returns 0 if the lock is not held. Returns either 0 or -1 if it is held by
+  // one or more readers.
+  pid_t GetExclusiveOwnerTid() const;
 
   virtual void Dump(std::ostream& os) const;
@@ -403,14 +405,14 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
   // -1 implies held exclusive, +ve shared held by state_ many owners.
   AtomicInteger state_;
   // Exclusive owner. Modification guarded by this mutex.
-  volatile uint64_t exclusive_owner_;
+  Atomic<pid_t> exclusive_owner_;
   // Number of contenders waiting for a reader share.
   AtomicInteger num_pending_readers_;
   // Number of contenders waiting to be the writer.
   AtomicInteger num_pending_writers_;
 #else
   pthread_rwlock_t rwlock_;
-  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
+  Atomic<pid_t> exclusive_owner_;  // Writes guarded by rwlock_. Asynchronous reads are OK.
 #endif
   DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
 };
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6e5594d620..968a23b926 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1737,7 +1737,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
         os << " \"" << mutex->GetName() << "\"";
         if (mutex->IsReaderWriterMutex()) {
           ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
-          if (rw_mutex->GetExclusiveOwnerTid() == static_cast<uint64_t>(tid)) {
+          if (rw_mutex->GetExclusiveOwnerTid() == tid) {
             os << "(exclusive held)";
           } else {
             os << "(shared held)";
@@ -2292,7 +2292,7 @@ bool Thread::HandleScopeContains(jobject obj) const {
   return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
 }
 
-void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
+void Thread::HandleScopeVisitRoots(RootVisitor* visitor, pid_t thread_id) {
   BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
       visitor, RootInfo(kRootNativeStack, thread_id));
   for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
@@ -3458,7 +3458,7 @@ class RootCallbackVisitor {
 
 template <bool kPrecise>
 void Thread::VisitRoots(RootVisitor* visitor) {
-  const uint32_t thread_id = GetThreadId();
+  const pid_t thread_id = GetThreadId();
   visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
   if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
     visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
diff --git a/runtime/thread.h b/runtime/thread.h
index 94c0af2e6f..2e4a3da348 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -839,7 +839,7 @@ class Thread {
   // Is the given obj in this thread's stack indirect reference table?
   bool HandleScopeContains(jobject obj) const;
 
-  void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
+  void HandleScopeVisitRoots(RootVisitor* visitor, pid_t thread_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   BaseHandleScope* GetTopHandleScope() {
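The TODO added in mutex-inl.h points at the C++17 replacement for the size-based static_assert: once C++17 atomics are in use, the lock-freedom requirement can be asserted directly instead of being inferred from sizeof(pid_t). A sketch of what that might look like, with std::atomic standing in for ART's Atomic<> wrapper:

// C++17 sketch of the TODO above, not part of this change: assert
// lock-freedom directly. On common Linux ABIs pid_t is a 32-bit int,
// so this and the sizeof-based check both hold.
#include <atomic>
#include <unistd.h>  // pid_t

static_assert(std::atomic<pid_t>::is_always_lock_free,
              "updates to the owner tid should be lock-free");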