-rw-r--r--  runtime/base/mutex-inl.h |  40
-rw-r--r--  runtime/base/mutex.h     |   8
-rw-r--r--  runtime/monitor.cc       | 208
-rw-r--r--  runtime/monitor.h        |   5
-rw-r--r--  runtime/thread.cc        |   2
5 files changed, 189 insertions(+), 74 deletions(-)
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index e775fe4505..5daead9901 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -91,6 +91,15 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
     CheckUnattachedThread(level_);
     return;
   }
+  LockLevel level = level_;
+  // It would be nice to avoid this condition checking in the non-debug case,
+  // but that would make the various methods that check if a mutex is held not
+  // work properly for thread wait locks. Since the vast majority of lock
+  // acquisitions are not thread wait locks, this check should not be too
+  // expensive.
+  if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitLock) != nullptr) {
+    level = kThreadWaitWakeLock;
+  }
   if (kDebugLocking) {
     // Check if a bad Mutex of this level or lower is held.
     bool bad_mutexes_held = false;
@@ -98,13 +107,13 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
     // mutator_lock_ exclusive. This is because we suspending when holding locks at this level is
     // not allowed and if we hold the mutator_lock_ exclusive we must unsuspend stuff eventually
     // so there are no deadlocks.
-    if (level_ == kTopLockLevel &&
+    if (level == kTopLockLevel &&
         Locks::mutator_lock_->IsSharedHeld(self) &&
         !Locks::mutator_lock_->IsExclusiveHeld(self)) {
       LOG(ERROR) << "Lock level violation: holding \"" << Locks::mutator_lock_->name_ << "\" "
                  << "(level " << kMutatorLock << " - " << static_cast<int>(kMutatorLock)
                  << ") non-exclusive while locking \"" << name_ << "\" "
-                 << "(level " << level_ << " - " << static_cast<int>(level_) << ") a top level"
+                 << "(level " << level << " - " << static_cast<int>(level) << ") a top level"
                  << "mutex. This is not allowed.";
       bad_mutexes_held = true;
     } else if (this == Locks::mutator_lock_ && self->GetHeldMutex(kTopLockLevel) != nullptr) {
@@ -113,10 +122,10 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
                  << "not allowed.";
       bad_mutexes_held = true;
     }
-    for (int i = level_; i >= 0; --i) {
+    for (int i = level; i >= 0; --i) {
       LockLevel lock_level_i = static_cast<LockLevel>(i);
       BaseMutex* held_mutex = self->GetHeldMutex(lock_level_i);
-      if (level_ == kTopLockLevel &&
+      if (level == kTopLockLevel &&
           lock_level_i == kMutatorLock &&
           Locks::mutator_lock_->IsExclusiveHeld(self)) {
         // This is checked above.
@@ -125,7 +134,7 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
         LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
                    << "(level " << lock_level_i << " - " << i
                    << ") while locking \"" << name_ << "\" "
-                   << "(level " << level_ << " - " << static_cast<int>(level_) << ")";
+                   << "(level " << level << " - " << static_cast<int>(level) << ")";
         if (lock_level_i > kAbortLock) {
           // Only abort in the check below if this is more than abort level lock.
           bad_mutexes_held = true;
@@ -138,8 +147,8 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
   }
   // Don't record monitors as they are outside the scope of analysis. They may be inspected off of
   // the monitor list.
-  if (level_ != kMonitorLock) {
-    self->SetHeldMutex(level_, this);
+  if (level != kMonitorLock) {
+    self->SetHeldMutex(level, this);
   }
 }

@@ -149,10 +158,17 @@ inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
     return;
   }
   if (level_ != kMonitorLock) {
+    auto level = level_;
+    if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitWakeLock) == this) {
+      level = kThreadWaitWakeLock;
+    }
     if (kDebugLocking && gAborting == 0) {  // Avoid recursive aborts.
-      CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
+      if (level == kThreadWaitWakeLock) {
+        CHECK(self->GetHeldMutex(kThreadWaitLock) != nullptr)
+            << "Held " << kThreadWaitWakeLock << " without " << kThreadWaitLock;
+      }
+      CHECK(self->GetHeldMutex(level) == this) << "Unlocking on unacquired mutex: " << name_;
     }
-    self->SetHeldMutex(level_, nullptr);
+    self->SetHeldMutex(level, nullptr);
   }
 }

@@ -214,7 +230,11 @@ inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
   if (kDebugLocking) {
     // Sanity debug check that if we think it is locked we have it in our held mutexes.
     if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
-      CHECK_EQ(self->GetHeldMutex(level_), this);
+      if (level_ == kThreadWaitLock && self->GetHeldMutex(kThreadWaitLock) != this) {
+        CHECK_EQ(self->GetHeldMutex(kThreadWaitWakeLock), this);
+      } else {
+        CHECK_EQ(self->GetHeldMutex(level_), this);
+      }
     }
   }
   return result;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 7711be9c90..0c8fe58252 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -68,6 +68,14 @@ enum LockLevel : uint8_t {
   // A generic lock level for mutexs that should not allow any additional mutexes to be gained after
   // acquiring it.
   kGenericBottomLock,
+  // Tracks the second acquisition at the same lock level for kThreadWaitLock. This is an exception
+  // to the normal lock ordering, used to implement Monitor::Wait - while holding one kThreadWait
+  // level lock, it is permitted to acquire a second one - with internal safeguards to ensure that
+  // the second lock acquisition does not result in deadlock. This is implemented in the lock
+  // order by treating the second acquisition of a kThreadWaitLock as a kThreadWaitWakeLock
+  // acquisition. Thus, acquiring kThreadWaitWakeLock requires holding kThreadWaitLock.
+  kThreadWaitWakeLock,
+  kThreadWaitLock,
   kJdwpAdbStateLock,
   kJdwpSocketLock,
   kRegionSpaceRegionLock,
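The kThreadWaitLock/kThreadWaitWakeLock pair is easiest to see in isolation. Below is a minimal sketch of the newly permitted nesting; it is not part of the patch, the helper and mutex names are hypothetical, and it assumes the ART-internal Mutex and Thread APIs exactly as they appear in the hunks above.

#include "base/mutex.h"
#include "thread.h"

namespace art {

// Hypothetical helper, not part of the patch. Both mutexes are created at
// kThreadWaitLock, as thread.cc (last hunk of this change) now does for
// every Thread's wait_mutex_.
void NestThreadWaitLocks(Mutex& own, Mutex& other) {
  Thread* self = Thread::Current();
  own.Lock(self);    // registered at kThreadWaitLock
  other.Lock(self);  // same declared level; RegisterAsLocked re-registers it
                     // one slot down, as kThreadWaitWakeLock
  // A third acquisition at this level would still abort under kDebugLocking,
  // since the kThreadWaitWakeLock slot is now occupied.
  other.Unlock(self);  // release in reverse order: RegisterAsUnlocked checks
                       // that kThreadWaitLock is still held
  own.Unlock(self);
}

}  // namespace art

Exactly one level of nesting becomes legal, which is all Monitor::Wait needs: the waiting thread holds its own wait mutex while the wake path touches at most one other thread's wait mutex at a time.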
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 0f0a378142..0353efdfbc 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -97,6 +97,7 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
       lock_count_(0),
       obj_(GcRoot<mirror::Object>(obj)),
       wait_set_(nullptr),
+      wake_set_(nullptr),
       hash_code_(hash_code),
       locking_method_(nullptr),
       locking_dex_pc_(0),
@@ -120,6 +121,7 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
       lock_count_(0),
       obj_(GcRoot<mirror::Object>(obj)),
       wait_set_(nullptr),
+      wake_set_(nullptr),
       hash_code_(hash_code),
       locking_method_(nullptr),
       locking_dex_pc_(0),
@@ -226,7 +228,8 @@ Monitor::~Monitor() {
 }

 void Monitor::AppendToWaitSet(Thread* thread) {
-  DCHECK(owner_ == Thread::Current());
+  // Not checking that the owner is equal to this thread, since we've released
+  // the monitor by the time this method is called.
   DCHECK(thread != nullptr);
   DCHECK(thread->GetWaitNext() == nullptr) << thread->GetWaitNext();
   if (wait_set_ == nullptr) {
@@ -245,24 +248,29 @@ void Monitor::AppendToWaitSet(Thread* thread) {
 void Monitor::RemoveFromWaitSet(Thread *thread) {
   DCHECK(owner_ == Thread::Current());
   DCHECK(thread != nullptr);
-  if (wait_set_ == nullptr) {
-    return;
-  }
-  if (wait_set_ == thread) {
-    wait_set_ = thread->GetWaitNext();
-    thread->SetWaitNext(nullptr);
-    return;
-  }
-
-  Thread* t = wait_set_;
-  while (t->GetWaitNext() != nullptr) {
-    if (t->GetWaitNext() == thread) {
-      t->SetWaitNext(thread->GetWaitNext());
-      thread->SetWaitNext(nullptr);
-      return;
+  auto remove = [&](Thread*& set) {
+    if (set != nullptr) {
+      if (set == thread) {
+        set = thread->GetWaitNext();
+        thread->SetWaitNext(nullptr);
+        return true;
+      }
+      Thread* t = set;
+      while (t->GetWaitNext() != nullptr) {
+        if (t->GetWaitNext() == thread) {
+          t->SetWaitNext(thread->GetWaitNext());
+          thread->SetWaitNext(nullptr);
+          return true;
+        }
+        t = t->GetWaitNext();
+      }
     }
-    t = t->GetWaitNext();
+    return false;
+  };
+  if (remove(wait_set_)) {
+    return;
   }
+  remove(wake_set_);
 }

 void Monitor::SetObject(mirror::Object* object) {
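RemoveFromWaitSet now has to search two intrusive singly-linked lists, so the scan is factored into a lambda that takes the list head by reference (Thread*&) and can rewrite whichever head it was handed. A standalone model of that pattern follows; Node and next are hypothetical stand-ins for Thread and its wait_next_ link.

#include <cassert>

struct Node {
  Node* next = nullptr;
};

// Unlink node from the intrusive list rooted at head; return whether found.
bool RemoveFrom(Node*& head, Node* node) {
  if (head == nullptr) return false;
  if (head == node) {
    head = node->next;
    node->next = nullptr;
    return true;
  }
  for (Node* t = head; t->next != nullptr; t = t->next) {
    if (t->next == node) {
      t->next = node->next;
      node->next = nullptr;
      return true;
    }
  }
  return false;
}

int main() {
  Node a, b, c;
  Node* wait_set = &a; a.next = &b;  // wait set: a -> b
  Node* wake_set = &c;               // wake set: c
  // Mirrors RemoveFromWaitSet: try the wait set first, then the wake set.
  if (!RemoveFrom(wait_set, &c)) RemoveFrom(wake_set, &c);
  assert(wake_set == nullptr);
}

Passing the head as a reference is what lets one lambda serve both wait_set_ and wake_set_ without duplicating the unlink logic.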
@@ -699,33 +707,102 @@ void Monitor::FailedUnlock(mirror::Object* o,
 bool Monitor::Unlock(Thread* self) {
   DCHECK(self != nullptr);
   uint32_t owner_thread_id = 0u;
-  {
-    MutexLock mu(self, monitor_lock_);
-    Thread* owner = owner_;
-    if (owner != nullptr) {
-      owner_thread_id = owner->GetThreadId();
-    }
-    if (owner == self) {
-      // We own the monitor, so nobody else can be in here.
-      AtraceMonitorUnlock();
-      if (lock_count_ == 0) {
-        owner_ = nullptr;
-        locking_method_ = nullptr;
-        locking_dex_pc_ = 0;
-        // Wake a contender.
-        monitor_contenders_.Signal(self);
-      } else {
-        --lock_count_;
-      }
+  DCHECK(!monitor_lock_.IsExclusiveHeld(self));
+  monitor_lock_.Lock(self);
+  Thread* owner = owner_;
+  if (owner != nullptr) {
+    owner_thread_id = owner->GetThreadId();
+  }
+  if (owner == self) {
+    // We own the monitor, so nobody else can be in here.
+    AtraceMonitorUnlock();
+    if (lock_count_ == 0) {
+      owner_ = nullptr;
+      locking_method_ = nullptr;
+      locking_dex_pc_ = 0;
+      SignalContendersAndReleaseMonitorLock(self);
+      return true;
+    } else {
+      --lock_count_;
+      monitor_lock_.Unlock(self);
       return true;
     }
   }
   // We don't own this, so we're not allowed to unlock it.
   // The JNI spec says that we should throw IllegalMonitorStateException in this case.
   FailedUnlock(GetObject(), self->GetThreadId(), owner_thread_id, this);
+  monitor_lock_.Unlock(self);
   return false;
 }

+void Monitor::SignalContendersAndReleaseMonitorLock(Thread* self) {
+  // We want to signal one thread to wake up, to acquire the monitor that
+  // we are releasing. This could either be a Thread waiting on its own
+  // ConditionVariable, or a thread waiting on monitor_contenders_.
+  while (true) {
+    Thread* thread = wake_set_;
+    if (thread == nullptr) {
+      break;
+    } else if (thread == self) {
+      // In the case of wait(), this will be invoked with self's GetWaitMutex held.
+      // On a second run through this while loop, we will have released and reacquired
+      // monitor_lock_, so it is possible that self has moved into wake_set_. Since we
+      // don't want to signal ourselves before waiting or recursively acquire GetWaitMutex,
+      // skip ourselves if we encounter it while traversing the wake set.
+      thread = self->GetWaitNext();
+      if (thread == nullptr) {
+        break;
+      }
+      self->SetWaitNext(thread->GetWaitNext());
+    } else {
+      wake_set_ = thread->GetWaitNext();
+    }
+    thread->SetWaitNext(nullptr);
+
+    // Release the lock, so that a potentially awakened thread will not
+    // immediately contend on it.
+    monitor_lock_.Unlock(self);
+    // Check to see if the thread is still waiting.
+    {
+      // In the case of wait(), we'll be acquiring another thread's GetWaitMutex with
+      // self's GetWaitMutex held. This does not risk deadlock, because we only acquire this lock
+      // for threads in the wake_set_. A thread can only enter wake_set_ from Notify or NotifyAll,
+      // and those acquire each thread's GetWaitMutex before moving them. Thus, the threads whose
+      // wait mutexes we acquire here must have already been released from wait(), so there is no
+      // risk of the following lock ordering leading to deadlock:
+      // Thread 1 waits
+      // Thread 2 waits
+      // While threads 1 and 2 have released both the monitor and the monitor_lock_, thread 3
+      // calls notify() to wake them both up.
+      // Thread 1 enters this block, and attempts to acquire Thread 2's GetWaitMutex to wake it
+      // Thread 2 enters this block, and attempts to acquire Thread 1's GetWaitMutex to wake it
+      //
+      // Thanks to the lock checking in Notify and NotifyAll, no thread is observable in wake_set_
+      // unless that thread has actually started waiting (and therefore will not subsequently
+      // acquire another thread's GetWaitMutex while holding its own).
+      MutexLock wait_mu(self, *thread->GetWaitMutex());
+      if (thread->GetWaitMonitor() != nullptr) {
+        thread->GetWaitConditionVariable()->Signal(self);
+        return;
+      }
+    }
+    // Reacquire the lock for the next iteration.
+    monitor_lock_.Lock(self);
+    // We had to reacquire the lock so that we can wake a contender, or look
+    // for another notified waiter thread, but if someone else has already acquired our
+    // monitor, there's no need to wake anybody else as they'll just contend
+    // with the current owner.
+    if (owner_ != nullptr) {
+      monitor_lock_.Unlock(self);
+      return;
+    }
+  }
+  // If we didn't wake any threads that were originally waiting on us,
+  // wake a contender.
+  monitor_contenders_.Signal(self);
+  monitor_lock_.Unlock(self);
+}
+
 void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
                    bool interruptShouldThrow, ThreadState why) {
   DCHECK(self != nullptr);
@@ -755,17 +832,9 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
   }

   /*
-   * Add ourselves to the set of threads waiting on this monitor, and
-   * release our hold. We need to let it go even if we're a few levels
+   * Release our hold - we need to let it go even if we're a few levels
    * deep in a recursive lock, and we need to restore that later.
-   *
-   * We append to the wait set ahead of clearing the count and owner
-   * fields so the subroutine can check that the calling thread owns
-   * the monitor. Aside from that, the order of member updates is
-   * not order sensitive as we hold the pthread mutex.
    */
-  AppendToWaitSet(self);
-  ++num_waiters_;
   int prev_lock_count = lock_count_;
   lock_count_ = 0;
   owner_ = nullptr;
@@ -790,6 +859,17 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
     // Pseudo-atomically wait on self's wait_cond_ and release the monitor lock.
     MutexLock mu(self, *self->GetWaitMutex());

+    /*
+     * Add ourselves to the set of threads waiting on this monitor.
+     * It's important that we are only added to the wait set after
+     * acquiring our GetWaitMutex, so that calls to Notify() that occur after we
+     * have released monitor_lock_ will not move us from wait_set_ to wake_set_
+     * until we've signalled contenders on this monitor.
+     */
+    AppendToWaitSet(self);
+    ++num_waiters_;
+
+
     // Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
     // non-null a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
     // up.
@@ -797,8 +877,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
     self->SetWaitMonitor(this);

     // Release the monitor lock.
-    monitor_contenders_.Signal(self);
-    monitor_lock_.Unlock(self);
+    SignalContendersAndReleaseMonitorLock(self);

   // Handle the case where the thread was interrupted before we called wait().
   if (self->IsInterrupted()) {
@@ -874,18 +953,16 @@ void Monitor::Notify(Thread* self) {
     ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
     return;
   }
-  // Signal the first waiting thread in the wait set.
-  while (wait_set_ != nullptr) {
-    Thread* thread = wait_set_;
-    wait_set_ = thread->GetWaitNext();
-    thread->SetWaitNext(nullptr);
-
-    // Check to see if the thread is still waiting.
-    MutexLock wait_mu(self, *thread->GetWaitMutex());
-    if (thread->GetWaitMonitor() != nullptr) {
-      thread->GetWaitConditionVariable()->Signal(self);
-      return;
-    }
+  // Move one thread from waiters to wake set
+  Thread* to_move = wait_set_;
+  if (to_move != nullptr) {
+    // Acquiring the thread's wait mutex before moving it prevents us from moving a thread that
+    // has released monitor_lock_ in wait() but not yet tried to wake an entry in wake_set_. See
+    // comments in SignalContendersAndReleaseMonitorLock.
+    MutexLock wait(self, *to_move->GetWaitMutex());
+    wait_set_ = to_move->GetWaitNext();
+    to_move->SetWaitNext(wake_set_);
+    wake_set_ = to_move;
   }
 }

@@ -897,12 +974,17 @@ void Monitor::NotifyAll(Thread* self) {
     ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
     return;
   }
-  // Signal all threads in the wait set.
+
+  // Move all threads from waiters to wake set
   while (wait_set_ != nullptr) {
-    Thread* thread = wait_set_;
-    wait_set_ = thread->GetWaitNext();
-    thread->SetWaitNext(nullptr);
-    thread->Notify();
+    Thread* to_move = wait_set_;
+    // Acquiring the thread's wait mutex before moving it prevents us from moving a thread that
+    // has released monitor_lock_ in wait() but not yet tried to wake an entry in wake_set_. See
+    // comments in SignalContendersAndReleaseMonitorLock.
+    MutexLock wait(self, *to_move->GetWaitMutex());
+    wait_set_ = to_move->GetWaitNext();
+    to_move->SetWaitNext(wake_set_);
+    wake_set_ = to_move;
   }
 }
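Net effect: Notify and NotifyAll only transfer threads from wait_set_ to wake_set_, and the thread releasing the monitor does the actual signalling, outside monitor_lock_. The sketch below models just that division of labor with portable C++ primitives. It is an illustration, not ART code: every name is hypothetical, std::deque stands in for the intrusive lists, and recursive lock counts, timed waits, interrupts, the self-skip, and contender signalling are omitted. It also drops the patch's wait-mutex handshake inside Notify; that handshake matters in ART because a waiting thread itself walks the wake set and grabs other threads' wait mutexes, which this model's waiters never do.

#include <atomic>
#include <condition_variable>
#include <deque>
#include <mutex>
#include <thread>

struct Waiter {
  std::mutex m;                 // stands in for Thread::wait_mutex_
  std::condition_variable cv;   // stands in for Thread::wait_cond_
  bool waiting = false;         // stands in for wait_monitor_ != nullptr
};

class MiniMonitor {
 public:
  // Park the calling thread until a notifier moves it to the wake set and
  // an unlocker signals it. (ART would then reacquire the monitor; omitted.)
  void Wait(Waiter& self) {
    std::unique_lock<std::mutex> wl(self.m);  // own wait mutex first...
    {
      std::lock_guard<std::mutex> g(lock_);
      wait_set_.push_back(&self);             // ...then become visible to Notify
      self.waiting = true;
    }
    self.cv.wait(wl, [&] { return !self.waiting; });
  }

  // Move one waiter to the wake set; do not signal it yet.
  void Notify() {
    std::lock_guard<std::mutex> g(lock_);
    if (!wait_set_.empty()) {
      wake_set_.push_back(wait_set_.front());
      wait_set_.pop_front();
    }
  }

  // On release, signal one wake-set entry that is still parked, outside
  // lock_, mirroring SignalContendersAndReleaseMonitorLock.
  void ReleaseAndSignal() {
    std::unique_lock<std::mutex> g(lock_);
    while (!wake_set_.empty()) {
      Waiter* t = wake_set_.front();
      wake_set_.pop_front();
      g.unlock();  // never signal while holding the monitor lock
      {
        std::lock_guard<std::mutex> wl(t->m);
        if (t->waiting) {
          t->waiting = false;
          t->cv.notify_one();
          return;
        }
      }
      g.lock();    // candidate was gone; look for another
    }
  }

 private:
  std::mutex lock_;             // stands in for monitor_lock_
  std::deque<Waiter*> wait_set_;
  std::deque<Waiter*> wake_set_;
};

int main() {
  MiniMonitor mon;
  Waiter w;
  std::atomic<bool> woke{false};
  std::thread t([&] { mon.Wait(w); woke = true; });
  while (!woke) {            // demo-only retry: Notify is a no-op until the
    mon.Notify();            // waiter is visible in the wait set
    mon.ReleaseAndSignal();
    std::this_thread::yield();
  }
  t.join();
}

The properties carried over from the patch are visible here: a waiter becomes visible to Notify() only while already holding its own wait mutex, so a wakeup cannot be lost between queueing and parking, and the signaller never holds the monitor lock while signalling, so a woken thread does not immediately contend on it.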
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 6b7604ec8a..c1f84e92bf 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -181,6 +181,8 @@ class Monitor {
   // this routine.
   void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_);

+  void SignalContendersAndReleaseMonitorLock(Thread* self) RELEASE(monitor_lock_);
+
   // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
   // calling thread must own the lock or the owner must be suspended. There's a race with other
   // threads inflating the lock, installing hash codes and spurious failures. The caller should
@@ -306,6 +308,9 @@ class Monitor {
   // Threads currently waiting on this monitor.
   Thread* wait_set_ GUARDED_BY(monitor_lock_);

+  // Threads that were waiting on this monitor, but are now contending on it.
+  Thread* wake_set_ GUARDED_BY(monitor_lock_);
+
   // Stored object hash code, generated lazily by GetHashCode.
   AtomicInteger hash_code_;

diff --git a/runtime/thread.cc b/runtime/thread.cc
index dda3b82612..cfab1f0bc9 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2123,7 +2123,7 @@ Thread::Thread(bool daemon)
     : tls32_(daemon),
       wait_monitor_(nullptr),
       is_runtime_thread_(false) {
-  wait_mutex_ = new Mutex("a thread wait mutex");
+  wait_mutex_ = new Mutex("a thread wait mutex", LockLevel::kThreadWaitLock);
   wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
   tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
   tlsPtr_.name = new std::string(kThreadNameDuringStartup);
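Two small pieces tie this together. The RELEASE(monitor_lock_) annotation tells Clang's thread-safety analysis that SignalContendersAndReleaseMonitorLock is entered with monitor_lock_ held and returns with it released, which is exactly how Unlock and Wait call it. And the one-line thread.cc change is what opts every thread's wait mutex into the new hierarchy: without the explicit level, the mutex would be created at the Mutex constructor's default level, and the cross-thread wait-mutex acquisition in the wake loop would trip the kDebugLocking checks. A minimal sketch of that distinction; the default-argument value is an assumption based on AOSP's base/mutex.h of this period:

// Assumed constructor shape (quoted from memory, not from this patch):
//   explicit Mutex(const char* name,
//                  LockLevel level = kDefaultMutexLevel,
//                  bool recursive = false);

// Before: at the default level, two wait mutexes could never be held at once
// under kDebugLocking, so the wake loop would abort in debug builds.
wait_mutex_ = new Mutex("a thread wait mutex");

// After: the explicit level makes a second wait-mutex acquisition register
// as kThreadWaitWakeLock instead of failing the lock-order check.
wait_mutex_ = new Mutex("a thread wait mutex", LockLevel::kThreadWaitLock);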