summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author Elliott Hughes <enh@google.com> 2011-11-07 14:40:00 -0800
committer Elliott Hughes <enh@google.com> 2011-11-07 14:40:00 -0800
commit bbd9d830fe0eb8ce44405d7504dcf9a6fe91ffa1 (patch)
tree 211747a79d87a2412c156e42713eea4638bbc340
parent c2f8006ede3d07ca53467fa73573bdbfb864a65e (diff)
Fix at least two deadlocks.
Pretty much every caller that takes the thread list lock can then go on to cause allocation, which requires the heap lock. The GC always takes the heap lock first and the thread list lock second (to suspend/resume other threads). Cue deadlocks. This patch is a pretty degenerate fix that basically makes the thread list lock irrelevant; we now always take the heap lock first.

Change-Id: I0537cffb0b841bfb5033789817793734d75dfb31
-rw-r--r--  src/dalvik_system_VMStack.cc                         |  2
-rw-r--r--  src/debugger.cc                                      |  2
-rw-r--r--  src/java_lang_Thread.cc                              | 12
-rw-r--r--  src/mutex.cc                                         | 19
-rw-r--r--  src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc  |  4
-rw-r--r--  src/thread_list.cc                                   | 82
-rw-r--r--  src/thread_list.h                                    | 28
7 files changed, 67 insertions(+), 82 deletions(-)
diff --git a/src/dalvik_system_VMStack.cc b/src/dalvik_system_VMStack.cc
index 5a242f963f..d87bc2b7b8 100644
--- a/src/dalvik_system_VMStack.cc
+++ b/src/dalvik_system_VMStack.cc
@@ -26,7 +26,7 @@ namespace art {
namespace {
static jobject GetThreadStack(JNIEnv* env, jobject javaThread) {
- ThreadListLock thread_list_lock;
+ ScopedThreadListLock thread_list_lock;
Thread* thread = Thread::FromManagedThread(env, javaThread);
return (thread != NULL) ? GetThreadStack(env, thread) : NULL;
}
diff --git a/src/debugger.cc b/src/debugger.cc
index 9f4bc4dee4..828ee4a873 100644
--- a/src/debugger.cc
+++ b/src/debugger.cc
@@ -833,7 +833,7 @@ void Dbg::DdmSetThreadNotification(bool enable) {
// We lock the thread list to avoid sending duplicate events or missing
// a thread change. We should be okay holding this lock while sending
// the messages out. (We have to hold it while accessing a live thread.)
- ThreadListLock lock;
+ ScopedThreadListLock thread_list_lock;
gDdmThreadNotification = enable;
if (enable) {
diff --git a/src/java_lang_Thread.cc b/src/java_lang_Thread.cc
index 891238ef79..9031471ae6 100644
--- a/src/java_lang_Thread.cc
+++ b/src/java_lang_Thread.cc
@@ -35,7 +35,7 @@ jboolean Thread_interrupted(JNIEnv* env, jclass) {
}
jboolean Thread_isInterrupted(JNIEnv* env, jobject javaThread) {
- ThreadListLock lock;
+ ScopedThreadListLock thread_list_lock;
Thread* thread = Thread::FromManagedThread(env, javaThread);
return (thread != NULL) ? thread->IsInterrupted() : JNI_FALSE;
}
@@ -46,7 +46,7 @@ void Thread_nativeCreate(JNIEnv* env, jclass, jobject javaThread, jlong stackSiz
}
jint Thread_nativeGetStatus(JNIEnv* env, jobject javaThread) {
- ThreadListLock lock;
+ ScopedThreadListLock thread_list_lock;
Thread* thread = Thread::FromManagedThread(env, javaThread);
if (thread == NULL) {
return -1;
@@ -61,13 +61,13 @@ jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject javaThread, jobject javaObj
Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "object == null");
return JNI_FALSE;
}
- ThreadListLock lock;
+ ScopedThreadListLock thread_list_lock;
Thread* thread = Thread::FromManagedThread(env, javaThread);
return thread->HoldsLock(object);
}
void Thread_nativeInterrupt(JNIEnv* env, jobject javaThread) {
- ThreadListLock lock;
+ ScopedThreadListLock thread_list_lock;
Thread* thread = Thread::FromManagedThread(env, javaThread);
if (thread != NULL) {
thread->Interrupt();
@@ -75,7 +75,7 @@ void Thread_nativeInterrupt(JNIEnv* env, jobject javaThread) {
}
void Thread_nativeSetName(JNIEnv* env, jobject javaThread, jstring javaName) {
- ThreadListLock lock;
+ ScopedThreadListLock thread_list_lock;
Thread* thread = Thread::FromManagedThread(env, javaThread);
if (thread != NULL) {
ScopedUtfChars name(env, javaName);
@@ -96,7 +96,7 @@ void Thread_nativeSetName(JNIEnv* env, jobject javaThread, jstring javaName) {
* threads at Thread.NORM_PRIORITY (5).
*/
void Thread_nativeSetPriority(JNIEnv* env, jobject javaThread, jint newPriority) {
- ThreadListLock lock;
+ ScopedThreadListLock thread_list_lock;
Thread* thread = Thread::FromManagedThread(env, javaThread);
if (thread != NULL) {
thread->SetNativePriority(newPriority);
diff --git a/src/mutex.cc b/src/mutex.cc
index 25d8b76d69..0dfab1163d 100644
--- a/src/mutex.cc
+++ b/src/mutex.cc
@@ -27,19 +27,12 @@
namespace art {
Mutex::Mutex(const char* name) : name_(name) {
-#ifndef NDEBUG
- pthread_mutexattr_t debug_attributes;
- CHECK_MUTEX_CALL(pthread_mutexattr_init, (&debug_attributes));
-#if VERIFY_OBJECT_ENABLED
- CHECK_MUTEX_CALL(pthread_mutexattr_settype, (&debug_attributes, PTHREAD_MUTEX_RECURSIVE));
-#else
- CHECK_MUTEX_CALL(pthread_mutexattr_settype, (&debug_attributes, PTHREAD_MUTEX_ERRORCHECK));
-#endif
- CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, &debug_attributes));
- CHECK_MUTEX_CALL(pthread_mutexattr_destroy, (&debug_attributes));
-#else
- CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, NULL));
-#endif
+ // Like Java, we use recursive mutexes.
+ pthread_mutexattr_t attributes;
+ CHECK_MUTEX_CALL(pthread_mutexattr_init, (&attributes));
+ CHECK_MUTEX_CALL(pthread_mutexattr_settype, (&attributes, PTHREAD_MUTEX_RECURSIVE));
+ CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, &attributes));
+ CHECK_MUTEX_CALL(pthread_mutexattr_destroy, (&attributes));
}
Mutex::~Mutex() {
diff --git a/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index f423e38233..1cda892807 100644
--- a/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -74,7 +74,7 @@ static Thread* FindThreadByThinLockId(uint32_t thin_lock_id) {
* NULL on failure, e.g. if the threadId couldn't be found.
*/
static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint thin_lock_id) {
- ThreadListLock thread_list_lock;
+ ScopedThreadListLock thread_list_lock;
Thread* thread = FindThreadByThinLockId(static_cast<uint32_t>(thin_lock_id));
if (thread == NULL) {
return NULL;
@@ -132,7 +132,7 @@ static void ThreadStatsGetterCallback(Thread* t, void* context) {
static jbyteArray DdmVmInternal_getThreadStats(JNIEnv* env, jclass) {
std::vector<uint8_t> bytes;
{
- ThreadListLock thread_list_lock;
+ ScopedThreadListLock thread_list_lock;
ThreadList* thread_list = Runtime::Current()->GetThreadList();
uint16_t thread_count;
diff --git a/src/thread_list.cc b/src/thread_list.cc
index 6da5e682f0..4b33908e4a 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -22,33 +22,43 @@
namespace art {
-// TODO: merge with ThreadListLock?
-class ThreadListLocker {
- public:
-
- explicit ThreadListLocker(const ThreadList* thread_list) : thread_list_(thread_list) {
- // Avoid deadlock between two threads trying to SuspendAll
- // simultaneously by going to kVmWait if the lock cannot be
- // immediately acquired.
- if (!thread_list_->thread_list_lock_.TryLock()) {
- Thread* self = Thread::Current();
- if (self == NULL) {
- thread_list_->thread_list_lock_.Lock();
- } else {
- ScopedThreadStateChange tsc(self, Thread::kVmWait);
- thread_list_->thread_list_lock_.Lock();
- }
+ScopedThreadListLock::ScopedThreadListLock() {
+ // Self may be null during shutdown.
+ Thread* self = Thread::Current();
+
+ // We insist that anyone taking the thread list lock already has the heap lock,
+ // because pretty much any time someone takes the thread list lock, they may
+ // end up needing the heap lock (even removing a thread from the thread list calls
+ // back into managed code to remove the thread from its ThreadGroup, and that allocates
+ // an iterator).
+ // TODO: this makes the distinction between the two locks pretty pointless.
+ if (self != NULL) {
+ Heap::Lock();
+ }
+
+ // Avoid deadlock between two threads trying to SuspendAll
+ // simultaneously by going to kVmWait if the lock cannot be
+ // immediately acquired.
+ // TODO: is this needed if we took the heap lock? taking the heap lock will have done this,
+ // and the other thread will now be in kVmWait waiting for the heap lock.
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ if (!thread_list->thread_list_lock_.TryLock()) {
+ if (self == NULL) {
+ thread_list->thread_list_lock_.Lock();
+ } else {
+ ScopedThreadStateChange tsc(self, Thread::kVmWait);
+ thread_list->thread_list_lock_.Lock();
}
}
- ~ThreadListLocker() {
- thread_list_->thread_list_lock_.Unlock();
+ if (self != NULL) {
+ Heap::Unlock();
}
+}
- private:
- const ThreadList* thread_list_;
- DISALLOW_COPY_AND_ASSIGN(ThreadListLocker);
-};
+ScopedThreadListLock::~ScopedThreadListLock() {
+ Runtime::Current()->GetThreadList()->thread_list_lock_.Unlock();
+}
ThreadList::ThreadList(bool verbose)
: verbose_(verbose),
@@ -78,7 +88,7 @@ pid_t ThreadList::GetLockOwner() {
}
void ThreadList::Dump(std::ostream& os) {
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
os << "DALVIK THREADS (" << list_.size() << "):\n";
for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
(*it)->Dump(os);
@@ -140,7 +150,7 @@ void ThreadList::SuspendAll(bool for_debugger) {
}
CHECK_EQ(self->GetState(), Thread::kRunnable);
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
Thread* debug_thread = Dbg::GetDebugThread();
{
@@ -225,7 +235,7 @@ void ThreadList::SuspendSelfForDebugger() {
// Collisions with other suspends aren't really interesting. We want
// to ensure that we're the only one fiddling with the suspend count
// though.
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
MutexLock mu(thread_suspend_count_lock_);
ModifySuspendCount(self, +1, true);
@@ -270,7 +280,7 @@ void ThreadList::ResumeAll(bool for_debugger) {
// writes, since nobody should be moving until we decrement the count.
// We do need to hold the thread list because of JNI attaches.
{
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
Thread* debug_thread = Dbg::GetDebugThread();
MutexLock mu(thread_suspend_count_lock_);
for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
@@ -346,7 +356,7 @@ void ThreadList::UndoDebuggerSuspensions() {
}
{
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
MutexLock mu(thread_suspend_count_lock_);
for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
Thread* thread = *it;
@@ -374,7 +384,7 @@ void ThreadList::Register() {
LOG(INFO) << "ThreadList::Register() " << *self << "\n" << Dumpable<Thread>(*self);
}
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
CHECK(!Contains(self));
list_.push_back(self);
}
@@ -399,7 +409,7 @@ void ThreadList::Unregister() {
self->RemoveFromThreadGroup();
}
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
// Remove this thread from the list.
CHECK(Contains(self));
@@ -426,7 +436,7 @@ void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
}
void ThreadList::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
(*it)->VisitRoots(visitor, arg);
}
@@ -448,7 +458,7 @@ void ThreadList::SignalGo(Thread* child) {
CHECK(child != self);
{
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
if (verbose_) {
LOG(INFO) << *self << " waiting for child " << *child << " to be in thread list...";
}
@@ -464,7 +474,7 @@ void ThreadList::SignalGo(Thread* child) {
self->SetState(Thread::kRunnable);
// Tell the child that it's safe: it will see any future suspend request.
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
if (verbose_) {
LOG(INFO) << *self << " telling child " << *child << " it's safe to proceed...";
}
@@ -477,7 +487,7 @@ void ThreadList::WaitForGo() {
DCHECK(Contains(self));
{
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
// Tell our parent that we're in the thread list.
if (verbose_) {
@@ -521,14 +531,14 @@ bool ThreadList::AllThreadsAreDaemons() {
}
void ThreadList::WaitForNonDaemonThreadsToExit() {
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
while (!AllThreadsAreDaemons()) {
thread_exit_cond_.Wait(thread_list_lock_);
}
}
void ThreadList::SuspendAllDaemonThreads() {
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
// Tell all the daemons it's time to suspend. (At this point, we know
// all threads are daemons.)
@@ -562,7 +572,7 @@ void ThreadList::SuspendAllDaemonThreads() {
}
uint32_t ThreadList::AllocThreadId() {
- ThreadListLocker locker(this);
+ ScopedThreadListLock thread_list_lock;
for (size_t i = 0; i < allocated_ids_.size(); ++i) {
if (!allocated_ids_[i]) {
allocated_ids_.set(i);
diff --git a/src/thread_list.h b/src/thread_list.h
index 7bbd866171..57f9cf4585 100644
--- a/src/thread_list.h
+++ b/src/thread_list.h
@@ -83,36 +83,18 @@ class ThreadList {
ConditionVariable thread_suspend_count_cond_;
friend class Thread;
- friend class ThreadListLock;
- friend class ThreadListLocker;
+ friend class ScopedThreadListLock;
DISALLOW_COPY_AND_ASSIGN(ThreadList);
};
-class ThreadListLock {
+class ScopedThreadListLock {
public:
- explicit ThreadListLock(Thread* self = NULL) {
- if (self == NULL) {
- // Try to get it from TLS.
- self = Thread::Current();
- }
- // self may still be NULL during VM shutdown.
- Thread::State old_state;
- if (self != NULL) {
- old_state = self->SetState(Thread::kVmWait);
- }
- Runtime::Current()->GetThreadList()->thread_list_lock_.Lock();
- if (self != NULL) {
- self->SetState(old_state);
- }
- }
-
- ~ThreadListLock() {
- Runtime::Current()->GetThreadList()->thread_list_lock_.Unlock();
- }
+ ScopedThreadListLock();
+ ~ScopedThreadListLock();
private:
- DISALLOW_COPY_AND_ASSIGN(ThreadListLock);
+ DISALLOW_COPY_AND_ASSIGN(ScopedThreadListLock);
};
} // namespace art