Don't call GetPeerFromOtherThread with thread_list_lock_ held
GetPeerFromOtherThread() may call EnsureFlipFunctionStarted(), which
expects thread_list_lock_ not to be held.
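
Fix this by snapshotting the thread list while thread_list_lock_ is
held and releasing the lock before resolving peers. A minimal sketch of
the pattern applied at each call site below (Use() is only a
placeholder for whatever the caller does with the peer):

    std::list<art::Thread*> thread_list;
    {
      art::MutexLock mu(self, *art::Locks::thread_list_lock_);
      thread_list = art::Runtime::Current()->GetThreadList()->GetList();
    }
    for (art::Thread* thd : thread_list) {
      // thread_list_lock_ is no longer held here, so GetPeerFromOtherThread()
      // is free to run the flip function for the other thread if needed.
      Use(thd->GetPeerFromOtherThread());
    }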
Test: art/test/testrunner/testrunner.py --host
Change-Id: Ic19df7aa553a95818f81a265f721baddec90af47
diff --git a/openjdkjvmti/ti_object.cc b/openjdkjvmti/ti_object.cc
index f37df86..24ccbaf 100644
--- a/openjdkjvmti/ti_object.cc
+++ b/openjdkjvmti/ti_object.cc
@@ -105,13 +105,15 @@
notify_wait.push_back(jni->AddLocalReference<jthread>(thd->GetPeerFromOtherThread()));
wait.push_back(jni->AddLocalReference<jthread>(thd->GetPeerFromOtherThread()));
}
+ // Scan all threads to see which are waiting on this particular monitor.
+ std::list<art::Thread*> thread_list;
{
- // Scan all threads to see which are waiting on this particular monitor.
art::MutexLock tll(self, *art::Locks::thread_list_lock_);
- for (art::Thread* thd : art::Runtime::Current()->GetThreadList()->GetList()) {
- if (thd != info.owner_ && target.Ptr() == thd->GetMonitorEnterObject()) {
- wait.push_back(jni->AddLocalReference<jthread>(thd->GetPeerFromOtherThread()));
- }
+ thread_list = art::Runtime::Current()->GetThreadList()->GetList();
+ }
+ for (art::Thread* thd : thread_list) {
+ if (thd != info.owner_ && target.Ptr() == thd->GetMonitorEnterObject()) {
+ wait.push_back(jni->AddLocalReference<jthread>(thd->GetPeerFromOtherThread()));
}
}
}
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 13eebbf..33cc754 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -265,12 +265,13 @@
art::Thread* self = art::Thread::Current();
art::ScopedObjectAccess soa(self);
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
-
art::Thread* target;
jvmtiError err = ERR(INTERNAL);
- if (!GetNativeThread(thread, soa, &target, &err)) {
- return err;
+ {
+ art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ if (!GetNativeThread(thread, soa, &target, &err)) {
+ return err;
+ }
}
JvmtiUniquePtr<char[]> name_uptr;
@@ -637,10 +638,11 @@
art::Thread* current = art::Thread::Current();
art::ScopedObjectAccess soa(current);
-
- art::MutexLock mu(current, *art::Locks::thread_list_lock_);
- std::list<art::Thread*> thread_list = art::Runtime::Current()->GetThreadList()->GetList();
-
+ std::list<art::Thread*> thread_list;
+ {
+ art::MutexLock mu(current, *art::Locks::thread_list_lock_);
+ thread_list = art::Runtime::Current()->GetThreadList()->GetList();
+ }
std::vector<art::ObjPtr<art::mirror::Object>> peers;
for (art::Thread* thread : thread_list) {
diff --git a/openjdkjvmti/ti_threadgroup.cc b/openjdkjvmti/ti_threadgroup.cc
index 120024e..f6f0e21 100644
--- a/openjdkjvmti/ti_threadgroup.cc
+++ b/openjdkjvmti/ti_threadgroup.cc
@@ -169,9 +169,12 @@
std::vector<art::ObjPtr<art::mirror::Object>>* thread_peers)
REQUIRES_SHARED(art::Locks::mutator_lock_) REQUIRES(!art::Locks::thread_list_lock_) {
CHECK(thread_group != nullptr);
-
- art::MutexLock mu(art::Thread::Current(), *art::Locks::thread_list_lock_);
- for (art::Thread* t : art::Runtime::Current()->GetThreadList()->GetList()) {
+ std::list<art::Thread*> thread_list;
+ {
+ art::MutexLock mu(art::Thread::Current(), *art::Locks::thread_list_lock_);
+ thread_list = art::Runtime::Current()->GetThreadList()->GetList();
+ }
+ for (art::Thread* t : thread_list) {
if (t->IsStillStarting()) {
continue;
}
diff --git a/runtime/thread.h b/runtime/thread.h
index e1503ae..88e3d45 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -521,7 +521,8 @@
// This function will force a flip for the other thread if necessary.
// Since we hold a shared mutator lock, a new flip function cannot be concurrently
// installed
- mirror::Object* GetPeerFromOtherThread() REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::Object* GetPeerFromOtherThread() REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::thread_list_lock_);
bool HasPeer() const {
return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;