Merge "Change RequestSynchronousCheckpoint to release thread_list_lock_"
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 50402a0..43f5447 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -779,13 +779,15 @@
// Suspend JIT since it can get confused if we deoptimize methods getting jitted.
art::jit::ScopedJitSuspend suspend_jit;
art::ScopedObjectAccess soa(self);
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
GetLocalVariableClosure c(self, depth, slot, type, val);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (!target->RequestSynchronousCheckpoint(&c)) {
return ERR(THREAD_NOT_ALIVE);
} else {
@@ -906,13 +908,15 @@
// Suspend JIT since it can get confused if we deoptimize methods getting jitted.
art::jit::ScopedJitSuspend suspend_jit;
art::ScopedObjectAccess soa(self);
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
SetLocalVariableClosure c(self, depth, slot, type, val);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (!target->RequestSynchronousCheckpoint(&c)) {
return ERR(THREAD_NOT_ALIVE);
} else {
@@ -963,13 +967,15 @@
}
art::Thread* self = art::Thread::Current();
art::ScopedObjectAccess soa(self);
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
GetLocalInstanceClosure c(self, depth, data);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (!target->RequestSynchronousCheckpoint(&c)) {
return ERR(THREAD_NOT_ALIVE);
} else {
diff --git a/openjdkjvmti/ti_monitor.cc b/openjdkjvmti/ti_monitor.cc
index 5a38f46..5881f8c 100644
--- a/openjdkjvmti/ti_monitor.cc
+++ b/openjdkjvmti/ti_monitor.cc
@@ -334,10 +334,11 @@
}
art::Thread* self = art::Thread::Current();
art::ScopedObjectAccess soa(self);
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
struct GetContendedMonitorClosure : public art::Closure {
@@ -393,6 +394,7 @@
jobject* out_;
};
GetContendedMonitorClosure closure(self, monitor);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (!target->RequestSynchronousCheckpoint(&closure)) {
return ERR(THREAD_NOT_ALIVE);
}
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 23cf659..8c4eb0b 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -220,28 +220,33 @@
// It is not great that we have to hold these locks for so long, but it is necessary to ensure
// that the thread isn't dying on us.
art::ScopedObjectAccess soa(art::Thread::Current());
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
art::Thread* thread;
jvmtiError thread_error = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return thread_error;
}
DCHECK(thread != nullptr);
art::ThreadState state = thread->GetState();
if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(THREAD_NOT_ALIVE);
}
if (max_frame_count < 0) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(ILLEGAL_ARGUMENT);
}
if (frame_buffer == nullptr || count_ptr == nullptr) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(NULL_POINTER);
}
if (max_frame_count == 0) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
*count_ptr = 0;
return ERR(NONE);
}
@@ -251,23 +256,29 @@
GetStackTraceDirectClosure closure(frame_buffer,
static_cast<size_t>(start_depth),
static_cast<size_t>(max_frame_count));
- thread->RequestSynchronousCheckpoint(&closure);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!thread->RequestSynchronousCheckpoint(&closure)) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
*count_ptr = static_cast<jint>(closure.index);
if (closure.index < static_cast<size_t>(start_depth)) {
return ERR(ILLEGAL_ARGUMENT);
}
return ERR(NONE);
+ } else {
+ GetStackTraceVectorClosure closure(0, 0);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!thread->RequestSynchronousCheckpoint(&closure)) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+
+ return TranslateFrameVector(closure.frames,
+ start_depth,
+ closure.start_result,
+ max_frame_count,
+ frame_buffer,
+ count_ptr);
}
-
- GetStackTraceVectorClosure closure(0, 0);
- thread->RequestSynchronousCheckpoint(&closure);
-
- return TranslateFrameVector(closure.frames,
- start_depth,
- closure.start_result,
- max_frame_count,
- frame_buffer,
- count_ptr);
}
template <typename Data>
@@ -678,25 +689,29 @@
// It is not great that we have to hold these locks for so long, but it is necessary to ensure
// that the thread isn't dying on us.
art::ScopedObjectAccess soa(art::Thread::Current());
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
art::Thread* thread;
jvmtiError thread_error = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return thread_error;
}
DCHECK(thread != nullptr);
art::ThreadState state = thread->GetState();
if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(THREAD_NOT_ALIVE);
}
if (count_ptr == nullptr) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(NULL_POINTER);
}
GetFrameCountClosure closure;
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (!thread->RequestSynchronousCheckpoint(&closure)) {
return ERR(THREAD_NOT_ALIVE);
}
@@ -760,29 +775,36 @@
// It is not great that we have to hold these locks for so long, but it is necessary to ensure
// that the thread isn't dying on us.
art::ScopedObjectAccess soa(art::Thread::Current());
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
art::Thread* thread;
jvmtiError thread_error = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return thread_error;
}
DCHECK(thread != nullptr);
art::ThreadState state = thread->GetState();
if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(THREAD_NOT_ALIVE);
}
if (depth < 0) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(ILLEGAL_ARGUMENT);
}
if (method_ptr == nullptr || location_ptr == nullptr) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(NULL_POINTER);
}
GetLocationClosure closure(static_cast<size_t>(depth));
- thread->RequestSynchronousCheckpoint(&closure);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!thread->RequestSynchronousCheckpoint(&closure)) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
if (closure.method == nullptr) {
return ERR(NO_MORE_FRAMES);
@@ -891,17 +913,21 @@
MonitorInfoClosure<Fn> closure(soa, handle_results);
bool called_method = false;
{
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
if (target != self) {
called_method = true;
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (!target->RequestSynchronousCheckpoint(&closure)) {
return ERR(THREAD_NOT_ALIVE);
}
+ } else {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
}
}
// Cannot call the closure on the current thread if we have thread_list_lock since we need to call
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index d0913cf..1082a9e 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -1008,12 +1008,14 @@
return ERR(INVALID_OBJECT);
}
art::Handle<art::mirror::Throwable> exc(hs.NewHandle(obj->AsThrowable()));
- art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
} else if (target->GetState() == art::ThreadState::kStarting || target->IsStillStarting()) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return ERR(THREAD_NOT_ALIVE);
}
struct StopThreadClosure : public art::Closure {
@@ -1032,6 +1034,7 @@
art::Handle<art::mirror::Throwable> exception_;
};
StopThreadClosure c(exc);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (target->RequestSynchronousCheckpoint(&c)) {
return OK;
} else {
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 7823413..e43f174 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -453,7 +453,7 @@
// Acquire thread-list lock to find thread and keep it from dying until we've got all
// the info we need.
{
- MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
+ Locks::thread_list_lock_->ExclusiveLock(Thread::Current());
// Re-find the owner in case the thread got killed.
Thread* original_owner = Runtime::Current()->GetThreadList()->FindThreadByThreadId(
@@ -475,9 +475,15 @@
std::ostringstream oss;
};
CollectStackTrace owner_trace;
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its
+ // execution.
original_owner->RequestSynchronousCheckpoint(&owner_trace);
owner_stack_dump = owner_trace.oss.str();
+ } else {
+ Locks::thread_list_lock_->ExclusiveUnlock(Thread::Current());
}
+ } else {
+ Locks::thread_list_lock_->ExclusiveUnlock(Thread::Current());
}
// This is all the data we need. Now drop the thread-list lock, it's OK for the
// owner to go away now.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 2753bf7..3355b10 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1451,21 +1451,25 @@
Barrier barrier_;
};
+// RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
bool Thread::RequestSynchronousCheckpoint(Closure* function) {
+ Thread* self = Thread::Current();
if (this == Thread::Current()) {
+ Locks::thread_list_lock_->AssertExclusiveHeld(self);
+    // Unlock the thread_list_lock_ before running so that the state is the same regardless of thread.
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
// Asked to run on this thread. Just run.
function->Run(this);
return true;
}
- Thread* self = Thread::Current();
// The current thread is not this thread.
if (GetState() == ThreadState::kTerminated) {
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
return false;
}
- // Note: we're holding the thread-list lock. The thread cannot die at this point.
struct ScopedThreadListLockUnlock {
explicit ScopedThreadListLockUnlock(Thread* self_in) RELEASE(*Locks::thread_list_lock_)
: self_thread(self_in) {
@@ -1482,6 +1486,7 @@
};
for (;;) {
+ Locks::thread_list_lock_->AssertExclusiveHeld(self);
// If this thread is runnable, try to schedule a checkpoint. Do some gymnastics to not hold the
// suspend-count lock for too long.
if (GetState() == ThreadState::kRunnable) {
@@ -1492,8 +1497,9 @@
installed = RequestCheckpoint(&barrier_closure);
}
if (installed) {
- // Relinquish the thread-list lock, temporarily. We should not wait holding any locks.
- ScopedThreadListLockUnlock stllu(self);
+      // Relinquish the thread-list lock. We should not wait holding any locks. We cannot
+      // reacquire it since 'this' may have been deleted by the time we finish waiting.
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
ScopedThreadSuspension sts(self, ThreadState::kWaiting);
barrier_closure.Wait(self);
return true;
@@ -1515,6 +1521,8 @@
}
{
+ // Release for the wait. The suspension will keep us from being deleted. Reacquire after so
+ // that we can call ModifySuspendCount without racing against ThreadList::Unregister.
ScopedThreadListLockUnlock stllu(self);
{
ScopedThreadSuspension sts(self, ThreadState::kWaiting);
@@ -1543,6 +1551,9 @@
Thread::resume_cond_->Broadcast(self);
}
+ // Release the thread_list_lock_ to be consistent with the barrier-closure path.
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
+
return true; // We're done, break out of the loop.
}
}
diff --git a/runtime/thread.h b/runtime/thread.h
index ab89778..42b38da 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -265,9 +265,13 @@
bool RequestCheckpoint(Closure* function)
REQUIRES(Locks::thread_suspend_count_lock_);
+
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
+ // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
+ // execute the checkpoint for us if it is Runnable.
bool RequestSynchronousCheckpoint(Closure* function)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(Locks::thread_list_lock_)
+ RELEASE(Locks::thread_list_lock_)
REQUIRES(!Locks::thread_suspend_count_lock_);
bool RequestEmptyCheckpoint()
REQUIRES(Locks::thread_suspend_count_lock_);