Change RequestSynchronousCheckpoint to release thread_list_lock_

In some cases the RequestSynchronousCheckpoint function needs to
release the thread_list_lock_ while it waits for the checkpoint to be
executed, which means the thread being checkpointed might be deleted
in the meantime. Previously this was not obvious, since the
thread_list_lock_ appeared to be held throughout the execution of the
method.

To prevent bugs, we make RequestSynchronousCheckpoint explicitly
responsible for releasing the thread_list_lock_ as part of its
execution. Callers now take the lock directly (rather than through a
MutexLock guard), making it clear at each call site that the target
thread might die while the checkpoint runs.
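
The new call-site pattern is roughly the following (a simplified
sketch; SomeClosure and its arguments stand in for the concrete
closures used at each call site in the diff below):

  art::Locks::thread_list_lock_->ExclusiveLock(self);
  art::Thread* target = nullptr;
  jvmtiError err = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
    // Early return: we still hold the lock, so release it ourselves.
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return err;
  }
  SomeClosure closure(/* ... */);
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a
  // part of its execution, so neither path below needs an unlock.
  if (!target->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }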

Bug: 67362832
Test: ./test.py --host -j50
Change-Id: I1cbdf7660096dc1908b0eeabc1062447307bc888
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index f05977a..8141f51 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -778,13 +778,15 @@
   // Suspend JIT since it can get confused if we deoptimize methods getting jitted.
   art::jit::ScopedJitSuspend suspend_jit;
   art::ScopedObjectAccess soa(self);
-  art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+  art::Locks::thread_list_lock_->ExclusiveLock(self);
   art::Thread* target = nullptr;
   jvmtiError err = ERR(INTERNAL);
   if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
     return err;
   }
   GetLocalVariableClosure c(self, depth, slot, type, val);
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
   if (!target->RequestSynchronousCheckpoint(&c)) {
     return ERR(THREAD_NOT_ALIVE);
   } else {
@@ -905,13 +907,15 @@
   // Suspend JIT since it can get confused if we deoptimize methods getting jitted.
   art::jit::ScopedJitSuspend suspend_jit;
   art::ScopedObjectAccess soa(self);
-  art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+  art::Locks::thread_list_lock_->ExclusiveLock(self);
   art::Thread* target = nullptr;
   jvmtiError err = ERR(INTERNAL);
   if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
     return err;
   }
   SetLocalVariableClosure c(self, depth, slot, type, val);
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
   if (!target->RequestSynchronousCheckpoint(&c)) {
     return ERR(THREAD_NOT_ALIVE);
   } else {
@@ -962,13 +966,15 @@
   }
   art::Thread* self = art::Thread::Current();
   art::ScopedObjectAccess soa(self);
-  art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+  art::Locks::thread_list_lock_->ExclusiveLock(self);
   art::Thread* target = nullptr;
   jvmtiError err = ERR(INTERNAL);
   if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
     return err;
   }
   GetLocalInstanceClosure c(self, depth, data);
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
   if (!target->RequestSynchronousCheckpoint(&c)) {
     return ERR(THREAD_NOT_ALIVE);
   } else {
diff --git a/openjdkjvmti/ti_monitor.cc b/openjdkjvmti/ti_monitor.cc
index 5a38f46..5881f8c 100644
--- a/openjdkjvmti/ti_monitor.cc
+++ b/openjdkjvmti/ti_monitor.cc
@@ -334,10 +334,11 @@
   }
   art::Thread* self = art::Thread::Current();
   art::ScopedObjectAccess soa(self);
-  art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+  art::Locks::thread_list_lock_->ExclusiveLock(self);
   art::Thread* target = nullptr;
   jvmtiError err = ERR(INTERNAL);
   if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
     return err;
   }
   struct GetContendedMonitorClosure : public art::Closure {
@@ -393,6 +394,7 @@
     jobject* out_;
   };
   GetContendedMonitorClosure closure(self, monitor);
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
   if (!target->RequestSynchronousCheckpoint(&closure)) {
     return ERR(THREAD_NOT_ALIVE);
   }
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index d4cc42a..31cec51 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -220,28 +220,33 @@
   // It is not great that we have to hold these locks for so long, but it is necessary to ensure
   // that the thread isn't dying on us.
   art::ScopedObjectAccess soa(art::Thread::Current());
-  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
 
   art::Thread* thread;
   jvmtiError thread_error = ERR(INTERNAL);
   if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
     return thread_error;
   }
   DCHECK(thread != nullptr);
 
   art::ThreadState state = thread->GetState();
   if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
     return ERR(THREAD_NOT_ALIVE);
   }
 
   if (max_frame_count < 0) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
     return ERR(ILLEGAL_ARGUMENT);
   }
   if (frame_buffer == nullptr || count_ptr == nullptr) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
     return ERR(NULL_POINTER);
   }
 
   if (max_frame_count == 0) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
     *count_ptr = 0;
     return ERR(NONE);
   }
@@ -251,23 +256,29 @@
     GetStackTraceDirectClosure closure(frame_buffer,
                                        static_cast<size_t>(start_depth),
                                        static_cast<size_t>(max_frame_count));
-    thread->RequestSynchronousCheckpoint(&closure);
+    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+    if (!thread->RequestSynchronousCheckpoint(&closure)) {
+      return ERR(THREAD_NOT_ALIVE);
+    }
     *count_ptr = static_cast<jint>(closure.index);
     if (closure.index < static_cast<size_t>(start_depth)) {
       return ERR(ILLEGAL_ARGUMENT);
     }
     return ERR(NONE);
+  } else {
+    GetStackTraceVectorClosure closure(0, 0);
+    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+    if (!thread->RequestSynchronousCheckpoint(&closure)) {
+      return ERR(THREAD_NOT_ALIVE);
+    }
+
+    return TranslateFrameVector(closure.frames,
+                                start_depth,
+                                closure.start_result,
+                                max_frame_count,
+                                frame_buffer,
+                                count_ptr);
   }
-
-  GetStackTraceVectorClosure closure(0, 0);
-  thread->RequestSynchronousCheckpoint(&closure);
-
-  return TranslateFrameVector(closure.frames,
-                              start_depth,
-                              closure.start_result,
-                              max_frame_count,
-                              frame_buffer,
-                              count_ptr);
 }
 
 template <typename Data>
@@ -678,25 +689,29 @@
   // It is not great that we have to hold these locks for so long, but it is necessary to ensure
   // that the thread isn't dying on us.
   art::ScopedObjectAccess soa(art::Thread::Current());
-  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
 
   art::Thread* thread;
   jvmtiError thread_error = ERR(INTERNAL);
   if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
     return thread_error;
   }
 
   DCHECK(thread != nullptr);
   art::ThreadState state = thread->GetState();
   if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
     return ERR(THREAD_NOT_ALIVE);
   }
 
   if (count_ptr == nullptr) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
     return ERR(NULL_POINTER);
   }
 
   GetFrameCountClosure closure;
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
   if (!thread->RequestSynchronousCheckpoint(&closure)) {
     return ERR(THREAD_NOT_ALIVE);
   }
@@ -760,29 +775,36 @@
   // It is not great that we have to hold these locks for so long, but it is necessary to ensure
   // that the thread isn't dying on us.
   art::ScopedObjectAccess soa(art::Thread::Current());
-  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
 
   art::Thread* thread;
   jvmtiError thread_error = ERR(INTERNAL);
   if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
     return thread_error;
   }
   DCHECK(thread != nullptr);
 
   art::ThreadState state = thread->GetState();
   if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
     return ERR(THREAD_NOT_ALIVE);
   }
 
   if (depth < 0) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
     return ERR(ILLEGAL_ARGUMENT);
   }
   if (method_ptr == nullptr || location_ptr == nullptr) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
     return ERR(NULL_POINTER);
   }
 
   GetLocationClosure closure(static_cast<size_t>(depth));
-  thread->RequestSynchronousCheckpoint(&closure);
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+  if (!thread->RequestSynchronousCheckpoint(&closure)) {
+    return ERR(THREAD_NOT_ALIVE);
+  }
 
   if (closure.method == nullptr) {
     return ERR(NO_MORE_FRAMES);
@@ -891,17 +913,21 @@
   MonitorInfoClosure<Fn> closure(soa, handle_results);
   bool called_method = false;
   {
-    art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+    art::Locks::thread_list_lock_->ExclusiveLock(self);
     art::Thread* target = nullptr;
     jvmtiError err = ERR(INTERNAL);
     if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
       return err;
     }
     if (target != self) {
       called_method = true;
+      // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
       if (!target->RequestSynchronousCheckpoint(&closure)) {
         return ERR(THREAD_NOT_ALIVE);
       }
+    } else {
+      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
     }
   }
   // Cannot call the closure on the current thread if we have thread_list_lock since we need to call
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 9a809df..687fda7 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -963,12 +963,14 @@
     return ERR(INVALID_OBJECT);
   }
   art::Handle<art::mirror::Throwable> exc(hs.NewHandle(obj->AsThrowable()));
-  art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
+  art::Locks::thread_list_lock_->ExclusiveLock(self);
   art::Thread* target = nullptr;
   jvmtiError err = ERR(INTERNAL);
   if (!GetAliveNativeThread(thread, soa, &target, &err)) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
     return err;
   } else if (target->GetState() == art::ThreadState::kStarting || target->IsStillStarting()) {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
     return ERR(THREAD_NOT_ALIVE);
   }
   struct StopThreadClosure : public art::Closure {
@@ -987,6 +989,7 @@
     art::Handle<art::mirror::Throwable> exception_;
   };
   StopThreadClosure c(exc);
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
   if (target->RequestSynchronousCheckpoint(&c)) {
     return OK;
   } else {