path: root/openjdkjvmti
author Hans Boehm <hboehm@google.com> 2023-12-19 18:48:15 +0000
committer Hans Boehm <hboehm@google.com> 2023-12-19 20:32:27 +0000
commit 8bc6a58df7046b4d6f4b51eb274c7e60fea396ff (patch)
tree ac6aa639279f5cf2048cb147a91cbea98c8088a4 /openjdkjvmti
parent ee7471ec0a7aba338c3ac90de0f2ef0be9a35fed (diff)
Revert^17 "Thread suspension cleanup and deadlock fix"
This reverts commit c6371b52df0da31acc174a3526274417b7aac0a7.

Reason for revert: This seems to have two remaining issues:

1. The second DCHECK in WaitForFlipFunction is not completely guaranteed to hold, resulting in failures for 658-fp-read-barrier.

2. WaitForSuspendBarrier seems to time out occasionally, possibly spuriously. We currently fail when the futex times out once; that is probably incompatible with the app freezer, so we should retry a few times.

Change-Id: Ibd8909b31083fc29e6d4f1fcde003d08eb16fc0a
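The second issue sketches its own fix: one futex timeout should not be treated as failure, because the app freezer can legitimately stop the target for longer than a single timeout period. Below is a minimal illustration of that retry shape in portable C++, with std::condition_variable standing in for the futex wait; the retry count and timeout are assumed values, not ART's.

#include <chrono>
#include <condition_variable>
#include <mutex>

// Sketch only, not ART code: a suspend-barrier wait that retries the timed
// wait a few times before reporting failure, so a single spurious timeout
// (e.g. while the app freezer has stopped the target) does not abort the
// suspension.
class SuspendBarrierSketch {
 public:
  // Called by the target thread once it has acknowledged suspension.
  void Pass() {
    std::lock_guard<std::mutex> lg(mu_);
    passed_ = true;
    cv_.notify_all();
  }

  // Returns true once the barrier is passed; false only after kMaxRetries
  // consecutive full timeouts.
  bool WaitWithRetry() {
    constexpr int kMaxRetries = 3;                      // assumed value
    constexpr auto kTimeout = std::chrono::seconds(1);  // assumed value
    std::unique_lock<std::mutex> lk(mu_);
    for (int attempt = 0; attempt < kMaxRetries; ++attempt) {
      if (cv_.wait_for(lk, kTimeout, [this] { return passed_; })) {
        return true;
      }
      // Timed out once; possibly spurious. Go around and wait again.
    }
    return false;
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  bool passed_ = false;
};

Only several consecutive full timeouts are reported as failure, so a frozen app no longer looks like a lost suspend barrier.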
Diffstat (limited to 'openjdkjvmti')
-rw-r--r--  openjdkjvmti/ti_heap.cc         7
-rw-r--r--  openjdkjvmti/ti_object.cc      14
-rw-r--r--  openjdkjvmti/ti_stack.cc       12
-rw-r--r--  openjdkjvmti/ti_thread.cc      54
-rw-r--r--  openjdkjvmti/ti_threadgroup.cc 26
5 files changed, 41 insertions, 72 deletions
diff --git a/openjdkjvmti/ti_heap.cc b/openjdkjvmti/ti_heap.cc
index 80bfa0ff43..662f464ad2 100644
--- a/openjdkjvmti/ti_heap.cc
+++ b/openjdkjvmti/ti_heap.cc
@@ -975,13 +975,6 @@ class FollowReferencesHelper final {
jvmtiHeapReferenceKind GetReferenceKind(const art::RootInfo& info,
jvmtiHeapReferenceInfo* ref_info)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- // We do not necessarily hold thread_list_lock_ here, but we may if we are called from
- // VisitThreadRoots, which can happen from JVMTI FollowReferences. If it was acquired in
- // ThreadList::VisitRoots, it's unsafe to temporarily release it. Thus we act as if we did
- // not hold the thread_list_lock_ here, and relax CHECKs appropriately. If it does happen,
- // we are in a SuspendAll situation with concurrent GC disabled, and should not need to run
- // flip functions. TODO: Find a way to clean this up.
-
// TODO: Fill in ref_info.
memset(ref_info, 0, sizeof(jvmtiHeapReferenceInfo));
diff --git a/openjdkjvmti/ti_object.cc b/openjdkjvmti/ti_object.cc
index 3333a8a834..f37df86048 100644
--- a/openjdkjvmti/ti_object.cc
+++ b/openjdkjvmti/ti_object.cc
@@ -105,17 +105,13 @@ jvmtiError ObjectUtil::GetObjectMonitorUsage(
notify_wait.push_back(jni->AddLocalReference<jthread>(thd->GetPeerFromOtherThread()));
wait.push_back(jni->AddLocalReference<jthread>(thd->GetPeerFromOtherThread()));
}
- // Scan all threads to see which are waiting on this particular monitor.
- std::list<art::Thread*> thread_list;
{
- // Since we're in a SuspendAll, exiting threads are not a concern.
+ // Scan all threads to see which are waiting on this particular monitor.
art::MutexLock tll(self, *art::Locks::thread_list_lock_);
- thread_list = art::Runtime::Current()->GetThreadList()->GetList();
- }
- for (art::Thread* thd : thread_list) {
- if (thd != info.owner_ && target.Ptr() == thd->GetMonitorEnterObject()) {
- art::mirror::Object* peer = thd->GetPeerFromOtherThread();
- wait.push_back(jni->AddLocalReference<jthread>(peer));
+ for (art::Thread* thd : art::Runtime::Current()->GetThreadList()->GetList()) {
+ if (thd != info.owner_ && target.Ptr() == thd->GetMonitorEnterObject()) {
+ wait.push_back(jni->AddLocalReference<jthread>(thd->GetPeerFromOtherThread()));
+ }
}
}
}
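Both sides of this hunk walk the runtime's thread list; the difference is how long thread_list_lock_ stays held. The deleted code snapshots the list and iterates outside the lock (safe here only because a SuspendAll keeps threads from exiting), while the restored code iterates under the lock. A generic sketch of the two patterns, with stand-in names rather than the ART API:

#include <list>
#include <mutex>
#include <vector>

struct Thread;                    // opaque stand-in

std::mutex thread_list_mutex;     // stand-in for thread_list_lock_
std::list<Thread*> live_threads;  // stand-in for ThreadList::GetList()

// Restored pattern: iterate while holding the lock, so no thread can be
// removed from the list mid-scan.
template <typename Fn>
void ForEachThreadLocked(Fn fn) {
  std::lock_guard<std::mutex> lg(thread_list_mutex);
  for (Thread* t : live_threads) {
    fn(t);
  }
}

// Reverted pattern: snapshot under the lock, then iterate outside it. This
// is safe only if something else (here, a SuspendAll) guarantees that the
// snapshotted threads cannot exit in the meantime.
std::vector<Thread*> SnapshotThreads() {
  std::lock_guard<std::mutex> lg(thread_list_mutex);
  return std::vector<Thread*>(live_threads.begin(), live_threads.end());
}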
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index a56081408a..9af8861260 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -363,13 +363,7 @@ static void RunCheckpointAndWait(Data* data, size_t max_frame_count)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
// Note: requires the mutator lock as the checkpoint requires the mutator lock.
GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
- // TODO(b/253671779): Replace this use of RunCheckpointUnchecked() with RunCheckpoint(). This is
- // currently not possible, since the following undesirable call chain (abbreviated here) is then
- // possible and exercised by current tests: (jvmti) GetAllStackTraces -> <this function> ->
- // RunCheckpoint -> GetStackTraceVisitor -> EncodeMethodId -> Class::EnsureMethodIds ->
- // Class::Alloc -> AllocObjectWithAllocator -> potentially suspends, or runs GC, etc. -> CHECK
- // failure.
- size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpointUnchecked(&closure);
+ size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
if (barrier_count == 0) {
return;
}
@@ -550,6 +544,7 @@ jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
// Found the thread.
art::MutexLock mu(self, mutex);
+ threads.push_back(thread);
thread_list_indices.push_back(index);
frames.emplace_back(new std::vector<jvmtiFrameInfo>());
@@ -567,6 +562,7 @@ jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
// Storage. Only access directly after completion.
+ std::vector<art::Thread*> threads;
std::vector<size_t> thread_list_indices;
std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
@@ -604,10 +600,12 @@ jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
jvmtiStackInfo& stack_info = stack_info_array.get()[index];
memset(&stack_info, 0, sizeof(jvmtiStackInfo));
+ art::Thread* self = data.threads[index];
const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();
// For the time being, set the thread to null. We don't have good ScopedLocalRef
// infrastructure.
+ DCHECK(self->GetPeerFromOtherThread() != nullptr);
stack_info.thread = nullptr;
stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
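The surrounding code suggests the checkpoint contract this hunk relies on: the call returns how many threads will run the closure asynchronously, the caller then waits for that many check-ins, and a count of zero means there is nothing to wait for. A generic count-down barrier sketch of that pattern (not ART's Barrier class):

#include <condition_variable>
#include <mutex>

// Generic count-down barrier: the caller waits until `count` participants
// (here, threads running a checkpoint closure) have checked in.
class CountBarrier {
 public:
  explicit CountBarrier(int count) : remaining_(count) {}

  // Called by each participant when its closure has run.
  void CheckIn() {
    std::lock_guard<std::mutex> lg(mu_);
    if (--remaining_ == 0) {
      cv_.notify_all();
    }
  }

  // Blocks until all participants have checked in; returns immediately if
  // the barrier was created with a count of zero.
  void Wait() {
    std::unique_lock<std::mutex> lk(mu_);
    cv_.wait(lk, [this] { return remaining_ <= 0; });
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int remaining_;
};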
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 191b63c050..13eebbff04 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -289,9 +289,7 @@ jvmtiError ThreadUtil::GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadI
info_ptr->is_daemon = target->IsDaemon();
- art::ObjPtr<art::mirror::Object> peer = target->LockedGetPeerFromOtherThread();
- // *target may be invalid here since we may have temporarily released thread_list_lock_.
- target = nullptr; // Value should not be used.
+ art::ObjPtr<art::mirror::Object> peer = target->GetPeerFromOtherThread();
// ThreadGroup.
if (peer != nullptr) {
@@ -549,7 +547,6 @@ static jint GetJavaStateFromInternal(const InternalThreadState& state) {
// Suspends the current thread if it has any suspend requests on it.
void ThreadUtil::SuspendCheck(art::Thread* self) {
- DCHECK(!self->ReadFlag(art::ThreadFlag::kSuspensionImmune));
art::ScopedObjectAccess soa(self);
// Really this is only needed if we are in FastJNI and actually have the mutator_lock_ already.
self->FullSuspendCheck();
@@ -643,30 +640,20 @@ jvmtiError ThreadUtil::GetAllThreads(jvmtiEnv* env,
art::MutexLock mu(current, *art::Locks::thread_list_lock_);
std::list<art::Thread*> thread_list = art::Runtime::Current()->GetThreadList()->GetList();
- // We have to be careful with threads exiting while we build this list.
- std::vector<art::ThreadExitFlag> tefs(thread_list.size());
- auto i = tefs.begin();
- for (art::Thread* thd : thread_list) {
- thd->NotifyOnThreadExit(&*i++);
- }
- DCHECK(i == tefs.end());
std::vector<art::ObjPtr<art::mirror::Object>> peers;
- i = tefs.begin();
for (art::Thread* thread : thread_list) {
- art::ThreadExitFlag* tef = &*i++;
- // Skip threads that have since exited or are still starting.
- if (!tef->HasExited() && !thread->IsStillStarting()) {
- // LockedGetPeerFromOtherThreads() may release lock!
- art::ObjPtr<art::mirror::Object> peer = thread->LockedGetPeerFromOtherThread(tef);
- if (peer != nullptr) {
- peers.push_back(peer);
- }
+ // Skip threads that are still starting.
+ if (thread->IsStillStarting()) {
+ continue;
+ }
+
+ art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
+ if (peer != nullptr) {
+ peers.push_back(peer);
}
- thread->UnregisterThreadExitFlag(tef);
}
- DCHECK(i == tefs.end());
if (peers.empty()) {
*threads_count_ptr = 0;
@@ -678,8 +665,8 @@ jvmtiError ThreadUtil::GetAllThreads(jvmtiEnv* env,
return data_result;
}
jthread* threads = reinterpret_cast<jthread*>(data);
- for (size_t j = 0; j != peers.size(); ++j) {
- threads[j] = soa.AddLocalReference<jthread>(peers[j]);
+ for (size_t i = 0; i != peers.size(); ++i) {
+ threads[i] = soa.AddLocalReference<jthread>(peers[i]);
}
*threads_count_ptr = static_cast<jint>(peers.size());
@@ -913,13 +900,17 @@ jvmtiError ThreadUtil::SuspendOther(art::Thread* self,
}
}
}
+ bool timeout = true;
art::Thread* ret_target = art::Runtime::Current()->GetThreadList()->SuspendThreadByPeer(
- target_jthread, art::SuspendReason::kForUserCode);
- if (ret_target == nullptr) {
+ target_jthread,
+ art::SuspendReason::kForUserCode,
+ &timeout);
+ if (ret_target == nullptr && !timeout) {
// TODO It would be good to get more information about why exactly the thread failed to
// suspend.
return ERR(INTERNAL);
- } else {
+ } else if (!timeout) {
+ // we didn't time out and got a result.
return OK;
}
// We timed out. Just go around and try again.
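The reverted loop distinguishes three outcomes per attempt: the thread was suspended, the attempt failed outright, or it timed out and the loop goes around again. A condensed, self-contained sketch of that control flow, with TrySuspend() as a hypothetical stand-in for SuspendThreadByPeer and its timeout out-parameter (the two-timeouts-then-success behavior exists only to make the sketch runnable):

#include <cstdio>

enum class SuspendResult { kSuspended, kFailed, kTimedOut };

// Hypothetical stand-in for SuspendThreadByPeer plus its timeout out-param;
// for the sketch it times out twice, then succeeds.
SuspendResult TrySuspend() {
  static int calls = 0;
  return ++calls < 3 ? SuspendResult::kTimedOut : SuspendResult::kSuspended;
}

// Condensed shape of the reverted SuspendOther loop.
bool SuspendWithRetry() {
  while (true) {
    switch (TrySuspend()) {
      case SuspendResult::kSuspended:
        return true;   // non-null result: the thread is suspended
      case SuspendResult::kFailed:
        return false;  // null result without timeout: ERR(INTERNAL)
      case SuspendResult::kTimedOut:
        break;         // "We timed out. Just go around and try again."
    }
  }
}

int main() {
  std::printf("suspended: %d\n", SuspendWithRetry());
  return 0;
}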
@@ -936,11 +927,10 @@ jvmtiError ThreadUtil::SuspendSelf(art::Thread* self) {
// This can only happen if we race with another thread to suspend 'self' and we lose.
return ERR(THREAD_SUSPENDED);
}
- {
- // IncrementSuspendCount normally needs thread_list_lock_ to ensure the thread stays
- // around. In this case we are the target thread, so we fake it.
- art::FakeMutexLock fmu(*art::Locks::thread_list_lock_);
- self->IncrementSuspendCount(self, nullptr, nullptr, art::SuspendReason::kForUserCode);
+ // We shouldn't be able to fail this.
+ if (!self->ModifySuspendCount(self, +1, nullptr, art::SuspendReason::kForUserCode)) {
+ // TODO More specific error would be nice.
+ return ERR(INTERNAL);
}
}
// Once we have requested the suspend we actually go to sleep. We need to do this after releasing
diff --git a/openjdkjvmti/ti_threadgroup.cc b/openjdkjvmti/ti_threadgroup.cc
index bfe6b51791..120024e8b2 100644
--- a/openjdkjvmti/ti_threadgroup.cc
+++ b/openjdkjvmti/ti_threadgroup.cc
@@ -171,26 +171,18 @@ static void GetThreads(art::Handle<art::mirror::Object> thread_group,
CHECK(thread_group != nullptr);
art::MutexLock mu(art::Thread::Current(), *art::Locks::thread_list_lock_);
- std::list<art::Thread*> thread_list = art::Runtime::Current()->GetThreadList()->GetList();
- // We have to be careful with threads exiting while we build this list.
- std::vector<art::ThreadExitFlag> tefs(thread_list.size());
- auto i = tefs.begin();
- for (art::Thread* thd : thread_list) {
- thd->NotifyOnThreadExit(&*i++);
- }
- DCHECK(i == tefs.end());
-
- i = tefs.begin();
- for (art::Thread* t : thread_list) {
- art::ThreadExitFlag* tef = &*i++;
- art::ObjPtr<art::mirror::Object> peer = t->LockedGetPeerFromOtherThread(tef);
- if (peer != nullptr && !tef->HasExited() && !t->IsStillStarting() &&
- IsInDesiredThreadGroup(thread_group, peer)) {
+ for (art::Thread* t : art::Runtime::Current()->GetThreadList()->GetList()) {
+ if (t->IsStillStarting()) {
+ continue;
+ }
+ art::ObjPtr<art::mirror::Object> peer = t->GetPeerFromOtherThread();
+ if (peer == nullptr) {
+ continue;
+ }
+ if (IsInDesiredThreadGroup(thread_group, peer)) {
thread_peers->push_back(peer);
}
- t->UnregisterThreadExitFlag(tef);
}
- DCHECK(i == tefs.end());
}
static void GetChildThreadGroups(art::Handle<art::mirror::Object> thread_group,