author Hans Boehm <hboehm@google.com> 2023-12-19 18:48:15 +0000
committer Hans Boehm <hboehm@google.com> 2023-12-19 20:32:27 +0000
commit 8bc6a58df7046b4d6f4b51eb274c7e60fea396ff (patch)
tree ac6aa639279f5cf2048cb147a91cbea98c8088a4 /openjdkjvmti/ti_stack.cc
parent ee7471ec0a7aba338c3ac90de0f2ef0be9a35fed (diff)
Revert^17 "Thread suspension cleanup and deadlock fix"
This reverts commit c6371b52df0da31acc174a3526274417b7aac0a7.

Reason for revert: This seems to have two remaining issues:

1. The second DCHECK in WaitForFlipFunction is not completely guaranteed to hold, resulting in failures for 658-fp-read-barrier.

2. WaitForSuspendBarrier seems to time out occasionally, possibly spuriously so. We fail when the futex times out once. That's probably incompatible with the app freezer. We should retry a few times.

Change-Id: Ibd8909b31083fc29e6d4f1fcde003d08eb16fc0a
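Issue 2 above concerns WaitForSuspendBarrier treating a single futex timeout as fatal. The following is a minimal, self-contained sketch of the "retry a few times" idea using a raw futex wait; the helper name, the retry count, and the 100 ms per-attempt timeout are illustrative assumptions, not the ART implementation.

// Hypothetical helper (not ART code): wait for a barrier counter to reach
// zero, tolerating a few futex timeouts before giving up.
#include <atomic>
#include <cerrno>
#include <cstdint>
#include <ctime>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

bool WaitForBarrierWithRetries(std::atomic<int32_t>* counter,
                               int max_attempts = 3) {
  constexpr long kTimeoutNs = 100 * 1000 * 1000;  // 100 ms per attempt (assumed).
  for (int attempt = 0; attempt < max_attempts; ++attempt) {
    int32_t cur = counter->load(std::memory_order_acquire);
    if (cur == 0) {
      return true;  // Every thread has passed the barrier.
    }
    timespec timeout{0, kTimeoutNs};
    // FUTEX_WAIT returns 0 on wake-up, or -1 with ETIMEDOUT/EAGAIN/EINTR for
    // conditions that merit re-checking the counter rather than failing.
    long rc = syscall(SYS_futex, reinterpret_cast<int32_t*>(counter),
                      FUTEX_WAIT_PRIVATE, cur, &timeout, nullptr, 0);
    if (rc != 0 && errno != ETIMEDOUT && errno != EAGAIN && errno != EINTR) {
      break;  // Unexpected futex error; fall through to the final check.
    }
  }
  return counter->load(std::memory_order_acquire) == 0;
}

With this shape, one spurious timeout (for example while the target process is stalled by the app freezer) only costs a retry instead of failing the whole wait.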
Diffstat (limited to 'openjdkjvmti/ti_stack.cc')
-rw-r--r-- openjdkjvmti/ti_stack.cc | 12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index a56081408a..9af8861260 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -363,13 +363,7 @@ static void RunCheckpointAndWait(Data* data, size_t max_frame_count)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
// Note: requires the mutator lock as the checkpoint requires the mutator lock.
GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
- // TODO(b/253671779): Replace this use of RunCheckpointUnchecked() with RunCheckpoint(). This is
- // currently not possible, since the following undesirable call chain (abbreviated here) is then
- // possible and exercised by current tests: (jvmti) GetAllStackTraces -> <this function> ->
- // RunCheckpoint -> GetStackTraceVisitor -> EncodeMethodId -> Class::EnsureMethodIds ->
- // Class::Alloc -> AllocObjectWithAllocator -> potentially suspends, or runs GC, etc. -> CHECK
- // failure.
- size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpointUnchecked(&closure);
+ size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
if (barrier_count == 0) {
return;
}
@@ -550,6 +544,7 @@ jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
// Found the thread.
art::MutexLock mu(self, mutex);
+ threads.push_back(thread);
thread_list_indices.push_back(index);
frames.emplace_back(new std::vector<jvmtiFrameInfo>());
@@ -567,6 +562,7 @@ jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
// Storage. Only access directly after completion.
+ std::vector<art::Thread*> threads;
std::vector<size_t> thread_list_indices;
std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
@@ -604,10 +600,12 @@ jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
jvmtiStackInfo& stack_info = stack_info_array.get()[index];
memset(&stack_info, 0, sizeof(jvmtiStackInfo));
+ art::Thread* self = data.threads[index];
const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();
// For the time being, set the thread to null. We don't have good ScopedLocalRef
// infrastructure.
+ DCHECK(self->GetPeerFromOtherThread() != nullptr);
stack_info.thread = nullptr;
stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
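For context on the first hunk: RunCheckpoint(&closure, nullptr) asks each registered thread to run the closure and returns barrier_count, the number of threads that will run it asynchronously; the caller must then wait until each of those threads has passed a barrier. The sketch below shows that counting-barrier pattern in generic C++ (the class and method names are placeholders, not ART's art::Barrier):

#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <thread>
#include <vector>

// Counting barrier: the requesting thread waits until every target thread
// has reported that its checkpoint closure finished running.
class CheckpointBarrier {
 public:
  void Pass() {  // Called by each target thread after running its closure.
    std::lock_guard<std::mutex> lock(mu_);
    ++passed_;
    cv_.notify_all();
  }
  void WaitFor(std::size_t expected) {  // Called with the RunCheckpoint count.
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [&] { return passed_ >= expected; });
  }
 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::size_t passed_ = 0;
};

int main() {
  CheckpointBarrier barrier;
  constexpr std::size_t kBarrierCount = 4;  // Stand-in for barrier_count.
  std::vector<std::thread> workers;
  for (std::size_t i = 0; i < kBarrierCount; ++i) {
    workers.emplace_back([&barrier] {
      // ... each target thread runs its checkpoint closure here ...
      barrier.Pass();
    });
  }
  barrier.WaitFor(kBarrierCount);  // Mirrors the wait after RunCheckpoint().
  for (std::thread& t : workers) {
    t.join();
  }
  return 0;
}

If barrier_count is zero, no thread will ever pass the barrier, so waiting would hang; that is why the hunk returns early in that case.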