ART: Factor out stack dumping with monitor objects
Factor generic stack walking with monitor-object visitation out into a new
MonitorObjectsStackVisitor base class. Rewrite StackDumpVisitor in terms of
it, removing the now-unused Monitor::DescribeWait along the way.
Bug: 70538431
Test: m test-art-host
Test: manual (SIGQUIT dumping)
Change-Id: I6f359f9a5f665f308328ad466bc7a437f52aea24
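
For illustration only (not part of this change; ExampleLockCountVisitor is a
made-up name): a client of the new base class implements the hooks declared
in monitor_objects_stack_visitor.h and drives the walk with the inherited
StackVisitor::WalkStack(). Skipping runtime methods, querying
Monitor::FetchState for the top frame and routing locked objects through
Monitor::VisitLocks all stay in the base class. A minimal sketch:

  // Sketch only: counts Java frames and locked monitor objects of a thread.
  class ExampleLockCountVisitor FINAL : public MonitorObjectsStackVisitor {
   public:
    ExampleLockCountVisitor(Thread* thread, Context* context)
        REQUIRES_SHARED(Locks::mutator_lock_)
        : MonitorObjectsStackVisitor(thread, context) {}

    size_t frames = 0u;
    size_t locked_objects = 0u;

   protected:
    VisitMethodResult StartMethod(ArtMethod* m ATTRIBUTE_UNUSED,
                                  size_t frame_nr ATTRIBUTE_UNUSED) OVERRIDE
        REQUIRES_SHARED(Locks::mutator_lock_) {
      ++frames;
      return VisitMethodResult::kContinueMethod;
    }
    VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE
        REQUIRES_SHARED(Locks::mutator_lock_) {
      return VisitMethodResult::kContinueMethod;
    }
    void VisitWaitingObject(mirror::Object* obj ATTRIBUTE_UNUSED,
                            ThreadState state ATTRIBUTE_UNUSED) OVERRIDE
        REQUIRES_SHARED(Locks::mutator_lock_) {}
    void VisitSleepingObject(mirror::Object* obj ATTRIBUTE_UNUSED) OVERRIDE
        REQUIRES_SHARED(Locks::mutator_lock_) {}
    void VisitBlockedOnObject(mirror::Object* obj ATTRIBUTE_UNUSED,
                              ThreadState state ATTRIBUTE_UNUSED,
                              uint32_t owner_tid ATTRIBUTE_UNUSED) OVERRIDE
        REQUIRES_SHARED(Locks::mutator_lock_) {}
    void VisitLockedObject(mirror::Object* obj ATTRIBUTE_UNUSED) OVERRIDE
        REQUIRES_SHARED(Locks::mutator_lock_) {
      ++locked_objects;
    }
  };
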
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index ba18f8d..cfef9c7 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1289,65 +1289,6 @@
}
}
-void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
- // Determine the wait message and object we're waiting or blocked upon.
- mirror::Object* pretty_object;
- uint32_t lock_owner;
- ThreadState state = FetchState(thread, &pretty_object, &lock_owner);
-
- const char* wait_message = nullptr;
- switch (state) {
- case kWaiting:
- case kTimedWaiting:
- wait_message = " - waiting on ";
- break;
-
- case kSleeping:
- wait_message = " - sleeping on ";
- break;
-
- case kBlocked:
- wait_message = " - waiting to lock ";
- break;
-
- case kWaitingForLockInflation:
- wait_message = " - waiting for lock inflation of ";
- break;
-
- default:
- break;
- }
-
- if (wait_message == nullptr) {
- return;
- }
-
- if (pretty_object == nullptr) {
- os << wait_message << "an unknown object";
- } else {
- if ((pretty_object->GetLockWord(true).GetState() == LockWord::kThinLocked) &&
- Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
- // Getting the identity hashcode here would result in lock inflation and suspension of the
- // current thread, which isn't safe if this is the only runnable thread.
- os << wait_message << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)",
- reinterpret_cast<intptr_t>(pretty_object),
- pretty_object->PrettyTypeOf().c_str());
- } else {
- // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
- // Call PrettyTypeOf before IdentityHashCode since IdentityHashCode can cause thread
- // suspension and move pretty_object.
- const std::string pretty_type(pretty_object->PrettyTypeOf());
- os << wait_message << StringPrintf("<0x%08x> (a %s)", pretty_object->IdentityHashCode(),
- pretty_type.c_str());
- }
- }
- // - waiting to lock <0x613f83d8> (a java.lang.Object) held by thread 5
- if (lock_owner != ThreadList::kInvalidThreadId) {
- os << " held by thread " << lock_owner;
- }
- os << "\n";
-}
-
ThreadState Monitor::FetchState(const Thread* thread,
/* out */ mirror::Object** monitor_object,
/* out */ uint32_t* lock_owner_tid) {
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 30f47d8..f150a8c 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -94,9 +94,6 @@
bool interruptShouldThrow, ThreadState why)
REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
- static void DescribeWait(std::ostream& os, const Thread* thread)
- REQUIRES(!Locks::thread_suspend_count_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
static ThreadState FetchState(const Thread* thread,
/* out */ mirror::Object** monitor_object,
/* out */ uint32_t* lock_owner_tid)
diff --git a/runtime/monitor_objects_stack_visitor.h b/runtime/monitor_objects_stack_visitor.h
new file mode 100644
index 0000000..5c962c3
--- /dev/null
+++ b/runtime/monitor_objects_stack_visitor.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MONITOR_OBJECTS_STACK_VISITOR_H_
+#define ART_RUNTIME_MONITOR_OBJECTS_STACK_VISITOR_H_
+
+#include <android-base/logging.h>
+
+#include "art_method.h"
+#include "base/mutex.h"
+#include "monitor.h"
+#include "stack.h"
+#include "thread.h"
+#include "thread_state.h"
+
+namespace art {
+
+namespace mirror {
+class Object;
+}  // namespace mirror
+
+class Context;
+
+class MonitorObjectsStackVisitor : public StackVisitor {
+ public:
+ MonitorObjectsStackVisitor(Thread* thread_in,
+ Context* context,
+ bool check_suspended = true,
+ bool dump_locks_in = true)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ : StackVisitor(thread_in,
+ context,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ check_suspended),
+ frame_count(0u),
+ dump_locks(dump_locks_in) {}
+
+ enum class VisitMethodResult {
+ kContinueMethod,
+ kSkipMethod,
+ kEndStackWalk,
+ };
+
+ bool VisitFrame() FINAL REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = GetMethod();
+ if (m->IsRuntimeMethod()) {
+ return true;
+ }
+
+ VisitMethodResult vmrEntry = StartMethod(m, frame_count);
+ switch (vmrEntry) {
+ case VisitMethodResult::kContinueMethod:
+ break;
+ case VisitMethodResult::kSkipMethod:
+ return true;
+ case VisitMethodResult::kEndStackWalk:
+ return false;
+ }
+
+ if (frame_count == 0) {
+ // Top frame, check for blocked state.
+
+ mirror::Object* monitor_object;
+ uint32_t lock_owner_tid;
+ ThreadState state = Monitor::FetchState(GetThread(),
+ &monitor_object,
+ &lock_owner_tid);
+ switch (state) {
+ case kWaiting:
+ case kTimedWaiting:
+ VisitWaitingObject(monitor_object, state);
+ break;
+ case kSleeping:
+ VisitSleepingObject(monitor_object);
+ break;
+
+ case kBlocked:
+ case kWaitingForLockInflation:
+ VisitBlockedOnObject(monitor_object, state, lock_owner_tid);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (dump_locks) {
+      // Visit locks, but do not abort on errors: aborting could trigger a nested abort.
+      // When dump_locks is false we skip this entirely, as visiting the locks would cause a
+      // bad_mutexes_held failure in RegTypeCache::RegTypeCache due to thread_list_lock.
+ Monitor::VisitLocks(this, VisitLockedObject, this, false);
+ }
+
+ ++frame_count;
+
+ VisitMethodResult vmrExit = EndMethod(m);
+ switch (vmrExit) {
+ case VisitMethodResult::kContinueMethod:
+ case VisitMethodResult::kSkipMethod:
+ return true;
+
+ case VisitMethodResult::kEndStackWalk:
+ return false;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
+
+ protected:
+ virtual VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr)
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+ virtual VisitMethodResult EndMethod(ArtMethod* m)
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+ virtual void VisitWaitingObject(mirror::Object* obj, ThreadState state)
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+ virtual void VisitSleepingObject(mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+ virtual void VisitBlockedOnObject(mirror::Object* obj, ThreadState state, uint32_t owner_tid)
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+ virtual void VisitLockedObject(mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+ size_t frame_count;
+
+ private:
+ static void VisitLockedObject(mirror::Object* o, void* context)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ MonitorObjectsStackVisitor* self = reinterpret_cast<MonitorObjectsStackVisitor*>(context);
+ if (o != nullptr) {
+ if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
+        // We may call Thread::Dump() in the middle of the CC thread flip, so this thread's
+        // stack may not have been flipped yet and "o" may be a from-space (stale) ref, in which
+        // case the IdentityHashCode call below would crash. Explicitly mark/forward it here.
+ o = ReadBarrier::Mark(o);
+ }
+ }
+ self->VisitLockedObject(o);
+ }
+
+ const bool dump_locks;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_MONITOR_OBJECTS_STACK_VISITOR_H_
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 21b8ea5..6812105 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -70,6 +70,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
+#include "monitor_objects_stack_visitor.h"
#include "native_stack_dump.h"
#include "nativehelper/scoped_local_ref.h"
#include "nativehelper/scoped_utf_chars.h"
@@ -1756,25 +1757,22 @@
Thread::DumpState(os, this, GetTid());
}
-struct StackDumpVisitor : public StackVisitor {
+struct StackDumpVisitor : public MonitorObjectsStackVisitor {
StackDumpVisitor(std::ostream& os_in,
Thread* thread_in,
Context* context,
- bool can_allocate_in,
+ bool can_allocate,
bool check_suspended = true,
- bool dump_locks_in = true)
+ bool dump_locks = true)
REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread_in,
- context,
- StackVisitor::StackWalkKind::kIncludeInlinedFrames,
- check_suspended),
+ : MonitorObjectsStackVisitor(thread_in,
+ context,
+ check_suspended,
+ can_allocate && dump_locks),
os(os_in),
- can_allocate(can_allocate_in),
last_method(nullptr),
last_line_number(0),
- repetition_count(0),
- frame_count(0),
- dump_locks(dump_locks_in) {}
+ repetition_count(0) {}
virtual ~StackDumpVisitor() {
if (frame_count == 0) {
@@ -1782,13 +1780,12 @@
}
}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- if (m->IsRuntimeMethod()) {
- return true;
- }
+ static constexpr size_t kMaxRepetition = 3u;
+
+ VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
- const int kMaxRepetition = 3;
ObjPtr<mirror::Class> c = m->GetDeclaringClass();
ObjPtr<mirror::DexCache> dex_cache = c->GetDexCache();
int line_number = -1;
@@ -1806,67 +1803,97 @@
last_line_number = line_number;
last_method = m;
}
- if (repetition_count < kMaxRepetition) {
- os << " at " << m->PrettyMethod(false);
- if (m->IsNative()) {
- os << "(Native method)";
- } else {
- const char* source_file(m->GetDeclaringClassSourceFile());
- os << "(" << (source_file != nullptr ? source_file : "unavailable")
- << ":" << line_number << ")";
- }
- os << "\n";
- if (frame_count == 0) {
- Monitor::DescribeWait(os, GetThread());
- }
- if (can_allocate && dump_locks) {
- // Visit locks, but do not abort on errors. This would trigger a nested abort.
- // Skip visiting locks if dump_locks is false as it would cause a bad_mutexes_held in
- // RegTypeCache::RegTypeCache due to thread_list_lock.
- Monitor::VisitLocks(this, DumpLockedObject, &os, false);
- }
+
+ if (repetition_count >= kMaxRepetition) {
+      // Skip visiting (i.e. printing) anything for this frame.
+ return VisitMethodResult::kSkipMethod;
}
- ++frame_count;
- return true;
+ os << " at " << m->PrettyMethod(false);
+ if (m->IsNative()) {
+ os << "(Native method)";
+ } else {
+ const char* source_file(m->GetDeclaringClassSourceFile());
+ os << "(" << (source_file != nullptr ? source_file : "unavailable")
+ << ":" << line_number << ")";
+ }
+ os << "\n";
+ // Go and visit locks.
+ return VisitMethodResult::kContinueMethod;
}
- static void DumpLockedObject(mirror::Object* o, void* context)
+ VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE {
+ return VisitMethodResult::kContinueMethod;
+ }
+
+ void VisitWaitingObject(mirror::Object* obj, ThreadState state ATTRIBUTE_UNUSED)
+ OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_) {
- std::ostream& os = *reinterpret_cast<std::ostream*>(context);
- os << " - locked ";
- if (o == nullptr) {
- os << "an unknown object";
+ PrintObject(obj, " - waiting on ", ThreadList::kInvalidThreadId);
+ }
+ void VisitSleepingObject(mirror::Object* obj)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ PrintObject(obj, " - sleeping on ", ThreadList::kInvalidThreadId);
+ }
+ void VisitBlockedOnObject(mirror::Object* obj,
+ ThreadState state,
+ uint32_t owner_tid)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const char* msg;
+ switch (state) {
+ case kBlocked:
+ msg = " - waiting to lock ";
+ break;
+
+ case kWaitingForLockInflation:
+ msg = " - waiting for lock inflation of ";
+ break;
+
+ default:
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
+ PrintObject(obj, msg, owner_tid);
+ }
+ void VisitLockedObject(mirror::Object* obj)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ PrintObject(obj, " - locked ", ThreadList::kInvalidThreadId);
+ }
+
+ void PrintObject(mirror::Object* obj,
+ const char* msg,
+ uint32_t owner_tid) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (obj == nullptr) {
+ os << msg << "an unknown object";
} else {
- if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
- // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
- // may have not been flipped yet and "o" may be a from-space (stale) ref, in which case the
- // IdentityHashCode call below will crash. So explicitly mark/forward it here.
- o = ReadBarrier::Mark(o);
- }
- if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
+ if ((obj->GetLockWord(true).GetState() == LockWord::kThinLocked) &&
Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
// Getting the identity hashcode here would result in lock inflation and suspension of the
// current thread, which isn't safe if this is the only runnable thread.
- os << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", reinterpret_cast<intptr_t>(o),
- o->PrettyTypeOf().c_str());
+ os << msg << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)",
+ reinterpret_cast<intptr_t>(obj),
+ obj->PrettyTypeOf().c_str());
} else {
- // IdentityHashCode can cause thread suspension, which would invalidate o if it moved. So
- // we get the pretty type beofre we call IdentityHashCode.
- const std::string pretty_type(o->PrettyTypeOf());
- os << StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), pretty_type.c_str());
+ // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
+        // Call PrettyTypeOf before IdentityHashCode since IdentityHashCode can cause thread
+        // suspension and move obj.
+ const std::string pretty_type(obj->PrettyTypeOf());
+ os << msg << StringPrintf("<0x%08x> (a %s)", obj->IdentityHashCode(), pretty_type.c_str());
}
}
+ if (owner_tid != ThreadList::kInvalidThreadId) {
+ os << " held by thread " << owner_tid;
+ }
os << "\n";
}
std::ostream& os;
- const bool can_allocate;
ArtMethod* last_method;
int last_line_number;
- int repetition_count;
- int frame_count;
- const bool dump_locks;
+ size_t repetition_count;
};
static bool ShouldShowNativeStack(const Thread* thread)
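
Usage note (a sketch, not code from this patch): a StackDumpVisitor, like any
MonitorObjectsStackVisitor subclass, is still driven as an ordinary
StackVisitor, which is what the existing caller in Thread::DumpJavaStack does;
roughly (flag names below are placeholders for the real arguments):

  std::unique_ptr<Context> context(Context::Create());
  StackDumpVisitor dumper(os, thread, context.get(),
                          /* can_allocate */ can_allocate,
                          check_suspended,
                          dump_locks);
  dumper.WalkStack();  // Calls back into StartMethod/Visit*Object/EndMethod.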