Diffstat (limited to 'runtime/thread.cc')
-rw-r--r--  runtime/thread.cc | 62
1 file changed, 50 insertions(+), 12 deletions(-)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 632a380bf0..7b6540436a 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -65,6 +65,7 @@
#include "object_lock.h"
#include "quick_exception_handler.h"
#include "quick/quick_method_frame_info.h"
+#include "read_barrier-inl.h"
#include "reflection.h"
#include "runtime.h"
#include "runtime_callbacks.h"
@@ -1583,15 +1584,24 @@ void Thread::DumpState(std::ostream& os) const {
}
struct StackDumpVisitor : public StackVisitor {
- StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
+ StackDumpVisitor(std::ostream& os_in,
+ Thread* thread_in,
+ Context* context,
+ bool can_allocate_in,
+ bool check_suspended = true,
+ bool dump_locks_in = true)
REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ : StackVisitor(thread_in,
+ context,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ check_suspended),
os(os_in),
can_allocate(can_allocate_in),
last_method(nullptr),
last_line_number(0),
repetition_count(0),
- frame_count(0) {}
+ frame_count(0),
+ dump_locks(dump_locks_in) {}
virtual ~StackDumpVisitor() {
if (frame_count == 0) {
@@ -1636,8 +1646,10 @@ struct StackDumpVisitor : public StackVisitor {
if (frame_count == 0) {
Monitor::DescribeWait(os, GetThread());
}
- if (can_allocate) {
+ if (can_allocate && dump_locks) {
// Visit locks, but do not abort on errors. This would trigger a nested abort.
+ // Skip visiting locks if dump_locks is false as it would cause a bad_mutexes_held in
+ // RegTypeCache::RegTypeCache due to thread_list_lock.
Monitor::VisitLocks(this, DumpLockedObject, &os, false);
}
}
@@ -1681,6 +1693,7 @@ struct StackDumpVisitor : public StackVisitor {
int last_line_number;
int repetition_count;
int frame_count;
+ const bool dump_locks;
};
static bool ShouldShowNativeStack(const Thread* thread)
@@ -1712,7 +1725,7 @@ static bool ShouldShowNativeStack(const Thread* thread)
return current_method != nullptr && current_method->IsNative();
}
-void Thread::DumpJavaStack(std::ostream& os) const {
+void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_locks) const {
// If flip_function is not null, it means we have run a checkpoint
// before the thread wakes up to execute the flip function and the
// thread roots haven't been forwarded. So the following access to
@@ -1741,7 +1754,7 @@ void Thread::DumpJavaStack(std::ostream& os) const {
std::unique_ptr<Context> context(Context::Create());
StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
- !tls32_.throwing_OutOfMemoryError);
+ !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks);
dumper.WalkStack();
if (have_exception) {
@@ -1767,10 +1780,15 @@ void Thread::DumpStack(std::ostream& os,
// If we're currently in native code, dump that stack before dumping the managed stack.
if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
- ArtMethod* method = GetCurrentMethod(nullptr, !(dump_for_abort || force_dump_stack));
+ ArtMethod* method =
+ GetCurrentMethod(nullptr,
+ /*check_suspended*/ !force_dump_stack,
+ /*abort_on_error*/ !(dump_for_abort || force_dump_stack));
DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method);
}
- DumpJavaStack(os);
+ DumpJavaStack(os,
+ /*check_suspended*/ !force_dump_stack,
+ /*dump_locks*/ !force_dump_stack);
} else {
os << "Not able to dump stack of thread that isn't suspended";
}
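
A caller-side sketch of the new behavior (the wrapper below is illustrative, not part of this change): when force_dump_stack is set, DumpStack now asks DumpJavaStack to skip both the suspension check and the lock dump, roughly equivalent to:

#include <ostream>
#include "base/mutex.h"  // REQUIRES_SHARED, Locks (ART-internal header)
#include "thread.h"      // Thread (ART-internal header)

namespace art {

// Illustrative helper, not part of this change: dump another thread's Java
// stack without requiring it to be suspended and without visiting its locks.
void ForceDumpJavaStack(Thread* target, std::ostream& os)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // check_suspended=false skips the suspension assertion in the stack walk;
  // dump_locks=false skips Monitor::VisitLocks, which needs thread_list_lock.
  target->DumpJavaStack(os, /*check_suspended*/ false, /*dump_locks*/ false);
}

}  // namespace art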
@@ -1845,6 +1863,7 @@ Thread::Thread(bool daemon)
: tls32_(daemon),
wait_monitor_(nullptr),
interrupted_(false),
+ custom_tls_(nullptr),
can_call_into_java_(true) {
wait_mutex_ = new Mutex("a thread wait mutex");
wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
@@ -2918,9 +2937,12 @@ Context* Thread::GetLongJumpContext() {
// Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
struct CurrentMethodVisitor FINAL : public StackVisitor {
- CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
+ CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ : StackVisitor(thread,
+ context,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ check_suspended),
this_object_(nullptr),
method_(nullptr),
dex_pc_(0),
@@ -2944,8 +2966,13 @@ struct CurrentMethodVisitor FINAL : public StackVisitor {
const bool abort_on_error_;
};
-ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
- CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
+ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc,
+ bool check_suspended,
+ bool abort_on_error) const {
+ CurrentMethodVisitor visitor(const_cast<Thread*>(this),
+ nullptr,
+ check_suspended,
+ abort_on_error);
visitor.WalkStack(false);
if (dex_pc != nullptr) {
*dex_pc = visitor.dex_pc_;
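
Since the new check_suspended parameter is inserted before abort_on_error, the matching declarations in runtime/thread.h presumably gain default arguments so existing callers keep compiling. A sketch of what those declarations would look like (the defaults are an assumption; this diff only covers thread.cc):

// Presumed declarations in runtime/thread.h (defaults assumed, not shown in this diff).
void DumpJavaStack(std::ostream& os,
                   bool check_suspended = true,
                   bool dump_locks = true) const
    REQUIRES_SHARED(Locks::mutator_lock_);

ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
                            bool check_suspended = true,
                            bool abort_on_error = true) const
    REQUIRES_SHARED(Locks::mutator_lock_);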
@@ -3457,4 +3484,15 @@ bool Thread::IsAotCompiler() {
return Runtime::Current()->IsAotCompiler();
}
+mirror::Object* Thread::GetPeerFromOtherThread() const {
+ mirror::Object* peer = GetPeer();
+ if (kUseReadBarrier && Current()->GetIsGcMarking()) {
+ // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
+ // may have not been flipped yet and peer may be a from-space (stale) ref. So explicitly
+ // mark/forward it here.
+ peer = art::ReadBarrier::Mark(peer);
+ }
+ return peer;
+}
+
} // namespace art
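
A usage sketch for the new accessor (the caller below is illustrative): code that inspects another thread's java.lang.Thread peer, for example while dumping it, should go through GetPeerFromOtherThread() rather than GetPeer(), so that a from-space reference left over from an unfinished concurrent-copying thread flip is marked and forwarded before use:

#include <ostream>
#include "base/mutex.h"      // REQUIRES_SHARED, Locks (ART-internal header)
#include "mirror/object.h"   // mirror::Object (ART-internal header)
#include "thread.h"          // Thread (ART-internal header)

namespace art {

// Illustrative only: log the peer object of another thread during a dump.
void LogPeer(Thread* other, std::ostream& os)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // GetPeer() could hand back a stale from-space reference mid-flip;
  // GetPeerFromOtherThread() marks/forwards it when the CC collector is marking.
  mirror::Object* peer = other->GetPeerFromOtherThread();
  os << "thread peer at " << static_cast<const void*>(peer) << "\n";
}

}  // namespace art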