| author | 2017-01-31 20:18:59 +0000 |
|---|---|
| committer | 2017-01-31 20:19:00 +0000 |
| commit | 318797a758f81e7f8a0b440129238b9b5eb1b74e (patch) |
| tree | 4d8558ec590cb1938c7e9cefcb983615a177437d |
| parent | 44790fe8b33c7a80da2e77787d5f8d4d4549114b (diff) |
| parent | 13c1635e07b8ee09120e267e2abe860aee05db7c (diff) |
Merge "Force stack dump to diagnose empty checkpoint timeout."
| -rw-r--r-- | runtime/gc/collector/concurrent_copying.cc | 5 |
|---|---|---|
| -rw-r--r-- | runtime/thread.cc | 14 |
| -rw-r--r-- | runtime/thread.h | 6 |
3 files changed, 16 insertions, 9 deletions
```diff
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index c9e5746990..0819ba04f7 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -877,7 +877,10 @@ void ConcurrentCopying::IssueEmptyCheckpoint() {
           thread->ReadFlag(kEmptyCheckpointRequest)) {
         // Found a runnable thread that hasn't responded to the empty checkpoint request.
         // Assume it's stuck and safe to dump its stack.
-        thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
+        thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
+                     /*dump_native_stack*/ true,
+                     /*backtrace_map*/ nullptr,
+                     /*force_dump_stack*/ true);
       }
     }
   }
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3c7a71aba9..eea68aa74e 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1047,9 +1047,10 @@ void Thread::ShortDump(std::ostream& os) const {
      << "]";
 }
 
-void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map) const {
+void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map,
+                  bool force_dump_stack) const {
   DumpState(os);
-  DumpStack(os, dump_native_stack, backtrace_map);
+  DumpStack(os, dump_native_stack, backtrace_map, force_dump_stack);
 }
 
 mirror::String* Thread::GetThreadName() const {
@@ -1750,7 +1751,8 @@ void Thread::DumpJavaStack(std::ostream& os) const {
 
 void Thread::DumpStack(std::ostream& os,
                        bool dump_native_stack,
-                       BacktraceMap* backtrace_map) const {
+                       BacktraceMap* backtrace_map,
+                       bool force_dump_stack) const {
   // TODO: we call this code when dying but may not have suspended the thread ourself. The
   //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
   //       the race with the thread_suspend_count_lock_).
@@ -1761,11 +1763,11 @@ void Thread::DumpStack(std::ostream& os,
     // thread's stack in debug builds where we'll hit the not suspended check in the stack walk.
     safe_to_dump = (safe_to_dump || dump_for_abort);
   }
-  if (safe_to_dump) {
+  if (safe_to_dump || force_dump_stack) {
     // If we're currently in native code, dump that stack before dumping the managed stack.
-    if (dump_native_stack && (dump_for_abort || ShouldShowNativeStack(this))) {
+    if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
-      ArtMethod* method = GetCurrentMethod(nullptr, !dump_for_abort);
+      ArtMethod* method = GetCurrentMethod(nullptr, !(dump_for_abort || force_dump_stack));
       DumpNativeStack(os, GetTid(), backtrace_map, "  native: ", method);
     }
     DumpJavaStack(os);
diff --git a/runtime/thread.h b/runtime/thread.h
index b609e723e9..b59eac68e9 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -196,7 +196,8 @@ class Thread {
   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
   void Dump(std::ostream& os,
             bool dump_native_stack = true,
-            BacktraceMap* backtrace_map = nullptr) const
+            BacktraceMap* backtrace_map = nullptr,
+            bool force_dump_stack = false) const
       REQUIRES(!Locks::thread_suspend_count_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -1204,7 +1205,8 @@ class Thread {
   void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
   void DumpStack(std::ostream& os,
                  bool dump_native_stack = true,
-                 BacktraceMap* backtrace_map = nullptr) const
+                 BacktraceMap* backtrace_map = nullptr,
+                 bool force_dump_stack = false) const
       REQUIRES(!Locks::thread_suspend_count_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
```
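The change threads a new trailing `force_dump_stack` parameter through `Thread::Dump()` and `Thread::DumpStack()`. Because it defaults to `false`, every existing caller keeps its behavior; only the concurrent-copying collector's `IssueEmptyCheckpoint()` passes `true`, so a runnable thread that never acknowledged the empty checkpoint gets its stack dumped even though it is not safely suspended. The sketch below illustrates that defaulted-parameter pattern in isolation; it uses made-up stand-in types (`FakeThread`, a boolean `suspended_` flag) rather than the real ART `Thread`, `BacktraceMap`, or locking annotations, so treat it as a minimal model of the idea, not the runtime's actual dump path.

```cpp
// Simplified, hypothetical model of the "force the stack dump" pattern.
// Not the real ART API: FakeThread and its members are illustration-only.
#include <iostream>
#include <ostream>

class FakeThread {
 public:
  explicit FakeThread(bool suspended) : suspended_(suspended) {}

  // Trailing parameter defaults to false, so pre-existing call sites
  // (e.g. a SIGQUIT-style dumper) compile and behave exactly as before.
  void Dump(std::ostream& os,
            bool dump_native_stack = true,
            bool force_dump_stack = false) const {
    DumpState(os);
    DumpStack(os, dump_native_stack, force_dump_stack);
  }

 private:
  void DumpState(std::ostream& os) const {
    os << "state: " << (suspended_ ? "Suspended" : "Runnable") << "\n";
  }

  void DumpStack(std::ostream& os, bool dump_native_stack, bool force_dump_stack) const {
    // Normally the stack is only walked when the thread is safely suspended;
    // force_dump_stack overrides that check for diagnosing a stuck thread.
    bool safe_to_dump = suspended_;
    if (safe_to_dump || force_dump_stack) {
      if (dump_native_stack) {
        os << "  native: <frames>\n";
      }
      os << "  java: <frames>\n";
    } else {
      os << "  (stack not dumped: thread not suspended)\n";
    }
  }

  bool suspended_;
};

int main() {
  FakeThread stuck_thread(/*suspended=*/false);

  // Old-style call: the stack is skipped because the thread is not suspended.
  stuck_thread.Dump(std::cout);

  // Empty-checkpoint-timeout style call: force the dump to diagnose the hang.
  stuck_thread.Dump(std::cout, /*dump_native_stack=*/true, /*force_dump_stack=*/true);
  return 0;
}
```

Keeping the override as a defaulted last argument is what lets the header change stay small: no call sites outside the collector's timeout path need to be touched.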