Revert "Revert "Revert "Add an option to disable native stack dumping on SIGQUIT."""
This reverts commit 74d25c9040dfd1e0985987eb38817e526878a3db.
Reason for revert: The condition that caused the original failures appears to be gone.
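For context: before this revert, native frames could be suppressed in SIGQUIT thread dumps per run via the option removed below, e.g. (jar path and main class are placeholders for illustration):

  dalvikvm -XX:DumpNativeStackOnSigQuit:false -cp /data/local/tmp/app.jar Main

With the option gone, ThreadList::Dump and Thread::Dump no longer take a dump_native_stack flag, so SIGQUIT thread dumps always include native stacks (subject to ShouldShowNativeStack).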
Bug: 74121887
Test: Ran 004-ThreadStress many times against the target.
Change-Id: Ie5bd050112e654a99bdfea7d6dd673882ca35567
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 5518eb2..470287b 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -161,10 +161,6 @@
.Define({"-XX:EnableHSpaceCompactForOOM", "-XX:DisableHSpaceCompactForOOM"})
.WithValues({true, false})
.IntoKey(M::EnableHSpaceCompactForOOM)
- .Define("-XX:DumpNativeStackOnSigQuit:_")
- .WithType<bool>()
- .WithValueMap({{"false", false}, {"true", true}})
- .IntoKey(M::DumpNativeStackOnSigQuit)
.Define("-XX:MadviseRandomAccess:_")
.WithType<bool>()
.WithValueMap({{"false", false}, {"true", true}})
@@ -735,7 +731,6 @@
UsageMessage(stream, " -XX:BackgroundGC=none\n");
UsageMessage(stream, " -XX:LargeObjectSpace={disabled,map,freelist}\n");
UsageMessage(stream, " -XX:LargeObjectThreshold=N\n");
- UsageMessage(stream, " -XX:DumpNativeStackOnSigQuit=booleanvalue\n");
UsageMessage(stream, " -XX:MadviseRandomAccess:booleanvalue\n");
UsageMessage(stream, " -XX:SlowDebug={false,true}\n");
UsageMessage(stream, " -Xmethod-trace\n");
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 9a626ba..bb76e61 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -271,7 +271,6 @@
pending_hidden_api_warning_(false),
dedupe_hidden_api_warnings_(true),
always_set_hidden_api_warning_flag_(false),
- dump_native_stack_on_sig_quit_(true),
pruned_dalvik_cache_(false),
// Initially assume we perceive jank in case the process state is never updated.
process_state_(kProcessStateJankPerceptible),
@@ -1153,7 +1152,6 @@
is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::Dex2Oat);
image_dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::ImageDex2Oat);
- dump_native_stack_on_sig_quit_ = runtime_options.GetOrDefault(Opt::DumpNativeStackOnSigQuit);
vfprintf_ = runtime_options.GetOrDefault(Opt::HookVfprintf);
exit_ = runtime_options.GetOrDefault(Opt::HookExit);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index dba31b2..7d7cbaf 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -655,10 +655,6 @@
safe_mode_ = mode;
}
- bool GetDumpNativeStackOnSigQuit() const {
- return dump_native_stack_on_sig_quit_;
- }
-
bool GetPrunedDalvikCache() const {
return pruned_dalvik_cache_;
}
@@ -1009,9 +1005,6 @@
// when there is a warning. This is only used for testing.
bool always_set_hidden_api_warning_flag_;
- // Whether threads should dump their native stack on SIGQUIT.
- bool dump_native_stack_on_sig_quit_;
-
// Whether the dalvik cache was pruned when initializing the runtime.
bool pruned_dalvik_cache_;
diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc
index 59af918..41bfb58 100644
--- a/runtime/runtime_common.cc
+++ b/runtime/runtime_common.cc
@@ -41,7 +41,6 @@
using android::base::StringPrintf;
static constexpr bool kUseSigRTTimeout = true;
-static constexpr bool kDumpNativeStackOnTimeout = true;
const char* GetSignalName(int signal_number) {
switch (signal_number) {
@@ -441,7 +440,7 @@
// Special timeout signal. Try to dump all threads.
// Note: Do not use DumpForSigQuit, as that might disable native unwind, but the native parts
// are of value here.
- runtime->GetThreadList()->Dump(std::cerr, kDumpNativeStackOnTimeout);
+ runtime->GetThreadList()->Dump(std::cerr);
std::cerr << std::endl;
}
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 4121ad6..dcb1335 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -70,7 +70,6 @@
RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseTlab || kUseReadBarrier))
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
RUNTIME_OPTIONS_KEY (bool, UseJitCompilation, false)
-RUNTIME_OPTIONS_KEY (bool, DumpNativeStackOnSigQuit, true)
RUNTIME_OPTIONS_KEY (bool, MadviseRandomAccess, false)
RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold)
RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b13d8ec..50cf9e0 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1161,10 +1161,9 @@
<< "]";
}
-void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map,
- bool force_dump_stack) const {
+void Thread::Dump(std::ostream& os, BacktraceMap* backtrace_map, bool force_dump_stack) const {
DumpState(os);
- DumpStack(os, dump_native_stack, backtrace_map, force_dump_stack);
+ DumpStack(os, backtrace_map, force_dump_stack);
}
mirror::String* Thread::GetThreadName() const {
@@ -1968,10 +1967,7 @@
}
}
-void Thread::DumpStack(std::ostream& os,
- bool dump_native_stack,
- BacktraceMap* backtrace_map,
- bool force_dump_stack) const {
+void Thread::DumpStack(std::ostream& os, BacktraceMap* backtrace_map, bool force_dump_stack) const {
// TODO: we call this code when dying but may not have suspended the thread ourself. The
// IsSuspended check is therefore racy with the use for dumping (normally we inhibit
// the race with the thread_suspend_count_lock_).
@@ -1984,7 +1980,7 @@
}
if (safe_to_dump || force_dump_stack) {
// If we're currently in native code, dump that stack before dumping the managed stack.
- if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
+ if (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
ArtMethod* method =
GetCurrentMethod(nullptr,
diff --git a/runtime/thread.h b/runtime/thread.h
index 22b77ee..af1401e 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -207,7 +207,6 @@
// Dumps the detailed thread state and the thread stack (used for SIGQUIT).
void Dump(std::ostream& os,
- bool dump_native_stack = true,
BacktraceMap* backtrace_map = nullptr,
bool force_dump_stack = false) const
REQUIRES(!Locks::thread_suspend_count_lock_)
@@ -1318,7 +1317,6 @@
void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
void DumpStack(std::ostream& os,
- bool dump_native_stack = true,
BacktraceMap* backtrace_map = nullptr,
bool force_dump_stack = false) const
REQUIRES(!Locks::thread_suspend_count_lock_)
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 44af867..ee68399 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -152,9 +152,8 @@
suspend_all_historam_.PrintConfidenceIntervals(os, 0.99, data); // Dump time to suspend.
}
}
- bool dump_native_stack = Runtime::Current()->GetDumpNativeStackOnSigQuit();
- Dump(os, dump_native_stack);
- DumpUnattachedThreads(os, dump_native_stack && kDumpUnattachedThreadNativeStackForSigQuit);
+ Dump(os);
+ DumpUnattachedThreads(os, kDumpUnattachedThreadNativeStackForSigQuit);
}
static void DumpUnattachedThread(std::ostream& os, pid_t tid, bool dump_native_stack)
@@ -201,11 +200,10 @@
// A closure used by Thread::Dump.
class DumpCheckpoint FINAL : public Closure {
public:
- DumpCheckpoint(std::ostream* os, bool dump_native_stack)
+ explicit DumpCheckpoint(std::ostream* os)
: os_(os),
barrier_(0),
- backtrace_map_(dump_native_stack ? BacktraceMap::Create(getpid()) : nullptr),
- dump_native_stack_(dump_native_stack) {
+ backtrace_map_(BacktraceMap::Create(getpid())) {
if (backtrace_map_ != nullptr) {
backtrace_map_->SetSuffixesToIgnore(std::vector<std::string> { "oat", "odex" });
}
@@ -219,7 +217,7 @@
std::ostringstream local_os;
{
ScopedObjectAccess soa(self);
- thread->Dump(local_os, dump_native_stack_, backtrace_map_.get());
+ thread->Dump(local_os, backtrace_map_.get());
}
{
// Use the logging lock to ensure serialization when writing to the common ostream.
@@ -247,18 +245,16 @@
Barrier barrier_;
// A backtrace map, so that all threads use a shared info and don't reacquire/parse separately.
std::unique_ptr<BacktraceMap> backtrace_map_;
- // Whether we should dump the native stack.
- const bool dump_native_stack_;
};
-void ThreadList::Dump(std::ostream& os, bool dump_native_stack) {
+void ThreadList::Dump(std::ostream& os) {
Thread* self = Thread::Current();
{
MutexLock mu(self, *Locks::thread_list_lock_);
os << "DALVIK THREADS (" << list_.size() << "):\n";
}
if (self != nullptr) {
- DumpCheckpoint checkpoint(&os, dump_native_stack);
+ DumpCheckpoint checkpoint(&os);
size_t threads_running_checkpoint;
{
// Use SOA to prevent deadlocks if multiple threads are calling Dump() at the same time.
@@ -269,7 +265,7 @@
checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
}
} else {
- DumpUnattachedThreads(os, dump_native_stack);
+ DumpUnattachedThreads(os, /* dump_native_stack */ true);
}
}
@@ -491,7 +487,6 @@
// Found a runnable thread that hasn't responded to the empty checkpoint request.
// Assume it's stuck and safe to dump its stack.
thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
- /*dump_native_stack*/ true,
/*backtrace_map*/ nullptr,
/*force_dump_stack*/ true);
}
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 895c1a4..09b10d2 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -57,7 +57,7 @@
void DumpForSigQuit(std::ostream& os)
REQUIRES(!Locks::thread_list_lock_, !Locks::mutator_lock_);
// For thread suspend timeout dumps.
- void Dump(std::ostream& os, bool dump_native_stack = true)
+ void Dump(std::ostream& os)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
pid_t GetLockOwner(); // For SignalCatcher.
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index e9127a8..86adb73 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -775,9 +775,6 @@
TMP_DIR_OPTION="-Djava.io.tmpdir=/data/local/tmp"
fi
-# We set DumpNativeStackOnSigQuit to false to avoid stressing libunwind.
-# b/27185632
-# b/24664297
dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \
$GDB_ARGS \
$FLAGS \
@@ -792,7 +789,6 @@
$DEBUGGER_OPTS \
$DALVIKVM_BOOT_OPT \
$TMP_DIR_OPTION \
- -XX:DumpNativeStackOnSigQuit:false \
-cp $DEX_LOCATION/$TEST_NAME.jar$SECONDARY_DEX $MAIN $ARGS"
# Remove whitespace.