Collect backtraces while holding the backtrace lock
Prevent cases where so many threads collect backtraces at the same
time that the process runs out of virtual address space, resulting in
std::bad_alloc. Taking backtrace_lock_ before the collection serializes
the stack unwinding, so at most one thread's temporary backtrace
buffers are live at any given time.
Re-enable test 708.
Test: test/testrunner/run_build_test_target.py art-jit-on-first-use-gcstress
Bug: 120112467
Change-Id: I291b9028880998aa33a53720dc2b3258ea7f3000
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8eebd12..4b63138 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -4006,10 +4006,10 @@
     bool new_backtrace = false;
     {
       static constexpr size_t kMaxFrames = 16u;
+      MutexLock mu(self, *backtrace_lock_);
       FixedSizeBacktrace<kMaxFrames> backtrace;
       backtrace.Collect(/* skip_count= */ 2);
       uint64_t hash = backtrace.Hash();
-      MutexLock mu(self, *backtrace_lock_);
       new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
       if (new_backtrace) {
         seen_backtraces_.insert(hash);
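
For context, here is a minimal standalone sketch of the pattern this
change adopts: take the lock before the expensive backtrace collection
so at most one thread's collection buffers are live at once, then
deduplicate by hash. FixedSizeBacktrace, backtrace_lock_ and
seen_backtraces_ are the names from the diff above; std::mutex,
CollectBacktraceHash() and RecordBacktraceIfNew() below are stand-ins
invented for this sketch, not ART APIs.

// Minimal sketch (not ART code) of serializing an allocation-heavy
// backtrace collection under a lock and deduplicating by hash.
#include <cstdint>
#include <mutex>
#include <unordered_set>

namespace {

std::mutex backtrace_mutex;                // serializes collection and dedup
std::unordered_set<uint64_t> seen_hashes;  // guarded by backtrace_mutex

// Stand-in for FixedSizeBacktrace::Collect() + Hash(); a real collector
// may allocate sizeable temporary buffers while unwinding the stack.
uint64_t CollectBacktraceHash() {
  return reinterpret_cast<uintptr_t>(__builtin_return_address(0));
}

}  // namespace

bool RecordBacktraceIfNew() {
  // Lock first: the collection itself is the expensive,
  // address-space-hungry step, so it must run under the lock,
  // not just the hash-set insertion.
  std::lock_guard<std::mutex> guard(backtrace_mutex);
  const uint64_t hash = CollectBacktraceHash();
  // insert().second is true only the first time this hash is seen.
  return seen_hashes.insert(hash).second;
}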