Assert mutator doesn't get a mark-stack assigned once destroyed
Write a non-null sentinel value (0x1) to the thread-local mark-stack
pointer when a mutator's previously assigned thread-local mark-stack is
revoked, so that we can catch the mutator if it ever invokes the read
barrier or executes the flip function afterwards, both of which may
assign a mark-stack to the mutator.
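
For reference, a minimal sketch of the sentinel-pointer pattern this
change relies on (illustrative types and names, not ART's actual ones):

    // Sketch only: poison a revoked pointer slot with a value that is
    // distinct from nullptr and from any valid heap address.
    #include <cassert>

    struct MarkStack {};

    MarkStack* const kRevokedSentinel = reinterpret_cast<MarkStack*>(0x1);

    struct Mutator {
      MarkStack* tl_mark_stack = nullptr;

      void RevokeMarkStack() {
        // Poison the slot rather than leaving nullptr, so a buggy
        // re-assignment path is caught instead of silently succeeding.
        tl_mark_stack = kRevokedSentinel;
      }

      void ReadBarrierEntry() {
        // Sites that may hand out a mark-stack check the poison first.
        assert(tl_mark_stack != kRevokedSentinel);
      }
    };
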
Test: art/test/testrunner/testrunner.py
Bug: 140119552
Change-Id: I82f43c8a3aab6dacb0f6bd35471fc2cdd969b154
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 2de7910..544258e 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -199,6 +199,9 @@
inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
mirror::Object* ret;
Thread* const self = Thread::Current();
+ // TODO(lokeshgidra): Remove the check once b/140119552 is fixed.
+ CHECK(self->GetThreadLocalMarkStack()
+ != reinterpret_cast<accounting::AtomicStack<mirror::Object>*>(0x1));
// We can get here before marking starts since we gray immune objects before the marking phase.
if (from_ref == nullptr || !self->GetIsGcMarking()) {
return from_ref;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index afae3ef..d34bbdb 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -440,6 +440,9 @@
void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
+ // TODO(lokeshgidra): Remove once b/140119552 is fixed.
+ CHECK(self->GetThreadLocalMarkStack()
+ != reinterpret_cast<accounting::AtomicStack<mirror::Object>*>(0x1));
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
<< thread->GetState() << " thread " << thread << " self " << self;
thread->SetIsGcMarkingAndUpdateEntrypoints(true);
@@ -996,7 +999,9 @@
<< thread->GetState() << " thread " << thread << " self " << self;
// Revoke thread local mark stacks.
accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
- if (tl_mark_stack != nullptr) {
+ // TODO(lokeshgidra): Remove the 0x1 condition once b/140119552 is fixed.
+ if (tl_mark_stack != nullptr
+ && tl_mark_stack != reinterpret_cast<accounting::AtomicStack<mirror::Object>*>(0x1)) {
MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
thread->SetThreadLocalMarkStack(nullptr);
@@ -2059,6 +2064,9 @@
RemoveThreadMarkStackMapping(thread, tl_mark_stack);
thread->SetThreadLocalMarkStack(nullptr);
}
+ // TODO(lokeshgidra): Store a non-null sentinel (0x1) to diagnose b/140119552.
+ // This CL is to be reverted once the issue is fixed.
+ thread->SetThreadLocalMarkStack(reinterpret_cast<accounting::AtomicStack<mirror::Object>*>(0x1));
}

void ConcurrentCopying::ProcessMarkStack() {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 59a38e1..6f0776b 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2466,7 +2466,9 @@
Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()
->AssertNoThreadMarkStackMapping(this);
gc::accounting::AtomicStack<mirror::Object>* tl_mark_stack = GetThreadLocalMarkStack();
- CHECK(tl_mark_stack == nullptr) << "mark-stack: " << tl_mark_stack;
+ CHECK(tl_mark_stack == nullptr
+ || tl_mark_stack == reinterpret_cast<gc::accounting::AtomicStack<mirror::Object>*>(0x1))
+ << "mark-stack: " << tl_mark_stack;
}
// Make sure we processed all deoptimization requests.
CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";