author Hiroshi Yamauchi <yamauchi@google.com> 2016-03-18 17:17:52 -0700
committer Hiroshi Yamauchi <yamauchi@google.com> 2016-03-18 17:33:24 -0700
commit 6f0c6cdae6962532c4699df9dd5af6289b86d1c6 (patch)
tree a3659d13b6b7f043b18f99536684a96e1e1e2971
parent 38ceb62339514a8012695673b9e1110d13546f02 (diff)
Fix a CC 145-alloc-tracking-stress deadlock.
When allocation tracking gets disabled, there may be threads blocked on system weak access for recording allocations, but when the GC re-enables system weak access, it fails to wake up those blocked threads (which causes a deadlock) because the broadcast call is guarded by Heap::IsAllocTrackingEnabled(), which is false at that point.

Broadcast in Heap::BroadcastForNewAllocationRecords() regardless of Heap::IsAllocTrackingEnabled(), which is safe. Also apply a similar fix to the non-CC case.

Bug: 27467554
Change-Id: I74cf88bceb306589ce11a19a688be223e099e88a
-rw-r--r--  runtime/gc/allocation_record.cc |  6
-rw-r--r--  runtime/gc/heap.cc              | 30
2 files changed, 21 insertions(+), 15 deletions(-)
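For readers unfamiliar with the pattern the commit message describes, here is a minimal, hypothetical C++ sketch of the missed-wakeup deadlock and of the unconditional-broadcast fix. It uses plain std::mutex/std::condition_variable instead of ART's Heap, AllocRecordObjectMap, and Locks classes, and the name WeakAccessGate is invented for illustration only.

// Hypothetical, simplified sketch of the deadlock described above; it does not
// use ART's actual classes or locking hierarchy.
#include <condition_variable>
#include <mutex>

class WeakAccessGate {
 public:
  // A thread that wants to record an allocation blocks here while system weak
  // access is disabled (e.g. during part of a concurrent-copying GC cycle).
  void WaitForWeakAccess() {
    std::unique_lock<std::mutex> lock(mutex_);
    // Re-check the predicate on every wakeup: spurious wakeups are possible and
    // the state can change while the thread sleeps.
    cv_.wait(lock, [this] { return weak_access_enabled_; });
  }

  // The GC calls this when it re-enables system weak access.
  void EnableWeakAccessAndBroadcast(bool alloc_tracking_enabled) {
    std::lock_guard<std::mutex> lock(mutex_);
    weak_access_enabled_ = true;
    // Pre-fix behavior (buggy): broadcast only if allocation tracking is still
    // enabled. If tracking was disabled after a waiter blocked above, that
    // waiter is never woken, which is the deadlock.
    //
    //   if (alloc_tracking_enabled) { cv_.notify_all(); }
    //
    // Post-fix behavior: broadcast unconditionally. Notifying with no waiters
    // is cheap and always safe.
    (void)alloc_tracking_enabled;
    cv_.notify_all();
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  bool weak_access_enabled_ = false;  // guarded by mutex_
};

The actual change below follows the same principle: Heap::BroadcastForNewAllocationRecords() now takes alloc_tracker_lock_ and broadcasts whenever an AllocRecordObjectMap exists, instead of gating the broadcast on IsAllocTrackingEnabled().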
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 73190455cf..e3714bbde6 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -288,6 +288,12 @@ void AllocRecordObjectMap::RecordAllocation(Thread* self, mirror::Object* obj, m
     records->new_record_condition_.WaitHoldingLocks(self);
   }
 
+  if (!heap->IsAllocTrackingEnabled()) {
+    // Return if the allocation tracking has been disabled while waiting for system weak access
+    // above.
+    return;
+  }
+
   DCHECK_LE(records->Size(), records->alloc_record_max_);
 
   // Get stack trace.
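The hunk above also covers the waiter's side of the race: after the wait returns, allocation tracking may already have been disabled, so RecordAllocation() bails out instead of recording into a disabled tracker. A standalone sketch of that re-check, with hypothetical globals standing in for ART's heap and tracker state (not the actual ART API):

#include <atomic>
#include <condition_variable>
#include <mutex>

// Hypothetical state standing in for ART's weak-access and tracking flags.
std::mutex gate_mutex;
std::condition_variable gate_cv;
bool weak_access_enabled = false;                // guarded by gate_mutex
std::atomic<bool> alloc_tracking_enabled{true};  // may be cleared concurrently

void RecordAllocationSketch() {
  {
    std::unique_lock<std::mutex> lock(gate_mutex);
    // Block until the GC re-enables weak access and broadcasts.
    gate_cv.wait(lock, [] { return weak_access_enabled; });
  }
  if (!alloc_tracking_enabled.load()) {
    // Tracking was turned off while this thread was blocked; nothing to record.
    return;
  }
  // ... capture the stack trace and append the allocation record ...
}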
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f4fccee034..4ff0c6bfbd 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3960,31 +3960,31 @@ void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
 void Heap::AllowNewAllocationRecords() const {
   CHECK(!kUseReadBarrier);
-  if (IsAllocTrackingEnabled()) {
-    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
-    if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->AllowNewAllocationRecords();
-    }
+  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
+  if (allocation_records != nullptr) {
+    allocation_records->AllowNewAllocationRecords();
   }
 }
 
 void Heap::DisallowNewAllocationRecords() const {
   CHECK(!kUseReadBarrier);
-  if (IsAllocTrackingEnabled()) {
-    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
-    if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->DisallowNewAllocationRecords();
-    }
+  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
+  if (allocation_records != nullptr) {
+    allocation_records->DisallowNewAllocationRecords();
   }
 }
 
 void Heap::BroadcastForNewAllocationRecords() const {
   CHECK(kUseReadBarrier);
-  if (IsAllocTrackingEnabled()) {
-    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
-    if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->BroadcastForNewAllocationRecords();
-    }
+  // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
+  // be set to false while some threads are waiting for system weak access in
+  // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
+  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
+  if (allocation_records != nullptr) {
+    allocation_records->BroadcastForNewAllocationRecords();
   }
 }