author     2012-10-10 18:26:27 -0700
committer  2012-10-10 18:26:27 -0700
commit     f0bbeabf0577627e9fb29f204a036e3cd51e8bae (patch)
tree       7b7789bcd8ed03deb25a7c68aa6a0ccf82201324
parent     7204db592a999318067b9c509e14aa8b30293e58 (diff)
Improve heap lock annotations.
Fix a deadlock in non-concurrent mark sweep caught by this.
Broaden heap_bitmap_lock_ over bitmap swapping.
Change-Id: I5e749f25d181217d530e2f573dc8aee2685108ad
-rw-r--r--  src/gc/heap_bitmap.h |  3
-rw-r--r--  src/heap.cc          | 66
-rw-r--r--  src/heap.h           | 19
3 files changed, 40 insertions, 48 deletions
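
The SHARED_LOCKS_REQUIRED, EXCLUSIVE_LOCKS_REQUIRED, and LOCKS_EXCLUDED annotations used throughout this change are macros over Clang's thread-safety attributes: with -Wthread-safety, the compiler statically checks that every caller satisfies the stated locking contract, which is how the mark-sweep deadlock was caught. A minimal sketch of the mechanism, assuming the legacy attribute spellings and a stand-in Mutex class rather than ART's own:

// Minimal sketch of the Clang thread-safety annotations behind the macros in
// this change. Illustrative only: the Mutex class below is a stand-in, not
// ART's. Check with: clang++ -fsyntax-only -Wthread-safety sketch.cc
#define LOCKABLE __attribute__((lockable))
#define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__((exclusive_lock_function(__VA_ARGS__)))
#define UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__)))
#define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))
#define SHARED_LOCKS_REQUIRED(...) __attribute__((shared_locks_required(__VA_ARGS__)))
#define LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))

class LOCKABLE Mutex {
 public:
  void ExclusiveLock() EXCLUSIVE_LOCK_FUNCTION() {}
  void ExclusiveUnlock() UNLOCK_FUNCTION() {}
};

Mutex heap_bitmap_lock;

// Contract after this change: the caller already holds the lock exclusively;
// the function itself never acquires it, so it cannot self-deadlock.
void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(heap_bitmap_lock) {}

// Must be called with the lock NOT held, because it takes the lock itself.
void VerifyHeap() LOCKS_EXCLUDED(heap_bitmap_lock) {
  heap_bitmap_lock.ExclusiveLock();
  SwapBitmaps();  // OK: the analysis sees the lock as held here.
  heap_bitmap_lock.ExclusiveUnlock();
}

void Broken() {
  SwapBitmaps();  // -Wthread-safety: requires heap_bitmap_lock exclusively.
}

Under annotations like these, moving a lock acquisition from callee to caller (as this commit does for SwapBitmaps and SwapLargeObjects) stays checkable: any call site that does not hold heap_bitmap_lock_ triggers a warning under -Wthread-safety.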
diff --git a/src/gc/heap_bitmap.h b/src/gc/heap_bitmap.h
index 23dcd47d53..1610df8fe2 100644
--- a/src/gc/heap_bitmap.h
+++ b/src/gc/heap_bitmap.h
@@ -25,8 +25,7 @@ namespace art {
 
 class HeapBitmap {
  public:
-  bool Test(const Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  bool Test(const Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
     SpaceBitmap* bitmap = GetSpaceBitmap(obj);
     if (LIKELY(bitmap != NULL)) {
       return bitmap->Test(obj);
diff --git a/src/heap.cc b/src/heap.cc
index df0641d94a..3989a247e4 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -491,12 +491,13 @@ void Heap::VerifyObjectBody(const Object* obj) {
     LOG(FATAL) << "Object isn't aligned: " << obj;
   }
 
+  // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
+  // heap_bitmap_lock_.
   if (!GetLiveBitmap()->Test(obj)) {
     // Check the allocation stack / live stack.
     if (!std::binary_search(live_stack_->Begin(), live_stack_->End(), obj) &&
         std::find(allocation_stack_->Begin(), allocation_stack_->End(), obj) ==
             allocation_stack_->End()) {
-      ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
       if (large_object_space_->GetLiveObjects()->Test(obj)) {
         DumpSpaces();
         LOG(FATAL) << "Object is dead: " << obj;
@@ -1013,9 +1014,9 @@ void Heap::CollectGarbageMarkSweepPlan(Thread* self, GcType gc_type, GcCause gc_
 
   const bool swap = true;
   if (swap) {
     if (gc_type == kGcTypeSticky) {
-      SwapLargeObjects(self);
+      SwapLargeObjects();
     } else {
-      SwapBitmaps(self, gc_type);
+      SwapBitmaps(gc_type);
     }
   }
@@ -1358,30 +1359,25 @@ bool Heap::VerifyMissingCardMarks() {
   return true;
 }
 
-void Heap::SwapBitmaps(Thread* self, GcType gc_type) {
+void Heap::SwapBitmaps(GcType gc_type) {
   // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
   // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
   // bits of dead objects in the live bitmap.
-  {
-    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-      ContinuousSpace* space = *it;
-      // We never allocate into zygote spaces.
-      if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
-          (gc_type == kGcTypeFull &&
-           space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
-        live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap());
-        mark_bitmap_->ReplaceBitmap(space->GetMarkBitmap(), space->GetLiveBitmap());
-        space->AsAllocSpace()->SwapBitmaps();
-      }
+  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+    ContinuousSpace* space = *it;
+    // We never allocate into zygote spaces.
+    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
+        (gc_type == kGcTypeFull &&
+         space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
+      live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap());
+      mark_bitmap_->ReplaceBitmap(space->GetMarkBitmap(), space->GetLiveBitmap());
+      space->AsAllocSpace()->SwapBitmaps();
     }
   }
-
-  SwapLargeObjects(self);
+  SwapLargeObjects();
 }
 
-void Heap::SwapLargeObjects(Thread* self) {
-  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+void Heap::SwapLargeObjects() {
   large_object_space_->SwapBitmaps();
   live_bitmap_->SetLargeObjects(large_object_space_->GetLiveObjects());
   mark_bitmap_->SetLargeObjects(large_object_space_->GetMarkObjects());
@@ -1612,14 +1608,12 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(Thread* self, GcType gc_type, G
   }
 
   if (verify_post_gc_heap_) {
-    SwapBitmaps(self, gc_type);
-    {
-      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-      if (!VerifyHeapReferences()) {
-        LOG(FATAL) << "Post " << gc_type_str.str() << "Gc verification failed";
-      }
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    SwapBitmaps(gc_type);
+    if (!VerifyHeapReferences()) {
+      LOG(FATAL) << "Post " << gc_type_str.str() << "Gc verification failed";
     }
-    SwapBitmaps(self, gc_type);
+    SwapBitmaps(gc_type);
     timings.AddSplit("VerifyHeapReferencesPostGC");
   }
 
@@ -1647,16 +1641,16 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(Thread* self, GcType gc_type, G
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
     // Unbind the live and mark bitmaps.
     mark_sweep.UnBindBitmaps();
-  }
 
-  // Swap the live and mark bitmaps for each space which we modified space. This is an
-  // optimization that enables us to not clear live bits inside of the sweep.
-  const bool swap = true;
-  if (swap) {
-    if (gc_type == kGcTypeSticky) {
-      SwapLargeObjects(self);
-    } else {
-      SwapBitmaps(self, gc_type);
+    // Swap the live and mark bitmaps for each space which we modified space. This is an
+    // optimization that enables us to not clear live bits inside of the sweep.
+    const bool swap = true;
+    if (swap) {
+      if (gc_type == kGcTypeSticky) {
+        SwapLargeObjects();
+      } else {
+        SwapBitmaps(gc_type);
+      }
     }
   }
diff --git a/src/heap.h b/src/heap.h
index 14782904b3..209d631cf8 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -108,7 +108,7 @@ class Heap {
 #endif
 
   // Check sanity of all live references. Requires the heap lock.
-  void VerifyHeap();
+  void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
   static void RootMatchesObjectVisitor(const Object* root, void* arg);
   bool VerifyHeapReferences()
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
@@ -133,7 +133,7 @@ class Heap {
 
   // Does a concurrent GC, should only be called by the GC daemon thread
   // through runtime.
-  void ConcurrentGC(Thread* self);
+  void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
 
   // Implements java.lang.Runtime.maxMemory.
   int64_t GetMaxMemory();
@@ -170,7 +170,7 @@ class Heap {
 
   // Blocks the caller until the garbage collector becomes idle and returns
   // true if we waited for the GC to complete.
-  GcType WaitForConcurrentGcToComplete(Thread* self);
+  GcType WaitForConcurrentGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
 
   const Spaces& GetSpaces() {
     return spaces_;
@@ -268,7 +268,7 @@ class Heap {
     return mark_bitmap_.get();
   }
 
-  void PreZygoteFork();
+  void PreZygoteFork() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
 
   // Mark and empty stack.
   void FlushAllocStack()
@@ -313,12 +313,12 @@ class Heap {
   // Pushes a list of cleared references out to the managed heap.
   void EnqueueClearedReferences(Object** cleared_references);
 
-  void RequestHeapTrim();
-  void RequestConcurrentGC(Thread* self);
+  void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+  void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
 
   // Swap bitmaps (if we are a full Gc then we swap the zygote bitmap too).
-  void SwapBitmaps(Thread* self, GcType gc_type);
-  void SwapLargeObjects(Thread* self);
+  void SwapBitmaps(GcType gc_type) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void SwapLargeObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   void RecordAllocation(size_t size, Object* object)
       LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_)
@@ -351,8 +351,7 @@ class Heap {
   // No thread saftey analysis since we call this everywhere and it is impossible to find a proper
   // lock ordering for it.
-  void VerifyObjectBody(const Object *obj)
-      NO_THREAD_SAFETY_ANALYSIS;
+  void VerifyObjectBody(const Object *obj) NO_THREAD_SAFETY_ANALYSIS;
   static void VerificationCallback(Object* obj, void* arg)
       SHARED_LOCKS_REQUIRED(GlobalSychronization::heap_bitmap_lock_);
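
The deadlock mechanics, as a hypothetical sketch: before this change, helpers acquired heap_bitmap_lock_ internally (e.g. the ReaderMutexLock removed from VerifyObjectBody above), which never returns when the same thread already holds the lock for writing, because the underlying reader-writer mutex is not recursive. The change hoists the acquisition into the caller and states the precondition with EXCLUSIVE_LOCKS_REQUIRED / SHARED_LOCKS_REQUIRED instead. Below, std::shared_mutex stands in for ART's ReaderWriterMutex; the function names and bodies are illustrative assumptions, not the actual ART code:

// Hypothetical sketch of the re-entrant locking pattern this change removes.
#include <mutex>
#include <shared_mutex>

std::shared_mutex heap_bitmap_lock;  // plays the role of Locks::heap_bitmap_lock_

// Before: the helper grabbed a reader lock itself.
void VerifyObjectOld() {
  std::shared_lock<std::shared_mutex> lock(heap_bitmap_lock);
  // ... test the object against the live bitmaps ...
}

// After: no internal locking; the caller is responsible for holding the lock.
void VerifyObjectNew() {
  // ... test the object against the live bitmaps ...
}

void MarkSweepPhase() {
  // The GC takes the write lock once, broadly, for the whole swap/verify phase.
  std::unique_lock<std::shared_mutex> lock(heap_bitmap_lock);
  VerifyObjectNew();   // fine: runs under the caller's write lock
  // VerifyObjectOld(); // deadlock: acquiring the reader side while this
  //                    // thread holds the writer side never returns
}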