From 1b3ec0fb74395c3f8b7251e0dec4abb74a9b9ac4 Mon Sep 17 00:00:00 2001
From: Hans Boehm
Date: Wed, 26 Jan 2022 16:53:07 +0000
Subject: Revert^2 "Reduce pauses for weak reference access"

This reverts commit 07cbc5ba4f117ea74faecffe14ffc0ce8aa7ee0e.

PS1 is identical to aosp/1933038. PS2 applies a small fix for the CMS
build.

Original commit message:

Remove the "preserve references" machinery. Instead let the reference
processor inform GetReferent about its state, so that it can leverage
knowledge about whether reachable memory has in fact been completely
marked.

Restructure the ReferenceProcessor interface by adding Setup to ensure
that ReferenceProcessor fields are properly set up before we disable
the normal fast path through GetReferent.

For the CC collector, forward essentially all SoftReferences as part
of normal marking, so we don't stall weak reference access for those.
Note briefly in the log if we encounter References that are only
reachable from finalizers. SS and MS collectors are only minimally
updated to keep them working.

We now block in GetReferent only for the hopefully very brief period
of marking objects that were initially missed as a result of a
mutator-collector race. This should eliminate multi-millisecond delays
here. For 2043-reference-pauses from aosp/1952438, it reduces blocking
from over 100 msecs to under 1 on host. This is mostly due to the
better SoftReference treatment; 100 msec pauses in GetReferent() were
never near-typical.

We iteratively mark through SoftReferences now. Previously we could
mistakenly clear SoftReferences discovered while marking from the
top-level ones. (Lokesh pointed this out.) To make this work, we
change ForwardSoftReferences to actually remove References from the
queue, as the comment always said it did.

This also somewhat prepares us for a much less complete solution for
pauses to access WeakGlobalRefs or other "system weaks".

This fixes a memory ordering issue for the per-thread weak reference
access flags used with the CC collector. I think the issue is still
there for the CMS collector. That requires further discussion.

Bug: 190867430
Bug: 189738006
Bug: 211784084
Test: Build and boot aosp & Treehugger; aosp/195243
Change-Id: I9aa38da6c87555302243bd6c7d460747277ba8e7
---
 runtime/gc/reference_queue.cc | 49 ++++++++++++++++++++++++++++++-------------
 1 file changed, 35 insertions(+), 14 deletions(-)

(limited to 'runtime/gc/reference_queue.cc')
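The core of the ForwardSoftReferences change below is a batched drain:
hold the queue lock only while moving at most 32 References into a
local buffer, then do the marking work with the lock released,
repeating until the queue is empty. The following is a minimal
standalone C++ sketch of that pattern, not ART code: WorkQueue, Push,
DrainInBatches, kBatchSize, and the callback fn are hypothetical
stand-ins for ART's ReferenceQueue, its Mutex lock_, and the
MarkObjectVisitor work.

    #include <mutex>
    #include <utility>
    #include <vector>

    // Sketch only: std::mutex stands in for art::Mutex, and fn for the
    // per-Reference marking done by the MarkObjectVisitor.
    template <typename T>
    class WorkQueue {
     public:
      void Push(T v) {
        std::lock_guard<std::mutex> lg(lock_);
        items_.push_back(std::move(v));
      }

      // Drain the queue in batches of at most kBatchSize (cf. SR_BUF_SIZE
      // in the patch). The lock is held only while refilling the local
      // buffer, so a concurrent Push blocks for at most one small refill.
      template <typename Fn>
      void DrainInBatches(Fn fn) {
        static constexpr int kBatchSize = 32;
        T buf[kBatchSize];
        int n_entries;
        bool empty;
        do {
          {
            std::lock_guard<std::mutex> lg(lock_);
            empty = items_.empty();
            for (n_entries = 0; n_entries < kBatchSize && !empty; ++n_entries) {
              buf[n_entries] = std::move(items_.back());
              items_.pop_back();
              empty = items_.empty();
            }
          }
          for (int i = 0; i < n_entries; ++i) {
            fn(buf[i]);  // Slow work (marking, in the patch) runs unlocked.
          }
        } while (!empty);
      }

     private:
      std::mutex lock_;
      std::vector<T> items_;
    };

As in the patch, an entry enqueued while a batch is being processed is
simply picked up by a later iteration; one enqueued after the final
emptiness check is left for the next call, which matches the "could
possibly be enqueued again during this GC" caveat in the diff.

The commit message also mentions fixing a memory ordering issue for
the per-thread weak reference access flags. A sketch of that idea,
with hypothetical names (weak_ref_access_enabled, CanReadReferentFast,
DisableWeakRefAccess), assuming the flag became an acquire/release
atomic:

    #include <atomic>

    // GC publishes "access disabled" with a release store; the mutator
    // fast path in GetReferent reads it with an acquire load, so a
    // mutator that still sees the flag enabled also sees every write
    // the GC made before publishing it.
    std::atomic<bool> weak_ref_access_enabled{true};

    bool CanReadReferentFast() {
      return weak_ref_access_enabled.load(std::memory_order_acquire);
    }

    void DisableWeakRefAccess() {
      weak_ref_access_enabled.store(false, std::memory_order_release);
    }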
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 568ca04c1d..6bdacaf18c 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -131,7 +131,8 @@ size_t ReferenceQueue::GetLength() const {
 }
 
 void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
-                                          collector::GarbageCollector* collector) {
+                                          collector::GarbageCollector* collector,
+                                          bool report_cleared) {
   while (!IsEmpty()) {
     ObjPtr<mirror::Reference> ref = DequeuePendingReference();
     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
@@ -145,6 +146,15 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
         ref->ClearReferent<false>();
       }
       cleared_references->EnqueueReference(ref);
+      if (report_cleared) {
+        static bool already_reported = false;
+        if (!already_reported) {
+          // TODO: Maybe do this only if the queue is non-null?
+          LOG(WARNING)
+              << "Cleared Reference was only reachable from finalizer (only reported once)";
+          already_reported = true;
+        }
+      }
     }
     // Delay disabling the read barrier until here so that the ClearReferent call above in
     // transaction mode will trigger the read barrier.
@@ -182,22 +192,33 @@ FinalizerStats ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleare
 }
 
 uint32_t ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
-  if (UNLIKELY(IsEmpty())) {
-    return 0;
-  }
   uint32_t num_refs(0);
-  const ObjPtr<mirror::Reference> head = list_;
-  ObjPtr<mirror::Reference> ref = head;
+  Thread* self = Thread::Current();
+  static constexpr int SR_BUF_SIZE = 32;
+  ObjPtr<mirror::Reference> buf[SR_BUF_SIZE];
+  int n_entries;
+  bool empty;
   do {
-    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
-    if (referent_addr->AsMirrorPtr() != nullptr) {
-      // do_atomic_update is false because mutators can't access the referent due to the weak ref
-      // access blocking.
-      visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ false);
-      ++num_refs;
+    {
+      // Acquire lock only a few times and hold it as briefly as possible.
+      MutexLock mu(self, *lock_);
+      empty = IsEmpty();
+      for (n_entries = 0; n_entries < SR_BUF_SIZE && !empty; ++n_entries) {
+        // Dequeuing the Reference here means it could possibly be enqueued again during this GC.
+        // That's unlikely and benign.
+        buf[n_entries] = DequeuePendingReference();
+        empty = IsEmpty();
+      }
+    }
+    for (int i = 0; i < n_entries; ++i) {
+      mirror::HeapReference<mirror::Object>* referent_addr = buf[i]->GetReferentReferenceAddr();
+      if (referent_addr->AsMirrorPtr() != nullptr) {
+        visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ true);
+        ++num_refs;
+      }
+      DisableReadBarrierForReference(buf[i]->AsReference());
     }
-    ref = ref->GetPendingNext();
-  } while (LIKELY(ref != head));
+  } while (!empty);
   return num_refs;
 }
 
--
cgit v1.2.3-59-g8ed1b