path: root/runtime/gc/reference_queue.cc
Diffstat (limited to 'runtime/gc/reference_queue.cc')
 runtime/gc/reference_queue.cc | 49 +++++++++++++++++++++++++++++++++++--------------
 1 file changed, 35 insertions(+), 14 deletions(-)
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 568ca04c1d..6bdacaf18c 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -131,7 +131,8 @@ size_t ReferenceQueue::GetLength() const {
}
void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
- collector::GarbageCollector* collector) {
+ collector::GarbageCollector* collector,
+ bool report_cleared) {
while (!IsEmpty()) {
ObjPtr<mirror::Reference> ref = DequeuePendingReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
@@ -145,6 +146,15 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
ref->ClearReferent<false>();
}
cleared_references->EnqueueReference(ref);
+ if (report_cleared) {
+ static bool already_reported = false;
+ if (!already_reported) {
+ // TODO: Maybe do this only if the queue is non-null?
+ LOG(WARNING)
+ << "Cleared Reference was only reachable from finalizer (only reported once)";
+ already_reported = true;
+ }
+ }
}
// Delay disabling the read barrier until here so that the ClearReferent call above in
// transaction mode will trigger the read barrier.
@@ -182,22 +192,33 @@ FinalizerStats ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleare
}
uint32_t ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
- if (UNLIKELY(IsEmpty())) {
- return 0;
- }
uint32_t num_refs(0);
- const ObjPtr<mirror::Reference> head = list_;
- ObjPtr<mirror::Reference> ref = head;
+ Thread* self = Thread::Current();
+ static constexpr int SR_BUF_SIZE = 32;
+ ObjPtr<mirror::Reference> buf[SR_BUF_SIZE];
+ int n_entries;
+ bool empty;
do {
- mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr) {
- // do_atomic_update is false because mutators can't access the referent due to the weak ref
- // access blocking.
- visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ false);
- ++num_refs;
+ {
+ // Acquire lock only a few times and hold it as briefly as possible.
+ MutexLock mu(self, *lock_);
+ empty = IsEmpty();
+ for (n_entries = 0; n_entries < SR_BUF_SIZE && !empty; ++n_entries) {
+ // Dequeuing the Reference here means it could possibly be enqueued again during this GC.
+ // That's unlikely and benign.
+ buf[n_entries] = DequeuePendingReference();
+ empty = IsEmpty();
+ }
+ }
+ for (int i = 0; i < n_entries; ++i) {
+ mirror::HeapReference<mirror::Object>* referent_addr = buf[i]->GetReferentReferenceAddr();
+ if (referent_addr->AsMirrorPtr() != nullptr) {
+ visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ true);
+ ++num_refs;
+ }
+ DisableReadBarrierForReference(buf[i]->AsReference());
}
- ref = ref->GetPendingNext();
- } while (LIKELY(ref != head));
+ } while (!empty);
return num_refs;
}
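
The report_cleared path added in the second hunk logs at most one warning per process. Below is a minimal standalone sketch of that warn-once pattern, not ART code: LogWarning stands in for ART's LOG(WARNING), and std::atomic makes the guard safe even if several threads race to report first (the diff's plain static bool is presumably fine because reference processing is serialized in the GC).

#include <atomic>
#include <iostream>

// Stand-in for ART's LOG(WARNING); illustrative only.
void LogWarning(const char* msg) { std::cerr << "W: " << msg << '\n'; }

void ReportClearedReference(bool report_cleared) {
  if (!report_cleared) {
    return;
  }
  // exchange() returns the previous value, so exactly one caller sees
  // false and emits the warning; every later call is silent.
  static std::atomic<bool> already_reported(false);
  if (!already_reported.exchange(true)) {
    LogWarning("Cleared Reference was only reachable from finalizer (only reported once)");
  }
}

int main() {
  ReportClearedReference(true);  // logs the warning
  ReportClearedReference(true);  // silent
  return 0;
}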
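The rewritten ForwardSoftReferences no longer walks the circular pending list in place: it repeatedly drains up to SR_BUF_SIZE references into a stack buffer while holding lock_, then marks the referents with the lock released. The following is a minimal standalone sketch of that drain-then-process pattern, assuming a hypothetical WorkQueue over int rather than ART's reference list:

#include <cstddef>
#include <deque>
#include <mutex>

class WorkQueue {
 public:
  void Enqueue(int v) {
    std::lock_guard<std::mutex> lock(mutex_);
    items_.push_back(v);
  }

  // Mirrors the diff's loop: hold the lock only while copying up to
  // kBufSize entries into a local buffer, then process them unlocked.
  size_t ProcessAll() {
    static constexpr int kBufSize = 32;  // same batch size as SR_BUF_SIZE
    int buf[kBufSize];
    size_t processed = 0;
    bool empty;
    do {
      int n_entries = 0;
      {
        // Acquire the lock only a few times and hold it as briefly as
        // possible, so concurrent enqueuers are never blocked for a
        // whole pass over the queue.
        std::lock_guard<std::mutex> lock(mutex_);
        empty = items_.empty();
        while (n_entries < kBufSize && !empty) {
          buf[n_entries++] = items_.front();
          items_.pop_front();
          empty = items_.empty();
        }
      }
      // Per-entry work happens outside the critical section; in the
      // diff this is where MarkHeapReference runs.
      for (int i = 0; i < n_entries; ++i) {
        processed += static_cast<size_t>(buf[i] >= 0);  // placeholder work
      }
    } while (!empty);
    return processed;
  }

 private:
  std::mutex mutex_;
  std::deque<int> items_;
};

int main() {
  WorkQueue q;
  for (int i = 0; i < 100; ++i) q.Enqueue(i);
  return q.ProcessAll() == 100 ? 0 : 1;
}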
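The new loop also flips do_atomic_update from false to true; the deleted comment noted the plain update was safe only because weak-ref access blocking kept mutators away from the referent. As a schematic of what such a flag generally controls (this is not ART's MarkObjectVisitor API; MarkSlot and Object are invented for illustration), the sketch below updates a slot with compare-and-swap when concurrent writes are possible and with a plain store when they are not:

#include <atomic>
#include <cstdio>

struct Object {
  bool marked = false;
};

// Illustrative marker: mark the object a slot points at and install its
// post-GC address. With do_atomic_update, use compare-and-swap so a
// racing mutator store to the slot is never silently overwritten; the
// plain store is safe only while mutators cannot touch the slot.
void MarkSlot(std::atomic<Object*>* slot, Object* moved_to, bool do_atomic_update) {
  Object* old_ref = slot->load(std::memory_order_relaxed);
  if (old_ref == nullptr) {
    return;
  }
  old_ref->marked = true;
  if (do_atomic_update) {
    // Replaces the slot only if it still holds the value we read.
    slot->compare_exchange_strong(old_ref, moved_to);
  } else {
    slot->store(moved_to, std::memory_order_relaxed);
  }
}

int main() {
  Object from;
  Object to;
  std::atomic<Object*> slot(&from);
  MarkSlot(&slot, &to, /*do_atomic_update=*/ true);
  std::printf("marked=%d updated=%d\n", from.marked ? 1 : 0, slot.load() == &to ? 1 : 0);
  return 0;
}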