summary refs log tree commit diff
path: root/runtime/gc/reference_queue.cc
diff options
context:
space:
mode:
Diffstat (limited to 'runtime/gc/reference_queue.cc')
-rw-r--r-- runtime/gc/reference_queue.cc | 49
1 file changed, 14 insertions(+), 35 deletions(-)
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 6bdacaf18c..568ca04c1d 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -131,8 +131,7 @@ size_t ReferenceQueue::GetLength() const {
}
void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
- collector::GarbageCollector* collector,
- bool report_cleared) {
+ collector::GarbageCollector* collector) {
while (!IsEmpty()) {
ObjPtr<mirror::Reference> ref = DequeuePendingReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
@@ -146,15 +145,6 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
ref->ClearReferent<false>();
}
cleared_references->EnqueueReference(ref);
- if (report_cleared) {
- static bool already_reported = false;
- if (!already_reported) {
- // TODO: Maybe do this only if the queue is non-null?
- LOG(WARNING)
- << "Cleared Reference was only reachable from finalizer (only reported once)";
- already_reported = true;
- }
- }
}
// Delay disabling the read barrier until here so that the ClearReferent call above in
// transaction mode will trigger the read barrier.
@@ -192,33 +182,22 @@ FinalizerStats ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleare
}
uint32_t ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
+ if (UNLIKELY(IsEmpty())) {
+ return 0;
+ }
uint32_t num_refs(0);
- Thread* self = Thread::Current();
- static constexpr int SR_BUF_SIZE = 32;
- ObjPtr<mirror::Reference> buf[SR_BUF_SIZE];
- int n_entries;
- bool empty;
+ const ObjPtr<mirror::Reference> head = list_;
+ ObjPtr<mirror::Reference> ref = head;
do {
- {
- // Acquire lock only a few times and hold it as briefly as possible.
- MutexLock mu(self, *lock_);
- empty = IsEmpty();
- for (n_entries = 0; n_entries < SR_BUF_SIZE && !empty; ++n_entries) {
- // Dequeuing the Reference here means it could possibly be enqueued again during this GC.
- // That's unlikely and benign.
- buf[n_entries] = DequeuePendingReference();
- empty = IsEmpty();
- }
- }
- for (int i = 0; i < n_entries; ++i) {
- mirror::HeapReference<mirror::Object>* referent_addr = buf[i]->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr) {
- visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ true);
- ++num_refs;
- }
- DisableReadBarrierForReference(buf[i]->AsReference());
+ mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
+ if (referent_addr->AsMirrorPtr() != nullptr) {
+ // do_atomic_update is false because mutators can't access the referent due to the weak ref
+ // access blocking.
+ visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ false);
+ ++num_refs;
}
- } while (!empty);
+ ref = ref->GetPendingNext();
+ } while (LIKELY(ref != head));
return num_refs;
}