Always use pendingNext to test the enqueueability of references.
Also clean up a misleading comment in a reference queue test.
Bug: 24404957
Change-Id: Ieea4788039ecef73cba1871fb480a439bf65b499
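
For context before the diff: the invariant this change standardizes on is that
pendingNext alone tells the GC whether a reference may be processed. A minimal
standalone sketch of that discipline, assuming a hypothetical Ref type rather
than ART's mirror::Reference:

  // Hypothetical stand-in for mirror::Reference; only pendingNext matters here.
  struct Ref {
    Ref* pending_next = nullptr;  // null <=> not yet claimed by reference processing.
  };

  // Mirrors the new Reference::IsUnprocessed(): pendingNext is the single
  // source of truth, so the old IsEnqueuable() probe of the queue/queueNext
  // fields is unnecessary.
  inline bool IsUnprocessed(const Ref* ref) {
    return ref->pending_next == nullptr;
  }

Everything below replaces checks of IsEnqueued()/IsEnqueuable() with this
single predicate.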
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 8356814..e172f85 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -93,7 +93,7 @@
// in the heap causing corruption since this field would get swept.
if (collector_->IsMarkedHeapReference(referent_addr)) {
if (!preserving_references_ ||
- (LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
+ (LIKELY(!reference->IsFinalizerReferenceInstance()) && reference->IsUnprocessed())) {
return referent_addr->AsMirrorPtr();
}
}
@@ -275,7 +275,7 @@
// GC queues, but since we hold the reference_queue_finalizer_references_lock_ it also prevents
// this race.
MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
- if (!reference->IsEnqueued()) {
+ if (reference->IsUnprocessed()) {
CHECK(reference->IsFinalizerReferenceInstance());
reference->SetPendingNext(reference);
return true;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 67dcc2d..03ab9a1 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -32,42 +32,37 @@
void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
DCHECK(ref != nullptr);
MutexLock mu(self, *lock_);
- if (!ref->IsEnqueued()) {
- EnqueuePendingReference(ref);
+ if (ref->IsUnprocessed()) {
+ EnqueueReference(ref);
}
}
void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
- CHECK(ref->IsEnqueuable());
- EnqueuePendingReference(ref);
-}
-
-void ReferenceQueue::EnqueuePendingReference(mirror::Reference* ref) {
DCHECK(ref != nullptr);
+ CHECK(ref->IsUnprocessed());
if (IsEmpty()) {
// 1-element cyclic queue, i.e.: Reference ref = ..; ref.pendingNext = ref;
list_ = ref;
} else {
mirror::Reference* head = list_->GetPendingNext();
+ DCHECK(head != nullptr);
ref->SetPendingNext(head);
}
+ // Complete the cycle by linking list_ to the new reference.
list_->SetPendingNext(ref);
}
mirror::Reference* ReferenceQueue::DequeuePendingReference() {
DCHECK(!IsEmpty());
- mirror::Reference* head = list_->GetPendingNext();
- DCHECK(head != nullptr);
- mirror::Reference* ref;
+ mirror::Reference* ref = list_->GetPendingNext();
+ DCHECK(ref != nullptr);
// Note: the following code is thread-safe because it is only called from ProcessReferences which
// is single threaded.
- if (list_ == head) {
- ref = list_;
+ if (list_ == ref) {
list_ = nullptr;
} else {
- mirror::Reference* next = head->GetPendingNext();
+ mirror::Reference* next = ref->GetPendingNext();
list_->SetPendingNext(next);
- ref = head;
}
ref->SetPendingNext(nullptr);
Heap* heap = Runtime::Current()->GetHeap();
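
The simplification above is easier to follow with the list shape in mind. Here
is a self-contained sketch of the same cyclic, singly-linked discipline, using
hypothetical Node and CyclicQueue types invented for illustration: list_ stays
on the first element enqueued, each new element is spliced in between list_
and the old head, and dequeuing always pops list_'s successor.

  // Standalone sketch of the cyclic singly-linked list used above, assuming
  // hypothetical Node and CyclicQueue types.
  struct Node {
    Node* pending_next = nullptr;
  };

  struct CyclicQueue {
    Node* list_ = nullptr;  // First element ever enqueued; anchor of the cycle.

    void Enqueue(Node* n) {
      if (list_ == nullptr) {
        list_ = n;  // The assignment below makes a 1-element self-cycle.
      } else {
        n->pending_next = list_->pending_next;  // New node points at old head.
      }
      list_->pending_next = n;  // Close the cycle through the new node.
    }

    // Precondition: queue is not empty, mirroring DCHECK(!IsEmpty()) above.
    Node* Dequeue() {
      Node* ref = list_->pending_next;  // Head of the cycle.
      if (ref == list_) {
        list_ = nullptr;  // That was the last element.
      } else {
        list_->pending_next = ref->pending_next;  // Unlink the head.
      }
      ref->pending_next = nullptr;  // Dequeued nodes return to "unprocessed".
      return ref;
    }
  };

One consequence worth noting: Enqueue(a); Enqueue(b) dequeues b before a, i.e.
reverse enqueue order, which is why the test change below stops asserting any
particular ordering.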
@@ -152,9 +147,7 @@
} else {
ref->ClearReferent<false>();
}
- if (ref->IsEnqueuable()) {
- cleared_references->EnqueuePendingReference(ref);
- }
+ cleared_references->EnqueueReference(ref);
}
}
}
@@ -167,8 +160,6 @@
if (referent_addr->AsMirrorPtr() != nullptr &&
!collector->IsMarkedHeapReference(referent_addr)) {
mirror::Object* forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
- // If the referent is non-null the reference must queuable.
- DCHECK(ref->IsEnqueuable());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
ref->SetZombie<true>(forward_address);
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index aabac97..04d3454 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -44,27 +44,24 @@
class Heap;
// Used to temporarily store java.lang.ref.Reference(s) during GC and prior to queueing on the
-// appropriate java.lang.ref.ReferenceQueue. The linked list is maintained in the
-// java.lang.ref.Reference objects.
+// appropriate java.lang.ref.ReferenceQueue. The list is maintained as an unordered, circular,
+// singly-linked list threaded through the pendingNext fields of the java.lang.ref.Reference
+// objects.
class ReferenceQueue {
public:
explicit ReferenceQueue(Mutex* lock);
- // Enqueue a reference if is not already enqueued. Thread safe to call from multiple threads
- // since it uses a lock to avoid a race between checking for the references presence and adding
- // it.
+ // Enqueue a reference if it is unprocessed. Thread safe to call from multiple
+ // threads since it uses a lock to avoid a race between checking for the reference's presence
+ // and adding it.
void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*lock_);
- // Enqueue a reference, unlike EnqueuePendingReference, enqueue reference checks that the
- // reference IsEnqueueable. Not thread safe, used when mutators are paused to minimize lock
- // overhead.
+ // Enqueue a reference. The reference must be unprocessed.
+ // Not thread safe; used when mutators are paused to minimize lock overhead.
void EnqueueReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_);
- // Enqueue a reference without checking that it is enqueable.
- void EnqueuePendingReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Dequeue the first reference (returns list_).
+ // Dequeue a reference from the queue and return it.
mirror::Reference* DequeuePendingReference() SHARED_REQUIRES(Locks::mutator_lock_);
// Enqueues finalizer references with white referents. White referents are blackened, moved to
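
With EnqueuePendingReference folded into EnqueueReference, the header leaves
exactly two enqueue paths: a locked check-and-enqueue for concurrent phases and
an unsynchronized variant for pauses. A sketch of that split over a toy queue,
with all names hypothetical:

  #include <cstdlib>
  #include <mutex>

  struct Node {
    Node* pending_next = nullptr;
  };

  class ToyReferenceQueue {
   public:
    // Concurrent phase: the presence check and the enqueue must be one atomic
    // step, or two threads could both see the node as unprocessed and enqueue
    // it twice.
    void AtomicEnqueueIfNotEnqueued(Node* n) {
      std::lock_guard<std::mutex> mu(lock_);
      if (n->pending_next == nullptr) {
        EnqueueLocked(n);
      }
    }

    // Pause: mutators are stopped, so the lock is skipped, but the unprocessed
    // invariant is still enforced (standing in for the CHECK in the real code).
    void EnqueueReference(Node* n) {
      if (n->pending_next != nullptr) {
        std::abort();
      }
      EnqueueLocked(n);
    }

   private:
    void EnqueueLocked(Node* n) {
      // Splice n into the cycle, as in the sketch after reference_queue.cc.
      if (list_ == nullptr) {
        list_ = n;
      } else {
        n->pending_next = list_->pending_next;
      }
      list_->pending_next = n;
    }

    std::mutex lock_;
    Node* list_ = nullptr;
  };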
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
index dc23afe..35bf718 100644
--- a/runtime/gc/reference_queue_test.cc
+++ b/runtime/gc/reference_queue_test.cc
@@ -41,19 +41,22 @@
ASSERT_TRUE(ref1.Get() != nullptr);
auto ref2(hs.NewHandle(ref_class->AllocObject(self)->AsReference()));
ASSERT_TRUE(ref2.Get() != nullptr);
- // FIFO ordering.
- queue.EnqueuePendingReference(ref1.Get());
+ queue.EnqueueReference(ref1.Get());
ASSERT_TRUE(!queue.IsEmpty());
ASSERT_EQ(queue.GetLength(), 1U);
- queue.EnqueuePendingReference(ref2.Get());
+ queue.EnqueueReference(ref2.Get());
ASSERT_TRUE(!queue.IsEmpty());
ASSERT_EQ(queue.GetLength(), 2U);
- ASSERT_EQ(queue.DequeuePendingReference(), ref2.Get());
+
+ std::set<mirror::Reference*> refs = {ref1.Get(), ref2.Get()};
+ std::set<mirror::Reference*> dequeued;
+ dequeued.insert(queue.DequeuePendingReference());
ASSERT_TRUE(!queue.IsEmpty());
ASSERT_EQ(queue.GetLength(), 1U);
- ASSERT_EQ(queue.DequeuePendingReference(), ref1.Get());
+ dequeued.insert(queue.DequeuePendingReference());
ASSERT_EQ(queue.GetLength(), 0U);
ASSERT_TRUE(queue.IsEmpty());
+ ASSERT_EQ(refs, dequeued);
}
TEST_F(ReferenceQueueTest, Dump) {
@@ -75,9 +78,9 @@
ASSERT_TRUE(ref1.Get() != nullptr);
auto ref2(hs.NewHandle(finalizer_ref_class->AllocObject(self)->AsReference()));
ASSERT_TRUE(ref2.Get() != nullptr);
- queue.EnqueuePendingReference(ref1.Get());
+ queue.EnqueueReference(ref1.Get());
queue.Dump(LOG(INFO));
- queue.EnqueuePendingReference(ref2.Get());
+ queue.EnqueueReference(ref2.Get());
queue.Dump(LOG(INFO));
}
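
The deleted "// FIFO ordering." comment is the misleading one named in the
commit message: as the cyclic-list sketch shows, references come back in
reverse enqueue order. The rewritten test therefore compares sets, asserting
only what the queue actually guarantees, namely that every enqueued reference
is dequeued exactly once. A tiny self-contained illustration of the pattern,
with plain asserts standing in for the gtest macros:

  #include <cassert>
  #include <set>

  int main() {
    int ref1 = 0, ref2 = 0;  // Stand-ins for the two test references.
    const std::set<int*> refs = {&ref1, &ref2};

    // Pretend these came back from DequeuePendingReference() in whatever
    // order the implementation happens to produce.
    std::set<int*> dequeued;
    dequeued.insert(&ref2);
    dequeued.insert(&ref1);

    // Passes for any dequeue order, and still fails if a reference is
    // missing or a stray one shows up.
    assert(refs == dequeued);
    return 0;
  }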
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index bd4a9c1..12bfe38 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -27,14 +27,6 @@
return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0, pointer_size);
}
-inline bool Reference::IsEnqueuable() {
- // Not using volatile reads as an optimization since this is only called with all the mutators
- // suspended.
- const Object* queue = GetFieldObject<mirror::Object>(QueueOffset());
- const Object* queue_next = GetFieldObject<mirror::Object>(QueueNextOffset());
- return queue != nullptr && queue_next == nullptr;
-}
-
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 5e467ab..3baa12e 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -75,9 +75,7 @@
void ClearReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
}
- // Volatile read/write is not necessary since the java pending next is only accessed from
- // the java threads for cleared references. Once these cleared references have a null referent,
- // we never end up reading their pending next from the GC again.
+
Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<Reference>(PendingNextOffset());
}
@@ -91,14 +89,22 @@
}
}
- bool IsEnqueued() SHARED_REQUIRES(Locks::mutator_lock_) {
- // Since the references are stored as cyclic lists it means that once enqueued, the pending
- // next is always non-null.
- return GetPendingNext() != nullptr;
+ // Returns true if the reference's pendingNext is null, indicating it is
+ // okay to process this reference.
+ //
+ // If pendingNext is not null, then one of the following cases holds:
+ // 1. The reference has already been enqueued to a java ReferenceQueue. In
+ // this case the referent should not be considered for reference processing
+ // ever again.
+ // 2. The reference is currently part of a list of references that may
+ // shortly be enqueued on a java ReferenceQueue. In this case the reference
+ // should not be processed again until it has been removed from that list,
+ // which happens only after determining that the reference is not ready to
+ // be enqueued on a java ReferenceQueue.
+ bool IsUnprocessed() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return GetPendingNext() == nullptr;
}
- bool IsEnqueuable() SHARED_REQUIRES(Locks::mutator_lock_);
-
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
static Class* GetJavaLangRefReference() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!java_lang_ref_Reference_.IsNull());
@@ -115,9 +121,9 @@
}
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
- HeapReference<Reference> pending_next_; // Note this is Java volatile:
- HeapReference<Object> queue_; // Note this is Java volatile:
- HeapReference<Reference> queue_next_; // Note this is Java volatile:
+ HeapReference<Reference> pending_next_;
+ HeapReference<Object> queue_;
+ HeapReference<Reference> queue_next_;
HeapReference<Object> referent_; // Note this is Java volatile:
static GcRoot<Class> java_lang_ref_Reference_;