Always use pendingNext to test enqueueability of references.

Also clean up a misleading comment in a reference queue test.
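
The queue is kept as a circular, singly-linked list threaded through the
pendingNext field of each Reference, so a reference that is not on any
pending list has a null pendingNext; the IsEnqueued()/IsEnqueuable() checks
become IsUnprocessed(), and EnqueuePendingReference() folds into
EnqueueReference(). The following is a minimal stand-alone sketch of that
list discipline, not the ART classes themselves (ToyReference and
ToyReferenceQueue are made-up names, and all GC and locking concerns are
omitted):

  // Toy model of ReferenceQueue's pendingNext-based circular list.
  // Not ART code; names and layout are illustrative only.
  #include <cassert>

  struct ToyReference {
    ToyReference* pending_next = nullptr;
    // "Unprocessed" == not linked into any pending list.
    bool IsUnprocessed() const { return pending_next == nullptr; }
  };

  class ToyReferenceQueue {
   public:
    void Enqueue(ToyReference* ref) {
      assert(ref->IsUnprocessed());
      if (list_ == nullptr) {
        list_ = ref;  // Becomes a one-element cycle below.
      } else {
        ref->pending_next = list_->pending_next;  // Point at the current head.
      }
      list_->pending_next = ref;  // Splice ref in after list_, preserving the cycle.
    }

    ToyReference* Dequeue() {
      assert(list_ != nullptr);
      ToyReference* ref = list_->pending_next;
      if (ref == list_) {
        list_ = nullptr;  // Removed the last element.
      } else {
        list_->pending_next = ref->pending_next;  // Unlink ref from the cycle.
      }
      ref->pending_next = nullptr;  // Back to the unprocessed state.
      return ref;
    }

   private:
    ToyReference* list_ = nullptr;  // Some element of the cycle, or null when empty.
  };

  int main() {
    ToyReference a, b;
    ToyReferenceQueue q;
    q.Enqueue(&a);
    q.Enqueue(&b);
    assert(!a.IsUnprocessed() && !b.IsUnprocessed());
    ToyReference* first = q.Dequeue();
    ToyReference* second = q.Dequeue();
    // The list is unordered, so only check that each reference came back once.
    assert(first != second && first->IsUnprocessed() && second->IsUnprocessed());
    return 0;
  }

Because the list is unordered, the reference queue test now collects the
dequeued references into a set instead of asserting a "FIFO ordering" that
the queue never guaranteed.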

Bug: 24404957
Change-Id: Ieea4788039ecef73cba1871fb480a439bf65b499
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 8356814..e172f85 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -93,7 +93,7 @@
       // in the heap causing corruption since this field would get swept.
       if (collector_->IsMarkedHeapReference(referent_addr)) {
         if (!preserving_references_ ||
-           (LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
+           (LIKELY(!reference->IsFinalizerReferenceInstance()) && reference->IsUnprocessed())) {
           return referent_addr->AsMirrorPtr();
         }
       }
@@ -275,7 +275,7 @@
   // GC queues, but since we hold the lock finalizer_reference_queue_ lock it also prevents this
   // race.
   MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
-  if (!reference->IsEnqueued()) {
+  if (reference->IsUnprocessed()) {
     CHECK(reference->IsFinalizerReferenceInstance());
     reference->SetPendingNext(reference);
     return true;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 67dcc2d..03ab9a1 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -32,42 +32,37 @@
 void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
   DCHECK(ref != nullptr);
   MutexLock mu(self, *lock_);
-  if (!ref->IsEnqueued()) {
-    EnqueuePendingReference(ref);
+  if (ref->IsUnprocessed()) {
+    EnqueueReference(ref);
   }
 }
 
 void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
-  CHECK(ref->IsEnqueuable());
-  EnqueuePendingReference(ref);
-}
-
-void ReferenceQueue::EnqueuePendingReference(mirror::Reference* ref) {
   DCHECK(ref != nullptr);
+  CHECK(ref->IsUnprocessed());
   if (IsEmpty()) {
     // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
     list_ = ref;
   } else {
     mirror::Reference* head = list_->GetPendingNext();
+    DCHECK(head != nullptr);
     ref->SetPendingNext(head);
   }
+  // Add the reference in the middle to preserve the cycle.
   list_->SetPendingNext(ref);
 }
 
 mirror::Reference* ReferenceQueue::DequeuePendingReference() {
   DCHECK(!IsEmpty());
-  mirror::Reference* head = list_->GetPendingNext();
-  DCHECK(head != nullptr);
-  mirror::Reference* ref;
+  mirror::Reference* ref = list_->GetPendingNext();
+  DCHECK(ref != nullptr);
   // Note: the following code is thread-safe because it is only called from ProcessReferences which
   // is single threaded.
-  if (list_ == head) {
-    ref = list_;
+  if (list_ == ref) {
     list_ = nullptr;
   } else {
-    mirror::Reference* next = head->GetPendingNext();
+    mirror::Reference* next = ref->GetPendingNext();
     list_->SetPendingNext(next);
-    ref = head;
   }
   ref->SetPendingNext(nullptr);
   Heap* heap = Runtime::Current()->GetHeap();
@@ -152,9 +147,7 @@
       } else {
         ref->ClearReferent<false>();
       }
-      if (ref->IsEnqueuable()) {
-        cleared_references->EnqueuePendingReference(ref);
-      }
+      cleared_references->EnqueueReference(ref);
     }
   }
 }
@@ -167,8 +160,6 @@
     if (referent_addr->AsMirrorPtr() != nullptr &&
         !collector->IsMarkedHeapReference(referent_addr)) {
       mirror::Object* forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
-      // If the referent is non-null the reference must queuable.
-      DCHECK(ref->IsEnqueuable());
       // Move the updated referent to the zombie field.
       if (Runtime::Current()->IsActiveTransaction()) {
         ref->SetZombie<true>(forward_address);
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index aabac97..04d3454 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -44,27 +44,24 @@
 class Heap;
 
 // Used to temporarily store java.lang.ref.Reference(s) during GC and prior to queueing on the
-// appropriate java.lang.ref.ReferenceQueue. The linked list is maintained in the
-// java.lang.ref.Reference objects.
+// appropriate java.lang.ref.ReferenceQueue. The linked list is maintained as an unordered,
+// circular, and singly-linked list using the pendingNext fields of the java.lang.ref.Reference
+// objects.
 class ReferenceQueue {
  public:
   explicit ReferenceQueue(Mutex* lock);
 
-  // Enqueue a reference if is not already enqueued. Thread safe to call from multiple threads
-  // since it uses a lock to avoid a race between checking for the references presence and adding
-  // it.
+  // Enqueue a reference if it is unprocessed. Thread safe to call from multiple
+  // threads since it uses a lock to avoid a race between checking for the reference's presence and
+  // adding it.
   void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*lock_);
 
-  // Enqueue a reference, unlike EnqueuePendingReference, enqueue reference checks that the
-  // reference IsEnqueueable. Not thread safe, used when mutators are paused to minimize lock
-  // overhead.
+  // Enqueue a reference. The reference must be unprocessed.
+  // Not thread safe, used when mutators are paused to minimize lock overhead.
   void EnqueueReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Enqueue a reference without checking that it is enqueable.
-  void EnqueuePendingReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_);
-
-  // Dequeue the first reference (returns list_).
+  // Dequeue a reference from the queue and return it.
   mirror::Reference* DequeuePendingReference() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Enqueues finalizer references with white referents.  White referents are blackened, moved to
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
index dc23afe..35bf718 100644
--- a/runtime/gc/reference_queue_test.cc
+++ b/runtime/gc/reference_queue_test.cc
@@ -41,19 +41,22 @@
   ASSERT_TRUE(ref1.Get() != nullptr);
   auto ref2(hs.NewHandle(ref_class->AllocObject(self)->AsReference()));
   ASSERT_TRUE(ref2.Get() != nullptr);
-  // FIFO ordering.
-  queue.EnqueuePendingReference(ref1.Get());
+  queue.EnqueueReference(ref1.Get());
   ASSERT_TRUE(!queue.IsEmpty());
   ASSERT_EQ(queue.GetLength(), 1U);
-  queue.EnqueuePendingReference(ref2.Get());
+  queue.EnqueueReference(ref2.Get());
   ASSERT_TRUE(!queue.IsEmpty());
   ASSERT_EQ(queue.GetLength(), 2U);
-  ASSERT_EQ(queue.DequeuePendingReference(), ref2.Get());
+
+  std::set<mirror::Reference*> refs = {ref1.Get(), ref2.Get()};
+  std::set<mirror::Reference*> dequeued;
+  dequeued.insert(queue.DequeuePendingReference());
   ASSERT_TRUE(!queue.IsEmpty());
   ASSERT_EQ(queue.GetLength(), 1U);
-  ASSERT_EQ(queue.DequeuePendingReference(), ref1.Get());
+  dequeued.insert(queue.DequeuePendingReference());
   ASSERT_EQ(queue.GetLength(), 0U);
   ASSERT_TRUE(queue.IsEmpty());
+  ASSERT_EQ(refs, dequeued);
 }
 
 TEST_F(ReferenceQueueTest, Dump) {
@@ -75,9 +78,9 @@
   ASSERT_TRUE(ref1.Get() != nullptr);
   auto ref2(hs.NewHandle(finalizer_ref_class->AllocObject(self)->AsReference()));
   ASSERT_TRUE(ref2.Get() != nullptr);
-  queue.EnqueuePendingReference(ref1.Get());
+  queue.EnqueueReference(ref1.Get());
   queue.Dump(LOG(INFO));
-  queue.EnqueuePendingReference(ref2.Get());
+  queue.EnqueueReference(ref2.Get());
   queue.Dump(LOG(INFO));
 }