Fix soft reference clearing issue.

There was a bug where we would check that the pending next field was
non-null before enqueueing cleared references. This caused references
to not get queued up during ProcessReferences.

Bug: 10626133

Change-Id: Ic1e00e42045092280b4abb3d41f1c58f7adbc3de
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 953fbf9..92bfc4e 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -1342,18 +1342,28 @@
     }
     Thread* self = Thread::Current();
     // TODO: Remove these locks, and use atomic stacks for storing references?
+    // We need to check that the references haven't already been enqueued since we can end up
+    // scanning the same reference multiple times due to dirty cards.
     if (klass->IsSoftReferenceClass()) {
       MutexLock mu(self, *heap_->GetSoftRefQueueLock());
-      heap_->EnqueuePendingReference(obj, &soft_reference_list_);
+      if (!heap_->IsEnqueued(obj)) {
+        heap_->EnqueuePendingReference(obj, &soft_reference_list_);
+      }
     } else if (klass->IsWeakReferenceClass()) {
       MutexLock mu(self, *heap_->GetWeakRefQueueLock());
-      heap_->EnqueuePendingReference(obj, &weak_reference_list_);
+      if (!heap_->IsEnqueued(obj)) {
+        heap_->EnqueuePendingReference(obj, &weak_reference_list_);
+      }
     } else if (klass->IsFinalizerReferenceClass()) {
       MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
-      heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
+      if (!heap_->IsEnqueued(obj)) {
+        heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
+      }
     } else if (klass->IsPhantomReferenceClass()) {
       MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
-      heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
+      if (!heap_->IsEnqueued(obj)) {
+        heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
+      }
     } else {
       LOG(FATAL) << "Invalid reference type " << PrettyClass(klass)
                  << " " << std::hex << klass->GetAccessFlags();
@@ -1499,7 +1509,6 @@
   return heap_->GetMarkBitmap()->Test(object);
 }
 
-
 // Unlink the reference list clearing references objects with white
 // referents.  Cleared references registered to a reference queue are
 // scheduled for appending by the heap worker thread.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index e0048a0..63f0405 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1858,21 +1858,24 @@
   EnqueuePendingReference(ref, cleared_reference_list);
 }
 
+bool Heap::IsEnqueued(mirror::Object* ref) {
+  // Since the references are stored as cyclic lists it means that once enqueued, the pending next
+  // will always be non-null.
+  return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
+}
+
 void Heap::EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) {
   DCHECK(ref != NULL);
   DCHECK(list != NULL);
-  mirror::Object* pending =
-      ref->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
-  if (pending == NULL) {
-    if (*list == NULL) {
-      ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
-      *list = ref;
-    } else {
-      mirror::Object* head =
-          (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
-      ref->SetFieldObject(reference_pendingNext_offset_, head, false);
-      (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
-    }
+  if (*list == NULL) {
+    // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
+    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
+    *list = ref;
+  } else {
+    mirror::Object* head =
+        (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
+    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
+    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
   }
 }
 
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index c93dacb..6782a51 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -228,10 +228,13 @@
 
   // Returns true if the reference object has not yet been enqueued.
   bool IsEnqueuable(const mirror::Object* ref);
-  void EnqueueReference(mirror::Object* ref, mirror::Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void EnqueueReference(mirror::Object* ref, mirror::Object** list)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsEnqueued(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void EnqueuePendingReference(mirror::Object* ref, mirror::Object** list)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Object* DequeuePendingReference(mirror::Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* DequeuePendingReference(mirror::Object** list)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   MemberOffset GetReferencePendingNextOffset() {
     DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);