Add concurrent reference processing.

Concurrent reference processing currently works by going into native
code from java.lang.ref.Reference.get(). From there, we take a fast
path that returns the referent without acquiring any locks when
references are not being processed. On the slow path, we block until
reference processing is complete. It may be possible to improve the
slow path if the referent is blackened (i.e. already marked and
scanned).
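
As a rough sketch from the Java side (assumed shapes, not the actual
libcore code), the lock-free fast path and the blocking slow path both
live behind the native call:

    // Sketch only: the real java.lang.ref.Reference lives in libcore.
    public abstract class Reference<T> {
        volatile T referent;

        public T get() {
            // The native code returns the referent lock-free when
            // references are not being processed (fast path) and
            // otherwise blocks until processing completes (slow path).
            return getReferent();
        }

        private native T getReferent();
    }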

TODO: Investigate doing the fast path in Java code by using racy reads
of a static volatile boolean. This will work as long as there are no
suspend points between the boolean read and the referent read.
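
A minimal sketch of that idea (the flag and method names are
assumptions, not the actual API):

    public abstract class Reference<T> {
        // Assumed flag, set by the runtime while references are being
        // processed.
        private static volatile boolean slowPathEnabled;

        volatile T referent;

        public T get() {
            // Racy read: a stale 'true' merely sends us down the slow
            // path. This is safe only if there is no suspend point
            // between reading the flag and reading the referent, so
            // reference processing cannot begin between the two reads.
            if (!slowPathEnabled) {
                return referent;
            }
            return getReferentSlowPath();  // blocks in native code
        }

        private native T getReferentSlowPath();
    }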

Bug: 14381653

Change-Id: I1546b55be4691fe4ff4aa6d857b234cce7187d87
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5d38b02..bb656e5 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -39,6 +39,7 @@
 #include "gc/collector/partial_mark_sweep.h"
 #include "gc/collector/semi_space.h"
 #include "gc/collector/sticky_mark_sweep.h"
+#include "gc/reference_processor.h"
 #include "gc/space/bump_pointer_space.h"
 #include "gc/space/dlmalloc_space-inl.h"
 #include "gc/space/image_space.h"
@@ -771,102 +772,6 @@
   return FindDiscontinuousSpaceFromObject(obj, true);
 }
 
-struct SoftReferenceArgs {
-  IsMarkedCallback* is_marked_callback_;
-  MarkObjectCallback* mark_callback_;
-  void* arg_;
-};
-
-mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
-  SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
-  // TODO: Not preserve all soft references.
-  return args->mark_callback_(obj, args->arg_);
-}
-
-void Heap::ProcessSoftReferences(TimingLogger& timings, bool clear_soft,
-                                 IsMarkedCallback* is_marked_callback,
-                                 MarkObjectCallback* mark_object_callback,
-                                 ProcessMarkStackCallback* process_mark_stack_callback, void* arg) {
-  // Unless required to clear soft references with white references, preserve some white referents.
-  if (!clear_soft) {
-    // Don't clear for sticky GC.
-    SoftReferenceArgs soft_reference_args;
-    soft_reference_args.is_marked_callback_ = is_marked_callback;
-    soft_reference_args.mark_callback_ = mark_object_callback;
-    soft_reference_args.arg_ = arg;
-    // References with a marked referent are removed from the list.
-    soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
-                                                     &soft_reference_args);
-    process_mark_stack_callback(arg);
-  }
-}
-
-// Process reference class instances and schedule finalizations.
-void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
-                             IsMarkedCallback* is_marked_callback,
-                             MarkObjectCallback* mark_object_callback,
-                             ProcessMarkStackCallback* process_mark_stack_callback, void* arg) {
-  timings.StartSplit("(Paused)ProcessReferences");
-  ProcessSoftReferences(timings, clear_soft, is_marked_callback, mark_object_callback,
-                        process_mark_stack_callback, arg);
-  // Clear all remaining soft and weak references with white referents.
-  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
-  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
-  timings.EndSplit();
-  // Preserve all white objects with finalize methods and schedule them for finalization.
-  timings.StartSplit("(Paused)EnqueueFinalizerReferences");
-  finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
-                                                        mark_object_callback, arg);
-  process_mark_stack_callback(arg);
-  timings.EndSplit();
-  timings.StartSplit("(Paused)ProcessReferences");
-  // Clear all f-reachable soft and weak references with white referents.
-  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
-  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
-  // Clear all phantom references with white referents.
-  phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
-  // At this point all reference queues other than the cleared references should be empty.
-  DCHECK(soft_reference_queue_.IsEmpty());
-  DCHECK(weak_reference_queue_.IsEmpty());
-  DCHECK(finalizer_reference_queue_.IsEmpty());
-  DCHECK(phantom_reference_queue_.IsEmpty());
-  timings.EndSplit();
-}
-
-// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
-// marked, put it on the appropriate list in the heap for later processing.
-void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
-                                  IsMarkedCallback is_marked_callback, void* arg) {
-  // klass can be the class of the old object if the visitor already updated the class of ref.
-  DCHECK(klass->IsReferenceClass());
-  mirror::Object* referent = ref->GetReferent();
-  if (referent != nullptr) {
-    mirror::Object* forward_address = is_marked_callback(referent, arg);
-    // Null means that the object is not currently marked.
-    if (forward_address == nullptr) {
-      Thread* self = Thread::Current();
-      // TODO: Remove these locks, and use atomic stacks for storing references?
-      // We need to check that the references haven't already been enqueued since we can end up
-      // scanning the same reference multiple times due to dirty cards.
-      if (klass->IsSoftReferenceClass()) {
-        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
-      } else if (klass->IsWeakReferenceClass()) {
-        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
-      } else if (klass->IsFinalizerReferenceClass()) {
-        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
-      } else if (klass->IsPhantomReferenceClass()) {
-        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
-      } else {
-        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
-                   << klass->GetAccessFlags();
-      }
-    } else if (referent != forward_address) {
-      // Referent is already marked and we need to update it.
-      ref->SetReferent<false>(forward_address);
-    }
-  }
-}
-
 space::ImageSpace* Heap::GetImageSpace() const {
   for (const auto& space : continuous_spaces_) {
     if (space->IsImageSpace()) {
@@ -1477,7 +1382,7 @@
   ChangeCollector(collector_type);
   tl->ResumeAll();
   // Can't call into java code with all threads suspended.
-  EnqueueClearedReferences();
+  reference_processor_.EnqueueClearedReferences();
   uint64_t duration = NanoTime() - start_time;
   GrowForUtilization(semi_space_collector_);
   FinishGC(self, collector::kGcTypeFull);
@@ -1881,7 +1786,7 @@
   total_bytes_freed_ever_ += collector->GetFreedBytes();
   RequestHeapTrim();
   // Enqueue cleared references.
-  EnqueueClearedReferences();
+  reference_processor_.EnqueueClearedReferences();
   // Grow the heap so that we know when to perform the next GC.
   GrowForUtilization(collector);
   const size_t duration = collector->GetDurationNs();
@@ -1952,9 +1857,9 @@
 // Verify a reference from an object.
 class VerifyReferenceVisitor {
  public:
-  explicit VerifyReferenceVisitor(Heap* heap)
+  explicit VerifyReferenceVisitor(Heap* heap, bool verify_referent)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
-      : heap_(heap), failed_(false) {}
+      : heap_(heap), failed_(false), verify_referent_(verify_referent) {}
 
   bool Failed() const {
     return failed_;
@@ -1962,7 +1867,9 @@
 
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
+    if (verify_referent_) {
+      this->operator()(ref, mirror::Reference::ReferentOffset(), false);
+    }
   }
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
@@ -2079,18 +1986,21 @@
  private:
   Heap* const heap_;
   mutable bool failed_;
+  bool verify_referent_;
 };
 
 // Verify all references within an object, for use with HeapBitmap::Visit.
 class VerifyObjectVisitor {
  public:
-  explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
+  explicit VerifyObjectVisitor(Heap* heap, bool verify_referent)
+      : heap_(heap), failed_(false), verify_referent_(verify_referent) {
+  }
 
   void operator()(mirror::Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     // Note: we are verifying the references in obj but not obj itself, this is because obj must
     // be live or else how did we find it in the live bitmap?
-    VerifyReferenceVisitor visitor(heap_);
+    VerifyReferenceVisitor visitor(heap_, verify_referent_);
     // The class doesn't count as a reference but we should verify it anyways.
     obj->VisitReferences<true>(visitor, visitor);
     failed_ = failed_ || visitor.Failed();
@@ -2109,10 +2019,11 @@
  private:
   Heap* const heap_;
   mutable bool failed_;
+  const bool verify_referent_;
 };
 
 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
-bool Heap::VerifyHeapReferences() {
+bool Heap::VerifyHeapReferences(bool verify_referents) {
   Thread* self = Thread::Current();
   Locks::mutator_lock_->AssertExclusiveHeld(self);
   // Lets sort our allocation stacks so that we can efficiently binary search them.
@@ -2121,7 +2032,7 @@
   // Since we sorted the allocation stack content, need to revoke all
   // thread-local allocation stacks.
   RevokeAllThreadLocalAllocationStacks(self);
-  VerifyObjectVisitor visitor(this);
+  VerifyObjectVisitor visitor(this, verify_referents);
   // Verify objects in the allocation stack since these will be objects which were:
   // 1. Allocated prior to the GC (pre GC verification).
   // 2. Allocated during the GC (pre sweep GC verification).
@@ -2399,7 +2310,9 @@
     // Swapping bound bitmaps does nothing.
     gc->SwapBitmaps();
     SwapSemiSpaces();
-    if (!VerifyHeapReferences()) {
+    // Pass in false since concurrent reference processing can mean that the reference referents
+    // may point to dead objects at the point where PreSweepingGcVerification is called.
+    if (!VerifyHeapReferences(false)) {
       LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed";
     }
     SwapSemiSpaces();
@@ -2622,27 +2535,10 @@
   *object = soa.Decode<mirror::Object*>(arg.get());
 }
 
-void Heap::EnqueueClearedReferences() {
-  Thread* self = Thread::Current();
-  Locks::mutator_lock_->AssertNotHeld(self);
-  if (!cleared_references_.IsEmpty()) {
-    // When a runtime isn't started there are no reference queues to care about so ignore.
-    if (LIKELY(Runtime::Current()->IsStarted())) {
-      ScopedObjectAccess soa(self);
-      ScopedLocalRef<jobject> arg(self->GetJniEnv(),
-                                  soa.AddLocalReference<jobject>(cleared_references_.GetList()));
-      jvalue args[1];
-      args[0].l = arg.get();
-      InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
-    }
-    cleared_references_.Clear();
-  }
-}
-
 void Heap::RequestConcurrentGC(Thread* self) {
   // Make sure that we can do a concurrent GC.
   Runtime* runtime = Runtime::Current();
-  if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
+  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
       self->IsHandlingStackOverflow()) {
     return;
   }