Add native support for FinalizerReference.makeCircularListIfUnenqueued

Called from FinalizerReference.enqueueSentinelReference to prevent
a race where the GC updates the pendingNext field of the sentinel
reference before enqueueSentinelReference runs.

Bug: 17462553

(cherry picked from commit 3256166df40981f1f1997a5f00303712277c963f)

Change-Id: I7ad2fd250c2715d1aeb919bd548ef9aab24f30a2
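
For context: in ART, whether a reference is enqueued is encoded in its
pendingNext field. A null pendingNext means "not yet enqueued"; enqueued
references are linked into a circular list, so a reference whose pendingNext
points at itself (a one-element circular list) reads as already enqueued and
the GC's enqueue-if-not-enqueued step will skip it. A minimal standalone
sketch of that convention (the Reference type below is hypothetical, not
ART's mirror::Reference):

    struct Reference {
      Reference* pending_next = nullptr;  // nullptr => not yet enqueued.
      bool IsEnqueued() const { return pending_next != nullptr; }
    };

    int main() {
      Reference sentinel;
      // Before: the GC has not enqueued the sentinel, IsEnqueued() is false.
      sentinel.pending_next = &sentinel;  // One-element circular list.
      // After: IsEnqueued() is true, so the GC will not enqueue it again.
      return sentinel.IsEnqueued() ? 0 : 1;
    }
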
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 5065c58..61bc9ff 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -120,6 +120,7 @@
   native/java_lang_Thread.cc \
   native/java_lang_Throwable.cc \
   native/java_lang_VMClassLoader.cc \
+  native/java_lang_ref_FinalizerReference.cc \
   native/java_lang_ref_Reference.cc \
   native/java_lang_reflect_Array.cc \
   native/java_lang_reflect_Constructor.cc \
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 52a3dea..455680b 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -37,19 +37,25 @@
 ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
 Mutex* Locks::deoptimization_lock_ = nullptr;
 ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
+Mutex* Locks::intern_table_lock_ = nullptr;
 Mutex* Locks::jni_libraries_lock_ = nullptr;
 Mutex* Locks::logging_lock_ = nullptr;
 Mutex* Locks::mem_maps_lock_ = nullptr;
 Mutex* Locks::modify_ldt_lock_ = nullptr;
 ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
 Mutex* Locks::profiler_lock_ = nullptr;
+Mutex* Locks::reference_processor_lock_ = nullptr;
+Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
 Mutex* Locks::runtime_shutdown_lock_ = nullptr;
 Mutex* Locks::thread_list_lock_ = nullptr;
 Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
 Mutex* Locks::thread_suspend_count_lock_ = nullptr;
 Mutex* Locks::trace_lock_ = nullptr;
 Mutex* Locks::unexpected_signal_lock_ = nullptr;
-Mutex* Locks::intern_table_lock_ = nullptr;
 
 struct AllMutexData {
   // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
@@ -933,6 +939,30 @@
     DCHECK(intern_table_lock_ == nullptr);
     intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
 
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
+    DCHECK(reference_processor_lock_ == nullptr);
+    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
+    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
+    reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
+    DCHECK(reference_queue_weak_references_lock_ == nullptr);
+    reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue weak references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
+    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
+    reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
+    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
+    reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
+    DCHECK(reference_queue_soft_references_lock_ == nullptr);
+    reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
+
     UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
     DCHECK(abort_lock_ == nullptr);
     abort_lock_ = new Mutex("abort lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 354298e..20f58de 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -60,11 +60,16 @@
   kThreadSuspendCountLock,
   kAbortLock,
   kJdwpSocketLock,
+  kReferenceQueueSoftReferencesLock,
+  kReferenceQueuePhantomReferencesLock,
+  kReferenceQueueFinalizerReferencesLock,
+  kReferenceQueueWeakReferencesLock,
+  kReferenceQueueClearedReferencesLock,
+  kReferenceProcessorLock,
   kRosAllocGlobalLock,
   kRosAllocBracketLock,
   kRosAllocBulkFreeLock,
   kAllocSpaceLock,
-  kReferenceProcessorLock,
   kDexFileMethodInlinerLock,
   kDexFileToMethodInlinerMapLock,
   kMarkSweepMarkStackLock,
@@ -594,8 +599,26 @@
   // Guards intern table.
   static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
 
+  // Guards reference processor.
+  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);
+
+  // Guards cleared references queue.
+  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);
+
+  // Guards weak references queue.
+  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);
+
+  // Guards finalizer references queue.
+  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);
+
+  // Guards phantom references queue.
+  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);
+
+  // Guards soft references queue.
+  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
+
   // Have an exclusive aborting thread.
-  static Mutex* abort_lock_ ACQUIRED_AFTER(intern_table_lock_);
+  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
 
   // Allow mutual exclusion when manipulating Thread::suspend_count_.
   // TODO: Does the trade-off of a per-thread lock make sense?
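
The new LockLevel entries and the ACQUIRED_AFTER chain place the reference
locks into ART's leveled hierarchy: a thread may only acquire a lock at a
strictly lower level than the locks it already holds, which is what lets
MakeCircularListIfUnenqueued take Locks::reference_processor_lock_ first and
Locks::reference_queue_finalizer_references_lock_ second. A toy sketch of
leveled checking (illustrative only, not ART's BaseMutex):

    #include <cassert>
    #include <climits>
    #include <mutex>

    thread_local int tls_lowest_held_level = INT_MAX;  // Innermost level held.

    class LeveledMutex {
     public:
      explicit LeveledMutex(int level) : level_(level) {}

      void Lock() {
        // Only strictly lower levels may be acquired while holding a lock,
        // so lock-order inversions assert instead of deadlocking.
        assert(level_ < tls_lowest_held_level && "lock order violation");
        mu_.lock();
        saved_outer_level_ = tls_lowest_held_level;
        tls_lowest_held_level = level_;
      }

      void Unlock() {  // Assumes LIFO (scoped) unlocking.
        tls_lowest_held_level = saved_outer_level_;
        mu_.unlock();
      }

     private:
      std::mutex mu_;
      const int level_;
      int saved_outer_level_ = INT_MAX;
    };
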
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index d3641d1..75de623 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -30,8 +30,13 @@
 
 ReferenceProcessor::ReferenceProcessor()
     : process_references_args_(nullptr, nullptr, nullptr),
-      preserving_references_(false), lock_("reference processor lock", kReferenceProcessorLock),
-      condition_("reference processor condition", lock_) {
+      preserving_references_(false),
+      condition_("reference processor condition", *Locks::reference_processor_lock_) ,
+      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
+      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
+      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
+      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
+      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
 }
 
 void ReferenceProcessor::EnableSlowPath() {
@@ -50,7 +55,7 @@
   if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
     return referent;
   }
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::reference_processor_lock_);
   while (SlowPathEnabled()) {
     mirror::HeapReference<mirror::Object>* const referent_addr =
         reference->GetReferentReferenceAddr();
@@ -93,12 +98,12 @@
 }
 
 void ReferenceProcessor::StartPreservingReferences(Thread* self) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::reference_processor_lock_);
   preserving_references_ = true;
 }
 
 void ReferenceProcessor::StopPreservingReferences(Thread* self) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::reference_processor_lock_);
   preserving_references_ = false;
   // We are done preserving references, some people who are blocked may see a marked referent.
   condition_.Broadcast(self);
@@ -114,7 +119,7 @@
   TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
   Thread* self = Thread::Current();
   {
-    MutexLock mu(self, lock_);
+    MutexLock mu(self, *Locks::reference_processor_lock_);
     process_references_args_.is_marked_callback_ = is_marked_callback;
     process_references_args_.mark_callback_ = mark_object_callback;
     process_references_args_.arg_ = arg;
@@ -163,7 +168,7 @@
   DCHECK(finalizer_reference_queue_.IsEmpty());
   DCHECK(phantom_reference_queue_.IsEmpty());
   {
-    MutexLock mu(self, lock_);
+    MutexLock mu(self, *Locks::reference_processor_lock_);
     // Need to always do this since the next GC may be concurrent. Doing this for only concurrent
     // could result in a stale is_marked_callback_ being called before the reference processing
     // starts since there is a small window of time where slow_path_enabled_ is enabled but the
@@ -225,5 +230,31 @@
   }
 }
 
+bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, *Locks::reference_processor_lock_);
+  // Wait until we are done processing references.
+  while (SlowPathEnabled()) {
+    condition_.Wait(self);
+  }
+  // At this point, since the sentinel of the reference is live, it is guaranteed not to be
+  // enqueued if we just finished processing references. Otherwise, we may be in the middle of
+  // the main GC phase, but since we are holding the reference processor lock, reference
+  // processing can't begin. The GC could have just enqueued the reference on one of its
+  // internal queues, but since we also hold the finalizer_reference_queue_ lock, that race is
+  // prevented as well.
+  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
+  if (!reference->IsEnqueued()) {
+    CHECK(reference->IsFinalizerReferenceInstance());
+    if (Runtime::Current()->IsActiveTransaction()) {
+      reference->SetPendingNext<true>(reference);
+    } else {
+      reference->SetPendingNext<false>(reference);
+    }
+    return true;
+  }
+  return false;
+}
+
 }  // namespace gc
 }  // namespace art
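
Restated with standard C++ primitives, the synchronization shape of
MakeCircularListIfUnenqueued looks as follows (a sketch only: ART uses its
own Mutex/ConditionVariable, and Reference here is the same hypothetical
stand-in as in the earlier sketch). Waiting under the processor lock rules
out in-flight reference processing; holding the queue lock closes the window
between the IsEnqueued() check and the self-link:

    #include <condition_variable>
    #include <mutex>

    struct Reference {
      Reference* pending_next = nullptr;
      bool IsEnqueued() const { return pending_next != nullptr; }
    };

    std::mutex processor_lock;
    std::condition_variable processing_done;
    bool slow_path_enabled = false;  // Set by the GC while processing references.
    std::mutex finalizer_queue_lock;

    bool MakeCircularListIfUnenqueued(Reference* ref) {
      std::unique_lock<std::mutex> pl(processor_lock);
      // Wait until any reference-processing phase has finished.
      processing_done.wait(pl, [] { return !slow_path_enabled; });
      // Still holding processor_lock, so processing cannot restart; the queue
      // lock stops the GC from enqueuing ref on one of its internal queues.
      std::lock_guard<std::mutex> ql(finalizer_queue_lock);
      if (ref->IsEnqueued()) {
        return false;  // The GC got there first.
      }
      ref->pending_next = ref;  // Self-link: reads as enqueued from now on.
      return true;
    }
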
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 7274457..5eb095b 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -28,6 +28,7 @@
 class TimingLogger;
 
 namespace mirror {
+class FinalizerReference;
 class Object;
 class Reference;
 }  // namespace mirror
@@ -48,20 +49,25 @@
                          ProcessMarkStackCallback* process_mark_stack_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      LOCKS_EXCLUDED(lock_);
+      LOCKS_EXCLUDED(Locks::reference_processor_lock_);
   // The slow path bool is contained in the reference class object, can only be set once
   // Only allow setting this with mutators suspended so that we can avoid using a lock in the
   // GetReferent fast path as an optimization.
   void EnableSlowPath() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
   // Decode the referent, may block if references are being processed.
   mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
   void EnqueueClearedReferences(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_);
   void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                               IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void UpdateRoots(IsMarkedCallback* callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  // Make a circular list with the reference if it is not enqueued. Uses the finalizer queue lock.
+  bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(Locks::reference_processor_lock_,
+                     Locks::reference_queue_finalizer_references_lock_);
 
  private:
   class ProcessReferencesArgs {
@@ -78,23 +84,21 @@
   };
   bool SlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // Called by ProcessReferences.
-  void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(lock_)
+  void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::reference_processor_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // If we are preserving references it means that some dead objects may become live, we use start
   // and stop preserving to block mutators using GetReferrent from getting access to these
   // referents.
-  void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
-  void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
+  void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
+  void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
   // Process args, used by the GetReferent to return referents which are already marked.
-  ProcessReferencesArgs process_references_args_ GUARDED_BY(lock_);
+  ProcessReferencesArgs process_references_args_ GUARDED_BY(Locks::reference_processor_lock_);
   // Boolean for whether or not we are preserving references (either soft references or finalizers).
   // If this is true, then we cannot return a referent (see comment in GetReferent).
-  bool preserving_references_ GUARDED_BY(lock_);
-  // Lock that guards the reference processing.
-  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  bool preserving_references_ GUARDED_BY(Locks::reference_processor_lock_);
   // Condition that people wait on if they attempt to get the referent of a reference while
   // processing is in progress.
-  ConditionVariable condition_ GUARDED_BY(lock_);
+  ConditionVariable condition_ GUARDED_BY(Locks::reference_processor_lock_);
   // Reference queues used by the GC.
   ReferenceQueue soft_reference_queue_;
   ReferenceQueue weak_reference_queue_;
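
The SHARED_LOCKS_REQUIRED / LOCKS_EXCLUDED / GUARDED_BY macros above wrap
Clang's -Wthread-safety attributes, and those attributes must name a concrete
lock expression; that is one reason the member lock_ is replaced by the
globally visible Locks::reference_processor_lock_, which call sites in other
files can now mention. A minimal demo of the underlying machinery (attribute
spellings per the Clang documentation; compile with clang++ -Wthread-safety):

    #include <mutex>

    class __attribute__((capability("mutex"))) Mu {
     public:
      void Lock() __attribute__((acquire_capability())) { mu_.lock(); }
      void Unlock() __attribute__((release_capability())) { mu_.unlock(); }

     private:
      std::mutex mu_;
    };

    Mu gLock;
    int gCounter __attribute__((guarded_by(gLock))) = 0;

    // Like EXCLUSIVE_LOCKS_REQUIRED(...): callers must hold gLock.
    void Increment() __attribute__((requires_capability(gLock))) {
      ++gCounter;
    }

    // Like LOCKS_EXCLUDED(...): callers must NOT hold gLock.
    void IncrementLocked() __attribute__((locks_excluded(gLock))) {
      gLock.Lock();
      Increment();
      gLock.Unlock();
    }
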
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index c3931e8..4003524 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -25,13 +25,12 @@
 namespace art {
 namespace gc {
 
-ReferenceQueue::ReferenceQueue()
-    : lock_("reference queue lock"), list_(nullptr) {
+ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
 }
 
 void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
   DCHECK(ref != NULL);
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *lock_);
   if (!ref->IsEnqueued()) {
     EnqueuePendingReference(ref);
   }
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index cd814bb..dbf4abc 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -44,7 +44,7 @@
 // java.lang.ref.Reference objects.
 class ReferenceQueue {
  public:
-  explicit ReferenceQueue();
+  explicit ReferenceQueue(Mutex* lock);
   // Enqueue a reference if is not already enqueued. Thread safe to call from multiple threads
   // since it uses a lock to avoid a race between checking for the references presence and adding
   // it.
@@ -90,7 +90,7 @@
  private:
   // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
   // calling AtomicEnqueueIfNotEnqueued.
-  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  Mutex* lock_;
   // The actual reference list. Only a root for the mark compact GC since it will be null for other
   // GC types.
   mirror::Reference* list_;
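
Design note: the old member Mutex sat at DEFAULT_MUTEX_ACQUIRED_AFTER, so it
had no named place in the lock hierarchy and could not be referenced by
thread-safety annotations or taken directly by the reference processor.
Borrowing a caller-supplied lock fixes both. A small sketch of the pattern
with standard types (hypothetical names):

    #include <mutex>
    #include <vector>

    // The queue borrows its lock instead of owning it, so the same mutex can
    // appear in a global lock table and be acquired by outside code.
    class IntQueue {
     public:
      explicit IntQueue(std::mutex* lock) : lock_(lock) {}

      void AtomicPushIfAbsent(int value) {
        std::lock_guard<std::mutex> guard(*lock_);
        for (int v : items_) {
          if (v == value) return;  // Already present; mirrors IsEnqueued().
        }
        items_.push_back(value);
      }

     private:
      std::mutex* const lock_;  // Owned elsewhere, cf. Locks::reference_queue_*.
      std::vector<int> items_;
    };

    std::mutex gQueueLock;  // Globally named, like the new reference queue locks.
    IntQueue gQueue(&gQueueLock);
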
diff --git a/runtime/native/java_lang_ref_FinalizerReference.cc b/runtime/native/java_lang_ref_FinalizerReference.cc
new file mode 100644
index 0000000..ad48ec0
--- /dev/null
+++ b/runtime/native/java_lang_ref_FinalizerReference.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc/heap.h"
+#include "gc/reference_processor.h"
+#include "jni_internal.h"
+#include "mirror/object-inl.h"
+#include "mirror/reference-inl.h"
+#include "scoped_fast_native_object_access.h"
+
+namespace art {
+
+static jboolean FinalizerReference_makeCircularListIfUnenqueued(JNIEnv* env, jobject javaThis) {
+  ScopedFastNativeObjectAccess soa(env);
+  mirror::FinalizerReference* const ref = soa.Decode<mirror::FinalizerReference*>(javaThis);
+  return Runtime::Current()->GetHeap()->GetReferenceProcessor()->MakeCircularListIfUnenqueued(ref);
+}
+
+static JNINativeMethod gMethods[] = {
+  NATIVE_METHOD(FinalizerReference, makeCircularListIfUnenqueued, "!()Z"),
+};
+
+void register_java_lang_ref_FinalizerReference(JNIEnv* env) {
+  REGISTER_NATIVE_METHODS("java/lang/ref/FinalizerReference");
+}
+
+}  // namespace art
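
The "!" prefix in "!()Z" marks the method as fast-native in ART's
registration macros, matching the ScopedFastNativeObjectAccess used in the
implementation. Stripped of those macros, the file reduces to ordinary JNI
registration; a sketch with plain JNI (the Java-side declaration in the
comment is assumed from libcore and is not part of this patch):

    #include <jni.h>

    // Placeholder body; the real implementation calls into the runtime's
    // ReferenceProcessor as in the file above.
    static jboolean MakeCircularListIfUnenqueued(JNIEnv*, jobject) {
      return JNI_FALSE;
    }

    static const JNINativeMethod kMethods[] = {
      // Must match the (assumed) Java declaration:
      //   private native boolean makeCircularListIfUnenqueued();
      {"makeCircularListIfUnenqueued", "()Z",
       reinterpret_cast<void*>(MakeCircularListIfUnenqueued)},
    };

    void RegisterFinalizerReferenceNatives(JNIEnv* env) {
      jclass klass = env->FindClass("java/lang/ref/FinalizerReference");
      env->RegisterNatives(klass, kMethods,
                           static_cast<jint>(sizeof(kMethods) / sizeof(kMethods[0])));
    }
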
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index b4a09e5..a1ea3cf 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -957,6 +957,7 @@
   REGISTER(register_java_lang_System);
   REGISTER(register_java_lang_Thread);
   REGISTER(register_java_lang_VMClassLoader);
+  REGISTER(register_java_lang_ref_FinalizerReference);
   REGISTER(register_java_lang_ref_Reference);
   REGISTER(register_java_lang_reflect_Array);
   REGISTER(register_java_lang_reflect_Constructor);