Add native support for FinalizerList.makeCircularListIfUnenqueued
Called from FinalizerReference.enqueueSentinelReference to prevent
a race where the GC updates the pendingNext field of the sentinel
reference before enqueueSentinelReference can make the list circular.
Bug: 17462553
(cherry picked from commit 3256166df40981f1f1997a5f00303712277c963f)
Change-Id: I7ad2fd250c2715d1aeb919bd548ef9aab24f30a2
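For context, the fix hinges on one invariant: the patch treats a reference as enqueued
once its pendingNext field is non-null, so self-linking it into a one-element circular
list under the same lock the GC's enqueue path uses makes the check-then-link step
atomic. A minimal standalone sketch of that idea (hypothetical names, not ART code):

    #include <mutex>

    struct Ref {
      Ref* pending_next = nullptr;  // null means "not yet enqueued".
    };

    std::mutex queue_lock;  // Stands in for the shared finalizer queue lock.

    // Self-link the reference into a one-element circular list unless the
    // GC got there first; returns true if we claimed it.
    bool MakeCircularIfUnenqueued(Ref* ref) {
      std::lock_guard<std::mutex> lock(queue_lock);
      if (ref->pending_next != nullptr) {
        return false;  // Already enqueued by the GC; leave it alone.
      }
      ref->pending_next = ref;  // A circular list of one counts as enqueued.
      return true;
    }

Holding the same lock as the GC's enqueue path is what closes the window between the
null check and the store.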
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index d3641d1..75de623 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -30,8 +30,13 @@
ReferenceProcessor::ReferenceProcessor()
: process_references_args_(nullptr, nullptr, nullptr),
- preserving_references_(false), lock_("reference processor lock", kReferenceProcessorLock),
- condition_("reference processor condition", lock_) {
+ preserving_references_(false),
+ condition_("reference processor condition", *Locks::reference_processor_lock_),
+ soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
+ weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
+ finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
+ phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
+ cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}
void ReferenceProcessor::EnableSlowPath() {
@@ -50,7 +55,7 @@
if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
return referent;
}
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::reference_processor_lock_);
while (SlowPathEnabled()) {
mirror::HeapReference<mirror::Object>* const referent_addr =
reference->GetReferentReferenceAddr();
@@ -93,12 +98,12 @@
}
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::reference_processor_lock_);
preserving_references_ = true;
}
void ReferenceProcessor::StopPreservingReferences(Thread* self) {
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::reference_processor_lock_);
preserving_references_ = false;
// We are done preserving references; threads that are blocked may now see a marked referent.
condition_.Broadcast(self);
@@ -114,7 +119,7 @@
TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
Thread* self = Thread::Current();
{
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::reference_processor_lock_);
process_references_args_.is_marked_callback_ = is_marked_callback;
process_references_args_.mark_callback_ = mark_object_callback;
process_references_args_.arg_ = arg;
@@ -163,7 +168,7 @@
DCHECK(finalizer_reference_queue_.IsEmpty());
DCHECK(phantom_reference_queue_.IsEmpty());
{
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::reference_processor_lock_);
// Need to always do this since the next GC may be concurrent. Doing this for only concurrent
// could result in a stale is_marked_callback_ being called before the reference processing
// starts since there is a small window of time where slow_path_enabled_ is enabled but the
@@ -225,5 +230,31 @@
}
}
+bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::reference_processor_lock_);
+ // Wait until we are done processing references.
+ while (SlowPathEnabled()) {
+ condition_.Wait(self);
+ }
+ // At this point, since the sentinel of the reference is live, the reference is guaranteed
+ // not to be enqueued if we just finished processing references. Otherwise, the main GC phase
+ // may be running. Holding the reference processor lock guarantees that reference processing
+ // cannot begin. The GC could have just enqueued the reference on one of the internal GC
+ // queues, but since we also hold the finalizer_reference_queue_ lock, that race is prevented
+ // as well.
+ MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
+ if (!reference->IsEnqueued()) {
+ CHECK(reference->IsFinalizerReferenceInstance());
+ if (Runtime::Current()->IsActiveTransaction()) {
+ reference->SetPendingNext<true>(reference);
+ } else {
+ reference->SetPendingNext<false>(reference);
+ }
+ return true;
+ }
+ return false;
+}
+
} // namespace gc
} // namespace art
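The new method also has to wait out any in-flight reference processing before its
enqueued check is meaningful, which is what the while (SlowPathEnabled()) loop above
does. A hedged sketch (hypothetical names, std::mutex standing in for ART's Mutex) of
that wait-then-check shape:

    #include <condition_variable>
    #include <mutex>

    std::mutex processor_lock;
    std::condition_variable processor_condition;
    bool slow_path_enabled = false;  // True while references are being processed.

    // Block until reference processing finishes. The caller keeps holding the
    // lock afterwards, so processing cannot restart before its check runs.
    void WaitForReferenceProcessing(std::unique_lock<std::mutex>& lock) {
      while (slow_path_enabled) {  // Loop guards against spurious wakeups.
        processor_condition.wait(lock);
      }
    }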
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 7274457..5eb095b 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -28,6 +28,7 @@
class TimingLogger;
namespace mirror {
+class FinalizerReference;
class Object;
class Reference;
} // namespace mirror
@@ -48,20 +49,25 @@
ProcessMarkStackCallback* process_mark_stack_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- LOCKS_EXCLUDED(lock_);
+ LOCKS_EXCLUDED(Locks::reference_processor_lock_);
// The slow path bool is contained in the reference class object and can only be set once.
// Only allow setting this with mutators suspended so that we can avoid using a lock in the
// GetReferent fast path as an optimization.
void EnableSlowPath() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Decode the referent, may block if references are being processed.
mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
void EnqueueClearedReferences(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_);
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void UpdateRoots(IsMarkedCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ // Make a circular list with the reference if it is not enqueued. Uses the finalizer queue lock.
+ bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::reference_processor_lock_,
+ Locks::reference_queue_finalizer_references_lock_);
private:
class ProcessReferencesArgs {
@@ -78,23 +84,21 @@
};
bool SlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Called by ProcessReferences.
- void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(lock_)
+ void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::reference_processor_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// If we are preserving references it means that some dead objects may become live, we use start
// and stop preserving to block mutators using GetReferent from getting access to these
// referents.
- void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
- void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
+ void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
+ void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
// Process args, used by GetReferent to return referents which are already marked.
- ProcessReferencesArgs process_references_args_ GUARDED_BY(lock_);
+ ProcessReferencesArgs process_references_args_ GUARDED_BY(Locks::reference_processor_lock_);
// Boolean for whether or not we are preserving references (either soft references or finalizers).
// If this is true, then we cannot return a referent (see comment in GetReferent).
- bool preserving_references_ GUARDED_BY(lock_);
- // Lock that guards the reference processing.
- Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ bool preserving_references_ GUARDED_BY(Locks::reference_processor_lock_);
// Condition that threads wait on if they attempt to get the referent of a reference while
// processing is in progress.
- ConditionVariable condition_ GUARDED_BY(lock_);
+ ConditionVariable condition_ GUARDED_BY(Locks::reference_processor_lock_);
// Reference queues used by the GC.
ReferenceQueue soft_reference_queue_;
ReferenceQueue weak_reference_queue_;
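Promoting the per-instance lock_ to globally declared locks lets the GUARDED_BY and
LOCKS_EXCLUDED annotations above name one well-known lock across translation units, and
pins a single acquisition order between the processor lock and the finalizer queue lock.
An illustrative sketch of that ordering (hypothetical names, plain std::mutex in place
of ART's annotated Mutex):

    #include <mutex>

    std::mutex reference_processor_lock;         // Always acquired first.
    std::mutex finalizer_references_queue_lock;  // Always acquired second.

    // Every path takes the two locks in the same order, so the new
    // MakeCircularListIfUnenqueued cannot deadlock against the GC.
    template <typename Action>
    void WithBothLocks(Action action) {
      std::lock_guard<std::mutex> outer(reference_processor_lock);
      std::lock_guard<std::mutex> inner(finalizer_references_queue_lock);
      action();  // Reference processing and enqueuing are both excluded here.
    }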
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index c3931e8..4003524 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -25,13 +25,12 @@
namespace art {
namespace gc {
-ReferenceQueue::ReferenceQueue()
- : lock_("reference queue lock"), list_(nullptr) {
+ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}
void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
DCHECK(ref != NULL);
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *lock_);
if (!ref->IsEnqueued()) {
EnqueuePendingReference(ref);
}
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index cd814bb..dbf4abc 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -44,7 +44,7 @@
// java.lang.ref.Reference objects.
class ReferenceQueue {
public:
- explicit ReferenceQueue();
+ explicit ReferenceQueue(Mutex* lock);
// Enqueue a reference if it is not already enqueued. Thread safe to call from multiple threads
// since it uses a lock to avoid a race between checking for the references presence and adding
// it.
@@ -90,7 +90,7 @@
private:
// Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
// calling AtomicEnqueueIfNotEnqueued.
- Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ Mutex* lock_;
// The actual reference list. Only a root for the mark compact GC since it will be null for other
// GC types.
mirror::Reference* list_;
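The constructor change above is the piece that ties everything together: each
ReferenceQueue now borrows an externally owned mutex instead of creating its own, so
ReferenceProcessor can hold the very same lock inside MakeCircularListIfUnenqueued. A
hedged sketch of the borrowed-lock shape (hypothetical names, not the real class):

    #include <mutex>

    struct Reference;  // Opaque here.

    class RefQueue {
     public:
      explicit RefQueue(std::mutex* lock) : lock_(lock) {}

      // The same lock protects this check and the processor's own check.
      void AtomicEnqueueIfNotEnqueued(Reference* ref);

     private:
      std::mutex* lock_;  // Borrowed, not owned; shared with the processor.
      Reference* list_ = nullptr;
    };

    // Usage: the processor hands each queue its matching global lock.
    std::mutex finalizer_queue_lock;
    RefQueue finalizer_queue(&finalizer_queue_lock);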