path: root/runtime/gc/reference_processor.cc
author Mathieu Chartier <mathieuc@google.com> 2016-12-13 14:44:33 -0800
committer Mathieu Chartier <mathieuc@google.com> 2016-12-13 16:40:14 -0800
commit c9a7028430d95090ad3d7690203fd887d1e201a4 (patch)
tree 83e690d43ce87913c274b788c61d3425e6570e51 /runtime/gc/reference_processor.cc
parent 36994ba006c18c1933815cc0c4c036df086e6814 (diff)
Add exclusion for Reference.clear and reference processing
Prevents race conditions like unclearing cleared references or calling
IsMarkedHeapReference on null references.

Bug: 33389022
Test: test-art-host
Change-Id: Iee83b76d84453e929172f1a83f284aa4910e126c
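For context, a minimal self-contained sketch of the exclusion this change introduces: the mutator-side clear blocks on a condition variable while reference processing is in progress, so it cannot race with the GC. The names here (SimpleReferenceProcessor, processing_, StartProcessing, FinishProcessing, Clear) are hypothetical stand-ins, and std::mutex/std::condition_variable stand in for ART's reference_processor_lock_ and its ConditionVariable; this is an illustration, not the real implementation.

#include <condition_variable>
#include <mutex>

// Hypothetical stand-in for ART's ReferenceProcessor (illustration only).
class SimpleReferenceProcessor {
 public:
  // GC side: bracket the reference-processing phase.
  void StartProcessing() {
    std::lock_guard<std::mutex> lock(mutex_);
    processing_ = true;
  }

  void FinishProcessing() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      processing_ = false;
    }
    condition_.notify_all();
  }

  // Mutator side (Reference.clear): block until the GC has finished
  // examining referents, then clear the slot under the same lock.
  void Clear(void** referent_slot) {
    std::unique_lock<std::mutex> lock(mutex_);
    condition_.wait(lock, [this] { return !processing_; });
    *referent_slot = nullptr;
  }

 private:
  std::mutex mutex_;
  std::condition_variable condition_;
  bool processing_ = false;
};

In the actual change below, ClearReferent additionally dispatches on whether a transaction is active, and the wait loop checks for empty checkpoints before blocking, as its in-tree comment notes.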
Diffstat (limited to 'runtime/gc/reference_processor.cc')
-rw-r--r--  runtime/gc/reference_processor.cc  25
1 file changed, 22 insertions, 3 deletions
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 641a919506..081be968eb 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -269,11 +269,23 @@ void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
}
}
-bool ReferenceProcessor::MakeCircularListIfUnenqueued(
- ObjPtr<mirror::FinalizerReference> reference) {
+void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::reference_processor_lock_);
- // Wait untul we are done processing reference.
+ // Need to wait until reference processing is done since IsMarkedHeapReference does not have a
+ // CAS. If we do not wait, it can result in the GC un-clearing references due to race conditions.
+ // This also handles the race where the referent gets cleared after a null check but before
+ // IsMarkedHeapReference is called.
+ WaitUntilDoneProcessingReferences(self);
+ if (Runtime::Current()->IsActiveTransaction()) {
+ ref->ClearReferent<true>();
+ } else {
+ ref->ClearReferent<false>();
+ }
+}
+
+void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
+ // Wait until we are done processing references.
while ((!kUseReadBarrier && SlowPathEnabled()) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
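The comment added in the hunk above describes the window this wait closes: IsMarkedHeapReference has no CAS, so a concurrent Reference.clear can null out the referent between the GC's null check and the mark query. Below is a stand-alone illustration of that window using hypothetical types and helper names (Object, IsMarkedSketch, GcProcessReferenceSketch), not ART's real API.

#include <atomic>
#include <cassert>

struct Object {};  // Hypothetical placeholder for a heap object.

// Stand-in for the GC's mark query; it assumes a non-null referent.
bool IsMarkedSketch(std::atomic<Object*>* slot) {
  Object* obj = slot->load();
  assert(obj != nullptr);  // Violated if a mutator cleared the slot concurrently.
  return true;             // Pretend every live object is marked.
}

void GcProcessReferenceSketch(std::atomic<Object*>* referent_slot) {
  if (referent_slot->load() != nullptr) {
    // Without the exclusion, Reference.clear can store null right here,
    // so the call below observes a null referent.
    IsMarkedSketch(referent_slot);
  }
}

With the exclusion in place, ClearReferent holds reference_processor_lock_ and blocks in WaitUntilDoneProcessingReferences, so the clear cannot land between the GC's null check and its mark query.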
@@ -281,6 +293,13 @@ bool ReferenceProcessor::MakeCircularListIfUnenqueued(
self->CheckEmptyCheckpoint();
condition_.WaitHoldingLocks(self);
}
+}
+
+bool ReferenceProcessor::MakeCircularListIfUnenqueued(
+ ObjPtr<mirror::FinalizerReference> reference) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::reference_processor_lock_);
+ WaitUntilDoneProcessingReferences(self);
// At this point, since the sentinel of the reference is live, it is guaranteed to not be
// enqueued if we just finished processing references. Otherwise, we may be doing the main GC
// phase. Since we are holding the reference processor lock, it guarantees that reference