author Hiroshi Yamauchi <yamauchi@google.com> 2016-12-19 11:44:47 -0800
committer Hiroshi Yamauchi <yamauchi@google.com> 2016-12-19 11:44:47 -0800
commit 65f5f247a367af9d6b9ac63767b69ecf3ab079bc
tree   bff631b0efd7e8fe77be9c445ea9224e2b67433b
parent 5b32b91d731d6187ada4c6fc804041b7b3b6903c
Fix race condition between DelayReferenceReferent and Reference.clear().

Rename IsMarkedHeapReference to IsNullOrMarkedHeapReference. Move the null
check from the caller of IsMarkedHeapReference into
IsNullOrMarkedHeapReference. Make sure that the referent is only loaded once
between the null check and the IsMarked call. Use a CAS in
ConcurrentCopying::IsNullOrMarkedHeapReference when called from
DelayReferenceReferent.

Bug: 33389022
Test: test-art-host without and with CC.
Change-Id: I20edab4dac2a4bb02dbb72af0f09de77b55ac08e
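The race being fixed: the concurrent copying collector's old IsMarkedHeapReference read the referent field and then wrote the forwarded address back unconditionally, so a Reference.clear() performed by a mutator between that read and the write-back could be silently overwritten, resurrecting a cleared referent. A minimal sketch of the old, unguarded pattern, using std::atomic<uint32_t> as a stand-in for ART's HeapReference (names and fences are illustrative approximations, not ART code):

#include <atomic>
#include <cstdint>

// Stand-in for a Reference's 32-bit referent slot (illustrative, not ART's type).
std::atomic<uint32_t> referent_field;

// Roughly the pre-fix update path: the GC has already read the field and
// computed to_ref (the forwarded address) from that earlier read. If a mutator
// runs Reference.clear() (storing 0) between that read and the store below,
// the store overwrites the clear and the cleared referent is resurrected.
void OldUpdateReferent(uint32_t to_ref) {
  std::atomic_thread_fence(std::memory_order_release);
  referent_field.store(to_ref, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
}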
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 21
-rw-r--r--  runtime/gc/collector/concurrent_copying.h  |  3
-rw-r--r--  runtime/gc/collector/garbage_collector.h   |  5
-rw-r--r--  runtime/gc/collector/mark_compact.cc        | 10
-rw-r--r--  runtime/gc/collector/mark_compact.h         |  3
-rw-r--r--  runtime/gc/collector/mark_sweep.cc          |  9
-rw-r--r--  runtime/gc/collector/mark_sweep.h           |  3
-rw-r--r--  runtime/gc/collector/semi_space.cc          |  7
-rw-r--r--  runtime/gc/collector/semi_space.h           |  3
-rw-r--r--  runtime/gc/reference_processor.cc           |  4
-rw-r--r--  runtime/gc/reference_queue.cc               | 10
-rw-r--r--  runtime/mirror/object_reference-inl.h       |  9
-rw-r--r--  runtime/mirror/object_reference.h           |  3
13 files changed, 71 insertions, 19 deletions
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index b8899137bc..741d1dad1f 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2385,16 +2385,29 @@ void ConcurrentCopying::FinishPhase() {
}
}
-bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
+bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+ bool do_atomic_update) {
mirror::Object* from_ref = field->AsMirrorPtr();
+ if (from_ref == nullptr) {
+ return true;
+ }
mirror::Object* to_ref = IsMarked(from_ref);
if (to_ref == nullptr) {
return false;
}
if (from_ref != to_ref) {
- QuasiAtomic::ThreadFenceRelease();
- field->Assign(to_ref);
- QuasiAtomic::ThreadFenceSequentiallyConsistent();
+ if (do_atomic_update) {
+ do {
+ if (field->AsMirrorPtr() != from_ref) {
+ // Concurrently overwritten by a mutator.
+ break;
+ }
+ } while (!field->CasWeakRelaxed(from_ref, to_ref));
+ } else {
+ QuasiAtomic::ThreadFenceRelease();
+ field->Assign(to_ref);
+ QuasiAtomic::ThreadFenceSequentiallyConsistent();
+ }
}
return true;
}
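The loop added above only installs to_ref while the field still holds the from_ref that was originally loaded; if a mutator overwrites the field in the meantime, the collector backs off. A minimal equivalent of that retry loop, again sketched with std::atomic<uint32_t> rather than ART's HeapReference::CasWeakRelaxed:

#include <atomic>
#include <cstdint>

// Returns true if to_ref was installed; returns false if a mutator overwrote
// the field (e.g. Reference.clear() stored null) after the GC loaded from_ref.
bool UpdateIfUnchanged(std::atomic<uint32_t>& field, uint32_t from_ref, uint32_t to_ref) {
  uint32_t expected = from_ref;
  while (!field.compare_exchange_weak(expected, to_ref, std::memory_order_relaxed)) {
    if (expected != from_ref) {
      return false;        // concurrently overwritten; leave the field alone
    }
    expected = from_ref;   // spurious weak-CAS failure: retry unchanged
  }
  return true;
}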
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 5b8a557375..844bb450cc 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -183,7 +183,8 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+ bool do_atomic_update) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 5b513991d1..0177e2a1ad 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -187,7 +187,10 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark
// and will be used for reading system weaks while the GC is running.
virtual mirror::Object* IsMarked(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
+ // Returns true if the given heap reference is null or is already marked. If it's already marked,
+ // update the reference (uses a CAS if do_atomic_update is true). Otherwise, returns false.
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Used by reference processor.
virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index ddcb6c0698..85e6783599 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -472,9 +472,15 @@ mirror::Object* MarkCompact::IsMarked(mirror::Object* object) {
return mark_bitmap_->Test(object) ? object : nullptr;
}
-bool MarkCompact::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr) {
+bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr,
+ // MarkCompact does the GC in a pause. No CAS needed.
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
// Side effect free since we call this before ever moving objects.
- return IsMarked(ref_ptr->AsMirrorPtr()) != nullptr;
+ mirror::Object* obj = ref_ptr->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
+ return IsMarked(obj) != nullptr;
}
void MarkCompact::SweepSystemWeaks() {
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 564f85b3f8..6d52d5d515 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -175,7 +175,8 @@ class MarkCompact : public GarbageCollector {
virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update) OVERRIDE
REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
void ForwardObject(mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_,
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 06ed0290a9..f00da73458 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -390,8 +390,13 @@ inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
}
}
-bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
- return IsMarked(ref->AsMirrorPtr());
+bool MarkSweep::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
+ mirror::Object* obj = ref->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
+ return IsMarked(obj);
}
class MarkSweep::MarkObjectSlowPath {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 02cf462bd3..a6e2d61f6d 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -188,7 +188,8 @@ class MarkSweep : public GarbageCollector {
void VerifyIsLive(const mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index a815b830c1..82505ce82f 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -765,8 +765,13 @@ mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
return mark_bitmap_->Test(obj) ? obj : nullptr;
}
-bool SemiSpace::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) {
+bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+ // SemiSpace does the GC in a pause. No CAS needed.
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
mirror::Object* obj = object->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
mirror::Object* new_obj = IsMarked(obj);
if (new_obj == nullptr) {
return false;
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 4cebcc3044..52b5e5fe30 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -166,7 +166,8 @@ class SemiSpace : public GarbageCollector {
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+ bool do_atomic_update) OVERRIDE
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 081be968eb..c1548365c7 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -203,7 +203,9 @@ void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
DCHECK(klass != nullptr);
DCHECK(klass->IsTypeOfReferenceClass());
mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
- if (referent->AsMirrorPtr() != nullptr && !collector->IsMarkedHeapReference(referent)) {
+ // do_atomic_update needs to be true because this happens outside of the reference processing
+ // phase.
+ if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
Thread* self = Thread::Current();
// TODO: Remove these locks, and use atomic stacks for storing references?
// We need to check that the references haven't already been enqueued since we can end up
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index a0eb197bd5..734caea371 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -129,8 +129,9 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
while (!IsEmpty()) {
ObjPtr<mirror::Reference> ref = DequeuePendingReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr &&
- !collector->IsMarkedHeapReference(referent_addr)) {
+ // do_atomic_update is false because this happens during the reference processing phase where
+ // Reference.clear() would block.
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
// Referent is white, clear it.
if (Runtime::Current()->IsActiveTransaction()) {
ref->ClearReferent<true>();
@@ -147,8 +148,9 @@ void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_referenc
while (!IsEmpty()) {
ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr &&
- !collector->IsMarkedHeapReference(referent_addr)) {
+ // do_atomic_update is false because this happens during the reference processing phase where
+ // Reference.clear() would block.
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
index e70b93607e..22fb83cb5c 100644
--- a/runtime/mirror/object_reference-inl.h
+++ b/runtime/mirror/object_reference-inl.h
@@ -34,6 +34,15 @@ HeapReference<MirrorType> HeapReference<MirrorType>::FromObjPtr(ObjPtr<MirrorTyp
return HeapReference<MirrorType>(ptr.Ptr());
}
+template<class MirrorType>
+bool HeapReference<MirrorType>::CasWeakRelaxed(MirrorType* expected_ptr, MirrorType* new_ptr) {
+ HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_ptr));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_ptr));
+ Atomic<uint32_t>* atomic_reference = reinterpret_cast<Atomic<uint32_t>*>(&this->reference_);
+ return atomic_reference->CompareExchangeWeakRelaxed(expected_ref.reference_,
+ new_ref.reference_);
+}
+
} // namespace mirror
} // namespace art
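For reference, CasWeakRelaxed amounts to a weak compare-and-swap with relaxed memory ordering on the 32-bit reference_ slot; a rough standard-C++ equivalent (a sketch only, ignoring ART's Atomic wrapper and reference compression/poisoning):

#include <atomic>
#include <cstdint>

// Weak CAS on a 32-bit reference slot with relaxed ordering. It may fail
// spuriously even when the slot holds 'expected', which is why the caller in
// concurrent_copying.cc loops until it either succeeds or observes that the
// field no longer holds the originally loaded value.
bool CasWeakRelaxedSketch(std::atomic<uint32_t>& slot, uint32_t expected, uint32_t desired) {
  return slot.compare_exchange_weak(expected, desired, std::memory_order_relaxed);
}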
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 71f34c66e2..a96a120d68 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -94,6 +94,9 @@ class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, Mirr
static HeapReference<MirrorType> FromObjPtr(ObjPtr<MirrorType> ptr)
REQUIRES_SHARED(Locks::mutator_lock_);
+ bool CasWeakRelaxed(MirrorType* old_ptr, MirrorType* new_ptr)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
explicit HeapReference(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
: ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}