author Lokesh Gidra <lokeshgidra@google.com> 2023-10-19 19:44:46 +0000
committer Lokesh Gidra <lokeshgidra@google.com> 2023-10-23 18:36:14 +0000
commit ea6525ab31ca14d5a919a244824cc8e63ba8c767 (patch)
tree 59f7572f2152a0f643ed19c66e21261c3f94fd3b
parent 23bdf0606a4d97050de96dfb5a87c32587721579 (diff)
Use release memory-order when going from gray to non-gray state
At certain places we were using `relaxed` order, which allows the preceding
stores of to-space references in the object to be reordered after the state
change.

Bug: 302845084
Test: art/test/testrunner/testrunner.py
Change-Id: Ibbf27c8fa9eda2bf9635c69668b3750139178a30
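To make the ordering issue concrete, here is a minimal stand-alone C++ sketch of the GC-side publish step. It is not ART code; Obj, rb_state, field and ScanAndPublish are illustrative names. With a relaxed CAS, the store of the to-space reference may become visible after the gray-to-non-gray transition; a release CAS forbids that reordering.

#include <atomic>
#include <cstdint>

constexpr uint32_t kGray = 1;
constexpr uint32_t kNonGray = 0;

struct Obj {
  std::atomic<uint32_t> rb_state{kGray};
  std::atomic<Obj*> field{nullptr};
};

// GC thread: store a to-space reference into the object, then leave the
// gray state so mutators stop taking the read-barrier slow path.
void ScanAndPublish(Obj* obj, Obj* to_space_ref) {
  obj->field.store(to_space_ref, std::memory_order_relaxed);
  uint32_t expected = kGray;
  // With std::memory_order_relaxed as the success order, the field store
  // above may become visible after the state change; memory_order_release
  // rules that out, which is what this commit switches to.
  obj->rb_state.compare_exchange_strong(expected, kNonGray,
                                        std::memory_order_release,
                                        std::memory_order_relaxed);
}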
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc  13
-rw-r--r--  runtime/gc/reference_queue.cc  11
-rw-r--r--  runtime/gc/reference_queue.h  6
-rw-r--r--  runtime/mirror/object-readbarrier-inl.h  7
-rw-r--r--  runtime/mirror/object.h  5
5 files changed, 24 insertions, 18 deletions
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 787e997ba0..3f9c3a016e 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -915,9 +915,11 @@ class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
// Only need to scan gray objects.
if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
collector_->ScanImmuneObject(obj);
- // Done scanning the object, go back to black (non-gray).
- bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
- ReadBarrier::NonGrayState());
+ // Done scanning the object, go back to black (non-gray). Release order
+ // required to ensure that stores of to-space references done by
+ // ScanImmuneObject() are visible before state change.
+ bool success = obj->AtomicSetReadBarrierState(
+ ReadBarrier::GrayState(), ReadBarrier::NonGrayState(), std::memory_order_release);
CHECK(success)
<< Runtime::Current()->GetHeap()->GetVerification()->DumpObjectInfo(obj, "failed CAS");
}
@@ -2370,9 +2372,8 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
// above IsInToSpace() evaluates to true and we change the color from gray to non-gray here in
// this else block.
if (kUseBakerReadBarrier) {
- bool success = to_ref->AtomicSetReadBarrierState<std::memory_order_release>(
- ReadBarrier::GrayState(),
- ReadBarrier::NonGrayState());
+ bool success = to_ref->AtomicSetReadBarrierState(
+ ReadBarrier::GrayState(), ReadBarrier::NonGrayState(), std::memory_order_release);
DCHECK(success) << "Must succeed as we won the race.";
}
}
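The release ordering added in this file only pays off if the reading side synchronizes with it; compare GetReadBarrierStateAcquire() in object-readbarrier-inl.h further down. A minimal sketch of that pairing, continuing the illustrative Obj type from the sketch under the commit message:

// Mutator side (illustrative): an acquire load of the state pairs with the
// GC's release CAS, so a thread that observes kNonGray also sees the
// to-space reference stored before the CAS.
Obj* ReadField(Obj* obj) {
  if (obj->rb_state.load(std::memory_order_acquire) == kNonGray) {
    return obj->field.load(std::memory_order_relaxed);
  }
  return nullptr;  // Still gray: the real runtime would take the read-barrier slow path.
}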
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 6bdacaf18c..53eef9c027 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -73,7 +73,8 @@ ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
}
// This must be called whenever DequeuePendingReference is called.
-void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref) {
+void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref,
+ std::memory_order order) {
Heap* heap = Runtime::Current()->GetHeap();
if (kUseBakerReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
heap->ConcurrentCopyingCollector()->IsActive()) {
@@ -84,7 +85,7 @@ void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> re
collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
uint32_t rb_state = ref->GetReadBarrierState();
if (rb_state == ReadBarrier::GrayState()) {
- ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::NonGrayState());
+ ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::NonGrayState(), order);
CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
} else {
// In ConcurrentCopying::ProcessMarkStackRef() we may leave a non-gray reference in the queue
@@ -158,7 +159,7 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
}
// Delay disabling the read barrier until here so that the ClearReferent call above in
// transaction mode will trigger the read barrier.
- DisableReadBarrierForReference(ref);
+ DisableReadBarrierForReference(ref, std::memory_order_relaxed);
}
}
@@ -186,7 +187,7 @@ FinalizerStats ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleare
}
// Delay disabling the read barrier until here so that the ClearReferent call above in
// transaction mode will trigger the read barrier.
- DisableReadBarrierForReference(ref->AsReference());
+ DisableReadBarrierForReference(ref->AsReference(), std::memory_order_relaxed);
}
return FinalizerStats(num_refs, num_enqueued);
}
@@ -216,7 +217,7 @@ uint32_t ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ true);
++num_refs;
}
- DisableReadBarrierForReference(buf[i]->AsReference());
+ DisableReadBarrierForReference(buf[i]->AsReference(), std::memory_order_release);
}
} while (!empty);
return num_refs;
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 3fda7167d4..69f04d783a 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -80,8 +80,10 @@ class ReferenceQueue {
// If applicable, disable the read barrier for the reference after its referent is handled (see
// ConcurrentCopying::ProcessMarkStackRef.) This must be called for a reference that's dequeued
- // from pending queue (DequeuePendingReference).
- void DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref)
+ // from pending queue (DequeuePendingReference). 'order' is expected to be
+ // 'release' if called outside 'weak-ref access disabled' critical section.
+ // Otherwise 'relaxed' order will suffice.
+ void DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref, std::memory_order order)
REQUIRES_SHARED(Locks::mutator_lock_);
// Enqueues finalizer references with white referents. White referents are blackened, moved to
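A sketch of the caller contract spelled out in the comment above (release outside the weak-ref-access-disabled critical section, relaxed inside it), again reusing the illustrative Obj type rather than ART's ReferenceQueue. In the patch itself, ClearWhiteReferences() and EnqueueFinalizerReferences() pass relaxed, while ForwardSoftReferences() passes release.

// Hypothetical helper (not ART code): choose the order for the gray ->
// non-gray transition based on whether mutators' weak-ref access is
// disabled, i.e. whether they can concurrently observe the state change.
void TransitionGrayToNonGray(Obj* ref, bool weak_ref_access_disabled) {
  uint32_t expected = kGray;
  ref->rb_state.compare_exchange_strong(
      expected, kNonGray,
      weak_ref_access_disabled ? std::memory_order_relaxed : std::memory_order_release,
      std::memory_order_relaxed);
}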
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index 259b3dd6f5..2895009d17 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -146,8 +146,9 @@ inline uint32_t Object::GetReadBarrierStateAcquire() {
return rb_state;
}
-template<std::memory_order kMemoryOrder>
-inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state) {
+inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state,
+ uint32_t rb_state,
+ std::memory_order order) {
if (!kUseBakerReadBarrier) {
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -171,7 +172,7 @@ inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32
// If `kMemoryOrder` == `std::memory_order_release`, use a CAS release so that when GC updates
// all the fields of an object and then changes the object from gray to black (non-gray), the
// field updates (stores) will be visible (won't be reordered after this CAS.)
- } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, kMemoryOrder));
+ } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, order));
return true;
}
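The change above turns the compile-time kMemoryOrder template parameter into a runtime std::memory_order argument that is forwarded to the weak CAS. A minimal sketch of that pattern with illustrative names (RbStateWord, AtomicSet) rather than ART's LockWord/CasLockWord machinery:

#include <atomic>
#include <cstdint>

class RbStateWord {
 public:
  // Before: template<std::memory_order kMemoryOrder> bool AtomicSet(...).
  // After: the call site chooses the order, defaulting to relaxed.
  bool AtomicSet(uint32_t expected, uint32_t desired,
                 std::memory_order order = std::memory_order_relaxed) {
    uint32_t cur = state_.load(std::memory_order_relaxed);
    do {
      if (cur != expected) {
        return false;  // State already changed by another thread.
      }
      // Weak CAS in a loop, analogous to CasLockWord(..., CASMode::kWeak, order).
    } while (!state_.compare_exchange_weak(cur, desired, order,
                                           std::memory_order_relaxed));
    return true;
  }

 private:
  std::atomic<uint32_t> state_{0};
};

One likely motivation, an assumption not stated in the commit message, is that DisableReadBarrierForReference() now receives the order as a runtime value, which a template parameter could not carry through without duplicating that call path.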
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 95b9f86a4b..54a17b1d4e 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -111,8 +111,9 @@ class MANAGED LOCKABLE Object {
ALWAYS_INLINE void SetReadBarrierState(uint32_t rb_state) REQUIRES_SHARED(Locks::mutator_lock_);
- template<std::memory_order kMemoryOrder = std::memory_order_relaxed>
- ALWAYS_INLINE bool AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state)
+ ALWAYS_INLINE bool AtomicSetReadBarrierState(uint32_t expected_rb_state,
+ uint32_t rb_state,
+ std::memory_order order = std::memory_order_relaxed)
REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE uint32_t GetMarkBit() REQUIRES_SHARED(Locks::mutator_lock_);