Move to release CAS for updating object fields
A relaxed CAS is not sufficient to ensure that threads reading the
field will see the copied contents of objects.
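
The ordering this relies on can be reduced to a small standalone
sketch (plain C++, not ART code; Obj, Gc, and Mutator are illustrative
names): the GC thread copies an object's payload and then publishes
the new reference with a release CAS, so a reader that loads the
reference with acquire is guaranteed to see the copied payload. A
relaxed CAS would give no such guarantee.

    #include <atomic>
    #include <cassert>
    #include <thread>

    struct Obj {
      int payload = 0;
    };

    Obj from_space{42};
    Obj to_space;  // copy target; payload written before publishing
    std::atomic<Obj*> field{&from_space};

    void Gc() {
      to_space.payload = from_space.payload;  // copy contents first
      Obj* expected = &from_space;
      // Release CAS: the payload store above happens-before any
      // acquire load that observes &to_space.
      field.compare_exchange_strong(expected, &to_space,
                                    std::memory_order_release);
    }

    void Mutator() {
      Obj* ref = field.load(std::memory_order_acquire);
      if (ref == &to_space) {
        // Visible thanks to the release/acquire pairing.
        assert(ref->payload == 42);
      }
    }

    int main() {
      std::thread gc(Gc);
      std::thread mutator(Mutator);
      gc.join();
      mutator.join();
    }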
Bug: 37187694
Bug: 62240510
Test: test-art-host
(cherry picked from commit a1f20c3f8d0dabb9723acccf3ba760acf3ebe62d)
Change-Id: I8bff8a67c2c52eb131714b52e6d842c8c08dd70a
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 45c3165..25dd1a3 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -257,6 +257,13 @@
return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_relaxed);
}
+ // Atomically replace the value with the desired value if it matches the expected value. Prior
+ // writes to other memory locations become visible to threads that do a consume or an acquire
+ // load on the same location.
+ bool CompareExchangeStrongRelease(T expected_value, T desired_value) {
+ return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_release);
+ }
+
// The same, except it may fail spuriously.
bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed);
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index dfe8949..35fce40 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -93,7 +93,7 @@
if (kReadBarrierOption != kWithoutReadBarrier && before_ptr != after_ptr) {
// If another thread raced and updated the reference, do not store the read barrier updated
// one.
- data_.CompareExchangeStrongRelaxed(before, Encode(after_ptr, MaskHash(before)));
+ data_.CompareExchangeStrongRelease(before, Encode(after_ptr, MaskHash(before)));
}
return after_ptr.Ptr();
}
@@ -108,7 +108,7 @@
if (before_ptr != after_ptr) {
// If another thread raced and updated the reference, do not store the read barrier updated
// one.
- data_.CompareExchangeStrongRelaxed(before, Encode(after_ptr, MaskHash(before)));
+ data_.CompareExchangeStrongRelease(before, Encode(after_ptr, MaskHash(before)));
}
}
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 5488287..c0d6481 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2086,8 +2086,11 @@
// It was updated by the mutator.
break;
}
- } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
- false, false, kVerifyNone>(offset, expected_ref, new_ref));
+ // Use a release CAS to make sure threads reading the reference see the contents of copied
+ // objects.
+ } while (!obj->CasFieldWeakReleaseObjectWithoutWriteBarrier<false, false, kVerifyNone>(
+ offset,
+ expected_ref,
+ new_ref));
}
// Process some roots.
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index baed5f1..d3fc95f 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -899,6 +899,36 @@
return success;
}
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldWeakReleaseObjectWithoutWriteBarrier(
+ MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value) {
+ if (kCheckTransaction) {
+ DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+ }
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ if (kVerifyFlags & kVerifyWrites) {
+ VerifyObject(new_value);
+ }
+ if (kVerifyFlags & kVerifyReads) {
+ VerifyObject(old_value);
+ }
+ if (kTransactionActive) {
+ Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+ }
+ HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+ bool success = atomic_addr->CompareExchangeWeakRelease(old_ref.reference_,
+ new_ref.reference_);
+ return success;
+}
+
template<bool kIsStatic,
VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption,
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index 58e7c20..69365af 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -221,6 +221,36 @@
return success;
}
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldStrongReleaseObjectWithoutWriteBarrier(
+ MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value) {
+ if (kCheckTransaction) {
+ DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+ }
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ if (kVerifyFlags & kVerifyWrites) {
+ VerifyObject(new_value);
+ }
+ if (kVerifyFlags & kVerifyReads) {
+ VerifyObject(old_value);
+ }
+ if (kTransactionActive) {
+ Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+ }
+ HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+ bool success = atomic_addr->CompareExchangeStrongRelease(old_ref.reference_,
+ new_ref.reference_);
+ return success;
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 35a1b73..9cf4252 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -350,10 +350,25 @@
template<bool kTransactionActive,
bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldWeakReleaseObjectWithoutWriteBarrier(MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
ObjPtr<Object> old_value,
ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
+ template<bool kTransactionActive,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldStrongReleaseObjectWithoutWriteBarrier(MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value)
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index db77490..2d06e54 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -63,7 +63,7 @@
// If kAlwaysUpdateField is true, update the field atomically. This may fail if mutator
// updates before us, but it's OK.
if (kAlwaysUpdateField && ref != old_ref) {
- obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
+ obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
offset, old_ref, ref);
}
}
@@ -81,7 +81,7 @@
ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
// Update the field atomically. This may fail if mutator updates before us, but it's ok.
if (ref != old_ref) {
- obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
+ obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
offset, old_ref, ref);
}
}