Don't use fences to implement volatiles
Mixing the fence-based implementation with acquire/release instructions
on ARMv8 is not just ugly but incorrect. A volatile store; volatile
load sequence implemented as a release store followed by ld; dmb does
not prevent the load from being reordered before the store.
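
For concreteness, a portable C++11 sketch of the broken mixed pattern
(using plain std::atomic rather than ART's Atomic/QuasiAtomic wrappers;
the function names are illustrative only, not code from this change):

    #include <atomic>

    std::atomic<int> x{0};
    std::atomic<int> y{0};

    // Volatile store to x followed by volatile load of y, where the store
    // is compiled acquire/release-style (stlr) and the load fence-style
    // (ldr; dmb). Nothing orders the store before the load: a release
    // store does not order later accesses, and the fence sits after the
    // load. Two threads running mirror-image sequences can therefore both
    // read 0, which Java volatile semantics forbid.
    int mixed_store_then_load() {
      x.store(1, std::memory_order_release);                 // stlr
      int r = y.load(std::memory_order_relaxed);             // ldr
      std::atomic_thread_fence(std::memory_order_acquire);   // dmb, too late
      return r;
    }

    // What sequentially consistent accessors on both sides provide: the
    // store stays ordered before the load.
    int consistent_store_then_load() {
      x.store(1, std::memory_order_seq_cst);
      return y.load(std::memory_order_seq_cst);
    }
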
This should remove the last places we were using fences to implement
volatiles.
The HeapReference representation is changed to an Atomic, thereby
avoiding many casts. HeapReference no longer inherits from
ObjectReference, which was documented to be a value type; HeapReference
is not one, since it contains an atomic.
Disentangle HeapReference and ObjectReference/CompressedReference
uses sufficiently to get the code to compile again. They were
previously used somewhat interchangeably in a few places, in spite
of the different intended semantics (a value type vs. a concurrently
updatable field). Further disentanglement might be useful.
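
A simplified sketch of that distinction (made-up names and shapes, not
the actual declarations in this change): a value-type reference is a
plain word that is copied around, while a heap field holds its word in
an atomic and is only ever read or written in place.

    #include <atomic>
    #include <cstdint>

    // Value type: freely copied, e.g. held on the stack or handed to a
    // visitor; the compressed word is just data.
    struct ValueRefSketch {
      uint32_t reference_;
    };

    // Heap field type: embedded in an object and read/updated concurrently
    // by mutators and the GC, so the word is an atomic and is accessed in
    // place. std::atomic is not copyable, so this cannot be a value type.
    struct HeapFieldRefSketch {
      std::atomic<uint32_t> reference_;

      uint32_t Load(bool is_volatile) const {
        return is_volatile ? reference_.load(std::memory_order_seq_cst)
                           : reference_.load(std::memory_order_relaxed);
      }
      void Store(uint32_t value, bool is_volatile) {
        reference_.store(value, is_volatile ? std::memory_order_seq_cst
                                            : std::memory_order_relaxed);
      }
    };
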
Flag a strange fence use I haven't yet understood.
Test: Booted AOSP. Ran default tests. Some object code inspection.
Bug: 31023171
Test: Built AOSP
Change-Id: I7b3c3e624f480994541c8e3a79e585071c122a3d
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 726bddd..b298db6 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -301,6 +301,7 @@
extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUSED,
mirror::Object* obj,
uint32_t offset) {
+ // Used only in connection with non-volatile loads.
DCHECK(kEmitCompilerReadBarrier);
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(obj) + offset;
mirror::HeapReference<mirror::Object>* ref_addr =
@@ -308,9 +309,10 @@
constexpr ReadBarrierOption kReadBarrierOption =
kUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
mirror::Object* result =
- ReadBarrier::Barrier<mirror::Object, kReadBarrierOption>(obj,
- MemberOffset(offset),
- ref_addr);
+ ReadBarrier::Barrier<mirror::Object, /* kIsVolatile */ false, kReadBarrierOption>(
+ obj,
+ MemberOffset(offset),
+ ref_addr);
return result;
}
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index fd0cd5f..f863299 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -79,8 +79,8 @@
static mirror::Class* SafeGetClass(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
char* obj_cls = reinterpret_cast<char*>(obj) + mirror::Object::ClassOffset().SizeValue();
- mirror::HeapReference<mirror::Class> cls =
- mirror::HeapReference<mirror::Class>::FromMirrorPtr(nullptr);
+ mirror::CompressedReference<mirror::Class> cls =
+ mirror::CompressedReference<mirror::Class>::FromMirrorPtr(nullptr);
ssize_t rc = SafeCopy(&cls, obj_cls, sizeof(cls));
CHECK_NE(-1, rc);
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 2901995..1b3d0da 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -115,8 +115,8 @@
}
private:
- template<bool kPoisonReferences>
- void MarkReference(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) const
+ template<typename CompressedReferenceType>
+ void MarkReference(CompressedReferenceType* obj_ptr) const
REQUIRES_SHARED(Locks::mutator_lock_) {
// Only add the reference if it is non null and fits our criteria.
mirror::Object* ref = obj_ptr->AsMirrorPtr();
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 9d672b1..8fab285 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2457,6 +2457,7 @@
}
bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
+ // TODO: Explain why this is here. What release operation does it pair with?
QuasiAtomic::ThreadFenceAcquire();
accounting::ObjectStack* alloc_stack = GetAllocationStack();
return alloc_stack->Contains(ref);
@@ -2617,9 +2618,8 @@
}
} while (!field->CasWeakRelaxed(from_ref, to_ref));
} else {
- QuasiAtomic::ThreadFenceRelease();
- field->Assign(to_ref);
- QuasiAtomic::ThreadFenceSequentiallyConsistent();
+ // TODO: Why is this seq_cst when the above is relaxed? Document memory ordering.
+ field->Assign</* kIsVolatile */ true>(to_ref);
}
}
return true;
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index dec206b..f722e8d 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -113,7 +113,7 @@
virtual mirror::Object* IsMarked(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Returns true if the given heap reference is null or is already marked. If it's already marked,
- // update the reference (uses a CAS if do_atomic_update is true. Otherwise, returns false.
+ // update the reference (uses a CAS if do_atomic_update is true). Otherwise, returns false.
virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
bool do_atomic_update)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 78fb2d2..7db5d2c 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -38,9 +38,8 @@
// Used to mark and copy objects. Any newly-marked objects who are in the from space Get moved to
// the to-space and have their forward address updated. Objects which have been newly marked are
// pushed on the mark stack.
-template<bool kPoisonReferences>
-inline void SemiSpace::MarkObject(
- mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) {
+template<typename CompressedReferenceType>
+inline void SemiSpace::MarkObject(CompressedReferenceType* obj_ptr) {
mirror::Object* obj = obj_ptr->AsMirrorPtr();
if (obj == nullptr) {
return;
@@ -73,9 +72,8 @@
}
}
-template<bool kPoisonReferences>
-inline void SemiSpace::MarkObjectIfNotInToSpace(
- mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) {
+template<typename CompressedReferenceType>
+inline void SemiSpace::MarkObjectIfNotInToSpace(CompressedReferenceType* obj_ptr) {
if (!to_space_->HasAddress(obj_ptr->AsMirrorPtr())) {
MarkObject(obj_ptr);
}
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index fd52da3..6d4d789 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -97,13 +97,13 @@
// Find the default mark bitmap.
void FindDefaultMarkBitmap();
- // Updates obj_ptr if the object has moved.
- template<bool kPoisonReferences>
- void MarkObject(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr)
+ // Updates obj_ptr if the object has moved. Takes either an ObjectReference or a HeapReference.
+ template<typename CompressedReferenceType>
+ void MarkObject(CompressedReferenceType* obj_ptr)
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- template<bool kPoisonReferences>
- void MarkObjectIfNotInToSpace(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr)
+ template<typename CompressedReferenceType>
+ void MarkObjectIfNotInToSpace(CompressedReferenceType* obj_ptr)
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 2c72821..ce06a03 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1438,7 +1438,11 @@
mirror::HeapReference<mirror::Object>* field_addr =
reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
- ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /* kAlwaysUpdateField */ true>(
+ ReadBarrier::Barrier<
+ mirror::Object,
+ /* kIsVolatile */ false,
+ kWithReadBarrier,
+ /* kAlwaysUpdateField */ true>(
obj,
MemberOffset(offset),
field_addr);
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 086925b..71596c9 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -724,11 +724,10 @@
}
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
HeapReference<T>* objref_addr = reinterpret_cast<HeapReference<T>*>(raw_addr);
- T* result = ReadBarrier::Barrier<T, kReadBarrierOption>(this, field_offset, objref_addr);
- if (kIsVolatile) {
- // TODO: Refactor to use a SequentiallyConsistent load instead.
- QuasiAtomic::ThreadFenceAcquire(); // Ensure visibility of operations preceding store.
- }
+ T* result = ReadBarrier::Barrier<T, kIsVolatile, kReadBarrierOption>(
+ this,
+ field_offset,
+ objref_addr);
if (kVerifyFlags & kVerifyReads) {
VerifyObject(result);
}
@@ -764,15 +763,7 @@
}
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
HeapReference<Object>* objref_addr = reinterpret_cast<HeapReference<Object>*>(raw_addr);
- if (kIsVolatile) {
- // TODO: Refactor to use a SequentiallyConsistent store instead.
- QuasiAtomic::ThreadFenceRelease(); // Ensure that prior accesses are visible before store.
- objref_addr->Assign(new_value.Ptr());
- QuasiAtomic::ThreadFenceSequentiallyConsistent();
- // Ensure this store occurs before any volatile loads.
- } else {
- objref_addr->Assign(new_value.Ptr());
- }
+ objref_addr->Assign<kIsVolatile>(new_value.Ptr());
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
@@ -843,13 +834,12 @@
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
+ uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_ref.reference_,
- new_ref.reference_);
+ bool success = atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_ref, new_ref);
return success;
}
@@ -885,13 +875,12 @@
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
+ uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_ref.reference_,
- new_ref.reference_);
+ bool success = atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
return success;
}
@@ -915,13 +904,12 @@
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
+ uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeWeakRelaxed(old_ref.reference_,
- new_ref.reference_);
+ bool success = atomic_addr->CompareExchangeWeakRelaxed(old_ref, new_ref);
return success;
}
@@ -945,13 +933,12 @@
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
+ uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeWeakRelease(old_ref.reference_,
- new_ref.reference_);
+ bool success = atomic_addr->CompareExchangeWeakRelease(old_ref, new_ref);
return success;
}
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index 69365af..f076940 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -211,13 +211,12 @@
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
+ uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeStrongRelaxed(old_ref.reference_,
- new_ref.reference_);
+ bool success = atomic_addr->CompareExchangeStrongRelaxed(old_ref, new_ref);
return success;
}
@@ -241,13 +240,12 @@
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
+ uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeStrongRelease(old_ref.reference_,
- new_ref.reference_);
+ bool success = atomic_addr->CompareExchangeStrongRelease(old_ref, new_ref);
return success;
}
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
index 22fb83c..60f3ce1 100644
--- a/runtime/mirror/object_reference-inl.h
+++ b/runtime/mirror/object_reference-inl.h
@@ -30,17 +30,10 @@
}
template<class MirrorType>
-HeapReference<MirrorType> HeapReference<MirrorType>::FromObjPtr(ObjPtr<MirrorType> ptr) {
- return HeapReference<MirrorType>(ptr.Ptr());
-}
-
-template<class MirrorType>
bool HeapReference<MirrorType>::CasWeakRelaxed(MirrorType* expected_ptr, MirrorType* new_ptr) {
- HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_ptr));
- HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_ptr));
- Atomic<uint32_t>* atomic_reference = reinterpret_cast<Atomic<uint32_t>*>(&this->reference_);
- return atomic_reference->CompareExchangeWeakRelaxed(expected_ref.reference_,
- new_ref.reference_);
+ return reference_.CompareExchangeWeakRelaxed(
+ Compression::Compress(expected_ptr),
+ Compression::Compress(new_ptr));
}
} // namespace mirror
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index a96a120..108e8ae 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_OBJECT_REFERENCE_H_
#define ART_RUNTIME_MIRROR_OBJECT_REFERENCE_H_
+#include "atomic.h"
#include "base/mutex.h" // For Locks::mutator_lock_.
#include "globals.h"
#include "obj_ptr.h"
@@ -30,20 +31,43 @@
// extra platform specific padding.
#define MANAGED PACKED(4)
+template<bool kPoisonReferences, class MirrorType>
+class PtrCompression {
+ public:
+ // Compress reference to its bit representation.
+ static uint32_t Compress(MirrorType* mirror_ptr) {
+ uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr);
+ return static_cast<uint32_t>(kPoisonReferences ? -as_bits : as_bits);
+ }
+
+ // Uncompress an encoded reference from its bit representation.
+ static MirrorType* Decompress(uint32_t ref) {
+ uintptr_t as_bits = kPoisonReferences ? -ref : ref;
+ return reinterpret_cast<MirrorType*>(as_bits);
+ }
+
+ // Convert an ObjPtr to a compressed reference.
+ static uint32_t Compress(ObjPtr<MirrorType> ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return Compress(ptr.Ptr());
+ }
+};
+
// Value type representing a reference to a mirror::Object of type MirrorType.
template<bool kPoisonReferences, class MirrorType>
class MANAGED ObjectReference {
+ private:
+ using Compression = PtrCompression<kPoisonReferences, MirrorType>;
+
public:
- MirrorType* AsMirrorPtr() const REQUIRES_SHARED(Locks::mutator_lock_) {
- return UnCompress();
+ MirrorType* AsMirrorPtr() const {
+ return Compression::Decompress(reference_);
}
- void Assign(MirrorType* other) REQUIRES_SHARED(Locks::mutator_lock_) {
- reference_ = Compress(other);
+ void Assign(MirrorType* other) {
+ reference_ = Compression::Compress(other);
}
- void Assign(ObjPtr<MirrorType> ptr)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ void Assign(ObjPtr<MirrorType> ptr) REQUIRES_SHARED(Locks::mutator_lock_);
void Clear() {
reference_ = 0;
@@ -58,48 +82,69 @@
return reference_;
}
+ static ObjectReference<kPoisonReferences, MirrorType> FromMirrorPtr(MirrorType* mirror_ptr)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return ObjectReference<kPoisonReferences, MirrorType>(mirror_ptr);
+ }
+
protected:
- explicit ObjectReference(MirrorType* mirror_ptr)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : reference_(Compress(mirror_ptr)) {
+ explicit ObjectReference(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
+ : reference_(Compression::Compress(mirror_ptr)) {
}
- // Compress reference to its bit representation.
- static uint32_t Compress(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
- uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr);
- return static_cast<uint32_t>(kPoisonReferences ? -as_bits : as_bits);
- }
-
- // Uncompress an encoded reference from its bit representation.
- MirrorType* UnCompress() const REQUIRES_SHARED(Locks::mutator_lock_) {
- uintptr_t as_bits = kPoisonReferences ? -reference_ : reference_;
- return reinterpret_cast<MirrorType*>(as_bits);
- }
-
- friend class Object;
-
// The encoded reference to a mirror::Object.
uint32_t reference_;
};
// References between objects within the managed heap.
+// Similar API to ObjectReference, but not a value type. Supports atomic access.
template<class MirrorType>
-class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, MirrorType> {
+class MANAGED HeapReference {
+ private:
+ using Compression = PtrCompression<kPoisonHeapReferences, MirrorType>;
+
public:
+ template <bool kIsVolatile = false>
+ MirrorType* AsMirrorPtr() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return Compression::Decompress(
+ kIsVolatile ? reference_.LoadSequentiallyConsistent() : reference_.LoadJavaData());
+ }
+
+ template <bool kIsVolatile = false>
+ void Assign(MirrorType* other) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kIsVolatile) {
+ reference_.StoreSequentiallyConsistent(Compression::Compress(other));
+ } else {
+ reference_.StoreJavaData(Compression::Compress(other));
+ }
+ }
+
+ template <bool kIsVolatile = false>
+ void Assign(ObjPtr<MirrorType> ptr) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ void Clear() {
+ reference_.StoreJavaData(0);
+ DCHECK(IsNull());
+ }
+
+ bool IsNull() const {
+ return reference_.LoadJavaData() == 0;
+ }
+
static HeapReference<MirrorType> FromMirrorPtr(MirrorType* mirror_ptr)
REQUIRES_SHARED(Locks::mutator_lock_) {
return HeapReference<MirrorType>(mirror_ptr);
}
- static HeapReference<MirrorType> FromObjPtr(ObjPtr<MirrorType> ptr)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
bool CasWeakRelaxed(MirrorType* old_ptr, MirrorType* new_ptr)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
explicit HeapReference(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
- : ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
+ : reference_(Compression::Compress(mirror_ptr)) {}
+
+ // The encoded reference to a mirror::Object. Atomically updateable.
+ Atomic<uint32_t> reference_;
};
static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 761362f..b2bdeed 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -67,10 +67,12 @@
if (kUseReadBarrier) {
// Need to make sure the reference stored in the field is a to-space one before attempting the
// CAS or the CAS could fail incorrectly.
+ // Note that the read barrier load does NOT need to be volatile.
mirror::HeapReference<mirror::Object>* field_addr =
reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
reinterpret_cast<uint8_t*>(obj.Ptr()) + static_cast<size_t>(offset));
- ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /* kAlwaysUpdateField */ true>(
+ ReadBarrier::Barrier<mirror::Object, /* kIsVolatile */ false, kWithReadBarrier,
+ /* kAlwaysUpdateField */ true>(
obj.Ptr(),
MemberOffset(offset),
field_addr);
@@ -112,6 +114,7 @@
jint newValue) {
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(javaObj);
+ // TODO: A release store is likely to be faster on future processors.
QuasiAtomic::ThreadFenceRelease();
// JNI must use non transactional mode.
obj->SetField32<false>(MemberOffset(offset), newValue);
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index b0935c0..6424599 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -33,7 +33,8 @@
// Disabled for performance reasons.
static constexpr bool kCheckDebugDisallowReadBarrierCount = false;
-template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kAlwaysUpdateField>
+template <typename MirrorType, bool kIsVolatile, ReadBarrierOption kReadBarrierOption,
+ bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
@@ -55,7 +56,7 @@
}
ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
- MirrorType* ref = ref_addr->AsMirrorPtr();
+ MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
MirrorType* old_ref = ref;
if (is_gray) {
// Slow-path.
@@ -71,9 +72,9 @@
return ref;
} else if (kUseBrooksReadBarrier) {
// To be implemented.
- return ref_addr->AsMirrorPtr();
+ return ref_addr->template AsMirrorPtr<kIsVolatile>();
} else if (kUseTableLookupReadBarrier) {
- MirrorType* ref = ref_addr->AsMirrorPtr();
+ MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
MirrorType* old_ref = ref;
// The heap or the collector can be null at startup. TODO: avoid the need for this null check.
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -93,7 +94,7 @@
}
} else {
// No read barrier.
- return ref_addr->AsMirrorPtr();
+ return ref_addr->template AsMirrorPtr<kIsVolatile>();
}
}
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index d36acbc..8a106aa 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -46,9 +46,13 @@
// fast-debug environment.
DECLARE_RUNTIME_DEBUG_FLAG(kEnableReadBarrierInvariantChecks);
+ // Return the reference at ref_addr, invoking the read barrier as appropriate.
+ // ref_addr is the address of the reference field within obj.
// It's up to the implementation whether the given field gets updated whereas the return value
// must be an updated reference unless kAlwaysUpdateField is true.
- template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
+ template <typename MirrorType,
+ bool kIsVolatile,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
bool kAlwaysUpdateField = false>
ALWAYS_INLINE static MirrorType* Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)