Diffstat:
 runtime/interpreter/interpreter.cc |  8
 runtime/mirror/object-inl.h        | 93
 runtime/mirror/object.cc           |  2
 runtime/mirror/object.h            | 35
 runtime/native/sun_misc_Unsafe.cc  | 12
 5 files changed, 131 insertions(+), 19 deletions(-)
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 2db62f8ead..e3068b338d 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -95,11 +95,11 @@ static void UnstartedRuntimeJni(Thread* self, ArtMethod* method, jint newValue = args[4]; bool success; if (Runtime::Current()->IsActiveTransaction()) { - success = obj->CasFieldWeakSequentiallyConsistent32<true>(MemberOffset(offset), - expectedValue, newValue); + success = obj->CasFieldStrongSequentiallyConsistent32<true>(MemberOffset(offset), + expectedValue, newValue); } else { - success = obj->CasFieldWeakSequentiallyConsistent32<false>(MemberOffset(offset), - expectedValue, newValue); + success = obj->CasFieldStrongSequentiallyConsistent32<false>(MemberOffset(offset), + expectedValue, newValue); } result->SetZ(success ? JNI_TRUE : JNI_FALSE); } else if (name == "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)") { diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h index d9f442c3e4..3d4568379d 100644 --- a/runtime/mirror/object-inl.h +++ b/runtime/mirror/object-inl.h @@ -80,6 +80,12 @@ inline bool Object::CasLockWordWeakSequentiallyConsistent(LockWord old_val, Lock OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue()); } +inline bool Object::CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val) { + // Force use of non-transactional mode and do not check. + return CasFieldWeakRelaxed32<false, false>( + OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue()); +} + inline uint32_t Object::GetLockOwnerThreadId() { return Monitor::GetLockOwnerThreadId(this); } @@ -448,6 +454,8 @@ inline void Object::SetField32Volatile(MemberOffset field_offset, int32_t new_va SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(field_offset, new_value); } +// TODO: Pass memory_order_ and strong/weak as arguments to avoid code duplication? 
+ template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> inline bool Object::CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value, int32_t new_value) { @@ -466,6 +474,42 @@ inline bool Object::CasFieldWeakSequentiallyConsistent32(MemberOffset field_offs return atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_value, new_value); } +template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> +inline bool Object::CasFieldWeakRelaxed32(MemberOffset field_offset, + int32_t old_value, int32_t new_value) { + if (kCheckTransaction) { + DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); + } + if (kTransactionActive) { + Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true); + } + if (kVerifyFlags & kVerifyThis) { + VerifyObject(this); + } + byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value(); + AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr); + + return atomic_addr->CompareExchangeWeakRelaxed(old_value, new_value); +} + +template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> +inline bool Object::CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, + int32_t old_value, int32_t new_value) { + if (kCheckTransaction) { + DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); + } + if (kTransactionActive) { + Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true); + } + if (kVerifyFlags & kVerifyThis) { + VerifyObject(this); + } + byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value(); + AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr); + + return atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_value, new_value); +} + template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile> inline int64_t Object::GetField64(MemberOffset field_offset) { if 
(kVerifyFlags & kVerifyThis) { @@ -531,6 +575,23 @@ inline bool Object::CasFieldWeakSequentiallyConsistent64(MemberOffset field_offs return atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_value, new_value); } +template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> +inline bool Object::CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, + int64_t old_value, int64_t new_value) { + if (kCheckTransaction) { + DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); + } + if (kTransactionActive) { + Runtime::Current()->RecordWriteField64(this, field_offset, old_value, true); + } + if (kVerifyFlags & kVerifyThis) { + VerifyObject(this); + } + byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value(); + Atomic<int64_t>* atomic_addr = reinterpret_cast<Atomic<int64_t>*>(raw_addr); + return atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_value, new_value); +} + template<class T, VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, bool kIsVolatile> inline T* Object::GetFieldObject(MemberOffset field_offset) { @@ -649,6 +710,38 @@ inline bool Object::CasFieldWeakSequentiallyConsistentObject(MemberOffset field_ return success; } +template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> +inline bool Object::CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, + Object* old_value, Object* new_value) { + if (kCheckTransaction) { + DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); + } + if (kVerifyFlags & kVerifyThis) { + VerifyObject(this); + } + if (kVerifyFlags & kVerifyWrites) { + VerifyObject(new_value); + } + if (kVerifyFlags & kVerifyReads) { + VerifyObject(old_value); + } + if (kTransactionActive) { + Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true); + } + HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value)); + 
HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value)); + byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value(); + Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr); + + bool success = atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_ref.reference_, + new_ref.reference_); + + if (success) { + Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value); + } + return success; +} + template<bool kVisitClass, bool kIsStatic, typename Visitor> inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor) { if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) { diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc index e58091fe09..5f88d54fe0 100644 --- a/runtime/mirror/object.cc +++ b/runtime/mirror/object.cc @@ -156,7 +156,7 @@ int32_t Object::IdentityHashCode() const { // loop iteration. LockWord hash_word(LockWord::FromHashCode(GenerateIdentityHashCode())); DCHECK_EQ(hash_word.GetState(), LockWord::kHashCode); - if (const_cast<Object*>(this)->CasLockWordWeakSequentiallyConsistent(lw, hash_word)) { + if (const_cast<Object*>(this)->CasLockWordWeakRelaxed(lw, hash_word)) { return hash_word.GetHashCode(); } break; diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h index 11998cca23..4fae4704bf 100644 --- a/runtime/mirror/object.h +++ b/runtime/mirror/object.h @@ -110,19 +110,14 @@ class MANAGED LOCKABLE Object { return OFFSET_OF_OBJECT_MEMBER(Object, monitor_); } - // As volatile can be false if the mutators are suspended. This is an optimization since it + // As_volatile can be false if the mutators are suspended. This is an optimization since it // avoids the barriers. 
LockWord GetLockWord(bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetLockWord(LockWord new_val, bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // All Cas operations defined here have C++11 memory_order_seq_cst ordering - // semantics: Preceding memory operations become visible to other threads - // before the CAS, and subsequent operations become visible after the CAS. - // The Cas operations defined here do not fail spuriously, i.e. they - // have C++11 "strong" semantics. - // TODO: In most, possibly all, cases, these assumptions are too strong. - // Confirm and weaken the implementation. bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uint32_t GetLockOwnerThreadId(); mirror::Object* MonitorEnter(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) @@ -242,6 +237,12 @@ class MANAGED LOCKABLE Object { Object* new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + template<bool kTransactionActive, bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value, + Object* new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset); @@ -269,6 +270,18 @@ class MANAGED LOCKABLE Object { int32_t old_value, int32_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + template<bool kTransactionActive, bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + bool CasFieldWeakRelaxed32(MemberOffset field_offset, int32_t old_value, + int32_t new_value) ALWAYS_INLINE + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<bool kTransactionActive, 
bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value, + int32_t new_value) ALWAYS_INLINE + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE int64_t GetField64(MemberOffset field_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -294,6 +307,12 @@ class MANAGED LOCKABLE Object { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + bool CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value, + int64_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T> void SetFieldPtr(MemberOffset field_offset, T new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc index 7cc4cac83f..65dece04e6 100644 --- a/runtime/native/sun_misc_Unsafe.cc +++ b/runtime/native/sun_misc_Unsafe.cc @@ -28,8 +28,8 @@ static jboolean Unsafe_compareAndSwapInt(JNIEnv* env, jobject, jobject javaObj, ScopedFastNativeObjectAccess soa(env); mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj); // JNI must use non transactional mode. - bool success = obj->CasFieldWeakSequentiallyConsistent32<false>(MemberOffset(offset), - expectedValue, newValue); + bool success = obj->CasFieldStrongSequentiallyConsistent32<false>(MemberOffset(offset), + expectedValue, newValue); return success ? 
JNI_TRUE : JNI_FALSE; } @@ -38,8 +38,8 @@ static jboolean Unsafe_compareAndSwapLong(JNIEnv* env, jobject, jobject javaObj, ScopedFastNativeObjectAccess soa(env); mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj); // JNI must use non transactional mode. - bool success = obj->CasFieldWeakSequentiallyConsistent64<false>(MemberOffset(offset), - expectedValue, newValue); + bool success = obj->CasFieldStrongSequentiallyConsistent64<false>(MemberOffset(offset), + expectedValue, newValue); return success ? JNI_TRUE : JNI_FALSE; } @@ -50,8 +50,8 @@ static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaOb mirror::Object* expectedValue = soa.Decode<mirror::Object*>(javaExpectedValue); mirror::Object* newValue = soa.Decode<mirror::Object*>(javaNewValue); // JNI must use non transactional mode. - bool success = obj->CasFieldWeakSequentiallyConsistentObject<false>(MemberOffset(offset), - expectedValue, newValue); + bool success = obj->CasFieldStrongSequentiallyConsistentObject<false>(MemberOffset(offset), + expectedValue, newValue); return success ? JNI_TRUE : JNI_FALSE; }