From bdf7f1c3ab65ccb70f62db5ab31dba060632d458 Mon Sep 17 00:00:00 2001
From: Andreas Gampe <agampe@google.com>
Date: Tue, 30 Aug 2016 16:38:47 -0700
Subject: ART: SHARED_REQUIRES to REQUIRES_SHARED

This coincides with the actual attribute name and upstream usage.
Preparation for deferring to libbase.

Test: m
Test: m test-art-host
Change-Id: Ia8986b5dfd926ba772bf00b0a35eaf83596d8518
---
 runtime/mirror/reference.h | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 38c6616182..6a8b32b62d 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -64,26 +64,26 @@ class MANAGED Reference : public Object {
     return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
   }
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Object* GetReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
+  Object* GetReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>(
         ReferentOffset());
   }
   template<bool kTransactionActive>
-  void SetReferent(Object* referent) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetReferent(Object* referent) REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
   }
   template<bool kTransactionActive>
-  void ClearReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
+  void ClearReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
   }

   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) {
+  Reference* GetPendingNext() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObject<Reference, kDefaultVerifyFlags, kReadBarrierOption>(PendingNextOffset());
   }

   void SetPendingNext(Reference* pending_next)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     if (Runtime::Current()->IsActiveTransaction()) {
       SetFieldObject<true>(PendingNextOffset(), pending_next);
     } else {
@@ -103,22 +103,22 @@ class MANAGED Reference : public Object {
   //    should not be processed again until and unless the reference has been
   //    removed from the list after having determined the reference is not ready
   //    to be enqueued on a java ReferenceQueue.
-  bool IsUnprocessed() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsUnprocessed() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetPendingNext<kWithoutReadBarrier>() == nullptr;
   }

   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  static Class* GetJavaLangRefReference() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static Class* GetJavaLangRefReference() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!java_lang_ref_Reference_.IsNull());
     return java_lang_ref_Reference_.Read<kReadBarrierOption>();
   }
   static void SetClass(Class* klass);
   static void ResetClass();
-  static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  private:
   // Note: This avoids a read barrier, it should only be used by the GC.
-  HeapReference<Object>* GetReferentReferenceAddr() SHARED_REQUIRES(Locks::mutator_lock_) {
+  HeapReference<Object>* GetReferentReferenceAddr() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObjectReferenceAddr<Object>(ReferentOffset());
   }

@@ -144,10 +144,10 @@ class MANAGED FinalizerReference : public Reference {
   }

   template<bool kTransactionActive>
-  void SetZombie(Object* zombie) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetZombie(Object* zombie) REQUIRES_SHARED(Locks::mutator_lock_) {
     return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
  }
-  Object* GetZombie() SHARED_REQUIRES(Locks::mutator_lock_) {
+  Object* GetZombie() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObjectVolatile<Object>(ZombieOffset());
   }
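Note: this patch only renames annotation call sites; the macro definitions
themselves are not part of the diff. As a rough sketch of the pattern the new
name lines up with, following the macros documented for Clang's
-Wthread-safety analysis (the same shape later provided by libbase's
android-base/thread_annotations.h) rather than code shown in this commit,
REQUIRES_SHARED expands to the requires_shared_capability attribute. The
THREAD_ANNOTATION_ATTRIBUTE__ helper and the compiler guard below are
illustrative, taken from that documented pattern, not from ART's sources:

  // Sketch after the Clang thread-safety-analysis documentation; ART's
  // actual definitions are not shown in this diff.
  #if defined(__clang__)
  #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
  #else
  #define THREAD_ANNOTATION_ATTRIBUTE__(x)  // No-op on non-Clang compilers.
  #endif

  // Callers must hold the named capability for shared (reader) access,
  // e.g. Object* GetReferent() REQUIRES_SHARED(Locks::mutator_lock_).
  #define REQUIRES_SHARED(...) \
    THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

  // Exclusive (writer) counterpart, shown for comparison.
  #define REQUIRES(...) \
    THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))

Spelling the macro REQUIRES_SHARED rather than SHARED_REQUIRES therefore
matches both the underlying attribute name and the upstream macro name,
which is what "preparation for deferring to libbase" means here: once the
spellings agree, ART can drop its local definitions and use the shared
header directly.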