path: root/runtime/mirror/reference.h
author Andreas Gampe <agampe@google.com> 2016-08-30 16:38:47 -0700
committer Andreas Gampe <agampe@google.com> 2016-08-30 17:02:53 -0700
commit    bdf7f1c3ab65ccb70f62db5ab31dba060632d458 (patch)
tree      25cc77adfeb05232d0ab00aa561a693f1d71745c /runtime/mirror/reference.h
parent    d7eabc2cc1a88c1f7f927da61246ae65aab0626c (diff)
ART: SHARED_REQUIRES to REQUIRES_SHARED
This coincides with the actual attribute name and upstream usage.
Preparation for deferring to libbase.

Test: m
Test: m test-art-host
Change-Id: Ia8986b5dfd926ba772bf00b0a35eaf83596d8518
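For context, SHARED_REQUIRES and REQUIRES_SHARED are ART's wrappers around Clang's
-Wthread-safety attributes, and REQUIRES_SHARED matches the name of the underlying
attribute, requires_shared_capability. A minimal sketch of the definition pattern
follows; the THREAD_ANNOTATION_ATTRIBUTE__ helper name is taken from Clang's
thread-safety documentation and is an assumption here, not copied from the ART tree:

#if defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // No-op for compilers without the analysis.
#endif

// Old spelling, removed by this change:
// #define SHARED_REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

// New spelling, matching the Clang attribute name and upstream (libbase) usage:
#define REQUIRES_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))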
Diffstat (limited to 'runtime/mirror/reference.h')
-rw-r--r--  runtime/mirror/reference.h  |  22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 38c6616182..6a8b32b62d 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -64,26 +64,26 @@ class MANAGED Reference : public Object {
     return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
   }
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Object* GetReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
+  Object* GetReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>(
         ReferentOffset());
   }
   template<bool kTransactionActive>
-  void SetReferent(Object* referent) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetReferent(Object* referent) REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
   }
   template<bool kTransactionActive>
-  void ClearReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
+  void ClearReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
   }
   template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) {
+  Reference* GetPendingNext() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObject<Reference, kDefaultVerifyFlags, kReadBarrierOption>(PendingNextOffset());
   }
   void SetPendingNext(Reference* pending_next)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     if (Runtime::Current()->IsActiveTransaction()) {
       SetFieldObject<true>(PendingNextOffset(), pending_next);
     } else {
@@ -103,22 +103,22 @@ class MANAGED Reference : public Object {
   // should not be processed again until and unless the reference has been
   // removed from the list after having determined the reference is not ready
   // to be enqueued on a java ReferenceQueue.
-  bool IsUnprocessed() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsUnprocessed() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetPendingNext<kWithoutReadBarrier>() == nullptr;
   }
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  static Class* GetJavaLangRefReference() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static Class* GetJavaLangRefReference() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!java_lang_ref_Reference_.IsNull());
     return java_lang_ref_Reference_.Read<kReadBarrierOption>();
   }
   static void SetClass(Class* klass);
   static void ResetClass();
-  static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
  private:
   // Note: This avoids a read barrier, it should only be used by the GC.
-  HeapReference<Object>* GetReferentReferenceAddr() SHARED_REQUIRES(Locks::mutator_lock_) {
+  HeapReference<Object>* GetReferentReferenceAddr() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObjectReferenceAddr<kDefaultVerifyFlags>(ReferentOffset());
   }
@@ -144,10 +144,10 @@ class MANAGED FinalizerReference : public Reference {
   }
   template<bool kTransactionActive>
-  void SetZombie(Object* zombie) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetZombie(Object* zombie) REQUIRES_SHARED(Locks::mutator_lock_) {
     return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
   }
-  Object* GetZombie() SHARED_REQUIRES(Locks::mutator_lock_) {
+  Object* GetZombie() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObjectVolatile<Object>(ZombieOffset());
   }
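To illustrate what the annotation buys, here is a self-contained sketch of how
Clang's -Wthread-safety analysis enforces REQUIRES_SHARED at call sites. The
SharedMutex type and the function names below are simplified stand-ins for
ART's Locks::mutator_lock_ machinery, not real ART code; it can be checked
with: clang++ -fsyntax-only -Wthread-safety example.cc

// A capability type: something that can be held shared (read) or exclusive.
struct __attribute__((capability("mutex"))) SharedMutex {
  void ReaderLock() __attribute__((acquire_shared_capability()));
  void ReaderUnlock() __attribute__((release_shared_capability()));
};

SharedMutex mutator_lock;  // Stand-in for Locks::mutator_lock_.

int referent;  // Stand-in for the referent_ field.

// Like Reference::GetReferent(): callers must hold the lock at least shared.
int ReadReferent() __attribute__((requires_shared_capability(mutator_lock))) {
  return referent;
}

void Ok() {
  mutator_lock.ReaderLock();
  ReadReferent();  // Fine: the shared capability is held here.
  mutator_lock.ReaderUnlock();
}

void Warns() {
  ReadReferent();  // -Wthread-safety warning: requires holding 'mutator_lock'.
}

The rename in this commit changes only the macro spelling; the analysis and the
call-site requirements shown above are unchanged.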