author | 2016-08-30 16:38:47 -0700
committer | 2016-08-30 17:02:53 -0700
commit | bdf7f1c3ab65ccb70f62db5ab31dba060632d458 (patch)
tree | 25cc77adfeb05232d0ab00aa561a693f1d71745c /runtime/gc/accounting/atomic_stack.h
parent | d7eabc2cc1a88c1f7f927da61246ae65aab0626c (diff)
ART: SHARED_REQUIRES to REQUIRES_SHARED
This coincides with the actual attribute name and upstream usage.
Preparation for deferring to libbase.
Test: m
Test: m test-art-host
Change-Id: Ia8986b5dfd926ba772bf00b0a35eaf83596d8518
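
For context on why the spelling matters: clang's thread-safety analysis names the underlying attribute requires_shared_capability, so REQUIRES_SHARED(...) reads in the same order as the attribute (and as the macros libbase later provides in android-base/thread_annotations.h). Below is a minimal sketch of how such a macro is commonly wired up; the exact definitions in ART's headers may differ, so treat it as illustrative rather than verbatim.

```cpp
// Illustrative sketch only -- not the verbatim ART/libbase definition.
#if defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op on compilers without the analysis
#endif

// Caller must hold the capability (lock) exclusively.
#define REQUIRES(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))

// Caller must hold the capability at least in shared (reader) mode --
// the attribute is named requires_shared_capability, hence REQUIRES_SHARED.
#define REQUIRES_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
```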
Diffstat (limited to 'runtime/gc/accounting/atomic_stack.h')
-rw-r--r-- | runtime/gc/accounting/atomic_stack.h | 18
1 file changed, 9 insertions, 9 deletions
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 45db50010c..db9568a198 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -73,12 +73,12 @@ class AtomicStack {

   // Beware: Mixing atomic pushes and atomic pops will cause ABA problem.
   // Returns false if we overflowed the stack.
-  bool AtomicPushBackIgnoreGrowthLimit(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool AtomicPushBackIgnoreGrowthLimit(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
     return AtomicPushBackInternal(value, capacity_);
   }

   // Returns false if we overflowed the stack.
-  bool AtomicPushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool AtomicPushBack(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
     return AtomicPushBackInternal(value, growth_limit_);
   }

@@ -86,7 +86,7 @@ class AtomicStack {
   // slots. Returns false if we overflowed the stack.
   bool AtomicBumpBack(size_t num_slots, StackReference<T>** start_address,
                       StackReference<T>** end_address)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kIsDebugBuild) {
       debug_is_sorted_ = false;
     }
@@ -112,7 +112,7 @@ class AtomicStack {
     return true;
   }

-  void AssertAllZero() SHARED_REQUIRES(Locks::mutator_lock_) {
+  void AssertAllZero() REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kIsDebugBuild) {
       for (size_t i = 0; i < capacity_; ++i) {
         DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr)) << "i=" << i;
@@ -120,7 +120,7 @@ class AtomicStack {
     }
   }

-  void PushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void PushBack(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kIsDebugBuild) {
       debug_is_sorted_ = false;
     }
@@ -130,7 +130,7 @@ class AtomicStack {
     begin_[index].Assign(value);
   }

-  T* PopBack() SHARED_REQUIRES(Locks::mutator_lock_) {
+  T* PopBack() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed());
     // Decrement the back index non atomically.
     back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1);
@@ -193,12 +193,12 @@ class AtomicStack {
     }
   }

-  bool ContainsSorted(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool ContainsSorted(const T* value) const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(debug_is_sorted_);
     return std::binary_search(Begin(), End(), value, ObjectComparator());
   }

-  bool Contains(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool Contains(const T* value) const REQUIRES_SHARED(Locks::mutator_lock_) {
     for (auto cur = Begin(), end = End(); cur != end; ++cur) {
       if (cur->AsMirrorPtr() == value) {
         return true;
@@ -220,7 +220,7 @@ class AtomicStack {

   // Returns false if we overflowed the stack.
   bool AtomicPushBackInternal(T* value, size_t limit) ALWAYS_INLINE
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kIsDebugBuild) {
       debug_is_sorted_ = false;
     }
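
To see what the annotation enforces (independent of this rename), here is a self-contained sketch using clang's -Wthread-safety. SharedMutex, heap_lock, and ReadHeapSize are invented for illustration and are not ART code; ART's real annotations attach to Locks::mutator_lock_ as in the diff above.

```cpp
// Standalone illustration (clang++ -std=c++17 -Wthread-safety); not ART code.
#include <shared_mutex>

#define CAPABILITY(x)        __attribute__((capability(x)))
#define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
#define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))

// A reader-writer lock the analysis knows about.
class CAPABILITY("mutex") SharedMutex {
 public:
  void LockShared() ACQUIRE_SHARED() { mu_.lock_shared(); }
  void UnlockShared() RELEASE_SHARED() { mu_.unlock_shared(); }

 private:
  std::shared_mutex mu_;
};

SharedMutex heap_lock;
int heap_size = 0;

// Analogous to REQUIRES_SHARED(Locks::mutator_lock_) above: callers must
// hold heap_lock at least in shared (reader) mode.
int ReadHeapSize() REQUIRES_SHARED(heap_lock) { return heap_size; }

int Caller() {
  heap_lock.LockShared();
  int size = ReadHeapSize();   // OK: the shared capability is held here.
  heap_lock.UnlockShared();
  // ReadHeapSize();           // -Wthread-safety warning: requires holding 'heap_lock'
  return size;
}
```

The point of the rename is purely one of naming: the macro now spells out the same "requires, shared" order as the attribute it expands to, so the annotated signatures in the diff read the same way whether you know the macro or the raw attribute.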