ART: Rename SHARED_REQUIRES to REQUIRES_SHARED

This matches the actual attribute name (Clang's requires_shared_capability)
and upstream usage, in preparation for deferring to libbase's thread-safety
annotation macros.

Test: m
Test: m test-art-host
Change-Id: Ia8986b5dfd926ba772bf00b0a35eaf83596d8518
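---
For reference, a minimal sketch of the two macros, assuming the standard
Clang thread-safety annotation scheme (ART's real definitions live in
runtime/base/mutex.h and base/macros.h, which are not part of this diff):

    // Old ART-local spelling: the macro name did not match the underlying
    // Clang attribute, requires_shared_capability.
    #define SHARED_REQUIRES(...) \
        __attribute__((requires_shared_capability(__VA_ARGS__)))

    // New spelling: mirrors the attribute name and the REQUIRES_SHARED
    // macro used by upstream Clang documentation and by libbase, so ART
    // can eventually drop its own definition and defer to libbase.
    #define REQUIRES_SHARED(...) \
        __attribute__((requires_shared_capability(__VA_ARGS__)))

Both expand to the same attribute, so the hunks below are a pure textual
rename at each annotation site; the thread-safety analysis itself is
unchanged.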
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index 4f714a6..22a3ea8 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -35,14 +35,14 @@
  public:
   // Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
   template <PointerSize kPointerSize, bool kTransactionActive>
-  bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_)
+  bool CreateFromArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
-  ArtMethod* GetArtMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+  ArtMethod* GetArtMethod() REQUIRES_SHARED(Locks::mutator_lock_);
   // Only used by the image writer.
   template <bool kTransactionActive = false>
-  void SetArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
-  mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+  mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   static MemberOffset ArtMethodOffset() {
diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h
index dcf5118..1d934a8 100644
--- a/runtime/mirror/accessible_object.h
+++ b/runtime/mirror/accessible_object.h
@@ -36,12 +36,12 @@
   }
 
   template<bool kTransactionActive>
-  void SetAccessible(bool value) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetAccessible(bool value) REQUIRES_SHARED(Locks::mutator_lock_) {
     UNUSED(padding_);
     return SetFieldBoolean<kTransactionActive>(FlagOffset(), value ? 1u : 0u);
   }
 
-  bool IsAccessible() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsAccessible() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldBoolean(FlagOffset());
   }
 
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 014e54b..9d7f98f 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -101,7 +101,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsArray as object is not yet in live bitmap or allocation stack.
     Array* array = down_cast<Array*>(obj);
     // DCHECK(array->IsArrayInstance());
@@ -125,7 +125,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsArray as object is not yet in live bitmap or allocation stack.
     Array* array = down_cast<Array*>(obj);
     // DCHECK(array->IsArrayInstance());
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 4128689..aee48cc 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -43,7 +43,7 @@
 static Array* RecursiveCreateMultiArray(Thread* self,
                                         Handle<Class> array_class, int current_dimension,
                                         Handle<mirror::IntArray> dimensions)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   int32_t array_length = dimensions->Get(current_dimension);
   StackHandleScope<1> hs(self);
   Handle<Array> new_array(
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index ec10a43..6c82eb9 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -40,21 +40,21 @@
   template <bool kIsInstrumented, bool kFillUsable = false>
   ALWAYS_INLINE static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
                                     size_t component_size_shift, gc::AllocatorType allocator_type)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   static Array* CreateMultiArray(Thread* self, Handle<Class> element_class,
                                  Handle<IntArray> dimensions)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
+  size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ALWAYS_INLINE int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE int32_t GetLength() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Array, length_));
   }
 
-  void SetLength(int32_t length) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetLength(int32_t length) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK_GE(length, 0);
     // We use the non-transactional version since we can't undo this write. We also disable checking
     // since it would fail during a transaction.
@@ -68,7 +68,7 @@
   static MemberOffset DataOffset(size_t component_size);
 
   void* GetRawData(size_t component_size, int32_t index)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(component_size).Int32Value() +
         + (index * component_size);
     return reinterpret_cast<void*>(data);
@@ -83,18 +83,18 @@
   // Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and
   // returns false.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  Array* CopyOf(Thread* self, int32_t new_length) SHARED_REQUIRES(Locks::mutator_lock_)
+  Array* CopyOf(Thread* self, int32_t new_length) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
  protected:
-  void ThrowArrayStoreException(Object* object) SHARED_REQUIRES(Locks::mutator_lock_)
+  void ThrowArrayStoreException(Object* object) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
  private:
   void ThrowArrayIndexOutOfBoundsException(int32_t index)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The number of array elements.
   int32_t length_;
@@ -110,32 +110,32 @@
   typedef T ElementType;
 
   static PrimitiveArray<T>* Alloc(Thread* self, size_t length)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
-  const T* GetData() const ALWAYS_INLINE  SHARED_REQUIRES(Locks::mutator_lock_) {
+  const T* GetData() const ALWAYS_INLINE  REQUIRES_SHARED(Locks::mutator_lock_) {
     return reinterpret_cast<const T*>(GetRawData(sizeof(T), 0));
   }
 
-  T* GetData() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+  T* GetData() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
     return reinterpret_cast<T*>(GetRawData(sizeof(T), 0));
   }
 
-  T Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+  T Get(int32_t i) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
 
-  T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+  T GetWithoutChecks(int32_t i) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(CheckIsValidIndex(i)) << "i=" << i << " length=" << GetLength();
     return GetData()[i];
   }
 
-  void Set(int32_t i, T value) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+  void Set(int32_t i, T value) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
 
   // TODO fix thread safety analysis broken by the use of template. This should be
-  // SHARED_REQUIRES(Locks::mutator_lock_).
+  // REQUIRES_SHARED(Locks::mutator_lock_).
   template<bool kTransactionActive, bool kCheckTransaction = true>
   void Set(int32_t i, T value) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
 
   // TODO fix thread safety analysis broken by the use of template. This should be
-  // SHARED_REQUIRES(Locks::mutator_lock_).
+  // REQUIRES_SHARED(Locks::mutator_lock_).
   template<bool kTransactionActive,
            bool kCheckTransaction = true,
            VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -147,7 +147,7 @@
    * and the arrays non-null.
    */
   void Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   /*
    * Works like memcpy(), except we guarantee not to allow tearing of array values (ie using
@@ -155,7 +155,7 @@
    * and the arrays non-null.
    */
   void Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   static void SetArrayClass(Class* array_class) {
     CHECK(array_class_.IsNull());
@@ -164,7 +164,7 @@
   }
 
   template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  static Class* GetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static Class* GetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!array_class_.IsNull());
     return array_class_.Read<kReadBarrierOption>();
   }
@@ -174,7 +174,7 @@
     array_class_ = GcRoot<Class>(nullptr);
   }
 
-  static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   static GcRoot<Class> array_class_;
@@ -189,14 +189,14 @@
            VerifyObjectFlags kVerifyFlags = kVerifyNone,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   T GetElementPtrSize(uint32_t idx, PointerSize ptr_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive = false, bool kUnchecked = false>
   void SetElementPtrSize(uint32_t idx, uint64_t element, PointerSize ptr_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   template<bool kTransactionActive = false, bool kUnchecked = false, typename T>
   void SetElementPtrSize(uint32_t idx, T* element, PointerSize ptr_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Fixup the pointers in the dest arrays by passing our pointers through the visitor. Only copies
   // to dest if visitor(source_ptr) != source_ptr.
@@ -204,7 +204,7 @@
             ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
             typename Visitor>
   void Fixup(mirror::PointerArray* dest, PointerSize pointer_size, const Visitor& visitor)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
 }  // namespace mirror
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f948be7..96b3345 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -641,7 +641,7 @@
 static ArtField* FindFieldByNameAndType(LengthPrefixedArray<ArtField>* fields,
                                         const StringPiece& name,
                                         const StringPiece& type)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (fields == nullptr) {
     return nullptr;
   }
@@ -952,14 +952,14 @@
                   bool is_static ATTRIBUTE_UNUSED) const {}
 
   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!root->IsNull()) {
       VisitRoot(root);
     }
   }
 
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     mirror::Object* old_ref = root->AsMirrorPtr();
     mirror::Object* new_ref = ReadBarrier::BarrierForRoot(root);
     if (old_ref != new_ref) {
@@ -987,7 +987,7 @@
   }
 
   void operator()(mirror::Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     StackHandleScope<1> hs(self_);
     Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
     mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 1fed190..1751f32 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -133,7 +133,7 @@
   };
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  Status GetStatus() SHARED_REQUIRES(Locks::mutator_lock_) {
+  Status GetStatus() REQUIRES_SHARED(Locks::mutator_lock_) {
     static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
     return static_cast<Status>(
         GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_)));
@@ -141,7 +141,7 @@
 
   // This is static because 'this' may be moved by GC.
   static void SetStatus(Handle<Class> h_this, Status new_status, Thread* self)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   static MemberOffset StatusOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, status_);
@@ -149,157 +149,157 @@
 
   // Returns true if the class has been retired.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsRetired() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsRetired() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() == kStatusRetired;
   }
 
   // Returns true if the class has failed to link.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsErroneous() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsErroneous() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() == kStatusError;
   }
 
   // Returns true if the class has been loaded.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsIdxLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsIdxLoaded() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() >= kStatusIdx;
   }
 
   // Returns true if the class has been loaded.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsLoaded() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() >= kStatusLoaded;
   }
 
   // Returns true if the class has been linked.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsResolved() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsResolved() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() >= kStatusResolved;
   }
 
   // Returns true if the class was compile-time verified.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsCompileTimeVerified() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsCompileTimeVerified() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() >= kStatusRetryVerificationAtRuntime;
   }
 
   // Returns true if the class has been verified.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsVerified() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsVerified() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() >= kStatusVerified;
   }
 
   // Returns true if the class is initializing.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsInitializing() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsInitializing() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() >= kStatusInitializing;
   }
 
   // Returns true if the class is initialized.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsInitialized() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() == kStatusInitialized;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_);
   static MemberOffset AccessFlagsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ALWAYS_INLINE uint32_t GetClassFlags() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE uint32_t GetClassFlags() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_flags_));
   }
-  void SetClassFlags(uint32_t new_flags) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetClassFlags(uint32_t new_flags) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetAccessFlags(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns true if the class is an interface.
-  ALWAYS_INLINE bool IsInterface() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsInterface() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccInterface) != 0;
   }
 
   // Returns true if the class is declared public.
-  ALWAYS_INLINE bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsPublic() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccPublic) != 0;
   }
 
   // Returns true if the class is declared final.
-  ALWAYS_INLINE bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsFinal() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
-  ALWAYS_INLINE bool IsFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsFinalizable() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccClassIsFinalizable) != 0;
   }
 
-  ALWAYS_INLINE void SetRecursivelyInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE void SetRecursivelyInitialized() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
     uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
     SetAccessFlags(flags | kAccRecursivelyInitialized);
   }
 
-  ALWAYS_INLINE void SetHasDefaultMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE void SetHasDefaultMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
     uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
     SetAccessFlags(flags | kAccHasDefaultMethod);
   }
 
-  ALWAYS_INLINE void SetFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE void SetFinalizable() REQUIRES_SHARED(Locks::mutator_lock_) {
     uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
     SetAccessFlags(flags | kAccClassIsFinalizable);
   }
 
-  ALWAYS_INLINE bool IsStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsStringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetClassFlags() & kClassFlagString) != 0;
   }
 
-  ALWAYS_INLINE void SetStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE void SetStringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     SetClassFlags(kClassFlagString | kClassFlagNoReferenceFields);
   }
 
-  ALWAYS_INLINE bool IsClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsClassLoaderClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetClassFlags() == kClassFlagClassLoader;
   }
 
-  ALWAYS_INLINE void SetClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE void SetClassLoaderClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     SetClassFlags(kClassFlagClassLoader);
   }
 
-  ALWAYS_INLINE bool IsDexCacheClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsDexCacheClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetClassFlags() & kClassFlagDexCache) != 0;
   }
 
-  ALWAYS_INLINE void SetDexCacheClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE void SetDexCacheClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     SetClassFlags(GetClassFlags() | kClassFlagDexCache);
   }
 
   // Returns true if the class is abstract.
-  ALWAYS_INLINE bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsAbstract() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccAbstract) != 0;
   }
 
   // Returns true if the class is an annotation.
-  ALWAYS_INLINE bool IsAnnotation() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsAnnotation() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccAnnotation) != 0;
   }
 
   // Returns true if the class is synthetic.
-  ALWAYS_INLINE bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsSynthetic() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccSynthetic) != 0;
   }
 
   // Return whether the class had run the verifier at least once.
   // This does not necessarily mean that access checks are avoidable,
   // since the class methods might still need to be run with access checks.
-  bool WasVerificationAttempted() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool WasVerificationAttempted() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccSkipAccessChecks) != 0;
   }
 
   // Mark the class as having gone through a verification attempt.
   // Mutually exclusive from whether or not each method is allowed to skip access checks.
-  void SetVerificationAttempted() SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetVerificationAttempted() REQUIRES_SHARED(Locks::mutator_lock_) {
     uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
     if ((flags & kAccVerificationAttempted) == 0) {
       SetAccessFlags(flags | kAccVerificationAttempted);
@@ -307,27 +307,27 @@
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsTypeOfReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsTypeOfReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetClassFlags<kVerifyFlags>() & kClassFlagReference) != 0;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsWeakReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsWeakReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetClassFlags<kVerifyFlags>() == kClassFlagWeakReference;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsSoftReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsSoftReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetClassFlags<kVerifyFlags>() == kClassFlagSoftReference;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsFinalizerReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsFinalizerReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetClassFlags<kVerifyFlags>() == kClassFlagFinalizerReference;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPhantomReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPhantomReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetClassFlags<kVerifyFlags>() == kClassFlagPhantomReference;
   }
 
@@ -336,7 +336,7 @@
   // For array classes, where all the classes are final due to there being no sub-classes, an
   // Object[] may be assigned to by a String[] but a String[] may not be assigned to by other
   // types as the component is final.
-  bool CannotBeAssignedFromOtherTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool CannotBeAssignedFromOtherTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!IsArrayClass()) {
       return IsFinal();
     } else {
@@ -351,19 +351,19 @@
 
   // Returns true if this class is the placeholder and should retire and
   // be replaced with a class with the right size for embedded imt/vtable.
-  bool IsTemp() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsTemp() REQUIRES_SHARED(Locks::mutator_lock_) {
     Status s = GetStatus();
     return s < Status::kStatusResolving && ShouldHaveEmbeddedVTable();
   }
 
-  String* GetName() SHARED_REQUIRES(Locks::mutator_lock_);  // Returns the cached name.
-  void SetName(String* name) SHARED_REQUIRES(Locks::mutator_lock_);  // Sets the cached name.
+  String* GetName() REQUIRES_SHARED(Locks::mutator_lock_);  // Returns the cached name.
+  void SetName(String* name) REQUIRES_SHARED(Locks::mutator_lock_);  // Sets the cached name.
   // Computes the name, then sets the cached value.
-  static String* ComputeName(Handle<Class> h_this) SHARED_REQUIRES(Locks::mutator_lock_)
+  static String* ComputeName(Handle<Class> h_this) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsProxyClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     // Read access flags without using the getter, as whether something is a proxy can be checked in
     // any loaded state
     // TODO: switch to a check if the super class is java.lang.reflect.Proxy?
@@ -376,9 +376,9 @@
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+  Primitive::Type GetPrimitiveType() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetPrimitiveType(Primitive::Type new_type) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetPrimitiveType(Primitive::Type new_type) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
     uint32_t v32 = static_cast<uint32_t>(new_type);
     DCHECK_EQ(v32 & kPrimitiveTypeMask, v32) << "upper 16 bits aren't zero";
@@ -388,81 +388,81 @@
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+  size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns true if the class is a primitive type.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitive() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrimitive() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() != Primitive::kPrimNot;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveBoolean() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrimitiveBoolean() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimBoolean;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveByte() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrimitiveByte() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimByte;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveChar() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrimitiveChar() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimChar;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveShort() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrimitiveShort() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimShort;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveInt() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrimitiveInt() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetPrimitiveType() == Primitive::kPrimInt;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveLong() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrimitiveLong() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimLong;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveFloat() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrimitiveFloat() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimFloat;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveDouble() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrimitiveDouble() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimDouble;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveVoid() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrimitiveVoid() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimVoid;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveArray() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrimitiveArray() REQUIRES_SHARED(Locks::mutator_lock_) {
     return IsArrayClass<kVerifyFlags>() &&
         GetComponentType<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()->
         IsPrimitive();
   }
 
   // Depth of class from java.lang.Object
-  uint32_t Depth() SHARED_REQUIRES(Locks::mutator_lock_);
+  uint32_t Depth() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsClassClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsClassClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool IsThrowableClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsThrowableClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsReferenceClass() const SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsReferenceClass() const REQUIRES_SHARED(Locks::mutator_lock_);
 
   static MemberOffset ComponentTypeOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
@@ -470,9 +470,9 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Class* GetComponentType() SHARED_REQUIRES(Locks::mutator_lock_);
+  Class* GetComponentType() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetComponentType(Class* new_component_type) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetComponentType(Class* new_component_type) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(GetComponentType() == nullptr);
     DCHECK(new_component_type != nullptr);
     // Component type is invariant: use non-transactional mode without check.
@@ -480,46 +480,46 @@
   }
 
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  size_t GetComponentSize() SHARED_REQUIRES(Locks::mutator_lock_) {
+  size_t GetComponentSize() REQUIRES_SHARED(Locks::mutator_lock_) {
     return 1U << GetComponentSizeShift<kReadBarrierOption>();
   }
 
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  size_t GetComponentSizeShift() SHARED_REQUIRES(Locks::mutator_lock_) {
+  size_t GetComponentSizeShift() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetComponentType<kDefaultVerifyFlags, kReadBarrierOption>()->GetPrimitiveTypeSizeShift();
   }
 
-  bool IsObjectClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsObjectClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return !IsPrimitive() && GetSuperClass() == nullptr;
   }
 
-  bool IsInstantiableNonArray() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsInstantiableNonArray() REQUIRES_SHARED(Locks::mutator_lock_) {
     return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass();
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsInstantiable() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsInstantiable() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (!IsPrimitive() && !IsInterface() && !IsAbstract()) ||
         (IsAbstract() && IsArrayClass<kVerifyFlags, kReadBarrierOption>());
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsObjectArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsObjectArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     mirror::Class* const component_type = GetComponentType<kVerifyFlags, kReadBarrierOption>();
     return component_type != nullptr && !component_type->IsPrimitive();
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsIntArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsIntArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
     auto* component_type = GetComponentType<kVerifyFlags>();
     return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsLongArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsLongArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
     auto* component_type = GetComponentType<kVerifyFlags>();
     return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
@@ -528,16 +528,16 @@
   // Creates a raw object instance but does not invoke the default constructor.
   template<bool kIsInstrumented, bool kCheckAddFinalizer = true>
   ALWAYS_INLINE Object* Alloc(Thread* self, gc::AllocatorType allocator_type)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   Object* AllocObject(Thread* self)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
   Object* AllocNonMovableObject(Thread* self)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsVariableSize() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsVariableSize() REQUIRES_SHARED(Locks::mutator_lock_) {
     // Classes, arrays, and strings vary in size, and so the object_size_ field cannot
     // be used to Get their instance size
     return IsClassClass<kVerifyFlags, kReadBarrierOption>() ||
@@ -546,17 +546,17 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  uint32_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint32_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  uint32_t GetClassSize() SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint32_t GetClassSize() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
   }
 
   void SetClassSize(uint32_t new_class_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Compute how many bytes would be used by a class with the given elements.
   static uint32_t ComputeClassSize(bool has_embedded_vtable,
@@ -582,31 +582,31 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  uint32_t GetObjectSize() SHARED_REQUIRES(Locks::mutator_lock_);
+  uint32_t GetObjectSize() REQUIRES_SHARED(Locks::mutator_lock_);
   static MemberOffset ObjectSizeOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, object_size_);
   }
 
-  void SetObjectSize(uint32_t new_object_size) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetObjectSize(uint32_t new_object_size) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!IsVariableSize());
     // Not called within a transaction.
     return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
   }
 
   void SetObjectSizeWithoutChecks(uint32_t new_object_size)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     // Not called within a transaction.
     return SetField32<false, false, kVerifyNone>(
         OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
   }
 
   // Returns true if this class is in the same package as that class.
-  bool IsInSamePackage(Class* that) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsInSamePackage(Class* that) REQUIRES_SHARED(Locks::mutator_lock_);
 
   static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2);
 
   // Returns true if this class can access that class.
-  bool CanAccess(Class* that) SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool CanAccess(Class* that) REQUIRES_SHARED(Locks::mutator_lock_) {
     return that->IsPublic() || this->IsInSamePackage(that);
   }
 
@@ -614,7 +614,7 @@
   // Note that access to the class isn't checked in case the declaring class is protected and the
   // method has been exposed by a public sub-class
   bool CanAccessMember(Class* access_to, uint32_t member_flags)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     // Classes can access all of their own members
     if (this == access_to) {
       return true;
@@ -642,40 +642,40 @@
   // referenced by the FieldId in the DexFile in case the declaring class is inaccessible.
   bool CanAccessResolvedField(Class* access_to, ArtField* field,
                               DexCache* dex_cache, uint32_t field_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   bool CheckResolvedFieldAccess(Class* access_to, ArtField* field,
                                 uint32_t field_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Can this class access a resolved method?
   // Note that access to the method's class is checked and this may require looking up the class
   // referenced by the MethodId in the DexFile in case the declaring class is inaccessible.
   bool CanAccessResolvedMethod(Class* access_to, ArtMethod* resolved_method,
                                DexCache* dex_cache, uint32_t method_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   template <InvokeType throw_invoke_type>
   bool CheckResolvedMethodAccess(Class* access_to, ArtMethod* resolved_method,
                                  uint32_t method_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool IsSubClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsSubClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Can src be assigned to this class? For example, String can be assigned to Object (by an
   // upcast), however, an Object cannot be assigned to a String as a potentially exception throwing
   // downcast would be necessary. Similarly for interfaces, a class that implements (or an interface
   // that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
   // to themselves. Classes for primitive types may not assign to each other.
-  ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE bool IsAssignableFrom(Class* src) REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ALWAYS_INLINE Class* GetSuperClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE Class* GetSuperClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get first common super class. It will never return null.
   // `This` and `klass` must be classes.
-  Class* GetCommonSuperClass(Handle<Class> klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  Class* GetCommonSuperClass(Handle<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetSuperClass(Class* new_super_class) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetSuperClass(Class* new_super_class) REQUIRES_SHARED(Locks::mutator_lock_) {
     // Super class is assigned once, except during class linker initialization.
     Class* old_super_class = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
     DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
@@ -683,7 +683,7 @@
     SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
   }
 
-  bool HasSuperClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool HasSuperClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetSuperClass() != nullptr;
   }
 
@@ -691,9 +691,9 @@
     return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
   }
 
-  ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+  ClassLoader* GetClassLoader() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetClassLoader(ClassLoader* new_cl) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetClassLoader(ClassLoader* new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
 
   static MemberOffset DexCacheOffset() {
     return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_));
@@ -705,125 +705,125 @@
     kDumpClassInitialized = (1 << 2),
   };
 
-  void DumpClass(std::ostream& os, int flags) SHARED_REQUIRES(Locks::mutator_lock_);
+  void DumpClass(std::ostream& os, int flags) REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
+  DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Also updates the dex_cache_strings_ variable from new_dex_cache.
-  void SetDexCache(DexCache* new_dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetDexCache(DexCache* new_dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetMethodsPtr()
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   static MemberOffset MethodsOffset() {
     return MemberOffset(OFFSETOF_MEMBER(Class, methods_));
   }
 
   ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetMethods(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetMethodsPtr(LengthPrefixedArray<ArtMethod>* new_methods,
                      uint32_t num_direct,
                      uint32_t num_virtual)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   // Used by image writer.
   void SetMethodsPtrUnchecked(LengthPrefixedArray<ArtMethod>* new_methods,
                               uint32_t num_direct,
                               uint32_t num_virtual)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSlice(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Use only when we are allocating or populating the method arrays.
   ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, PointerSize pointer_size)
-        SHARED_REQUIRES(Locks::mutator_lock_);
+        REQUIRES_SHARED(Locks::mutator_lock_);
   ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, PointerSize pointer_size)
-        SHARED_REQUIRES(Locks::mutator_lock_);
+        REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns the number of static, private, and constructor methods.
-  ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t NumDirectMethods() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE ArraySlice<ArtMethod> GetMethodsSlice(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSlice(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredMethods(
         PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <PointerSize kPointerSize, bool kTransactionActive>
   static Method* GetDeclaredMethodInternal(Thread* self,
                                            mirror::Class* klass,
                                            mirror::String* name,
                                            mirror::ObjectArray<mirror::Class>* args)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   template <PointerSize kPointerSize, bool kTransactionActive>
   static Constructor* GetDeclaredConstructorInternal(Thread* self,
                                                      mirror::Class* klass,
                                                      mirror::ObjectArray<mirror::Class>* args)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredVirtualMethods(
         PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSlice(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetCopiedMethods(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSlice(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(
       PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns the number of non-inherited virtual methods (sum of declared and copied methods).
-  ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t NumVirtualMethods() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns the number of copied virtual methods.
-  ALWAYS_INLINE uint32_t NumCopiedVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t NumCopiedVirtualMethods() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns the number of declared virtual methods.
-  ALWAYS_INLINE uint32_t NumDeclaredVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t NumDeclaredVirtualMethods() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ALWAYS_INLINE uint32_t NumMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t NumMethods() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ArtMethod* GetVirtualMethod(size_t i, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* GetVirtualMethodDuringLinking(size_t i, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ALWAYS_INLINE PointerArray* GetVTable() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE PointerArray* GetVTable() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE PointerArray* GetVTableDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetVTable(PointerArray* new_vtable) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetVTable(PointerArray* new_vtable) REQUIRES_SHARED(Locks::mutator_lock_);
 
   static MemberOffset VTableOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
@@ -841,212 +841,212 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool ShouldHaveImt() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool ShouldHaveImt() REQUIRES_SHARED(Locks::mutator_lock_) {
     return ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>();
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool ShouldHaveEmbeddedVTable() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool ShouldHaveEmbeddedVTable() REQUIRES_SHARED(Locks::mutator_lock_) {
     return IsInstantiable<kVerifyFlags, kReadBarrierOption>();
   }
 
-  bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool HasVTable() REQUIRES_SHARED(Locks::mutator_lock_);
 
   static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, PointerSize pointer_size);
 
-  int32_t GetVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
+  int32_t GetVTableLength() REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* GetVTableEntry(uint32_t i, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  int32_t GetEmbeddedVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
+  int32_t GetEmbeddedVTableLength() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetEmbeddedVTableLength(int32_t len) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetEmbeddedVTableLength(int32_t len) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ImTable* GetImt(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+  ImTable* GetImt(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetImt(ImTable* imt, PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetImt(ImTable* imt, PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* GetEmbeddedVTableEntry(uint32_t i, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   inline void SetEmbeddedVTableEntryUnchecked(uint32_t i,
                                               ArtMethod* method,
                                               PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void PopulateEmbeddedVTable(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Given a method implemented by this class but potentially from a super class, return the
   // specific implementation method for this class.
   ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Given a method implemented by this class' super class, return the specific implementation
   // method for this class.
   ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Given a method from some implementor of this interface, return the specific implementation
   // method for this class.
   ArtMethod* FindVirtualMethodForInterfaceSuper(ArtMethod* method, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Given a method implemented by this class, but potentially from a
   // super class or interface, return the specific implementation
   // method for this class.
   ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE;
+      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE;
 
   ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
                                  PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature,
                                  PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
                                  PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
                                       PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
                                       PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
                                       PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature,
                               PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature,
                               PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
                               PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
                                        PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
                                        PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
                                        PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredDirectMethodByName(const StringPiece& name, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature,
                                PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature,
                                PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
                                PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ArtMethod* FindClassInitializer(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+  ArtMethod* FindClassInitializer(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool HasDefaultMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool HasDefaultMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccHasDefaultMethod) != 0;
   }
 
-  bool HasBeenRecursivelyInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool HasBeenRecursivelyInitialized() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccRecursivelyInitialized) != 0;
   }
 
-  ALWAYS_INLINE int32_t GetIfTableCount() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE int32_t GetIfTableCount() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ALWAYS_INLINE IfTable* GetIfTable() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE IfTable* GetIfTable() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get instance fields of the class (See also GetSFields).
-  LengthPrefixedArray<ArtField>* GetIFieldsPtr() SHARED_REQUIRES(Locks::mutator_lock_);
+  LengthPrefixedArray<ArtField>* GetIFieldsPtr() REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE IterationRange<StrideIterator<ArtField>> GetIFields()
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetIFieldsPtr(LengthPrefixedArray<ArtField>* new_ifields)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Unchecked edition has no verification flags.
   void SetIFieldsPtrUnchecked(LengthPrefixedArray<ArtField>* new_ifields)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  uint32_t NumInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_);
-  ArtField* GetInstanceField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+  uint32_t NumInstanceFields() REQUIRES_SHARED(Locks::mutator_lock_);
+  ArtField* GetInstanceField(uint32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns the number of instance fields containing reference types. Does not count fields in any
   // super classes.
-  uint32_t NumReferenceInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint32_t NumReferenceInstanceFields() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsResolved() || IsErroneous());
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
   }
 
-  uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint32_t NumReferenceInstanceFieldsDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsLoaded() || IsErroneous());
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
   }
 
-  void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetNumReferenceInstanceFields(uint32_t new_num) REQUIRES_SHARED(Locks::mutator_lock_) {
     // Not called within a transaction.
     SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), new_num);
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+  uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetReferenceInstanceOffsets(uint32_t new_reference_offsets)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get the offset of the first reference instance field. Other reference instance fields follow.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   MemberOffset GetFirstReferenceInstanceFieldOffset()
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns the number of static fields containing reference types.
-  uint32_t NumReferenceStaticFields() SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint32_t NumReferenceStaticFields() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsResolved() || IsErroneous());
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
   }
 
-  uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint32_t NumReferenceStaticFieldsDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsLoaded() || IsErroneous() || IsRetired());
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
   }
 
-  void SetNumReferenceStaticFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetNumReferenceStaticFields(uint32_t new_num) REQUIRES_SHARED(Locks::mutator_lock_) {
     // Not called within a transaction.
     SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num);
   }
@@ -1055,53 +1055,53 @@
   template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
             ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   MemberOffset GetFirstReferenceStaticFieldOffset(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get the offset of the first reference static field. Other reference static fields follow.
   MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Gets the static fields of the class.
-  LengthPrefixedArray<ArtField>* GetSFieldsPtr() SHARED_REQUIRES(Locks::mutator_lock_);
+  LengthPrefixedArray<ArtField>* GetSFieldsPtr() REQUIRES_SHARED(Locks::mutator_lock_);
   ALWAYS_INLINE IterationRange<StrideIterator<ArtField>> GetSFields()
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetSFieldsPtr(LengthPrefixedArray<ArtField>* new_sfields)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Unchecked edition has no verification flags.
   void SetSFieldsPtrUnchecked(LengthPrefixedArray<ArtField>* new_sfields)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  uint32_t NumStaticFields() SHARED_REQUIRES(Locks::mutator_lock_);
+  uint32_t NumStaticFields() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // TODO: uint16_t
-  ArtField* GetStaticField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+  ArtField* GetStaticField(uint32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Find a static or instance field using the JLS resolution order.
   static ArtField* FindField(Thread* self, Handle<Class> klass, const StringPiece& name,
                              const StringPiece& type)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Finds the given instance field in this class or a superclass.
   ArtField* FindInstanceField(const StringPiece& name, const StringPiece& type)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Finds the given instance field in this class or a superclass; only searches classes that
   // have the same dex cache.
   ArtField* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtField* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtField* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Finds the given static field in this class or a superclass.
   static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const StringPiece& name,
                                    const StringPiece& type)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Finds the given static field in this class or a superclass; only searches classes that
   // have the same dex cache.
@@ -1109,122 +1109,122 @@
                                    Class* klass,
                                    const DexCache* dex_cache,
                                    uint32_t dex_field_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtField* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtField* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  pid_t GetClinitThreadId() SHARED_REQUIRES(Locks::mutator_lock_) {
+  pid_t GetClinitThreadId() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsIdxLoaded() || IsErroneous()) << PrettyClass(this);
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_));
   }
 
-  void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetClinitThreadId(pid_t new_clinit_thread_id) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  Object* GetVerifyError() SHARED_REQUIRES(Locks::mutator_lock_) {
+  Object* GetVerifyError() REQUIRES_SHARED(Locks::mutator_lock_) {
     // DCHECK(IsErroneous());
     return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_));
   }
 
-  uint16_t GetDexClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint16_t GetDexClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_));
   }
 
-  void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetDexClassDefIndex(uint16_t class_def_idx) REQUIRES_SHARED(Locks::mutator_lock_) {
     // Not called within a transaction.
     SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), class_def_idx);
   }
 
-  uint16_t GetDexTypeIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint16_t GetDexTypeIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_));
   }
 
-  void SetDexTypeIndex(uint16_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetDexTypeIndex(uint16_t type_idx) REQUIRES_SHARED(Locks::mutator_lock_) {
     // Not called within a transaction.
     SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx);
   }
 
   uint32_t FindTypeIndexInOtherDexFile(const DexFile& dex_file)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static Class* GetJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static Class* GetJavaLangClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(HasJavaLangClass());
     return java_lang_Class_.Read();
   }
 
-  static bool HasJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static bool HasJavaLangClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return !java_lang_Class_.IsNull();
   }
 
   // Can't call this SetClass or else it gets called instead of Object::SetClass in places.
-  static void SetClassClass(Class* java_lang_Class) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void SetClassClass(Class* java_lang_Class) REQUIRES_SHARED(Locks::mutator_lock_);
   static void ResetClass();
   static void VisitRoots(RootVisitor* visitor)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // VisitNativeRoots visits roots that are keyed off the native pointers, such as ArtFields
   // and ArtMethods.
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
   void VisitNativeRoots(Visitor& visitor, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // When the class is verified, set the kAccSkipAccessChecks flag on each method.
   void SetSkipAccessChecksFlagOnAllMethods(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get the descriptor of the class. In a few cases a std::string is required; rather than
   // always creating one, the storage argument is populated and its internal c_str() is
   // returned. We do this to avoid memory allocation in the common case.
-  const char* GetDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_);
+  const char* GetDescriptor(std::string* storage) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  const char* GetArrayDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_);
+  const char* GetArrayDescriptor(std::string* storage) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool DescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool DescriptorEquals(const char* match) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  const DexFile::ClassDef* GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_);
+  const DexFile::ClassDef* GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t NumDirectInterfaces() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_);
+  uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_);
 
   static mirror::Class* GetDirectInterface(Thread* self, Handle<mirror::Class> klass,
                                            uint32_t idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  const char* GetSourceFile() SHARED_REQUIRES(Locks::mutator_lock_);
+  const char* GetSourceFile() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  std::string GetLocation() SHARED_REQUIRES(Locks::mutator_lock_);
+  std::string GetLocation() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  const DexFile& GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
+  const DexFile& GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  const DexFile::TypeList* GetInterfaceTypeList() SHARED_REQUIRES(Locks::mutator_lock_);
+  const DexFile::TypeList* GetInterfaceTypeList() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Asserts we are initialized or initializing in the given thread.
   void AssertInitializedOrInitializingInThread(Thread* self)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   Class* CopyOf(Thread* self, int32_t new_length, ImTable* imt,
                 PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   // For proxy class only.
-  ObjectArray<Class>* GetInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
+  ObjectArray<Class>* GetInterfaces() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // For proxy class only.
-  ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_REQUIRES(Locks::mutator_lock_);
+  ObjectArray<ObjectArray<Class>>* GetThrows() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // For reference class only.
-  MemberOffset GetDisableIntrinsicFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_);
-  MemberOffset GetSlowPathFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_);
-  bool GetSlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_);
-  void SetSlowPath(bool enabled) SHARED_REQUIRES(Locks::mutator_lock_);
+  MemberOffset GetDisableIntrinsicFlagOffset() REQUIRES_SHARED(Locks::mutator_lock_);
+  MemberOffset GetSlowPathFlagOffset() REQUIRES_SHARED(Locks::mutator_lock_);
+  bool GetSlowPathEnabled() REQUIRES_SHARED(Locks::mutator_lock_);
+  void SetSlowPath(bool enabled) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  StringDexCacheType* GetDexCacheStrings() SHARED_REQUIRES(Locks::mutator_lock_);
+  StringDexCacheType* GetDexCacheStrings() REQUIRES_SHARED(Locks::mutator_lock_);
   void SetDexCacheStrings(StringDexCacheType* new_dex_cache_strings)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   static MemberOffset DexCacheStringsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_strings_);
   }
@@ -1232,10 +1232,10 @@
   // May cause thread suspension due to EqualParameters.
   ArtMethod* GetDeclaredConstructor(
       Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   static int32_t GetInnerClassFlags(Handle<Class> h_this, int32_t default_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
   // fence.
@@ -1245,7 +1245,7 @@
     }
 
     void operator()(mirror::Object* obj, size_t usable_size) const
-        SHARED_REQUIRES(Locks::mutator_lock_);
+        REQUIRES_SHARED(Locks::mutator_lock_);
 
    private:
     const uint32_t class_size_;
@@ -1254,7 +1254,7 @@
   };
 
   // Returns true if the class loader is null, i.e. the class loader is the bootstrap class loader.
-  bool IsBootStrapClassLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsBootStrapClassLoaded() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetClassLoader() == nullptr;
   }
 
@@ -1267,20 +1267,20 @@
   }
 
   ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSliceUnchecked(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSliceUnchecked(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSliceUnchecked(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSliceUnchecked(
       PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSliceUnchecked(PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Fix up all of the native pointers in the class by running them through the visitor. Only sets
   // the corresponding entry in dest if visitor(obj) != obj to prevent dirty memory. Dest should be
@@ -1290,47 +1290,47 @@
             ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
             typename Visitor>
   void FixupNativePointers(mirror::Class* dest, PointerSize pointer_size, const Visitor& visitor)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   ALWAYS_INLINE void SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetVerifyError(Object* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetVerifyError(Object* klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <bool throw_on_failure, bool use_referrers_cache>
   bool ResolvedFieldAccessTest(Class* access_to, ArtField* field,
                                uint32_t field_idx, DexCache* dex_cache)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type>
   bool ResolvedMethodAccessTest(Class* access_to, ArtMethod* resolved_method,
                                 uint32_t method_idx, DexCache* dex_cache)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool Implements(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
-  bool IsArrayAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
-  bool IsAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool Implements(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsArrayAssignableFromArray(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsAssignableFromArray(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void CheckObjectAlloc() SHARED_REQUIRES(Locks::mutator_lock_);
+  void CheckObjectAlloc() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Unchecked editions are for root visiting.
-  LengthPrefixedArray<ArtField>* GetSFieldsPtrUnchecked() SHARED_REQUIRES(Locks::mutator_lock_);
+  LengthPrefixedArray<ArtField>* GetSFieldsPtrUnchecked() REQUIRES_SHARED(Locks::mutator_lock_);
   IterationRange<StrideIterator<ArtField>> GetSFieldsUnchecked()
-      SHARED_REQUIRES(Locks::mutator_lock_);
-  LengthPrefixedArray<ArtField>* GetIFieldsPtrUnchecked() SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  LengthPrefixedArray<ArtField>* GetIFieldsPtrUnchecked() REQUIRES_SHARED(Locks::mutator_lock_);
   IterationRange<StrideIterator<ArtField>> GetIFieldsUnchecked()
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The index in the methods_ array where the first declared virtual method is.
-  ALWAYS_INLINE uint32_t GetVirtualMethodsStartOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t GetVirtualMethodsStartOffset() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The index in the methods_ array where the first direct method is.
-  ALWAYS_INLINE uint32_t GetDirectMethodsStartOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t GetDirectMethodsStartOffset() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The index in the methods_ array where the first copied method is.
-  ALWAYS_INLINE uint32_t GetCopiedMethodsStartOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t GetCopiedMethodsStartOffset() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool ProxyDescriptorEquals(const char* match) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Check that the pointer size matches the one in the class linker.
   ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
@@ -1341,7 +1341,7 @@
             ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
             typename Visitor>
   void VisitReferences(mirror::Class* klass, const Visitor& visitor)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // 'Class' Object Fields
   // Order governed by Java field ordering. See art::ClassLinker::LinkFields.
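
For context: REQUIRES_SHARED is the name used by Clang's thread-safety
analysis, where it maps to the requires_shared_capability attribute, and
libbase's thread_annotations.h spells it the same way. A simplified sketch
of the macro pair, adapted from the Clang documentation (not copied from
the ART tree):

  #if defined(__clang__)
  #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
  #else
  #define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op on other compilers
  #endif

  // Callers must hold the capability exclusively.
  #define REQUIRES(...) \
      THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))

  // Callers must hold the capability at least in shared (reader) mode.
  #define REQUIRES_SHARED(...) \
      THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
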
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index 1957e13..407678a 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -36,26 +36,26 @@
     return sizeof(ClassLoader);
   }
 
-  ClassLoader* GetParent() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ClassLoader* GetParent() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, parent_));
   }
 
-  ClassTable* GetClassTable() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ClassTable* GetClassTable() REQUIRES_SHARED(Locks::mutator_lock_) {
     return reinterpret_cast<ClassTable*>(
         GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_)));
   }
 
-  void SetClassTable(ClassTable* class_table) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetClassTable(ClassTable* class_table) REQUIRES_SHARED(Locks::mutator_lock_) {
     SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_),
                       reinterpret_cast<uint64_t>(class_table));
   }
 
-  LinearAlloc* GetAllocator() SHARED_REQUIRES(Locks::mutator_lock_) {
+  LinearAlloc* GetAllocator() REQUIRES_SHARED(Locks::mutator_lock_) {
     return reinterpret_cast<LinearAlloc*>(
         GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, allocator_)));
   }
 
-  void SetAllocator(LinearAlloc* allocator) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetAllocator(LinearAlloc* allocator) REQUIRES_SHARED(Locks::mutator_lock_) {
     SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, allocator_),
                       reinterpret_cast<uint64_t>(allocator));
   }
@@ -68,7 +68,7 @@
             ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
             typename Visitor>
   void VisitReferences(mirror::Class* klass, const Visitor& visitor)
-      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::classlinker_classes_lock_);
 
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
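
A minimal, self-contained illustration of what the analysis enforces at
call sites of a REQUIRES_SHARED function. The capability macros follow the
example header in Clang's thread-safety documentation; SharedMutex, gLock,
gValue and the functions here are hypothetical:

  class CAPABILITY("mutex") SharedMutex {
   public:
    void ReaderLock() ACQUIRE_SHARED();
    void ReaderUnlock() RELEASE_SHARED();
  };

  SharedMutex gLock;
  int gValue GUARDED_BY(gLock);

  int ReadValue() REQUIRES_SHARED(gLock) {  // callers need gLock, shared
    return gValue;
  }

  int Ok() {
    gLock.ReaderLock();
    int v = ReadValue();  // fine: gLock is held in shared mode here
    gLock.ReaderUnlock();
    return v;
  }

  int Bad() {
    return ReadValue();  // -Wthread-safety: calling 'ReadValue' requires
  }                      // holding 'gLock'
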
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 4ddfc7b..caf00c2 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -105,20 +105,20 @@
             uint32_t num_resolved_methods,
             ArtField** resolved_fields,
             uint32_t num_resolved_fields,
-            PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+            PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
 
   void Fixup(ArtMethod* trampoline, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
   void FixupStrings(StringDexCacheType* dest, const Visitor& visitor)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
   void FixupResolvedTypes(GcRoot<mirror::Class>* dest, const Visitor& visitor)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  String* GetLocation() SHARED_REQUIRES(Locks::mutator_lock_) {
+  String* GetLocation() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
   }
 
@@ -159,94 +159,94 @@
   }
 
   mirror::String* GetResolvedString(uint32_t string_idx) ALWAYS_INLINE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetResolvedString(uint32_t string_idx, mirror::String* resolved) ALWAYS_INLINE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  Class* GetResolvedType(uint32_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_);
+  Class* GetResolvedType(uint32_t type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetResolvedType(uint32_t type_idx, Class* resolved) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetResolvedType(uint32_t type_idx, Class* resolved) REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx,
                                        ArtMethod* resolved,
                                        PointerSize ptr_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Pointer-sized variant, used for patching.
   ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, PointerSize ptr_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Pointer-sized variant, used for patching.
   ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  StringDexCacheType* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+  StringDexCacheType* GetStrings() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldPtr64<StringDexCacheType*>(StringsOffset());
   }
 
-  void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldPtr<false>(StringsOffset(), strings);
   }
 
-  GcRoot<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+  GcRoot<Class>* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldPtr<GcRoot<Class>*>(ResolvedTypesOffset());
   }
 
   void SetResolvedTypes(GcRoot<Class>* resolved_types)
       ALWAYS_INLINE
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldPtr<false>(ResolvedTypesOffset(), resolved_types);
   }
 
-  ArtMethod** GetResolvedMethods() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+  ArtMethod** GetResolvedMethods() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldPtr<ArtMethod**>(ResolvedMethodsOffset());
   }
 
   void SetResolvedMethods(ArtMethod** resolved_methods)
       ALWAYS_INLINE
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldPtr<false>(ResolvedMethodsOffset(), resolved_methods);
   }
 
-  ArtField** GetResolvedFields() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+  ArtField** GetResolvedFields() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldPtr<ArtField**>(ResolvedFieldsOffset());
   }
 
   void SetResolvedFields(ArtField** resolved_fields)
       ALWAYS_INLINE
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldPtr<false>(ResolvedFieldsOffset(), resolved_fields);
   }
 
-  size_t NumStrings() SHARED_REQUIRES(Locks::mutator_lock_) {
+  size_t NumStrings() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32(NumStringsOffset());
   }
 
-  size_t NumResolvedTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
+  size_t NumResolvedTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32(NumResolvedTypesOffset());
   }
 
-  size_t NumResolvedMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
+  size_t NumResolvedMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32(NumResolvedMethodsOffset());
   }
 
-  size_t NumResolvedFields() SHARED_REQUIRES(Locks::mutator_lock_) {
+  size_t NumResolvedFields() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32(NumResolvedFieldsOffset());
   }
 
-  const DexFile* GetDexFile() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+  const DexFile* GetDexFile() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
   }
 
-  void SetDexFile(const DexFile* dex_file) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetDexFile(const DexFile* dex_file) REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
   }
 
-  void SetLocation(mirror::String* location) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetLocation(mirror::String* location) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // NOTE: Get/SetElementPtrSize() are intended for working with ArtMethod** and ArtField**
   // provided by GetResolvedMethods/Fields() and ArtMethod::GetDexCacheResolvedMethods(),
@@ -265,7 +265,7 @@
             ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
             typename Visitor>
   void VisitReferences(mirror::Class* klass, const Visitor& visitor)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
 
   HeapReference<Object> dex_;
   HeapReference<String> location_;
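
The accessors above form the dex-cache fast path: resolution results are
published into the cache so that later lookups can skip the ClassLinker. A
sketch of that round trip using a hypothetical helper (LookupOrStoreType is
not an ART function):

  mirror::Class* LookupOrStoreType(mirror::DexCache* cache,
                                   uint32_t type_idx,
                                   mirror::Class* resolved)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    mirror::Class* klass = cache->GetResolvedType(type_idx);
    if (klass == nullptr) {
      cache->SetResolvedType(type_idx, resolved);  // publish for later hits
      klass = resolved;
    }
    return klass;
  }
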
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index 93fd7f1..7eb9da4 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -37,66 +37,66 @@
 // C++ mirror of java.lang.reflect.Field.
 class MANAGED Field : public AccessibleObject {
  public:
-  static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return static_class_.Read();
   }
 
-  static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static mirror::Class* ArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return array_class_.Read();
   }
 
-  ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE uint32_t GetDexFieldIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_));
   }
 
-  mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_));
   }
 
-  uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_));
   }
 
-  bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsStatic() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccStatic) != 0;
   }
 
-  bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsFinal() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
-  bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsVolatile() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccVolatile) != 0;
   }
 
   ALWAYS_INLINE Primitive::Type GetTypeAsPrimitiveType()
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetType()->GetPrimitiveType();
   }
 
-  mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_) {
+  mirror::Class* GetType() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObject<mirror::Class>(OFFSET_OF_OBJECT_MEMBER(Field, type_));
   }
 
-  int32_t GetOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
+  int32_t GetOffset() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_));
   }
 
-  static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
-  static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+  static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
-  static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  static void SetArrayClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+  static void ResetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Slow; try to use only for PrettyField and such.
-  ArtField* GetArtField() SHARED_REQUIRES(Locks::mutator_lock_);
+  ArtField* GetArtField() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <PointerSize kPointerSize, bool kTransactionActive = false>
   static mirror::Field* CreateFromArtField(Thread* self, ArtField* field,
                                            bool force_resolve)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
  private:
   HeapReference<mirror::Class> declaring_class_;
@@ -106,27 +106,27 @@
   int32_t offset_;
 
   template<bool kTransactionActive>
-  void SetDeclaringClass(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetDeclaringClass(mirror::Class* c) REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c);
   }
 
   template<bool kTransactionActive>
-  void SetType(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetType(mirror::Class* type) REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type);
   }
 
   template<bool kTransactionActive>
-  void SetAccessFlags(uint32_t flags) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetAccessFlags(uint32_t flags) REQUIRES_SHARED(Locks::mutator_lock_) {
     SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_), flags);
   }
 
   template<bool kTransactionActive>
-  void SetDexFieldIndex(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetDexFieldIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
     SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_), idx);
   }
 
   template<bool kTransactionActive>
-  void SetOffset(uint32_t offset) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetOffset(uint32_t offset) REQUIRES_SHARED(Locks::mutator_lock_) {
     SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, offset_), offset);
   }
 
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index d6571f2..a1a2f98 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -25,18 +25,18 @@
 
 class MANAGED IfTable FINAL : public ObjectArray<Object> {
  public:
-  ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
+  ALWAYS_INLINE Class* GetInterface(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
     Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass();
     DCHECK(interface != nullptr);
     return interface;
   }
 
   ALWAYS_INLINE void SetInterface(int32_t i, Class* interface)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  PointerArray* GetMethodArray(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
+  PointerArray* GetMethodArray(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
     auto* method_array = down_cast<PointerArray*>(Get<kVerifyFlags, kReadBarrierOption>(
         (i * kMax) + kMethodArray));
     DCHECK(method_array != nullptr);
@@ -45,20 +45,20 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  size_t GetMethodArrayCount(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
+  size_t GetMethodArrayCount(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
     auto* method_array = down_cast<PointerArray*>(
         Get<kVerifyFlags, kReadBarrierOption>((i * kMax) + kMethodArray));
     return method_array == nullptr ? 0u : method_array->GetLength();
   }
 
-  void SetMethodArray(int32_t i, PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetMethodArray(int32_t i, PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(arr != nullptr);
     auto idx = i * kMax + kMethodArray;
     DCHECK(Get(idx) == nullptr);
     Set<false>(idx, arr);
   }
 
-  size_t Count() SHARED_REQUIRES(Locks::mutator_lock_) {
+  size_t Count() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetLength() / kMax;
   }
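
The (i * kMax) + kInterface and (i * kMax) + kMethodArray arithmetic above
works because IfTable is a flat ObjectArray in which each interface owns
kMax consecutive slots. Assuming the enum values declared earlier in
iftable.h (kInterface = 0, kMethodArray = 1, kMax = 2; not visible in these
hunks), the layout and indexing are:

  // [0] interface 0    [1] method array for interface 0
  // [2] interface 1    [3] method array for interface 1
  // ... and Count() == GetLength() / kMax, as above.
  int32_t InterfaceSlot(int32_t i)   { return i * 2 + 0; }  // i * kMax + kInterface
  int32_t MethodArraySlot(int32_t i) { return i * 2 + 1; }  // i * kMax + kMethodArray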
 
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index be51784..6881991 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -30,25 +30,25 @@
  public:
   template <PointerSize kPointerSize, bool kTransactionActive>
   static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
-  static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return static_class_.Read();
   }
 
-  static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static mirror::Class* ArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return array_class_.Read();
   }
 
-  static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void SetArrayClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  static void ResetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   static GcRoot<Class> static_class_;  // java.lang.reflect.Method.class.
@@ -62,25 +62,25 @@
  public:
   template <PointerSize kPointerSize, bool kTransactionActive>
   static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
-  static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return static_class_.Read();
   }
 
-  static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static mirror::Class* ArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return array_class_.Read();
   }
 
-  static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void SetArrayClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  static void ResetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   static GcRoot<Class> static_class_;  // java.lang.reflect.Constructor.class.
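
Method and Constructor each cache their java.lang.Class (and its array
class) in a GcRoot, as the private members show. A sketch of that pattern
in isolation; the shape is simplified and the real definitions live in
method.cc:

  GcRoot<Class> static_class_;  // installed once during startup

  void SetClass(Class* klass) {
    CHECK(static_class_.IsNull());
    static_class_ = GcRoot<Class>(klass);
  }

  Class* StaticClass() {
    return static_class_.Read();  // read-barrier-aware decode of the root
  }

  void VisitRoots(RootVisitor* visitor) {
    static_class_.VisitRoot(visitor, RootInfo(kRootStickyClass));
  }
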
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 27f8bd7..0f5cbb2 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -542,7 +542,7 @@
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
     bool kIsVolatile>
 inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (kCheckTransaction) {
     DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
   }
@@ -560,7 +560,7 @@
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
     bool kIsVolatile>
 inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (kCheckTransaction) {
     DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
   }
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 13c536e..c37deb5 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -47,7 +47,7 @@
       : dest_obj_(dest_obj) {}
 
   void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
-      ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
     // GetFieldObject() contains a read barrier.
     Object* ref = obj->GetFieldObject<Object>(offset);
     // No write barrier here as a large object space does not have a card table
@@ -56,7 +56,7 @@
   }
 
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
-      ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
     // Copy java.lang.ref.Reference.referent which isn't visited in
     // Object::VisitReferences().
     DCHECK(klass->IsTypeOfReferenceClass());
@@ -112,7 +112,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     Object::CopyObject(self_, obj, orig_->Get(), num_bytes_);
   }
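
The object.cc hunks annotate functor call operators rather than named
methods; the analysis treats operator() like any other function, so a
visitor handed to an object walk carries its lock requirement to every call
site. A minimal sketch with a hypothetical visitor:

  class FieldOffsetPrinter {
   public:
    void operator()(mirror::Object* obj ATTRIBUTE_UNUSED,
                    MemberOffset offset,
                    bool is_static ATTRIBUTE_UNUSED) const
        REQUIRES_SHARED(Locks::mutator_lock_) {
      // Each invocation is checked against the caller's held capabilities.
      LOG(INFO) << "field at offset " << offset.Uint32Value();
    }
  };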
 
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 8649294..262cb57 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -88,49 +88,49 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ALWAYS_INLINE Class* GetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE Class* GetClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  void SetClass(Class* new_klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetClass(Class* new_klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // TODO: Clean these up and change to return int32_t
-  Object* GetReadBarrierPointer() SHARED_REQUIRES(Locks::mutator_lock_);
+  Object* GetReadBarrierPointer() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get the read barrier pointer with acquire semantics; only supported for Baker.
-  Object* GetReadBarrierPointerAcquire() SHARED_REQUIRES(Locks::mutator_lock_);
+  Object* GetReadBarrierPointerAcquire() REQUIRES_SHARED(Locks::mutator_lock_);
 
 #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
   NO_RETURN
 #endif
-  void SetReadBarrierPointer(Object* rb_ptr) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetReadBarrierPointer(Object* rb_ptr) REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kCasRelease = false>
   ALWAYS_INLINE bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ALWAYS_INLINE uint32_t GetMarkBit() SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t GetMarkBit() REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE bool AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void AssertReadBarrierPointer() const SHARED_REQUIRES(Locks::mutator_lock_);
+  void AssertReadBarrierPointer() const REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
   // invoke-interface to detect incompatible interface types.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool VerifierInstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool VerifierInstanceOf(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE bool InstanceOf(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
+  size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  Object* Clone(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
+  Object* Clone(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
   int32_t IdentityHashCode() const
-      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   static MemberOffset MonitorOffset() {
@@ -140,356 +140,356 @@
   // as_volatile can be false if the mutators are suspended. This is an optimization since it
   // avoids the barriers.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  LockWord GetLockWord(bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_);
+  LockWord GetLockWord(bool as_volatile) REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  void SetLockWord(LockWord new_val, bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetLockWord(LockWord new_val, bool as_volatile) REQUIRES_SHARED(Locks::mutator_lock_);
   bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   bool CasLockWordWeakRelease(LockWord old_val, LockWord new_val)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   uint32_t GetLockOwnerThreadId();
 
   // Try to enter the monitor; returns non-null if we succeeded.
   mirror::Object* MonitorTryEnter(Thread* self)
       EXCLUSIVE_LOCK_FUNCTION()
       REQUIRES(!Roles::uninterruptible_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   mirror::Object* MonitorEnter(Thread* self)
       EXCLUSIVE_LOCK_FUNCTION()
       REQUIRES(!Roles::uninterruptible_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   bool MonitorExit(Thread* self)
       REQUIRES(!Roles::uninterruptible_)
-      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       UNLOCK_FUNCTION();
-  void Notify(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
-  void NotifyAll(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
-  void Wait(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
-  void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_REQUIRES(Locks::mutator_lock_);
+  void Notify(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+  void NotifyAll(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+  void Wait(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+  void Wait(Thread* self, int64_t timeout, int32_t nanos) REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsClass() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Class* AsClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  Class* AsClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsObjectArray() REQUIRES_SHARED(Locks::mutator_lock_);
   template<class T,
            VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ObjectArray<T>* AsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  ObjectArray<T>* AsObjectArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ClassLoader* AsClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
+  ClassLoader* AsClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  DexCache* AsDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
+  DexCache* AsDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsArrayInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsArrayInstance() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Array* AsArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  Array* AsArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  BooleanArray* AsBooleanArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  BooleanArray* AsBooleanArray() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ByteArray* AsByteArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  ByteArray* AsByteArray() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ByteArray* AsByteSizedArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  ByteArray* AsByteSizedArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  CharArray* AsCharArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  CharArray* AsCharArray() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ShortArray* AsShortArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  ShortArray* AsShortArray() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ShortArray* AsShortSizedArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  ShortArray* AsShortSizedArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  IntArray* AsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  IntArray* AsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  LongArray* AsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  LongArray* AsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsFloatArray() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  FloatArray* AsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  FloatArray* AsFloatArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsDoubleArray() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  DoubleArray* AsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_);
+  DoubleArray* AsDoubleArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsString() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsString() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  String* AsString() SHARED_REQUIRES(Locks::mutator_lock_);
+  String* AsString() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  Throwable* AsThrowable() SHARED_REQUIRES(Locks::mutator_lock_);
+  Throwable* AsThrowable() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Reference* AsReference() SHARED_REQUIRES(Locks::mutator_lock_);
+  Reference* AsReference() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsWeakReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsWeakReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsSoftReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsSoftReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsFinalizerReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsFinalizerReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  FinalizerReference* AsFinalizerReference() SHARED_REQUIRES(Locks::mutator_lock_);
+  FinalizerReference* AsFinalizerReference() REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPhantomReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsPhantomReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Accessors for Java type fields.
   template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
       ReadBarrierOption kReadBarrierOption = kWithReadBarrier, bool kIsVolatile = false>
   ALWAYS_INLINE T* GetFieldObject(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
       ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ALWAYS_INLINE T* GetFieldObjectVolatile(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, Object* new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
                                                 Object* new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                                    Object* old_value,
                                                                    Object* new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
                                                   Object* new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                                      Object* old_value,
                                                                      Object* new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldWeakRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                     Object* old_value,
                                                     Object* new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                       Object* old_value,
                                                       Object* new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE int8_t GetFieldByte(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE uint8_t GetFieldBooleanVolatile(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE uint16_t GetFieldChar(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE int16_t GetFieldShort(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE uint16_t GetFieldCharVolatile(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetField32(MemberOffset field_offset, int32_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetField32Volatile(MemberOffset field_offset, int32_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE bool CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset,
                                                           int32_t old_value, int32_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldWeakRelaxed32(MemberOffset field_offset, int32_t old_value,
                              int32_t new_value) ALWAYS_INLINE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldWeakRelease32(MemberOffset field_offset, int32_t old_value,
                              int32_t new_value) ALWAYS_INLINE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value,
                                               int32_t new_value) ALWAYS_INLINE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE int64_t GetField64(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE int64_t GetField64Volatile(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetField64(MemberOffset field_offset, int64_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetField64Volatile(MemberOffset field_offset, int64_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
                                             int64_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
                                               int64_t new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
   void SetFieldPtr(MemberOffset field_offset, T new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
         field_offset, new_value, kRuntimePointerSize);
   }
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
   void SetFieldPtr64(MemberOffset field_offset, T new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
         field_offset, new_value, 8u);
   }
@@ -499,7 +499,7 @@
   ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset,
                                          T new_value,
                                          PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     if (pointer_size == PointerSize::k32) {
       intptr_t ptr  = reinterpret_cast<intptr_t>(new_value);
       DCHECK_EQ(static_cast<int32_t>(ptr), ptr);  // Check that we don't lose any non-zero bits.
@@ -511,7 +511,7 @@
     }
   }
   // TODO fix thread safety analysis broken by the use of templates. This should be
-  // SHARED_REQUIRES(Locks::mutator_lock_).
+  // REQUIRES_SHARED(Locks::mutator_lock_).
   template <bool kVisitNativeRoots = true,
             VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
             ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
@@ -520,7 +520,7 @@
   void VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor)
       NO_THREAD_SAFETY_ANALYSIS;
 
-  ArtField* FindFieldByOffset(MemberOffset offset) SHARED_REQUIRES(Locks::mutator_lock_);
+  ArtField* FindFieldByOffset(MemberOffset offset) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Used by object_test.
   static void SetHashCodeSeed(uint32_t new_seed);
@@ -531,19 +531,19 @@
   // Accessors for non-Java type fields.
   template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   T GetFieldPtr(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, kRuntimePointerSize);
   }
   template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   T GetFieldPtr64(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset,
                                                              PointerSize::k64);
   }
 
   template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     if (pointer_size == PointerSize::k32) {
       return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
     } else {
@@ -563,31 +563,31 @@
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
            typename Visitor>
   void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
            typename Visitor>
   void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   template<typename kSize, bool kIsVolatile>
   ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   template<typename kSize, bool kIsVolatile>
   ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   // Get a field with acquire semantics.
   template<typename kSize>
   ALWAYS_INLINE kSize GetFieldAcquire(MemberOffset field_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Verify the type correctness of stores to fields.
   // TODO: This can cause thread suspension and isn't moving-GC safe.
   void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void CheckFieldAssignment(MemberOffset field_offset, Object* new_value)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kCheckFieldAssignments) {
       CheckFieldAssignmentImpl(field_offset, new_value);
     }
@@ -598,7 +598,7 @@
   // Class::CopyOf().
   static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
                             size_t num_bytes)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   static Atomic<uint32_t> hash_code_seed;
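
For reference, REQUIRES_SHARED corresponds to Clang's requires_shared_capability
thread-safety attribute. A minimal sketch of the macro shape, assuming the usual
thread-annotation wrapper (not the verbatim ART definition, which may differ in
detail):

    // Hedged sketch only.
    #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
    #define REQUIRES_SHARED(...) \
      THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

With this spelling, Locks::mutator_lock_ must be held at least shared by every
caller of the annotated methods above; an exclusive holder satisfies it too.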
 
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index a99d616..19b9d87 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -32,23 +32,23 @@
 
   static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length,
                                gc::AllocatorType allocator_type)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ALWAYS_INLINE T* Get(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE T* Get(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns true if the object can be stored into the array. If not, throws
   // an ArrayStoreException and returns false.
-  // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
+  // TODO fix thread safety analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS;
 
-  ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_REQUIRES(Locks::mutator_lock_);
-  // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
+  ALWAYS_INLINE void Set(int32_t i, T* object) REQUIRES_SHARED(Locks::mutator_lock_);
+  // TODO fix thread safety analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
@@ -56,41 +56,41 @@
   // Set element without bounds and element type checks, to be used in limited
   // circumstances, such as during boot image writing.
   // TODO fix thread safety analysis broken by the use of templates. This should be
-  // SHARED_REQUIRES(Locks::mutator_lock_).
+  // REQUIRES_SHARED(Locks::mutator_lock_).
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
   // TODO fix thread safety analysis broken by the use of templates. This should be
-  // SHARED_REQUIRES(Locks::mutator_lock_).
+  // REQUIRES_SHARED(Locks::mutator_lock_).
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, T* object)
       NO_THREAD_SAFETY_ANALYSIS;
 
-  ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE T* GetWithoutChecks(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Copy src into this array (dealing with overlaps as memmove does) without assignability checks.
   void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
-                         int32_t count) SHARED_REQUIRES(Locks::mutator_lock_);
+                         int32_t count) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Copy src into this array assuming no overlap and without assignability checks.
   void AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
-                        int32_t count) SHARED_REQUIRES(Locks::mutator_lock_);
+                        int32_t count) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Copy src into this array with assignability checks.
   template<bool kTransactionActive>
   void AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
                                 int32_t count, bool throw_exception)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ObjectArray<T>* CopyOf(Thread* self, int32_t new_length)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   static MemberOffset OffsetOfElement(int32_t i);
 
  private:
   // TODO fix thread safety analysis broken by the use of templates. This should be
-  // SHARED_REQUIRES(Locks::mutator_lock_).
+  // REQUIRES_SHARED(Locks::mutator_lock_).
   template<typename Visitor>
   void VisitReferences(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS;
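
The NO_THREAD_SAFETY_ANALYSIS escapes above work around Clang's analysis not
seeing through these templates; the intended contract is still a shared hold on
the mutator lock. A self-contained illustration of that contract under
-Wthread-safety (SharedLock, g_lock, g_value and ReadValue are hypothetical
stand-ins, not ART types):

    #include <shared_mutex>

    class __attribute__((capability("mutex"))) SharedLock {
     public:
      void ReaderLock() __attribute__((acquire_shared_capability())) { mu_.lock_shared(); }
      void ReaderUnlock() __attribute__((release_shared_capability())) { mu_.unlock_shared(); }
     private:
      std::shared_mutex mu_;
    };

    SharedLock g_lock;
    int g_value __attribute__((guarded_by(g_lock)));

    // Mirrors REQUIRES_SHARED(Locks::mutator_lock_): callers must hold g_lock
    // at least shared; Clang flags any call site that does not.
    int ReadValue() __attribute__((requires_shared_capability(g_lock))) {
      return g_value;
    }

Calling ReadValue() outside a ReaderLock()/ReaderUnlock() pair draws a
-Wthread-safety-analysis warning at compile time.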
 
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 2a5c88e..583cfc3 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -33,11 +33,11 @@
 template<bool kPoisonReferences, class MirrorType>
 class MANAGED ObjectReference {
  public:
-  MirrorType* AsMirrorPtr() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  MirrorType* AsMirrorPtr() const REQUIRES_SHARED(Locks::mutator_lock_) {
     return UnCompress();
   }
 
-  void Assign(MirrorType* other) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void Assign(MirrorType* other) REQUIRES_SHARED(Locks::mutator_lock_) {
     reference_ = Compress(other);
   }
 
@@ -56,18 +56,18 @@
 
  protected:
   ObjectReference<kPoisonReferences, MirrorType>(MirrorType* mirror_ptr)
-      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       : reference_(Compress(mirror_ptr)) {
   }
 
   // Compress reference to its bit representation.
-  static uint32_t Compress(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
+  static uint32_t Compress(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
     uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr);
     return static_cast<uint32_t>(kPoisonReferences ? -as_bits : as_bits);
   }
 
   // Uncompress an encoded reference from its bit representation.
-  MirrorType* UnCompress() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  MirrorType* UnCompress() const REQUIRES_SHARED(Locks::mutator_lock_) {
     uintptr_t as_bits = kPoisonReferences ? -reference_ : reference_;
     return reinterpret_cast<MirrorType*>(as_bits);
   }
@@ -83,11 +83,11 @@
 class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, MirrorType> {
  public:
   static HeapReference<MirrorType> FromMirrorPtr(MirrorType* mirror_ptr)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     return HeapReference<MirrorType>(mirror_ptr);
   }
  private:
-  HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_)
+  HeapReference<MirrorType>(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
       : ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
 };
 
@@ -95,16 +95,16 @@
 template<class MirrorType>
 class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> {
  public:
-  CompressedReference<MirrorType>() SHARED_REQUIRES(Locks::mutator_lock_)
+  CompressedReference<MirrorType>() REQUIRES_SHARED(Locks::mutator_lock_)
       : mirror::ObjectReference<false, MirrorType>(nullptr) {}
 
   static CompressedReference<MirrorType> FromMirrorPtr(MirrorType* p)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     return CompressedReference<MirrorType>(p);
   }
 
  private:
-  CompressedReference<MirrorType>(MirrorType* p) SHARED_REQUIRES(Locks::mutator_lock_)
+  CompressedReference<MirrorType>(MirrorType* p) REQUIRES_SHARED(Locks::mutator_lock_)
       : mirror::ObjectReference<false, MirrorType>(p) {}
 };
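
Compress/UnCompress above implement reference poisoning: with kPoisonReferences
set, the stored bits are the arithmetic negation of the pointer, so code that
loads the raw field and dereferences it without UnCompress faults immediately.
A hedged standalone sketch of the round trip (valid because heap references fit
in 32 bits, as the static_cast above already assumes):

    #include <cstdint>

    uint32_t Poison(void* ptr) {
      uintptr_t as_bits = reinterpret_cast<uintptr_t>(ptr);
      return static_cast<uint32_t>(-as_bits);  // modular negation, truncated
    }

    void* Unpoison(uint32_t bits) {
      // Negating again in 32-bit arithmetic recovers the original pointer.
      return reinterpret_cast<void*>(static_cast<uintptr_t>(-bits));
    }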
 
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index b35a479..afd6115 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -49,7 +49,7 @@
                     const char* utf8_in,
                     const char* utf16_expected_le,
                     int32_t expected_hash)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     std::unique_ptr<uint16_t[]> utf16_expected(new uint16_t[expected_utf16_length]);
     for (int32_t i = 0; i < expected_utf16_length; i++) {
       uint16_t ch = (((utf16_expected_le[i*2 + 0] & 0xff) << 8) |
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 38c6616..6a8b32b 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -64,26 +64,26 @@
     return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
   }
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Object* GetReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
+  Object* GetReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>(
         ReferentOffset());
   }
   template<bool kTransactionActive>
-  void SetReferent(Object* referent) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetReferent(Object* referent) REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
   }
   template<bool kTransactionActive>
-  void ClearReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
+  void ClearReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
   }
 
   template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) {
+  Reference* GetPendingNext() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObject<Reference, kDefaultVerifyFlags, kReadBarrierOption>(PendingNextOffset());
   }
 
   void SetPendingNext(Reference* pending_next)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     if (Runtime::Current()->IsActiveTransaction()) {
       SetFieldObject<true>(PendingNextOffset(), pending_next);
     } else {
@@ -103,22 +103,22 @@
   // should not be processed again until and unless the reference has been
   // removed from the list after it has been determined that the reference is
   // not ready to be enqueued on a Java ReferenceQueue.
-  bool IsUnprocessed() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsUnprocessed() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetPendingNext<kWithoutReadBarrier>() == nullptr;
   }
 
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  static Class* GetJavaLangRefReference() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static Class* GetJavaLangRefReference() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!java_lang_ref_Reference_.IsNull());
     return java_lang_ref_Reference_.Read<kReadBarrierOption>();
   }
   static void SetClass(Class* klass);
   static void ResetClass();
-  static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   // Note: This avoids a read barrier; it should only be used by the GC.
-  HeapReference<Object>* GetReferentReferenceAddr() SHARED_REQUIRES(Locks::mutator_lock_) {
+  HeapReference<Object>* GetReferentReferenceAddr() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObjectReferenceAddr<kDefaultVerifyFlags>(ReferentOffset());
   }
 
@@ -144,10 +144,10 @@
   }
 
   template<bool kTransactionActive>
-  void SetZombie(Object* zombie) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetZombie(Object* zombie) REQUIRES_SHARED(Locks::mutator_lock_) {
     return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
   }
-  Object* GetZombie() SHARED_REQUIRES(Locks::mutator_lock_) {
+  Object* GetZombie() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObjectVolatile<Object>(ZombieOffset());
   }
 
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 1167391..4b3d9d0 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -31,32 +31,32 @@
 // C++ mirror of java.lang.StackTraceElement
 class MANAGED StackTraceElement FINAL : public Object {
  public:
-  String* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  String* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_));
   }
 
-  String* GetMethodName() SHARED_REQUIRES(Locks::mutator_lock_) {
+  String* GetMethodName() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_));
   }
 
-  String* GetFileName() SHARED_REQUIRES(Locks::mutator_lock_) {
+  String* GetFileName() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_));
   }
 
-  int32_t GetLineNumber() SHARED_REQUIRES(Locks::mutator_lock_) {
+  int32_t GetLineNumber() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_));
   }
 
   static StackTraceElement* Alloc(Thread* self, Handle<String> declaring_class,
                                   Handle<String> method_name, Handle<String> file_name,
                                   int32_t line_number)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   static void SetClass(Class* java_lang_StackTraceElement);
   static void ResetClass();
   static void VisitRoots(RootVisitor* visitor)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-  static Class* GetStackTraceElement() SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  static Class* GetStackTraceElement() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!java_lang_StackTraceElement_.IsNull());
     return java_lang_StackTraceElement_.Read();
   }
@@ -71,7 +71,7 @@
   template<bool kTransactionActive>
   void Init(Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name,
             int32_t line_number)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   static GcRoot<Class> java_lang_StackTraceElement_;
 
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index bc39ea8..86e5139 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -44,7 +44,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     String* string = down_cast<String*>(obj);
     string->SetCount(count_);
@@ -64,7 +64,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     String* string = down_cast<String*>(obj);
     string->SetCount(count_);
@@ -100,7 +100,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     String* string = down_cast<String*>(obj);
     string->SetCount(count_);
@@ -132,7 +132,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     String* string = down_cast<String*>(obj);
     string->SetCount(count_);
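
Each operator() above is a pre-fence visitor: String::Alloc (see string.h
below) runs it on the raw object before the publishing fence, which is why it
writes count_ directly rather than going through AsString. A hedged sketch of
the call shape (the visitor class name is assumed from context, since the
class declarations fall outside these hunks):

    // The visitor initializes count_ on the fresh object; Alloc then fences
    // and publishes it.
    SetStringCountVisitor visitor(utf16_length_with_flag);
    String* s = String::Alloc<kIsInstrumented>(self, utf16_length_with_flag,
                                               allocator_type, visitor);
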
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 8695fe8..a18692f 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -53,100 +53,100 @@
     return OFFSET_OF_OBJECT_MEMBER(String, value_);
   }
 
-  uint16_t* GetValue() SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint16_t* GetValue() REQUIRES_SHARED(Locks::mutator_lock_) {
     return &value_[0];
   }
 
-  uint8_t* GetValueCompressed() SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint8_t* GetValueCompressed() REQUIRES_SHARED(Locks::mutator_lock_) {
     return &value_compressed_[0];
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
+  size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The first/uppermost bit is masked out because it is not part of the actual length value.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) {
+  int32_t GetLength() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetLengthFromCount(GetCount<kVerifyFlags>());
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  int32_t GetCount() SHARED_REQUIRES(Locks::mutator_lock_) {
+  int32_t GetCount() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(String, count_));
   }
 
-  void SetCount(int32_t new_count) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetCount(int32_t new_count) REQUIRES_SHARED(Locks::mutator_lock_) {
     // Count is invariant so use non-transactional mode. Also disable check as we may run inside
     // a transaction.
     DCHECK_LE(0, (new_count & INT32_MAX));
     SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count);
   }
 
-  int32_t GetHashCode() SHARED_REQUIRES(Locks::mutator_lock_);
+  int32_t GetHashCode() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Computes, stores, and returns the hash code.
-  int32_t ComputeHashCode() SHARED_REQUIRES(Locks::mutator_lock_);
+  int32_t ComputeHashCode() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  int32_t GetUtfLength() SHARED_REQUIRES(Locks::mutator_lock_);
+  int32_t GetUtfLength() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  uint16_t CharAt(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_);
+  uint16_t CharAt(int32_t index) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetCharAt(int32_t index, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetCharAt(int32_t index, uint16_t c) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  String* Intern() SHARED_REQUIRES(Locks::mutator_lock_);
+  String* Intern() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <bool kIsInstrumented>
   ALWAYS_INLINE static String* AllocFromByteArray(Thread* self, int32_t byte_length,
                                                   Handle<ByteArray> array, int32_t offset,
                                                   int32_t high_byte,
                                                   gc::AllocatorType allocator_type)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   template <bool kIsInstrumented>
   ALWAYS_INLINE static String* AllocFromCharArray(Thread* self, int32_t count,
                                                   Handle<CharArray> array, int32_t offset,
                                                   gc::AllocatorType allocator_type)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   template <bool kIsInstrumented>
   ALWAYS_INLINE static String* AllocFromString(Thread* self, int32_t string_length,
                                                Handle<String> string, int32_t offset,
                                                gc::AllocatorType allocator_type)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   template <bool kIsInstrumented>
   ALWAYS_INLINE static String* AllocEmptyString(Thread* self,
                                                 gc::AllocatorType allocator_type)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   static String* AllocFromStrings(Thread* self, Handle<String> string, Handle<String> string2)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   static String* AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_t* utf16_data_in)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   static String* AllocFromModifiedUtf8(Thread* self, const char* utf)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length,
                                        const char* utf8_data_in, int32_t utf8_length)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, const char* utf8_data_in)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   // TODO: This is only used in the interpreter to compare against
   // entries from a dex file's constant pool (ArtField names). Should
   // we unify this with Equals(const StringPiece&)?
-  bool Equals(const char* modified_utf8) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool Equals(const char* modified_utf8) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // TODO: This is only used to compare DexCache.location with
   // a dex_file's location (which is a std::string). Do we really
   // need this in mirror::String just for that one usage?
   bool Equals(const StringPiece& modified_utf8)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool Equals(String* that) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool Equals(String* that) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Compare UTF-16 code point values; this comparison is not locale-sensitive.
   int Compare(int32_t utf16_length, const char* utf8_data_in);
@@ -154,31 +154,31 @@
   // TODO: do we need this overload? Give it a more intention-revealing name.
   bool Equals(const uint16_t* that_chars, int32_t that_offset,
               int32_t that_length)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Create a modified UTF-8 encoded std::string from a java/lang/String object.
-  std::string ToModifiedUtf8() SHARED_REQUIRES(Locks::mutator_lock_);
+  std::string ToModifiedUtf8() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_REQUIRES(Locks::mutator_lock_);
+  int32_t FastIndexOf(int32_t ch, int32_t start) REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <typename MemoryType>
   int32_t FastIndexOf(MemoryType* chars, int32_t ch, int32_t start)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  int32_t CompareTo(String* other) SHARED_REQUIRES(Locks::mutator_lock_);
+  int32_t CompareTo(String* other) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  CharArray* ToCharArray(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
+  CharArray* ToCharArray(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
   void GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsCompressed() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsCompressed() REQUIRES_SHARED(Locks::mutator_lock_) {
     return kUseStringCompression && GetCompressionFlagFromCount(GetCount());
   }
 
-  bool IsValueNull() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsValueNull() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<typename MemoryType>
   static bool AllASCII(const MemoryType* const chars, const int length);
@@ -195,17 +195,17 @@
     return kUseStringCompression ? (count | (1u << 31)) : count;
   }
 
-  static Class* GetJavaLangString() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static Class* GetJavaLangString() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!java_lang_String_.IsNull());
     return java_lang_String_.Read();
   }
 
-  static void SetClass(Class* java_lang_String) SHARED_REQUIRES(Locks::mutator_lock_);
-  static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
-  static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void SetClass(Class* java_lang_String) REQUIRES_SHARED(Locks::mutator_lock_);
+  static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void SetHashCode(int32_t new_hash_code) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetHashCode(int32_t new_hash_code) REQUIRES_SHARED(Locks::mutator_lock_) {
     // Hash code is invariant so use non-transactional mode. Also disable check as we may run inside
     // a transaction.
     DCHECK_EQ(0, GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_)));
@@ -216,7 +216,7 @@
   ALWAYS_INLINE static String* Alloc(Thread* self, int32_t utf16_length_with_flag,
                                      gc::AllocatorType allocator_type,
                                      const PreFenceVisitor& pre_fence_visitor)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
   // The first (uppermost/leftmost) bit is reserved for the compressed/uncompressed flag.
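
The helpers above pack the compression flag into that uppermost bit of count_.
GetLengthFromCount and GetCompressionFlagFromCount are named in the code above,
but their bodies fall outside these hunks; a hedged sketch consistent with the
visible callers:

    // count_ layout: bit 31 = compressed flag, bits 30..0 = UTF-16 length.
    static bool GetCompressionFlagFromCount(int32_t count) {
      return (static_cast<uint32_t>(count) >> 31) != 0;
    }

    static int32_t GetLengthFromCount(int32_t count) {
      return count & INT32_MAX;  // strip the flag bit
    }
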
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 0bccc8b..8f3ed84 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -54,7 +54,7 @@
   }
 }
 
-void Throwable::SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_) {
+void Throwable::SetStackState(Object* state) REQUIRES_SHARED(Locks::mutator_lock_) {
   CHECK(state != nullptr);
   if (Runtime::Current()->IsActiveTransaction()) {
     SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_), state);
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index 6aacc8d..76824cb 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -31,38 +31,38 @@
 // C++ mirror of java.lang.Throwable
 class MANAGED Throwable : public Object {
  public:
-  void SetDetailMessage(String* new_detail_message) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetDetailMessage(String* new_detail_message) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  String* GetDetailMessage() SHARED_REQUIRES(Locks::mutator_lock_) {
+  String* GetDetailMessage() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_));
   }
 
-  std::string Dump() SHARED_REQUIRES(Locks::mutator_lock_);
+  std::string Dump() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // This is a runtime version of initCause; you shouldn't use it if initCause may have been
   // overridden. Also, it asserts rather than throwing exceptions. Currently this is only used
   // in cases like the verifier where the checks cannot fail and initCause isn't overridden.
-  void SetCause(Throwable* cause) SHARED_REQUIRES(Locks::mutator_lock_);
-  void SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_);
-  bool IsCheckedException() SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetCause(Throwable* cause) REQUIRES_SHARED(Locks::mutator_lock_);
+  void SetStackState(Object* state) REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsCheckedException() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static Class* GetJavaLangThrowable() SHARED_REQUIRES(Locks::mutator_lock_) {
+  static Class* GetJavaLangThrowable() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!java_lang_Throwable_.IsNull());
     return java_lang_Throwable_.Read();
   }
 
-  int32_t GetStackDepth() SHARED_REQUIRES(Locks::mutator_lock_);
+  int32_t GetStackDepth() REQUIRES_SHARED(Locks::mutator_lock_);
 
   static void SetClass(Class* java_lang_Throwable);
   static void ResetClass();
   static void VisitRoots(RootVisitor* visitor)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  Object* GetStackState() SHARED_REQUIRES(Locks::mutator_lock_) {
+  Object* GetStackState() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_));
   }
-  Object* GetStackTrace() SHARED_REQUIRES(Locks::mutator_lock_) {
+  Object* GetStackTrace() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_));
   }
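
A hedged note on what these annotations enforce: compiling a caller that
invokes a REQUIRES_SHARED(Locks::mutator_lock_) method, such as
GetDetailMessage above, without the lock held produces a Clang diagnostic
roughly of the following form (exact wording varies by Clang version):

    // warning: calling function 'GetDetailMessage' requires holding
    // mutex 'mutator_lock_' [-Wthread-safety-analysis]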