ART: Rename SHARED_REQUIRES to REQUIRES_SHARED

The new name matches the actual Clang attribute name
(requires_shared_capability) and upstream usage. This is preparation for
deferring to the thread-safety macros in libbase.
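
For reference, a minimal sketch of how a REQUIRES_SHARED macro can map
onto Clang's thread-safety attributes. The definitions below are
illustrative only (THREAD_ANNOTATION_ATTRIBUTE__ is a conventional helper
name, not necessarily ART's); the real ART/libbase macros differ in detail:

  // Expand to a Clang attribute, or to nothing on other compilers.
  #if defined(__clang__)
  #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
  #else
  #define THREAD_ANNOTATION_ATTRIBUTE__(x)
  #endif

  // Caller must hold the capability exclusively.
  #define REQUIRES(...) \
      THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
  // Caller must hold the capability at least shared. The macro now reads
  // in the same order as the attribute, unlike the old SHARED_REQUIRES.
  #define REQUIRES_SHARED(...) \
      THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

Annotated declarations then read as in the diff below, e.g.:

  bool CompileMethod(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_);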

Test: m
Test: m test-art-host
Change-Id: Ia8986b5dfd926ba772bf00b0a35eaf83596d8518
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 2aa6f3d..44e0e2a 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -50,7 +50,7 @@
   virtual ~Jit();
   static Jit* Create(JitOptions* options, std::string* error_msg);
   bool CompileMethod(ArtMethod* method, Thread* self, bool osr)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void CreateThreadPool();
 
   const JitCodeCache* GetCodeCache() const {
@@ -70,7 +70,7 @@
 
   void AddMemoryUsage(ArtMethod* method, size_t bytes)
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   size_t OSRMethodThreshold() const {
     return osr_method_threshold_;
@@ -102,25 +102,25 @@
 
   // Profiling methods.
   void MethodEntered(Thread* thread, ArtMethod* method)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void AddSamples(Thread* self, ArtMethod* method, uint16_t samples, bool with_backedges)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void InvokeVirtualOrInterface(Thread* thread,
                                 mirror::Object* this_object,
                                 ArtMethod* caller,
                                 uint32_t dex_pc,
                                 ArtMethod* callee)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void NotifyInterpreterToCompiledCodeTransition(Thread* self, ArtMethod* caller)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     AddSamples(self, caller, invoke_transition_weight_, false);
   }
 
   void NotifyCompiledCodeToInterpreterTransition(Thread* self, ArtMethod* callee)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     AddSamples(self, callee, invoke_transition_weight_, false);
   }
 
@@ -140,7 +140,7 @@
   void DumpForSigQuit(std::ostream& os) REQUIRES(!lock_);
 
   static void NewTypeLoadedIfUsingJit(mirror::Class* type)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // If debug info generation is turned on then write the type information for types already loaded
   // into the specified class linker to the jit debug interface,
@@ -164,7 +164,7 @@
                                         uint32_t dex_pc,
                                         int32_t dex_pc_offset,
                                         JValue* result)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   static bool LoadCompilerLibrary(std::string* error_msg);
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index b1079dd..c9227b1 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -446,7 +446,7 @@
         code_cache_(code_cache_in),
         bitmap_(code_cache_->GetLiveBitmap()) {}
 
-  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
     const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
     if (method_header == nullptr) {
       return true;
@@ -469,7 +469,7 @@
   MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
       : code_cache_(code_cache), barrier_(barrier) {}
 
-  void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+  void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
     ScopedTrace trace(__PRETTY_FUNCTION__);
     DCHECK(thread == Thread::Current() || thread->IsSuspended());
     MarkCodeVisitor visitor(thread, code_cache_);
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 1938221..e15c93a 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -70,7 +70,7 @@
   size_t DataCacheSize() REQUIRES(!lock_);
 
   bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
-      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
   // Notify the code cache that the compiler wants to use the
@@ -78,15 +78,15 @@
   // and therefore ensure the returned profiling info object is not
   // collected.
   ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
-      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
   void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
-      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
   void DoneCompilerUse(ArtMethod* method, Thread* self)
-      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
   // Allocate and write code and its metadata to the code cache.
@@ -99,7 +99,7 @@
                       const uint8_t* code,
                       size_t code_size,
                       bool osr)
-      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
   // Return true if the code cache contains this pc.
@@ -110,12 +110,12 @@
 
   // Reserve a region of data of size at least "size". Returns null if there is no more room.
   uint8_t* ReserveData(Thread* self, size_t size, ArtMethod* method)
-      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
   // Clear data from the data portion of the code cache.
   void ClearData(Thread* self, void* data)
-      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
   CodeCacheBitmap* GetLiveBitmap() const {
@@ -125,28 +125,28 @@
   // Return whether we should do a full collection given the current state of the cache.
   bool ShouldDoFullCollection()
       REQUIRES(lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Perform a collection on the code cache.
   void GarbageCollectCache(Thread* self)
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Given the 'pc', try to find the JIT compiled code associated with it.
   // Return null if 'pc' is not in the code cache. 'method' is passed as a
   // sanity check.
   OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Remove all methods in our cache that were allocated by 'alloc'.
   void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
 
@@ -157,7 +157,7 @@
                                   const std::vector<uint32_t>& entries,
                                   bool retry_allocation)
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
     return mspace == code_mspace_ || mspace == data_mspace_;
@@ -169,7 +169,7 @@
   void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                           std::vector<MethodReference>& methods)
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   uint64_t GetLastUpdateTimeNs() const;
 
@@ -182,7 +182,7 @@
 
   void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void Dump(std::ostream& os) REQUIRES(!lock_);
 
@@ -209,13 +209,13 @@
                               size_t code_size,
                               bool osr)
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                           ArtMethod* method,
                                           const std::vector<uint32_t>& entries)
       REQUIRES(lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // If a collection is in progress, wait for it to finish. Return
   // whether the thread actually waited.
@@ -243,15 +243,15 @@
 
   void DoCollection(Thread* self, bool collect_profiling_info)
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void RemoveUnmarkedCode(Thread* self)
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void MarkCompiledCodeOnThreadStacks(Thread* self)
       REQUIRES(!lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool CheckLiveCompiledCodeHasProfilingInfo()
       REQUIRES(lock_);
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 927681c..42916c3 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -189,7 +189,7 @@
     : methods_(methods),
       startup_method_samples_(startup_method_samples) {}
 
-  virtual bool operator()(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+  virtual bool operator()(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
     if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
       return true;
     }
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index a890fbb..1056fac 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -53,7 +53,7 @@
     return true;
   }
 
-  mirror::Class* GetMonomorphicType() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  mirror::Class* GetMonomorphicType() const REQUIRES_SHARED(Locks::mutator_lock_) {
     // Note that we cannot ensure the inline cache is actually monomorphic
     // at this point, as other threads may have updated it.
     DCHECK(!classes_[0].IsNull());
@@ -69,7 +69,7 @@
     return !classes_[1].IsNull() && classes_[kIndividualCacheSize - 1].IsNull();
   }
 
-  mirror::Class* GetTypeAt(size_t i) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  mirror::Class* GetTypeAt(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
     return classes_[i].Read();
   }
 
@@ -93,14 +93,14 @@
   // Create a ProfilingInfo for 'method'. Return whether it succeeded, or whether
   // it was not needed because the method has no virtual/interface invocations.
   static bool Create(Thread* self, ArtMethod* method, bool retry_allocation)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Add information from an executed INVOKE instruction to the profile.
   void AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls)
       // Method should not be interruptible, as it manipulates the ProfilingInfo
       // which can be concurrently collected.
       REQUIRES(Roles::uninterruptible_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
   template<typename RootVisitorType>