Move to newer Clang thread-safety annotations

Also enable -Wthread-safety-negative.

Changes:
Switch from the old lock-based annotation macros (e.g.
SHARED_LOCKS_REQUIRED) to the capability-based macros (e.g.
SHARED_REQUIRES) and negative capabilities.
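
For reference, a minimal sketch of how capability-based macros map
onto Clang's attributes (attribute spellings follow the Clang
thread-safety docs; the exact macro definitions in ART may differ):

  #define CAPABILITY(x)  __attribute__((capability(x)))
  #define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))
  #define SHARED_REQUIRES(...) \
      __attribute__((requires_shared_capability(__VA_ARGS__)))

  class CAPABILITY("mutex") Mutex { /* Lock/Unlock elided. */ };
  Mutex mu;

  void Write() REQUIRES(mu);        // Caller must hold mu exclusively.
  void Read() SHARED_REQUIRES(mu);  // A shared (reader) hold suffices.
  void TakeLock() REQUIRES(!mu);    // Negative capability: caller must
                                    // NOT hold mu; -Wthread-safety-negative
                                    // flags callers that cannot prove this.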

Future work:
Use capabilities to implement uninterruptible annotations that work
with AssertNoThreadSuspension.
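
A hypothetical sketch of what that could look like, using the macros
from the sketch above (the Uninterruptible role and the attribute
placement are illustrative only, not existing ART declarations):

  // Model "may not suspend" as a capability ("role") that
  // StartAssertNoThreadSuspension acquires and
  // EndAssertNoThreadSuspension releases.
  struct CAPABILITY("role") Uninterruptible {};
  extern Uninterruptible uninterruptible_;

  const char* StartAssertNoThreadSuspension(const char* cause)
      __attribute__((acquire_capability(uninterruptible_)));
  void EndAssertNoThreadSuspension(const char* old_cause)
      __attribute__((release_capability(uninterruptible_)));

  // Code that may suspend the thread then requires the negative
  // capability, i.e. that the role is not currently held.
  void CheckSuspend() REQUIRES(!uninterruptible_);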

Bug: 20072211

Change-Id: I42fcbe0300d98a831c89d1eff3ecd5a7e99ebf33
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 47865a2..dc04c0a 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -45,12 +45,12 @@
 ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
                                                      ArtMethod* method,
                                                      Thread* self, bool* slow_path)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
                                                                         Thread* self,
                                                                         bool* slow_path)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
 // cannot be resolved, throw an error. If it can, use it to create an instance.
@@ -61,21 +61,21 @@
                                                          ArtMethod* method,
                                                          Thread* self,
                                                          gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Given the context of a calling Method and a resolved class, create an instance.
 template <bool kInstrumented>
 ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
                                                                  Thread* self,
                                                                  gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Given the context of a calling Method and an initialized class, create an instance.
 template <bool kInstrumented>
 ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
                                                                     Thread* self,
                                                                     gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 
 template <bool kAccessCheck>
@@ -83,7 +83,7 @@
                                                     int32_t component_count,
                                                     ArtMethod* method,
                                                     bool* slow_path)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
 // it cannot be resolved, throw an error. If it can, use it to create an array.
@@ -95,7 +95,7 @@
                                                        ArtMethod* method,
                                                        Thread* self,
                                                        gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 template <bool kAccessCheck, bool kInstrumented>
 ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
@@ -103,13 +103,13 @@
                                                                ArtMethod* method,
                                                                Thread* self,
                                                                gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_count,
                                                  ArtMethod* method, Thread* self,
                                                  bool access_check,
                                                  gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx,
                                                              int32_t component_count,
@@ -117,7 +117,7 @@
                                                              Thread* self,
                                                              bool access_check,
                                                              gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Type of find field operation for fast and slow case.
 enum FindFieldType {
@@ -134,47 +134,47 @@
 template<FindFieldType type, bool access_check>
 inline ArtField* FindFieldFromCode(
     uint32_t field_idx, ArtMethod* referrer, Thread* self, size_t expected_size)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 template<InvokeType type, bool access_check>
 inline ArtMethod* FindMethodFromCode(
     uint32_t method_idx, mirror::Object** this_object, ArtMethod** referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Fast path field resolution that can't initialize classes or throw exceptions.
 inline ArtField* FindFieldFast(
     uint32_t field_idx, ArtMethod* referrer, FindFieldType type, size_t expected_size)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Fast path method resolution that can't throw exceptions.
 inline ArtMethod* FindMethodFast(
     uint32_t method_idx, mirror::Object* this_object, ArtMethod* referrer, bool access_check,
     InvokeType type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 inline mirror::Class* ResolveVerifyAndClinit(
     uint32_t type_idx, ArtMethod* referrer, Thread* self, bool can_run_clinit, bool verify_access)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
-extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+extern void ThrowStackOverflowError(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
 
 inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // TODO: annotalysis disabled as monitor semantics are maintained in Java code.
 inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
     NO_THREAD_SAFETY_ANALYSIS;
 
 void CheckReferenceResult(mirror::Object* o, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty,
                                     jobject rcvr_jobj, jobject interface_art_method_jobj,
                                     std::vector<jvalue>& args)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 template <typename INT_TYPE, typename FLOAT_TYPE>
 inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);