ART: Rename SHARED_REQUIRES to REQUIRES_SHARED
This matches the actual attribute name and upstream usage.
Preparation for deferring to libbase.
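
For context, a minimal sketch of the two macro spellings (the exact header
locations and definitions are assumptions here; both follow the usual Clang
thread-safety annotation pattern):

  // ART's old spelling (assumed definition, shown for illustration only):
  #define SHARED_REQUIRES(...) \
      THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

  // libbase's spelling, which this change adopts:
  #define REQUIRES_SHARED(...) \
      THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

If both expand to the same requires_shared_capability attribute, only the
macro name changes at call sites, which lets ART later defer to the libbase
header without further edits.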
Test: m
Test: m test-art-host
Change-Id: Ia8986b5dfd926ba772bf00b0a35eaf83596d8518
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 45e49e2..d88c9d4 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -270,7 +270,7 @@
explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { }
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::String* string = root->AsString();
table_->operator[](string->ToModifiedUtf8()) = string;
}
@@ -282,7 +282,7 @@
// Based on ClassLinker::ResolveString.
static void PreloadDexCachesResolveString(
Handle<mirror::DexCache> dex_cache, uint32_t string_idx, StringTable& strings)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::String* string = dex_cache->GetResolvedString(string_idx);
if (string != nullptr) {
return;
@@ -300,7 +300,7 @@
// Based on ClassLinker::ResolveType.
static void PreloadDexCachesResolveType(
Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
if (klass != nullptr) {
return;
@@ -329,7 +329,7 @@
// Based on ClassLinker::ResolveField.
static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uint32_t field_idx,
bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize);
if (field != nullptr) {
return;
@@ -357,7 +357,7 @@
// Based on ClassLinker::ResolveMethod.
static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, uint32_t method_idx,
InvokeType invoke_type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, kRuntimePointerSize);
if (method != nullptr) {
return;
@@ -431,7 +431,7 @@
}
static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!kPreloadDexCachesCollectStats) {
return;
}
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 9e12806..9da40b9 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -29,7 +29,7 @@
namespace art {
static jobject GetThreadStack(const ScopedFastNativeObjectAccess& soa, jobject peer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
jobject trace = nullptr;
if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) {
trace = soa.Self()->CreateInternalStackTrace<false>(soa);
@@ -85,7 +85,7 @@
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
class_loader(nullptr) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(class_loader == nullptr);
mirror::Class* c = GetMethod()->GetDeclaringClass();
// c is null for runtime methods.
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index d4e54cf..d89a334 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -44,7 +44,7 @@
ALWAYS_INLINE static inline mirror::Class* DecodeClass(
const ScopedFastNativeObjectAccess& soa, jobject java_class)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
DCHECK(c != nullptr);
DCHECK(c->IsClass());
@@ -111,7 +111,7 @@
static mirror::ObjectArray<mirror::Field>* GetDeclaredFields(
Thread* self, mirror::Class* klass, bool public_only, bool force_resolve)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
IterationRange<StrideIterator<ArtField>> ifields = klass->GetIFields();
IterationRange<StrideIterator<ArtField>> sfields = klass->GetSFields();
@@ -192,7 +192,7 @@
// fast.
ALWAYS_INLINE static inline ArtField* FindFieldByName(
Thread* self ATTRIBUTE_UNUSED, mirror::String* name, LengthPrefixedArray<ArtField>* fields)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (fields == nullptr) {
return nullptr;
}
@@ -237,7 +237,7 @@
ALWAYS_INLINE static inline mirror::Field* GetDeclaredField(
Thread* self, mirror::Class* c, mirror::String* name)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* art_field = FindFieldByName(self, name, c->GetIFieldsPtr());
if (art_field != nullptr) {
return mirror::Field::CreateFromArtField<kRuntimePointerSize>(self, art_field, true);
@@ -251,7 +251,7 @@
static mirror::Field* GetPublicFieldRecursive(
Thread* self, mirror::Class* clazz, mirror::String* name)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(clazz != nullptr);
DCHECK(name != nullptr);
DCHECK(self != nullptr);
@@ -352,7 +352,7 @@
}
static ALWAYS_INLINE inline bool MethodMatchesConstructor(ArtMethod* m, bool public_only)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(m != nullptr);
return (!public_only || m->IsPublic()) && !m->IsStatic() && m->IsConstructor();
}
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 9e2d68d..1b399aa 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -36,7 +36,7 @@
*/
static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::Object* array)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string actualType(PrettyTypeOf(array));
Thread* self = Thread::Current();
self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index aac800a..5a4ced2 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -32,7 +32,7 @@
template<bool kIsSet>
ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::Field* field,
mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsSet && field->IsFinal()) {
ThrowIllegalAccessException(
StringPrintf("Cannot set %s field %s of class %s",
@@ -60,7 +60,7 @@
template<bool kAllowReferences>
ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::Field* f,
Primitive::Type field_type, JValue* value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(value->GetJ(), INT64_C(0));
MemberOffset offset(f->GetOffset());
const bool is_volatile = f->IsVolatile();
@@ -105,7 +105,7 @@
ALWAYS_INLINE inline static bool CheckReceiver(const ScopedFastNativeObjectAccess& soa,
jobject j_rcvr, mirror::Field** f,
mirror::Object** class_or_rcvr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
soa.Self()->AssertThreadSuspensionIsAllowable();
mirror::Class* declaringClass = (*f)->GetDeclaringClass();
if ((*f)->IsStatic()) {
@@ -232,7 +232,7 @@
ALWAYS_INLINE inline static void SetFieldValue(mirror::Object* o, mirror::Field* f,
Primitive::Type field_type, bool allow_references,
const JValue& new_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(f->GetDeclaringClass()->IsInitialized());
MemberOffset offset(f->GetOffset());
const bool is_volatile = f->IsVolatile();
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 858849f..472340c 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -305,7 +305,7 @@
static void copyToArray(jlong srcAddr, mirror::PrimitiveArray<T>* array,
size_t array_offset,
size_t size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const T* src = reinterpret_cast<T*>(srcAddr);
size_t sz = size / sizeof(T);
size_t of = array_offset / sizeof(T);
@@ -318,7 +318,7 @@
static void copyFromArray(jlong dstAddr, mirror::PrimitiveArray<T>* array,
size_t array_offset,
size_t size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
T* dst = reinterpret_cast<T*>(dstAddr);
size_t sz = size / sizeof(T);
size_t of = array_offset / sizeof(T);