ART: Rename SHARED_REQUIRES to REQUIRES_SHARED
This matches the actual attribute name and upstream usage.
Preparation for deferring to libbase.
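For context, a minimal sketch (not the exact ART/libbase definition) of how the
renamed macro is expected to map onto Clang's thread-safety attribute, whose
spelling the new name mirrors:

  #if defined(__clang__)
  #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
  #else
  #define THREAD_ANNOTATION_ATTRIBUTE__(x)  // No-op on non-Clang compilers.
  #endif

  // REQUIRES_SHARED(mu) annotates a function that must be called with the
  // shared (reader) capability of 'mu' held, e.g. Locks::mutator_lock_.
  #define REQUIRES_SHARED(...) \
    THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))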
Test: m
Test: m test-art-host
Change-Id: Ia8986b5dfd926ba772bf00b0a35eaf83596d8518
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 08fec91..d03a9d8 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -44,7 +44,7 @@
const InlineInfo& inline_info,
const InlineInfoEncoding& encoding,
uint8_t inlining_depth)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// This method is being used by artQuickResolutionTrampoline, before it sets up
// the passed parameters in a GC friendly way. Therefore we must never be
// suspended while executing it.
@@ -121,7 +121,7 @@
}
inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return GetCalleeSaveMethodCaller(
self->GetManagedStack()->GetTopQuickFrame(), type, true /* do_caller_check */);
}
@@ -457,7 +457,7 @@
// Explicit template declarations of FindFieldFromCode for all field access types.
#define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \
+template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE \
ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
ArtMethod* referrer, \
Thread* self, size_t expected_size) \
@@ -640,7 +640,7 @@
// Explicit template declarations of FindMethodFromCode for all invoke types.
#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \
+ template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE \
ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
mirror::Object** this_object, \
ArtMethod* referrer, \
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index fd1c02f..4056ec5 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -43,7 +43,7 @@
ArtMethod* referrer,
Thread* self,
bool access_check)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
return nullptr; // Failure
@@ -259,7 +259,7 @@
ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
Runtime::CalleeSaveType type,
bool do_caller_check)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index a28376f..f88e81d 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -47,12 +47,12 @@
ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
ArtMethod* method,
Thread* self, bool* slow_path)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self,
bool* slow_path)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
// cannot be resolved, throw an error. If it can, use it to create an instance.
@@ -63,21 +63,21 @@
ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the context of a calling Method and a resolved class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the context of a calling Method and an initialized class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kAccessCheck>
@@ -85,7 +85,7 @@
int32_t component_count,
ArtMethod* method,
bool* slow_path)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
// it cannot be resolved, throw an error. If it can, use it to create an array.
@@ -97,7 +97,7 @@
ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
@@ -105,13 +105,13 @@
ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_count,
ArtMethod* method, Thread* self,
bool access_check,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx,
int32_t component_count,
@@ -119,7 +119,7 @@
Thread* self,
bool access_check,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Type of find field operation for fast and slow case.
enum FindFieldType {
@@ -136,45 +136,45 @@
template<FindFieldType type, bool access_check>
inline ArtField* FindFieldFromCode(
uint32_t field_idx, ArtMethod* referrer, Thread* self, size_t expected_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<InvokeType type, bool access_check>
inline ArtMethod* FindMethodFromCode(
uint32_t method_idx, mirror::Object** this_object, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Fast path field resolution that can't initialize classes or throw exceptions.
inline ArtField* FindFieldFast(
uint32_t field_idx, ArtMethod* referrer, FindFieldType type, size_t expected_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Fast path method resolution that can't throw exceptions.
inline ArtMethod* FindMethodFast(
uint32_t method_idx, mirror::Object* this_object, ArtMethod* referrer, bool access_check,
InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
inline mirror::Class* ResolveVerifyAndClinit(
uint32_t type_idx, ArtMethod* referrer, Thread* self, bool can_run_clinit, bool verify_access)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// TODO: annotalysis disabled as monitor semantics are maintained in Java code.
inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
NO_THREAD_SAFETY_ANALYSIS;
void CheckReferenceResult(mirror::Object* o, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty,
jobject rcvr_jobj, jobject interface_art_method_jobj,
std::vector<jvalue>& args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <typename INT_TYPE, typename FLOAT_TYPE>
inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index a81a7e7..df37f95 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -40,32 +40,32 @@
explicit ScopedQuickEntrypointChecks(Thread *self,
bool entry_check = kIsDebugBuild,
bool exit_check = kIsDebugBuild)
- SHARED_REQUIRES(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) {
+ REQUIRES_SHARED(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) {
if (entry_check) {
TestsOnEntry();
}
}
- ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_)
+ ScopedQuickEntrypointChecks() REQUIRES_SHARED(Locks::mutator_lock_)
: self_(kIsDebugBuild ? Thread::Current() : nullptr), exit_check_(kIsDebugBuild) {
if (kIsDebugBuild) {
TestsOnEntry();
}
}
- ~ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ~ScopedQuickEntrypointChecks() REQUIRES_SHARED(Locks::mutator_lock_) {
if (exit_check_) {
TestsOnExit();
}
}
private:
- void TestsOnEntry() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void TestsOnEntry() REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(self_);
self_->VerifyStack();
}
- void TestsOnExit() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void TestsOnExit() REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(self_);
self_->VerifyStack();
}
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 4686a51..dc5fd07 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -31,7 +31,7 @@
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
uint32_t type_idx, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, kRuntimePointerSize); \
@@ -58,7 +58,7 @@
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
if (LIKELY(klass->IsInitialized())) { \
@@ -84,7 +84,7 @@
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
size_t byte_count = klass->GetObjectSize(); \
@@ -108,34 +108,34 @@
} \
extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCode<false, instrumented_bool>(type_idx, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCode<true, instrumented_bool>(type_idx, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (!(instrumented_bool)) { \
return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, false, allocator_type); \
@@ -145,7 +145,7 @@
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (!(instrumented_bool)) { \
return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, true, allocator_type); \
@@ -156,7 +156,7 @@
extern "C" mirror::String* artAllocStringFromBytesFromCode##suffix##suffix2( \
mirror::ByteArray* byte_array, int32_t high, int32_t offset, int32_t byte_count, \
Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
StackHandleScope<1> hs(self); \
Handle<mirror::ByteArray> handle_array(hs.NewHandle(byte_array)); \
@@ -165,7 +165,7 @@
} \
extern "C" mirror::String* artAllocStringFromCharsFromCode##suffix##suffix2( \
int32_t offset, int32_t char_count, mirror::CharArray* char_array, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
StackHandleScope<1> hs(self); \
Handle<mirror::CharArray> handle_array(hs.NewHandle(char_array)); \
return mirror::String::AllocFromCharArray<instrumented_bool>(self, char_count, handle_array, \
@@ -173,7 +173,7 @@
} \
extern "C" mirror::String* artAllocStringFromStringFromCode##suffix##suffix2( /* NOLINT */ \
mirror::String* string, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
StackHandleScope<1> hs(self); \
Handle<mirror::String> handle_string(hs.NewHandle(string)); \
return mirror::String::AllocFromString<instrumented_bool>(self, handle_string->GetLength(), \
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index 8db69a3..2732d68 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -21,7 +21,7 @@
// Assignable test for code, won't throw. Null and equality tests already performed
extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(klass != nullptr);
DCHECK(ref_class != nullptr);
return klass->IsAssignableFrom(ref_class) ? 1 : 0;
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index f35c2fe..d680c5d 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -30,7 +30,7 @@
namespace art {
NO_RETURN static void artDeoptimizeImpl(Thread* self, bool single_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (VLOG_IS_ON(deopt)) {
if (single_frame) {
// Deopt logging will be in DeoptimizeSingleFrame. It is there to take advantage of the
@@ -60,14 +60,14 @@
}
}
-extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+extern "C" NO_RETURN void artDeoptimize(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
artDeoptimizeImpl(self, false);
}
// This is called directly from compiled code by an HDeoptimize.
extern "C" NO_RETURN void artDeoptimizeFromCompiledCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
// Before deoptimizing to interpreter, we must push the deoptimization context.
JValue return_value;
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index c045e84..2cd0331 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -26,7 +26,7 @@
namespace art {
extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
@@ -36,7 +36,7 @@
}
extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
ScopedQuickEntrypointChecks sqec(self);
auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
@@ -44,7 +44,7 @@
}
extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
ScopedQuickEntrypointChecks sqec(self);
@@ -53,7 +53,7 @@
}
extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
return ResolveStringFromCode(caller, string_idx);
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 08e0d6e..89712a3 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -86,7 +86,7 @@
// barrier fast path implementations generated by the compiler to mark
// an object that is referenced by a field of a gray object.
extern "C" mirror::Object* artReadBarrierMark(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) HOT_ATTR;
// Read barrier entrypoint for heap references.
// This is the read barrier slow path for instance and static fields
@@ -94,11 +94,11 @@
extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref,
mirror::Object* obj,
uint32_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) HOT_ATTR;
// Read barrier entrypoint for GC roots.
extern "C" mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root)
- SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) HOT_ATTR;
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 1a12bd4..5b65029 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -44,7 +44,7 @@
size_t size,
mirror::Object** obj)
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(obj));
ArtField* field = FindFieldFromCode<type, kAccessCheck>(field_idx, referrer, self, size);
@@ -56,7 +56,7 @@
}
extern "C" ssize_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
@@ -70,7 +70,7 @@
}
extern "C" size_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
@@ -84,7 +84,7 @@
}
extern "C" ssize_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
@@ -98,7 +98,7 @@
}
extern "C" size_t artGetCharStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
@@ -112,7 +112,7 @@
}
extern "C" size_t artGet32StaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t));
if (LIKELY(field != nullptr)) {
@@ -128,7 +128,7 @@
extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t));
if (LIKELY(field != nullptr)) {
@@ -144,7 +144,7 @@
extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx,
referrer,
@@ -167,7 +167,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -188,7 +188,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -208,7 +208,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -229,7 +229,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -250,7 +250,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -271,7 +271,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -292,7 +292,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx,
referrer,
@@ -316,7 +316,7 @@
uint32_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
@@ -349,7 +349,7 @@
uint16_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
@@ -382,7 +382,7 @@
uint32_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t));
if (LIKELY(field != nullptr)) {
@@ -403,7 +403,7 @@
ArtMethod* referrer,
uint64_t new_value,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != nullptr)) {
@@ -424,7 +424,7 @@
mirror::Object* new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx,
referrer,
@@ -459,7 +459,7 @@
uint8_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -496,7 +496,7 @@
uint16_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -534,7 +534,7 @@
uint32_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -560,7 +560,7 @@
uint64_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -586,7 +586,7 @@
mirror::Object* new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx,
referrer,
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index 22b2fa3..f63c9c2 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -26,7 +26,7 @@
*/
extern "C" int artHandleFillArrayDataFromCode(uint32_t payload_offset, mirror::Array* array,
ArtMethod* method, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
const uint16_t* const insns = method->GetCodeItem()->insns_;
const Instruction::ArrayDataPayload* payload =
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 82d5467..fec7373 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -29,7 +29,7 @@
mirror::Object* this_object,
Thread* self,
uintptr_t lr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip
// that part.
ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
@@ -51,7 +51,7 @@
extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, ArtMethod** sp,
uint64_t gpr_result,
uint64_t fpr_result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Instrumentation exit stub must not be entered with a pending exception.
CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
<< self->GetException()->Dump();
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index c06824c..64f19af 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -78,7 +78,7 @@
}
static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JNIEnvExt* env = self->GetJniEnv();
if (UNLIKELY(env->check_jni)) {
env->CheckNoHeldMonitors();
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 4adb39b..b4f945a 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -23,7 +23,7 @@
extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
+ REQUIRES_SHARED(Locks::mutator_lock_) /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerException("Null reference used for synchronization (monitor-enter)");
@@ -44,7 +44,7 @@
extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
+ REQUIRES_SHARED(Locks::mutator_lock_) /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerException("Null reference used for synchronization (monitor-exit)");
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 47b3eff..0838059 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -19,7 +19,7 @@
namespace art {
-extern "C" void artTestSuspendFromCode(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+extern "C" void artTestSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when suspend count check value is 0 and thread->suspend_count_ != 0
ScopedQuickEntrypointChecks sqec(self);
self->CheckSuspend();
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index ea9f7b0..67cae8a 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -24,14 +24,14 @@
// Deliver an exception that's pending on thread helping set up a callee save frame on the way.
extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->QuickDeliverException();
}
// Called by generated code to throw an exception.
extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
/*
* exception may be null, in which case this routine should
* throw NPE. NOTE: this is a convenience for generated code,
@@ -50,7 +50,7 @@
// Called by generated code to throw a NPE exception.
extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
// We come from an explicit check in the generated code. This path is triggered
// only if the object is indeed null.
@@ -60,7 +60,7 @@
// Installed by a signal handler to throw a NPE exception.
extern "C" NO_RETURN void artThrowNullPointerExceptionFromSignal(uintptr_t addr, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
ThrowNullPointerExceptionFromDexPC(/* check_address */ true, addr);
@@ -70,7 +70,7 @@
// Called by generated code to throw an arithmetic divide by zero exception.
extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArithmeticExceptionDivideByZero();
self->QuickDeliverException();
@@ -78,7 +78,7 @@
// Called by generated code to throw an array index out of bounds exception.
extern "C" NO_RETURN void artThrowArrayBoundsFromCode(int index, int length, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayIndexOutOfBoundsException(index, length);
self->QuickDeliverException();
@@ -86,14 +86,14 @@
// Called by generated code to throw a string index out of bounds exception.
extern "C" NO_RETURN void artThrowStringBoundsFromCode(int index, int length, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowStringIndexOutOfBoundsException(index, length);
self->QuickDeliverException();
}
extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
ThrowStackOverflowError(self);
@@ -102,7 +102,7 @@
}
extern "C" NO_RETURN void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowNoSuchMethodError(method_idx);
self->QuickDeliverException();
@@ -111,7 +111,7 @@
extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type,
mirror::Class* src_type,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
DCHECK(!dest_type->IsAssignableFrom(src_type));
ThrowClassCastException(dest_type, src_type);
@@ -120,7 +120,7 @@
extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayStoreException(value->GetClass(), array->GetClass());
self->QuickDeliverException();
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index c67379a..3043c83 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -295,7 +295,7 @@
// kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
// 1st GPR.
static mirror::Object* GetProxyThisObject(ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK((*sp)->IsProxyMethod());
CHECK_GT(kNumQuickGprArgs, 0u);
constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
@@ -305,19 +305,19 @@
return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
}
- static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
return GetCalleeSaveMethodCaller(sp, Runtime::kSaveRefsAndArgs);
}
- static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
uint8_t* previous_sp =
reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
return *reinterpret_cast<ArtMethod**>(previous_sp);
}
- static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
@@ -344,14 +344,14 @@
}
// For the given quick ref and args quick frame, return the caller's PC.
- static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
return *reinterpret_cast<uintptr_t*>(lr);
}
QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
- uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) :
+ uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) :
is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
@@ -436,7 +436,7 @@
}
}
- void VisitArguments() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) {
// (a) 'stack_args_' should point to the first method's argument
// (b) whatever the argument type it is, the 'stack_index_' should
// be moved forward along with every visiting.
@@ -589,7 +589,7 @@
// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
// allows to use the QuickArgumentVisitor constants without moving all the code in its own module.
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return QuickArgumentVisitor::GetProxyThisObject(sp);
}
@@ -600,7 +600,7 @@
uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
- void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
private:
ShadowFrame* const sf_;
@@ -643,7 +643,7 @@
}
extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Ensure we don't get thread suspension until the object arguments are safely in the shadow
// frame.
ScopedQuickEntrypointChecks sqec(self);
@@ -698,10 +698,10 @@
if (kIsDebugBuild) {
class DummyStackVisitor : public StackVisitor {
public:
- explicit DummyStackVisitor(Thread* self_in) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
// Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking
// logic. Just always say we want to continue.
return true;
@@ -782,9 +782,9 @@
ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
- void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
- void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
private:
ScopedObjectAccessUnchecked* const soa_;
@@ -843,7 +843,7 @@
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(
ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
// Ensure we don't get thread suspension until the object arguments are safely in jobjects.
@@ -899,9 +899,9 @@
uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
- void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
- void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
private:
ScopedObjectAccessUnchecked* const soa_;
@@ -932,7 +932,7 @@
// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(
ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// The resolution trampoline stashes the resolved method into the callee-save frame to transport
// it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
// does not have the same stack layout as the callee-save method).
@@ -1309,7 +1309,7 @@
return gpr_index_ > 0;
}
- void AdvanceHandleScope(mirror::Object* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
uintptr_t handle = PushHandle(ptr);
if (HaveHandleScopeGpr()) {
gpr_index_--;
@@ -1497,7 +1497,7 @@
void PushStack(uintptr_t val) {
delegate_->PushStack(val);
}
- uintptr_t PushHandle(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) {
+ uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
return delegate_->PushHandle(ref);
}
@@ -1557,10 +1557,10 @@
virtual void WalkHeader(
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
}
- void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) {
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
WalkHeader(&sm);
@@ -1632,7 +1632,7 @@
//
// Note: assumes ComputeAll() has been run before.
void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = **m;
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
@@ -1673,7 +1673,7 @@
// Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
// Returns the new bottom. Note: this may be unaligned.
uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// First, fix up the layout of the callee-save frame.
// We have to squeeze in the HandleScope, and relocate the method pointer.
LayoutCalleeSaveFrame(self, m, sp, handle_scope);
@@ -1691,7 +1691,7 @@
uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr,
uint32_t** start_fpr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Walk(shorty, shorty_len);
// JNI part.
@@ -1707,7 +1707,7 @@
// Add JNIEnv* and jobj/jclass before the shorty-derived elements.
void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
uint32_t num_handle_scope_references_;
@@ -1763,7 +1763,7 @@
cur_stack_arg_++;
}
- virtual uintptr_t PushHandle(mirror::Object*) SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
UNREACHABLE();
}
@@ -1801,15 +1801,15 @@
}
}
- void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
- void FinalizeHandleScope(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
StackReference<mirror::Object>* GetFirstHandleScopeEntry() {
return handle_scope_->GetHandle(0).GetReference();
}
- jobject GetFirstHandleScopeJObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
return handle_scope_->GetHandle(0).ToJObject();
}
@@ -1825,7 +1825,7 @@
HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
handle_scope_(handle_scope), cur_entry_(0) {}
- uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
@@ -1833,7 +1833,7 @@
cur_entry_ = 0U;
}
- void ResetRemainingScopeSlots() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) {
// Initialize padding entries.
size_t expected_slots = handle_scope_->NumberOfReferences();
while (cur_entry_ < expected_slots) {
@@ -1953,7 +1953,7 @@
* 2) An error, if the value is negative.
*/
extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* called = *sp;
DCHECK(called->IsNative()) << PrettyMethod(called, true);
uint32_t shorty_len = 0;
@@ -2048,7 +2048,7 @@
// for the method pointer.
//
// It is valid to use this, as at the usage points here (returns from C functions) we are assuming
-// to hold the mutator lock (see SHARED_REQUIRES(Locks::mutator_lock_) annotations).
+// to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations).
template<InvokeType type, bool access_check>
static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self,
@@ -2090,7 +2090,7 @@
// Explicit artInvokeCommon template function declarations to please analysis tool.
#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
TwoWordReturn artInvokeCommon<type, access_check>( \
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
@@ -2109,31 +2109,31 @@
// See comments in runtime_support_asm.S
extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
}
@@ -2144,7 +2144,7 @@
mirror::Object* this_object,
Thread* self,
ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
StackHandleScope<1> hs(self);
Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass()));