author     2012-09-05 08:57:23 -0700
committer  2012-09-05 09:06:19 -0700
commit     b726dcb581bf72da46527378ccb6889020f0e6e9
tree       c0383ed788652926e93147e50d659ed226519548
parent     fd678beb171a4686a4f2d53ca4188a4ade8fa54e
Rename GlobalSynchronization to Locks
Also address some review comments in common_throws relating to
ToStr<InvokeType> and exception detail messages.
Change-Id: Ibf2c0f147689fa236d349bd7f01eed3c2522552b
108 files changed, 1535 insertions, 1542 deletions
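
The change itself is mechanical: every thread-safety annotation that named the old GlobalSynchronization holder now names Locks. Below is a rough, compilable sketch of the pattern; the macro definitions and mutex classes are illustrative stand-ins rather than ART's actual declarations, and only the Locks member names are taken from this diff.

#if defined(__clang__)
#define LOCKABLE __attribute__((lockable))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define SHARED_LOCKS_REQUIRED(...) __attribute__((shared_locks_required(__VA_ARGS__)))
#define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))
#define LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
#else  // no-ops for compilers without Clang's thread-safety analysis
#define LOCKABLE
#define GUARDED_BY(x)
#define SHARED_LOCKS_REQUIRED(...)
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#define LOCKS_EXCLUDED(...)
#endif

// Stand-ins for ART's annotated mutex types.
class LOCKABLE Mutex {};
class LOCKABLE ReaderWriterMutex {};

// The holder class, renamed from GlobalSynchronization to Locks.
struct Locks {
  static ReaderWriterMutex* mutator_lock_;      // held shared while running managed code
  static ReaderWriterMutex* heap_bitmap_lock_;  // guards the heap bitmaps
  static Mutex* classlinker_classes_lock_;      // guards ClassLinker's class tables
  static Mutex* thread_suspend_count_lock_;     // guards thread suspend counts
};

// A typical annotated declaration after the rename; before this change the
// annotation arguments read GlobalSynchronization::mutator_lock_ and so on.
void ScanHeap()
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

Because the qualifier appears in hundreds of SHARED_LOCKS_REQUIRED / EXCLUSIVE_LOCKS_REQUIRED / LOCKS_EXCLUDED / GUARDED_BY sites, the diff below is large but consists almost entirely of this one substitution.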
diff --git a/src/card_table.h b/src/card_table.h index a6284e3887..9dc720184a 100644 --- a/src/card_table.h +++ b/src/card_table.h @@ -76,8 +76,8 @@ class CardTable { template <typename Visitor, typename FingerVisitor> void Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end, const Visitor& visitor, const FingerVisitor& finger_visitor) const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(bitmap->HasAddress(scan_begin)); DCHECK(bitmap->HasAddress(scan_end - 1)); // scan_end is the byte after the last byte we scan. byte* card_cur = CardFromAddr(scan_begin); diff --git a/src/check_jni.cc b/src/check_jni.cc index 0e521af5aa..c53a1e31dc 100644 --- a/src/check_jni.cc +++ b/src/check_jni.cc @@ -124,7 +124,7 @@ static const char* gBuiltInPrefixes[] = { }; static bool ShouldTrace(JavaVMExt* vm, const Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // If both "-Xcheck:jni" and "-Xjnitrace:" are enabled, we print trace messages // when a native method that matches the -Xjnitrace argument calls a JNI function // such as NewByteArray. @@ -151,7 +151,7 @@ class ScopedCheck { public: // For JNIEnv* functions. explicit ScopedCheck(JNIEnv* env, int flags, const char* functionName) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) : soa_(env) { Init(flags, functionName, true); CheckThread(flags); @@ -160,12 +160,12 @@ class ScopedCheck { // For JavaVM* functions. // TODO: it's not correct that this is a lock function, but making it so aids annotalysis. explicit ScopedCheck(JavaVM* vm, bool has_method, const char* functionName) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) : soa_(vm) { Init(kFlag_Invocation, functionName, has_method); } - ~ScopedCheck() UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) {} + ~ScopedCheck() UNLOCK_FUNCTION(Locks::mutator_lock_) {} const ScopedObjectAccess& soa() { return soa_; @@ -195,7 +195,7 @@ class ScopedCheck { * Works for both static and instance fields. */ void CheckFieldType(jobject java_object, jfieldID fid, char prim, bool isStatic) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* f = CheckFieldID(fid); if (f == NULL) { return; @@ -241,7 +241,7 @@ class ScopedCheck { * Assumes "jobj" has already been validated. */ void CheckInstanceFieldID(jobject java_object, jfieldID fid) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* o = soa_.Decode<Object*>(java_object); if (o == NULL || !Runtime::Current()->GetHeap()->IsHeapAddress(o)) { JniAbortF(function_name_, "field operation on invalid %s: %p", @@ -275,7 +275,7 @@ class ScopedCheck { * 'expectedType' will be "L" for all objects, including arrays. */ void CheckSig(jmethodID mid, const char* expectedType, bool isStatic) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* m = CheckMethodID(mid); if (m == NULL) { return; @@ -301,7 +301,7 @@ class ScopedCheck { * Assumes "java_class" has already been validated. 
*/ void CheckStaticFieldID(jclass java_class, jfieldID fid) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* c = soa_.Decode<Class*>(java_class); const Field* f = CheckFieldID(fid); if (f == NULL) { @@ -323,7 +323,7 @@ class ScopedCheck { * Instances of "java_class" must be instances of the method's declaring class. */ void CheckStaticMethod(jclass java_class, jmethodID mid) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const Method* m = CheckMethodID(mid); if (m == NULL) { return; @@ -343,7 +343,7 @@ class ScopedCheck { * will be handled automatically by the instanceof check.) */ void CheckVirtualMethod(jobject java_object, jmethodID mid) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const Method* m = CheckMethodID(mid); if (m == NULL) { return; @@ -392,7 +392,7 @@ class ScopedCheck { * Use the kFlag_NullableUtf flag where 'u' field(s) are nullable. */ void Check(bool entry, const char* fmt0, ...) - SHARED_LOCKS_REQUIRED (GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED (Locks::mutator_lock_) { va_list ap; const Method* traceMethod = NULL; @@ -594,7 +594,7 @@ class ScopedCheck { * to "running" mode before doing the checks. */ bool CheckInstance(InstanceKind kind, jobject java_object) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const char* what = NULL; switch (kind) { case kClass: @@ -668,7 +668,7 @@ class ScopedCheck { * * Since we're dealing with objects, switch to "running" mode. */ - void CheckArray(jarray java_array) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void CheckArray(jarray java_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (java_array == NULL) { JniAbortF(function_name_, "jarray was NULL"); return; @@ -689,7 +689,7 @@ class ScopedCheck { } } - Field* CheckFieldID(jfieldID fid) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Field* CheckFieldID(jfieldID fid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (fid == NULL) { JniAbortF(function_name_, "jfieldID was NULL"); return NULL; @@ -702,7 +702,7 @@ class ScopedCheck { return f; } - Method* CheckMethodID(jmethodID mid) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Method* CheckMethodID(jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (mid == NULL) { JniAbortF(function_name_, "jmethodID was NULL"); return NULL; @@ -722,7 +722,7 @@ class ScopedCheck { * Switches to "running" mode before performing checks. */ void CheckObject(jobject java_object) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (java_object == NULL) { return; } @@ -745,7 +745,7 @@ class ScopedCheck { } } - void CheckThread(int flags) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void CheckThread(int flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* self = Thread::Current(); if (self == NULL) { JniAbortF(function_name_, "a thread (tid %d) is making JNI calls without being attached", GetTid()); diff --git a/src/class_linker.cc b/src/class_linker.cc index f6fb4701a9..3c52370f21 100644 --- a/src/class_linker.cc +++ b/src/class_linker.cc @@ -60,7 +60,7 @@ namespace art { static void ThrowNoClassDefFoundError(const char* fmt, ...) 
__attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void ThrowNoClassDefFoundError(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -70,7 +70,7 @@ static void ThrowNoClassDefFoundError(const char* fmt, ...) { static void ThrowClassFormatError(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void ThrowClassFormatError(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -80,7 +80,7 @@ static void ThrowClassFormatError(const char* fmt, ...) { static void ThrowLinkageError(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void ThrowLinkageError(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -90,7 +90,7 @@ static void ThrowLinkageError(const char* fmt, ...) { static void ThrowNoSuchFieldError(const StringPiece& scope, Class* c, const StringPiece& type, const StringPiece& name) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassHelper kh(c); std::ostringstream msg; msg << "No " << scope << "field " << name << " of type " << type @@ -104,7 +104,7 @@ static void ThrowNoSuchFieldError(const StringPiece& scope, Class* c, const Stri static void ThrowNullPointerException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void ThrowNullPointerException(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -113,7 +113,7 @@ static void ThrowNullPointerException(const char* fmt, ...) { } static void ThrowEarlierClassFailure(Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // The class failed to initialize on a previous attempt, so we want to throw // a NoClassDefFoundError (v2 2.17.5). 
The exception to this rule is if we // failed in verification, in which case v2 5.4.1 says we need to re-throw @@ -134,7 +134,7 @@ static void ThrowEarlierClassFailure(Class* c) } static void WrapExceptionInInitializer() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* self = Thread::Current(); JNIEnv* env = self->GetJniEnv(); @@ -916,7 +916,7 @@ void ClassLinker::InitFromImage() { // reinit clases_ table { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); heap->FlushAllocStack(); const Spaces& vec = heap->GetSpaces(); // TODO: C++0x auto @@ -985,7 +985,7 @@ void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) const { } { - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); typedef Table::const_iterator It; // TODO: C++0x auto for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) { visitor(it->second, arg); @@ -999,7 +999,7 @@ void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) const { } void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) const { - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); typedef Table::const_iterator It; // TODO: C++0x auto for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) { if (!visitor(it->second, arg)) { @@ -1124,7 +1124,7 @@ ObjectArray<StackTraceElement>* ClassLinker::AllocStackTraceElementArray(size_t } static Class* EnsureResolved(Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(klass != NULL); // Wait for the class if it has not already been linked. Thread* self = Thread::Current(); @@ -1456,7 +1456,7 @@ void ClassLinker::FixupStaticTrampolines(Class* klass) { static void LinkCode(SirtRef<Method>& method, const OatFile::OatClass* oat_class, uint32_t method_index) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Every kind of method should at least get an invoke stub from the oat_method. // non-abstract methods also get their code pointers. const OatFile::OatMethod oat_method = oat_class->GetOatMethod(method_index); @@ -1891,7 +1891,7 @@ Class* ClassLinker::InsertClass(const StringPiece& descriptor, Class* klass, boo LOG(INFO) << "Loaded class " << descriptor << source; } size_t hash = StringPieceHash()(descriptor); - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); Table& classes = image_class ? 
image_classes_ : classes_; Class* existing = LookupClassLocked(descriptor.data(), klass->GetClassLoader(), hash, classes); #ifndef NDEBUG @@ -1908,7 +1908,7 @@ Class* ClassLinker::InsertClass(const StringPiece& descriptor, Class* klass, boo bool ClassLinker::RemoveClass(const char* descriptor, const ClassLoader* class_loader) { size_t hash = Hash(descriptor); - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); typedef Table::iterator It; // TODO: C++0x auto // TODO: determine if its better to search classes_ or image_classes_ first ClassHelper kh; @@ -1933,7 +1933,7 @@ bool ClassLinker::RemoveClass(const char* descriptor, const ClassLoader* class_l Class* ClassLinker::LookupClass(const char* descriptor, const ClassLoader* class_loader) { size_t hash = Hash(descriptor); - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); // TODO: determine if its better to search classes_ or image_classes_ first Class* klass = LookupClassLocked(descriptor, class_loader, hash, classes_); if (klass != NULL) { @@ -1968,7 +1968,7 @@ Class* ClassLinker::LookupClassLocked(const char* descriptor, const ClassLoader* void ClassLinker::LookupClasses(const char* descriptor, std::vector<Class*>& classes) { classes.clear(); size_t hash = Hash(descriptor); - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); typedef Table::const_iterator It; // TODO: C++0x auto // TODO: determine if its better to search classes_ or image_classes_ first ClassHelper kh(NULL, this); @@ -1990,7 +1990,7 @@ void ClassLinker::LookupClasses(const char* descriptor, std::vector<Class*>& cla #if !defined(NDEBUG) && !defined(ART_USE_LLVM_COMPILER) static void CheckMethodsHaveGcMaps(Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!Runtime::Current()->IsStarted()) { return; } @@ -2339,7 +2339,7 @@ Method* ClassLinker::CreateProxyConstructor(SirtRef<Class>& klass, Class* proxy_ } static void CheckProxyConstructor(Method* constructor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(constructor->IsConstructor()); MethodHelper mh(constructor); CHECK_STREQ(mh.GetName(), "<init>"); @@ -2378,7 +2378,7 @@ Method* ClassLinker::CreateProxyMethod(SirtRef<Class>& klass, SirtRef<Method>& p } static void CheckProxyMethod(Method* method, SirtRef<Method>& prototype) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Basic sanity CHECK(!prototype->IsFinal()); CHECK(method->IsFinal()); @@ -2526,7 +2526,7 @@ bool ClassLinker::InitializeClass(Class* klass, bool can_run_clinit, bool can_in } bool ClassLinker::WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& lock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { while (true) { self->AssertNoPendingException(); lock.Wait(); @@ -3138,7 +3138,7 @@ bool ClassLinker::LinkStaticFields(SirtRef<Class>& klass) { struct LinkFieldsComparator { explicit LinkFieldsComparator(FieldHelper* fh) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : fh_(fh) {} // No thread safety analysis as will be called from STL. Checked lock held in constructor. 
bool operator()(const Field* field1, const Field* field2) NO_THREAD_SAFETY_ANALYSIS { @@ -3636,7 +3636,7 @@ void ClassLinker::DumpAllClasses(int flags) const { // lock held, because it might need to resolve a field's type, which would try to take the lock. std::vector<Class*> all_classes; { - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); typedef Table::const_iterator It; // TODO: C++0x auto for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) { all_classes.push_back(it->second); @@ -3652,18 +3652,18 @@ void ClassLinker::DumpAllClasses(int flags) const { } void ClassLinker::DumpForSigQuit(std::ostream& os) const { - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); os << "Loaded classes: " << image_classes_.size() << " image classes; " << classes_.size() << " allocated classes\n"; } size_t ClassLinker::NumLoadedClasses() const { - MutexLock mu(*GlobalSynchronization::classlinker_classes_lock_); + MutexLock mu(*Locks::classlinker_classes_lock_); return classes_.size() + image_classes_.size(); } pid_t ClassLinker::GetClassesLockOwner() { - return GlobalSynchronization::classlinker_classes_lock_->GetExclusiveOwnerTid(); + return Locks::classlinker_classes_lock_->GetExclusiveOwnerTid(); } pid_t ClassLinker::GetDexLockOwner() { diff --git a/src/class_linker.h b/src/class_linker.h index eed9f6a1a9..c21ff71951 100644 --- a/src/class_linker.h +++ b/src/class_linker.h @@ -46,60 +46,60 @@ class ClassLinker { // Creates the class linker by boot strapping from dex files. static ClassLinker* CreateFromCompiler(const std::vector<const DexFile*>& boot_class_path, InternTable* intern_table) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Creates the class linker from an image. static ClassLinker* CreateFromImage(InternTable* intern_table) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ~ClassLinker(); // Finds a class by its descriptor, loading it if necessary. // If class_loader is null, searches boot_class_path_. Class* FindClass(const char* descriptor, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Class* FindSystemClass(const char* descriptor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Define a new a class based on a ClassDef from a DexFile Class* DefineClass(const StringPiece& descriptor, ClassLoader* class_loader, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds a class by its descriptor, returning NULL if it isn't wasn't loaded // by the given 'class_loader'. Class* LookupClass(const char* descriptor, const ClassLoader* class_loader) - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds all the classes with the given descriptor, regardless of ClassLoader. 
void LookupClasses(const char* descriptor, std::vector<Class*>& classes) - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // General class unloading is not supported, this is used to prune // unwanted classes during image writing. bool RemoveClass(const char* descriptor, const ClassLoader* class_loader) - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpAllClasses(int flags) const - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os) const - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); - size_t NumLoadedClasses() const LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_); + size_t NumLoadedClasses() const LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. String* ResolveString(uint32_t string_idx, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { String* resolved_string = referrer->GetDexCacheStrings()->Get(string_idx); if (UNLIKELY(resolved_string == NULL)) { Class* declaring_class = referrer->GetDeclaringClass(); @@ -113,13 +113,13 @@ class ClassLinker { // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. String* ResolveString(const DexFile& dex_file, uint32_t string_idx, DexCache* dex_cache) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a Type with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identity the // target DexCache and ClassLoader to use for resolution. Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, const Class* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ResolveType(dex_file, type_idx, referrer->GetDexCache(), @@ -130,7 +130,7 @@ class ClassLinker { // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. 
Class* ResolveType(uint16_t type_idx, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* resolved_type = referrer->GetDexCacheResolvedTypes()->Get(type_idx); if (UNLIKELY(resolved_type == NULL)) { Class* declaring_class = referrer->GetDeclaringClass(); @@ -143,7 +143,7 @@ class ClassLinker { } Class* ResolveType(uint16_t type_idx, const Field* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* declaring_class = referrer->GetDeclaringClass(); DexCache* dex_cache = declaring_class->GetDexCache(); Class* resolved_type = dex_cache->GetResolvedType(type_idx); @@ -163,7 +163,7 @@ class ClassLinker { uint16_t type_idx, DexCache* dex_cache, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a method with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as @@ -176,10 +176,10 @@ class ClassLinker { ClassLoader* class_loader, const Method* referrer, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* ResolveMethod(uint32_t method_idx, const Method* referrer, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* resolved_method = referrer->GetDexCacheResolvedMethods()->Get(method_idx); if (UNLIKELY(resolved_method == NULL || resolved_method->IsRuntimeMethod())) { Class* declaring_class = referrer->GetDeclaringClass(); @@ -192,7 +192,7 @@ class ClassLinker { } Field* ResolveField(uint32_t field_idx, const Method* referrer, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* resolved_field = referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); if (UNLIKELY(resolved_field == NULL)) { @@ -215,7 +215,7 @@ class ClassLinker { DexCache* dex_cache, ClassLoader* class_loader, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a field with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as @@ -225,28 +225,28 @@ class ClassLinker { uint32_t field_idx, DexCache* dex_cache, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get shorty from method index without resolution. Used to do handlerization. const char* MethodShorty(uint32_t method_idx, Method* referrer, uint32_t* length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns true on success, false if there's an exception pending. // can_run_clinit=false allows the compiler to attempt to init a class, // given the restriction that no <clinit> execution is possible. bool EnsureInitialized(Class* c, bool can_run_clinit, bool can_init_fields) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initializes classes that have instances in the image but that have // <clinit> methods so they could not be initialized by the compiler. 
- void RunRootClinits() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void RunRootClinits() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RegisterDexFile(const DexFile& dex_file) LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RegisterDexFile(const DexFile& dex_file, SirtRef<DexCache>& dex_cache) LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RegisterOatFile(const OatFile& oat_file) LOCKS_EXCLUDED(dex_lock_); @@ -256,26 +256,26 @@ class ClassLinker { } void VisitClasses(ClassVisitor* visitor, void* arg) const - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); // Less efficient variant of VisitClasses that doesn't hold the classlinker_classes_lock_ // when calling the visitor. void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) const - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); void VisitRoots(Heap::RootVisitor* visitor, void* arg) const - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_, dex_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_, dex_lock_); const DexFile& FindDexFile(const DexCache* dex_cache) const LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); DexCache* FindDexCache(const DexFile& dex_file) const LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsDexFileRegistered(const DexFile& dex_file) const LOCKS_EXCLUDED(dex_lock_); void FixupDexCaches(Method* resolution_method) const LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Generate an oat file from a dex file bool GenerateOatFile(const std::string& dex_filename, @@ -302,51 +302,51 @@ class ClassLinker { // does not match the OatFile. const DexFile* FindDexFileInOatFileFromDexLocation(const std::string& location) LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns true if oat file contains the dex file with the given location and checksum static bool VerifyOatFileChecksums(const OatFile* oat_file, const std::string& dex_location, uint32_t dex_location_checksum) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // TODO: replace this with multiple methods that allocate the correct managed type. 
template <class T> ObjectArray<T>* AllocObjectArray(size_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ObjectArray<T>::Alloc(GetClassRoot(kObjectArrayClass), length); } ObjectArray<Class>* AllocClassArray(size_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ObjectArray<Class>::Alloc(GetClassRoot(kClassArrayClass), length); } ObjectArray<StackTraceElement>* AllocStackTraceElementArray(size_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void VerifyClass(Class* klass) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void VerifyClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool VerifyClassUsingOatFile(const DexFile& dex_file, Class* klass, Class::Status& oat_file_class_status) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ResolveClassExceptionHandlerTypes(const DexFile& dex_file, Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, Method* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Class* CreateProxyClass(String* name, ObjectArray<Class>* interfaces, ClassLoader* loader, ObjectArray<Method>* methods, ObjectArray<ObjectArray<Class> >* throws) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::string GetDescriptorForProxy(const Class* proxy_class) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindMethodForProxy(const Class* proxy_class, const Method* proxy_method) LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the oat code for a method when its class isn't yet initialized const void* GetOatCodeFor(const Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Relocate the OatFiles (ELF images) void RelocateExecutable() LOCKS_EXCLUDED(dex_lock_); @@ -358,59 +358,59 @@ class ClassLinker { explicit ClassLinker(InternTable*); const OatFile::OatMethod GetOatMethodFor(const Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initialize class linker by bootstraping from dex files void InitFromCompiler(const std::vector<const DexFile*>& boot_class_path) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initialize class linker from one or more images. 
- void InitFromImage() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void InitFromImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); OatFile* OpenOat(const ImageSpace* space) LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void InitFromImageCallback(Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FinishInit() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void FinishInit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // For early bootstrapping by Init Class* AllocClass(Class* java_lang_Class, size_t class_size) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Alloc* convenience functions to avoid needing to pass in Class* // values that are known to the ClassLinker such as // kObjectArrayClass and kJavaLangString etc. - Class* AllocClass(size_t class_size) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Class* AllocClass(size_t class_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); DexCache* AllocDexCache(const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Field* AllocField() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - Method* AllocMethod() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Field* AllocField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Method* AllocMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); InterfaceEntry* AllocInterfaceEntry(Class* interface) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Class* CreatePrimitiveClass(const char* descriptor, Primitive::Type type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return InitializePrimitiveClass(AllocClass(sizeof(Class)), descriptor, type); } Class* InitializePrimitiveClass(Class* primitive_class, const char* descriptor, Primitive::Type type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Class* CreateArrayClass(const std::string& descriptor, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void AppendToBootClassPath(const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void AppendToBootClassPath(const DexFile& dex_file, SirtRef<DexCache>& dex_cache) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, Class* c, SafeMap<uint32_t, Field*>& field_map) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t SizeOfClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def); @@ -419,17 +419,17 @@ class ClassLinker { const DexFile::ClassDef& dex_class_def, SirtRef<Class>& klass, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it, SirtRef<Class>& klass, SirtRef<Field>& dst); void 
LoadMethod(const DexFile& dex_file, const ClassDataItemIterator& dex_method, SirtRef<Class>& klass, SirtRef<Method>& dst) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupStaticTrampolines(Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds the associated oat class for a dex_file and descriptor const OatFile::OatClass* GetOatClass(const DexFile& dex_file, const char* descriptor); @@ -438,70 +438,70 @@ class ClassLinker { // the class was inserted, otherwise returns an existing class with // the same descriptor and ClassLoader. Class* InsertClass(const StringPiece& descriptor, Class* klass, bool image_class) - LOCKS_EXCLUDED(GlobalSynchronization::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RegisterDexFileLocked(const DexFile& dex_file, SirtRef<DexCache>& dex_cache) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsDexFileRegisteredLocked(const DexFile& dex_file) const EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); void RegisterOatFileLocked(const OatFile& oat_file) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); bool InitializeClass(Class* klass, bool can_run_clinit, bool can_init_statics) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& lock); bool ValidateSuperClassDescriptors(const Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool InitializeSuperClass(Class* klass, bool can_run_clinit, bool can_init_fields) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initialize static fields, returns true if fields were initialized. 
bool InitializeStaticFields(Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsSameDescriptorInDifferentClassContexts(const char* descriptor, const Class* klass1, const Class* klass2) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsSameMethodSignatureInDifferentClassContexts(const Method* descriptor, const Class* klass1, const Class* klass2) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkClass(SirtRef<Class>& klass, ObjectArray<Class>* interfaces) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkSuperClass(SirtRef<Class>& klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LoadSuperAndInterfaces(SirtRef<Class>& klass, const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkMethods(SirtRef<Class>& klass, ObjectArray<Class>* interfaces) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkVirtualMethods(SirtRef<Class>& klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkInterfaceMethods(SirtRef<Class>& klass, ObjectArray<Class>* interfaces) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkStaticFields(SirtRef<Class>& klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkInstanceFields(SirtRef<Class>& klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkFields(SirtRef<Class>& klass, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CreateReferenceInstanceOffsets(SirtRef<Class>& klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CreateReferenceStaticOffsets(SirtRef<Class>& klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CreateReferenceOffsets(SirtRef<Class>& klass, bool is_static, uint32_t reference_offsets) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // For use by ImageWriter to find DexCaches for its roots const std::vector<DexCache*>& GetDexCaches() { @@ -518,12 +518,12 @@ class ClassLinker { const std::string& dex_location, uint32_t dex_location_checksum) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* CreateProxyConstructor(SirtRef<Class>& klass, Class* proxy_class) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* CreateProxyMethod(SirtRef<Class>& klass, SirtRef<Method>& prototype) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::vector<const DexFile*> boot_class_path_; @@ -537,13 +537,13 @@ class ClassLinker { // Class* instances. 
Results should be compared for a matching // Class::descriptor_ and Class::class_loader_. typedef std::multimap<size_t, Class*> Table; - Table image_classes_ GUARDED_BY(GlobalSynchronization::classlinker_classes_lock_); - Table classes_ GUARDED_BY(GlobalSynchronization::classlinker_classes_lock_); + Table image_classes_ GUARDED_BY(Locks::classlinker_classes_lock_); + Table classes_ GUARDED_BY(Locks::classlinker_classes_lock_); Class* LookupClassLocked(const char* descriptor, const ClassLoader* class_loader, size_t hash, const Table& classes) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::classlinker_classes_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + EXCLUSIVE_LOCKS_REQUIRED(Locks::classlinker_classes_lock_); // indexes into class_roots_. // needs to be kept in sync with class_roots_descriptors_. @@ -585,7 +585,7 @@ class ClassLinker { ObjectArray<Class>* class_roots_; Class* GetClassRoot(ClassRoot class_root) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(class_roots_ != NULL); Class* klass = class_roots_->Get(class_root); DCHECK(klass != NULL); @@ -593,7 +593,7 @@ class ClassLinker { } void SetClassRoot(ClassRoot class_root, Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ObjectArray<Class>* GetClassRoots() { DCHECK(class_roots_ != NULL); diff --git a/src/class_linker_test.cc b/src/class_linker_test.cc index 1eb5e0d1c4..f249c6a1c2 100644 --- a/src/class_linker_test.cc +++ b/src/class_linker_test.cc @@ -30,7 +30,7 @@ namespace art { class ClassLinkerTest : public CommonTest { protected: void AssertNonExistentClass(const std::string& descriptor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { EXPECT_TRUE(class_linker_->FindSystemClass(descriptor.c_str()) == NULL); Thread* self = Thread::Current(); EXPECT_TRUE(self->IsExceptionPending()); @@ -41,12 +41,12 @@ class ClassLinkerTest : public CommonTest { } void AssertPrimitiveClass(const std::string& descriptor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { AssertPrimitiveClass(descriptor, class_linker_->FindSystemClass(descriptor.c_str())); } void AssertPrimitiveClass(const std::string& descriptor, const Class* primitive) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassHelper primitive_ch(primitive); ASSERT_TRUE(primitive != NULL); ASSERT_TRUE(primitive->GetClass() != NULL); @@ -83,7 +83,7 @@ class ClassLinkerTest : public CommonTest { void AssertArrayClass(const std::string& array_descriptor, const std::string& component_type, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* array = class_linker_->FindClass(array_descriptor.c_str(), class_loader); ClassHelper array_component_ch(array->GetComponentType()); EXPECT_STREQ(component_type.c_str(), array_component_ch.GetDescriptor()); @@ -92,7 +92,7 @@ class ClassLinkerTest : public CommonTest { } void AssertArrayClass(const std::string& array_descriptor, Class* array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassHelper kh(array); ASSERT_TRUE(array != NULL); ASSERT_TRUE(array->GetClass() != NULL); @@ -135,7 +135,7 @@ 
class ClassLinkerTest : public CommonTest { EXPECT_STREQ(kh.GetDescriptor(), "Ljava/io/Serializable;"); } - void AssertMethod(Method* method) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void AssertMethod(Method* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MethodHelper mh(method); EXPECT_TRUE(method != NULL); EXPECT_TRUE(method->GetClass() != NULL); @@ -157,7 +157,7 @@ class ClassLinkerTest : public CommonTest { } void AssertField(Class* klass, Field* field) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FieldHelper fh(field); EXPECT_TRUE(field != NULL); EXPECT_TRUE(field->GetClass() != NULL); @@ -167,7 +167,7 @@ class ClassLinkerTest : public CommonTest { } void AssertClass(const std::string& descriptor, Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassHelper kh(klass); EXPECT_STREQ(descriptor.c_str(), kh.GetDescriptor()); if (descriptor == "Ljava/lang/Object;") { @@ -291,7 +291,7 @@ class ClassLinkerTest : public CommonTest { } void AssertDexFileClass(ClassLoader* class_loader, const std::string& descriptor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ASSERT_TRUE(descriptor != NULL); Class* klass = class_linker_->FindSystemClass(descriptor.c_str()); ASSERT_TRUE(klass != NULL); @@ -307,7 +307,7 @@ class ClassLinkerTest : public CommonTest { } void AssertDexFile(const DexFile* dex, ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ASSERT_TRUE(dex != NULL); // Verify all the classes defined in this file @@ -350,7 +350,7 @@ struct CheckOffsets { std::string class_descriptor; std::vector<CheckOffset> offsets; - bool Check() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool Check() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* klass = Runtime::Current()->GetClassLinker()->FindSystemClass(class_descriptor.c_str()); CHECK(klass != NULL) << class_descriptor; diff --git a/src/common_test.h b/src/common_test.h index 4424d91059..dee40e301e 100644 --- a/src/common_test.h +++ b/src/common_test.h @@ -209,7 +209,7 @@ class CommonTest : public testing::Test { ); } - void MakeExecutable(Method* method) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void MakeExecutable(Method* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(method != NULL); MethodHelper mh(method); @@ -452,7 +452,7 @@ class CommonTest : public testing::Test { } jobject LoadDex(const char* dex_name) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile* dex_file = OpenTestDexFile(dex_name); CHECK(dex_file != NULL); class_linker_->RegisterDexFile(*dex_file); @@ -479,7 +479,7 @@ class CommonTest : public testing::Test { } } - void CompileMethod(Method* method) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void CompileMethod(Method* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(method != NULL); compiler_->CompileOne(method); MakeExecutable(method); @@ -491,7 +491,7 @@ class CommonTest : public testing::Test { const char* class_name, const char* method_name, const char* signature) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string class_descriptor(DotToDescriptor(class_name)); Class* klass 
= class_linker_->FindClass(class_descriptor.c_str(), class_loader); CHECK(klass != NULL) << "Class not found " << class_name; @@ -505,7 +505,7 @@ class CommonTest : public testing::Test { const char* class_name, const char* method_name, const char* signature) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string class_descriptor(DotToDescriptor(class_name)); Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader); CHECK(klass != NULL) << "Class not found " << class_name; diff --git a/src/common_throws.cc b/src/common_throws.cc index 5bd30b4ec0..758e03b1ff 100644 --- a/src/common_throws.cc +++ b/src/common_throws.cc @@ -27,7 +27,7 @@ namespace art { static void AddReferrerLocation(std::ostream& os, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (referrer != NULL) { ClassHelper kh(referrer->GetDeclaringClass()); std::string location(kh.GetLocation()); @@ -38,7 +38,7 @@ static void AddReferrerLocation(std::ostream& os, const Method* referrer) } static void AddReferrerLocationFromClass(std::ostream& os, Class* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (referrer != NULL) { ClassHelper kh(referrer); std::string location(kh.GetLocation()); @@ -63,7 +63,7 @@ void ThrowNullPointerExceptionForMethodAccess(Method* caller, uint32_t method_id DexCache* dex_cache = caller->GetDeclaringClass()->GetDexCache(); const DexFile& dex_file = Runtime::Current()->GetClassLinker()->FindDexFile(dex_cache); std::ostringstream msg; - msg << "Attempt to invoke " << ToStr<InvokeType>(type).str() << " method '" + msg << "Attempt to invoke " << type << " method '" << PrettyMethod(method_idx, dex_file, true) << "' on a null object reference"; Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", msg.str().c_str()); } @@ -131,6 +131,8 @@ void ThrowNullPointerExceptionFromDexPC(Method* throw_method, uint32_t dex_pc) { "Attempt to get length of null array"); break; default: { + // TODO: We should have covered all the cases where we expect a NPE above, this + // message/logging is so we can improve any cases we've missed in the future. 
const DexFile& dex_file = Runtime::Current()->GetClassLinker() ->FindDexFile(throw_method->GetDeclaringClass()->GetDexCache()); std::string message("Null pointer exception during instruction '"); @@ -146,7 +148,7 @@ void ThrowNullPointerExceptionFromDexPC(Method* throw_method, uint32_t dex_pc) { void ThrowIllegalAccessErrorClass(Class* referrer, Class* accessed) { std::ostringstream msg; - msg << "Illegal class access: '" << PrettyDescriptor(referrer) << "' -> '" + msg << "Illegal class access: '" << PrettyDescriptor(referrer) << "' attempting to access '" << PrettyDescriptor(accessed) << "'"; AddReferrerLocationFromClass(msg, referrer); Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str()); @@ -157,8 +159,8 @@ void ThrowIllegalAccessErrorClassForMethodDispatch(Class* referrer, Class* acces const Method* called, InvokeType type) { std::ostringstream msg; - msg << "Illegal class access ('" << PrettyDescriptor(referrer) << "' -> '" - << PrettyDescriptor(accessed) << "') in attempt to invoke " << ToStr<InvokeType>(type).str() + msg << "Illegal class access ('" << PrettyDescriptor(referrer) << "' attempting to access '" + << PrettyDescriptor(accessed) << "') in attempt to invoke " << type << " method " << PrettyMethod(called).c_str(); AddReferrerLocation(msg, caller); Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str()); @@ -220,7 +222,8 @@ void ThrowIncompatibleClassChangeErrorField(const Field* resolved_field, bool is const Method* referrer) { std::ostringstream msg; msg << "Expected '" << PrettyField(resolved_field) << "' to be a " - << (is_static ? "static" : "instance") << " field"; + << (is_static ? "static" : "instance") << " field" << " rather than a " + << (is_static ? "instance" : "static") << " field"; AddReferrerLocation(msg, referrer); Thread::Current()->ThrowNewException("Ljava/lang/IncompatibleClassChangeError;", msg.str().c_str()); diff --git a/src/common_throws.h b/src/common_throws.h index 47186b04f2..ca2211f007 100644 --- a/src/common_throws.h +++ b/src/common_throws.h @@ -25,56 +25,56 @@ namespace art { // NullPointerException void ThrowNullPointerExceptionForFieldAccess(Field* field, bool is_read) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowNullPointerExceptionForMethodAccess(Method* caller, uint32_t method_idx, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowNullPointerExceptionFromDexPC(Method* throw_method, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // IllegalAccessError void ThrowIllegalAccessErrorClass(Class* referrer, Class* accessed) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowIllegalAccessErrorClassForMethodDispatch(Class* referrer, Class* accessed, const Method* caller, const Method* called, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowIllegalAccessErrorMethod(Class* referrer, Method* accessed) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowIllegalAccessErrorField(Class* referrer, Field* accessed) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void 
ThrowIllegalAccessErrorFinalField(const Method* referrer, Field* accessed) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // IncompatibleClassChangeError void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type, Method* method, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const Method* interface_method, Object* this_object, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowIncompatibleClassChangeErrorField(const Field* resolved_field, bool is_static, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // NoSuchMethodError void ThrowNoSuchMethodError(InvokeType type, Class* c, const StringPiece& name, const StringPiece& signature, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowNoSuchMethodError(uint32_t method_idx, const Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } // namespace art diff --git a/src/compiler.cc b/src/compiler.cc index b05e688fd2..69de8caaba 100644 --- a/src/compiler.cc +++ b/src/compiler.cc @@ -669,7 +669,7 @@ bool Compiler::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, static Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa, OatCompilationUnit* mUnit) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); ClassLoader* class_loader = soa.Decode<ClassLoader*>(mUnit->class_loader_); const DexFile::MethodId& referrer_method_id = mUnit->dex_file_->GetMethodId(mUnit->method_idx_); @@ -680,7 +680,7 @@ static Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa, static Field* ComputeFieldReferencedFromCompilingMethod(ScopedObjectAccess& soa, OatCompilationUnit* mUnit, uint32_t field_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); ClassLoader* class_loader = soa.Decode<ClassLoader*>(mUnit->class_loader_); return mUnit->class_linker_->ResolveField(*mUnit->dex_file_, field_idx, dex_cache, @@ -691,7 +691,7 @@ static Method* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& so OatCompilationUnit* mUnit, uint32_t method_idx, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_); ClassLoader* class_loader = soa.Decode<ClassLoader*>(mUnit->class_loader_); return mUnit->class_linker_->ResolveMethod(*mUnit->dex_file_, method_idx, dex_cache, @@ -1026,7 +1026,7 @@ class WorkerThread { } private: - static void* Go(void* arg) LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + static void* Go(void* arg) LOCKS_EXCLUDED(Locks::mutator_lock_) { WorkerThread* worker = reinterpret_cast<WorkerThread*>(arg); Runtime* runtime = Runtime::Current(); if (worker->spawn_) { @@ -1039,11 +1039,11 @@ class WorkerThread { return NULL; } - void 
Go() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + void Go() LOCKS_EXCLUDED(Locks::mutator_lock_) { Go(this); } - void Run() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + void Run() LOCKS_EXCLUDED(Locks::mutator_lock_) { Thread* self = Thread::Current(); for (size_t i = begin_; i < end_; i += stripe_) { callback_(context_, i); } @@ -1066,7 +1066,7 @@ class WorkerThread { static void ForAll(CompilationContext* context, size_t begin, size_t end, Callback callback, size_t thread_count) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { Thread* self = Thread::Current(); self->AssertNoPendingException(); CHECK_GT(thread_count, 0U); @@ -1080,7 +1080,7 @@ static void ForAll(CompilationContext* context, size_t begin, size_t end, Callba // Ensure we're suspended while we're blocked waiting for the other threads to finish (worker // thread destructors called below perform the join). { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_NE(self->GetState(), kRunnable); } STLDeleteElements(&threads); @@ -1096,7 +1096,7 @@ static void ForAll(CompilationContext* context, size_t begin, size_t end, Callba static bool SkipClass(ClassLoader* class_loader, const DexFile& dex_file, const DexFile::ClassDef& class_def) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (class_loader == NULL) { return false; } @@ -1113,7 +1113,7 @@ static bool SkipClass(ClassLoader* class_loader, } static void ResolveClassFieldsAndMethods(const CompilationContext* context, size_t class_def_index) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { ScopedObjectAccess soa(Thread::Current()); ClassLoader* class_loader = soa.Decode<ClassLoader*>(context->GetClassLoader()); const DexFile& dex_file = *context->GetDexFile(); @@ -1182,7 +1182,7 @@ static void ResolveClassFieldsAndMethods(const CompilationContext* context, size } static void ResolveType(const CompilationContext* context, size_t type_idx) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { // Class derived values are more complicated: they require the linker and loader. ScopedObjectAccess soa(Thread::Current()); ClassLinker* class_linker = context->GetClassLinker(); @@ -1221,7 +1221,7 @@ void Compiler::Verify(jobject class_loader, const std::vector<const DexFile*>& d } static void VerifyClass(const CompilationContext* context, size_t class_def_index) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { ScopedObjectAccess soa(Thread::Current()); const DexFile::ClassDef& class_def = context->GetDexFile()->GetClassDef(class_def_index); const char* descriptor = context->GetDexFile()->GetClassDescriptor(class_def); @@ -1474,7 +1474,7 @@ void ForClassesInAllDexFiles(CompilationContext* worker_context, // Ensure we're suspended while we're blocked waiting for the other threads to finish (worker // thread destructors called below perform the join).
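// Aside: the Run() loop above hands out indices in stripes (t, t + stripe, t + 2*stripe, ...)
// rather than in contiguous blocks, which balances load when per-index cost varies, and
// ForAll() then joins the workers through their destructors. Below is a minimal standalone
// sketch of that pattern using std::thread; every name in it (ForAllStriped, callback) is
// illustrative and not taken from this patch.
#include <cstddef>
#include <functional>
#include <thread>
#include <vector>

void ForAllStriped(size_t begin, size_t end, size_t thread_count,
                   const std::function<void(size_t)>& callback) {
  std::vector<std::thread> threads;
  for (size_t t = 0; t < thread_count; ++t) {
    threads.emplace_back([=, &callback] {
      for (size_t i = begin + t; i < end; i += thread_count) {
        callback(i);  // one stripe of the index space
      }
    });
  }
  for (std::thread& worker : threads) {
    worker.join();  // mirrors the join performed by the WorkerThread destructors
  }
}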
{ - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_NE(self->GetState(), kRunnable); } STLDeleteElements(&threads); diff --git a/src/compiler.h b/src/compiler.h index 4a7fc2a820..c27bf956e8 100644 --- a/src/compiler.h +++ b/src/compiler.h @@ -53,11 +53,11 @@ class Compiler { ~Compiler(); void CompileAll(jobject class_loader, const std::vector<const DexFile*>& dex_files) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Compile a single Method void CompileOne(const Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsDebuggingSupported() { return support_debugging_; } @@ -73,16 +73,16 @@ class Compiler { // Stub to throw AbstractMethodError static ByteArray* CreateAbstractMethodErrorStub(InstructionSet instruction_set) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Generate the trampoline that's invoked by unresolved direct methods static ByteArray* CreateResolutionStub(InstructionSet instruction_set, Runtime::TrampolineType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static ByteArray* CreateJniDlsymLookupStub(InstructionSet instruction_set) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // A class is uniquely located by its DexFile and the class_defs_ table index into that DexFile typedef std::pair<const DexFile*, uint32_t> ClassReference; @@ -107,38 +107,38 @@ class Compiler { // Callbacks from compiler to see what runtime checks must be generated. bool CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Are runtime access checks necessary in the compiled code? bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, uint32_t type_idx) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Are runtime access and instantiable checks necessary in the code? bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, uint32_t type_idx) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Can we fast path instance field access? Computes field's offset and volatility. bool ComputeInstanceFieldInfo(uint32_t field_idx, OatCompilationUnit* mUnit, int& field_offset, bool& is_volatile, bool is_put) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Can we fastpath static field access? Computes field's offset, volatility and whether the // field is within the referrer (which can avoid checking class initialization). bool ComputeStaticFieldInfo(uint32_t field_idx, OatCompilationUnit* mUnit, int& field_offset, int& ssb_index, bool& is_referrers_class, bool& is_volatile, bool is_put) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Can we fastpath an interface, super class or virtual method call? Computes method's vtable // index.
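// Aside: compiler.h leans heavily on LOCKS_EXCLUDED and SHARED_LOCKS_REQUIRED, which exist so
// that Clang's thread-safety analysis ("annotalysis", as comments in this patch call it) can
// check locking contracts at compile time. Below is one plausible way such macros are defined;
// ART's real definitions live in its own mutex headers, so treat the exact spellings here as
// an assumption, not a quotation of the patch.
#if defined(__clang__)
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define SHARED_LOCKS_REQUIRED(...) __attribute__((shared_locks_required(__VA_ARGS__)))
#define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))
#define LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
#else
// Other compilers see empty macros; the annotations are documentation only.
#define GUARDED_BY(x)
#define SHARED_LOCKS_REQUIRED(...)
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#define LOCKS_EXCLUDED(...)
#endif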
bool ComputeInvokeInfo(uint32_t method_idx, OatCompilationUnit* mUnit, InvokeType& type, int& vtable_idx, uintptr_t& direct_code, uintptr_t& direct_method) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Record patch information for later fix up. void AddCodePatch(const DexFile* dex_file, @@ -231,14 +231,14 @@ class Compiler { // Compute constant code and method pointers when possible void GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type, Method* method, uintptr_t& direct_code, uintptr_t& direct_method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Checks if class specified by type_idx is one of the image_classes_ bool IsImageClass(const std::string& descriptor) const; void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files, TimingLogger& timings) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void PostCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files); // Attempt to resolve all type, methods, fields, and strings @@ -246,37 +246,37 @@ class Compiler { // ordering semantics. void Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files, TimingLogger& timings) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void ResolveDexFile(jobject class_loader, const DexFile& dex_file, TimingLogger& timings) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files); void VerifyDexFile(jobject class_loader, const DexFile& dex_file) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void InitializeClassesWithoutClinit(jobject class_loader, const std::vector<const DexFile*>& dex_files) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void InitializeClassesWithoutClinit(jobject class_loader, const DexFile& dex_file) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, compiled_classes_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_); void Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files); void CompileDexFile(jobject class_loader, const DexFile& dex_file) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint32_t method_idx, jobject class_loader, const DexFile& dex_file) LOCKS_EXCLUDED(compiled_methods_lock_); static void CompileClass(const CompilationContext* context, size_t class_def_index) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void SetGcMaps(jobject class_loader, const std::vector<const DexFile*>& dex_files) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void SetGcMapsDexFile(jobject class_loader, const DexFile& dex_file) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void SetGcMapsMethod(const DexFile& dex_file, Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void InsertInvokeStub(const std::string& key, const CompiledInvokeStub* compiled_invoke_stub) 
LOCKS_EXCLUDED(compiled_invoke_stubs_lock_); diff --git a/src/compiler_llvm/runtime_support_llvm.cc b/src/compiler_llvm/runtime_support_llvm.cc index dd56ac896c..4384e87331 100644 --- a/src/compiler_llvm/runtime_support_llvm.cc +++ b/src/compiler_llvm/runtime_support_llvm.cc @@ -85,7 +85,7 @@ void art_unlock_object_from_code(Object* obj, Thread* thread) } void art_test_suspend_from_code(Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { thread->FullSuspendCheck(); } @@ -115,21 +115,21 @@ bool art_is_exception_pending_from_code() { } void art_throw_div_zero_from_code() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); thread->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero"); } void art_throw_array_bounds_from_code(int32_t index, int32_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); thread->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", "length=%d; index=%d", length, index); } void art_throw_no_such_method_from_code(int32_t method_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); // We need the calling method as context for the method_idx Method* method = thread->GetCurrentMethod(); @@ -137,7 +137,7 @@ void art_throw_no_such_method_from_code(int32_t method_idx) } void art_throw_null_pointer_exception_from_code(uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); NthCallerVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack(), 0); visitor.WalkStack(); @@ -146,7 +146,7 @@ void art_throw_null_pointer_exception_from_code(uint32_t dex_pc) } void art_throw_stack_overflow_from_code() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); if (Runtime::Current()->IsMethodTracingActive()) { TraceMethodUnwindFromCode(thread); @@ -158,7 +158,7 @@ void art_throw_stack_overflow_from_code() } void art_throw_exception_from_code(Object* exception) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); if (exception == NULL) { thread->ThrowNewException("Ljava/lang/NullPointerException;", "throw with null exception"); @@ -169,7 +169,7 @@ void art_throw_exception_from_code(Object* exception) int32_t art_find_catch_block_from_code(Method* current_method, uint32_t ti_offset) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* thread = art_get_current_thread_from_code(); Class* exception_type = thread->GetException()->GetClass(); MethodHelper mh(current_method); @@ -208,14 +208,14 @@ int32_t art_find_catch_block_from_code(Method* current_method, Object* art_alloc_object_from_code(uint32_t type_idx, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocObjectFromCode(type_idx, referrer, thread, false); } Object* 
art_alloc_object_from_code_with_access_check(uint32_t type_idx, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocObjectFromCode(type_idx, referrer, thread, true); } @@ -223,7 +223,7 @@ Object* art_alloc_array_from_code(uint32_t type_idx, Method* referrer, uint32_t length, Thread* /*thread*/) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocArrayFromCode(type_idx, referrer, length, false); } @@ -231,7 +231,7 @@ Object* art_alloc_array_from_code_with_access_check(uint32_t type_idx, Method* referrer, uint32_t length, Thread* /*thread*/) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocArrayFromCode(type_idx, referrer, length, true); } @@ -239,7 +239,7 @@ Object* art_check_and_alloc_array_from_code(uint32_t type_idx, Method* referrer, uint32_t length, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false); } @@ -247,13 +247,13 @@ Object* art_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx, Method* referrer, uint32_t length, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true); } static Method* FindMethodHelper(uint32_t method_idx, Object* this_object, Method* caller_method, bool access_check, InvokeType type, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type); if (UNLIKELY(method == NULL)) { method = FindMethodFromCode(method_idx, this_object, caller_method, @@ -279,7 +279,7 @@ Object* art_find_static_method_from_code_with_access_check(uint32_t method_idx, Object* this_object, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread); } @@ -287,7 +287,7 @@ Object* art_find_direct_method_from_code_with_access_check(uint32_t method_idx, Object* this_object, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread); } @@ -295,7 +295,7 @@ Object* art_find_virtual_method_from_code_with_access_check(uint32_t method_idx, Object* this_object, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread); } @@ -303,7 +303,7 @@ Object* art_find_super_method_from_code_with_access_check(uint32_t method_idx, Object* this_object, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread); } @@ -312,7 +312,7 @@ art_find_interface_method_from_code_with_access_check(uint32_t method_idx, Object* this_object, 
Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread); } @@ -320,40 +320,40 @@ Object* art_find_interface_method_from_code(uint32_t method_idx, Object* this_object, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread); } Object* art_initialize_static_storage_from_code(uint32_t type_idx, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false); } Object* art_initialize_type_from_code(uint32_t type_idx, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false); } Object* art_initialize_type_and_verify_access_from_code(uint32_t type_idx, Method* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when caller isn't guaranteed to have access to a type and the dex cache may be // unpopulated return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true); } Object* art_resolve_string_from_code(Method* referrer, uint32_t string_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ResolveStringFromCode(referrer, string_idx); } int32_t art_set32_static_from_code(uint32_t field_idx, Method* referrer, int32_t new_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint32_t)); if (LIKELY(field != NULL)) { field->Set32(NULL, new_value); @@ -369,7 +369,7 @@ int32_t art_set32_static_from_code(uint32_t field_idx, Method* referrer, int32_t } int32_t art_set64_static_from_code(uint32_t field_idx, Method* referrer, int64_t new_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t)); if (LIKELY(field != NULL)) { field->Set64(NULL, new_value); @@ -385,7 +385,7 @@ int32_t art_set64_static_from_code(uint32_t field_idx, Method* referrer, int64_t } int32_t art_set_obj_static_from_code(uint32_t field_idx, Method* referrer, Object* new_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*)); if (LIKELY(field != NULL)) { field->SetObj(NULL, new_value); @@ -401,7 +401,7 @@ int32_t art_set_obj_static_from_code(uint32_t field_idx, Method* referrer, Objec } int32_t art_get32_static_from_code(uint32_t field_idx, Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t)); if (LIKELY(field != NULL)) { return field->Get32(NULL); @@ -415,7 +415,7 @@ int32_t art_get32_static_from_code(uint32_t 
field_idx, Method* referrer) } int64_t art_get64_static_from_code(uint32_t field_idx, Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t)); if (LIKELY(field != NULL)) { return field->Get64(NULL); @@ -429,7 +429,7 @@ int64_t art_get64_static_from_code(uint32_t field_idx, Method* referrer) } Object* art_get_obj_static_from_code(uint32_t field_idx, Method* referrer) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*)); if (LIKELY(field != NULL)) { return field->GetObj(NULL); @@ -444,7 +444,7 @@ Object* art_get_obj_static_from_code(uint32_t field_idx, Method* referrer) int32_t art_set32_instance_from_code(uint32_t field_idx, Method* referrer, Object* obj, uint32_t new_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t)); if (LIKELY(field != NULL)) { field->Set32(obj, new_value); @@ -461,7 +461,7 @@ int32_t art_set32_instance_from_code(uint32_t field_idx, Method* referrer, int32_t art_set64_instance_from_code(uint32_t field_idx, Method* referrer, Object* obj, int64_t new_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t)); if (LIKELY(field != NULL)) { field->Set64(obj, new_value); @@ -478,7 +478,7 @@ int32_t art_set64_instance_from_code(uint32_t field_idx, Method* referrer, int32_t art_set_obj_instance_from_code(uint32_t field_idx, Method* referrer, Object* obj, Object* new_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(Object*)); if (LIKELY(field != NULL)) { field->SetObj(obj, new_value); @@ -494,7 +494,7 @@ int32_t art_set_obj_instance_from_code(uint32_t field_idx, Method* referrer, } int32_t art_get32_instance_from_code(uint32_t field_idx, Method* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t)); if (LIKELY(field != NULL)) { return field->Get32(obj); @@ -508,7 +508,7 @@ int32_t art_get32_instance_from_code(uint32_t field_idx, Method* referrer, Objec } int64_t art_get64_instance_from_code(uint32_t field_idx, Method* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t)); if (LIKELY(field != NULL)) { return field->Get64(obj); @@ -522,7 +522,7 @@ int64_t art_get64_instance_from_code(uint32_t field_idx, Method* referrer, Objec } Object* art_get_obj_instance_from_code(uint32_t field_idx, Method* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(Object*)); if (LIKELY(field != NULL)) { return field->GetObj(obj); @@ -537,7 +537,7 @@ 
Object* art_get_obj_instance_from_code(uint32_t field_idx, Method* referrer, Obj void art_fill_array_data_from_code(Method* method, uint32_t dex_pc, Array* array, uint32_t payload_offset) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Test: Is array equal to null? (Guard NullPointerException) if (UNLIKELY(array == NULL)) { art_throw_null_pointer_exception_from_code(dex_pc); @@ -577,14 +577,14 @@ void art_fill_array_data_from_code(Method* method, uint32_t dex_pc, //---------------------------------------------------------------------------- int32_t art_is_assignable_from_code(const Class* dest_type, const Class* src_type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(dest_type != NULL); DCHECK(src_type != NULL); return dest_type->IsAssignableFrom(src_type) ? 1 : 0; } void art_check_cast_from_code(const Class* dest_type, const Class* src_type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(dest_type->IsClass()) << PrettyClass(dest_type); DCHECK(src_type->IsClass()) << PrettyClass(src_type); if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) { @@ -597,7 +597,7 @@ void art_check_cast_from_code(const Class* dest_type, const Class* src_type) } void art_check_put_array_element_from_code(const Object* element, const Object* array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (element == NULL) { return; } @@ -631,7 +631,7 @@ uint32_t art_jni_method_start(Thread* self) } uint32_t art_jni_method_start_synchronized(jobject to_lock, Thread* self) - UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + UNLOCK_FUNCTION(Locks::mutator_lock_) { self->DecodeJObject(to_lock)->MonitorEnter(self); return art_jni_method_start(self); } @@ -643,7 +643,7 @@ static inline void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* s } void art_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); PopLocalReferences(saved_local_ref_cookie, self); } @@ -651,7 +651,7 @@ void art_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self) void art_jni_method_end_synchronized(uint32_t saved_local_ref_cookie, jobject locked, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. PopLocalReferences(saved_local_ref_cookie, self); @@ -659,7 +659,7 @@ void art_jni_method_end_synchronized(uint32_t saved_local_ref_cookie, jobject lo Object* art_jni_method_end_with_reference(jobject result, uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); Object* o = self->DecodeJObject(result); // Must decode before pop. 
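// Aside: the "must decode before pop" comments in the JNI epilogues above encode an ordering
// rule: 'result' is an indirect reference into the thread's local-reference table, so it has
// to be converted to a direct pointer while its table segment still exists. A self-contained
// analogue follows; all types and names here are stand-ins, not ART's.
#include <cstddef>
#include <vector>

struct LocalRefTable {
  std::vector<int*> slots;
  size_t SegmentCookie() const { return slots.size(); }       // saved on method entry
  int* Decode(size_t handle) const { return slots[handle]; }  // handle -> direct pointer
  void PopSegment(size_t cookie) { slots.resize(cookie); }    // kills newer handles
};

int* EndWithReference(LocalRefTable& table, size_t result_handle, size_t cookie) {
  int* direct = table.Decode(result_handle);  // must happen before the pop
  table.PopSegment(cookie);                   // result_handle is now dangling
  return direct;                              // the direct pointer stays valid
}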
PopLocalReferences(saved_local_ref_cookie, self); @@ -676,7 +676,7 @@ Object* art_jni_method_end_with_reference(jobject result, uint32_t saved_local_r Object* art_jni_method_end_with_reference_synchronized(jobject result, uint32_t saved_local_ref_cookie, jobject locked, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. Object* o = self->DecodeJObject(result); @@ -734,7 +734,7 @@ static void* art_find_compiler_runtime_func(const char* name) { // Handler for invocation on proxy methods. We create a boxed argument array. And we invoke // the invocation handler which is a field within the proxy object receiver. void art_proxy_invoke_handler_from_code(Method* proxy_method, ...) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { va_list ap; va_start(ap, proxy_method); diff --git a/src/compiler_test.cc b/src/compiler_test.cc index 0c53bb9981..98b21422f4 100644 --- a/src/compiler_test.cc +++ b/src/compiler_test.cc @@ -31,14 +31,14 @@ namespace art { class CompilerTest : public CommonTest { protected: - void CompileAll(jobject class_loader) LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) { compiler_->CompileAll(class_loader, Runtime::Current()->GetCompileTimeClassPath(class_loader)); MakeAllExecutable(class_loader); } void EnsureCompiled(jobject class_loader, const char* class_name, const char* method, const char* signature, bool is_virtual) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { CompileAll(class_loader); Thread::Current()->TransitionFromSuspendedToRunnable(); runtime_->Start(); diff --git a/src/debugger.cc b/src/debugger.cc index b47377e2eb..87ad446c4e 100644 --- a/src/debugger.cc +++ b/src/debugger.cc @@ -98,7 +98,7 @@ struct AllocRecordStackTraceElement { Method* method; uint32_t dex_pc; - int32_t LineNumber() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + int32_t LineNumber() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return MethodHelper(method).GetLineNumFromDexPC(dex_pc); } }; @@ -125,7 +125,7 @@ struct Breakpoint { }; static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.method).c_str(), rhs.dex_pc); return os; } @@ -182,7 +182,7 @@ static std::vector<Breakpoint> gBreakpoints GUARDED_BY(gBreakpointsLock); static SingleStepControl gSingleStepControl GUARDED_BY(gBreakpointsLock); static bool IsBreakpoint(Method* m, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MutexLock mu(gBreakpointsLock); for (size_t i = 0; i < gBreakpoints.size(); ++i) { if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == dex_pc) { @@ -194,7 +194,7 @@ static bool IsBreakpoint(Method* m, uint32_t dex_pc) } static Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* o = gRegistry->Get<Object*>(id); if (o == NULL || o == kInvalidObject) { status = JDWP::ERR_INVALID_OBJECT; @@ -209,7 +209,7 @@ static Array* 
DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status) } static Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* o = gRegistry->Get<Object*>(id); if (o == NULL || o == kInvalidObject) { status = JDWP::ERR_INVALID_OBJECT; @@ -224,8 +224,8 @@ static Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status) } static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId threadId) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* thread_peer = gRegistry->Get<Object*>(threadId); if (thread_peer == NULL || thread_peer == kInvalidObject) { return NULL; @@ -241,7 +241,7 @@ static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) { } static JDWP::JdwpTag TagFromClass(Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(c != NULL); if (c->IsArrayClass()) { return JDWP::JT_ARRAY; @@ -272,7 +272,7 @@ static JDWP::JdwpTag TagFromClass(Class* c) * Null objects are tagged JT_OBJECT. */ static JDWP::JdwpTag TagFromObject(const Object* o) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(o->GetClass()); } @@ -491,7 +491,7 @@ static void SetDebuggerUpdatesEnabledCallback(Thread* t, void* user_data) { } static void SetDebuggerUpdatesEnabled(bool enabled) { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Runtime::Current()->GetThreadList()->ForEach(SetDebuggerUpdatesEnabledCallback, &enabled); } @@ -830,7 +830,7 @@ JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId arrayId, int offset, int count, JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId arrayId, int offset, int count, const uint8_t* src) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JDWP::JdwpError status; Array* a = DecodeArray(arrayId, status); if (a == NULL) { @@ -920,7 +920,7 @@ bool Dbg::MatchType(JDWP::RefTypeId instClassId, JDWP::RefTypeId classId) { } static JDWP::FieldId ToFieldId(const Field* f) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else @@ -929,7 +929,7 @@ static JDWP::FieldId ToFieldId(const Field* f) } static JDWP::MethodId ToMethodId(const Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else @@ -938,7 +938,7 @@ static JDWP::MethodId ToMethodId(const Method* m) } static Field* FromFieldId(JDWP::FieldId fid) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else @@ -947,7 +947,7 @@ static Field* FromFieldId(JDWP::FieldId fid) } static Method* FromMethodId(JDWP::MethodId mid) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(FATAL); #else @@ -956,7 +956,7 @@ static Method* FromMethodId(JDWP::MethodId mid) } 
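// Aside: the ToFieldId()/FromFieldId() style helpers above are bracketed by
// #ifdef MOVING_GARBAGE_COLLECTOR because, in the non-moving case, a JDWP id can simply be
// the object's address. A sketch of that scheme follows; FieldId is a stand-in for
// JDWP::FieldId and Field is left opaque. A moving collector would instead need an explicit
// id <-> object table, which is why the moving branch is UNIMPLEMENTED(FATAL) in the patch.
#include <cstdint>

struct Field;              // opaque runtime object
using FieldId = uint64_t;  // stand-in for JDWP::FieldId

inline FieldId ToFieldId(const Field* f) {
  return static_cast<FieldId>(reinterpret_cast<uintptr_t>(f));  // address as id
}

inline Field* FromFieldId(FieldId fid) {
  return reinterpret_cast<Field*>(static_cast<uintptr_t>(fid));  // id back to address
}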
static void SetLocation(JDWP::JdwpLocation& location, Method* m, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (m == NULL) { memset(&location, 0, sizeof(location)); } else { @@ -969,7 +969,7 @@ static void SetLocation(JDWP::JdwpLocation& location, Method* m, uint32_t dex_pc } std::string Dbg::GetMethodName(JDWP::RefTypeId, JDWP::MethodId methodId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* m = FromMethodId(methodId); return MethodHelper(m).GetName(); } @@ -1013,7 +1013,7 @@ static uint16_t MangleSlot(uint16_t slot, const char* name) { } static uint16_t DemangleSlot(uint16_t slot, Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (slot == kEclipseWorkaroundSlot) { return 0; } else if (slot == 0) { @@ -1096,7 +1096,7 @@ JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId classId, JDWP::Exp } void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId methodId, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { struct DebugCallbackContext { int numItems; JDWP::ExpandBuf* pReply; @@ -1198,7 +1198,7 @@ JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId fieldId) { static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId refTypeId, JDWP::ObjectId objectId, JDWP::FieldId fieldId, JDWP::ExpandBuf* pReply, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JDWP::JdwpError status; Class* c = DecodeClass(refTypeId, status); if (refTypeId != 0 && c == NULL) { @@ -1268,7 +1268,7 @@ JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId refTypeId, JDWP::FieldI static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId objectId, JDWP::FieldId fieldId, uint64_t value, int width, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* o = gRegistry->Get<Object*>(objectId); if ((!is_static && o == NULL) || o == kInvalidObject) { return JDWP::ERR_INVALID_OBJECT; @@ -1330,7 +1330,7 @@ std::string Dbg::StringToUtf8(JDWP::ObjectId strId) { } bool Dbg::GetThreadName(JDWP::ObjectId threadId, std::string& name) { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); ScopedObjectAccessUnchecked soa(Thread::Current()); Thread* thread = DecodeThread(soa, threadId); if (thread == NULL) { @@ -1348,7 +1348,7 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId threadId, JDWP::ExpandBuf* pR } // Okay, so it's an object, but is it actually a thread? 
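// Aside: in GetThreadStatus() and GetThreadDebugSuspendCount() below, thread_list_lock_ is
// always acquired before thread_suspend_count_lock_. Keeping every call site on one global
// acquisition order is what makes the pair deadlock-free. A minimal illustration with
// std::mutex follows; both mutex names are stand-ins for the Locks:: members.
#include <mutex>

std::mutex thread_list_lock;           // stand-in for Locks::thread_list_lock_
std::mutex thread_suspend_count_lock;  // stand-in for Locks::thread_suspend_count_lock_

int ReadSuspendCount(const int& suspend_count) {
  std::lock_guard<std::mutex> list_guard(thread_list_lock);  // outer: thread cannot go away
  std::lock_guard<std::mutex> count_guard(thread_suspend_count_lock);  // inner: guards count
  return suspend_count;  // read under the same order every caller uses
}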
- MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); if (DecodeThread(soa, threadId) == NULL) { return JDWP::ERR_INVALID_THREAD; } @@ -1407,13 +1407,13 @@ JDWP::ObjectId Dbg::GetMainThreadGroupId() { bool Dbg::GetThreadStatus(JDWP::ObjectId threadId, JDWP::JdwpThreadStatus* pThreadStatus, JDWP::JdwpSuspendStatus* pSuspendStatus) { ScopedObjectAccess soa(Thread::Current()); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = DecodeThread(soa, threadId); if (thread == NULL) { return false; } - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); // TODO: if we're in Thread.sleep(long), we should return TS_SLEEPING, // even if it's implemented using Object.wait(long). @@ -1447,28 +1447,28 @@ bool Dbg::GetThreadStatus(JDWP::ObjectId threadId, JDWP::JdwpThreadStatus* pThre JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId threadId, JDWP::ExpandBuf* pReply) { ScopedObjectAccess soa(Thread::Current()); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = DecodeThread(soa, threadId); if (thread == NULL) { return JDWP::ERR_INVALID_THREAD; } - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); expandBufAdd4BE(pReply, thread->GetDebugSuspendCount()); return JDWP::ERR_NONE; } bool Dbg::ThreadExists(JDWP::ObjectId threadId) { ScopedObjectAccess soa(Thread::Current()); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); return DecodeThread(soa, threadId) != NULL; } bool Dbg::IsSuspended(JDWP::ObjectId threadId) { ScopedObjectAccess soa(Thread::Current()); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = DecodeThread(soa, threadId); CHECK(thread != NULL); - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); return thread->IsSuspended(); } @@ -1477,7 +1477,7 @@ void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId> public: ThreadListVisitor(const ScopedObjectAccessUnchecked& ts, Object* thread_group, std::vector<JDWP::ObjectId>& thread_ids) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : ts_(ts), thread_group_(thread_group), thread_ids_(thread_ids) {} static void Visit(Thread* t, void* arg) { @@ -1506,7 +1506,7 @@ void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId> ScopedObjectAccessUnchecked soa(Thread::Current()); Object* thread_group = gRegistry->Get<Object*>(thread_group_id); ThreadListVisitor tlv(soa, thread_group, thread_ids); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv); } @@ -1531,7 +1531,7 @@ void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector<JDWP: } static int GetStackDepth(Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { struct CountStackDepthVisitor : public StackVisitor { CountStackDepthVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack) @@ -1547,7 +1547,7 @@ static int GetStackDepth(Thread* thread) }; 
if (kIsDebugBuild) { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK(thread->IsSuspended()); } CountStackDepthVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack()); @@ -1565,7 +1565,7 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram public: GetFrameVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, NULL), depth_(0), start_frame_(start_frame), frame_count_(frame_count), buf_(buf) { expandBufAdd4BE(buf_, frame_count_); @@ -1644,7 +1644,7 @@ JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId threadId, bool request_suspens void Dbg::ResumeThread(JDWP::ObjectId threadId) { ScopedObjectAccessUnchecked soa(Thread::Current()); Object* peer = gRegistry->Get<Object*>(threadId); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = Thread::FromManagedThread(soa, peer); if (thread == NULL) { LOG(WARNING) << "No such thread for resume: " << peer; @@ -1652,7 +1652,7 @@ void Dbg::ResumeThread(JDWP::ObjectId threadId) { } bool needs_resume; { - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); needs_resume = thread->GetSuspendCount() > 0; } if (needs_resume) { @@ -1667,7 +1667,7 @@ void Dbg::SuspendSelf() { struct GetThisVisitor : public StackVisitor { GetThisVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack, Context* context, JDWP::FrameId frameId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, context), this_object(NULL), frame_id(frameId) {} // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses @@ -1691,7 +1691,7 @@ struct GetThisVisitor : public StackVisitor { }; static Object* GetThis(Thread* self, Method* m, size_t frame_id) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // TODO: should we return the 'this' we passed through to non-static native methods? 
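// Aside: CountStackDepthVisitor above and GetThisVisitor below follow the same shape:
// subclass a stack walker, override a per-frame hook, and let the hook's boolean return
// decide whether the walk continues. A reduced sketch of that pattern follows; Frame and
// the walker are simplified stand-ins for ART's ManagedStack machinery.
#include <cstddef>
#include <vector>

struct Frame { bool is_runtime_frame; };

struct StackVisitor {
  virtual ~StackVisitor() {}
  virtual bool VisitFrame(const Frame& frame) = 0;  // return false to stop the walk
  void WalkStack(const std::vector<Frame>& frames) {
    for (const Frame& frame : frames) {
      if (!VisitFrame(frame)) {
        return;  // visitor asked to stop early
      }
    }
  }
};

struct CountStackDepthVisitor : StackVisitor {
  size_t depth = 0;
  bool VisitFrame(const Frame& frame) override {
    if (!frame.is_runtime_frame) {
      ++depth;  // count only managed frames, as the debugger code does
    }
    return true;  // walk all the way to the bottom of the stack
  }
};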
if (m->IsNative() || m->IsStatic()) { return NULL; @@ -1708,12 +1708,12 @@ JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame ScopedObjectAccessUnchecked soa(Thread::Current()); Thread* thread; { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); thread = DecodeThread(soa, thread_id); if (thread == NULL) { return JDWP::ERR_INVALID_THREAD; } - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); if (!thread->IsSuspended()) { return JDWP::ERR_THREAD_NOT_SUSPENDED; } @@ -1731,7 +1731,7 @@ void Dbg::GetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot GetLocalVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack, Context* context, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, context), frame_id_(frameId), slot_(slot), tag_(tag), buf_(buf), width_(width) {} @@ -1850,7 +1850,7 @@ void Dbg::SetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot SetLocalVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack, Context* context, JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value, size_t width) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, context), frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width) {} @@ -2139,7 +2139,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId threadId, JDWP::JdwpStepSize s SingleStepStackVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack) EXCLUSIVE_LOCKS_REQUIRED(gBreakpointsLock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, NULL) { gBreakpointsLock.AssertHeld(); gSingleStepControl.method = NULL; @@ -2299,7 +2299,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId object Thread* self = Thread::Current(); { ScopedObjectAccessUnchecked soa(self); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); targetThread = DecodeThread(soa, threadId); if (targetThread == NULL) { LOG(ERROR) << "InvokeMethod request for non-existent thread " << threadId; @@ -2327,7 +2327,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId object */ int suspend_count; { - MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu2(*Locks::thread_suspend_count_lock_); suspend_count = targetThread->GetSuspendCount(); } if (suspend_count > 1) { @@ -2624,7 +2624,7 @@ void Dbg::DdmBroadcast(bool connect) { Thread* self = Thread::Current(); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); if (self->GetState() != kRunnable) { LOG(ERROR) << "DDM broadcast in thread state " << self->GetState(); /* try anyway? 
*/ @@ -2692,7 +2692,7 @@ void Dbg::DdmSetThreadNotification(bool enable) { SuspendVM(); std::list<Thread*> threads; { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); threads = Runtime::Current()->GetThreadList()->GetList(); } { @@ -2885,7 +2885,7 @@ class HeapChunkContext { needHeader_ = false; } - void Flush() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Patch the "length of piece" field. CHECK_LE(&buf_[0], pieceLenField_); CHECK_LE(pieceLenField_, p_); @@ -2896,8 +2896,8 @@ class HeapChunkContext { } static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes); } @@ -2913,8 +2913,8 @@ class HeapChunkContext { } void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { // Note: heap call backs cannot manipulate the heap upon which they are crawling, care is taken // in the following code not to allocate memory, by ensuring buf_ is of the correct size if (used_bytes == 0) { @@ -2967,7 +2967,7 @@ class HeapChunkContext { } void AppendChunk(uint8_t state, void* ptr, size_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Make sure there's enough room left in the buffer. // We need to use two bytes for every fractional 256 allocation units used by the chunk plus // 17 bytes for any header. @@ -2997,7 +2997,7 @@ class HeapChunkContext { } uint8_t ExamineObject(const Object* o, bool is_native_heap) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { if (o == NULL) { return HPSG_STATE(SOLIDITY_FREE, 0); } @@ -3089,7 +3089,7 @@ void Dbg::DdmSendHeapSegments(bool native) { const Spaces& spaces = heap->GetSpaces(); for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) { if ((*cur)->IsAllocSpace()) { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); (*cur)->AsAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context); } } @@ -3119,7 +3119,7 @@ void Dbg::SetAllocTrackingEnabled(bool enabled) { struct AllocRecordStackVisitor : public StackVisitor { AllocRecordStackVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack, AllocRecord* record) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, NULL), record(record), depth(0) {} // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses diff --git a/src/debugger.h b/src/debugger.h index 6e82001d82..43590f839c 100644 --- a/src/debugger.h +++ b/src/debugger.h @@ -81,7 +81,7 @@ class Dbg { static void StopJdwp(); // Invoked by the GC in case we need to keep DDMS informed. 
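// Aside: AppendChunk() above sizes its buffer from the rule stated in its comment: two bytes
// for every started run of 256 allocation units, plus up to 17 bytes of chunk header. The
// checked formula below restates that rule; the 8-byte allocation unit is an assumption for
// illustration, not something this patch states.
#include <cstddef>

constexpr size_t kAllocUnit = 8;  // assumed HPSG allocation-unit size in bytes

constexpr size_t ChunkBytesNeeded(size_t chunk_bytes) {
  // 17-byte header + 2 bytes per (possibly partial) run of 256 allocation units.
  return 17 + 2 * ((chunk_bytes / kAllocUnit + 255) / 256);
}

static_assert(ChunkBytesNeeded(0) == 17, "an empty chunk still needs its header");
static_assert(ChunkBytesNeeded(256 * kAllocUnit) == 19, "exactly one run of 256 units");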
- static void GcDidFinish() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + static void GcDidFinish() LOCKS_EXCLUDED(Locks::mutator_lock_); // Return the DebugInvokeReq for the current thread. static DebugInvokeReq* GetInvokeReq(); @@ -124,105 +124,105 @@ class Dbg { * Class, Object, Array */ static std::string GetClassName(JDWP::RefTypeId id) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& classObjectId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclassId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetReflectedType(JDWP::RefTypeId classId, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void GetClassList(std::vector<JDWP::RefTypeId>& classes) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetClassInfo(JDWP::RefTypeId classId, JDWP::JdwpTypeTag* pTypeTag, uint32_t* pStatus, std::string* pDescriptor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetReferenceType(JDWP::ObjectId objectId, JDWP::ExpandBuf* pReply); static JDWP::JdwpError GetSignature(JDWP::RefTypeId refTypeId, std::string& signature) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId refTypeId, std::string& source_file) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetObjectTag(JDWP::ObjectId objectId, uint8_t& tag) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static size_t GetTagWidth(JDWP::JdwpTag tag); static JDWP::JdwpError GetArrayLength(JDWP::ObjectId arrayId, int& length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError OutputArray(JDWP::ObjectId arrayId, int firstIndex, int count, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError SetArrayElements(JDWP::ObjectId arrayId, int firstIndex, int count, const uint8_t* buf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::ObjectId CreateString(const std::string& str) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError CreateObject(JDWP::RefTypeId classId, JDWP::ObjectId& new_object) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError CreateArrayObject(JDWP::RefTypeId arrayTypeId, uint32_t length, JDWP::ObjectId& new_array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static bool MatchType(JDWP::RefTypeId instClassId, JDWP::RefTypeId classId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Method and Field */ static std::string GetMethodName(JDWP::RefTypeId refTypeId, JDWP::MethodId id) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId refTypeId, bool withGeneric, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError OutputDeclaredMethods(JDWP::RefTypeId refTypeId, bool withGeneric, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError OutputDeclaredInterfaces(JDWP::RefTypeId refTypeId, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void OutputLineTable(JDWP::RefTypeId refTypeId, JDWP::MethodId methodId, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void OutputVariableTable(JDWP::RefTypeId refTypeId, JDWP::MethodId id, bool withGeneric, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpTag GetFieldBasicTag(JDWP::FieldId fieldId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpTag GetStaticFieldBasicTag(JDWP::FieldId fieldId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);; + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);; static JDWP::JdwpError GetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError SetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, uint64_t value, int width) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetStaticFieldValue(JDWP::RefTypeId refTypeId, JDWP::FieldId fieldId, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId fieldId, uint64_t value, int width) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static std::string StringToUtf8(JDWP::ObjectId strId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Thread, ThreadGroup, Frame */ static bool GetThreadName(JDWP::ObjectId threadId, std::string& name) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) - 
LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + LOCKS_EXCLUDED(Locks::thread_list_lock_); static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId threadId, JDWP::ExpandBuf* pReply); static std::string GetThreadGroupName(JDWP::ObjectId threadGroupId); static JDWP::ObjectId GetThreadGroupParent(JDWP::ObjectId threadGroupId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::ObjectId GetSystemThreadGroupId() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::ObjectId GetMainThreadGroupId(); static bool GetThreadStatus(JDWP::ObjectId threadId, JDWP::JdwpThreadStatus* pThreadStatus, JDWP::JdwpSuspendStatus* pSuspendStatus); @@ -234,43 +234,43 @@ class Dbg { // Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0, // returns all threads. static void GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& thread_ids) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& child_thread_group_ids); static int GetThreadFrameCount(JDWP::ObjectId threadId); static JDWP::JdwpError GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::ObjectId GetThreadSelfId() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SuspendVM() - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); static void ResumeVM(); static JDWP::JdwpError SuspendThread(JDWP::ObjectId threadId, bool request_suspension = true) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_, + Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); static void ResumeThread(JDWP::ObjectId threadId) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SuspendSelf(); static JDWP::JdwpError GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, JDWP::ObjectId* result) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void GetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t expectedLen) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, 
int slot, JDWP::JdwpTag tag, uint64_t value, size_t width) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Debugger notification @@ -282,29 +282,29 @@ class Dbg { kMethodExit = 0x08, }; static void PostLocationEvent(const Method* method, int pcOffset, Object* thisPtr, int eventFlags) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostException(Thread* thread, JDWP::FrameId throw_frame_id, Method* throw_method, uint32_t throw_dex_pc, Method* catch_method, uint32_t catch_dex_pc, Throwable* exception) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostThreadStart(Thread* t) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostThreadDeath(Thread* t) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostClassPrepare(Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void UpdateDebugger(int32_t dex_pc, Thread* self) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void WatchLocation(const JDWP::JdwpLocation* pLoc) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void UnwatchLocation(const JDWP::JdwpLocation* pLoc) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError ConfigureStep(JDWP::ObjectId threadId, JDWP::JdwpStepSize size, JDWP::JdwpStepDepth depth) LOCKS_EXCLUDED(gBreakpointsLock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void UnconfigureStep(JDWP::ObjectId threadId); static JDWP::JdwpError InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId objectId, @@ -313,9 +313,9 @@ class Dbg { JDWP::JdwpTag* arg_types, uint32_t options, JDWP::JdwpTag* pResultTag, uint64_t* pResultValue, JDWP::ObjectId* pExceptObj) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void ExecuteMethod(DebugInvokeReq* pReq); /* perform "late registration" of an object ID */ @@ -325,27 +325,27 @@ class Dbg { * DDM support. 
*/ static void DdmSendThreadNotification(Thread* t, uint32_t type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DdmSetThreadNotification(bool enable); static bool DdmHandlePacket(const uint8_t* buf, int dataLen, uint8_t** pReplyBuf, int* pReplyLen); - static void DdmConnected() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); - static void DdmDisconnected() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void DdmConnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void DdmDisconnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DdmSendChunk(uint32_t type, size_t len, const uint8_t* buf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Recent allocation tracking support. */ static void RecordAllocation(Class* type, size_t byte_count) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SetAllocTrackingEnabled(bool enabled); static inline bool IsAllocTrackingEnabled() { return recent_allocation_records_ != NULL; } static jbyteArray GetRecentAllocations() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DumpRecentAllocations(); enum HpifWhen { @@ -355,7 +355,7 @@ class Dbg { HPIF_WHEN_EVERY_GC = 3 }; static int DdmHandleHpifChunk(HpifWhen when) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); enum HpsgWhen { HPSG_WHEN_NEVER = 0, @@ -368,14 +368,14 @@ class Dbg { static bool DdmHandleHpsgNhsgChunk(HpsgWhen when, HpsgWhat what, bool native); static void DdmSendHeapInfo(HpifWhen reason) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DdmSendHeapSegments(bool native) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: - static void DdmBroadcast(bool) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + static void DdmBroadcast(bool) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostThreadStartOrStop(Thread*, uint32_t) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static AllocRecord* recent_allocation_records_; }; diff --git a/src/dex2oat.cc b/src/dex2oat.cc index b68a75b18b..2a6a2204c4 100644 --- a/src/dex2oat.cc +++ b/src/dex2oat.cc @@ -123,7 +123,7 @@ class Dex2Oat { public: static bool Create(Dex2Oat** p_dex2oat, Runtime::Options& options, InstructionSet instruction_set, size_t thread_count, bool support_debugging) - SHARED_TRYLOCK_FUNCTION(true, GlobalSynchronization::mutator_lock_) { + SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) { if (!CreateRuntime(options, instruction_set)) { *p_dex2oat = NULL; return false; @@ -139,7 +139,7 @@ class Dex2Oat { // Make a list of descriptors for classes to include in the image const std::set<std::string>* GetImageClassDescriptors(const char* image_classes_filename) - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { UniquePtr<std::ifstream> image_classes_file(new std::ifstream(image_classes_filename, std::ifstream::in)); if (image_classes_file.get() == NULL) { LOG(ERROR) << "Failed to open image classes file " << image_classes_filename; @@ -213,7 +213,7 @@ class Dex2Oat { const std::set<std::string>* image_classes, bool dump_stats, bool dump_timings) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // SirtRef and ClassLoader creation needs to come after Runtime::Create jobject class_loader = NULL; if (!boot_image_option.empty()) { @@ -279,7 +279,7 @@ class Dex2Oat { const std::string& oat_filename, const std::string& oat_location, const Compiler& compiler) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { ImageWriter image_writer(image_classes); if (!image_writer.Write(image_filename, image_base, oat_filename, oat_location, compiler)) { LOG(ERROR) << "Failed to create image file " << image_filename; @@ -299,7 +299,7 @@ class Dex2Oat { } static bool CreateRuntime(Runtime::Options& options, InstructionSet instruction_set) - SHARED_TRYLOCK_FUNCTION(true, GlobalSynchronization::mutator_lock_) { + SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) { if (!Runtime::Create(options, false)) { LOG(ERROR) << "Failed to create runtime"; return false; @@ -333,7 +333,7 @@ class Dex2Oat { static void ResolveExceptionsForMethod(MethodHelper* mh, std::set<std::pair<uint16_t, const DexFile*> >& exceptions_to_resolve) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = mh->GetCodeItem(); if (code_item == NULL) { return; // native or abstract method @@ -370,7 +370,7 @@ class Dex2Oat { } static bool ResolveCatchBlockExceptionsClassVisitor(Class* c, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::set<std::pair<uint16_t, const DexFile*> >* exceptions_to_resolve = reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*> >*>(arg); MethodHelper mh; @@ -388,7 +388,7 @@ class Dex2Oat { } static bool RecordImageClassesVisitor(Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::set<std::string>* image_classes = reinterpret_cast<std::set<std::string>*>(arg); if (klass->IsArrayClass() || klass->IsPrimitive()) { return true; diff --git a/src/dex_cache.h b/src/dex_cache.h index b3c5ce6e91..a08c644d7f 100644 --- a/src/dex_cache.h +++ b/src/dex_cache.h @@ -39,11 +39,11 @@ class MANAGED DexCache : public ObjectArray<Object> { ObjectArray<Method>* methods, ObjectArray<Field>* fields, ObjectArray<StaticStorageBase>* initialized_static_storage) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Fixup(Method* trampoline) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Fixup(Method* trampoline) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - String* GetLocation() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + String* GetLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return Get(kLocation)->AsString(); } @@ -62,49 +62,49 @@ class MANAGED DexCache : public ObjectArray<Object> { kResolvedMethods * sizeof(Object*)); } - size_t NumStrings() 
const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t NumStrings() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetStrings()->GetLength(); } - size_t NumResolvedTypes() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t NumResolvedTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetResolvedTypes()->GetLength(); } - size_t NumResolvedMethods() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t NumResolvedMethods() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetResolvedMethods()->GetLength(); } - size_t NumResolvedFields() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t NumResolvedFields() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetResolvedFields()->GetLength(); } size_t NumInitializedStaticStorage() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetInitializedStaticStorage()->GetLength(); } String* GetResolvedString(uint32_t string_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetStrings()->Get(string_idx); } void SetResolvedString(uint32_t string_idx, String* resolved) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { GetStrings()->Set(string_idx, resolved); } Class* GetResolvedType(uint32_t type_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetResolvedTypes()->Get(type_idx); } void SetResolvedType(uint32_t type_idx, Class* resolved) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { GetResolvedTypes()->Set(type_idx, resolved); } Method* GetResolvedMethod(uint32_t method_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method = GetResolvedMethods()->Get(method_idx); // Hide resolution trampoline methods from the caller if (method != NULL && method->GetDexMethodIndex() == DexFile::kDexNoIndex16) { @@ -116,38 +116,38 @@ class MANAGED DexCache : public ObjectArray<Object> { } void SetResolvedMethod(uint32_t method_idx, Method* resolved) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { GetResolvedMethods()->Set(method_idx, resolved); } Field* GetResolvedField(uint32_t field_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetResolvedFields()->Get(field_idx); } void SetResolvedField(uint32_t field_idx, Field* resolved) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { GetResolvedFields()->Set(field_idx, resolved); } ObjectArray<String>* GetStrings() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return static_cast<ObjectArray<String>*>(GetNonNull(kStrings)); } ObjectArray<Class>* GetResolvedTypes() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return static_cast<ObjectArray<Class>*>(GetNonNull(kResolvedTypes)); } ObjectArray<Method>* GetResolvedMethods() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return static_cast<ObjectArray<Method>*>(GetNonNull(kResolvedMethods)); } ObjectArray<Field>* GetResolvedFields() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return static_cast<ObjectArray<Field>*>(GetNonNull(kResolvedFields)); } ObjectArray<StaticStorageBase>* GetInitializedStaticStorage() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return static_cast<ObjectArray<StaticStorageBase>*>(GetNonNull(kInitializedStaticStorage)); } @@ -167,7 +167,7 @@ class MANAGED DexCache : public ObjectArray<Object> { }; Object* GetNonNull(ArrayIndex array_index) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* obj = Get(array_index); DCHECK(obj != NULL); return obj; diff --git a/src/dex_file.h b/src/dex_file.h index 99a748d723..7d39945a10 100644 --- a/src/dex_file.h +++ b/src/dex_file.h @@ -776,7 +776,7 @@ class DexFile { // // This is used by runtime; therefore use art::Method not art::DexFile::Method. int32_t GetLineNumFromPC(const Method* method, uint32_t rel_pc) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx, DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb, @@ -1125,10 +1125,10 @@ class EncodedStaticFieldValueIterator { public: EncodedStaticFieldValueIterator(const DexFile& dex_file, DexCache* dex_cache, ClassLinker* linker, const DexFile::ClassDef& class_def) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ReadValueToField(Field* field) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool HasNext() { return pos_ < array_size_; } diff --git a/src/heap.cc b/src/heap.cc index 47473e3c53..d8f55ab7d5 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -273,7 +273,7 @@ class SpaceSorter { }; void Heap::AddSpace(Space* space) { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); DCHECK(space != NULL); DCHECK(space->GetLiveBitmap() != NULL); live_bitmap_->AddSpaceBitmap(space->GetLiveBitmap()); @@ -366,7 +366,7 @@ Object* Heap::AllocObject(Class* c, size_t byte_count) { Object* obj = Allocate(alloc_space_, byte_count); if (LIKELY(obj != NULL)) { #if VERIFY_OBJECT_ENABLED - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); // Verify objects doesn't like objects in allocation stack not being marked as live. 
live_bitmap_->Set(obj); #endif @@ -426,7 +426,7 @@ bool Heap::IsHeapAddress(const Object* obj) { } bool Heap::IsLiveObjectLocked(const Object* obj) { - GlobalSynchronization::heap_bitmap_lock_->AssertReaderHeld(); + Locks::heap_bitmap_lock_->AssertReaderHeld(); return IsHeapAddress(obj) && GetLiveBitmap()->Test(obj); } @@ -489,7 +489,7 @@ void Heap::VerificationCallback(Object* obj, void* arg) { } void Heap::VerifyHeap() { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); GetLiveBitmap()->Walk(Heap::VerificationCallback, this); } @@ -546,7 +546,7 @@ Object* Heap::Allocate(AllocSpace* space, size_t alloc_size) { // done in the runnable state where suspension is expected. #ifndef NDEBUG { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(self->GetState(), kRunnable); } self->AssertThreadSuspensionIsAllowable(); @@ -660,7 +660,7 @@ int64_t Heap::GetFreeMemory() { class InstanceCounter { public: InstanceCounter(Class* c, bool count_assignable) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : class_(c), count_assignable_(count_assignable), count_(0) { } @@ -670,12 +670,12 @@ class InstanceCounter { } static void Callback(Object* o, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o); } private: - void VisitInstance(Object* o) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void VisitInstance(Object* o) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* instance_class = o->GetClass(); if (count_assignable_) { if (instance_class == class_) { @@ -694,7 +694,7 @@ class InstanceCounter { }; int64_t Heap::CountInstances(Class* c, bool count_assignable) { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); InstanceCounter counter(c, count_assignable); GetLiveBitmap()->Walk(InstanceCounter::Callback, &counter); return counter.GetCount(); @@ -722,7 +722,7 @@ void Heap::PreZygoteFork() { { // Flush the alloc stack. 
- WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); FlushAllocStack(); } @@ -806,10 +806,10 @@ void Heap::UnMarkStackAsLive(MarkStack* alloc_stack) { } void Heap::CollectGarbageInternal(GcType gc_type, bool clear_soft_references) { - GlobalSynchronization::mutator_lock_->AssertNotHeld(); + Locks::mutator_lock_->AssertNotHeld(); #ifndef NDEBUG { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(Thread::Current()->GetState(), kWaitingPerformingGc); } #endif @@ -868,7 +868,7 @@ void Heap::CollectGarbageMarkSweepPlan(GcType gc_type, bool clear_soft_reference ThreadList* thread_list = Runtime::Current()->GetThreadList(); thread_list->SuspendAll(); timings.AddSplit("SuspendAll"); - GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); + Locks::mutator_lock_->AssertExclusiveHeld(); size_t bytes_freed = 0; Object* cleared_references = NULL; @@ -880,7 +880,7 @@ void Heap::CollectGarbageMarkSweepPlan(GcType gc_type, bool clear_soft_reference // Pre verify the heap if (pre_gc_verify_heap_) { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); VerifyHeapReferences(std::string("Pre ") + gc_type_str.str() + "Gc"); timings.AddSplit("VerifyHeapReferencesPreGC"); } @@ -918,7 +918,7 @@ void Heap::CollectGarbageMarkSweepPlan(GcType gc_type, bool clear_soft_reference } } - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); if (gc_type == GC_PARTIAL) { // Copy the mark bits over from the live bits, do this as early as possible or else we can // accidentally un-mark roots. @@ -1007,7 +1007,7 @@ void Heap::CollectGarbageMarkSweepPlan(GcType gc_type, bool clear_soft_reference // Post gc verify the heap if (post_gc_verify_heap_) { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); VerifyHeapReferences(std::string("Post ") + gc_type_str.str() + "Gc"); timings.AddSplit("VerifyHeapReferencesPostGC"); } @@ -1081,8 +1081,8 @@ class ScanVisitor { class VerifyReferenceVisitor { public: VerifyReferenceVisitor(Heap* heap, bool* failed) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, + Locks::heap_bitmap_lock_) : heap_(heap), failed_(failed) { } @@ -1183,8 +1183,7 @@ class VerifyObjectVisitor { } void operator ()(const Object* obj) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::heap_bitmap_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { VerifyReferenceVisitor visitor(heap_, const_cast<bool*>(&failed_)); MarkSweep::VisitObjectReferences(obj, visitor); } @@ -1200,7 +1199,7 @@ class VerifyObjectVisitor { // Must do this with mutators suspended since we are directly accessing the allocation stacks. void Heap::VerifyHeapReferences(const std::string& phase) { - GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); + Locks::mutator_lock_->AssertExclusiveHeld(); // Lets sort our allocation stacks so that we can efficiently binary search them. std::sort(allocation_stack_->Begin(), allocation_stack_->End()); std::sort(live_stack_->Begin(), live_stack_->End()); @@ -1220,7 +1219,7 @@ void Heap::SwapBitmaps() { // these bitmaps. 
Doing this enables us to sweep with the heap unlocked since new allocations // set the live bit, but since we have the bitmaps reversed at this point, this sets the mark bit // instead, resulting in no new allocated objects being incorrectly freed by sweep. - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) { Space* space = *it; // We never allocate into zygote spaces. @@ -1242,7 +1241,7 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft ThreadList* thread_list = Runtime::Current()->GetThreadList(); thread_list->SuspendAll(); timings.AddSplit("SuspendAll"); - GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); + Locks::mutator_lock_->AssertExclusiveHeld(); size_t bytes_freed = 0; Object* cleared_references = NULL; @@ -1255,7 +1254,7 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft // Pre verify the heap if (pre_gc_verify_heap_) { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); VerifyHeapReferences(std::string("Pre ") + gc_type_str.str() + "Gc"); timings.AddSplit("VerifyHeapReferencesPreGC"); } @@ -1294,7 +1293,7 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft } { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); if (gc_type == GC_PARTIAL) { // Copy the mark bits over from the live bits, do this as early as possible or else we can @@ -1343,11 +1342,11 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft // Allow mutators to go again, acquire share on mutator_lock_ to continue. thread_list->ResumeAll(); { - ReaderMutexLock reader_lock(*GlobalSynchronization::mutator_lock_); + ReaderMutexLock reader_lock(*Locks::mutator_lock_); root_end = NanoTime(); timings.AddSplit("RootEnd"); - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); UpdateAndMarkModUnion(timings, gc_type); if (gc_type != GC_STICKY) { // Recursively mark all the non-image bits set in the mark bitmap. @@ -1361,10 +1360,10 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft dirty_begin = NanoTime(); thread_list->SuspendAll(); timings.AddSplit("ReSuspend"); - GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); + Locks::mutator_lock_->AssertExclusiveHeld(); { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); // Re-mark root set. mark_sweep.ReMarkRoots(); @@ -1376,7 +1375,7 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft } { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); mark_sweep.ProcessReferences(clear_soft_references); timings.AddSplit("ProcessReferences"); @@ -1395,7 +1394,7 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft if (kIsDebugBuild) { // Verify that we only reach marked objects from the image space. 
- ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); mark_sweep.VerifyImageRoots(); timings.AddSplit("VerifyImageRoots"); } @@ -1411,18 +1410,18 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft // If we are going to do post Gc verification, lets keep the mutators paused since we don't // want them to touch dead objects before we find these in verification. if (post_gc_verify_heap_) { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); VerifyHeapReferences(std::string("Post ") + gc_type_str.str() + "Gc"); timings.AddSplit("VerifyHeapReferencesPostGC"); } thread_list->ResumeAll(); dirty_end = NanoTime(); - GlobalSynchronization::mutator_lock_->AssertNotHeld(); + Locks::mutator_lock_->AssertNotHeld(); { // TODO: this lock shouldn't be necessary (it's why we did the bitmap flip above). - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); if (gc_type != GC_STICKY) { mark_sweep.Sweep(gc_type == GC_PARTIAL, swap); } else { diff --git a/src/heap.h b/src/heap.h index 23f2ac3c37..104cbdf84e 100644 --- a/src/heap.h +++ b/src/heap.h @@ -90,7 +90,7 @@ class LOCKABLE Heap { // Allocates and initializes storage for an object instance. Object* AllocObject(Class* klass, size_t num_bytes) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Check sanity of given reference. Requires the heap lock. #if VERIFY_OBJECT_ENABLED @@ -103,8 +103,8 @@ class LOCKABLE Heap { void VerifyHeap(); static void RootMatchesObjectVisitor(const Object* root, void* arg); void VerifyHeapReferences(const std::string& phase) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock, // and doesn't abort on error, allowing the caller to report more @@ -114,11 +114,11 @@ class LOCKABLE Heap { // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses). // Requires the heap lock to be held. bool IsLiveObjectLocked(const Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Initiates an explicit garbage collection. void CollectGarbage(bool clear_soft_references) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); // Does a concurrent GC, should only be called by the GC daemon thread // through runtime. @@ -133,8 +133,8 @@ class LOCKABLE Heap { // Implements VMDebug.countInstancesOfClass. int64_t CountInstances(Class* c, bool count_assignable) - LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to // implement dalvik.system.VMRuntime.clearGrowthLimit. 
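The macros being rewritten throughout this change are ART's wrappers over Clang's thread-safety ("annotalysis") attributes: SHARED_LOCKS_REQUIRED and EXCLUSIVE_LOCKS_REQUIRED state locks a caller must already hold, LOCKS_EXCLUDED states locks that must not be held on entry, and GUARDED_BY ties a field to the lock protecting it. A minimal sketch of the renamed pattern follows; the attribute spellings shown are the conventional Clang ones rather than ART's exact definitions, and the simplified Locks struct assumes only the members named in this diff.

// Sketch only: conventional Clang thread-safety attribute spellings, not the
// literal ART macro definitions.
#define SHARED_LOCKS_REQUIRED(...) __attribute__((shared_locks_required(__VA_ARGS__)))
#define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))
#define LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))

// The renamed holder of the global lock hierarchy, reduced to the members
// this diff touches.
struct Locks {
  static ReaderWriterMutex* mutator_lock_;      // held shared while mutators run
  static ReaderWriterMutex* heap_bitmap_lock_;  // guards the live/mark bitmaps
  static Mutex* thread_list_lock_;
  static Mutex* thread_suspend_count_lock_;
};

// Hypothetical declaration: annotalysis rejects any caller that cannot prove
// a shared hold on mutator_lock_ at the call site.
void DumpHeapState() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);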
@@ -244,11 +244,11 @@ class LOCKABLE Heap { void Trim(); - HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { return live_bitmap_.get(); } - HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { return mark_bitmap_.get(); } @@ -256,7 +256,7 @@ class LOCKABLE Heap { // Mark and empty stack. void FlushAllocStack() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Mark all the objects in the allocation stack as live. void MarkStackAsLive(MarkStack* alloc_stack); @@ -269,7 +269,7 @@ class LOCKABLE Heap { // Update and mark mod union table based on gc type. void UpdateAndMarkModUnion(TimingLogger& timings, GcType gc_type) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added. // Assumes there is only one image space. @@ -280,8 +280,8 @@ class LOCKABLE Heap { private: // Allocates uninitialized storage. Object* Allocate(AllocSpace* space, size_t num_bytes) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Pushes a list of cleared references out to the managed heap. void EnqueueClearedReferences(Object** cleared_references); @@ -290,19 +290,19 @@ class LOCKABLE Heap { void RequestConcurrentGC(); void RecordAllocation(AllocSpace* space, const Object* object) - LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_); + LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); void CollectGarbageInternal(GcType gc_plan, bool clear_soft_references) LOCKS_EXCLUDED(gc_complete_lock_, - GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + Locks::heap_bitmap_lock_, + Locks::mutator_lock_, + Locks::thread_suspend_count_lock_); void CollectGarbageMarkSweepPlan(GcType gc_plan, bool clear_soft_references) - LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_); void CollectGarbageConcurrentMarkSweepPlan(GcType gc_plan, bool clear_soft_references) - LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_); // Given the current contents of the alloc space, increase the allowed heap footprint to match // the target utilization ratio. This should only be called immediately after a full garbage @@ -311,7 +311,7 @@ class LOCKABLE Heap { size_t GetPercentFree(); - void AddSpace(Space* space) LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_); + void AddSpace(Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); // No thread safety analysis since we call this everywhere and it is impossible to find a proper // lock ordering for it.
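These annotations pair with the scoped guards used throughout heap.cc above: WriterMutexLock acquires a ReaderWriterMutex exclusively for mutation, ReaderMutexLock acquires it shared for read-only traversal, and both release at scope exit, which is how callers discharge the EXCLUSIVE_ and SHARED_LOCKS_REQUIRED obligations that annotalysis checks. A hedged usage sketch; the two helper functions are illustrative, not part of this patch.

// Illustrative helpers mirroring the heap.cc pattern in this diff.
// Heap::FlushAllocStack() is declared EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_),
// so a writer guard must be in scope before calling it.
void FlushUnderWriterLock(Heap* heap) {
  WriterMutexLock mu(*Locks::heap_bitmap_lock_);  // exclusive hold, released at scope exit
  heap->FlushAllocStack();
}

// A shared hold suffices for read-only walks over the live bitmap.
void WalkUnderReaderLock(Heap* heap, SpaceBitmap::Callback* callback, void* arg) {
  ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
  heap->GetLiveBitmap()->Walk(callback, arg);
}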
@@ -376,8 +376,8 @@ class LOCKABLE Heap { // Last trim time uint64_t last_trim_time_; - UniquePtr<HeapBitmap> live_bitmap_ GUARDED_BY(GlobalSynchronization::heap_bitmap_lock_); - UniquePtr<HeapBitmap> mark_bitmap_ GUARDED_BY(GlobalSynchronization::heap_bitmap_lock_); + UniquePtr<HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_); + UniquePtr<HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_); // True while the garbage collector is trying to signal the GC daemon thread. // This flag is needed to prevent recursion from occurring when the JNI calls diff --git a/src/heap_bitmap.h b/src/heap_bitmap.h index d202ae3591..50ecc7a9ee 100644 --- a/src/heap_bitmap.h +++ b/src/heap_bitmap.h @@ -26,14 +26,14 @@ namespace art { class HeapBitmap { public: bool Test(const Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { SpaceBitmap* bitmap = GetSpaceBitmap(obj); DCHECK(bitmap != NULL); return bitmap->Test(obj); } void Clear(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { SpaceBitmap* bitmap = GetSpaceBitmap(obj); DCHECK(bitmap != NULL) << "tried to clear object " @@ -43,7 +43,7 @@ namespace art { } void Set(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { SpaceBitmap* bitmap = GetSpaceBitmap(obj); DCHECK(bitmap != NULL) << "tried to mark object " @@ -63,7 +63,7 @@ namespace art { } void Walk(SpaceBitmap::Callback* callback, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { // TODO: C++0x auto for (Bitmaps::iterator it = bitmaps_.begin(); it!= bitmaps_.end(); ++it) { (*it)->Walk(callback, arg); @@ -72,7 +72,7 @@ namespace art { template <typename Visitor> void Visit(const Visitor& visitor) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { // TODO: C++0x auto for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) { SpaceBitmap* bitmap = *it; @@ -83,7 +83,7 @@ namespace art { // Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC. void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); HeapBitmap(Heap* heap) : heap_(heap) { diff --git a/src/hprof/hprof.cc b/src/hprof/hprof.cc index 356c1fb4f6..0cad70968b 100644 --- a/src/hprof/hprof.cc +++ b/src/hprof/hprof.cc @@ -402,17 +402,17 @@ class Hprof { } void Dump() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) - LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) + LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) { // Walk the roots and the heap. 
current_record_.StartNewRecord(body_fp_, HPROF_TAG_HEAP_DUMP_SEGMENT, HPROF_TIME); Runtime::Current()->VisitRoots(RootVisitor, this); { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); Runtime::Current()->GetHeap()->FlushAllocStack(); } { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); Runtime::Current()->GetHeap()->GetLiveBitmap()->Walk(HeapBitmapCallback, this); } current_record_.StartNewRecord(body_fp_, HPROF_TAG_HEAP_DUMP_END, HPROF_TIME); @@ -474,28 +474,28 @@ class Hprof { private: static void RootVisitor(const Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(arg != NULL); Hprof* hprof = reinterpret_cast<Hprof*>(arg); hprof->VisitRoot(obj); } static void HeapBitmapCallback(Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(obj != NULL); CHECK(arg != NULL); Hprof* hprof = reinterpret_cast<Hprof*>(arg); hprof->DumpHeapObject(obj); } - void VisitRoot(const Object* obj) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void VisitRoot(const Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - int DumpHeapObject(Object* obj) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + int DumpHeapObject(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Finish() { } - int WriteClassTable() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + int WriteClassTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { HprofRecord* rec = &current_record_; uint32_t nextSerialNumber = 1; @@ -563,7 +563,7 @@ class Hprof { int MarkRootObject(const Object* obj, jobject jniObj); HprofClassObjectId LookupClassId(Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (c == NULL) { // c is the superclass of java.lang.Object or a primitive return (HprofClassObjectId)0; @@ -598,7 +598,7 @@ class Hprof { } HprofStringId LookupClassNameId(const Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return LookupStringId(PrettyDescriptor(c)); } diff --git a/src/image.h b/src/image.h index 253b762e54..399b1439d2 100644 --- a/src/image.h +++ b/src/image.h @@ -95,7 +95,7 @@ class PACKED ImageHeader { }; Object* GetImageRoot(ImageRoot image_root) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetImageRoots()->Get(image_root); } diff --git a/src/image_writer.cc b/src/image_writer.cc index 0932c307f7..d91fc59905 100644 --- a/src/image_writer.cc +++ b/src/image_writer.cc @@ -189,10 +189,10 @@ void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) { } void ImageWriter::ComputeEagerResolvedStrings() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // TODO: Check image spaces only?
Heap* heap = Runtime::Current()->GetHeap(); - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); heap->FlushAllocStack(); heap->GetLiveBitmap()->Walk(ComputeEagerResolvedStringsCallback, this); } @@ -270,18 +270,18 @@ bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) { } void ImageWriter::CheckNonImageClassesRemoved() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (image_classes_ == NULL) { return; } Heap* heap = Runtime::Current()->GetHeap(); { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); heap->FlushAllocStack(); } - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); heap->GetLiveBitmap()->Walk(CheckNonImageClassesRemovedCallback, this); } @@ -394,7 +394,7 @@ void ImageWriter::CalculateNewObjectOffsets() { { Heap* heap = Runtime::Current()->GetHeap(); - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); heap->FlushAllocStack(); } @@ -420,12 +420,12 @@ void ImageWriter::CalculateNewObjectOffsets() { } void ImageWriter::CopyAndFixupObjects() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Heap* heap = Runtime::Current()->GetHeap(); // TODO: heap validation can't handle this fix up pass heap->DisableObjectValidation(); // TODO: Image spaces only? - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); heap->FlushAllocStack(); heap->GetLiveBitmap()->Walk(CopyAndFixupObjectsCallback, this); } @@ -604,7 +604,7 @@ void ImageWriter::FixupFields(const Object* orig, } static Method* GetReferrerMethod(const Compiler::PatchInformation* patch) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedObjectAccessUnchecked soa(Thread::Current()); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); DexCache* dex_cache = class_linker->FindDexCache(patch->GetDexFile()); @@ -626,7 +626,7 @@ static Method* GetReferrerMethod(const Compiler::PatchInformation* patch) } static Method* GetTargetMethod(const Compiler::PatchInformation* patch) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); DexCache* dex_cache = class_linker->FindDexCache(patch->GetDexFile()); Method* method = class_linker->ResolveMethod(patch->GetDexFile(), diff --git a/src/image_writer.h b/src/image_writer.h index f768d8707d..8e8d106c68 100644 --- a/src/image_writer.h +++ b/src/image_writer.h @@ -49,14 +49,14 @@ class ImageWriter { const std::string& oat_filename, const std::string& oat_location, const Compiler& compiler) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); private: bool AllocMemory(); // we use the lock word to store the offset of the object in the image void AssignImageOffset(Object* object) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(object != NULL); SetImageOffset(object, image_end_); image_end_ += RoundUp(object->SizeOf(), 8); // 64-bit alignment @@ -108,55 +108,55 @@ class ImageWriter { return oat_begin_ + offset; } - bool IsImageClass(const Class* klass) 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool IsImageClass(const Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpImageClasses(); void ComputeLazyFieldsForImageClasses() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static bool ComputeLazyFieldsForClassesVisitor(Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Wire dex cache resolved strings to strings in the image to avoid runtime resolution void ComputeEagerResolvedStrings(); static void ComputeEagerResolvedStringsCallback(Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static bool NonImageClassesVisitor(Class* c, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CheckNonImageClassesRemoved(); static void CheckNonImageClassesRemovedCallback(Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void CalculateNewObjectOffsets() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void CalculateNewObjectOffsets() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ObjectArray<Object>* CreateImageRoots() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void CalculateNewObjectOffsetsCallback(Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CopyAndFixupObjects(); static void CopyAndFixupObjectsCallback(Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupClass(const Class* orig, Class* copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupMethod(const Method* orig, Method* copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupObject(const Object* orig, Object* copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupObjectArray(const ObjectArray<Object>* orig, ObjectArray<Object>* copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupInstanceFields(const Object* orig, Object* copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupStaticFields(const Class* orig, Class* copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupFields(const Object* orig, Object* copy, uint32_t ref_offsets, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void PatchOatCodeAndMethods(const Compiler& compiler) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetPatchLocation(const Compiler::PatchInformation* patch, uint32_t value) - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); SafeMap<const Object*, size_t> offsets_; diff --git a/src/indirect_reference_table.h b/src/indirect_reference_table.h index c3e17b0f89..48bd35b1fc 100644 --- a/src/indirect_reference_table.h +++ b/src/indirect_reference_table.h @@ -258,7 +258,7 @@ class IndirectReferenceTable { * failed during expansion). */ IndirectRef Add(uint32_t cookie, const Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Given an IndirectRef in the table, return the Object it refers to. @@ -288,7 +288,7 @@ class IndirectReferenceTable { void AssertEmpty(); - void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Return the #of entries in the entire table. This includes holes, and diff --git a/src/intern_table.h b/src/intern_table.h index 0d9e0971ab..6f5677385f 100644 --- a/src/intern_table.h +++ b/src/intern_table.h @@ -42,26 +42,26 @@ class InternTable { // Interns a potentially new string in the 'strong' table. (See above.) String* InternStrong(int32_t utf16_length, const char* utf8_data) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Interns a potentially new string in the 'strong' table. (See above.) String* InternStrong(const char* utf8_data) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Interns a potentially new string in the 'strong' table. (See above.) - String* InternStrong(String* s) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + String* InternStrong(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Interns a potentially new string in the 'weak' table. (See above.) - String* InternWeak(String* s) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + String* InternWeak(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Register a String trusting that it is safe to intern. // Used when reinitializing InternTable from an image. 
- void RegisterStrong(String* s) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void RegisterStrong(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SweepInternTableWeaks(Heap::IsMarkedTester is_marked, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - bool ContainsWeak(String* s) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool ContainsWeak(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t Size() const; @@ -73,10 +73,10 @@ class InternTable { typedef std::multimap<int32_t, String*> Table; String* Insert(String* s, bool is_strong) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); String* Lookup(Table& table, String* s, uint32_t hash_code) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); String* Insert(Table& table, String* s, uint32_t hash_code); void Remove(Table& table, const String* s, uint32_t hash_code); diff --git a/src/intern_table_test.cc b/src/intern_table_test.cc index 0d46076382..15fa259d20 100644 --- a/src/intern_table_test.cc +++ b/src/intern_table_test.cc @@ -102,7 +102,7 @@ TEST_F(InternTableTest, SweepInternTableWeaks) { p.Expect(s0.get()); p.Expect(s1.get()); { - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); t.SweepInternTableWeaks(IsMarked, &p); } diff --git a/src/jdwp/jdwp.h b/src/jdwp/jdwp.h index 725e857aea..54e5cc75e2 100644 --- a/src/jdwp/jdwp.h +++ b/src/jdwp/jdwp.h @@ -79,7 +79,7 @@ struct JdwpLocation { uint64_t dex_pc; }; std::ostream& operator<<(std::ostream& os, const JdwpLocation& rhs) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool operator==(const JdwpLocation& lhs, const JdwpLocation& rhs); bool operator!=(const JdwpLocation& lhs, const JdwpLocation& rhs); @@ -120,7 +120,7 @@ struct JdwpState { * Returns a newly-allocated JdwpState struct on success, or NULL on failure. */ static JdwpState* Create(const JdwpOptions* options) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); ~JdwpState(); @@ -180,7 +180,7 @@ struct JdwpState { * The VM has finished initializing. Only called when the debugger is * connected at the time initialization completes. */ - bool PostVMStart() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool PostVMStart() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * A location of interest has been reached. This is used for breakpoints, @@ -193,7 +193,7 @@ struct JdwpState { * "eventFlags" indicates the types of events that have occurred. */ bool PostLocationEvent(const JdwpLocation* pLoc, ObjectId thisPtr, int eventFlags) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * An exception has been thrown. @@ -202,20 +202,20 @@ struct JdwpState { */ bool PostException(const JdwpLocation* pThrowLoc, ObjectId excepId, RefTypeId excepClassId, const JdwpLocation* pCatchLoc, ObjectId thisPtr) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * A thread has started or stopped. */ bool PostThreadChange(ObjectId threadId, bool start) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Class has been prepared. 
*/ bool PostClassPrepare(JdwpTypeTag tag, RefTypeId refTypeId, const std::string& signature, int status) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * The VM is about to stop. @@ -223,13 +223,13 @@ struct JdwpState { bool PostVMDeath(); // Called if/when we realize we're talking to DDMS. - void NotifyDdmsActive() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void NotifyDdmsActive() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Send up a chunk of DDM data. */ void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Process a request from the debugger. @@ -248,15 +248,15 @@ struct JdwpState { void ResetState() LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* atomic ops to get next serial number */ uint32_t NextRequestSerial(); uint32_t NextEventSerial(); void Run() - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_, + Locks::thread_suspend_count_lock_); /* * Register an event by adding it to the event list. @@ -266,45 +266,45 @@ struct JdwpState { */ JdwpError RegisterEvent(JdwpEvent* pEvent) LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Unregister an event, given the requestId. */ void UnregisterEventById(uint32_t requestId) LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* * Unregister all events. 
*/ void UnregisterAll() LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: explicit JdwpState(const JdwpOptions* options); bool InvokeInProgress(); bool IsConnected(); void SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::mutator_lock_); void SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy suspend_policy, ObjectId threadId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CleanupMatchList(JdwpEvent** match_list, int match_count) EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void EventFinish(ExpandBuf* pReq); void FindMatchingEvents(JdwpEventKind eventKind, ModBasket* basket, JdwpEvent** match_list, int* pMatchCount) EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void UnregisterEvent(JdwpEvent* pEvent) EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); public: // TODO: fix privacy const JdwpOptions* options_; diff --git a/src/jdwp/jdwp_event.cc b/src/jdwp/jdwp_event.cc index 0eb2930f01..4f11a65331 100644 --- a/src/jdwp/jdwp_event.cc +++ b/src/jdwp/jdwp_event.cc @@ -375,7 +375,7 @@ static bool PatternMatch(const char* pattern, const std::string& target) { * need to do this even if later mods cause us to ignore the event. */ static bool ModsMatch(JdwpEvent* pEvent, ModBasket* basket) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JdwpEventMod* pMod = pEvent->mods; for (int i = pEvent->modCount; i > 0; i--, pMod++) { diff --git a/src/jdwp/jdwp_handler.cc b/src/jdwp/jdwp_handler.cc index fbe9192ee2..07e47b5c68 100644 --- a/src/jdwp/jdwp_handler.cc +++ b/src/jdwp/jdwp_handler.cc @@ -92,7 +92,7 @@ static void JdwpWriteValue(ExpandBuf* pReply, int width, uint64_t value) { static JdwpError FinishInvoke(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply, ObjectId thread_id, ObjectId object_id, RefTypeId class_id, MethodId method_id, bool is_constructor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(!is_constructor || object_id != 0); uint32_t arg_count = Read4BE(&buf); @@ -161,7 +161,7 @@ static JdwpError FinishInvoke(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Request for version info. */ static JdwpError VM_Version(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { /* text information on runtime version */ std::string version(StringPrintf("Android Runtime %s", Runtime::Current()->GetVersion())); expandBufAddUtf8String(pReply, version); @@ -182,7 +182,7 @@ static JdwpError VM_Version(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) * been loaded by multiple class loaders. 
*/ static JdwpError VM_ClassesBySignature(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string classDescriptor(ReadNewUtf8String(&buf)); VLOG(jdwp) << " Req for class by signature '" << classDescriptor << "'"; @@ -215,7 +215,7 @@ static JdwpError VM_ClassesBySignature(JdwpState*, const uint8_t* buf, int, Expa * to be suspended, and that violates some JDWP expectations. */ static JdwpError VM_AllThreads(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::vector<ObjectId> thread_ids; Dbg::GetThreads(0, thread_ids); @@ -231,7 +231,7 @@ static JdwpError VM_AllThreads(JdwpState*, const uint8_t*, int, ExpandBuf* pRepl * List all thread groups that do not have a parent. */ static JdwpError VM_TopLevelThreadGroups(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { /* * TODO: maintain a list of parentless thread groups in the VM. * @@ -254,7 +254,7 @@ static JdwpError VM_TopLevelThreadGroups(JdwpState*, const uint8_t*, int, Expand * All IDs are 8 bytes. */ static JdwpError VM_IDSizes(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { expandBufAdd4BE(pReply, sizeof(FieldId)); expandBufAdd4BE(pReply, sizeof(MethodId)); expandBufAdd4BE(pReply, sizeof(ObjectId)); @@ -264,7 +264,7 @@ static JdwpError VM_IDSizes(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) } static JdwpError VM_Dispose(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Dbg::Disposed(); return ERR_NONE; } @@ -276,7 +276,7 @@ static JdwpError VM_Dispose(JdwpState*, const uint8_t*, int, ExpandBuf*) * This needs to increment the "suspend count" on all threads. */ static JdwpError VM_Suspend(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Dbg::SuspendVM(); return ERR_NONE; } @@ -285,7 +285,7 @@ static JdwpError VM_Suspend(JdwpState*, const uint8_t*, int, ExpandBuf*) * Resume execution. Decrements the "suspend count" of all threads. */ static JdwpError VM_Resume(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Dbg::ResumeVM(); return ERR_NONE; } @@ -294,7 +294,7 @@ static JdwpError VM_Resume(JdwpState*, const uint8_t*, int, ExpandBuf*) * The debugger wants the entire VM to exit. */ static JdwpError VM_Exit(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t exitCode = Get4BE(buf); LOG(WARNING) << "Debugger is telling the VM to exit with code=" << exitCode; @@ -310,7 +310,7 @@ static JdwpError VM_Exit(JdwpState*, const uint8_t* buf, int, ExpandBuf*) * string "java.util.Arrays".) 
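The Read4BE/Get4BE calls in these handlers decode the JDWP wire format, which is big-endian by specification. A minimal sketch of readers matching the call sites above, where the buffer-advancing variant takes a pointer-to-pointer (implementations here are illustrative, not ART's):

// Illustrative big-endian readers consistent with the call sites above.
static inline uint32_t Get4BE(const uint8_t* p) {
  return (static_cast<uint32_t>(p[0]) << 24) |
         (static_cast<uint32_t>(p[1]) << 16) |
         (static_cast<uint32_t>(p[2]) << 8)  |
          static_cast<uint32_t>(p[3]);
}
static inline uint32_t Read4BE(const uint8_t** pBuf) {
  uint32_t result = Get4BE(*pBuf);
  *pBuf += 4;  // advance past the consumed bytes, as the handlers expect
  return result;
}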
*/ static JdwpError VM_CreateString(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string str(ReadNewUtf8String(&buf)); VLOG(jdwp) << " Req to create string '" << str << "'"; ObjectId stringId = Dbg::CreateString(str); @@ -325,7 +325,7 @@ static JdwpError VM_CreateString(JdwpState*, const uint8_t* buf, int, ExpandBuf* * Tell the debugger what we are capable of. */ static JdwpError VM_Capabilities(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { expandBufAdd1(pReply, false); /* canWatchFieldModification */ expandBufAdd1(pReply, false); /* canWatchFieldAccess */ expandBufAdd1(pReply, false); /* canGetBytecodes */ @@ -337,7 +337,7 @@ static JdwpError VM_Capabilities(JdwpState*, const uint8_t*, int, ExpandBuf* pRe } static JdwpError VM_ClassPaths(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { expandBufAddUtf8String(pReply, "/"); std::vector<std::string> class_path; @@ -363,7 +363,7 @@ static JdwpError VM_ClassPaths(JdwpState*, const uint8_t*, int, ExpandBuf* pRepl * Currently does nothing. */ static JdwpError VM_DisposeObjects(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ERR_NONE; } @@ -371,7 +371,7 @@ static JdwpError VM_DisposeObjects(JdwpState*, const uint8_t*, int, ExpandBuf*) * Tell the debugger what we are capable of. */ static JdwpError VM_CapabilitiesNew(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { expandBufAdd1(pReply, false); /* canWatchFieldModification */ expandBufAdd1(pReply, false); /* canWatchFieldAccess */ expandBufAdd1(pReply, false); /* canGetBytecodes */ @@ -402,7 +402,7 @@ static JdwpError VM_CapabilitiesNew(JdwpState*, const uint8_t*, int, ExpandBuf* } static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status, bool generic) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::vector<JDWP::RefTypeId> classes; Dbg::GetClassList(classes); @@ -433,17 +433,17 @@ static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status } static JdwpError VM_AllClasses(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return VM_AllClassesImpl(pReply, true, false); } static JdwpError VM_AllClassesWithGeneric(JdwpState*, const uint8_t*, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return VM_AllClassesImpl(pReply, true, true); } static JdwpError RT_Modifiers(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); return Dbg::GetModifiers(refTypeId, pReply); } @@ -452,7 +452,7 @@ static JdwpError RT_Modifiers(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Get values from static fields in a reference type. 
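VM_IDSizes, VM_Capabilities and VM_ClassPaths above show the reply side of the same wire format: handlers append typed, big-endian fields to an ExpandBuf. A hypothetical handler (not part of the patch) using only the writers visible above:

// Hypothetical example: a reply carrying one boolean flag, one 32-bit
// count, and one string, via the same writers the real handlers use.
static JdwpError VM_Example(JdwpState*, const uint8_t*, int, ExpandBuf* pReply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  expandBufAdd1(pReply, true);                // a capability-style flag
  expandBufAdd4BE(pReply, 1);                 // a count, big-endian
  expandBufAddUtf8String(pReply, "example");  // length-prefixed UTF-8
  return ERR_NONE;
}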
*/ static JdwpError RT_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); uint32_t field_count = Read4BE(&buf); expandBufAdd4BE(pReply, field_count); @@ -470,7 +470,7 @@ static JdwpError RT_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Get the name of the source file in which a reference type was declared. */ static JdwpError RT_SourceFile(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); std::string source_file; JdwpError status = Dbg::GetSourceFile(refTypeId, source_file); @@ -485,7 +485,7 @@ static JdwpError RT_SourceFile(JdwpState*, const uint8_t* buf, int, ExpandBuf* p * Return the current status of the reference type. */ static JdwpError RT_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); JDWP::JdwpTypeTag type_tag; uint32_t class_status; @@ -501,7 +501,7 @@ static JdwpError RT_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl * Return interfaces implemented directly by this class. */ static JdwpError RT_Interfaces(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << StringPrintf(" Req for interfaces in %#llx (%s)", refTypeId, Dbg::GetClassName(refTypeId).c_str()); return Dbg::OutputDeclaredInterfaces(refTypeId, pReply); @@ -511,7 +511,7 @@ static JdwpError RT_Interfaces(JdwpState*, const uint8_t* buf, int, ExpandBuf* p * Return the class object corresponding to this type. */ static JdwpError RT_ClassObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); ObjectId classObjectId; JdwpError status = Dbg::GetClassObject(refTypeId, classObjectId); @@ -529,14 +529,14 @@ static JdwpError RT_ClassObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* * JDB seems interested, but DEX files don't currently support this. 
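RT_SourceFile, RT_Status and RT_ClassObject above all follow the same early-return discipline: call into Dbg::, and propagate any non-ERR_NONE status before touching the reply buffer. Reduced to a skeleton (Dbg::GetExample is hypothetical, named only for illustration):

// Illustrative skeleton of the status-propagation idiom above.
static JdwpError RT_Example(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  RefTypeId refTypeId = ReadRefTypeId(&buf);
  std::string out;
  JdwpError status = Dbg::GetExample(refTypeId, out);  // hypothetical query
  if (status != ERR_NONE) {
    return status;  // nothing is written to pReply on failure
  }
  expandBufAddUtf8String(pReply, out);
  return ERR_NONE;
}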
*/ static JdwpError RT_SourceDebugExtension(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { /* referenceTypeId in, string out */ return ERR_ABSENT_INFORMATION; } static JdwpError RT_Signature(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply, bool with_generic) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << StringPrintf(" Req for signature of refTypeId=%#llx", refTypeId); @@ -554,13 +554,13 @@ static JdwpError RT_Signature(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR } static JdwpError RT_Signature(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return RT_Signature(state, buf, dataLen, pReply, false); } static JdwpError RT_SignatureWithGeneric(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return RT_Signature(state, buf, dataLen, pReply, true); } @@ -569,13 +569,13 @@ static JdwpError RT_SignatureWithGeneric(JdwpState* state, const uint8_t* buf, i * reference type, or null if it was loaded by the system loader. */ static JdwpError RT_ClassLoader(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); return Dbg::GetClassLoader(refTypeId, pReply); } static std::string Describe(const RefTypeId& refTypeId) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string signature("unknown"); Dbg::GetSignature(refTypeId, signature); return StringPrintf("refTypeId=%#llx (%s)", refTypeId, signature.c_str()); @@ -586,7 +586,7 @@ static std::string Describe(const RefTypeId& refTypeId) * fields declared by a class. */ static JdwpError RT_FieldsWithGeneric(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << " Req for fields in " << Describe(refTypeId); return Dbg::OutputDeclaredFields(refTypeId, true, pReply); @@ -594,7 +594,7 @@ static JdwpError RT_FieldsWithGeneric(JdwpState*, const uint8_t* buf, int, Expan // Obsolete equivalent of FieldsWithGeneric, without the generic type information. static JdwpError RT_Fields(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << " Req for fields in " << Describe(refTypeId); return Dbg::OutputDeclaredFields(refTypeId, false, pReply); @@ -605,7 +605,7 @@ static JdwpError RT_Fields(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl * methods declared by a class. 
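The RT_Signature pair above (and M_VariableTable below) shows a recurring cost of annotalysis: attributes attach to individual declarations, so even a one-line forwarding overload must restate SHARED_LOCKS_REQUIRED, or the checker reports the callee's lock requirement as unsatisfied. A reduction of the pattern with hypothetical names:

// Hypothetical reduction: the forwarding overload carries the same
// annotation as the implementation it calls; it is not inherited.
static int SignatureImpl(int id, bool with_generic)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static int Signature(int id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {  // restated here, too
  return SignatureImpl(id, false);
}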
*/ static JdwpError RT_MethodsWithGeneric(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << " Req for methods in " << Describe(refTypeId); return Dbg::OutputDeclaredMethods(refTypeId, true, pReply); @@ -613,7 +613,7 @@ static JdwpError RT_MethodsWithGeneric(JdwpState*, const uint8_t* buf, int, Expa // Obsolete equivalent of MethodsWithGeneric, without the generic type information. static JdwpError RT_Methods(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); VLOG(jdwp) << " Req for methods in " << Describe(refTypeId); return Dbg::OutputDeclaredMethods(refTypeId, false, pReply); @@ -623,7 +623,7 @@ static JdwpError RT_Methods(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRep * Return the immediate superclass of a class. */ static JdwpError CT_Superclass(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); RefTypeId superClassId; JdwpError status = Dbg::GetSuperclass(class_id, superClassId); @@ -638,7 +638,7 @@ static JdwpError CT_Superclass(JdwpState*, const uint8_t* buf, int, ExpandBuf* p * Set static class values. */ static JdwpError CT_SetValues(JdwpState* , const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); uint32_t values = Read4BE(&buf); @@ -668,7 +668,7 @@ static JdwpError CT_SetValues(JdwpState* , const uint8_t* buf, int, ExpandBuf*) */ static JdwpError CT_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); ObjectId thread_id = ReadObjectId(&buf); MethodId method_id = ReadMethodId(&buf); @@ -685,7 +685,7 @@ static JdwpError CT_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataL */ static JdwpError CT_NewInstance(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); ObjectId thread_id = ReadObjectId(&buf); MethodId method_id = ReadMethodId(&buf); @@ -706,7 +706,7 @@ static JdwpError CT_NewInstance(JdwpState* state, const uint8_t* buf, int dataLe * Create a new array object of the requested type and length. */ static JdwpError AT_newInstance(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId arrayTypeId = ReadRefTypeId(&buf); uint32_t length = Read4BE(&buf); @@ -728,7 +728,7 @@ static JdwpError AT_newInstance(JdwpState*, const uint8_t* buf, int, ExpandBuf* * Return line number information for the method, if present. 
*/ static JdwpError M_LineTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId refTypeId = ReadRefTypeId(&buf); MethodId method_id = ReadMethodId(&buf); @@ -741,7 +741,7 @@ static JdwpError M_LineTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRe static JdwpError M_VariableTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply, bool generic) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId class_id = ReadRefTypeId(&buf); MethodId method_id = ReadMethodId(&buf); @@ -759,13 +759,13 @@ static JdwpError M_VariableTable(JdwpState*, const uint8_t* buf, int, ExpandBuf* static JdwpError M_VariableTable(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return M_VariableTable(state, buf, dataLen, pReply, false); } static JdwpError M_VariableTableWithGeneric(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return M_VariableTable(state, buf, dataLen, pReply, true); } @@ -777,7 +777,7 @@ static JdwpError M_VariableTableWithGeneric(JdwpState* state, const uint8_t* buf * passed in here. */ static JdwpError OR_ReferenceType(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId object_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for type of object_id=%#llx", object_id); return Dbg::GetReferenceType(object_id, pReply); @@ -787,7 +787,7 @@ static JdwpError OR_ReferenceType(JdwpState*, const uint8_t* buf, int, ExpandBuf * Get values from the fields of an object. */ static JdwpError OR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId object_id = ReadObjectId(&buf); uint32_t field_count = Read4BE(&buf); @@ -810,7 +810,7 @@ static JdwpError OR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Set values in the fields of an object. */ static JdwpError OR_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId object_id = ReadObjectId(&buf); uint32_t field_count = Read4BE(&buf); @@ -846,7 +846,7 @@ static JdwpError OR_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) */ static JdwpError OR_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId object_id = ReadObjectId(&buf); ObjectId thread_id = ReadObjectId(&buf); RefTypeId class_id = ReadRefTypeId(&buf); @@ -859,7 +859,7 @@ static JdwpError OR_InvokeMethod(JdwpState* state, const uint8_t* buf, int dataL * Disable garbage collection of the specified object. 
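CT_InvokeMethod, CT_NewInstance and OR_InvokeMethod above each parse their leading IDs and then hand off to the FinishInvoke helper declared near the top of jdwp_handler.cc, which reads the argument list common to every invoke command. The handler bodies are elided by the diff context, so the following is a plausible reconstruction of the call shape, matching the FinishInvoke declaration, not a quote of the file:

// Hypothetical reconstruction of the delegation tail (bodies elided above).
static JdwpError CT_InvokeMethodSketch(JdwpState* state, const uint8_t* buf,
                                       int dataLen, ExpandBuf* pReply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  RefTypeId class_id = ReadRefTypeId(&buf);
  ObjectId thread_id = ReadObjectId(&buf);
  MethodId method_id = ReadMethodId(&buf);
  return FinishInvoke(state, buf, dataLen, pReply,
                      thread_id, 0 /* no receiver object */, class_id,
                      method_id, false /* not a constructor */);
}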
*/ static JdwpError OR_DisableCollection(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // this is currently a no-op return ERR_NONE; } @@ -868,7 +868,7 @@ static JdwpError OR_DisableCollection(JdwpState*, const uint8_t*, int, ExpandBuf * Enable garbage collection of the specified object. */ static JdwpError OR_EnableCollection(JdwpState*, const uint8_t*, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // this is currently a no-op return ERR_NONE; } @@ -877,7 +877,7 @@ static JdwpError OR_EnableCollection(JdwpState*, const uint8_t*, int, ExpandBuf* * Determine whether an object has been garbage collected. */ static JdwpError OR_IsCollected(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId object_id; object_id = ReadObjectId(&buf); @@ -893,7 +893,7 @@ static JdwpError OR_IsCollected(JdwpState*, const uint8_t* buf, int, ExpandBuf* * Return the string value in a string object. */ static JdwpError SR_Value(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId stringObject = ReadObjectId(&buf); std::string str(Dbg::StringToUtf8(stringObject)); @@ -908,7 +908,7 @@ static JdwpError SR_Value(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply * Return a thread's name. */ static JdwpError TR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for name of thread %#llx", thread_id); @@ -929,7 +929,7 @@ static JdwpError TR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) * resume it; only the JDI is allowed to resume it. */ static JdwpError TR_Suspend(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); if (thread_id == Dbg::GetThreadSelfId()) { @@ -948,7 +948,7 @@ static JdwpError TR_Suspend(JdwpState*, const uint8_t* buf, int, ExpandBuf*) * Resume the specified thread. */ static JdwpError TR_Resume(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); if (thread_id == Dbg::GetThreadSelfId()) { @@ -964,7 +964,7 @@ static JdwpError TR_Resume(JdwpState*, const uint8_t* buf, int, ExpandBuf*) * Return status of specified thread. */ static JdwpError TR_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for status of thread %#llx", thread_id); @@ -987,7 +987,7 @@ static JdwpError TR_Status(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl * Return the thread group that the specified thread is a member of. 
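Why every handler is SHARED_LOCKS_REQUIRED(Locks::mutator_lock_): in ART the mutator lock is a reader-writer lock that mutator threads hold shared while touching managed objects, and that the suspension machinery acquires exclusively to stop them all; this is also why JdwpState::Run above is LOCKS_EXCLUDED from it while parked between requests. A conceptual model only (the real definitions live in ART's mutex.h):

// Conceptual model, not ART's actual declarations.
struct Locks {
  static ReaderWriterMutex* mutator_lock_;   // shared by mutators; exclusive to suspend all
  static ReaderWriterMutex* heap_bitmap_lock_;
  static Mutex* thread_suspend_count_lock_;  // guards per-thread suspend counts
  static Mutex* logging_lock_;               // serializes log output (see logging.cc later)
  static void Init();                        // called during startup (see logging.cc later)
};
void TouchManagedObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);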
*/ static JdwpError TR_ThreadGroup(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); return Dbg::GetThreadGroup(thread_id, pReply); } @@ -999,7 +999,7 @@ static JdwpError TR_ThreadGroup(JdwpState*, const uint8_t* buf, int, ExpandBuf* * be THREAD_NOT_SUSPENDED. */ static JdwpError TR_Frames(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); uint32_t start_frame = Read4BE(&buf); uint32_t length = Read4BE(&buf); @@ -1036,7 +1036,7 @@ static JdwpError TR_Frames(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl * Returns the #of frames on the specified thread, which must be suspended. */ static JdwpError TR_FrameCount(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); if (!Dbg::ThreadExists(thread_id)) { @@ -1060,7 +1060,7 @@ static JdwpError TR_FrameCount(JdwpState*, const uint8_t* buf, int, ExpandBuf* p * Get the monitor that the thread is waiting on. */ static JdwpError TR_CurrentContendedMonitor(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ReadObjectId(&buf); // thread_id // TODO: create an Object to represent the monitor (we're currently @@ -1076,7 +1076,7 @@ static JdwpError TR_CurrentContendedMonitor(JdwpState*, const uint8_t* buf, int, * its suspend count recently.) */ static JdwpError TR_DebugSuspendCount(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); return Dbg::GetThreadDebugSuspendCount(thread_id, pReply); } @@ -1087,7 +1087,7 @@ static JdwpError TR_DebugSuspendCount(JdwpState*, const uint8_t* buf, int, Expan * The Eclipse debugger recognizes "main" and "system" as special. */ static JdwpError TGR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_group_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for name of thread_group_id=%#llx", thread_group_id); @@ -1101,7 +1101,7 @@ static JdwpError TGR_Name(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply * thread group. */ static JdwpError TGR_Parent(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_group_id = ReadObjectId(&buf); ObjectId parentGroup = Dbg::GetThreadGroupParent(thread_group_id); @@ -1115,7 +1115,7 @@ static JdwpError TGR_Parent(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRep * specified thread group. 
*/ static JdwpError TGR_Children(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_group_id = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for threads in thread_group_id=%#llx", thread_group_id); @@ -1140,7 +1140,7 @@ static JdwpError TGR_Children(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Return the #of components in the array. */ static JdwpError AR_Length(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId arrayId = ReadObjectId(&buf); VLOG(jdwp) << StringPrintf(" Req for length of array %#llx", arrayId); @@ -1160,7 +1160,7 @@ static JdwpError AR_Length(JdwpState*, const uint8_t* buf, int, ExpandBuf* pRepl * Return the values from an array. */ static JdwpError AR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId arrayId = ReadObjectId(&buf); uint32_t firstIndex = Read4BE(&buf); uint32_t length = Read4BE(&buf); @@ -1173,7 +1173,7 @@ static JdwpError AR_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Set values in an array. */ static JdwpError AR_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId arrayId = ReadObjectId(&buf); uint32_t firstIndex = Read4BE(&buf); uint32_t values = Read4BE(&buf); @@ -1185,7 +1185,7 @@ static JdwpError AR_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) } static JdwpError CLR_VisibleClasses(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ReadObjectId(&buf); // classLoaderObject // TODO: we should only return classes which have the given class loader as a defining or // initiating loader. The former would be easy; the latter is hard, because we don't have @@ -1199,7 +1199,7 @@ static JdwpError CLR_VisibleClasses(JdwpState*, const uint8_t* buf, int, ExpandB * Reply with a requestID. */ static JdwpError ER_Set(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const uint8_t* origBuf = buf; uint8_t eventKind = Read1(&buf); @@ -1369,7 +1369,7 @@ static JdwpError ER_Set(JdwpState* state, const uint8_t* buf, int dataLen, Expan * and does not return an error. */ static JdwpError ER_Clear(JdwpState* state, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint8_t eventKind; eventKind = Read1(&buf); uint32_t requestId = Read4BE(&buf); @@ -1385,7 +1385,7 @@ static JdwpError ER_Clear(JdwpState* state, const uint8_t* buf, int, ExpandBuf*) * Return the values of arguments and local variables. 
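ER_Set and ER_Clear above feed the RegisterEvent/UnregisterEventById pair declared in jdwp.h earlier, whose annotations encode the ordering rule: mutator_lock_ is already held shared by the caller, and event_list_lock_ must not be held yet. An illustrative body satisfying that contract (event_list_ is a hypothetical member used only for the sketch; the annotations sit on the jdwp.h declaration, so the definition does not repeat them):

// Illustrative only; not the patch's actual RegisterEvent body.
JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
  MutexLock mu(event_list_lock_);  // safe: LOCKS_EXCLUDED on the declaration
  pEvent->next = event_list_;      // hypothetical intrusive-list insert
  event_list_ = pEvent;
  return ERR_NONE;
}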
*/ static JdwpError SF_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); FrameId frame_id = ReadFrameId(&buf); uint32_t slots = Read4BE(&buf); @@ -1411,7 +1411,7 @@ static JdwpError SF_GetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf* pR * Set the values of arguments and local variables. */ static JdwpError SF_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); FrameId frame_id = ReadFrameId(&buf); uint32_t slots = Read4BE(&buf); @@ -1435,7 +1435,7 @@ static JdwpError SF_SetValues(JdwpState*, const uint8_t* buf, int, ExpandBuf*) * Returns the value of "this" for the specified frame. */ static JdwpError SF_ThisObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectId thread_id = ReadObjectId(&buf); FrameId frame_id = ReadFrameId(&buf); @@ -1467,7 +1467,7 @@ static JdwpError SF_ThisObject(JdwpState*, const uint8_t* buf, int, ExpandBuf* p * that, or I have no idea what this is for.) */ static JdwpError COR_ReflectedType(JdwpState*, const uint8_t* buf, int, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { RefTypeId classObjectId = ReadRefTypeId(&buf); VLOG(jdwp) << StringPrintf(" Req for refTypeId for class=%#llx (%s)", classObjectId, Dbg::GetClassName(classObjectId).c_str()); @@ -1478,7 +1478,7 @@ static JdwpError COR_ReflectedType(JdwpState*, const uint8_t* buf, int, ExpandBu * Handle a DDM packet with a single chunk in it. */ static JdwpError DDM_Chunk(JdwpState* state, const uint8_t* buf, int dataLen, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint8_t* replyBuf = NULL; int replyLen = -1; diff --git a/src/jdwp/jdwp_main.cc b/src/jdwp/jdwp_main.cc index 4b442db51f..4fec005005 100644 --- a/src/jdwp/jdwp_main.cc +++ b/src/jdwp/jdwp_main.cc @@ -118,7 +118,7 @@ JdwpState::JdwpState(const JdwpOptions* options) * the thread is accepting network connections. */ JdwpState* JdwpState::Create(const JdwpOptions* options) { - GlobalSynchronization::mutator_lock_->AssertNotHeld(); + Locks::mutator_lock_->AssertNotHeld(); UniquePtr<JdwpState> state(new JdwpState(options)); switch (options->transport) { case kJdwpTransportSocket: @@ -301,7 +301,7 @@ void JdwpState::Run() { /* set the thread state to kWaitingInMainDebuggerLoop so GCs don't wait for us */ { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(thread_->GetState(), kNative); thread_->SetState(kWaitingInMainDebuggerLoop); } @@ -346,7 +346,7 @@ void JdwpState::Run() { while (!Dbg::IsDisposed()) { { // sanity check -- shouldn't happen? 
- MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(thread_->GetState(), kWaitingInMainDebuggerLoop); } @@ -401,7 +401,7 @@ void JdwpState::Run() { /* back to native, for thread shutdown */ { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(thread_->GetState(), kWaitingInMainDebuggerLoop); thread_->SetState(kNative); } diff --git a/src/jni_compiler_test.cc b/src/jni_compiler_test.cc index 72b5848fc3..e0320b35dd 100644 --- a/src/jni_compiler_test.cc +++ b/src/jni_compiler_test.cc @@ -116,9 +116,9 @@ void Java_MyClassNatives_foo(JNIEnv* env, jobject thisObj) { // 1 = thisObj EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); - GlobalSynchronization::mutator_lock_->AssertNotHeld(); + Locks::mutator_lock_->AssertNotHeld(); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); EXPECT_TRUE(thisObj != NULL); @@ -170,7 +170,7 @@ jint Java_MyClassNatives_fooI(JNIEnv* env, jobject thisObj, jint x) { // 1 = thisObj EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -198,7 +198,7 @@ jint Java_MyClassNatives_fooII(JNIEnv* env, jobject thisObj, jint x, jint y) { // 1 = thisObj EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -227,7 +227,7 @@ jlong Java_MyClassNatives_fooJJ(JNIEnv* env, jobject thisObj, jlong x, jlong y) // 1 = thisObj EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -257,7 +257,7 @@ jdouble Java_MyClassNatives_fooDD(JNIEnv* env, jobject thisObj, jdouble x, jdoub // 1 = thisObj EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -288,7 +288,7 @@ jlong Java_MyClassNatives_fooJJ_synchronized(JNIEnv* env, jobject thisObj, jlong // 1 = thisObj EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -316,7 +316,7 @@ jobject Java_MyClassNatives_fooIOO(JNIEnv* env, jobject thisObj, jint x, jobject // 3 = this + y + z EXPECT_EQ(3U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -369,7 +369,7 @@ jint 
Java_MyClassNatives_fooSII(JNIEnv* env, jclass klass, jint x, jint y) { // 1 = klass EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -394,7 +394,7 @@ jdouble Java_MyClassNatives_fooSDD(JNIEnv* env, jclass klass, jdouble x, jdouble // 1 = klass EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -425,7 +425,7 @@ jobject Java_MyClassNatives_fooSIOO(JNIEnv* env, jclass klass, jint x, jobject y // 3 = klass + y + z EXPECT_EQ(3U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); @@ -479,7 +479,7 @@ jobject Java_MyClassNatives_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject // 3 = klass + y + z EXPECT_EQ(3U, Thread::Current()->NumStackReferences()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); EXPECT_EQ(kNative, Thread::Current()->GetState()); } EXPECT_EQ(Thread::Current()->GetJniEnv(), env); diff --git a/src/jni_internal.cc b/src/jni_internal.cc index ab2286fa5c..6dc1a73267 100644 --- a/src/jni_internal.cc +++ b/src/jni_internal.cc @@ -93,7 +93,7 @@ size_t NumArgArrayBytes(const char* shorty, uint32_t shorty_len) { class ArgArray { public: - explicit ArgArray(Method* method) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + explicit ArgArray(Method* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MethodHelper mh(method); shorty_ = mh.GetShorty(); shorty_len_ = mh.GetShortyLength(); @@ -110,7 +110,7 @@ class ArgArray { } void BuildArgArray(const ScopedObjectAccess& soa, va_list ap) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { for (size_t i = 1, offset = 0; i < shorty_len_; ++i, ++offset) { switch (shorty_[i]) { case 'Z': @@ -145,7 +145,7 @@ class ArgArray { } void BuildArgArray(const ScopedObjectAccess& soa, jvalue* args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { for (size_t i = 1, offset = 0; i < shorty_len_; ++i, ++offset) { switch (shorty_[i]) { case 'Z': @@ -189,7 +189,7 @@ class ArgArray { }; static jweak AddWeakGlobalReference(ScopedObjectAccess& soa, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (obj == NULL) { return NULL; } @@ -201,7 +201,7 @@ static jweak AddWeakGlobalReference(ScopedObjectAccess& soa, Object* obj) } static void CheckMethodArguments(Method* m, JValue* args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MethodHelper mh(m); ObjectArray<Class>* parameter_types = mh.GetParameterTypes(); CHECK(parameter_types != NULL); @@ -227,7 +227,7 @@ static void CheckMethodArguments(Method* m, JValue* args) static JValue InvokeWithArgArray(const ScopedObjectAccess& soa, Object* receiver, Method* method, JValue* args) - 
SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(soa.Env()->check_jni)) { CheckMethodArguments(method, args); } @@ -238,7 +238,7 @@ static JValue InvokeWithArgArray(const ScopedObjectAccess& soa, Object* receiver static JValue InvokeWithVarArgs(const ScopedObjectAccess& soa, jobject obj, jmethodID mid, va_list args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* receiver = soa.Decode<Object*>(obj); Method* method = soa.DecodeMethod(mid); ArgArray arg_array(method); @@ -247,13 +247,13 @@ static JValue InvokeWithVarArgs(const ScopedObjectAccess& soa, jobject obj, } static Method* FindVirtualMethod(Object* receiver, Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method); } static JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccess& soa, jobject obj, jmethodID mid, jvalue* args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* receiver = soa.Decode<Object*>(obj); Method* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid)); ArgArray arg_array(method); @@ -263,7 +263,7 @@ static JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccess& soa, static JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccess& soa, jobject obj, jmethodID mid, va_list args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Object* receiver = soa.Decode<Object*>(obj); Method* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid)); ArgArray arg_array(method); @@ -297,14 +297,14 @@ static std::string NormalizeJniClassDescriptor(const char* name) { static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, Class* c, const char* name, const char* sig, const char* kind) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;", "no %s method \"%s.%s%s\"", kind, ClassHelper(c).GetDescriptor(), name, sig); } static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, const char* name, const char* sig, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* c = soa.Decode<Class*>(jni_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; @@ -331,7 +331,7 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, } static ClassLoader* GetClassLoader(Thread* self) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method = self->GetCurrentMethod(); if (method == NULL || PrettyMethod(method, false) == "java.lang.Runtime.nativeLoad") { return self->GetClassLoaderOverride(); @@ -341,7 +341,7 @@ static ClassLoader* GetClassLoader(Thread* self) static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, const char* name, const char* sig, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* c = soa.Decode<Class*>(jni_class); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) { return NULL; @@ -380,14 +380,14 
@@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con } static void PinPrimitiveArray(const ScopedObjectAccess& soa, const Array* array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JavaVMExt* vm = soa.Vm(); MutexLock mu(vm->pins_lock); vm->pin_table.Add(array); } static void UnpinPrimitiveArray(const ScopedObjectAccess& soa, const Array* array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { JavaVMExt* vm = soa.Vm(); MutexLock mu(vm->pins_lock); vm->pin_table.Remove(array); @@ -395,7 +395,7 @@ static void UnpinPrimitiveArray(const ScopedObjectAccess& soa, const Array* arra static void ThrowAIOOBE(ScopedObjectAccess& soa, Array* array, jsize start, jsize length, const char* identifier) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string type(PrettyTypeOf(array)); soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", "%s offset=%d length=%d %s.length=%d", @@ -404,13 +404,13 @@ static void ThrowAIOOBE(ScopedObjectAccess& soa, Array* array, jsize start, static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length, jsize array_length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;", "offset=%d length=%d string.length()=%d", start, length, array_length); } int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause) - LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { + LOCKS_EXCLUDED(Locks::mutator_lock_) { ScopedObjectAccess soa(env); // Turn the const char* into a java.lang.String. @@ -512,7 +512,7 @@ class SharedLibrary { */ bool CheckOnLoadResult() LOCKS_EXCLUDED(jni_on_load_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Thread* self = Thread::Current(); self->TransitionFromRunnableToSuspended(kWaitingForJniOnLoad); bool okay; @@ -615,7 +615,7 @@ class Libraries { // See section 11.3 "Linking Native Methods" of the JNI spec. void* FindNativeMethod(const Method* m, std::string& detail) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string jni_short_name(JniShortName(m)); std::string jni_long_name(JniLongName(m)); const ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader(); @@ -660,7 +660,7 @@ JValue InvokeWithJValues(const ScopedObjectAccess& soa, jobject obj, jmethodID m JValue InvokeWithJValues(const ScopedObjectAccess& soa, Object* receiver, Method* m, JValue* args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return InvokeWithArgArray(soa, receiver, m, args); } @@ -2286,7 +2286,7 @@ class JNI { private: static jint EnsureLocalCapacity(const ScopedObjectAccess& soa, jint desired_capacity, const char* caller) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // TODO: we should try to expand the table if necessary. 
if (desired_capacity < 1 || desired_capacity > static_cast<jint>(kLocalsMax)) { LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity; @@ -2303,7 +2303,7 @@ class JNI { template<typename JniT, typename ArtT> static JniT NewPrimitiveArray(const ScopedObjectAccess& soa, jsize length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK_GE(length, 0); // TODO: ReportJniError ArtT* result = ArtT::Alloc(length); return soa.AddLocalReference<JniT>(result); @@ -2312,7 +2312,7 @@ class JNI { template <typename ArrayT, typename CArrayT, typename ArtArrayT> static CArrayT GetPrimitiveArray(ScopedObjectAccess& soa, ArrayT java_array, jboolean* is_copy) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ArtArrayT* array = soa.Decode<ArtArrayT*>(java_array); PinPrimitiveArray(soa, array); if (is_copy != NULL) { @@ -2324,7 +2324,7 @@ class JNI { template <typename ArrayT> static void ReleasePrimitiveArray(ScopedObjectAccess& soa, ArrayT java_array, jint mode) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (mode != JNI_COMMIT) { Array* array = soa.Decode<Array*>(java_array); UnpinPrimitiveArray(soa, array); @@ -2334,7 +2334,7 @@ class JNI { template <typename JavaArrayT, typename JavaT, typename ArrayT> static void GetPrimitiveArrayRegion(ScopedObjectAccess& soa, JavaArrayT java_array, jsize start, jsize length, JavaT* buf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ArrayT* array = soa.Decode<ArrayT*>(java_array); if (start < 0 || length < 0 || start + length > array->GetLength()) { ThrowAIOOBE(soa, array, start, length, "src"); @@ -2347,7 +2347,7 @@ class JNI { template <typename JavaArrayT, typename JavaT, typename ArrayT> static void SetPrimitiveArrayRegion(ScopedObjectAccess& soa, JavaArrayT java_array, jsize start, jsize length, const JavaT* buf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ArrayT* array = soa.Decode<ArrayT*>(java_array); if (start < 0 || length < 0 || start + length > array->GetLength()) { ThrowAIOOBE(soa, array, start, length, "dst"); diff --git a/src/jni_internal.h b/src/jni_internal.h index fad06e1bec..6833c2a622 100644 --- a/src/jni_internal.h +++ b/src/jni_internal.h @@ -54,9 +54,9 @@ void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINat size_t NumArgArrayBytes(const char* shorty, uint32_t shorty_len); JValue InvokeWithJValues(const ScopedObjectAccess&, jobject obj, jmethodID mid, jvalue* args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); JValue InvokeWithJValues(const ScopedObjectAccess&, Object* receiver, Method* m, JValue* args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause); @@ -71,19 +71,19 @@ struct JavaVMExt : public JavaVM { * human-readable description of the error. 
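GetPrimitiveArray above pins the underlying array, and ReleasePrimitiveArray only unpins when mode != JNI_COMMIT; that is exactly the standard JNI contract, where JNI_COMMIT writes data back but keeps the buffer (and therefore the pin) alive. A hypothetical caller, using only standard JNI:

// Standard JNI usage showing why JNI_COMMIT must not unpin.
// Hypothetical example, not code from the patch.
void BumpFirst(JNIEnv* env, jintArray array) {
  jint* elems = env->GetIntArrayElements(array, NULL);     // pins the array
  elems[0] += 1;
  env->ReleaseIntArrayElements(array, elems, JNI_COMMIT);  // copy back, stay pinned
  // ... elems may legitimately still be used here ...
  env->ReleaseIntArrayElements(array, elems, 0);           // copy back and unpin
}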
*/ bool LoadNativeLibrary(const std::string& path, ClassLoader* class_loader, std::string& detail) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /** * Returns a pointer to the code for the native method 'm', found * using dlsym(3) on every native library that's been loaded so far. */ void* FindCodeForNativeMethod(Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os); void DumpReferenceTables(std::ostream& os) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetCheckJniEnabled(bool enabled); @@ -129,7 +129,7 @@ struct JNIEnvExt : public JNIEnv { ~JNIEnvExt(); void DumpReferenceTables(std::ostream& os) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetCheckJniEnabled(bool enabled); diff --git a/src/jni_internal_test.cc b/src/jni_internal_test.cc index 64461b010f..5db258d789 100644 --- a/src/jni_internal_test.cc +++ b/src/jni_internal_test.cc @@ -70,7 +70,7 @@ class JniInternalTest : public CommonTest { Method::InvokeStub* DoCompile(Method*& method, Object*& receiver, bool is_static, const char* method_name, const char* method_signature) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods"; jobject jclass_loader(LoadDex(class_name)); SirtRef<ClassLoader> class_loader(ScopedObjectAccessUnchecked(Thread::Current()).Decode<ClassLoader*>(jclass_loader)); @@ -97,7 +97,7 @@ class JniInternalTest : public CommonTest { return stub; } - void InvokeNopMethod(bool is_static) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void InvokeNopMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "nop", "()V"); @@ -105,7 +105,7 @@ class JniInternalTest : public CommonTest { } void InvokeIdentityByteMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "identity", "(B)B"); @@ -135,7 +135,7 @@ class JniInternalTest : public CommonTest { } void InvokeIdentityIntMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "identity", "(I)I"); @@ -165,7 +165,7 @@ class JniInternalTest : public CommonTest { } void InvokeIdentityDoubleMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "identity", "(D)D"); @@ -195,7 +195,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumIntIntMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(II)I"); @@ -234,7 +234,7 @@ class JniInternalTest : public CommonTest { } void 
InvokeSumIntIntIntMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(III)I"); @@ -278,7 +278,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumIntIntIntIntMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(IIII)I"); @@ -327,7 +327,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumIntIntIntIntIntMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(IIIII)I"); @@ -381,7 +381,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumDoubleDoubleMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DD)D"); @@ -421,7 +421,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumDoubleDoubleDoubleMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DDD)D"); @@ -452,7 +452,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumDoubleDoubleDoubleDoubleMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DDDD)D"); @@ -486,7 +486,7 @@ class JniInternalTest : public CommonTest { } void InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method; Object* receiver; Method::InvokeStub* stub = DoCompile(method, receiver, is_static, "sum", "(DDDDD)D"); diff --git a/src/logging.cc b/src/logging.cc index 712c02bcd7..a0c07cf227 100644 --- a/src/logging.cc +++ b/src/logging.cc @@ -51,7 +51,7 @@ const char* ProgramInvocationShortName() { // This can be used to reveal or conceal logs with specific tags. void InitLogging(char* argv[]) { // TODO: Move this to a more obvious InitART... - GlobalSynchronization::Init(); + Locks::Init(); // Stash the command line for later use. We can use /proc/self/cmdline on Linux to recover this, // but we don't have that luxury on the Mac, and there are a couple of argv[0] variants that are @@ -104,7 +104,7 @@ LogMessage::~LogMessage() { // Do the actual logging with the lock held. 
{ - MutexLock mu(*GlobalSynchronization::logging_lock_); + MutexLock mu(*Locks::logging_lock_); if (msg.find('\n') == std::string::npos) { LogLine(msg.c_str()); } else { diff --git a/src/logging.h b/src/logging.h index ce86a72cbb..75782d5640 100644 --- a/src/logging.h +++ b/src/logging.h @@ -187,7 +187,7 @@ struct LogMessageData { class LogMessage { public: LogMessage(const char* file, int line, LogSeverity severity, int error); - ~LogMessage() LOCKS_EXCLUDED(GlobalSynchronization::logging_lock_); + ~LogMessage() LOCKS_EXCLUDED(Locks::logging_lock_); std::ostream& stream(); private: @@ -253,10 +253,10 @@ template<typename T> class MutatorLockedDumpable { public: explicit MutatorLockedDumpable(T& value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) : value_(value) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : value_(value) { } - void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { value_.Dump(os); } @@ -271,7 +271,7 @@ class MutatorLockedDumpable { template<typename T> std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs) -// TODO: should be SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) however annotalysis +// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) however annotalysis // currently fails for this. NO_THREAD_SAFETY_ANALYSIS { rhs.Dump(os); diff --git a/src/mark_sweep.cc b/src/mark_sweep.cc index 0b09f90b4f..c21c19c4ed 100644 --- a/src/mark_sweep.cc +++ b/src/mark_sweep.cc @@ -160,8 +160,8 @@ class CheckObjectVisitor { } void operator ()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { mark_sweep_->CheckReference(obj, ref, offset, is_static); } @@ -195,8 +195,8 @@ class ScanImageRootVisitor { } void operator ()(const Object* root) const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(root != NULL); mark_sweep_->ScanObject(root); } @@ -245,8 +245,8 @@ class CheckBitmapVisitor { } void operator ()(const Object* obj) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { DCHECK(obj != NULL); mark_sweep_->CheckObject(obj); } @@ -280,8 +280,8 @@ class ScanObjectVisitor { } void operator ()(const Object* obj) const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { mark_sweep_->ScanObject(obj); } @@ -415,7 +415,7 @@ struct SweepCallbackContext { }; void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { - GlobalSynchronization::heap_bitmap_lock_->AssertExclusiveHeld(); + Locks::heap_bitmap_lock_->AssertExclusiveHeld(); size_t freed_objects = num_ptrs; size_t freed_bytes = 0; @@ -449,7 +449,7 @@ void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { } void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { - 
GlobalSynchronization::heap_bitmap_lock_->AssertExclusiveHeld(); + Locks::heap_bitmap_lock_->AssertExclusiveHeld(); SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg); Heap* heap = context->mark_sweep->GetHeap(); diff --git a/src/mark_sweep.h b/src/mark_sweep.h index 2333bdbfca..d1e348187c 100644 --- a/src/mark_sweep.h +++ b/src/mark_sweep.h @@ -46,14 +46,14 @@ class MarkSweep { // Marks the root set at the start of a garbage collection. void MarkRoots() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Marks the roots in the image space on dirty cards. - void ScanDirtyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + void ScanDirtyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Verify that image roots point to only marked objects within the alloc space. - void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); bool IsMarkStackEmpty() const { return mark_stack_->IsEmpty(); @@ -61,8 +61,8 @@ class MarkSweep { // Builds a mark stack and recursively marks until it empties. void RecursiveMark(bool partial, TimingLogger& timings) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Copies mark bits from live bitmap of ZygoteSpace to mark bitmap for partial GCs. void CopyMarkBits(Space* space); @@ -70,27 +70,27 @@ class MarkSweep { // Builds a mark stack with objects on dirty cards and recursively marks // until it empties. void RecursiveMarkDirtyObjects(bool update_finger) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Recursively marks objects on the specified cards. Updates the finger. void RecursiveMarkCards(CardTable* card_table, const std::vector<byte*>& cards, TimingLogger& timings) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);; + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Remarks the root set after completing the concurrent mark. void ReMarkRoots() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Heap* GetHeap() { return heap_; } void ProcessReferences(bool clear_soft_references) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ProcessReferences(&soft_reference_list_, clear_soft_references, &weak_reference_list_, &finalizer_reference_list_, @@ -99,11 +99,11 @@ class MarkSweep { // Sweeps unmarked objects to complete the garbage collection.
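Nearly every hunk in mark_sweep.h pairs EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) with SHARED_LOCKS_REQUIRED(Locks::mutator_lock_): mark-phase code writes the mark bitmaps but only needs the heap not to move underneath it. Macros like these typically expand to Clang's legacy thread-safety attributes; the expansion and types below are a plausible sketch to be compiled with -Wthread-safety, not ART's own definitions:

#define EXCLUSIVE_LOCKS_REQUIRED(...) \
    __attribute__((exclusive_locks_required(__VA_ARGS__)))
#define SHARED_LOCKS_REQUIRED(...) \
    __attribute__((shared_locks_required(__VA_ARGS__)))

class __attribute__((lockable)) Lock {
 public:
  void ExclusiveLock() __attribute__((exclusive_lock_function)) {}
  void SharedLock() __attribute__((shared_lock_function)) {}
  void Unlock() __attribute__((unlock_function)) {}
};

Lock heap_bitmap_lock;  // writer side guards the mark bitmaps
Lock mutator_lock;      // shared side means "heap objects won't move"

void MarkRoots()
    EXCLUSIVE_LOCKS_REQUIRED(heap_bitmap_lock)
    SHARED_LOCKS_REQUIRED(mutator_lock) {}

void Collect() {
  heap_bitmap_lock.ExclusiveLock();
  mutator_lock.SharedLock();
  MarkRoots();  // fine: both annotated preconditions hold here
  mutator_lock.Unlock();
  heap_bitmap_lock.Unlock();
  // Calling MarkRoots() here would earn a -Wthread-safety warning.
}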
void Sweep(bool partial, bool swap_bitmaps) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Sweep only pointers within an array. WARNING: Trashes objects. void SweepArray(TimingLogger& logger, MarkStack* allocation_stack_, bool swap_bitmaps) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); Object* GetClearedReferences() { return cleared_reference_list_; @@ -111,13 +111,13 @@ class MarkSweep { // Proxy for external access to ScanObject. void ScanRoot(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Blackens an object. void ScanObject(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetFinger(Object* new_finger) { finger_ = new_finger; @@ -140,12 +140,12 @@ class MarkSweep { } void SweepSystemWeaks(bool swap_bitmaps) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); template <typename Visitor> static void VisitObjectReferences(const Object* obj, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { DCHECK(obj != NULL); DCHECK(obj->GetClass() != NULL); if (obj->IsClass()) { @@ -160,7 +160,7 @@ class MarkSweep { private: // Returns true if the object has its bit set in the mark bitmap. bool IsMarked(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { DCHECK(current_mark_bitmap_ != NULL); if (current_mark_bitmap_->HasAddress(object)) { return current_mark_bitmap_->Test(object); @@ -169,61 +169,58 @@ class MarkSweep { } static bool IsMarkedCallback(const Object* object, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); static bool IsLiveCallback(const Object* object, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); static void MarkObjectVisitor(const Object* root, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); static void ReMarkObjectVisitor(const Object* root, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); static void VerifyImageRootVisitor(Object* root, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_); static void ScanDirtyCardCallback(Object* obj, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Marks an object. 
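IsMarked() above leans on two SpaceBitmap operations, HasAddress() and Test(). A self-contained sketch of such a bitmap, one bit per aligned heap address; the 8-byte alignment constant is an assumption:

#include <cstddef>
#include <cstdint>
#include <vector>

class SpaceBitmapSketch {
 public:
  SpaceBitmapSketch(uintptr_t heap_begin, size_t heap_capacity)
      : heap_begin_(heap_begin),
        words_((heap_capacity / kAlignment + kBitsPerWord - 1) / kBitsPerWord, 0) {}

  // Is this address covered by the bitmap at all?
  bool HasAddress(const void* obj) const {
    const uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
    return addr >= heap_begin_ &&
           (addr - heap_begin_) / kAlignment < words_.size() * kBitsPerWord;
  }

  bool Test(const void* obj) const {  // is the mark bit set?
    const size_t bit = BitIndex(obj);
    return (words_[bit / kBitsPerWord] >> (bit % kBitsPerWord)) & 1u;
  }

  void Set(const void* obj) {  // mark the object
    const size_t bit = BitIndex(obj);
    words_[bit / kBitsPerWord] |= static_cast<uintptr_t>(1) << (bit % kBitsPerWord);
  }

 private:
  static constexpr size_t kAlignment = 8;  // assumed object alignment
  static constexpr size_t kBitsPerWord = sizeof(uintptr_t) * 8;

  size_t BitIndex(const void* obj) const {
    return (reinterpret_cast<uintptr_t>(obj) - heap_begin_) / kAlignment;
  }

  uintptr_t heap_begin_;
  std::vector<uintptr_t> words_;
};

This also explains why IsMarked() first checks HasAddress(): an object outside the current space has to be looked up in the full heap bitmap instead.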
void MarkObject(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Yuck. void MarkObject0(const Object* obj, bool check_finger) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); static void ScanBitmapCallback(Object* obj, void* finger, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Special sweep for zygote that just marks objects / dirties cards. static void ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); void CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); void CheckObject(const Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Grays references in instance fields. void ScanInstanceFields(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template <typename Visitor> static void VisitInstanceFieldsReferences(const Object* obj, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { DCHECK(obj != NULL); Class* klass = obj->GetClass(); DCHECK(klass != NULL); @@ -232,41 +229,39 @@ class MarkSweep { // Blackens a class object. void ScanClass(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template <typename Visitor> static void VisitClassReferences(const Object* obj, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { VisitInstanceFieldsReferences(obj, visitor); VisitStaticFieldsReferences(obj->AsClass(), visitor); } // Grays references in static fields. 
void ScanStaticFields(const Class* klass) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template <typename Visitor> static void VisitStaticFieldsReferences(const Class* klass, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { DCHECK(klass != NULL); VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor); } // Used by ScanInstanceFields and ScanStaticFields void ScanFields(const Object* obj, uint32_t ref_offsets, bool is_static) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template <typename Visitor> static void VisitFieldsReferences(const Object* obj, uint32_t ref_offsets, bool is_static, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { if (ref_offsets != CLASS_WALK_SUPER) { // Found a reference offset bitmap. Mark the specified offsets. while (ref_offsets != 0) { @@ -301,13 +296,12 @@ class MarkSweep { // Grays references in an array. void ScanArray(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template <typename Visitor> static void VisitArrayReferences(const Object* obj, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { visitor(obj, obj->GetClass(), Object::ClassOffset(), false); if (obj->IsObjectArray()) { const ObjectArray<Object>* array = obj->AsObjectArray<Object>(); @@ -320,49 +314,48 @@ class MarkSweep { } void ScanOther(const Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template <typename Visitor> static void VisitOtherReferences(const Object* obj, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { return VisitInstanceFieldsReferences(obj, visitor); } // Blackens objects grayed during a garbage collection. void ScanGrayObjects(bool update_finger) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Schedules an unmarked object for reference processing. void DelayReferenceReferent(Object* reference) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Recursively blackens objects on the mark stack. 
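VisitFieldsReferences() above walks ref_offsets, a per-class bitmap in which each set bit names one reference field, falling back to walking the field lists when the CLASS_WALK_SUPER sentinel says no bitmap exists. One plausible shape for the elided marking loop, peeling off the highest set bit each iteration with count-leading-zeros; the exact bit-to-offset mapping here is illustrative:

#include <cstddef>
#include <cstdint>

constexpr uint32_t kHighBit = 0x80000000u;

template <typename Visitor>
void VisitPackedReferences(uint32_t ref_offsets, const Visitor& visit) {
  while (ref_offsets != 0) {
    const int shift = __builtin_clz(ref_offsets);       // position of highest set bit
    visit(static_cast<size_t>(shift) * sizeof(void*));  // bit index -> byte offset (assumed)
    ref_offsets &= ~(kHighBit >> shift);                // done with this field
  }
}

Packing the reference layout into one word is what lets ScanInstanceFields() avoid touching the field metadata at all on the hot path.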
void ProcessMarkStack() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void EnqueueFinalizerReferences(Object** ref) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void PreserveSomeSoftReferences(Object** ref) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ClearWhiteReferences(Object** list) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); void ProcessReferences(Object** soft_references, bool clear_soft_references, Object** weak_references, Object** finalizer_references, Object** phantom_references) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SweepJniWeakGlobals(bool swap_bitmaps) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Current space, we check this space first to avoid searching for the appropriate space for an object. SpaceBitmap* current_mark_bitmap_; diff --git a/src/mod_union_table.cc b/src/mod_union_table.cc index eb8c598db5..d62128d610 100644 --- a/src/mod_union_table.cc +++ b/src/mod_union_table.cc @@ -57,8 +57,8 @@ class ModUnionVisitor { } void operator ()(Object* obj) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { DCHECK(obj != NULL); // We don't have an early exit since we use the visitor pattern, an early // exit should significantly speed this up. @@ -155,8 +155,8 @@ class ModUnionScanImageRootVisitor { } void operator ()(const Object* root) const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(root != NULL); mark_sweep_->ScanRoot(root); } @@ -226,8 +226,8 @@ class ModUnionReferenceVisitor { } void operator ()(Object* obj) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + Locks::mutator_lock_) { DCHECK(obj != NULL); // We don't have an early exit since we use the visitor pattern, an early // exit should significantly speed this up. @@ -254,8 +254,7 @@ class CheckReferenceVisitor { // Extra parameters are required since we use this same visitor signature for checking objects. 
void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { Heap* heap = mod_union_table_->GetMarkSweep()->GetHeap(); if (mod_union_table_->AddReference(obj, ref) && references_.find(ref) == references_.end()) { Space* from_space = heap->FindSpaceFromObject(obj); @@ -287,8 +286,7 @@ class ModUnionCheckReferences { } void operator ()(Object* obj) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_, - GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { DCHECK(obj != NULL); MarkSweep* mark_sweep = mod_union_table_->GetMarkSweep(); CheckReferenceVisitor visitor(mod_union_table_, references_); diff --git a/src/mod_union_table.h b/src/mod_union_table.h index df2023fb9f..17ca24085d 100644 --- a/src/mod_union_table.h +++ b/src/mod_union_table.h @@ -54,7 +54,7 @@ class ModUnionTable { // for said cards. Exclusive lock is required since verify sometimes uses // SpaceBitmap::VisitMarkedRange and VisitMarkedRange can't know if the callback will modify the // bitmap or not. - virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) = 0; + virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0; // Should probably clean this up later. void Init(MarkSweep* mark_sweep) { @@ -86,11 +86,11 @@ class ModUnionTableBitmap : public ModUnionTable { // Update table based on cleared cards. void Update() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Mark all references to the alloc space(s). - void MarkReferences() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + void MarkReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); protected: // Cleared card array, used to update the mod-union table. @@ -115,15 +115,15 @@ class ModUnionTableReferenceCache : public ModUnionTable { // Update table based on cleared cards. void Update() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Mark all references to the alloc space(s). - void MarkReferences() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + void MarkReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and // VisitMarkedRange can't know if the callback will modify the bitmap or not. - void Verify() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Function that tells whether or not to add a reference to the table. virtual bool AddReference(const Object* obj, const Object* ref) = 0; @@ -152,8 +152,8 @@ class ModUnionTableCardCache : public ModUnionTable { // Mark all references to the alloc space(s). 
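The mod-union tables above are rebuilt from cleared cards, so it helps to recall what a card is: the write barrier dirties one byte per fixed-size region of the heap, and Update() only re-scans regions whose byte is dirty. A hedged sketch of that card table; the 128-byte card size and 0x70 dirty value are assumptions for illustration:

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t kCardShift = 7;      // assumed: 128-byte cards
constexpr uint8_t kCardDirty = 0x70;  // assumed dirty marker

class CardTableSketch {
 public:
  CardTableSketch(uintptr_t heap_begin, size_t heap_size)
      : heap_begin_(heap_begin), cards_((heap_size >> kCardShift) + 1, 0) {}

  // Write barrier: a store into `addr` dirties its card.
  void MarkCard(const void* addr) { cards_[Index(addr)] = kCardDirty; }

  bool IsDirty(const void* addr) const { return cards_[Index(addr)] == kCardDirty; }

 private:
  size_t Index(const void* addr) const {
    return (reinterpret_cast<uintptr_t>(addr) - heap_begin_) >> kCardShift;
  }

  uintptr_t heap_begin_;
  std::vector<uint8_t> cards_;
};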
void MarkReferences() - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Nothing to verify. void Verify() {} diff --git a/src/monitor.cc b/src/monitor.cc index 0e6735d1d3..6b7fbf116a 100644 --- a/src/monitor.cc +++ b/src/monitor.cc @@ -252,7 +252,7 @@ static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) __attribute__((format(printf, 1, 2))); static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { va_list args; va_start(args, fmt); Thread::Current()->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args); @@ -284,7 +284,7 @@ void Monitor::FailedUnlock(Object* o, Thread* expected_owner, Thread* found_owne { // TODO: isn't this too late to prevent threads from disappearing? // Acquire thread list lock so threads won't disappear from under us. - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); // Re-read owner now that we hold lock. current_owner = (monitor != NULL) ? monitor->owner_ : NULL; // Get short descriptions of the threads involved. @@ -366,7 +366,7 @@ bool Monitor::Unlock(Thread* self, bool for_wait) { // Converts the given waiting time (relative to "now") into an absolute time in 'ts'. static void ToAbsoluteTime(int64_t ms, int32_t ns, timespec* ts) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { int64_t endSec; #ifdef HAVE_TIMEDWAIT_MONOTONIC @@ -873,7 +873,7 @@ static uint32_t LockOwnerFromThreadLock(Object* thread_lock) { void Monitor::DescribeWait(std::ostream& os, const Thread* thread) { ThreadState state; { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); state = thread->GetState(); } @@ -913,7 +913,7 @@ void Monitor::DescribeWait(std::ostream& os, const Thread* thread) { } static void DumpLockedObject(std::ostream& os, Object* o) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { os << " - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n"; } diff --git a/src/monitor.h b/src/monitor.h index b506b39a96..de70803f2c 100644 --- a/src/monitor.h +++ b/src/monitor.h @@ -73,43 +73,43 @@ class Monitor { static void MonitorEnter(Thread* thread, Object* obj) EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static bool MonitorExit(Thread* thread, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) UNLOCK_FUNCTION(monitor_lock_); static void Notify(Thread* self, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void NotifyAll(Thread* self, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void Wait(Thread* self, Object* obj, int64_t ms, int32_t ns, bool interruptShouldThrow) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DescribeWait(std::ostream& os, const Thread* thread) - 
LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DescribeLocks(std::ostream& os, StackVisitor* stack_visitor) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Object* GetObject(); private: explicit Monitor(Thread* owner, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_); void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_); static void Inflate(Thread* self, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent, const char* owner_filename, uint32_t owner_line_number) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void FailedUnlock(Object* obj, Thread* expected_owner, Thread* found_owner, Monitor* mon) - LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_list_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION(monitor_lock_); bool Unlock(Thread* thread, bool for_wait) UNLOCK_FUNCTION(monitor_lock_); @@ -117,24 +117,24 @@ class Monitor { void Notify(Thread* self) NO_THREAD_SAFETY_ANALYSIS; void NotifyWithLock() EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void NotifyAll(Thread* self) NO_THREAD_SAFETY_ANALYSIS; void NotifyAllWithLock() EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow) NO_THREAD_SAFETY_ANALYSIS; void WaitWithLock(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Translates the provided method and pc into its declaring class' source file and line number. void TranslateLocation(const Method* method, uint32_t pc, const char*& source_file, uint32_t& line_number) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static bool (*is_sensitive_thread_hook_)(); static uint32_t lock_profiling_threshold_; @@ -172,7 +172,7 @@ class MonitorList { void Add(Monitor* m); void SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); private: Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; diff --git a/src/mutex.cc b/src/mutex.cc index 2811843526..cb344d4f81 100644 --- a/src/mutex.cc +++ b/src/mutex.cc @@ -75,16 +75,16 @@ struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t { // ...other stuff we don't care about. 
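The Notify/NotifyWithLock and Wait/WaitWithLock splits in monitor.h encode a recurring annotalysis idiom: the public entry point cannot be annotated because which mutex guards the monitor is only known at runtime, so analysis is disabled there and the annotated body does the work. A minimal sketch of the idiom, with stand-in types:

#include <cassert>

#define EXCLUSIVE_LOCKS_REQUIRED(...) \
    __attribute__((exclusive_locks_required(__VA_ARGS__)))
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

class Thread;
class Mutex {};  // stand-in for ART's annotated Mutex

class MonitorSketch {
 public:
  // Public entry point: whether the caller holds the right lock depends
  // on runtime state (thin vs. fat lock), which static analysis cannot
  // follow, so the invariant is checked dynamically instead.
  void Notify(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    assert(HoldsLock(self));
    NotifyWithLock();
  }

 private:
  // The body carries the real annotation and stays analyzable.
  void NotifyWithLock() EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_) {
    // ... move waiting threads back toward runnable ...
  }
  bool HoldsLock(Thread*) const { return true; }  // sketch only
  Mutex monitor_lock_;
};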
}; -ReaderWriterMutex* GlobalSynchronization::mutator_lock_ = NULL; -Mutex* GlobalSynchronization::thread_list_lock_ = NULL; -Mutex* GlobalSynchronization::classlinker_classes_lock_ = NULL; -ReaderWriterMutex* GlobalSynchronization::heap_bitmap_lock_ = NULL; -Mutex* GlobalSynchronization::abort_lock_ = NULL; -Mutex* GlobalSynchronization::logging_lock_ = NULL; -Mutex* GlobalSynchronization::unexpected_signal_lock_ = NULL; -Mutex* GlobalSynchronization::thread_suspend_count_lock_ = NULL; - -void GlobalSynchronization::Init() { +ReaderWriterMutex* Locks::mutator_lock_ = NULL; +Mutex* Locks::thread_list_lock_ = NULL; +Mutex* Locks::classlinker_classes_lock_ = NULL; +ReaderWriterMutex* Locks::heap_bitmap_lock_ = NULL; +Mutex* Locks::abort_lock_ = NULL; +Mutex* Locks::logging_lock_ = NULL; +Mutex* Locks::unexpected_signal_lock_ = NULL; +Mutex* Locks::thread_suspend_count_lock_ = NULL; + +void Locks::Init() { if (logging_lock_ != NULL) { // Already initialized. DCHECK(mutator_lock_ != NULL); diff --git a/src/mutex.h b/src/mutex.h index be3704c234..5154d45f58 100644 --- a/src/mutex.h +++ b/src/mutex.h @@ -65,7 +65,7 @@ enum MutexLevel { std::ostream& operator<<(std::ostream& os, const MutexLevel& rhs); // Global mutexes corresponding to the levels above. -class GlobalSynchronization { +class Locks { public: static void Init(); diff --git a/src/native/dalvik_system_DexFile.cc b/src/native/dalvik_system_DexFile.cc index 30f411cc3e..b5e1c19d8e 100644 --- a/src/native/dalvik_system_DexFile.cc +++ b/src/native/dalvik_system_DexFile.cc @@ -108,7 +108,7 @@ static jint DexFile_openDexFile(JNIEnv* env, jclass, jstring javaSourceName, jst } static const DexFile* toDexFile(int dex_file_address) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile* dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(dex_file_address)); if (dex_file == NULL) { Thread::Current()->ThrowNewExceptionF("Ljava/lang/NullPointerException;", "dex_file == null"); diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc index ed61de92f5..fae06f68ca 100644 --- a/src/native/dalvik_system_VMRuntime.cc +++ b/src/native/dalvik_system_VMRuntime.cc @@ -138,7 +138,7 @@ static void VMRuntime_setTargetSdkVersion(JNIEnv*, jobject, jint targetSdkVersio #if !defined(ART_USE_LLVM_COMPILER) if (vm->check_jni) { LOG(WARNING) << "Turning off CheckJNI so we can turn on JNI app bug workarounds..."; - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); vm->SetCheckJniEnabled(false); runtime->GetThreadList()->ForEach(DisableCheckJniCallback, NULL); } diff --git a/src/native/java_lang_Class.cc b/src/native/java_lang_Class.cc index e63cf1aece..68fc97e922 100644 --- a/src/native/java_lang_Class.cc +++ b/src/native/java_lang_Class.cc @@ -28,7 +28,7 @@ namespace art { static Class* DecodeClass(const ScopedObjectAccess& soa, jobject java_class) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* c = soa.Decode<Class*>(java_class); DCHECK(c != NULL); DCHECK(c->IsClass()); @@ -93,7 +93,7 @@ struct WorkAroundGccAnnotalysisBug { template<typename T> static jobjectArray ToArray(const ScopedObjectAccessUnchecked& soa, const char* array_class_name, const std::vector<T*>& objects) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedLocalRef<jclass> array_class(soa.Env(), 
soa.Env()->FindClass(array_class_name)); jobjectArray result = soa.Env()->NewObjectArray(objects.size(), array_class.get(), NULL); for (size_t i = 0; i < objects.size(); ++i) { @@ -240,7 +240,7 @@ static jobject Class_getDex(JNIEnv* env, jobject javaClass) { } static bool MethodMatches(MethodHelper* mh, const std::string& name, ObjectArray<Class>* arg_array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (name != mh->GetName()) { return false; } @@ -262,7 +262,7 @@ static bool MethodMatches(MethodHelper* mh, const std::string& name, ObjectArray static Method* FindConstructorOrMethodInArray(ObjectArray<Method>* methods, const std::string& name, ObjectArray<Class>* arg_array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (methods == NULL) { return NULL; } diff --git a/src/native/java_lang_System.cc b/src/native/java_lang_System.cc index f4fe6ca9cd..d74c9dbc7f 100644 --- a/src/native/java_lang_System.cc +++ b/src/native/java_lang_System.cc @@ -102,7 +102,7 @@ static void move32(void* dst, const void* src, size_t n) { namespace art { static void ThrowArrayStoreException_NotAnArray(const char* identifier, Object* array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string actualType(PrettyTypeOf(array)); Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", "%s of type %s is not an array", identifier, actualType.c_str()); diff --git a/src/native/java_lang_Thread.cc b/src/native/java_lang_Thread.cc index a0c90eea7f..adc246aa28 100644 --- a/src/native/java_lang_Thread.cc +++ b/src/native/java_lang_Thread.cc @@ -35,7 +35,7 @@ static jboolean Thread_interrupted(JNIEnv* env, jclass) { static jboolean Thread_isInterrupted(JNIEnv* env, jobject java_thread) { ScopedObjectAccess soa(env); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = Thread::FromManagedThread(soa, java_thread); return (thread != NULL) ? thread->IsInterrupted() : JNI_FALSE; } @@ -56,10 +56,10 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha ScopedObjectAccess soa(env); ThreadState internal_thread_state = (has_been_started ? 
kTerminated : kStarting); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = Thread::FromManagedThread(soa, java_thread); if (thread != NULL) { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); internal_thread_state = thread->GetState(); } switch (internal_thread_state) { @@ -92,14 +92,14 @@ static jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject java_thread, jobject Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "object == null"); return JNI_FALSE; } - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = Thread::FromManagedThread(soa, java_thread); return thread->HoldsLock(object); } static void Thread_nativeInterrupt(JNIEnv* env, jobject java_thread) { ScopedObjectAccess soa(env); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = Thread::FromManagedThread(soa, java_thread); if (thread != NULL) { thread->Interrupt(); @@ -140,7 +140,7 @@ static void Thread_nativeSetName(JNIEnv* env, jobject peer, jstring java_name) { */ static void Thread_nativeSetPriority(JNIEnv* env, jobject java_thread, jint new_priority) { ScopedObjectAccess soa(env); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Thread* thread = Thread::FromManagedThread(soa, java_thread); if (thread != NULL) { thread->SetNativePriority(new_priority); diff --git a/src/native/java_lang_reflect_Array.cc b/src/native/java_lang_reflect_Array.cc index fa5975019d..49a4694eda 100644 --- a/src/native/java_lang_reflect_Array.cc +++ b/src/native/java_lang_reflect_Array.cc @@ -25,7 +25,7 @@ namespace art { // Recursively create an array with multiple dimensions. Elements may be // Objects or primitive types. 
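Thread_nativeGetStatus above nests two of the renamed locks: thread_list_lock_ first, to keep the peer thread from disappearing, then thread_suspend_count_lock_ to read its state. mutex.h ranks its locks via MutexLevel precisely so orderings like this can be checked; the enforcement sketch below, with invented ranks, shows the idea:

#include <cassert>
#include <mutex>
#include <vector>

// Acquire only mutexes of strictly lower rank than the one most recently
// taken on this thread; violations assert before they can deadlock.
class RankedMutex {
 public:
  explicit RankedMutex(int rank) : rank_(rank) {}

  void Lock() {
    assert((held_ranks_.empty() || rank_ < held_ranks_.back()) &&
           "out-of-order acquisition -- potential deadlock");
    mu_.lock();
    held_ranks_.push_back(rank_);
  }

  void Unlock() {
    assert(!held_ranks_.empty() && held_ranks_.back() == rank_);
    held_ranks_.pop_back();
    mu_.unlock();
  }

 private:
  static thread_local std::vector<int> held_ranks_;
  std::mutex mu_;
  const int rank_;
};

thread_local std::vector<int> RankedMutex::held_ranks_;

// Matching the nesting in Thread_nativeGetStatus: list lock first, then
// the suspend-count lock. Ranks are invented for the sketch.
RankedMutex thread_list_lock(2);
RankedMutex thread_suspend_count_lock(1);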
static Array* CreateMultiArray(Class* array_class, int current_dimension, IntArray* dimensions) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { int32_t array_length = dimensions->Get(current_dimension++); SirtRef<Array> new_array(Array::Alloc(array_class, array_length)); if (new_array.get() == NULL) { diff --git a/src/native/java_lang_reflect_Field.cc b/src/native/java_lang_reflect_Field.cc index e764b2500b..c82e5034b7 100644 --- a/src/native/java_lang_reflect_Field.cc +++ b/src/native/java_lang_reflect_Field.cc @@ -25,7 +25,7 @@ namespace art { static bool GetFieldValue(const ScopedObjectAccess& soa, Object* o, Field* f, JValue& value, bool allow_references) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK_EQ(value.GetJ(), 0LL); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(f->GetDeclaringClass(), true, true)) { @@ -74,7 +74,7 @@ static bool GetFieldValue(const ScopedObjectAccess& soa, Object* o, Field* f, static bool CheckReceiver(const ScopedObjectAccess& soa, jobject javaObj, Field* f, Object*& o) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (f->IsStatic()) { o = NULL; return true; @@ -163,7 +163,7 @@ static jshort Field_getShort(JNIEnv* env, jobject javaField, jobject javaObj) { } static void SetFieldValue(Object* o, Field* f, const JValue& new_value, bool allow_references) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(f->GetDeclaringClass(), true, true)) { return; diff --git a/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc index 9bcea0492d..8e5bbf6ee4 100644 --- a/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc +++ b/src/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc @@ -56,7 +56,7 @@ static jobject FindThreadByThinLockId(JNIEnv* env, uint32_t thin_lock_id) { }; ThreadFinder finder(thin_lock_id); { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); Runtime::Current()->GetThreadList()->ForEach(ThreadFinder::Callback, &finder); } if (finder.thread != NULL) { @@ -134,7 +134,7 @@ static void ThreadStatsGetterCallback(Thread* t, void* context) { std::vector<uint8_t>& bytes = *reinterpret_cast<std::vector<uint8_t>*>(context); JDWP::Append4BE(bytes, t->GetThinLockId()); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); JDWP::Append1BE(bytes, t->GetState()); } JDWP::Append4BE(bytes, t->GetTid()); @@ -146,7 +146,7 @@ static void ThreadStatsGetterCallback(Thread* t, void* context) { static jbyteArray DdmVmInternal_getThreadStats(JNIEnv* env, jclass) { std::vector<uint8_t> bytes; { - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); ThreadList* thread_list = Runtime::Current()->GetThreadList(); uint16_t thread_count = 0; diff --git a/src/oat/runtime/callee_save_frame.h b/src/oat/runtime/callee_save_frame.h index 14ba0465c7..9288a955c9 100644 --- a/src/oat/runtime/callee_save_frame.h +++ b/src/oat/runtime/callee_save_frame.h @@ -25,9 +25,9 @@ class Method; // Place a special frame at the TOS that will save the callee saves for the given type. 
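GetFieldValue/SetFieldValue above shuttle every field access through a JValue, and the DCHECK_EQ(value.GetJ(), 0LL) works because reading the widest slot of a zero-initialized union sees all of its bits. A sketch of that union in the style of JNI's jvalue; member names are assumed:

#include <cstdint>

union JValueSketch {
  uint8_t z;   // boolean
  int8_t b;    // byte
  uint16_t c;  // char
  int16_t s;   // short
  int32_t i;   // int
  int64_t j;   // long
  float f;     // float
  double d;    // double
  void* l;     // reference
};

// Widen any integral slot to a Java long, keyed by the field's type character.
int64_t ReadIntegralAsLong(const JValueSketch& v, char type) {
  switch (type) {
    case 'Z': return v.z;
    case 'B': return v.b;
    case 'C': return v.c;
    case 'S': return v.s;
    case 'I': return v.i;
    case 'J': return v.j;
    default:  return 0;  // 'F', 'D' and 'L' need their own accessors
  }
}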
static void FinishCalleeSaveFrameSetup(Thread* self, Method** sp, Runtime::CalleeSaveType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Be aware the store below may well stomp on an incoming argument. - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + Locks::mutator_lock_->AssertSharedHeld(); *sp = Runtime::Current()->GetCalleeSaveMethod(type); self->SetTopOfStack(sp, 0); self->VerifyStack(); diff --git a/src/oat/runtime/stub.h b/src/oat/runtime/stub.h index 9e5e66fa6b..0e5f0dd67d 100644 --- a/src/oat/runtime/stub.h +++ b/src/oat/runtime/stub.h @@ -23,29 +23,29 @@ namespace art { namespace arm { ByteArray* CreateAbstractMethodErrorStub() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ByteArray* ArmCreateResolutionTrampoline(Runtime::TrampolineType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ByteArray* CreateJniDlsymLookupStub() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } namespace mips { ByteArray* CreateAbstractMethodErrorStub() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ByteArray* MipsCreateResolutionTrampoline(Runtime::TrampolineType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ByteArray* CreateJniDlsymLookupStub() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } namespace x86 { ByteArray* CreateAbstractMethodErrorStub() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ByteArray* X86CreateResolutionTrampoline(Runtime::TrampolineType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ByteArray* CreateJniDlsymLookupStub() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } } // namespace art diff --git a/src/oat/runtime/support_alloc.cc b/src/oat/runtime/support_alloc.cc index 528198d81c..fb83fadeef 100644 --- a/src/oat/runtime/support_alloc.cc +++ b/src/oat/runtime/support_alloc.cc @@ -21,21 +21,21 @@ namespace art { extern "C" Object* artAllocObjectFromCode(uint32_t type_idx, Method* method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocObjectFromCode(type_idx, method, self, false); } extern "C" Object* artAllocObjectFromCodeWithAccessCheck(uint32_t type_idx, Method* method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocObjectFromCode(type_idx, method, self, true); } extern "C" Array* artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocArrayFromCode(type_idx, method, component_count, false); } @@ -43,14 +43,14 @@ extern "C" Array* artAllocArrayFromCode(uint32_t 
type_idx, Method* method, int32 extern "C" Array* artAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, Method* method, int32_t component_count, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return AllocArrayFromCode(type_idx, method, component_count, true); } extern "C" Array* artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, false); } @@ -58,7 +58,7 @@ extern "C" Array* artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* metho extern "C" Array* artCheckAndAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, Method* method, int32_t component_count, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, true); } diff --git a/src/oat/runtime/support_cast.cc b/src/oat/runtime/support_cast.cc index ea083f1aae..45a3e60d83 100644 --- a/src/oat/runtime/support_cast.cc +++ b/src/oat/runtime/support_cast.cc @@ -21,7 +21,7 @@ namespace art { // Assignable test for code, won't throw. Null and equality tests already performed. extern "C" uint32_t artIsAssignableFromCode(const Class* klass, const Class* ref_class) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(klass != NULL); DCHECK(ref_class != NULL); return klass->IsAssignableFrom(ref_class) ? 1 : 0; @@ -29,7 +29,7 @@ extern "C" uint32_t artIsAssignableFromCode(const Class* klass, const Class* ref // Check whether it is safe to cast one class to the other; throws an exception and returns -1 on failure. extern "C" int artCheckCastFromCode(const Class* a, const Class* b, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(a->IsClass()) << PrettyClass(a); DCHECK(b->IsClass()) << PrettyClass(b); if (LIKELY(b->IsAssignableFrom(a))) { @@ -48,7 +48,7 @@ extern "C" int artCheckCastFromCode(const Class* a, const Class* b, Thread* self // Returns 0 on success and -1 if an exception is pending. extern "C" int artCanPutArrayElementFromCode(const Object* element, const Class* array_class, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(array_class != NULL); // element can't be NULL as this is screened in runtime_support Class* element_class = element->GetClass(); diff --git a/src/oat/runtime/support_debug.cc b/src/oat/runtime/support_debug.cc index 996804396a..9eaf55b71c 100644 --- a/src/oat/runtime/support_debug.cc +++ b/src/oat/runtime/support_debug.cc @@ -26,14 +26,14 @@ namespace art { * to denote method entry.
*/ extern "C" void artUpdateDebuggerFromCode(int32_t dex_pc, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); Dbg::UpdateDebugger(dex_pc, self); } // Temporary debugging hook for compiler. extern void DebugMe(Method* method, uint32_t info) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { LOG(INFO) << "DebugMe"; if (method != NULL) { LOG(INFO) << PrettyMethod(method); diff --git a/src/oat/runtime/support_dexcache.cc b/src/oat/runtime/support_dexcache.cc index 8e7c2ad8a4..98cce55c7b 100644 --- a/src/oat/runtime/support_dexcache.cc +++ b/src/oat/runtime/support_dexcache.cc @@ -21,7 +21,7 @@ namespace art { extern "C" Class* artInitializeStaticStorageFromCode(uint32_t type_idx, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called to ensure static storage base is initialized for direct static field reads and writes. // A class may be accessing another class' fields when it doesn't have access, as access has been // given by inheritance. @@ -31,7 +31,7 @@ extern "C" Class* artInitializeStaticStorageFromCode(uint32_t type_idx, const Me extern "C" Class* artInitializeTypeFromCode(uint32_t type_idx, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when method->dex_cache_resolved_types_[] misses. FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return ResolveVerifyAndClinit(type_idx, referrer, self, false, false); @@ -40,7 +40,7 @@ extern "C" Class* artInitializeTypeFromCode(uint32_t type_idx, const Method* ref extern "C" Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when caller isn't guaranteed to have access to a type and the dex cache may be // unpopulated. 
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); @@ -49,7 +49,7 @@ extern "C" Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, extern "C" String* artResolveStringFromCode(Method* referrer, int32_t string_idx, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); return ResolveStringFromCode(referrer, string_idx); } diff --git a/src/oat/runtime/support_field.cc b/src/oat/runtime/support_field.cc index fe8974bff0..90a99dafd1 100644 --- a/src/oat/runtime/support_field.cc +++ b/src/oat/runtime/support_field.cc @@ -23,7 +23,7 @@ namespace art { extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t)); if (LIKELY(field != NULL)) { return field->Get32(NULL); @@ -38,7 +38,7 @@ extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, const Method* ref extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t)); if (LIKELY(field != NULL)) { return field->Get64(NULL); @@ -53,7 +53,7 @@ extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, const Method* ref extern "C" Object* artGetObjStaticFromCode(uint32_t field_idx, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*)); if (LIKELY(field != NULL)) { return field->GetObj(NULL); @@ -68,7 +68,7 @@ extern "C" Object* artGetObjStaticFromCode(uint32_t field_idx, const Method* ref extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, Object* obj, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t)); if (LIKELY(field != NULL && obj != NULL)) { return field->Get32(obj); @@ -87,7 +87,7 @@ extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, Object* obj, extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, Object* obj, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t)); if (LIKELY(field != NULL && obj != NULL)) { return field->Get64(obj); @@ -106,7 +106,7 @@ extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, Object* obj, extern "C" Object* artGetObjInstanceFromCode(uint32_t field_idx, Object* obj, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(Object*)); if (LIKELY(field != NULL && obj != NULL)) { return field->GetObj(obj); @@ -125,7 +125,7 @@ extern "C" 
Object* artGetObjInstanceFromCode(uint32_t field_idx, Object* obj, extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t)); if (LIKELY(field != NULL)) { field->Set32(NULL, new_value); @@ -142,7 +142,7 @@ extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, extern "C" int artSet64StaticFromCode(uint32_t field_idx, const Method* referrer, uint64_t new_value, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t)); if (LIKELY(field != NULL)) { field->Set64(NULL, new_value); @@ -159,7 +159,7 @@ extern "C" int artSet64StaticFromCode(uint32_t field_idx, const Method* referrer extern "C" int artSetObjStaticFromCode(uint32_t field_idx, Object* new_value, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*)); if (LIKELY(field != NULL)) { if (LIKELY(!FieldHelper(field).IsPrimitiveType())) { @@ -178,7 +178,7 @@ extern "C" int artSetObjStaticFromCode(uint32_t field_idx, Object* new_value, extern "C" int artSet32InstanceFromCode(uint32_t field_idx, Object* obj, uint32_t new_value, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t)); if (LIKELY(field != NULL && obj != NULL)) { field->Set32(obj, new_value); @@ -199,7 +199,7 @@ extern "C" int artSet32InstanceFromCode(uint32_t field_idx, Object* obj, uint32_ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, Object* obj, uint64_t new_value, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly); Method* referrer = sp[callee_save->GetFrameSizeInBytes() / sizeof(Method*)]; Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t)); @@ -223,7 +223,7 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, Object* obj, uint64_ extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, Object* obj, Object* new_value, const Method* referrer, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(Object*)); if (LIKELY(field != NULL && obj != NULL)) { field->SetObj(obj, new_value); diff --git a/src/oat/runtime/support_fillarray.cc b/src/oat/runtime/support_fillarray.cc index 8561bd8308..62b9cf96cf 100644 --- a/src/oat/runtime/support_fillarray.cc +++ b/src/oat/runtime/support_fillarray.cc @@ -38,7 +38,7 @@ namespace art { extern "C" int artHandleFillArrayDataFromCode(Array* array, const Instruction::ArrayDataPayload* payload, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature)); if (UNLIKELY(array == NULL)) { diff --git a/src/oat/runtime/support_invoke.cc b/src/oat/runtime/support_invoke.cc index b2867ef52f..540e46bbfe 100644 --- a/src/oat/runtime/support_invoke.cc +++ b/src/oat/runtime/support_invoke.cc @@ -21,7 +21,7 @@ namespace art { static uint64_t artInvokeCommon(uint32_t method_idx, Object* this_object, Method* caller_method, Thread* self, Method** sp, bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type); if (UNLIKELY(method == NULL)) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); @@ -56,7 +56,7 @@ static uint64_t artInvokeCommon(uint32_t method_idx, Object* this_object, Method extern "C" uint64_t artInvokeInterfaceTrampoline(uint32_t method_idx, Object* this_object, Method* caller_method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, false, kInterface); } @@ -64,7 +64,7 @@ extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_ Object* this_object, Method* caller_method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kInterface); } @@ -73,7 +73,7 @@ extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx Object* this_object, Method* caller_method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kDirect); } @@ -81,7 +81,7 @@ extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx Object* this_object, Method* caller_method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kStatic); } @@ -89,7 +89,7 @@ extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx, Object* this_object, Method* caller_method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kSuper); } @@ -97,7 +97,7 @@ extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_id Object* this_object, Method* caller_method, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kVirtual); } diff --git a/src/oat/runtime/support_jni.cc b/src/oat/runtime/support_jni.cc index 5c0bbee45c..49365b55bd 100644 --- a/src/oat/runtime/support_jni.cc +++ b/src/oat/runtime/support_jni.cc @@ -23,8 +23,8 @@ namespace art { // Used by the JNI dlsym stub to find the native method to invoke if none is registered. 
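Each of these trampolines funnels into artInvokeCommon, which returns a uint64_t rather than a pointer. A hedged reconstruction of why (the packing code itself is not part of this diff): on a 32-bit target the resolved Method* and its code entry point can travel back in one value, so the assembly shim recovers both without a second lookup:

#include <cstdint>

struct Method;  // opaque stand-in

inline uint64_t PackMethodAndCode(Method* method, const void* code) {
  const uint64_t method_bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
  const uint64_t code_bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(code));
  return (code_bits << 32) | method_bits;  // code in the high half, method in the low
}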
-extern void* FindNativeMethod(Thread* self) LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) { - GlobalSynchronization::mutator_lock_->AssertNotHeld(); // We come here as Native. +extern void* FindNativeMethod(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_) { + Locks::mutator_lock_->AssertNotHeld(); // We come here as Native. DCHECK(Thread::Current() == self); ScopedObjectAccess soa(self); @@ -54,7 +54,7 @@ extern uint32_t JniMethodStart(Thread* self) UNLOCK_FUNCTION(GlobalSynchronizati } extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) - UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + UNLOCK_FUNCTION(Locks::mutator_lock_) { self->DecodeJObject(to_lock)->MonitorEnter(self); return JniMethodStart(self); } @@ -67,14 +67,14 @@ static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) { } extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); PopLocalReferences(saved_local_ref_cookie, self); } extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. PopLocalReferences(saved_local_ref_cookie, self); @@ -82,7 +82,7 @@ extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject lo extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); Object* o = self->DecodeJObject(result); // Must decode before pop. PopLocalReferences(saved_local_ref_cookie, self); @@ -99,7 +99,7 @@ extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_re extern Object* JniMethodEndWithReferenceSynchronized(jobject result, uint32_t saved_local_ref_cookie, jobject locked, Thread* self) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { self->TransitionFromSuspendedToRunnable(); UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. Object* o = self->DecodeJObject(result); @@ -123,7 +123,7 @@ static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) { } extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ DCHECK(Thread::Current() == self); // TODO: this code is specific to ARM // On entry the stack pointed by sp is: diff --git a/src/oat/runtime/support_proxy.cc b/src/oat/runtime/support_proxy.cc index c22448479d..8c40aba147 100644 --- a/src/oat/runtime/support_proxy.cc +++ b/src/oat/runtime/support_proxy.cc @@ -48,7 +48,7 @@ namespace art { // the invocation handler which is a field within the proxy object receiver. 
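// Stepping back to the JniMethodStart()/JniMethodEnd() pair in support_jni.cc
// above: a minimal sketch (not actual compiled-stub output; InvokeNative and
// NativeBody are hypothetical) of how the pair brackets a native call. The
// thread gives up the shared mutator_lock_ for the duration of native code,
// which is what the UNLOCK_FUNCTION / SHARED_LOCK_FUNCTION annotations
// describe to annotalysis:
jint InvokeNative(Thread* self, JNIEnv* env, jobject obj,
                  jint (*NativeBody)(JNIEnv*, jobject))
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  uint32_t cookie = JniMethodStart(self);  // runnable -> native; drops the lock
  jint result = NativeBody(env, obj);      // runs without the mutator lock held
  JniMethodEnd(cookie, self);              // native -> runnable; reacquires it, pops local refs
  return result;
}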
extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver, Thread* self, byte* stack_args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Register the top of the managed stack Method** proxy_sp = reinterpret_cast<Method**>(stack_args - SP_OFFSET_IN_BYTES); DCHECK_EQ(*proxy_sp, proxy_method); diff --git a/src/oat/runtime/support_stubs.cc b/src/oat/runtime/support_stubs.cc index 510df3b3dc..41de9f7e0b 100644 --- a/src/oat/runtime/support_stubs.cc +++ b/src/oat/runtime/support_stubs.cc @@ -34,7 +34,7 @@ namespace art { // Lazily resolve a method. Called by stub code. const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp, Thread* thread, Runtime::TrampolineType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__arm__) // On entry the stack pointed by sp is: // | argN | | @@ -230,7 +230,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp #else // ART_USE_LLVM_COMPILER const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** called_addr, Thread* thread, Runtime::TrampolineType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t dex_pc; Method* caller = thread->GetCurrentMethod(&dex_pc); @@ -323,7 +323,7 @@ const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** ca #if !defined(ART_USE_LLVM_COMPILER) // Called by the AbstractMethodError. Called by stub code. extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->ThrowNewExceptionF("Ljava/lang/AbstractMethodError;", "abstract method \"%s\"", PrettyMethod(method).c_str()); @@ -331,7 +331,7 @@ extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Met } #else // ART_USE_LLVM_COMPILER extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Method**) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { thread->ThrowNewExceptionF("Ljava/lang/AbstractMethodError;", "abstract method \"%s\"", PrettyMethod(method).c_str()); } diff --git a/src/oat/runtime/support_thread.cc b/src/oat/runtime/support_thread.cc index 32284bbccc..20fe3e590d 100644 --- a/src/oat/runtime/support_thread.cc +++ b/src/oat/runtime/support_thread.cc @@ -21,14 +21,14 @@ namespace art { void CheckSuspendFromCode(Thread* thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when thread->suspend_count_ != 0 on JNI return. JNI method acts as callee-save frame. 
thread->VerifyStack(); thread->FullSuspendCheck(); } extern "C" void artTestSuspendFromCode(Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Called when suspend count check value is 0 and thread->suspend_count_ != 0 FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly); thread->FullSuspendCheck(); diff --git a/src/oat/runtime/support_throw.cc b/src/oat/runtime/support_throw.cc index 0abdb0459f..6e1e1010ce 100644 --- a/src/oat/runtime/support_throw.cc +++ b/src/oat/runtime/support_throw.cc @@ -24,14 +24,14 @@ namespace art { // Deliver an exception that's pending on thread helping set up a callee save frame on the way. extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->DeliverException(); } // Called by generated call to throw an exception. extern "C" void artDeliverExceptionFromCode(Throwable* exception, Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { /* * exception may be NULL, in which case this routine should * throw NPE. NOTE: this is a convenience for generated code, @@ -50,7 +50,7 @@ extern "C" void artDeliverExceptionFromCode(Throwable* exception, Thread* thread // Called by generated call to throw a NPE exception. extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); uint32_t dex_pc; Method* throw_method = self->GetCurrentMethod(&dex_pc); @@ -60,7 +60,7 @@ extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, Method** sp) // Called by generated call to throw an arithmetic divide by zero exception. extern "C" void artThrowDivZeroFromCode(Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero"); thread->DeliverException(); @@ -68,7 +68,7 @@ extern "C" void artThrowDivZeroFromCode(Thread* thread, Method** sp) // Called by generated call to throw an array index out of bounds exception. extern "C" void artThrowArrayBoundsFromCode(int index, int limit, Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); thread->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", "length=%d; index=%d", limit, index); @@ -76,7 +76,7 @@ extern "C" void artThrowArrayBoundsFromCode(int index, int limit, Thread* thread } extern "C" void artThrowStackOverflowFromCode(Thread* thread, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); // Remove extra entry pushed onto second stack during method tracing. 
if (Runtime::Current()->IsMethodTracingActive()) { @@ -90,7 +90,7 @@ extern "C" void artThrowStackOverflowFromCode(Thread* thread, Method** sp) } extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self, Method** sp) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); Method* method = self->GetCurrentMethod(); ThrowNoSuchMethodError(method_idx, method); diff --git a/src/oat_test.cc b/src/oat_test.cc index 288854bd8a..0886864a8a 100644 --- a/src/oat_test.cc +++ b/src/oat_test.cc @@ -26,7 +26,7 @@ class OatTest : public CommonTest { void CheckMethod(Method* method, const OatFile::OatMethod& oat_method, const DexFile* dex_file) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const CompiledMethod* compiled_method = compiler_->GetCompiledMethod(Compiler::MethodReference(dex_file, method->GetDexMethodIndex())); diff --git a/src/oat_writer.h b/src/oat_writer.h index 200d695f98..fd8e2b2533 100644 --- a/src/oat_writer.h +++ b/src/oat_writer.h @@ -69,14 +69,14 @@ class OatWriter { uint32_t image_file_location_checksum, const std::string& image_file_location, const Compiler& compiler) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: OatWriter(const std::vector<const DexFile*>& dex_files, uint32_t image_file_location_checksum, const std::string& image_file_location, jobject class_loader, - const Compiler& compiler) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + const Compiler& compiler) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ~OatWriter(); size_t InitOatHeader(); @@ -85,20 +85,20 @@ class OatWriter { size_t InitOatClasses(size_t offset); size_t InitOatCode(size_t offset); size_t InitOatCodeDexFiles(size_t offset) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t InitOatCodeDexFile(size_t offset, size_t& oat_class_index, const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t InitOatCodeClassDef(size_t offset, size_t oat_class_index, size_t class_def_index, const DexFile& dex_file, const DexFile::ClassDef& class_def) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t InitOatCodeMethod(size_t offset, size_t oat_class_index, size_t class_def_index, size_t class_def_method_index, bool is_native, InvokeType type, uint32_t method_idx, const DexFile*) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool Write(File* file); bool WriteTables(File* file); diff --git a/src/oatdump.cc b/src/oatdump.cc index cde4dc6a13..e0ee14843c 100644 --- a/src/oatdump.cc +++ b/src/oatdump.cc @@ -156,7 +156,7 @@ class OatDumper { return oat_file_.GetOatHeader().GetInstructionSet(); } - const void* GetOatCode(Method* m) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const void* GetOatCode(Method* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MethodHelper mh(m); for (size_t i = 0; i < oat_dex_files_.size(); i++) { const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i]; @@ -487,7 +487,7 @@ class ImageDumper { : os_(os), image_filename_(image_filename), host_prefix_(host_prefix), image_space_(image_space), image_header_(image_header) {} - void 
Dump() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { os_ << "MAGIC:\n"; os_ << image_header_.GetMagic() << "\n\n"; @@ -558,10 +558,10 @@ class ImageDumper { Heap* heap = Runtime::Current()->GetHeap(); const Spaces& spaces = heap->GetSpaces(); { - WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + WriterMutexLock mu(*Locks::heap_bitmap_lock_); heap->FlushAllocStack(); } - ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_); + ReaderMutexLock mu(*Locks::heap_bitmap_lock_); // TODO: C++0x auto for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) { (*cur)->GetLiveBitmap()->Walk(ImageDumper::Callback, this); @@ -585,7 +585,7 @@ class ImageDumper { private: static void PrettyObjectValue(std::string& summary, Class* type, Object* value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(type != NULL); if (value == NULL) { StringAppendF(&summary, "null %s\n", PrettyDescriptor(type).c_str()); @@ -607,7 +607,7 @@ class ImageDumper { } static void PrintField(std::string& summary, Field* field, Object* obj) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { FieldHelper fh(field); const char* descriptor = fh.GetTypeDescriptor(); StringAppendF(&summary, "\t%s: ", fh.GetName()); @@ -636,7 +636,7 @@ class ImageDumper { } static void DumpFields(std::string& summary, Object* obj, Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* super = klass->GetSuperClass(); if (super != NULL) { DumpFields(summary, obj, super); @@ -655,7 +655,7 @@ class ImageDumper { } const void* GetOatCodeBegin(Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); const void* code = m->GetCode(); if (code == runtime->GetResolutionStubArray(Runtime::kStaticMethod)->GetData()) { @@ -668,7 +668,7 @@ class ImageDumper { } uint32_t GetOatCodeSize(Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const uint32_t* oat_code_begin = reinterpret_cast<const uint32_t*>(GetOatCodeBegin(m)); if (oat_code_begin == NULL) { return 0; @@ -677,7 +677,7 @@ class ImageDumper { } const void* GetOatCodeEnd(Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const uint8_t* oat_code_begin = reinterpret_cast<const uint8_t*>(GetOatCodeBegin(m)); if (oat_code_begin == NULL) { return NULL; @@ -686,7 +686,7 @@ class ImageDumper { } static void Callback(Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(obj != NULL); DCHECK(arg != NULL); ImageDumper* state = reinterpret_cast<ImageDumper*>(arg); @@ -945,7 +945,7 @@ class ImageDumper { } void DumpOutliers(std::ostream& os) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { size_t sum_of_sizes = 0; size_t sum_of_sizes_squared = 0; size_t sum_of_expansion = 0; @@ -1045,7 +1045,7 @@ class ImageDumper { os << "\n" << std::flush; } - void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 
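// Note on the heap_bitmap_lock_ usage in ImageDumper::Dump() above:
// WriterMutexLock and ReaderMutexLock are RAII guards, so FlushAllocStack()
// runs while the write lock is held and releases it at the closing brace of
// the inner scope, after which the live-bitmap walk proceeds under the
// shared read lock.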
os << "\tart_file_bytes = " << PrettySize(file_bytes) << "\n\n" << "\tart_file_bytes = header_bytes + object_bytes + alignment_bytes\n" << StringPrintf("\theader_bytes = %8zd (%2.0f%% of art file bytes)\n" diff --git a/src/object.cc b/src/object.cc index 83994ffd59..cde7e04e48 100644 --- a/src/object.cc +++ b/src/object.cc @@ -439,7 +439,7 @@ Method* Method::FindOverriddenMethod() const { } static const void* GetOatCode(const Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); const void* code = m->GetCode(); // Peel off any method tracing trampoline. @@ -529,7 +529,7 @@ uint32_t Method::FindCatchBlock(Class* exception_type, uint32_t dex_pc) const { void Method::Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) const { if (kIsDebugBuild) { self->AssertThreadSuspensionIsAllowable(); - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(kRunnable, self->GetState()); } diff --git a/src/object.h b/src/object.h index c20c99aaab..36db13d04c 100644 --- a/src/object.h +++ b/src/object.h @@ -172,11 +172,11 @@ class MANAGED Object { void SetClass(Class* new_klass); bool InstanceOf(const Class* klass) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - size_t SizeOf() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + size_t SizeOf() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Object* Clone() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Object* Clone() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t IdentityHashCode() const { #ifdef MOVING_GARBAGE_COLLECTOR @@ -199,20 +199,20 @@ class MANAGED Object { uint32_t GetThinLockId(); - void MonitorEnter(Thread* thread) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + void MonitorEnter(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCK_FUNCTION(monitor_lock_); - bool MonitorExit(Thread* thread) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + bool MonitorExit(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) UNLOCK_FUNCTION(monitor_lock_); - void Notify() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void NotifyAll() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Wait(int64_t timeout) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Wait(int64_t timeout) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Wait(int64_t timeout, int32_t nanos) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsClass() const; @@ -260,14 +260,14 @@ class MANAGED Object { return down_cast<const Method*>(this); } - bool IsField() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool IsField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Field* AsField() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Field* AsField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(IsField()); return down_cast<Field*>(this); } - const Field* AsField() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const Field* AsField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(IsField()); return down_cast<const 
Field*>(this); } @@ -424,55 +424,55 @@ class MANAGED Field : public Object { // field access, null object for static fields bool GetBoolean(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetBoolean(Object* object, bool z) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int8_t GetByte(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetByte(Object* object, int8_t b) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uint16_t GetChar(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetChar(Object* object, uint16_t c) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int16_t GetShort(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetShort(Object* object, int16_t s) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t GetInt(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetInt(Object* object, int32_t i) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int64_t GetLong(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetLong(Object* object, int64_t j) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); float GetFloat(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetFloat(Object* object, float f) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); double GetDouble(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetDouble(Object* object, double d) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Object* GetObject(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetObject(Object* object, const Object* l) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // raw field accesses uint32_t Get32(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Set32(Object* object, uint32_t new_value) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uint64_t Get64(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Set64(Object* object, uint64_t new_value) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Object* GetObj(const Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetObj(Object* object, const Object* new_value) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static Class* GetJavaLangReflectField() { DCHECK(java_lang_reflect_Field_ != NULL); @@ -652,10 +652,10 @@ class MANAGED Method : public Object { void SetDexCacheInitializedStaticStorage(ObjectArray<StaticStorageBase>* new_value); // Find the method that this method overrides - Method* FindOverriddenMethod() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Method* FindOverriddenMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const void* GetCode() const { return GetFieldPtr<const void*>(OFFSET_OF_OBJECT_MEMBER(Method, code_), false); @@ -665,7 +665,7 @@ class MANAGED Method : public Object { SetFieldPtr<const void*>(OFFSET_OF_OBJECT_MEMBER(Method, code_), code, false); } - uint32_t GetCodeSize() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + uint32_t GetCodeSize() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this); uintptr_t code = reinterpret_cast<uintptr_t>(GetCode()); if (code == 0) { @@ -677,7 +677,7 @@ class MANAGED Method : public Object { } bool IsWithinCode(uintptr_t pc) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uintptr_t code = reinterpret_cast<uintptr_t>(GetCode()); if (code == 0) { return pc == 0; @@ -686,7 +686,7 @@ class MANAGED Method : public Object { } void AssertPcIsWithinCode(uintptr_t pc) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uint32_t GetOatCodeOffset() const { DCHECK(!Runtime::Current()->IsStarted()); @@ -811,9 +811,9 @@ class MANAGED Method : public Object { bool IsRegistered() const; void RegisterNative(Thread* self, const void* native_method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void UnregisterNative(Thread* self) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void UnregisterNative(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static MemberOffset NativeMethodOffset() { return OFFSET_OF_OBJECT_MEMBER(Method, native_method_); @@ -914,16 +914,16 @@ class MANAGED Method : public Object { // Converts a native PC to a dex PC. TODO: this is a no-op // until we associate a PC mapping table with each method. uint32_t ToDexPC(const uintptr_t pc) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Converts a dex PC to a native PC. TODO: this is a no-op // until we associate a PC mapping table with each method. 
uintptr_t ToNativePC(const uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Find the catch block for the given exception type and dex_pc uint32_t FindCatchBlock(Class* exception_type, uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SetClasses(Class* java_lang_reflect_Constructor, Class* java_lang_reflect_Method); @@ -1014,10 +1014,10 @@ class MANAGED Array : public Object { // A convenience for code that doesn't know the component size, // and doesn't want to have to work it out itself. static Array* Alloc(Class* array_class, int32_t component_count) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static Array* Alloc(Class* array_class, int32_t component_count, size_t component_size) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t SizeOf() const; @@ -1055,7 +1055,7 @@ class MANAGED Array : public Object { protected: bool IsValidIndex(int32_t index) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(index < 0 || index >= length_)) { return ThrowArrayIndexOutOfBoundsException(index); } @@ -1064,9 +1064,9 @@ class MANAGED Array : public Object { protected: bool ThrowArrayIndexOutOfBoundsException(int32_t index) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool ThrowArrayStoreException(Object* object) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: // The number of array elements. 
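// Convention note for the Array helpers above: ThrowArrayIndexOutOfBoundsException()
// and ThrowArrayStoreException() leave the exception pending on the current
// Thread and return false, which is what lets IsValidIndex() collapse the
// bounds check and the throw into a single 'if (!IsValidIndex(i))' branch.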
@@ -1081,26 +1081,26 @@ template<class T> class MANAGED ObjectArray : public Array { public: static ObjectArray<T>* Alloc(Class* object_array_class, int32_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - T* Get(int32_t i) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + T* Get(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Set element without bound and element type checks, to be used in limited // circumstances, such as during boot image writing void SetWithoutChecks(int32_t i, T* object) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - T* GetWithoutChecks(int32_t i) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + T* GetWithoutChecks(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void Copy(const ObjectArray<T>* src, int src_pos, ObjectArray<T>* dst, int dst_pos, size_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ObjectArray<T>* CopyOf(int32_t new_length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectArray); @@ -1191,7 +1191,7 @@ class MANAGED Class : public StaticStorageBase { return static_cast<Status>(GetField32(OFFSET_OF_OBJECT_MEMBER(Class, status_), false)); } - void SetStatus(Status new_status) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SetStatus(Status new_status) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns true if the class has failed to link. bool IsErroneous() const { @@ -1302,7 +1302,7 @@ class MANAGED Class : public StaticStorageBase { String* GetName() const; // Returns the cached name void SetName(String* name); // Sets the cached name String* ComputeName() // Computes the name, then sets the cached value - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsProxyClass() const { // Read access flags without using getter as whether something is a proxy can be check in @@ -1381,7 +1381,7 @@ class MANAGED Class : public StaticStorageBase { bool IsStringClass() const; - bool IsThrowableClass() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool IsThrowableClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Class* GetComponentType() const { return GetFieldObject<Class*>(OFFSET_OF_OBJECT_MEMBER(Class, component_type_), false); @@ -1405,7 +1405,7 @@ class MANAGED Class : public StaticStorageBase { } // Creates a raw object instance but does not invoke the default constructor. 
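// Caller's-eye sketch for AllocObject() below (hypothetical call site): the
// default constructor is not run, so the caller is expected to invoke <init>
// itself or to fill every field in by hand; a NULL result is assumed here to
// mean allocation failure with the exception already pending on the thread:
//   Object* obj = klass->AllocObject();
//   if (obj == NULL) { /* OOME pending; bail out */ }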
- Object* AllocObject() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Object* AllocObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsVariableSize() const { // Classes and arrays vary in size, and so the object_size_ field cannot @@ -1424,9 +1424,9 @@ class MANAGED Class : public StaticStorageBase { } void SetClassSize(size_t new_class_size) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - size_t GetObjectSize() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t GetObjectSize() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(!IsVariableSize()) << " class=" << PrettyTypeOf(this); DCHECK_EQ(sizeof(size_t), sizeof(int32_t)); size_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), false); @@ -1442,12 +1442,12 @@ class MANAGED Class : public StaticStorageBase { // Returns true if this class is in the same packages as that class. bool IsInSamePackage(const Class* that) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2); // Returns true if this class can access that class. - bool CanAccess(Class* that) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool CanAccess(Class* that) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return that->IsPublic() || this->IsInSamePackage(that); } @@ -1455,7 +1455,7 @@ class MANAGED Class : public StaticStorageBase { // Note that access to the class isn't checked in case the declaring class is protected and the // method has been exposed by a public sub-class bool CanAccessMember(Class* access_to, uint32_t member_flags) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Classes can access all of their own members if (this == access_to) { return true; @@ -1479,7 +1479,7 @@ class MANAGED Class : public StaticStorageBase { } bool IsSubClass(const Class* klass) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Can src be assigned to this class? For example, String can be assigned to Object (by an // upcast), however, an Object cannot be assigned to a String as a potentially exception throwing @@ -1487,7 +1487,7 @@ class MANAGED Class : public StaticStorageBase { // that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign // to themselves. Classes for primitive types may not assign to each other. 
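// Concrete reading of the assignability contract documented above
// (illustrative values):
//   object_class->IsAssignableFrom(string_class) -> true   (String IS-A Object)
//   string_class->IsAssignableFrom(object_class) -> false  (would need a downcast)
//   int_class->IsAssignableFrom(long_class)      -> false  (primitives never mix)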
bool IsAssignableFrom(const Class* src) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(src != NULL); if (this == src) { // Can always assign to things of the same type @@ -1543,7 +1543,7 @@ class MANAGED Class : public StaticStorageBase { }; void DumpClass(std::ostream& os, int flags) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); DexCache* GetDexCache() const; @@ -1564,12 +1564,12 @@ class MANAGED Class : public StaticStorageBase { } Method* GetDirectMethod(int32_t i) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetDirectMethods()->Get(i); } void SetDirectMethod(uint32_t i, Method* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ ObjectArray<Method>* direct_methods = GetFieldObject<ObjectArray<Method>*>( OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false); @@ -1601,19 +1601,19 @@ class MANAGED Class : public StaticStorageBase { } Method* GetVirtualMethod(uint32_t i) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(IsResolved() || IsErroneous()); return GetVirtualMethods()->Get(i); } Method* GetVirtualMethodDuringLinking(uint32_t i) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(IsLoaded() || IsErroneous()); return GetVirtualMethods()->Get(i); } void SetVirtualMethod(uint32_t i, Method* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectArray<Method>* virtual_methods = GetFieldObject<ObjectArray<Method>*>( OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), false); @@ -1642,7 +1642,7 @@ class MANAGED Class : public StaticStorageBase { // super class, return the specific implementation // method for this class. Method* FindVirtualMethodForVirtual(Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(!method->GetDeclaringClass()->IsInterface()); // The argument method may from a super class. // Use the index to a potentially overridden one for this instance's class. @@ -1653,16 +1653,16 @@ class MANAGED Class : public StaticStorageBase { // super class or interface, return the specific implementation // method for this class. 
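// Dispatch sketch for FindVirtualMethodForVirtual() above: the argument may be
// declared on a superclass, and its vtable slot is looked up in this class's
// virtual-method table to find the (possibly overriding) implementation, e.g.
//   Method* impl = receiver_klass->FindVirtualMethodForVirtual(declared_method);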
Method* FindVirtualMethodForInterface(Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindInterfaceMethod(const StringPiece& name, const StringPiece& descriptor) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindVirtualMethodForVirtualOrInterface(Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (method->IsDirect()) { return method; } @@ -1673,28 +1673,28 @@ class MANAGED Class : public StaticStorageBase { } Method* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindVirtualMethod(const StringPiece& name, const StringPiece& descriptor) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindDirectMethod(const StringPiece& name, const StringPiece& signature) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t GetIfTableCount() const { ObjectArray<InterfaceEntry>* iftable = GetIfTable(); @@ -1731,13 +1731,13 @@ class MANAGED Class : public StaticStorageBase { } Field* GetInstanceField(uint32_t i) const // TODO: uint16_t - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ DCHECK_NE(NumInstanceFields(), 0U); return GetIFields()->Get(i); } void SetInstanceField(uint32_t i, Field* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ ObjectArray<Field>* ifields= GetFieldObject<ObjectArray<Field>*>( OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false); ifields->Set(i, f); @@ -1808,12 +1808,12 @@ class MANAGED Class : public StaticStorageBase { } Field* GetStaticField(uint32_t i) const // TODO: uint16_t - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetSFields()->Get(i); } void SetStaticField(uint32_t i, Field* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectArray<Field>* sfields= GetFieldObject<ObjectArray<Field>*>( OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false); sfields->Set(i, f); @@ -1827,37 +1827,37 @@ class MANAGED Class : public StaticStorageBase { // Find a static or instance field using the JLS resolution order Field* FindField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds the given instance field in this class or a superclass. Field* FindInstanceField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds the given instance field in this class or a superclass, only searches classes that // have the same dex cache. Field* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Field* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Field* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds the given static field in this class or a superclass. Field* FindStaticField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds the given static field in this class or superclass, only searches classes that // have the same dex cache. 
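// Usage sketch for the field lookups above (hypothetical call site; "count"
// and "I" are an example name and type descriptor): FindField() walks the JLS
// resolution order mentioned in the comment, while the DexCache overloads
// only match fields resolved through the same dex file.
//   Field* f = klass->FindField("count", "I");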
Field* FindStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Field* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Field* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); pid_t GetClinitThreadId() const { DCHECK(IsIdxLoaded() || IsErroneous()); @@ -1883,17 +1883,17 @@ class MANAGED Class : public StaticStorageBase { private: void SetVerifyErrorClass(Class* klass) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(klass != NULL) << PrettyClass(this); SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass, false); } bool Implements(const Class* klass) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsArrayAssignableFromArray(const Class* klass) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsAssignableFromArray(const Class* klass) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // defining class loader, or NULL for the "bootstrap" system loader ClassLoader* class_loader_; @@ -2233,7 +2233,7 @@ class MANAGED PrimitiveArray : public Array { typedef T ElementType; static PrimitiveArray<T>* Alloc(size_t length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const T* GetData() const { intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(sizeof(T)).Int32Value(); @@ -2245,14 +2245,14 @@ class MANAGED PrimitiveArray : public Array { return reinterpret_cast<T*>(data); } - T Get(int32_t i) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + T Get(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!IsValidIndex(i)) { return T(0); } return GetData()[i]; } - void Set(int32_t i, T value) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Set(int32_t i, T value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // TODO: ArrayStoreException if (IsValidIndex(i)) { GetData()[i] = value; @@ -2306,9 +2306,9 @@ class MANAGED String : public Object { int32_t GetLength() const; - int32_t GetHashCode() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + int32_t GetHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ComputeHashCode() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { SetHashCode(ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength())); } @@ -2316,36 +2316,36 @@ class MANAGED String : public Object { return CountUtf8Bytes(GetCharArray()->GetData() + GetOffset(), GetLength()); } - uint16_t CharAt(int32_t index) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + uint16_t CharAt(int32_t index) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - String* Intern() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + String* Intern() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static String* AllocFromUtf16(int32_t utf16_length, const uint16_t* utf16_data_in, int32_t 
hash_code = 0) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static String* AllocFromModifiedUtf8(const char* utf) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static String* AllocFromModifiedUtf8(int32_t utf16_length, const char* utf8_data_in) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static String* Alloc(Class* java_lang_String, int32_t utf16_length) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static String* Alloc(Class* java_lang_String, CharArray* array) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool Equals(const char* modified_utf8) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // TODO: do we need this overload? give it a more intention-revealing name. bool Equals(const StringPiece& modified_utf8) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool Equals(const String* that) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool Equals(const String* that) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Compare UTF-16 code point values not in a locale-sensitive manner int Compare(int32_t utf16_length, const char* utf8_data_in); @@ -2353,7 +2353,7 @@ class MANAGED String : public Object { // TODO: do we need this overload? give it a more intention-revealing name. bool Equals(const uint16_t* that_chars, int32_t that_offset, int32_t that_length) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Create a modified UTF-8 encoded std::string from a java/lang/String object. std::string ToModifiedUtf8() const; @@ -2471,7 +2471,7 @@ inline bool Method::CheckIncompatibleClassChange(InvokeType type) { return IsDirect() || !(methods_class->IsInterface() || methods_class->IsObjectClass()); } default: - LOG(FATAL) << "UNREACHABLE"; + LOG(FATAL) << "Unreachable - invocation type: " << type; return true; } } @@ -2511,13 +2511,13 @@ class MANAGED Throwable : public Object { String* GetDetailMessage() const { return GetFieldObject<String*>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), false); } - std::string Dump() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // This is a runtime version of initCause, you shouldn't use it if initCause may have been // overridden. Also it asserts rather than throwing exceptions. Currently this is only used // in cases like the verifier where the checks cannot fail and initCause isn't overridden. 
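// A few hunks up, the LOG(FATAL) in Method::CheckIncompatibleClassChange() now
// streams the InvokeType into the failure message, which requires the enum to
// be printable. A minimal sketch of such a printer, assuming the enumerator
// set seen in support_invoke.cc above (the tree's actual printer may differ):
#include <ostream>
std::ostream& operator<<(std::ostream& os, const InvokeType& rhs) {
  switch (rhs) {
    case kStatic:    os << "static";    break;
    case kDirect:    os << "direct";    break;
    case kVirtual:   os << "virtual";   break;
    case kSuper:     os << "super";     break;
    case kInterface: os << "interface"; break;
    default:         os << "InvokeType[" << static_cast<int>(rhs) << "]"; break;
  }
  return os;
}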
void SetCause(Throwable* cause); - bool IsCheckedException() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + bool IsCheckedException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static Class* GetJavaLangThrowable() { DCHECK(java_lang_Throwable_ != NULL); @@ -2572,7 +2572,7 @@ class MANAGED StackTraceElement : public Object { String* method_name, String* file_name, int32_t line_number) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SetClass(Class* java_lang_StackTraceElement); @@ -2598,20 +2598,20 @@ class MANAGED StackTraceElement : public Object { class MANAGED InterfaceEntry : public ObjectArray<Object> { public: - Class* GetInterface() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Class* GetInterface() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* interface = Get(kInterface)->AsClass(); DCHECK(interface != NULL); return interface; } - void SetInterface(Class* interface) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void SetInterface(Class* interface) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(interface != NULL); DCHECK(interface->IsInterface()); DCHECK(Get(kInterface) == NULL); Set(kInterface, interface); } - size_t GetMethodArrayCount() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t GetMethodArrayCount() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectArray<Method>* method_array = down_cast<ObjectArray<Method>*>(Get(kMethodArray)); if (method_array == NULL) { return 0; @@ -2620,14 +2620,14 @@ class MANAGED InterfaceEntry : public ObjectArray<Object> { } ObjectArray<Method>* GetMethodArray() const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectArray<Method>* method_array = down_cast<ObjectArray<Method>*>(Get(kMethodArray)); DCHECK(method_array != NULL); return method_array; } void SetMethodArray(ObjectArray<Method>* new_ma) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(new_ma != NULL); DCHECK(Get(kMethodArray) == NULL); Set(kMethodArray, new_ma); diff --git a/src/object_test.cc b/src/object_test.cc index e44cf64e72..081be4b2c1 100644 --- a/src/object_test.cc +++ b/src/object_test.cc @@ -35,7 +35,7 @@ class ObjectTest : public CommonTest { const char* utf8_in, const char* utf16_expected_le, int32_t expected_hash) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { UniquePtr<uint16_t[]> utf16_expected(new uint16_t[length]); for (int32_t i = 0; i < length; i++) { uint16_t ch = (((utf16_expected_le[i*2 + 0] & 0xff) << 8) | diff --git a/src/object_utils.h b/src/object_utils.h index d523ecc1c5..f6158f3b9e 100644 --- a/src/object_utils.h +++ b/src/object_utils.h @@ -32,25 +32,25 @@ namespace art { class ObjectLock { public: - explicit ObjectLock(Object* object) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + explicit ObjectLock(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : self_(Thread::Current()), obj_(object) { CHECK(object != NULL); obj_->MonitorEnter(self_); } - ~ObjectLock() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + ~ObjectLock() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { obj_->MonitorExit(self_); } - void Wait() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Wait() 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return Monitor::Wait(self_, obj_, 0, 0, false); } - void Notify() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { obj_->Notify(); } - void NotifyAll() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { obj_->NotifyAll(); } @@ -63,7 +63,7 @@ class ObjectLock { class ClassHelper { public: ClassHelper(const Class* c = NULL, ClassLinker* l = NULL) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : class_def_(NULL), class_linker_(l), dex_cache_(NULL), @@ -76,7 +76,7 @@ class ClassHelper { } void ChangeClass(const Class* new_c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(new_c != NULL) << "klass_=" << klass_; // Log what we were changing from if any CHECK(new_c->IsClass()) << "new_c=" << new_c; if (dex_cache_ != NULL) { @@ -93,7 +93,7 @@ class ClassHelper { // The returned const char* is only guaranteed to be valid for the lifetime of the ClassHelper. // If you need it longer, copy it into a std::string. - const char* GetDescriptor() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(klass_ != NULL); if (UNLIKELY(klass_->IsArrayClass())) { return GetArrayDescriptor(); @@ -109,7 +109,7 @@ class ClassHelper { } } - const char* GetArrayDescriptor() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetArrayDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string result("["); const Class* saved_klass = klass_; CHECK(saved_klass != NULL); @@ -121,7 +121,7 @@ class ClassHelper { } const DexFile::ClassDef* GetClassDef() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::ClassDef* result = class_def_; if (result == NULL) { result = GetDexFile().FindClassDef(GetDescriptor()); @@ -130,7 +130,7 @@ class ClassHelper { return result; } - uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(klass_ != NULL); if (klass_->IsPrimitive()) { return 0; @@ -149,7 +149,7 @@ class ClassHelper { } uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(klass_ != NULL); DCHECK(!klass_->IsPrimitive()); DCHECK(!klass_->IsArrayClass()); @@ -157,7 +157,7 @@ class ClassHelper { } Class* GetDirectInterface(uint32_t idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(klass_ != NULL); DCHECK(!klass_->IsPrimitive()); if (klass_->IsArrayClass()) { @@ -180,7 +180,7 @@ class ClassHelper { } } - const char* GetSourceFile() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string descriptor(GetDescriptor()); const DexFile& dex_file = GetDexFile(); const DexFile::ClassDef* dex_class_def = dex_file.FindClassDef(descriptor); @@ -188,7 +188,7 @@ class ClassHelper { return dex_file.GetSourceFile(*dex_class_def); } - std::string GetLocation() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + std::string 
GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* dex_cache = GetDexCache(); if (dex_cache != NULL && !klass_->IsProxyClass()) { return dex_cache->GetLocation()->ToModifiedUtf8(); @@ -198,7 +198,7 @@ class ClassHelper { } } - const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile* result = dex_file_; if (result == NULL) { const DexCache* dex_cache = GetDexCache(); @@ -208,7 +208,7 @@ class ClassHelper { return *result; } - DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* result = dex_cache_; if (result == NULL) { DCHECK(klass_ != NULL); @@ -220,7 +220,7 @@ class ClassHelper { private: const DexFile::TypeList* GetInterfaceTypeList() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::TypeList* result = interface_type_list_; if (result == NULL) { const DexFile::ClassDef* class_def = GetClassDef(); @@ -270,7 +270,7 @@ class FieldHelper { } field_ = new_f; } - const char* GetName() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t field_index = field_->GetDexFieldIndex(); if (!field_->GetDeclaringClass()->IsProxyClass()) { const DexFile& dex_file = GetDexFile(); @@ -291,7 +291,7 @@ class FieldHelper { return Runtime::Current()->GetInternTable()->InternStrong(GetName()); } } - Class* GetType() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t field_index = field_->GetDexFieldIndex(); if (!field_->GetDeclaringClass()->IsProxyClass()) { const DexFile& dex_file = GetDexFile(); @@ -306,7 +306,7 @@ class FieldHelper { return GetClassLinker()->FindSystemClass(GetTypeDescriptor()); } } - const char* GetTypeDescriptor() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t field_index = field_->GetDexFieldIndex(); if (!field_->GetDeclaringClass()->IsProxyClass()) { const DexFile& dex_file = GetDexFile(); @@ -320,14 +320,14 @@ class FieldHelper { } } Primitive::Type GetTypeAsPrimitiveType() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return Primitive::GetType(GetTypeDescriptor()[0]); } - bool IsPrimitiveType() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Primitive::Type type = GetTypeAsPrimitiveType(); return type != Primitive::kPrimNot; } - size_t FieldSize() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Primitive::Type type = GetTypeAsPrimitiveType(); return Primitive::FieldSize(type); } @@ -335,7 +335,7 @@ class FieldHelper { // The returned const char* is only guaranteed to be valid for the lifetime of the FieldHelper. // If you need it longer, copy it into a std::string. 
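// Usage sketch for the ObjectLock RAII helper defined at the top of
// object_utils.h above (EnsureInitialized is a made-up caller; the real
// class-initialization logic lives in ClassLinker):
void EnsureInitialized(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ObjectLock lock(klass);          // MonitorEnter(self) in the constructor
  while (!klass->IsInitialized()) {
    lock.Wait();                   // Monitor::Wait with no timeout
  }
}                                  // MonitorExit(self) in the destructor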
const char* GetDeclaringClassDescriptor() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint16_t type_idx = field_->GetDeclaringClass()->GetDexTypeIndex(); if (type_idx != DexFile::kDexNoIndex16) { const DexFile& dex_file = GetDexFile(); @@ -349,7 +349,7 @@ class FieldHelper { } private: - DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* result = dex_cache_; if (result == NULL) { result = field_->GetDeclaringClass()->GetDexCache(); @@ -365,7 +365,7 @@ class FieldHelper { } return result; } - const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile* result = dex_file_; if (result == NULL) { const DexCache* dex_cache = GetDexCache(); @@ -391,20 +391,20 @@ class MethodHelper { shorty_len_(0) {} explicit MethodHelper(const Method* m) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL), shorty_len_(0) { SetMethod(m); } MethodHelper(const Method* m, ClassLinker* l) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : class_linker_(l), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL), shorty_len_(0) { SetMethod(m); } - void ChangeMethod(Method* new_m) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void ChangeMethod(Method* new_m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(new_m != NULL); if (dex_cache_ != NULL) { Class* klass = new_m->GetDeclaringClass(); @@ -423,7 +423,7 @@ class MethodHelper { shorty_ = NULL; } - const char* GetName() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); uint32_t dex_method_idx = method_->GetDexMethodIndex(); if (dex_method_idx != DexFile::kDexNoIndex16) { @@ -444,15 +444,15 @@ class MethodHelper { } } - String* GetNameAsString() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + String* GetNameAsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); uint32_t dex_method_idx = method_->GetDexMethodIndex(); const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx); return GetClassLinker()->ResolveString(dex_file, method_id.name_idx_, GetDexCache()); } - const char* GetShorty() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const char* result = shorty_; if (result == NULL) { const DexFile& dex_file = GetDexFile(); @@ -463,14 +463,14 @@ class MethodHelper { return result; } - uint32_t GetShortyLength() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + uint32_t GetShortyLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (shorty_ == NULL) { GetShorty(); } return shorty_len_; } - const std::string GetSignature() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const std::string GetSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); uint32_t dex_method_idx = 
method_->GetDexMethodIndex(); if (dex_method_idx != DexFile::kDexNoIndex16) { @@ -481,19 +481,19 @@ class MethodHelper { } const DexFile::ProtoId& GetPrototype() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); return dex_file.GetMethodPrototype(dex_file.GetMethodId(method_->GetDexMethodIndex())); } const DexFile::TypeList* GetParameterTypeList() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::ProtoId& proto = GetPrototype(); return GetDexFile().GetProtoParameters(proto); } ObjectArray<Class>* GetParameterTypes() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::TypeList* params = GetParameterTypeList(); Class* array_class = GetClassLinker()->FindSystemClass("[Ljava/lang/Class;"); uint32_t num_params = params == NULL ? 0 : params->Size(); @@ -509,7 +509,7 @@ class MethodHelper { return result; } - Class* GetReturnType() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + Class* GetReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); const DexFile::MethodId& method_id = dex_file.GetMethodId(method_->GetDexMethodIndex()); const DexFile::ProtoId& proto_id = dex_file.GetMethodPrototype(method_id); @@ -518,7 +518,7 @@ class MethodHelper { } const char* GetReturnTypeDescriptor() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); const DexFile::MethodId& method_id = dex_file.GetMethodId(method_->GetDexMethodIndex()); const DexFile::ProtoId& proto_id = dex_file.GetMethodPrototype(method_id); @@ -527,7 +527,7 @@ class MethodHelper { } int32_t GetLineNumFromDexPC(uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (dex_pc == DexFile::kDexNoIndex) { return method_->IsNative() ? 
-2 : -1; } else { @@ -537,7 +537,7 @@ class MethodHelper { } const char* GetDeclaringClassDescriptor() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* klass = method_->GetDeclaringClass(); DCHECK(!klass->IsProxyClass()); uint16_t type_idx = klass->GetDexTypeIndex(); @@ -546,7 +546,7 @@ class MethodHelper { } const char* GetDeclaringClassSourceFile() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const char* descriptor = GetDeclaringClassDescriptor(); const DexFile& dex_file = GetDexFile(); const DexFile::ClassDef* dex_class_def = dex_file.FindClassDef(descriptor); @@ -555,7 +555,7 @@ class MethodHelper { } uint32_t GetClassDefIndex() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const char* descriptor = GetDeclaringClassDescriptor(); const DexFile& dex_file = GetDexFile(); uint32_t index; @@ -564,20 +564,20 @@ class MethodHelper { } ClassLoader* GetClassLoader() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return method_->GetDeclaringClass()->GetClassLoader(); } bool IsStatic() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return method_->IsStatic(); } - bool IsClassInitializer() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool IsClassInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return IsStatic() && StringPiece(GetName()) == "<clinit>"; } - size_t NumArgs() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t NumArgs() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // "1 +" because the first in Args is the receiver. // "- 1" because we don't count the return type. return (IsStatic() ? 
0 : 1) + GetShortyLength() - 1; @@ -585,7 +585,7 @@ class MethodHelper { // Is the specified parameter a long or double, where parameter 0 is 'this' for instance methods bool IsParamALongOrDouble(size_t param) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK_LT(param, NumArgs()); if (IsStatic()) { param++; // 0th argument must skip return value at start of the shorty @@ -597,7 +597,7 @@ class MethodHelper { } // Is the specified parameter a reference, where parameter 0 is 'this' for instance methods - bool IsParamAReference(size_t param) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool IsParamAReference(size_t param) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK_LT(param, NumArgs()); if (IsStatic()) { param++; // 0th argument must skip return value at start of the shorty @@ -608,7 +608,7 @@ class MethodHelper { } bool HasSameNameAndSignature(MethodHelper* other) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (GetDexCache() == other->GetDexCache()) { const DexFile& dex_file = GetDexFile(); const DexFile::MethodId& mid = dex_file.GetMethodId(method_->GetDexMethodIndex()); @@ -622,17 +622,17 @@ class MethodHelper { } const DexFile::CodeItem* GetCodeItem() - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetDexFile().GetCodeItem(method_->GetCodeItemOffset()); } bool IsResolvedTypeIdx(uint16_t type_idx) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return method_->GetDexCacheResolvedTypes()->Get(type_idx) != NULL; } Class* GetClassFromTypeIdx(uint16_t type_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* type = method_->GetDexCacheResolvedTypes()->Get(type_idx); if (type == NULL) { type = GetClassLinker()->ResolveType(type_idx, method_); @@ -642,17 +642,17 @@ class MethodHelper { } const char* GetTypeDescriptorFromTypeIdx(uint16_t type_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); return dex_file.GetTypeDescriptor(dex_file.GetTypeId(type_idx)); } Class* GetDexCacheResolvedType(uint16_t type_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetDexCache()->GetResolvedType(type_idx); } - const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile* result = dex_file_; if (result == NULL) { const DexCache* dex_cache = GetDexCache(); @@ -662,7 +662,7 @@ class MethodHelper { return *result; } - DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DexCache* result = dex_cache_; if (result == NULL) { Class* klass = method_->GetDeclaringClass(); @@ -676,7 +676,7 @@ class MethodHelper { // Set the method_ field, for proxy methods looking up the interface method via the resolved // methods table. 
void SetMethod(const Method* method) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (method != NULL) { Class* klass = method->GetDeclaringClass(); if (klass->IsProxyClass()) { diff --git a/src/reference_table.cc b/src/reference_table.cc index a2b54d6a14..1f6cab7446 100644 --- a/src/reference_table.cc +++ b/src/reference_table.cc @@ -63,7 +63,7 @@ struct ObjectComparator { bool operator()(const Object* obj1, const Object* obj2) // TODO: enable analysis when analysis can work with the STL. NO_THREAD_SAFETY_ANALYSIS { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + Locks::mutator_lock_->AssertSharedHeld(); // Ensure null references and cleared jweaks appear at the end. if (obj1 == NULL) { return true; @@ -100,7 +100,7 @@ struct ObjectComparator { // or equivalent to the original. static void DumpSummaryLine(std::ostream& os, const Object* obj, size_t element_count, int identical, int equiv) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (obj == NULL) { os << " NULL reference (count=" << equiv << ")\n"; return; diff --git a/src/reference_table.h b/src/reference_table.h index de9d45d31e..f398eb2a44 100644 --- a/src/reference_table.h +++ b/src/reference_table.h @@ -43,14 +43,14 @@ class ReferenceTable { size_t Size() const; - void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void VisitRoots(Heap::RootVisitor* visitor, void* arg); private: typedef std::vector<const Object*> Table; static void Dump(std::ostream& os, const Table& entries) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); friend class IndirectReferenceTable; // For Dump. std::string name_; diff --git a/src/reflection.cc b/src/reflection.cc index 542f1a2e44..7f1d4d1b30 100644 --- a/src/reflection.cc +++ b/src/reflection.cc @@ -235,7 +235,7 @@ void BoxPrimitive(Primitive::Type src_class, JValue& value) { } if (kIsDebugBuild) { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(Thread::Current()->GetState(), kRunnable); } ScopedObjectAccessUnchecked soa(Thread::Current()); @@ -244,7 +244,7 @@ void BoxPrimitive(Primitive::Type src_class, JValue& value) { } static std::string UnboxingFailureKind(Method* m, int index, Field* f) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (m != NULL && index != -1) { ++index; // Humans count from 1. 
return StringPrintf("method %s argument %d", PrettyMethod(m, false).c_str(), index); @@ -257,7 +257,7 @@ static std::string UnboxingFailureKind(Method* m, int index, Field* f) static bool UnboxPrimitive(Object* o, Class* dst_class, JValue& unboxed_value, Method* m, int index, Field* f) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!dst_class->IsPrimitive()) { if (o != NULL && !o->InstanceOf(dst_class)) { Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;", diff --git a/src/reflection.h b/src/reflection.h index 2979e5bdee..93219f8afc 100644 --- a/src/reflection.h +++ b/src/reflection.h @@ -30,24 +30,24 @@ class Object; class ScopedObjectAccess; void BoxPrimitive(Primitive::Type src_class, JValue& value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool UnboxPrimitiveForArgument(Object* o, Class* dst_class, JValue& unboxed_value, Method* m, size_t index) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool UnboxPrimitiveForField(Object* o, Class* dst_class, JValue& unboxed_value, Field* f) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool UnboxPrimitiveForResult(Object* o, Class* dst_class, JValue& unboxed_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool ConvertPrimitiveValue(Primitive::Type src_class, Primitive::Type dst_class, const JValue& src, JValue& dst) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); jobject InvokeMethod(const ScopedObjectAccess& soa, jobject method, jobject receiver, jobject args) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool VerifyObjectInClass(Object* o, Class* c) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } // namespace art diff --git a/src/runtime.cc b/src/runtime.cc index 89477b0484..f37788da98 100644 --- a/src/runtime.cc +++ b/src/runtime.cc @@ -175,7 +175,7 @@ struct AbortState { void Runtime::Abort() { // Ensure that we don't have multiple threads trying to abort at once, // which would result in significantly worse diagnostics. - MutexLock mu(*GlobalSynchronization::abort_lock_); + MutexLock mu(*Locks::abort_lock_); // Get any pending output out of the way. fflush(NULL); @@ -536,7 +536,7 @@ bool Runtime::Create(const Options& options, bool ignore_unrecognized) { if (Runtime::instance_ != NULL) { return false; } - GlobalSynchronization::Init(); + Locks::Init(); instance_ = new Runtime; if (!instance_->Init(options, ignore_unrecognized)) { delete instance_; @@ -642,7 +642,7 @@ void Runtime::StartDaemonThreads() { // Must be in the kNative state for calling native methods. { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(self->GetState(), kNative); } @@ -742,7 +742,7 @@ void Runtime::InitNativeMethods() { // Must be in the kNative state for calling native methods (JNI_OnLoad code). 
{ - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_EQ(self->GetState(), kNative); } @@ -831,7 +831,7 @@ void Runtime::DumpForSigQuit(std::ostream& os) { } void Runtime::DumpLockHolders(std::ostream& os) { - uint64_t mutator_lock_owner = GlobalSynchronization::mutator_lock_->GetExclusiveOwnerTid(); + uint64_t mutator_lock_owner = Locks::mutator_lock_->GetExclusiveOwnerTid(); pid_t thread_list_lock_owner = GetThreadList()->GetLockOwner(); pid_t classes_lock_owner = GetClassLinker()->GetClassesLockOwner(); pid_t dex_lock_owner = GetClassLinker()->GetDexLockOwner(); diff --git a/src/runtime.h b/src/runtime.h index f61399163d..4dcefb50d1 100644 --- a/src/runtime.h +++ b/src/runtime.h @@ -97,7 +97,7 @@ class Runtime { // Creates and initializes a new runtime. static bool Create(const Options& options, bool ignore_unrecognized) - SHARED_TRYLOCK_FUNCTION(true, GlobalSynchronization::mutator_lock_); + SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_); bool IsCompiler() const { return is_compiler_; @@ -117,7 +117,7 @@ class Runtime { } // Starts a runtime, which may cause threads to be started and code to run. - void Start() UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_); + void Start() UNLOCK_FUNCTION(Locks::mutator_lock_); bool IsShuttingDown() const { return shutting_down_; @@ -140,7 +140,7 @@ class Runtime { // This isn't marked ((noreturn)) because then gcc will merge multiple calls // in a single function together. This reduces code size slightly, but means // that the native stack trace we get may point at the wrong call site. - static void Abort() LOCKS_EXCLUDED(GlobalSynchronization::abort_lock_); + static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_); // Returns the "main" ThreadGroup, used when attaching user threads. jobject GetMainThreadGroup() const; @@ -154,10 +154,10 @@ class Runtime { void CallExitHook(jint status); // Detaches the current native thread from the runtime. - void DetachCurrentThread() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpLockHolders(std::ostream& os); ~Runtime(); @@ -211,7 +211,7 @@ class Runtime { } void VisitRoots(Heap::RootVisitor* visitor, void* arg) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool HasJniDlsymLookupStub() const { return jni_stub_array_ != NULL; @@ -267,7 +267,7 @@ class Runtime { resolution_method_ = method; } - Method* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + Method* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a special method that describes all callee saves being spilled to the stack. 
enum CalleeSaveType { @@ -289,13 +289,13 @@ class Runtime { void SetCalleeSaveMethod(Method* method, CalleeSaveType type); Method* CreateCalleeSaveMethod(InstructionSet instruction_set, CalleeSaveType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* CreateRefOnlyCalleeSaveMethod(InstructionSet instruction_set) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* CreateRefAndArgsCalleeSaveMethod(InstructionSet instruction_set) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t GetStat(int kind); @@ -341,8 +341,8 @@ class Runtime { void BlockSignals(); bool Init(const Options& options, bool ignore_unrecognized) - SHARED_TRYLOCK_FUNCTION(true, GlobalSynchronization::mutator_lock_); - void InitNativeMethods() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_); + SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_); + void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_); void InitThreadGroups(Thread* self); void RegisterRuntimeNativeMethods(JNIEnv* env); diff --git a/src/runtime_linux.cc b/src/runtime_linux.cc index 8365079cb6..e5033585c1 100644 --- a/src/runtime_linux.cc +++ b/src/runtime_linux.cc @@ -226,7 +226,7 @@ struct UContext { }; static void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) { - MutexLock mu(*GlobalSynchronization::unexpected_signal_lock_); + MutexLock mu(*Locks::unexpected_signal_lock_); bool has_address = (signal_number == SIGILL || signal_number == SIGBUS || signal_number == SIGFPE || signal_number == SIGSEGV); diff --git a/src/runtime_support.h b/src/runtime_support.h index 8f6b6558ea..16f0d2ec6e 100644 --- a/src/runtime_support.h +++ b/src/runtime_support.h @@ -50,7 +50,7 @@ class Object; // check. static inline Object* AllocObjectFromCode(uint32_t type_idx, Method* method, Thread* self, bool access_check) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); Runtime* runtime = Runtime::Current(); if (UNLIKELY(klass == NULL)) { @@ -85,7 +85,7 @@ static inline Object* AllocObjectFromCode(uint32_t type_idx, Method* method, Thr // check. static inline Array* AllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, bool access_check) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(component_count < 0)) { Thread::Current()->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", component_count); @@ -112,7 +112,7 @@ static inline Array* AllocArrayFromCode(uint32_t type_idx, Method* method, int32 extern Array* CheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread* self, bool access_check) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Type of find field operation for fast and slow case. enum FindFieldType { @@ -129,12 +129,12 @@ enum FindFieldType { // Slow field find that can initialize classes and may throw exceptions. 
extern Field* FindFieldFromCode(uint32_t field_idx, const Method* referrer, Thread* self, FindFieldType type, size_t expected_size) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Fast path field resolution that can't initialize classes or throw exceptions. static inline Field* FindFieldFast(uint32_t field_idx, const Method* referrer, FindFieldType type, size_t expected_size) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* resolved_field = referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); if (UNLIKELY(resolved_field == NULL)) { return NULL; @@ -183,7 +183,7 @@ static inline Field* FindFieldFast(uint32_t field_idx, const Method* referrer, // Fast path method resolution that can't throw exceptions. static inline Method* FindMethodFast(uint32_t method_idx, Object* this_object, const Method* referrer, bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { bool is_direct = type == kStatic || type == kDirect; if (UNLIKELY(this_object == NULL && !is_direct)) { return NULL; @@ -223,20 +223,20 @@ static inline Method* FindMethodFast(uint32_t method_idx, Object* this_object, extern Method* FindMethodFromCode(uint32_t method_idx, Object* this_object, const Method* referrer, Thread* self, bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); extern Class* ResolveVerifyAndClinit(uint32_t type_idx, const Method* referrer, Thread* self, bool can_run_clinit, bool verify_access) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static inline String* ResolveStringFromCode(const Method* referrer, uint32_t string_idx) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); return class_linker->ResolveString(string_idx, referrer); } static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) UNLOCK_FUNCTION(monitor_lock_) { // Save any pending exception over monitor exit call. Throwable* saved_exception = NULL; @@ -259,7 +259,7 @@ static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) } static inline void CheckReferenceResult(Object* o, Thread* self) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (o == NULL) { return; } diff --git a/src/scoped_thread_state_change.h b/src/scoped_thread_state_change.h index 745e2d6803..ed3c384590 100644 --- a/src/scoped_thread_state_change.h +++ b/src/scoped_thread_state_change.h @@ -29,7 +29,7 @@ namespace art { class ScopedThreadStateChange { public: ScopedThreadStateChange(Thread* self, ThreadState new_thread_state) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) : self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) { if (self_ == NULL) { // Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL. 
@@ -38,7 +38,7 @@ class ScopedThreadStateChange { } else { bool runnable_transition; { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); old_thread_state_ = self->GetState(); runnable_transition = old_thread_state_ == kRunnable || new_thread_state == kRunnable; if (!runnable_transition) { @@ -56,7 +56,7 @@ class ScopedThreadStateChange { } } - ~ScopedThreadStateChange() LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) { + ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) { if (self_ == NULL) { if (!expected_has_no_thread_) { CHECK(Runtime::Current()->IsShuttingDown()); @@ -68,7 +68,7 @@ class ScopedThreadStateChange { } else if (thread_state_ == kRunnable) { self_->TransitionFromRunnableToSuspended(old_thread_state_); } else { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); self_->SetState(old_thread_state_); } } @@ -112,14 +112,14 @@ class ScopedThreadStateChange { class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { public: explicit ScopedObjectAccessUnchecked(JNIEnv* env) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) : ScopedThreadStateChange(ThreadForEnv(env), kRunnable), env_(reinterpret_cast<JNIEnvExt*>(env)), vm_(env_->vm) { self_->VerifyStack(); } explicit ScopedObjectAccessUnchecked(Thread* self) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) : ScopedThreadStateChange(self, kRunnable), env_(reinterpret_cast<JNIEnvExt*>(self->GetJniEnv())), vm_(env_ != NULL ? env_->vm : NULL) { @@ -158,7 +158,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { */ template<typename T> T AddLocalReference(Object* obj) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. if (obj == NULL) { return NULL; @@ -195,8 +195,8 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { T Decode(jobject obj) const LOCKS_EXCLUDED(JavaVMExt::globals_lock, JavaVMExt::weak_globals_lock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Locks::mutator_lock_->AssertSharedHeld(); DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. return down_cast<T>(Self()->DecodeJObject(obj)); } @@ -204,8 +204,8 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { Field* DecodeField(jfieldID fid) const LOCKS_EXCLUDED(JavaVMExt::globals_lock, JavaVMExt::weak_globals_lock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Locks::mutator_lock_->AssertSharedHeld(); DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. #ifdef MOVING_GARBAGE_COLLECTOR // TODO: we should make these unique weak globals if Field instances can ever move. 
@@ -217,8 +217,8 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { jfieldID EncodeField(Field* field) const LOCKS_EXCLUDED(JavaVMExt::globals_lock, JavaVMExt::weak_globals_lock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Locks::mutator_lock_->AssertSharedHeld(); DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(WARNING); @@ -229,8 +229,8 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { Method* DecodeMethod(jmethodID mid) const LOCKS_EXCLUDED(JavaVMExt::globals_lock, JavaVMExt::weak_globals_lock) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Locks::mutator_lock_->AssertSharedHeld(); DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. #ifdef MOVING_GARBAGE_COLLECTOR // TODO: we should make these unique weak globals if Method instances can ever move. @@ -240,8 +240,8 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { } jmethodID EncodeMethod(Method* method) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Locks::mutator_lock_->AssertSharedHeld(); DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states. #ifdef MOVING_GARBAGE_COLLECTOR UNIMPLEMENTED(WARNING); @@ -281,20 +281,20 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange { class ScopedObjectAccess : public ScopedObjectAccessUnchecked { public: explicit ScopedObjectAccess(JNIEnv* env) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) : ScopedObjectAccessUnchecked(env) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + Locks::mutator_lock_->AssertSharedHeld(); } explicit ScopedObjectAccess(Thread* self) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) : ScopedObjectAccessUnchecked(self) { - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + Locks::mutator_lock_->AssertSharedHeld(); } - ~ScopedObjectAccess() UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) { + ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) { // Base class will release share of lock. Invoked after this destructor. } @@ -303,7 +303,7 @@ class ScopedObjectAccess : public ScopedObjectAccessUnchecked { // routines operating with just a VM are sound, they are not, but when you have just a VM // you cannot call the unsound routines. 
explicit ScopedObjectAccess(JavaVM* vm) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) : ScopedObjectAccessUnchecked(vm) {} friend class ScopedCheck; diff --git a/src/signal_catcher.cc b/src/signal_catcher.cc index 156aec6a63..229edf6278 100644 --- a/src/signal_catcher.cc +++ b/src/signal_catcher.cc @@ -122,12 +122,12 @@ void SignalCatcher::HandleSigQuit() { // We should exclusively hold the mutator lock, set state to Runnable without a pending // suspension to avoid giving away or trying re-acquire the mutator lock. - GlobalSynchronization::mutator_lock_->AssertExclusiveHeld(); + Locks::mutator_lock_->AssertExclusiveHeld(); Thread* self = Thread::Current(); ThreadState old_state; int suspend_count; { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); suspend_count = self->GetSuspendCount(); if (suspend_count != 0) { CHECK_EQ(suspend_count, 1); @@ -155,7 +155,7 @@ void SignalCatcher::HandleSigQuit() { os << "----- end " << getpid() << " -----\n"; { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); self->SetState(old_state); if (suspend_count != 0) { self->ModifySuspendCount(+1, false); diff --git a/src/signal_catcher.h b/src/signal_catcher.h index 11a2c09382..e8ac812a17 100644 --- a/src/signal_catcher.h +++ b/src/signal_catcher.h @@ -35,9 +35,9 @@ class SignalCatcher { explicit SignalCatcher(const std::string& stack_trace_file); ~SignalCatcher(); - void HandleSigQuit() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_, - GlobalSynchronization::thread_list_lock_, - GlobalSynchronization::thread_suspend_count_lock_); + void HandleSigQuit() LOCKS_EXCLUDED(Locks::mutator_lock_, + Locks::thread_list_lock_, + Locks::thread_suspend_count_lock_); private: diff --git a/src/space.h b/src/space.h index 79d5ad44e3..3132911c1f 100644 --- a/src/space.h +++ b/src/space.h @@ -53,7 +53,7 @@ class Space { // create a Space from an image file. cannot be used for future allocation or collected. static ImageSpace* CreateImageSpace(const std::string& image) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); virtual ~Space() {} @@ -282,7 +282,7 @@ class ImageSpace : public Space { // Mark the objects defined in this space in the given live bitmap void RecordImageAllocations(SpaceBitmap* live_bitmap) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); virtual bool IsAllocSpace() const { return false; diff --git a/src/space_bitmap.cc b/src/space_bitmap.cc index 439e637069..7a4c48dacf 100644 --- a/src/space_bitmap.cc +++ b/src/space_bitmap.cc @@ -174,7 +174,7 @@ static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callb // class. static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj, Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Visit fields of parent classes first. Class* super = klass->GetSuperClass(); if (super != NULL) { @@ -199,7 +199,7 @@ static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* call // For an unvisited object, visit it then all its children found via fields. 
static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (visited->Test(obj)) { return; } diff --git a/src/space_bitmap.h b/src/space_bitmap.h index db1a5eb326..02f003489f 100644 --- a/src/space_bitmap.h +++ b/src/space_bitmap.h @@ -109,7 +109,7 @@ class SpaceBitmap { template <typename Visitor, typename FingerVisitor> void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor, const FingerVisitor& finger_visitor) const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { DCHECK_LT(visit_begin, visit_end); const size_t word_span = kAlignment * kBitsPerWord; // Equals IndexToOffset(1). @@ -177,11 +177,11 @@ class SpaceBitmap { } void Walk(Callback* callback, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); void InOrderWalk(Callback* callback, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SweepWalk(const SpaceBitmap& live, const SpaceBitmap& mark, diff --git a/src/stack.h b/src/stack.h index 578c631141..60218b16a1 100644 --- a/src/stack.h +++ b/src/stack.h @@ -215,7 +215,7 @@ class StackVisitor { protected: StackVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack, Context* context) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : stack_start_(stack), trace_stack_(trace_stack), cur_shadow_frame_(NULL), cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(0), cur_depth_(0), context_(context) {} @@ -224,10 +224,10 @@ class StackVisitor { virtual ~StackVisitor() {} // Return 'true' if we should continue to visit more frames, 'false' to stop. - virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) = 0; + virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; void WalkStack(bool include_transitions = false) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Method* GetMethod() const { if (cur_shadow_frame_ != NULL) { @@ -255,19 +255,19 @@ class StackVisitor { return *reinterpret_cast<uintptr_t*>(save_addr); } - uint32_t GetDexPc() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + uint32_t GetDexPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the height of the stack in the managed stack frames, including transitions. - size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetNumFrames() - cur_depth_; } // Returns a frame ID for JDWP use, starting from 1. 
- size_t GetFrameId() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t GetFrameId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetFrameHeight() + 1; } - size_t GetNumFrames() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + size_t GetNumFrames() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (num_frames_ == 0) { num_frames_ = ComputeNumFrames(); } @@ -275,10 +275,10 @@ class StackVisitor { } uint32_t GetVReg(Method* m, int vreg) const - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetVReg(Method* m, int vreg, uint32_t new_value) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uintptr_t GetGPR(uint32_t reg) const; @@ -370,13 +370,13 @@ class StackVisitor { } private: - size_t ComputeNumFrames() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + size_t ComputeNumFrames() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); TraceStackFrame GetTraceStackFrame(uint32_t depth) const { return trace_stack_->at(trace_stack_->size() - depth - 1); } - void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const ManagedStack* const stack_start_; const std::vector<TraceStackFrame>* const trace_stack_; diff --git a/src/thread.cc b/src/thread.cc index c2bb1a63c1..57f99055d0 100644 --- a/src/thread.cc +++ b/src/thread.cc @@ -127,7 +127,7 @@ void* Thread::CreateCallback(void* arg) { static void SetVmData(const ScopedObjectAccess& soa, Object* managed_thread, Thread* native_thread) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_vmData); f->SetInt(managed_thread, reinterpret_cast<uintptr_t>(native_thread)); } @@ -137,9 +137,9 @@ Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, Object Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetInt(thread_peer))); // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_ // to stop it from going away. - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); if (result != NULL && !result->IsSuspended()) { - GlobalSynchronization::thread_list_lock_->AssertHeld(); + Locks::thread_list_lock_->AssertHeld(); } return result; } @@ -285,7 +285,7 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_g self->Init(); { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_NE(self->GetState(), kRunnable); self->SetState(kNative); } @@ -444,13 +444,13 @@ void Thread::GetThreadName(std::string& name) const { // Attempt to rectify locks so that we dump thread list with required locks before exiting. 
static void UnsafeLogFatalForSuspendCount(Thread* self) NO_THREAD_SAFETY_ANALYSIS { - GlobalSynchronization::thread_suspend_count_lock_->Unlock(); - GlobalSynchronization::mutator_lock_->SharedTryLock(); - if (!GlobalSynchronization::mutator_lock_->IsSharedHeld()) { + Locks::thread_suspend_count_lock_->Unlock(); + Locks::mutator_lock_->SharedTryLock(); + if (!Locks::mutator_lock_->IsSharedHeld()) { LOG(WARNING) << "Dumping thread list without holding mutator_lock_"; } - GlobalSynchronization::thread_list_lock_->TryLock(); - if (!GlobalSynchronization::thread_list_lock_->IsExclusiveHeld()) { + Locks::thread_list_lock_->TryLock(); + if (!Locks::thread_list_lock_->IsExclusiveHeld()) { LOG(WARNING) << "Dumping thread list without holding thread_list_lock_"; } std::ostringstream ss; @@ -462,7 +462,7 @@ void Thread::ModifySuspendCount(int delta, bool for_debugger) { DCHECK(delta == -1 || delta == +1 || delta == -debug_suspend_count_) << delta << " " << debug_suspend_count_ << " " << this; DCHECK_GE(suspend_count_, debug_suspend_count_) << this; - GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + Locks::thread_suspend_count_lock_->AssertHeld(); if (delta == -1 && suspend_count_ <= 0) { // This is expected if you attach a thread during a GC. @@ -494,7 +494,7 @@ void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) { ThreadState old_state = SetStateUnsafe(new_state); CHECK_EQ(old_state, kRunnable); // Release share on mutator_lock_. - GlobalSynchronization::mutator_lock_->SharedUnlock(); + Locks::mutator_lock_->SharedUnlock(); } ThreadState Thread::TransitionFromSuspendedToRunnable() { @@ -506,24 +506,24 @@ ThreadState Thread::TransitionFromSuspendedToRunnable() { // may occur is covered by the second check after we acquire a share of the mutator_lock_. if (GetSuspendCountUnsafe() > 0) { // Wait while our suspend count is non-zero. - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); - GlobalSynchronization::mutator_lock_->AssertNotHeld(); // Otherwise we starve GC.. + MutexLock mu(*Locks::thread_suspend_count_lock_); + Locks::mutator_lock_->AssertNotHeld(); // Otherwise we starve GC.. while (GetSuspendCount() != 0) { // Re-check when Thread::resume_cond_ is notified. - Thread::resume_cond_->Wait(*GlobalSynchronization::thread_suspend_count_lock_); + Thread::resume_cond_->Wait(*Locks::thread_suspend_count_lock_); } } // Re-acquire shared mutator_lock_ access. - GlobalSynchronization::mutator_lock_->SharedLock(); + Locks::mutator_lock_->SharedLock(); // Holding the mutator_lock_, synchronize with any thread trying to raise the suspend count // and change state to Runnable if no suspend is pending. - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); if (GetSuspendCount() == 0) { SetState(kRunnable); done = true; } else { // Release shared mutator_lock_ access and try again. 
- GlobalSynchronization::mutator_lock_->SharedUnlock(); + Locks::mutator_lock_->SharedUnlock(); } } while (!done); return old_state; @@ -539,14 +539,14 @@ Thread* Thread::SuspendForDebugger(jobject peer, bool request_suspension, bool* Thread* thread; { ScopedObjectAccess soa(Thread::Current()); - MutexLock mu(*GlobalSynchronization::thread_list_lock_); + MutexLock mu(*Locks::thread_list_lock_); thread = Thread::FromManagedThread(soa, peer); if (thread == NULL) { LOG(WARNING) << "No such thread for suspend: " << peer; return NULL; } { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); if (request_suspension) { thread->ModifySuspendCount(+1, true /* for_debugger */); request_suspension = false; @@ -629,7 +629,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { if (is_daemon) { os << " daemon"; } - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); os << " prio=" << priority << " tid=" << thread->GetThinLockId() << " " << thread->GetState() << "\n"; @@ -640,7 +640,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { } if (thread != NULL) { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); os << " | group=\"" << group_name << "\"" << " sCount=" << thread->suspend_count_ << " dsCount=" << thread->debug_suspend_count_ @@ -690,7 +690,7 @@ void Thread::DumpState(std::ostream& os) const { struct StackDumpVisitor : public StackVisitor { StackDumpVisitor(std::ostream& os, const Thread* thread, Context* context, bool can_allocate) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(thread->GetManagedStack(), thread->GetTraceStack(), context), os(os), thread(thread), can_allocate(can_allocate), last_method(NULL), last_line_number(0), repetition_count(0), frame_count(0) { @@ -702,7 +702,7 @@ struct StackDumpVisitor : public StackVisitor { } } - bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* m = GetMethod(); if (m->IsRuntimeMethod()) { return true; @@ -762,7 +762,7 @@ void Thread::DumpStack(std::ostream& os) const { // If we're currently in native code, dump that stack before dumping the managed stack. ThreadState state; { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); state = GetState(); } if (state == kNative) { @@ -787,7 +787,7 @@ void Thread::ThreadExitCallback(void* arg) { void Thread::Startup() { { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); // Keep GCC happy. + MutexLock mu(*Locks::thread_suspend_count_lock_); // Keep GCC happy. 
resume_cond_ = new ConditionVariable("Thread resumption condition variable"); } @@ -920,7 +920,7 @@ Thread::~Thread() { jni_env_ = NULL; { - MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_); + MutexLock mu(*Locks::thread_suspend_count_lock_); CHECK_NE(GetState(), kRunnable); SetState(kTerminated); } @@ -1078,11 +1078,11 @@ class CountStackDepthVisitor : public StackVisitor { public: CountStackDepthVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, NULL), depth_(0), skip_depth_(0), skipping_(true) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // We want to skip frames up to and including the exception's constructor. // Note we also skip the frame if it doesn't have a method (namely the callee // save frame) @@ -1124,7 +1124,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor { skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL), method_trace_(NULL) {} bool Init(int depth, const ScopedObjectAccess& soa) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Allocate method trace with an extra slot that will hold the PC trace SirtRef<ObjectArray<Object> > method_trace(Runtime::Current()->GetClassLinker()->AllocObjectArray<Object>(depth + 1)); @@ -1153,7 +1153,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor { } } - bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (method_trace_ == NULL || dex_pc_trace_ == NULL) { return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. } @@ -1513,7 +1513,7 @@ static const bool kDebugExceptionDelivery = false; class CatchBlockStackVisitor : public StackVisitor { public: CatchBlockStackVisitor(Thread* self, Throwable* exception) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(self->GetManagedStack(), self->GetTraceStack(), self->GetLongJumpContext()), self_(self), exception_(exception), to_find_(exception->GetClass()), throw_method_(NULL), throw_frame_id_(0), throw_dex_pc_(0), handler_quick_frame_(NULL), @@ -1527,8 +1527,8 @@ class CatchBlockStackVisitor : public StackVisitor { LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump. } - bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* method = GetMethod(); if (method == NULL) { // This is the upcall, we remember the frame and last pc so that we may long jump to them. @@ -1570,7 +1570,7 @@ class CatchBlockStackVisitor : public StackVisitor { return true; // Continue stack walk. 
} - void DoLongJump() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* catch_method = *handler_quick_frame_; Dbg::PostException(self_, throw_frame_id_, throw_method_, throw_dex_pc_, catch_method, handler_dex_pc_, exception_); @@ -1650,10 +1650,10 @@ Method* Thread::GetCurrentMethod(uint32_t* dex_pc, size_t* frame_id) const { struct CurrentMethodVisitor : public StackVisitor { CurrentMethodVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, NULL), method_(NULL), dex_pc_(0), frame_id_(0) {} - virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Method* m = GetMethod(); if (m->IsRuntimeMethod()) { // Continue if this is a runtime method. @@ -1691,10 +1691,10 @@ class ReferenceMapVisitor : public StackVisitor { public: ReferenceMapVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack, Context* context, Heap::RootVisitor* root_visitor, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(stack, trace_stack, context), root_visitor_(root_visitor), arg_(arg) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (false) { LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod()) << StringPrintf("@ PC:%04x", GetDexPc()); diff --git a/src/thread.h b/src/thread.h index 8dce4142cb..9355dca617 100644 --- a/src/thread.h +++ b/src/thread.h @@ -121,11 +121,11 @@ class PACKED Thread { } static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, Object* thread_peer) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, jobject thread) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Translates 172 to pAllocArrayFromCode and so on. static void DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers); @@ -135,79 +135,79 @@ class PACKED Thread { // Dumps the detailed thread state and the thread stack (used for SIGQUIT). void Dump(std::ostream& os) const - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which // case we use 'tid' to identify the thread, and we'll include as much information as we can. 
static void DumpState(std::ostream& os, const Thread* thread, pid_t tid) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_); ThreadState GetState() const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { - GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { + Locks::thread_suspend_count_lock_->AssertHeld(); return state_; } ThreadState SetState(ThreadState new_state) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { - GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { + Locks::thread_suspend_count_lock_->AssertHeld(); ThreadState old_state = state_; if (new_state == kRunnable) { // Sanity, should never become runnable with a pending suspension and should always hold // share of mutator_lock_. CHECK_EQ(GetSuspendCount(), 0); - GlobalSynchronization::mutator_lock_->AssertSharedHeld(); + Locks::mutator_lock_->AssertSharedHeld(); } state_ = new_state; return old_state; } int GetSuspendCount() const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { - GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { + Locks::thread_suspend_count_lock_->AssertHeld(); return suspend_count_; } int GetDebugSuspendCount() const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { - GlobalSynchronization::thread_suspend_count_lock_->AssertHeld(); + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { + Locks::thread_suspend_count_lock_->AssertHeld(); return debug_suspend_count_; } bool IsSuspended() const - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) { + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { int suspend_count = GetSuspendCount(); return suspend_count != 0 && GetState() != kRunnable; } void ModifySuspendCount(int delta, bool for_debugger) - EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_); + EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_); // Called when thread detected that the thread_suspend_count_ was non-zero. Gives up share of // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero. void FullSuspendCheck() - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Transition from non-runnable to runnable state acquiring share on mutator_lock_. ThreadState TransitionFromSuspendedToRunnable() - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_); // Transition from runnable into a state where mutator privileges are denied. Releases share of // mutator lock. void TransitionFromRunnableToSuspended(ThreadState new_state) - LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_) - UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_); + LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + UNLOCK_FUNCTION(Locks::mutator_lock_); // Wait for a debugger suspension on the thread associated with the given peer. 
   // Wait for a debugger suspension on the thread associated with the given peer. Returns the
   // thread on success, else NULL. If the thread should be suspended then request_suspension should
   // be true on entry. If the suspension times out then *timeout is set to true.
   static Thread* SuspendForDebugger(jobject peer, bool request_suspension, bool* timeout)
-      LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_,
-                     GlobalSynchronization::thread_list_lock_,
-                     GlobalSynchronization::thread_suspend_count_lock_);
+      LOCKS_EXCLUDED(Locks::mutator_lock_,
+                     Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);

   // Once called thread suspension will cause an assertion failure.
 #ifndef NDEBUG
@@ -286,16 +286,16 @@ class PACKED Thread {

   // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
   String* GetThreadName(const ScopedObjectAccessUnchecked& ts) const
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
   // allocation, or locking.
   void GetThreadName(std::string& name) const;

   // Sets the thread's name.
-  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

-  Object* GetPeer() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return peer_;
   }

@@ -304,7 +304,7 @@ class PACKED Thread {
   }

   Object* GetThreadGroup(const ScopedObjectAccessUnchecked& ts) const
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   RuntimeStats* GetStats() {
     return &stats_;
@@ -316,7 +316,7 @@ class PACKED Thread {
     return exception_ != NULL;
   }

-  Throwable* GetException() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(CanAccessDirectReferences());
     return exception_;
   }
@@ -324,7 +324,7 @@ class PACKED Thread {
   void AssertNoPendingException() const;

   void SetException(Throwable* new_exception)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(CanAccessDirectReferences());
     CHECK(new_exception != NULL);
     // TODO: CHECK(exception_ == NULL);
@@ -336,7 +336,7 @@ class PACKED Thread {
   }

   // Find catch block and perform long jump to appropriate exception handler
-  void DeliverException() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  void DeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   Context* GetLongJumpContext();
   void ReleaseLongJumpContext(Context* context) {
@@ -345,7 +345,7 @@ class PACKED Thread {
   }

   Method* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   void SetTopOfStack(void* stack, uintptr_t pc) {
     Method** top_method = reinterpret_cast<Method**>(stack);
@@ -359,24 +359,24 @@ class PACKED Thread {

   // If 'msg' is NULL, no detail message is set.
   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
   // used as the new exception's cause.
   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
       __attribute__((format(printf, 3, 4)))
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // OutOfMemoryError is special, because we need to pre-allocate an instance.
   // Only the GC should call this.
   void ThrowOutOfMemoryError(const char* msg)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   //QuickFrameIterator FindExceptionHandler(void* throw_pc, void** handler_pc);

@@ -398,7 +398,7 @@ class PACKED Thread {
   Object* DecodeJObject(jobject obj)
       LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                      JavaVMExt::weak_globals_lock)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Implements java.lang.Thread.interrupted.
   bool Interrupted() {
@@ -429,7 +429,7 @@ class PACKED Thread {
   }

   ClassLoader* GetClassLoaderOverride()
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(CanAccessDirectReferences());
     return class_loader_override_;
   }
@@ -441,7 +441,7 @@ class PACKED Thread {
   // Create the internal representation of a stack trace, that is more time
   // and space efficient to compute than the StackTraceElement[]
   jobject CreateInternalStackTrace(const ScopedObjectAccess& soa) const
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
   // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
@@ -451,12 +451,12 @@ class PACKED Thread {
       jobjectArray output_array = NULL, int* stack_depth = NULL);

   void VisitRoots(Heap::RootVisitor* visitor, void* arg)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 #if VERIFY_OBJECT_ENABLED
-  void VerifyStack() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 #else
-  void VerifyStack() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_){}
+  void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){}
 #endif

   //
@@ -493,7 +493,7 @@ class PACKED Thread {
   }

   // Set the stack end to that to be used during a stack overflow
-  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Set the stack end to that to be used during regular execution
   void ResetDefaultStackEnd() {
@@ -608,8 +608,8 @@ class PACKED Thread {
   typedef uint32_t bool32_t;

   explicit Thread(bool daemon);
-  ~Thread() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_,
-                           GlobalSynchronization::thread_suspend_count_lock_);
+  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
+                           Locks::thread_suspend_count_lock_);
   void Destroy();
   friend class ThreadList;  // For ~Thread and Destroy.
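ThrowNewExceptionF above combines a thread-safety annotation with __attribute__((format(printf, 3, 4))). For a non-static member function the implicit this pointer counts as argument 1, so argument 3 is the format string and checking starts at argument 4. A standalone illustration (hypothetical class, not part of this patch):

    class Reporter {
     public:
      // 'this' = 1, tag = 2, fmt = 3, variadic arguments start at 4.
      void Warn(const char* tag, const char* fmt, ...)
          __attribute__((format(printf, 3, 4)));
    };
    // Under -Wformat, a call such as Warn("gc", "freed %s bytes", 42)
    // is diagnosed at compile time because %s does not match an int.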
@@ -640,20 +640,20 @@ class PACKED Thread {
   void DumpState(std::ostream& os) const;
   void DumpStack(std::ostream& os) const
-      LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Out-of-line conveniences for debugging in gdb.
   static Thread* CurrentFromGdb();  // Like Thread::Current.
   // Like Thread::Dump(std::cerr).
-  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   static void* CreateCallback(void* arg);

   void HandleUncaughtExceptions(const ScopedObjectAccess& soa)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void RemoveFromThreadGroup(const ScopedObjectAccess& soa)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   void Init();
   void InitCardTable();
@@ -677,13 +677,13 @@ class PACKED Thread {
   // Used to notify threads that they should attempt to resume, they will suspend again if
   // their suspend count is > 0.
   static ConditionVariable* resume_cond_
-      GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_);
+      GUARDED_BY(Locks::thread_suspend_count_lock_);

   // --- Frequently accessed fields first for short offsets ---

   // A non-zero value is used to tell the current thread to enter a safe point
   // at the next poll.
-  int suspend_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_);
+  int suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

   // The biased card table, see CardTable for details
   byte* card_table_;
@@ -706,7 +706,7 @@ class PACKED Thread {
   // is hard. This field can be read off of Thread::Current to give the address.
   Thread* self_;

-  volatile ThreadState state_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_);
+  volatile ThreadState state_ GUARDED_BY(Locks::thread_suspend_count_lock_);

   // Our managed peer (an instance of java.lang.Thread).
   Object* peer_;
@@ -760,7 +760,7 @@ class PACKED Thread {
   // How much of 'suspend_count_' is by request of the debugger, used to set things right
   // when the debugger detaches. Must be <= suspend_count_.
-  int debug_suspend_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_);
+  int debug_suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

   // JDWP invoke-during-breakpoint support.
   DebugInvokeReq* debug_invoke_req_;
diff --git a/src/thread_list.cc b/src/thread_list.cc
index 6008e16003..c1db387e0f 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -61,12 +61,12 @@ bool ThreadList::Contains(pid_t tid) {
 }

 pid_t ThreadList::GetLockOwner() {
-  return GlobalSynchronization::thread_list_lock_->GetExclusiveOwnerTid();
+  return Locks::thread_list_lock_->GetExclusiveOwnerTid();
 }

 void ThreadList::DumpForSigQuit(std::ostream& os) {
   {
-    MutexLock mu(*GlobalSynchronization::thread_list_lock_);
+    MutexLock mu(*Locks::thread_list_lock_);
     DumpLocked(os);
   }
   DumpUnattachedThreads(os);
@@ -97,7 +97,7 @@ void ThreadList::DumpUnattachedThreads(std::ostream& os) {
     if (!*end) {
       bool contains;
       {
-        MutexLock mu(*GlobalSynchronization::thread_list_lock_);
+        MutexLock mu(*Locks::thread_list_lock_);
         contains = Contains(tid);
       }
       if (!contains) {
@@ -109,7 +109,7 @@ void ThreadList::DumpUnattachedThreads(std::ostream& os) {
 }

 void ThreadList::DumpLocked(std::ostream& os) {
-  GlobalSynchronization::thread_list_lock_->AssertHeld();
+  Locks::thread_list_lock_->AssertHeld();
   os << "DALVIK THREADS (" << list_.size() << "):\n";
   for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
     (*it)->Dump(os);
@@ -118,8 +118,8 @@ void ThreadList::DumpLocked(std::ostream& os) {
 }

 void ThreadList::AssertThreadsAreSuspended() {
-  MutexLock mu(*GlobalSynchronization::thread_list_lock_);
-  MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_);
+  MutexLock mu(*Locks::thread_list_lock_);
+  MutexLock mu2(*Locks::thread_suspend_count_lock_);
   for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
     Thread* thread = *it;
     CHECK_NE(thread->GetState(), kRunnable);
@@ -134,12 +134,12 @@ static void UnsafeLogFatalForThreadSuspendAllTimeout() NO_THREAD_SAFETY_ANALYSIS
   ss << "Thread suspend timeout\n";
   runtime->DumpLockHolders(ss);
   ss << "\n";
-  GlobalSynchronization::mutator_lock_->SharedTryLock();
-  if (!GlobalSynchronization::mutator_lock_->IsSharedHeld()) {
+  Locks::mutator_lock_->SharedTryLock();
+  if (!Locks::mutator_lock_->IsSharedHeld()) {
     LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
   }
-  GlobalSynchronization::thread_list_lock_->TryLock();
-  if (!GlobalSynchronization::thread_list_lock_->IsExclusiveHeld()) {
+  Locks::thread_list_lock_->TryLock();
+  if (!Locks::thread_list_lock_->IsExclusiveHeld()) {
     LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
   }
   runtime->GetThreadList()->DumpLocked(ss);
@@ -153,16 +153,16 @@ void ThreadList::SuspendAll() {
   VLOG(threads) << *self << " SuspendAll starting...";

   if (kIsDebugBuild) {
-    GlobalSynchronization::mutator_lock_->AssertNotHeld();
-    GlobalSynchronization::thread_list_lock_->AssertNotHeld();
-    GlobalSynchronization::thread_suspend_count_lock_->AssertNotHeld();
-    MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_);
+    Locks::mutator_lock_->AssertNotHeld();
+    Locks::thread_list_lock_->AssertNotHeld();
+    Locks::thread_suspend_count_lock_->AssertNotHeld();
+    MutexLock mu(*Locks::thread_suspend_count_lock_);
     CHECK_NE(self->GetState(), kRunnable);
   }
   {
-    MutexLock mu(*GlobalSynchronization::thread_list_lock_);
+    MutexLock mu(*Locks::thread_list_lock_);
     {
-      MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_);
+      MutexLock mu2(*Locks::thread_suspend_count_lock_);
       // Update global suspend all state for attaching threads.
       ++suspend_all_count_;
       // Increment everybody's suspend count (except our own).
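Every MutexLock mu(*Locks::...) in this file is a scoped guard, so the lock is released on any path out of the enclosing block, including the early returns in DumpUnattachedThreads. A minimal sketch of the shape such a guard takes, assuming Lock()/Unlock() entry points on Mutex (the same idea as std::lock_guard):

    class ScopedMutexLock {
     public:
      explicit ScopedMutexLock(Mutex& mu) : mu_(mu) { mu_.Lock(); }
      ~ScopedMutexLock() { mu_.Unlock(); }  // Runs on every exit path.
     private:
      Mutex& mu_;
      DISALLOW_COPY_AND_ASSIGN(ScopedMutexLock);  // Macro as used elsewhere in the tree.
    };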
@@ -183,11 +183,11 @@ void ThreadList::SuspendAll() {
   timespec timeout;
   clock_gettime(CLOCK_REALTIME, &timeout);
   timeout.tv_sec += 30;
-  if (UNLIKELY(!GlobalSynchronization::mutator_lock_->ExclusiveLockWithTimeout(timeout))) {
+  if (UNLIKELY(!Locks::mutator_lock_->ExclusiveLockWithTimeout(timeout))) {
     UnsafeLogFatalForThreadSuspendAllTimeout();
   }
 #else
-  GlobalSynchronization::mutator_lock_->ExclusiveLock();
+  Locks::mutator_lock_->ExclusiveLock();
 #endif

   // Debug check that all threads are suspended.
@@ -201,8 +201,8 @@ void ThreadList::ResumeAll() {
   VLOG(threads) << *self << " ResumeAll starting";
   {
-    MutexLock mu(*GlobalSynchronization::thread_list_lock_);
-    MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_);
+    MutexLock mu(*Locks::thread_list_lock_);
+    MutexLock mu2(*Locks::thread_suspend_count_lock_);
     // Update global suspend all state for attaching threads.
     --suspend_all_count_;
     // Decrement the suspend counts for all threads.
@@ -219,7 +219,7 @@ void ThreadList::ResumeAll() {
     VLOG(threads) << *self << " ResumeAll waking others";
     Thread::resume_cond_->Broadcast();
   }
-  GlobalSynchronization::mutator_lock_->ExclusiveUnlock();
+  Locks::mutator_lock_->ExclusiveUnlock();
   VLOG(threads) << *self << " ResumeAll complete";
 }

@@ -229,9 +229,9 @@ void ThreadList::Resume(Thread* thread, bool for_debugger) {

   {
     // To check Contains.
-    MutexLock mu(*GlobalSynchronization::thread_list_lock_);
+    MutexLock mu(*Locks::thread_list_lock_);
     // To check IsSuspended.
-    MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_);
+    MutexLock mu2(*Locks::thread_suspend_count_lock_);
     CHECK(thread->IsSuspended());
     if (!Contains(thread)) {
       return;
@@ -241,7 +241,7 @@ void ThreadList::Resume(Thread* thread, bool for_debugger) {

   {
     VLOG(threads) << "Resume(" << *thread << ") waking others";
-    MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_);
+    MutexLock mu(*Locks::thread_suspend_count_lock_);
     Thread::resume_cond_->Broadcast();
   }

@@ -255,9 +255,9 @@ void ThreadList::SuspendAllForDebugger() {
   VLOG(threads) << *self << " SuspendAllForDebugger starting...";

   {
-    MutexLock mu(*GlobalSynchronization::thread_list_lock_);
+    MutexLock mu(*Locks::thread_list_lock_);
     {
-      MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_);
+      MutexLock mu(*Locks::thread_suspend_count_lock_);
       // Update global suspend all state for attaching threads.
       ++suspend_all_count_;
       ++debug_suspend_all_count_;
@@ -280,14 +280,14 @@ void ThreadList::SuspendAllForDebugger() {
   timespec timeout;
   clock_gettime(CLOCK_REALTIME, &timeout);
   timeout.tv_sec += 30;
-  if (!GlobalSynchronization::mutator_lock_->ExclusiveLockWithTimeout(timeout)) {
+  if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(timeout)) {
     UnsafeLogFatalForThreadSuspendAllTimeout();
   } else {
-    GlobalSynchronization::mutator_lock_->ExclusiveUnlock();
+    Locks::mutator_lock_->ExclusiveUnlock();
   }
 #else
-  GlobalSynchronization::mutator_lock_->ExclusiveLock();
-  GlobalSynchronization::mutator_lock_->ExclusiveUnlock();
+  Locks::mutator_lock_->ExclusiveLock();
+  Locks::mutator_lock_->ExclusiveUnlock();
 #endif

   AssertThreadsAreSuspended();
@@ -305,7 +305,7 @@ void ThreadList::SuspendSelfForDebugger() {
   // Collisions with other suspends aren't really interesting. We want
   // to ensure that we're the only one fiddling with the suspend count
   // though.
-  MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_);
+  MutexLock mu(*Locks::thread_suspend_count_lock_);
   self->ModifySuspendCount(+1, true);

   // Suspend ourselves.
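SuspendAll and SuspendAllForDebugger both build an absolute 30-second CLOCK_REALTIME deadline before attempting the exclusive lock. The same pattern with a plain POSIX rwlock, as a self-contained sketch (not ART's ReaderWriterMutex):

    #include <pthread.h>
    #include <time.h>

    bool TimedWriteLock(pthread_rwlock_t* lock, long timeout_seconds) {
      timespec deadline;
      clock_gettime(CLOCK_REALTIME, &deadline);  // Absolute deadline, as above.
      deadline.tv_sec += timeout_seconds;
      return pthread_rwlock_timedwrlock(lock, &deadline) == 0;
    }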
@@ -319,7 +319,7 @@ void ThreadList::SuspendSelfForDebugger() {
   Dbg::ClearWaitForEventThread();

   while (self->suspend_count_ != 0) {
-    Thread::resume_cond_->Wait(*GlobalSynchronization::thread_suspend_count_lock_);
+    Thread::resume_cond_->Wait(*Locks::thread_suspend_count_lock_);
     if (self->suspend_count_ != 0) {
       // The condition was signaled but we're still suspended. This
       // can happen if the debugger lets go while a SIGQUIT thread
@@ -340,8 +340,8 @@ void ThreadList::UndoDebuggerSuspensions() {
   VLOG(threads) << *self << " UndoDebuggerSuspensions starting";

   {
-    MutexLock mu(*GlobalSynchronization::thread_list_lock_);
-    MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_);
+    MutexLock mu(*Locks::thread_list_lock_);
+    MutexLock mu2(*Locks::thread_suspend_count_lock_);
     // Update global suspend all state for attaching threads.
     suspend_all_count_ -= debug_suspend_all_count_;
     debug_suspend_all_count_ = 0;
@@ -356,7 +356,7 @@ void ThreadList::UndoDebuggerSuspensions() {
   }

   {
-    MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_);
+    MutexLock mu(*Locks::thread_suspend_count_lock_);
     Thread::resume_cond_->Broadcast();
   }

@@ -364,8 +364,8 @@ void ThreadList::UndoDebuggerSuspensions() {
 }

 void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
-  GlobalSynchronization::mutator_lock_->AssertNotHeld();
-  MutexLock mu(*GlobalSynchronization::thread_list_lock_);
+  Locks::mutator_lock_->AssertNotHeld();
+  MutexLock mu(*Locks::thread_list_lock_);
   bool all_threads_are_daemons;
   do {
     all_threads_are_daemons = true;
@@ -380,15 +380,15 @@ void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
     }
     if (!all_threads_are_daemons) {
       // Wait for another thread to exit before re-checking.
-      thread_exit_cond_.Wait(*GlobalSynchronization::thread_list_lock_);
+      thread_exit_cond_.Wait(*Locks::thread_list_lock_);
     }
   } while(!all_threads_are_daemons);
 }

 void ThreadList::SuspendAllDaemonThreads() {
-  MutexLock mu(*GlobalSynchronization::thread_list_lock_);
+  MutexLock mu(*Locks::thread_list_lock_);
   { // Tell all the daemons it's time to suspend.
-    MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_);
+    MutexLock mu2(*Locks::thread_suspend_count_lock_);
     for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
       Thread* thread = *it;
       // This is only run after all non-daemon threads have exited, so the remainder should all be
@@ -406,7 +406,7 @@ void ThreadList::SuspendAllDaemonThreads() {
     bool all_suspended = true;
     for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
       Thread* thread = *it;
-      MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_);
+      MutexLock mu2(*Locks::thread_suspend_count_lock_);
       if (thread != Thread::Current() && thread->GetState() == kRunnable) {
         if (!have_complained) {
           LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
@@ -432,8 +432,8 @@ void ThreadList::Register(Thread* self) {

   // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing
   // SuspendAll requests.
-  MutexLock mu(*GlobalSynchronization::thread_list_lock_);
-  MutexLock mu2(*GlobalSynchronization::thread_suspend_count_lock_);
+  MutexLock mu(*Locks::thread_list_lock_);
+  MutexLock mu2(*Locks::thread_suspend_count_lock_);
   self->suspend_count_ = suspend_all_count_;
   self->debug_suspend_count_ = debug_suspend_all_count_;
   CHECK(!Contains(self));
@@ -451,7 +451,7 @@ void ThreadList::Unregister(Thread* self) {

   {
     // Remove this thread from the list.
-    MutexLock mu(*GlobalSynchronization::thread_list_lock_);
+    MutexLock mu(*Locks::thread_list_lock_);
     CHECK(Contains(self));
     list_.remove(self);
   }
@@ -466,7 +466,7 @@ void ThreadList::Unregister(Thread* self) {
   CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, NULL), "detach self");

   // Signal that a thread just detached.
-  MutexLock mu(*GlobalSynchronization::thread_list_lock_);
+  MutexLock mu(*Locks::thread_list_lock_);
   thread_exit_cond_.Signal();
 }

@@ -477,7 +477,7 @@ void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
 }

 void ThreadList::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
-  MutexLock mu(*GlobalSynchronization::thread_list_lock_);
+  MutexLock mu(*Locks::thread_list_lock_);
   for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
     (*it)->VisitRoots(visitor, arg);
   }
diff --git a/src/thread_list.h b/src/thread_list.h
index e5b911489a..b80c1a5ecf 100644
--- a/src/thread_list.h
+++ b/src/thread_list.h
@@ -34,57 +34,57 @@ class ThreadList {
   ~ThreadList();

   void DumpForSigQuit(std::ostream& os)
-      LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      LOCKS_EXCLUDED(Locks::thread_list_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void DumpLocked(std::ostream& os)  // For thread suspend timeout dumps.
-      EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   pid_t GetLockOwner();  // For SignalCatcher.

   // Thread suspension support.
   void ResumeAll()
-      UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_)
-      LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_,
-                     GlobalSynchronization::thread_suspend_count_lock_);
+      UNLOCK_FUNCTION(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);
   void Resume(Thread* thread, bool for_debugger = false)
-      LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_);
+      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);

   // Suspends all threads and gets exclusive access to the mutator_lock_.
   void SuspendAll()
-      EXCLUSIVE_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_)
-      LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_,
-                     GlobalSynchronization::thread_suspend_count_lock_);
+      EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);

   // Suspends all threads
   void SuspendAllForDebugger()
-      LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_,
-                     GlobalSynchronization::thread_list_lock_,
-                     GlobalSynchronization::thread_suspend_count_lock_);
+      LOCKS_EXCLUDED(Locks::mutator_lock_,
+                     Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);

   void SuspendSelfForDebugger()
-      LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_);
+      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);

   void UndoDebuggerSuspensions()
-      LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_,
-                     GlobalSynchronization::thread_suspend_count_lock_);
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);

   // Iterates over all the threads.
   void ForEach(void (*callback)(Thread*, void*), void* context)
-      EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_);
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
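Unregister above signals thread_exit_cond_ under thread_list_lock_, and WaitForOtherNonDaemonThreadsToExit re-checks the list after every wakeup. The same handshake in portable form, as a sketch using std::condition_variable rather than the runtime's ConditionVariable (Thread::IsDaemon() is an assumed accessor here):

    #include <condition_variable>
    #include <list>
    #include <mutex>

    std::mutex list_mutex;              // Stands in for thread_list_lock_.
    std::condition_variable exit_cond;  // Stands in for thread_exit_cond_.
    std::list<Thread*> threads;         // Guarded by list_mutex.

    void WaitForOtherNonDaemonsSketch(Thread* self) {
      std::unique_lock<std::mutex> lock(list_mutex);
      for (;;) {
        bool all_daemons = true;
        for (std::list<Thread*>::const_iterator it = threads.begin();
             it != threads.end(); ++it) {
          if (*it != self && !(*it)->IsDaemon()) {
            all_daemons = false;  // A non-daemon is still registered.
            break;
          }
        }
        if (all_daemons) {
          return;
        }
        exit_cond.wait(lock);  // The exiting thread notifies after removing itself.
      }
    }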
   // Add/remove current thread from list.
   void Register(Thread* self)
-      LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_,
-                     GlobalSynchronization::thread_list_lock_);
+      LOCKS_EXCLUDED(Locks::mutator_lock_,
+                     Locks::thread_list_lock_);
   void Unregister(Thread* self)
-      LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_,
-                     GlobalSynchronization::thread_list_lock_);
+      LOCKS_EXCLUDED(Locks::mutator_lock_,
+                     Locks::thread_list_lock_);

   void VisitRoots(Heap::RootVisitor* visitor, void* arg) const
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Return a copy of the thread list.
-  std::list<Thread*> GetList() EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_) {
+  std::list<Thread*> GetList() EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) {
     return list_;
   }

@@ -94,35 +94,35 @@ class ThreadList {
   uint32_t AllocThreadId();
   void ReleaseThreadId(uint32_t id) LOCKS_EXCLUDED(allocated_ids_lock_);

-  bool Contains(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_);
-  bool Contains(pid_t tid) EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_list_lock_);
+  bool Contains(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
+  bool Contains(pid_t tid) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);

   void DumpUnattachedThreads(std::ostream& os)
-      LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_);
+      LOCKS_EXCLUDED(Locks::thread_list_lock_);

   void SuspendAllDaemonThreads()
-      LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_,
-                     GlobalSynchronization::thread_suspend_count_lock_);
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);
   void WaitForOtherNonDaemonThreadsToExit()
-      LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_,
-                     GlobalSynchronization::thread_suspend_count_lock_);
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);

   void AssertThreadsAreSuspended()
-      LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_,
-                     GlobalSynchronization::thread_suspend_count_lock_);
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);

   mutable Mutex allocated_ids_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(allocated_ids_lock_);

   // The actual list of all threads.
-  std::list<Thread*> list_ GUARDED_BY(GlobalSynchronization::thread_list_lock_);
+  std::list<Thread*> list_ GUARDED_BY(Locks::thread_list_lock_);

   // Ongoing suspend all requests, used to ensure threads added to list_ respect SuspendAll.
-  int suspend_all_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_);
-  int debug_suspend_all_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_);
+  int suspend_all_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);
+  int debug_suspend_all_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

   // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
-  ConditionVariable thread_exit_cond_ GUARDED_BY(GlobalSynchronization::thread_list_lock_);
+  ConditionVariable thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

   friend class Thread;
diff --git a/src/trace.cc b/src/trace.cc
index 5ac7e3d6f8..e4bc83663c 100644
--- a/src/trace.cc
+++ b/src/trace.cc
@@ -159,7 +159,7 @@ static void Append8LE(uint8_t* buf, uint64_t val) {
 }

 static bool InstallStubsClassVisitor(Class* klass, void*)
-    SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   Trace* tracer = Runtime::Current()->GetTracer();
   for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
     Method* method = klass->GetDirectMethod(i);
@@ -178,7 +178,7 @@ static bool InstallStubsClassVisitor(Class* klass, void*)
 }

 static bool UninstallStubsClassVisitor(Class* klass, void*)
-    SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   Trace* tracer = Runtime::Current()->GetTracer();
   for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
     Method* method = klass->GetDirectMethod(i);
@@ -489,8 +489,8 @@ static void DumpThread(Thread* t, void* arg) {
 }

 void Trace::DumpThreadList(std::ostream& os) {
-  GlobalSynchronization::thread_list_lock_->AssertNotHeld();
-  MutexLock mu(*GlobalSynchronization::thread_list_lock_);
+  Locks::thread_list_lock_->AssertNotHeld();
+  MutexLock mu(*Locks::thread_list_lock_);
   Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os);
 }

@@ -499,9 +499,9 @@ void Trace::InstallStubs() {
 }

 void Trace::UninstallStubs() {
-  GlobalSynchronization::thread_list_lock_->AssertNotHeld();
+  Locks::thread_list_lock_->AssertNotHeld();
   Runtime::Current()->GetClassLinker()->VisitClasses(UninstallStubsClassVisitor, NULL);
-  MutexLock mu(*GlobalSynchronization::thread_list_lock_);
+  MutexLock mu(*Locks::thread_list_lock_);
   Runtime::Current()->GetThreadList()->ForEach(TraceRestoreStack, NULL);
 }

diff --git a/src/trace.h b/src/trace.h
index c60ae1576b..9c64347b8b 100644
--- a/src/trace.h
+++ b/src/trace.h
@@ -83,18 +83,18 @@ class Trace {
   explicit Trace(File* trace_file, int buffer_size, int flags);

   void BeginTracing();
-  void FinishTracing() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  void FinishTracing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Replaces code of each method with a pointer to a stub for method tracing.
   void InstallStubs();

   // Restores original code for each method and fixes the return values of each thread's stack.
-  void UninstallStubs() LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_);
+  void UninstallStubs() LOCKS_EXCLUDED(Locks::thread_list_lock_);

   // Methods to output traced methods and threads.
   void GetVisitedMethods(size_t end_offset);
-  void DumpMethodList(std::ostream& os) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
-  void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(GlobalSynchronization::thread_list_lock_);
+  void DumpMethodList(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(Locks::thread_list_lock_);

   // Maps a method to its original code pointer.
   SafeMap<const Method*, const void*> saved_code_map_;
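The hunk that follows annotates ComputeUtf16Hash, which implements the java.lang.String hashCode() recurrence h = 31*h + c over the UTF-16 code units. A self-contained sketch of the raw-array overload:

    #include <stddef.h>
    #include <stdint.h>

    int32_t ComputeUtf16HashSketch(const uint16_t* chars, size_t char_count) {
      int32_t hash = 0;
      for (size_t i = 0; i < char_count; ++i) {
        hash = hash * 31 + chars[i];  // s[0]*31^(n-1) + ... + s[n-1], mod 2^32.
      }
      return hash;
    }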
@@ -65,7 +65,7 @@ void ConvertUtf16ToModifiedUtf8(char* utf8_out, const uint16_t* utf16_in, size_t
 /*
  * The java.lang.String hashCode() algorithm.
  */
 int32_t ComputeUtf16Hash(const CharArray* chars, int32_t offset, size_t char_count)
-    SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 int32_t ComputeUtf16Hash(const uint16_t* chars, size_t char_count);

 /*
diff --git a/src/utils.h b/src/utils.h
index 2846dadb6b..335a66951c 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -168,18 +168,18 @@ std::string PrettyDescriptor(const String* descriptor);
 std::string PrettyDescriptor(const std::string& descriptor);
 std::string PrettyDescriptor(Primitive::Type type);
 std::string PrettyDescriptor(const Class* klass)
-    SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 // Returns a human-readable signature for 'f'. Something like "a.b.C.f" or
 // "int a.b.C.f" (depending on the value of 'with_type').
 std::string PrettyField(const Field* f, bool with_type = true)
-    SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 std::string PrettyField(uint32_t field_idx, const DexFile& dex_file, bool with_type = true);

 // Returns a human-readable signature for 'm'. Something like "a.b.C.m" or
 // "a.b.C.m(II)V" (depending on the value of 'with_signature').
 std::string PrettyMethod(const Method* m, bool with_signature = true)
-    SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with_signature = true);

 // Returns a human-readable form of the name of the *class* of the given object.
@@ -187,16 +187,16 @@ std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with
 // be "java.lang.String". Given an array of int, the output would be "int[]".
 // Given String.class, the output would be "java.lang.Class<java.lang.String>".
 std::string PrettyTypeOf(const Object* obj)
-    SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 // Returns a human-readable form of the name of the given class.
 // Given String.class, the output would be "java.lang.Class<java.lang.String>".
 std::string PrettyClass(const Class* c)
-    SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 // Returns a human-readable form of the name of the given class with its class loader.
 std::string PrettyClassAndClassLoader(const Class* c)
-    SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 // Returns a human-readable size string such as "1MB".
 std::string PrettySize(size_t size_in_bytes);
@@ -230,10 +230,10 @@ bool IsValidMemberName(const char* s);

 // Returns the JNI native function name for the non-overloaded method 'm'.
 std::string JniShortName(const Method* m)
-    SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
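JniShortName above produces the symbol the JNI loader looks up for a native method: "Java_" plus the mangled class and method names. A simplified sketch of the short form; real mangling also escapes '_' as "_1" and non-ASCII characters as "_0xxxx", and JniLongName appends "__" plus the mangled parameter signature to disambiguate overloads:

    #include <string>

    std::string JniShortNameSketch(const std::string& class_descriptor,  // "java/lang/String"
                                   const std::string& method_name) {     // "charAt"
      std::string result = "Java_";
      for (size_t i = 0; i < class_descriptor.size(); ++i) {
        char c = class_descriptor[i];
        result += (c == '/') ? '_' : c;  // Package separators become underscores.
      }
      result += '_';
      result += method_name;
      return result;  // "Java_java_lang_String_charAt"
    }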
 // Returns the JNI native function name for the overloaded method 'm'.
 std::string JniLongName(const Method* m)
-    SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 bool ReadFileToString(const std::string& file_name, std::string* result);

diff --git a/src/verifier/method_verifier.cc b/src/verifier/method_verifier.cc
index 55d537c81c..813073d062 100644
--- a/src/verifier/method_verifier.cc
+++ b/src/verifier/method_verifier.cc
@@ -1029,7 +1029,7 @@ std::ostream& MethodVerifier::DumpFailures(std::ostream& os) {
 }

 extern "C" void MethodVerifierGdbDump(MethodVerifier* v)
-    SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   v->Dump(std::cerr);
 }

diff --git a/src/verifier/method_verifier.h b/src/verifier/method_verifier.h
index 244f1f8720..51bed6ea5f 100644
--- a/src/verifier/method_verifier.h
+++ b/src/verifier/method_verifier.h
@@ -164,11 +164,11 @@ class MethodVerifier {

   /* Verify a class. Returns "kNoFailure" on success. */
   static FailureKind VerifyClass(const Class* klass, std::string& error)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static FailureKind VerifyClass(const DexFile* dex_file, DexCache* dex_cache,
                                  ClassLoader* class_loader, uint32_t class_def_idx,
                                  std::string& error)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   uint8_t EncodePcToReferenceMapData() const;

@@ -194,7 +194,7 @@ class MethodVerifier {

   // Dump the state of the verifier, namely each instruction, what flags are set on it, register
   // information
-  void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   static const std::vector<uint8_t>* GetGcMap(Compiler::MethodReference ref)
       LOCKS_EXCLUDED(gc_maps_lock_);
@@ -203,7 +203,7 @@ class MethodVerifier {
   // to the locks held at 'dex_pc' in 'm'.
   static void FindLocksAtDexPc(Method* m, uint32_t dex_pc,
                                std::vector<uint32_t>& monitor_enter_dex_pcs)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   static void Init();
   static void Shutdown();
@@ -220,7 +220,7 @@ class MethodVerifier {
   explicit MethodVerifier(const DexFile* dex_file, DexCache* dex_cache, ClassLoader* class_loader,
                           uint32_t class_def_idx, const DexFile::CodeItem* code_item,
                           uint32_t method_idx, Method* method, uint32_t access_flags)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Adds the given string to the beginning of the last failure message.
   void PrependToLastFailMessage(std::string);
@@ -242,15 +242,15 @@ class MethodVerifier {
   static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file, DexCache* dex_cache,
                                   ClassLoader* class_loader, uint32_t class_def_idx,
                                   const DexFile::CodeItem* code_item,
                                   Method* method, uint32_t method_access_flags)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static void VerifyMethodAndDump(Method* method)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Run verification on the method. Returns true if verification completes and false if the input
   // has an irrecoverable corruption.
-  bool Verify() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  bool Verify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

-  void FindLocksAtDexPc() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  void FindLocksAtDexPc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * Compute the width of the instruction at each address in the instruction stream, and store it in
@@ -278,7 +278,7 @@ class MethodVerifier {
    * Returns "false" if something in the exception table looks fishy, but we're expecting the
    * exception table to be somewhat sane.
    */
-  bool ScanTryCatchBlocks() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  bool ScanTryCatchBlocks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * Perform static verification on all instructions in a method.
@@ -384,11 +384,11 @@ class MethodVerifier {
                      bool* selfOkay);

   /* Perform detailed code-flow analysis on a single method. */
-  bool VerifyCodeFlow() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  bool VerifyCodeFlow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Set the register types for the first instruction in the method based on the method signature.
   // This has the side-effect of validating the signature.
-  bool SetTypesFromSignature() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  bool SetTypesFromSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * Perform code flow on a method.
@@ -436,7 +436,7 @@ class MethodVerifier {
    * reordering by specifying that you can't execute the new-instance instruction if a register
    * contains an uninitialized instance created by that same instruction.
    */
-  bool CodeFlowVerifyMethod() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  bool CodeFlowVerifyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * Perform verification for a single instruction.
@@ -448,45 +448,45 @@ class MethodVerifier {
    * addresses. Does not set or clear any other flags in "insn_flags_".
    */
   bool CodeFlowVerifyInstruction(uint32_t* start_guess)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Perform verification of a new array instruction
   void VerifyNewArray(const DecodedInstruction& dec_insn, bool is_filled, bool is_range)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Perform verification of an aget instruction. The destination register's type will be set to
   // be that of component type of the array unless the array type is unknown, in which case a
   // bottom type inferred from the type of instruction is used. is_primitive is false for an
   // aget-object.
   void VerifyAGet(const DecodedInstruction& insn, const RegType& insn_type,
-                  bool is_primitive) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+                  bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Perform verification of an aput instruction.
   void VerifyAPut(const DecodedInstruction& insn, const RegType& insn_type,
-                  bool is_primitive) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+                  bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Lookup instance field and fail for resolution violations
   Field* GetInstanceField(const RegType& obj_type, int field_idx)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Lookup static field and fail for resolution violations
-  Field* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  Field* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Perform verification of an iget or sget instruction.
   void VerifyISGet(const DecodedInstruction& insn, const RegType& insn_type,
                    bool is_primitive, bool is_static)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Perform verification of an iput or sput instruction.
   void VerifyISPut(const DecodedInstruction& insn, const RegType& insn_type,
                    bool is_primitive, bool is_static)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Resolves a class based on an index and performs access checks to ensure the referrer can
   // access the resolved class.
   const RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * For the "move-exception" instruction at "work_insn_idx_", which must be at an exception handler
@@ -494,7 +494,7 @@ class MethodVerifier {
    * exception handler can be found or if the Join of exception types fails.
    */
   const RegType& GetCaughtExceptionType()
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * Resolves a method based on an index and performs access checks to ensure
@@ -502,7 +502,7 @@ class MethodVerifier {
    * Does not throw exceptions.
    */
   Method* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * Verify the arguments to a method. We're executing in "method", making
@@ -528,7 +528,7 @@ class MethodVerifier {
    */
   Method* VerifyInvocationArgs(const DecodedInstruction& dec_insn, MethodType method_type,
                                bool is_range, bool is_super)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * Verify that the target instruction is not "move-exception". It's important that the only way
@@ -543,7 +543,7 @@ class MethodVerifier {
    * Returns "false" if an error is encountered.
    */
   bool UpdateRegisters(uint32_t next_insn, const RegisterLine* merge_line)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Is the method being verified a constructor?
   bool IsConstructor() const {
@@ -556,10 +556,10 @@ class MethodVerifier {
   }

   // Return the register type for the method.
-  const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
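ResolveClassAndCheckAccess and the field and method lookups above all funnel through the same Java access rule. Its core, as a sketch with assumed helpers (Class::IsPublic and Class::IsInSamePackage stand in for whatever the runtime actually exposes; the real checks also handle protected and private members):

    // A referrer may use a resolved class if the class is public or both
    // classes sit in the same package under the same class loader.
    bool CanAccessSketch(const Class* referrer, const Class* resolved) {
      return resolved->IsPublic() || referrer->IsInSamePackage(resolved);
    }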
   // Get a type representing the declaring class of the method.
-  const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 #if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER)
   /*
@@ -626,13 +626,13 @@ class MethodVerifier {
   uint32_t method_idx_;  // The method we're working on.
   // Its object representation if known.
-  Method* foo_method_ GUARDED_BY(GlobalSynchronization::mutator_lock_);
+  Method* foo_method_ GUARDED_BY(Locks::mutator_lock_);
   uint32_t method_access_flags_;  // Method's access flags.
   const DexFile* dex_file_;  // The dex file containing the method.
   // The dex_cache for the declaring class of the method.
-  DexCache* dex_cache_ GUARDED_BY(GlobalSynchronization::mutator_lock_);
+  DexCache* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
   // The class loader for the declaring class of the method.
-  ClassLoader* class_loader_ GUARDED_BY(GlobalSynchronization::mutator_lock_);
+  ClassLoader* class_loader_ GUARDED_BY(Locks::mutator_lock_);
   uint32_t class_def_idx_;  // The class def index of the declaring class of the method.
   const DexFile::CodeItem* code_item_;  // The code item containing the code for the method.
   UniquePtr<InsnFlags[]> insn_flags_;  // Instruction widths and flags, one entry per code unit.
diff --git a/src/verifier/method_verifier_test.cc b/src/verifier/method_verifier_test.cc
index e52feb3258..9c9c745098 100644
--- a/src/verifier/method_verifier_test.cc
+++ b/src/verifier/method_verifier_test.cc
@@ -28,7 +28,7 @@ namespace verifier {
 class MethodVerifierTest : public CommonTest {
  protected:
   void VerifyClass(const std::string& descriptor)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     ASSERT_TRUE(descriptor != NULL);
     Class* klass = class_linker_->FindSystemClass(descriptor.c_str());
@@ -38,7 +38,7 @@ class MethodVerifierTest : public CommonTest {
   }

   void VerifyDexFile(const DexFile* dex)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     ASSERT_TRUE(dex != NULL);

     // Verify all the classes defined in this file
diff --git a/src/verifier/reg_type.cc b/src/verifier/reg_type.cc
index 281d96e6b4..f5552233d0 100644
--- a/src/verifier/reg_type.cc
+++ b/src/verifier/reg_type.cc
@@ -385,7 +385,7 @@ Class* RegType::ClassJoin(Class* s, Class* t) {
 }

 std::ostream& operator<<(std::ostream& os, const RegType& rhs)
-    SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   os << rhs.Dump();
   return os;
 }
diff --git a/src/verifier/reg_type.h b/src/verifier/reg_type.h
index a0e2ff8525..3064f30ae3 100644
--- a/src/verifier/reg_type.h
+++ b/src/verifier/reg_type.h
@@ -118,7 +118,7 @@ class RegType {

   // The high half that corresponds to this low half
   const RegType& HighHalf(RegTypeCache* cache) const
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   bool IsConstant() const { return type_ == kRegTypeConst; }
   bool IsLongConstant() const { return type_ == kRegTypeConstLo; }
@@ -209,7 +209,7 @@ class RegType {
     return IsReference() && GetClass()->IsObjectClass();
   }

-  bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
       return GetDescriptor()->CharAt(0) == '[';
     } else if (IsReference()) {
@@ -219,7 +219,7 @@ class RegType {
     }
   }

-  bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
       // Primitive arrays will always resolve
       DCHECK(GetDescriptor()->CharAt(1) == 'L' || GetDescriptor()->CharAt(1) == '[');
@@ -295,27 +295,27 @@ class RegType {
   }

   const RegType& GetSuperClass(RegTypeCache* cache) const
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   std::string Dump(const RegTypeCache* reg_types = NULL) const
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Can this type access other?
   bool CanAccess(const RegType& other) const
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Can this type access a member with the given properties?
   bool CanAccessMember(Class* klass, uint32_t access_flags) const
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Can this type be assigned by src?
   bool IsAssignableFrom(const RegType& src) const
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   bool Equals(const RegType& other) const { return GetId() == other.GetId(); }

   // Compute the merge of this register from one edge (path) with incoming_type from another.
   const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is
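Merge above falls back to ClassJoin for two resolved reference types. The usual way to compute that join, sketched with an assumed GetSuperClass() accessor and ignoring the interface and array cases a real implementation must handle: equalize the depths of the two superclass chains, then climb both in lockstep until they meet (java.lang.Object in the worst case).

    static size_t ChainDepth(Class* c) {
      size_t depth = 0;
      for (; c != NULL; c = c->GetSuperClass()) {
        ++depth;
      }
      return depth;
    }

    Class* ClassJoinSketch(Class* s, Class* t) {
      size_t ds = ChainDepth(s);
      size_t dt = ChainDepth(t);
      while (ds > dt) { s = s->GetSuperClass(); --ds; }
      while (dt > ds) { t = t->GetSuperClass(); --dt; }
      while (s != t) {  // Climb in lockstep until the chains meet.
        s = s->GetSuperClass();
        t = t->GetSuperClass();
      }
      return s;
    }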
@@ -334,7 +334,7 @@ class RegType {
    * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
    */
   static Class* ClassJoin(Class* s, Class* t)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  private:
   friend class RegTypeCache;

diff --git a/src/verifier/reg_type_cache.h b/src/verifier/reg_type_cache.h
index 1287388e07..5a2c49cc75 100644
--- a/src/verifier/reg_type_cache.h
+++ b/src/verifier/reg_type_cache.h
@@ -41,65 +41,65 @@ class RegTypeCache {
   }

   const RegType& From(RegType::Type type, ClassLoader* loader, const char* descriptor)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const RegType& FromClass(Class* klass)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const RegType& FromCat1Const(int32_t value);
   const RegType& FromDescriptor(ClassLoader* loader, const char* descriptor)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const RegType& FromType(RegType::Type)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right);
   const RegType& FromUnresolvedSuperClass(const RegType& child);

-  const RegType& Boolean() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  const RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromType(RegType::kRegTypeBoolean);
   }
-  const RegType& Byte() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  const RegType& Byte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromType(RegType::kRegTypeByte);
   }
-  const RegType& Char() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  const RegType& Char() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromType(RegType::kRegTypeChar);
   }
-  const RegType& Short() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  const RegType& Short() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromType(RegType::kRegTypeShort);
   }
-  const RegType& Integer() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  const RegType& Integer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromType(RegType::kRegTypeInteger);
   }
-  const RegType& Float() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  const RegType& Float() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromType(RegType::kRegTypeFloat);
   }
-  const RegType& Long() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  const RegType& Long() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromType(RegType::kRegTypeLongLo);
   }
-  const RegType& Double() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  const RegType& Double() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromType(RegType::kRegTypeDoubleLo);
   }

-  const RegType& JavaLangClass() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  const RegType& JavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return From(RegType::kRegTypeReference, NULL, "Ljava/lang/Class;");
   }
-  const RegType& JavaLangObject() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  const RegType& JavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return From(RegType::kRegTypeReference, NULL, "Ljava/lang/Object;");
   }
"Ljava/lang/Object;"); } - const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return From(RegType::kRegTypeReference, NULL, "Ljava/lang/String;"); } - const RegType& JavaLangThrowable() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& JavaLangThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return From(RegType::kRegTypeReference, NULL, "Ljava/lang/Throwable;"); } - const RegType& Undefined() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& Undefined() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeUndefined); } - const RegType& Conflict() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& Conflict() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeConflict); } - const RegType& ConstLo() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& ConstLo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromType(RegType::kRegTypeConstLo); } - const RegType& Zero() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + const RegType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return FromCat1Const(0); } @@ -116,7 +116,7 @@ class RegTypeCache { const RegType& IntConstant() { return FromCat1Const(std::numeric_limits<jint>::max()); } const RegType& GetComponentType(const RegType& array, ClassLoader* loader) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: // The allocated entries diff --git a/src/verifier/register_line.cc b/src/verifier/register_line.cc index ec7891edce..4882740dcc 100644 --- a/src/verifier/register_line.cc +++ b/src/verifier/register_line.cc @@ -368,7 +368,7 @@ void RegisterLine::WriteReferenceBitMap(std::vector<uint8_t>& data, size_t max_b } std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { os << rhs.Dump(); return os; } diff --git a/src/verifier/register_line.h b/src/verifier/register_line.h index 1d131ad8f4..9f0fcb05ae 100644 --- a/src/verifier/register_line.h +++ b/src/verifier/register_line.h @@ -62,22 +62,22 @@ class RegisterLine { // Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst". void CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Implement category-2 "move" instructions. Copy a 64-bit value from "vsrc" to "vdst". This // copies both halves of the register. void CopyRegister2(uint32_t vdst, uint32_t vsrc) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Implement "move-result". Copy the category-1 value from the result register to another // register, and reset the result register. void CopyResultRegister1(uint32_t vdst, bool is_reference) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Implement "move-result-wide". Copy the category-2 value from the result register to another // register, and reset the result register. 
   void CopyResultRegister2(uint32_t vdst)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Set the invisible result register to unknown
   void SetResultTypeToUnknown();

@@ -86,17 +86,17 @@ class RegisterLine {
   // part of a 64-bit value, register N+1 will be set to "newType+1".
   // The register index was validated during the static pass, so we don't need to check it here.
   bool SetRegisterType(uint32_t vdst, const RegType& new_type)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /* Set the type of the "result" register. */
   void SetResultRegisterType(const RegType& new_type)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Get the type of register vsrc.
   const RegType& GetRegisterType(uint32_t vsrc) const;

   bool VerifyRegisterType(uint32_t vsrc, const RegType& check_type)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   void CopyFromLine(const RegisterLine* src) {
     DCHECK_EQ(num_regs_, src->num_regs_);
@@ -105,7 +105,7 @@ class RegisterLine {
     reg_to_lock_depths_ = src->reg_to_lock_depths_;
   }

-  std::string Dump() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   void FillWithGarbage() {
     memset(line_.get(), 0xf1, num_regs_ * sizeof(uint16_t));
@@ -122,7 +122,7 @@ class RegisterLine {
    * the new ones at the same time).
    */
   void MarkUninitRefsAsInvalid(const RegType& uninit_type)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * Update all registers holding "uninit_type" to instead hold the corresponding initialized
@@ -161,7 +161,7 @@ class RegisterLine {
    * versions. We just need to make sure vA is >= 1 and then return vC.
    */
   const RegType& GetInvocationThis(const DecodedInstruction& dec_insn)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * Verify types for a simple two-register instruction (e.g. "neg-int").
@@ -169,7 +169,7 @@ class RegisterLine {
    */
   void CheckUnaryOp(const DecodedInstruction& dec_insn, const RegType& dst_type,
                     const RegType& src_type)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * Verify types for a simple three-register instruction (e.g. "add-int").
@@ -179,7 +179,7 @@ class RegisterLine {
    */
   void CheckBinaryOp(const DecodedInstruction& dec_insn,
                      const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
                      bool check_boolean_op)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * Verify types for a binary "2addr" operation. "src_type1"/"src_type2"
@@ -189,7 +189,7 @@ class RegisterLine {
                           const RegType& dst_type,
                           const RegType& src_type1, const RegType& src_type2,
                           bool check_boolean_op)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   /*
    * Verify types for a two-register instruction with a literal constant (e.g. "add-int/lit8").
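CheckUnaryOp, CheckBinaryOp, and CheckBinaryOp2addr all follow one pattern: verify the source registers against the expected types, and only then write the destination type. A sketch of that pattern (vA/vB/vC are assumed to be the usual DecodedInstruction register fields; the check_boolean_op widening is omitted):

    void CheckBinaryOpSketch(RegisterLine* line, const DecodedInstruction& dec_insn,
                             const RegType& dst_type,
                             const RegType& src_type1, const RegType& src_type2) {
      if (line->VerifyRegisterType(dec_insn.vB, src_type1) &&
          line->VerifyRegisterType(dec_insn.vC, src_type2)) {
        line->SetRegisterType(dec_insn.vA, dst_type);  // Sources check out.
      }
    }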
@@ -199,7 +199,7 @@ class RegisterLine {
    */
   void CheckLiteralOp(const DecodedInstruction& dec_insn,
                       const RegType& dst_type, const RegType& src_type, bool check_boolean_op)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx.
   void PushMonitor(uint32_t reg_idx, int32_t insn_idx);
@@ -217,7 +217,7 @@ class RegisterLine {
   bool VerifyMonitorStackEmpty();

   bool MergeRegisters(const RegisterLine* incoming_line)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   size_t GetMaxNonZeroReferenceReg(size_t max_ref_reg) {
     size_t i = static_cast<int>(max_ref_reg) < 0 ? 0 : max_ref_reg;
diff --git a/src/well_known_classes.h b/src/well_known_classes.h
index 15961e28ae..65ee6b49c2 100644
--- a/src/well_known_classes.h
+++ b/src/well_known_classes.h
@@ -33,7 +33,7 @@ struct WellKnownClasses {
   static void Init(JNIEnv* env);

   static Class* ToClass(jclass global_jclass)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   static jclass com_android_dex_Dex;
   static jclass dalvik_system_PathClassLoader;
diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc
index 0aefa97022..e6d652de66 100644
--- a/test/ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/ReferenceMap/stack_walk_refmap_jni.cc
@@ -44,11 +44,11 @@ namespace art {
 struct ReferenceMap2Visitor : public StackVisitor {
   explicit ReferenceMap2Visitor(const ManagedStack* stack,
                                 const std::vector<TraceStackFrame>* trace_stack)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       : StackVisitor(stack, trace_stack, NULL) {
   }

-  bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Method* m = GetMethod();
     if (!m || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) {
       return true;
diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc
index 41e8d583ca..444eaed818 100644
--- a/test/StackWalk/stack_walk_jni.cc
+++ b/test/StackWalk/stack_walk_jni.cc
@@ -42,11 +42,11 @@ static int gJava_StackWalk_refmap_calls = 0;
 struct TestReferenceMapVisitor : public StackVisitor {
   explicit TestReferenceMapVisitor(const ManagedStack* stack,
                                    const std::vector<TraceStackFrame>* trace_stack)
-      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       : StackVisitor(stack, trace_stack, NULL) {
   }

-  bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Method* m = GetMethod();
     CHECK(m != NULL);
     LOG(INFO) << "At " << PrettyMethod(m, false);
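Both test visitors above use the same StackVisitor contract the runtime code relies on: construct the visitor over a thread's ManagedStack, start the walk (a WalkStack()-style entry point is assumed here), and VisitFrame() fires once per frame until it returns false. A minimal sketch following the declarations shown in this patch:

    struct FrameCounter : public StackVisitor {
      FrameCounter(const ManagedStack* stack,
                   const std::vector<TraceStackFrame>* trace_stack)
          SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
          : StackVisitor(stack, trace_stack, NULL), frames(0) {}

      bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
        ++frames;
        return true;  // Returning false would stop the walk early.
      }

      size_t frames;
    };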