-rw-r--r-- | compiler/image_writer.cc                   |  2
-rw-r--r-- | runtime/barrier.h                          |  2
-rw-r--r-- | runtime/base/mutex.h                       | 19
-rw-r--r-- | runtime/class_table.h                      |  3
-rw-r--r-- | runtime/debugger.cc                        |  2
-rw-r--r-- | runtime/gc/weak_root_state.h               |  2
-rw-r--r-- | runtime/intern_table.cc                    | 28
-rw-r--r-- | runtime/intern_table.h                     | 10
-rw-r--r-- | runtime/interpreter/interpreter_common.cc  |  6
-rw-r--r-- | runtime/jdwp/jdwp.h                        |  3
-rw-r--r-- | runtime/native/java_lang_Thread.cc         |  2
-rw-r--r-- | runtime/runtime.cc                         |  2
-rw-r--r-- | runtime/thread.cc                          |  2
-rw-r--r-- | runtime/thread_state.h                     |  2
14 files changed, 54 insertions, 31 deletions
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 17d75a3cd3..953b1de3ed 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -719,7 +719,7 @@ void ImageWriter::CalculateObjectBinSlots(Object* obj) {
   }
   // InternImageString allows us to intern while holding the heap bitmap lock. This is safe since
   // we are guaranteed to not have GC during image writing.
-  mirror::String* const interned = Runtime::Current()->GetInternTable()->InternImageString(
+  mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrongImageString(
       obj->AsString());
   if (obj != interned) {
     if (!IsImageBinSlotAssigned(interned)) {
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 02f9f58ff0..94977fb741 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -51,7 +51,7 @@ class Barrier {
   // to sleep, resulting in a deadlock.
 
   // Increment the count by delta, wait on condition if count is non zero.
-  void Increment(Thread* self, int delta) REQUIRES(!lock_);;
+  void Increment(Thread* self, int delta) REQUIRES(!lock_);
 
   // Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns
   // true if time out occurred.
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d0504d993f..2801fb7a7d 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -332,36 +332,40 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
   bool IsExclusiveHeld(const Thread* self) const;
 
   // Assert the current thread has exclusive access to the ReaderWriterMutex.
-  void AssertExclusiveHeld(const Thread* self) {
+  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
     if (kDebugLocking && (gAborting == 0)) {
       CHECK(IsExclusiveHeld(self)) << *this;
     }
   }
-  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }
+  void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
 
   // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
-  void AssertNotExclusiveHeld(const Thread* self) {
+  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
     if (kDebugLocking && (gAborting == 0)) {
       CHECK(!IsExclusiveHeld(self)) << *this;
     }
   }
-  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }
+  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
+    AssertNotExclusiveHeld(self);
+  }
 
   // Is the current thread a shared holder of the ReaderWriterMutex.
   bool IsSharedHeld(const Thread* self) const;
 
   // Assert the current thread has shared access to the ReaderWriterMutex.
-  void AssertSharedHeld(const Thread* self) {
+  void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
     if (kDebugLocking && (gAborting == 0)) {
       // TODO: we can only assert this well when self != null.
       CHECK(IsSharedHeld(self) || self == nullptr) << *this;
     }
   }
-  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }
+  void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
+    AssertSharedHeld(self);
+  }
 
   // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
   // mode.
-  void AssertNotHeld(const Thread* self) {
+  void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
     if (kDebugLocking && (gAborting == 0)) {
       CHECK(!IsSharedHeld(self)) << *this;
     }
@@ -679,6 +683,7 @@ class Locks {
 
 class Roles {
  public:
+  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};
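Note on the mutex.h hunk: the Assert*Held() methods now carry ASSERT_CAPABILITY / ASSERT_SHARED_CAPABILITY, so Clang's -Wthread-safety analysis treats the lock as (shared-)held after the assertion returns. A minimal standalone sketch of the underlying Clang attributes, assuming ART's macros in base/macros.h expand to them (compile with clang++ -Wthread-safety; not ART code):

#include <mutex>

// Stand-in macros; ART's ASSERT_CAPABILITY / ASSERT_SHARED_CAPABILITY are
// assumed to expand to these Clang attributes.
#define CAPABILITY(x)        __attribute__((capability(x)))
#define ACQUIRE()            __attribute__((acquire_capability()))
#define RELEASE()            __attribute__((release_capability()))
#define ASSERT_CAPABILITY(x) __attribute__((assert_capability(x)))

class CAPABILITY("mutex") Mu {
 public:
  void Lock() ACQUIRE() { m_.lock(); }
  void Unlock() RELEASE() { m_.unlock(); }
  // In ART the assertion also CHECKs at runtime; for the static analysis the
  // attribute is what matters: after this returns, the capability counts as held.
  void AssertHeld() ASSERT_CAPABILITY(this) {}

 private:
  std::mutex m_;
};

Mu mu;
int counter __attribute__((guarded_by(mu))) = 0;

void IncrementOnCallbackThread() {
  // The caller holds mu dynamically, but the analysis cannot prove it across
  // the call boundary; the assertion re-establishes the fact statically.
  mu.AssertHeld();
  ++counter;  // no -Wthread-safety warning on the guarded access
}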
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 252a47dd25..418295449f 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -107,13 +107,14 @@ class ClassTable {
       return item.IsNull();
     }
   };
-  // hash set which hashes class descriptor, and compares descriptors nad class loaders. Results
+  // hash set which hashes class descriptor, and compares descriptors and class loaders. Results
   // should be compared for a matching Class descriptor and class loader.
   typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals,
       ClassDescriptorHashEquals,
      TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>> ClassSet;
 
   // TODO: shard lock to have one per class loader.
+  // We have a vector to help prevent dirty pages after the zygote forks by calling FreezeSnapshot.
   std::vector<ClassSet> classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
 };
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 1865516939..e1e130bc01 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2094,7 +2094,7 @@ JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
     case kWaitingInMainDebuggerLoop:
     case kWaitingInMainSignalCatcherLoop:
     case kWaitingPerformingGc:
-    case kWaitingWeakRootRead:
+    case kWaitingWeakGcRootRead:
     case kWaiting:
       return JDWP::TS_WAIT;
       // Don't add a 'default' here so the compiler can spot incompatible enum changes.
diff --git a/runtime/gc/weak_root_state.h b/runtime/gc/weak_root_state.h
index b66f19d4d8..e3cefc443b 100644
--- a/runtime/gc/weak_root_state.h
+++ b/runtime/gc/weak_root_state.h
@@ -28,6 +28,8 @@ enum WeakRootState {
   // Need to wait until we can read weak roots.
   kWeakRootStateNoReadsOrWrites,
   // Need to mark new weak roots to make sure they don't get swept.
+  // kWeakRootStateMarkNewRoots is currently unused but I was planning on using it to allow adding
+  // new weak roots during the CMS reference processing phase.
   kWeakRootStateMarkNewRoots,
 };
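The weak_root_state.h states above drive the intern-table changes below: readers must block while the state is kWeakRootStateNoReadsOrWrites. A hedged sketch of that wait protocol in standard C++ (a simplified stand-in; ART uses its own Mutex/ConditionVariable types and transitions the waiting thread to kWaitingWeakGcRootRead):

#include <condition_variable>
#include <mutex>

enum WeakRootState {
  kWeakRootStateNormal,
  kWeakRootStateNoReadsOrWrites,
  kWeakRootStateMarkNewRoots,
};

std::mutex intern_table_lock;                   // stand-in for Locks::intern_table_lock_
std::condition_variable weak_intern_condition;  // stand-in for weak_intern_condition_
WeakRootState weak_root_state = kWeakRootStateNormal;

// GC pause: forbid weak root reads until sweeping has finished.
void DisallowWeakRootReads() {
  std::lock_guard<std::mutex> lock(intern_table_lock);
  weak_root_state = kWeakRootStateNoReadsOrWrites;
}

// After SweepSystemWeaks: re-enable reads and wake any blocked readers.
void AllowWeakRootReads() {
  {
    std::lock_guard<std::mutex> lock(intern_table_lock);
    weak_root_state = kWeakRootStateNormal;
  }
  weak_intern_condition.notify_all();
}

// Reader side, mirroring WaitUntilAccessible(): block while reads are forbidden.
void WaitUntilAccessible() {
  std::unique_lock<std::mutex> lock(intern_table_lock);
  weak_intern_condition.wait(
      lock, [] { return weak_root_state != kWeakRootStateNoReadsOrWrites; });
}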
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index ae521b164e..2be570ac85 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -90,7 +90,6 @@ mirror::String* InternTable::LookupStrong(mirror::String* s) {
 }
 
 mirror::String* InternTable::LookupWeak(mirror::String* s) {
-  // TODO: Return only if marked.
   return weak_interns_.Find(s);
 }
 
@@ -229,7 +228,7 @@ void InternTable::BroadcastForNewInterns() {
 
 void InternTable::WaitUntilAccessible(Thread* self) {
   Locks::intern_table_lock_->ExclusiveUnlock(self);
-  self->TransitionFromRunnableToSuspended(kWaitingWeakRootRead);
+  self->TransitionFromRunnableToSuspended(kWaitingWeakGcRootRead);
   Locks::intern_table_lock_->ExclusiveLock(self);
   while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
     weak_intern_condition_.Wait(self);
@@ -250,24 +249,35 @@ mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool holding_locks) {
     CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
   }
   while (true) {
+    if (holding_locks) {
+      if (!kUseReadBarrier) {
+        CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
+      } else {
+        CHECK(self->GetWeakRefAccessEnabled());
+      }
+    }
     // Check the strong table for a match.
     mirror::String* strong = LookupStrong(s);
     if (strong != nullptr) {
       return strong;
     }
+    if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
+        (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
+      break;
+    }
     // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
     // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
     // cleared.
-    if (weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
-      break;
-    }
     CHECK(!holding_locks);
     StackHandleScope<1> hs(self);
     auto h = hs.NewHandleWrapper(&s);
     WaitUntilAccessible(self);
   }
-  CHECK_NE(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
-  DCHECK_NE(weak_root_state_, gc::kWeakRootStateMarkNewRoots) << "Unsupported";
+  if (!kUseReadBarrier) {
+    CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
+  } else {
+    CHECK(self->GetWeakRefAccessEnabled());
+  }
   // There is no match in the strong table, check the weak table.
   mirror::String* weak = LookupWeak(s);
   if (weak != nullptr) {
@@ -298,7 +308,7 @@ mirror::String* InternTable::InternStrong(const char* utf8_data) {
   return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
 }
 
-mirror::String* InternTable::InternImageString(mirror::String* s) {
+mirror::String* InternTable::InternStrongImageString(mirror::String* s) {
   // May be holding the heap bitmap lock.
   return Insert(s, true, true);
 }
@@ -319,8 +329,6 @@ bool InternTable::ContainsWeak(mirror::String* s) {
 void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
   weak_interns_.SweepWeaks(visitor);
-  // Done sweeping, back to a normal state.
-  ChangeWeakRootStateLocked(gc::kWeakRootStateNormal);
 }
 
 void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 0be66759ac..ae9f7a7acd 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -61,8 +61,10 @@ class InternTable {
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   // Only used by image writer. Special version that may not cause thread suspension since the GC
-  // can not be running while we are doing image writing.
-  mirror::String* InternImageString(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_);
+  // can not be running while we are doing image writing. May be called while holding a lock
+  // since there will not be thread suspension.
+  mirror::String* InternStrongImageString(mirror::String* s)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Interns a potentially new string in the 'strong' table. May cause thread suspension.
   mirror::String* InternStrong(const char* utf8_data) SHARED_REQUIRES(Locks::mutator_lock_)
@@ -184,7 +186,9 @@ class InternTable {
     UnorderedSet post_zygote_table_;
   };
 
-  // Insert if non null, otherwise return null.
+  // Insert if non null, otherwise return null. Must be called holding the mutator lock.
+  // If holding_locks is true, then we may also hold other locks, and we require that the GC
+  // not be running, since it is not safe to wait while holding locks.
   mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks)
       REQUIRES(!Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
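With this change, whether Insert() may touch the weak intern table is gated differently per collector: without read barriers it still consults the global weak_root_state_, while with read barriers (the concurrent copying collector) it consults the per-thread GetWeakRefAccessEnabled() flag. A self-contained sketch of the resulting predicate, with stand-ins for the ART types:

constexpr bool kUseReadBarrier = false;  // build-time collector configuration

enum WeakRootState {
  kWeakRootStateNormal,
  kWeakRootStateNoReadsOrWrites,
};

struct Thread {
  bool weak_ref_access_enabled;  // stand-in for Thread::GetWeakRefAccessEnabled()
};

WeakRootState weak_root_state = kWeakRootStateNormal;

// True when weak intern roots may be read or written. Insert() loops on this,
// parking in WaitUntilAccessible() whenever it is false.
bool WeakInternAccessAllowed(const Thread* self) {
  if (kUseReadBarrier) {
    return self->weak_ref_access_enabled;                   // per-thread gate (CC)
  }
  return weak_root_state != kWeakRootStateNoReadsOrWrites;  // global gate
}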
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 9de9e8ada2..f923b848fa 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -884,7 +884,7 @@ void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
 
 // Explicit DoCall template function declarations.
 #define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check)                      \
-  template SHARED_REQUIRES(Locks::mutator_lock_)                                                 \
+  template SHARED_REQUIRES(Locks::mutator_lock_)                                                \
   bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self,              \
                                                   ShadowFrame& shadow_frame,                    \
                                                   const Instruction* inst, uint16_t inst_data,  \
@@ -897,7 +897,7 @@ EXPLICIT_DO_CALL_TEMPLATE_DECL(true, true);
 
 // Explicit DoLambdaCall template function declarations.
 #define EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check)               \
-  template SHARED_REQUIRES(Locks::mutator_lock_)                                                 \
+  template SHARED_REQUIRES(Locks::mutator_lock_)                                                \
   bool DoLambdaCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self,        \
                                                         ShadowFrame& shadow_frame,              \
                                                         const Instruction* inst,                \
@@ -911,7 +911,7 @@ EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(true, true);
 
 // Explicit DoFilledNewArray template function declarations.
 #define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active)     \
-  template SHARED_REQUIRES(Locks::mutator_lock_)                                                 \
+  template SHARED_REQUIRES(Locks::mutator_lock_)                                                \
   bool DoFilledNewArray<_is_range_, _check, _transaction_active>(const Instruction* inst,       \
                                                                  const ShadowFrame& shadow_frame, \
                                                                  Thread* self, JValue* result)
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index f5ac9d0241..ae02fe6a0b 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -128,6 +128,9 @@ struct JdwpState {
    * the debugger.
    *
    * Returns a newly-allocated JdwpState struct on success, or nullptr on failure.
+   *
+   * NO_THREAD_SAFETY_ANALYSIS since we can't annotate that we do not have
+   * state->thread_start_lock_ held.
    */
   static JdwpState* Create(const JdwpOptions* options)
       REQUIRES(!Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index b40d94a1a7..7118f36c88 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -90,7 +90,7 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean has_been_started) {
     case kWaitingInMainSignalCatcherLoop: return kJavaWaiting;
     case kWaitingForMethodTracingStart:   return kJavaWaiting;
     case kWaitingForVisitObjects:         return kJavaWaiting;
-    case kWaitingWeakRootRead:            return kJavaWaiting;
+    case kWaitingWeakGcRootRead:          return kJavaWaiting;
     case kSuspended:                      return kJavaRunnable;
     // Don't add a 'default' here so the compiler can spot incompatible enum changes.
   }
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a27acb22e5..b08e52126e 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1516,7 +1516,7 @@ void Runtime::DisallowNewSystemWeaks() {
 
 void Runtime::AllowNewSystemWeaks() {
   monitor_list_->AllowNewMonitors();
-  intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal);  // TODO: Do this in the sweeping?
+  intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal);  // TODO: Do this in the sweeping.
   java_vm_->AllowNewWeakGlobals();
   heap_->AllowNewAllocationRecords();
   lambda_box_table_->AllowNewWeakBoxedLambdas();
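Together with the intern_table.cc hunk that removes ChangeWeakRootStateLocked() from SweepInternTableWeaks(), this makes AllowNewSystemWeaks() the single place where weak root access is restored, after all system weaks are swept. A sketch of the assumed ordering (stub functions, heavily simplified from the runtime):

enum WeakRootState { kWeakRootStateNormal, kWeakRootStateNoReadsOrWrites };
WeakRootState weak_root_state = kWeakRootStateNormal;

void DisallowNewSystemWeaks() {  // start of the GC pause
  weak_root_state = kWeakRootStateNoReadsOrWrites;
}

void SweepInternTableWeaks() {
  // Sweeps weak_interns_; after this change it no longer flips the state back.
}

void AllowNewSystemWeaks() {  // the single re-enable point
  weak_root_state = kWeakRootStateNormal;  // then broadcast to blocked waiters
}

void CollectGarbage() {
  DisallowNewSystemWeaks();
  SweepInternTableWeaks();  // plus monitors, JNI weak globals, ...
  AllowNewSystemWeaks();
}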
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b3efad0742..7433600f31 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2736,7 +2736,7 @@ void Thread::PopVerifier(verifier::MethodVerifier* verifier) {
 size_t Thread::NumberOfHeldMutexes() const {
   size_t count = 0;
   for (BaseMutex* mu : tlsPtr_.held_mutexes) {
-    count += static_cast<size_t>(mu != nullptr);
+    count += mu != nullptr ? 1 : 0;
   }
   return count;
 }
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index c000e61d20..a11d213ea3 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -43,7 +43,7 @@ enum ThreadState {
   kWaitingForMethodTracingStart,    // WAITING     TS_WAIT    waiting for method tracing to start
   kWaitingForVisitObjects,          // WAITING     TS_WAIT    waiting for visiting objects
   kWaitingForGetObjectsAllocated,   // WAITING     TS_WAIT    waiting for getting the number of allocated objects
-  kWaitingWeakRootRead,             // WAITING     TS_WAIT    waiting to read a weak root
+  kWaitingWeakGcRootRead,           // WAITING     TS_WAIT    waiting on the GC to read a weak root
   kStarting,                        // NEW         TS_WAIT    native thread started, not yet ready to run managed code
   kNative,                          // RUNNABLE    TS_RUNNING running in a JNI native method
   kSuspended,                       // RUNNABLE    TS_RUNNING suspended by GC or debugger
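The thread.cc hunk above rewrites the counting behind the CHECK_EQ(2u, self->NumberOfHeldMutexes()) seen in InternTable::Insert(). A hedged sketch of that counting; kLockLevelCount mirrors ART's per-lock-level array bound, but its value here is arbitrary for the sketch:

#include <cstddef>

struct BaseMutex {};

constexpr size_t kLockLevelCount = 16;          // arbitrary bound for the sketch
BaseMutex* held_mutexes[kLockLevelCount] = {};  // most recent mutex held per level

size_t NumberOfHeldMutexes() {
  size_t count = 0;
  for (BaseMutex* mu : held_mutexes) {
    count += mu != nullptr ? 1 : 0;  // the patched, more readable form
  }
  return count;
}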