author     2021-08-07 19:44:20 +0000
committer  2021-08-07 19:44:20 +0000
commit     f955425d11ed4b8f6dc02530f0f5ab69edecd8d2 (patch)
tree       9f81ffa70d28a9021209bf508a052023e7495e2a
parent     0c3cc6350749a441fd54f8f3f67b7c69775000c8 (diff)
Revert "Handle suspend requests in getReferent()"
This reverts commit 0c3cc6350749a441fd54f8f3f67b7c69775000c8.
Reason for revert: Seems to break tests, e.g. https://ci.chromium.org/ui/p/art/builders/ci/host-x86-cms/6824/overview
Change-Id: Id75641d28be218055eca07c99ab2e1bfd579fe71
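For context, the reverted change (0c3cc635) made the waits in ReferenceProcessor bounded: instead of sleeping indefinitely on condition_ while holding reference_processor_lock_, a waiter woke up every 10 ms and, on timeout, temporarily dropped the lock, moved to a suspended state, and slept briefly so checkpoint and suspend requests could be serviced (see the removed TimedWaitHoldingLocks() path in the diff below). The following is a minimal standalone sketch of that timed-wait-and-back-off pattern, built on standard C++ primitives rather than ART's Mutex/ConditionVariable classes; the processing_refs flag and RespondToSuspendRequests() hook are hypothetical stand-ins, not ART APIs.

// Simplified sketch (not ART code) of the timed-wait-and-back-off pattern
// used by the reverted commit. processing_refs and RespondToSuspendRequests()
// are hypothetical stand-ins for ART's slow-path state and suspend handling.
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex reference_processor_lock;  // stands in for Locks::reference_processor_lock_
std::condition_variable condition;    // stands in for ReferenceProcessor::condition_
bool processing_refs = true;          // stands in for "references are being processed"

void RespondToSuspendRequests() {
  // Placeholder: in ART this is where the thread, now suspended and no longer
  // holding the lock, could service checkpoint and suspend requests.
  std::this_thread::sleep_for(std::chrono::microseconds(100));
}

void WaitUntilDoneProcessingReferences() {
  std::unique_lock<std::mutex> lock(reference_processor_lock);
  bool warned = false;
  while (processing_refs) {
    // Bounded wait: wake up after 10 ms even if nobody signalled.
    if (condition.wait_for(lock, std::chrono::milliseconds(10)) ==
        std::cv_status::timeout) {
      // Timed out: drop the lock so suspend/checkpoint requests can run,
      // then re-acquire and re-check the condition.
      lock.unlock();
      if (!warned) {
        std::cerr << "Long wait for reference processor.\n";
        warned = true;
      }
      RespondToSuspendRequests();
      lock.lock();
    }
  }
}

int main() {
  std::thread signaller([] {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    {
      std::lock_guard<std::mutex> guard(reference_processor_lock);
      processing_refs = false;
    }
    condition.notify_all();
  });
  WaitUntilDoneProcessingReferences();
  signaller.join();
  return 0;
}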
-rw-r--r--  runtime/base/mutex.cc              |  6
-rw-r--r--  runtime/base/mutex.h               |  2
-rw-r--r--  runtime/gc/reference_processor.cc  | 30
-rw-r--r--  runtime/gc/reference_processor.h   |  4

4 files changed, 9 insertions, 33 deletions
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index a5fb40d622..6574ec0db6 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -1105,14 +1105,10 @@ void ConditionVariable::WaitHoldingLocks(Thread* self) {
 }
 
 bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
-  guard_.CheckSafeToWait(self);
-  return TimedWaitHoldingLocks(self, ms, ns);
-}
-
-bool ConditionVariable::TimedWaitHoldingLocks(Thread* self, int64_t ms, int32_t ns) {
   DCHECK(self == nullptr || self == Thread::Current());
   bool timed_out = false;
   guard_.AssertExclusiveHeld(self);
+  guard_.CheckSafeToWait(self);
   unsigned int old_recursion_count = guard_.recursion_count_;
 #if ART_USE_FUTEXES
   timespec rel_ts;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d4fb7786ab..8f2a8eac39 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -465,13 +465,11 @@ class ConditionVariable {
   // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
   // pointer copy, thereby defeating annotalysis.
   void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
-  // Returns true on timeout.
   bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
   // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
   // when waiting.
   // TODO: remove this.
   void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
-  bool TimedWaitHoldingLocks(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
 
   void CheckSafeToWait(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
     if (kDebugLocking) {
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index ea0ea4b829..e34d140db4 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -147,12 +147,15 @@ ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
         }
       }
     }
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
     if (!started_trace) {
       ATraceBegin("GetReferent blocked");
       started_trace = true;
       start_millis = MilliTime();
     }
-    WaitUntilDoneProcessingReferences(self);
+    condition_.WaitHoldingLocks(self);
   }
   if (started_trace) {
     finish_trace(start_millis);
@@ -377,34 +380,13 @@ void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
 }
 
 void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
-  // Wait until we are done processing references.
-  // TODO: We must hold reference_processor_lock_ to wait, and we cannot release and reacquire
-  // the mutator lock while we hold it. But we shouldn't remain runnable while we're asleep.
-  // Is there a way to do this more cleanly if we release the mutator lock in the condvar
-  // implementation? Without such a fix, we still need to be careful that we only very rarely
-  // need checkpoint or suspend requests to be serviced while we're waiting here; waiting for
-  // a timeout is better than a deadlock, but not cheap. See b/195664026 .
-  bool warned = false;
+  // Wait until we are done processing reference.
   while ((!kUseReadBarrier && SlowPathEnabled()) ||
          (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
     // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
     // presence of threads blocking for weak ref access.
     self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
-    if (condition_.TimedWaitHoldingLocks(self, /*ms=*/ 10, /*nsec=*/ 0)) {
-      // Timed out.
-      // We should rarely get here. If we do, temporarily release reference_processor_lock_ and
-      // mutator lock, so we can respond to checkpoint and suspend requests.
-      Locks::reference_processor_lock_->ExclusiveUnlock(self);
-      {
-        ScopedThreadSuspension sts(self, ThreadState::kSuspended);
-        if (!warned) {
-          LOG(WARNING) << "Long wait for reference processor.";
-          warned = true;
-        }
-        usleep(100);
-      }
-      Locks::reference_processor_lock_->ExclusiveLock(self);
-    }
+    condition_.WaitHoldingLocks(self);
   }
 }
 
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 8ea7bb1297..54de5cc572 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -58,7 +58,7 @@ class ReferenceProcessor {
   // GetReferent fast path as an optimization.
   void EnableSlowPath() REQUIRES_SHARED(Locks::mutator_lock_);
   void BroadcastForSlowPath(Thread* self);
-  // Decode the referent, may block and allow suspension if references are being processed.
+  // Decode the referent, may block if references are being processed.
   ObjPtr<mirror::Object> GetReferent(Thread* self, ObjPtr<mirror::Reference> reference)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_);
   // Collects the cleared references and returns a task, to be executed after FinishGC, that will
@@ -89,7 +89,7 @@ class ReferenceProcessor {
   // referents.
   void StartPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_);
   void StopPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_);
-  // Wait until reference processing is done. May temporarily release both required locks.
+  // Wait until reference processing is done.
   void WaitUntilDoneProcessingReferences(Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::reference_processor_lock_);
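What the revert restores is the simpler shape visible in the reference_processor.cc hunks above: an unbounded WaitHoldingLocks() on condition_ while reference_processor_lock_ is held, with only the empty-checkpoint check run before blocking. Below is a minimal, self-contained sketch of that restored loop, again on standard C++ primitives; CheckEmptyCheckpoint() and processing_refs are hypothetical stand-ins, not ART APIs.

// Simplified sketch (not ART code) of the restored wait loop: block on the
// condition variable with no timeout and no temporary lock release.
// CheckEmptyCheckpoint() is a hypothetical stand-in for
// Thread::CheckEmptyCheckpointFromWeakRefAccess().
#include <condition_variable>
#include <mutex>

std::mutex reference_processor_lock;  // stands in for Locks::reference_processor_lock_
std::condition_variable condition;    // stands in for ReferenceProcessor::condition_
bool processing_refs = true;          // stands in for "references are being processed"

void CheckEmptyCheckpoint() {
  // Placeholder: run the empty checkpoint so threads blocked here do not
  // hold up a checkpoint that only needs every thread to pass this point.
}

// Caller must already hold reference_processor_lock, mirroring the
// REQUIRES(Locks::reference_processor_lock_) annotation on the real method.
void WaitUntilDoneProcessingReferences(std::unique_lock<std::mutex>& lock) {
  while (processing_refs) {
    // Check and run the empty checkpoint before blocking, as in the diff.
    CheckEmptyCheckpoint();
    condition.wait(lock);  // unbounded wait; wakes only when notified
  }
}

The cost of this simplicity, described in the TODO comment that the revert removes, is that a thread parked here stays runnable and cannot service suspend or checkpoint requests until it is woken; the reverted commit addressed that with the timed wait, but was rolled back because it appeared to break CI.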