| author | 2023-04-19 17:04:04 -0400 |
|---|---|
| committer | 2023-04-27 10:02:37 -0400 |
| commit | b2253605afdb7496ed534d6f745359edbdb58cea (patch) |
| tree | 13a6bc81534825841cce9223b3fec9dd80cfdf24 |
| parent | 6c440aeb79fdadca718d34f6eca754589730afb0 (diff) |
Fix for potential deadlock in EventThread::onNewVsyncSchedule
I haven't seen this deadlock in practice, but looking over
I86e66df59c64e81c4aa721a25250697f61237488 and the potential deadlock
that inspired it, I think a similar problem could exist in
EventThread. As I understand it, this is partly because EventThread
does not unregister the old callback; that is addressed separately in
I3c1eccb36914f29560600d48bb08b1b8f2fe7c96.

Fix the potential deadlock using the same approach as
I86e66df59c64e81c4aa721a25250697f61237488: hold on to the old
registration until after the mutex has been released.
Bug: 279209321
Test: infeasible
Change-Id: If490f88115aa298d31aa9ad392a1f9f9dc987549
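
To make the shape of the fix concrete, here is a minimal, self-contained C++ sketch of the pattern the commit message describes. The names below (Registration, Owner, rebuild, rebuildLocked) are invented for illustration and are not the real SurfaceFlinger types; the point is only that the old registration is moved out of the critical section and destroyed after the mutex has been released, because its destructor may block on a callback that itself needs the same mutex.

```cpp
// Minimal sketch of the "destroy the old registration outside the lock"
// pattern. All names here are hypothetical, not the SurfaceFlinger classes.
#include <mutex>
#include <utility>

struct Registration {
    Registration() = default;
    Registration(Registration&&) = default;
    Registration& operator=(Registration&&) = default;
    // In the real code, ~VSyncCallbackRegistration() may block until an
    // in-flight callback (which can itself try to take the owner's mutex)
    // has finished running.
    ~Registration() { /* may block on callback teardown */ }
};

class Owner {
public:
    // Analogous to onNewVsyncSchedule(): takes no lock itself.
    void rebuild() {
        Registration oldRegistration = rebuildLocked();
        // oldRegistration is destroyed here, after rebuildLocked() has
        // already released mMutex, so the blocking destructor cannot
        // deadlock against a callback that needs mMutex.
    }

private:
    // Analogous to onNewVsyncScheduleInternal(): swaps in the new
    // registration under the lock and hands the old one back to the caller.
    Registration rebuildLocked() {
        std::lock_guard<std::mutex> lock(mMutex);
        return std::exchange(mRegistration, Registration{});
    }

    std::mutex mMutex;
    Registration mRegistration;
};

int main() {
    Owner owner;
    owner.rebuild();
    return 0;
}
```

In the actual change below, onNewVsyncSchedule plays the role of the unlocked wrapper and onNewVsyncScheduleInternal is the locked helper that returns the old VSyncCallbackRegistration.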
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | services/surfaceflinger/Scheduler/EventThread.cpp | 16 |
| -rw-r--r-- | services/surfaceflinger/Scheduler/EventThread.h | 7 |

2 files changed, 19 insertions, 4 deletions
```diff
diff --git a/services/surfaceflinger/Scheduler/EventThread.cpp b/services/surfaceflinger/Scheduler/EventThread.cpp
index 74665a70aa..1c0bf0d3d1 100644
--- a/services/surfaceflinger/Scheduler/EventThread.cpp
+++ b/services/surfaceflinger/Scheduler/EventThread.cpp
@@ -692,17 +692,27 @@ const char* EventThread::toCString(State state) {
 }
 
 void EventThread::onNewVsyncSchedule(std::shared_ptr<scheduler::VsyncSchedule> schedule) {
+    // Hold onto the old registration until after releasing the mutex to avoid deadlock.
+    scheduler::VSyncCallbackRegistration oldRegistration =
+            onNewVsyncScheduleInternal(std::move(schedule));
+}
+
+scheduler::VSyncCallbackRegistration EventThread::onNewVsyncScheduleInternal(
+        std::shared_ptr<scheduler::VsyncSchedule> schedule) {
     std::lock_guard<std::mutex> lock(mMutex);
     const bool reschedule = mVsyncRegistration.cancel() == scheduler::CancelResult::Cancelled;
     mVsyncSchedule = std::move(schedule);
-    mVsyncRegistration =
-            scheduler::VSyncCallbackRegistration(mVsyncSchedule->getDispatch(),
-                                                 createDispatchCallback(), mThreadName);
+    auto oldRegistration =
+            std::exchange(mVsyncRegistration,
+                          scheduler::VSyncCallbackRegistration(mVsyncSchedule->getDispatch(),
+                                                               createDispatchCallback(),
+                                                               mThreadName));
     if (reschedule) {
         mVsyncRegistration.schedule({.workDuration = mWorkDuration.get().count(),
                                      .readyDuration = mReadyDuration.count(),
                                      .earliestVsync = mLastVsyncCallbackTime.ns()});
     }
+    return oldRegistration;
 }
 
 scheduler::VSyncDispatch::Callback EventThread::createDispatchCallback() {
diff --git a/services/surfaceflinger/Scheduler/EventThread.h b/services/surfaceflinger/Scheduler/EventThread.h
index 30869e9cd8..684745b71b 100644
--- a/services/surfaceflinger/Scheduler/EventThread.h
+++ b/services/surfaceflinger/Scheduler/EventThread.h
@@ -174,7 +174,7 @@ public:
 
     size_t getEventThreadConnectionCount() override;
 
-    void onNewVsyncSchedule(std::shared_ptr<scheduler::VsyncSchedule>) override;
+    void onNewVsyncSchedule(std::shared_ptr<scheduler::VsyncSchedule>) override EXCLUDES(mMutex);
 
 private:
     friend EventThreadTest;
@@ -201,6 +201,11 @@ private:
 
     scheduler::VSyncDispatch::Callback createDispatchCallback();
 
+    // Returns the old registration so it can be destructed outside the lock to
+    // avoid deadlock.
+    scheduler::VSyncCallbackRegistration onNewVsyncScheduleInternal(
+            std::shared_ptr<scheduler::VsyncSchedule>) EXCLUDES(mMutex);
+
     const char* const mThreadName;
     TracedOrdinal<int> mVsyncTracer;
     TracedOrdinal<std::chrono::nanoseconds> mWorkDuration GUARDED_BY(mMutex);
```
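
The header hunk above also annotates both the public method and the new helper with EXCLUDES(mMutex), alongside the existing GUARDED_BY(mMutex) members. These are Clang thread-safety analysis annotations, enforced when building with -Wthread-safety. The sketch below is a rough illustration of what they express; the macro definitions follow the Clang documentation rather than the exact AOSP header, and the Mutex wrapper and Example class are hypothetical.

```cpp
// Rough sketch of Clang thread-safety annotations like the EXCLUDES(mMutex)
// and GUARDED_BY(mMutex) used in EventThread.h. Macro spellings follow the
// Clang -Wthread-safety documentation; AOSP gets equivalent macros from a
// shared header, and the types below are simplified stand-ins.
#include <mutex>

#if defined(__clang__)
#define THREAD_ANNOTATION(x) __attribute__((x))
#else
#define THREAD_ANNOTATION(x)  // other compilers ignore the annotations
#endif

#define CAPABILITY(x) THREAD_ANNOTATION(capability(x))
#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))

// A tiny annotated mutex so the analysis knows which calls acquire and
// release the capability.
class CAPABILITY("mutex") Mutex {
public:
    void lock() ACQUIRE() { mImpl.lock(); }
    void unlock() RELEASE() { mImpl.unlock(); }

private:
    std::mutex mImpl;
};

class Example {
public:
    // EXCLUDES(mLock): callers must not already hold mLock, because this
    // method takes it internally; clang -Wthread-safety warns on violations
    // it can see in annotated code.
    void update(int v) EXCLUDES(mLock) {
        mLock.lock();
        mValue = v;  // ok: mLock is held, satisfying GUARDED_BY(mLock)
        mLock.unlock();
    }

private:
    Mutex mLock;
    int mValue GUARDED_BY(mLock) = 0;
};

int main() {
    Example e;
    e.update(42);  // ok: the caller does not hold mLock
    return 0;
}
```

With EXCLUDES(mMutex) on onNewVsyncSchedule, clang can flag at compile time any annotated caller that invokes it while already holding mMutex, which would otherwise deadlock when the internal helper tries to take the lock itself.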