-rw-r--r--  services/surfaceflinger/Scheduler/DispSync.cpp               | 62
-rw-r--r--  services/surfaceflinger/Scheduler/DispSync.h                  | 21
-rw-r--r--  services/surfaceflinger/Scheduler/Scheduler.cpp               |  8
-rw-r--r--  services/surfaceflinger/Scheduler/Scheduler.h                 | 13
-rw-r--r--  services/surfaceflinger/Scheduler/VSyncModulator.h            | 19
-rw-r--r--  services/surfaceflinger/SurfaceFlinger.cpp                    | 62
-rw-r--r--  services/surfaceflinger/SurfaceFlinger.h                      | 10
-rw-r--r--  services/surfaceflinger/tests/unittests/mock/MockDispSync.h  |  1
8 files changed, 140 insertions, 56 deletions
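Before the per-file hunks, here is a minimal standalone sketch of the behavior the DispSync changes below introduce: setPeriod() now only stages a pending period, and that pending period is flushed into the model (and reported back through the new periodFlushed out-parameter) once the observed hardware vsync interval matches it. The PendingPeriodTracker type and the onObservedVsync() helper are illustrative names only; they are not part of the patch, and the real DispSync keeps this state under its mutex together with the resync-sample bookkeeping.

// Illustrative sketch only -- not the real DispSync class.
#include <cstdint>
#include <cstdlib>

using nsecs_t = int64_t;

struct PendingPeriodTracker {
    nsecs_t mPeriod = 0;          // period currently driving the vsync model
    nsecs_t mIntendedPeriod = 0;  // period we believe the hardware is switching to
    nsecs_t mPendingPeriod = 0;   // requested period, not yet confirmed by hardware

    // Mirrors the new setPeriod(): record the request, do not reset the model.
    void setPeriod(nsecs_t period) {
        const bool pendingPeriodShouldChange =
                period != mIntendedPeriod || (period == mIntendedPeriod && mPendingPeriod != 0);
        if (pendingPeriodShouldChange) {
            mPendingPeriod = period;
        }
    }

    // Called with the interval observed between two hardware vsync samples.
    // Returns true when the pending period is flushed into the model, which is
    // what addResyncSample() reports through *periodFlushed.
    bool onObservedVsync(nsecs_t observedVsync) {
        if (mPendingPeriod == 0) {
            return false;
        }
        if (std::abs(observedVsync - mPendingPeriod) <= std::abs(observedVsync - mIntendedPeriod)) {
            mIntendedPeriod = mPendingPeriod;
            mPeriod = mPendingPeriod;
            mPendingPeriod = 0;
            return true;
        }
        return false;
    }
};

In the patch itself this signal surfaces as the periodFlushed out-parameter of DispSync::addResyncSample(), which SurfaceFlinger::onVsyncReceived() uses to call mVsyncModulator.onRefreshRateChangeCompleted().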
diff --git a/services/surfaceflinger/Scheduler/DispSync.cpp b/services/surfaceflinger/Scheduler/DispSync.cpp
index cd6fa41940..95ff9d0c73 100644
--- a/services/surfaceflinger/Scheduler/DispSync.cpp
+++ b/services/surfaceflinger/Scheduler/DispSync.cpp
@@ -79,11 +79,7 @@ public:
         Mutex::Autolock lock(mMutex);
         mPhase = phase;
-        if (mReferenceTime != referenceTime) {
-            for (auto& eventListener : mEventListeners) {
-                eventListener.mHasFired = false;
-            }
-        }
+        const bool referenceTimeChanged = mReferenceTime != referenceTime;
         mReferenceTime = referenceTime;
 
         if (mPeriod != 0 && mPeriod != period && mReferenceTime != 0) {
             // Inflate the reference time to be the most recent predicted
@@ -94,6 +90,13 @@ public:
             mReferenceTime = mReferenceTime + (numOldPeriods)*mPeriod;
         }
         mPeriod = period;
+        if (!mModelLocked && referenceTimeChanged) {
+            for (auto& eventListener : mEventListeners) {
+                eventListener.mHasFired = false;
+                eventListener.mLastEventTime =
+                        mReferenceTime - mPeriod + mPhase + eventListener.mPhase;
+            }
+        }
         if (mTraceDetailedInfo) {
             ATRACE_INT64("DispSync:Period", mPeriod);
             ATRACE_INT64("DispSync:Phase", mPhase + mPeriod / 2);
@@ -115,11 +118,20 @@ public:
     void lockModel() {
         Mutex::Autolock lock(mMutex);
         mModelLocked = true;
+        ATRACE_INT("DispSync:ModelLocked", mModelLocked);
     }
 
     void unlockModel() {
         Mutex::Autolock lock(mMutex);
+        if (mModelLocked) {
+            for (auto& eventListener : mEventListeners) {
+                if (eventListener.mLastEventTime > mReferenceTime) {
+                    eventListener.mHasFired = true;
+                }
+            }
+        }
         mModelLocked = false;
+        ATRACE_INT("DispSync:ModelLocked", mModelLocked);
     }
 
     virtual bool threadLoop() {
@@ -247,6 +259,10 @@ public:
             listener.mLastCallbackTime = lastCallbackTime;
         }
 
+        if (!mModelLocked && listener.mLastEventTime > mReferenceTime) {
+            listener.mHasFired = true;
+        }
+
         mEventListeners.push_back(listener);
 
         mCond.signal();
@@ -493,7 +509,6 @@ void DispSync::init(bool hasSyncFramework, int64_t dispSyncPresentTimeOffset) {
         ALOGE("Couldn't set SCHED_FIFO for DispSyncThread");
     }
 
-    reset();
     beginResync();
 
     if (mTraceDetailedInfo && kEnableZeroPhaseTracer) {
@@ -545,17 +560,15 @@ bool DispSync::addPresentFence(const std::shared_ptr<FenceTime>& fenceTime) {
 
 void DispSync::beginResync() {
     Mutex::Autolock lock(mMutex);
     ALOGV("[%s] beginResync", mName);
-    mThread->unlockModel();
-    mModelUpdated = false;
-    mNumResyncSamples = 0;
+    resetLocked();
 }
 
-bool DispSync::addResyncSample(nsecs_t timestamp, bool* periodChanged) {
+bool DispSync::addResyncSample(nsecs_t timestamp, bool* periodFlushed) {
     Mutex::Autolock lock(mMutex);
     ALOGV("[%s] addResyncSample(%" PRId64 ")", mName, ns2us(timestamp));
 
-    *periodChanged = false;
+    *periodFlushed = false;
     const size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
     mResyncSamples[idx] = timestamp;
     if (mNumResyncSamples == 0) {
@@ -569,16 +582,20 @@ bool DispSync::addResyncSample(nsecs_t timestamp, bool* periodChanged) {
         const nsecs_t lastTimestamp = mResyncSamples[priorIdx];
 
         const nsecs_t observedVsync = std::abs(timestamp - lastTimestamp);
-        if (std::abs(observedVsync - mPendingPeriod) < std::abs(observedVsync - mPeriod)) {
-            // Observed vsync is closer to the pending period, so reset the
-            // model and flush the pending period.
+        if (std::abs(observedVsync - mPendingPeriod) <= std::abs(observedVsync - mIntendedPeriod)) {
+            // Either the observed vsync is closer to the pending period, (and
+            // thus we detected a period change), or the period change will
+            // no-op. In either case, reset the model and flush the pending
+            // period.
             resetLocked();
+            mIntendedPeriod = mPendingPeriod;
             mPeriod = mPendingPeriod;
             mPendingPeriod = 0;
             if (mTraceDetailedInfo) {
                 ATRACE_INT("DispSync:PendingPeriod", mPendingPeriod);
+                ATRACE_INT("DispSync:IntendedPeriod", mIntendedPeriod);
             }
-            *periodChanged = true;
+            *periodFlushed = true;
         }
     }
     // Always update the reference time with the most recent timestamp.
@@ -609,6 +626,7 @@ bool DispSync::addResyncSample(nsecs_t timestamp, bool* periodChanged) {
     bool modelLocked = mModelUpdated && mError < (kErrorThreshold / 2) && mPendingPeriod == 0;
     ALOGV("[%s] addResyncSample returning %s", mName, modelLocked ? "locked" : "unlocked");
     if (modelLocked) {
+        *periodFlushed = true;
         mThread->lockModel();
     }
     return !modelLocked;
@@ -643,10 +661,17 @@ status_t DispSync::changePhaseOffset(Callback* callback, nsecs_t phase) {
 
 void DispSync::setPeriod(nsecs_t period) {
     Mutex::Autolock lock(mMutex);
+
+    const bool pendingPeriodShouldChange =
+            period != mIntendedPeriod || (period == mIntendedPeriod && mPendingPeriod != 0);
+
+    if (pendingPeriodShouldChange) {
+        mPendingPeriod = period;
+    }
     if (mTraceDetailedInfo) {
-        ATRACE_INT("DispSync:PendingPeriod", period);
+        ATRACE_INT("DispSync:IntendedPeriod", mIntendedPeriod);
+        ATRACE_INT("DispSync:PendingPeriod", mPendingPeriod);
     }
-    mPendingPeriod = period;
 }
 
 nsecs_t DispSync::getPeriod() {
@@ -764,6 +789,9 @@ void DispSync::resetErrorLocked() {
     mPresentSampleOffset = 0;
     mError = 0;
     mZeroErrSamplesCount = 0;
+    if (mTraceDetailedInfo) {
+        ATRACE_INT64("DispSync:Error", mError);
+    }
     for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
         mPresentFences[i] = FenceTime::NO_FENCE;
     }
diff --git a/services/surfaceflinger/Scheduler/DispSync.h b/services/surfaceflinger/Scheduler/DispSync.h
index 8f8b8e7a99..3e33c7edc0 100644
--- a/services/surfaceflinger/Scheduler/DispSync.h
+++ b/services/surfaceflinger/Scheduler/DispSync.h
@@ -49,7 +49,7 @@ public:
     virtual void reset() = 0;
    virtual bool addPresentFence(const std::shared_ptr<FenceTime>&) = 0;
    virtual void beginResync() = 0;
-    virtual bool addResyncSample(nsecs_t timestamp, bool* periodChanged) = 0;
+    virtual bool addResyncSample(nsecs_t timestamp, bool* periodFlushed) = 0;
    virtual void endResync() = 0;
    virtual void setPeriod(nsecs_t period) = 0;
    virtual nsecs_t getPeriod() = 0;
@@ -120,17 +120,19 @@ public:
     // from the hardware vsync events.
     void beginResync() override;
     // Adds a vsync sample to the dispsync model. The timestamp is the time
-    // of the vsync event that fired. periodChanged will return true if the
+    // of the vsync event that fired. periodFlushed will return true if the
     // vsync period was detected to have changed to mPendingPeriod.
     //
     // This method will return true if more vsync samples are needed to lock
     // down the DispSync model, and false otherwise.
-    bool addResyncSample(nsecs_t timestamp, bool* periodChanged) override;
+    // periodFlushed will be set to true if mPendingPeriod is flushed to
+    // mIntendedPeriod, and false otherwise.
+    bool addResyncSample(nsecs_t timestamp, bool* periodFlushed) override;
     void endResync() override;
 
     // The setPeriod method sets the vsync event model's period to a specific
-    // value. This should be used to prime the model when a display is first
-    // turned on. It should NOT be used after that.
+    // value. This should be used to prime the model when a display is first
+    // turned on, or when a refresh rate change is requested.
     void setPeriod(nsecs_t period) override;
 
     // The getPeriod method returns the current vsync period.
@@ -205,6 +207,11 @@ private:
     // nanoseconds.
     nsecs_t mPeriod;
 
+    // mIntendedPeriod is the intended period of the modeled vsync events in
+    // nanoseconds. Under ideal conditions this should be similar if not the
+    // same as mPeriod, plus or minus an observed error.
+    nsecs_t mIntendedPeriod = 0;
+
     // mPendingPeriod is the proposed period change in nanoseconds.
     // If mPendingPeriod differs from mPeriod and is nonzero, it will
     // be flushed to mPeriod when we detect that the hardware switched
@@ -236,8 +243,8 @@ private:
     // process to store information about the hardware vsync event times used
     // to compute the model.
     nsecs_t mResyncSamples[MAX_RESYNC_SAMPLES] = {0};
-    size_t mFirstResyncSample;
-    size_t mNumResyncSamples;
+    size_t mFirstResyncSample = 0;
+    size_t mNumResyncSamples = 0;
     int mNumResyncSamplesSincePresent;
 
     // These member variables store information about the present fences used
diff --git a/services/surfaceflinger/Scheduler/Scheduler.cpp b/services/surfaceflinger/Scheduler/Scheduler.cpp
index 513436a270..e2a348e146 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.cpp
+++ b/services/surfaceflinger/Scheduler/Scheduler.cpp
@@ -280,13 +280,13 @@ void Scheduler::setVsyncPeriod(const nsecs_t period) {
     }
 }
 
-void Scheduler::addResyncSample(const nsecs_t timestamp, bool* periodChanged) {
+void Scheduler::addResyncSample(const nsecs_t timestamp, bool* periodFlushed) {
     bool needsHwVsync = false;
-    *periodChanged = false;
+    *periodFlushed = false;
     { // Scope for the lock
         std::lock_guard<std::mutex> lock(mHWVsyncLock);
         if (mPrimaryHWVsyncEnabled) {
-            needsHwVsync = mPrimaryDispSync->addResyncSample(timestamp, periodChanged);
+            needsHwVsync = mPrimaryDispSync->addResyncSample(timestamp, periodFlushed);
         }
     }
 
@@ -415,7 +415,7 @@ void Scheduler::resetKernelTimerCallback() {
     ATRACE_INT("ExpiredKernelIdleTimer", 0);
     std::lock_guard<std::mutex> lock(mCallbackLock);
     if (mGetVsyncPeriod) {
-        resyncToHardwareVsync(false, mGetVsyncPeriod());
+        resyncToHardwareVsync(true, mGetVsyncPeriod());
     }
 }
 
diff --git a/services/surfaceflinger/Scheduler/Scheduler.h b/services/surfaceflinger/Scheduler/Scheduler.h
index 96d4bd5214..d311f62fdb 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.h
+++ b/services/surfaceflinger/Scheduler/Scheduler.h
@@ -135,13 +135,18 @@ public:
 
     void enableHardwareVsync();
     void disableHardwareVsync(bool makeUnavailable);
+    // Resyncs the scheduler to hardware vsync.
+    // If makeAvailable is true, then hardware vsync will be turned on.
+    // Otherwise, if hardware vsync is not already enabled then this method will
+    // no-op.
+    // The period is the vsync period from the current display configuration.
     void resyncToHardwareVsync(bool makeAvailable, nsecs_t period);
     // Creates a callback for resyncing.
     ResyncCallback makeResyncCallback(GetVsyncPeriod&& getVsyncPeriod);
     void setRefreshSkipCount(int count);
-    // Passes a vsync sample to DispSync. periodChange will be true if DipSync
-    // detected that the vsync period changed, and false otherwise.
-    void addResyncSample(const nsecs_t timestamp, bool* periodChanged);
+    // Passes a vsync sample to DispSync. periodFlushed will be true if
+    // DispSync detected that the vsync period changed, and false otherwise.
+    void addResyncSample(const nsecs_t timestamp, bool* periodFlushed);
     void addPresentFence(const std::shared_ptr<FenceTime>& fenceTime);
     void setIgnorePresentFences(bool ignore);
     nsecs_t expectedPresentTime();
@@ -292,7 +297,7 @@ private:
     const scheduler::RefreshRateConfigs& mRefreshRateConfigs;
 
     // Global config to force HDR content to work on DEFAULT refreshRate
-    static constexpr bool mForceHDRContentToDefaultRefreshRate = true;
+    static constexpr bool mForceHDRContentToDefaultRefreshRate = false;
 };
 
 } // namespace android
diff --git a/services/surfaceflinger/Scheduler/VSyncModulator.h b/services/surfaceflinger/Scheduler/VSyncModulator.h
index 81a7864cdb..41c3a3a605 100644
--- a/services/surfaceflinger/Scheduler/VSyncModulator.h
+++ b/services/surfaceflinger/Scheduler/VSyncModulator.h
@@ -35,6 +35,11 @@ private:
     // sending new transactions.
     const int MIN_EARLY_FRAME_COUNT_TRANSACTION = 2;
 
+    // Number of frames we'll keep the early gl phase offsets once they are activated.
+    // This acts as a low-pass filter to avoid scenarios where we rapidly
+    // switch in and out of gl composition.
+    const int MIN_EARLY_GL_FRAME_COUNT_TRANSACTION = 2;
+
 public:
     struct Offsets {
         nsecs_t sf;
@@ -116,7 +121,7 @@ public:
 
     // Called when we detect from vsync signals that the refresh rate changed.
     // This way we can move out of early offsets if no longer necessary.
-    void onRefreshRateChangeDetected() {
+    void onRefreshRateChangeCompleted() {
         if (!mRefreshRateChangePending) {
             return;
         }
@@ -130,10 +135,14 @@ public:
             mRemainingEarlyFrameCount--;
             updateOffsetsNeeded = true;
         }
-        if (usedRenderEngine != mLastFrameUsedRenderEngine) {
-            mLastFrameUsedRenderEngine = usedRenderEngine;
+        if (usedRenderEngine) {
+            mRemainingRenderEngineUsageCount = MIN_EARLY_GL_FRAME_COUNT_TRANSACTION;
+            updateOffsetsNeeded = true;
+        } else if (mRemainingRenderEngineUsageCount > 0) {
+            mRemainingRenderEngineUsageCount--;
             updateOffsetsNeeded = true;
         }
+
         if (updateOffsetsNeeded) {
             updateOffsets();
         }
@@ -145,7 +154,7 @@
         if (mTransactionStart == Scheduler::TransactionStart::EARLY || mRemainingEarlyFrameCount > 0 || mRefreshRateChangePending) {
             return mEarlyOffsets;
-        } else if (mLastFrameUsedRenderEngine) {
+        } else if (mRemainingRenderEngineUsageCount > 0) {
             return mEarlyGlOffsets;
         } else {
             return mLateOffsets;
         }
@@ -195,9 +204,9 @@ private:
 
     std::atomic<Scheduler::TransactionStart> mTransactionStart =
             Scheduler::TransactionStart::NORMAL;
-    std::atomic<bool> mLastFrameUsedRenderEngine = false;
     std::atomic<bool> mRefreshRateChangePending = false;
     std::atomic<int> mRemainingEarlyFrameCount = 0;
+    std::atomic<int> mRemainingRenderEngineUsageCount = 0;
 };
 
 } // namespace android
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index dd75868443..87c1bf9afa 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -935,9 +935,11 @@ void SurfaceFlinger::setDesiredActiveConfig(const ActiveConfigInfo& info) {
         // Start receiving vsync samples now, so that we can detect a period
         // switch.
         mScheduler->resyncToHardwareVsync(true, getVsyncPeriod());
+        // As we called to set period, we will call to onRefreshRateChangeCompleted once
+        // DispSync model is locked.
+        mVsyncModulator.onRefreshRateChangeInitiated();
         mPhaseOffsets->setRefreshRateType(info.type);
         const auto [early, gl, late] = mPhaseOffsets->getCurrentOffsets();
-        mVsyncModulator.onRefreshRateChangeInitiated();
         mVsyncModulator.setPhaseOffsets(early, gl, late);
     }
     mDesiredActiveConfigChanged = true;
@@ -970,7 +972,6 @@ void SurfaceFlinger::setActiveConfigInternal() {
 
     display->setActiveConfig(mUpcomingActiveConfig.configId);
 
-    mScheduler->resyncToHardwareVsync(true, getVsyncPeriod());
     mPhaseOffsets->setRefreshRateType(mUpcomingActiveConfig.type);
     const auto [early, gl, late] = mPhaseOffsets->getCurrentOffsets();
     mVsyncModulator.setPhaseOffsets(early, gl, late);
@@ -982,6 +983,18 @@ void SurfaceFlinger::setActiveConfigInternal() {
     }
 }
 
+void SurfaceFlinger::desiredActiveConfigChangeDone() {
+    std::lock_guard<std::mutex> lock(mActiveConfigLock);
+    mDesiredActiveConfig.event = Scheduler::ConfigEvent::None;
+    mDesiredActiveConfigChanged = false;
+    ATRACE_INT("DesiredActiveConfigChanged", mDesiredActiveConfigChanged);
+
+    mScheduler->resyncToHardwareVsync(true, getVsyncPeriod());
+    mPhaseOffsets->setRefreshRateType(mUpcomingActiveConfig.type);
+    const auto [early, gl, late] = mPhaseOffsets->getCurrentOffsets();
+    mVsyncModulator.setPhaseOffsets(early, gl, late);
+}
+
 bool SurfaceFlinger::performSetActiveConfig() {
     ATRACE_CALL();
     if (mCheckPendingFence) {
@@ -1011,10 +1024,7 @@ bool SurfaceFlinger::performSetActiveConfig() {
     if (!display || display->getActiveConfig() == desiredActiveConfig.configId) {
         // display is not valid or we are already in the requested mode
         // on both cases there is nothing left to do
-        std::lock_guard<std::mutex> lock(mActiveConfigLock);
-        mDesiredActiveConfig.event = Scheduler::ConfigEvent::None;
-        mDesiredActiveConfigChanged = false;
-        ATRACE_INT("DesiredActiveConfigChanged", mDesiredActiveConfigChanged);
+        desiredActiveConfigChangeDone();
         return false;
     }
 
@@ -1022,11 +1032,7 @@ bool SurfaceFlinger::performSetActiveConfig() {
     // allowed configs might have change by the time we process the refresh.
     // Make sure the desired config is still allowed
     if (!isDisplayConfigAllowed(desiredActiveConfig.configId)) {
-        std::lock_guard<std::mutex> lock(mActiveConfigLock);
-        mDesiredActiveConfig.event = Scheduler::ConfigEvent::None;
-        mDesiredActiveConfig.configId = display->getActiveConfig();
-        mDesiredActiveConfigChanged = false;
-        ATRACE_INT("DesiredActiveConfigChanged", mDesiredActiveConfigChanged);
+        desiredActiveConfigChangeDone();
         return false;
     }
     mUpcomingActiveConfig = desiredActiveConfig;
@@ -1448,10 +1454,10 @@ void SurfaceFlinger::onVsyncReceived(int32_t sequenceId, hwc2_display_t hwcDispl
         return;
     }
 
-    bool periodChanged = false;
-    mScheduler->addResyncSample(timestamp, &periodChanged);
-    if (periodChanged) {
-        mVsyncModulator.onRefreshRateChangeDetected();
+    bool periodFlushed = false;
+    mScheduler->addResyncSample(timestamp, &periodFlushed);
+    if (periodFlushed) {
+        mVsyncModulator.onRefreshRateChangeCompleted();
     }
 }
 
@@ -1524,10 +1530,25 @@ void SurfaceFlinger::onRefreshReceived(int sequenceId, hwc2_display_t /*hwcDispl
 
 void SurfaceFlinger::setPrimaryVsyncEnabled(bool enabled) {
     ATRACE_CALL();
-    Mutex::Autolock lock(mStateLock);
+
+    // Enable / Disable HWVsync from the main thread to avoid race conditions with
+    // display power state.
+    postMessageAsync(new LambdaMessage(
+            [=]() NO_THREAD_SAFETY_ANALYSIS { setPrimaryVsyncEnabledInternal(enabled); }));
+}
+
+void SurfaceFlinger::setPrimaryVsyncEnabledInternal(bool enabled) {
+    ATRACE_CALL();
+
     if (const auto displayId = getInternalDisplayIdLocked()) {
-        getHwComposer().setVsyncEnabled(*displayId,
-                                        enabled ? HWC2::Vsync::Enable : HWC2::Vsync::Disable);
+        sp<DisplayDevice> display = getDefaultDisplayDeviceLocked();
+        if (display && display->isPoweredOn()) {
+            getHwComposer().setVsyncEnabled(*displayId,
+                                            enabled ? HWC2::Vsync::Enable : HWC2::Vsync::Disable);
+        } else {
+            // Cache the latest vsync state and apply it when screen is on again
+            mEnableHWVsyncScreenOn = enabled;
+        }
     }
 }
 
@@ -4425,6 +4446,11 @@ void SurfaceFlinger::setPowerModeInternal(const sp<DisplayDevice>& display, int
         // Turn on the display
         getHwComposer().setPowerMode(*displayId, mode);
         if (display->isPrimary() && mode != HWC_POWER_MODE_DOZE_SUSPEND) {
+            if (mEnableHWVsyncScreenOn) {
+                setPrimaryVsyncEnabledInternal(mEnableHWVsyncScreenOn);
+                mEnableHWVsyncScreenOn = false;
+            }
+
             mScheduler->onScreenAcquired(mAppConnectionHandle);
             mScheduler->resyncToHardwareVsync(true, getVsyncPeriod());
         }
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index ddfe88c928..46fc4d2cd1 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -293,6 +293,9 @@ public:
     // TODO: this should be made accessible only to EventThread
     void setPrimaryVsyncEnabled(bool enabled);
 
+    // main thread function to enable/disable h/w composer event
+    void setPrimaryVsyncEnabledInternal(bool enabled);
+
     // called on the main thread by MessageQueue when an internal message
     // is received
     // TODO: this should be made accessible only to MessageQueue
@@ -508,13 +511,15 @@ private:
     // Sets the desired active config bit. It obtains the lock, and sets mDesiredActiveConfig.
     void setDesiredActiveConfig(const ActiveConfigInfo& info) REQUIRES(mStateLock);
     // Once HWC has returned the present fence, this sets the active config and a new refresh
-    // rate in SF. It also triggers HWC vsync.
+    // rate in SF.
     void setActiveConfigInternal() REQUIRES(mStateLock);
     // Active config is updated on INVALIDATE call in a state machine-like manner. When the
     // desired config was set, HWC needs to update the panel on the next refresh, and when
     // we receive the fence back, we know that the process was complete. It returns whether
     // we need to wait for the next invalidate
     bool performSetActiveConfig() REQUIRES(mStateLock);
+    // Called when active config is no longer is progress
+    void desiredActiveConfigChangeDone() REQUIRES(mStateLock);
     // called on the main thread in response to setPowerMode()
     void setPowerModeInternal(const sp<DisplayDevice>& display, int mode) REQUIRES(mStateLock);
 
@@ -1166,6 +1171,9 @@ private:
     // The Layer pointer is removed from the set when the destructor is called so there shouldn't
     // be any issues with a raw pointer referencing an invalid object.
     std::unordered_set<Layer*> mOffscreenLayers;
+
+    // Flag to indicate whether to re-enable HWVsync when screen is on
+    bool mEnableHWVsyncScreenOn = false;
 };
 
 } // namespace android
diff --git a/services/surfaceflinger/tests/unittests/mock/MockDispSync.h b/services/surfaceflinger/tests/unittests/mock/MockDispSync.h
index 12a349dd76..9ca116d735 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockDispSync.h
+++ b/services/surfaceflinger/tests/unittests/mock/MockDispSync.h
@@ -35,6 +35,7 @@ public:
     MOCK_METHOD0(endResync, void());
     MOCK_METHOD1(setPeriod, void(nsecs_t));
     MOCK_METHOD0(getPeriod, nsecs_t());
+    MOCK_METHOD0(getIntendedPeriod, nsecs_t());
     MOCK_METHOD1(setRefreshSkipCount, void(int));
     MOCK_CONST_METHOD1(computeNextRefresh, nsecs_t(int));
     MOCK_METHOD1(setIgnorePresentFences, void(bool));
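For the VSyncModulator side of the change, the new mRemainingRenderEngineUsageCount acts as a simple low-pass filter: the early-GL phase offsets stay active for MIN_EARLY_GL_FRAME_COUNT_TRANSACTION frames after the last frame that needed GL (client) composition, instead of toggling on every frame. A hedged sketch of just that counter logic follows; EarlyGlOffsetFilter, onRefreshed() and useEarlyGlOffsets() are hypothetical names used only for illustration, and kMinEarlyGlFrames simply mirrors the constant added in the patch.

// Illustrative sketch only -- not the real VSyncModulator API.
#include <algorithm>

class EarlyGlOffsetFilter {
public:
    static constexpr int kMinEarlyGlFrames = 2;  // mirrors MIN_EARLY_GL_FRAME_COUNT_TRANSACTION

    // Call once per refresh with whether GL (render engine) composition was used.
    void onRefreshed(bool usedRenderEngine) {
        if (usedRenderEngine) {
            mRemaining = kMinEarlyGlFrames;            // re-arm the hold-off window
        } else {
            mRemaining = std::max(0, mRemaining - 1);  // decay once per frame
        }
    }

    // While this returns true, the early-GL offsets would be selected.
    bool useEarlyGlOffsets() const { return mRemaining > 0; }

private:
    int mRemaining = 0;
};

In the patch, the equivalent state is consulted in VSyncModulator::getOffsets(), which prefers the early-GL offsets while the counter is nonzero and falls back to the late offsets otherwise.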