3 files changed, 86 insertions, 57 deletions
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
index f8cb3230af..5a5afd8fa6 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
@@ -219,6 +219,17 @@ bool VSyncPredictor::addVsyncTimestamp(nsecs_t timestamp) {
     return true;
 }
 
+auto VSyncPredictor::getVsyncSequenceLocked(nsecs_t timestamp) const -> VsyncSequence {
+    const auto vsync = nextAnticipatedVSyncTimeFromLocked(timestamp);
+    if (!mLastVsyncSequence) return {vsync, 0};
+
+    const auto [slope, _] = getVSyncPredictionModelLocked();
+    const auto [lastVsyncTime, lastVsyncSequence] = *mLastVsyncSequence;
+    const auto vsyncSequence = lastVsyncSequence +
+            static_cast<int64_t>(std::round((vsync - lastVsyncTime) / static_cast<float>(slope)));
+    return {vsync, vsyncSequence};
+}
+
 nsecs_t VSyncPredictor::nextAnticipatedVSyncTimeFromLocked(nsecs_t timePoint) const {
     auto const [slope, intercept] = getVSyncPredictionModelLocked();
 
@@ -258,12 +269,17 @@ nsecs_t VSyncPredictor::nextAnticipatedVSyncTimeFromLocked(nsecs_t timePoint) co
 nsecs_t VSyncPredictor::nextAnticipatedVSyncTimeFrom(nsecs_t timePoint) const {
     std::lock_guard lock(mMutex);
 
-    // TODO(b/246164114): This implementation is not efficient at all. Refactor.
-    nsecs_t nextVsync = nextAnticipatedVSyncTimeFromLocked(timePoint);
-    while (!isVSyncInPhaseLocked(nextVsync, mDivisor)) {
-        nextVsync = nextAnticipatedVSyncTimeFromLocked(nextVsync + 1);
+    // update the mLastVsyncSequence for reference point
+    mLastVsyncSequence = getVsyncSequenceLocked(timePoint);
+
+    const auto mod = mLastVsyncSequence->seq % mDivisor;
+    if (mod == 0) {
+        return mLastVsyncSequence->vsyncTime;
     }
-    return nextVsync;
+
+    auto const [slope, intercept] = getVSyncPredictionModelLocked();
+    const auto approximateNextVsync = mLastVsyncSequence->vsyncTime + slope * (mDivisor - mod);
+    return nextAnticipatedVSyncTimeFromLocked(approximateNextVsync - slope / 2);
 }
 
 /*
@@ -289,51 +305,16 @@ bool VSyncPredictor::isVSyncInPhaseLocked(nsecs_t timePoint, unsigned divisor) c
     ATRACE_FORMAT("%s timePoint in: %.2f divisor: %zu", __func__, getTimePointIn(now, timePoint),
                   divisor);
 
-    struct VsyncError {
-        nsecs_t vsyncTimestamp;
-        float error;
-
-        bool operator<(const VsyncError& other) const { return error < other.error; }
-    };
-
     if (divisor <= 1 || timePoint == 0) {
         return true;
     }
 
     const nsecs_t period = mRateMap[mIdealPeriod].slope;
     const nsecs_t justBeforeTimePoint = timePoint - period / 2;
-    const nsecs_t dividedPeriod = mIdealPeriod / divisor;
-
-    // If this is the first time we have asked about this divisor with the
-    // current vsync period, it is considered in phase and we store the closest
-    // vsync timestamp
-    const auto knownTimestampIter = mRateDivisorKnownTimestampMap.find(dividedPeriod);
-    if (knownTimestampIter == mRateDivisorKnownTimestampMap.end()) {
-        const auto vsync = nextAnticipatedVSyncTimeFromLocked(justBeforeTimePoint);
-        mRateDivisorKnownTimestampMap[dividedPeriod] = vsync;
-        ATRACE_FORMAT_INSTANT("(first) knownVsync in: %.2f", getTimePointIn(now, vsync));
-        return true;
-    }
-
-    // Find the next N vsync timestamp where N is the divisor.
-    // One of these vsyncs will be in phase. We return the one which is
-    // the most aligned with the last known in phase vsync
-    std::vector<VsyncError> vsyncs(static_cast<size_t>(divisor));
-    const nsecs_t knownVsync = knownTimestampIter->second;
-    nsecs_t point = justBeforeTimePoint;
-    for (size_t i = 0; i < divisor; i++) {
-        const nsecs_t vsync = nextAnticipatedVSyncTimeFromLocked(point);
-        const auto numPeriods = static_cast<float>(vsync - knownVsync) / (period * divisor);
-        const auto error = std::abs(std::round(numPeriods) - numPeriods);
-        vsyncs[i] = {vsync, error};
-        point = vsync + 1;
-    }
-
-    const auto minVsyncError = std::min_element(vsyncs.begin(), vsyncs.end());
-    mRateDivisorKnownTimestampMap[dividedPeriod] = minVsyncError->vsyncTimestamp;
-    ATRACE_FORMAT_INSTANT("knownVsync in: %.2f",
-                          getTimePointIn(now, minVsyncError->vsyncTimestamp));
-    return std::abs(minVsyncError->vsyncTimestamp - timePoint) < period / 2;
+    const auto vsyncSequence = getVsyncSequenceLocked(justBeforeTimePoint);
+    ATRACE_FORMAT_INSTANT("vsync in: %.2f sequence: %" PRId64,
+                          getTimePointIn(now, vsyncSequence.vsyncTime), vsyncSequence.seq);
+    return vsyncSequence.seq % divisor == 0;
 }
 
 void VSyncPredictor::setDivisor(unsigned divisor) {
@@ -382,6 +363,7 @@ void VSyncPredictor::clearTimestamps() {
         mTimestamps.clear();
         mLastTimestampIndex = 0;
     }
+    mLastVsyncSequence.reset();
 }
 
 bool VSyncPredictor::needsMoreSamples() const {
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.h b/services/surfaceflinger/Scheduler/VSyncPredictor.h
index 305cdb0d56..d0e3098c5e 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.h
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.h
@@ -78,35 +78,37 @@ private:
     inline void traceInt64If(const char* name, int64_t value) const;
     inline void traceInt64(const char* name, int64_t value) const;
 
-    bool const mTraceOn;
-
-    size_t const kHistorySize;
-    size_t const kMinimumSamplesForPrediction;
-    size_t const kOutlierTolerancePercent;
-    std::mutex mutable mMutex;
 
     size_t next(size_t i) const REQUIRES(mMutex);
     bool validate(nsecs_t timestamp) const REQUIRES(mMutex);
-
     Model getVSyncPredictionModelLocked() const REQUIRES(mMutex);
-
     nsecs_t nextAnticipatedVSyncTimeFromLocked(nsecs_t timePoint) const REQUIRES(mMutex);
-
     bool isVSyncInPhaseLocked(nsecs_t timePoint, unsigned divisor) const REQUIRES(mMutex);
 
+    struct VsyncSequence {
+        nsecs_t vsyncTime;
+        int64_t seq;
+    };
+    VsyncSequence getVsyncSequenceLocked(nsecs_t timestamp) const REQUIRES(mMutex);
+
+    bool const mTraceOn;
+    size_t const kHistorySize;
+    size_t const kMinimumSamplesForPrediction;
+    size_t const kOutlierTolerancePercent;
+    std::mutex mutable mMutex;
+
     nsecs_t mIdealPeriod GUARDED_BY(mMutex);
     std::optional<nsecs_t> mKnownTimestamp GUARDED_BY(mMutex);
 
     // Map between ideal vsync period and the calculated model
     std::unordered_map<nsecs_t, Model> mutable mRateMap GUARDED_BY(mMutex);
 
-    // Map between the divided vsync period and the last known vsync timestamp
-    std::unordered_map<nsecs_t, nsecs_t> mutable mRateDivisorKnownTimestampMap GUARDED_BY(mMutex);
-
     size_t mLastTimestampIndex GUARDED_BY(mMutex) = 0;
     std::vector<nsecs_t> mTimestamps GUARDED_BY(mMutex);
 
     unsigned mDivisor GUARDED_BY(mMutex) = 1;
+
+    mutable std::optional<VsyncSequence> mLastVsyncSequence GUARDED_BY(mMutex);
 };
 
 } // namespace android::scheduler
diff --git a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
index 3095e8aa9a..48d39cf3f5 100644
--- a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
@@ -468,7 +468,7 @@ TEST_F(VSyncPredictorTest, isVSyncInPhase) {
     const auto maxPeriods = 15;
     for (int divisor = 1; divisor < maxDivisor; divisor++) {
         for (int i = 0; i < maxPeriods; i++) {
-            const bool expectedInPhase = (i % divisor) == 0;
+            const bool expectedInPhase = ((kMinimumSamplesForPrediction - 1 + i) % divisor) == 0;
             EXPECT_THAT(expectedInPhase,
                         tracker.isVSyncInPhase(mNow + i * mPeriod - bias,
                                                Fps::fromPeriodNsecs(divisor * mPeriod)))
@@ -478,6 +478,28 @@ TEST_F(VSyncPredictorTest, isVSyncInPhase) {
     }
 }
 
+TEST_F(VSyncPredictorTest, isVSyncInPhaseForDivisors) {
+    auto last = mNow;
+    for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+        EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(last + mPeriod));
+        mNow += mPeriod;
+        last = mNow;
+        tracker.addVsyncTimestamp(mNow);
+    }
+
+    EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mNow + mPeriod));
+
+    EXPECT_TRUE(tracker.isVSyncInPhase(mNow + 1 * mPeriod, Fps::fromPeriodNsecs(mPeriod * 2)));
+    EXPECT_FALSE(tracker.isVSyncInPhase(mNow + 2 * mPeriod, Fps::fromPeriodNsecs(mPeriod * 2)));
+    EXPECT_TRUE(tracker.isVSyncInPhase(mNow + 3 * mPeriod, Fps::fromPeriodNsecs(mPeriod * 2)));
+
+    EXPECT_FALSE(tracker.isVSyncInPhase(mNow + 5 * mPeriod, Fps::fromPeriodNsecs(mPeriod * 4)));
+    EXPECT_TRUE(tracker.isVSyncInPhase(mNow + 3 * mPeriod, Fps::fromPeriodNsecs(mPeriod * 4)));
+    EXPECT_FALSE(tracker.isVSyncInPhase(mNow + 4 * mPeriod, Fps::fromPeriodNsecs(mPeriod * 4)));
+    EXPECT_FALSE(tracker.isVSyncInPhase(mNow + 6 * mPeriod, Fps::fromPeriodNsecs(mPeriod * 4)));
+    EXPECT_TRUE(tracker.isVSyncInPhase(mNow + 7 * mPeriod, Fps::fromPeriodNsecs(mPeriod * 4)));
+}
+
 TEST_F(VSyncPredictorTest, inconsistentVsyncValueIsFlushedEventually) {
     EXPECT_TRUE(tracker.addVsyncTimestamp(600));
     EXPECT_TRUE(tracker.needsMoreSamples());
@@ -552,6 +574,29 @@ TEST_F(VSyncPredictorTest, setDivisorIsRespected) {
     EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 5100), Eq(mNow + 7 * mPeriod));
 }
 
+TEST_F(VSyncPredictorTest, setDivisorOfDivosorIsInPhase) {
+    auto last = mNow;
+    for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+        EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(last + mPeriod));
+        mNow += mPeriod;
+        last = mNow;
+        tracker.addVsyncTimestamp(mNow);
+    }
+
+    tracker.setDivisor(4);
+    EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mNow + 3 * mPeriod));
+    EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 3 * mPeriod), Eq(mNow + 7 * mPeriod));
+    EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 7 * mPeriod), Eq(mNow + 11 * mPeriod));
+
+    tracker.setDivisor(2);
+    EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mNow + 1 * mPeriod));
+    EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 1 * mPeriod), Eq(mNow + 3 * mPeriod));
+    EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 3 * mPeriod), Eq(mNow + 5 * mPeriod));
+    EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 5 * mPeriod), Eq(mNow + 7 * mPeriod));
+    EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 7 * mPeriod), Eq(mNow + 9 * mPeriod));
+    EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 9 * mPeriod), Eq(mNow + 11 * mPeriod));
+}
+
 } // namespace android::scheduler
 
 // TODO(b/129481165): remove the #pragma below and fix conversion issues
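Note: the core of this change is arithmetic on vsync sequence numbers instead of searching forward for an in-phase timestamp. The sketch below is an illustration only, not part of the patch: it mimics the math in getVsyncSequenceLocked, isVSyncInPhaseLocked and nextAnticipatedVSyncTimeFrom under the simplifying assumption of a perfectly periodic model (slope == period, intercept == 0). Names such as nextVsyncAfter, gLastVsyncSequence and kPeriod are stand-ins for the real locked helpers, fitted model and member state.

// Standalone illustration only. Assumes a perfectly periodic model; the real
// predictor uses a fitted linear model and holds mMutex around this state.
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <optional>

using nsecs_t = int64_t;

struct VsyncSequence {
    nsecs_t vsyncTime;
    int64_t seq;
};

namespace {
constexpr nsecs_t kPeriod = 16'666'667; // stand-in for the model slope (~60Hz)
std::optional<VsyncSequence> gLastVsyncSequence;

// First model vsync strictly after timePoint (simplified stand-in for
// nextAnticipatedVSyncTimeFromLocked).
nsecs_t nextVsyncAfter(nsecs_t timePoint) {
    return ((timePoint / kPeriod) + 1) * kPeriod;
}

// Mirrors getVsyncSequenceLocked: extend the last known (time, seq) pair by the
// rounded number of periods between it and the newly predicted vsync.
VsyncSequence getVsyncSequence(nsecs_t timestamp) {
    const nsecs_t vsync = nextVsyncAfter(timestamp);
    if (!gLastVsyncSequence) return {vsync, 0};
    const auto delta = static_cast<int64_t>(
            std::round((vsync - gLastVsyncSequence->vsyncTime) / static_cast<float>(kPeriod)));
    return {vsync, gLastVsyncSequence->seq + delta};
}

// Mirrors the new isVSyncInPhaseLocked: a vsync is in phase for a divisor when
// its sequence number is a multiple of that divisor.
bool isVSyncInPhase(nsecs_t timePoint, int64_t divisor) {
    if (divisor <= 1 || timePoint == 0) return true;
    const nsecs_t justBefore = timePoint - kPeriod / 2;
    return getVsyncSequence(justBefore).seq % divisor == 0;
}

// Mirrors the new nextAnticipatedVSyncTimeFrom: if the predicted vsync is not a
// multiple of the divisor, jump ahead by whole periods to the one that is.
nsecs_t nextAnticipatedVSyncTimeFrom(nsecs_t timePoint, int64_t divisor) {
    gLastVsyncSequence = getVsyncSequence(timePoint);
    const int64_t mod = gLastVsyncSequence->seq % divisor;
    if (mod == 0) return gLastVsyncSequence->vsyncTime;
    const nsecs_t approximateNextVsync = gLastVsyncSequence->vsyncTime + kPeriod * (divisor - mod);
    return nextVsyncAfter(approximateNextVsync - kPeriod / 2);
}
} // namespace

int main() {
    // Establish a reference sequence, then query phase for a divisor of 4:
    // only every 4th vsync relative to the reference is in phase.
    nextAnticipatedVSyncTimeFrom(0, 1);
    for (int i = 1; i <= 8; ++i) {
        std::printf("vsync %d in phase for divisor 4: %s\n", i,
                    isVSyncInPhase(i * kPeriod, 4) ? "yes" : "no");
    }
    return 0;
}

With the real fitted model, the division by slope is rounded (std::round) rather than truncated because measured timestamps jitter, so a predicted vsync can land slightly before or after the ideal multiple of the period.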