2 * Copyright (C) 2013 The Android Open Source Project
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
18 //#define LOG_NDEBUG 0
20 // This is needed for stdint.h to define INT64_MAX in C++
21 #define __STDC_LIMIT_MACROS
25 #include <cutils/iosched_policy.h>
26 #include <cutils/log.h>
30 #include <utils/String8.h>
31 #include <utils/Thread.h>
32 #include <utils/Trace.h>
33 #include <utils/Vector.h>
36 #include "EventLog/EventLog.h"
45 // Setting this to true enables verbose tracing that can be used to debug
46 // vsync event model or phase issues.
47 static const bool kTraceDetailedInfo = false;
49 // Setting this to true adds a zero-phase tracer for correlating with hardware
51 static const bool kEnableZeroPhaseTracer = false;
53 // This is the threshold used to determine when hardware vsync events are
54 // needed to re-synchronize the software vsync model with the hardware. The
55 // error metric used is the mean of the squared difference between each
56 // present time and the nearest software-predicted vsync.
// Units are ns^2: 160000000000 == (400000 ns)^2 == (400 usec)^2.
57 static const nsecs_t kErrorThreshold = 160000000000; // 400 usec squared
59 // This is the offset from the present fence timestamps to the corresponding
// vsync event, taken from the build-time constant
// PRESENT_TIME_OFFSET_FROM_VSYNC_NS (tail of the original comment elided).
61 static const int64_t kPresentTimeOffset = PRESENT_TIME_OFFSET_FROM_VSYNC_NS;
64 #define LOG_TAG "DispSyncThread"
// Worker thread that models the display vsync as (period, phase) relative to
// a reference time and wakes up to fire registered listener callbacks at the
// predicted event times.
65 class DispSyncThread: public Thread {
68 DispSyncThread(const char* name):
77 virtual ~DispSyncThread() {}
// Installs a new vsync model (period/phase/reference time) under mMutex.
// NOTE(review): mPeriod and mPhase appear to be assigned on lines elided from
// this excerpt -- the ALOGV below reads them as freshly set; confirm.
79 void updateModel(nsecs_t period, nsecs_t phase, nsecs_t referenceTime) {
80 if (kTraceDetailedInfo) ATRACE_CALL();
81 Mutex::Autolock lock(mMutex);
84 mReferenceTime = referenceTime;
// Times are logged in microseconds (ns2us) for readability.
85 ALOGV("[%s] updateModel: mPeriod = %" PRId64 ", mPhase = %" PRId64
86 " mReferenceTime = %" PRId64, mName, ns2us(mPeriod),
87 ns2us(mPhase), ns2us(mReferenceTime));
// Fragment of another member function (its signature is elided); it uses the
// same trace/lock preamble seen throughout this class.
92 if (kTraceDetailedInfo) ATRACE_CALL();
93 Mutex::Autolock lock(mMutex);
// Thread main loop: compute the next predicted event time, sleep until then
// (tracking how late we actually wake up), then gather and fire the listener
// callbacks that are due.
98 virtual bool threadLoop() {
100 nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
103 Vector<CallbackInvocation> callbackInvocations;
105 nsecs_t targetTime = 0;
108 Mutex::Autolock lock(mMutex);
110 if (kTraceDetailedInfo) {
111 ATRACE_INT64("DispSync:Frame", mFrameNumber);
113 ALOGV("[%s] Frame %" PRId64, mName, mFrameNumber);
// Block until signaled (e.g. a model update or a new listener); the guarding
// condition for this wait is on lines elided from this excerpt.
121 err = mCond.wait(mMutex);
122 if (err != NO_ERROR) {
123 ALOGE("error waiting for new events: %s (%d)",
124 strerror(-err), err);
130 targetTime = computeNextEventTimeLocked(now);
132 bool isWakeup = false;
134 if (now < targetTime) {
135 if (kTraceDetailedInfo) ATRACE_NAME("DispSync waiting");
// INT64_MAX means "nothing to schedule": wait indefinitely for a signal.
137 if (targetTime == INT64_MAX) {
138 ALOGV("[%s] Waiting forever", mName);
139 err = mCond.wait(mMutex);
141 ALOGV("[%s] Waiting until %" PRId64, mName,
// Timed wait until the predicted event time; TIMED_OUT is the normal
// "event is due" outcome, not an error.
143 err = mCond.waitRelative(mMutex, targetTime - now);
146 if (err == TIMED_OUT) {
148 } else if (err != NO_ERROR) {
149 ALOGE("error waiting for next event: %s (%d)",
150 strerror(-err), err);
155 now = systemTime(SYSTEM_TIME_MONOTONIC);
157 // Don't correct by more than 1.5 ms
158 static const nsecs_t kMaxWakeupLatency = us2ns(1500);
// Exponential moving average of wakeup latency (63/64 old, 1/64 new),
// clamped so one pathological wakeup cannot skew the schedule.
161 mWakeupLatency = ((mWakeupLatency * 63) +
162 (now - targetTime)) / 64;
163 mWakeupLatency = min(mWakeupLatency, kMaxWakeupLatency);
164 if (kTraceDetailedInfo) {
165 ATRACE_INT64("DispSync:WakeupLat", now - targetTime);
166 ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
170 callbackInvocations = gatherCallbackInvocationsLocked(now);
// Fire the gathered callbacks, if any.
173 if (callbackInvocations.size() > 0) {
174 fireCallbackInvocations(callbackInvocations);
// Registers a callback to be fired every modeled vsync at the given phase
// offset. Rejects duplicate registrations of the same callback (the error
// return value is on an elided line).
181 status_t addEventListener(const char* name, nsecs_t phase,
182 const sp<DispSync::Callback>& callback) {
183 if (kTraceDetailedInfo) ATRACE_CALL();
184 Mutex::Autolock lock(mMutex);
// Scan for an existing registration of this callback.
186 for (size_t i = 0; i < mEventListeners.size(); i++) {
187 if (mEventListeners[i].mCallback == callback) {
192 EventListener listener;
193 listener.mName = name;
194 listener.mPhase = phase;
195 listener.mCallback = callback;
197 // We want to allow the firstmost future event to fire without
198 // allowing any past events to fire
// Back-date the last event time by roughly half a period (the tail of this
// expression is elided) so only the next future event can fire.
199 listener.mLastEventTime = systemTime() - mPeriod / 2 + mPhase -
202 mEventListeners.push(listener);
// Unregisters a previously added callback; the not-found return path is on
// elided lines.
209 status_t removeEventListener(const sp<DispSync::Callback>& callback) {
210 if (kTraceDetailedInfo) ATRACE_CALL();
211 Mutex::Autolock lock(mMutex);
213 for (size_t i = 0; i < mEventListeners.size(); i++) {
214 if (mEventListeners[i].mCallback == callback) {
215 mEventListeners.removeAt(i);
224 // This method is only here to handle the kIgnorePresentFences case.
// Returns true if any callback is currently registered.
225 bool hasAnyEventListeners() {
226 if (kTraceDetailedInfo) ATRACE_CALL();
227 Mutex::Autolock lock(mMutex);
228 return !mEventListeners.empty();
// Per-listener bookkeeping: phase offset (field elided), the last time an
// event fired for it, and the callback to invoke.
233 struct EventListener {
236 nsecs_t mLastEventTime;
237 sp<DispSync::Callback> mCallback;
// A callback paired with the event timestamp to deliver to it (the
// timestamp field is on an elided line).
240 struct CallbackInvocation {
241 sp<DispSync::Callback> mCallback;
// Returns the earliest next-event time across all listeners, or INT64_MAX
// when there is nothing to schedule. Caller must hold mMutex.
245 nsecs_t computeNextEventTimeLocked(nsecs_t now) {
246 if (kTraceDetailedInfo) ATRACE_CALL();
247 ALOGV("[%s] computeNextEventTimeLocked", mName);
248 nsecs_t nextEventTime = INT64_MAX;
249 for (size_t i = 0; i < mEventListeners.size(); i++) {
250 nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
// Track the minimum over all listeners.
253 if (t < nextEventTime) {
258 ALOGV("[%s] nextEventTime = %" PRId64, mName, ns2us(nextEventTime));
259 return nextEventTime;
// Collects the listeners whose next event is due (evaluated against a base
// of one period ago -- the second argument to the helper is elided), records
// the event time in each listener, and returns the invocations to fire.
// Caller must hold mMutex.
262 Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
263 if (kTraceDetailedInfo) ATRACE_CALL();
264 ALOGV("[%s] gatherCallbackInvocationsLocked @ %" PRId64, mName,
267 Vector<CallbackInvocation> callbackInvocations;
268 nsecs_t onePeriodAgo = now - mPeriod;
270 for (size_t i = 0; i < mEventListeners.size(); i++) {
271 nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
275 CallbackInvocation ci;
276 ci.mCallback = mEventListeners[i].mCallback;
278 ALOGV("[%s] [%s] Preparing to fire", mName,
279 mEventListeners[i].mName);
280 callbackInvocations.push(ci);
// Remember when this listener last fired so it is not double-fired.
281 mEventListeners.editItemAt(i).mLastEventTime = t;
285 return callbackInvocations;
// Computes the next absolute time at which `listener` should fire, no
// earlier than the given base time. Works in mReferenceTime-relative
// coordinates for the period/phase arithmetic. Caller must hold mMutex.
288 nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener,
290 if (kTraceDetailedInfo) ATRACE_CALL();
291 ALOGV("[%s] [%s] computeListenerNextEventTimeLocked(%" PRId64 ")",
292 mName, listener.mName, ns2us(baseTime));
// Never schedule before the last event (plus the average wakeup latency).
294 nsecs_t lastEventTime = listener.mLastEventTime + mWakeupLatency;
295 ALOGV("[%s] lastEventTime: %" PRId64, mName, ns2us(lastEventTime));
296 if (baseTime < lastEventTime) {
297 baseTime = lastEventTime;
298 ALOGV("[%s] Clamping baseTime to lastEventTime -> %" PRId64, mName,
// Switch to reference-relative time.
302 baseTime -= mReferenceTime;
303 ALOGV("[%s] Relative baseTime = %" PRId64, mName, ns2us(baseTime));
// Combined phase: model phase plus this listener's own offset.
304 nsecs_t phase = mPhase + listener.mPhase;
305 ALOGV("[%s] Phase = %" PRId64, mName, ns2us(phase));
307 ALOGV("[%s] baseTime - phase = %" PRId64, mName, ns2us(baseTime));
309 // If our previous time is before the reference (because the reference
310 // has since been updated), the division by mPeriod will truncate
311 // towards zero instead of computing the floor. Since in all cases
312 // before the reference we want the next time to be effectively now, we
313 // set baseTime to -mPeriod so that numPeriods will be -1.
314 // When we add 1 and the phase, we will be at the correct event time for
317 ALOGV("[%s] Correcting negative baseTime", mName);
// Round up to the next period boundary, then add the phase back.
321 nsecs_t numPeriods = baseTime / mPeriod;
322 ALOGV("[%s] numPeriods = %" PRId64, mName, numPeriods);
323 nsecs_t t = (numPeriods + 1) * mPeriod + phase;
324 ALOGV("[%s] t = %" PRId64, mName, ns2us(t));
326 ALOGV("[%s] Absolute t = %" PRId64, mName, ns2us(t));
328 // Check that it's been slightly more than half a period since the last
329 // event so that we don't accidentally fall into double-rate vsyncs
330 if (t - listener.mLastEventTime < (3 * mPeriod / 5)) {
332 ALOGV("[%s] Modifying t -> %" PRId64, mName, ns2us(t));
336 ALOGV("[%s] Corrected for wakeup latency -> %" PRId64, mName, ns2us(t));
// Invokes each gathered callback with its event timestamp.
// NOTE(review): whether mMutex is held by the caller here is not visible in
// this excerpt -- confirm before assuming callbacks may re-enter this class.
341 void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) {
342 if (kTraceDetailedInfo) ATRACE_CALL();
343 for (size_t i = 0; i < callbacks.size(); i++) {
344 callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
// Thread state. Presumably guarded by mMutex (declared on an elided line) --
// confirm against the full source.
348 const char* const mName;
354 nsecs_t mReferenceTime;
355 nsecs_t mWakeupLatency;
357 int64_t mFrameNumber;
359 Vector<EventListener> mEventListeners;
366 #define LOG_TAG "DispSync"
// Debug listener that toggles a systrace counter between 0 and 1 on every
// vsync, for correlating software vsync with hardware signals.
368 class ZeroPhaseTracer : public DispSync::Callback {
370 ZeroPhaseTracer() : mParity(false) {}
372 virtual void onDispSyncEvent(nsecs_t /*when*/) {
// mParity presumably flips on an elided line before/after this trace --
// confirm; the counter records its value at each event.
374 ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0);
// Constructs the DispSync and starts its worker thread at urgent-display
// priority, additionally granting the thread realtime I/O scheduling.
381 DispSync::DispSync(const char* name) :
383 mRefreshSkipCount(0),
384 mThread(new DispSyncThread(name)) {
386 mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);
387 android_set_rt_ioprio(mThread->getTid(), 1);
// Fragment of an initialization method (its signature is elided): optionally
// registers the zero-phase tracer when detailed tracing is enabled.
392 if (kTraceDetailedInfo) {
393 // If we're not getting present fences then the ZeroPhaseTracer
394 // would prevent HW vsync event from ever being turned off.
395 // Even if we're just ignoring the fences, the zero-phase tracing is
396 // not needed because any time there is an event registered we will
397 // turn on the HW vsync events.
398 if (!kIgnorePresentFences && kEnableZeroPhaseTracer) {
399 addEventListener("ZeroPhaseTracer", 0, new ZeroPhaseTracer());
404 DispSync::~DispSync() {}
// Drops the current model and all resync bookkeeping; hardware vsync samples
// must be collected again before the model is considered valid.
406 void DispSync::reset() {
407 Mutex::Autolock lock(mMutex);
411 mModelUpdated = false;
412 mNumResyncSamples = 0;
413 mFirstResyncSample = 0;
414 mNumResyncSamplesSincePresent = 0;
// Records a present fence in the sample ring buffer. Returns true when
// hardware vsync events are needed: either there is no valid model yet, or
// the model error exceeds kErrorThreshold.
418 bool DispSync::addPresentFence(const sp<Fence>& fence) {
419 Mutex::Autolock lock(mMutex);
421 mPresentFences[mPresentSampleOffset] = fence;
422 mPresentTimes[mPresentSampleOffset] = 0;
423 mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES;
424 mNumResyncSamplesSincePresent = 0;
// Harvest signal times from fences that have signaled since the last call,
// converting them to vsync-event times via kPresentTimeOffset.
426 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
427 const sp<Fence>& f(mPresentFences[i]);
429 nsecs_t t = f->getSignalTime();
431 mPresentFences[i].clear();
432 mPresentTimes[i] = t + kPresentTimeOffset;
439 return !mModelUpdated || mError > kErrorThreshold;
// Starts a resync: invalidates the model and clears accumulated samples.
442 void DispSync::beginResync() {
443 Mutex::Autolock lock(mMutex);
444 ALOGV("[%s] beginResync", mName);
445 mModelUpdated = false;
446 mNumResyncSamples = 0;
// Feeds one hardware vsync timestamp into the resync ring buffer and updates
// the model. Returns true while more hardware vsync samples are needed.
449 bool DispSync::addResyncSample(nsecs_t timestamp) {
450 Mutex::Autolock lock(mMutex);
452 ALOGV("[%s] addResyncSample(%" PRId64 ")", mName, ns2us(timestamp));
454 size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
455 mResyncSamples[idx] = timestamp;
456 if (mNumResyncSamples == 0) {
// The first sample anchors the model's reference time.
458 mReferenceTime = timestamp;
459 ALOGV("[%s] First resync sample: mPeriod = %" PRId64 ", mPhase = 0, "
460 "mReferenceTime = %" PRId64, mName, ns2us(mPeriod),
461 ns2us(mReferenceTime));
462 mThread->updateModel(mPeriod, mPhase, mReferenceTime);
// Grow the sample count until the ring is full, then advance its head.
465 if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
468 mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES;
// Too many resync samples without a present fence; the handling (presumably
// a complaint/log) is on elided lines.
473 if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) {
477 if (kIgnorePresentFences) {
478 // If we don't have the sync framework we will never have
479 // addPresentFence called. This means we have no way to know whether
480 // or not we're synchronized with the HW vsyncs, so we just request
481 // that the HW vsync events be turned on whenever we need to generate
483 return mThread->hasAnyEventListeners();
486 // Check against kErrorThreshold / 2 to add some hysteresis before having to
488 bool modelLocked = mModelUpdated && mError < (kErrorThreshold / 2);
489 ALOGV("[%s] addResyncSample returning %s", mName,
490 modelLocked ? "locked" : "unlocked");
// Ends a resync pass; the body is on elided lines.
494 void DispSync::endResync() {
// Thread-safe wrapper: forwards listener registration to the worker thread.
497 status_t DispSync::addEventListener(const char* name, nsecs_t phase,
498 const sp<Callback>& callback) {
499 Mutex::Autolock lock(mMutex);
500 return mThread->addEventListener(name, phase, callback);
// Sets the refresh-skip multiplier, which updateModelLocked() applies by
// inflating the published period.
503 void DispSync::setRefreshSkipCount(int count) {
504 Mutex::Autolock lock(mMutex);
505 ALOGD("setRefreshSkipCount(%d)", count);
506 mRefreshSkipCount = count;
// Thread-safe wrapper: forwards listener removal to the worker thread.
510 status_t DispSync::removeEventListener(const sp<Callback>& callback) {
511 Mutex::Autolock lock(mMutex);
512 return mThread->removeEventListener(callback);
// Sets a new nominal vsync period and pushes the model to the worker thread.
// NOTE(review): the assignments to mPeriod/mPhase/mReferenceTime are on
// elided lines -- confirm against the full source.
515 void DispSync::setPeriod(nsecs_t period) {
516 Mutex::Autolock lock(mMutex);
520 mThread->updateModel(mPeriod, mPhase, mReferenceTime);
// Returns the current modeled period (return statement elided).
523 nsecs_t DispSync::getPeriod() {
524 // lock mutex as mPeriod changes multiple times in updateModelLocked
525 Mutex::Autolock lock(mMutex);
// Recomputes (mPeriod, mPhase) from the collected resync samples once enough
// have accumulated, then publishes the model to the worker thread.
// Caller must hold mMutex.
529 void DispSync::updateModelLocked() {
530 ALOGV("[%s] updateModelLocked %zu", mName, mNumResyncSamples);
531 if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
532 ALOGV("[%s] Computing...", mName);
533 nsecs_t durationSum = 0;
534 nsecs_t minDuration = INT64_MAX;
535 nsecs_t maxDuration = 0;
// Period estimate: mean of inter-sample durations, with the min and max
// durations excluded as outliers.
536 for (size_t i = 1; i < mNumResyncSamples; i++) {
537 size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
538 size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
539 nsecs_t duration = mResyncSamples[idx] - mResyncSamples[prev];
540 durationSum += duration;
541 minDuration = min(minDuration, duration);
542 maxDuration = max(maxDuration, duration);
545 // Exclude the min and max from the average
546 durationSum -= minDuration + maxDuration;
// N samples give N-1 durations; two are excluded, hence N-3.
547 mPeriod = durationSum / (mNumResyncSamples - 3);
549 ALOGV("[%s] mPeriod = %" PRId64, mName, ns2us(mPeriod));
// Phase estimate: circular mean of the samples' positions within the
// period, computed by averaging unit vectors on the period circle so
// wrap-around near the period boundary averages correctly.
551 double sampleAvgX = 0;
552 double sampleAvgY = 0;
553 double scale = 2.0 * M_PI / double(mPeriod);
554 // Intentionally skip the first sample
555 for (size_t i = 1; i < mNumResyncSamples; i++) {
556 size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
557 nsecs_t sample = mResyncSamples[idx] - mReferenceTime;
558 double samplePhase = double(sample % mPeriod) * scale;
559 sampleAvgX += cos(samplePhase);
560 sampleAvgY += sin(samplePhase);
563 sampleAvgX /= double(mNumResyncSamples - 1);
564 sampleAvgY /= double(mNumResyncSamples - 1);
// Convert the mean vector's angle back to nanoseconds of phase.
566 mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);
568 ALOGV("[%s] mPhase = %" PRId64, mName, ns2us(mPhase));
// Normalize the phase when it falls below -period/2; the adjustment itself
// is on an elided line.
570 if (mPhase < -(mPeriod / 2)) {
572 ALOGV("[%s] Adjusting mPhase -> %" PRId64, mName, ns2us(mPhase));
575 if (kTraceDetailedInfo) {
576 ATRACE_INT64("DispSync:Period", mPeriod);
577 ATRACE_INT64("DispSync:Phase", mPhase + mPeriod / 2);
580 // Artificially inflate the period if requested.
581 mPeriod += mPeriod * mRefreshSkipCount;
583 mThread->updateModel(mPeriod, mPhase, mReferenceTime);
584 mModelUpdated = true;
// Recomputes mError: the mean squared distance (ns^2) between recorded
// present times and the nearest predicted vsync. No-op until a model exists.
// Caller must hold mMutex.
588 void DispSync::updateErrorLocked() {
589 if (!mModelUpdated) {
593 // Need to compare present fences against the un-adjusted refresh period,
594 // since they might arrive between two events.
595 nsecs_t period = mPeriod / (1 + mRefreshSkipCount);
597 int numErrSamples = 0;
598 nsecs_t sqErrSum = 0;
600 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
601 nsecs_t sample = mPresentTimes[i] - mReferenceTime;
602 if (sample > mPhase) {
603 nsecs_t sampleErr = (sample - mPhase) % period;
// Errors past the midpoint are folded toward the nearer vsync; the
// adjustment is on an elided line.
604 if (sampleErr > period / 2) {
607 sqErrSum += sampleErr * sampleErr;
612 if (numErrSamples > 0) {
613 mError = sqErrSum / numErrSamples;
618 if (kTraceDetailedInfo) {
619 ATRACE_INT64("DispSync:Error", mError);
// Clears all recorded present fences and times. Caller must hold mMutex.
623 void DispSync::resetErrorLocked() {
624 mPresentSampleOffset = 0;
626 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
627 mPresentFences[i].clear();
628 mPresentTimes[i] = 0;
// Predicts the absolute time of the vsync `periodOffset` periods after the
// next upcoming one, based on the current model.
632 nsecs_t DispSync::computeNextRefresh(int periodOffset) const {
633 Mutex::Autolock lock(mMutex);
634 nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
635 nsecs_t phase = mReferenceTime + mPhase;
// Integer division truncates (assumes now >= phase -- TODO confirm), so +1
// lands on the next upcoming vsync.
636 return (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase;
// Writes a human-readable snapshot of the model, resync samples, and present
// fence history into `result` (used by dumpsys-style debugging output).
639 void DispSync::dump(String8& result) const {
640 Mutex::Autolock lock(mMutex);
641 result.appendFormat("present fences are %s\n",
642 kIgnorePresentFences ? "ignored" : "used");
643 result.appendFormat("mPeriod: %" PRId64 " ns (%.3f fps; skipCount=%d)\n",
644 mPeriod, 1000000000.0 / mPeriod, mRefreshSkipCount);
645 result.appendFormat("mPhase: %" PRId64 " ns\n", mPhase);
// mError is a squared quantity; sqrt gives the RMS error in ns.
646 result.appendFormat("mError: %" PRId64 " ns (sqrt=%.1f)\n",
647 mError, sqrt(mError));
648 result.appendFormat("mNumResyncSamplesSincePresent: %d (limit %d)\n",
649 mNumResyncSamplesSincePresent, MAX_RESYNC_SAMPLES_WITHOUT_PRESENT);
650 result.appendFormat("mNumResyncSamples: %zd (max %d)\n",
651 mNumResyncSamples, MAX_RESYNC_SAMPLES);
653 result.appendFormat("mResyncSamples:\n");
654 nsecs_t previous = -1;
// Dump resync samples oldest-first; after the first, also show the delta
// from the previous sample (branch condition on an elided line).
655 for (size_t i = 0; i < mNumResyncSamples; i++) {
656 size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
657 nsecs_t sampleTime = mResyncSamples[idx];
659 result.appendFormat(" %" PRId64 "\n", sampleTime);
661 result.appendFormat(" %" PRId64 " (+%" PRId64 ")\n",
662 sampleTime, sampleTime - previous);
664 previous = sampleTime;
667 result.appendFormat("mPresentFences / mPresentTimes [%d]:\n",
668 NUM_PRESENT_SAMPLES);
669 nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
// Walk the present-sample ring buffer oldest-first.
671 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
672 size_t idx = (i + mPresentSampleOffset) % NUM_PRESENT_SAMPLES;
673 bool signaled = mPresentFences[idx] == NULL;
674 nsecs_t presentTime = mPresentTimes[idx];
676 result.appendFormat(" [unsignaled fence]\n");
677 } else if (presentTime == 0) {
678 result.appendFormat(" 0\n");
679 } else if (previous == 0) {
680 result.appendFormat(" %" PRId64 " (%.3f ms ago)\n", presentTime,
681 (now - presentTime) / 1000000.0);
683 result.appendFormat(" %" PRId64 " (+%" PRId64 " / %.3f) (%.3f ms ago)\n",
684 presentTime, presentTime - previous,
685 (presentTime - previous) / (double) mPeriod,
686 (now - presentTime) / 1000000.0);
688 previous = presentTime;
691 result.appendFormat("current monotonic time: %" PRId64 "\n", now);
694 } // namespace android