/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

// This is needed for stdint.h to define INT64_MAX in C++
#define __STDC_LIMIT_MACROS

#include <math.h>

#include <algorithm>

#include <android-base/stringprintf.h>
#include <cutils/properties.h>
#include <log/log.h>
#include <utils/Thread.h>
#include <utils/Trace.h>

#include <ui/FenceTime.h>

#include "DispSync.h"
#include "EventLog/EventLog.h"
#include "SurfaceFlinger.h"

using android::base::StringAppendF;
using std::max;
using std::min;

namespace android {

DispSync::~DispSync() = default;
DispSync::Callback::~Callback() = default;

namespace impl {

// Setting this to true adds a zero-phase tracer for correlating with hardware
// vsync events
static const bool kEnableZeroPhaseTracer = false;

// This is the threshold used to determine when hardware vsync events are
// needed to re-synchronize the software vsync model with the hardware. The
// error metric used is the mean of the squared difference between each
// present time and the nearest software-predicted vsync.
static const nsecs_t kErrorThreshold = 160000000000; // 400 usec squared
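// (sqrt(160,000,000,000 ns^2) = 400,000 ns = 400 usec of RMS error.)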

#undef LOG_TAG
#define LOG_TAG "DispSyncThread"
class DispSyncThread : public Thread {
public:
    DispSyncThread(const char* name, bool showTraceDetailedInfo)
          : mName(name),
            mStop(false),
            mModelLocked(false),
            mPeriod(0),
            mPhase(0),
            mReferenceTime(0),
            mWakeupLatency(0),
            mFrameNumber(0),
            mTraceDetailedInfo(showTraceDetailedInfo) {}

    virtual ~DispSyncThread() {}

    void updateModel(nsecs_t period, nsecs_t phase, nsecs_t referenceTime) {
        if (mTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);

        mPhase = phase;
        const bool referenceTimeChanged = mReferenceTime != referenceTime;
        mReferenceTime = referenceTime;
        if (mPeriod != 0 && mPeriod != period && mReferenceTime != 0) {
            // Inflate the reference time to be the most recent predicted
            // vsync before the current time.
            const nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
            const nsecs_t baseTime = now - mReferenceTime;
            const nsecs_t numOldPeriods = baseTime / mPeriod;
            mReferenceTime = mReferenceTime + numOldPeriods * mPeriod;
        }
        mPeriod = period;
        if (!mModelLocked && referenceTimeChanged) {
            for (auto& eventListener : mEventListeners) {
                eventListener.mLastEventTime = mReferenceTime + mPhase + eventListener.mPhase;
                // If mLastEventTime is after mReferenceTime (which can happen
                // when positive phase offsets are used) we treat it as if it
                // happened in the previous period.
                if (eventListener.mLastEventTime > mReferenceTime) {
                    eventListener.mLastEventTime -= mPeriod;
                }
            }
        }
        if (mTraceDetailedInfo) {
            ATRACE_INT64("DispSync:Period", mPeriod);
            ATRACE_INT64("DispSync:Phase", mPhase + mPeriod / 2);
            ATRACE_INT64("DispSync:Reference Time", mReferenceTime);
        }
        ALOGV("[%s] updateModel: mPeriod = %" PRId64 ", mPhase = %" PRId64
              " mReferenceTime = %" PRId64,
              mName, ns2us(mPeriod), ns2us(mPhase), ns2us(mReferenceTime));
        mCond.signal();
    }

    void stop() {
        if (mTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);
        mStop = true;
        mCond.signal();
    }

    void lockModel() {
        Mutex::Autolock lock(mMutex);
        mModelLocked = true;
        ATRACE_INT("DispSync:ModelLocked", mModelLocked);
    }

    void unlockModel() {
        Mutex::Autolock lock(mMutex);
        mModelLocked = false;
        ATRACE_INT("DispSync:ModelLocked", mModelLocked);
    }

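    // Main loop: under the lock, compute the next event time across all
    // listeners and sleep until then (or until signaled by a model update),
    // then gather the callbacks that are due. The callbacks themselves are
    // fired outside the lock, so a callback may safely call back into this
    // object without deadlocking.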
    virtual bool threadLoop() {
        status_t err;
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);

        while (true) {
            std::vector<CallbackInvocation> callbackInvocations;

            nsecs_t targetTime = 0;

            { // Scope for lock
                Mutex::Autolock lock(mMutex);

                if (mTraceDetailedInfo) {
                    ATRACE_INT64("DispSync:Frame", mFrameNumber);
                }
                ALOGV("[%s] Frame %" PRId64, mName, mFrameNumber);
                ++mFrameNumber;

                if (mStop) {
                    return false;
                }

                if (mPeriod == 0) {
                    err = mCond.wait(mMutex);
                    if (err != NO_ERROR) {
                        ALOGE("error waiting for new events: %s (%d)", strerror(-err), err);
                        return false;
                    }
                    continue;
                }

                targetTime = computeNextEventTimeLocked(now);

                bool isWakeup = false;

                if (now < targetTime) {
                    if (mTraceDetailedInfo) ATRACE_NAME("DispSync waiting");

                    if (targetTime == INT64_MAX) {
                        ALOGV("[%s] Waiting forever", mName);
                        err = mCond.wait(mMutex);
                    } else {
                        ALOGV("[%s] Waiting until %" PRId64, mName, ns2us(targetTime));
                        err = mCond.waitRelative(mMutex, targetTime - now);
                    }

                    if (err == TIMED_OUT) {
                        isWakeup = true;
                    } else if (err != NO_ERROR) {
                        ALOGE("error waiting for next event: %s (%d)", strerror(-err), err);
                        return false;
                    }
                }

                now = systemTime(SYSTEM_TIME_MONOTONIC);

                // Don't correct by more than 1.5 ms
                static const nsecs_t kMaxWakeupLatency = us2ns(1500);

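                // Track how late we actually woke up relative to the target
                // as an exponential moving average (1/64 weight on the newest
                // sample), clamped to kMaxWakeupLatency.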
                if (isWakeup) {
                    mWakeupLatency = ((mWakeupLatency * 63) + (now - targetTime)) / 64;
                    mWakeupLatency = min(mWakeupLatency, kMaxWakeupLatency);
                    if (mTraceDetailedInfo) {
                        ATRACE_INT64("DispSync:WakeupLat", now - targetTime);
                        ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
                    }
                }

                callbackInvocations = gatherCallbackInvocationsLocked(now);
            }

            if (callbackInvocations.size() > 0) {
                fireCallbackInvocations(callbackInvocations);
            }
        }

        return false;
    }

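    // Registers a callback to be fired once per period at the given phase
    // offset. Returns BAD_VALUE if the callback is already registered.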
    status_t addEventListener(const char* name, nsecs_t phase, DispSync::Callback* callback,
                              nsecs_t lastCallbackTime) {
        if (mTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                return BAD_VALUE;
            }
        }

        EventListener listener;
        listener.mName = name;
        listener.mPhase = phase;
        listener.mCallback = callback;

        // We want to allow the first future event to fire without letting any
        // past events fire. To do this, extrapolate from mReferenceTime the
        // most recent hardware vsync, and pin the last event time there.
        const nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        if (mPeriod != 0) {
            const nsecs_t baseTime = now - mReferenceTime;
            const nsecs_t numPeriodsSinceReference = baseTime / mPeriod;
            const nsecs_t predictedReference = mReferenceTime + numPeriodsSinceReference * mPeriod;
            const nsecs_t phaseCorrection = mPhase + listener.mPhase;
            const nsecs_t predictedLastEventTime = predictedReference + phaseCorrection;
            if (predictedLastEventTime >= now) {
                // Make sure that the last event time does not exceed the
                // current time. If it would, back it off by one period.
                listener.mLastEventTime = predictedLastEventTime - mPeriod;
            } else {
                listener.mLastEventTime = predictedLastEventTime;
            }
        } else {
            listener.mLastEventTime = now + mPhase - mWakeupLatency;
        }

        if (lastCallbackTime <= 0) {
            // If there is no prior callback time, try to infer one based on
            // the logical last event time.
            listener.mLastCallbackTime = listener.mLastEventTime + mWakeupLatency;
        } else {
            listener.mLastCallbackTime = lastCallbackTime;
        }

        mEventListeners.push_back(listener);

        mCond.signal();

        return NO_ERROR;
    }

    status_t removeEventListener(DispSync::Callback* callback, nsecs_t* outLastCallback) {
        if (mTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);

        for (std::vector<EventListener>::iterator it = mEventListeners.begin();
             it != mEventListeners.end(); ++it) {
            if (it->mCallback == callback) {
                *outLastCallback = it->mLastCallbackTime;
                mEventListeners.erase(it);
                mCond.signal();
                return NO_ERROR;
            }
        }

        return BAD_VALUE;
    }

    status_t changePhaseOffset(DispSync::Callback* callback, nsecs_t phase) {
        if (mTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);

        for (auto& eventListener : mEventListeners) {
            if (eventListener.mCallback == callback) {
                const nsecs_t oldPhase = eventListener.mPhase;
                eventListener.mPhase = phase;

                // Pretend that this event was last handled in the same frame
                // but with the new offset, to allow for a seamless offset
                // change without double-firing or skipping.
                nsecs_t diff = oldPhase - phase;
                eventListener.mLastEventTime -= diff;
                eventListener.mLastCallbackTime -= diff;
                mCond.signal();
                return NO_ERROR;
            }
        }
        return BAD_VALUE;
    }

private:
    struct EventListener {
        const char* mName;
        nsecs_t mPhase;
        nsecs_t mLastEventTime;
        nsecs_t mLastCallbackTime;
        DispSync::Callback* mCallback;
    };

    struct CallbackInvocation {
        DispSync::Callback* mCallback;
        nsecs_t mEventTime;
    };

    nsecs_t computeNextEventTimeLocked(nsecs_t now) {
        if (mTraceDetailedInfo) ATRACE_CALL();
        ALOGV("[%s] computeNextEventTimeLocked", mName);
        nsecs_t nextEventTime = INT64_MAX;
        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i], now);

            if (t < nextEventTime) {
                nextEventTime = t;
            }
        }

        ALOGV("[%s] nextEventTime = %" PRId64, mName, ns2us(nextEventTime));
        return nextEventTime;
    }

    // Checks whether the given duration is short enough, relative to a
    // period, that firing would fall into double-rate vsyncs.
    bool isCloseToPeriod(nsecs_t duration) {
        // Ratio of 3/5 is arbitrary, but it must be greater than 1/2.
        return duration < (3 * mPeriod) / 5;
    }

    std::vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
        if (mTraceDetailedInfo) ATRACE_CALL();
        ALOGV("[%s] gatherCallbackInvocationsLocked @ %" PRId64, mName, ns2us(now));

        std::vector<CallbackInvocation> callbackInvocations;
        nsecs_t onePeriodAgo = now - mPeriod;

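        // Compute each listener's next event starting one period in the past
        // so that an event whose time has only just passed still fires; the
        // isCloseToPeriod() check below guards against firing the same
        // listener twice in quick succession.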
        for (auto& eventListener : mEventListeners) {
            nsecs_t t = computeListenerNextEventTimeLocked(eventListener, onePeriodAgo);

            if (t < now) {
                if (isCloseToPeriod(now - eventListener.mLastCallbackTime)) {
                    eventListener.mLastEventTime = t;
                    ALOGV("[%s] [%s] Skipping event due to model error", mName,
                          eventListener.mName);
                    continue;
                }

                CallbackInvocation ci;
                ci.mCallback = eventListener.mCallback;
                ci.mEventTime = t;
                ALOGV("[%s] [%s] Preparing to fire, latency: %" PRId64, mName, eventListener.mName,
                      t - eventListener.mLastEventTime);
                callbackInvocations.push_back(ci);
                eventListener.mLastEventTime = t;
                eventListener.mLastCallbackTime = now;
            }
        }

        return callbackInvocations;
    }

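    // Projects the listener's next event time: work relative to
    // mReferenceTime, strip the phase, floor-divide by the period to find the
    // current period index, then step one period forward and re-apply the
    // phase, the reference time, and the wakeup-latency correction.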
    nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener, nsecs_t baseTime) {
        if (mTraceDetailedInfo) ATRACE_CALL();
        ALOGV("[%s] [%s] computeListenerNextEventTimeLocked(%" PRId64 ")", mName, listener.mName,
              ns2us(baseTime));

        nsecs_t lastEventTime = listener.mLastEventTime + mWakeupLatency;
        ALOGV("[%s] lastEventTime: %" PRId64, mName, ns2us(lastEventTime));
        if (baseTime < lastEventTime) {
            baseTime = lastEventTime;
            ALOGV("[%s] Clamping baseTime to lastEventTime -> %" PRId64, mName, ns2us(baseTime));
        }

        baseTime -= mReferenceTime;
        ALOGV("[%s] Relative baseTime = %" PRId64, mName, ns2us(baseTime));
        nsecs_t phase = mPhase + listener.mPhase;
        ALOGV("[%s] Phase = %" PRId64, mName, ns2us(phase));
        baseTime -= phase;
        ALOGV("[%s] baseTime - phase = %" PRId64, mName, ns2us(baseTime));

        // If our previous time is before the reference (because the reference
        // has since been updated), the division by mPeriod will truncate
        // towards zero instead of computing the floor. Since in all cases
        // before the reference we want the next time to be effectively now, we
        // set baseTime to -mPeriod so that numPeriods will be -1.
        // When we add 1 and the phase, we will be at the correct event time for
        // this period.
        if (baseTime < 0) {
            ALOGV("[%s] Correcting negative baseTime", mName);
            baseTime = -mPeriod;
        }

        nsecs_t numPeriods = baseTime / mPeriod;
        ALOGV("[%s] numPeriods = %" PRId64, mName, numPeriods);
        nsecs_t t = (numPeriods + 1) * mPeriod + phase;
        ALOGV("[%s] t = %" PRId64, mName, ns2us(t));
        t += mReferenceTime;
        ALOGV("[%s] Absolute t = %" PRId64, mName, ns2us(t));

        // Check that it's been slightly more than half a period since the last
        // event so that we don't accidentally fall into double-rate vsyncs
        if (isCloseToPeriod(t - listener.mLastEventTime)) {
            t += mPeriod;
            ALOGV("[%s] Modifying t -> %" PRId64, mName, ns2us(t));
        }

        t -= mWakeupLatency;
        ALOGV("[%s] Corrected for wakeup latency -> %" PRId64, mName, ns2us(t));

        return t;
    }

    void fireCallbackInvocations(const std::vector<CallbackInvocation>& callbacks) {
        if (mTraceDetailedInfo) ATRACE_CALL();
        for (size_t i = 0; i < callbacks.size(); i++) {
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
        }
    }

    const char* const mName;

    bool mStop;
    bool mModelLocked;

    nsecs_t mPeriod;
    nsecs_t mPhase;
    nsecs_t mReferenceTime;
    nsecs_t mWakeupLatency;

    int64_t mFrameNumber;

    std::vector<EventListener> mEventListeners;

    Mutex mMutex;
    Condition mCond;

    // Flag to turn on logging in systrace.
    const bool mTraceDetailedInfo;
};

#undef LOG_TAG
#define LOG_TAG "DispSync"

class ZeroPhaseTracer : public DispSync::Callback {
public:
    ZeroPhaseTracer() : mParity(false) {}

    virtual void onDispSyncEvent(nsecs_t /*when*/) {
        mParity = !mParity;
        ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0);
    }

private:
    bool mParity;
};

DispSync::DispSync(const char* name) : mName(name), mRefreshSkipCount(0) {
    // This flag offers the ability to turn on systrace logging from the shell.
    char value[PROPERTY_VALUE_MAX];
    property_get("debug.sf.dispsync_trace_detailed_info", value, "0");
    mTraceDetailedInfo = atoi(value);
    mThread = new DispSyncThread(name, mTraceDetailedInfo);
}

DispSync::~DispSync() {
    mThread->stop();
    mThread->requestExitAndWait();
}

void DispSync::init(bool hasSyncFramework, int64_t dispSyncPresentTimeOffset) {
    mIgnorePresentFences = !hasSyncFramework;
    mPresentTimeOffset = dispSyncPresentTimeOffset;
    mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);

    // set DispSync to SCHED_FIFO to minimize jitter
    struct sched_param param = {0};
    param.sched_priority = 2;
    if (sched_setscheduler(mThread->getTid(), SCHED_FIFO, &param) != 0) {
        ALOGE("Couldn't set SCHED_FIFO for DispSyncThread");
    }

    beginResync();

    if (mTraceDetailedInfo && kEnableZeroPhaseTracer) {
        mZeroPhaseTracer = std::make_unique<ZeroPhaseTracer>();
        addEventListener("ZeroPhaseTracer", 0, mZeroPhaseTracer.get(), 0);
    }
}

void DispSync::reset() {
    Mutex::Autolock lock(mMutex);
    resetLocked();
}

void DispSync::resetLocked() {
    mPhase = 0;
    const size_t lastSampleIdx = (mFirstResyncSample + mNumResyncSamples - 1) % MAX_RESYNC_SAMPLES;
    // Keep the most recent sample; when we resync to hardware we'll overwrite
    // this with a more accurate signal.
    if (mResyncSamples[lastSampleIdx] != 0) {
        mReferenceTime = mResyncSamples[lastSampleIdx];
    }
    mModelUpdated = false;
    for (size_t i = 0; i < MAX_RESYNC_SAMPLES; i++) {
        mResyncSamples[i] = 0;
    }
    mNumResyncSamples = 0;
    mFirstResyncSample = 0;
    mNumResyncSamplesSincePresent = 0;
    mThread->unlockModel();
    resetErrorLocked();
}

bool DispSync::addPresentFence(const std::shared_ptr<FenceTime>& fenceTime) {
    Mutex::Autolock lock(mMutex);

    if (mIgnorePresentFences) {
        return true;
    }

    mPresentFences[mPresentSampleOffset] = fenceTime;
    mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES;
    mNumResyncSamplesSincePresent = 0;

    updateErrorLocked();

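    // A true return value asks the caller to keep hardware vsync enabled:
    // either the model hasn't been updated yet, or its error is too large.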
    return !mModelUpdated || mError > kErrorThreshold;
}

void DispSync::beginResync() {
    Mutex::Autolock lock(mMutex);
    ALOGV("[%s] beginResync", mName);
    resetLocked();
}

bool DispSync::addResyncSample(nsecs_t timestamp, bool* periodFlushed) {
    Mutex::Autolock lock(mMutex);

    ALOGV("[%s] addResyncSample(%" PRId64 ")", mName, ns2us(timestamp));

    *periodFlushed = false;
    const size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
    mResyncSamples[idx] = timestamp;
    if (mNumResyncSamples == 0) {
        mPhase = 0;
        ALOGV("[%s] First resync sample: mPeriod = %" PRId64 ", mPhase = 0, "
              "mReferenceTime = %" PRId64,
              mName, ns2us(mPeriod), ns2us(timestamp));
    } else if (mPendingPeriod > 0) {
        // mNumResyncSamples > 0, so priorIdx won't overflow
        const size_t priorIdx = (mFirstResyncSample + mNumResyncSamples - 1) % MAX_RESYNC_SAMPLES;
        const nsecs_t lastTimestamp = mResyncSamples[priorIdx];

        const nsecs_t observedVsync = std::abs(timestamp - lastTimestamp);
        if (std::abs(observedVsync - mPendingPeriod) <= std::abs(observedVsync - mIntendedPeriod)) {
            // Either the observed vsync is closer to the pending period (and
            // thus we detected a period change), or the period change will
            // no-op. In either case, reset the model and flush the pending
            // period.
            resetLocked();
            mIntendedPeriod = mPendingPeriod;
            mPeriod = mPendingPeriod;
            mPendingPeriod = 0;
            if (mTraceDetailedInfo) {
                ATRACE_INT("DispSync:PendingPeriod", mPendingPeriod);
                ATRACE_INT("DispSync:IntendedPeriod", mIntendedPeriod);
            }
            *periodFlushed = true;
        }
    }
    // Always update the reference time with the most recent timestamp.
    mReferenceTime = timestamp;
    mThread->updateModel(mPeriod, mPhase, mReferenceTime);

    if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
        mNumResyncSamples++;
    } else {
        mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES;
    }

    updateModelLocked();

    if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) {
        resetErrorLocked();
    }

    if (mIgnorePresentFences) {
        // If we're ignoring the present fences we have no way to know whether
        // or not we're synchronized with the HW vsyncs, so we just request
        // that the HW vsync events be turned on.
        return true;
    }

    // Check against kErrorThreshold / 2 to add some hysteresis before having
    // to resync again
    bool modelLocked = mModelUpdated && mError < (kErrorThreshold / 2) && mPendingPeriod == 0;
    ALOGV("[%s] addResyncSample returning %s", mName, modelLocked ? "locked" : "unlocked");
    if (modelLocked) {
        *periodFlushed = true;
        mThread->lockModel();
    }
    return !modelLocked;
}

void DispSync::endResync() {
    mThread->lockModel();
}

status_t DispSync::addEventListener(const char* name, nsecs_t phase, Callback* callback,
                                    nsecs_t lastCallbackTime) {
    Mutex::Autolock lock(mMutex);
    return mThread->addEventListener(name, phase, callback, lastCallbackTime);
}

void DispSync::setRefreshSkipCount(int count) {
    Mutex::Autolock lock(mMutex);
    ALOGD("setRefreshSkipCount(%d)", count);
    mRefreshSkipCount = count;
    updateModelLocked();
}

status_t DispSync::removeEventListener(Callback* callback, nsecs_t* outLastCallbackTime) {
    Mutex::Autolock lock(mMutex);
    return mThread->removeEventListener(callback, outLastCallbackTime);
}

status_t DispSync::changePhaseOffset(Callback* callback, nsecs_t phase) {
    Mutex::Autolock lock(mMutex);
    return mThread->changePhaseOffset(callback, phase);
}

void DispSync::setPeriod(nsecs_t period) {
    Mutex::Autolock lock(mMutex);

    const bool pendingPeriodShouldChange =
            period != mIntendedPeriod || (period == mIntendedPeriod && mPendingPeriod != 0);

    if (pendingPeriodShouldChange) {
        mPendingPeriod = period;
    }
    if (mTraceDetailedInfo) {
        ATRACE_INT("DispSync:IntendedPeriod", mIntendedPeriod);
        ATRACE_INT("DispSync:PendingPeriod", mPendingPeriod);
    }
}

nsecs_t DispSync::getPeriod() {
    // lock mutex as mPeriod changes multiple times in updateModelLocked
    Mutex::Autolock lock(mMutex);
    return mPeriod;
}

void DispSync::updateModelLocked() {
    ALOGV("[%s] updateModelLocked %zu", mName, mNumResyncSamples);
    if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
        ALOGV("[%s] Computing...", mName);
        nsecs_t durationSum = 0;
        nsecs_t minDuration = INT64_MAX;
        nsecs_t maxDuration = 0;
        // We skip the first 2 samples because the first vsync duration on some
        // devices may be much more inaccurate than on other devices, e.g. due
        // to delays in ramping up from a power collapse. By doing so this
        // actually increases the accuracy of the DispSync model even though
        // we're effectively relying on fewer sample points.
        static constexpr size_t numSamplesSkipped = 2;
        for (size_t i = numSamplesSkipped; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
            nsecs_t duration = mResyncSamples[idx] - mResyncSamples[prev];
            durationSum += duration;
            minDuration = min(minDuration, duration);
            maxDuration = max(maxDuration, duration);
        }

        // Exclude the min and max from the average
        durationSum -= minDuration + maxDuration;
        mPeriod = durationSum / (mNumResyncSamples - numSamplesSkipped - 2);

        ALOGV("[%s] mPeriod = %" PRId64, mName, ns2us(mPeriod));

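        // Compute the phase as a circular mean: map each sample's offset
        // within the period onto the unit circle, average the resulting
        // vectors, and convert the angle of the mean back to a time offset.
        // This avoids the wrap-around bias a plain average would have for
        // samples near the period boundary.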
        double sampleAvgX = 0;
        double sampleAvgY = 0;
        double scale = 2.0 * M_PI / double(mPeriod);
        for (size_t i = numSamplesSkipped; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            nsecs_t sample = mResyncSamples[idx] - mReferenceTime;
            double samplePhase = double(sample % mPeriod) * scale;
            sampleAvgX += cos(samplePhase);
            sampleAvgY += sin(samplePhase);
        }

        sampleAvgX /= double(mNumResyncSamples - numSamplesSkipped);
        sampleAvgY /= double(mNumResyncSamples - numSamplesSkipped);

        mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);

        ALOGV("[%s] mPhase = %" PRId64, mName, ns2us(mPhase));

        if (mPhase < -(mPeriod / 2)) {
            mPhase += mPeriod;
            ALOGV("[%s] Adjusting mPhase -> %" PRId64, mName, ns2us(mPhase));
        }

        // Artificially inflate the period if requested.
        mPeriod += mPeriod * mRefreshSkipCount;

        mThread->updateModel(mPeriod, mPhase, mReferenceTime);
        mModelUpdated = true;
    }
}

void DispSync::updateErrorLocked() {
    if (!mModelUpdated) {
        return;
    }

    // Need to compare present fences against the un-adjusted refresh period,
    // since they might arrive between two events.
    nsecs_t period = mPeriod / (1 + mRefreshSkipCount);

    int numErrSamples = 0;
    nsecs_t sqErrSum = 0;

    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        // Only check the cached value of the signal time to avoid unnecessary
        // syscalls. It is the responsibility of the DispSync owner to
        // call getSignalTime() periodically so the cache is updated when the
        // fence signals.
        nsecs_t time = mPresentFences[i]->getCachedSignalTime();
        if (time == Fence::SIGNAL_TIME_PENDING || time == Fence::SIGNAL_TIME_INVALID) {
            continue;
        }

        nsecs_t sample = time - mReferenceTime;
        if (sample <= mPhase) {
            continue;
        }

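        // Distance to the nearest predicted vsync: reduce the sample modulo
        // the period, then fold the result into (-period/2, period/2].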
        nsecs_t sampleErr = (sample - mPhase) % period;
        if (sampleErr > period / 2) {
            sampleErr -= period;
        }
        sqErrSum += sampleErr * sampleErr;
        numErrSamples++;
    }

    if (numErrSamples > 0) {
        mError = sqErrSum / numErrSamples;
        mZeroErrSamplesCount = 0;
    } else {
        mError = 0;
        // Use mod ACCEPTABLE_ZERO_ERR_SAMPLES_COUNT to avoid log spam.
        mZeroErrSamplesCount++;
        ALOGE_IF((mZeroErrSamplesCount % ACCEPTABLE_ZERO_ERR_SAMPLES_COUNT) == 0,
                 "No present times for model error.");
    }

    if (mTraceDetailedInfo) {
        ATRACE_INT64("DispSync:Error", mError);
    }
}

void DispSync::resetErrorLocked() {
    mPresentSampleOffset = 0;
    mError = 0;
    mZeroErrSamplesCount = 0;
    if (mTraceDetailedInfo) {
        ATRACE_INT64("DispSync:Error", mError);
    }
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        mPresentFences[i] = FenceTime::NO_FENCE;
    }
}

nsecs_t DispSync::computeNextRefresh(int periodOffset) const {
    Mutex::Autolock lock(mMutex);
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    nsecs_t phase = mReferenceTime + mPhase;
    if (mPeriod == 0) {
        return 0;
    }
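    // (now - phase) / mPeriod is the number of whole periods elapsed since
    // the phase-adjusted reference; adding periodOffset + 1 steps forward to
    // the requested future vsync before scaling back into absolute time.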
    return (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase;
}

void DispSync::setIgnorePresentFences(bool ignore) {
    Mutex::Autolock lock(mMutex);
    if (mIgnorePresentFences != ignore) {
        mIgnorePresentFences = ignore;
        resetLocked();
    }
}

void DispSync::dump(std::string& result) const {
    Mutex::Autolock lock(mMutex);
    StringAppendF(&result, "present fences are %s\n", mIgnorePresentFences ? "ignored" : "used");
    StringAppendF(&result, "mPeriod: %" PRId64 " ns (%.3f fps; skipCount=%d)\n", mPeriod,
                  1000000000.0 / mPeriod, mRefreshSkipCount);
    StringAppendF(&result, "mPhase: %" PRId64 " ns\n", mPhase);
    StringAppendF(&result, "mError: %" PRId64 " ns (sqrt=%.1f)\n", mError, sqrt(mError));
    StringAppendF(&result, "mNumResyncSamplesSincePresent: %d (limit %d)\n",
                  mNumResyncSamplesSincePresent, MAX_RESYNC_SAMPLES_WITHOUT_PRESENT);
    StringAppendF(&result, "mNumResyncSamples: %zd (max %d)\n", mNumResyncSamples,
                  MAX_RESYNC_SAMPLES);

    result.append("mResyncSamples:\n");
    nsecs_t previous = -1;
    for (size_t i = 0; i < mNumResyncSamples; i++) {
        size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
        nsecs_t sampleTime = mResyncSamples[idx];
        if (i == 0) {
            StringAppendF(&result, "  %" PRId64 "\n", sampleTime);
        } else {
            StringAppendF(&result, "  %" PRId64 " (+%" PRId64 ")\n", sampleTime,
                          sampleTime - previous);
        }
        previous = sampleTime;
    }

    StringAppendF(&result, "mPresentFences [%d]:\n", NUM_PRESENT_SAMPLES);
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    previous = Fence::SIGNAL_TIME_INVALID;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        size_t idx = (i + mPresentSampleOffset) % NUM_PRESENT_SAMPLES;
        nsecs_t presentTime = mPresentFences[idx]->getSignalTime();
        if (presentTime == Fence::SIGNAL_TIME_PENDING) {
            StringAppendF(&result, "  [unsignaled fence]\n");
        } else if (presentTime == Fence::SIGNAL_TIME_INVALID) {
            StringAppendF(&result, "  [invalid fence]\n");
        } else if (previous == Fence::SIGNAL_TIME_PENDING ||
                   previous == Fence::SIGNAL_TIME_INVALID) {
            StringAppendF(&result, "  %" PRId64 " (%.3f ms ago)\n", presentTime,
                          (now - presentTime) / 1000000.0);
        } else {
            StringAppendF(&result, "  %" PRId64 " (+%" PRId64 " / %.3f) (%.3f ms ago)\n",
                          presentTime, presentTime - previous,
                          (presentTime - previous) / (double)mPeriod,
                          (now - presentTime) / 1000000.0);
        }
        previous = presentTime;
    }

    StringAppendF(&result, "current monotonic time: %" PRId64 "\n", now);
}

nsecs_t DispSync::expectedPresentTime() {
    // The HWC doesn't currently have a way to report additional latency.
    // Assume that whatever we submit now will appear right after the flip.
    // For a smart panel this might be 1. This is expressed in frames,
    // rather than time, because we expect to have a constant frame delay
    // regardless of the refresh rate.
    const uint32_t hwcLatency = 0;

    // Ask DispSync when the next refresh will be (CLOCK_MONOTONIC).
    return computeNextRefresh(hwcLatency);
}

} // namespace impl

} // namespace android