/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

// This is needed for stdint.h to define INT64_MAX in C++
#define __STDC_LIMIT_MACROS

#include <math.h>

#include <cutils/log.h>

#include <ui/Fence.h>

#include <utils/String8.h>
#include <utils/Thread.h>
#include <utils/Trace.h>
#include <utils/Vector.h>

#include "DispSync.h"
#include "EventLog/EventLog.h"

#include <algorithm>

using std::max;
using std::min;

namespace android {

// Setting this to true enables verbose tracing that can be used to debug
// vsync event model or phase issues.
static const bool kTraceDetailedInfo = false;

// Setting this to true adds a zero-phase tracer for correlating with hardware
// vsync events.
static const bool kEnableZeroPhaseTracer = false;

// This is the threshold used to determine when hardware vsync events are
// needed to re-synchronize the software vsync model with the hardware. The
// error metric used is the mean of the squared difference between each
// present time and the nearest software-predicted vsync.
static const nsecs_t kErrorThreshold = 160000000000; // (400 usec)^2, in ns^2

// This is the offset from the present fence timestamps to the corresponding
// vsync event.
static const int64_t kPresentTimeOffset = PRESENT_TIME_OFFSET_FROM_VSYNC_NS;

#undef LOG_TAG
#define LOG_TAG "DispSyncThread"
class DispSyncThread: public Thread {
public:

    DispSyncThread(const char* name):
            mName(name),
            mStop(false),
            mPeriod(0),
            mPhase(0),
            mReferenceTime(0),
            mWakeupLatency(0),
            mFrameNumber(0) {}

    virtual ~DispSyncThread() {}

    void updateModel(nsecs_t period, nsecs_t phase, nsecs_t referenceTime) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);
        mPeriod = period;
        mPhase = phase;
        mReferenceTime = referenceTime;
        ALOGV("[%s] updateModel: mPeriod = %" PRId64 ", mPhase = %" PRId64
                " mReferenceTime = %" PRId64, mName, ns2us(mPeriod),
                ns2us(mPhase), ns2us(mReferenceTime));
        mCond.signal();
    }

    void stop() {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);
        mStop = true;
        mCond.signal();
    }

    virtual bool threadLoop() {
        status_t err;
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        nsecs_t nextEventTime = 0;

        while (true) {
            Vector<CallbackInvocation> callbackInvocations;

            nsecs_t targetTime = 0;

            { // Scope for lock
                Mutex::Autolock lock(mMutex);

                if (kTraceDetailedInfo) {
                    ATRACE_INT64("DispSync:Frame", mFrameNumber);
                }
                ALOGV("[%s] Frame %" PRId64, mName, mFrameNumber);
                ++mFrameNumber;

                if (mStop) {
                    return false;
                }

                if (mPeriod == 0) {
                    err = mCond.wait(mMutex);
                    if (err != NO_ERROR) {
                        ALOGE("error waiting for new events: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                    continue;
                }

                nextEventTime = computeNextEventTimeLocked(now);
                targetTime = nextEventTime;

                bool isWakeup = false;

                if (now < targetTime) {
                    ALOGV("[%s] Waiting until %" PRId64, mName,
                            ns2us(targetTime));
                    if (kTraceDetailedInfo) ATRACE_NAME("DispSync waiting");
                    err = mCond.waitRelative(mMutex, targetTime - now);

                    if (err == TIMED_OUT) {
                        isWakeup = true;
                    } else if (err != NO_ERROR) {
                        ALOGE("error waiting for next event: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                }

                now = systemTime(SYSTEM_TIME_MONOTONIC);

                // Don't correct by more than 1.5 ms
                static const nsecs_t kMaxWakeupLatency = us2ns(1500);

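                // The average below is an exponentially weighted moving
                // average: the newest wakeup-latency sample gets a 1/64
                // weight, and the result is clamped to kMaxWakeupLatency so
                // a single long stall cannot permanently skew the correction.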
                if (isWakeup) {
                    mWakeupLatency = ((mWakeupLatency * 63) +
                            (now - targetTime)) / 64;
                    mWakeupLatency = min(mWakeupLatency, kMaxWakeupLatency);
                    if (kTraceDetailedInfo) {
                        ATRACE_INT64("DispSync:WakeupLat", now - targetTime);
                        ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
                    }
                }

                callbackInvocations = gatherCallbackInvocationsLocked(now);
            }

            if (callbackInvocations.size() > 0) {
                fireCallbackInvocations(callbackInvocations);
            }
        }

        return false;
    }

    status_t addEventListener(const char* name, nsecs_t phase,
            const sp<DispSync::Callback>& callback) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                return BAD_VALUE;
            }
        }

        EventListener listener;
        listener.mName = name;
        listener.mPhase = phase;
        listener.mCallback = callback;

        // We want to allow the first future event to fire without
        // allowing any past events to fire.
        listener.mLastEventTime = systemTime() - mPeriod / 2 + mPhase -
                mWakeupLatency;

        mEventListeners.push(listener);

        mCond.signal();

        return NO_ERROR;
    }

    status_t removeEventListener(const sp<DispSync::Callback>& callback) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                mEventListeners.removeAt(i);
                mCond.signal();
                return NO_ERROR;
            }
        }

        return BAD_VALUE;
    }

    // This method is only here to handle the kIgnorePresentFences case.
    bool hasAnyEventListeners() {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);
        return !mEventListeners.empty();
    }

private:

    struct EventListener {
        const char* mName;
        nsecs_t mPhase;
        nsecs_t mLastEventTime;
        sp<DispSync::Callback> mCallback;
    };

    struct CallbackInvocation {
        sp<DispSync::Callback> mCallback;
        nsecs_t mEventTime;
    };

    nsecs_t computeNextEventTimeLocked(nsecs_t now) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        ALOGV("[%s] computeNextEventTimeLocked", mName);
        nsecs_t nextEventTime = INT64_MAX;
        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    now);

            if (t < nextEventTime) {
                nextEventTime = t;
            }
        }

        ALOGV("[%s] nextEventTime = %" PRId64, mName, ns2us(nextEventTime));
        return nextEventTime;
    }

    Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        ALOGV("[%s] gatherCallbackInvocationsLocked @ %" PRId64, mName,
                ns2us(now));

        Vector<CallbackInvocation> callbackInvocations;
        nsecs_t onePeriodAgo = now - mPeriod;

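        // Compute each listener's next event relative to a base time one
        // period in the past so that an event whose time has already arrived
        // (t < now) is fired on this wakeup; the clamp against mLastEventTime
        // in computeListenerNextEventTimeLocked() keeps the same event from
        // firing twice.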
        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    onePeriodAgo);

            if (t < now) {
                CallbackInvocation ci;
                ci.mCallback = mEventListeners[i].mCallback;
                ci.mEventTime = t;
                ALOGV("[%s] [%s] Preparing to fire", mName,
                        mEventListeners[i].mName);
                callbackInvocations.push(ci);
                mEventListeners.editItemAt(i).mLastEventTime = t;
            }
        }

        return callbackInvocations;
    }

    nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener,
            nsecs_t baseTime) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        ALOGV("[%s] [%s] computeListenerNextEventTimeLocked(%" PRId64 ")",
                mName, listener.mName, ns2us(baseTime));

        nsecs_t lastEventTime = listener.mLastEventTime + mWakeupLatency;
        ALOGV("[%s] lastEventTime: %" PRId64, mName, ns2us(lastEventTime));
        if (baseTime < lastEventTime) {
            baseTime = lastEventTime;
            ALOGV("[%s] Clamping baseTime to lastEventTime -> %" PRId64, mName,
                    ns2us(baseTime));
        }

        baseTime -= mReferenceTime;
        ALOGV("[%s] Relative baseTime = %" PRId64, mName, ns2us(baseTime));
        nsecs_t phase = mPhase + listener.mPhase;
        ALOGV("[%s] Phase = %" PRId64, mName, ns2us(phase));
        baseTime -= phase;
        ALOGV("[%s] baseTime - phase = %" PRId64, mName, ns2us(baseTime));

        // If our previous time is before the reference (because the reference
        // has since been updated), the division by mPeriod will truncate
        // towards zero instead of computing the floor. Since in all cases
        // before the reference we want the next time to be effectively now, we
        // set baseTime to -mPeriod so that numPeriods will be -1.
        // When we add 1 and the phase, we will be at the correct event time for
        // this period.
        if (baseTime < 0) {
            ALOGV("[%s] Correcting negative baseTime", mName);
            baseTime = -mPeriod;
        }

        nsecs_t numPeriods = baseTime / mPeriod;
        ALOGV("[%s] numPeriods = %" PRId64, mName, numPeriods);
        nsecs_t t = (numPeriods + 1) * mPeriod + phase;
        ALOGV("[%s] t = %" PRId64, mName, ns2us(t));
        t += mReferenceTime;
        ALOGV("[%s] Absolute t = %" PRId64, mName, ns2us(t));

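        // Rough worked example (numbers assumed for illustration only): on a
        // 60 Hz panel mPeriod is about 16,666,667 ns. If the phase-relative
        // baseTime above lands 40 ms into the model, numPeriods is 2, so
        // t = 3 * mPeriod + phase, the first predicted vsync strictly after
        // baseTime, which is then shifted back to absolute time by adding
        // mReferenceTime.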
        // Check that it's been slightly more than half a period since the last
        // event so that we don't accidentally fall into double-rate vsyncs.
        if (t - listener.mLastEventTime < (3 * mPeriod / 5)) {
            t += mPeriod;
            ALOGV("[%s] Modifying t -> %" PRId64, mName, ns2us(t));
        }

        t -= mWakeupLatency;
        ALOGV("[%s] Corrected for wakeup latency -> %" PRId64, mName, ns2us(t));

        return t;
    }

    void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        for (size_t i = 0; i < callbacks.size(); i++) {
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
        }
    }

    const char* const mName;

    bool mStop;

    nsecs_t mPeriod;
    nsecs_t mPhase;
    nsecs_t mReferenceTime;
    nsecs_t mWakeupLatency;

    int64_t mFrameNumber;

    Vector<EventListener> mEventListeners;

    Mutex mMutex;
    Condition mCond;
};

#undef LOG_TAG
#define LOG_TAG "DispSync"

class ZeroPhaseTracer : public DispSync::Callback {
public:
    ZeroPhaseTracer() : mParity(false) {}

    virtual void onDispSyncEvent(nsecs_t /*when*/) {
        mParity = !mParity;
        ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0);
    }

private:
    bool mParity;
};

DispSync::DispSync(const char* name) :
        mName(name),
        mRefreshSkipCount(0),
        mThread(new DispSyncThread(name)) {

    mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);

    reset();
    beginResync();

    if (kTraceDetailedInfo) {
387 // If we're not getting present fences then the ZeroPhaseTracer
Jamie Gennisfaf77cc2013-07-30 15:10:32 -0700388 // would prevent HW vsync event from ever being turned off.
Andy McFadden5167ec62014-05-22 13:08:43 -0700389 // Even if we're just ignoring the fences, the zero-phase tracing is
390 // not needed because any time there is an event registered we will
391 // turn on the HW vsync events.
Tim Murray4a4e4a22016-04-19 16:29:23 +0000392 if (!kIgnorePresentFences && kEnableZeroPhaseTracer) {
393 addEventListener("ZeroPhaseTracer", 0, new ZeroPhaseTracer());
Jamie Gennisfaf77cc2013-07-30 15:10:32 -0700394 }
395 }
396}

DispSync::~DispSync() {}

void DispSync::reset() {
    Mutex::Autolock lock(mMutex);

    mPhase = 0;
    mReferenceTime = 0;
    mModelUpdated = false;
    mNumResyncSamples = 0;
    mFirstResyncSample = 0;
    mNumResyncSamplesSincePresent = 0;
    resetErrorLocked();
}

bool DispSync::addPresentFence(const sp<Fence>& fence) {
    Mutex::Autolock lock(mMutex);

    mPresentFences[mPresentSampleOffset] = fence;
    mPresentTimes[mPresentSampleOffset] = 0;
    mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES;
    mNumResyncSamplesSincePresent = 0;

    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        const sp<Fence>& f(mPresentFences[i]);
        if (f != NULL) {
            nsecs_t t = f->getSignalTime();
            if (t < INT64_MAX) {
                mPresentFences[i].clear();
                mPresentTimes[i] = t + kPresentTimeOffset;
            }
        }
    }

    updateErrorLocked();

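    // Returning true asks the caller to keep feeding hardware vsync samples:
    // either the model has not been computed yet, or the measured error is
    // above the resync threshold.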
    return !mModelUpdated || mError > kErrorThreshold;
}

void DispSync::beginResync() {
    Mutex::Autolock lock(mMutex);
    ALOGV("[%s] beginResync", mName);
    mModelUpdated = false;
    mNumResyncSamples = 0;
}

bool DispSync::addResyncSample(nsecs_t timestamp) {
    Mutex::Autolock lock(mMutex);

    ALOGV("[%s] addResyncSample(%" PRId64 ")", mName, ns2us(timestamp));

    size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
    mResyncSamples[idx] = timestamp;
    if (mNumResyncSamples == 0) {
        mPhase = 0;
        mReferenceTime = timestamp;
        ALOGV("[%s] First resync sample: mPeriod = %" PRId64 ", mPhase = 0, "
                "mReferenceTime = %" PRId64, mName, ns2us(mPeriod),
                ns2us(mReferenceTime));
        mThread->updateModel(mPeriod, mPhase, mReferenceTime);
    }

    if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
        mNumResyncSamples++;
    } else {
        mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES;
    }

    updateModelLocked();

    if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) {
        resetErrorLocked();
    }

    if (kIgnorePresentFences) {
        // If we don't have the sync framework we will never have
        // addPresentFence called. This means we have no way to know whether
        // or not we're synchronized with the HW vsyncs, so we just request
        // that the HW vsync events be turned on whenever we need to generate
        // SW vsync events.
        return mThread->hasAnyEventListeners();
    }

    // Check against kErrorThreshold / 2 to add some hysteresis before having
    // to resync again.
    bool modelLocked = mModelUpdated && mError < (kErrorThreshold / 2);
    ALOGV("[%s] addResyncSample returning %s", mName,
            modelLocked ? "locked" : "unlocked");
    return !modelLocked;
}

void DispSync::endResync() {
}

status_t DispSync::addEventListener(const char* name, nsecs_t phase,
        const sp<Callback>& callback) {
    Mutex::Autolock lock(mMutex);
    return mThread->addEventListener(name, phase, callback);
}

void DispSync::setRefreshSkipCount(int count) {
    Mutex::Autolock lock(mMutex);
    ALOGD("setRefreshSkipCount(%d)", count);
    mRefreshSkipCount = count;
    updateModelLocked();
}

status_t DispSync::removeEventListener(const sp<Callback>& callback) {
    Mutex::Autolock lock(mMutex);
    return mThread->removeEventListener(callback);
}

void DispSync::setPeriod(nsecs_t period) {
    Mutex::Autolock lock(mMutex);
    mPeriod = period;
    mPhase = 0;
    mReferenceTime = 0;
    mThread->updateModel(mPeriod, mPhase, mReferenceTime);
}

nsecs_t DispSync::getPeriod() {
    // lock mutex as mPeriod changes multiple times in updateModelLocked
    Mutex::Autolock lock(mMutex);
    return mPeriod;
}

void DispSync::updateModelLocked() {
    ALOGV("[%s] updateModelLocked %zu", mName, mNumResyncSamples);
    if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
        ALOGV("[%s] Computing...", mName);
        nsecs_t durationSum = 0;
        nsecs_t minDuration = INT64_MAX;
        nsecs_t maxDuration = 0;
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
            nsecs_t duration = mResyncSamples[idx] - mResyncSamples[prev];
            durationSum += duration;
            minDuration = min(minDuration, duration);
            maxDuration = max(maxDuration, duration);
        }

        // Exclude the min and max from the average
        durationSum -= minDuration + maxDuration;
        mPeriod = durationSum / (mNumResyncSamples - 3);

        ALOGV("[%s] mPeriod = %" PRId64, mName, ns2us(mPeriod));

        double sampleAvgX = 0;
        double sampleAvgY = 0;
        double scale = 2.0 * M_PI / double(mPeriod);
        // Intentionally skip the first sample
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            nsecs_t sample = mResyncSamples[idx] - mReferenceTime;
            double samplePhase = double(sample % mPeriod) * scale;
            sampleAvgX += cos(samplePhase);
            sampleAvgY += sin(samplePhase);
        }

        sampleAvgX /= double(mNumResyncSamples - 1);
        sampleAvgY /= double(mNumResyncSamples - 1);

        mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);

        ALOGV("[%s] mPhase = %" PRId64, mName, ns2us(mPhase));

        if (mPhase < -(mPeriod / 2)) {
            mPhase += mPeriod;
            ALOGV("[%s] Adjusting mPhase -> %" PRId64, mName, ns2us(mPhase));
        }

        if (kTraceDetailedInfo) {
            ATRACE_INT64("DispSync:Period", mPeriod);
            ATRACE_INT64("DispSync:Phase", mPhase + mPeriod / 2);
        }

        // Artificially inflate the period if requested.
        mPeriod += mPeriod * mRefreshSkipCount;

        mThread->updateModel(mPeriod, mPhase, mReferenceTime);
        mModelUpdated = true;
    }
}

void DispSync::updateErrorLocked() {
    if (!mModelUpdated) {
        return;
    }

    // Need to compare present fences against the un-adjusted refresh period,
    // since they might arrive between two events.
    nsecs_t period = mPeriod / (1 + mRefreshSkipCount);

    int numErrSamples = 0;
    nsecs_t sqErrSum = 0;

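    // Each signaled present time is reduced to its signed offset from the
    // nearest predicted vsync; the mean of the squared offsets becomes mError,
    // which addPresentFence() and addResyncSample() compare against
    // kErrorThreshold.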
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        nsecs_t sample = mPresentTimes[i] - mReferenceTime;
        if (sample > mPhase) {
            nsecs_t sampleErr = (sample - mPhase) % period;
            if (sampleErr > period / 2) {
                sampleErr -= period;
            }
            sqErrSum += sampleErr * sampleErr;
            numErrSamples++;
        }
    }

    if (numErrSamples > 0) {
        mError = sqErrSum / numErrSamples;
    } else {
        mError = 0;
    }

    if (kTraceDetailedInfo) {
        ATRACE_INT64("DispSync:Error", mError);
    }
}

void DispSync::resetErrorLocked() {
    mPresentSampleOffset = 0;
    mError = 0;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        mPresentFences[i].clear();
        mPresentTimes[i] = 0;
    }
}

nsecs_t DispSync::computeNextRefresh(int periodOffset) const {
    Mutex::Autolock lock(mMutex);
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    nsecs_t phase = mReferenceTime + mPhase;
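    // (now - phase) / mPeriod truncates to the number of whole periods since
    // the reference vsync (assuming now is at or past it), so adding
    // (periodOffset + 1) periods yields the next predicted vsync shifted by
    // periodOffset whole periods.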
    return (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase;
}

void DispSync::dump(String8& result) const {
    Mutex::Autolock lock(mMutex);
    result.appendFormat("present fences are %s\n",
            kIgnorePresentFences ? "ignored" : "used");
    result.appendFormat("mPeriod: %" PRId64 " ns (%.3f fps; skipCount=%d)\n",
            mPeriod, 1000000000.0 / mPeriod, mRefreshSkipCount);
    result.appendFormat("mPhase: %" PRId64 " ns\n", mPhase);
    result.appendFormat("mError: %" PRId64 " ns (sqrt=%.1f)\n",
            mError, sqrt(mError));
    result.appendFormat("mNumResyncSamplesSincePresent: %d (limit %d)\n",
            mNumResyncSamplesSincePresent, MAX_RESYNC_SAMPLES_WITHOUT_PRESENT);
    result.appendFormat("mNumResyncSamples: %zd (max %d)\n",
            mNumResyncSamples, MAX_RESYNC_SAMPLES);

    result.appendFormat("mResyncSamples:\n");
    nsecs_t previous = -1;
    for (size_t i = 0; i < mNumResyncSamples; i++) {
        size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
        nsecs_t sampleTime = mResyncSamples[idx];
        if (i == 0) {
            result.appendFormat("  %" PRId64 "\n", sampleTime);
        } else {
            result.appendFormat("  %" PRId64 " (+%" PRId64 ")\n",
                    sampleTime, sampleTime - previous);
        }
        previous = sampleTime;
    }

    result.appendFormat("mPresentFences / mPresentTimes [%d]:\n",
            NUM_PRESENT_SAMPLES);
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    previous = 0;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        size_t idx = (i + mPresentSampleOffset) % NUM_PRESENT_SAMPLES;
        bool signaled = mPresentFences[idx] == NULL;
        nsecs_t presentTime = mPresentTimes[idx];
        if (!signaled) {
            result.appendFormat("  [unsignaled fence]\n");
        } else if (presentTime == 0) {
            result.appendFormat("  0\n");
        } else if (previous == 0) {
            result.appendFormat("  %" PRId64 " (%.3f ms ago)\n", presentTime,
                    (now - presentTime) / 1000000.0);
        } else {
            result.appendFormat("  %" PRId64 " (+%" PRId64 " / %.3f) (%.3f ms ago)\n",
                    presentTime, presentTime - previous,
                    (presentTime - previous) / (double) mPeriod,
                    (now - presentTime) / 1000000.0);
        }
        previous = presentTime;
    }

    result.appendFormat("current monotonic time: %" PRId64 "\n", now);
}

} // namespace android