/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_GRAPHICS

// This is needed for stdint.h to define INT64_MAX in C++
#define __STDC_LIMIT_MACROS

#include <math.h>
#include <inttypes.h>

#include <cutils/log.h>

#include <ui/Fence.h>

#include <utils/String8.h>
#include <utils/Thread.h>
#include <utils/Trace.h>
#include <utils/Vector.h>

#include "DispSync.h"
#include "EventLog/EventLog.h"

namespace android {

// Setting this to true enables verbose tracing that can be used to debug
// vsync event model or phase issues.
static const bool traceDetailedInfo = false;

// This is the threshold used to determine when hardware vsync events are
// needed to re-synchronize the software vsync model with the hardware. The
// error metric used is the mean of the squared difference between each
// present time and the nearest software-predicted vsync.
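// (For reference: 160000000000 ns^2 = (400000 ns)^2, so this threshold
// corresponds to a root-mean-squared error of 400 us between present times
// and predicted vsyncs.)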
static const nsecs_t errorThreshold = 160000000000;

// This works around the lack of support for the sync framework on some
// devices.
#ifdef RUNNING_WITHOUT_SYNC_FRAMEWORK
static const bool runningWithoutSyncFramework = true;
#else
static const bool runningWithoutSyncFramework = false;
#endif

// This is the offset from the present fence timestamps to the corresponding
// vsync event.
static const int64_t presentTimeOffset = PRESENT_TIME_OFFSET_FROM_VSYNC_NS;

class DispSyncThread: public Thread {
public:

    DispSyncThread():
            mStop(false),
            mPeriod(0),
            mPhase(0),
            mWakeupLatency(0) {
    }

    virtual ~DispSyncThread() {}

    void updateModel(nsecs_t period, nsecs_t phase) {
        Mutex::Autolock lock(mMutex);
        mPeriod = period;
        mPhase = phase;
        mCond.signal();
    }

    void stop() {
        Mutex::Autolock lock(mMutex);
        mStop = true;
        mCond.signal();
    }

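    // The core loop of the DispSync thread: sleep until the next event time
    // predicted by the current model, then fire the callbacks of any
    // listeners whose event time has arrived. Returning false terminates
    // the thread.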
    virtual bool threadLoop() {
        status_t err;
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        nsecs_t nextEventTime = 0;

        while (true) {
            Vector<CallbackInvocation> callbackInvocations;

            nsecs_t targetTime = 0;

            { // Scope for lock
                Mutex::Autolock lock(mMutex);

                if (mStop) {
                    return false;
                }

                if (mPeriod == 0) {
                    err = mCond.wait(mMutex);
                    if (err != NO_ERROR) {
                        ALOGE("error waiting for new events: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                    continue;
                }

                nextEventTime = computeNextEventTimeLocked(now);
                targetTime = nextEventTime;

                bool isWakeup = false;

                if (now < targetTime) {
                    err = mCond.waitRelative(mMutex, targetTime - now);

                    if (err == TIMED_OUT) {
                        isWakeup = true;
                    } else if (err != NO_ERROR) {
                        ALOGE("error waiting for next event: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                }

                now = systemTime(SYSTEM_TIME_MONOTONIC);

                if (isWakeup) {
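                    // Update the running wakeup-latency estimate with an
                    // exponential moving average (each new sample weighted
                    // 1/64). In this version of the code the estimate is
                    // only reported via systrace below.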
                    mWakeupLatency = ((mWakeupLatency * 63) +
                            (now - targetTime)) / 64;
                    if (mWakeupLatency > 500000) {
                        // Don't correct by more than 500 us
                        mWakeupLatency = 500000;
                    }
                    if (traceDetailedInfo) {
                        ATRACE_INT64("DispSync:WakeupLat", now - nextEventTime);
                        ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
                    }
                }

                callbackInvocations = gatherCallbackInvocationsLocked(now);
            }

            if (callbackInvocations.size() > 0) {
                fireCallbackInvocations(callbackInvocations);
            }
        }

        return false;
    }

    status_t addEventListener(nsecs_t phase, const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                return BAD_VALUE;
            }
        }

        EventListener listener;
        listener.mPhase = phase;
        listener.mCallback = callback;

        // We want to allow the first upcoming event to fire without
        // allowing any past events to fire. Because
        // computeListenerNextEventTimeLocked filters out events within half
        // a period of the last event time, we need to initialize the last
        // event time to half a period in the past.
        listener.mLastEventTime = systemTime(SYSTEM_TIME_MONOTONIC) - mPeriod / 2;

        mEventListeners.push(listener);

        mCond.signal();

        return NO_ERROR;
    }

    status_t removeEventListener(const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                mEventListeners.removeAt(i);
                mCond.signal();
                return NO_ERROR;
            }
        }

        return BAD_VALUE;
    }

    // This method is only here to handle the runningWithoutSyncFramework
    // case.
    bool hasAnyEventListeners() {
        Mutex::Autolock lock(mMutex);
        return !mEventListeners.empty();
    }

private:

    struct EventListener {
        nsecs_t mPhase;
        nsecs_t mLastEventTime;
        sp<DispSync::Callback> mCallback;
    };

    struct CallbackInvocation {
        sp<DispSync::Callback> mCallback;
        nsecs_t mEventTime;
    };

    nsecs_t computeNextEventTimeLocked(nsecs_t now) {
        nsecs_t nextEventTime = INT64_MAX;
        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    now);

            if (t < nextEventTime) {
                nextEventTime = t;
            }
        }

        return nextEventTime;
    }

    Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
        Vector<CallbackInvocation> callbackInvocations;
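        // Use a reference time one period in the past so that any listener
        // whose next event time (relative to that reference) has already
        // arrived is fired on this pass.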
        nsecs_t ref = now - mPeriod;

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    ref);

            if (t < now) {
                CallbackInvocation ci;
                ci.mCallback = mEventListeners[i].mCallback;
                ci.mEventTime = t;
                callbackInvocations.push(ci);
                mEventListeners.editItemAt(i).mLastEventTime = t;
            }
        }

        return callbackInvocations;
    }

    nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener,
            nsecs_t ref) {

        nsecs_t lastEventTime = listener.mLastEventTime;
        if (ref < lastEventTime) {
            ref = lastEventTime;
        }

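        // Compute the first model-aligned event time after ref: the next
        // multiple of mPeriod, offset by the combined model and listener
        // phase. For example, with mPeriod = 16 ms, phase = 4 ms, and
        // ref = 20 ms, this yields t = 36 ms.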
        nsecs_t phase = mPhase + listener.mPhase;
        nsecs_t t = (((ref - phase) / mPeriod) + 1) * mPeriod + phase;

        if (t - listener.mLastEventTime < mPeriod / 2) {
            t += mPeriod;
        }

        return t;
    }

    void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) {
        for (size_t i = 0; i < callbacks.size(); i++) {
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
        }
    }

    bool mStop;

    nsecs_t mPeriod;
    nsecs_t mPhase;
    nsecs_t mWakeupLatency;

    Vector<EventListener> mEventListeners;

    Mutex mMutex;
    Condition mCond;
};

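// A debug-only callback that toggles a systrace counter on every zero-phase
// vsync event, making it easy to compare the modeled vsync times against
// the hardware vsync in a trace.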
class ZeroPhaseTracer : public DispSync::Callback {
public:
    ZeroPhaseTracer() : mParity(false) {}

    virtual void onDispSyncEvent(nsecs_t when) {
        mParity = !mParity;
        ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0);
    }

private:
    bool mParity;
};

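// A rough sketch of how a client such as SurfaceFlinger is expected to
// drive this class (the names below are illustrative, not part of this
// file):
//
//     DispSync sync;
//     sync.setPeriod(hwVsyncPeriod);      // from the display configuration
//
//     sync.beginResync();
//     while (sync.addResyncSample(hwVsyncTimestamp)) {
//         // keep HW vsync enabled until the model converges
//     }
//     sync.endResync();
//
//     sync.addEventListener(phaseOffset, callback);  // SW vsync consumer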
DispSync::DispSync() {
    mThread = new DispSyncThread();
    mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);

    reset();
    beginResync();

    if (traceDetailedInfo) {
        // If runningWithoutSyncFramework is true then the ZeroPhaseTracer
        // would prevent the HW vsync events from ever being turned off.
        // Furthermore, zero-phase tracing is not needed in that case
        // because the HW vsync events are enabled whenever any listener is
        // registered.
        if (!runningWithoutSyncFramework) {
            addEventListener(0, new ZeroPhaseTracer());
        }
    }
}

DispSync::~DispSync() {}

void DispSync::reset() {
    Mutex::Autolock lock(mMutex);

    mNumResyncSamples = 0;
    mFirstResyncSample = 0;
    mNumResyncSamplesSincePresent = 0;
    resetErrorLocked();
}

bool DispSync::addPresentFence(const sp<Fence>& fence) {
    Mutex::Autolock lock(mMutex);

    mPresentFences[mPresentSampleOffset] = fence;
    mPresentTimes[mPresentSampleOffset] = 0;
    mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES;
    mNumResyncSamplesSincePresent = 0;

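    // Collect signal times from any fences that have fired since the last
    // call. Fence::getSignalTime() returns INT64_MAX while a fence is still
    // pending, so unsignaled entries are left in place to be checked again.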
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        const sp<Fence>& f(mPresentFences[i]);
        if (f != NULL) {
            nsecs_t t = f->getSignalTime();
            if (t < INT64_MAX) {
                mPresentFences[i].clear();
                mPresentTimes[i] = t + presentTimeOffset;
            }
        }
    }

    updateErrorLocked();

    return mPeriod == 0 || mError > errorThreshold;
}

void DispSync::beginResync() {
    Mutex::Autolock lock(mMutex);

    mNumResyncSamples = 0;
}

bool DispSync::addResyncSample(nsecs_t timestamp) {
    Mutex::Autolock lock(mMutex);

    size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
    mResyncSamples[idx] = timestamp;

    if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
        mNumResyncSamples++;
    } else {
        mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES;
    }

    updateModelLocked();

    if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) {
        resetErrorLocked();
    }

    if (runningWithoutSyncFramework) {
        // Without the sync framework, addPresentFence will never be called,
        // so we have no way to know whether or not we're synchronized with
        // the HW vsyncs. Instead, simply request that the HW vsync events
        // be turned on whenever we need to generate SW vsync events.
        return mThread->hasAnyEventListeners();
    }

    return mPeriod == 0 || mError > errorThreshold;
}

void DispSync::endResync() {
}

status_t DispSync::addEventListener(nsecs_t phase,
        const sp<Callback>& callback) {

    Mutex::Autolock lock(mMutex);
    return mThread->addEventListener(phase, callback);
}

status_t DispSync::removeEventListener(const sp<Callback>& callback) {
    Mutex::Autolock lock(mMutex);
    return mThread->removeEventListener(callback);
}

void DispSync::setPeriod(nsecs_t period) {
    Mutex::Autolock lock(mMutex);
    mPeriod = period;
    mPhase = 0;
    mThread->updateModel(mPeriod, mPhase);
}

void DispSync::updateModelLocked() {
    if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
        nsecs_t durationSum = 0;
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
            durationSum += mResyncSamples[idx] - mResyncSamples[prev];
        }

        mPeriod = durationSum / (mNumResyncSamples - 1);

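        // Compute the phase as a circular mean: map each sample's offset
        // within the period onto the unit circle, average those vectors,
        // and convert the angle of the average back into nanoseconds. This
        // avoids the wrap-around problem of a plain arithmetic mean near
        // the period boundary.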
        double sampleAvgX = 0;
        double sampleAvgY = 0;
        double scale = 2.0 * M_PI / double(mPeriod);
        for (size_t i = 0; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            nsecs_t sample = mResyncSamples[idx];
            double samplePhase = double(sample % mPeriod) * scale;
            sampleAvgX += cos(samplePhase);
            sampleAvgY += sin(samplePhase);
        }

        sampleAvgX /= double(mNumResyncSamples);
        sampleAvgY /= double(mNumResyncSamples);

        mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);

        if (mPhase < 0) {
            mPhase += mPeriod;
        }

        if (traceDetailedInfo) {
            ATRACE_INT64("DispSync:Period", mPeriod);
            ATRACE_INT64("DispSync:Phase", mPhase);
        }

        mThread->updateModel(mPeriod, mPhase);
    }
}

void DispSync::updateErrorLocked() {
    if (mPeriod == 0) {
        return;
    }

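    // For each recorded present time, measure its distance to the nearest
    // modeled vsync (wrapping the offset into [-mPeriod/2, mPeriod/2]) and
    // average the squared errors.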
    int numErrSamples = 0;
    nsecs_t sqErrSum = 0;

    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        nsecs_t sample = mPresentTimes[i];
        if (sample > mPhase) {
            nsecs_t sampleErr = (sample - mPhase) % mPeriod;
            if (sampleErr > mPeriod / 2) {
                sampleErr -= mPeriod;
            }
            sqErrSum += sampleErr * sampleErr;
            numErrSamples++;
        }
    }

    if (numErrSamples > 0) {
        mError = sqErrSum / numErrSamples;
    } else {
        mError = 0;
    }

    if (traceDetailedInfo) {
        ATRACE_INT64("DispSync:Error", mError);
    }
}

void DispSync::resetErrorLocked() {
    mPresentSampleOffset = 0;
    mError = 0;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        mPresentFences[i].clear();
        mPresentTimes[i] = 0;
    }
}

void DispSync::dump(String8& result) const {
    Mutex::Autolock lock(mMutex);
    result.appendFormat("mPeriod: %" PRId64 " ns\n", mPeriod);
    result.appendFormat("mPhase: %" PRId64 " ns\n", mPhase);
    result.appendFormat("mError: %" PRId64 " ns (sqrt: %.1f)\n",
            mError, sqrt(mError));
    result.appendFormat("mNumResyncSamplesSincePresent: %d (max %d)\n",
            mNumResyncSamplesSincePresent, MAX_RESYNC_SAMPLES_WITHOUT_PRESENT);
    result.appendFormat("mNumResyncSamples: %zu (max %d)\n",
            mNumResyncSamples, MAX_RESYNC_SAMPLES);

    result.appendFormat("mResyncSamples:\n");
    nsecs_t previous = -1;
    for (size_t i = 0; i < mNumResyncSamples; i++) {
        size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
        nsecs_t sampleTime = mResyncSamples[idx];
        if (i == 0) {
            result.appendFormat("  %" PRId64 "\n", sampleTime);
        } else {
            result.appendFormat("  %" PRId64 " (+%" PRId64 ")\n",
                    sampleTime, sampleTime - previous);
        }
        previous = sampleTime;
    }

    result.appendFormat("mPresentFences / mPresentTimes [%d]:\n",
            NUM_PRESENT_SAMPLES);
    previous = 0;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        size_t idx = (i + mPresentSampleOffset) % NUM_PRESENT_SAMPLES;
        bool signaled = mPresentFences[idx] == NULL;
        nsecs_t presentTime = mPresentTimes[idx];
        if (!signaled) {
            result.appendFormat("  [unsignaled fence]\n");
        } else if (previous == 0) {
            result.appendFormat("  %" PRId64 "\n", presentTime);
        } else {
            result.appendFormat("  %" PRId64 " (+%" PRId64 " / %.3f)\n",
                    presentTime, presentTime - previous,
                    (presentTime - previous) / (double) mPeriod);
        }
        previous = presentTime;
    }
}

} // namespace android