Revert "Revert "DispSync: Always resync after inactivity""
This reverts commit 67264e930992e43ef3351b04692d4ca59cbb01ad.
We've fixed the kernel issues this change exposed.
Bug: 28198793
Change-Id: Ie895cc0a815094cce4bee3b2bf45800ee1e2fdc3
diff --git a/services/surfaceflinger/DispSync.cpp b/services/surfaceflinger/DispSync.cpp
index 5ba387d..4cf9370 100644
--- a/services/surfaceflinger/DispSync.cpp
+++ b/services/surfaceflinger/DispSync.cpp
@@ -15,6 +15,7 @@
*/
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+//#define LOG_NDEBUG 0
// This is needed for stdint.h to define INT64_MAX in C++
#define __STDC_LIMIT_MACROS
@@ -33,12 +34,21 @@
#include "DispSync.h"
#include "EventLog/EventLog.h"
+#include <algorithm>
+
+using std::max;
+using std::min;
+
namespace android {
// Setting this to true enables verbose tracing that can be used to debug
// vsync event model or phase issues.
static const bool kTraceDetailedInfo = false;
+// Setting this to true adds a zero-phase tracer for correlating with hardware
+// vsync events
+static const bool kEnableZeroPhaseTracer = false;
+
// This is the threshold used to determine when hardware vsync events are
// needed to re-synchronize the software vsync model with the hardware. The
// error metric used is the mean of the squared difference between each
@@ -49,28 +59,36 @@
// vsync event.
static const int64_t kPresentTimeOffset = PRESENT_TIME_OFFSET_FROM_VSYNC_NS;
+#undef LOG_TAG
+#define LOG_TAG "DispSyncThread"
class DispSyncThread: public Thread {
public:
- DispSyncThread():
+ DispSyncThread(const char* name):
+ mName(name),
mStop(false),
mPeriod(0),
mPhase(0),
mReferenceTime(0),
- mWakeupLatency(0) {
- }
+ mWakeupLatency(0),
+ mFrameNumber(0) {}
virtual ~DispSyncThread() {}
void updateModel(nsecs_t period, nsecs_t phase, nsecs_t referenceTime) {
+ if (kTraceDetailedInfo) ATRACE_CALL();
Mutex::Autolock lock(mMutex);
mPeriod = period;
mPhase = phase;
mReferenceTime = referenceTime;
+ ALOGV("[%s] updateModel: mPeriod = %" PRId64 ", mPhase = %" PRId64
+ " mReferenceTime = %" PRId64, mName, ns2us(mPeriod),
+ ns2us(mPhase), ns2us(mReferenceTime));
mCond.signal();
}
void stop() {
+ if (kTraceDetailedInfo) ATRACE_CALL();
Mutex::Autolock lock(mMutex);
mStop = true;
mCond.signal();
@@ -89,6 +107,12 @@
{ // Scope for lock
Mutex::Autolock lock(mMutex);
+ if (kTraceDetailedInfo) {
+ ATRACE_INT64("DispSync:Frame", mFrameNumber);
+ }
+ ALOGV("[%s] Frame %" PRId64, mName, mFrameNumber);
+ ++mFrameNumber;
+
if (mStop) {
return false;
}
@@ -109,6 +133,9 @@
bool isWakeup = false;
if (now < targetTime) {
+ ALOGV("[%s] Waiting until %" PRId64, mName,
+ ns2us(targetTime));
+ if (kTraceDetailedInfo) ATRACE_NAME("DispSync waiting");
err = mCond.waitRelative(mMutex, targetTime - now);
if (err == TIMED_OUT) {
@@ -122,15 +149,15 @@
now = systemTime(SYSTEM_TIME_MONOTONIC);
+ // Don't correct by more than 1.5 ms
+ static const nsecs_t kMaxWakeupLatency = us2ns(1500);
+
if (isWakeup) {
mWakeupLatency = ((mWakeupLatency * 63) +
(now - targetTime)) / 64;
- if (mWakeupLatency > 500000) {
- // Don't correct by more than 500 us
- mWakeupLatency = 500000;
- }
+ mWakeupLatency = min(mWakeupLatency, kMaxWakeupLatency);
if (kTraceDetailedInfo) {
- ATRACE_INT64("DispSync:WakeupLat", now - nextEventTime);
+ ATRACE_INT64("DispSync:WakeupLat", now - targetTime);
ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
}
}
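
The hunk above replaces the hard-coded 500 us clamp with a named 1.5 ms
kMaxWakeupLatency and fixes the trace point to measure against targetTime
rather than nextEventTime. A minimal standalone sketch of the filter, with a
stand-in us2ns helper rather than the libutils one:

    #include <algorithm>
    #include <cstdint>

    using nsecs_t = int64_t;

    constexpr nsecs_t us2ns(nsecs_t us) { return us * 1000; }

    // Exponential moving average: each wakeup contributes 1/64 of its
    // latency, so a single late wakeup barely moves the estimate, and the
    // clamp keeps a pathological wakeup from shifting events by >1.5 ms.
    nsecs_t updateWakeupLatency(nsecs_t avg, nsecs_t now, nsecs_t targetTime) {
        static const nsecs_t kMaxWakeupLatency = us2ns(1500);
        avg = ((avg * 63) + (now - targetTime)) / 64;
        return std::min(avg, kMaxWakeupLatency);
    }
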
@@ -146,7 +173,9 @@
return false;
}
- status_t addEventListener(nsecs_t phase, const sp<DispSync::Callback>& callback) {
+ status_t addEventListener(const char* name, nsecs_t phase,
+ const sp<DispSync::Callback>& callback) {
+ if (kTraceDetailedInfo) ATRACE_CALL();
Mutex::Autolock lock(mMutex);
for (size_t i = 0; i < mEventListeners.size(); i++) {
@@ -156,15 +185,14 @@
}
EventListener listener;
+ listener.mName = name;
listener.mPhase = phase;
listener.mCallback = callback;
// We want to allow the firstmost future event to fire without
- // allowing any past events to fire. Because
- // computeListenerNextEventTimeLocked filters out events within a half
- // a period of the last event time, we need to initialize the last
- // event time to a half a period in the past.
- listener.mLastEventTime = systemTime(SYSTEM_TIME_MONOTONIC) - mPeriod / 2;
+ // allowing any past events to fire
+ listener.mLastEventTime = systemTime() - mPeriod / 2 + mPhase -
+ mWakeupLatency;
mEventListeners.push(listener);
@@ -174,6 +202,7 @@
}
status_t removeEventListener(const sp<DispSync::Callback>& callback) {
+ if (kTraceDetailedInfo) ATRACE_CALL();
Mutex::Autolock lock(mMutex);
for (size_t i = 0; i < mEventListeners.size(); i++) {
@@ -189,6 +218,7 @@
// This method is only here to handle the kIgnorePresentFences case.
bool hasAnyEventListeners() {
+ if (kTraceDetailedInfo) ATRACE_CALL();
Mutex::Autolock lock(mMutex);
return !mEventListeners.empty();
}
@@ -196,6 +226,7 @@
private:
struct EventListener {
+ const char* mName;
nsecs_t mPhase;
nsecs_t mLastEventTime;
sp<DispSync::Callback> mCallback;
@@ -207,6 +238,8 @@
};
nsecs_t computeNextEventTimeLocked(nsecs_t now) {
+ if (kTraceDetailedInfo) ATRACE_CALL();
+ ALOGV("[%s] computeNextEventTimeLocked", mName);
nsecs_t nextEventTime = INT64_MAX;
for (size_t i = 0; i < mEventListeners.size(); i++) {
nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
@@ -217,21 +250,28 @@
}
}
+ ALOGV("[%s] nextEventTime = %" PRId64, mName, ns2us(nextEventTime));
return nextEventTime;
}
Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
+ if (kTraceDetailedInfo) ATRACE_CALL();
+ ALOGV("[%s] gatherCallbackInvocationsLocked @ %" PRId64, mName,
+ ns2us(now));
+
Vector<CallbackInvocation> callbackInvocations;
- nsecs_t ref = now - mPeriod;
+ nsecs_t onePeriodAgo = now - mPeriod;
for (size_t i = 0; i < mEventListeners.size(); i++) {
nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
- ref);
+ onePeriodAgo);
if (t < now) {
CallbackInvocation ci;
ci.mCallback = mEventListeners[i].mCallback;
ci.mEventTime = t;
+ ALOGV("[%s] [%s] Preparing to fire", mName,
+ mEventListeners[i].mName);
callbackInvocations.push(ci);
mEventListeners.editItemAt(i).mLastEventTime = t;
}
@@ -241,29 +281,67 @@
}
nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener,
- nsecs_t ref) {
+ nsecs_t baseTime) {
+ if (kTraceDetailedInfo) ATRACE_CALL();
+ ALOGV("[%s] [%s] computeListenerNextEventTimeLocked(%" PRId64 ")",
+ mName, listener.mName, ns2us(baseTime));
- nsecs_t lastEventTime = listener.mLastEventTime;
- if (ref < lastEventTime) {
- ref = lastEventTime;
+ nsecs_t lastEventTime = listener.mLastEventTime + mWakeupLatency;
+ ALOGV("[%s] lastEventTime: %" PRId64, mName, ns2us(lastEventTime));
+ if (baseTime < lastEventTime) {
+ baseTime = lastEventTime;
+ ALOGV("[%s] Clamping baseTime to lastEventTime -> %" PRId64, mName,
+ ns2us(baseTime));
}
- nsecs_t phase = mReferenceTime + mPhase + listener.mPhase;
- nsecs_t t = (((ref - phase) / mPeriod) + 1) * mPeriod + phase;
+ baseTime -= mReferenceTime;
+ ALOGV("[%s] Relative baseTime = %" PRId64, mName, ns2us(baseTime));
+ nsecs_t phase = mPhase + listener.mPhase;
+ ALOGV("[%s] Phase = %" PRId64, mName, ns2us(phase));
+ baseTime -= phase;
+ ALOGV("[%s] baseTime - phase = %" PRId64, mName, ns2us(baseTime));
- if (t - listener.mLastEventTime < mPeriod / 2) {
+ // If our previous time is before the reference (because the reference
+ // has since been updated), the division by mPeriod will truncate
+ // towards zero instead of computing the floor. Since in all cases
+ // before the reference we want the next time to be effectively now, we
+ // set baseTime to -mPeriod so that numPeriods will be -1.
+ // When we add 1 and the phase, we will be at the correct event time for
+ // this period.
+ if (baseTime < 0) {
+ ALOGV("[%s] Correcting negative baseTime", mName);
+ baseTime = -mPeriod;
+ }
+
+ nsecs_t numPeriods = baseTime / mPeriod;
+ ALOGV("[%s] numPeriods = %" PRId64, mName, numPeriods);
+ nsecs_t t = (numPeriods + 1) * mPeriod + phase;
+ ALOGV("[%s] t = %" PRId64, mName, ns2us(t));
+ t += mReferenceTime;
+ ALOGV("[%s] Absolute t = %" PRId64, mName, ns2us(t));
+
+ // Check that it's been slightly more than half a period since the last
+ // event so that we don't accidentally fall into double-rate vsyncs
+ if (t - listener.mLastEventTime < (3 * mPeriod / 5)) {
t += mPeriod;
+ ALOGV("[%s] Modifying t -> %" PRId64, mName, ns2us(t));
}
+ t -= mWakeupLatency;
+ ALOGV("[%s] Corrected for wakeup latency -> %" PRId64, mName, ns2us(t));
+
return t;
}
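
The rewritten computeListenerNextEventTimeLocked works in reference-relative
time so the integer division lands on the correct period, pins pre-reference
times to -mPeriod as the comment above explains, and uses the 3/5-period
check to keep a listener from firing twice per vsync. A condensed sketch with
the members collapsed into hypothetical parameters (phase here stands for
mPhase + listener.mPhase):

    using nsecs_t = int64_t;

    nsecs_t nextEventTime(nsecs_t baseTime, nsecs_t lastEventTime,
                          nsecs_t referenceTime, nsecs_t period,
                          nsecs_t phase, nsecs_t wakeupLatency) {
        // Never schedule before the last event plus the average wakeup latency.
        if (baseTime < lastEventTime + wakeupLatency) {
            baseTime = lastEventTime + wakeupLatency;
        }
        // Shift into reference-relative time, then strip the phase offset.
        baseTime -= referenceTime + phase;
        // Integer division truncates toward zero, not toward -infinity, so
        // pin negative values to -period to make numPeriods come out as -1.
        if (baseTime < 0) {
            baseTime = -period;
        }
        nsecs_t numPeriods = baseTime / period;
        nsecs_t t = (numPeriods + 1) * period + phase + referenceTime;
        // Require slightly more than half a period since the last event so
        // we don't fall into double-rate vsyncs.
        if (t - lastEventTime < 3 * period / 5) {
            t += period;
        }
        return t - wakeupLatency;  // wake early to absorb scheduling latency
    }
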
void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) {
+ if (kTraceDetailedInfo) ATRACE_CALL();
for (size_t i = 0; i < callbacks.size(); i++) {
callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
}
}
+ const char* const mName;
+
bool mStop;
nsecs_t mPeriod;
@@ -271,12 +349,17 @@
nsecs_t mReferenceTime;
nsecs_t mWakeupLatency;
+ int64_t mFrameNumber;
+
Vector<EventListener> mEventListeners;
Mutex mMutex;
Condition mCond;
};
+#undef LOG_TAG
+#define LOG_TAG "DispSync"
+
class ZeroPhaseTracer : public DispSync::Callback {
public:
ZeroPhaseTracer() : mParity(false) {}
@@ -290,9 +373,10 @@
bool mParity;
};
-DispSync::DispSync() :
+DispSync::DispSync(const char* name) :
+ mName(name),
mRefreshSkipCount(0),
- mThread(new DispSyncThread()) {
+ mThread(new DispSyncThread(name)) {
mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);
@@ -305,8 +389,8 @@
// Even if we're just ignoring the fences, the zero-phase tracing is
// not needed because any time there is an event registered we will
// turn on the HW vsync events.
- if (!kIgnorePresentFences) {
- addEventListener(0, new ZeroPhaseTracer());
+ if (!kIgnorePresentFences && kEnableZeroPhaseTracer) {
+ addEventListener("ZeroPhaseTracer", 0, new ZeroPhaseTracer());
}
}
}
@@ -351,7 +435,7 @@
void DispSync::beginResync() {
Mutex::Autolock lock(mMutex);
-
+ ALOGV("[%s] beginResync", mName);
mModelUpdated = false;
mNumResyncSamples = 0;
}
@@ -359,11 +443,17 @@
bool DispSync::addResyncSample(nsecs_t timestamp) {
Mutex::Autolock lock(mMutex);
+ ALOGV("[%s] addResyncSample(%" PRId64 ")", mName, ns2us(timestamp));
+
size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
mResyncSamples[idx] = timestamp;
if (mNumResyncSamples == 0) {
mPhase = 0;
mReferenceTime = timestamp;
+ ALOGV("[%s] First resync sample: mPeriod = %" PRId64 ", mPhase = 0, "
+ "mReferenceTime = %" PRId64, mName, ns2us(mPeriod),
+ ns2us(mReferenceTime));
+ mThread->updateModel(mPeriod, mPhase, mReferenceTime);
}
if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
@@ -387,17 +477,21 @@
return mThread->hasAnyEventListeners();
}
- return !mModelUpdated || mError > kErrorThreshold;
+ // Check against kErrorThreshold / 2 to add some hysteresis before having to
+ // resync again
+ bool modelLocked = mModelUpdated && mError < (kErrorThreshold / 2);
+ ALOGV("[%s] addResyncSample returning %s", mName,
+ modelLocked ? "locked" : "unlocked");
+ return !modelLocked;
}
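
The return value now has hysteresis: the model only counts as locked once the
error falls below half of kErrorThreshold, instead of flapping between locked
and unlocked right at the threshold. As a sketch, with stand-in names for the
DispSync members:

    using nsecs_t = int64_t;

    // Returns true while more hardware vsync samples are needed. Requiring
    // error < threshold/2 to lock leaves a band below kErrorThreshold that
    // absorbs jitter before another resync is forced.
    bool needsResync(bool modelUpdated, nsecs_t error, nsecs_t errorThreshold) {
        const bool modelLocked = modelUpdated && error < errorThreshold / 2;
        return !modelLocked;
    }
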
void DispSync::endResync() {
}
-status_t DispSync::addEventListener(nsecs_t phase,
+status_t DispSync::addEventListener(const char* name, nsecs_t phase,
const sp<Callback>& callback) {
-
Mutex::Autolock lock(mMutex);
- return mThread->addEventListener(phase, callback);
+ return mThread->addEventListener(name, phase, callback);
}
void DispSync::setRefreshSkipCount(int count) {
@@ -427,20 +521,32 @@
}
void DispSync::updateModelLocked() {
+ ALOGV("[%s] updateModelLocked %zu", mName, mNumResyncSamples);
if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
+ ALOGV("[%s] Computing...", mName);
nsecs_t durationSum = 0;
+ nsecs_t minDuration = INT64_MAX;
+ nsecs_t maxDuration = 0;
for (size_t i = 1; i < mNumResyncSamples; i++) {
size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
- durationSum += mResyncSamples[idx] - mResyncSamples[prev];
+ nsecs_t duration = mResyncSamples[idx] - mResyncSamples[prev];
+ durationSum += duration;
+ minDuration = min(minDuration, duration);
+ maxDuration = max(maxDuration, duration);
}
- mPeriod = durationSum / (mNumResyncSamples - 1);
+ // Exclude the min and max from the average
+ durationSum -= minDuration + maxDuration;
+ mPeriod = durationSum / (mNumResyncSamples - 3);
+
+ ALOGV("[%s] mPeriod = %" PRId64, mName, ns2us(mPeriod));
double sampleAvgX = 0;
double sampleAvgY = 0;
double scale = 2.0 * M_PI / double(mPeriod);
- for (size_t i = 0; i < mNumResyncSamples; i++) {
+ // Intentionally skip the first sample
+ for (size_t i = 1; i < mNumResyncSamples; i++) {
size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
nsecs_t sample = mResyncSamples[idx] - mReferenceTime;
double samplePhase = double(sample % mPeriod) * scale;
@@ -448,18 +554,21 @@
sampleAvgY += sin(samplePhase);
}
- sampleAvgX /= double(mNumResyncSamples);
- sampleAvgY /= double(mNumResyncSamples);
+ sampleAvgX /= double(mNumResyncSamples - 1);
+ sampleAvgY /= double(mNumResyncSamples - 1);
mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);
- if (mPhase < 0) {
+ ALOGV("[%s] mPhase = %" PRId64, mName, ns2us(mPhase));
+
+ if (mPhase < -(mPeriod / 2)) {
mPhase += mPeriod;
+ ALOGV("[%s] Adjusting mPhase -> %" PRId64, mName, ns2us(mPhase));
}
if (kTraceDetailedInfo) {
ATRACE_INT64("DispSync:Period", mPeriod);
- ATRACE_INT64("DispSync:Phase", mPhase);
+ ATRACE_INT64("DispSync:Phase", mPhase + mPeriod / 2);
}
// Artificially inflate the period if requested.
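
Taken together, the two hunks above make the model fit more robust: the
period average drops the single shortest and longest intervals, the phase fit
skips the first sample (its offset from mReferenceTime is zero by
construction, so it would bias the mean toward phase zero), and the phase is
recentered into [-period/2, period/2). A condensed, self-contained version of
the fit, assuming at least MIN_RESYNC_SAMPLES_FOR_UPDATE (now 6) samples:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    using nsecs_t = int64_t;

    void fitModel(const std::vector<nsecs_t>& samples, nsecs_t referenceTime,
                  nsecs_t* period, nsecs_t* phase) {
        nsecs_t durationSum = 0;
        nsecs_t minDuration = INT64_MAX, maxDuration = 0;
        for (size_t i = 1; i < samples.size(); i++) {
            nsecs_t duration = samples[i] - samples[i - 1];
            durationSum += duration;
            minDuration = std::min(minDuration, duration);
            maxDuration = std::max(maxDuration, duration);
        }
        // Drop the min and max so one early/late vsync doesn't skew the
        // average; (N - 1) intervals minus the two outliers leaves N - 3.
        *period = (durationSum - minDuration - maxDuration) /
                (nsecs_t(samples.size()) - 3);

        // Circular mean of each sample's offset within a period: mapping
        // offsets onto the unit circle handles wrap-around at the period
        // boundary, unlike a plain arithmetic mean.
        double avgX = 0, avgY = 0;
        const double scale = 2.0 * M_PI / double(*period);
        for (size_t i = 1; i < samples.size(); i++) {  // skip first sample
            double samplePhase =
                    double((samples[i] - referenceTime) % *period) * scale;
            avgX += std::cos(samplePhase);
            avgY += std::sin(samplePhase);
        }
        avgX /= double(samples.size() - 1);
        avgY /= double(samples.size() - 1);
        *phase = nsecs_t(std::atan2(avgY, avgX) / scale);
        if (*phase < -(*period / 2)) {
            *phase += *period;  // recenter into [-period/2, period/2)
        }
    }
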
diff --git a/services/surfaceflinger/DispSync.h b/services/surfaceflinger/DispSync.h
index a8524b9..537c81b 100644
--- a/services/surfaceflinger/DispSync.h
+++ b/services/surfaceflinger/DispSync.h
@@ -26,11 +26,8 @@
namespace android {
// Ignore present (retire) fences if the device doesn't have support for the
-// sync framework, or if all phase offsets are zero. The latter is useful
-// because it allows us to avoid resync bursts on devices that don't need
-// phase-offset VSYNC events.
-#if defined(RUNNING_WITHOUT_SYNC_FRAMEWORK) || \
- (VSYNC_EVENT_PHASE_OFFSET_NS == 0 && SF_VSYNC_EVENT_PHASE_OFFSET_NS == 0)
+// sync framework
+#if defined(RUNNING_WITHOUT_SYNC_FRAMEWORK)
static const bool kIgnorePresentFences = true;
#else
static const bool kIgnorePresentFences = false;
@@ -64,7 +61,7 @@
virtual void onDispSyncEvent(nsecs_t when) = 0;
};
- DispSync();
+ DispSync(const char* name);
~DispSync();
// reset clears the resync samples and error value.
@@ -114,7 +111,8 @@
// given phase offset from the hardware vsync events. The callback is
// called from a separate thread and it should return reasonably quickly
// (i.e. within a few hundred microseconds).
- status_t addEventListener(nsecs_t phase, const sp<Callback>& callback);
+ status_t addEventListener(const char* name, nsecs_t phase,
+ const sp<Callback>& callback);
// removeEventListener removes an already-registered event callback. Once
// this method returns that callback will no longer be called by the
@@ -137,10 +135,12 @@
void resetErrorLocked();
enum { MAX_RESYNC_SAMPLES = 32 };
- enum { MIN_RESYNC_SAMPLES_FOR_UPDATE = 3 };
+ enum { MIN_RESYNC_SAMPLES_FOR_UPDATE = 6 };
enum { NUM_PRESENT_SAMPLES = 8 };
enum { MAX_RESYNC_SAMPLES_WITHOUT_PRESENT = 4 };
+ const char* const mName;
+
// mPeriod is the computed period of the modeled vsync events in
// nanoseconds.
nsecs_t mPeriod;
diff --git a/services/surfaceflinger/EventThread.cpp b/services/surfaceflinger/EventThread.cpp
index f760200..dd88adb 100644
--- a/services/surfaceflinger/EventThread.cpp
+++ b/services/surfaceflinger/EventThread.cpp
@@ -44,8 +44,9 @@
return;
}
-EventThread::EventThread(const sp<VSyncSource>& src)
+EventThread::EventThread(const sp<VSyncSource>& src, SurfaceFlinger& flinger)
: mVSyncSource(src),
+ mFlinger(flinger),
mUseSoftwareVSync(false),
mVsyncEnabled(false),
mDebugVsyncEnabled(false),
@@ -126,6 +127,9 @@
void EventThread::requestNextVsync(
const sp<EventThread::Connection>& connection) {
Mutex::Autolock _l(mLock);
+
+ mFlinger.resyncWithRateLimit();
+
if (connection->count < 0) {
connection->count = 0;
mCondition.broadcast();
diff --git a/services/surfaceflinger/EventThread.h b/services/surfaceflinger/EventThread.h
index 9ba179a..34654fa 100644
--- a/services/surfaceflinger/EventThread.h
+++ b/services/surfaceflinger/EventThread.h
@@ -77,7 +77,7 @@
public:
- EventThread(const sp<VSyncSource>& src);
+ EventThread(const sp<VSyncSource>& src, SurfaceFlinger& flinger);
sp<Connection> createEventConnection() const;
status_t registerDisplayEventConnection(const sp<Connection>& connection);
@@ -116,6 +116,7 @@
// constants
sp<VSyncSource> mVSyncSource;
PowerHAL mPowerHAL;
+ SurfaceFlinger& mFlinger;
mutable Mutex mLock;
mutable Condition mCondition;
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index ea9fe21..b6c86d3 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -149,6 +149,7 @@
mLastTransactionTime(0),
mBootFinished(false),
mForceFullDamage(false),
+ mPrimaryDispSync("PrimaryDispSync"),
mPrimaryHWVsyncEnabled(false),
mHWVsyncAvailable(false),
mDaltonize(false),
@@ -331,11 +332,12 @@
class DispSyncSource : public VSyncSource, private DispSync::Callback {
public:
DispSyncSource(DispSync* dispSync, nsecs_t phaseOffset, bool traceVsync,
- const char* label) :
+ const char* name) :
+ mName(name),
mValue(0),
mTraceVsync(traceVsync),
- mVsyncOnLabel(String8::format("VsyncOn-%s", label)),
- mVsyncEventLabel(String8::format("VSYNC-%s", label)),
+ mVsyncOnLabel(String8::format("VsyncOn-%s", name)),
+ mVsyncEventLabel(String8::format("VSYNC-%s", name)),
mDispSync(dispSync),
mCallbackMutex(),
mCallback(),
@@ -348,7 +350,7 @@
virtual void setVSyncEnabled(bool enable) {
Mutex::Autolock lock(mVsyncMutex);
if (enable) {
- status_t err = mDispSync->addEventListener(mPhaseOffset,
+ status_t err = mDispSync->addEventListener(mName, mPhaseOffset,
static_cast<DispSync::Callback*>(this));
if (err != NO_ERROR) {
ALOGE("error registering vsync callback: %s (%d)",
@@ -399,7 +401,7 @@
}
// Add a listener with the new offset
- err = mDispSync->addEventListener(mPhaseOffset,
+ err = mDispSync->addEventListener(mName, mPhaseOffset,
static_cast<DispSync::Callback*>(this));
if (err != NO_ERROR) {
ALOGE("error registering vsync callback: %s (%d)",
@@ -425,6 +427,8 @@
}
}
+ const char* const mName;
+
int mValue;
const bool mTraceVsync;
@@ -455,10 +459,10 @@
// start the EventThread
sp<VSyncSource> vsyncSrc = new DispSyncSource(&mPrimaryDispSync,
vsyncPhaseOffsetNs, true, "app");
- mEventThread = new EventThread(vsyncSrc);
+ mEventThread = new EventThread(vsyncSrc, *this);
sp<VSyncSource> sfVsyncSrc = new DispSyncSource(&mPrimaryDispSync,
sfVsyncPhaseOffsetNs, true, "sf");
- mSFEventThread = new EventThread(sfVsyncSrc);
+ mSFEventThread = new EventThread(sfVsyncSrc, *this);
mEventQueue.setEventThread(mSFEventThread);
// Get a RenderEngine for the given display / config (can't fail)
@@ -806,6 +810,13 @@
}
}
+void SurfaceFlinger::resyncWithRateLimit() {
+ static constexpr nsecs_t kIgnoreDelay = ms2ns(500);
+ if (systemTime() - mLastSwapTime > kIgnoreDelay) {
+ resyncToHardwareVsync(true);
+ }
+}
+
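
This is the heart of the re-landed change: EventThread::requestNextVsync
(above) funnels every vsync request through resyncWithRateLimit, so the first
request after more than 500 ms of idle triggers a resync, while steady-state
rendering keeps mLastSwapTime fresh and makes the call a no-op. A standalone
sketch, with a steady_clock stand-in for libutils' systemTime():

    #include <chrono>
    #include <cstdint>

    using nsecs_t = int64_t;

    // Stand-in for the monotonic systemTime() used in the patch.
    static nsecs_t monotonicNowNs() {
        return std::chrono::duration_cast<std::chrono::nanoseconds>(
                std::chrono::steady_clock::now().time_since_epoch()).count();
    }

    struct RateLimitedResync {
        static constexpr nsecs_t kIgnoreDelay = 500000000;  // 500 ms
        nsecs_t lastSwapTime = 0;

        template <typename ResyncFn>
        void maybeResync(ResyncFn resync) {
            // No-op while frames are flowing; after >500 ms of inactivity
            // the next vsync request re-locks the model to hardware vsync.
            if (monotonicNowNs() - lastSwapTime > kIgnoreDelay) {
                resync();
            }
        }
    };
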
void SurfaceFlinger::onVSyncReceived(int32_t type, nsecs_t timestamp) {
bool needsHwVsync = false;
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 37110b9..41e42b7 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -407,8 +407,11 @@
* VSync
*/
void enableHardwareVsync();
- void disableHardwareVsync(bool makeUnavailable);
void resyncToHardwareVsync(bool makeAvailable);
+ void disableHardwareVsync(bool makeUnavailable);
+public:
+ void resyncWithRateLimit();
+private:
/* ------------------------------------------------------------------------
* Debugging & dumpsys
@@ -520,7 +523,7 @@
static const size_t NUM_BUCKETS = 8; // < 1-7, 7+
nsecs_t mFrameBuckets[NUM_BUCKETS];
nsecs_t mTotalTime;
- nsecs_t mLastSwapTime;
+ std::atomic<nsecs_t> mLastSwapTime;
};
}; // namespace android
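
mLastSwapTime becomes std::atomic presumably because resyncWithRateLimit now
reads it off the main thread (from the vsync-request path) while the main
thread updates it after each swap; without the atomic that unsynchronized
access would be a data race. A minimal illustration, not SurfaceFlinger's
actual code:

    #include <atomic>
    #include <cstdint>

    using nsecs_t = int64_t;

    std::atomic<nsecs_t> lastSwapTime{0};

    void onSwapCompleted(nsecs_t now) {
        lastSwapTime = now;  // writer: main thread, once per frame
    }

    bool idleLongerThan(nsecs_t now, nsecs_t delay) {
        // Reader: vsync-request path, no lock required.
        return now - lastSwapTime > delay;
    }
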
diff --git a/services/surfaceflinger/SurfaceFlinger_hwc1.cpp b/services/surfaceflinger/SurfaceFlinger_hwc1.cpp
index a63ec50..ebf8a19 100644
--- a/services/surfaceflinger/SurfaceFlinger_hwc1.cpp
+++ b/services/surfaceflinger/SurfaceFlinger_hwc1.cpp
@@ -147,6 +147,7 @@
mLastTransactionTime(0),
mBootFinished(false),
mForceFullDamage(false),
+ mPrimaryDispSync("PrimaryDispSync"),
mPrimaryHWVsyncEnabled(false),
mHWVsyncAvailable(false),
mDaltonize(false),
@@ -328,11 +329,12 @@
class DispSyncSource : public VSyncSource, private DispSync::Callback {
public:
DispSyncSource(DispSync* dispSync, nsecs_t phaseOffset, bool traceVsync,
- const char* label) :
+ const char* name) :
+ mName(name),
mValue(0),
mTraceVsync(traceVsync),
- mVsyncOnLabel(String8::format("VsyncOn-%s", label)),
- mVsyncEventLabel(String8::format("VSYNC-%s", label)),
+ mVsyncOnLabel(String8::format("VsyncOn-%s", name)),
+ mVsyncEventLabel(String8::format("VSYNC-%s", name)),
mDispSync(dispSync),
mCallbackMutex(),
mCallback(),
@@ -345,7 +347,7 @@
virtual void setVSyncEnabled(bool enable) {
Mutex::Autolock lock(mVsyncMutex);
if (enable) {
- status_t err = mDispSync->addEventListener(mPhaseOffset,
+ status_t err = mDispSync->addEventListener(mName, mPhaseOffset,
static_cast<DispSync::Callback*>(this));
if (err != NO_ERROR) {
ALOGE("error registering vsync callback: %s (%d)",
@@ -396,7 +398,7 @@
}
// Add a listener with the new offset
- err = mDispSync->addEventListener(mPhaseOffset,
+ err = mDispSync->addEventListener(mName, mPhaseOffset,
static_cast<DispSync::Callback*>(this));
if (err != NO_ERROR) {
ALOGE("error registering vsync callback: %s (%d)",
@@ -422,6 +424,8 @@
}
}
+ const char* const mName;
+
int mValue;
const bool mTraceVsync;
@@ -451,10 +455,10 @@
// start the EventThread
sp<VSyncSource> vsyncSrc = new DispSyncSource(&mPrimaryDispSync,
vsyncPhaseOffsetNs, true, "app");
- mEventThread = new EventThread(vsyncSrc);
+ mEventThread = new EventThread(vsyncSrc, *this);
sp<VSyncSource> sfVsyncSrc = new DispSyncSource(&mPrimaryDispSync,
sfVsyncPhaseOffsetNs, true, "sf");
- mSFEventThread = new EventThread(sfVsyncSrc);
+ mSFEventThread = new EventThread(sfVsyncSrc, *this);
mEventQueue.setEventThread(mSFEventThread);
// Initialize the H/W composer object. There may or may not be an
@@ -839,6 +843,13 @@
}
}
+void SurfaceFlinger::resyncWithRateLimit() {
+ static constexpr nsecs_t kIgnoreDelay = ms2ns(500);
+ if (systemTime() - mLastSwapTime > kIgnoreDelay) {
+ resyncToHardwareVsync(true);
+ }
+}
+
void SurfaceFlinger::onVSyncReceived(int type, nsecs_t timestamp) {
bool needsHwVsync = false;