/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "RenderThread.h"

#include "../renderstate/RenderState.h"
#include "CanvasContext.h"
#include "EglManager.h"
#include "RenderProxy.h"
#include "VulkanManager.h"
#include "utils/FatVector.h"

#include <gui/DisplayEventReceiver.h>
#include <gui/ISurfaceComposer.h>
#include <gui/SurfaceComposerClient.h>
#include <sys/resource.h>
#include <utils/Condition.h>
#include <utils/Log.h>
#include <utils/Mutex.h>

namespace android {
namespace uirenderer {
namespace renderthread {

// Number of events to read at a time from the DisplayEventReceiver pipe.
// The value should be large enough that we can quickly drain the pipe
// using just a few large reads.
static const size_t EVENT_BUFFER_SIZE = 100;

// Slight delay to give the UI time to push us a new frame before we replay
static const nsecs_t DISPATCH_FRAME_CALLBACKS_DELAY = milliseconds_to_nanoseconds(4);

TaskQueue::TaskQueue() : mHead(nullptr), mTail(nullptr) {}

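// Removes and returns the task at the head of the queue, or nullptr if the
// queue is empty. The returned task is unlinked before being handed back.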
RenderTask* TaskQueue::next() {
    RenderTask* ret = mHead;
    if (ret) {
        mHead = ret->mNext;
        if (!mHead) {
            mTail = nullptr;
        }
        ret->mNext = nullptr;
    }
    return ret;
}

RenderTask* TaskQueue::peek() {
    return mHead;
}

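// Inserts the task into the queue, keeping the linked list sorted by mRunAt so
// that tasks are popped in scheduled-run-time order.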
void TaskQueue::queue(RenderTask* task) {
    // Since the RenderTask itself forms the linked list it is not allowed
    // to have the same task queued twice
    LOG_ALWAYS_FATAL_IF(task->mNext || mTail == task, "Task is already in the queue!");
    if (mTail) {
        // Fast path if we can just append
        if (mTail->mRunAt <= task->mRunAt) {
            mTail->mNext = task;
            mTail = task;
        } else {
            // Need to find the proper insertion point
            RenderTask* previous = nullptr;
            RenderTask* next = mHead;
            while (next && next->mRunAt <= task->mRunAt) {
                previous = next;
                next = next->mNext;
            }
            if (!previous) {
                task->mNext = mHead;
                mHead = task;
            } else {
                previous->mNext = task;
                if (next) {
                    task->mNext = next;
                } else {
                    mTail = task;
                }
            }
        }
    } else {
        mTail = mHead = task;
    }
}

void TaskQueue::queueAtFront(RenderTask* task) {
    if (mTail) {
        task->mNext = mHead;
        mHead = task;
    } else {
        mTail = mHead = task;
    }
}

void TaskQueue::remove(RenderTask* task) {
    // TaskQueue is strict here to enforce that users are keeping track of
    // their RenderTasks due to how their memory is managed
    LOG_ALWAYS_FATAL_IF(!task->mNext && mTail != task,
            "Cannot remove a task that isn't in the queue!");

    // If task is the head we can just call next() to pop it off
    // Otherwise we need to scan through to find the task before it
    if (peek() == task) {
        next();
    } else {
        RenderTask* previous = mHead;
        while (previous->mNext != task) {
            previous = previous->mNext;
        }
        previous->mNext = task->mNext;
        if (mTail == task) {
            mTail = previous;
        }
    }
}

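// RenderTask that simply forwards to RenderThread::dispatchFrameCallbacks().
// It is queued to run a short delay after each vsync; see drainDisplayEventQueue().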
class DispatchFrameCallbacks : public RenderTask {
private:
    RenderThread* mRenderThread;
public:
    explicit DispatchFrameCallbacks(RenderThread* rt) : mRenderThread(rt) {}

    virtual void run() override {
        mRenderThread->dispatchFrameCallbacks();
    }
};

static bool gHasRenderThreadInstance = false;

bool RenderThread::hasInstance() {
    return gHasRenderThreadInstance;
}

RenderThread& RenderThread::getInstance() {
    // This is a pointer because otherwise __cxa_finalize
    // will try to delete it like a Good Citizen but that causes us to crash
    // because we don't want to delete the RenderThread normally.
    static RenderThread* sInstance = new RenderThread();
    gHasRenderThreadInstance = true;
    return *sInstance;
}

RenderThread::RenderThread() : Thread(true)
        , mNextWakeup(LLONG_MAX)
        , mDisplayEventReceiver(nullptr)
        , mVsyncRequested(false)
        , mFrameCallbackTaskPending(false)
        , mFrameCallbackTask(nullptr)
        , mRenderState(nullptr)
        , mEglManager(nullptr)
        , mVkManager(nullptr) {
    Properties::load();
    mFrameCallbackTask = new DispatchFrameCallbacks(this);
    mLooper = new Looper(false);
    run("RenderThread");
}

RenderThread::~RenderThread() {
    LOG_ALWAYS_FATAL("Can't destroy the render thread");
}

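// Creates the DisplayEventReceiver and registers its file descriptor with the
// Looper so that incoming vsync events wake the render thread.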
void RenderThread::initializeDisplayEventReceiver() {
    LOG_ALWAYS_FATAL_IF(mDisplayEventReceiver, "Initializing a second DisplayEventReceiver?");
    mDisplayEventReceiver = new DisplayEventReceiver();
    status_t status = mDisplayEventReceiver->initCheck();
    LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "Initialization of DisplayEventReceiver "
            "failed with status: %d", status);

    // Register the FD
    mLooper->addFd(mDisplayEventReceiver->getFd(), 0,
            Looper::EVENT_INPUT, RenderThread::displayEventReceiverCallback, this);
}

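// Runs on the render thread itself: derives the frame interval from the main
// display's refresh rate, then sets up the display event receiver, EGL,
// RenderState, JankTracker, and the Vulkan manager.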
void RenderThread::initThreadLocals() {
    sp<IBinder> dtoken(SurfaceComposerClient::getBuiltInDisplay(
            ISurfaceComposer::eDisplayIdMain));
    status_t status = SurfaceComposerClient::getDisplayInfo(dtoken, &mDisplayInfo);
    LOG_ALWAYS_FATAL_IF(status, "Failed to get display info\n");
    nsecs_t frameIntervalNanos = static_cast<nsecs_t>(1000000000 / mDisplayInfo.fps);
    mTimeLord.setFrameInterval(frameIntervalNanos);
    initializeDisplayEventReceiver();
    mEglManager = new EglManager(*this);
    mRenderState = new RenderState(*this);
    mJankTracker = new JankTracker(mDisplayInfo);
    mVkManager = new VulkanManager(*this);
}

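// Looper callback for the display event file descriptor. Returns 0 to remove
// the callback on error or hangup, 1 to keep receiving events.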
int RenderThread::displayEventReceiverCallback(int fd, int events, void* data) {
    if (events & (Looper::EVENT_ERROR | Looper::EVENT_HANGUP)) {
        ALOGE("Display event receiver pipe was closed or an error occurred. "
                "events=0x%x", events);
        return 0; // remove the callback
    }

    if (!(events & Looper::EVENT_INPUT)) {
        ALOGW("Received spurious callback for unhandled poll event. "
                "events=0x%x", events);
        return 1; // keep the callback
    }

    reinterpret_cast<RenderThread*>(data)->drainDisplayEventQueue();

    return 1; // keep the callback
}

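// Drains every pending event from the receiver and returns the timestamp of
// the most recent vsync, or 0 if no vsync event was pending.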
static nsecs_t latestVsyncEvent(DisplayEventReceiver* receiver) {
    DisplayEventReceiver::Event buf[EVENT_BUFFER_SIZE];
    nsecs_t latest = 0;
    ssize_t n;
    while ((n = receiver->getEvents(buf, EVENT_BUFFER_SIZE)) > 0) {
        for (ssize_t i = 0; i < n; i++) {
            const DisplayEventReceiver::Event& ev = buf[i];
            switch (ev.header.type) {
            case DisplayEventReceiver::DISPLAY_EVENT_VSYNC:
                latest = ev.header.timestamp;
                break;
            }
        }
    }
    if (n < 0) {
        ALOGW("Failed to get events from display event receiver, status=%d", status_t(n));
    }
    return latest;
}

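// Consumes any pending display events. If a new vsync arrived and no dispatch
// is already pending, schedules mFrameCallbackTask to run
// DISPATCH_FRAME_CALLBACKS_DELAY after the vsync timestamp.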
void RenderThread::drainDisplayEventQueue() {
    ATRACE_CALL();
    nsecs_t vsyncEvent = latestVsyncEvent(mDisplayEventReceiver);
    if (vsyncEvent > 0) {
        mVsyncRequested = false;
        if (mTimeLord.vsyncReceived(vsyncEvent) && !mFrameCallbackTaskPending) {
            ATRACE_NAME("queue mFrameCallbackTask");
            mFrameCallbackTaskPending = true;
            nsecs_t runAt = (vsyncEvent + DISPATCH_FRAME_CALLBACKS_DELAY);
            queueAt(mFrameCallbackTask, runAt);
        }
    }
}

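// Runs every registered IFrameCallback. The callback set is swapped into a
// local first so callbacks can safely re-register themselves while we iterate.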
void RenderThread::dispatchFrameCallbacks() {
    ATRACE_CALL();
    mFrameCallbackTaskPending = false;

    std::set<IFrameCallback*> callbacks;
    mFrameCallbacks.swap(callbacks);

    if (callbacks.size()) {
        // Assume one of them will probably animate again so preemptively
        // request the next vsync in case it occurs mid-frame
        requestVsync();
        for (std::set<IFrameCallback*>::iterator it = callbacks.begin(); it != callbacks.end(); it++) {
            (*it)->doFrame();
        }
    }
}

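// Asks the DisplayEventReceiver for the next vsync, at most once per drained
// vsync (mVsyncRequested is cleared again in drainDisplayEventQueue()).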
void RenderThread::requestVsync() {
    if (!mVsyncRequested) {
        mVsyncRequested = true;
        status_t status = mDisplayEventReceiver->requestNextVsync();
        LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
                "requestNextVsync failed with status: %d", status);
    }
}

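// Main loop of the render thread: poll the Looper, run every task that is due,
// compute the timeout for the next wakeup, and flush any pending frame
// callback registrations.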
bool RenderThread::threadLoop() {
    setpriority(PRIO_PROCESS, 0, PRIORITY_DISPLAY);
    initThreadLocals();

    int timeoutMillis = -1;
    for (;;) {
        int result = mLooper->pollOnce(timeoutMillis);
        LOG_ALWAYS_FATAL_IF(result == Looper::POLL_ERROR,
                "RenderThread Looper POLL_ERROR!");

        nsecs_t nextWakeup;
        {
            FatVector<RenderTask*, 10> workQueue;
            // Process our queue, if we have anything. By first acquiring
            // all the pending events then processing them we avoid vsync
            // starvation if more tasks are queued while we are processing tasks.
            while (RenderTask* task = nextTask(&nextWakeup)) {
                workQueue.push_back(task);
            }
            for (auto task : workQueue) {
                task->run();
                // task may have deleted itself, do not reference it again
            }
        }
        if (nextWakeup == LLONG_MAX) {
            timeoutMillis = -1;
        } else {
            nsecs_t timeoutNanos = nextWakeup - systemTime(SYSTEM_TIME_MONOTONIC);
            timeoutMillis = nanoseconds_to_milliseconds(timeoutNanos);
            if (timeoutMillis < 0) {
                timeoutMillis = 0;
            }
        }

        if (mPendingRegistrationFrameCallbacks.size() && !mFrameCallbackTaskPending) {
            drainDisplayEventQueue();
            mFrameCallbacks.insert(
                    mPendingRegistrationFrameCallbacks.begin(), mPendingRegistrationFrameCallbacks.end());
            mPendingRegistrationFrameCallbacks.clear();
            requestVsync();
        }

        if (!mFrameCallbackTaskPending && !mVsyncRequested && mFrameCallbacks.size()) {
            // TODO: Clean this up. This is working around an issue where a combination
            // of bad timing and slow drawing can result in dropping a stale vsync
            // on the floor (correct!) but fails to schedule to listen for the
            // next vsync (oops), so none of the callbacks are run.
            requestVsync();
        }
    }

    return false;
}

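// Queues a task and, if it is due sooner than the currently scheduled wakeup,
// wakes the Looper so the new deadline takes effect immediately.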
void RenderThread::queue(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queue(task);
    if (mNextWakeup && task->mRunAt < mNextWakeup) {
        mNextWakeup = 0;
        mLooper->wake();
    }
}

void RenderThread::queueAndWait(RenderTask* task) {
    // These need to be local to the thread to avoid the Condition
    // signaling the wrong thread. The easiest way to achieve that is to just
    // make this on the stack, although that has a slight cost to it
    Mutex mutex;
    Condition condition;
    SignalingRenderTask syncTask(task, &mutex, &condition);

    AutoMutex _lock(mutex);
    queue(&syncTask);
    condition.wait(mutex);
}

void RenderThread::queueAtFront(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queueAtFront(task);
    mLooper->wake();
}

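// Schedules a task to run at the given time, expressed in nanoseconds on the
// same timebase as systemTime(SYSTEM_TIME_MONOTONIC).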
void RenderThread::queueAt(RenderTask* task, nsecs_t runAtNs) {
    task->mRunAt = runAtNs;
    queue(task);
}

void RenderThread::remove(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.remove(task);
}

void RenderThread::postFrameCallback(IFrameCallback* callback) {
    mPendingRegistrationFrameCallbacks.insert(callback);
}

bool RenderThread::removeFrameCallback(IFrameCallback* callback) {
    size_t erased;
    erased = mFrameCallbacks.erase(callback);
    erased |= mPendingRegistrationFrameCallbacks.erase(callback);
    return erased;
}

void RenderThread::pushBackFrameCallback(IFrameCallback* callback) {
    if (mFrameCallbacks.erase(callback)) {
        mPendingRegistrationFrameCallbacks.insert(callback);
    }
}

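// Removes and returns the next task that is due to run now, or nullptr if the
// queue is empty or its head is not due yet. Also records the time of the next
// required wakeup in mNextWakeup and, if requested, in *nextWakeup.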
RenderTask* RenderThread::nextTask(nsecs_t* nextWakeup) {
    AutoMutex _lock(mLock);
    RenderTask* next = mQueue.peek();
    if (!next) {
        mNextWakeup = LLONG_MAX;
    } else {
        mNextWakeup = next->mRunAt;
        // Most tasks won't be delayed, so avoid unnecessary systemTime() calls
        if (next->mRunAt <= 0 || next->mRunAt <= systemTime(SYSTEM_TIME_MONOTONIC)) {
            next = mQueue.next();
        } else {
            next = nullptr;
        }
    }
    if (nextWakeup) {
        *nextWakeup = mNextWakeup;
    }
    return next;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */