John Reck | cec24ae | 2013-11-05 13:27:50 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2013 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
John Reck | cec24ae | 2013-11-05 13:27:50 -0800 | [diff] [blame] | 17 | #include "RenderThread.h" |
| 18 | |
Chris Craik | 65fe5ee | 2015-01-26 18:06:29 -0800 | [diff] [blame] | 19 | #include "../renderstate/RenderState.h" |
Derek Sollenberger | c4fbada | 2016-11-07 16:05:41 -0500 | [diff] [blame] | 20 | #include "../pipeline/skia/SkiaOpenGLReadback.h" |
John Reck | 4f02bf4 | 2014-01-03 18:09:17 -0800 | [diff] [blame] | 21 | #include "CanvasContext.h" |
John Reck | 3b20251 | 2014-06-23 13:13:08 -0700 | [diff] [blame] | 22 | #include "EglManager.h" |
Derek Sollenberger | c4fbada | 2016-11-07 16:05:41 -0500 | [diff] [blame] | 23 | #include "OpenGLReadback.h" |
John Reck | 4f02bf4 | 2014-01-03 18:09:17 -0800 | [diff] [blame] | 24 | #include "RenderProxy.h" |
Derek Sollenberger | 0e3cba3 | 2016-11-09 11:58:36 -0500 | [diff] [blame] | 25 | #include "VulkanManager.h" |
John Reck | 12efa55 | 2016-11-15 10:22:01 -0800 | [diff] [blame] | 26 | #include "utils/FatVector.h" |
John Reck | cec24ae | 2013-11-05 13:27:50 -0800 | [diff] [blame] | 27 | |
Chris Craik | 65fe5ee | 2015-01-26 18:06:29 -0800 | [diff] [blame] | 28 | #include <gui/DisplayEventReceiver.h> |
John Reck | b36016c | 2015-03-11 08:50:53 -0700 | [diff] [blame] | 29 | #include <gui/ISurfaceComposer.h> |
| 30 | #include <gui/SurfaceComposerClient.h> |
Chris Craik | 65fe5ee | 2015-01-26 18:06:29 -0800 | [diff] [blame] | 31 | #include <sys/resource.h> |
John Reck | cba287b | 2015-11-10 12:52:44 -0800 | [diff] [blame] | 32 | #include <utils/Condition.h> |
Chris Craik | 65fe5ee | 2015-01-26 18:06:29 -0800 | [diff] [blame] | 33 | #include <utils/Log.h> |
John Reck | cba287b | 2015-11-10 12:52:44 -0800 | [diff] [blame] | 34 | #include <utils/Mutex.h> |
Chris Craik | 65fe5ee | 2015-01-26 18:06:29 -0800 | [diff] [blame] | 35 | |
John Reck | cec24ae | 2013-11-05 13:27:50 -0800 | [diff] [blame] | 36 | namespace android { |
John Reck | cec24ae | 2013-11-05 13:27:50 -0800 | [diff] [blame] | 37 | namespace uirenderer { |
| 38 | namespace renderthread { |
| 39 | |
John Reck | e45b1fd | 2014-04-15 09:50:16 -0700 | [diff] [blame] | 40 | // Number of events to read at a time from the DisplayEventReceiver pipe. |
| 41 | // The value should be large enough that we can quickly drain the pipe |
| 42 | // using just a few large reads. |
| 43 | static const size_t EVENT_BUFFER_SIZE = 100; |
| 44 | |
 | 45 | // Slight delay to give the UI time to push us a new frame before we replay |
 | 45b| // the pending frame callbacks ourselves |
John Reck | a733f89 | 2014-12-19 11:37:21 -0800 | [diff] [blame] | 46 | static const nsecs_t DISPATCH_FRAME_CALLBACKS_DELAY = milliseconds_to_nanoseconds(4); |
John Reck | e45b1fd | 2014-04-15 09:50:16 -0700 | [diff] [blame] | 47 | |
Chris Craik | d41c4d8 | 2015-01-05 15:51:13 -0800 | [diff] [blame] | 48 | TaskQueue::TaskQueue() : mHead(nullptr), mTail(nullptr) {} |
John Reck | 4f02bf4 | 2014-01-03 18:09:17 -0800 | [diff] [blame] | 49 | |
| 50 | RenderTask* TaskQueue::next() { |
| 51 | RenderTask* ret = mHead; |
| 52 | if (ret) { |
| 53 | mHead = ret->mNext; |
| 54 | if (!mHead) { |
Chris Craik | d41c4d8 | 2015-01-05 15:51:13 -0800 | [diff] [blame] | 55 | mTail = nullptr; |
John Reck | 4f02bf4 | 2014-01-03 18:09:17 -0800 | [diff] [blame] | 56 | } |
Chris Craik | d41c4d8 | 2015-01-05 15:51:13 -0800 | [diff] [blame] | 57 | ret->mNext = nullptr; |
John Reck | 4f02bf4 | 2014-01-03 18:09:17 -0800 | [diff] [blame] | 58 | } |
| 59 | return ret; |
| 60 | } |
| 61 | |
// Returns the task at the front of the queue without removing it,
// or nullptr if the queue is empty.
RenderTask* TaskQueue::peek() {
    return mHead;
}
| 65 | |
| 66 | void TaskQueue::queue(RenderTask* task) { |
| 67 | // Since the RenderTask itself forms the linked list it is not allowed |
| 68 | // to have the same task queued twice |
| 69 | LOG_ALWAYS_FATAL_IF(task->mNext || mTail == task, "Task is already in the queue!"); |
| 70 | if (mTail) { |
| 71 | // Fast path if we can just append |
| 72 | if (mTail->mRunAt <= task->mRunAt) { |
| 73 | mTail->mNext = task; |
| 74 | mTail = task; |
| 75 | } else { |
| 76 | // Need to find the proper insertion point |
Chris Craik | d41c4d8 | 2015-01-05 15:51:13 -0800 | [diff] [blame] | 77 | RenderTask* previous = nullptr; |
John Reck | 4f02bf4 | 2014-01-03 18:09:17 -0800 | [diff] [blame] | 78 | RenderTask* next = mHead; |
| 79 | while (next && next->mRunAt <= task->mRunAt) { |
| 80 | previous = next; |
| 81 | next = next->mNext; |
| 82 | } |
| 83 | if (!previous) { |
| 84 | task->mNext = mHead; |
| 85 | mHead = task; |
| 86 | } else { |
| 87 | previous->mNext = task; |
| 88 | if (next) { |
| 89 | task->mNext = next; |
| 90 | } else { |
| 91 | mTail = task; |
| 92 | } |
| 93 | } |
| 94 | } |
| 95 | } else { |
| 96 | mTail = mHead = task; |
| 97 | } |
| 98 | } |
| 99 | |
John Reck | a5dda64 | 2014-05-22 15:43:54 -0700 | [diff] [blame] | 100 | void TaskQueue::queueAtFront(RenderTask* task) { |
| 101 | if (mTail) { |
| 102 | task->mNext = mHead; |
| 103 | mHead = task; |
| 104 | } else { |
| 105 | mTail = mHead = task; |
| 106 | } |
| 107 | } |
| 108 | |
John Reck | 4f02bf4 | 2014-01-03 18:09:17 -0800 | [diff] [blame] | 109 | void TaskQueue::remove(RenderTask* task) { |
| 110 | // TaskQueue is strict here to enforce that users are keeping track of |
| 111 | // their RenderTasks due to how their memory is managed |
| 112 | LOG_ALWAYS_FATAL_IF(!task->mNext && mTail != task, |
| 113 | "Cannot remove a task that isn't in the queue!"); |
| 114 | |
| 115 | // If task is the head we can just call next() to pop it off |
| 116 | // Otherwise we need to scan through to find the task before it |
| 117 | if (peek() == task) { |
| 118 | next(); |
| 119 | } else { |
| 120 | RenderTask* previous = mHead; |
| 121 | while (previous->mNext != task) { |
| 122 | previous = previous->mNext; |
| 123 | } |
| 124 | previous->mNext = task->mNext; |
| 125 | if (mTail == task) { |
| 126 | mTail = previous; |
| 127 | } |
| 128 | } |
| 129 | } |
| 130 | |
John Reck | e45b1fd | 2014-04-15 09:50:16 -0700 | [diff] [blame] | 131 | class DispatchFrameCallbacks : public RenderTask { |
| 132 | private: |
| 133 | RenderThread* mRenderThread; |
| 134 | public: |
Chih-Hung Hsieh | c6baf56 | 2016-04-27 11:29:23 -0700 | [diff] [blame] | 135 | explicit DispatchFrameCallbacks(RenderThread* rt) : mRenderThread(rt) {} |
John Reck | e45b1fd | 2014-04-15 09:50:16 -0700 | [diff] [blame] | 136 | |
Chris Craik | d41c4d8 | 2015-01-05 15:51:13 -0800 | [diff] [blame] | 137 | virtual void run() override { |
John Reck | e45b1fd | 2014-04-15 09:50:16 -0700 | [diff] [blame] | 138 | mRenderThread->dispatchFrameCallbacks(); |
| 139 | } |
| 140 | }; |
| 141 | |
// Set (and never cleared) once getInstance() has created the singleton; lets
// callers cheaply check whether a RenderThread exists without creating one.
static bool gHasRenderThreadInstance = false;

// Returns true if the RenderThread singleton has been created.
bool RenderThread::hasInstance() {
    return gHasRenderThreadInstance;
}
| 147 | |
// Lazily creates and returns the process-wide RenderThread singleton.
// The instance is intentionally leaked; see ~RenderThread().
RenderThread& RenderThread::getInstance() {
    // This is a pointer because otherwise __cxa_finalize
    // will try to delete it like a Good Citizen but that causes us to crash
    // because we don't want to delete the RenderThread normally.
    static RenderThread* sInstance = new RenderThread();
    gHasRenderThreadInstance = true;
    return *sInstance;
}
| 156 | |
// Constructs and immediately starts the render thread. Heavyweight state
// (EGL, RenderState, vsync receiver, ...) is deferred to initThreadLocals()
// so it is created on the render thread itself, not the caller's thread.
// NOTE(review): Thread(true) — presumably the canCallJava flag; confirm
// against utils/Thread.h.
RenderThread::RenderThread() : Thread(true)
        , mNextWakeup(LLONG_MAX)
        , mDisplayEventReceiver(nullptr)
        , mVsyncRequested(false)
        , mFrameCallbackTaskPending(false)
        , mFrameCallbackTask(nullptr)
        , mRenderState(nullptr)
        , mEglManager(nullptr)
        , mVkManager(nullptr) {
    Properties::load();
    mFrameCallbackTask = new DispatchFrameCallbacks(this);
    mLooper = new Looper(false);
    run("RenderThread");  // kicks off threadLoop()
}
| 171 | |
// The RenderThread is immortal by design (see getInstance()); reaching this
// destructor is always a bug, so abort loudly.
RenderThread::~RenderThread() {
    LOG_ALWAYS_FATAL("Can't destroy the render thread");
}
| 175 | |
// Creates the vsync event source and registers its fd with the Looper so
// displayEventReceiverCallback() runs whenever display events arrive.
// Must only be called once (fatal otherwise).
void RenderThread::initializeDisplayEventReceiver() {
    LOG_ALWAYS_FATAL_IF(mDisplayEventReceiver, "Initializing a second DisplayEventReceiver?");
    mDisplayEventReceiver = new DisplayEventReceiver();
    status_t status = mDisplayEventReceiver->initCheck();
    LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "Initialization of DisplayEventReceiver "
            "failed with status: %d", status);

    // Register the FD
    mLooper->addFd(mDisplayEventReceiver->getFd(), 0,
            Looper::EVENT_INPUT, RenderThread::displayEventReceiverCallback, this);
}
| 187 | |
// Called once from threadLoop() to build the per-thread objects that must
// live on the render thread: display info / frame interval, the vsync
// receiver, and the EGL, RenderState, JankTracker and Vulkan state.
void RenderThread::initThreadLocals() {
    sp<IBinder> dtoken(SurfaceComposerClient::getBuiltInDisplay(
            ISurfaceComposer::eDisplayIdMain));
    status_t status = SurfaceComposerClient::getDisplayInfo(dtoken, &mDisplayInfo);
    LOG_ALWAYS_FATAL_IF(status, "Failed to get display info\n");
    // Vsync period derived from the display's refresh rate.
    // NOTE(review): divides by mDisplayInfo.fps — assumes fps is nonzero;
    // confirm getDisplayInfo() can never report a zero refresh rate.
    nsecs_t frameIntervalNanos = static_cast<nsecs_t>(1000000000 / mDisplayInfo.fps);
    mTimeLord.setFrameInterval(frameIntervalNanos);
    initializeDisplayEventReceiver();
    mEglManager = new EglManager(*this);
    mRenderState = new RenderState(*this);
    mJankTracker = new JankTracker(mDisplayInfo);
    mVkManager = new VulkanManager(*this);
}
| 201 | |
Derek Sollenberger | c4fbada | 2016-11-07 16:05:41 -0500 | [diff] [blame] | 202 | Readback& RenderThread::readback() { |
| 203 | |
| 204 | if (!mReadback) { |
| 205 | auto renderType = Properties::getRenderPipelineType(); |
| 206 | switch (renderType) { |
| 207 | case RenderPipelineType::OpenGL: |
| 208 | mReadback = new OpenGLReadbackImpl(*this); |
| 209 | break; |
| 210 | case RenderPipelineType::SkiaGL: |
| 211 | case RenderPipelineType::SkiaVulkan: |
| 212 | // It works to use the OpenGL pipeline for Vulkan but this is not |
| 213 | // ideal as it causes us to create an OpenGL context in addition |
| 214 | // to the Vulkan one. |
| 215 | mReadback = new skiapipeline::SkiaOpenGLReadback(*this); |
| 216 | break; |
| 217 | default: |
| 218 | LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType); |
| 219 | break; |
| 220 | } |
| 221 | } |
| 222 | |
| 223 | return *mReadback; |
| 224 | } |
| 225 | |
John Reck | e45b1fd | 2014-04-15 09:50:16 -0700 | [diff] [blame] | 226 | int RenderThread::displayEventReceiverCallback(int fd, int events, void* data) { |
| 227 | if (events & (Looper::EVENT_ERROR | Looper::EVENT_HANGUP)) { |
| 228 | ALOGE("Display event receiver pipe was closed or an error occurred. " |
| 229 | "events=0x%x", events); |
| 230 | return 0; // remove the callback |
| 231 | } |
| 232 | |
| 233 | if (!(events & Looper::EVENT_INPUT)) { |
| 234 | ALOGW("Received spurious callback for unhandled poll event. " |
| 235 | "events=0x%x", events); |
| 236 | return 1; // keep the callback |
| 237 | } |
| 238 | |
| 239 | reinterpret_cast<RenderThread*>(data)->drainDisplayEventQueue(); |
| 240 | |
| 241 | return 1; // keep the callback |
| 242 | } |
| 243 | |
| 244 | static nsecs_t latestVsyncEvent(DisplayEventReceiver* receiver) { |
| 245 | DisplayEventReceiver::Event buf[EVENT_BUFFER_SIZE]; |
| 246 | nsecs_t latest = 0; |
| 247 | ssize_t n; |
| 248 | while ((n = receiver->getEvents(buf, EVENT_BUFFER_SIZE)) > 0) { |
| 249 | for (ssize_t i = 0; i < n; i++) { |
| 250 | const DisplayEventReceiver::Event& ev = buf[i]; |
| 251 | switch (ev.header.type) { |
| 252 | case DisplayEventReceiver::DISPLAY_EVENT_VSYNC: |
| 253 | latest = ev.header.timestamp; |
| 254 | break; |
| 255 | } |
| 256 | } |
| 257 | } |
| 258 | if (n < 0) { |
| 259 | ALOGW("Failed to get events from display event receiver, status=%d", status_t(n)); |
| 260 | } |
| 261 | return latest; |
| 262 | } |
| 263 | |
// Drains all pending vsync events, keeping only the newest timestamp. If
// TimeLord accepts that vsync and no dispatch task is already queued,
// schedules mFrameCallbackTask slightly after the vsync so the UI thread
// gets first crack at pushing us a new frame (see
// DISPATCH_FRAME_CALLBACKS_DELAY).
void RenderThread::drainDisplayEventQueue() {
    ATRACE_CALL();
    nsecs_t vsyncEvent = latestVsyncEvent(mDisplayEventReceiver);
    if (vsyncEvent > 0) {
        // A vsync arrived, so the outstanding request is satisfied.
        mVsyncRequested = false;
        if (mTimeLord.vsyncReceived(vsyncEvent) && !mFrameCallbackTaskPending) {
            ATRACE_NAME("queue mFrameCallbackTask");
            mFrameCallbackTaskPending = true;
            nsecs_t runAt = (vsyncEvent + DISPATCH_FRAME_CALLBACKS_DELAY);
            queueAt(mFrameCallbackTask, runAt);
        }
    }
}
| 277 | |
| 278 | void RenderThread::dispatchFrameCallbacks() { |
John Reck | a5dda64 | 2014-05-22 15:43:54 -0700 | [diff] [blame] | 279 | ATRACE_CALL(); |
John Reck | e45b1fd | 2014-04-15 09:50:16 -0700 | [diff] [blame] | 280 | mFrameCallbackTaskPending = false; |
| 281 | |
| 282 | std::set<IFrameCallback*> callbacks; |
| 283 | mFrameCallbacks.swap(callbacks); |
| 284 | |
John Reck | a733f89 | 2014-12-19 11:37:21 -0800 | [diff] [blame] | 285 | if (callbacks.size()) { |
| 286 | // Assume one of them will probably animate again so preemptively |
| 287 | // request the next vsync in case it occurs mid-frame |
| 288 | requestVsync(); |
| 289 | for (std::set<IFrameCallback*>::iterator it = callbacks.begin(); it != callbacks.end(); it++) { |
| 290 | (*it)->doFrame(); |
| 291 | } |
John Reck | e45b1fd | 2014-04-15 09:50:16 -0700 | [diff] [blame] | 292 | } |
| 293 | } |
| 294 | |
// Asks the DisplayEventReceiver for one upcoming vsync. mVsyncRequested
// de-dupes requests; it is cleared in drainDisplayEventQueue() once a
// vsync actually arrives.
void RenderThread::requestVsync() {
    if (!mVsyncRequested) {
        mVsyncRequested = true;
        status_t status = mDisplayEventReceiver->requestNextVsync();
        LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
                "requestNextVsync failed with status: %d", status);
    }
}
| 303 | |
// Main loop of the render thread. Alternates between polling the Looper
// (which delivers vsync fd events and queue() wake-ups) and running every
// queued task whose mRunAt time has arrived. Never returns in practice.
bool RenderThread::threadLoop() {
    setpriority(PRIO_PROCESS, 0, PRIORITY_DISPLAY);
    initThreadLocals();

    int timeoutMillis = -1;  // block indefinitely until the first event/wake
    for (;;) {
        int result = mLooper->pollOnce(timeoutMillis);
        LOG_ALWAYS_FATAL_IF(result == Looper::POLL_ERROR,
                "RenderThread Looper POLL_ERROR!");

        nsecs_t nextWakeup;
        {
            FatVector<RenderTask*, 10> workQueue;
            // Process our queue, if we have anything. By first acquiring
            // all the pending events then processing them we avoid vsync
            // starvation if more tasks are queued while we are processing tasks.
            while (RenderTask* task = nextTask(&nextWakeup)) {
                workQueue.push_back(task);
            }
            for (auto task : workQueue) {
                task->run();
                // task may have deleted itself, do not reference it again
            }
        }
        // Convert the next task's absolute run time into a poll timeout.
        if (nextWakeup == LLONG_MAX) {
            timeoutMillis = -1;  // queue empty: sleep until woken
        } else {
            nsecs_t timeoutNanos = nextWakeup - systemTime(SYSTEM_TIME_MONOTONIC);
            timeoutMillis = nanoseconds_to_milliseconds(timeoutNanos);
            if (timeoutMillis < 0) {
                timeoutMillis = 0;  // already overdue: poll without blocking
            }
        }

        // Promote callbacks registered since the last frame into the active
        // set and make sure a vsync is on the way for them.
        if (mPendingRegistrationFrameCallbacks.size() && !mFrameCallbackTaskPending) {
            drainDisplayEventQueue();
            mFrameCallbacks.insert(
                    mPendingRegistrationFrameCallbacks.begin(), mPendingRegistrationFrameCallbacks.end());
            mPendingRegistrationFrameCallbacks.clear();
            requestVsync();
        }

        if (!mFrameCallbackTaskPending && !mVsyncRequested && mFrameCallbacks.size()) {
            // TODO: Clean this up. This is working around an issue where a combination
            // of bad timing and slow drawing can result in dropping a stale vsync
            // on the floor (correct!) but fails to schedule to listen for the
            // next vsync (oops), so none of the callbacks are run.
            requestVsync();
        }
    }

    return false;  // unreachable; the loop above never exits normally
}
| 357 | |
// Thread-safe: inserts |task| into the time-ordered queue. If the new task
// is due sooner than the current wakeup target, wake the Looper; mNextWakeup
// is zeroed so concurrent queue() calls skip redundant wakes (nextTask()
// recomputes it on the render thread).
void RenderThread::queue(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queue(task);
    if (mNextWakeup && task->mRunAt < mNextWakeup) {
        mNextWakeup = 0;
        mLooper->wake();
    }
}
| 366 | |
// Queues |task| and blocks the calling thread until the render thread has
// executed it (SignalingRenderTask presumably signals |condition| after
// running the wrapped task — confirm in its implementation).
void RenderThread::queueAndWait(RenderTask* task) {
    // These need to be local to the thread to avoid the Condition
    // signaling the wrong thread. The easiest way to achieve that is to just
    // make this on the stack, although that has a slight cost to it
    Mutex mutex;
    Condition condition;
    SignalingRenderTask syncTask(task, &mutex, &condition);

    // Hold the mutex before queueing so the signal cannot fire before wait().
    AutoMutex _lock(mutex);
    queue(&syncTask);
    condition.wait(mutex);
}
| 379 | |
// Thread-safe: puts |task| ahead of everything else and wakes the Looper
// unconditionally, since front insertion always changes the next task to run.
void RenderThread::queueAtFront(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queueAtFront(task);
    mLooper->wake();
}
| 385 | |
// Schedules |task| to run at the absolute time |runAtNs| (same timebase as
// systemTime(SYSTEM_TIME_MONOTONIC), which nextTask() compares against).
void RenderThread::queueAt(RenderTask* task, nsecs_t runAtNs) {
    task->mRunAt = runAtNs;
    queue(task);
}
| 390 | |
// Thread-safe: unlinks |task| from the pending queue. TaskQueue::remove()
// aborts if the task is not actually queued.
void RenderThread::remove(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.remove(task);
}
| 395 | |
// Registers |callback| to be invoked on an upcoming frame; the pending set
// is merged into the active set by threadLoop().
// NOTE(review): no locking here — appears to assume render-thread-only
// callers; confirm against call sites.
void RenderThread::postFrameCallback(IFrameCallback* callback) {
    mPendingRegistrationFrameCallbacks.insert(callback);
}
| 399 | |
John Reck | 01a5ea3 | 2014-12-03 13:01:07 -0800 | [diff] [blame] | 400 | bool RenderThread::removeFrameCallback(IFrameCallback* callback) { |
| 401 | size_t erased; |
| 402 | erased = mFrameCallbacks.erase(callback); |
| 403 | erased |= mPendingRegistrationFrameCallbacks.erase(callback); |
| 404 | return erased; |
John Reck | a5dda64 | 2014-05-22 15:43:54 -0700 | [diff] [blame] | 405 | } |
| 406 | |
| 407 | void RenderThread::pushBackFrameCallback(IFrameCallback* callback) { |
| 408 | if (mFrameCallbacks.erase(callback)) { |
| 409 | mPendingRegistrationFrameCallbacks.insert(callback); |
| 410 | } |
John Reck | e45b1fd | 2014-04-15 09:50:16 -0700 | [diff] [blame] | 411 | } |
| 412 | |
// Pops the next runnable task, or returns nullptr when the queue is empty or
// the head task isn't due yet. Also publishes the head's run time (or
// LLONG_MAX when empty) via mNextWakeup and |nextWakeup| so threadLoop()
// can compute its poll timeout.
RenderTask* RenderThread::nextTask(nsecs_t* nextWakeup) {
    AutoMutex _lock(mLock);
    RenderTask* next = mQueue.peek();
    if (!next) {
        mNextWakeup = LLONG_MAX;
    } else {
        mNextWakeup = next->mRunAt;
        // Most tasks won't be delayed, so avoid unnecessary systemTime() calls
        if (next->mRunAt <= 0 || next->mRunAt <= systemTime(SYSTEM_TIME_MONOTONIC)) {
            next = mQueue.next();
        } else {
            // Head task is in the future; leave it queued and report no work.
            next = nullptr;
        }
    }
    if (nextWakeup) {
        *nextWakeup = mNextWakeup;
    }
    return next;
}
| 432 | |
John Reck | cec24ae | 2013-11-05 13:27:50 -0800 | [diff] [blame] | 433 | } /* namespace renderthread */ |
| 434 | } /* namespace uirenderer */ |
| 435 | } /* namespace android */ |