/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "RenderThread.h"

#include "hwui/Bitmap.h"
#include "renderstate/RenderState.h"
#include "renderthread/OpenGLPipeline.h"
#include "pipeline/skia/SkiaOpenGLReadback.h"
#include "pipeline/skia/SkiaOpenGLPipeline.h"
#include "pipeline/skia/SkiaVulkanPipeline.h"
#include "CanvasContext.h"
#include "EglManager.h"
#include "OpenGLReadback.h"
#include "RenderProxy.h"
#include "VulkanManager.h"
#include "utils/FatVector.h"

#include <gui/DisplayEventReceiver.h>
#include <gui/ISurfaceComposer.h>
#include <gui/SurfaceComposerClient.h>
#include <sys/resource.h>
#include <utils/Condition.h>
#include <utils/Log.h>
#include <utils/Mutex.h>

namespace android {
namespace uirenderer {
namespace renderthread {

// Number of events to read at a time from the DisplayEventReceiver pipe.
// The value should be large enough that we can quickly drain the pipe
// using just a few large reads.
static const size_t EVENT_BUFFER_SIZE = 100;

// Slight delay to give the UI time to push us a new frame before we replay
static const nsecs_t DISPATCH_FRAME_CALLBACKS_DELAY = milliseconds_to_nanoseconds(4);

TaskQueue::TaskQueue() : mHead(nullptr), mTail(nullptr) {}

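// Pops and returns the task at the head of the queue, or nullptr if the
// queue is empty. The returned task is unlinked from the list.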
RenderTask* TaskQueue::next() {
    RenderTask* ret = mHead;
    if (ret) {
        mHead = ret->mNext;
        if (!mHead) {
            mTail = nullptr;
        }
        ret->mNext = nullptr;
    }
    return ret;
}

RenderTask* TaskQueue::peek() {
    return mHead;
}

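// Inserts the task into the queue, keeping the list sorted by mRunAt so that
// the head is always the next task due to run.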
void TaskQueue::queue(RenderTask* task) {
    // Since the RenderTask itself forms the linked list it is not allowed
    // to have the same task queued twice
    LOG_ALWAYS_FATAL_IF(task->mNext || mTail == task, "Task is already in the queue!");
    if (mTail) {
        // Fast path if we can just append
        if (mTail->mRunAt <= task->mRunAt) {
            mTail->mNext = task;
            mTail = task;
        } else {
            // Need to find the proper insertion point
            RenderTask* previous = nullptr;
            RenderTask* next = mHead;
            while (next && next->mRunAt <= task->mRunAt) {
                previous = next;
                next = next->mNext;
            }
            if (!previous) {
                task->mNext = mHead;
                mHead = task;
            } else {
                previous->mNext = task;
                if (next) {
                    task->mNext = next;
                } else {
                    mTail = task;
                }
            }
        }
    } else {
        mTail = mHead = task;
    }
}

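// Pushes the task to the head of the queue regardless of its mRunAt time.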
void TaskQueue::queueAtFront(RenderTask* task) {
    LOG_ALWAYS_FATAL_IF(task->mNext || mHead == task, "Task is already in the queue!");
    if (mTail) {
        task->mNext = mHead;
        mHead = task;
    } else {
        mTail = mHead = task;
    }
}

void TaskQueue::remove(RenderTask* task) {
    // TaskQueue is strict here to enforce that users are keeping track of
    // their RenderTasks due to how their memory is managed
    LOG_ALWAYS_FATAL_IF(!task->mNext && mTail != task,
            "Cannot remove a task that isn't in the queue!");

    // If task is the head we can just call next() to pop it off
    // Otherwise we need to scan through to find the task before it
    if (peek() == task) {
        next();
    } else {
        RenderTask* previous = mHead;
        while (previous->mNext != task) {
            previous = previous->mNext;
        }
        previous->mNext = task->mNext;
        if (mTail == task) {
            mTail = previous;
        }
    }
}

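// RenderTask that simply forwards to RenderThread::dispatchFrameCallbacks();
// it is queued shortly after each vsync to run the registered IFrameCallbacks.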
class DispatchFrameCallbacks : public RenderTask {
private:
    RenderThread* mRenderThread;
public:
    explicit DispatchFrameCallbacks(RenderThread* rt) : mRenderThread(rt) {}

    virtual void run() override {
        mRenderThread->dispatchFrameCallbacks();
    }
};

static bool gHasRenderThreadInstance = false;

bool RenderThread::hasInstance() {
    return gHasRenderThreadInstance;
}

RenderThread& RenderThread::getInstance() {
    // This is a pointer because otherwise __cxa_finalize
    // will try to delete it like a Good Citizen but that causes us to crash
    // because we don't want to delete the RenderThread normally.
    static RenderThread* sInstance = new RenderThread();
    gHasRenderThreadInstance = true;
    return *sInstance;
}

RenderThread::RenderThread() : Thread(true)
        , mNextWakeup(LLONG_MAX)
        , mDisplayEventReceiver(nullptr)
        , mVsyncRequested(false)
        , mFrameCallbackTaskPending(false)
        , mFrameCallbackTask(nullptr)
        , mRenderState(nullptr)
        , mEglManager(nullptr)
        , mVkManager(nullptr) {
    Properties::load();
    mFrameCallbackTask = new DispatchFrameCallbacks(this);
    mLooper = new Looper(false);
    run("RenderThread");
}

RenderThread::~RenderThread() {
    LOG_ALWAYS_FATAL("Can't destroy the render thread");
}

void RenderThread::initializeDisplayEventReceiver() {
    LOG_ALWAYS_FATAL_IF(mDisplayEventReceiver, "Initializing a second DisplayEventReceiver?");
    mDisplayEventReceiver = new DisplayEventReceiver();
    status_t status = mDisplayEventReceiver->initCheck();
    LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "Initialization of DisplayEventReceiver "
            "failed with status: %d", status);

    // Register the FD
    mLooper->addFd(mDisplayEventReceiver->getFd(), 0,
            Looper::EVENT_INPUT, RenderThread::displayEventReceiverCallback, this);
}

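// Performs the per-thread setup that must happen on the render thread itself:
// queries the built-in display, derives the frame interval for the TimeLord,
// registers the vsync receiver with the Looper, and creates the EGL, render
// state, Vulkan, and cache managers.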
void RenderThread::initThreadLocals() {
    sp<IBinder> dtoken(SurfaceComposerClient::getBuiltInDisplay(
            ISurfaceComposer::eDisplayIdMain));
    status_t status = SurfaceComposerClient::getDisplayInfo(dtoken, &mDisplayInfo);
    LOG_ALWAYS_FATAL_IF(status, "Failed to get display info\n");
    nsecs_t frameIntervalNanos = static_cast<nsecs_t>(1000000000 / mDisplayInfo.fps);
    mTimeLord.setFrameInterval(frameIntervalNanos);
    initializeDisplayEventReceiver();
    mEglManager = new EglManager(*this);
    mRenderState = new RenderState(*this);
    mVkManager = new VulkanManager(*this);
    mCacheManager = new CacheManager(mDisplayInfo);
}

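// Writes global profile data and the active pipeline's cache memory usage to
// the given file descriptor.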
void RenderThread::dumpGraphicsMemory(int fd) {
    globalProfileData()->dump(fd);

    String8 cachesOutput;
    String8 pipeline;
    auto renderType = Properties::getRenderPipelineType();
    switch (renderType) {
        case RenderPipelineType::OpenGL: {
            if (Caches::hasInstance()) {
                cachesOutput.appendFormat("Caches:\n");
                Caches::getInstance().dumpMemoryUsage(cachesOutput);
            } else {
                cachesOutput.appendFormat("No caches instance.");
            }
            pipeline.appendFormat("FrameBuilder");
            break;
        }
        case RenderPipelineType::SkiaGL: {
            mCacheManager->dumpMemoryUsage(cachesOutput, mRenderState);
            pipeline.appendFormat("Skia (OpenGL)");
            break;
        }
        case RenderPipelineType::SkiaVulkan: {
            mCacheManager->dumpMemoryUsage(cachesOutput, mRenderState);
            pipeline.appendFormat("Skia (Vulkan)");
            break;
        }
        default:
            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
            break;
    }

    dprintf(fd, "\n%s\n", cachesOutput.string());
    dprintf(fd, "\nPipeline=%s\n", pipeline.string());
}

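// Lazily creates and returns the Readback implementation that matches the
// current render pipeline: plain OpenGL, or the Skia GL readback which is
// also reused for the Vulkan pipeline.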
Readback& RenderThread::readback() {
    if (!mReadback) {
        auto renderType = Properties::getRenderPipelineType();
        switch (renderType) {
            case RenderPipelineType::OpenGL:
                mReadback = new OpenGLReadbackImpl(*this);
                break;
            case RenderPipelineType::SkiaGL:
            case RenderPipelineType::SkiaVulkan:
                // It works to use the OpenGL pipeline for Vulkan but this is not
                // ideal as it causes us to create an OpenGL context in addition
                // to the Vulkan one.
                mReadback = new skiapipeline::SkiaOpenGLReadback(*this);
                break;
            default:
                LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
                break;
        }
    }

    return *mReadback;
}

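// Replaces the Skia GrContext used by this thread, resetting the cache
// manager and abandoning any resources held by the previous context.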
void RenderThread::setGrContext(GrContext* context) {
    mCacheManager->reset(context);
    if (mGrContext.get()) {
        mGrContext->releaseResourcesAndAbandonContext();
    }
    mGrContext.reset(context);
}

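// Looper callback invoked when the DisplayEventReceiver fd becomes readable;
// returns 1 to keep the callback registered, or 0 to remove it on error/hangup.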
int RenderThread::displayEventReceiverCallback(int fd, int events, void* data) {
    if (events & (Looper::EVENT_ERROR | Looper::EVENT_HANGUP)) {
        ALOGE("Display event receiver pipe was closed or an error occurred. "
                "events=0x%x", events);
        return 0; // remove the callback
    }

    if (!(events & Looper::EVENT_INPUT)) {
        ALOGW("Received spurious callback for unhandled poll event. "
                "events=0x%x", events);
        return 1; // keep the callback
    }

    reinterpret_cast<RenderThread*>(data)->drainDisplayEventQueue();

    return 1; // keep the callback
}

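// Drains every pending event from the receiver and returns the timestamp of
// the most recent vsync, or 0 if no vsync event was seen.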
static nsecs_t latestVsyncEvent(DisplayEventReceiver* receiver) {
    DisplayEventReceiver::Event buf[EVENT_BUFFER_SIZE];
    nsecs_t latest = 0;
    ssize_t n;
    while ((n = receiver->getEvents(buf, EVENT_BUFFER_SIZE)) > 0) {
        for (ssize_t i = 0; i < n; i++) {
            const DisplayEventReceiver::Event& ev = buf[i];
            switch (ev.header.type) {
                case DisplayEventReceiver::DISPLAY_EVENT_VSYNC:
                    latest = ev.header.timestamp;
                    break;
            }
        }
    }
    if (n < 0) {
        ALOGW("Failed to get events from display event receiver, status=%d", status_t(n));
    }
    return latest;
}

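// Consumes all pending display events. If a new vsync arrived and the
// TimeLord accepts it, schedules mFrameCallbackTask to run slightly after the
// vsync timestamp so the UI thread has a chance to push a frame first.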
void RenderThread::drainDisplayEventQueue() {
    ATRACE_CALL();
    nsecs_t vsyncEvent = latestVsyncEvent(mDisplayEventReceiver);
    if (vsyncEvent > 0) {
        mVsyncRequested = false;
        if (mTimeLord.vsyncReceived(vsyncEvent) && !mFrameCallbackTaskPending) {
            ATRACE_NAME("queue mFrameCallbackTask");
            mFrameCallbackTaskPending = true;
            nsecs_t runAt = (vsyncEvent + DISPATCH_FRAME_CALLBACKS_DELAY);
            queueAt(mFrameCallbackTask, runAt);
        }
    }
}

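// Runs doFrame() on every callback registered for this vsync. The registered
// set is first swapped into a local copy so each callback runs at most once
// per dispatch, and the next vsync is requested preemptively when any
// callbacks are about to run.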
void RenderThread::dispatchFrameCallbacks() {
    ATRACE_CALL();
    mFrameCallbackTaskPending = false;

    std::set<IFrameCallback*> callbacks;
    mFrameCallbacks.swap(callbacks);

    if (callbacks.size()) {
        // Assume one of them will probably animate again so preemptively
        // request the next vsync in case it occurs mid-frame
        requestVsync();
        for (std::set<IFrameCallback*>::iterator it = callbacks.begin(); it != callbacks.end(); it++) {
            (*it)->doFrame();
        }
    }
}

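// Asks the DisplayEventReceiver for the next vsync, but only if one has not
// already been requested since the last vsync was consumed.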
void RenderThread::requestVsync() {
    if (!mVsyncRequested) {
        mVsyncRequested = true;
        status_t status = mDisplayEventReceiver->requestNextVsync();
        LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
                "requestNextVsync failed with status: %d", status);
    }
}

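// Main loop of the render thread: polls the Looper (which also delivers vsync
// events), runs any due RenderTasks, computes the next wakeup timeout, and
// flushes pending frame-callback registrations.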
bool RenderThread::threadLoop() {
    setpriority(PRIO_PROCESS, 0, PRIORITY_DISPLAY);
    initThreadLocals();

    int timeoutMillis = -1;
    for (;;) {
        int result = mLooper->pollOnce(timeoutMillis);
        LOG_ALWAYS_FATAL_IF(result == Looper::POLL_ERROR,
                "RenderThread Looper POLL_ERROR!");

        nsecs_t nextWakeup;
        {
            FatVector<RenderTask*, 10> workQueue;
            // Process our queue, if we have anything. By first acquiring
            // all the pending events then processing them we avoid vsync
            // starvation if more tasks are queued while we are processing tasks.
            while (RenderTask* task = nextTask(&nextWakeup)) {
                workQueue.push_back(task);
            }
            for (auto task : workQueue) {
                task->run();
                // task may have deleted itself, do not reference it again
            }
        }
        if (nextWakeup == LLONG_MAX) {
            timeoutMillis = -1;
        } else {
            nsecs_t timeoutNanos = nextWakeup - systemTime(SYSTEM_TIME_MONOTONIC);
            timeoutMillis = nanoseconds_to_milliseconds(timeoutNanos);
            if (timeoutMillis < 0) {
                timeoutMillis = 0;
            }
        }

        if (mPendingRegistrationFrameCallbacks.size() && !mFrameCallbackTaskPending) {
            drainDisplayEventQueue();
            mFrameCallbacks.insert(
                    mPendingRegistrationFrameCallbacks.begin(), mPendingRegistrationFrameCallbacks.end());
            mPendingRegistrationFrameCallbacks.clear();
            requestVsync();
        }

        if (!mFrameCallbackTaskPending && !mVsyncRequested && mFrameCallbacks.size()) {
            // TODO: Clean this up. This is working around an issue where a combination
            // of bad timing and slow drawing can result in dropping a stale vsync
            // on the floor (correct!) but fails to schedule to listen for the
            // next vsync (oops), so none of the callbacks are run.
            requestVsync();
        }
    }

    return false;
}

void RenderThread::queue(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queue(task);
    if (mNextWakeup && task->mRunAt < mNextWakeup) {
        mNextWakeup = 0;
        mLooper->wake();
    }
}

void RenderThread::queueAndWait(RenderTask* task) {
    // These need to be local to the thread to avoid the Condition
    // signaling the wrong thread. The easiest way to achieve that is to just
    // make this on the stack, although that has a slight cost to it
    Mutex mutex;
    Condition condition;
    SignalingRenderTask syncTask(task, &mutex, &condition);

    AutoMutex _lock(mutex);
    queue(&syncTask);
    while (!syncTask.hasRun()) {
        condition.wait(mutex);
    }
}

void RenderThread::queueAtFront(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queueAtFront(task);
    mLooper->wake();
}

void RenderThread::queueAt(RenderTask* task, nsecs_t runAtNs) {
    task->mRunAt = runAtNs;
    queue(task);
}

void RenderThread::remove(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.remove(task);
}

void RenderThread::postFrameCallback(IFrameCallback* callback) {
    mPendingRegistrationFrameCallbacks.insert(callback);
}

bool RenderThread::removeFrameCallback(IFrameCallback* callback) {
    size_t erased;
    erased = mFrameCallbacks.erase(callback);
    erased |= mPendingRegistrationFrameCallbacks.erase(callback);
    return erased;
}

void RenderThread::pushBackFrameCallback(IFrameCallback* callback) {
    if (mFrameCallbacks.erase(callback)) {
        mPendingRegistrationFrameCallbacks.insert(callback);
    }
}

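// Returns the next task that is due to run now, or nullptr if the queue is
// empty or the head task is scheduled for the future; *nextWakeup is set to
// the time at which the caller should wake up again.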
RenderTask* RenderThread::nextTask(nsecs_t* nextWakeup) {
    AutoMutex _lock(mLock);
    RenderTask* next = mQueue.peek();
    if (!next) {
        mNextWakeup = LLONG_MAX;
    } else {
        mNextWakeup = next->mRunAt;
        // Most tasks won't be delayed, so avoid unnecessary systemTime() calls
        if (next->mRunAt <= 0 || next->mRunAt <= systemTime(SYSTEM_TIME_MONOTONIC)) {
            next = mQueue.next();
        } else {
            next = nullptr;
        }
    }
    if (nextWakeup) {
        *nextWakeup = mNextWakeup;
    }
    return next;
}

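// Delegates hardware bitmap allocation to whichever render pipeline is
// currently configured.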
sk_sp<Bitmap> RenderThread::allocateHardwareBitmap(SkBitmap& skBitmap) {
    auto renderType = Properties::getRenderPipelineType();
    switch (renderType) {
        case RenderPipelineType::OpenGL:
            return OpenGLPipeline::allocateHardwareBitmap(*this, skBitmap);
        case RenderPipelineType::SkiaGL:
            return skiapipeline::SkiaOpenGLPipeline::allocateHardwareBitmap(*this, skBitmap);
        case RenderPipelineType::SkiaVulkan:
            return skiapipeline::SkiaVulkanPipeline::allocateHardwareBitmap(*this, skBitmap);
        default:
            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
            break;
    }
    return nullptr;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */