blob: 13af2c4d15e85649c629fd586ab12732cd2222d5 [file] [log] [blame]
John Reckcec24ae2013-11-05 13:27:50 -08001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
John Reckcec24ae2013-11-05 13:27:50 -080017#include "RenderThread.h"
18
Stan Iliev7bc3bc62017-05-24 13:28:36 -040019#include "hwui/Bitmap.h"
20#include "renderstate/RenderState.h"
21#include "renderthread/OpenGLPipeline.h"
22#include "pipeline/skia/SkiaOpenGLReadback.h"
23#include "pipeline/skia/SkiaOpenGLPipeline.h"
24#include "pipeline/skia/SkiaVulkanPipeline.h"
John Reck4f02bf42014-01-03 18:09:17 -080025#include "CanvasContext.h"
John Reck3b202512014-06-23 13:13:08 -070026#include "EglManager.h"
Derek Sollenbergerc4fbada2016-11-07 16:05:41 -050027#include "OpenGLReadback.h"
John Reck4f02bf42014-01-03 18:09:17 -080028#include "RenderProxy.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050029#include "VulkanManager.h"
John Reck12efa552016-11-15 10:22:01 -080030#include "utils/FatVector.h"
John Reckcec24ae2013-11-05 13:27:50 -080031
Chris Craik65fe5ee2015-01-26 18:06:29 -080032#include <gui/DisplayEventReceiver.h>
John Reckb36016c2015-03-11 08:50:53 -070033#include <gui/ISurfaceComposer.h>
34#include <gui/SurfaceComposerClient.h>
Chris Craik65fe5ee2015-01-26 18:06:29 -080035#include <sys/resource.h>
John Reckcba287b2015-11-10 12:52:44 -080036#include <utils/Condition.h>
Chris Craik65fe5ee2015-01-26 18:06:29 -080037#include <utils/Log.h>
John Reckcba287b2015-11-10 12:52:44 -080038#include <utils/Mutex.h>
Chris Craik65fe5ee2015-01-26 18:06:29 -080039
John Reckcec24ae2013-11-05 13:27:50 -080040namespace android {
John Reckcec24ae2013-11-05 13:27:50 -080041namespace uirenderer {
42namespace renderthread {
43
// Number of events to read at a time from the DisplayEventReceiver pipe.
// The value should be large enough that we can quickly drain the pipe
// using just a few large reads.
static const size_t EVENT_BUFFER_SIZE = 100;

// Slight delay to give the UI time to push us a new frame before we replay
// the pending frame callbacks (see drainDisplayEventQueue()).
static const nsecs_t DISPATCH_FRAME_CALLBACKS_DELAY = milliseconds_to_nanoseconds(4);
John Recke45b1fd2014-04-15 09:50:16 -070051
// Creates an empty queue. The queue is an intrusive singly-linked list
// threaded through RenderTask::mNext; mHead/mTail are null when empty.
TaskQueue::TaskQueue() : mHead(nullptr), mTail(nullptr) {}
John Reck4f02bf42014-01-03 18:09:17 -080053
54RenderTask* TaskQueue::next() {
55 RenderTask* ret = mHead;
56 if (ret) {
57 mHead = ret->mNext;
58 if (!mHead) {
Chris Craikd41c4d82015-01-05 15:51:13 -080059 mTail = nullptr;
John Reck4f02bf42014-01-03 18:09:17 -080060 }
Chris Craikd41c4d82015-01-05 15:51:13 -080061 ret->mNext = nullptr;
John Reck4f02bf42014-01-03 18:09:17 -080062 }
63 return ret;
64}
65
// Returns the head of the queue without removing it; nullptr if empty.
RenderTask* TaskQueue::peek() {
    return mHead;
}
69
// Inserts |task| keeping the list sorted by mRunAt, so tasks pop off in
// scheduled-run order. Equal timestamps keep FIFO order (insertion goes
// after existing tasks with the same mRunAt).
void TaskQueue::queue(RenderTask* task) {
    // Since the RenderTask itself forms the linked list it is not allowed
    // to have the same task queued twice
    LOG_ALWAYS_FATAL_IF(task->mNext || mTail == task, "Task is already in the queue!");
    if (mTail) {
        // Fast path if we can just append
        if (mTail->mRunAt <= task->mRunAt) {
            mTail->mNext = task;
            mTail = task;
        } else {
            // Need to find the proper insertion point
            RenderTask* previous = nullptr;
            RenderTask* next = mHead;
            while (next && next->mRunAt <= task->mRunAt) {
                previous = next;
                next = next->mNext;
            }
            if (!previous) {
                // Runs before everything currently queued; becomes new head.
                task->mNext = mHead;
                mHead = task;
            } else {
                previous->mNext = task;
                if (next) {
                    task->mNext = next;
                } else {
                    // Inserted after the old tail.
                    mTail = task;
                }
            }
        }
    } else {
        // Empty queue.
        mTail = mHead = task;
    }
}
103
John Recka5dda642014-05-22 15:43:54 -0700104void TaskQueue::queueAtFront(RenderTask* task) {
John Reck2f944482017-03-27 14:34:28 -0700105 LOG_ALWAYS_FATAL_IF(task->mNext || mHead == task, "Task is already in the queue!");
John Recka5dda642014-05-22 15:43:54 -0700106 if (mTail) {
107 task->mNext = mHead;
108 mHead = task;
109 } else {
110 mTail = mHead = task;
111 }
112}
113
// Unlinks |task| from the queue; fatals if the task is not actually queued.
void TaskQueue::remove(RenderTask* task) {
    // TaskQueue is strict here to enforce that users are keeping track of
    // their RenderTasks due to how their memory is managed
    LOG_ALWAYS_FATAL_IF(!task->mNext && mTail != task,
            "Cannot remove a task that isn't in the queue!");

    // If task is the head we can just call next() to pop it off
    // Otherwise we need to scan through to find the task before it
    if (peek() == task) {
        next();
    } else {
        RenderTask* previous = mHead;
        while (previous->mNext != task) {
            previous = previous->mNext;
        }
        previous->mNext = task->mNext;
        if (mTail == task) {
            mTail = previous;
        }
    }
}
135
// RenderTask that forwards to RenderThread::dispatchFrameCallbacks().
// A single instance is owned by the RenderThread (mFrameCallbackTask) and is
// re-queued for each vsync that has pending IFrameCallbacks.
class DispatchFrameCallbacks : public RenderTask {
private:
    RenderThread* mRenderThread;  // not owned; outlives this task

public:
    explicit DispatchFrameCallbacks(RenderThread* rt) : mRenderThread(rt) {}

    virtual void run() override {
        mRenderThread->dispatchFrameCallbacks();
    }
};
146
// Set (and never cleared) once getInstance() has created the singleton; lets
// callers cheaply check for a RenderThread without creating one.
static bool gHasRenderThreadInstance = false;

// Returns true if the RenderThread singleton has been created.
bool RenderThread::hasInstance() {
    return gHasRenderThreadInstance;
}
152
// Returns the process-wide RenderThread singleton, creating (and starting)
// it on first use. The instance is intentionally leaked.
RenderThread& RenderThread::getInstance() {
    // This is a pointer because otherwise __cxa_finalize
    // will try to delete it like a Good Citizen but that causes us to crash
    // because we don't want to delete the RenderThread normally.
    static RenderThread* sInstance = new RenderThread();
    gHasRenderThreadInstance = true;
    return *sInstance;
}
161
// Constructs and immediately starts the render thread. Most members are
// initialized later, on the thread itself, in initThreadLocals(); only the
// frame-callback dispatch task and the Looper are created here.
RenderThread::RenderThread() : Thread(true)
        , mNextWakeup(LLONG_MAX)
        , mDisplayEventReceiver(nullptr)
        , mVsyncRequested(false)
        , mFrameCallbackTaskPending(false)
        , mFrameCallbackTask(nullptr)
        , mRenderState(nullptr)
        , mEglManager(nullptr)
        , mVkManager(nullptr) {
    Properties::load();
    mFrameCallbackTask = new DispatchFrameCallbacks(this);
    mLooper = new Looper(false);
    run("RenderThread");
}
176
// The RenderThread lives for the life of the process; destroying it is a
// programming error, so abort loudly instead of tearing down.
RenderThread::~RenderThread() {
    LOG_ALWAYS_FATAL("Can't destroy the render thread");
}
180
// Creates the DisplayEventReceiver used to listen for vsync, and registers
// its fd with the Looper so displayEventReceiverCallback() fires on input.
// Must be called exactly once (fatals on a second call); invoked from
// initThreadLocals() on the render thread.
void RenderThread::initializeDisplayEventReceiver() {
    LOG_ALWAYS_FATAL_IF(mDisplayEventReceiver, "Initializing a second DisplayEventReceiver?");
    mDisplayEventReceiver = new DisplayEventReceiver();
    status_t status = mDisplayEventReceiver->initCheck();
    LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "Initialization of DisplayEventReceiver "
            "failed with status: %d", status);

    // Register the FD
    mLooper->addFd(mDisplayEventReceiver->getFd(), 0,
            Looper::EVENT_INPUT, RenderThread::displayEventReceiverCallback, this);
}
192
// One-time setup that runs on the render thread itself (from threadLoop()):
// queries the main display's info to derive the frame interval, hooks up the
// vsync receiver, and constructs the GL/Vulkan managers plus render-state,
// jank-tracking and cache-manager objects.
void RenderThread::initThreadLocals() {
    sp<IBinder> dtoken(SurfaceComposerClient::getBuiltInDisplay(
            ISurfaceComposer::eDisplayIdMain));
    status_t status = SurfaceComposerClient::getDisplayInfo(dtoken, &mDisplayInfo);
    LOG_ALWAYS_FATAL_IF(status, "Failed to get display info\n");
    // Frame interval in ns derived from the display's reported refresh rate.
    nsecs_t frameIntervalNanos = static_cast<nsecs_t>(1000000000 / mDisplayInfo.fps);
    mTimeLord.setFrameInterval(frameIntervalNanos);
    initializeDisplayEventReceiver();
    mEglManager = new EglManager(*this);
    mRenderState = new RenderState(*this);
    mJankTracker = new JankTracker(mDisplayInfo);
    mVkManager = new VulkanManager(*this);
    mCacheManager = new CacheManager(mDisplayInfo);
}
207
208void RenderThread::dumpGraphicsMemory(int fd) {
209 jankTracker().dump(fd);
210
211 String8 cachesOutput;
212 String8 pipeline;
213 auto renderType = Properties::getRenderPipelineType();
214 switch (renderType) {
215 case RenderPipelineType::OpenGL: {
216 if (Caches::hasInstance()) {
217 cachesOutput.appendFormat("Caches:\n");
218 Caches::getInstance().dumpMemoryUsage(cachesOutput);
219 } else {
220 cachesOutput.appendFormat("No caches instance.");
221 }
222 pipeline.appendFormat("FrameBuilder");
223 break;
224 }
225 case RenderPipelineType::SkiaGL: {
226 mCacheManager->dumpMemoryUsage(cachesOutput, mRenderState);
227 pipeline.appendFormat("Skia (OpenGL)");
228 break;
229 }
230 case RenderPipelineType::SkiaVulkan: {
231 mCacheManager->dumpMemoryUsage(cachesOutput, mRenderState);
232 pipeline.appendFormat("Skia (Vulkan)");
233 break;
234 }
235 default:
236 LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
237 break;
238 }
239
240 FILE *file = fdopen(fd, "a");
241 fprintf(file, "\n%s\n", cachesOutput.string());
242 fprintf(file, "\nPipeline=%s\n", pipeline.string());
243 fflush(file);
John Reck3b202512014-06-23 13:13:08 -0700244}
245
// Lazily creates and returns the Readback implementation matching the
// configured render pipeline. The result is cached in mReadback and reused
// on subsequent calls. SkiaGL and SkiaVulkan both use the GL-based readback
// (see comment below).
Readback& RenderThread::readback() {

    if (!mReadback) {
        auto renderType = Properties::getRenderPipelineType();
        switch (renderType) {
            case RenderPipelineType::OpenGL:
                mReadback = new OpenGLReadbackImpl(*this);
                break;
            case RenderPipelineType::SkiaGL:
            case RenderPipelineType::SkiaVulkan:
                // It works to use the OpenGL pipeline for Vulkan but this is not
                // ideal as it causes us to create an OpenGL context in addition
                // to the Vulkan one.
                mReadback = new skiapipeline::SkiaOpenGLReadback(*this);
                break;
            default:
                LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
                break;
        }
    }

    return *mReadback;
}
269
// Installs a new Skia GrContext (may be null). The cache manager is pointed
// at the new context first; any previously-held context is told to release
// its GPU resources and abandon itself before ownership transfers.
void RenderThread::setGrContext(GrContext* context) {
    mCacheManager->reset(context);
    if (mGrContext.get()) {
        mGrContext->releaseResourcesAndAbandonContext();
    }
    mGrContext.reset(context);
}
277
// Looper fd callback for the DisplayEventReceiver pipe (registered in
// initializeDisplayEventReceiver()). |data| is the RenderThread*.
// Returns 1 to keep the callback registered, 0 to unregister it.
int RenderThread::displayEventReceiverCallback(int fd, int events, void* data) {
    if (events & (Looper::EVENT_ERROR | Looper::EVENT_HANGUP)) {
        ALOGE("Display event receiver pipe was closed or an error occurred. "
                "events=0x%x", events);
        return 0; // remove the callback
    }

    if (!(events & Looper::EVENT_INPUT)) {
        ALOGW("Received spurious callback for unhandled poll event. "
                "events=0x%x", events);
        return 1; // keep the callback
    }

    reinterpret_cast<RenderThread*>(data)->drainDisplayEventQueue();

    return 1; // keep the callback
}
295
296static nsecs_t latestVsyncEvent(DisplayEventReceiver* receiver) {
297 DisplayEventReceiver::Event buf[EVENT_BUFFER_SIZE];
298 nsecs_t latest = 0;
299 ssize_t n;
300 while ((n = receiver->getEvents(buf, EVENT_BUFFER_SIZE)) > 0) {
301 for (ssize_t i = 0; i < n; i++) {
302 const DisplayEventReceiver::Event& ev = buf[i];
303 switch (ev.header.type) {
304 case DisplayEventReceiver::DISPLAY_EVENT_VSYNC:
305 latest = ev.header.timestamp;
306 break;
307 }
308 }
309 }
310 if (n < 0) {
311 ALOGW("Failed to get events from display event receiver, status=%d", status_t(n));
312 }
313 return latest;
314}
315
// Empties the display event pipe and, if a fresh vsync arrived (and the
// TimeLord accepts it as newer), schedules mFrameCallbackTask to run
// DISPATCH_FRAME_CALLBACKS_DELAY after the vsync timestamp. At most one
// dispatch task is pending at a time (mFrameCallbackTaskPending).
void RenderThread::drainDisplayEventQueue() {
    ATRACE_CALL();
    nsecs_t vsyncEvent = latestVsyncEvent(mDisplayEventReceiver);
    if (vsyncEvent > 0) {
        mVsyncRequested = false;
        if (mTimeLord.vsyncReceived(vsyncEvent) && !mFrameCallbackTaskPending) {
            ATRACE_NAME("queue mFrameCallbackTask");
            mFrameCallbackTaskPending = true;
            nsecs_t runAt = (vsyncEvent + DISPATCH_FRAME_CALLBACKS_DELAY);
            queueAt(mFrameCallbackTask, runAt);
        }
    }
}
329
330void RenderThread::dispatchFrameCallbacks() {
John Recka5dda642014-05-22 15:43:54 -0700331 ATRACE_CALL();
John Recke45b1fd2014-04-15 09:50:16 -0700332 mFrameCallbackTaskPending = false;
333
334 std::set<IFrameCallback*> callbacks;
335 mFrameCallbacks.swap(callbacks);
336
John Recka733f892014-12-19 11:37:21 -0800337 if (callbacks.size()) {
338 // Assume one of them will probably animate again so preemptively
339 // request the next vsync in case it occurs mid-frame
340 requestVsync();
341 for (std::set<IFrameCallback*>::iterator it = callbacks.begin(); it != callbacks.end(); it++) {
342 (*it)->doFrame();
343 }
John Recke45b1fd2014-04-15 09:50:16 -0700344 }
345}
346
// Requests the next vsync from the DisplayEventReceiver. Deduplicated via
// mVsyncRequested so only one request is outstanding at a time; the flag is
// cleared when a vsync is consumed in drainDisplayEventQueue().
void RenderThread::requestVsync() {
    if (!mVsyncRequested) {
        mVsyncRequested = true;
        status_t status = mDisplayEventReceiver->requestNextVsync();
        LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
                "requestNextVsync failed with status: %d", status);
    }
}
355
// Main loop of the render thread: polls the Looper (which also services the
// display-event fd), drains and runs all due tasks, then derives the next
// poll timeout from the earliest still-pending task. Never returns in
// normal operation (the destructor fatals).
bool RenderThread::threadLoop() {
    setpriority(PRIO_PROCESS, 0, PRIORITY_DISPLAY);
    initThreadLocals();

    int timeoutMillis = -1;  // -1 == block indefinitely in pollOnce()
    for (;;) {
        int result = mLooper->pollOnce(timeoutMillis);
        LOG_ALWAYS_FATAL_IF(result == Looper::POLL_ERROR,
                "RenderThread Looper POLL_ERROR!");

        nsecs_t nextWakeup;
        {
            FatVector<RenderTask*, 10> workQueue;
            // Process our queue, if we have anything. By first acquiring
            // all the pending events then processing them we avoid vsync
            // starvation if more tasks are queued while we are processing tasks.
            while (RenderTask* task = nextTask(&nextWakeup)) {
                workQueue.push_back(task);
            }
            for (auto task : workQueue) {
                task->run();
                // task may have deleted itself, do not reference it again
            }
        }
        // Convert the absolute next-wakeup time into a poll timeout.
        if (nextWakeup == LLONG_MAX) {
            timeoutMillis = -1;
        } else {
            nsecs_t timeoutNanos = nextWakeup - systemTime(SYSTEM_TIME_MONOTONIC);
            timeoutMillis = nanoseconds_to_milliseconds(timeoutNanos);
            if (timeoutMillis < 0) {
                timeoutMillis = 0;
            }
        }

        // Promote callbacks registered since the last iteration into the
        // active set, draining any stale vsync first, and make sure a vsync
        // is scheduled for them.
        if (mPendingRegistrationFrameCallbacks.size() && !mFrameCallbackTaskPending) {
            drainDisplayEventQueue();
            mFrameCallbacks.insert(
                    mPendingRegistrationFrameCallbacks.begin(), mPendingRegistrationFrameCallbacks.end());
            mPendingRegistrationFrameCallbacks.clear();
            requestVsync();
        }

        if (!mFrameCallbackTaskPending && !mVsyncRequested && mFrameCallbacks.size()) {
            // TODO: Clean this up. This is working around an issue where a combination
            // of bad timing and slow drawing can result in dropping a stale vsync
            // on the floor (correct!) but fails to schedule to listen for the
            // next vsync (oops), so none of the callbacks are run.
            requestVsync();
        }
    }

    return false;
}
409
// Thread-safe enqueue. Wakes the Looper when the new task is due sooner
// than the currently-scheduled wakeup, so threadLoop() re-evaluates its
// poll timeout.
void RenderThread::queue(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queue(task);
    if (mNextWakeup && task->mRunAt < mNextWakeup) {
        mNextWakeup = 0;
        mLooper->wake();
    }
}
418
// Enqueues |task| and blocks the calling thread until the render thread has
// finished running it.
void RenderThread::queueAndWait(RenderTask* task) {
    // These need to be local to the thread to avoid the Condition
    // signaling the wrong thread. The easiest way to achieve that is to just
    // make this on the stack, although that has a slight cost to it
    Mutex mutex;
    Condition condition;
    SignalingRenderTask syncTask(task, &mutex, &condition);

    AutoMutex _lock(mutex);
    queue(&syncTask);
    // Loop rather than a single wait() to guard against spurious wakeups.
    while (!syncTask.hasRun()) {
        condition.wait(mutex);
    }
}
433
// Thread-safe enqueue at the head of the task queue; always wakes the
// Looper so the task runs as soon as possible.
void RenderThread::queueAtFront(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queueAtFront(task);
    mLooper->wake();
}
439
// Schedules |task| to run at the absolute monotonic-clock time |runAtNs|
// (compared against systemTime(SYSTEM_TIME_MONOTONIC) in nextTask()).
void RenderThread::queueAt(RenderTask* task, nsecs_t runAtNs) {
    task->mRunAt = runAtNs;
    queue(task);
}
444
// Thread-safe removal of a queued task; fatals (via TaskQueue::remove) if
// the task is not actually in the queue.
void RenderThread::remove(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.remove(task);
}
449
// Registers |callback| for dispatch on an upcoming frame; threadLoop()
// promotes it from the pending set into the active set.
// NOTE(review): no locking here — presumably only called on the render
// thread; confirm against callers.
void RenderThread::postFrameCallback(IFrameCallback* callback) {
    mPendingRegistrationFrameCallbacks.insert(callback);
}
453
// Unregisters |callback| from both the active and pending sets.
// Returns true if it was present in either.
bool RenderThread::removeFrameCallback(IFrameCallback* callback) {
    size_t erased;
    erased = mFrameCallbacks.erase(callback);
    erased |= mPendingRegistrationFrameCallbacks.erase(callback);
    return erased;
}
460
// Defers |callback| to a later frame: if it is currently in the active set,
// it is moved back into the pending-registration set. No-op otherwise.
void RenderThread::pushBackFrameCallback(IFrameCallback* callback) {
    if (mFrameCallbacks.erase(callback)) {
        mPendingRegistrationFrameCallbacks.insert(callback);
    }
}
466
// Pops and returns the next task that is due to run now, or nullptr when
// the queue is empty or the earliest task is scheduled for the future.
// If |nextWakeup| is non-null it receives the absolute time of the next
// required wakeup (LLONG_MAX when idle). Thread-safe.
RenderTask* RenderThread::nextTask(nsecs_t* nextWakeup) {
    AutoMutex _lock(mLock);
    RenderTask* next = mQueue.peek();
    if (!next) {
        mNextWakeup = LLONG_MAX;
    } else {
        mNextWakeup = next->mRunAt;
        // Most tasks won't be delayed, so avoid unnecessary systemTime() calls
        if (next->mRunAt <= 0 || next->mRunAt <= systemTime(SYSTEM_TIME_MONOTONIC)) {
            next = mQueue.next();
        } else {
            // Earliest task isn't due yet; caller should sleep until mNextWakeup.
            next = nullptr;
        }
    }
    if (nextWakeup) {
        *nextWakeup = mNextWakeup;
    }
    return next;
}
486
// Allocates a hardware-backed Bitmap from |skBitmap| via whichever render
// pipeline is configured. The trailing nullptr return is only reachable
// through the (fatal) default case.
sk_sp<Bitmap> RenderThread::allocateHardwareBitmap(SkBitmap& skBitmap) {
    auto renderType = Properties::getRenderPipelineType();
    switch (renderType) {
        case RenderPipelineType::OpenGL:
            return OpenGLPipeline::allocateHardwareBitmap(*this, skBitmap);
        case RenderPipelineType::SkiaGL:
            return skiapipeline::SkiaOpenGLPipeline::allocateHardwareBitmap(*this, skBitmap);
        case RenderPipelineType::SkiaVulkan:
            return skiapipeline::SkiaVulkanPipeline::allocateHardwareBitmap(*this, skBitmap);
        default:
            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
            break;
    }
    return nullptr;
}
502
John Reckcec24ae2013-11-05 13:27:50 -0800503} /* namespace renderthread */
504} /* namespace uirenderer */
505} /* namespace android */