/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "VulkanManager.h"

#include <private/gui/SyncFeatures.h>

#include "Properties.h"
#include "RenderThread.h"
#include "renderstate/RenderState.h"
#include "utils/FatVector.h"

#include <GrBackendSurface.h>
#include <GrContext.h>
#include <GrTypes.h>
#include <vk/GrVkExtensions.h>
#include <vk/GrVkTypes.h>

namespace android {
namespace uirenderer {
namespace renderthread {

#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
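// Each macro binds a Vulkan entry point to the matching m-prefixed member function pointer.
// For example, GET_DEV_PROC(QueueSubmit) expands to
//     mQueueSubmit = (PFN_vkQueueSubmit)vkGetDeviceProcAddr(mDevice, "vkQueueSubmit");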

VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}

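// Tears down everything initialize() created: drops the cached GrContext, destroys the command
// pool (which implicitly frees mDummyCB), then the logical device, and finally the instance.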
void VulkanManager::destroy() {
    mRenderThread.setGrContext(nullptr);

    // We don't need to explicitly free the command buffer since it automatically gets freed when
    // we delete the VkCommandPool below.
    mDummyCB = VK_NULL_HANDLE;

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
}

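// Creates the VkInstance and VkDevice, verifies the surface, swapchain, and external-semaphore
// extensions we rely on, and fills in the extension list and device feature chain that Skia's
// GrContext setup will need.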
bool VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
            VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
            nullptr,                            // pNext
            "android framework",                // pApplicationName
            0,                                  // applicationVersion
            "android framework",                // pEngineName
            0,                                  // engineVersion
            VK_MAKE_VERSION(1, 0, 0),           // apiVersion
    };

    std::vector<const char*> instanceExtensions;
    {
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != err) {
            return false;
        }
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
        if (VK_SUCCESS != err) {
            return false;
        }
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extensions[i].extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
            this->destroy();
            return false;
        }
    }

    const VkInstanceCreateInfo instance_create = {
            VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
            nullptr,                                // pNext
            0,                                      // flags
            &app_info,                              // pApplicationInfo
            0,                                      // enabledLayerCount
            nullptr,                                // ppEnabledLayerNames
            (uint32_t) instanceExtensions.size(),   // enabledExtensionCount
            instanceExtensions.data(),              // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    if (err < 0) {
        this->destroy();
        return false;
    }

    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(CreateAndroidSurfaceKHR);
    GET_INST_PROC(DestroySurfaceKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);

    uint32_t gpuCount;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
    if (err) {
        this->destroy();
        return false;
    }
    if (!gpuCount) {
        this->destroy();
        return false;
    }
    // Just return the first physical device rather than enumerating the whole array, since there
    // should only be one device on Android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        this->destroy();
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    if (!queueCount) {
        this->destroy();
        return false;
    }

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    if (mGraphicsQueueIndex == queueCount) {
        this->destroy();
        return false;
    }

    // All physical devices and queue families on Android must be capable of
    // presentation with any native window. So just use the first one.
    mPresentQueueIndex = 0;

    std::vector<const char*> deviceExtensions;
    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                nullptr);
        if (VK_SUCCESS != err) {
            this->destroy();
            return false;
        }
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                extensions.get());
        if (VK_SUCCESS != err) {
            this->destroy();
            return false;
        }
        bool hasKHRSwapchainExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        if (!hasKHRSwapchainExtension) {
            this->destroy();
            return false;
        }
    }

    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };
    grExtensions.init(getProc, mInstance, mPhysicalDevice, instanceExtensions.size(),
            instanceExtensions.data(), deviceExtensions.size(), deviceExtensions.data());

    if (!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1)) {
        this->destroy();
        return false;
    }

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Set up all extension feature structs we may want to use.
    void** tailPNext = &features.pNext;

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // This looks like it would slow things down, and we can't depend on it on all platforms.
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = { 0.0 };

    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex,                        // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            mPresentQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
            &features,                            // pNext
            0,                                    // VkDeviceCreateFlags
            queueInfoCount,                       // queueCreateInfoCount
            queueInfo,                            // pQueueCreateInfos
            0,                                    // enabledLayerCount
            nullptr,                              // ppEnabledLayerNames
            (uint32_t) deviceExtensions.size(),   // enabledExtensionCount
            deviceExtensions.data(),              // ppEnabledExtensionNames
            nullptr,                              // pEnabledFeatures
    };

    err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
    if (err) {
        this->destroy();
        return false;
    }

    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);

    return true;
}

static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features.pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        free(current);
    }
}

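// One-time initialization of the Vulkan backend: requires a Vulkan 1.1 loader, sets up the
// device, creates the command pool and dummy command buffer, and hands Skia a GrVkBackendContext
// from which to build the GrContext.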
void VulkanManager::initialize() {
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion = 0;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    GrVkExtensions extensions;
    VkPhysicalDeviceFeatures2 features;
    LOG_ALWAYS_FATAL_IF(!this->setupDevice(extensions, features));

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fInstanceVersion = instanceVersion;
    backendContext.fVkExtensions = &extensions;
    backendContext.fDeviceFeatures2 = &features;
    backendContext.fGetProc = std::move(getProc);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);

    if (!setupDummyCommandBuffer()) {
        this->destroy();
        return;
    }
    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    GrContextOptions options;
    options.fDisableDistanceFieldPaths = true;
    // TODO: get a string describing the SPIR-V compiler version and use it here
    mRenderThread.cacheManager().configureContext(&options, nullptr, 0);
    sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
    LOG_ALWAYS_FATAL_IF(!grContext.get());
    mRenderThread.setGrContext(grContext);

    free_features_extensions_structs(features);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}

// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
// previous uses have finished before returning.
VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
    SkASSERT(surface->mBackbuffers);

    ++surface->mCurrentBackbufferIndex;
    if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
        surface->mCurrentBackbufferIndex = 0;
    }

    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
    // reuse its command buffers.
    VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
    if (res != VK_SUCCESS) {
        return nullptr;
    }

    return backbuffer;
}

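// Acquires the next swapchain image, records a barrier that transitions it from its initial or
// present layout to COLOR_ATTACHMENT_OPTIMAL, and returns the SkSurface wrapping it. If the
// surface is out of date, the swapchain is recreated and the acquire is retried once.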
SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
            backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res) {
        // tear swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout)
            ? VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
            : VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask =
            (VK_IMAGE_LAYOUT_UNDEFINED == layout) ? 0 : VK_ACCESS_MEMORY_READ_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
        NULL,                                      // pNext
        srcAccessMask,                             // srcAccessMask
        dstAccessMask,                             // dstAccessMask
        layout,                                    // oldLayout
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,  // newLayout
        mPresentQueueIndex,                        // srcQueueFamilyIndex
        mGraphicsQueueIndex,                       // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
            nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure the acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach first fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    if (!backendRT.isValid()) {
        SkASSERT(backendRT.isValid());
        return nullptr;
    }
    backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}

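// Waits on each backbuffer's fences, then destroys the per-backbuffer semaphores, command
// buffers, and fences along with the surface's image and image-info arrays.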
void VulkanManager::destroyBuffers(VulkanSurface* surface) {
    if (surface->mBackbuffers) {
        for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
            mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
            surface->mBackbuffers[i].mImageIndex = -1;
            mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
            mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
            mFreeCommandBuffers(mDevice, mCommandPool, 2,
                    surface->mBackbuffers[i].mTransitionCmdBuffers);
            mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
            mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
        }
    }

    delete[] surface->mBackbuffers;
    surface->mBackbuffers = nullptr;
    delete[] surface->mImageInfos;
    surface->mImageInfos = nullptr;
    delete[] surface->mImages;
    surface->mImages = nullptr;
}

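// Drains both queues before tearing down the buffers, the swapchain, the VkSurfaceKHR, and the
// surface object itself.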
void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mPresentQueue) {
        mQueueWaitIdle(mPresentQueue);
    }
    mDeviceWaitIdle(mDevice);

    destroyBuffers(surface);

    if (VK_NULL_HANDLE != surface->mSwapchain) {
        mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
        surface->mSwapchain = VK_NULL_HANDLE;
    }

    if (VK_NULL_HANDLE != surface->mVkSurface) {
        mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
        surface->mVkSurface = VK_NULL_HANDLE;
    }
    delete surface;
}

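// Wraps each swapchain image in an SkSurface and allocates the backbuffer bookkeeping
// (semaphores, transition command buffers, and usage fences) used to synchronize rendering
// and presentation.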
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                kRGBA_8888_SkColorType, nullptr, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}

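// Queries the surface's capabilities, formats, and present modes, then builds (or rebuilds) the
// swapchain: clamped extent, minImageCount + 2 images, an RGBA8 sRGB or UNORM format, and
// mailbox present mode when available (otherwise FIFO).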
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount, presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    uint32_t imageCount = caps.minImageCount + 2;
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attachments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
            VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
            VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha &
            (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick our surface format. For now, just make sure it matches our sRGB request:
    VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;

    bool wantSRGB = false;
#ifdef ANDROID_ENABLE_LINEAR_BLENDING
    wantSRGB = true;
#endif
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
        VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
        if (desiredFormat == surfaceFormats[i].format) {
            surfaceFormat = surfaceFormats[i].format;
            colorSpace = surfaceFormats[i].colorSpace;
        }
    }

    if (VK_FORMAT_UNDEFINED == surfaceFormat) {
        return false;
    }

    // If mailbox mode is available, use it, as it is the lowest-latency non-
    // tearing mode. If not, fall back to FIFO which is always available.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
    for (uint32_t i = 0; i < presentModeCount; ++i) {
        // use mailbox
        if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
            mode = presentModes[i];
            break;
        }
    }

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
    if (mGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}

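// Creates a VulkanSurface for the given window: wraps the ANativeWindow in a VkSurfaceKHR and
// builds the initial swapchain. Returns nullptr on failure.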
VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
    initialize();

    if (!window) {
        return nullptr;
    }

    VulkanSurface* surface = new VulkanSurface();

    VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
    memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
    surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
    surfaceCreateInfo.pNext = nullptr;
    surfaceCreateInfo.flags = 0;
    surfaceCreateInfo.window = window;

    VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
            &surface->mVkSurface);
    if (VK_SUCCESS != res) {
        delete surface;
        return nullptr;
    }

    SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
            mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
            // All physical devices and queue families on Android must be capable of
            // presentation with any native window.
            SkASSERT(VK_SUCCESS == res && supported););

    if (!createSwapchain(surface)) {
        destroySurface(surface);
        return nullptr;
    }

    return surface;
}

// Helper to know which src stage flags we need to set when transitioning to the present layout
static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
            VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
            VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
            VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

// Helper to know which src access mask we need to set when transitioning to the present layout
static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
                VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}

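// Transitions the current backbuffer image to PRESENT_SRC_KHR on the graphics queue (signaling
// mRenderSemaphore when the transition completes), then queues the present on the present queue,
// which waits on that semaphore.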
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
        NULL,                                      // pNext
        srcAccessMask,                             // srcAccessMask
        dstAccessMask,                             // dstAccessMask
        layout,                                    // oldLayout
        VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,           // newLayout
        mGraphicsQueueIndex,                       // srcQueueFamilyIndex
        mPresentQueueIndex,                        // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };

    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
            nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to the present queue. We use a semaphore here to make sure all
    // rendering to the image is complete and that the layout has been changed to present on the
    // graphics queue.
    const VkPresentInfoKHR presentInfo = {
        VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
        NULL,                               // pNext
        1,                                  // waitSemaphoreCount
        &backbuffer->mRenderSemaphore,      // pWaitSemaphores
        1,                                  // swapchainCount
        &surface->mSwapchain,               // pSwapchains
        &backbuffer->mImageIndex,           // pImageIndices
        NULL                                // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}

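// Returns the age of the current backbuffer in frames, i.e. how many swaps ago its contents were
// last rendered; 0 means the contents are undefined and the whole frame must be redrawn.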
int VulkanManager::getAge(VulkanSurface* surface) {
    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;
    if (mSwapBehavior == SwapBehavior::Discard ||
            surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
        return 0;
    }
    uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
    return surface->mCurrentTime - lastUsed;
}

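// Records an empty, reusable command buffer once. fenceWait() and createReleaseFence() submit it
// purely to attach wait or signal semaphores to a graphics queue submission.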
bool VulkanManager::setupDummyCommandBuffer() {
    if (mDummyCB != VK_NULL_HANDLE) {
        return true;
    }

    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 1;

    VkResult err = mAllocateCommandBuffers(mDevice, &commandBuffersInfo, &mDummyCB);
    if (err != VK_SUCCESS) {
        // It is probably unnecessary to set this back to VK_NULL_HANDLE, but we set it anyway to
        // make sure the driver didn't set a value and then return a failure.
        mDummyCB = VK_NULL_HANDLE;
        return false;
    }

    VkCommandBufferBeginInfo beginInfo;
    memset(&beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;

    mBeginCommandBuffer(mDummyCB, &beginInfo);
    mEndCommandBuffer(mDummyCB);
    return true;
}

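// Makes the GPU wait on the given Android native fence: the fence fd is imported into a
// temporary SYNC_FD semaphore that the dummy command buffer submission waits on. Falls back to
// blocking the CPU when native fence sync is unavailable.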
status_t VulkanManager::fenceWait(sp<Fence>& fence) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    if (SyncFeatures::getInstance().useWaitSync() &&
            SyncFeatures::getInstance().useNativeFenceSync()) {
        // Block GPU on the fence.
        int fenceFd = fence->dup();
        if (fenceFd == -1) {
            ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
            return -errno;
        }

        VkSemaphoreCreateInfo semaphoreInfo;
        semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
        semaphoreInfo.pNext = nullptr;
        semaphoreInfo.flags = 0;
        VkSemaphore semaphore;
        VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
        if (VK_SUCCESS != err) {
            ALOGE("Failed to create import semaphore, err: %d", err);
            return UNKNOWN_ERROR;
        }
        VkImportSemaphoreFdInfoKHR importInfo;
        importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
        importInfo.pNext = nullptr;
        importInfo.semaphore = semaphore;
        importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
        importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
        importInfo.fd = fenceFd;

        err = mImportSemaphoreFdKHR(mDevice, &importInfo);
        if (VK_SUCCESS != err) {
            ALOGE("Failed to import semaphore, err: %d", err);
            return UNKNOWN_ERROR;
        }

        LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

        VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;

        VkSubmitInfo submitInfo;
        memset(&submitInfo, 0, sizeof(VkSubmitInfo));
        submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submitInfo.waitSemaphoreCount = 1;
        // Wait to make sure the semaphore imported above has signaled.
        submitInfo.pWaitSemaphores = &semaphore;
        submitInfo.pWaitDstStageMask = &waitDstStageFlags;
        submitInfo.commandBufferCount = 1;
        submitInfo.pCommandBuffers = &mDummyCB;
        submitInfo.signalSemaphoreCount = 0;

        mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);

        // On Android when we import a semaphore, it is imported using temporary permanence. That
        // means as soon as we queue the semaphore for a wait it reverts to its previous permanent
        // state before importing. This means it will now be in an idle state with no pending
        // signal or wait operations, so it is safe to immediately delete it.
        mDestroySemaphore(mDevice, semaphore, nullptr);
    } else {
        // Block CPU on the fence.
        status_t err = fence->waitForever("VulkanManager::fenceWait");
        if (err != NO_ERROR) {
            ALOGE("VulkanManager::fenceWait: error waiting for fence: %d", err);
            return err;
        }
    }
    return OK;
}

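// Creates a release fence for the current frame: signals an exportable SYNC_FD semaphore on the
// graphics queue, exports its fd, and returns it wrapped in an android::Fence.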
status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    if (SyncFeatures::getInstance().useFenceSync()) {
        ALOGE("VulkanManager::createReleaseFence: Vk backend doesn't support non-native fences");
        return INVALID_OPERATION;
    }

    if (!SyncFeatures::getInstance().useNativeFenceSync()) {
        return OK;
    }

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitSemaphores = nullptr;
    submitInfo.pWaitDstStageMask = nullptr;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &mDummyCB;
    submitInfo.signalSemaphoreCount = 1;
    submitInfo.pSignalSemaphores = &semaphore;

    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);

    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        return INVALID_OPERATION;
    }
    nativeFence = new Fence(fenceFd);

    // Exporting a semaphore with copy transference via vkGetSemaphoreFdKHR has the same effect as
    // destroying the semaphore and creating a new one with the same handle, with the payload's
    // ownership moved to the fd we created. The semaphore is therefore in a state where we can
    // delete it without waiting for the submitted command buffer to finish.
    mDestroySemaphore(mDevice, semaphore, nullptr);

    return OK;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */