/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

17#include "VulkanManager.h"
18
Derek Sollenbergera19b71a2019-02-15 16:36:30 -050019#include <android/sync.h>
Stan Iliev305e13a2018-11-13 11:14:48 -050020#include <gui/Surface.h>
21
Greg Danielcd558522016-11-17 13:31:40 -050022#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050023#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050024#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050025#include "utils/FatVector.h"
John Reck322b8ab2019-03-14 13:15:28 -070026#include "utils/TraceUtils.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050027
Derek Sollenbergera19b71a2019-02-15 16:36:30 -050028#include <GrBackendSemaphore.h>
Greg Danielac2d2322017-07-12 11:30:15 -040029#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050030#include <GrContext.h>
31#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040032#include <GrTypes.h>
33#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050034#include <vk/GrVkTypes.h>
35
namespace android {
namespace uirenderer {
namespace renderthread {

static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features.pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        free(current);
    }
}

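// Helper macros that resolve Vulkan entry points through vkGetInstanceProcAddr /
// vkGetDeviceProcAddr and store them in the matching m-prefixed member function
// pointers (e.g. GET_DEV_PROC(QueueSubmit) populates mQueueSubmit).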
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)

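// Tears down everything this manager owns (dummy command buffer, command pool, device,
// instance) and clears the cached extension lists and feature chain so that
// initialize() can safely be called again later.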
void VulkanManager::destroy() {
    // We don't need to explicitly free the command buffer since it automatically gets freed when
    // we destroy the VkCommandPool below.
    mDummyCB = VK_NULL_HANDLE;

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
    mInstanceExtensionsOwner.clear();
    mInstanceExtensions.clear();
    mDeviceExtensionsOwner.clear();
    mDeviceExtensions.clear();
    free_features_extensions_structs(mPhysicalDeviceFeatures2);
    mPhysicalDeviceFeatures2 = {};
}

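// Creates the VkInstance and VkDevice used by the renderer: enumerates the available
// instance and device extensions, selects the graphics queue family, builds the
// pNext chain of extension feature structs queried via vkGetPhysicalDeviceFeatures2,
// and resolves the device-level function pointers used throughout this class.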
void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
            VK_STRUCTURE_TYPE_APPLICATION_INFO,  // sType
            nullptr,                             // pNext
            "android framework",                 // pApplicationName
            0,                                   // applicationVersion
            "android framework",                 // pEngineName
            0,                                   // engineVersion
            mAPIVersion,                         // apiVersion
    };

    {
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mInstanceExtensionsOwner.resize(extensionCount);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount,
                mInstanceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (const VkExtensionProperties& extension : mInstanceExtensionsOwner) {
            mInstanceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extension.extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
    }

    const VkInstanceCreateInfo instance_create = {
            VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,  // sType
            nullptr,                                 // pNext
            0,                                       // flags
            &app_info,                               // pApplicationInfo
            0,                                       // enabledLayerNameCount
            nullptr,                                 // ppEnabledLayerNames
            (uint32_t) mInstanceExtensions.size(),   // enabledExtensionNameCount
            mInstanceExtensions.data(),              // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    LOG_ALWAYS_FATAL_IF(err < 0);

    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(GetPhysicalDeviceImageFormatProperties2);
    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(CreateAndroidSurfaceKHR);
    GET_INST_PROC(DestroySurfaceKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);

    uint32_t gpuCount;
    LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
    LOG_ALWAYS_FATAL_IF(!gpuCount);
    // Just return the first physical device instead of getting the whole array, since there
    // should only be one device on Android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);

    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));
    mDriverVersion = physDeviceProperties.driverVersion;

    // 20803 (0x5143) is Qualcomm's PCI vendor ID.
    mIsQualcomm = physDeviceProperties.vendorID == 20803;

    // Query to get the initial queue props size.
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    LOG_ALWAYS_FATAL_IF(!queueCount);

    // Now get the actual queue props.
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // Iterate to find the graphics queue.
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);

    // All physical devices and queue families on Android must be capable of
    // presentation with any native window. So just use the first one.
    mPresentQueueIndex = 0;

    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mDeviceExtensionsOwner.resize(extensionCount);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                mDeviceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSwapchainExtension = false;
        for (const VkExtensionProperties& extension : mDeviceExtensionsOwner) {
            mDeviceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
    }

    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    grExtensions.init(getProc, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
            mInstanceExtensions.data(), mDeviceExtensions.size(), mDeviceExtensions.data());

    LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Set up all extension feature structs we may want to use. Each struct is malloc'ed here and
    // freed later by free_features_extensions_structs().
    void** tailPNext = &features.pNext;

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    // Query to get the physical device features.
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // This looks like it would slow things down, and we can't depend on it on all platforms.
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = { 0.0 };

    void* queueNextPtr = nullptr;

    VkDeviceQueueGlobalPriorityCreateInfoEXT queuePriorityCreateInfo;

    if (Properties::contextPriority != 0
            && grExtensions.hasExtension(VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, 2)) {
        memset(&queuePriorityCreateInfo, 0, sizeof(VkDeviceQueueGlobalPriorityCreateInfoEXT));
        queuePriorityCreateInfo.sType =
                VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT;
        queuePriorityCreateInfo.pNext = nullptr;
        switch (Properties::contextPriority) {
            case EGL_CONTEXT_PRIORITY_LOW_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_MEDIUM_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_HIGH_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT;
                break;
            default:
                LOG_ALWAYS_FATAL("Unsupported context priority");
        }
        queueNextPtr = &queuePriorityCreateInfo;
    }

    const VkDeviceQueueCreateInfo queueInfo[2] = {
            {
                    VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
                    queueNextPtr,                                // pNext
                    0,                                           // VkDeviceQueueCreateFlags
                    mGraphicsQueueIndex,                         // queueFamilyIndex
                    1,                                           // queueCount
                    queuePriorities,                             // pQueuePriorities
            },
            {
                    VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
                    queueNextPtr,                                // pNext
                    0,                                           // VkDeviceQueueCreateFlags
                    mPresentQueueIndex,                          // queueFamilyIndex
                    1,                                           // queueCount
                    queuePriorities,                             // pQueuePriorities
            }
    };
    uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,  // sType
            &features,                             // pNext
            0,                                     // VkDeviceCreateFlags
            queueInfoCount,                        // queueCreateInfoCount
            queueInfo,                             // pQueueCreateInfos
            0,                                     // layerCount
            nullptr,                               // ppEnabledLayerNames
            (uint32_t) mDeviceExtensions.size(),   // extensionCount
            mDeviceExtensions.data(),              // ppEnabledExtensionNames
            nullptr,                               // ppEnabledFeatures
    };

    LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));

    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);
}

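// One-time setup for the render thread: creates the device, fetches the graphics and
// present queues, creates the command pool and the dummy command buffer used for
// fence/semaphore-only submissions, and selects the swap behavior.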
void VulkanManager::initialize() {
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    this->setupDevice(mExtensions, mPhysicalDeviceFeatures2);

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    // Create the command pool for the command buffers.
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // This needs to be on the render queue.
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);

    if (!setupDummyCommandBuffer()) {
        this->destroy();
        // Falling through here will crash on the next line.
    }
    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}

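// Builds a GrVkBackendContext that shares this manager's instance, device, queue,
// extensions, and feature chain, and hands it to Skia via GrContext::MakeVulkan().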
sk_sp<GrContext> VulkanManager::createContext(const GrContextOptions& options) {
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fMaxAPIVersion = mAPIVersion;
    backendContext.fVkExtensions = &mExtensions;
    backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
    backendContext.fGetProc = std::move(getProc);

    return GrContext::MakeVulkan(backendContext, options);
}

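// Bundles the instance/device handles, queue, enabled extension lists, and feature
// chain into the VkFunctorInitParams passed to functor-based (e.g. WebView) drawing.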
VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
    return VkFunctorInitParams{
            .instance = mInstance,
            .physical_device = mPhysicalDevice,
            .device = mDevice,
            .queue = mGraphicsQueue,
            .graphics_queue_index = mGraphicsQueueIndex,
            .api_version = mAPIVersion,
            .enabled_instance_extension_names = mInstanceExtensions.data(),
            .enabled_instance_extension_names_length =
                    static_cast<uint32_t>(mInstanceExtensions.size()),
            .enabled_device_extension_names = mDeviceExtensions.data(),
            .enabled_device_extension_names_length =
                    static_cast<uint32_t>(mDeviceExtensions.size()),
            .device_features_2 = &mPhysicalDeviceFeatures2,
    };
}

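// Dequeues the next buffer from the VulkanSurface. If the buffer's dequeue fence is
// still pending, the fence fd is imported as a temporary VkSemaphore that the
// SkSurface waits on before any rendering. Returns Frame(-1, -1, 0) on failure.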
Frame VulkanManager::dequeueNextBuffer(VulkanSurface* surface) {
    VulkanSurface::NativeBufferInfo* bufferInfo = surface->dequeueNativeBuffer();

    if (bufferInfo == nullptr) {
        ALOGE("VulkanSurface::dequeueNativeBuffer called with an invalid surface!");
        return Frame(-1, -1, 0);
    }

    LOG_ALWAYS_FATAL_IF(!bufferInfo->dequeued);

    if (bufferInfo->dequeue_fence != -1) {
        struct sync_file_info* finfo = sync_file_info(bufferInfo->dequeue_fence);
        bool isSignalPending = false;
        if (finfo != NULL) {
            isSignalPending = finfo->status != 1;
            sync_file_info_free(finfo);
        }
        if (isSignalPending) {
            int fence_clone = dup(bufferInfo->dequeue_fence);
            if (fence_clone == -1) {
                ALOGE("dup(fence) failed, stalling until signalled: %s (%d)", strerror(errno),
                        errno);
                sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
            } else {
                VkSemaphoreCreateInfo semaphoreInfo;
                semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
                semaphoreInfo.pNext = nullptr;
                semaphoreInfo.flags = 0;
                VkSemaphore semaphore;
                VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
                LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err, "Failed to create import semaphore, err: %d",
                        err);

                VkImportSemaphoreFdInfoKHR importInfo;
                importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
                importInfo.pNext = nullptr;
                importInfo.semaphore = semaphore;
                importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
                importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
                importInfo.fd = fence_clone;

                err = mImportSemaphoreFdKHR(mDevice, &importInfo);
                LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err, "Failed to import semaphore, err: %d", err);

                GrBackendSemaphore backendSemaphore;
                backendSemaphore.initVulkan(semaphore);
                bufferInfo->skSurface->wait(1, &backendSemaphore);
                // The following flush blocks the GPU immediately instead of waiting for other
                // drawing ops. It seems dequeue_fence is not respected otherwise.
                // TODO: remove the flush after finding why backendSemaphore is not working.
                bufferInfo->skSurface->flush();
            }
        }
    }

    int bufferAge = (mSwapBehavior == SwapBehavior::Discard) ? 0 : surface->getCurrentBuffersAge();
    return Frame(surface->logicalWidth(), surface->logicalHeight(), bufferAge);
}

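// Flushes the frame, asking Skia to signal an exported semaphore, converts that
// semaphore into a sync fd, and passes the fd to the surface as the present fence.
// Falls back to a graphics-queue wait if the semaphore could not be submitted.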
void VulkanManager::swapBuffers(VulkanSurface* surface, const SkRect& dirtyRect) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    VulkanSurface::NativeBufferInfo* bufferInfo = surface->getCurrentBufferInfo();
    if (!bufferInfo) {
        // If VulkanSurface::dequeueNativeBuffer failed earlier, then swapBuffers is a no-op.
        return;
    }

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    ALOGE_IF(VK_SUCCESS != err, "VulkanManager::swapBuffers(): Failed to create semaphore");

    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    int fenceFd = -1;
    GrSemaphoresSubmitted submitted =
            bufferInfo->skSurface->flush(SkSurface::BackendSurfaceAccess::kPresent,
                    SkSurface::kNone_FlushFlags, 1, &backendSemaphore);
    if (submitted == GrSemaphoresSubmitted::kYes) {
        VkSemaphoreGetFdInfoKHR getFdInfo;
        getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
        getFdInfo.pNext = nullptr;
        getFdInfo.semaphore = semaphore;
        getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

        err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
        ALOGE_IF(VK_SUCCESS != err, "VulkanManager::swapBuffers(): Failed to get semaphore Fd");
    } else {
        ALOGE("VulkanManager::swapBuffers(): Semaphore submission failed");
        mQueueWaitIdle(mGraphicsQueue);
    }

    surface->presentCurrentBuffer(dirtyRect, fenceFd);

    // Exporting a semaphore with copy transference via vkGetSemaphoreFdKHR has the same effect as
    // destroying the semaphore and creating a new one with the same handle, and the payload's
    // ownership is moved to the fd we created. Thus the semaphore is in a state that we can
    // delete it, and we don't need to wait on the command buffer we submitted to finish.
    mDestroySemaphore(mDevice, semaphore, nullptr);
}

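// Waits for all outstanding work on the present queue and the device to finish
// before deleting the VulkanSurface.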
void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mPresentQueue) {
        mQueueWaitIdle(mPresentQueue);
    }
    mDeviceWaitIdle(mDevice);

    delete surface;
}

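// Wraps an ANativeWindow in a VulkanSurface tied to the given GrContext. Requires
// initialize() to have been called; returns nullptr if the window is null.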
VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode,
                                            sk_sp<SkColorSpace> surfaceColorSpace,
                                            SkColorType surfaceColorType,
                                            GrContext* grContext) {
    LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
    if (!window) {
        return nullptr;
    }

    return VulkanSurface::Create(window, colorMode, surfaceColorType, surfaceColorSpace, grContext,
            *this);
}

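// Allocates and records an empty command buffer once. fenceWait() and
// createReleaseFence() submit it so their wait/signal semaphores have a queue
// submission to attach to.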
bool VulkanManager::setupDummyCommandBuffer() {
    if (mDummyCB != VK_NULL_HANDLE) {
        return true;
    }

    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 1;

    VkResult err = mAllocateCommandBuffers(mDevice, &commandBuffersInfo, &mDummyCB);
    if (err != VK_SUCCESS) {
        // It is probably unnecessary to set this back to VK_NULL_HANDLE, but we set it anyway to
        // make sure the driver didn't set a value and then return a failure.
        mDummyCB = VK_NULL_HANDLE;
        return false;
    }

    VkCommandBufferBeginInfo beginInfo;
    memset(&beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;

    mBeginCommandBuffer(mDummyCB, &beginInfo);
    mEndCommandBuffer(mDummyCB);
    return true;
}

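// Imports the given fence fd as a temporarily-imported VkSemaphore and submits the
// dummy command buffer waiting on it, so subsequent GPU work is ordered after the
// fence without blocking the CPU.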
status_t VulkanManager::fenceWait(sp<Fence>& fence) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    // Block GPU on the fence.
    int fenceFd = fence->dup();
    if (fenceFd == -1) {
        ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
        return -errno;
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("Failed to create import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }
    VkImportSemaphoreFdInfoKHR importInfo;
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
    importInfo.pNext = nullptr;
    importInfo.semaphore = semaphore;
    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
    importInfo.fd = fenceFd;

    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
    if (VK_SUCCESS != err) {
        ALOGE("Failed to import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }

    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure the acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &semaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &mDummyCB;
    submitInfo.signalSemaphoreCount = 0;

    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);

    // On Android, when we import a semaphore, it is imported using temporary permanence. That
    // means as soon as we queue the semaphore for a wait it reverts to the permanent state it
    // had before the import. This means it will now be in an idle state with no pending
    // signal or wait operations, so it is safe to immediately delete it.
    mDestroySemaphore(mDevice, semaphore, nullptr);
    return OK;
}

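// Submits the dummy command buffer signaling an exported semaphore, then converts
// that semaphore into a sync fd wrapped in a Fence that the caller can hand back to
// the buffer producer as a release fence.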
status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitSemaphores = nullptr;
    submitInfo.pWaitDstStageMask = nullptr;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &mDummyCB;
    submitInfo.signalSemaphoreCount = 1;
    submitInfo.pSignalSemaphores = &semaphore;

    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);

    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        return INVALID_OPERATION;
    }
    nativeFence = new Fence(fenceFd);

    // Exporting a semaphore with copy transference via vkGetSemaphoreFdKHR has the same effect as
    // destroying the semaphore and creating a new one with the same handle, and the payload's
    // ownership is moved to the fd we created. Thus the semaphore is in a state that we can
    // delete it, and we don't need to wait on the command buffer we submitted to finish.
    mDestroySemaphore(mDevice, semaphore, nullptr);

    return OK;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */