/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "VulkanManager.h"

#include <android/sync.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>

#include "Properties.h"
#include "RenderThread.h"
#include "renderstate/RenderState.h"
#include "utils/FatVector.h"
#include "utils/TraceUtils.h"

#include <GrBackendSemaphore.h>
#include <GrBackendSurface.h>
#include <GrContext.h>
#include <GrTypes.h>
#include <vk/GrVkExtensions.h>
#include <vk/GrVkTypes.h>

namespace android {
namespace uirenderer {
namespace renderthread {

static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features.pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        free(current);
    }
}
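
// Illustrative note only (not part of the original change): after setupDevice() below has run,
// the chain freed above typically looks like
//   mPhysicalDeviceFeatures2.pNext -> VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT
//   (when the advanced-blend extension is present)
//   -> VkPhysicalDeviceSamplerYcbcrConversionFeatures -> nullptr
// where every node was malloc()'d, so each one is released with free().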

#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
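// For reference, an example expansion (illustration only): GET_DEV_PROC(QueueSubmit) becomes
//   mQueueSubmit = (PFN_vkQueueSubmit)vkGetDeviceProcAddr(mDevice, "vkQueueSubmit");
// i.e. each macro resolves a Vulkan entry point into the matching m-prefixed member pointer.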

void VulkanManager::destroy() {
    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
    mInstanceExtensionsOwner.clear();
    mInstanceExtensions.clear();
    mDeviceExtensionsOwner.clear();
    mDeviceExtensions.clear();
    free_features_extensions_structs(mPhysicalDeviceFeatures2);
    mPhysicalDeviceFeatures2 = {};
}

void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
            VK_STRUCTURE_TYPE_APPLICATION_INFO,  // sType
            nullptr,                             // pNext
            "android framework",                 // pApplicationName
            0,                                   // applicationVersion
            "android framework",                 // pEngineName
            0,                                   // engineVersion
            mAPIVersion,                         // apiVersion
    };

    {
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mInstanceExtensionsOwner.resize(extensionCount);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount,
                                                    mInstanceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (const VkExtensionProperties& extension : mInstanceExtensionsOwner) {
            mInstanceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extension.extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
    }

    const VkInstanceCreateInfo instance_create = {
            VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,  // sType
            nullptr,                                 // pNext
            0,                                       // flags
            &app_info,                               // pApplicationInfo
            0,                                       // enabledLayerCount
            nullptr,                                 // ppEnabledLayerNames
            (uint32_t)mInstanceExtensions.size(),    // enabledExtensionCount
            mInstanceExtensions.data(),              // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    LOG_ALWAYS_FATAL_IF(err < 0);

    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(GetPhysicalDeviceImageFormatProperties2);
    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);

    uint32_t gpuCount;
    LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
    LOG_ALWAYS_FATAL_IF(!gpuCount);
    // Just return the first physical device instead of getting the whole array, since there
    // should only be one device on Android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);

    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));
    mDriverVersion = physDeviceProperties.driverVersion;

    // query to get the initial queue props size
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    LOG_ALWAYS_FATAL_IF(!queueCount);

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);

    // All physical devices and queue families on Android must be capable of
    // presentation with any native window. So just use the first one.
    mPresentQueueIndex = 0;

    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mDeviceExtensionsOwner.resize(extensionCount);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  mDeviceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSwapchainExtension = false;
        for (const VkExtensionProperties& extension : mDeviceExtensionsOwner) {
            mDeviceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
    }

    auto getProc = [](const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    grExtensions.init(getProc, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
                      mInstanceExtensions.data(), mDeviceExtensions.size(),
                      mDeviceExtensions.data());

    LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Setup all extension feature structs we may want to use.
    void** tailPNext = &features.pNext;

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*)malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*)malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // Disable robustBufferAccess: it looks like it would slow things down,
    // and we can't depend on it on all platforms.
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = {0.0};

    void* queueNextPtr = nullptr;

    VkDeviceQueueGlobalPriorityCreateInfoEXT queuePriorityCreateInfo;

    if (Properties::contextPriority != 0 &&
        grExtensions.hasExtension(VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, 2)) {
        memset(&queuePriorityCreateInfo, 0, sizeof(VkDeviceQueueGlobalPriorityCreateInfoEXT));
        queuePriorityCreateInfo.sType =
                VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT;
        queuePriorityCreateInfo.pNext = nullptr;
        switch (Properties::contextPriority) {
            case EGL_CONTEXT_PRIORITY_LOW_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_MEDIUM_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_HIGH_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT;
                break;
            default:
                LOG_ALWAYS_FATAL("Unsupported context priority");
        }
        queueNextPtr = &queuePriorityCreateInfo;
    }

    const VkDeviceQueueCreateInfo queueInfo[2] = {
            {
                    VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
                    queueNextPtr,                                // pNext
                    0,                                           // VkDeviceQueueCreateFlags
                    mGraphicsQueueIndex,                         // queueFamilyIndex
                    1,                                           // queueCount
                    queuePriorities,                             // pQueuePriorities
            },
            {
                    VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
                    queueNextPtr,                                // pNext
                    0,                                           // VkDeviceQueueCreateFlags
                    mPresentQueueIndex,                          // queueFamilyIndex
                    1,                                           // queueCount
                    queuePriorities,                             // pQueuePriorities
            }};
    uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,  // sType
            &features,                             // pNext
            0,                                     // VkDeviceCreateFlags
            queueInfoCount,                        // queueCreateInfoCount
            queueInfo,                             // pQueueCreateInfos
            0,                                     // enabledLayerCount
            nullptr,                               // ppEnabledLayerNames
            (uint32_t)mDeviceExtensions.size(),    // enabledExtensionCount
            mDeviceExtensions.data(),              // ppEnabledExtensionNames
            nullptr,                               // pEnabledFeatures
    };

    LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));

    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);
}

void VulkanManager::initialize() {
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    this->setupDevice(mExtensions, mPhysicalDeviceFeatures2);

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =)
        mCreateCommandPool(mDevice, &commandPoolInfo, nullptr, &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);

    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}

sk_sp<GrContext> VulkanManager::createContext(const GrContextOptions& options) {
    auto getProc = [](const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fMaxAPIVersion = mAPIVersion;
    backendContext.fVkExtensions = &mExtensions;
    backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
    backendContext.fGetProc = std::move(getProc);

    return GrContext::MakeVulkan(backendContext, options);
}

VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
    return VkFunctorInitParams{
            .instance = mInstance,
            .physical_device = mPhysicalDevice,
            .device = mDevice,
            .queue = mGraphicsQueue,
            .graphics_queue_index = mGraphicsQueueIndex,
            .api_version = mAPIVersion,
            .enabled_instance_extension_names = mInstanceExtensions.data(),
            .enabled_instance_extension_names_length =
                    static_cast<uint32_t>(mInstanceExtensions.size()),
            .enabled_device_extension_names = mDeviceExtensions.data(),
            .enabled_device_extension_names_length =
                    static_cast<uint32_t>(mDeviceExtensions.size()),
            .device_features_2 = &mPhysicalDeviceFeatures2,
    };
}

Frame VulkanManager::dequeueNextBuffer(VulkanSurface* surface) {
    VulkanSurface::NativeBufferInfo* bufferInfo = surface->dequeueNativeBuffer();

    if (bufferInfo == nullptr) {
        ALOGE("VulkanSurface::dequeueNativeBuffer called with an invalid surface!");
        return Frame(-1, -1, 0);
    }

    LOG_ALWAYS_FATAL_IF(!bufferInfo->dequeued);

    if (bufferInfo->dequeue_fence != -1) {
        struct sync_file_info* finfo = sync_file_info(bufferInfo->dequeue_fence);
        bool isSignalPending = false;
        if (finfo != NULL) {
            isSignalPending = finfo->status != 1;
            sync_file_info_free(finfo);
        }
        if (isSignalPending) {
            int fence_clone = dup(bufferInfo->dequeue_fence);
            if (fence_clone == -1) {
                ALOGE("dup(fence) failed, stalling until signalled: %s (%d)", strerror(errno),
                      errno);
                sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
            } else {
                VkSemaphoreCreateInfo semaphoreInfo;
                semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
                semaphoreInfo.pNext = nullptr;
                semaphoreInfo.flags = 0;
                VkSemaphore semaphore;
                VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
                LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err, "Failed to create import semaphore, err: %d",
                                    err);

                VkImportSemaphoreFdInfoKHR importInfo;
                importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
                importInfo.pNext = nullptr;
                importInfo.semaphore = semaphore;
                importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
                importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
                importInfo.fd = fence_clone;

                err = mImportSemaphoreFdKHR(mDevice, &importInfo);
                LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err, "Failed to import semaphore, err: %d", err);

                GrBackendSemaphore backendSemaphore;
                backendSemaphore.initVulkan(semaphore);
                bufferInfo->skSurface->wait(1, &backendSemaphore);
                // The following flush blocks the GPU immediately instead of waiting for other
                // drawing ops. It seems dequeue_fence is not respected otherwise.
                // TODO: remove the flush after finding out why backendSemaphore is not working.
                bufferInfo->skSurface->flush();
            }
        }
    }

    int bufferAge = (mSwapBehavior == SwapBehavior::Discard) ? 0 : surface->getCurrentBuffersAge();
    return Frame(surface->logicalWidth(), surface->logicalHeight(), bufferAge);
}

struct DestroySemaphoreInfo {
    PFN_vkDestroySemaphore mDestroyFunction;
    VkDevice mDevice;
    VkSemaphore mSemaphore;
    // We need to make sure we don't delete the VkSemaphore until it is done being used by both
    // Skia (including by the GPU) and inside the VulkanManager. So we always start with two refs,
    // one owned by Skia and one owned by the VulkanManager. The refs are decremented each time
    // destroy_semaphore is called with this object. Skia will call destroy_semaphore once it is
    // done with the semaphore and the GPU has finished work on the semaphore. The VulkanManager
    // calls destroy_semaphore after sending the semaphore to Skia and exporting it if need be.
    int mRefs = 2;

    DestroySemaphoreInfo(PFN_vkDestroySemaphore destroyFunction, VkDevice device,
                         VkSemaphore semaphore)
            : mDestroyFunction(destroyFunction), mDevice(device), mSemaphore(semaphore) {}
};

static void destroy_semaphore(void* context) {
    DestroySemaphoreInfo* info = reinterpret_cast<DestroySemaphoreInfo*>(context);
    --info->mRefs;
    if (!info->mRefs) {
        info->mDestroyFunction(info->mDevice, info->mSemaphore, nullptr);
        delete info;
    }
}
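
// Illustrative lifecycle sketch drawn from the comments above (not extra logic in the original
// file): a DestroySemaphoreInfo starts with mRefs == 2. Skia invokes destroy_semaphore() once the
// GPU has finished with the semaphore (mRefs -> 1), and the VulkanManager invokes it again after
// flushing/exporting the semaphore (mRefs -> 0), at which point vkDestroySemaphore runs and the
// info object is deleted.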

void VulkanManager::swapBuffers(VulkanSurface* surface, const SkRect& dirtyRect) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    VulkanSurface::NativeBufferInfo* bufferInfo = surface->getCurrentBufferInfo();
    if (!bufferInfo) {
        // If VulkanSurface::dequeueNativeBuffer failed earlier, then swapBuffers is a no-op.
        return;
    }

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    ALOGE_IF(VK_SUCCESS != err, "VulkanManager::swapBuffers(): Failed to create semaphore");

    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    int fenceFd = -1;
    DestroySemaphoreInfo* destroyInfo = new DestroySemaphoreInfo(mDestroySemaphore, mDevice,
                                                                 semaphore);
    GrSemaphoresSubmitted submitted =
            bufferInfo->skSurface->flush(SkSurface::BackendSurfaceAccess::kPresent,
                                         kNone_GrFlushFlags, 1, &backendSemaphore,
                                         destroy_semaphore, destroyInfo);
    if (submitted == GrSemaphoresSubmitted::kYes) {
        VkSemaphoreGetFdInfoKHR getFdInfo;
        getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
        getFdInfo.pNext = nullptr;
        getFdInfo.semaphore = semaphore;
        getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

        err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
        ALOGE_IF(VK_SUCCESS != err, "VulkanManager::swapBuffers(): Failed to get semaphore Fd");
    } else {
        ALOGE("VulkanManager::swapBuffers(): Semaphore submission failed");
        mQueueWaitIdle(mGraphicsQueue);
    }
    destroy_semaphore(destroyInfo);

    surface->presentCurrentBuffer(dirtyRect, fenceFd);
}

void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mPresentQueue) {
        mQueueWaitIdle(mPresentQueue);
    }
    mDeviceWaitIdle(mDevice);

    delete surface;
}

VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode,
                                            sk_sp<SkColorSpace> surfaceColorSpace,
                                            SkColorType surfaceColorType, GrContext* grContext,
                                            uint32_t extraBuffers) {
    LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
    if (!window) {
        return nullptr;
    }

    return VulkanSurface::Create(window, colorMode, surfaceColorType, surfaceColorSpace, grContext,
                                 *this, extraBuffers);
}

status_t VulkanManager::fenceWait(sp<Fence>& fence, GrContext* grContext) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    // Block GPU on the fence.
    int fenceFd = fence->dup();
    if (fenceFd == -1) {
        ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
        return -errno;
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("Failed to create import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }
    VkImportSemaphoreFdInfoKHR importInfo;
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
    importInfo.pNext = nullptr;
    importInfo.semaphore = semaphore;
    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
    importInfo.fd = fenceFd;

    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
    if (VK_SUCCESS != err) {
        mDestroySemaphore(mDevice, semaphore, nullptr);
        ALOGE("Failed to import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }

    GrBackendSemaphore beSemaphore;
    beSemaphore.initVulkan(semaphore);

    // Skia takes ownership of the semaphore and will delete it once the wait has finished.
    grContext->wait(1, &beSemaphore);
    grContext->flush();

    return OK;
}

status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence, GrContext* grContext) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    DestroySemaphoreInfo* destroyInfo = new DestroySemaphoreInfo(mDestroySemaphore, mDevice,
                                                                 semaphore);
    // Even if Skia fails to submit the semaphore, it will still call the destroy_semaphore
    // callback, which will remove its ref to the semaphore. The VulkanManager must still release
    // its ref when it is done with the semaphore.
    GrSemaphoresSubmitted submitted =
            grContext->flush(kNone_GrFlushFlags, 1, &backendSemaphore,
                             destroy_semaphore, destroyInfo);

    if (submitted == GrSemaphoresSubmitted::kNo) {
        ALOGE("VulkanManager::createReleaseFence: Failed to submit semaphore");
        destroy_semaphore(destroyInfo);
        return INVALID_OPERATION;
    }

    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    destroy_semaphore(destroyInfo);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        return INVALID_OPERATION;
    }
    nativeFence = new Fence(fenceFd);

    return OK;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */