blob: 280f7d3489d2ec38a3a675fb792288baca3be0cb [file] [log] [blame]
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#include "VulkanManager.h"

#include <android/sync.h>
#include <gui/Surface.h>
#include <unistd.h>

#include "Properties.h"
#include "RenderThread.h"
#include "renderstate/RenderState.h"
#include "utils/FatVector.h"
#include "utils/TraceUtils.h"

#include <GrBackendSemaphore.h>
#include <GrBackendSurface.h>
#include <GrContext.h>
#include <GrTypes.h>
#include <vk/GrVkExtensions.h>
#include <vk/GrVkTypes.h>
34
35namespace android {
36namespace uirenderer {
37namespace renderthread {
38
Bo Liu7b8c1eb2019-01-08 20:17:55 -080039static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
40 // All Vulkan structs that could be part of the features chain will start with the
41 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
42 // so we can get access to the pNext for the next struct.
43 struct CommonVulkanHeader {
44 VkStructureType sType;
John Reck0fa0cbc2019-04-05 16:57:46 -070045 void* pNext;
Bo Liu7b8c1eb2019-01-08 20:17:55 -080046 };
47
48 void* pNext = features.pNext;
49 while (pNext) {
50 void* current = pNext;
51 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
52 free(current);
53 }
54}
55
// GET_PROC resolves a loader-level Vulkan entry point, GET_INST_PROC an
// instance-level one, and GET_DEV_PROC a device-level one; each caches the
// result in the matching m<Name> member function pointer.
Greg Daniel2ff202712018-06-14 11:50:10 -040056#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
57#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
58#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050059
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050060void VulkanManager::destroy() {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050061 if (VK_NULL_HANDLE != mCommandPool) {
Greg Daniel2ff202712018-06-14 11:50:10 -040062 mDestroyCommandPool(mDevice, mCommandPool, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050063 mCommandPool = VK_NULL_HANDLE;
64 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050065
Greg Daniel2ff202712018-06-14 11:50:10 -040066 if (mDevice != VK_NULL_HANDLE) {
67 mDeviceWaitIdle(mDevice);
68 mDestroyDevice(mDevice, nullptr);
John Reck1bcacfd2017-11-03 10:12:19 -070069 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050070
Greg Daniel2ff202712018-06-14 11:50:10 -040071 if (mInstance != VK_NULL_HANDLE) {
72 mDestroyInstance(mInstance, nullptr);
73 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050074
Greg Daniel2ff202712018-06-14 11:50:10 -040075 mGraphicsQueue = VK_NULL_HANDLE;
76 mPresentQueue = VK_NULL_HANDLE;
77 mDevice = VK_NULL_HANDLE;
78 mPhysicalDevice = VK_NULL_HANDLE;
79 mInstance = VK_NULL_HANDLE;
Roman Kiryanov74ace839e2019-03-07 18:22:19 -080080 mInstanceExtensionsOwner.clear();
Bo Liu7b8c1eb2019-01-08 20:17:55 -080081 mInstanceExtensions.clear();
Roman Kiryanov74ace839e2019-03-07 18:22:19 -080082 mDeviceExtensionsOwner.clear();
Bo Liu7b8c1eb2019-01-08 20:17:55 -080083 mDeviceExtensions.clear();
84 free_features_extensions_structs(mPhysicalDeviceFeatures2);
85 mPhysicalDeviceFeatures2 = {};
Greg Daniel2ff202712018-06-14 11:50:10 -040086}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050087
Stan Iliev90276c82019-02-03 18:01:02 -050088void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
Greg Daniel2ff202712018-06-14 11:50:10 -040089 VkResult err;
90
91 constexpr VkApplicationInfo app_info = {
John Reck0fa0cbc2019-04-05 16:57:46 -070092 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
93 nullptr, // pNext
94 "android framework", // pApplicationName
95 0, // applicationVersion
96 "android framework", // pEngineName
97 0, // engineVerison
98 mAPIVersion, // apiVersion
Greg Daniel2ff202712018-06-14 11:50:10 -040099 };
100
Greg Daniel2ff202712018-06-14 11:50:10 -0400101 {
102 GET_PROC(EnumerateInstanceExtensionProperties);
103
104 uint32_t extensionCount = 0;
105 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
Stan Iliev90276c82019-02-03 18:01:02 -0500106 LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
Roman Kiryanov74ace839e2019-03-07 18:22:19 -0800107 mInstanceExtensionsOwner.resize(extensionCount);
108 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount,
109 mInstanceExtensionsOwner.data());
Stan Iliev90276c82019-02-03 18:01:02 -0500110 LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
Greg Daniel2ff202712018-06-14 11:50:10 -0400111 bool hasKHRSurfaceExtension = false;
112 bool hasKHRAndroidSurfaceExtension = false;
Roman Kiryanov74ace839e2019-03-07 18:22:19 -0800113 for (const VkExtensionProperties& extension : mInstanceExtensionsOwner) {
114 mInstanceExtensions.push_back(extension.extensionName);
115 if (!strcmp(extension.extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400116 hasKHRSurfaceExtension = true;
117 }
Roman Kiryanov74ace839e2019-03-07 18:22:19 -0800118 if (!strcmp(extension.extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400119 hasKHRAndroidSurfaceExtension = true;
120 }
121 }
Stan Iliev90276c82019-02-03 18:01:02 -0500122 LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
Greg Daniel2ff202712018-06-14 11:50:10 -0400123 }
124
125 const VkInstanceCreateInfo instance_create = {
John Reck0fa0cbc2019-04-05 16:57:46 -0700126 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
127 nullptr, // pNext
128 0, // flags
129 &app_info, // pApplicationInfo
130 0, // enabledLayerNameCount
131 nullptr, // ppEnabledLayerNames
132 (uint32_t)mInstanceExtensions.size(), // enabledExtensionNameCount
133 mInstanceExtensions.data(), // ppEnabledExtensionNames
Greg Daniel2ff202712018-06-14 11:50:10 -0400134 };
135
136 GET_PROC(CreateInstance);
137 err = mCreateInstance(&instance_create, nullptr, &mInstance);
Stan Iliev90276c82019-02-03 18:01:02 -0500138 LOG_ALWAYS_FATAL_IF(err < 0);
Greg Daniel2ff202712018-06-14 11:50:10 -0400139
140 GET_INST_PROC(DestroyInstance);
141 GET_INST_PROC(EnumeratePhysicalDevices);
Greg Daniel96259622018-10-01 14:42:56 -0400142 GET_INST_PROC(GetPhysicalDeviceProperties);
Greg Daniel2ff202712018-06-14 11:50:10 -0400143 GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
Greg Daniela227dbb2018-08-20 09:19:48 -0400144 GET_INST_PROC(GetPhysicalDeviceFeatures2);
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500145 GET_INST_PROC(GetPhysicalDeviceImageFormatProperties2);
Greg Daniel2ff202712018-06-14 11:50:10 -0400146 GET_INST_PROC(CreateDevice);
147 GET_INST_PROC(EnumerateDeviceExtensionProperties);
Greg Daniel2ff202712018-06-14 11:50:10 -0400148
149 uint32_t gpuCount;
Stan Iliev90276c82019-02-03 18:01:02 -0500150 LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
151 LOG_ALWAYS_FATAL_IF(!gpuCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400152 // Just returning the first physical device instead of getting the whole array. Since there
153 // should only be one device on android.
154 gpuCount = 1;
155 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
156 // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
Stan Iliev90276c82019-02-03 18:01:02 -0500157 LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);
Greg Daniel2ff202712018-06-14 11:50:10 -0400158
Greg Daniel96259622018-10-01 14:42:56 -0400159 VkPhysicalDeviceProperties physDeviceProperties;
160 mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
Stan Iliev90276c82019-02-03 18:01:02 -0500161 LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));
Stan Ilievbf99c442019-03-29 11:09:11 -0400162 mDriverVersion = physDeviceProperties.driverVersion;
Greg Daniel96259622018-10-01 14:42:56 -0400163
Greg Daniel2ff202712018-06-14 11:50:10 -0400164 // query to get the initial queue props size
165 uint32_t queueCount;
166 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
Stan Iliev90276c82019-02-03 18:01:02 -0500167 LOG_ALWAYS_FATAL_IF(!queueCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400168
169 // now get the actual queue props
170 std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
171 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());
172
173 // iterate to find the graphics queue
174 mGraphicsQueueIndex = queueCount;
175 for (uint32_t i = 0; i < queueCount; i++) {
176 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
177 mGraphicsQueueIndex = i;
178 break;
179 }
180 }
Stan Iliev90276c82019-02-03 18:01:02 -0500181 LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400182
183 // All physical devices and queue families on Android must be capable of
184 // presentation with any native window. So just use the first one.
185 mPresentQueueIndex = 0;
186
Greg Daniel2ff202712018-06-14 11:50:10 -0400187 {
188 uint32_t extensionCount = 0;
189 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
John Reck0fa0cbc2019-04-05 16:57:46 -0700190 nullptr);
Stan Iliev90276c82019-02-03 18:01:02 -0500191 LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
Roman Kiryanov74ace839e2019-03-07 18:22:19 -0800192 mDeviceExtensionsOwner.resize(extensionCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400193 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
John Reck0fa0cbc2019-04-05 16:57:46 -0700194 mDeviceExtensionsOwner.data());
Stan Iliev90276c82019-02-03 18:01:02 -0500195 LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
Greg Daniel2ff202712018-06-14 11:50:10 -0400196 bool hasKHRSwapchainExtension = false;
Roman Kiryanov74ace839e2019-03-07 18:22:19 -0800197 for (const VkExtensionProperties& extension : mDeviceExtensionsOwner) {
198 mDeviceExtensions.push_back(extension.extensionName);
199 if (!strcmp(extension.extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400200 hasKHRSwapchainExtension = true;
201 }
202 }
Stan Iliev90276c82019-02-03 18:01:02 -0500203 LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
Greg Daniel2ff202712018-06-14 11:50:10 -0400204 }
205
John Reck0fa0cbc2019-04-05 16:57:46 -0700206 auto getProc = [](const char* proc_name, VkInstance instance, VkDevice device) {
Greg Daniela227dbb2018-08-20 09:19:48 -0400207 if (device != VK_NULL_HANDLE) {
208 return vkGetDeviceProcAddr(device, proc_name);
209 }
210 return vkGetInstanceProcAddr(instance, proc_name);
211 };
Roman Kiryanov74ace839e2019-03-07 18:22:19 -0800212
Bo Liu7b8c1eb2019-01-08 20:17:55 -0800213 grExtensions.init(getProc, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
John Reck0fa0cbc2019-04-05 16:57:46 -0700214 mInstanceExtensions.data(), mDeviceExtensions.size(),
215 mDeviceExtensions.data());
Greg Daniela227dbb2018-08-20 09:19:48 -0400216
Stan Iliev90276c82019-02-03 18:01:02 -0500217 LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));
Greg Daniel26e0dca2018-09-18 10:33:19 -0400218
Greg Daniela227dbb2018-08-20 09:19:48 -0400219 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
220 features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
221 features.pNext = nullptr;
222
223 // Setup all extension feature structs we may want to use.
224 void** tailPNext = &features.pNext;
225
226 if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
227 VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
John Reck0fa0cbc2019-04-05 16:57:46 -0700228 blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*)malloc(
Greg Daniela227dbb2018-08-20 09:19:48 -0400229 sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
230 LOG_ALWAYS_FATAL_IF(!blend);
231 blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
232 blend->pNext = nullptr;
233 *tailPNext = blend;
234 tailPNext = &blend->pNext;
235 }
236
Greg Daniel05036172018-11-28 17:08:04 -0500237 VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
John Reck0fa0cbc2019-04-05 16:57:46 -0700238 ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*)malloc(
Greg Daniel05036172018-11-28 17:08:04 -0500239 sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
240 LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
241 ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
242 ycbcrFeature->pNext = nullptr;
243 *tailPNext = ycbcrFeature;
244 tailPNext = &ycbcrFeature->pNext;
245
Greg Daniela227dbb2018-08-20 09:19:48 -0400246 // query to get the physical device features
247 mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
Greg Daniel2ff202712018-06-14 11:50:10 -0400248 // this looks like it would slow things down,
249 // and we can't depend on it on all platforms
Greg Daniela227dbb2018-08-20 09:19:48 -0400250 features.features.robustBufferAccess = VK_FALSE;
Greg Daniel2ff202712018-06-14 11:50:10 -0400251
John Reck0fa0cbc2019-04-05 16:57:46 -0700252 float queuePriorities[1] = {0.0};
Greg Daniel2ff202712018-06-14 11:50:10 -0400253
Stan Iliev7e733362019-02-28 13:16:36 -0500254 void* queueNextPtr = nullptr;
255
256 VkDeviceQueueGlobalPriorityCreateInfoEXT queuePriorityCreateInfo;
257
John Reck0fa0cbc2019-04-05 16:57:46 -0700258 if (Properties::contextPriority != 0 &&
259 grExtensions.hasExtension(VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, 2)) {
Stan Iliev7e733362019-02-28 13:16:36 -0500260 memset(&queuePriorityCreateInfo, 0, sizeof(VkDeviceQueueGlobalPriorityCreateInfoEXT));
261 queuePriorityCreateInfo.sType =
John Reck0fa0cbc2019-04-05 16:57:46 -0700262 VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT;
Stan Iliev7e733362019-02-28 13:16:36 -0500263 queuePriorityCreateInfo.pNext = nullptr;
264 switch (Properties::contextPriority) {
265 case EGL_CONTEXT_PRIORITY_LOW_IMG:
266 queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT;
267 break;
268 case EGL_CONTEXT_PRIORITY_MEDIUM_IMG:
269 queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
270 break;
271 case EGL_CONTEXT_PRIORITY_HIGH_IMG:
272 queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT;
273 break;
274 default:
275 LOG_ALWAYS_FATAL("Unsupported context priority");
John Reck0fa0cbc2019-04-05 16:57:46 -0700276 }
277 queueNextPtr = &queuePriorityCreateInfo;
Stan Iliev7e733362019-02-28 13:16:36 -0500278 }
279
Greg Daniel2ff202712018-06-14 11:50:10 -0400280 const VkDeviceQueueCreateInfo queueInfo[2] = {
John Reck0fa0cbc2019-04-05 16:57:46 -0700281 {
282 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
283 queueNextPtr, // pNext
284 0, // VkDeviceQueueCreateFlags
285 mGraphicsQueueIndex, // queueFamilyIndex
286 1, // queueCount
287 queuePriorities, // pQueuePriorities
288 },
289 {
290 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
291 queueNextPtr, // pNext
292 0, // VkDeviceQueueCreateFlags
293 mPresentQueueIndex, // queueFamilyIndex
294 1, // queueCount
295 queuePriorities, // pQueuePriorities
296 }};
Greg Daniel2ff202712018-06-14 11:50:10 -0400297 uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;
298
299 const VkDeviceCreateInfo deviceInfo = {
John Reck0fa0cbc2019-04-05 16:57:46 -0700300 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
301 &features, // pNext
302 0, // VkDeviceCreateFlags
303 queueInfoCount, // queueCreateInfoCount
304 queueInfo, // pQueueCreateInfos
305 0, // layerCount
306 nullptr, // ppEnabledLayerNames
307 (uint32_t)mDeviceExtensions.size(), // extensionCount
308 mDeviceExtensions.data(), // ppEnabledExtensionNames
309 nullptr, // ppEnabledFeatures
Greg Daniel2ff202712018-06-14 11:50:10 -0400310 };
311
Stan Iliev90276c82019-02-03 18:01:02 -0500312 LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));
Greg Daniel2ff202712018-06-14 11:50:10 -0400313
314 GET_DEV_PROC(GetDeviceQueue);
315 GET_DEV_PROC(DeviceWaitIdle);
316 GET_DEV_PROC(DestroyDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500317 GET_DEV_PROC(CreateCommandPool);
318 GET_DEV_PROC(DestroyCommandPool);
319 GET_DEV_PROC(AllocateCommandBuffers);
320 GET_DEV_PROC(FreeCommandBuffers);
321 GET_DEV_PROC(ResetCommandBuffer);
322 GET_DEV_PROC(BeginCommandBuffer);
323 GET_DEV_PROC(EndCommandBuffer);
324 GET_DEV_PROC(CmdPipelineBarrier);
325 GET_DEV_PROC(GetDeviceQueue);
326 GET_DEV_PROC(QueueSubmit);
327 GET_DEV_PROC(QueueWaitIdle);
328 GET_DEV_PROC(DeviceWaitIdle);
329 GET_DEV_PROC(CreateSemaphore);
330 GET_DEV_PROC(DestroySemaphore);
Greg Daniel26e0dca2018-09-18 10:33:19 -0400331 GET_DEV_PROC(ImportSemaphoreFdKHR);
332 GET_DEV_PROC(GetSemaphoreFdKHR);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500333 GET_DEV_PROC(CreateFence);
334 GET_DEV_PROC(DestroyFence);
335 GET_DEV_PROC(WaitForFences);
336 GET_DEV_PROC(ResetFences);
Greg Daniel2ff202712018-06-14 11:50:10 -0400337}
338
339void VulkanManager::initialize() {
340 if (mDevice != VK_NULL_HANDLE) {
341 return;
342 }
343
Greg Daniela227dbb2018-08-20 09:19:48 -0400344 GET_PROC(EnumerateInstanceVersion);
Greg Danieleaf310e2019-01-28 16:10:32 -0500345 uint32_t instanceVersion;
346 LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
347 LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));
Greg Daniela227dbb2018-08-20 09:19:48 -0400348
Stan Iliev981afe72019-02-13 14:24:33 -0500349 this->setupDevice(mExtensions, mPhysicalDeviceFeatures2);
Greg Daniel2ff202712018-06-14 11:50:10 -0400350
351 mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);
352
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500353 // create the command pool for the command buffers
354 if (VK_NULL_HANDLE == mCommandPool) {
355 VkCommandPoolCreateInfo commandPoolInfo;
356 memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
357 commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
358 // this needs to be on the render queue
Greg Daniel2ff202712018-06-14 11:50:10 -0400359 commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500360 commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
John Reck0fa0cbc2019-04-05 16:57:46 -0700361 SkDEBUGCODE(VkResult res =)
362 mCreateCommandPool(mDevice, &commandPoolInfo, nullptr, &mCommandPool);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500363 SkASSERT(VK_SUCCESS == res);
364 }
Greg Daniel26e0dca2018-09-18 10:33:19 -0400365 LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);
366
Greg Daniel2ff202712018-06-14 11:50:10 -0400367 mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500368
Greg Danielcd558522016-11-17 13:31:40 -0500369 if (Properties::enablePartialUpdates && Properties::useBufferAge) {
370 mSwapBehavior = SwapBehavior::BufferAge;
371 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500372}
373
Stan Iliev898123b2019-02-14 14:57:44 -0500374sk_sp<GrContext> VulkanManager::createContext(const GrContextOptions& options) {
John Reck0fa0cbc2019-04-05 16:57:46 -0700375 auto getProc = [](const char* proc_name, VkInstance instance, VkDevice device) {
Stan Iliev981afe72019-02-13 14:24:33 -0500376 if (device != VK_NULL_HANDLE) {
377 return vkGetDeviceProcAddr(device, proc_name);
378 }
379 return vkGetInstanceProcAddr(instance, proc_name);
380 };
381
382 GrVkBackendContext backendContext;
383 backendContext.fInstance = mInstance;
384 backendContext.fPhysicalDevice = mPhysicalDevice;
385 backendContext.fDevice = mDevice;
386 backendContext.fQueue = mGraphicsQueue;
387 backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
388 backendContext.fMaxAPIVersion = mAPIVersion;
389 backendContext.fVkExtensions = &mExtensions;
390 backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
391 backendContext.fGetProc = std::move(getProc);
392
393 return GrContext::MakeVulkan(backendContext, options);
394}
395
Bo Liu7b8c1eb2019-01-08 20:17:55 -0800396VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
397 return VkFunctorInitParams{
398 .instance = mInstance,
399 .physical_device = mPhysicalDevice,
400 .device = mDevice,
401 .queue = mGraphicsQueue,
402 .graphics_queue_index = mGraphicsQueueIndex,
Greg Danieleaf310e2019-01-28 16:10:32 -0500403 .api_version = mAPIVersion,
Bo Liu7b8c1eb2019-01-08 20:17:55 -0800404 .enabled_instance_extension_names = mInstanceExtensions.data(),
405 .enabled_instance_extension_names_length =
406 static_cast<uint32_t>(mInstanceExtensions.size()),
407 .enabled_device_extension_names = mDeviceExtensions.data(),
408 .enabled_device_extension_names_length =
409 static_cast<uint32_t>(mDeviceExtensions.size()),
410 .device_features_2 = &mPhysicalDeviceFeatures2,
411 };
412}
413
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500414Frame VulkanManager::dequeueNextBuffer(VulkanSurface* surface) {
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500415 VulkanSurface::NativeBufferInfo* bufferInfo = surface->dequeueNativeBuffer();
416
417 if (bufferInfo == nullptr) {
418 ALOGE("VulkanSurface::dequeueNativeBuffer called with an invalid surface!");
419 return Frame(-1, -1, 0);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500420 }
421
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500422 LOG_ALWAYS_FATAL_IF(!bufferInfo->dequeued);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500423
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500424 if (bufferInfo->dequeue_fence != -1) {
Stan Iliev197843d2019-03-21 11:34:15 -0400425 struct sync_file_info* finfo = sync_file_info(bufferInfo->dequeue_fence);
426 bool isSignalPending = false;
427 if (finfo != NULL) {
428 isSignalPending = finfo->status != 1;
429 sync_file_info_free(finfo);
430 }
431 if (isSignalPending) {
432 int fence_clone = dup(bufferInfo->dequeue_fence);
433 if (fence_clone == -1) {
434 ALOGE("dup(fence) failed, stalling until signalled: %s (%d)", strerror(errno),
435 errno);
436 sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
437 } else {
438 VkSemaphoreCreateInfo semaphoreInfo;
439 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
440 semaphoreInfo.pNext = nullptr;
441 semaphoreInfo.flags = 0;
442 VkSemaphore semaphore;
443 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
444 LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err, "Failed to create import semaphore, err: %d",
445 err);
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500446
Stan Iliev197843d2019-03-21 11:34:15 -0400447 VkImportSemaphoreFdInfoKHR importInfo;
448 importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
449 importInfo.pNext = nullptr;
450 importInfo.semaphore = semaphore;
451 importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
452 importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
453 importInfo.fd = fence_clone;
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500454
Stan Iliev197843d2019-03-21 11:34:15 -0400455 err = mImportSemaphoreFdKHR(mDevice, &importInfo);
456 LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err, "Failed to import semaphore, err: %d", err);
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500457
Stan Iliev197843d2019-03-21 11:34:15 -0400458 GrBackendSemaphore backendSemaphore;
459 backendSemaphore.initVulkan(semaphore);
460 bufferInfo->skSurface->wait(1, &backendSemaphore);
461 // The following flush blocks the GPU immediately instead of waiting for other
462 // drawing ops. It seems dequeue_fence is not respected otherwise.
John Reck0fa0cbc2019-04-05 16:57:46 -0700463 // TODO: remove the flush after finding why backendSemaphore is not working.
Stan Iliev197843d2019-03-21 11:34:15 -0400464 bufferInfo->skSurface->flush();
465 }
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500466 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500467 }
468
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500469 int bufferAge = (mSwapBehavior == SwapBehavior::Discard) ? 0 : surface->getCurrentBuffersAge();
470 return Frame(surface->logicalWidth(), surface->logicalHeight(), bufferAge);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500471}
472
Greg Danield92a9b12019-04-23 10:11:04 -0400473struct DestroySemaphoreInfo {
474 PFN_vkDestroySemaphore mDestroyFunction;
475 VkDevice mDevice;
476 VkSemaphore mSemaphore;
Greg Danielfd429392019-05-09 15:44:56 -0400477 // We need to make sure we don't delete the VkSemaphore until it is done being used by both Skia
478 // (including by the GPU) and inside the VulkanManager. So we always start with two refs, one
479 // owned by Skia and one owned by the VulkanManager. The refs are decremented each time
480 // destroy_semaphore is called with this object. Skia will call destroy_semaphore once it is
481 // done with the semaphore and the GPU has finished work on the semaphore. The VulkanManager
482 // calls destroy_semaphore after sending the semaphore to Skia and exporting it if need be.
483 int mRefs = 2;
Greg Danield92a9b12019-04-23 10:11:04 -0400484
485 DestroySemaphoreInfo(PFN_vkDestroySemaphore destroyFunction, VkDevice device,
486 VkSemaphore semaphore)
487 : mDestroyFunction(destroyFunction), mDevice(device), mSemaphore(semaphore) {}
488};
489
490static void destroy_semaphore(void* context) {
491 DestroySemaphoreInfo* info = reinterpret_cast<DestroySemaphoreInfo*>(context);
Greg Danielfd429392019-05-09 15:44:56 -0400492 --info->mRefs;
493 if (!info->mRefs) {
494 info->mDestroyFunction(info->mDevice, info->mSemaphore, nullptr);
495 delete info;
496 }
Greg Danield92a9b12019-04-23 10:11:04 -0400497}
498
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500499void VulkanManager::swapBuffers(VulkanSurface* surface, const SkRect& dirtyRect) {
500 if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
501 ATRACE_NAME("Finishing GPU work");
502 mDeviceWaitIdle(mDevice);
Stan Iliev305e13a2018-11-13 11:14:48 -0500503 }
504
Stan Ilievbc5f06b2019-03-26 15:14:34 -0400505 VulkanSurface::NativeBufferInfo* bufferInfo = surface->getCurrentBufferInfo();
506 if (!bufferInfo) {
507 // If VulkanSurface::dequeueNativeBuffer failed earlier, then swapBuffers is a no-op.
508 return;
509 }
510
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500511 VkExportSemaphoreCreateInfo exportInfo;
512 exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
513 exportInfo.pNext = nullptr;
514 exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500515
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500516 VkSemaphoreCreateInfo semaphoreInfo;
517 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
518 semaphoreInfo.pNext = &exportInfo;
519 semaphoreInfo.flags = 0;
520 VkSemaphore semaphore;
521 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
522 ALOGE_IF(VK_SUCCESS != err, "VulkanManager::swapBuffers(): Failed to create semaphore");
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500523
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500524 GrBackendSemaphore backendSemaphore;
525 backendSemaphore.initVulkan(semaphore);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500526
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500527 int fenceFd = -1;
Greg Danield92a9b12019-04-23 10:11:04 -0400528 DestroySemaphoreInfo* destroyInfo = new DestroySemaphoreInfo(mDestroySemaphore, mDevice,
529 semaphore);
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500530 GrSemaphoresSubmitted submitted =
531 bufferInfo->skSurface->flush(SkSurface::BackendSurfaceAccess::kPresent,
Greg Danield92a9b12019-04-23 10:11:04 -0400532 kNone_GrFlushFlags, 1, &backendSemaphore,
533 destroy_semaphore, destroyInfo);
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500534 if (submitted == GrSemaphoresSubmitted::kYes) {
535 VkSemaphoreGetFdInfoKHR getFdInfo;
536 getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
537 getFdInfo.pNext = nullptr;
538 getFdInfo.semaphore = semaphore;
539 getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500540
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500541 err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
542 ALOGE_IF(VK_SUCCESS != err, "VulkanManager::swapBuffers(): Failed to get semaphore Fd");
543 } else {
544 ALOGE("VulkanManager::swapBuffers(): Semaphore submission failed");
545 mQueueWaitIdle(mGraphicsQueue);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500546 }
Greg Danielfd429392019-05-09 15:44:56 -0400547 destroy_semaphore(destroyInfo);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500548
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500549 surface->presentCurrentBuffer(dirtyRect, fenceFd);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500550}
551
552void VulkanManager::destroySurface(VulkanSurface* surface) {
553 // Make sure all submit commands have finished before starting to destroy objects.
554 if (VK_NULL_HANDLE != mPresentQueue) {
555 mQueueWaitIdle(mPresentQueue);
556 }
Greg Daniel2ff202712018-06-14 11:50:10 -0400557 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500558
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500559 delete surface;
560}
561
Stan Iliev987a80c02018-12-04 10:07:21 -0500562VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode,
Peiyong Lin3bff1352018-12-11 07:56:07 -0800563 sk_sp<SkColorSpace> surfaceColorSpace,
John Reck0fa0cbc2019-04-05 16:57:46 -0700564 SkColorType surfaceColorType, GrContext* grContext,
565 uint32_t extraBuffers) {
Stan Iliev981afe72019-02-13 14:24:33 -0500566 LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500567 if (!window) {
568 return nullptr;
569 }
570
Derek Sollenbergera19b71a2019-02-15 16:36:30 -0500571 return VulkanSurface::Create(window, colorMode, surfaceColorType, surfaceColorSpace, grContext,
John Reck0fa0cbc2019-04-05 16:57:46 -0700572 *this, extraBuffers);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500573}
574
Greg Danield92a9b12019-04-23 10:11:04 -0400575status_t VulkanManager::fenceWait(sp<Fence>& fence, GrContext* grContext) {
Greg Daniel26e0dca2018-09-18 10:33:19 -0400576 if (!hasVkContext()) {
577 ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
578 return INVALID_OPERATION;
579 }
580
Stan Iliev7a081272018-10-26 17:54:18 -0400581 // Block GPU on the fence.
582 int fenceFd = fence->dup();
583 if (fenceFd == -1) {
584 ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
585 return -errno;
Stan Iliev564ca3e2018-09-04 22:00:00 +0000586 }
Stan Iliev7a081272018-10-26 17:54:18 -0400587
588 VkSemaphoreCreateInfo semaphoreInfo;
589 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
590 semaphoreInfo.pNext = nullptr;
591 semaphoreInfo.flags = 0;
592 VkSemaphore semaphore;
593 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
594 if (VK_SUCCESS != err) {
595 ALOGE("Failed to create import semaphore, err: %d", err);
596 return UNKNOWN_ERROR;
597 }
598 VkImportSemaphoreFdInfoKHR importInfo;
599 importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
600 importInfo.pNext = nullptr;
601 importInfo.semaphore = semaphore;
602 importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
603 importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
604 importInfo.fd = fenceFd;
605
606 err = mImportSemaphoreFdKHR(mDevice, &importInfo);
607 if (VK_SUCCESS != err) {
Greg Danield92a9b12019-04-23 10:11:04 -0400608 mDestroySemaphore(mDevice, semaphore, nullptr);
Stan Iliev7a081272018-10-26 17:54:18 -0400609 ALOGE("Failed to import semaphore, err: %d", err);
610 return UNKNOWN_ERROR;
611 }
612
Greg Danield92a9b12019-04-23 10:11:04 -0400613 GrBackendSemaphore beSemaphore;
614 beSemaphore.initVulkan(semaphore);
Stan Iliev7a081272018-10-26 17:54:18 -0400615
Greg Danield92a9b12019-04-23 10:11:04 -0400616 // Skia takes ownership of the semaphore and will delete it once the wait has finished.
617 grContext->wait(1, &beSemaphore);
618 grContext->flush();
Stan Iliev7a081272018-10-26 17:54:18 -0400619
Stan Iliev564ca3e2018-09-04 22:00:00 +0000620 return OK;
621}
622
Greg Danield92a9b12019-04-23 10:11:04 -0400623status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence, GrContext* grContext) {
Greg Daniel26e0dca2018-09-18 10:33:19 -0400624 if (!hasVkContext()) {
625 ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
626 return INVALID_OPERATION;
627 }
628
Greg Daniel26e0dca2018-09-18 10:33:19 -0400629 VkExportSemaphoreCreateInfo exportInfo;
630 exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
631 exportInfo.pNext = nullptr;
632 exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
633
634 VkSemaphoreCreateInfo semaphoreInfo;
635 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
636 semaphoreInfo.pNext = &exportInfo;
637 semaphoreInfo.flags = 0;
638 VkSemaphore semaphore;
639 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
640 if (VK_SUCCESS != err) {
641 ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
642 return INVALID_OPERATION;
643 }
644
Greg Danield92a9b12019-04-23 10:11:04 -0400645 GrBackendSemaphore backendSemaphore;
646 backendSemaphore.initVulkan(semaphore);
Greg Daniel26e0dca2018-09-18 10:33:19 -0400647
Greg Danield92a9b12019-04-23 10:11:04 -0400648 DestroySemaphoreInfo* destroyInfo = new DestroySemaphoreInfo(mDestroySemaphore, mDevice,
649 semaphore);
Greg Danielfd429392019-05-09 15:44:56 -0400650 // Even if Skia fails to submit the semaphore, it will still call the destroy_semaphore callback
651 // which will remove its ref to the semaphore. The VulkanManager must still release its ref,
652 // when it is done with the semaphore.
Greg Danield92a9b12019-04-23 10:11:04 -0400653 GrSemaphoresSubmitted submitted =
654 grContext->flush(kNone_GrFlushFlags, 1, &backendSemaphore,
655 destroy_semaphore, destroyInfo);
Greg Daniel26e0dca2018-09-18 10:33:19 -0400656
Greg Danield92a9b12019-04-23 10:11:04 -0400657 if (submitted == GrSemaphoresSubmitted::kNo) {
658 ALOGE("VulkanManager::createReleaseFence: Failed to submit semaphore");
Greg Danielfd429392019-05-09 15:44:56 -0400659 destroy_semaphore(destroyInfo);
Greg Danield92a9b12019-04-23 10:11:04 -0400660 return INVALID_OPERATION;
661 }
Greg Daniel26e0dca2018-09-18 10:33:19 -0400662
663 VkSemaphoreGetFdInfoKHR getFdInfo;
664 getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
665 getFdInfo.pNext = nullptr;
666 getFdInfo.semaphore = semaphore;
667 getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
668
669 int fenceFd = 0;
670
671 err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
Greg Danielfd429392019-05-09 15:44:56 -0400672 destroy_semaphore(destroyInfo);
Greg Daniel26e0dca2018-09-18 10:33:19 -0400673 if (VK_SUCCESS != err) {
674 ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
675 return INVALID_OPERATION;
676 }
677 nativeFence = new Fence(fenceFd);
678
Stan Iliev564ca3e2018-09-04 22:00:00 +0000679 return OK;
680}
681
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500682} /* namespace renderthread */
683} /* namespace uirenderer */
684} /* namespace android */