blob: 488117253e7a4a8e47ef275869dc3a99684eb56e [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
Greg Danielcd558522016-11-17 13:31:40 -050019#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050020#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050021#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050022#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050023
Greg Danielac2d2322017-07-12 11:30:15 -040024#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050025#include <GrContext.h>
26#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040027#include <GrTypes.h>
28#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050029#include <vk/GrVkTypes.h>
30
31namespace android {
32namespace uirenderer {
33namespace renderthread {
34
// Helpers that resolve a Vulkan entry point "vkF" and store it in the matching
// member function pointer "mF".
//   GET_PROC       — global (pre-instance) functions, resolved with a null instance.
//   GET_INST_PROC  — instance-level functions, resolved against mInstance.
//   GET_DEV_PROC   — device-level functions, resolved against mDevice.
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)

// The manager only records the owning RenderThread here; all Vulkan state is
// created lazily in initialize().
VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
// Tears down everything initialize() created, in reverse dependency order:
// GrContext first (it may still reference the device), then the command pool
// (owned by the device), then the device (after draining it), then the
// instance. Handles are nulled at the end so destroy() is safe to call on a
// partially-constructed manager (setupDevice() calls it on every error path).
void VulkanManager::destroy() {
    mRenderThread.renderState().onContextDestroyed();
    mRenderThread.setGrContext(nullptr);

    if (VK_NULL_HANDLE != mCommandPool) {
        // Pool must be destroyed before the device that owns it.
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        // Wait for all queues to drain before destroying the device.
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    // Reset all cached handles so a later initialize() starts from scratch.
    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050065
Greg Daniela227dbb2018-08-20 09:19:48 -040066bool VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
Greg Daniel2ff202712018-06-14 11:50:10 -040067 VkResult err;
68
69 constexpr VkApplicationInfo app_info = {
70 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
71 nullptr, // pNext
72 "android framework", // pApplicationName
73 0, // applicationVersion
74 "android framework", // pEngineName
75 0, // engineVerison
76 VK_MAKE_VERSION(1, 0, 0), // apiVersion
77 };
78
79 std::vector<const char*> instanceExtensions;
80 {
81 GET_PROC(EnumerateInstanceExtensionProperties);
82
83 uint32_t extensionCount = 0;
84 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
85 if (VK_SUCCESS != err) {
86 return false;
87 }
88 std::unique_ptr<VkExtensionProperties[]> extensions(
89 new VkExtensionProperties[extensionCount]);
90 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
91 if (VK_SUCCESS != err) {
92 return false;
93 }
94 bool hasKHRSurfaceExtension = false;
95 bool hasKHRAndroidSurfaceExtension = false;
96 for (uint32_t i = 0; i < extensionCount; ++i) {
97 instanceExtensions.push_back(extensions[i].extensionName);
98 if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
99 hasKHRSurfaceExtension = true;
100 }
101 if (!strcmp(extensions[i].extensionName,VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
102 hasKHRAndroidSurfaceExtension = true;
103 }
104 }
105 if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
106 this->destroy();
107 return false;
108 }
109 }
110
111 const VkInstanceCreateInfo instance_create = {
112 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
113 nullptr, // pNext
114 0, // flags
115 &app_info, // pApplicationInfo
116 0, // enabledLayerNameCount
117 nullptr, // ppEnabledLayerNames
118 (uint32_t) instanceExtensions.size(), // enabledExtensionNameCount
119 instanceExtensions.data(), // ppEnabledExtensionNames
120 };
121
122 GET_PROC(CreateInstance);
123 err = mCreateInstance(&instance_create, nullptr, &mInstance);
124 if (err < 0) {
125 this->destroy();
126 return false;
127 }
128
129 GET_INST_PROC(DestroyInstance);
130 GET_INST_PROC(EnumeratePhysicalDevices);
131 GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
Greg Daniela227dbb2018-08-20 09:19:48 -0400132 GET_INST_PROC(GetPhysicalDeviceFeatures2);
Greg Daniel2ff202712018-06-14 11:50:10 -0400133 GET_INST_PROC(CreateDevice);
134 GET_INST_PROC(EnumerateDeviceExtensionProperties);
135 GET_INST_PROC(CreateAndroidSurfaceKHR);
136 GET_INST_PROC(DestroySurfaceKHR);
137 GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
138 GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
139 GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
140 GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
141
142 uint32_t gpuCount;
143 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
144 if (err) {
145 this->destroy();
146 return false;
147 }
148 if (!gpuCount) {
149 this->destroy();
150 return false;
151 }
152 // Just returning the first physical device instead of getting the whole array. Since there
153 // should only be one device on android.
154 gpuCount = 1;
155 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
156 // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
157 if (err && VK_INCOMPLETE != err) {
158 this->destroy();
159 return false;
160 }
161
162 // query to get the initial queue props size
163 uint32_t queueCount;
164 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
165 if (!queueCount) {
166 this->destroy();
167 return false;
168 }
169
170 // now get the actual queue props
171 std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
172 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());
173
174 // iterate to find the graphics queue
175 mGraphicsQueueIndex = queueCount;
176 for (uint32_t i = 0; i < queueCount; i++) {
177 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
178 mGraphicsQueueIndex = i;
179 break;
180 }
181 }
182 if (mGraphicsQueueIndex == queueCount) {
183 this->destroy();
184 return false;
185 }
186
187 // All physical devices and queue families on Android must be capable of
188 // presentation with any native window. So just use the first one.
189 mPresentQueueIndex = 0;
190
191 std::vector<const char*> deviceExtensions;
192 {
193 uint32_t extensionCount = 0;
194 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
195 nullptr);
196 if (VK_SUCCESS != err) {
197 this->destroy();
198 return false;
199 }
200 std::unique_ptr<VkExtensionProperties[]> extensions(
201 new VkExtensionProperties[extensionCount]);
202 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
203 extensions.get());
204 if (VK_SUCCESS != err) {
205 this->destroy();
206 return false;
207 }
208 bool hasKHRSwapchainExtension = false;
209 for (uint32_t i = 0; i < extensionCount; ++i) {
210 deviceExtensions.push_back(extensions[i].extensionName);
211 if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
212 hasKHRSwapchainExtension = true;
213 }
214 }
215 if (!hasKHRSwapchainExtension) {
216 this->destroy();
217 return false;
218 }
219 }
220
Greg Daniela227dbb2018-08-20 09:19:48 -0400221 auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
222 if (device != VK_NULL_HANDLE) {
223 return vkGetDeviceProcAddr(device, proc_name);
224 }
225 return vkGetInstanceProcAddr(instance, proc_name);
226 };
227 grExtensions.init(getProc, mInstance, mPhysicalDevice, instanceExtensions.size(),
228 instanceExtensions.data(), deviceExtensions.size(), deviceExtensions.data());
229
230 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
231 features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
232 features.pNext = nullptr;
233
234 // Setup all extension feature structs we may want to use.
235 void** tailPNext = &features.pNext;
236
237 if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
238 VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
239 blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
240 sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
241 LOG_ALWAYS_FATAL_IF(!blend);
242 blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
243 blend->pNext = nullptr;
244 *tailPNext = blend;
245 tailPNext = &blend->pNext;
246 }
247
248 // query to get the physical device features
249 mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
Greg Daniel2ff202712018-06-14 11:50:10 -0400250 // this looks like it would slow things down,
251 // and we can't depend on it on all platforms
Greg Daniela227dbb2018-08-20 09:19:48 -0400252 features.features.robustBufferAccess = VK_FALSE;
Greg Daniel2ff202712018-06-14 11:50:10 -0400253
254 float queuePriorities[1] = { 0.0 };
255
256 const VkDeviceQueueCreateInfo queueInfo[2] = {
257 {
258 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
259 nullptr, // pNext
260 0, // VkDeviceQueueCreateFlags
261 mGraphicsQueueIndex, // queueFamilyIndex
262 1, // queueCount
263 queuePriorities, // pQueuePriorities
264 },
265 {
266 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
267 nullptr, // pNext
268 0, // VkDeviceQueueCreateFlags
269 mPresentQueueIndex, // queueFamilyIndex
270 1, // queueCount
271 queuePriorities, // pQueuePriorities
272 }
273 };
274 uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;
275
276 const VkDeviceCreateInfo deviceInfo = {
277 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
Greg Daniela227dbb2018-08-20 09:19:48 -0400278 &features, // pNext
Greg Daniel2ff202712018-06-14 11:50:10 -0400279 0, // VkDeviceCreateFlags
280 queueInfoCount, // queueCreateInfoCount
281 queueInfo, // pQueueCreateInfos
282 0, // layerCount
283 nullptr, // ppEnabledLayerNames
284 (uint32_t) deviceExtensions.size(), // extensionCount
285 deviceExtensions.data(), // ppEnabledExtensionNames
Greg Daniela227dbb2018-08-20 09:19:48 -0400286 nullptr, // ppEnabledFeatures
Greg Daniel2ff202712018-06-14 11:50:10 -0400287 };
288
289 err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
290 if (err) {
291 this->destroy();
292 return false;
293 }
294
295 GET_DEV_PROC(GetDeviceQueue);
296 GET_DEV_PROC(DeviceWaitIdle);
297 GET_DEV_PROC(DestroyDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500298 GET_DEV_PROC(CreateSwapchainKHR);
299 GET_DEV_PROC(DestroySwapchainKHR);
300 GET_DEV_PROC(GetSwapchainImagesKHR);
301 GET_DEV_PROC(AcquireNextImageKHR);
302 GET_DEV_PROC(QueuePresentKHR);
303 GET_DEV_PROC(CreateCommandPool);
304 GET_DEV_PROC(DestroyCommandPool);
305 GET_DEV_PROC(AllocateCommandBuffers);
306 GET_DEV_PROC(FreeCommandBuffers);
307 GET_DEV_PROC(ResetCommandBuffer);
308 GET_DEV_PROC(BeginCommandBuffer);
309 GET_DEV_PROC(EndCommandBuffer);
310 GET_DEV_PROC(CmdPipelineBarrier);
311 GET_DEV_PROC(GetDeviceQueue);
312 GET_DEV_PROC(QueueSubmit);
313 GET_DEV_PROC(QueueWaitIdle);
314 GET_DEV_PROC(DeviceWaitIdle);
315 GET_DEV_PROC(CreateSemaphore);
316 GET_DEV_PROC(DestroySemaphore);
317 GET_DEV_PROC(CreateFence);
318 GET_DEV_PROC(DestroyFence);
319 GET_DEV_PROC(WaitForFences);
320 GET_DEV_PROC(ResetFences);
321
Greg Daniel2ff202712018-06-14 11:50:10 -0400322 return true;
323}
324
Greg Daniela227dbb2018-08-20 09:19:48 -0400325static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
326 // All Vulkan structs that could be part of the features chain will start with the
327 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
328 // so we can get access to the pNext for the next struct.
329 struct CommonVulkanHeader {
330 VkStructureType sType;
331 void* pNext;
332 };
333
334 void* pNext = features.pNext;
335 while (pNext) {
336 void* current = pNext;
337 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
338 free(current);
339 }
340}
341
// One-time Vulkan bring-up for the render thread: verifies the loader supports
// Vulkan 1.1, creates instance/device via setupDevice(), builds a Skia
// GrContext over them, creates the command pool, and fetches both queues.
// Idempotent — returns immediately if a device already exists. Any failure is
// fatal (LOG_ALWAYS_FATAL_IF aborts the process).
void VulkanManager::initialize() {
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion = 0;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    // Require a 1.1-capable loader; everything below assumes it.
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    // Stack-allocated: backendContext stores pointers to these, which is safe
    // because they outlive the GrContext::MakeVulkan call below. The pNext
    // chain malloc'd into `features` is released at the end of this function.
    GrVkExtensions extensions;
    VkPhysicalDeviceFeatures2 features;
    LOG_ALWAYS_FATAL_IF(!this->setupDevice(extensions, features));

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    // Skia resolves further Vulkan entry points through this callback:
    // device-level when a device is given, instance-level otherwise.
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fInstanceVersion = instanceVersion;
    backendContext.fVkExtensions = &extensions;
    backendContext.fDeviceFeatures2 = &features;
    backendContext.fGetProc = std::move(getProc);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        // Allow per-command-buffer reset; getBackbufferSurface() reuses the
        // transition command buffers via mResetCommandBuffer.
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }

    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    GrContextOptions options;
    options.fDisableDistanceFieldPaths = true;
    // TODO: get a string describing the SPIR-V compiler version and use it here
    mRenderThread.cacheManager().configureContext(&options, nullptr, 0);
    sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
    LOG_ALWAYS_FATAL_IF(!grContext.get());
    mRenderThread.setGrContext(grContext);

    // Skia has copied what it needs; release the malloc'd feature structs.
    free_features_extensions_structs(features);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }

    mRenderThread.renderState().onContextCreated();
}
407
// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
// previous uses have finished before returning.
VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
    SkASSERT(surface->mBackbuffers);

    // Advance round-robin through the mImageCount + 1 backbuffer slots
    // (createBuffers allocates one extra, so index mImageCount is valid).
    ++surface->mCurrentBackbufferIndex;
    if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
        surface->mCurrentBackbufferIndex = 0;
    }

    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
    // reuse its commands buffers.
    VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
    if (res != VK_SUCCESS) {
        // Wait failed (e.g. device lost); caller must treat this as no backbuffer.
        return nullptr;
    }

    return backbuffer;
}
430
// Acquires the next swapchain image, transitions it to
// COLOR_ATTACHMENT_OPTIMAL on the graphics queue, and returns the SkSurface
// that wraps it (owned by the surface via mBackbuffer). Returns nullptr if the
// image cannot be acquired (surface lost, or swapchain recreation failed).
SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    // Fences were verified signaled by getAvailableBackbuffer; re-arm them for
    // this frame's submissions.
    res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
            backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
            &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res) {
        // tear swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        // The rebuild invalidated the old backbuffer; fetch and re-arm a fresh one.
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    // First use of an image starts from UNDEFINED; subsequent frames come back
    // from the presentation engine in PRESENT_SRC_KHR.
    VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout)
            ? VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
            : VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask =
            (VK_IMAGE_LAYOUT_UNDEFINED == layout) ? 0 : VK_ACCESS_MEMORY_READ_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    // Barrier also transfers queue-family ownership: present family -> graphics family.
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
            NULL, // pNext
            srcAccessMask, // outputMask
            dstAccessMask, // inputMask
            layout, // oldLayout
            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
            mPresentQueueIndex, // srcQueueFamilyIndex
            mGraphicsQueueIndex, // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex], // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
    };
    // Record the one-barrier transition into the first per-backbuffer command buffer.
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
            nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach first fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    if (!backendRT.isValid()) {
        SkASSERT(backendRT.isValid());
        return nullptr;
    }
    backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}
535
// Releases all per-swapchain resources owned by `surface`: every backbuffer's
// semaphores, command buffers, and fences, then the backbuffer/imageInfo/image
// arrays themselves. Waits on each backbuffer's fences first so nothing is
// destroyed while the GPU may still be using it. Safe to call when the arrays
// are null (fresh or already-destroyed surface).
void VulkanManager::destroyBuffers(VulkanSurface* surface) {
    if (surface->mBackbuffers) {
        // Note: mImageCount + 1 — createBuffers allocates one extra backbuffer.
        for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
            mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
            surface->mBackbuffers[i].mImageIndex = -1;
            mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
            mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
            mFreeCommandBuffers(mDevice, mCommandPool, 2,
                    surface->mBackbuffers[i].mTransitionCmdBuffers);
            mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
            mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
        }
    }

    delete[] surface->mBackbuffers;
    surface->mBackbuffers = nullptr;
    delete[] surface->mImageInfos;
    surface->mImageInfos = nullptr;
    // The VkImages themselves are owned by the swapchain; only the array is freed.
    delete[] surface->mImages;
    surface->mImages = nullptr;
}
557
// Fully destroys a VulkanSurface: drains the present queue and device, frees
// the per-swapchain buffers, then the swapchain and the VkSurfaceKHR, and
// finally deletes the object. Takes ownership of `surface`; the pointer is
// invalid after this call.
void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mPresentQueue) {
        mQueueWaitIdle(mPresentQueue);
    }
    mDeviceWaitIdle(mDevice);

    destroyBuffers(surface);

    // Swapchain must go before the VkSurfaceKHR it was created from.
    if (VK_NULL_HANDLE != surface->mSwapchain) {
        mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
        surface->mSwapchain = VK_NULL_HANDLE;
    }

    if (VK_NULL_HANDLE != surface->mVkSurface) {
        mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
        surface->mVkSurface = VK_NULL_HANDLE;
    }
    delete surface;
}
578
// Builds the per-swapchain state for `surface`: fetches the swapchain's
// VkImages, wraps each in an SkSurface (stored in mImageInfos), and allocates
// mImageCount + 1 BackbufferInfo slots, each with two semaphores, two
// transition command buffers, and two pre-signaled fences.
//   format/extent — the swapchain's image format and dimensions, used to
//                   construct the Skia render targets.
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // First call gets the image count, second fills the array.
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        // Swapchain images have no backing allocation we manage.
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                kRGBA_8888_SkColorType, nullptr, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    // Created signaled so the first getAvailableBackbuffer() wait passes immediately.
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    // Start past the last slot so the first advance in getAvailableBackbuffer wraps to 0.
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
647
// Creates (or recreates) the VkSwapchainKHR for |surface| from the surface
// capabilities, formats, and present modes reported by the physical device.
// On success the previous swapchain (if any) is torn down after a device-wide
// idle, fresh backbuffers are created via createBuffers(), and true is
// returned. Any failing Vulkan query or creation call returns false, leaving
// the surface in its prior state.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
                                                            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    // Standard two-call Vulkan enumeration: first fetch the count, then the data.
    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
                                              &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
                                              &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
                                                   surface->mVkSurface, &presentModeCount,
                                                   nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
                                                   surface->mVkSurface, &presentModeCount,
                                                   presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    // Ask for two images beyond the driver minimum (presumably to allow
    // triple-buffering-style pipelining — TODO confirm rationale), but never
    // exceed the driver maximum (maxImageCount == 0 means "no limit").
    uint32_t imageCount = caps.minImageCount + 2;
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attchments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha &
             (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    // Prefer INHERIT (let the system compositor decide) and fall back to OPAQUE.
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick our surface format. For now, just make sure it matches our sRGB request:
    VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;

    bool wantSRGB = false;
#ifdef ANDROID_ENABLE_LINEAR_BLENDING
    wantSRGB = true;
#endif
    // Scan all reported formats for the one desired RGBA8 variant; no break,
    // so the last matching entry's colorSpace wins if duplicates exist.
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
        VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
        if (desiredFormat == surfaceFormats[i].format) {
            surfaceFormat = surfaceFormats[i].format;
            colorSpace = surfaceFormats[i].colorSpace;
        }
    }

    if (VK_FORMAT_UNDEFINED == surfaceFormat) {
        return false;
    }

    // If mailbox mode is available, use it, as it is the lowest-latency non-
    // tearing mode. If not, fall back to FIFO which is always available.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
    for (uint32_t i = 0; i < presentModeCount; ++i) {
        // use mailbox
        if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
            mode = presentModes[i];
            break;
        }
    }

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // If graphics and present live on different queue families the images must
    // be shared (CONCURRENT) between both; otherwise EXCLUSIVE is cheaper.
    uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
    if (mGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    // Passing the old swapchain lets the driver recycle resources on resize.
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        // Wait for all in-flight GPU work before tearing down the old buffers.
        mDeviceWaitIdle(mDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}
796
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500797VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
798 initialize();
799
800 if (!window) {
801 return nullptr;
802 }
803
804 VulkanSurface* surface = new VulkanSurface();
805
806 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
807 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
808 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
809 surfaceCreateInfo.pNext = nullptr;
810 surfaceCreateInfo.flags = 0;
811 surfaceCreateInfo.window = window;
812
Greg Daniel2ff202712018-06-14 11:50:10 -0400813 VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
814 &surface->mVkSurface);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500815 if (VK_SUCCESS != res) {
816 delete surface;
817 return nullptr;
818 }
819
John Reck1bcacfd2017-11-03 10:12:19 -0700820 SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
Greg Daniel2ff202712018-06-14 11:50:10 -0400821 mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
822 // All physical devices and queue families on Android must be capable of
823 // presentation with any native window.
824 SkASSERT(VK_SUCCESS == res && supported););
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500825
826 if (!createSwapchain(surface)) {
827 destroySurface(surface);
828 return nullptr;
829 }
830
831 return surface;
832}
833
834// Helper to know which src stage flags we need to set when transitioning to the present layout
835static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
836 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
837 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
838 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
839 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
840 return VK_PIPELINE_STAGE_TRANSFER_BIT;
841 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
842 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
843 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
844 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
845 return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
846 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
847 return VK_PIPELINE_STAGE_HOST_BIT;
848 }
849
850 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
851 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
852}
853
854// Helper to know which src access mask we need to set when transitioning to the present layout
855static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
856 VkAccessFlags flags = 0;
857 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
858 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700859 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
860 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
861 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500862 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
863 flags = VK_ACCESS_HOST_WRITE_BIT;
864 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
865 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
866 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
867 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
868 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
869 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
870 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
871 flags = VK_ACCESS_TRANSFER_READ_BIT;
872 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
873 flags = VK_ACCESS_SHADER_READ_BIT;
874 }
875 return flags;
876}
877
// Presents the current backbuffer of |surface|: records and submits a queue-
// ownership/layout transition of the swapchain image to PRESENT_SRC on the
// graphics queue, then queues the present on the present queue, gated by a
// semaphore signaled when the transition's command buffer completes.
// Also updates the per-image bookkeeping (layout, last-used time, validity).
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    // Debug/diagnostic property: serialize the GPU before presenting.
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Pull the Vulkan image state (handle + current layout) back out of the
    // SkSurface that Skia rendered into this frame.
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete for before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

    // Barrier doubles as a queue-family ownership transfer when the graphics
    // and present queue families differ (src/dst indices below).
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
            NULL,                                      // pNext
            srcAccessMask,                             // outputMask
            dstAccessMask,                             // inputMask
            layout,                                    // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,           // newLayout
            mGraphicsQueueIndex,                       // srcQueueFamilyIndex
            mPresentQueueIndex,                        // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex], // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };

    // Slot [1] of the transition command buffers is dedicated to the
    // render-to-present transition (slot [0] presumably handles the
    // acquire-side transition — recorded elsewhere; TODO confirm).
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    // Record the layout we just transitioned to so the next acquire knows it.
    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
            NULL,                               // pNext
            1,                                  // waitSemaphoreCount
            &backbuffer->mRenderSemaphore,      // pWaitSemaphores
            1,                                  // swapchainCount
            &surface->mSwapchain,               // pSwapchains
            &backbuffer->mImageIndex,           // pImageIndices
            NULL                                // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Drop the frame's SkSurface reference and advance the frame clock used
    // by getAge() for buffer-age / damage tracking.
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
969
970int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -0500971 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -0700972 VulkanSurface::BackbufferInfo* backbuffer =
973 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
974 if (mSwapBehavior == SwapBehavior::Discard ||
975 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -0500976 return 0;
977 }
978 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
979 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500980}
981
Stan Iliev564ca3e2018-09-04 22:00:00 +0000982status_t VulkanManager::fenceWait(sp<Fence>& fence) {
983 //TODO: Insert a wait on fence command into the Vulkan command buffer.
984 // Block CPU on the fence.
985 status_t err = fence->waitForever("VulkanManager::fenceWait");
986 if (err != NO_ERROR) {
987 ALOGE("VulkanManager::fenceWait: error waiting for fence: %d", err);
988 return err;
989 }
990 return OK;
991}
992
// Stub: intended to hand back a native fence that signals once all pending
// Vulkan commands have been flushed. Until implemented it always returns OK
// and leaves |nativeFence| untouched.
status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
    //TODO: Create a fence that is signaled, when all the pending Vulkan commands are flushed.
    return OK;
}
997
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500998} /* namespace renderthread */
999} /* namespace uirenderer */
1000} /* namespace android */