/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

17#include "VulkanManager.h"
18
Greg Danielcd558522016-11-17 13:31:40 -050019#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050020#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050021#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050022#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050023
Greg Danielac2d2322017-07-12 11:30:15 -040024#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050025#include <GrContext.h>
26#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040027#include <GrTypes.h>
28#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050029#include <vk/GrVkTypes.h>
30
namespace android {
namespace uirenderer {
namespace renderthread {

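// Helpers that load Vulkan entry points into the matching m-prefixed member function
// pointers, e.g. GET_INST_PROC(CreateDevice) expands to
//   mCreateDevice = (PFN_vkCreateDevice)vkGetInstanceProcAddr(mInstance, "vkCreateDevice");
// GET_PROC resolves global (pre-instance) procs, GET_INST_PROC instance-level procs, and
// GET_DEV_PROC device-level procs.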
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)

VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}

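// Tears down everything this manager created, in reverse dependency order: drop the Skia
// GrContext first, then the command pool, then the device (after a wait-idle), and finally
// the instance.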
void VulkanManager::destroy() {
    mRenderThread.setGrContext(nullptr);

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
}

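// Creates the VkInstance and VkDevice, and collects the instance/device extensions and
// physical device features that Skia needs. On any failure this cleans up via destroy()
// and returns false.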
bool VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "android framework",                // pApplicationName
        0,                                  // applicationVersion
        "android framework",                // pEngineName
        0,                                  // engineVersion
        VK_MAKE_VERSION(1, 0, 0),           // apiVersion
    };

    std::vector<const char*> instanceExtensions;
    {
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != err) {
            return false;
        }
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
        if (VK_SUCCESS != err) {
            return false;
        }
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extensions[i].extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
            this->destroy();
            return false;
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
        nullptr,                                // pNext
        0,                                      // flags
        &app_info,                              // pApplicationInfo
        0,                                      // enabledLayerCount
        nullptr,                                // ppEnabledLayerNames
        (uint32_t) instanceExtensions.size(),   // enabledExtensionCount
        instanceExtensions.data(),              // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    if (err < 0) {
        this->destroy();
        return false;
    }

    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(CreateAndroidSurfaceKHR);
    GET_INST_PROC(DestroySurfaceKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);

    uint32_t gpuCount;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
    if (err) {
        this->destroy();
        return false;
    }
    if (!gpuCount) {
        this->destroy();
        return false;
    }
    // Just return the first physical device instead of getting the whole array, since there
    // should only be one device on Android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        this->destroy();
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    if (!queueCount) {
        this->destroy();
        return false;
    }

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    if (mGraphicsQueueIndex == queueCount) {
        this->destroy();
        return false;
    }

    // All physical devices and queue families on Android must be capable of
    // presentation with any native window. So just use the first one.
    mPresentQueueIndex = 0;

    std::vector<const char*> deviceExtensions;
    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                nullptr);
        if (VK_SUCCESS != err) {
            this->destroy();
            return false;
        }
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                extensions.get());
        if (VK_SUCCESS != err) {
            this->destroy();
            return false;
        }
        bool hasKHRSwapchainExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        if (!hasKHRSwapchainExtension) {
            this->destroy();
            return false;
        }
    }

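    // Skia resolves all Vulkan entry points through a single callback; prefer device-level
    // procs (which skip the loader's per-call dispatch) whenever a VkDevice is available.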
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };
    grExtensions.init(getProc, mInstance, mPhysicalDevice, instanceExtensions.size(),
            instanceExtensions.data(), deviceExtensions.size(), deviceExtensions.data());

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Setup all extension feature structs we may want to use.
    void** tailPNext = &features.pNext;

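    // tailPNext always points at the pNext slot of the last struct in the chain, so each
    // extension feature struct allocated below is appended in O(1). The chain is walked and
    // freed later by free_features_extensions_structs().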
    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // Disable robustBufferAccess: it looks like it would slow things down,
    // and we can't depend on it on all platforms.
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = { 0.0 };

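    // Describe one queue from the graphics family and one from the present family; only the
    // first entry is passed to vkCreateDevice when the two families are the same (see
    // queueInfoCount below).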
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex,                        // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            mPresentQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
        &features,                            // pNext
        0,                                    // VkDeviceCreateFlags
        queueInfoCount,                       // queueCreateInfoCount
        queueInfo,                            // pQueueCreateInfos
        0,                                    // enabledLayerCount
        nullptr,                              // ppEnabledLayerNames
        (uint32_t) deviceExtensions.size(),   // enabledExtensionCount
        deviceExtensions.data(),              // ppEnabledExtensionNames
        nullptr,                              // pEnabledFeatures
    };

    err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
    if (err) {
        this->destroy();
        return false;
    }

    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);

    return true;
}

static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features.pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        free(current);
    }
}

void VulkanManager::initialize() {
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

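    // HWUI's Vulkan backend requires a Vulkan 1.1-capable loader; abort rather than limp
    // along on a 1.0 instance.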
    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion = 0;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    GrVkExtensions extensions;
    VkPhysicalDeviceFeatures2 features;
    LOG_ALWAYS_FATAL_IF(!this->setupDevice(extensions, features));

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fInstanceVersion = instanceVersion;
    backendContext.fVkExtensions = &extensions;
    backendContext.fDeviceFeatures2 = &features;
    backendContext.fGetProc = std::move(getProc);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }

    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    GrContextOptions options;
    options.fDisableDistanceFieldPaths = true;
    // TODO: get a string describing the SPIR-V compiler version and use it here
    mRenderThread.cacheManager().configureContext(&options, nullptr, 0);
    sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
    LOG_ALWAYS_FATAL_IF(!grContext.get());
    mRenderThread.setGrContext(grContext);

    free_features_extensions_structs(features);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}

// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
// previous uses have finished before returning.
VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
    SkASSERT(surface->mBackbuffers);

    ++surface->mCurrentBackbufferIndex;
    if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
        surface->mCurrentBackbufferIndex = 0;
    }

    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
    // reuse its command buffers.
    VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
    if (res != VK_SUCCESS) {
        return nullptr;
    }

    return backbuffer;
}

SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
            backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res) {
        // tear swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout)
            ? VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
            : VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask =
            (VK_IMAGE_LAYOUT_UNDEFINED == layout) ? 0 : VK_ACCESS_MEMORY_READ_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

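    // Besides the layout change, this barrier transfers ownership of the image from the
    // present queue family back to the graphics queue family (srcQueueFamilyIndex to
    // dstQueueFamilyIndex below), which matters when the two families differ.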
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
        NULL,                                      // pNext
        srcAccessMask,                             // srcAccessMask
        dstAccessMask,                             // dstAccessMask
        layout,                                    // oldLayout
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,  // newLayout
        mPresentQueueIndex,                        // srcQueueFamilyIndex
        mGraphicsQueueIndex,                       // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
            nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure the acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach first fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    if (!backendRT.isValid()) {
        SkASSERT(backendRT.isValid());
        return nullptr;
    }
    backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}

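// Waits on each backbuffer's fences before freeing its semaphores, command buffers, and
// fences, so that nothing still in flight on the GPU loses its sync objects.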
void VulkanManager::destroyBuffers(VulkanSurface* surface) {
    if (surface->mBackbuffers) {
        for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
            mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
            surface->mBackbuffers[i].mImageIndex = -1;
            mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
            mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
            mFreeCommandBuffers(mDevice, mCommandPool, 2,
                    surface->mBackbuffers[i].mTransitionCmdBuffers);
            mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
            mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
        }
    }

    delete[] surface->mBackbuffers;
    surface->mBackbuffers = nullptr;
    delete[] surface->mImageInfos;
    surface->mImageInfos = nullptr;
    delete[] surface->mImages;
    surface->mImages = nullptr;
}

void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mPresentQueue) {
        mQueueWaitIdle(mPresentQueue);
    }
    mDeviceWaitIdle(mDevice);

    destroyBuffers(surface);

    if (VK_NULL_HANDLE != surface->mSwapchain) {
        mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
        surface->mSwapchain = VK_NULL_HANDLE;
    }

    if (VK_NULL_HANDLE != surface->mVkSurface) {
        mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
        surface->mVkSurface = VK_NULL_HANDLE;
    }
    delete surface;
}

void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                kRGBA_8888_SkColorType, nullptr, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}

bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount, presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width, to handle a currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

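    // Request two images beyond the driver's minimum so that rendering rarely stalls waiting
    // for a free swapchain image.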
    uint32_t imageCount = caps.minImageCount + 2;
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attachments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
            VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
            VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha &
            (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick our surface format. For now, just make sure it matches our sRGB request:
    VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;

    bool wantSRGB = false;
#ifdef ANDROID_ENABLE_LINEAR_BLENDING
    wantSRGB = true;
#endif
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
        VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
        if (desiredFormat == surfaceFormats[i].format) {
            surfaceFormat = surfaceFormats[i].format;
            colorSpace = surfaceFormats[i].colorSpace;
        }
    }

    if (VK_FORMAT_UNDEFINED == surfaceFormat) {
        return false;
    }

    // If mailbox mode is available, use it, as it is the lowest-latency non-
    // tearing mode. If not, fall back to FIFO which is always available.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
    for (uint32_t i = 0; i < presentModeCount; ++i) {
        // use mailbox
        if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
            mode = presentModes[i];
            break;
        }
    }

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
    if (mGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}

VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
    initialize();

    if (!window) {
        return nullptr;
    }

    VulkanSurface* surface = new VulkanSurface();

    VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
    memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
    surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
    surfaceCreateInfo.pNext = nullptr;
    surfaceCreateInfo.flags = 0;
    surfaceCreateInfo.window = window;

    VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
            &surface->mVkSurface);
    if (VK_SUCCESS != res) {
        delete surface;
        return nullptr;
    }

    SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
            mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
            // All physical devices and queue families on Android must be capable of
            // presentation with any native window.
            SkASSERT(VK_SUCCESS == res && supported););

    if (!createSwapchain(surface)) {
        destroySurface(surface);
        return nullptr;
    }

    return surface;
}

// Helper to know which src stage flags we need to set when transitioning to the present layout
static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

// Helper to know which src access mask we need to set when transitioning to the present layout
static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
                VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}

void VulkanManager::swapBuffers(VulkanSurface* surface) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actual wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

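    // This barrier mirrors the one recorded at acquire time: it returns ownership of the
    // image from the graphics queue family to the present queue family while moving it to
    // the present layout.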
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
        NULL,                                      // pNext
        srcAccessMask,                             // srcAccessMask
        dstAccessMask,                             // dstAccessMask
        layout,                                    // oldLayout
        VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,           // newLayout
        mGraphicsQueueIndex,                       // srcQueueFamilyIndex
        mPresentQueueIndex,                        // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };

    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
            nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to the present queue. We use a semaphore here to make sure all
    // rendering to the image is complete and that the layout has been changed to present on
    // the graphics queue.
    const VkPresentInfoKHR presentInfo = {
        VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
        NULL,                               // pNext
        1,                                  // waitSemaphoreCount
        &backbuffer->mRenderSemaphore,      // pWaitSemaphores
        1,                                  // swapchainCount
        &surface->mSwapchain,               // pSwapchains
        &backbuffer->mImageIndex,           // pImageIndices
        NULL                                // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}

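// Buffer age in frames for the current backbuffer; 0 means the contents are undefined, either
// because the image is invalid or because partial updates are disabled (analogous to the
// semantics of EGL_EXT_buffer_age).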
int VulkanManager::getAge(VulkanSurface* surface) {
    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;
    if (mSwapBehavior == SwapBehavior::Discard ||
            surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
        return 0;
    }
    uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
    return surface->mCurrentTime - lastUsed;
}

status_t VulkanManager::fenceWait(sp<Fence>& fence) {
    // TODO: Insert a wait on fence command into the Vulkan command buffer.
    // Block CPU on the fence.
    status_t err = fence->waitForever("VulkanManager::fenceWait");
    if (err != NO_ERROR) {
        ALOGE("VulkanManager::fenceWait: error waiting for fence: %d", err);
        return err;
    }
    return OK;
}

status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
    // TODO: Create a fence that is signaled when all the pending Vulkan commands are flushed.
    return OK;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */