blob: a19edae26cb328fbfb353f96c1dcce29d0a2ed60 [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
19#include "DeviceInfo.h"
Greg Danielcd558522016-11-17 13:31:40 -050020#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050021#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050022#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050023#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050024
Greg Danielac2d2322017-07-12 11:30:15 -040025#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050026#include <GrContext.h>
27#include <GrTypes.h>
28#include <vk/GrVkTypes.h>
29
30namespace android {
31namespace uirenderer {
32namespace renderthread {
33
// Helper macros that resolve a Vulkan entry point "vkFoo" and store it in the
// matching member function pointer mFoo. GET_PROC resolves loader-level
// (pre-instance) entry points, GET_INST_PROC instance-level ones, and
// GET_DEV_PROC device-level ones.
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050037
// Binds this manager to the RenderThread that owns it. No Vulkan work happens
// here; all setup is deferred to initialize().
VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050039
40void VulkanManager::destroy() {
Greg Daniel45ec62b2017-01-04 14:27:00 -050041 mRenderThread.renderState().onVkContextDestroyed();
42 mRenderThread.setGrContext(nullptr);
43
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050044 if (VK_NULL_HANDLE != mCommandPool) {
Greg Daniel2ff202712018-06-14 11:50:10 -040045 mDestroyCommandPool(mDevice, mCommandPool, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050046 mCommandPool = VK_NULL_HANDLE;
47 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050048
Greg Daniel2ff202712018-06-14 11:50:10 -040049 if (mDevice != VK_NULL_HANDLE) {
50 mDeviceWaitIdle(mDevice);
51 mDestroyDevice(mDevice, nullptr);
John Reck1bcacfd2017-11-03 10:12:19 -070052 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050053
Greg Daniel2ff202712018-06-14 11:50:10 -040054 if (mInstance != VK_NULL_HANDLE) {
55 mDestroyInstance(mInstance, nullptr);
56 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050057
Greg Daniel2ff202712018-06-14 11:50:10 -040058 mGraphicsQueue = VK_NULL_HANDLE;
59 mPresentQueue = VK_NULL_HANDLE;
60 mDevice = VK_NULL_HANDLE;
61 mPhysicalDevice = VK_NULL_HANDLE;
62 mInstance = VK_NULL_HANDLE;
63}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050064
Greg Daniel2ff202712018-06-14 11:50:10 -040065bool VulkanManager::setupDevice(VkPhysicalDeviceFeatures& deviceFeatures) {
66 VkResult err;
67
68 constexpr VkApplicationInfo app_info = {
69 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
70 nullptr, // pNext
71 "android framework", // pApplicationName
72 0, // applicationVersion
73 "android framework", // pEngineName
74 0, // engineVerison
75 VK_MAKE_VERSION(1, 0, 0), // apiVersion
76 };
77
78 std::vector<const char*> instanceExtensions;
79 {
80 GET_PROC(EnumerateInstanceExtensionProperties);
81
82 uint32_t extensionCount = 0;
83 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
84 if (VK_SUCCESS != err) {
85 return false;
86 }
87 std::unique_ptr<VkExtensionProperties[]> extensions(
88 new VkExtensionProperties[extensionCount]);
89 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
90 if (VK_SUCCESS != err) {
91 return false;
92 }
93 bool hasKHRSurfaceExtension = false;
94 bool hasKHRAndroidSurfaceExtension = false;
95 for (uint32_t i = 0; i < extensionCount; ++i) {
96 instanceExtensions.push_back(extensions[i].extensionName);
97 if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
98 hasKHRSurfaceExtension = true;
99 }
100 if (!strcmp(extensions[i].extensionName,VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
101 hasKHRAndroidSurfaceExtension = true;
102 }
103 }
104 if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
105 this->destroy();
106 return false;
107 }
108 }
109
110 const VkInstanceCreateInfo instance_create = {
111 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
112 nullptr, // pNext
113 0, // flags
114 &app_info, // pApplicationInfo
115 0, // enabledLayerNameCount
116 nullptr, // ppEnabledLayerNames
117 (uint32_t) instanceExtensions.size(), // enabledExtensionNameCount
118 instanceExtensions.data(), // ppEnabledExtensionNames
119 };
120
121 GET_PROC(CreateInstance);
122 err = mCreateInstance(&instance_create, nullptr, &mInstance);
123 if (err < 0) {
124 this->destroy();
125 return false;
126 }
127
128 GET_INST_PROC(DestroyInstance);
129 GET_INST_PROC(EnumeratePhysicalDevices);
130 GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
131 GET_INST_PROC(GetPhysicalDeviceFeatures);
132 GET_INST_PROC(CreateDevice);
133 GET_INST_PROC(EnumerateDeviceExtensionProperties);
134 GET_INST_PROC(CreateAndroidSurfaceKHR);
135 GET_INST_PROC(DestroySurfaceKHR);
136 GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
137 GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
138 GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
139 GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
140
141 uint32_t gpuCount;
142 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
143 if (err) {
144 this->destroy();
145 return false;
146 }
147 if (!gpuCount) {
148 this->destroy();
149 return false;
150 }
151 // Just returning the first physical device instead of getting the whole array. Since there
152 // should only be one device on android.
153 gpuCount = 1;
154 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
155 // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
156 if (err && VK_INCOMPLETE != err) {
157 this->destroy();
158 return false;
159 }
160
161 // query to get the initial queue props size
162 uint32_t queueCount;
163 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
164 if (!queueCount) {
165 this->destroy();
166 return false;
167 }
168
169 // now get the actual queue props
170 std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
171 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());
172
173 // iterate to find the graphics queue
174 mGraphicsQueueIndex = queueCount;
175 for (uint32_t i = 0; i < queueCount; i++) {
176 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
177 mGraphicsQueueIndex = i;
178 break;
179 }
180 }
181 if (mGraphicsQueueIndex == queueCount) {
182 this->destroy();
183 return false;
184 }
185
186 // All physical devices and queue families on Android must be capable of
187 // presentation with any native window. So just use the first one.
188 mPresentQueueIndex = 0;
189
190 std::vector<const char*> deviceExtensions;
191 {
192 uint32_t extensionCount = 0;
193 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
194 nullptr);
195 if (VK_SUCCESS != err) {
196 this->destroy();
197 return false;
198 }
199 std::unique_ptr<VkExtensionProperties[]> extensions(
200 new VkExtensionProperties[extensionCount]);
201 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
202 extensions.get());
203 if (VK_SUCCESS != err) {
204 this->destroy();
205 return false;
206 }
207 bool hasKHRSwapchainExtension = false;
208 for (uint32_t i = 0; i < extensionCount; ++i) {
209 deviceExtensions.push_back(extensions[i].extensionName);
210 if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
211 hasKHRSwapchainExtension = true;
212 }
213 }
214 if (!hasKHRSwapchainExtension) {
215 this->destroy();
216 return false;
217 }
218 }
219
220 // query to get the physical device properties
221 mGetPhysicalDeviceFeatures(mPhysicalDevice, &deviceFeatures);
222 // this looks like it would slow things down,
223 // and we can't depend on it on all platforms
224 deviceFeatures.robustBufferAccess = VK_FALSE;
225
226 float queuePriorities[1] = { 0.0 };
227
228 const VkDeviceQueueCreateInfo queueInfo[2] = {
229 {
230 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
231 nullptr, // pNext
232 0, // VkDeviceQueueCreateFlags
233 mGraphicsQueueIndex, // queueFamilyIndex
234 1, // queueCount
235 queuePriorities, // pQueuePriorities
236 },
237 {
238 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
239 nullptr, // pNext
240 0, // VkDeviceQueueCreateFlags
241 mPresentQueueIndex, // queueFamilyIndex
242 1, // queueCount
243 queuePriorities, // pQueuePriorities
244 }
245 };
246 uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;
247
248 const VkDeviceCreateInfo deviceInfo = {
249 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
250 nullptr, // pNext
251 0, // VkDeviceCreateFlags
252 queueInfoCount, // queueCreateInfoCount
253 queueInfo, // pQueueCreateInfos
254 0, // layerCount
255 nullptr, // ppEnabledLayerNames
256 (uint32_t) deviceExtensions.size(), // extensionCount
257 deviceExtensions.data(), // ppEnabledExtensionNames
258 &deviceFeatures // ppEnabledFeatures
259 };
260
261 err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
262 if (err) {
263 this->destroy();
264 return false;
265 }
266
267 GET_DEV_PROC(GetDeviceQueue);
268 GET_DEV_PROC(DeviceWaitIdle);
269 GET_DEV_PROC(DestroyDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500270 GET_DEV_PROC(CreateSwapchainKHR);
271 GET_DEV_PROC(DestroySwapchainKHR);
272 GET_DEV_PROC(GetSwapchainImagesKHR);
273 GET_DEV_PROC(AcquireNextImageKHR);
274 GET_DEV_PROC(QueuePresentKHR);
275 GET_DEV_PROC(CreateCommandPool);
276 GET_DEV_PROC(DestroyCommandPool);
277 GET_DEV_PROC(AllocateCommandBuffers);
278 GET_DEV_PROC(FreeCommandBuffers);
279 GET_DEV_PROC(ResetCommandBuffer);
280 GET_DEV_PROC(BeginCommandBuffer);
281 GET_DEV_PROC(EndCommandBuffer);
282 GET_DEV_PROC(CmdPipelineBarrier);
283 GET_DEV_PROC(GetDeviceQueue);
284 GET_DEV_PROC(QueueSubmit);
285 GET_DEV_PROC(QueueWaitIdle);
286 GET_DEV_PROC(DeviceWaitIdle);
287 GET_DEV_PROC(CreateSemaphore);
288 GET_DEV_PROC(DestroySemaphore);
289 GET_DEV_PROC(CreateFence);
290 GET_DEV_PROC(DestroyFence);
291 GET_DEV_PROC(WaitForFences);
292 GET_DEV_PROC(ResetFences);
293
Greg Daniel2ff202712018-06-14 11:50:10 -0400294 return true;
295}
296
297void VulkanManager::initialize() {
298 if (mDevice != VK_NULL_HANDLE) {
299 return;
300 }
301
302 std::vector<const char*> instanceExtensions;
303 std::vector<const char*> deviceExtensions;
304 VkPhysicalDeviceFeatures deviceFeatures;
305 LOG_ALWAYS_FATAL_IF(!this->setupDevice(deviceFeatures));
306
307 mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);
308
309 uint32_t extensionFlags = kKHR_surface_GrVkExtensionFlag |
310 kKHR_android_surface_GrVkExtensionFlag |
311 kKHR_swapchain_GrVkExtensionFlag;
312
313 uint32_t featureFlags = 0;
314 if (deviceFeatures.geometryShader) {
315 featureFlags |= kGeometryShader_GrVkFeatureFlag;
316 }
317 if (deviceFeatures.dualSrcBlend) {
318 featureFlags |= kDualSrcBlend_GrVkFeatureFlag;
319 }
320 if (deviceFeatures.sampleRateShading) {
321 featureFlags |= kSampleRateShading_GrVkFeatureFlag;
322 }
323
324 auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
325 if (device != VK_NULL_HANDLE) {
326 return vkGetDeviceProcAddr(device, proc_name);
327 }
328 return vkGetInstanceProcAddr(instance, proc_name);
329 };
Greg Daniel2ff202712018-06-14 11:50:10 -0400330
331 GrVkBackendContext backendContext;
332 backendContext.fInstance = mInstance;
333 backendContext.fPhysicalDevice = mPhysicalDevice;
334 backendContext.fDevice = mDevice;
335 backendContext.fQueue = mGraphicsQueue;
336 backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
337 backendContext.fMinAPIVersion = VK_MAKE_VERSION(1, 0, 0);
338 backendContext.fExtensions = extensionFlags;
339 backendContext.fFeatures = featureFlags;
Greg Daniel4aa58672018-07-13 13:10:36 -0400340 backendContext.fGetProc = std::move(getProc);
Greg Daniel2ff202712018-06-14 11:50:10 -0400341 backendContext.fOwnsInstanceAndDevice = false;
342
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500343 // create the command pool for the command buffers
344 if (VK_NULL_HANDLE == mCommandPool) {
345 VkCommandPoolCreateInfo commandPoolInfo;
346 memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
347 commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
348 // this needs to be on the render queue
Greg Daniel2ff202712018-06-14 11:50:10 -0400349 commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500350 commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
Greg Daniel2ff202712018-06-14 11:50:10 -0400351 SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
352 &mCommandPool);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500353 SkASSERT(VK_SUCCESS == res);
354 }
355
Greg Daniel2ff202712018-06-14 11:50:10 -0400356 mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500357
Stan Ilievd495f432017-10-09 15:49:32 -0400358 GrContextOptions options;
359 options.fDisableDistanceFieldPaths = true;
360 mRenderThread.cacheManager().configureContext(&options);
Greg Daniel2ff202712018-06-14 11:50:10 -0400361 sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
Greg Daniel660d6ec2017-12-08 11:44:27 -0500362 LOG_ALWAYS_FATAL_IF(!grContext.get());
363 mRenderThread.setGrContext(grContext);
Greg Daniel85e09072018-04-09 12:36:45 -0400364 DeviceInfo::initialize(mRenderThread.getGrContext()->maxRenderTargetSize());
Greg Danielcd558522016-11-17 13:31:40 -0500365
366 if (Properties::enablePartialUpdates && Properties::useBufferAge) {
367 mSwapBehavior = SwapBehavior::BufferAge;
368 }
Greg Daniel45ec62b2017-01-04 14:27:00 -0500369
370 mRenderThread.renderState().onVkContextCreated();
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500371}
372
373// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
374// previous uses have finished before returning.
375VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
376 SkASSERT(surface->mBackbuffers);
377
378 ++surface->mCurrentBackbufferIndex;
379 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
380 surface->mCurrentBackbufferIndex = 0;
381 }
382
John Reck1bcacfd2017-11-03 10:12:19 -0700383 VulkanSurface::BackbufferInfo* backbuffer =
384 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500385
386 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
387 // reuse its commands buffers.
Greg Daniel2ff202712018-06-14 11:50:10 -0400388 VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500389 if (res != VK_SUCCESS) {
390 return nullptr;
391 }
392
393 return backbuffer;
394}
395
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500396SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
397 VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
398 SkASSERT(backbuffer);
399
400 VkResult res;
401
Greg Daniel2ff202712018-06-14 11:50:10 -0400402 res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500403 SkASSERT(VK_SUCCESS == res);
404
405 // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
406 // finished presenting and that it is safe to begin sending new commands to the returned image.
Greg Daniel2ff202712018-06-14 11:50:10 -0400407 res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700408 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
409 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500410
411 if (VK_ERROR_SURFACE_LOST_KHR == res) {
412 // need to figure out how to create a new vkSurface without the platformData*
413 // maybe use attach somehow? but need a Window
414 return nullptr;
415 }
416 if (VK_ERROR_OUT_OF_DATE_KHR == res) {
417 // tear swapchain down and try again
418 if (!createSwapchain(surface)) {
419 return nullptr;
420 }
Greg Daniel45ec62b2017-01-04 14:27:00 -0500421 backbuffer = getAvailableBackbuffer(surface);
Greg Daniel2ff202712018-06-14 11:50:10 -0400422 res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
Greg Daniel45ec62b2017-01-04 14:27:00 -0500423 SkASSERT(VK_SUCCESS == res);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500424
425 // acquire the image
Greg Daniel2ff202712018-06-14 11:50:10 -0400426 res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700427 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
428 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500429
430 if (VK_SUCCESS != res) {
431 return nullptr;
432 }
433 }
434
435 // set up layout transfer from initial to color attachment
Greg Danielcd558522016-11-17 13:31:40 -0500436 VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500437 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
John Reck1bcacfd2017-11-03 10:12:19 -0700438 VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout)
439 ? VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
440 : VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500441 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
John Reck1bcacfd2017-11-03 10:12:19 -0700442 VkAccessFlags srcAccessMask =
443 (VK_IMAGE_LAYOUT_UNDEFINED == layout) ? 0 : VK_ACCESS_MEMORY_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500444 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
445
446 VkImageMemoryBarrier imageMemoryBarrier = {
John Reck1bcacfd2017-11-03 10:12:19 -0700447 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
448 NULL, // pNext
449 srcAccessMask, // outputMask
450 dstAccessMask, // inputMask
451 layout, // oldLayout
452 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
453 mPresentQueueIndex, // srcQueueFamilyIndex
Greg Daniel2ff202712018-06-14 11:50:10 -0400454 mGraphicsQueueIndex, // dstQueueFamilyIndex
John Reck1bcacfd2017-11-03 10:12:19 -0700455 surface->mImages[backbuffer->mImageIndex], // image
456 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500457 };
458 mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);
459
460 VkCommandBufferBeginInfo info;
461 memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
462 info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
463 info.flags = 0;
464 mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);
465
John Reck1bcacfd2017-11-03 10:12:19 -0700466 mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
467 nullptr, 0, nullptr, 1, &imageMemoryBarrier);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500468
469 mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);
470
471 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
472 // insert the layout transfer into the queue and wait on the acquire
473 VkSubmitInfo submitInfo;
474 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
475 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
476 submitInfo.waitSemaphoreCount = 1;
477 // Wait to make sure aquire semaphore set above has signaled.
478 submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
479 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
480 submitInfo.commandBufferCount = 1;
481 submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
482 submitInfo.signalSemaphoreCount = 0;
483
484 // Attach first fence to submission here so we can track when the command buffer finishes.
Greg Daniel2ff202712018-06-14 11:50:10 -0400485 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500486
487 // We need to notify Skia that we changed the layout of the wrapped VkImage
Greg Danielcd558522016-11-17 13:31:40 -0500488 sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
Greg Daniel1834a8c2018-04-12 12:22:43 -0400489 GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
490 SkSurface::kFlushRead_BackendHandleAccess);
491 if (!backendRT.isValid()) {
492 SkASSERT(backendRT.isValid());
493 return nullptr;
494 }
495 backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500496
497 surface->mBackbuffer = std::move(skSurface);
498 return surface->mBackbuffer.get();
499}
500
501void VulkanManager::destroyBuffers(VulkanSurface* surface) {
502 if (surface->mBackbuffers) {
503 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400504 mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500505 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400506 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
507 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
508 mFreeCommandBuffers(mDevice, mCommandPool, 2,
509 surface->mBackbuffers[i].mTransitionCmdBuffers);
510 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
511 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500512 }
513 }
514
515 delete[] surface->mBackbuffers;
516 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500517 delete[] surface->mImageInfos;
518 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500519 delete[] surface->mImages;
520 surface->mImages = nullptr;
521}
522
523void VulkanManager::destroySurface(VulkanSurface* surface) {
524 // Make sure all submit commands have finished before starting to destroy objects.
525 if (VK_NULL_HANDLE != mPresentQueue) {
526 mQueueWaitIdle(mPresentQueue);
527 }
Greg Daniel2ff202712018-06-14 11:50:10 -0400528 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500529
530 destroyBuffers(surface);
531
532 if (VK_NULL_HANDLE != surface->mSwapchain) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400533 mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500534 surface->mSwapchain = VK_NULL_HANDLE;
535 }
536
537 if (VK_NULL_HANDLE != surface->mVkSurface) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400538 mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500539 surface->mVkSurface = VK_NULL_HANDLE;
540 }
541 delete surface;
542}
543
544void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400545 mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500546 SkASSERT(surface->mImageCount);
547 surface->mImages = new VkImage[surface->mImageCount];
Greg Daniel2ff202712018-06-14 11:50:10 -0400548 mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500549
550 SkSurfaceProps props(0, kUnknown_SkPixelGeometry);
551
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500552 // set up initial image layouts and create surfaces
Greg Danielcd558522016-11-17 13:31:40 -0500553 surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500554 for (uint32_t i = 0; i < surface->mImageCount; ++i) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500555 GrVkImageInfo info;
556 info.fImage = surface->mImages[i];
Greg Danielc9a89452018-02-23 13:16:12 -0500557 info.fAlloc = GrVkAlloc();
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500558 info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
559 info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
560 info.fFormat = format;
561 info.fLevelCount = 1;
562
Greg Danielac2d2322017-07-12 11:30:15 -0400563 GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500564
Greg Danielcd558522016-11-17 13:31:40 -0500565 VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
John Reck1bcacfd2017-11-03 10:12:19 -0700566 imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
Greg Danielc9da8e82018-03-21 10:50:24 -0400567 mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
568 kRGBA_8888_SkColorType, nullptr, &props);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500569 }
570
571 SkASSERT(mCommandPool != VK_NULL_HANDLE);
572
573 // set up the backbuffers
574 VkSemaphoreCreateInfo semaphoreInfo;
575 memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
576 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
577 semaphoreInfo.pNext = nullptr;
578 semaphoreInfo.flags = 0;
579 VkCommandBufferAllocateInfo commandBuffersInfo;
580 memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
581 commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
582 commandBuffersInfo.pNext = nullptr;
583 commandBuffersInfo.commandPool = mCommandPool;
584 commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
585 commandBuffersInfo.commandBufferCount = 2;
586 VkFenceCreateInfo fenceInfo;
587 memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
588 fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
589 fenceInfo.pNext = nullptr;
590 fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
591
592 // we create one additional backbuffer structure here, because we want to
593 // give the command buffers they contain a chance to finish before we cycle back
594 surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
595 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
596 SkDEBUGCODE(VkResult res);
597 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400598 SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
John Reck1bcacfd2017-11-03 10:12:19 -0700599 &surface->mBackbuffers[i].mAcquireSemaphore);
Greg Daniel2ff202712018-06-14 11:50:10 -0400600 SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
John Reck1bcacfd2017-11-03 10:12:19 -0700601 &surface->mBackbuffers[i].mRenderSemaphore);
Greg Daniel2ff202712018-06-14 11:50:10 -0400602 SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
John Reck1bcacfd2017-11-03 10:12:19 -0700603 surface->mBackbuffers[i].mTransitionCmdBuffers);
Greg Daniel2ff202712018-06-14 11:50:10 -0400604 SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
John Reck1bcacfd2017-11-03 10:12:19 -0700605 &surface->mBackbuffers[i].mUsageFences[0]);
Greg Daniel2ff202712018-06-14 11:50:10 -0400606 SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
John Reck1bcacfd2017-11-03 10:12:19 -0700607 &surface->mBackbuffers[i].mUsageFences[1]);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500608 SkASSERT(VK_SUCCESS == res);
609 }
610 surface->mCurrentBackbufferIndex = surface->mImageCount;
611}
612
613bool VulkanManager::createSwapchain(VulkanSurface* surface) {
614 // check for capabilities
615 VkSurfaceCapabilitiesKHR caps;
Greg Daniel2ff202712018-06-14 11:50:10 -0400616 VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700617 surface->mVkSurface, &caps);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500618 if (VK_SUCCESS != res) {
619 return false;
620 }
621
622 uint32_t surfaceFormatCount;
Greg Daniel2ff202712018-06-14 11:50:10 -0400623 res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
John Reck1bcacfd2017-11-03 10:12:19 -0700624 &surfaceFormatCount, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500625 if (VK_SUCCESS != res) {
626 return false;
627 }
628
Ben Wagnereec27d52017-01-11 15:32:07 -0500629 FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400630 res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
John Reck1bcacfd2017-11-03 10:12:19 -0700631 &surfaceFormatCount, surfaceFormats.data());
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500632 if (VK_SUCCESS != res) {
633 return false;
634 }
635
636 uint32_t presentModeCount;
Greg Daniel2ff202712018-06-14 11:50:10 -0400637 res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700638 surface->mVkSurface, &presentModeCount, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500639 if (VK_SUCCESS != res) {
640 return false;
641 }
642
Ben Wagnereec27d52017-01-11 15:32:07 -0500643 FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400644 res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700645 surface->mVkSurface, &presentModeCount,
646 presentModes.data());
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500647 if (VK_SUCCESS != res) {
648 return false;
649 }
650
651 VkExtent2D extent = caps.currentExtent;
652 // clamp width; to handle currentExtent of -1 and protect us from broken hints
653 if (extent.width < caps.minImageExtent.width) {
654 extent.width = caps.minImageExtent.width;
655 }
656 SkASSERT(extent.width <= caps.maxImageExtent.width);
657 // clamp height
658 if (extent.height < caps.minImageExtent.height) {
659 extent.height = caps.minImageExtent.height;
660 }
661 SkASSERT(extent.height <= caps.maxImageExtent.height);
662
663 uint32_t imageCount = caps.minImageCount + 2;
664 if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
665 // Application must settle for fewer images than desired:
666 imageCount = caps.maxImageCount;
667 }
668
669 // Currently Skia requires the images to be color attchments and support all transfer
670 // operations.
671 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
672 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
673 VK_IMAGE_USAGE_TRANSFER_DST_BIT;
674 SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
675 SkASSERT(caps.supportedTransforms & caps.currentTransform);
John Reck1bcacfd2017-11-03 10:12:19 -0700676 SkASSERT(caps.supportedCompositeAlpha &
677 (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500678 VkCompositeAlphaFlagBitsKHR composite_alpha =
John Reck1bcacfd2017-11-03 10:12:19 -0700679 (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
680 ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
681 : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500682
683 // Pick our surface format. For now, just make sure it matches our sRGB request:
684 VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
685 VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
686
687 bool wantSRGB = false;
688#ifdef ANDROID_ENABLE_LINEAR_BLENDING
689 wantSRGB = true;
690#endif
691 for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
692 // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
693 VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
694 if (desiredFormat == surfaceFormats[i].format) {
695 surfaceFormat = surfaceFormats[i].format;
696 colorSpace = surfaceFormats[i].colorSpace;
697 }
698 }
699
700 if (VK_FORMAT_UNDEFINED == surfaceFormat) {
701 return false;
702 }
703
704 // If mailbox mode is available, use it, as it is the lowest-latency non-
705 // tearing mode. If not, fall back to FIFO which is always available.
706 VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
707 for (uint32_t i = 0; i < presentModeCount; ++i) {
708 // use mailbox
709 if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
710 mode = presentModes[i];
711 break;
712 }
713 }
714
715 VkSwapchainCreateInfoKHR swapchainCreateInfo;
716 memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
717 swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
718 swapchainCreateInfo.surface = surface->mVkSurface;
719 swapchainCreateInfo.minImageCount = imageCount;
720 swapchainCreateInfo.imageFormat = surfaceFormat;
721 swapchainCreateInfo.imageColorSpace = colorSpace;
722 swapchainCreateInfo.imageExtent = extent;
723 swapchainCreateInfo.imageArrayLayers = 1;
724 swapchainCreateInfo.imageUsage = usageFlags;
725
Greg Daniel2ff202712018-06-14 11:50:10 -0400726 uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
727 if (mGraphicsQueueIndex != mPresentQueueIndex) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500728 swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
729 swapchainCreateInfo.queueFamilyIndexCount = 2;
730 swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
731 } else {
732 swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
733 swapchainCreateInfo.queueFamilyIndexCount = 0;
734 swapchainCreateInfo.pQueueFamilyIndices = nullptr;
735 }
736
737 swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
738 swapchainCreateInfo.compositeAlpha = composite_alpha;
739 swapchainCreateInfo.presentMode = mode;
740 swapchainCreateInfo.clipped = true;
741 swapchainCreateInfo.oldSwapchain = surface->mSwapchain;
742
Greg Daniel2ff202712018-06-14 11:50:10 -0400743 res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500744 if (VK_SUCCESS != res) {
745 return false;
746 }
747
748 // destroy the old swapchain
749 if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400750 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500751
752 destroyBuffers(surface);
753
Greg Daniel2ff202712018-06-14 11:50:10 -0400754 mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500755 }
756
757 createBuffers(surface, surfaceFormat, extent);
758
759 return true;
760}
761
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500762VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
763 initialize();
764
765 if (!window) {
766 return nullptr;
767 }
768
769 VulkanSurface* surface = new VulkanSurface();
770
771 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
772 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
773 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
774 surfaceCreateInfo.pNext = nullptr;
775 surfaceCreateInfo.flags = 0;
776 surfaceCreateInfo.window = window;
777
Greg Daniel2ff202712018-06-14 11:50:10 -0400778 VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
779 &surface->mVkSurface);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500780 if (VK_SUCCESS != res) {
781 delete surface;
782 return nullptr;
783 }
784
John Reck1bcacfd2017-11-03 10:12:19 -0700785 SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
Greg Daniel2ff202712018-06-14 11:50:10 -0400786 mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
787 // All physical devices and queue families on Android must be capable of
788 // presentation with any native window.
789 SkASSERT(VK_SUCCESS == res && supported););
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500790
791 if (!createSwapchain(surface)) {
792 destroySurface(surface);
793 return nullptr;
794 }
795
796 return surface;
797}
798
799// Helper to know which src stage flags we need to set when transitioning to the present layout
800static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
801 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
802 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
803 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
804 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
805 return VK_PIPELINE_STAGE_TRANSFER_BIT;
806 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
807 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
808 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
809 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
810 return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
811 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
812 return VK_PIPELINE_STAGE_HOST_BIT;
813 }
814
815 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
816 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
817}
818
819// Helper to know which src access mask we need to set when transitioning to the present layout
820static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
821 VkAccessFlags flags = 0;
822 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
823 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700824 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
825 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
826 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500827 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
828 flags = VK_ACCESS_HOST_WRITE_BIT;
829 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
830 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
831 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
832 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
833 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
834 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
835 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
836 flags = VK_ACCESS_TRANSFER_READ_BIT;
837 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
838 flags = VK_ACCESS_SHADER_READ_BIT;
839 }
840 return flags;
841}
842
// Presents the surface's current backbuffer to the screen.
//
// Records and submits a barrier that transitions the backbuffer image to
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and transfers ownership from the graphics
// queue family to the present queue family, then queues the present operation,
// which waits on the semaphore signaled by that submission.
//
// NOTE(review): assumes the surface's current backbuffer was previously set up
// (e.g. by the acquire path) so mCurrentBackbufferIndex is valid — confirm with
// the acquire/dequeue code outside this view.
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    // Debug/diagnostic mode: block until the GPU is fully idle before presenting.
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Pull the VkImage state (layout in particular) back out of the SkSurface that
    // Skia rendered into, so we know what layout to transition from.
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

    // Layout transition + graphics->present queue family ownership transfer.
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
            NULL,                                      // pNext
            srcAccessMask,                             // srcAccessMask
            dstAccessMask,                             // dstAccessMask
            layout,                                    // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,           // newLayout
            mGraphicsQueueIndex,                       // srcQueueFamilyIndex
            mPresentQueueIndex,                        // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex], // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };

    // Record the barrier into the second per-backbuffer command buffer (index 1
    // is the render->present transition; presumably index 0 is used on acquire —
    // confirm against the acquire path outside this view).
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    // Track the CPU-side view of the image's layout so later frames transition correctly.
    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been changed to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
            NULL,                               // pNext
            1,                                  // waitSemaphoreCount
            &backbuffer->mRenderSemaphore,      // pWaitSemaphores
            1,                                  // swapchainCount
            &surface->mSwapchain,               // pSwapchains
            &backbuffer->mImageIndex,           // pImageIndices
            NULL                                // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Release the backbuffer and record per-image bookkeeping used by getAge():
    // this image was last presented at the current frame counter and its contents
    // are now valid for future partial-redraw age queries.
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
934
935int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -0500936 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -0700937 VulkanSurface::BackbufferInfo* backbuffer =
938 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
939 if (mSwapBehavior == SwapBehavior::Discard ||
940 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -0500941 return 0;
942 }
943 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
944 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500945}
946
947} /* namespace renderthread */
948} /* namespace uirenderer */
949} /* namespace android */