// Copyright 2019 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VkPipelineCache.hpp"
#include <cstring>
17
namespace vk {
Alexis Hetu1424ef62019-04-05 18:03:53 -040019
// Key identifying a cached SpirvShader. It captures every input that affects
// how a SPIR-V module is compiled for a pipeline stage: the stage itself, the
// entry point, the raw SPIR-V instructions, the render pass / subpass the
// shader is compiled against, and the specialization constants.
PipelineCache::SpirvShaderKey::SpirvShaderKey(const VkShaderStageFlagBits pipelineStage,
                                              const std::string &entryPointName,
                                              const std::vector<uint32_t> &insns,
                                              const vk::RenderPass *renderPass,
                                              const uint32_t subpassIndex,
                                              const vk::SpecializationInfo &specializationInfo)
    : pipelineStage(pipelineStage)
    , entryPointName(entryPointName)
    , insns(insns)  // copies the full SPIR-V binary into the key
    , renderPass(renderPass)  // stored by pointer; identity, not contents, is keyed
    , subpassIndex(subpassIndex)
    , specializationInfo(specializationInfo)
{
}
34
Ben Claytone6092f32019-07-29 19:44:13 +010035bool PipelineCache::SpirvShaderKey::operator<(const SpirvShaderKey &other) const
36{
37 if(pipelineStage != other.pipelineStage)
38 {
39 return pipelineStage < other.pipelineStage;
40 }
41
42 if(renderPass != other.renderPass)
43 {
44 return renderPass < other.renderPass;
45 }
46
47 if(subpassIndex != other.subpassIndex)
48 {
49 return subpassIndex < other.subpassIndex;
50 }
51
52 if(insns.size() != other.insns.size())
53 {
54 return insns.size() < other.insns.size();
55 }
56
57 if(entryPointName.size() != other.entryPointName.size())
58 {
59 return entryPointName.size() < other.entryPointName.size();
60 }
61
62 int cmp = memcmp(entryPointName.c_str(), other.entryPointName.c_str(), entryPointName.size());
63 if(cmp != 0)
64 {
65 return cmp < 0;
66 }
67
68 cmp = memcmp(insns.data(), other.insns.data(), insns.size() * sizeof(uint32_t));
69 if(cmp != 0)
70 {
71 return cmp < 0;
72 }
73
74 return (specializationInfo < other.specializationInfo);
Alexis Hetu52edb172019-06-26 10:17:18 -040075}
76
Ben Clayton2ed93ab2019-12-17 20:38:03 +000077PipelineCache::PipelineCache(const VkPipelineCacheCreateInfo *pCreateInfo, void *mem)
78 : dataSize(ComputeRequiredAllocationSize(pCreateInfo))
79 , data(reinterpret_cast<uint8_t *>(mem))
Alexis Hetu1424ef62019-04-05 18:03:53 -040080{
Ben Clayton2ed93ab2019-12-17 20:38:03 +000081 CacheHeader *header = reinterpret_cast<CacheHeader *>(mem);
Alexis Hetu1424ef62019-04-05 18:03:53 -040082 header->headerLength = sizeof(CacheHeader);
83 header->headerVersion = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
84 header->vendorID = VENDOR_ID;
85 header->deviceID = DEVICE_ID;
86 memcpy(header->pipelineCacheUUID, SWIFTSHADER_UUID, VK_UUID_SIZE);
87
88 if(pCreateInfo->pInitialData && (pCreateInfo->initialDataSize > 0))
89 {
90 memcpy(data + sizeof(CacheHeader), pCreateInfo->pInitialData, pCreateInfo->initialDataSize);
91 }
92}
93
// Drops all cached entries. The maps hold the cached shader and compute
// program objects (declared in the header); clearing them releases those
// entries. NOTE(review): shaders are cleared before compute programs —
// presumably the order is deliberate if programs reference shaders; confirm
// before reordering.
PipelineCache::~PipelineCache()
{
	spirvShaders.clear();
	computePrograms.clear();
}
99
// Frees the memory block backing this cache ('data' points at the allocation
// that also holds the CacheHeader). Called from vkDestroyPipelineCache.
void PipelineCache::destroy(const VkAllocationCallbacks *pAllocator)
{
	vk::deallocate(data, pAllocator);
}
104
Ben Clayton2ed93ab2019-12-17 20:38:03 +0000105size_t PipelineCache::ComputeRequiredAllocationSize(const VkPipelineCacheCreateInfo *pCreateInfo)
Alexis Hetu1424ef62019-04-05 18:03:53 -0400106{
107 return pCreateInfo->initialDataSize + sizeof(CacheHeader);
108}
109
Ben Clayton2ed93ab2019-12-17 20:38:03 +0000110VkResult PipelineCache::getData(size_t *pDataSize, void *pData)
Alexis Hetu1424ef62019-04-05 18:03:53 -0400111{
112 if(!pData)
113 {
114 *pDataSize = dataSize;
115 return VK_SUCCESS;
116 }
117
118 if(*pDataSize != dataSize)
119 {
120 *pDataSize = 0;
121 return VK_INCOMPLETE;
122 }
123
124 if(*pDataSize > 0)
125 {
126 memcpy(pData, data, *pDataSize);
127 }
128
129 return VK_SUCCESS;
130}
131
// Implements vkMergePipelineCaches: copies the cached shader and compute
// program entries of every source cache into this cache. Entries whose keys
// already exist here are retained (presumably map::insert semantics — the
// container types are declared in the header; verify there).
//
// NOTE(review): locks are always acquired in (this, src) order; two threads
// concurrently merging caches into each other could deadlock — confirm that
// Vulkan's external-synchronization requirement on dstCache rules this out.
VkResult PipelineCache::merge(uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches)
{
	for(uint32_t i = 0; i < srcCacheCount; i++)
	{
		PipelineCache *srcCache = Cast(pSrcCaches[i]);

		{
			// Hold both shader-map locks while bulk-copying the entries.
			marl::lock thisLock(spirvShadersMutex);
			marl::lock srcLock(srcCache->spirvShadersMutex);
			spirvShaders.insert(srcCache->spirvShaders.begin(), srcCache->spirvShaders.end());
		}

		{
			// Same for the compute-program map, under its own pair of locks.
			marl::lock thisLock(computeProgramsMutex);
			marl::lock srcLock(srcCache->computeProgramsMutex);
			computePrograms.insert(srcCache->computePrograms.begin(), srcCache->computePrograms.end());
		}
	}

	return VK_SUCCESS;
}
153
}  // namespace vk