// Copyright 2018 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef sw_SpirvShader_hpp
#define sw_SpirvShader_hpp

#include "SamplerCore.hpp"
#include "ShaderCore.hpp"
#include "SpirvID.hpp"
#include "Device/Config.hpp"
#include "Device/Sampler.hpp"
#include "System/Debug.hpp"
#include "System/Math.hpp"
#include "System/Types.hpp"
#include "Vulkan/VkConfig.hpp"
#include "Vulkan/VkDescriptorSet.hpp"

#define SPV_ENABLE_UTILITY_CODE
#include <spirv/unified1/spirv.hpp>

#include <array>
#include <atomic>
#include <cstdint>
#include <cstring>
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#undef Yield  // b/127920555

namespace vk {

class PipelineLayout;
class ImageView;
class Sampler;
class RenderPass;
struct SampledImageDescriptor;

namespace dbg {
class Context;
}  // namespace dbg

}  // namespace vk

namespace sw {

// Forward declarations.
class SpirvRoutine;

// Incrementally constructed complex bundle of rvalues
// Effectively a restricted vector, supporting only:
// - allocation to a (runtime-known) fixed component count
// - in-place construction of elements
// - const operator[]
class Intermediate
{
public:
	Intermediate(uint32_t componentCount)
	    : componentCount(componentCount)
	    , scalar(new rr::Value *[componentCount])
	{
		for(auto i = 0u; i < componentCount; i++) { scalar[i] = nullptr; }
	}

	~Intermediate()
	{
		delete[] scalar;
	}

	// TypeHint is used as a hint for rr::PrintValue::Ty<sw::Intermediate> to
	// decide the format used to print the intermediate data.
	enum class TypeHint
	{
		Float,
		Int,
		UInt
	};

	void move(uint32_t i, RValue<SIMD::Float> &&scalar) { emplace(i, scalar.value(), TypeHint::Float); }
	void move(uint32_t i, RValue<SIMD::Int> &&scalar) { emplace(i, scalar.value(), TypeHint::Int); }
	void move(uint32_t i, RValue<SIMD::UInt> &&scalar) { emplace(i, scalar.value(), TypeHint::UInt); }

	void move(uint32_t i, const RValue<SIMD::Float> &scalar) { emplace(i, scalar.value(), TypeHint::Float); }
	void move(uint32_t i, const RValue<SIMD::Int> &scalar) { emplace(i, scalar.value(), TypeHint::Int); }
	void move(uint32_t i, const RValue<SIMD::UInt> &scalar) { emplace(i, scalar.value(), TypeHint::UInt); }

	// Value retrieval functions.
	RValue<SIMD::Float> Float(uint32_t i) const
	{
		ASSERT(i < componentCount);
		ASSERT(scalar[i] != nullptr);
		return As<SIMD::Float>(scalar[i]);  // TODO(b/128539387): RValue<SIMD::Float>(scalar)
	}

	RValue<SIMD::Int> Int(uint32_t i) const
	{
		ASSERT(i < componentCount);
		ASSERT(scalar[i] != nullptr);
		return As<SIMD::Int>(scalar[i]);  // TODO(b/128539387): RValue<SIMD::Int>(scalar)
	}

	RValue<SIMD::UInt> UInt(uint32_t i) const
	{
		ASSERT(i < componentCount);
		ASSERT(scalar[i] != nullptr);
		return As<SIMD::UInt>(scalar[i]);  // TODO(b/128539387): RValue<SIMD::UInt>(scalar)
	}

	// No copy/move construction or assignment
	Intermediate(Intermediate const &) = delete;
	Intermediate(Intermediate &&) = delete;
	Intermediate &operator=(Intermediate const &) = delete;
	Intermediate &operator=(Intermediate &&) = delete;

	const uint32_t componentCount;

private:
	void emplace(uint32_t i, rr::Value *value, TypeHint type)
	{
		ASSERT(i < componentCount);
		ASSERT(scalar[i] == nullptr);
		scalar[i] = value;
		RR_PRINT_ONLY(typeHint = type;)
	}

	rr::Value **const scalar;

#ifdef ENABLE_RR_PRINT
	friend struct rr::PrintValue::Ty<sw::Intermediate>;
	TypeHint typeHint = TypeHint::Float;
#endif  // ENABLE_RR_PRINT
};
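
// Usage sketch (illustrative only; 'result' and 'x' are hypothetical names,
// assuming a four-component value):
//
//   Intermediate result(4);                            // fixed component count
//   result.move(0, x.Float(0) * SIMD::Float(2.0f));    // in-place construction, once per index
//   RValue<SIMD::Float> f = result.Float(0);           // typed retrieval of the same component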

class SpirvShader
{
public:
	using InsnStore = std::vector<uint32_t>;
	InsnStore insns;

	using ImageSampler = void(void *texture, void *uvsIn, void *texelOut, void *constants);

	enum class YieldResult
	{
		ControlBarrier,
	};

	class Type;
	class Object;

	// Pseudo-iterator over SPIRV instructions, designed to support range-based-for.
	class InsnIterator
	{
	public:
		InsnIterator(InsnIterator const &other) = default;

		InsnIterator() = default;

		explicit InsnIterator(InsnStore::const_iterator iter)
		    : iter{ iter }
		{
		}

		spv::Op opcode() const
		{
			return static_cast<spv::Op>(*iter & spv::OpCodeMask);
		}

		uint32_t wordCount() const
		{
			return *iter >> spv::WordCountShift;
		}

		uint32_t word(uint32_t n) const
		{
			ASSERT(n < wordCount());
			return iter[n];
		}

		uint32_t const *wordPointer(uint32_t n) const
		{
			ASSERT(n < wordCount());
			return &iter[n];
		}

		const char *string(uint32_t n) const
		{
			return reinterpret_cast<const char *>(wordPointer(n));
		}

		bool hasResultAndType() const
		{
			bool hasResult = false, hasResultType = false;
			spv::HasResultAndType(opcode(), &hasResult, &hasResultType);

			return hasResultType;
		}

		SpirvID<Type> resultTypeId() const
		{
			ASSERT(hasResultAndType());
			return word(1);
		}

		SpirvID<Object> resultId() const
		{
			ASSERT(hasResultAndType());
			return word(2);
		}

		bool operator==(InsnIterator const &other) const
		{
			return iter == other.iter;
		}

		bool operator!=(InsnIterator const &other) const
		{
			return iter != other.iter;
		}

		InsnIterator operator*() const
		{
			return *this;
		}

		InsnIterator &operator++()
		{
			iter += wordCount();
			return *this;
		}

		InsnIterator const operator++(int)
		{
			InsnIterator ret{ *this };
			iter += wordCount();
			return ret;
		}

	private:
		InsnStore::const_iterator iter;
	};

	/* range-based-for interface */
	InsnIterator begin() const
	{
		return InsnIterator{ insns.cbegin() + 5 };
	}

	InsnIterator end() const
	{
		return InsnIterator{ insns.cend() };
	}
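
	// Iteration sketch (illustrative only): begin() skips the five-word SPIR-V
	// module header (magic, version, generator, bound, schema), so a
	// range-based-for visits each instruction in turn:
	//
	//   for(auto insn : *this)
	//   {
	//       switch(insn.opcode()) { /* ... */ }
	//   }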

	class Type
	{
	public:
		using ID = SpirvID<Type>;

		spv::Op opcode() const { return definition.opcode(); }

		InsnIterator definition;
		spv::StorageClass storageClass = static_cast<spv::StorageClass>(-1);
		uint32_t componentCount = 0;
		bool isBuiltInBlock = false;

		// Inner element type for pointers, arrays, vectors and matrices.
		ID element;
	};

	class Object
	{
	public:
		using ID = SpirvID<Object>;

		spv::Op opcode() const { return definition.opcode(); }
		Type::ID typeId() const { return definition.resultTypeId(); }
		Object::ID id() const { return definition.resultId(); }

		InsnIterator definition;
		std::vector<uint32_t> constantValue;

		enum class Kind
		{
			// Invalid default kind.
			// If we get left with an object in this state, the module was
			// broken.
			Unknown,

			// TODO: Better document this kind.
			// A shader interface variable pointer.
			// Pointer with uniform address across all lanes.
			// Pointer held by SpirvRoutine::pointers
			InterfaceVariable,

			// Constant value held by Object::constantValue.
			Constant,

			// Value held by SpirvRoutine::intermediates.
			Intermediate,

			// Pointer held by SpirvRoutine::pointers
			Pointer,

			// A pointer to a vk::DescriptorSet*.
			// Pointer held by SpirvRoutine::pointers.
			DescriptorSet,
		};

		Kind kind = Kind::Unknown;
	};

	// Block is an interval of SPIR-V instructions, starting with the
	// opening OpLabel, and ending with a termination instruction.
	class Block
	{
	public:
		using ID = SpirvID<Block>;
		using Set = std::unordered_set<ID>;

		// Edge represents the graph edge between two blocks.
		struct Edge
		{
			ID from;
			ID to;

			bool operator==(const Edge &other) const { return from == other.from && to == other.to; }

			struct Hash
			{
				std::size_t operator()(const Edge &edge) const noexcept
				{
					return std::hash<uint32_t>()(edge.from.value() * 31 + edge.to.value());
				}
			};
		};

		Block() = default;
		Block(const Block &other) = default;
		explicit Block(InsnIterator begin, InsnIterator end);

		/* range-based-for interface */
		inline InsnIterator begin() const { return begin_; }
		inline InsnIterator end() const { return end_; }

		enum Kind
		{
			Simple,                         // OpBranch or other simple terminator.
			StructuredBranchConditional,    // OpSelectionMerge + OpBranchConditional
			UnstructuredBranchConditional,  // OpBranchConditional
			StructuredSwitch,               // OpSelectionMerge + OpSwitch
			UnstructuredSwitch,             // OpSwitch
			Loop,                           // OpLoopMerge + [OpBranchConditional | OpBranch]
		};

		Kind kind = Simple;
		InsnIterator mergeInstruction;   // Structured control flow merge instruction.
		InsnIterator branchInstruction;  // Branch instruction.
		ID mergeBlock;                   // Structured flow merge block.
		ID continueTarget;               // Loop continue block.
		Set ins;                         // Blocks that branch into this block.
		Set outs;                        // Blocks that this block branches to.
		bool isLoopMerge = false;

	private:
		InsnIterator begin_;
		InsnIterator end_;
	};

	class Function
	{
	public:
		using ID = SpirvID<Function>;

		// Walks all the reachable blocks starting from id, adding them to
		// reachable.
		void TraverseReachableBlocks(Block::ID id, Block::Set &reachable) const;

		// AssignBlockFields() performs the following for all reachable blocks:
		// * Assigns Block::ins with the identifiers of all blocks that contain
		//   this block in their Block::outs.
		// * Sets Block::isLoopMerge to true if the block is the merge block of
		//   another loop block.
		void AssignBlockFields();

		// ForeachBlockDependency calls f with each dependency of the given
		// block. A dependency is an incoming block that is not a loop-back
		// edge.
		void ForeachBlockDependency(Block::ID blockId, std::function<void(Block::ID)> f) const;

		// ExistsPath returns true if there's a direct or indirect flow from
		// the 'from' block to the 'to' block that does not pass through
		// notPassingThrough.
		bool ExistsPath(Block::ID from, Block::ID to, Block::ID notPassingThrough) const;

		Block const &getBlock(Block::ID id) const
		{
			auto it = blocks.find(id);
			ASSERT_MSG(it != blocks.end(), "Unknown block %d", id.value());
			return it->second;
		}

		Block::ID entry;          // function entry point block.
		HandleMap<Block> blocks;  // blocks belonging to this function.
		Type::ID type;            // type of the function.
		Type::ID result;          // return type.
	};

	using String = std::string;
	using StringID = SpirvID<std::string>;

	class Extension
	{
	public:
		using ID = SpirvID<Extension>;

		enum Name
		{
			Unknown,
			GLSLstd450,
			OpenCLDebugInfo100
		};

		Name name;
	};

	struct TypeOrObject
	{};  // Dummy struct to represent a Type or Object.

	// TypeOrObjectID is an identifier that represents a Type or an Object,
	// and supports implicit casting to and from Type::ID or Object::ID.
	class TypeOrObjectID : public SpirvID<TypeOrObject>
	{
	public:
		using Hash = std::hash<SpirvID<TypeOrObject>>;

		inline TypeOrObjectID(uint32_t id)
		    : SpirvID(id)
		{}
		inline TypeOrObjectID(Type::ID id)
		    : SpirvID(id.value())
		{}
		inline TypeOrObjectID(Object::ID id)
		    : SpirvID(id.value())
		{}
		inline operator Type::ID() const { return Type::ID(value()); }
		inline operator Object::ID() const { return Object::ID(value()); }
	};

	// OpImageSample variants
	enum Variant
	{
		None,  // No Dref or Proj. Also used by OpImageFetch and OpImageQueryLod.
		Dref,
		Proj,
		ProjDref,
		VARIANT_LAST = ProjDref
	};

	// Compact representation of image instruction parameters that is passed to the
	// trampoline function for retrieving/generating the corresponding sampling routine.
	struct ImageInstruction
	{
		ImageInstruction(Variant variant, SamplerMethod samplerMethod)
		    : parameters(0)
		{
			this->variant = variant;
			this->samplerMethod = samplerMethod;
		}

		// Unmarshal from raw 32-bit data
		ImageInstruction(uint32_t parameters)
		    : parameters(parameters)
		{}

		SamplerFunction getSamplerFunction() const
		{
			return { static_cast<SamplerMethod>(samplerMethod), offset != 0, sample != 0 };
		}

		bool isDref() const
		{
			return (variant == Dref) || (variant == ProjDref);
		}

		bool isProj() const
		{
			return (variant == Proj) || (variant == ProjDref);
		}

		union
		{
			struct
			{
				uint32_t variant : BITS(VARIANT_LAST);
				uint32_t samplerMethod : BITS(SAMPLER_METHOD_LAST);
				uint32_t gatherComponent : 2;

				// Parameters are passed to the sampling routine in this order:
				uint32_t coordinates : 3;  // 1-4 (does not contain projection component)
				// uint32_t dref : 1;      // Indicated by Variant::ProjDref|Dref
				// uint32_t lodOrBias : 1; // Indicated by SamplerMethod::Lod|Bias|Fetch
				uint32_t grad : 2;         // 0-3 components (for each of dx / dy)
				uint32_t offset : 2;       // 0-3 components
				uint32_t sample : 1;       // 0-1 scalar integer
			};

			uint32_t parameters;
		};
	};

	static_assert(sizeof(ImageInstruction) == sizeof(uint32_t), "ImageInstruction must be 32-bit");
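
	// Packing sketch (illustrative only; the sampler method value used here is
	// an assumption): an ImageInstruction round-trips through its raw 32-bit
	// representation when handed to the sampling-routine trampoline:
	//
	//   ImageInstruction instruction(Dref, samplerMethod);  // marshal fields into the bitfield
	//   uint32_t raw = instruction.parameters;              // plain 32-bit word
	//   ImageInstruction unmarshaled(raw);                  // reconstruct on the other side
	//   ASSERT(unmarshaled.isDref());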

	// This method is for retrieving an ID that uniquely identifies the
	// shader entry point represented by this object.
	uint64_t getSerialID() const
	{
		return ((uint64_t)entryPoint.value() << 32) | codeSerialID;
	}

	SpirvShader(uint32_t codeSerialID,
	            VkShaderStageFlagBits stage,
	            const char *entryPointName,
	            InsnStore const &insns,
	            const vk::RenderPass *renderPass,
	            uint32_t subpassIndex,
	            bool robustBufferAccess,
	            const std::shared_ptr<vk::dbg::Context> &dbgctx);

	~SpirvShader();

	struct Modes
	{
		bool EarlyFragmentTests : 1;
		bool DepthReplacing : 1;
		bool DepthGreater : 1;
		bool DepthLess : 1;
		bool DepthUnchanged : 1;
		bool ContainsKill : 1;
		bool ContainsControlBarriers : 1;
		bool NeedsCentroid : 1;

		// Compute workgroup dimensions
		int WorkgroupSizeX = 1;
		int WorkgroupSizeY = 1;
		int WorkgroupSizeZ = 1;
	};

	Modes const &getModes() const
	{
		return modes;
	}

	struct Capabilities
	{
		bool Matrix : 1;
		bool Shader : 1;
		bool ClipDistance : 1;
		bool CullDistance : 1;
		bool InputAttachment : 1;
		bool Sampled1D : 1;
		bool Image1D : 1;
		bool ImageCubeArray : 1;
		bool SampledBuffer : 1;
		bool SampledCubeArray : 1;
		bool ImageBuffer : 1;
		bool StorageImageExtendedFormats : 1;
		bool ImageQuery : 1;
		bool DerivativeControl : 1;
		bool GroupNonUniform : 1;
		bool GroupNonUniformVote : 1;
		bool GroupNonUniformBallot : 1;
		bool GroupNonUniformShuffle : 1;
		bool GroupNonUniformShuffleRelative : 1;
		bool GroupNonUniformArithmetic : 1;
		bool DeviceGroup : 1;
		bool MultiView : 1;
		bool StencilExportEXT : 1;
	};

	Capabilities const &getUsedCapabilities() const
	{
		return capabilities;
	}

	// getNumOutputClipDistances() returns the number of ClipDistances
	// outputted by this shader.
	unsigned int getNumOutputClipDistances() const
	{
		if(getUsedCapabilities().ClipDistance)
		{
			auto it = outputBuiltins.find(spv::BuiltInClipDistance);
			if(it != outputBuiltins.end())
			{
				return it->second.SizeInComponents;
			}
		}
		return 0;
	}

	// getNumOutputCullDistances() returns the number of CullDistances
	// outputted by this shader.
	unsigned int getNumOutputCullDistances() const
	{
		if(getUsedCapabilities().CullDistance)
		{
			auto it = outputBuiltins.find(spv::BuiltInCullDistance);
			if(it != outputBuiltins.end())
			{
				return it->second.SizeInComponents;
			}
		}
		return 0;
	}

	enum AttribType : unsigned char
	{
		ATTRIBTYPE_FLOAT,
		ATTRIBTYPE_INT,
		ATTRIBTYPE_UINT,
		ATTRIBTYPE_UNUSED,

		ATTRIBTYPE_LAST = ATTRIBTYPE_UINT
	};

	bool hasBuiltinInput(spv::BuiltIn b) const
	{
		return inputBuiltins.find(b) != inputBuiltins.end();
	}

	bool hasBuiltinOutput(spv::BuiltIn b) const
	{
		return outputBuiltins.find(b) != outputBuiltins.end();
	}

	struct Decorations
	{
		int32_t Location = -1;
		int32_t Component = 0;
		spv::BuiltIn BuiltIn = static_cast<spv::BuiltIn>(-1);
		int32_t Offset = -1;
		int32_t ArrayStride = -1;
		int32_t MatrixStride = 1;

		bool HasLocation : 1;
		bool HasComponent : 1;
		bool HasBuiltIn : 1;
		bool HasOffset : 1;
		bool HasArrayStride : 1;
		bool HasMatrixStride : 1;
		bool HasRowMajor : 1;  // whether RowMajor bit is valid.

		bool Flat : 1;
		bool Centroid : 1;
		bool NoPerspective : 1;
		bool Block : 1;
		bool BufferBlock : 1;
		bool RelaxedPrecision : 1;
		bool RowMajor : 1;      // RowMajor if true; ColMajor if false
		bool InsideMatrix : 1;  // pseudo-decoration for whether we're inside a matrix.

		Decorations()
		    : Location{ -1 }
		    , Component{ 0 }
		    , BuiltIn{ static_cast<spv::BuiltIn>(-1) }
		    , Offset{ -1 }
		    , ArrayStride{ -1 }
		    , MatrixStride{ -1 }
		    , HasLocation{ false }
		    , HasComponent{ false }
		    , HasBuiltIn{ false }
		    , HasOffset{ false }
		    , HasArrayStride{ false }
		    , HasMatrixStride{ false }
		    , HasRowMajor{ false }
		    , Flat{ false }
		    , Centroid{ false }
		    , NoPerspective{ false }
		    , Block{ false }
		    , BufferBlock{ false }
		    , RelaxedPrecision{ false }
		    , RowMajor{ false }
		    , InsideMatrix{ false }
		{
		}

		Decorations(Decorations const &) = default;

		void Apply(Decorations const &src);

		void Apply(spv::Decoration decoration, uint32_t arg);
	};

	std::unordered_map<TypeOrObjectID, Decorations, TypeOrObjectID::Hash> decorations;
	std::unordered_map<Type::ID, std::vector<Decorations>> memberDecorations;

	struct DescriptorDecorations
	{
		int32_t DescriptorSet = -1;
		int32_t Binding = -1;
		int32_t InputAttachmentIndex = -1;

		void Apply(DescriptorDecorations const &src);
	};

	std::unordered_map<Object::ID, DescriptorDecorations> descriptorDecorations;
	std::vector<VkFormat> inputAttachmentFormats;

	struct InterfaceComponent
	{
		AttribType Type;

		union
		{
			struct
			{
				bool Flat : 1;
				bool Centroid : 1;
				bool NoPerspective : 1;
			};

			uint8_t DecorationBits;
		};

		InterfaceComponent()
		    : Type{ ATTRIBTYPE_UNUSED }
		    , DecorationBits{ 0 }
		{
		}
	};

	struct BuiltinMapping
	{
		Object::ID Id;
		uint32_t FirstComponent;
		uint32_t SizeInComponents;
	};

	struct WorkgroupMemory
	{
		// allocates a new variable of size bytes with the given identifier.
		inline void allocate(Object::ID id, uint32_t size)
		{
			uint32_t offset = totalSize;
			auto it = offsets.emplace(id, offset);
			ASSERT_MSG(it.second, "WorkgroupMemory already has an allocation for object %d", int(id.value()));
			totalSize += size;
		}
		// returns the byte offset of the variable with the given identifier.
		inline uint32_t offsetOf(Object::ID id) const
		{
			auto it = offsets.find(id);
			ASSERT_MSG(it != offsets.end(), "WorkgroupMemory has no allocation for object %d", int(id.value()));
			return it->second;
		}
		// returns the total allocated size in bytes.
		inline uint32_t size() const { return totalSize; }

	private:
		uint32_t totalSize = 0;                            // in bytes
		std::unordered_map<Object::ID, uint32_t> offsets;  // in bytes
	};
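
	// Usage sketch (illustrative only; the ids and sizes are hypothetical):
	//
	//   WorkgroupMemory memory;
	//   memory.allocate(Object::ID(7), 64);                 // 64 bytes for one Workgroup variable
	//   memory.allocate(Object::ID(9), 16);                 // laid out after the first allocation
	//   uint32_t offset = memory.offsetOf(Object::ID(9));   // 64
	//   uint32_t total = memory.size();                     // 80 bytes in total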

	std::vector<InterfaceComponent> inputs;
	std::vector<InterfaceComponent> outputs;

	void emitProlog(SpirvRoutine *routine) const;
	void emit(SpirvRoutine *routine, RValue<SIMD::Int> const &activeLaneMask, RValue<SIMD::Int> const &storesAndAtomicsMask, const vk::DescriptorSet::Bindings &descriptorSets) const;
	void emitEpilog(SpirvRoutine *routine) const;

	bool containsImageWrite() const { return imageWriteEmitted; }

	using BuiltInHash = std::hash<std::underlying_type<spv::BuiltIn>::type>;
	std::unordered_map<spv::BuiltIn, BuiltinMapping, BuiltInHash> inputBuiltins;
	std::unordered_map<spv::BuiltIn, BuiltinMapping, BuiltInHash> outputBuiltins;
	WorkgroupMemory workgroupMemory;

private:
	const uint32_t codeSerialID;
	Modes modes = {};
	Capabilities capabilities = {};
	HandleMap<Type> types;
	HandleMap<Object> defs;
	HandleMap<Function> functions;
	std::unordered_map<StringID, String> strings;
	HandleMap<Extension> extensionsByID;
	std::unordered_set<Extension::Name> extensionsImported;
	Function::ID entryPoint;
	mutable bool imageWriteEmitted = false;

	const bool robustBufferAccess = true;
	spv::ExecutionModel executionModel = spv::ExecutionModelMax;  // Invalid prior to OpEntryPoint parsing.

	// DeclareType creates a Type for the given OpTypeX instruction, storing
	// it into the types map. It is called from the analysis pass (constructor).
	void DeclareType(InsnIterator insn);

	void ProcessExecutionMode(InsnIterator it);

	uint32_t ComputeTypeSize(InsnIterator insn);
	void ApplyDecorationsForId(Decorations *d, TypeOrObjectID id) const;
	void ApplyDecorationsForIdMember(Decorations *d, Type::ID id, uint32_t member) const;
	void ApplyDecorationsForAccessChain(Decorations *d, DescriptorDecorations *dd, Object::ID baseId, uint32_t numIndexes, uint32_t const *indexIds) const;

	// Creates an Object for the instruction's result in 'defs'.
	void DefineResult(const InsnIterator &insn);

	// Processes the OpenCL.Debug.100 instruction for the initial definition
	// pass of the SPIR-V.
	void DefineOpenCLDebugInfo100(const InsnIterator &insn);

	// Returns true if data in the given storage class is word-interleaved
	// by each SIMD vector lane, otherwise data is stored linearly.
	//
	// Each lane addresses a single word, picked by a base pointer and an
	// integer offset.
	//
	// A word is currently 32 bits (single float, int32_t, uint32_t).
	// A lane is a single element of a SIMD vector register.
	//
	// Storage interleaved by lane - (IsStorageInterleavedByLane() == true):
	// ---------------------------------------------------------------------
	//
	// Address = PtrBase + sizeof(Word) * (SIMD::Width * LaneOffset + LaneIndex)
	//
	// Assuming SIMD::Width == 4:
	//
	//                    Lane[0]   |  Lane[1]  |  Lane[2]  |  Lane[3]
	//                 ===========+===========+===========+==========
	//  LaneOffset=0: |  Word[0]  |  Word[1]  |  Word[2]  |  Word[3]
	// ---------------+-----------+-----------+-----------+----------
	//  LaneOffset=1: |  Word[4]  |  Word[5]  |  Word[6]  |  Word[7]
	// ---------------+-----------+-----------+-----------+----------
	//  LaneOffset=2: |  Word[8]  |  Word[9]  |  Word[a]  |  Word[b]
	// ---------------+-----------+-----------+-----------+----------
	//  LaneOffset=3: |  Word[c]  |  Word[d]  |  Word[e]  |  Word[f]
	//
	//
	// Linear storage - (IsStorageInterleavedByLane() == false):
	// ---------------------------------------------------------
	//
	// Address = PtrBase + sizeof(Word) * LaneOffset
	//
	//                    Lane[0]   |  Lane[1]  |  Lane[2]  |  Lane[3]
	//                 ===========+===========+===========+==========
	//  LaneOffset=0: |  Word[0]  |  Word[0]  |  Word[0]  |  Word[0]
	// ---------------+-----------+-----------+-----------+----------
	//  LaneOffset=1: |  Word[1]  |  Word[1]  |  Word[1]  |  Word[1]
	// ---------------+-----------+-----------+-----------+----------
	//  LaneOffset=2: |  Word[2]  |  Word[2]  |  Word[2]  |  Word[2]
	// ---------------+-----------+-----------+-----------+----------
	//  LaneOffset=3: |  Word[3]  |  Word[3]  |  Word[3]  |  Word[3]
	//
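	// Worked example for the interleaved case (SIMD::Width == 4,
	// sizeof(Word) == 4): lane 1 reading LaneOffset 2 addresses
	// PtrBase + 4 * (4 * 2 + 1) = PtrBase + 36, i.e. Word[9] in the table above.
	//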
	static bool IsStorageInterleavedByLane(spv::StorageClass storageClass);
	static bool IsExplicitLayout(spv::StorageClass storageClass);

	static sw::SIMD::Pointer InterleaveByLane(sw::SIMD::Pointer p);

	// Output storage buffers and images should not be affected by helper invocations
	static bool StoresInHelperInvocation(spv::StorageClass storageClass);

	using InterfaceVisitor = std::function<void(Decorations const, AttribType)>;

	void VisitInterface(Object::ID id, const InterfaceVisitor &v) const;

	int VisitInterfaceInner(Type::ID id, Decorations d, const InterfaceVisitor &v) const;

	// MemoryElement describes a scalar element within a structure, and is
	// used by the callback function of VisitMemoryObject().
	struct MemoryElement
	{
		uint32_t index;    // index of the scalar element
		uint32_t offset;   // offset (in bytes) from the base of the object
		const Type &type;  // element type
	};

	using MemoryVisitor = std::function<void(const MemoryElement &)>;

	// VisitMemoryObject() walks a type tree in an explicitly laid out
	// storage class, calling the MemoryVisitor for each scalar element
	// within the object.
	void VisitMemoryObject(Object::ID id, const MemoryVisitor &v) const;

	// VisitMemoryObjectInner() is internally called by VisitMemoryObject()
	void VisitMemoryObjectInner(Type::ID id, Decorations d, uint32_t &index, uint32_t offset, const MemoryVisitor &v) const;

	Object &CreateConstant(InsnIterator it);

	void ProcessInterfaceVariable(Object &object);

	// EmitState holds control-flow state for the emit() pass.
	class EmitState
	{
	public:
		EmitState(SpirvRoutine *routine,
		          Function::ID function,
		          RValue<SIMD::Int> activeLaneMask,
		          RValue<SIMD::Int> storesAndAtomicsMask,
		          const vk::DescriptorSet::Bindings &descriptorSets,
		          bool robustBufferAccess,
		          spv::ExecutionModel executionModel)
		    : routine(routine)
		    , function(function)
		    , activeLaneMaskValue(activeLaneMask.value())
		    , storesAndAtomicsMaskValue(storesAndAtomicsMask.value())
		    , descriptorSets(descriptorSets)
		    , robustBufferAccess(robustBufferAccess)
		    , executionModel(executionModel)
		{
			ASSERT(executionModelToStage(executionModel) != VkShaderStageFlagBits(0));  // Must parse OpEntryPoint before emitting.
		}

		// Returns the mask describing the active lanes as updated by dynamic
		// control flow. Active lanes include helper invocations, used for
		// calculating fragment derivatives, which must not perform memory
		// stores or atomic writes.
		//
		// Use activeStoresAndAtomicsMask() to consider both control flow and
		// lanes which are permitted to perform memory stores and atomic
		// operations.
		RValue<SIMD::Int> activeLaneMask() const
		{
			ASSERT(activeLaneMaskValue != nullptr);
			return RValue<SIMD::Int>(activeLaneMaskValue);
		}

		// Returns the immutable lane mask that describes which lanes are
		// permitted to perform memory stores and atomic operations.
		// Note that unlike activeStoresAndAtomicsMask() this mask *does not*
		// consider lanes that have been made inactive due to control flow.
		RValue<SIMD::Int> storesAndAtomicsMask() const
		{
			ASSERT(storesAndAtomicsMaskValue != nullptr);
			return RValue<SIMD::Int>(storesAndAtomicsMaskValue);
		}

		// Returns a lane mask that describes which lanes are permitted to
		// perform memory stores and atomic operations, considering lanes that
		// may have been made inactive due to control flow.
		RValue<SIMD::Int> activeStoresAndAtomicsMask() const
		{
			return activeLaneMask() & storesAndAtomicsMask();
		}

		// Add a new active lane mask edge from the current block to out.
		// The edge mask value will be (mask AND activeLaneMaskValue).
		// If multiple active lane masks are added for the same edge, then
		// they will be ORed together.
		void addOutputActiveLaneMaskEdge(Block::ID out, RValue<SIMD::Int> mask);

		// Add a new active lane mask for the edge from -> to.
		// If multiple active lane masks are added for the same edge, then
		// they will be ORed together.
		void addActiveLaneMaskEdge(Block::ID from, Block::ID to, RValue<SIMD::Int> mask);

		SpirvRoutine *routine = nullptr;                 // The current routine being built.
		Function::ID function;                           // The current function being built.
		Block::ID block;                                 // The current block being built.
		rr::Value *activeLaneMaskValue = nullptr;        // The current active lane mask.
		rr::Value *storesAndAtomicsMaskValue = nullptr;  // The current atomics mask.
		Block::Set visited;                              // Blocks already built.
		std::unordered_map<Block::Edge, RValue<SIMD::Int>, Block::Edge::Hash> edgeActiveLaneMasks;
		std::deque<Block::ID> *pending;

		const vk::DescriptorSet::Bindings &descriptorSets;

		OutOfBoundsBehavior getOutOfBoundsBehavior(spv::StorageClass storageClass) const;

		Intermediate &createIntermediate(Object::ID id, uint32_t componentCount)
		{
			auto it = intermediates.emplace(std::piecewise_construct,
			                                std::forward_as_tuple(id),
			                                std::forward_as_tuple(componentCount));
			ASSERT_MSG(it.second, "Intermediate %d created twice", id.value());
			return it.first->second;
		}

		Intermediate const &getIntermediate(Object::ID id) const
		{
			auto it = intermediates.find(id);
			ASSERT_MSG(it != intermediates.end(), "Unknown intermediate %d", id.value());
			return it->second;
		}

		void createPointer(Object::ID id, SIMD::Pointer ptr)
		{
			bool added = pointers.emplace(id, ptr).second;
			ASSERT_MSG(added, "Pointer %d created twice", id.value());
		}

		SIMD::Pointer const &getPointer(Object::ID id) const
		{
			auto it = pointers.find(id);
			ASSERT_MSG(it != pointers.end(), "Unknown pointer %d", id.value());
			return it->second;
		}

	private:
		std::unordered_map<Object::ID, Intermediate> intermediates;
		std::unordered_map<Object::ID, SIMD::Pointer> pointers;

		const bool robustBufferAccess = true;  // Emit robustBufferAccess safe code.
		const spv::ExecutionModel executionModel = spv::ExecutionModelMax;
	};

	// EmitResult is an enumerator of result values from the Emit functions.
	enum class EmitResult
	{
		Continue,    // No termination instructions.
		Terminator,  // Reached a termination instruction.
	};

	// Generic wrapper over either per-lane intermediate value, or a constant.
	// Constants are transparently widened to per-lane values in operator[].
	// This is appropriate in most cases -- if we're not going to do something
	// significantly different based on whether the value is uniform across lanes.
	class Operand
	{
	public:
		Operand(const SpirvShader *shader, const EmitState *state, SpirvShader::Object::ID objectId);
		Operand(const Intermediate &value);

		RValue<SIMD::Float> Float(uint32_t i) const
		{
			if(intermediate)
			{
				return intermediate->Float(i);
			}

			// Constructing a constant SIMD::Float is not guaranteed to preserve the data's exact
			// bit pattern, but SPIR-V provides 32-bit words representing "the bit pattern for the constant".
			// Thus we must first construct an integer constant, and bitcast to float.
			return As<SIMD::Float>(SIMD::UInt(constant[i]));
		}

		RValue<SIMD::Int> Int(uint32_t i) const
		{
			if(intermediate)
			{
				return intermediate->Int(i);
			}

			return SIMD::Int(constant[i]);
		}

		RValue<SIMD::UInt> UInt(uint32_t i) const
		{
			if(intermediate)
			{
				return intermediate->UInt(i);
			}

			return SIMD::UInt(constant[i]);
		}

	private:
		RR_PRINT_ONLY(friend struct rr::PrintValue::Ty<Operand>;)

		// Delegate constructor
		Operand(const EmitState *state, const Object &object);

		const uint32_t *constant;
		const Intermediate *intermediate;

	public:
		const uint32_t componentCount;
	};
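
	// Usage sketch (illustrative only; assumes 'insn' is a binary-op style
	// instruction with its operand IDs in words 3 and 4, and 'dst' is the
	// result Intermediate):
	//
	//   Operand lhs(this, state, insn.word(3));
	//   Operand rhs(this, state, insn.word(4));
	//   for(auto i = 0u; i < lhs.componentCount; i++)
	//   {
	//       dst.move(i, lhs.Float(i) + rhs.Float(i));  // constants widen transparently per lane
	//   }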

	RR_PRINT_ONLY(friend struct rr::PrintValue::Ty<Operand>;)

	Type const &getType(Type::ID id) const
	{
		auto it = types.find(id);
		ASSERT_MSG(it != types.end(), "Unknown type %d", id.value());
		return it->second;
	}

	Type const &getType(const Object &object) const
	{
		return getType(object.typeId());
	}

	Object const &getObject(Object::ID id) const
	{
		auto it = defs.find(id);
		ASSERT_MSG(it != defs.end(), "Unknown object %d", id.value());
		return it->second;
	}

	Function const &getFunction(Function::ID id) const
	{
		auto it = functions.find(id);
		ASSERT_MSG(it != functions.end(), "Unknown function %d", id.value());
		return it->second;
	}

	String const &getString(StringID id) const
	{
		auto it = strings.find(id);
		ASSERT_MSG(it != strings.end(), "Unknown string %d", id.value());
		return it->second;
	}

	Extension const &getExtension(Extension::ID id) const
	{
		auto it = extensionsByID.find(id);
		ASSERT_MSG(it != extensionsByID.end(), "Unknown extension %d", id.value());
		return it->second;
	}

	// Returns a SIMD::Pointer to the underlying data for the given pointer
	// object.
	// Handles objects of the following kinds:
	//  - DescriptorSet
	//  - Pointer
	//  - InterfaceVariable
	// Calling GetPointerToData with objects of any other kind will assert.
	SIMD::Pointer GetPointerToData(Object::ID id, Int arrayIndex, EmitState const *state) const;

	SIMD::Pointer WalkExplicitLayoutAccessChain(Object::ID id, uint32_t numIndexes, uint32_t const *indexIds, EmitState const *state) const;
	SIMD::Pointer WalkAccessChain(Object::ID id, uint32_t numIndexes, uint32_t const *indexIds, EmitState const *state) const;

	// Returns the *component* offset in the literal for the given access chain.
	uint32_t WalkLiteralAccessChain(Type::ID id, uint32_t numIndexes, uint32_t const *indexes) const;

	// Lookup the active lane mask for the edge from -> to.
	// If from is unreachable, then a mask of all zeros is returned.
	// Asserts if from is reachable and the edge does not exist.
	RValue<SIMD::Int> GetActiveLaneMaskEdge(EmitState *state, Block::ID from, Block::ID to) const;

	// Updates the current active lane mask.
	void SetActiveLaneMask(RValue<SIMD::Int> mask, EmitState *state) const;

	// Emit all the unvisited blocks (except for ignore) in DFS order,
	// starting with id.
	void EmitBlocks(Block::ID id, EmitState *state, Block::ID ignore = 0) const;
	void EmitNonLoop(EmitState *state) const;
	void EmitLoop(EmitState *state) const;

	void EmitInstructions(InsnIterator begin, InsnIterator end, EmitState *state) const;
	EmitResult EmitInstruction(InsnIterator insn, EmitState *state) const;

	// Emit pass instructions:
	EmitResult EmitVariable(InsnIterator insn, EmitState *state) const;
	EmitResult EmitLoad(InsnIterator insn, EmitState *state) const;
	EmitResult EmitStore(InsnIterator insn, EmitState *state) const;
	EmitResult EmitAccessChain(InsnIterator insn, EmitState *state) const;
	EmitResult EmitCompositeConstruct(InsnIterator insn, EmitState *state) const;
	EmitResult EmitCompositeInsert(InsnIterator insn, EmitState *state) const;
	EmitResult EmitCompositeExtract(InsnIterator insn, EmitState *state) const;
	EmitResult EmitVectorShuffle(InsnIterator insn, EmitState *state) const;
	EmitResult EmitVectorTimesScalar(InsnIterator insn, EmitState *state) const;
	EmitResult EmitMatrixTimesVector(InsnIterator insn, EmitState *state) const;
	EmitResult EmitVectorTimesMatrix(InsnIterator insn, EmitState *state) const;
	EmitResult EmitMatrixTimesMatrix(InsnIterator insn, EmitState *state) const;
	EmitResult EmitOuterProduct(InsnIterator insn, EmitState *state) const;
	EmitResult EmitTranspose(InsnIterator insn, EmitState *state) const;
	EmitResult EmitVectorExtractDynamic(InsnIterator insn, EmitState *state) const;
	EmitResult EmitVectorInsertDynamic(InsnIterator insn, EmitState *state) const;
	EmitResult EmitUnaryOp(InsnIterator insn, EmitState *state) const;
	EmitResult EmitBinaryOp(InsnIterator insn, EmitState *state) const;
	EmitResult EmitDot(InsnIterator insn, EmitState *state) const;
	EmitResult EmitSelect(InsnIterator insn, EmitState *state) const;
	EmitResult EmitExtendedInstruction(InsnIterator insn, EmitState *state) const;
	EmitResult EmitExtGLSLstd450(InsnIterator insn, EmitState *state) const;
	EmitResult EmitOpenCLDebugInfo100(InsnIterator insn, EmitState *state) const;
	EmitResult EmitLine(InsnIterator insn, EmitState *state) const;
	EmitResult EmitAny(InsnIterator insn, EmitState *state) const;
	EmitResult EmitAll(InsnIterator insn, EmitState *state) const;
	EmitResult EmitBranch(InsnIterator insn, EmitState *state) const;
	EmitResult EmitBranchConditional(InsnIterator insn, EmitState *state) const;
	EmitResult EmitSwitch(InsnIterator insn, EmitState *state) const;
	EmitResult EmitUnreachable(InsnIterator insn, EmitState *state) const;
	EmitResult EmitReturn(InsnIterator insn, EmitState *state) const;
	EmitResult EmitKill(InsnIterator insn, EmitState *state) const;
	EmitResult EmitFunctionCall(InsnIterator insn, EmitState *state) const;
	EmitResult EmitPhi(InsnIterator insn, EmitState *state) const;
	EmitResult EmitImageSampleImplicitLod(Variant variant, InsnIterator insn, EmitState *state) const;
	EmitResult EmitImageSampleExplicitLod(Variant variant, InsnIterator insn, EmitState *state) const;
	EmitResult EmitImageGather(Variant variant, InsnIterator insn, EmitState *state) const;
	EmitResult EmitImageFetch(InsnIterator insn, EmitState *state) const;
	EmitResult EmitImageSample(ImageInstruction instruction, InsnIterator insn, EmitState *state) const;
	EmitResult EmitImageQuerySizeLod(InsnIterator insn, EmitState *state) const;
	EmitResult EmitImageQuerySize(InsnIterator insn, EmitState *state) const;
	EmitResult EmitImageQueryLod(InsnIterator insn, EmitState *state) const;
	EmitResult EmitImageQueryLevels(InsnIterator insn, EmitState *state) const;
	EmitResult EmitImageQuerySamples(InsnIterator insn, EmitState *state) const;
	EmitResult EmitImageRead(InsnIterator insn, EmitState *state) const;
	EmitResult EmitImageWrite(InsnIterator insn, EmitState *state) const;
	EmitResult EmitImageTexelPointer(InsnIterator insn, EmitState *state) const;
	EmitResult EmitAtomicOp(InsnIterator insn, EmitState *state) const;
	EmitResult EmitAtomicCompareExchange(InsnIterator insn, EmitState *state) const;
	EmitResult EmitSampledImageCombineOrSplit(InsnIterator insn, EmitState *state) const;
	EmitResult EmitCopyObject(InsnIterator insn, EmitState *state) const;
	EmitResult EmitCopyMemory(InsnIterator insn, EmitState *state) const;
	EmitResult EmitControlBarrier(InsnIterator insn, EmitState *state) const;
	EmitResult EmitMemoryBarrier(InsnIterator insn, EmitState *state) const;
	EmitResult EmitGroupNonUniform(InsnIterator insn, EmitState *state) const;
	EmitResult EmitArrayLength(InsnIterator insn, EmitState *state) const;

	// Emits code to sample an image, regardless of whether any SIMD lanes are active.
	void EmitImageSampleUnconditional(Array<SIMD::Float> &out, ImageInstruction instruction, InsnIterator insn, EmitState *state) const;

	void GetImageDimensions(EmitState const *state, Type const &resultTy, Object::ID imageId, Object::ID lodId, Intermediate &dst) const;
	SIMD::Pointer GetTexelAddress(EmitState const *state, Pointer<Byte> imageBase, Int imageSizeInBytes, Operand const &coordinate, Type const &imageType, Pointer<Byte> descriptor, int texelSize, Object::ID sampleId, bool useStencilAspect, OutOfBoundsBehavior outOfBoundsBehavior) const;
	uint32_t GetConstScalarInt(Object::ID id) const;
	void EvalSpecConstantOp(InsnIterator insn);
	void EvalSpecConstantUnaryOp(InsnIterator insn);
	void EvalSpecConstantBinaryOp(InsnIterator insn);

	// Helper for implementing OpStore, which doesn't take an InsnIterator so it
	// can also store independent operands.
	void Store(Object::ID pointerId, const Operand &value, bool atomic, std::memory_order memoryOrder, EmitState *state) const;

	// LoadPhi loads the phi values from the alloca storage and places the
	// load values into the intermediate with the phi's result id.
	void LoadPhi(InsnIterator insn, EmitState *state) const;

	// StorePhi updates the phi's alloca storage value using the incoming
	// values from blocks that are both in the OpPhi instruction and in
	// filter.
	void StorePhi(Block::ID blockID, InsnIterator insn, EmitState *state, std::unordered_set<SpirvShader::Block::ID> const &filter) const;

	// Emits a rr::Fence for the given MemorySemanticsMask.
	void Fence(spv::MemorySemanticsMask semantics) const;

	// Helper for calling rr::Yield with res cast to an rr::Int.
	void Yield(YieldResult res) const;

	// WriteCFGGraphVizDotFile() writes a graphviz dot file of the shader's
	// control flow to the given file path.
	void WriteCFGGraphVizDotFile(const char *path) const;

	// OpcodeName() returns the name of the opcode op.
	// If NDEBUG is defined, then OpcodeName() will only return the numerical code.
	static std::string OpcodeName(spv::Op op);
	static std::memory_order MemoryOrder(spv::MemorySemanticsMask memorySemantics);

	// IsStatement() returns true if the given opcode actually performs
	// work (as opposed to declaring a type, defining a function start / end,
	// etc).
	static bool IsStatement(spv::Op op);

	// HasTypeAndResult() returns true if the given opcode's instruction
	// has a result type ID and result ID, i.e. defines an Object.
	static bool HasTypeAndResult(spv::Op op);

	// Helper as we often need to take dot products as part of doing other things.
	SIMD::Float Dot(unsigned numComponents, Operand const &x, Operand const &y) const;

	// Splits x into a floating-point significand in the range [0.5, 1.0)
	// and an integral exponent of two, such that:
	//   x = significand * 2^exponent
	// Returns the pair <significand, exponent>
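	// For example (per lane), Frexp(6.0f) yields <0.75f, 3>, since 0.75 * 2^3 == 6.0.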
1269 std::pair<SIMD::Float, SIMD::Int> Frexp(RValue<SIMD::Float> val) const;
1270
1271 static ImageSampler *getImageSampler(uint32_t instruction, vk::SampledImageDescriptor const *imageDescriptor, const vk::Sampler *sampler);
1272 static std::shared_ptr<rr::Routine> emitSamplerRoutine(ImageInstruction instruction, const Sampler &samplerState);
1273
1274 // TODO(b/129523279): Eliminate conversion and use vk::Sampler members directly.
1275 static sw::FilterType convertFilterMode(const vk::Sampler *sampler);
1276 static sw::MipmapType convertMipmapMode(const vk::Sampler *sampler);
1277 static sw::AddressingMode convertAddressingMode(int coordinateIndex, const vk::Sampler *sampler, VkImageViewType imageViewType);
1278
1279 // Returns 0 when invalid.
1280 static VkShaderStageFlagBits executionModelToStage(spv::ExecutionModel model);
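// Expected mapping (a sketch; the exact set of handled execution models is
// defined by the implementation):
//
//   spv::ExecutionModelVertex    -> VK_SHADER_STAGE_VERTEX_BIT
//   spv::ExecutionModelFragment  -> VK_SHADER_STAGE_FRAGMENT_BIT
//   spv::ExecutionModelGLCompute -> VK_SHADER_STAGE_COMPUTE_BIT
//   anything else                -> VkShaderStageFlagBits(0)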
1281
Ben Claytonb0ca2a82020-01-08 13:00:57 +00001282 // Debugger API functions. When ENABLE_VK_DEBUGGER is not defined, these
1283 // are all no-ops.
1284
1285 // dbgInit() initializes the debugger code generation.
1286 // All other dbgXXX() functions are no-op until this is called.
1287 void dbgInit(const std::shared_ptr<vk::dbg::Context> &dbgctx);
1288
1289 // dbgTerm() terminates the debugger code generation.
1290 void dbgTerm();
1291
1292 // dbgCreateFile() generates a synthetic file containing the disassembly
1293 // of the SPIR-V shader. This is the file displayed in the debug
1294 // session.
1295 void dbgCreateFile();
1296
1297 // dbgBeginEmit() sets up the debugging state for the shader.
1298 void dbgBeginEmit(EmitState *state) const;
1299
1300 // dbgEndEmit() tears down the debugging state for the shader.
1301 void dbgEndEmit(EmitState *state) const;
1302
1303 // dbgBeginEmitInstruction() updates the current debugger location for
1304 // the given instruction.
1305 void dbgBeginEmitInstruction(InsnIterator insn, EmitState *state) const;
1306
1307 // dbgEndEmitInstruction() creates any new debugger variables for the
1308 // instruction that just completed.
1309 void dbgEndEmitInstruction(InsnIterator insn, EmitState *state) const;
1310
1311 // dbgExposeIntermediate() exposes the intermediate with the given ID to
1312 // the debugger.
1313 void dbgExposeIntermediate(Object::ID id, EmitState *state) const;
1314
1315 // dbgUpdateActiveLaneMask() updates the active lane masks to the
1316 // debugger.
1317 void dbgUpdateActiveLaneMask(RValue<SIMD::Int> mask, EmitState *state) const;
1318
1319 // dbgDeclareResult() associates resultId as the result of the given
1320 // instruction.
1321 void dbgDeclareResult(const InsnIterator &insn, Object::ID resultId) const;
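// Assumed call order for a debug session (inferred from the comments above,
// not a verbatim excerpt of the pipeline code):
//
//   dbgInit(ctx);                            // enable debugger code generation
//   dbgCreateFile();                         // synthesize the disassembly "source"
//   dbgBeginEmit(state);
//   for each instruction:
//       dbgBeginEmitInstruction(insn, state);
//       ... emit the instruction ...
//       dbgEndEmitInstruction(insn, state);
//   dbgEndEmit(state);
//   dbgTerm();                               // tear down debugger code generation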
1322
1323 // Impl holds forward-declared structs and pointers to state for the
1324 // private implementations in the corresponding SpirvShaderXXX.cpp files.
Ben Clayton0771f9b2020-01-08 12:00:25 +00001325 // This allows access to the private members of the SpirvShader, without
1326 // littering the header with implementation details.
1327 struct Impl
1328 {
Ben Claytonb0ca2a82020-01-08 13:00:57 +00001329 struct Debugger;
Ben Clayton0771f9b2020-01-08 12:00:25 +00001330 struct Group;
Ben Claytonb0ca2a82020-01-08 13:00:57 +00001331 Debugger *debugger = nullptr;
Ben Clayton0771f9b2020-01-08 12:00:25 +00001332 };
Ben Claytonb0ca2a82020-01-08 13:00:57 +00001333 Impl impl;
Nicolas Capens157ba262019-12-10 17:49:14 -05001334};
1335
1336class SpirvRoutine
1337{
1338public:
1339 SpirvRoutine(vk::PipelineLayout const *pipelineLayout);
1340
1341 using Variable = Array<SIMD::Float>;
1342
1343 struct SamplerCache
1344 {
1345 Pointer<Byte> imageDescriptor = nullptr;
1346 Pointer<Byte> sampler;
1347 Pointer<Byte> function;
Ben Claytonab51bbf2019-02-20 14:36:27 +00001348 };
1349
Ben Claytonbc1c0672019-12-17 20:37:37 +00001350 vk::PipelineLayout const *const pipelineLayout;
Nicolas Capens157ba262019-12-10 17:49:14 -05001351
1352 std::unordered_map<SpirvShader::Object::ID, Variable> variables;
1353 std::unordered_map<SpirvShader::Object::ID, SamplerCache> samplerCache;
Ben Claytonbc1c0672019-12-17 20:37:37 +00001354 Variable inputs = Variable{ MAX_INTERFACE_COMPONENTS };
1355 Variable outputs = Variable{ MAX_INTERFACE_COMPONENTS };
Nicolas Capens157ba262019-12-10 17:49:14 -05001356
1357 Pointer<Byte> workgroupMemory;
1358 Pointer<Pointer<Byte>> descriptorSets;
1359 Pointer<Int> descriptorDynamicOffsets;
1360 Pointer<Byte> pushConstants;
1361 Pointer<Byte> constants;
Ben Claytonbc1c0672019-12-17 20:37:37 +00001362 Int killMask = Int{ 0 };
Ben Clayton5beaef92019-12-03 12:23:35 +00001363
1364 // Shader invocation state.
1365 // Not all of these variables are used for every type of shader, and some
1366 // are only used when debugging. See b/146486064 for more information.
1367 // Give careful consideration to the runtime performance loss before adding
1368 // more state here.
Nicolas Capens157ba262019-12-10 17:49:14 -05001369 SIMD::Int windowSpacePosition[2];
Ben Claytonbc1c0672019-12-17 20:37:37 +00001370 Int viewID; // slice offset into input attachments for multiview, even if the shader doesn't use ViewIndex
Ben Clayton5beaef92019-12-03 12:23:35 +00001371 Int instanceID;
1372 SIMD::Int vertexIndex;
1373 std::array<SIMD::Float, 4> fragCoord;
1374 std::array<SIMD::Float, 4> pointCoord;
1375 SIMD::Int helperInvocation;
1376 Int4 numWorkgroups;
1377 Int4 workgroupID;
1378 Int4 workgroupSize;
1379 Int subgroupsPerWorkgroup;
1380 Int invocationsPerSubgroup;
1381 Int subgroupIndex;
1382 SIMD::Int localInvocationIndex;
1383 std::array<SIMD::Int, 3> localInvocationID;
1384 std::array<SIMD::Int, 3> globalInvocationID;
Nicolas Capens157ba262019-12-10 17:49:14 -05001385
Ben Claytonb0ca2a82020-01-08 13:00:57 +00001386 Pointer<Byte> dbgState; // Pointer to a debugger state.
1387
Nicolas Capensff9f9b52020-04-14 00:46:38 -04001388 void createVariable(SpirvShader::Object::ID id, uint32_t componentCount)
Nicolas Capens157ba262019-12-10 17:49:14 -05001389 {
Nicolas Capensff9f9b52020-04-14 00:46:38 -04001390 bool added = variables.emplace(id, Variable(componentCount)).second;
Nicolas Capens157ba262019-12-10 17:49:14 -05001391 ASSERT_MSG(added, "Variable %d created twice", id.value());
1392 }
1393
Ben Claytonbc1c0672019-12-17 20:37:37 +00001394 Variable &getVariable(SpirvShader::Object::ID id)
Nicolas Capens157ba262019-12-10 17:49:14 -05001395 {
1396 auto it = variables.find(id);
1397 ASSERT_MSG(it != variables.end(), "Unknown variable %d", id.value());
1398 return it->second;
1399 }
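// Minimal usage sketch (hypothetical caller emitting a four-component
// OpVariable; 'resultId' is an illustrative name):
//
//   routine->createVariable(resultId, 4);        // asserts if created twice
//   auto &var = routine->getVariable(resultId);  // asserts if resultId is unknown
//   var[0] = SIMD::Float(0.0f);                  // write the first component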
1400
1401 // setImmutableInputBuiltins() sets all the immutable input builtins,
1402 // common to all shader types.
1403 void setImmutableInputBuiltins(SpirvShader const *shader);
1404
1405 // setInputBuiltin() calls f() with the builtin and value if the shader
1406 // uses the input builtin, otherwise the call is a no-op.
1407 // F is a function with the signature:
1408 // void(const SpirvShader::BuiltinMapping& builtin, Array<SIMD::Float>& value)
Ben Claytonbc1c0672019-12-17 20:37:37 +00001409 template<typename F>
1410 inline void setInputBuiltin(SpirvShader const *shader, spv::BuiltIn id, F &&f)
Nicolas Capens157ba262019-12-10 17:49:14 -05001411 {
1412 auto it = shader->inputBuiltins.find(id);
Nicolas Capens81bc9d92019-12-16 15:05:57 -05001413 if(it != shader->inputBuiltins.end())
Nicolas Capens157ba262019-12-10 17:49:14 -05001414 {
Ben Claytonbc1c0672019-12-17 20:37:37 +00001415 const auto &builtin = it->second;
Nicolas Capens157ba262019-12-10 17:49:14 -05001416 f(builtin, getVariable(builtin.Id));
1417 }
1418 }
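// Minimal usage sketch (hypothetical; the BuiltinMapping member names are
// assumptions based on how builtins are consumed elsewhere in SwiftShader):
//
//   routine->setInputBuiltin(shader, spv::BuiltInHelperInvocation,
//       [&](const SpirvShader::BuiltinMapping &builtin, Array<SIMD::Float> &value) {
//           ASSERT(builtin.SizeInComponents == 1);
//           value[builtin.FirstComponent] = As<SIMD::Float>(routine->helperInvocation);
//       });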
1419
1420private:
1421 // The phis are only accessible to SpirvShader as they are only used and
1422 // exist between calls to SpirvShader::emitProlog() and
1423 // SpirvShader::emitEpilog().
1424 friend class SpirvShader;
1425
1426 std::unordered_map<SpirvShader::Object::ID, Variable> phis;
Nicolas Capens157ba262019-12-10 17:49:14 -05001427};
1428
1429} // namespace sw
Chris Forbesaf4ed532018-12-06 18:33:27 -08001430
Chris Forbesc25b8072018-12-10 15:10:39 -08001431#endif // sw_SpirvShader_hpp