GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/glslang/SPIRV/SpvBuilder.cpp
1
//
2
// Copyright (C) 2014-2015 LunarG, Inc.
3
// Copyright (C) 2015-2018 Google, Inc.
4
// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
5
//
6
// All rights reserved.
7
//
8
// Redistribution and use in source and binary forms, with or without
9
// modification, are permitted provided that the following conditions
10
// are met:
11
//
12
// Redistributions of source code must retain the above copyright
13
// notice, this list of conditions and the following disclaimer.
14
//
15
// Redistributions in binary form must reproduce the above
16
// copyright notice, this list of conditions and the following
17
// disclaimer in the documentation and/or other materials provided
18
// with the distribution.
19
//
20
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
21
// contributors may be used to endorse or promote products derived
22
// from this software without specific prior written permission.
23
//
24
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
32
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
34
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35
// POSSIBILITY OF SUCH DAMAGE.
36
37
//
38
// Helper for making SPIR-V IR. Generally, this is documented in the header
39
// SpvBuilder.h.
40
//
41
42
#include <cassert>
43
#include <cstdlib>
44
45
#include <unordered_set>
46
#include <algorithm>
47
48
#include "SpvBuilder.h"
49
#include "spvUtil.h"
50
#include "hex_float.h"
51
52
#ifndef _WIN32
53
#include <cstdio>
54
#endif
55
56
namespace spv {
57
58
Builder::Builder(unsigned int spvVersion, unsigned int magicNumber, SpvBuildLogger* buildLogger) :
59
spvVersion(spvVersion),
60
sourceLang(SourceLanguage::Unknown),
61
sourceVersion(0),
62
addressModel(AddressingModel::Logical),
63
memoryModel(MemoryModel::GLSL450),
64
builderNumber(magicNumber),
65
buildPoint(nullptr),
66
uniqueId(0),
67
entryPointFunction(nullptr),
68
generatingOpCodeForSpecConst(false),
69
logger(buildLogger)
70
{
71
clearAccessChain();
72
}
73
74
Builder::~Builder()
75
{
76
}
77
78
Id Builder::import(const char* name)
79
{
80
Instruction* import = new Instruction(getUniqueId(), NoType, Op::OpExtInstImport);
81
import->addStringOperand(name);
82
module.mapInstruction(import);
83
84
imports.push_back(std::unique_ptr<Instruction>(import));
85
return import->getResultId();
86
}
87
88
// For creating new entries in groupedTypes (the existing type is returned if the requested one was already made).
89
Id Builder::makeVoidType()
90
{
91
Instruction* type;
92
if (groupedTypes[enumCast(Op::OpTypeVoid)].size() == 0) {
93
Id typeId = getUniqueId();
94
type = new Instruction(typeId, NoType, Op::OpTypeVoid);
95
groupedTypes[enumCast(Op::OpTypeVoid)].push_back(type);
96
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
97
module.mapInstruction(type);
98
// The core OpTypeVoid is also used as the debug void type
99
if (emitNonSemanticShaderDebugInfo)
100
debugTypeIdLookup[typeId] = typeId;
101
} else
102
type = groupedTypes[enumCast(Op::OpTypeVoid)].back();
103
104
return type->getResultId();
105
}
106
107
Id Builder::makeBoolType()
108
{
109
Instruction* type;
110
if (groupedTypes[enumCast(Op::OpTypeBool)].size() == 0) {
111
type = new Instruction(getUniqueId(), NoType, Op::OpTypeBool);
112
groupedTypes[enumCast(Op::OpTypeBool)].push_back(type);
113
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
114
module.mapInstruction(type);
115
116
if (emitNonSemanticShaderDebugInfo) {
117
auto const debugResultId = makeBoolDebugType(32);
118
debugTypeIdLookup[type->getResultId()] = debugResultId;
119
}
120
121
} else
122
type = groupedTypes[enumCast(Op::OpTypeBool)].back();
123
124
125
return type->getResultId();
126
}
127
128
Id Builder::makeSamplerType(const char* debugName)
129
{
130
Instruction* type;
131
if (groupedTypes[enumCast(Op::OpTypeSampler)].size() == 0) {
132
type = new Instruction(getUniqueId(), NoType, Op::OpTypeSampler);
133
groupedTypes[enumCast(Op::OpTypeSampler)].push_back(type);
134
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
135
module.mapInstruction(type);
136
} else
137
type = groupedTypes[enumCast(Op::OpTypeSampler)].back();
138
139
if (emitNonSemanticShaderDebugInfo)
140
{
141
auto const debugResultId = makeOpaqueDebugType(debugName);
142
debugTypeIdLookup[type->getResultId()] = debugResultId;
143
}
144
145
return type->getResultId();
146
}
147
148
Id Builder::makePointer(StorageClass storageClass, Id pointee)
149
{
150
// try to find it
151
Instruction* type;
152
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypePointer)].size(); ++t) {
153
type = groupedTypes[enumCast(Op::OpTypePointer)][t];
154
if (type->getImmediateOperand(0) == (unsigned)storageClass &&
155
type->getIdOperand(1) == pointee)
156
return type->getResultId();
157
}
158
159
// not found, make it
160
type = new Instruction(getUniqueId(), NoType, Op::OpTypePointer);
161
type->reserveOperands(2);
162
type->addImmediateOperand(storageClass);
163
type->addIdOperand(pointee);
164
groupedTypes[enumCast(Op::OpTypePointer)].push_back(type);
165
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
166
module.mapInstruction(type);
167
168
if (emitNonSemanticShaderDebugInfo) {
169
const Id debugResultId = makePointerDebugType(storageClass, pointee);
170
debugTypeIdLookup[type->getResultId()] = debugResultId;
171
}
172
173
return type->getResultId();
174
}
175
176
Id Builder::makeForwardPointer(StorageClass storageClass)
177
{
178
// Caching/uniquifying doesn't work here, because we don't know the
179
// pointee type and there can be multiple forward pointers of the same
180
// storage type. Somebody higher up in the stack must keep track.
181
Instruction* type = new Instruction(getUniqueId(), NoType, Op::OpTypeForwardPointer);
182
type->addImmediateOperand(storageClass);
183
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
184
module.mapInstruction(type);
185
186
if (emitNonSemanticShaderDebugInfo) {
187
const Id debugResultId = makeForwardPointerDebugType(storageClass);
188
debugTypeIdLookup[type->getResultId()] = debugResultId;
189
}
190
return type->getResultId();
191
}
192
193
Id Builder::makePointerFromForwardPointer(StorageClass storageClass, Id forwardPointerType, Id pointee)
194
{
195
// try to find it
196
Instruction* type;
197
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypePointer)].size(); ++t) {
198
type = groupedTypes[enumCast(Op::OpTypePointer)][t];
199
if (type->getImmediateOperand(0) == (unsigned)storageClass &&
200
type->getIdOperand(1) == pointee)
201
return type->getResultId();
202
}
203
204
type = new Instruction(forwardPointerType, NoType, Op::OpTypePointer);
205
type->reserveOperands(2);
206
type->addImmediateOperand(storageClass);
207
type->addIdOperand(pointee);
208
groupedTypes[enumCast(Op::OpTypePointer)].push_back(type);
209
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
210
module.mapInstruction(type);
211
212
// If we are emitting nonsemantic debug info, we need to patch the debug pointer type
213
// that was emitted alongside the forward pointer, now that we have a pointee debug
214
// type for it to point to.
215
if (emitNonSemanticShaderDebugInfo) {
216
Instruction *debugForwardPointer = module.getInstruction(getDebugType(forwardPointerType));
217
assert(getDebugType(pointee));
218
debugForwardPointer->setIdOperand(2, getDebugType(pointee));
219
}
220
221
return type->getResultId();
222
}
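
// --- Illustrative usage sketch (not part of upstream glslang) ----------------
// A hypothetical caller pairing makeForwardPointer() with
// makePointerFromForwardPointer() for a self-referential struct; the names,
// storage class, and member layout below are assumptions for illustration only.
#if 0
static void sketchForwardPointerUsage(spv::Builder& b)
{
    // Declare the pointer type before its pointee exists. The result is not
    // cached, so the caller must hold on to the returned Id.
    spv::Id fwd = b.makeForwardPointer(spv::StorageClass::PhysicalStorageBuffer);

    // Build the pointee, e.g. a linked-list node { int value; Node* next; }.
    // (Real callers supply per-member debug info when debug info is enabled.)
    spv::Id node = b.makeStructType({ b.makeIntegerType(32, true), fwd }, {}, "Node");

    // Resolve the forward declaration into a real OpTypePointer; this also
    // patches the forward-declared debug pointer type emitted earlier.
    spv::Id ptr = b.makePointerFromForwardPointer(spv::StorageClass::PhysicalStorageBuffer, fwd, node);
    (void)ptr;
}
#endif
// ------------------------------------------------------------------------------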
223
224
Id Builder::makeIntegerType(int width, bool hasSign)
225
{
226
// try to find it
227
Instruction* type;
228
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeInt)].size(); ++t) {
229
type = groupedTypes[enumCast(Op::OpTypeInt)][t];
230
if (type->getImmediateOperand(0) == (unsigned)width &&
231
type->getImmediateOperand(1) == (hasSign ? 1u : 0u))
232
return type->getResultId();
233
}
234
235
// not found, make it
236
type = new Instruction(getUniqueId(), NoType, Op::OpTypeInt);
237
type->reserveOperands(2);
238
type->addImmediateOperand(width);
239
type->addImmediateOperand(hasSign ? 1 : 0);
240
groupedTypes[enumCast(Op::OpTypeInt)].push_back(type);
241
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
242
module.mapInstruction(type);
243
244
// deal with capabilities
245
switch (width) {
246
case 8:
247
case 16:
248
// these are currently handled by storage-type declarations and post processing
249
break;
250
case 64:
251
addCapability(Capability::Int64);
252
break;
253
default:
254
break;
255
}
256
257
if (emitNonSemanticShaderDebugInfo)
258
{
259
auto const debugResultId = makeIntegerDebugType(width, hasSign);
260
debugTypeIdLookup[type->getResultId()] = debugResultId;
261
}
262
263
return type->getResultId();
264
}
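
// --- Illustrative usage sketch (not part of upstream glslang) ----------------
// The make*Type helpers cache every type they create in groupedTypes, so
// asking for the same type twice returns the same <id>. A minimal sketch,
// assuming a Builder `b` constructed and configured elsewhere:
#if 0
static void sketchTypeDeduplication(spv::Builder& b)
{
    spv::Id i32a = b.makeIntegerType(32, true);  // emits OpTypeInt 32 1
    spv::Id i32b = b.makeIntegerType(32, true);  // found in the cache; same Id
    assert(i32a == i32b);

    spv::Id u64 = b.makeIntegerType(64, false);  // also adds Capability::Int64
    (void)u64;
}
#endif
// ------------------------------------------------------------------------------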
265
266
Id Builder::makeFloatType(int width)
267
{
268
// try to find it
269
Instruction* type;
270
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeFloat)].size(); ++t) {
271
type = groupedTypes[enumCast(Op::OpTypeFloat)][t];
272
if (type->getNumOperands() != 1) {
273
continue;
274
}
275
if (type->getImmediateOperand(0) == (unsigned)width)
276
return type->getResultId();
277
}
278
279
// not found, make it
280
type = new Instruction(getUniqueId(), NoType, Op::OpTypeFloat);
281
type->addImmediateOperand(width);
282
groupedTypes[enumCast(Op::OpTypeFloat)].push_back(type);
283
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
284
module.mapInstruction(type);
285
286
// deal with capabilities
287
switch (width) {
288
case 16:
289
// currently handled by storage-type declarations and post processing
290
break;
291
case 64:
292
addCapability(Capability::Float64);
293
break;
294
default:
295
break;
296
}
297
298
if (emitNonSemanticShaderDebugInfo)
299
{
300
auto const debugResultId = makeFloatDebugType(width);
301
debugTypeIdLookup[type->getResultId()] = debugResultId;
302
}
303
304
return type->getResultId();
305
}
306
307
Id Builder::makeBFloat16Type()
308
{
309
// try to find it
310
Instruction* type;
311
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeFloat)].size(); ++t) {
312
type = groupedTypes[enumCast(Op::OpTypeFloat)][t];
313
if (type->getNumOperands() != 2) {
314
continue;
315
}
316
if (type->getImmediateOperand(0) == (unsigned)16 &&
317
type->getImmediateOperand(1) == FPEncoding::BFloat16KHR)
318
return type->getResultId();
319
}
320
321
// not found, make it
322
type = new Instruction(getUniqueId(), NoType, Op::OpTypeFloat);
323
type->addImmediateOperand(16);
324
type->addImmediateOperand(FPEncoding::BFloat16KHR);
325
groupedTypes[enumCast(Op::OpTypeFloat)].push_back(type);
326
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
327
module.mapInstruction(type);
328
329
addExtension(spv::E_SPV_KHR_bfloat16);
330
addCapability(Capability::BFloat16TypeKHR);
331
332
#if 0
333
// XXX not supported
334
if (emitNonSemanticShaderDebugInfo)
335
{
336
auto const debugResultId = makeFloatDebugType(width);
337
debugTypeIdLookup[type->getResultId()] = debugResultId;
338
}
339
#endif
340
341
return type->getResultId();
342
}
343
344
Id Builder::makeFloatE5M2Type()
345
{
346
// try to find it
347
Instruction* type;
348
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeFloat)].size(); ++t) {
349
type = groupedTypes[enumCast(Op::OpTypeFloat)][t];
350
if (type->getNumOperands() != 2) {
351
continue;
352
}
353
if (type->getImmediateOperand(0) == (unsigned)8 &&
354
type->getImmediateOperand(1) == FPEncoding::Float8E5M2EXT)
355
return type->getResultId();
356
}
357
358
// not found, make it
359
type = new Instruction(getUniqueId(), NoType, Op::OpTypeFloat);
360
type->addImmediateOperand(8);
361
type->addImmediateOperand(FPEncoding::Float8E5M2EXT);
362
groupedTypes[enumCast(Op::OpTypeFloat)].push_back(type);
363
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
364
module.mapInstruction(type);
365
366
addExtension(spv::E_SPV_EXT_float8);
367
addCapability(Capability::Float8EXT);
368
369
#if 0
370
// XXX not supported
371
if (emitNonSemanticShaderDebugInfo)
372
{
373
auto const debugResultId = makeFloatDebugType(width);
374
debugTypeIdLookup[type->getResultId()] = debugResultId;
375
}
376
#endif
377
378
return type->getResultId();
379
}
380
381
Id Builder::makeFloatE4M3Type()
382
{
383
// try to find it
384
Instruction* type;
385
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeFloat)].size(); ++t) {
386
type = groupedTypes[enumCast(Op::OpTypeFloat)][t];
387
if (type->getNumOperands() != 2) {
388
continue;
389
}
390
if (type->getImmediateOperand(0) == (unsigned)8 &&
391
type->getImmediateOperand(1) == FPEncoding::Float8E4M3EXT)
392
return type->getResultId();
393
}
394
395
// not found, make it
396
type = new Instruction(getUniqueId(), NoType, Op::OpTypeFloat);
397
type->addImmediateOperand(8);
398
type->addImmediateOperand(FPEncoding::Float8E4M3EXT);
399
groupedTypes[enumCast(Op::OpTypeFloat)].push_back(type);
400
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
401
module.mapInstruction(type);
402
403
addExtension(spv::E_SPV_EXT_float8);
404
addCapability(Capability::Float8EXT);
405
406
#if 0
407
// XXX not supported
408
if (emitNonSemanticShaderDebugInfo)
409
{
410
auto const debugResultId = makeFloatDebugType(width);
411
debugTypeIdLookup[type->getResultId()] = debugResultId;
412
}
413
#endif
414
415
return type->getResultId();
416
}
417
418
// Make a struct without checking for duplication.
419
// See makeStructResultType() for the non-decorated structs needed as the
420
// results of some instructions; unlike this function, it does check
421
// for duplicates.
422
// For compiler-generated structs, debug info is ignored.
423
Id Builder::makeStructType(const std::vector<Id>& members, const std::vector<spv::StructMemberDebugInfo>& memberDebugInfo,
424
const char* name, bool const compilerGenerated)
425
{
426
// Don't look for previous one, because in the general case,
427
// structs can be duplicated except for decorations.
428
429
// not found, make it
430
Instruction* type = new Instruction(getUniqueId(), NoType, Op::OpTypeStruct);
431
for (int op = 0; op < (int)members.size(); ++op)
432
type->addIdOperand(members[op]);
433
groupedTypes[enumCast(Op::OpTypeStruct)].push_back(type);
434
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
435
module.mapInstruction(type);
436
addName(type->getResultId(), name);
437
438
if (emitNonSemanticShaderDebugInfo && !compilerGenerated) {
439
assert(members.size() == memberDebugInfo.size());
440
auto const debugResultId =
441
makeCompositeDebugType(members, memberDebugInfo, name, NonSemanticShaderDebugInfo100Structure);
442
debugTypeIdLookup[type->getResultId()] = debugResultId;
443
}
444
445
return type->getResultId();
446
}
447
448
// Make a struct for the simple results of several instructions,
449
// checking for duplication.
450
Id Builder::makeStructResultType(Id type0, Id type1)
451
{
452
// try to find it
453
Instruction* type;
454
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeStruct)].size(); ++t) {
455
type = groupedTypes[enumCast(Op::OpTypeStruct)][t];
456
if (type->getNumOperands() != 2)
457
continue;
458
if (type->getIdOperand(0) != type0 ||
459
type->getIdOperand(1) != type1)
460
continue;
461
return type->getResultId();
462
}
463
464
// not found, make it
465
std::vector<spv::Id> members;
466
members.push_back(type0);
467
members.push_back(type1);
468
469
return makeStructType(members, {}, "ResType");
470
}
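
// --- Illustrative usage sketch (not part of upstream glslang) ----------------
// Building a named struct with per-member debug info versus the cached
// two-member result struct. Field names, line numbers, and the exact layout of
// StructMemberDebugInfo are assumptions based on its uses elsewhere in this file.
#if 0
static void sketchStructTypes(spv::Builder& b)
{
    spv::Id f32  = b.makeFloatType(32);
    spv::Id vec3 = b.makeVectorType(f32, 3);

    // When nonsemantic debug info is enabled, makeStructType() expects one
    // debug-info entry per member.
    spv::StructMemberDebugInfo posInfo{}, radiusInfo{};
    posInfo.name    = "position"; posInfo.line    = 10; posInfo.column    = 5;
    radiusInfo.name = "radius";   radiusInfo.line = 11; radiusInfo.column = 5;
    spv::Id sphere = b.makeStructType({ vec3, f32 }, { posInfo, radiusInfo }, "Sphere");

    // Undecorated two-member result structs are deduplicated across calls.
    spv::Id res = b.makeStructResultType(b.makeIntegerType(32, false), f32);
    (void)sphere; (void)res;
}
#endif
// ------------------------------------------------------------------------------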
471
472
Id Builder::makeVectorType(Id component, int size)
473
{
474
// try to find it
475
Instruction* type;
476
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeVector)].size(); ++t) {
477
type = groupedTypes[enumCast(Op::OpTypeVector)][t];
478
if (type->getIdOperand(0) == component &&
479
type->getImmediateOperand(1) == (unsigned)size)
480
return type->getResultId();
481
}
482
483
// not found, make it
484
type = new Instruction(getUniqueId(), NoType, Op::OpTypeVector);
485
type->reserveOperands(2);
486
type->addIdOperand(component);
487
type->addImmediateOperand(size);
488
groupedTypes[enumCast(Op::OpTypeVector)].push_back(type);
489
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
490
module.mapInstruction(type);
491
492
if (emitNonSemanticShaderDebugInfo)
493
{
494
auto const debugResultId = makeVectorDebugType(component, size);
495
debugTypeIdLookup[type->getResultId()] = debugResultId;
496
}
497
498
return type->getResultId();
499
}
500
501
Id Builder::makeMatrixType(Id component, int cols, int rows)
502
{
503
assert(cols <= maxMatrixSize && rows <= maxMatrixSize);
504
505
Id column = makeVectorType(component, rows);
506
507
// try to find it
508
Instruction* type;
509
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeMatrix)].size(); ++t) {
510
type = groupedTypes[enumCast(Op::OpTypeMatrix)][t];
511
if (type->getIdOperand(0) == column &&
512
type->getImmediateOperand(1) == (unsigned)cols)
513
return type->getResultId();
514
}
515
516
// not found, make it
517
type = new Instruction(getUniqueId(), NoType, Op::OpTypeMatrix);
518
type->reserveOperands(2);
519
type->addIdOperand(column);
520
type->addImmediateOperand(cols);
521
groupedTypes[enumCast(Op::OpTypeMatrix)].push_back(type);
522
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
523
module.mapInstruction(type);
524
525
if (emitNonSemanticShaderDebugInfo)
526
{
527
auto const debugResultId = makeMatrixDebugType(column, cols);
528
debugTypeIdLookup[type->getResultId()] = debugResultId;
529
}
530
531
return type->getResultId();
532
}
533
534
Id Builder::makeCooperativeMatrixTypeKHR(Id component, Id scope, Id rows, Id cols, Id use)
535
{
536
// try to find it
537
Instruction* type;
538
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeCooperativeMatrixKHR)].size(); ++t) {
539
type = groupedTypes[enumCast(Op::OpTypeCooperativeMatrixKHR)][t];
540
if (type->getIdOperand(0) == component &&
541
type->getIdOperand(1) == scope &&
542
type->getIdOperand(2) == rows &&
543
type->getIdOperand(3) == cols &&
544
type->getIdOperand(4) == use)
545
return type->getResultId();
546
}
547
548
// not found, make it
549
type = new Instruction(getUniqueId(), NoType, Op::OpTypeCooperativeMatrixKHR);
550
type->reserveOperands(5);
551
type->addIdOperand(component);
552
type->addIdOperand(scope);
553
type->addIdOperand(rows);
554
type->addIdOperand(cols);
555
type->addIdOperand(use);
556
groupedTypes[enumCast(Op::OpTypeCooperativeMatrixKHR)].push_back(type);
557
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
558
module.mapInstruction(type);
559
560
if (emitNonSemanticShaderDebugInfo)
561
{
562
// Find a name for one of the parameters. It can come either from the debug info for
563
// another type or from an OpName on a constant.
564
auto const findName = [&](Id id) {
565
Id id2 = getDebugType(id);
566
for (auto &t : groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeBasic]) {
567
if (t->getResultId() == id2) {
568
for (auto &s : strings) {
569
if (s->getResultId() == t->getIdOperand(2)) {
570
return s->getNameString();
571
}
572
}
573
}
574
}
575
for (auto &t : names) {
576
if (t->getIdOperand(0) == id) {
577
return t->getNameString();
578
}
579
}
580
return "unknown";
581
};
582
std::string debugName = "coopmat<";
583
debugName += std::string(findName(component)) + ", ";
584
if (isConstantScalar(scope)) {
585
debugName += std::string("gl_Scope") + std::string(spv::ScopeToString((spv::Scope)getConstantScalar(scope))) + ", ";
586
} else {
587
debugName += std::string(findName(scope)) + ", ";
588
}
589
debugName += std::string(findName(rows)) + ", ";
590
debugName += std::string(findName(cols)) + ">";
591
// There's no nonsemantic debug info instruction for cooperative matrix types,
592
// so use an opaque composite instead.
593
auto const debugResultId = makeOpaqueDebugType(debugName.c_str());
594
debugTypeIdLookup[type->getResultId()] = debugResultId;
595
}
596
597
return type->getResultId();
598
}
599
600
Id Builder::makeCooperativeMatrixTypeNV(Id component, Id scope, Id rows, Id cols)
601
{
602
// try to find it
603
Instruction* type;
604
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeCooperativeMatrixNV)].size(); ++t) {
605
type = groupedTypes[enumCast(Op::OpTypeCooperativeMatrixNV)][t];
606
if (type->getIdOperand(0) == component && type->getIdOperand(1) == scope && type->getIdOperand(2) == rows &&
607
type->getIdOperand(3) == cols)
608
return type->getResultId();
609
}
610
611
// not found, make it
612
type = new Instruction(getUniqueId(), NoType, Op::OpTypeCooperativeMatrixNV);
613
type->reserveOperands(4);
614
type->addIdOperand(component);
615
type->addIdOperand(scope);
616
type->addIdOperand(rows);
617
type->addIdOperand(cols);
618
groupedTypes[enumCast(Op::OpTypeCooperativeMatrixNV)].push_back(type);
619
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
620
module.mapInstruction(type);
621
622
return type->getResultId();
623
}
624
625
Id Builder::makeCooperativeMatrixTypeWithSameShape(Id component, Id otherType)
626
{
627
Instruction* instr = module.getInstruction(otherType);
628
if (instr->getOpCode() == Op::OpTypeCooperativeMatrixNV) {
629
return makeCooperativeMatrixTypeNV(component, instr->getIdOperand(1), instr->getIdOperand(2), instr->getIdOperand(3));
630
} else {
631
assert(instr->getOpCode() == Op::OpTypeCooperativeMatrixKHR);
632
return makeCooperativeMatrixTypeKHR(component, instr->getIdOperand(1), instr->getIdOperand(2), instr->getIdOperand(3), instr->getIdOperand(4));
633
}
634
}
635
636
Id Builder::makeCooperativeVectorTypeNV(Id componentType, Id components)
637
{
638
// try to find it
639
Instruction* type;
640
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeCooperativeVectorNV)].size(); ++t) {
641
type = groupedTypes[enumCast(Op::OpTypeCooperativeVectorNV)][t];
642
if (type->getIdOperand(0) == componentType &&
643
type->getIdOperand(1) == components)
644
return type->getResultId();
645
}
646
647
// not found, make it
648
type = new Instruction(getUniqueId(), NoType, Op::OpTypeCooperativeVectorNV);
649
type->addIdOperand(componentType);
650
type->addIdOperand(components);
651
groupedTypes[enumCast(Op::OpTypeCooperativeVectorNV)].push_back(type);
652
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
653
module.mapInstruction(type);
654
655
return type->getResultId();
656
}
657
658
Id Builder::makeTensorTypeARM(Id elementType, Id rank)
659
{
660
// See if an OpTypeTensorARM with same element type and rank already exists.
661
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeTensorARM)].size(); ++t) {
662
const Instruction *type = groupedTypes[enumCast(Op::OpTypeTensorARM)][t];
663
if (type->getIdOperand(0) == elementType && type->getIdOperand(1) == rank)
664
return type->getResultId();
665
}
666
667
// Not found, make it.
668
std::unique_ptr<Instruction> type(new Instruction(getUniqueId(), NoType, Op::OpTypeTensorARM));
669
type->addIdOperand(elementType);
670
type->addIdOperand(rank);
671
groupedTypes[enumCast(Op::OpTypeTensorARM)].push_back(type.get());
672
module.mapInstruction(type.get());
673
Id resultID = type->getResultId();
674
constantsTypesGlobals.push_back(std::move(type));
675
return resultID;
676
}
677
678
Id Builder::makeGenericType(spv::Op opcode, std::vector<spv::IdImmediate>& operands)
679
{
680
// try to find it
681
Instruction* type;
682
for (int t = 0; t < (int)groupedTypes[enumCast(opcode)].size(); ++t) {
683
type = groupedTypes[enumCast(opcode)][t];
684
if (static_cast<size_t>(type->getNumOperands()) != operands.size())
685
continue; // Number mismatch, find next
686
687
bool match = true;
688
for (int op = 0; match && op < (int)operands.size(); ++op) {
689
match = (operands[op].isId ? type->getIdOperand(op) : type->getImmediateOperand(op)) == operands[op].word;
690
}
691
if (match)
692
return type->getResultId();
693
}
694
695
// not found, make it
696
type = new Instruction(getUniqueId(), NoType, opcode);
697
type->reserveOperands(operands.size());
698
for (size_t op = 0; op < operands.size(); ++op) {
699
if (operands[op].isId)
700
type->addIdOperand(operands[op].word);
701
else
702
type->addImmediateOperand(operands[op].word);
703
}
704
groupedTypes[enumCast(opcode)].push_back(type);
705
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
706
module.mapInstruction(type);
707
708
return type->getResultId();
709
}
710
711
// TODO: performance: track arrays per stride
712
// If a stride is supplied (non-zero), always make a new array type.
713
// If no stride (0), reuse a previously made array type when possible.
714
// 'sizeId' is the Id of a constant or specialization constant giving the array size.
715
Id Builder::makeArrayType(Id element, Id sizeId, int stride)
716
{
717
Instruction* type;
718
if (stride == 0) {
719
// try to find existing type
720
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeArray)].size(); ++t) {
721
type = groupedTypes[enumCast(Op::OpTypeArray)][t];
722
if (type->getIdOperand(0) == element &&
723
type->getIdOperand(1) == sizeId &&
724
explicitlyLaidOut.find(type->getResultId()) == explicitlyLaidOut.end())
725
return type->getResultId();
726
}
727
}
728
729
// not found, make it
730
type = new Instruction(getUniqueId(), NoType, Op::OpTypeArray);
731
type->reserveOperands(2);
732
type->addIdOperand(element);
733
type->addIdOperand(sizeId);
734
groupedTypes[enumCast(Op::OpTypeArray)].push_back(type);
735
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
736
module.mapInstruction(type);
737
738
if (stride != 0) {
739
explicitlyLaidOut.insert(type->getResultId());
740
}
741
742
if (emitNonSemanticShaderDebugInfo)
743
{
744
auto const debugResultId = makeArrayDebugType(element, sizeId);
745
debugTypeIdLookup[type->getResultId()] = debugResultId;
746
}
747
748
return type->getResultId();
749
}
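
// --- Illustrative usage sketch (not part of upstream glslang) ----------------
// How the stride parameter changes makeArrayType()'s caching behaviour; the
// element type and sizes below are arbitrary examples.
#if 0
static void sketchArrayTypes(spv::Builder& b)
{
    spv::Id f32   = b.makeFloatType(32);
    spv::Id eight = b.makeUintConstant(8);

    // stride == 0: an existing array type without explicit layout is reused.
    spv::Id plainA = b.makeArrayType(f32, eight, 0);
    spv::Id plainB = b.makeArrayType(f32, eight, 0);
    assert(plainA == plainB);

    // stride != 0: a fresh type is minted each time and remembered in
    // explicitlyLaidOut so it can later carry an ArrayStride decoration.
    spv::Id stridedA = b.makeArrayType(f32, eight, 16);
    spv::Id stridedB = b.makeArrayType(f32, eight, 16);
    assert(stridedA != stridedB);
}
#endif
// ------------------------------------------------------------------------------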
750
751
Id Builder::makeRuntimeArray(Id element)
752
{
753
Instruction* type = new Instruction(getUniqueId(), NoType, Op::OpTypeRuntimeArray);
754
type->addIdOperand(element);
755
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
756
module.mapInstruction(type);
757
758
if (emitNonSemanticShaderDebugInfo)
759
{
760
auto const debugResultId = makeArrayDebugType(element, makeUintConstant(0));
761
debugTypeIdLookup[type->getResultId()] = debugResultId;
762
}
763
764
return type->getResultId();
765
}
766
767
Id Builder::makeFunctionType(Id returnType, const std::vector<Id>& paramTypes)
768
{
769
// try to find it
770
Instruction* type;
771
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeFunction)].size(); ++t) {
772
type = groupedTypes[enumCast(Op::OpTypeFunction)][t];
773
if (type->getIdOperand(0) != returnType || (int)paramTypes.size() != type->getNumOperands() - 1)
774
continue;
775
bool mismatch = false;
776
for (int p = 0; p < (int)paramTypes.size(); ++p) {
777
if (paramTypes[p] != type->getIdOperand(p + 1)) {
778
mismatch = true;
779
break;
780
}
781
}
782
if (! mismatch)
783
{
784
// If compiling HLSL, glslang will create a wrapper function around the entrypoint. Accordingly, a void(void)
785
// function type is created for the wrapper function. However, nonsemantic shader debug information is disabled
786
// while creating the HLSL wrapper. Consequently, if we encounter another void(void) function, we need to create
787
// the associated debug function type if it hasn't been created yet.
788
if(emitNonSemanticShaderDebugInfo && getDebugType(type->getResultId()) == NoType) {
789
assert(sourceLang == spv::SourceLanguage::HLSL);
790
assert(getTypeClass(returnType) == Op::OpTypeVoid && paramTypes.size() == 0);
791
792
Id id = makeDebugFunctionType(returnType, {});
793
debugTypeIdLookup[type->getResultId()] = id;
794
}
795
return type->getResultId();
796
}
797
}
798
799
// not found, make it
800
Id typeId = getUniqueId();
801
type = new Instruction(typeId, NoType, Op::OpTypeFunction);
802
type->reserveOperands(paramTypes.size() + 1);
803
type->addIdOperand(returnType);
804
for (int p = 0; p < (int)paramTypes.size(); ++p)
805
type->addIdOperand(paramTypes[p]);
806
groupedTypes[enumCast(Op::OpTypeFunction)].push_back(type);
807
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
808
module.mapInstruction(type);
809
810
// make debug type and map it
811
if (emitNonSemanticShaderDebugInfo) {
812
Id debugTypeId = makeDebugFunctionType(returnType, paramTypes);
813
debugTypeIdLookup[typeId] = debugTypeId;
814
}
815
816
return type->getResultId();
817
}
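
// --- Illustrative usage sketch (not part of upstream glslang) ----------------
// Function types are deduplicated on return type plus parameter list; the
// signatures below are arbitrary examples.
#if 0
static void sketchFunctionTypes(spv::Builder& b)
{
    spv::Id f32 = b.makeFloatType(32);
    spv::Id i32 = b.makeIntegerType(32, true);

    spv::Id fnA = b.makeFunctionType(f32, { f32, i32 });  // OpTypeFunction %f32 %f32 %i32
    spv::Id fnB = b.makeFunctionType(f32, { f32, i32 });  // cache hit, same Id
    assert(fnA == fnB);

    // void(void) is the signature glslang's HLSL entry-point wrapper uses; its
    // DebugTypeFunction may be created lazily, as the comment above explains.
    spv::Id voidFn = b.makeFunctionType(b.makeVoidType(), {});
    (void)voidFn;
}
#endif
// ------------------------------------------------------------------------------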
818
819
Id Builder::makeDebugFunctionType(Id returnType, const std::vector<Id>& paramTypes)
820
{
821
assert(getDebugType(returnType) != NoType);
822
823
Id typeId = getUniqueId();
824
auto type = new Instruction(typeId, makeVoidType(), Op::OpExtInst);
825
type->reserveOperands(paramTypes.size() + 4);
826
type->addIdOperand(nonSemanticShaderDebugInfo);
827
type->addImmediateOperand(NonSemanticShaderDebugInfo100DebugTypeFunction);
828
type->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100FlagIsPublic));
829
type->addIdOperand(getDebugType(returnType));
830
for (auto const paramType : paramTypes) {
831
if (isPointerType(paramType) || isArrayType(paramType)) {
832
type->addIdOperand(getDebugType(getContainedTypeId(paramType)));
833
}
834
else {
835
type->addIdOperand(getDebugType(paramType));
836
}
837
}
838
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
839
module.mapInstruction(type);
840
return typeId;
841
}
842
843
Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, bool ms, unsigned sampled,
844
ImageFormat format, const char* debugName)
845
{
846
assert(sampled == 1 || sampled == 2);
847
848
// try to find it
849
Instruction* type;
850
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeImage)].size(); ++t) {
851
type = groupedTypes[enumCast(Op::OpTypeImage)][t];
852
if (type->getIdOperand(0) == sampledType &&
853
type->getImmediateOperand(1) == (unsigned int)dim &&
854
type->getImmediateOperand(2) == ( depth ? 1u : 0u) &&
855
type->getImmediateOperand(3) == (arrayed ? 1u : 0u) &&
856
type->getImmediateOperand(4) == ( ms ? 1u : 0u) &&
857
type->getImmediateOperand(5) == sampled &&
858
type->getImmediateOperand(6) == (unsigned int)format)
859
return type->getResultId();
860
}
861
862
// not found, make it
863
type = new Instruction(getUniqueId(), NoType, Op::OpTypeImage);
864
type->reserveOperands(7);
865
type->addIdOperand(sampledType);
866
type->addImmediateOperand( dim);
867
type->addImmediateOperand( depth ? 1 : 0);
868
type->addImmediateOperand(arrayed ? 1 : 0);
869
type->addImmediateOperand( ms ? 1 : 0);
870
type->addImmediateOperand(sampled);
871
type->addImmediateOperand((unsigned int)format);
872
873
groupedTypes[enumCast(Op::OpTypeImage)].push_back(type);
874
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
875
module.mapInstruction(type);
876
877
// deal with capabilities
878
switch (dim) {
879
case Dim::Buffer:
880
if (sampled == 1)
881
addCapability(Capability::SampledBuffer);
882
else
883
addCapability(Capability::ImageBuffer);
884
break;
885
case Dim::Dim1D:
886
if (sampled == 1)
887
addCapability(Capability::Sampled1D);
888
else
889
addCapability(Capability::Image1D);
890
break;
891
case Dim::Cube:
892
if (arrayed) {
893
if (sampled == 1)
894
addCapability(Capability::SampledCubeArray);
895
else
896
addCapability(Capability::ImageCubeArray);
897
}
898
break;
899
case Dim::Rect:
900
if (sampled == 1)
901
addCapability(Capability::SampledRect);
902
else
903
addCapability(Capability::ImageRect);
904
break;
905
case Dim::SubpassData:
906
addCapability(Capability::InputAttachment);
907
break;
908
default:
909
break;
910
}
911
912
if (ms) {
913
if (sampled == 2) {
914
// Images used with subpass data are not storage
915
// images, so don't require the capability for them.
916
if (dim != Dim::SubpassData)
917
addCapability(Capability::StorageImageMultisample);
918
if (arrayed)
919
addCapability(Capability::ImageMSArray);
920
}
921
}
922
923
if (emitNonSemanticShaderDebugInfo)
924
{
925
auto const debugResultId = makeOpaqueDebugType(debugName);
926
debugTypeIdLookup[type->getResultId()] = debugResultId;
927
}
928
929
return type->getResultId();
930
}
931
932
Id Builder::makeSampledImageType(Id imageType, const char* debugName)
933
{
934
// try to find it
935
Instruction* type;
936
for (int t = 0; t < (int)groupedTypes[enumCast(Op::OpTypeSampledImage)].size(); ++t) {
937
type = groupedTypes[enumCast(Op::OpTypeSampledImage)][t];
938
if (type->getIdOperand(0) == imageType)
939
return type->getResultId();
940
}
941
942
// not found, make it
943
type = new Instruction(getUniqueId(), NoType, Op::OpTypeSampledImage);
944
type->addIdOperand(imageType);
945
946
groupedTypes[enumCast(Op::OpTypeSampledImage)].push_back(type);
947
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
948
module.mapInstruction(type);
949
950
if (emitNonSemanticShaderDebugInfo)
951
{
952
auto const debugResultId = makeOpaqueDebugType(debugName);
953
debugTypeIdLookup[type->getResultId()] = debugResultId;
954
}
955
956
return type->getResultId();
957
}
958
959
Id Builder::makeDebugInfoNone()
960
{
961
if (debugInfoNone != 0)
962
return debugInfoNone;
963
964
Instruction* inst = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
965
inst->reserveOperands(2);
966
inst->addIdOperand(nonSemanticShaderDebugInfo);
967
inst->addImmediateOperand(NonSemanticShaderDebugInfo100DebugInfoNone);
968
969
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(inst));
970
module.mapInstruction(inst);
971
972
debugInfoNone = inst->getResultId();
973
974
return debugInfoNone;
975
}
976
977
Id Builder::makeBoolDebugType(int const size)
978
{
979
// try to find it
980
Instruction* type;
981
for (int t = 0; t < (int)groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeBasic].size(); ++t) {
982
type = groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeBasic][t];
983
if (type->getIdOperand(0) == getStringId("bool") &&
984
type->getIdOperand(1) == static_cast<unsigned int>(size) &&
985
type->getIdOperand(2) == NonSemanticShaderDebugInfo100Boolean)
986
return type->getResultId();
987
}
988
989
type = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
990
type->reserveOperands(6);
991
type->addIdOperand(nonSemanticShaderDebugInfo);
992
type->addImmediateOperand(NonSemanticShaderDebugInfo100DebugTypeBasic);
993
994
type->addIdOperand(getStringId("bool")); // name id
995
type->addIdOperand(makeUintConstant(size)); // size id
996
type->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100Boolean)); // encoding id
997
type->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100None)); // flags id
998
999
groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeBasic].push_back(type);
1000
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1001
module.mapInstruction(type);
1002
1003
return type->getResultId();
1004
}
1005
1006
Id Builder::makeIntegerDebugType(int const width, bool const hasSign)
1007
{
1008
const char* typeName = nullptr;
1009
switch (width) {
1010
case 8: typeName = hasSign ? "int8_t" : "uint8_t"; break;
1011
case 16: typeName = hasSign ? "int16_t" : "uint16_t"; break;
1012
case 64: typeName = hasSign ? "int64_t" : "uint64_t"; break;
1013
default: typeName = hasSign ? "int" : "uint";
1014
}
1015
auto nameId = getStringId(typeName);
1016
// try to find it
1017
Instruction* type;
1018
for (int t = 0; t < (int)groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeBasic].size(); ++t) {
1019
type = groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeBasic][t];
1020
if (type->getIdOperand(0) == nameId &&
1021
type->getIdOperand(1) == static_cast<unsigned int>(width) &&
1022
type->getIdOperand(2) == (hasSign ? NonSemanticShaderDebugInfo100Signed : NonSemanticShaderDebugInfo100Unsigned))
1023
return type->getResultId();
1024
}
1025
1026
// not found, make it
1027
type = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1028
type->reserveOperands(6);
1029
type->addIdOperand(nonSemanticShaderDebugInfo);
1030
type->addImmediateOperand(NonSemanticShaderDebugInfo100DebugTypeBasic);
1031
type->addIdOperand(nameId); // name id
1032
type->addIdOperand(makeUintConstant(width)); // size id
1033
if(hasSign == true) {
1034
type->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100Signed)); // encoding id
1035
} else {
1036
type->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100Unsigned)); // encoding id
1037
}
1038
type->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100None)); // flags id
1039
1040
groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeBasic].push_back(type);
1041
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1042
module.mapInstruction(type);
1043
1044
return type->getResultId();
1045
}
1046
1047
Id Builder::makeFloatDebugType(int const width)
1048
{
1049
const char* typeName = nullptr;
1050
switch (width) {
1051
case 16: typeName = "float16_t"; break;
1052
case 64: typeName = "double"; break;
1053
default: typeName = "float"; break;
1054
}
1055
auto nameId = getStringId(typeName);
1056
// try to find it
1057
Instruction* type;
1058
for (int t = 0; t < (int)groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeBasic].size(); ++t) {
1059
type = groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeBasic][t];
1060
if (type->getIdOperand(0) == nameId &&
1061
type->getIdOperand(1) == static_cast<unsigned int>(width) &&
1062
type->getIdOperand(2) == NonSemanticShaderDebugInfo100Float)
1063
return type->getResultId();
1064
}
1065
1066
// not found, make it
1067
type = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1068
type->reserveOperands(6);
1069
type->addIdOperand(nonSemanticShaderDebugInfo);
1070
type->addImmediateOperand(NonSemanticShaderDebugInfo100DebugTypeBasic);
1071
type->addIdOperand(nameId); // name id
1072
type->addIdOperand(makeUintConstant(width)); // size id
1073
type->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100Float)); // encoding id
1074
type->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100None)); // flags id
1075
1076
groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeBasic].push_back(type);
1077
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1078
module.mapInstruction(type);
1079
1080
return type->getResultId();
1081
}
1082
1083
Id Builder::makeSequentialDebugType(Id const baseType, Id const componentCount, NonSemanticShaderDebugInfo100Instructions const sequenceType)
1084
{
1085
assert(sequenceType == NonSemanticShaderDebugInfo100DebugTypeArray ||
1086
sequenceType == NonSemanticShaderDebugInfo100DebugTypeVector);
1087
1088
// try to find it
1089
Instruction* type;
1090
for (int t = 0; t < (int)groupedDebugTypes[sequenceType].size(); ++t) {
1091
type = groupedDebugTypes[sequenceType][t];
1092
if (type->getIdOperand(0) == baseType &&
1093
type->getIdOperand(1) == makeUintConstant(componentCount))
1094
return type->getResultId();
1095
}
1096
1097
// not found, make it
1098
type = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1099
type->reserveOperands(4);
1100
type->addIdOperand(nonSemanticShaderDebugInfo);
1101
type->addImmediateOperand(sequenceType);
1102
type->addIdOperand(getDebugType(baseType)); // base type
1103
type->addIdOperand(componentCount); // component count
1104
1105
groupedDebugTypes[sequenceType].push_back(type);
1106
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1107
module.mapInstruction(type);
1108
1109
return type->getResultId();
1110
}
1111
1112
Id Builder::makeArrayDebugType(Id const baseType, Id const componentCount)
1113
{
1114
return makeSequentialDebugType(baseType, componentCount, NonSemanticShaderDebugInfo100DebugTypeArray);
1115
}
1116
1117
Id Builder::makeVectorDebugType(Id const baseType, int const componentCount)
1118
{
1119
return makeSequentialDebugType(baseType, makeUintConstant(componentCount), NonSemanticShaderDebugInfo100DebugTypeVector);
1120
}
1121
1122
Id Builder::makeMatrixDebugType(Id const vectorType, int const vectorCount, bool columnMajor)
1123
{
1124
// try to find it
1125
Instruction* type;
1126
for (int t = 0; t < (int)groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeMatrix].size(); ++t) {
1127
type = groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeMatrix][t];
1128
if (type->getIdOperand(0) == vectorType &&
1129
type->getIdOperand(1) == makeUintConstant(vectorCount))
1130
return type->getResultId();
1131
}
1132
1133
// not found, make it
1134
type = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1135
type->reserveOperands(5);
1136
type->addIdOperand(nonSemanticShaderDebugInfo);
1137
type->addImmediateOperand(NonSemanticShaderDebugInfo100DebugTypeMatrix);
1138
type->addIdOperand(getDebugType(vectorType)); // vector type id
1139
type->addIdOperand(makeUintConstant(vectorCount)); // component count id
1140
type->addIdOperand(makeBoolConstant(columnMajor)); // column-major id
1141
1142
groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeMatrix].push_back(type);
1143
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1144
module.mapInstruction(type);
1145
1146
return type->getResultId();
1147
}
1148
1149
Id Builder::makeMemberDebugType(Id const memberType, StructMemberDebugInfo const& debugTypeLoc)
1150
{
1151
assert(getDebugType(memberType) != NoType);
1152
1153
Instruction* type = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1154
type->reserveOperands(10);
1155
type->addIdOperand(nonSemanticShaderDebugInfo);
1156
type->addImmediateOperand(NonSemanticShaderDebugInfo100DebugTypeMember);
1157
type->addIdOperand(getStringId(debugTypeLoc.name)); // name id
1158
type->addIdOperand(debugTypeLoc.debugTypeOverride != 0 ? debugTypeLoc.debugTypeOverride
1159
: getDebugType(memberType)); // type id
1160
type->addIdOperand(makeDebugSource(currentFileId)); // source id
1161
type->addIdOperand(makeUintConstant(debugTypeLoc.line)); // line id TODO: currentLine is always zero
1162
type->addIdOperand(makeUintConstant(debugTypeLoc.column)); // TODO: column id
1163
type->addIdOperand(makeUintConstant(0)); // TODO: offset id
1164
type->addIdOperand(makeUintConstant(0)); // TODO: size id
1165
type->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100FlagIsPublic)); // flags id
1166
1167
groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeMember].push_back(type);
1168
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1169
module.mapInstruction(type);
1170
1171
return type->getResultId();
1172
}
1173
1174
Id Builder::makeCompositeDebugType(std::vector<Id> const& memberTypes, std::vector<StructMemberDebugInfo> const& memberDebugInfo,
1175
char const* const name, NonSemanticShaderDebugInfo100DebugCompositeType const tag)
1176
{
1177
// Create the debug member types.
1178
std::vector<Id> memberDebugTypes;
1179
assert(memberTypes.size() == memberDebugInfo.size());
1180
for (size_t i = 0; i < memberTypes.size(); i++) {
1181
if (getDebugType(memberTypes[i]) != NoType) {
1182
memberDebugTypes.emplace_back(makeMemberDebugType(memberTypes[i], memberDebugInfo[i]));
1183
}
1184
}
1185
1186
// Create the structure debug type.
1187
Instruction* type = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1188
type->reserveOperands(memberDebugTypes.size() + 11);
1189
type->addIdOperand(nonSemanticShaderDebugInfo);
1190
type->addImmediateOperand(NonSemanticShaderDebugInfo100DebugTypeComposite);
1191
type->addIdOperand(getStringId(name)); // name id
1192
type->addIdOperand(makeUintConstant(tag)); // tag id
1193
type->addIdOperand(makeDebugSource(currentFileId)); // source id
1194
type->addIdOperand(makeUintConstant(currentLine)); // line id TODO: currentLine always zero?
1195
type->addIdOperand(makeUintConstant(0)); // TODO: column id
1196
type->addIdOperand(makeDebugCompilationUnit()); // scope id
1197
type->addIdOperand(getStringId(name)); // linkage name id
1198
type->addIdOperand(makeUintConstant(0)); // TODO: size id
1199
type->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100FlagIsPublic)); // flags id
1200
for(auto const memberDebugType : memberDebugTypes) {
1201
type->addIdOperand(memberDebugType);
1202
}
1203
1204
groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeComposite].push_back(type);
1205
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1206
module.mapInstruction(type);
1207
1208
return type->getResultId();
1209
}
1210
1211
// NonSemantic Shader Debug Info has no dedicated opcode for opaque types, so we use DebugTypeComposite instead.
1212
// To represent a source-language opaque type, the instruction must have no Member operands, its Size operand must be
1213
// DebugInfoNone, and its Name must start with '@' to avoid clashes with user-defined names.
1214
Id Builder::makeOpaqueDebugType(char const* const name)
1215
{
1216
// Create the opaque composite debug type.
1217
Instruction* type = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1218
type->reserveOperands(11);
1219
type->addIdOperand(nonSemanticShaderDebugInfo);
1220
type->addImmediateOperand(NonSemanticShaderDebugInfo100DebugTypeComposite);
1221
type->addIdOperand(getStringId(name)); // name id
1222
type->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100Structure)); // tag id
1223
type->addIdOperand(makeDebugSource(currentFileId)); // source id
1224
type->addIdOperand(makeUintConstant(currentLine)); // line id TODO: currentLine always zero?
1225
type->addIdOperand(makeUintConstant(0)); // TODO: column id
1226
type->addIdOperand(makeDebugCompilationUnit()); // scope id
1227
// Prepend '@' to opaque types.
1228
type->addIdOperand(getStringId('@' + std::string(name))); // linkage name id
1229
type->addIdOperand(makeDebugInfoNone()); // size id
1230
type->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100FlagIsPublic)); // flags id
1231
1232
groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypeComposite].push_back(type);
1233
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1234
module.mapInstruction(type);
1235
1236
return type->getResultId();
1237
}
1238
1239
Id Builder::makePointerDebugType(StorageClass storageClass, Id const baseType)
1240
{
1241
const Id debugBaseType = getDebugType(baseType);
1242
if (!debugBaseType) {
1243
return makeDebugInfoNone();
1244
}
1245
const Id scID = makeUintConstant(storageClass);
1246
for (Instruction* otherType : groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypePointer]) {
1247
if (otherType->getIdOperand(2) == debugBaseType &&
1248
otherType->getIdOperand(3) == scID) {
1249
return otherType->getResultId();
1250
}
1251
}
1252
1253
Instruction* type = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1254
type->reserveOperands(5);
1255
type->addIdOperand(nonSemanticShaderDebugInfo);
1256
type->addImmediateOperand(NonSemanticShaderDebugInfo100DebugTypePointer);
1257
type->addIdOperand(debugBaseType);
1258
type->addIdOperand(scID);
1259
type->addIdOperand(makeUintConstant(0));
1260
1261
groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypePointer].push_back(type);
1262
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1263
module.mapInstruction(type);
1264
1265
return type->getResultId();
1266
}
1267
1268
// Emit an OpExtInstWithForwardRefsKHR nonsemantic instruction for a pointer debug type
1269
// whose pointee type is not known yet. Since the pointee is unavailable, the instruction
1270
// temporarily points to itself, and we rely on patching it later.
1271
Id Builder::makeForwardPointerDebugType(StorageClass storageClass)
1272
{
1273
const Id scID = makeUintConstant(storageClass);
1274
1275
this->addExtension(spv::E_SPV_KHR_relaxed_extended_instruction);
1276
1277
Instruction *type = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInstWithForwardRefsKHR);
1278
type->addIdOperand(nonSemanticShaderDebugInfo);
1279
type->addImmediateOperand(NonSemanticShaderDebugInfo100DebugTypePointer);
1280
type->addIdOperand(type->getResultId());
1281
type->addIdOperand(scID);
1282
type->addIdOperand(makeUintConstant(0));
1283
1284
groupedDebugTypes[NonSemanticShaderDebugInfo100DebugTypePointer].push_back(type);
1285
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1286
module.mapInstruction(type);
1287
1288
return type->getResultId();
1289
}
1290
1291
Id Builder::makeDebugSource(const Id fileName) {
1292
if (debugSourceId.find(fileName) != debugSourceId.end())
1293
return debugSourceId[fileName];
1294
spv::Id resultId = getUniqueId();
1295
Instruction* sourceInst = new Instruction(resultId, makeVoidType(), Op::OpExtInst);
1296
sourceInst->reserveOperands(3);
1297
sourceInst->addIdOperand(nonSemanticShaderDebugInfo);
1298
sourceInst->addImmediateOperand(NonSemanticShaderDebugInfo100DebugSource);
1299
sourceInst->addIdOperand(fileName);
1300
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(sourceInst));
1301
module.mapInstruction(sourceInst);
1302
if (emitNonSemanticShaderDebugSource) {
1303
const int maxWordCount = 0xFFFF;
1304
const int opSourceWordCount = 4;
1305
const int nonNullBytesPerInstruction = 4 * (maxWordCount - opSourceWordCount) - 1;
1306
auto processDebugSource = [&](std::string source) {
1307
if (source.size() > 0) {
1308
int nextByte = 0;
1309
while ((int)source.size() - nextByte > 0) {
1310
auto subString = source.substr(nextByte, nonNullBytesPerInstruction);
1311
auto sourceId = getStringId(subString);
1312
if (nextByte == 0) {
1313
// DebugSource
1314
sourceInst->addIdOperand(sourceId);
1315
} else {
1316
// DebugSourceContinued
1317
Instruction* sourceContinuedInst = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1318
sourceContinuedInst->reserveOperands(2);
1319
sourceContinuedInst->addIdOperand(nonSemanticShaderDebugInfo);
1320
sourceContinuedInst->addImmediateOperand(NonSemanticShaderDebugInfo100DebugSourceContinued);
1321
sourceContinuedInst->addIdOperand(sourceId);
1322
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(sourceContinuedInst));
1323
module.mapInstruction(sourceContinuedInst);
1324
}
1325
nextByte += nonNullBytesPerInstruction;
1326
}
1327
} else {
1328
auto sourceId = getStringId(source);
1329
sourceInst->addIdOperand(sourceId);
1330
}
1331
};
1332
if (fileName == mainFileId) {
1333
processDebugSource(sourceText);
1334
} else {
1335
auto incItr = includeFiles.find(fileName);
1336
if (incItr != includeFiles.end()) {
1337
processDebugSource(*incItr->second);
1338
} else {
1339
// We omit the optional source text item if not available in glslang
1340
}
1341
}
1342
}
1343
debugSourceId[fileName] = resultId;
1344
return resultId;
1345
}
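
// --- Illustrative note (not part of upstream glslang) ------------------------
// A quick check of the chunking arithmetic used above: each DebugSource /
// DebugSourceContinued operand string is capped so the containing OpExtInst
// stays within the 0xFFFF-word instruction size limit.
#if 0
static_assert(4 * (0xFFFF - 4) - 1 == 262123,
              "each source chunk carries at most 262123 non-null bytes");
#endif
// ------------------------------------------------------------------------------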
1346
1347
Id Builder::makeDebugCompilationUnit() {
1348
if (nonSemanticShaderCompilationUnitId != 0)
1349
return nonSemanticShaderCompilationUnitId;
1350
spv::Id resultId = getUniqueId();
1351
Instruction* sourceInst = new Instruction(resultId, makeVoidType(), Op::OpExtInst);
1352
sourceInst->reserveOperands(6);
1353
sourceInst->addIdOperand(nonSemanticShaderDebugInfo);
1354
sourceInst->addImmediateOperand(NonSemanticShaderDebugInfo100DebugCompilationUnit);
1355
sourceInst->addIdOperand(makeUintConstant(1)); // TODO(greg-lunarg): Get rid of magic number
1356
sourceInst->addIdOperand(makeUintConstant(4)); // TODO(greg-lunarg): Get rid of magic number
1357
sourceInst->addIdOperand(makeDebugSource(mainFileId));
1358
sourceInst->addIdOperand(makeUintConstant(sourceLang));
1359
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(sourceInst));
1360
module.mapInstruction(sourceInst);
1361
nonSemanticShaderCompilationUnitId = resultId;
1362
1363
// We can reasonably assume that makeDebugCompilationUnit will be called before anything is
1364
// pushed onto the debug-scope stack; function scopes and lexical scopes will occur afterward.
1365
assert(currentDebugScopeId.empty());
1366
currentDebugScopeId.push(nonSemanticShaderCompilationUnitId);
1367
1368
return resultId;
1369
}
1370
1371
Id Builder::createDebugGlobalVariable(Id const type, char const*const name, Id const variable)
1372
{
1373
assert(type != 0);
1374
1375
Instruction* inst = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1376
inst->reserveOperands(11);
1377
inst->addIdOperand(nonSemanticShaderDebugInfo);
1378
inst->addImmediateOperand(NonSemanticShaderDebugInfo100DebugGlobalVariable);
1379
inst->addIdOperand(getStringId(name)); // name id
1380
inst->addIdOperand(type); // type id
1381
inst->addIdOperand(makeDebugSource(currentFileId)); // source id
1382
inst->addIdOperand(makeUintConstant(currentLine)); // line id TODO: currentLine always zero?
1383
inst->addIdOperand(makeUintConstant(0)); // TODO: column id
1384
inst->addIdOperand(makeDebugCompilationUnit()); // scope id
1385
inst->addIdOperand(getStringId(name)); // linkage name id
1386
inst->addIdOperand(variable); // variable id
1387
inst->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100FlagIsDefinition)); // flags id
1388
1389
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(inst));
1390
module.mapInstruction(inst);
1391
1392
return inst->getResultId();
1393
}
1394
1395
Id Builder::createDebugLocalVariable(Id type, char const*const name, size_t const argNumber)
1396
{
1397
assert(name != nullptr);
1398
assert(!currentDebugScopeId.empty());
1399
1400
Instruction* inst = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1401
inst->reserveOperands(9);
1402
inst->addIdOperand(nonSemanticShaderDebugInfo);
1403
inst->addImmediateOperand(NonSemanticShaderDebugInfo100DebugLocalVariable);
1404
inst->addIdOperand(getStringId(name)); // name id
1405
inst->addIdOperand(type); // type id
1406
inst->addIdOperand(makeDebugSource(currentFileId)); // source id
1407
inst->addIdOperand(makeUintConstant(currentLine)); // line id
1408
inst->addIdOperand(makeUintConstant(0)); // TODO: column id
1409
inst->addIdOperand(currentDebugScopeId.top()); // scope id
1410
inst->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100FlagIsLocal)); // flags id
1411
if(argNumber != 0) {
1412
inst->addIdOperand(makeUintConstant(static_cast<unsigned int>(argNumber)));
1413
}
1414
1415
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(inst));
1416
module.mapInstruction(inst);
1417
1418
return inst->getResultId();
1419
}
1420
1421
Id Builder::makeDebugExpression()
1422
{
1423
if (debugExpression != 0)
1424
return debugExpression;
1425
1426
Instruction* inst = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1427
inst->reserveOperands(2);
1428
inst->addIdOperand(nonSemanticShaderDebugInfo);
1429
inst->addImmediateOperand(NonSemanticShaderDebugInfo100DebugExpression);
1430
1431
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(inst));
1432
module.mapInstruction(inst);
1433
1434
debugExpression = inst->getResultId();
1435
1436
return debugExpression;
1437
}
1438
1439
Id Builder::makeDebugDeclare(Id const debugLocalVariable, Id const pointer)
1440
{
1441
Instruction* inst = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1442
inst->reserveOperands(5);
1443
inst->addIdOperand(nonSemanticShaderDebugInfo);
1444
inst->addImmediateOperand(NonSemanticShaderDebugInfo100DebugDeclare);
1445
inst->addIdOperand(debugLocalVariable); // debug local variable id
1446
inst->addIdOperand(pointer); // pointer to local variable id
1447
inst->addIdOperand(makeDebugExpression()); // expression id
1448
addInstruction(std::unique_ptr<Instruction>(inst));
1449
1450
return inst->getResultId();
1451
}
1452
1453
Id Builder::makeDebugValue(Id const debugLocalVariable, Id const value)
1454
{
1455
Instruction* inst = new Instruction(getUniqueId(), makeVoidType(), Op::OpExtInst);
1456
inst->reserveOperands(5);
1457
inst->addIdOperand(nonSemanticShaderDebugInfo);
1458
inst->addImmediateOperand(NonSemanticShaderDebugInfo100DebugValue);
1459
inst->addIdOperand(debugLocalVariable); // debug local variable id
1460
inst->addIdOperand(value); // value of local variable id
1461
inst->addIdOperand(makeDebugExpression()); // expression id
1462
addInstruction(std::unique_ptr<Instruction>(inst));
1463
1464
return inst->getResultId();
1465
}
1466
1467
Id Builder::makeAccelerationStructureType()
1468
{
1469
Instruction *type;
1470
if (groupedTypes[enumCast(Op::OpTypeAccelerationStructureKHR)].size() == 0) {
1471
type = new Instruction(getUniqueId(), NoType, Op::OpTypeAccelerationStructureKHR);
1472
groupedTypes[enumCast(Op::OpTypeAccelerationStructureKHR)].push_back(type);
1473
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1474
module.mapInstruction(type);
1475
if (emitNonSemanticShaderDebugInfo) {
1476
spv::Id debugType = makeOpaqueDebugType("accelerationStructure");
1477
debugTypeIdLookup[type->getResultId()] = debugType;
1478
}
1479
} else {
1480
type = groupedTypes[enumCast(Op::OpTypeAccelerationStructureKHR)].back();
1481
}
1482
1483
return type->getResultId();
1484
}
1485
1486
Id Builder::makeRayQueryType()
1487
{
1488
Instruction *type;
1489
if (groupedTypes[enumCast(Op::OpTypeRayQueryKHR)].size() == 0) {
1490
type = new Instruction(getUniqueId(), NoType, Op::OpTypeRayQueryKHR);
1491
groupedTypes[enumCast(Op::OpTypeRayQueryKHR)].push_back(type);
1492
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1493
module.mapInstruction(type);
1494
if (emitNonSemanticShaderDebugInfo) {
1495
spv::Id debugType = makeOpaqueDebugType("rayQuery");
1496
debugTypeIdLookup[type->getResultId()] = debugType;
1497
}
1498
} else {
1499
type = groupedTypes[enumCast(Op::OpTypeRayQueryKHR)].back();
1500
}
1501
1502
return type->getResultId();
1503
}
1504
1505
Id Builder::makeHitObjectEXTType()
1506
{
1507
Instruction *type;
1508
if (groupedTypes[enumCast(Op::OpTypeHitObjectEXT)].size() == 0) {
1509
type = new Instruction(getUniqueId(), NoType, Op::OpTypeHitObjectEXT);
1510
groupedTypes[enumCast(Op::OpTypeHitObjectEXT)].push_back(type);
1511
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1512
module.mapInstruction(type);
1513
} else {
1514
type = groupedTypes[enumCast(Op::OpTypeHitObjectEXT)].back();
1515
}
1516
1517
return type->getResultId();
1518
}
1519
Id Builder::makeHitObjectNVType()
1520
{
1521
Instruction *type;
1522
if (groupedTypes[enumCast(Op::OpTypeHitObjectNV)].size() == 0) {
1523
type = new Instruction(getUniqueId(), NoType, Op::OpTypeHitObjectNV);
1524
groupedTypes[enumCast(Op::OpTypeHitObjectNV)].push_back(type);
1525
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
1526
module.mapInstruction(type);
1527
if (emitNonSemanticShaderDebugInfo) {
1528
spv::Id debugType = makeOpaqueDebugType("hitObjectNV");
1529
debugTypeIdLookup[type->getResultId()] = debugType;
1530
}
1531
} else {
1532
type = groupedTypes[enumCast(Op::OpTypeHitObjectNV)].back();
1533
}
1534
1535
return type->getResultId();
1536
}
1537
1538
Id Builder::getDerefTypeId(Id resultId) const
1539
{
1540
Id typeId = getTypeId(resultId);
1541
assert(isPointerType(typeId));
1542
1543
return module.getInstruction(typeId)->getIdOperand(1);
1544
}
1545
1546
Op Builder::getMostBasicTypeClass(Id typeId) const
1547
{
1548
Instruction* instr = module.getInstruction(typeId);
1549
1550
Op typeClass = instr->getOpCode();
1551
switch (typeClass)
1552
{
1553
case Op::OpTypeVector:
1554
case Op::OpTypeMatrix:
1555
case Op::OpTypeArray:
1556
case Op::OpTypeRuntimeArray:
1557
return getMostBasicTypeClass(instr->getIdOperand(0));
1558
case Op::OpTypePointer:
1559
return getMostBasicTypeClass(instr->getIdOperand(1));
1560
default:
1561
return typeClass;
1562
}
1563
}
1564
1565
unsigned int Builder::getNumTypeConstituents(Id typeId) const
1566
{
1567
Instruction* instr = module.getInstruction(typeId);
1568
1569
switch (instr->getOpCode())
1570
{
1571
case Op::OpTypeBool:
1572
case Op::OpTypeInt:
1573
case Op::OpTypeFloat:
1574
case Op::OpTypePointer:
1575
return 1;
1576
case Op::OpTypeVector:
1577
case Op::OpTypeMatrix:
1578
return instr->getImmediateOperand(1);
1579
case Op::OpTypeCooperativeVectorNV:
1580
case Op::OpTypeArray:
1581
{
1582
Id lengthId = instr->getIdOperand(1);
1583
return module.getInstruction(lengthId)->getImmediateOperand(0);
1584
}
1585
case Op::OpTypeStruct:
1586
return instr->getNumOperands();
1587
case Op::OpTypeCooperativeMatrixKHR:
1588
case Op::OpTypeCooperativeMatrixNV:
1589
// has only one constituent when used with OpCompositeConstruct.
1590
return 1;
1591
default:
1592
assert(0);
1593
return 1;
1594
}
1595
}
1596
1597
// Return the lowest-level type of scalar that a homogeneous composite is made out of.
// Typically, this is just to find out if something is made out of ints or floats.
// However, it can also return a structure id if, say, the composite is an array of structures.
1600
Id Builder::getScalarTypeId(Id typeId) const
1601
{
1602
Instruction* instr = module.getInstruction(typeId);
1603
1604
Op typeClass = instr->getOpCode();
1605
switch (typeClass)
1606
{
1607
case Op::OpTypeVoid:
1608
case Op::OpTypeBool:
1609
case Op::OpTypeInt:
1610
case Op::OpTypeFloat:
1611
case Op::OpTypeStruct:
1612
return instr->getResultId();
1613
case Op::OpTypeVector:
1614
case Op::OpTypeMatrix:
1615
case Op::OpTypeArray:
1616
case Op::OpTypeRuntimeArray:
1617
case Op::OpTypePointer:
1618
case Op::OpTypeCooperativeVectorNV:
1619
return getScalarTypeId(getContainedTypeId(typeId));
1620
default:
1621
assert(0);
1622
return NoResult;
1623
}
1624
}
1625
1626
// Return the type of 'member' of a composite.
1627
Id Builder::getContainedTypeId(Id typeId, int member) const
1628
{
1629
Instruction* instr = module.getInstruction(typeId);
1630
1631
Op typeClass = instr->getOpCode();
1632
switch (typeClass)
1633
{
1634
case Op::OpTypeVector:
1635
case Op::OpTypeMatrix:
1636
case Op::OpTypeArray:
1637
case Op::OpTypeRuntimeArray:
1638
case Op::OpTypeCooperativeMatrixKHR:
1639
case Op::OpTypeCooperativeMatrixNV:
1640
case Op::OpTypeCooperativeVectorNV:
1641
return instr->getIdOperand(0);
1642
case Op::OpTypePointer:
1643
return instr->getIdOperand(1);
1644
case Op::OpTypeStruct:
1645
return instr->getIdOperand(member);
1646
default:
1647
assert(0);
1648
return NoResult;
1649
}
1650
}
1651
1652
// Figure out the final resulting type of the access chain.
1653
Id Builder::getResultingAccessChainType() const
1654
{
1655
assert(accessChain.base != NoResult);
1656
Id typeId = getTypeId(accessChain.base);
1657
1658
assert(isPointerType(typeId));
1659
typeId = getContainedTypeId(typeId);
1660
1661
for (int i = 0; i < (int)accessChain.indexChain.size(); ++i) {
1662
if (isStructType(typeId)) {
1663
assert(isConstantScalar(accessChain.indexChain[i]));
1664
typeId = getContainedTypeId(typeId, getConstantScalar(accessChain.indexChain[i]));
1665
} else
1666
typeId = getContainedTypeId(typeId, accessChain.indexChain[i]);
1667
}
1668
1669
return typeId;
1670
}
1671
1672
// Return the immediately contained type of a given composite type.
1673
Id Builder::getContainedTypeId(Id typeId) const
1674
{
1675
return getContainedTypeId(typeId, 0);
1676
}
1677
1678
// Returns true if 'typeId' is or contains a scalar type declared with 'typeOp'
1679
// of width 'width'. The 'width' is only consumed for int and float types.
1680
// Returns false otherwise.
1681
bool Builder::containsType(Id typeId, spv::Op typeOp, unsigned int width) const
1682
{
1683
const Instruction& instr = *module.getInstruction(typeId);
1684
1685
Op typeClass = instr.getOpCode();
1686
switch (typeClass)
1687
{
1688
case Op::OpTypeInt:
1689
case Op::OpTypeFloat:
1690
return typeClass == typeOp && instr.getImmediateOperand(0) == width;
1691
case Op::OpTypeStruct:
1692
for (int m = 0; m < instr.getNumOperands(); ++m) {
1693
if (containsType(instr.getIdOperand(m), typeOp, width))
1694
return true;
1695
}
1696
return false;
1697
case Op::OpTypePointer:
1698
return false;
1699
case Op::OpTypeVector:
1700
case Op::OpTypeMatrix:
1701
case Op::OpTypeArray:
1702
case Op::OpTypeRuntimeArray:
1703
return containsType(getContainedTypeId(typeId), typeOp, width);
1704
default:
1705
return typeClass == typeOp;
1706
}
1707
}
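// Illustrative usage sketch (not part of the original source): a caller could ask
// whether a composite type includes a 16-bit float anywhere inside it, e.g. to decide
// whether a related capability is needed. 'builder' and 'someTypeId' are assumed to
// exist on the caller's side:
//
//     bool hasHalf = builder.containsType(someTypeId, spv::Op::OpTypeFloat, 16);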
1708
1709
// Return true if the type is a pointer to PhysicalStorageBufferEXT or
// contains such a pointer. These require restrict/aliased decorations.
1711
bool Builder::containsPhysicalStorageBufferOrArray(Id typeId) const
1712
{
1713
const Instruction& instr = *module.getInstruction(typeId);
1714
1715
Op typeClass = instr.getOpCode();
1716
switch (typeClass)
1717
{
1718
case Op::OpTypePointer:
1719
return getTypeStorageClass(typeId) == StorageClass::PhysicalStorageBufferEXT;
1720
case Op::OpTypeArray:
1721
return containsPhysicalStorageBufferOrArray(getContainedTypeId(typeId));
1722
case Op::OpTypeStruct:
1723
for (int m = 0; m < instr.getNumOperands(); ++m) {
1724
if (containsPhysicalStorageBufferOrArray(instr.getIdOperand(m)))
1725
return true;
1726
}
1727
return false;
1728
default:
1729
return false;
1730
}
1731
}
1732
1733
// See if a scalar constant of this type has already been created, so it
1734
// can be reused rather than duplicated. (Required by the specification).
1735
Id Builder::findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value)
1736
{
1737
ScalarConstantKey key{ enumCast(typeClass), enumCast(opcode), typeId, value, 0 };
1738
auto it = groupedScalarConstantResultIDs.find(key);
1739
return (it != groupedScalarConstantResultIDs.end()) ? it->second : 0;
1740
}
1741
1742
// Version of findScalarConstant (see above) for scalars that take two operands (e.g. a 'double' or 'int64').
1743
Id Builder::findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2)
1744
{
1745
ScalarConstantKey key{ enumCast(typeClass), enumCast(opcode), typeId, v1, v2 };
1746
auto it = groupedScalarConstantResultIDs.find(key);
1747
return (it != groupedScalarConstantResultIDs.end()) ? it->second : 0;
1748
}
1749
1750
// Return true if consuming 'opcode' means consuming a constant.
1751
// "constant" here means after final transform to executable code,
1752
// the value consumed will be a constant, so includes specialization.
1753
bool Builder::isConstantOpCode(Op opcode) const
1754
{
1755
switch (opcode) {
1756
case Op::OpUndef:
1757
case Op::OpConstantTrue:
1758
case Op::OpConstantFalse:
1759
case Op::OpConstant:
1760
case Op::OpConstantComposite:
1761
case Op::OpConstantCompositeReplicateEXT:
1762
case Op::OpConstantSampler:
1763
case Op::OpConstantNull:
1764
case Op::OpSpecConstantTrue:
1765
case Op::OpSpecConstantFalse:
1766
case Op::OpSpecConstant:
1767
case Op::OpSpecConstantComposite:
1768
case Op::OpSpecConstantCompositeReplicateEXT:
1769
case Op::OpSpecConstantOp:
1770
return true;
1771
default:
1772
return false;
1773
}
1774
}
1775
1776
// Return true if consuming 'opcode' means consuming a specialization constant.
1777
bool Builder::isSpecConstantOpCode(Op opcode) const
1778
{
1779
switch (opcode) {
1780
case Op::OpSpecConstantTrue:
1781
case Op::OpSpecConstantFalse:
1782
case Op::OpSpecConstant:
1783
case Op::OpSpecConstantComposite:
1784
case Op::OpSpecConstantOp:
1785
case Op::OpSpecConstantCompositeReplicateEXT:
1786
return true;
1787
default:
1788
return false;
1789
}
1790
}
1791
1792
Id Builder::makeNullConstant(Id typeId)
1793
{
1794
Instruction* constant;
1795
1796
// See if we already made it.
1797
Id existing = NoResult;
1798
for (int i = 0; i < (int)nullConstants.size(); ++i) {
1799
constant = nullConstants[i];
1800
if (constant->getTypeId() == typeId)
1801
existing = constant->getResultId();
1802
}
1803
1804
if (existing != NoResult)
1805
return existing;
1806
1807
// Make it
1808
Instruction* c = new Instruction(getUniqueId(), typeId, Op::OpConstantNull);
1809
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
1810
nullConstants.push_back(c);
1811
module.mapInstruction(c);
1812
1813
return c->getResultId();
1814
}
1815
1816
Id Builder::makeBoolConstant(bool b, bool specConstant)
1817
{
1818
Id typeId = makeBoolType();
1819
Op opcode = specConstant ? (b ? Op::OpSpecConstantTrue : Op::OpSpecConstantFalse) : (b ? Op::OpConstantTrue : Op::OpConstantFalse);
1820
1821
// See if we already made it. Applies only to regular constants, because specialization constants
1822
// must remain distinct for the purpose of applying a SpecId decoration.
1823
if (!specConstant) {
1824
Id existing = findScalarConstant(Op::OpTypeBool, opcode, typeId, 0);
1825
if (existing)
1826
return existing;
1827
}
1828
1829
// Make it
1830
Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
1831
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
1832
module.mapInstruction(c);
1833
1834
Id resultId = c->getResultId();
1835
if (!specConstant) {
1836
ScalarConstantKey key{enumCast(Op::OpTypeBool), enumCast(opcode), typeId, 0, 0};
1837
groupedScalarConstantResultIDs[key] = resultId;
1838
}
1839
return resultId;
1840
}
1841
1842
Id Builder::makeIntConstant(Id typeId, unsigned value, bool specConstant)
1843
{
1844
Op opcode = specConstant ? Op::OpSpecConstant : Op::OpConstant;
1845
1846
// See if we already made it. Applies only to regular constants, because specialization constants
1847
// must remain distinct for the purpose of applying a SpecId decoration.
1848
if (! specConstant) {
1849
Id existing = findScalarConstant(Op::OpTypeInt, opcode, typeId, value);
1850
if (existing)
1851
return existing;
1852
}
1853
1854
Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
1855
c->addImmediateOperand(value);
1856
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
1857
module.mapInstruction(c);
1858
1859
Id resultId = c->getResultId();
1860
if (!specConstant) {
1861
ScalarConstantKey key{ enumCast(Op::OpTypeInt), enumCast(opcode), typeId, value, 0 };
1862
groupedScalarConstantResultIDs[key] = resultId;
1863
}
1864
return resultId;
1865
}
1866
1867
Id Builder::makeInt64Constant(Id typeId, unsigned long long value, bool specConstant)
1868
{
1869
Op opcode = specConstant ? Op::OpSpecConstant : Op::OpConstant;
1870
1871
unsigned op1 = value & 0xFFFFFFFF;
1872
unsigned op2 = value >> 32;
1873
1874
// See if we already made it. Applies only to regular constants, because specialization constants
1875
// must remain distinct for the purpose of applying a SpecId decoration.
1876
if (! specConstant) {
1877
Id existing = findScalarConstant(Op::OpTypeInt, opcode, typeId, op1, op2);
1878
if (existing)
1879
return existing;
1880
}
1881
1882
Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
1883
c->reserveOperands(2);
1884
c->addImmediateOperand(op1);
1885
c->addImmediateOperand(op2);
1886
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
1887
module.mapInstruction(c);
1888
1889
Id resultId = c->getResultId();
1890
if (!specConstant) {
1891
ScalarConstantKey key{ enumCast(Op::OpTypeInt), enumCast(opcode), typeId, op1, op2 };
1892
groupedScalarConstantResultIDs[key] = resultId;
1893
}
1894
return resultId;
1895
}
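// Worked example (illustrative, not part of the original source): the 64-bit literal
// is stored as two 32-bit words, low word first, exactly as in makeInt64Constant above:
//
//     unsigned long long value = 0x0000000100000002ull;
//     unsigned op1 = value & 0xFFFFFFFF; // 0x00000002 (low word)
//     unsigned op2 = value >> 32;        // 0x00000001 (high word)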
1896
1897
Id Builder::makeFloatConstant(float f, bool specConstant)
1898
{
1899
Op opcode = specConstant ? Op::OpSpecConstant : Op::OpConstant;
1900
Id typeId = makeFloatType(32);
1901
union { float fl; unsigned int ui; } u;
1902
u.fl = f;
1903
unsigned value = u.ui;
1904
1905
// See if we already made it. Applies only to regular constants, because specialization constants
1906
// must remain distinct for the purpose of applying a SpecId decoration.
1907
if (! specConstant) {
1908
Id existing = findScalarConstant(Op::OpTypeFloat, opcode, typeId, value);
1909
if (existing)
1910
return existing;
1911
}
1912
1913
Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
1914
c->addImmediateOperand(value);
1915
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
1916
module.mapInstruction(c);
1917
1918
Id resultId = c->getResultId();
1919
if (!specConstant) {
1920
ScalarConstantKey key{ enumCast(Op::OpTypeFloat), enumCast(opcode), typeId, value, 0 };
1921
groupedScalarConstantResultIDs[key] = resultId;
1922
}
1923
return resultId;
1924
}
1925
1926
Id Builder::makeDoubleConstant(double d, bool specConstant)
1927
{
1928
Op opcode = specConstant ? Op::OpSpecConstant : Op::OpConstant;
1929
Id typeId = makeFloatType(64);
1930
union { double db; unsigned long long ull; } u;
1931
u.db = d;
1932
unsigned long long value = u.ull;
1933
unsigned op1 = value & 0xFFFFFFFF;
1934
unsigned op2 = value >> 32;
1935
1936
// See if we already made it. Applies only to regular constants, because specialization constants
1937
// must remain distinct for the purpose of applying a SpecId decoration.
1938
if (! specConstant) {
1939
Id existing = findScalarConstant(Op::OpTypeFloat, opcode, typeId, op1, op2);
1940
if (existing)
1941
return existing;
1942
}
1943
1944
Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
1945
c->reserveOperands(2);
1946
c->addImmediateOperand(op1);
1947
c->addImmediateOperand(op2);
1948
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
1949
module.mapInstruction(c);
1950
1951
Id resultId = c->getResultId();
1952
if (!specConstant) {
1953
ScalarConstantKey key{ enumCast(Op::OpTypeFloat), enumCast(opcode), typeId, op1, op2 };
1954
groupedScalarConstantResultIDs[key] = resultId;
1955
}
1956
return resultId;
1957
}
1958
1959
Id Builder::makeFloat16Constant(float f16, bool specConstant)
1960
{
1961
Op opcode = specConstant ? Op::OpSpecConstant : Op::OpConstant;
1962
Id typeId = makeFloatType(16);
1963
1964
spvutils::HexFloat<spvutils::FloatProxy<float>> fVal(f16);
1965
spvutils::HexFloat<spvutils::FloatProxy<spvutils::Float16>> f16Val(0);
1966
fVal.castTo(f16Val, spvutils::kRoundToZero);
1967
1968
unsigned value = f16Val.value().getAsFloat().get_value();
1969
1970
// See if we already made it. Applies only to regular constants, because specialization constants
1971
// must remain distinct for the purpose of applying a SpecId decoration.
1972
if (!specConstant) {
1973
Id existing = findScalarConstant(Op::OpTypeFloat, opcode, typeId, value);
1974
if (existing)
1975
return existing;
1976
}
1977
1978
Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
1979
c->addImmediateOperand(value);
1980
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
1981
module.mapInstruction(c);
1982
1983
Id resultId = c->getResultId();
1984
if (!specConstant) {
1985
ScalarConstantKey key{ enumCast(Op::OpTypeFloat), enumCast(opcode), typeId, value, 0 };
1986
groupedScalarConstantResultIDs[key] = resultId;
1987
}
1988
return resultId;
1989
}
1990
1991
Id Builder::makeBFloat16Constant(float bf16, bool specConstant)
1992
{
1993
Op opcode = specConstant ? Op::OpSpecConstant : Op::OpConstant;
1994
Id typeId = makeBFloat16Type();
1995
1996
union {
1997
float f;
1998
uint32_t u;
1999
} un;
2000
un.f = bf16;
2001
2002
// take high 16b of fp32 value. This is effectively round-to-zero, other than certain NaNs.
2003
unsigned value = un.u >> 16;
2004
2005
// See if we already made it. Applies only to regular constants, because specialization constants
2006
// must remain distinct for the purpose of applying a SpecId decoration.
2007
if (!specConstant) {
2008
Id existing = findScalarConstant(Op::OpTypeFloat, opcode, typeId, value);
2009
if (existing)
2010
return existing;
2011
}
2012
2013
Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
2014
c->addImmediateOperand(value);
2015
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
2016
module.mapInstruction(c);
2017
2018
Id resultId = c->getResultId();
2019
if (!specConstant) {
2020
ScalarConstantKey key{ enumCast(Op::OpTypeFloat), enumCast(opcode), typeId, value, 0 };
2021
groupedScalarConstantResultIDs[key] = resultId;
2022
}
2023
return resultId;
2024
}
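// Worked example (illustrative, not part of the original source): taking the high
// 16 bits of the fp32 bit pattern yields the bfloat16 encoding used above:
//
//     union { float f; uint32_t u; } un; un.f = 1.0f;   // un.u == 0x3F800000
//     unsigned bf16 = un.u >> 16;                        // 0x3F80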
2025
2026
Id Builder::makeFloatE5M2Constant(float fe5m2, bool specConstant)
2027
{
2028
Op opcode = specConstant ? Op::OpSpecConstant : Op::OpConstant;
2029
Id typeId = makeFloatE5M2Type();
2030
2031
spvutils::HexFloat<spvutils::FloatProxy<float>> fVal(fe5m2);
2032
spvutils::HexFloat<spvutils::FloatProxy<spvutils::FloatE5M2>> fe5m2Val(0);
2033
fVal.castTo(fe5m2Val, spvutils::kRoundToZero);
2034
2035
unsigned value = fe5m2Val.value().getAsFloat().get_value();
2036
2037
// See if we already made it. Applies only to regular constants, because specialization constants
2038
// must remain distinct for the purpose of applying a SpecId decoration.
2039
if (!specConstant) {
2040
Id existing = findScalarConstant(Op::OpTypeFloat, opcode, typeId, value);
2041
if (existing)
2042
return existing;
2043
}
2044
2045
Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
2046
c->addImmediateOperand(value);
2047
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
2048
module.mapInstruction(c);
2049
2050
Id resultId = c->getResultId();
2051
if (!specConstant) {
2052
ScalarConstantKey key{enumCast(Op::OpTypeFloat), enumCast(opcode), typeId, value, 0};
2053
groupedScalarConstantResultIDs[key] = resultId;
2054
}
2055
return resultId;
2056
}
2057
2058
Id Builder::makeFloatE4M3Constant(float fe4m3, bool specConstant)
2059
{
2060
Op opcode = specConstant ? Op::OpSpecConstant : Op::OpConstant;
2061
Id typeId = makeFloatE4M3Type();
2062
2063
spvutils::HexFloat<spvutils::FloatProxy<float>> fVal(fe4m3);
2064
spvutils::HexFloat<spvutils::FloatProxy<spvutils::FloatE4M3>> fe4m3Val(0);
2065
fVal.castTo(fe4m3Val, spvutils::kRoundToZero);
2066
2067
unsigned value = fe4m3Val.value().getAsFloat().get_value();
2068
2069
// See if we already made it. Applies only to regular constants, because specialization constants
2070
// must remain distinct for the purpose of applying a SpecId decoration.
2071
if (!specConstant) {
2072
Id existing = findScalarConstant(Op::OpTypeFloat, opcode, typeId, value);
2073
if (existing)
2074
return existing;
2075
}
2076
2077
Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
2078
c->addImmediateOperand(value);
2079
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
2080
module.mapInstruction(c);
2081
2082
Id resultId = c->getResultId();
2083
if (!specConstant) {
2084
ScalarConstantKey key{enumCast(Op::OpTypeFloat), enumCast(opcode), typeId, value, 0};
2085
groupedScalarConstantResultIDs[key] = resultId;
2086
}
2087
return resultId;
2088
}
2089
2090
Id Builder::makeFpConstant(Id type, double d, bool specConstant)
2091
{
2092
const int width = getScalarTypeWidth(type);
2093
2094
assert(isFloatType(type));
2095
2096
switch (width) {
2097
case 16:
2098
return makeFloat16Constant((float)d, specConstant);
2099
case 32:
2100
return makeFloatConstant((float)d, specConstant);
2101
case 64:
2102
return makeDoubleConstant(d, specConstant);
2103
default:
2104
break;
2105
}
2106
2107
assert(false);
2108
return NoResult;
2109
}
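// Illustrative usage sketch (not part of the original source): emitting a literal
// whose width matches an existing float type id, letting makeFpConstant above pick
// the right makeFloat*Constant overload. 'builder' is an assumed Builder instance:
//
//     spv::Id f16Type = builder.makeFloatType(16);
//     spv::Id half1   = builder.makeFpConstant(f16Type, 1.0, /*specConstant*/ false);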
2110
2111
Id Builder::importNonSemanticShaderDebugInfoInstructions()
2112
{
2113
assert(emitNonSemanticShaderDebugInfo == true);
2114
2115
if(nonSemanticShaderDebugInfo == 0)
2116
{
2117
this->addExtension(spv::E_SPV_KHR_non_semantic_info);
2118
nonSemanticShaderDebugInfo = this->import("NonSemantic.Shader.DebugInfo.100");
2119
}
2120
2121
return nonSemanticShaderDebugInfo;
2122
}
2123
2124
Id Builder::findCompositeConstant(Op typeClass, Op opcode, Id typeId, const std::vector<Id>& comps, size_t numMembers)
2125
{
2126
Instruction* constant = nullptr;
2127
bool found = false;
2128
for (int i = 0; i < (int)groupedCompositeConstants[enumCast(typeClass)].size(); ++i) {
2129
constant = groupedCompositeConstants[enumCast(typeClass)][i];
2130
2131
if (constant->getTypeId() != typeId)
2132
continue;
2133
2134
if (constant->getOpCode() != opcode) {
2135
continue;
2136
}
2137
2138
if (constant->getNumOperands() != (int)numMembers)
2139
continue;
2140
2141
// same contents?
2142
bool mismatch = false;
2143
for (int op = 0; op < constant->getNumOperands(); ++op) {
2144
if (constant->getIdOperand(op) != comps[op]) {
2145
mismatch = true;
2146
break;
2147
}
2148
}
2149
if (! mismatch) {
2150
found = true;
2151
break;
2152
}
2153
}
2154
2155
return found ? constant->getResultId() : NoResult;
2156
}
2157
2158
Id Builder::findStructConstant(Id typeId, const std::vector<Id>& comps)
2159
{
2160
Instruction* constant = nullptr;
2161
bool found = false;
2162
for (int i = 0; i < (int)groupedStructConstants[typeId].size(); ++i) {
2163
constant = groupedStructConstants[typeId][i];
2164
2165
// same contents?
2166
bool mismatch = false;
2167
for (int op = 0; op < constant->getNumOperands(); ++op) {
2168
if (constant->getIdOperand(op) != comps[op]) {
2169
mismatch = true;
2170
break;
2171
}
2172
}
2173
if (! mismatch) {
2174
found = true;
2175
break;
2176
}
2177
}
2178
2179
return found ? constant->getResultId() : NoResult;
2180
}
2181
2182
// Comments in header
2183
Id Builder::makeCompositeConstant(Id typeId, const std::vector<Id>& members, bool specConstant)
2184
{
2185
assert(typeId);
2186
Op typeClass = getTypeClass(typeId);
2187
2188
bool replicate = false;
2189
size_t numMembers = members.size();
2190
if (useReplicatedComposites || typeClass == Op::OpTypeCooperativeVectorNV) {
2191
// use replicate if all members are the same
2192
replicate = numMembers > 0 &&
2193
std::equal(members.begin() + 1, members.end(), members.begin());
2194
2195
if (replicate) {
2196
numMembers = 1;
2197
addCapability(spv::Capability::ReplicatedCompositesEXT);
2198
addExtension(spv::E_SPV_EXT_replicated_composites);
2199
}
2200
}
2201
2202
Op opcode = replicate ?
2203
(specConstant ? Op::OpSpecConstantCompositeReplicateEXT : Op::OpConstantCompositeReplicateEXT) :
2204
(specConstant ? Op::OpSpecConstantComposite : Op::OpConstantComposite);
2205
2206
switch (typeClass) {
2207
case Op::OpTypeVector:
2208
case Op::OpTypeArray:
2209
case Op::OpTypeMatrix:
2210
case Op::OpTypeCooperativeMatrixKHR:
2211
case Op::OpTypeCooperativeMatrixNV:
2212
case Op::OpTypeCooperativeVectorNV:
2213
if (! specConstant) {
2214
Id existing = findCompositeConstant(typeClass, opcode, typeId, members, numMembers);
2215
if (existing)
2216
return existing;
2217
}
2218
break;
2219
case Op::OpTypeStruct:
2220
if (! specConstant) {
2221
Id existing = findStructConstant(typeId, members);
2222
if (existing)
2223
return existing;
2224
}
2225
break;
2226
default:
2227
assert(0);
2228
return makeFloatConstant(0.0);
2229
}
2230
2231
Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
2232
c->reserveOperands(members.size());
2233
for (size_t op = 0; op < numMembers; ++op)
2234
c->addIdOperand(members[op]);
2235
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
2236
if (typeClass == Op::OpTypeStruct)
2237
groupedStructConstants[typeId].push_back(c);
2238
else
2239
groupedCompositeConstants[enumCast(typeClass)].push_back(c);
2240
module.mapInstruction(c);
2241
2242
return c->getResultId();
2243
}
2244
2245
Instruction* Builder::addEntryPoint(ExecutionModel model, Function* function, const char* name)
2246
{
2247
Instruction* entryPoint = new Instruction(Op::OpEntryPoint);
2248
entryPoint->reserveOperands(3);
2249
entryPoint->addImmediateOperand(model);
2250
entryPoint->addIdOperand(function->getId());
2251
entryPoint->addStringOperand(name);
2252
2253
entryPoints.push_back(std::unique_ptr<Instruction>(entryPoint));
2254
2255
return entryPoint;
2256
}
2257
2258
// Currently relying on the fact that all 'value' of interest are small non-negative values.
2259
void Builder::addExecutionMode(Function* entryPoint, ExecutionMode mode, int value1, int value2, int value3)
2260
{
2261
// entryPoint can be null if we are in compile-only mode
2262
if (!entryPoint)
2263
return;
2264
2265
Instruction* instr = new Instruction(Op::OpExecutionMode);
2266
instr->reserveOperands(3);
2267
instr->addIdOperand(entryPoint->getId());
2268
instr->addImmediateOperand(mode);
2269
if (value1 >= 0)
2270
instr->addImmediateOperand(value1);
2271
if (value2 >= 0)
2272
instr->addImmediateOperand(value2);
2273
if (value3 >= 0)
2274
instr->addImmediateOperand(value3);
2275
2276
executionModes.push_back(std::unique_ptr<Instruction>(instr));
2277
}
2278
2279
void Builder::addExecutionMode(Function* entryPoint, ExecutionMode mode, const std::vector<unsigned>& literals)
2280
{
2281
// entryPoint can be null if we are in compile-only mode
2282
if (!entryPoint)
2283
return;
2284
2285
Instruction* instr = new Instruction(Op::OpExecutionMode);
2286
instr->reserveOperands(literals.size() + 2);
2287
instr->addIdOperand(entryPoint->getId());
2288
instr->addImmediateOperand(mode);
2289
for (auto literal : literals)
2290
instr->addImmediateOperand(literal);
2291
2292
executionModes.push_back(std::unique_ptr<Instruction>(instr));
2293
}
2294
2295
void Builder::addExecutionModeId(Function* entryPoint, ExecutionMode mode, const std::vector<Id>& operandIds)
2296
{
2297
// entryPoint can be null if we are in compile-only mode
2298
if (!entryPoint)
2299
return;
2300
2301
Instruction* instr = new Instruction(Op::OpExecutionModeId);
2302
instr->reserveOperands(operandIds.size() + 2);
2303
instr->addIdOperand(entryPoint->getId());
2304
instr->addImmediateOperand(mode);
2305
for (auto operandId : operandIds)
2306
instr->addIdOperand(operandId);
2307
2308
executionModes.push_back(std::unique_ptr<Instruction>(instr));
2309
}
2310
2311
void Builder::addName(Id id, const char* string)
2312
{
2313
Instruction* name = new Instruction(Op::OpName);
2314
name->reserveOperands(2);
2315
name->addIdOperand(id);
2316
name->addStringOperand(string);
2317
2318
names.push_back(std::unique_ptr<Instruction>(name));
2319
}
2320
2321
void Builder::addMemberName(Id id, int memberNumber, const char* string)
2322
{
2323
Instruction* name = new Instruction(Op::OpMemberName);
2324
name->reserveOperands(3);
2325
name->addIdOperand(id);
2326
name->addImmediateOperand(memberNumber);
2327
name->addStringOperand(string);
2328
2329
names.push_back(std::unique_ptr<Instruction>(name));
2330
}
2331
2332
void Builder::addDecoration(Id id, Decoration decoration, int num)
2333
{
2334
if (decoration == spv::Decoration::Max)
2335
return;
2336
2337
Instruction* dec = new Instruction(Op::OpDecorate);
2338
dec->reserveOperands(2);
2339
dec->addIdOperand(id);
2340
dec->addImmediateOperand(decoration);
2341
if (num >= 0)
2342
dec->addImmediateOperand(num);
2343
2344
decorations.insert(std::unique_ptr<Instruction>(dec));
2345
}
2346
2347
void Builder::addDecoration(Id id, Decoration decoration, const char* s)
2348
{
2349
if (decoration == spv::Decoration::Max)
2350
return;
2351
2352
Instruction* dec = new Instruction(Op::OpDecorateString);
2353
dec->reserveOperands(3);
2354
dec->addIdOperand(id);
2355
dec->addImmediateOperand(decoration);
2356
dec->addStringOperand(s);
2357
2358
decorations.insert(std::unique_ptr<Instruction>(dec));
2359
}
2360
2361
void Builder::addDecoration(Id id, Decoration decoration, const std::vector<unsigned>& literals)
2362
{
2363
if (decoration == spv::Decoration::Max)
2364
return;
2365
2366
Instruction* dec = new Instruction(Op::OpDecorate);
2367
dec->reserveOperands(literals.size() + 2);
2368
dec->addIdOperand(id);
2369
dec->addImmediateOperand(decoration);
2370
for (auto literal : literals)
2371
dec->addImmediateOperand(literal);
2372
2373
decorations.insert(std::unique_ptr<Instruction>(dec));
2374
}
2375
2376
void Builder::addDecoration(Id id, Decoration decoration, const std::vector<const char*>& strings)
2377
{
2378
if (decoration == spv::Decoration::Max)
2379
return;
2380
2381
Instruction* dec = new Instruction(Op::OpDecorateString);
2382
dec->reserveOperands(strings.size() + 2);
2383
dec->addIdOperand(id);
2384
dec->addImmediateOperand(decoration);
2385
for (auto string : strings)
2386
dec->addStringOperand(string);
2387
2388
decorations.insert(std::unique_ptr<Instruction>(dec));
2389
}
2390
2391
void Builder::addLinkageDecoration(Id id, const char* name, spv::LinkageType linkType) {
2392
Instruction* dec = new Instruction(Op::OpDecorate);
2393
dec->reserveOperands(4);
2394
dec->addIdOperand(id);
2395
dec->addImmediateOperand(spv::Decoration::LinkageAttributes);
2396
dec->addStringOperand(name);
2397
dec->addImmediateOperand(linkType);
2398
2399
decorations.insert(std::unique_ptr<Instruction>(dec));
2400
}
2401
2402
void Builder::addDecorationId(Id id, Decoration decoration, Id idDecoration)
2403
{
2404
if (decoration == spv::Decoration::Max)
2405
return;
2406
2407
Instruction* dec = new Instruction(Op::OpDecorateId);
2408
dec->reserveOperands(3);
2409
dec->addIdOperand(id);
2410
dec->addImmediateOperand(decoration);
2411
dec->addIdOperand(idDecoration);
2412
2413
decorations.insert(std::unique_ptr<Instruction>(dec));
2414
}
2415
2416
void Builder::addDecorationId(Id id, Decoration decoration, const std::vector<Id>& operandIds)
2417
{
2418
if(decoration == spv::Decoration::Max)
2419
return;
2420
2421
Instruction* dec = new Instruction(Op::OpDecorateId);
2422
dec->reserveOperands(operandIds.size() + 2);
2423
dec->addIdOperand(id);
2424
dec->addImmediateOperand(decoration);
2425
2426
for (auto operandId : operandIds)
2427
dec->addIdOperand(operandId);
2428
2429
decorations.insert(std::unique_ptr<Instruction>(dec));
2430
}
2431
2432
void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, int num)
2433
{
2434
if (decoration == spv::Decoration::Max)
2435
return;
2436
2437
Instruction* dec = new Instruction(Op::OpMemberDecorate);
2438
dec->reserveOperands(3);
2439
dec->addIdOperand(id);
2440
dec->addImmediateOperand(member);
2441
dec->addImmediateOperand(decoration);
2442
if (num >= 0)
2443
dec->addImmediateOperand(num);
2444
2445
decorations.insert(std::unique_ptr<Instruction>(dec));
2446
}
2447
2448
void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, const char *s)
2449
{
2450
if (decoration == spv::Decoration::Max)
2451
return;
2452
2453
Instruction* dec = new Instruction(Op::OpMemberDecorateStringGOOGLE);
2454
dec->reserveOperands(4);
2455
dec->addIdOperand(id);
2456
dec->addImmediateOperand(member);
2457
dec->addImmediateOperand(decoration);
2458
dec->addStringOperand(s);
2459
2460
decorations.insert(std::unique_ptr<Instruction>(dec));
2461
}
2462
2463
void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, const std::vector<unsigned>& literals)
2464
{
2465
if (decoration == spv::Decoration::Max)
2466
return;
2467
2468
Instruction* dec = new Instruction(Op::OpMemberDecorate);
2469
dec->reserveOperands(literals.size() + 3);
2470
dec->addIdOperand(id);
2471
dec->addImmediateOperand(member);
2472
dec->addImmediateOperand(decoration);
2473
for (auto literal : literals)
2474
dec->addImmediateOperand(literal);
2475
2476
decorations.insert(std::unique_ptr<Instruction>(dec));
2477
}
2478
2479
void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, const std::vector<const char*>& strings)
2480
{
2481
if (decoration == spv::Decoration::Max)
2482
return;
2483
2484
Instruction* dec = new Instruction(Op::OpMemberDecorateString);
2485
dec->reserveOperands(strings.size() + 3);
2486
dec->addIdOperand(id);
2487
dec->addImmediateOperand(member);
2488
dec->addImmediateOperand(decoration);
2489
for (auto string : strings)
2490
dec->addStringOperand(string);
2491
2492
decorations.insert(std::unique_ptr<Instruction>(dec));
2493
}
2494
2495
void Builder::addInstruction(std::unique_ptr<Instruction> inst) {
2496
// Phis must appear first in their block. Don't insert line-tracking instructions
// in front of them; just add the OpPhi and return.
2498
if (inst->getOpCode() == Op::OpPhi) {
2499
buildPoint->addInstruction(std::move(inst));
2500
return;
2501
}
2502
// Optionally insert OpDebugScope
2503
if (emitNonSemanticShaderDebugInfo && dirtyScopeTracker) {
2504
if (buildPoint->updateDebugScope(currentDebugScopeId.top())) {
2505
auto scopeInst = std::make_unique<Instruction>(getUniqueId(), makeVoidType(), Op::OpExtInst);
2506
scopeInst->reserveOperands(3);
2507
scopeInst->addIdOperand(nonSemanticShaderDebugInfo);
2508
scopeInst->addImmediateOperand(NonSemanticShaderDebugInfo100DebugScope);
2509
scopeInst->addIdOperand(currentDebugScopeId.top());
2510
buildPoint->addInstruction(std::move(scopeInst));
2511
}
2512
2513
dirtyScopeTracker = false;
2514
}
2515
2516
// Insert OpLine/OpDebugLine if the debug source location has changed
2517
if (trackDebugInfo && dirtyLineTracker) {
2518
if (buildPoint->updateDebugSourceLocation(currentLine, 0, currentFileId)) {
2519
if (emitSpirvDebugInfo) {
2520
auto lineInst = std::make_unique<Instruction>(Op::OpLine);
2521
lineInst->reserveOperands(3);
2522
lineInst->addIdOperand(currentFileId);
2523
lineInst->addImmediateOperand(currentLine);
2524
lineInst->addImmediateOperand(0);
2525
buildPoint->addInstruction(std::move(lineInst));
2526
}
2527
if (emitNonSemanticShaderDebugInfo) {
2528
auto lineInst = std::make_unique<Instruction>(getUniqueId(), makeVoidType(), Op::OpExtInst);
2529
lineInst->reserveOperands(7);
2530
lineInst->addIdOperand(nonSemanticShaderDebugInfo);
2531
lineInst->addImmediateOperand(NonSemanticShaderDebugInfo100DebugLine);
2532
lineInst->addIdOperand(makeDebugSource(currentFileId));
2533
lineInst->addIdOperand(makeUintConstant(currentLine));
2534
lineInst->addIdOperand(makeUintConstant(currentLine));
2535
lineInst->addIdOperand(makeUintConstant(0));
2536
lineInst->addIdOperand(makeUintConstant(0));
2537
buildPoint->addInstruction(std::move(lineInst));
2538
}
2539
}
2540
2541
dirtyLineTracker = false;
2542
}
2543
2544
buildPoint->addInstruction(std::move(inst));
2545
}
2546
2547
void Builder::addInstructionNoDebugInfo(std::unique_ptr<Instruction> inst) {
2548
buildPoint->addInstruction(std::move(inst));
2549
}
2550
2551
// Comments in header
2552
Function* Builder::makeEntryPoint(const char* entryPoint)
2553
{
2554
assert(! entryPointFunction);
2555
2556
auto const returnType = makeVoidType();
2557
2558
restoreNonSemanticShaderDebugInfo = emitNonSemanticShaderDebugInfo;
2559
if(sourceLang == spv::SourceLanguage::HLSL) {
2560
emitNonSemanticShaderDebugInfo = false;
2561
}
2562
2563
Block* entry = nullptr;
2564
entryPointFunction = makeFunctionEntry(NoPrecision, returnType, entryPoint, LinkageType::Max, {}, {}, &entry);
2565
2566
emitNonSemanticShaderDebugInfo = restoreNonSemanticShaderDebugInfo;
2567
2568
return entryPointFunction;
2569
}
2570
2571
// Comments in header
2572
Function* Builder::makeFunctionEntry(Decoration precision, Id returnType, const char* name, LinkageType linkType,
2573
const std::vector<Id>& paramTypes,
2574
const std::vector<std::vector<Decoration>>& decorations, Block** entry)
2575
{
2576
// Make the function and initial instructions in it
2577
Id typeId = makeFunctionType(returnType, paramTypes);
2578
Id firstParamId = paramTypes.size() == 0 ? 0 : getUniqueIds((int)paramTypes.size());
2579
Id funcId = getUniqueId();
2580
Function* function = new Function(funcId, returnType, typeId, firstParamId, linkType, name, module);
2581
2582
// Set up the precisions
2583
setPrecision(function->getId(), precision);
2584
function->setReturnPrecision(precision);
2585
for (unsigned p = 0; p < (unsigned)decorations.size(); ++p) {
2586
for (int d = 0; d < (int)decorations[p].size(); ++d) {
2587
addDecoration(firstParamId + p, decorations[p][d]);
2588
function->addParamPrecision(p, decorations[p][d]);
2589
}
2590
}
2591
2592
// reset last debug scope
2593
if (emitNonSemanticShaderDebugInfo) {
2594
dirtyScopeTracker = true;
2595
}
2596
2597
// CFG
2598
assert(entry != nullptr);
2599
*entry = new Block(getUniqueId(), *function);
2600
function->addBlock(*entry);
2601
setBuildPoint(*entry);
2602
2603
if (name)
2604
addName(function->getId(), name);
2605
2606
functions.push_back(std::unique_ptr<Function>(function));
2607
2608
return function;
2609
}
2610
2611
void Builder::setupFunctionDebugInfo(Function* function, const char* name, const std::vector<Id>& paramTypes,
2612
const std::vector<char const*>& paramNames)
2613
{
2614
2615
if (!emitNonSemanticShaderDebugInfo)
2616
return;
2617
2618
Id nameId = getStringId(unmangleFunctionName(name));
2619
Id funcTypeId = function->getFuncTypeId();
2620
assert(getDebugType(funcTypeId) != NoType);
2621
Id funcId = function->getId();
2622
2623
assert(funcId != 0);
2624
2625
// Make the debug function instruction
2626
Id debugFuncId = makeDebugFunction(function, nameId, funcTypeId);
2627
debugFuncIdLookup[funcId] = debugFuncId;
2628
currentDebugScopeId.push(debugFuncId);
2629
2630
// DebugScope and DebugLine for parameter DebugDeclares
2631
assert(paramTypes.size() == paramNames.size());
2632
if ((int)paramTypes.size() > 0) {
2633
Id firstParamId = function->getParamId(0);
2634
2635
for (size_t p = 0; p < paramTypes.size(); ++p) {
2636
bool passByRef = false;
2637
Id paramTypeId = paramTypes[p];
2638
2639
// Pointer-typed parameters are actually passed by reference, so we need to unwrap the pointer to get the actual parameter type.
2640
if (isPointerType(paramTypeId) || isArrayType(paramTypeId)) {
2641
passByRef = true;
2642
paramTypeId = getContainedTypeId(paramTypeId);
2643
}
2644
2645
auto const& paramName = paramNames[p];
2646
auto const debugLocalVariableId = createDebugLocalVariable(getDebugType(paramTypeId), paramName, p + 1);
2647
auto const paramId = static_cast<Id>(firstParamId + p);
2648
2649
if (passByRef) {
2650
makeDebugDeclare(debugLocalVariableId, paramId);
2651
} else {
2652
makeDebugValue(debugLocalVariableId, paramId);
2653
}
2654
}
2655
}
2656
2657
// Clear debug scope stack
2658
if (emitNonSemanticShaderDebugInfo)
2659
currentDebugScopeId.pop();
2660
}
2661
2662
Id Builder::makeDebugFunction([[maybe_unused]] Function* function, Id nameId, Id funcTypeId)
2663
{
2664
assert(function != nullptr);
2665
assert(nameId != 0);
2666
assert(funcTypeId != 0);
2667
assert(getDebugType(funcTypeId) != NoType);
2668
2669
Id funcId = getUniqueId();
2670
auto type = new Instruction(funcId, makeVoidType(), Op::OpExtInst);
2671
type->reserveOperands(11);
2672
type->addIdOperand(nonSemanticShaderDebugInfo);
2673
type->addImmediateOperand(NonSemanticShaderDebugInfo100DebugFunction);
2674
type->addIdOperand(nameId);
2675
type->addIdOperand(getDebugType(funcTypeId));
2676
type->addIdOperand(makeDebugSource(currentFileId)); // TODO: This points to file of definition instead of declaration
2677
type->addIdOperand(makeUintConstant(currentLine)); // TODO: This points to line of definition instead of declaration
2678
type->addIdOperand(makeUintConstant(0)); // column
2679
type->addIdOperand(makeDebugCompilationUnit()); // scope
2680
type->addIdOperand(nameId); // linkage name
2681
type->addIdOperand(makeUintConstant(NonSemanticShaderDebugInfo100FlagIsPublic));
2682
type->addIdOperand(makeUintConstant(currentLine));
2683
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
2684
module.mapInstruction(type);
2685
return funcId;
2686
}
2687
2688
Id Builder::makeDebugLexicalBlock(uint32_t line, uint32_t column) {
2689
assert(!currentDebugScopeId.empty());
2690
2691
Id lexId = getUniqueId();
2692
auto lex = new Instruction(lexId, makeVoidType(), Op::OpExtInst);
2693
lex->reserveOperands(6);
2694
lex->addIdOperand(nonSemanticShaderDebugInfo);
2695
lex->addImmediateOperand(NonSemanticShaderDebugInfo100DebugLexicalBlock);
2696
lex->addIdOperand(makeDebugSource(currentFileId));
2697
lex->addIdOperand(makeUintConstant(line));
2698
lex->addIdOperand(makeUintConstant(column)); // column
2699
lex->addIdOperand(currentDebugScopeId.top()); // scope
2700
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(lex));
2701
module.mapInstruction(lex);
2702
return lexId;
2703
}
2704
2705
std::string Builder::unmangleFunctionName(std::string const& name) const
2706
{
2707
assert(name.length() > 0);
2708
2709
if(name.rfind('(') != std::string::npos) {
2710
return name.substr(0, name.rfind('('));
2711
} else {
2712
return name;
2713
}
2714
}
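// Example (illustrative, not part of the original source): the unmangled name keeps
// everything before the parameter list, so a mangled name such as "foo(vf3;" comes
// back as "foo", while a plain name such as "main" is returned unchanged.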
2715
2716
// Comments in header
2717
void Builder::makeReturn(bool implicit, Id retVal)
2718
{
2719
if (retVal) {
2720
Instruction* inst = new Instruction(NoResult, NoType, Op::OpReturnValue);
2721
inst->addIdOperand(retVal);
2722
addInstruction(std::unique_ptr<Instruction>(inst));
2723
} else
2724
addInstruction(std::unique_ptr<Instruction>(new Instruction(NoResult, NoType, Op::OpReturn)));
2725
2726
if (! implicit)
2727
createAndSetNoPredecessorBlock("post-return");
2728
}
2729
2730
// Comments in header
2731
void Builder::enterLexicalBlock(uint32_t line, uint32_t column)
2732
{
2733
if (!emitNonSemanticShaderDebugInfo) {
2734
return;
2735
}
2736
2737
// Generate new lexical scope debug instruction
2738
Id lexId = makeDebugLexicalBlock(line, column);
2739
currentDebugScopeId.push(lexId);
2740
dirtyScopeTracker = true;
2741
}
2742
2743
// Comments in header
2744
void Builder::leaveLexicalBlock()
2745
{
2746
if (!emitNonSemanticShaderDebugInfo) {
2747
return;
2748
}
2749
2750
// Pop current scope from stack and clear current scope
2751
currentDebugScopeId.pop();
2752
dirtyScopeTracker = true;
2753
}
2754
2755
// Comments in header
2756
void Builder::enterFunction(Function const* function)
2757
{
2758
currentFunction = function;
2759
2760
// Save and disable debugInfo for HLSL entry point function. It is a wrapper
2761
// function with no user code in it.
2762
restoreNonSemanticShaderDebugInfo = emitNonSemanticShaderDebugInfo;
2763
if (sourceLang == spv::SourceLanguage::HLSL && function == entryPointFunction) {
2764
emitNonSemanticShaderDebugInfo = false;
2765
}
2766
2767
if (emitNonSemanticShaderDebugInfo) {
2768
// Initialize scope state
2769
Id funcId = function->getFuncId();
2770
Id debugFuncId = getDebugFunction(funcId);
2771
currentDebugScopeId.push(debugFuncId);
2772
// Create DebugFunctionDefinition
2773
spv::Id resultId = getUniqueId();
2774
Instruction* defInst = new Instruction(resultId, makeVoidType(), Op::OpExtInst);
2775
defInst->reserveOperands(4);
2776
defInst->addIdOperand(nonSemanticShaderDebugInfo);
2777
defInst->addImmediateOperand(NonSemanticShaderDebugInfo100DebugFunctionDefinition);
2778
defInst->addIdOperand(debugFuncId);
2779
defInst->addIdOperand(funcId);
2780
addInstruction(std::unique_ptr<Instruction>(defInst));
2781
}
2782
2783
if (auto linkType = function->getLinkType(); linkType != LinkageType::Max) {
2784
Id funcId = function->getFuncId();
2785
addCapability(Capability::Linkage);
2786
addLinkageDecoration(funcId, function->getExportName(), linkType);
2787
}
2788
}
2789
2790
// Comments in header
2791
void Builder::leaveFunction()
2792
{
2793
Block* block = buildPoint;
2794
Function& function = buildPoint->getParent();
2795
assert(block);
2796
2797
// If our function did not contain a return, add a return void now.
2798
if (! block->isTerminated()) {
2799
if (function.getReturnType() == makeVoidType())
2800
makeReturn(true);
2801
else {
2802
makeReturn(true, createUndefined(function.getReturnType()));
2803
}
2804
}
2805
2806
// Clear function scope from debug scope stack
2807
if (emitNonSemanticShaderDebugInfo)
2808
currentDebugScopeId.pop();
2809
2810
emitNonSemanticShaderDebugInfo = restoreNonSemanticShaderDebugInfo;
2811
2812
// Clear current function record
2813
currentFunction = nullptr;
2814
}
2815
2816
// Comments in header
2817
void Builder::makeStatementTerminator(spv::Op opcode, const char *name)
2818
{
2819
addInstruction(std::unique_ptr<Instruction>(new Instruction(opcode)));
2820
createAndSetNoPredecessorBlock(name);
2821
}
2822
2823
// Comments in header
2824
void Builder::makeStatementTerminator(spv::Op opcode, const std::vector<Id>& operands, const char* name)
2825
{
2826
// It's assumed that the terminator instruction always has a void return type.
// However, if a non-void return type is needed in the future, new helper
// methods can be created.
2829
createNoResultOp(opcode, operands);
2830
createAndSetNoPredecessorBlock(name);
2831
}
2832
2833
void Builder::createConstVariable(Id type, const char* name, Id constant, bool isGlobal)
2834
{
2835
if (emitNonSemanticShaderDebugInfo) {
2836
Id debugType = getDebugType(type);
2837
if (isGlobal) {
2838
createDebugGlobalVariable(debugType, name, constant);
2839
}
2840
else {
2841
auto debugLocal = createDebugLocalVariable(debugType, name);
2842
makeDebugValue(debugLocal, constant);
2843
}
2844
}
2845
}
2846
2847
// Comments in header
2848
Id Builder::createVariable(Decoration precision, StorageClass storageClass, Id type, const char* name, Id initializer,
2849
bool const compilerGenerated)
2850
{
2851
Id pointerType = makePointer(storageClass, type);
2852
Instruction* inst = new Instruction(getUniqueId(), pointerType, Op::OpVariable);
2853
inst->addImmediateOperand(storageClass);
2854
if (initializer != NoResult)
2855
inst->addIdOperand(initializer);
2856
2857
if (storageClass == StorageClass::Function) {
2858
// Validation rules require the declaration in the entry block
2859
buildPoint->getParent().addLocalVariable(std::unique_ptr<Instruction>(inst));
2860
}
2861
else {
2862
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(inst));
2863
module.mapInstruction(inst);
2864
}
2865
2866
if (emitNonSemanticShaderDebugInfo && !compilerGenerated)
2867
{
2868
// For debug info, we prefer respecting how the variable is declared in source code.
// We may emulate some local variables as global variables with Private storage in SPIR-V, but we still want to
// treat them as local variables in debug info.
2871
if (storageClass == StorageClass::Function || (currentFunction && storageClass == StorageClass::Private)) {
2872
auto const debugLocalVariableId = createDebugLocalVariable(getDebugType(type), name);
2873
makeDebugDeclare(debugLocalVariableId, inst->getResultId());
2874
}
2875
else {
2876
createDebugGlobalVariable(getDebugType(type), name, inst->getResultId());
2877
}
2878
}
2879
2880
if (name)
2881
addName(inst->getResultId(), name);
2882
setPrecision(inst->getResultId(), precision);
2883
2884
return inst->getResultId();
2885
}
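// Illustrative usage sketch (not part of the original source): declaring a
// function-local float the way a front end typically calls createVariable above.
// 'builder' is assumed, and the trailing arguments take the header's defaults
// (no initializer, not compiler generated):
//
//     spv::Id tmp = builder.createVariable(spv::NoPrecision, spv::StorageClass::Function,
//                                          builder.makeFloatType(32), "tmp");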
2886
2887
// Comments in header
2888
Id Builder::createUndefined(Id type)
2889
{
2890
Instruction* inst = new Instruction(getUniqueId(), type, Op::OpUndef);
2891
addInstruction(std::unique_ptr<Instruction>(inst));
2892
return inst->getResultId();
2893
}
2894
2895
// av/vis/nonprivate are unnecessary and illegal for some storage classes.
2896
spv::MemoryAccessMask Builder::sanitizeMemoryAccessForStorageClass(spv::MemoryAccessMask memoryAccess, StorageClass sc)
2897
const
2898
{
2899
switch (sc) {
2900
case spv::StorageClass::Uniform:
2901
case spv::StorageClass::Workgroup:
2902
case spv::StorageClass::StorageBuffer:
2903
case spv::StorageClass::PhysicalStorageBufferEXT:
2904
break;
2905
default:
2906
memoryAccess = spv::MemoryAccessMask(memoryAccess &
2907
~(spv::MemoryAccessMask::MakePointerAvailableKHR |
2908
spv::MemoryAccessMask::MakePointerVisibleKHR |
2909
spv::MemoryAccessMask::NonPrivatePointerKHR));
2910
break;
2911
}
2912
return memoryAccess;
2913
}
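// Example (illustrative, not part of the original source): for a Function- or
// Private-class pointer, the availability/visibility bits are stripped before they
// reach createStore()/createLoad(), so a mask of MakePointerVisibleKHR |
// NonPrivatePointerKHR sanitizes down to MaskNone, while the same mask is kept
// as-is for a StorageBuffer pointer.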
2914
2915
// Comments in header
2916
void Builder::createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope,
2917
unsigned int alignment)
2918
{
2919
Instruction* store = new Instruction(Op::OpStore);
2920
store->reserveOperands(2);
2921
store->addIdOperand(lValue);
2922
store->addIdOperand(rValue);
2923
2924
memoryAccess = sanitizeMemoryAccessForStorageClass(memoryAccess, getStorageClass(lValue));
2925
2926
if (memoryAccess != MemoryAccessMask::MaskNone) {
2927
store->addImmediateOperand(memoryAccess);
2928
if (anySet(memoryAccess, spv::MemoryAccessMask::Aligned)) {
2929
store->addImmediateOperand(alignment);
2930
}
2931
if (anySet(memoryAccess, spv::MemoryAccessMask::MakePointerAvailableKHR)) {
2932
store->addIdOperand(makeUintConstant(scope));
2933
}
2934
}
2935
2936
addInstruction(std::unique_ptr<Instruction>(store));
2937
}
2938
2939
// Comments in header
2940
Id Builder::createLoad(Id lValue, spv::Decoration precision, spv::MemoryAccessMask memoryAccess,
2941
spv::Scope scope, unsigned int alignment)
2942
{
2943
Instruction* load = new Instruction(getUniqueId(), getDerefTypeId(lValue), Op::OpLoad);
2944
load->addIdOperand(lValue);
2945
2946
memoryAccess = sanitizeMemoryAccessForStorageClass(memoryAccess, getStorageClass(lValue));
2947
2948
if (memoryAccess != MemoryAccessMask::MaskNone) {
2949
load->addImmediateOperand(memoryAccess);
2950
if (anySet(memoryAccess, spv::MemoryAccessMask::Aligned)) {
2951
load->addImmediateOperand(alignment);
2952
}
2953
if (anySet(memoryAccess, spv::MemoryAccessMask::MakePointerVisibleKHR)) {
2954
load->addIdOperand(makeUintConstant(scope));
2955
}
2956
}
2957
2958
addInstruction(std::unique_ptr<Instruction>(load));
2959
setPrecision(load->getResultId(), precision);
2960
2961
return load->getResultId();
2962
}
2963
2964
// Comments in header
2965
Id Builder::createAccessChain(StorageClass storageClass, Id base, const std::vector<Id>& offsets)
2966
{
2967
// Figure out the final resulting type.
2968
Id typeId = getResultingAccessChainType();
2969
typeId = makePointer(storageClass, typeId);
2970
2971
// Make the instruction
2972
Instruction* chain = new Instruction(getUniqueId(), typeId, Op::OpAccessChain);
2973
chain->reserveOperands(offsets.size() + 1);
2974
chain->addIdOperand(base);
2975
for (int i = 0; i < (int)offsets.size(); ++i)
2976
chain->addIdOperand(offsets[i]);
2977
addInstruction(std::unique_ptr<Instruction>(chain));
2978
2979
return chain->getResultId();
2980
}
2981
2982
Id Builder::createArrayLength(Id base, unsigned int member, unsigned int bits)
2983
{
2984
spv::Id intType = makeUintType(bits);
2985
Instruction* length = new Instruction(getUniqueId(), intType, Op::OpArrayLength);
2986
length->reserveOperands(2);
2987
length->addIdOperand(base);
2988
length->addImmediateOperand(member);
2989
addInstruction(std::unique_ptr<Instruction>(length));
2990
2991
return length->getResultId();
2992
}
2993
2994
Id Builder::createCooperativeMatrixLengthKHR(Id type)
2995
{
2996
spv::Id intType = makeUintType(32);
2997
2998
// Generate code for spec constants if in spec constant operation
2999
// generation mode.
3000
if (generatingOpCodeForSpecConst) {
3001
return createSpecConstantOp(Op::OpCooperativeMatrixLengthKHR, intType, std::vector<Id>(1, type), std::vector<Id>());
3002
}
3003
3004
Instruction* length = new Instruction(getUniqueId(), intType, Op::OpCooperativeMatrixLengthKHR);
3005
length->addIdOperand(type);
3006
addInstruction(std::unique_ptr<Instruction>(length));
3007
3008
return length->getResultId();
3009
}
3010
3011
Id Builder::createCooperativeMatrixLengthNV(Id type)
3012
{
3013
spv::Id intType = makeUintType(32);
3014
3015
// Generate code for spec constants if in spec constant operation
3016
// generation mode.
3017
if (generatingOpCodeForSpecConst) {
3018
return createSpecConstantOp(Op::OpCooperativeMatrixLengthNV, intType, std::vector<Id>(1, type), std::vector<Id>());
3019
}
3020
3021
Instruction* length = new Instruction(getUniqueId(), intType, Op::OpCooperativeMatrixLengthNV);
3022
length->addIdOperand(type);
3023
addInstruction(std::unique_ptr<Instruction>(length));
3024
3025
return length->getResultId();
3026
}
3027
3028
Id Builder::createCompositeExtract(Id composite, Id typeId, unsigned index)
3029
{
3030
// Generate code for spec constants if in spec constant operation
3031
// generation mode.
3032
if (generatingOpCodeForSpecConst) {
3033
return createSpecConstantOp(Op::OpCompositeExtract, typeId, std::vector<Id>(1, composite),
3034
std::vector<Id>(1, index));
3035
}
3036
Instruction* extract = new Instruction(getUniqueId(), typeId, Op::OpCompositeExtract);
3037
extract->reserveOperands(2);
3038
extract->addIdOperand(composite);
3039
extract->addImmediateOperand(index);
3040
addInstruction(std::unique_ptr<Instruction>(extract));
3041
3042
return extract->getResultId();
3043
}
3044
3045
Id Builder::createCompositeExtract(Id composite, Id typeId, const std::vector<unsigned>& indexes)
3046
{
3047
// Generate code for spec constants if in spec constant operation
3048
// generation mode.
3049
if (generatingOpCodeForSpecConst) {
3050
return createSpecConstantOp(Op::OpCompositeExtract, typeId, std::vector<Id>(1, composite), indexes);
3051
}
3052
Instruction* extract = new Instruction(getUniqueId(), typeId, Op::OpCompositeExtract);
3053
extract->reserveOperands(indexes.size() + 1);
3054
extract->addIdOperand(composite);
3055
for (int i = 0; i < (int)indexes.size(); ++i)
3056
extract->addImmediateOperand(indexes[i]);
3057
addInstruction(std::unique_ptr<Instruction>(extract));
3058
3059
return extract->getResultId();
3060
}
3061
3062
Id Builder::createCompositeInsert(Id object, Id composite, Id typeId, unsigned index)
3063
{
3064
Instruction* insert = new Instruction(getUniqueId(), typeId, Op::OpCompositeInsert);
3065
insert->reserveOperands(3);
3066
insert->addIdOperand(object);
3067
insert->addIdOperand(composite);
3068
insert->addImmediateOperand(index);
3069
addInstruction(std::unique_ptr<Instruction>(insert));
3070
3071
return insert->getResultId();
3072
}
3073
3074
Id Builder::createCompositeInsert(Id object, Id composite, Id typeId, const std::vector<unsigned>& indexes)
3075
{
3076
Instruction* insert = new Instruction(getUniqueId(), typeId, Op::OpCompositeInsert);
3077
insert->reserveOperands(indexes.size() + 2);
3078
insert->addIdOperand(object);
3079
insert->addIdOperand(composite);
3080
for (int i = 0; i < (int)indexes.size(); ++i)
3081
insert->addImmediateOperand(indexes[i]);
3082
addInstruction(std::unique_ptr<Instruction>(insert));
3083
3084
return insert->getResultId();
3085
}
3086
3087
Id Builder::createVectorExtractDynamic(Id vector, Id typeId, Id componentIndex)
3088
{
3089
Instruction* extract = new Instruction(getUniqueId(), typeId, Op::OpVectorExtractDynamic);
3090
extract->reserveOperands(2);
3091
extract->addIdOperand(vector);
3092
extract->addIdOperand(componentIndex);
3093
addInstruction(std::unique_ptr<Instruction>(extract));
3094
3095
return extract->getResultId();
3096
}
3097
3098
Id Builder::createVectorInsertDynamic(Id vector, Id typeId, Id component, Id componentIndex)
3099
{
3100
Instruction* insert = new Instruction(getUniqueId(), typeId, Op::OpVectorInsertDynamic);
3101
insert->reserveOperands(3);
3102
insert->addIdOperand(vector);
3103
insert->addIdOperand(component);
3104
insert->addIdOperand(componentIndex);
3105
addInstruction(std::unique_ptr<Instruction>(insert));
3106
3107
return insert->getResultId();
3108
}
3109
3110
// An opcode that has no operands, no result id, and no type
3111
void Builder::createNoResultOp(Op opCode)
3112
{
3113
Instruction* op = new Instruction(opCode);
3114
addInstruction(std::unique_ptr<Instruction>(op));
3115
}
3116
3117
// An opcode that has one id operand, no result id, and no type
3118
void Builder::createNoResultOp(Op opCode, Id operand)
3119
{
3120
Instruction* op = new Instruction(opCode);
3121
op->addIdOperand(operand);
3122
addInstruction(std::unique_ptr<Instruction>(op));
3123
}
3124
3125
// An opcode that has one or more operands, no result id, and no type
3126
void Builder::createNoResultOp(Op opCode, const std::vector<Id>& operands)
3127
{
3128
Instruction* op = new Instruction(opCode);
3129
op->reserveOperands(operands.size());
3130
for (auto id : operands) {
3131
op->addIdOperand(id);
3132
}
3133
addInstruction(std::unique_ptr<Instruction>(op));
3134
}
3135
3136
// An opcode that has multiple operands, no result id, and no type
3137
void Builder::createNoResultOp(Op opCode, const std::vector<IdImmediate>& operands)
3138
{
3139
Instruction* op = new Instruction(opCode);
3140
op->reserveOperands(operands.size());
3141
for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
3142
if (it->isId)
3143
op->addIdOperand(it->word);
3144
else
3145
op->addImmediateOperand(it->word);
3146
}
3147
addInstruction(std::unique_ptr<Instruction>(op));
3148
}
3149
3150
void Builder::createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask semantics)
3151
{
3152
Instruction* op = new Instruction(Op::OpControlBarrier);
3153
op->reserveOperands(3);
3154
op->addIdOperand(makeUintConstant(execution));
3155
op->addIdOperand(makeUintConstant(memory));
3156
op->addIdOperand(makeUintConstant(semantics));
3157
addInstruction(std::unique_ptr<Instruction>(op));
3158
}
3159
3160
void Builder::createMemoryBarrier(Scope executionScope, MemorySemanticsMask memorySemantics)
3161
{
3162
Instruction* op = new Instruction(Op::OpMemoryBarrier);
3163
op->reserveOperands(2);
3164
op->addIdOperand(makeUintConstant((unsigned)executionScope));
3165
op->addIdOperand(makeUintConstant((unsigned)memorySemantics));
3166
addInstruction(std::unique_ptr<Instruction>(op));
3167
}
3168
3169
// An opcode that has one operand, a result id, and a type
3170
Id Builder::createUnaryOp(Op opCode, Id typeId, Id operand)
3171
{
3172
// Generate code for spec constants if in spec constant operation
3173
// generation mode.
3174
if (generatingOpCodeForSpecConst) {
3175
return createSpecConstantOp(opCode, typeId, std::vector<Id>(1, operand), std::vector<Id>());
3176
}
3177
Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
3178
op->addIdOperand(operand);
3179
addInstruction(std::unique_ptr<Instruction>(op));
3180
3181
return op->getResultId();
3182
}
3183
3184
Id Builder::createBinOp(Op opCode, Id typeId, Id left, Id right)
3185
{
3186
// Generate code for spec constants if in spec constant operation
3187
// generation mode.
3188
if (generatingOpCodeForSpecConst) {
3189
std::vector<Id> operands(2);
3190
operands[0] = left; operands[1] = right;
3191
return createSpecConstantOp(opCode, typeId, operands, std::vector<Id>());
3192
}
3193
Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
3194
op->reserveOperands(2);
3195
op->addIdOperand(left);
3196
op->addIdOperand(right);
3197
addInstruction(std::unique_ptr<Instruction>(op));
3198
3199
return op->getResultId();
3200
}
3201
3202
Id Builder::createTriOp(Op opCode, Id typeId, Id op1, Id op2, Id op3)
3203
{
3204
// Generate code for spec constants if in spec constant operation
3205
// generation mode.
3206
if (generatingOpCodeForSpecConst) {
3207
std::vector<Id> operands(3);
3208
operands[0] = op1;
3209
operands[1] = op2;
3210
operands[2] = op3;
3211
return createSpecConstantOp(
3212
opCode, typeId, operands, std::vector<Id>());
3213
}
3214
Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
3215
op->reserveOperands(3);
3216
op->addIdOperand(op1);
3217
op->addIdOperand(op2);
3218
op->addIdOperand(op3);
3219
addInstruction(std::unique_ptr<Instruction>(op));
3220
3221
return op->getResultId();
3222
}
3223
3224
Id Builder::createOp(Op opCode, Id typeId, const std::vector<Id>& operands)
3225
{
3226
Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
3227
op->reserveOperands(operands.size());
3228
for (auto id : operands)
3229
op->addIdOperand(id);
3230
addInstruction(std::unique_ptr<Instruction>(op));
3231
3232
return op->getResultId();
3233
}
3234
3235
Id Builder::createOp(Op opCode, Id typeId, const std::vector<IdImmediate>& operands)
3236
{
3237
Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
3238
op->reserveOperands(operands.size());
3239
for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
3240
if (it->isId)
3241
op->addIdOperand(it->word);
3242
else
3243
op->addImmediateOperand(it->word);
3244
}
3245
addInstruction(std::unique_ptr<Instruction>(op));
3246
3247
return op->getResultId();
3248
}
3249
3250
Id Builder::createSpecConstantOp(Op opCode, Id typeId, const std::vector<Id>& operands,
3251
const std::vector<unsigned>& literals)
3252
{
3253
Instruction* op = new Instruction(getUniqueId(), typeId, Op::OpSpecConstantOp);
3254
op->reserveOperands(operands.size() + literals.size() + 1);
3255
op->addImmediateOperand((unsigned) opCode);
3256
for (auto it = operands.cbegin(); it != operands.cend(); ++it)
3257
op->addIdOperand(*it);
3258
for (auto it = literals.cbegin(); it != literals.cend(); ++it)
3259
op->addImmediateOperand(*it);
3260
module.mapInstruction(op);
3261
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(op));
3262
3263
// OpSpecConstantOp's using 8 or 16 bit types require the associated capability
3264
if (containsType(typeId, Op::OpTypeInt, 8))
3265
addCapability(Capability::Int8);
3266
if (containsType(typeId, Op::OpTypeInt, 16))
3267
addCapability(Capability::Int16);
3268
if (containsType(typeId, Op::OpTypeFloat, 16))
3269
addCapability(Capability::Float16);
3270
3271
return op->getResultId();
3272
}
3273
3274
Id Builder::createFunctionCall(spv::Function* function, const std::vector<spv::Id>& args)
3275
{
3276
Instruction* op = new Instruction(getUniqueId(), function->getReturnType(), Op::OpFunctionCall);
3277
op->reserveOperands(args.size() + 1);
3278
op->addIdOperand(function->getId());
3279
for (int a = 0; a < (int)args.size(); ++a)
3280
op->addIdOperand(args[a]);
3281
addInstruction(std::unique_ptr<Instruction>(op));
3282
3283
return op->getResultId();
3284
}
3285
3286
// Comments in header
3287
Id Builder::createRvalueSwizzle(Decoration precision, Id typeId, Id source, const std::vector<unsigned>& channels)
3288
{
3289
if (channels.size() == 1)
3290
return setPrecision(createCompositeExtract(source, typeId, channels.front()), precision);
3291
3292
if (generatingOpCodeForSpecConst) {
3293
std::vector<Id> operands(2);
3294
operands[0] = operands[1] = source;
3295
return setPrecision(createSpecConstantOp(Op::OpVectorShuffle, typeId, operands, channels), precision);
3296
}
3297
Instruction* swizzle = new Instruction(getUniqueId(), typeId, Op::OpVectorShuffle);
3298
assert(isVector(source));
3299
swizzle->reserveOperands(channels.size() + 2);
3300
swizzle->addIdOperand(source);
3301
swizzle->addIdOperand(source);
3302
for (int i = 0; i < (int)channels.size(); ++i)
3303
swizzle->addImmediateOperand(channels[i]);
3304
addInstruction(std::unique_ptr<Instruction>(swizzle));
3305
3306
return setPrecision(swizzle->getResultId(), precision);
3307
}
3308
3309
// Comments in header
3310
Id Builder::createLvalueSwizzle(Id typeId, Id target, Id source, const std::vector<unsigned>& channels)
3311
{
3312
if (channels.size() == 1 && getNumComponents(source) == 1)
3313
return createCompositeInsert(source, target, typeId, channels.front());
3314
3315
Instruction* swizzle = new Instruction(getUniqueId(), typeId, Op::OpVectorShuffle);
3316
3317
assert(isVector(target));
3318
swizzle->reserveOperands(2);
3319
swizzle->addIdOperand(target);
3320
3321
assert(getNumComponents(source) == channels.size());
3322
assert(isVector(source));
3323
swizzle->addIdOperand(source);
3324
3325
// Set up an identity shuffle from the base value to the result value
3326
unsigned int components[4];
3327
int numTargetComponents = getNumComponents(target);
3328
for (int i = 0; i < numTargetComponents; ++i)
3329
components[i] = i;
3330
3331
// Punch in the l-value swizzle
3332
for (int i = 0; i < (int)channels.size(); ++i)
3333
components[channels[i]] = numTargetComponents + i;
3334
3335
// finish the instruction with these component selectors
3336
swizzle->reserveOperands(numTargetComponents);
3337
for (int i = 0; i < numTargetComponents; ++i)
3338
swizzle->addImmediateOperand(components[i]);
3339
addInstruction(std::unique_ptr<Instruction>(swizzle));
3340
3341
return swizzle->getResultId();
3342
}
3343
3344
// Comments in header
3345
void Builder::promoteScalar(Decoration precision, Id& left, Id& right)
3346
{
3347
int direction = getNumComponents(right) - getNumComponents(left);
3348
3349
if (direction > 0)
3350
left = smearScalar(precision, left, makeVectorType(getTypeId(left), getNumComponents(right)));
3351
else if (direction < 0)
3352
right = smearScalar(precision, right, makeVectorType(getTypeId(right), getNumComponents(left)));
3353
3354
return;
3355
}
3356
3357
// Comments in header
3358
Id Builder::smearScalar(Decoration precision, Id scalar, Id vectorType)
3359
{
3360
assert(getNumComponents(scalar) == 1);
3361
assert(getTypeId(scalar) == getScalarTypeId(vectorType));
3362
3363
int numComponents = getNumTypeComponents(vectorType);
3364
if (numComponents == 1 && !isCooperativeVectorType(vectorType))
3365
return scalar;
3366
3367
Instruction* smear = nullptr;
3368
if (generatingOpCodeForSpecConst) {
3369
auto members = std::vector<spv::Id>(numComponents, scalar);
3370
// Sometimes, even in spec-constant-op mode, the temporary vector created by
3371
// promoting a scalar might not be a spec constant. This should depend on
3372
// the scalar.
3373
// e.g.:
3374
// const vec2 spec_const_result = a_spec_const_vec2 + a_front_end_const_scalar;
3375
// In such cases, the temporary vector created from a_front_end_const_scalar
3376
// is not a spec constant vector, even though the binary operation node is marked
3377
// as 'specConstant' and we are in spec-constant-op mode.
3378
auto result_id = makeCompositeConstant(vectorType, members, isSpecConstant(scalar));
3379
smear = module.getInstruction(result_id);
3380
} else {
3381
bool replicate = (useReplicatedComposites || isCooperativeVectorType(vectorType)) && (numComponents > 0);
3382
3383
if (replicate) {
3384
numComponents = 1;
3385
addCapability(spv::Capability::ReplicatedCompositesEXT);
3386
addExtension(spv::E_SPV_EXT_replicated_composites);
3387
}
3388
3389
Op opcode = replicate ? Op::OpCompositeConstructReplicateEXT : Op::OpCompositeConstruct;
3390
3391
smear = new Instruction(getUniqueId(), vectorType, opcode);
3392
smear->reserveOperands(numComponents);
3393
for (int c = 0; c < numComponents; ++c)
3394
smear->addIdOperand(scalar);
3395
addInstruction(std::unique_ptr<Instruction>(smear));
3396
}
3397
3398
return setPrecision(smear->getResultId(), precision);
3399
}
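// Illustrative note on the two helpers above, assuming the usual front-end pattern: for an
// expression like "vec4 v; float s; v * s", the caller can use promoteScalar(precision, vId, sId)
// so that the scalar side is smeared into a vec4 (OpCompositeConstruct, or the replicated-composite
// form when that path is taken) before createBinOp sees two operands of the same vector type.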
3400
3401
// Comments in header
3402
Id Builder::createBuiltinCall(Id resultType, Id builtins, int entryPoint, const std::vector<Id>& args)
3403
{
3404
Instruction* inst = new Instruction(getUniqueId(), resultType, Op::OpExtInst);
3405
inst->reserveOperands(args.size() + 2);
3406
inst->addIdOperand(builtins);
3407
inst->addImmediateOperand(entryPoint);
3408
for (int arg = 0; arg < (int)args.size(); ++arg)
3409
inst->addIdOperand(args[arg]);
3410
3411
addInstruction(std::unique_ptr<Instruction>(inst));
3412
3413
return inst->getResultId();
3414
}
3415
3416
// Accept all parameters needed to create a texture instruction.
3417
// Create the correct instruction based on the inputs, and make the call.
3418
Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather,
3419
bool noImplicitLod, const TextureParameters& parameters, ImageOperandsMask signExtensionMask)
3420
{
3421
std::vector<Id> texArgs;
3422
3423
//
3424
// Set up the fixed arguments
3425
//
3426
bool explicitLod = false;
3427
texArgs.push_back(parameters.sampler);
3428
texArgs.push_back(parameters.coords);
3429
if (parameters.Dref != NoResult)
3430
texArgs.push_back(parameters.Dref);
3431
if (parameters.component != NoResult)
3432
texArgs.push_back(parameters.component);
3433
3434
if (parameters.granularity != NoResult)
3435
texArgs.push_back(parameters.granularity);
3436
if (parameters.coarse != NoResult)
3437
texArgs.push_back(parameters.coarse);
3438
3439
//
3440
// Set up the optional arguments
3441
//
3442
size_t optArgNum = texArgs.size(); // the position of the mask for the optional arguments, if any.
3443
ImageOperandsMask mask = ImageOperandsMask::MaskNone; // the mask operand
3444
if (parameters.bias) {
3445
mask = (ImageOperandsMask)(mask | ImageOperandsMask::Bias);
3446
texArgs.push_back(parameters.bias);
3447
}
3448
if (parameters.lod) {
3449
mask = (ImageOperandsMask)(mask | ImageOperandsMask::Lod);
3450
texArgs.push_back(parameters.lod);
3451
explicitLod = true;
3452
} else if (parameters.gradX) {
3453
mask = (ImageOperandsMask)(mask | ImageOperandsMask::Grad);
3454
texArgs.push_back(parameters.gradX);
3455
texArgs.push_back(parameters.gradY);
3456
explicitLod = true;
3457
} else if (noImplicitLod && ! fetch && ! gather) {
3458
// have to explicitly use an lod of 0 if implicit lod is not allowed, and
3459
// we would otherwise be about to issue an implicit-lod instruction
3460
mask = (ImageOperandsMask)(mask | ImageOperandsMask::Lod);
3461
texArgs.push_back(makeFloatConstant(0.0));
3462
explicitLod = true;
3463
}
3464
if (parameters.offset) {
3465
if (isConstant(parameters.offset))
3466
mask = (ImageOperandsMask)(mask | ImageOperandsMask::ConstOffset);
3467
else {
3468
addCapability(Capability::ImageGatherExtended);
3469
mask = (ImageOperandsMask)(mask | ImageOperandsMask::Offset);
3470
}
3471
texArgs.push_back(parameters.offset);
3472
}
3473
if (parameters.offsets) {
3474
if (!isConstant(parameters.offsets) && sourceLang == spv::SourceLanguage::GLSL) {
3475
mask = (ImageOperandsMask)(mask | ImageOperandsMask::Offsets);
3476
} else {
3477
addCapability(Capability::ImageGatherExtended);
3478
mask = (ImageOperandsMask)(mask | ImageOperandsMask::ConstOffsets);
3479
}
3480
texArgs.push_back(parameters.offsets);
3481
}
3482
if (parameters.sample) {
3483
mask = (ImageOperandsMask)(mask | ImageOperandsMask::Sample);
3484
texArgs.push_back(parameters.sample);
3485
}
3486
if (parameters.lodClamp) {
3487
// capability if this bit is used
3488
addCapability(Capability::MinLod);
3489
3490
mask = (ImageOperandsMask)(mask | ImageOperandsMask::MinLod);
3491
texArgs.push_back(parameters.lodClamp);
3492
}
3493
if (parameters.nonprivate) {
3494
mask = mask | ImageOperandsMask::NonPrivateTexelKHR;
3495
}
3496
if (parameters.volatil) {
3497
mask = mask | ImageOperandsMask::VolatileTexelKHR;
3498
}
3499
if (parameters.nontemporal) {
3500
mask = mask | ImageOperandsMask::Nontemporal;
3501
}
3502
mask = mask | signExtensionMask;
3503
// insert the operand for the mask, if any bits were set.
3504
if (mask != ImageOperandsMask::MaskNone)
3505
texArgs.insert(texArgs.begin() + optArgNum, (Id)mask);
3506
3507
//
3508
// Set up the instruction
3509
//
3510
Op opCode = Op::OpNop; // All paths below need to set this
3511
if (fetch) {
3512
if (sparse)
3513
opCode = Op::OpImageSparseFetch;
3514
else
3515
opCode = Op::OpImageFetch;
3516
} else if (parameters.granularity && parameters.coarse) {
3517
opCode = Op::OpImageSampleFootprintNV;
3518
} else if (gather) {
3519
if (parameters.Dref)
3520
if (sparse)
3521
opCode = Op::OpImageSparseDrefGather;
3522
else
3523
opCode = Op::OpImageDrefGather;
3524
else
3525
if (sparse)
3526
opCode = Op::OpImageSparseGather;
3527
else
3528
opCode = Op::OpImageGather;
3529
} else if (explicitLod) {
3530
if (parameters.Dref) {
3531
if (proj)
3532
if (sparse)
3533
opCode = Op::OpImageSparseSampleProjDrefExplicitLod;
3534
else
3535
opCode = Op::OpImageSampleProjDrefExplicitLod;
3536
else
3537
if (sparse)
3538
opCode = Op::OpImageSparseSampleDrefExplicitLod;
3539
else
3540
opCode = Op::OpImageSampleDrefExplicitLod;
3541
} else {
3542
if (proj)
3543
if (sparse)
3544
opCode = Op::OpImageSparseSampleProjExplicitLod;
3545
else
3546
opCode = Op::OpImageSampleProjExplicitLod;
3547
else
3548
if (sparse)
3549
opCode = Op::OpImageSparseSampleExplicitLod;
3550
else
3551
opCode = Op::OpImageSampleExplicitLod;
3552
}
3553
} else {
3554
if (parameters.Dref) {
3555
if (proj)
3556
if (sparse)
3557
opCode = Op::OpImageSparseSampleProjDrefImplicitLod;
3558
else
3559
opCode = Op::OpImageSampleProjDrefImplicitLod;
3560
else
3561
if (sparse)
3562
opCode = Op::OpImageSparseSampleDrefImplicitLod;
3563
else
3564
opCode = Op::OpImageSampleDrefImplicitLod;
3565
} else {
3566
if (proj)
3567
if (sparse)
3568
opCode = Op::OpImageSparseSampleProjImplicitLod;
3569
else
3570
opCode = Op::OpImageSampleProjImplicitLod;
3571
else
3572
if (sparse)
3573
opCode = Op::OpImageSparseSampleImplicitLod;
3574
else
3575
opCode = Op::OpImageSampleImplicitLod;
3576
}
3577
}
3578
3579
// See if the result type is expecting a smeared result.
3580
// This happens when a legacy shadow*() call is made, which
3581
// gets a vec4 back instead of a float.
3582
Id smearedType = resultType;
3583
if (! isScalarType(resultType)) {
3584
switch (opCode) {
3585
case Op::OpImageSampleDrefImplicitLod:
3586
case Op::OpImageSampleDrefExplicitLod:
3587
case Op::OpImageSampleProjDrefImplicitLod:
3588
case Op::OpImageSampleProjDrefExplicitLod:
3589
resultType = getScalarTypeId(resultType);
3590
break;
3591
default:
3592
break;
3593
}
3594
}
3595
3596
Id typeId0 = 0;
3597
Id typeId1 = 0;
3598
3599
if (sparse) {
3600
typeId0 = resultType;
3601
typeId1 = getDerefTypeId(parameters.texelOut);
3602
resultType = makeStructResultType(typeId0, typeId1);
3603
}
3604
3605
// Build the SPIR-V instruction
3606
Instruction* textureInst = new Instruction(getUniqueId(), resultType, opCode);
3607
textureInst->reserveOperands(optArgNum + (texArgs.size() - (optArgNum + 1)));
3608
for (size_t op = 0; op < optArgNum; ++op)
3609
textureInst->addIdOperand(texArgs[op]);
3610
if (optArgNum < texArgs.size())
3611
textureInst->addImmediateOperand(texArgs[optArgNum]);
3612
for (size_t op = optArgNum + 1; op < texArgs.size(); ++op)
3613
textureInst->addIdOperand(texArgs[op]);
3614
setPrecision(textureInst->getResultId(), precision);
3615
addInstruction(std::unique_ptr<Instruction>(textureInst));
3616
3617
Id resultId = textureInst->getResultId();
3618
3619
if (sparse) {
3620
// set capability
3621
addCapability(Capability::SparseResidency);
3622
3623
// Decode the return type that was a special structure
3624
createStore(createCompositeExtract(resultId, typeId1, 1), parameters.texelOut);
3625
resultId = createCompositeExtract(resultId, typeId0, 0);
3626
setPrecision(resultId, precision);
3627
} else {
3628
// When a smear is needed, do it, as per what was computed
3629
// above when resultType was changed to a scalar type.
3630
if (resultType != smearedType)
3631
resultId = smearScalar(precision, resultId, smearedType);
3632
}
3633
3634
return resultId;
3635
}
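// Illustrative note, assuming a typical caller: for a plain "texture(sampler2D, uv)" call in a
// fragment shader, only parameters.sampler and parameters.coords would be set, so no optional
// image-operand bits are turned on, the mask stays MaskNone, and the function emits
// OpImageSampleImplicitLod. Supplying parameters.lod instead routes it to
// OpImageSampleExplicitLod with the Lod image operand.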
3636
3637
// Comments in header
3638
Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameters, bool isUnsignedResult)
3639
{
3640
// Figure out the result type
3641
Id resultType = 0;
3642
switch (opCode) {
3643
case Op::OpImageQuerySize:
3644
case Op::OpImageQuerySizeLod:
3645
{
3646
int numComponents = 0;
3647
switch (getTypeDimensionality(getImageType(parameters.sampler))) {
3648
case Dim::Dim1D:
3649
case Dim::Buffer:
3650
numComponents = 1;
3651
break;
3652
case Dim::Dim2D:
3653
case Dim::Cube:
3654
case Dim::Rect:
3655
case Dim::SubpassData:
3656
numComponents = 2;
3657
break;
3658
case Dim::Dim3D:
3659
numComponents = 3;
3660
break;
3661
3662
default:
3663
assert(0);
3664
break;
3665
}
3666
if (isArrayedImageType(getImageType(parameters.sampler)))
3667
++numComponents;
3668
3669
Id intType = isUnsignedResult ? makeUintType(32) : makeIntType(32);
3670
if (numComponents == 1)
3671
resultType = intType;
3672
else
3673
resultType = makeVectorType(intType, numComponents);
3674
3675
break;
3676
}
3677
case Op::OpImageQueryLod:
3678
resultType = makeVectorType(getScalarTypeId(getTypeId(parameters.coords)), 2);
3679
break;
3680
case Op::OpImageQueryLevels:
3681
case Op::OpImageQuerySamples:
3682
resultType = isUnsignedResult ? makeUintType(32) : makeIntType(32);
3683
break;
3684
default:
3685
assert(0);
3686
break;
3687
}
3688
3689
Instruction* query = new Instruction(getUniqueId(), resultType, opCode);
3690
query->addIdOperand(parameters.sampler);
3691
if (parameters.coords)
3692
query->addIdOperand(parameters.coords);
3693
if (parameters.lod)
3694
query->addIdOperand(parameters.lod);
3695
addInstruction(std::unique_ptr<Instruction>(query));
3696
addCapability(Capability::ImageQuery);
3697
3698
return query->getResultId();
3699
}
3700
3701
// External comments in header.
3702
// Operates recursively to visit the composite's hierarchy.
3703
Id Builder::createCompositeCompare(Decoration precision, Id value1, Id value2, bool equal)
3704
{
3705
Id boolType = makeBoolType();
3706
Id valueType = getTypeId(value1);
3707
3708
Id resultId = NoResult;
3709
3710
int numConstituents = getNumTypeConstituents(valueType);
3711
3712
// Scalars and Vectors
3713
3714
if (isScalarType(valueType) || isVectorType(valueType)) {
3715
assert(valueType == getTypeId(value2));
3716
// These just need a single comparison, just have
3717
// to figure out what it is.
3718
Op op;
3719
switch (getMostBasicTypeClass(valueType)) {
3720
case Op::OpTypeFloat:
3721
op = equal ? Op::OpFOrdEqual : Op::OpFUnordNotEqual;
3722
break;
3723
case Op::OpTypeInt:
3724
default:
3725
op = equal ? Op::OpIEqual : Op::OpINotEqual;
3726
break;
3727
case Op::OpTypeBool:
3728
op = equal ? Op::OpLogicalEqual : Op::OpLogicalNotEqual;
3729
precision = NoPrecision;
3730
break;
3731
}
3732
3733
if (isScalarType(valueType)) {
3734
// scalar
3735
resultId = createBinOp(op, boolType, value1, value2);
3736
} else {
3737
// vector
3738
resultId = createBinOp(op, makeVectorType(boolType, numConstituents), value1, value2);
3739
setPrecision(resultId, precision);
3740
// reduce vector compares...
3741
resultId = createUnaryOp(equal ? Op::OpAll : Op::OpAny, boolType, resultId);
3742
}
3743
3744
return setPrecision(resultId, precision);
3745
}
3746
3747
// Only structs, arrays, and matrices should be left.
3748
// They share in common the reduction operation across their constituents.
3749
assert(isAggregateType(valueType) || isMatrixType(valueType));
3750
3751
// Compare each pair of constituents
3752
for (int constituent = 0; constituent < numConstituents; ++constituent) {
3753
std::vector<unsigned> indexes(1, constituent);
3754
Id constituentType1 = getContainedTypeId(getTypeId(value1), constituent);
3755
Id constituentType2 = getContainedTypeId(getTypeId(value2), constituent);
3756
Id constituent1 = createCompositeExtract(value1, constituentType1, indexes);
3757
Id constituent2 = createCompositeExtract(value2, constituentType2, indexes);
3758
3759
Id subResultId = createCompositeCompare(precision, constituent1, constituent2, equal);
3760
3761
if (constituent == 0)
3762
resultId = subResultId;
3763
else
3764
resultId = setPrecision(createBinOp(equal ? Op::OpLogicalAnd : Op::OpLogicalOr, boolType, resultId, subResultId),
3765
precision);
3766
}
3767
3768
return resultId;
3769
}
3770
3771
// OpCompositeConstruct
3772
Id Builder::createCompositeConstruct(Id typeId, const std::vector<Id>& constituents)
3773
{
3774
assert(isAggregateType(typeId) || (getNumTypeConstituents(typeId) > 1 &&
3775
getNumTypeConstituents(typeId) == constituents.size()) ||
3776
(isCooperativeVectorType(typeId) && constituents.size() == 1));
3777
3778
if (generatingOpCodeForSpecConst) {
3779
// Sometimes, even in spec-constant-op mode, the constant composite to be
3780
// constructed may not be a specialization constant.
3781
// e.g.:
3782
// const mat2 m2 = mat2(a_spec_const, a_front_end_const, another_front_end_const, third_front_end_const);
3783
// The first column vector should be a spec constant one, as a_spec_const is a spec constant.
3784
// The second column vector should NOT be spec constant, as it does not contain any spec constants.
3785
// To handle such cases, we check the constituents of the constant vector to determine whether this
3786
// vector should be created as a spec constant.
3787
return makeCompositeConstant(typeId, constituents,
3788
std::any_of(constituents.begin(), constituents.end(),
3789
[&](spv::Id id) { return isSpecConstant(id); }));
3790
}
3791
3792
bool replicate = false;
3793
size_t numConstituents = constituents.size();
3794
3795
if (useReplicatedComposites || isCooperativeVectorType(typeId)) {
3796
replicate = numConstituents > 0 &&
3797
std::equal(constituents.begin() + 1, constituents.end(), constituents.begin());
3798
}
3799
3800
if (replicate) {
3801
numConstituents = 1;
3802
addCapability(spv::Capability::ReplicatedCompositesEXT);
3803
addExtension(spv::E_SPV_EXT_replicated_composites);
3804
}
3805
3806
Op opcode = replicate ? Op::OpCompositeConstructReplicateEXT : Op::OpCompositeConstruct;
3807
3808
Instruction* op = new Instruction(getUniqueId(), typeId, opcode);
3809
op->reserveOperands(constituents.size());
3810
for (size_t c = 0; c < numConstituents; ++c)
3811
op->addIdOperand(constituents[c]);
3812
addInstruction(std::unique_ptr<Instruction>(op));
3813
3814
return op->getResultId();
3815
}
3816
3817
// coopmat conversion
3818
Id Builder::createCooperativeMatrixConversion(Id typeId, Id source)
3819
{
3820
Instruction* op = new Instruction(getUniqueId(), typeId, Op::OpCooperativeMatrixConvertNV);
3821
op->addIdOperand(source);
3822
addInstruction(std::unique_ptr<Instruction>(op));
3823
3824
return op->getResultId();
3825
}
3826
3827
// coopmat reduce
3828
Id Builder::createCooperativeMatrixReduce(Op opcode, Id typeId, Id source, unsigned int mask, Id func)
3829
{
3830
Instruction* op = new Instruction(getUniqueId(), typeId, opcode);
3831
op->addIdOperand(source);
3832
op->addImmediateOperand(mask);
3833
op->addIdOperand(func);
3834
addInstruction(std::unique_ptr<Instruction>(op));
3835
3836
return op->getResultId();
3837
}
3838
3839
// coopmat per-element operation
3840
Id Builder::createCooperativeMatrixPerElementOp(Id typeId, const std::vector<Id>& operands)
3841
{
3842
Instruction* op = new Instruction(getUniqueId(), typeId, spv::Op::OpCooperativeMatrixPerElementOpNV);
3843
// skip operand[0], which is where the result is stored
3844
for (uint32_t i = 1; i < operands.size(); ++i) {
3845
op->addIdOperand(operands[i]);
3846
}
3847
addInstruction(std::unique_ptr<Instruction>(op));
3848
3849
return op->getResultId();
3850
}
3851
3852
// Vector or scalar constructor
3853
Id Builder::createConstructor(Decoration precision, const std::vector<Id>& sources, Id resultTypeId)
3854
{
3855
Id result = NoResult;
3856
unsigned int numTargetComponents = getNumTypeComponents(resultTypeId);
3857
unsigned int targetComponent = 0;
3858
3859
// Special case: when calling a vector constructor with a single scalar
3860
// argument, smear the scalar
3861
if (sources.size() == 1 && isScalar(sources[0]) && (numTargetComponents > 1 || isCooperativeVectorType(resultTypeId)))
3862
return smearScalar(precision, sources[0], resultTypeId);
3863
3864
// Special case: a single vector argument whose size matches the target type
3865
if (sources.size() == 1 && isVector(sources[0]) && numTargetComponents == getNumComponents(sources[0])) {
3866
assert(resultTypeId == getTypeId(sources[0]));
3867
return sources[0];
3868
}
3869
3870
// accumulate the arguments for OpCompositeConstruct
3871
std::vector<Id> constituents;
3872
Id scalarTypeId = getScalarTypeId(resultTypeId);
3873
3874
// lambda to store the result of visiting an argument component
3875
const auto latchResult = [&](Id comp) {
3876
if (numTargetComponents > 1)
3877
constituents.push_back(comp);
3878
else
3879
result = comp;
3880
++targetComponent;
3881
};
3882
3883
// lambda to visit a vector argument's components
3884
const auto accumulateVectorConstituents = [&](Id sourceArg) {
3885
unsigned int sourceSize = getNumComponents(sourceArg);
3886
unsigned int sourcesToUse = sourceSize;
3887
if (sourcesToUse + targetComponent > numTargetComponents)
3888
sourcesToUse = numTargetComponents - targetComponent;
3889
3890
for (unsigned int s = 0; s < sourcesToUse; ++s) {
3891
std::vector<unsigned> swiz;
3892
swiz.push_back(s);
3893
latchResult(createRvalueSwizzle(precision, scalarTypeId, sourceArg, swiz));
3894
}
3895
};
3896
3897
// lambda to visit a matrix argument's components
3898
const auto accumulateMatrixConstituents = [&](Id sourceArg) {
3899
unsigned int sourceSize = getNumColumns(sourceArg) * getNumRows(sourceArg);
3900
unsigned int sourcesToUse = sourceSize;
3901
if (sourcesToUse + targetComponent > numTargetComponents)
3902
sourcesToUse = numTargetComponents - targetComponent;
3903
3904
unsigned int col = 0;
3905
unsigned int row = 0;
3906
for (unsigned int s = 0; s < sourcesToUse; ++s) {
3907
if (row >= getNumRows(sourceArg)) {
3908
row = 0;
3909
col++;
3910
}
3911
std::vector<Id> indexes;
3912
indexes.push_back(col);
3913
indexes.push_back(row);
3914
latchResult(createCompositeExtract(sourceArg, scalarTypeId, indexes));
3915
row++;
3916
}
3917
};
3918
3919
// Go through the source arguments, each one could have either
3920
// a single or multiple components to contribute.
3921
for (unsigned int i = 0; i < sources.size(); ++i) {
3922
3923
if (isScalar(sources[i]) || isPointer(sources[i]))
3924
latchResult(sources[i]);
3925
else if (isVector(sources[i]) || isCooperativeVector(sources[i]))
3926
accumulateVectorConstituents(sources[i]);
3927
else if (isMatrix(sources[i]))
3928
accumulateMatrixConstituents(sources[i]);
3929
else
3930
assert(0);
3931
3932
if (targetComponent >= numTargetComponents)
3933
break;
3934
}
3935
3936
// If the result is a vector, make it from the gathered constituents.
3937
if (constituents.size() > 0) {
3938
result = createCompositeConstruct(resultTypeId, constituents);
3939
return setPrecision(result, precision);
3940
} else {
3941
// Precision was set when generating this component.
3942
return result;
3943
}
3944
}
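// Illustrative note, assuming GLSL-style constructor semantics: for "vec4(v2, a, b)" with a vec2
// and two scalars, the loop above pulls v2.x and v2.y out via single-component swizzles (which
// reduce to OpCompositeExtract), latches the two scalars directly, and then emits a single
// OpCompositeConstruct for the vec4.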
3945
3946
// Comments in header
3947
Id Builder::createMatrixConstructor(Decoration precision, const std::vector<Id>& sources, Id resultTypeId)
3948
{
3949
Id componentTypeId = getScalarTypeId(resultTypeId);
3950
unsigned int numCols = getTypeNumColumns(resultTypeId);
3951
unsigned int numRows = getTypeNumRows(resultTypeId);
3952
3953
Instruction* instr = module.getInstruction(componentTypeId);
3954
const unsigned bitCount = instr->getImmediateOperand(0);
3955
3956
// Optimize matrix constructed from a bigger matrix
3957
if (isMatrix(sources[0]) && getNumColumns(sources[0]) >= numCols && getNumRows(sources[0]) >= numRows) {
3958
// To truncate the matrix to a smaller number of rows/columns, we need to:
3959
// 1. For each column, extract the column and truncate it to the required size using shuffle
3960
// 2. Assemble the resulting matrix from all columns
3961
Id matrix = sources[0];
3962
Id columnTypeId = getContainedTypeId(resultTypeId);
3963
Id sourceColumnTypeId = getContainedTypeId(getTypeId(matrix));
3964
3965
std::vector<unsigned> channels;
3966
for (unsigned int row = 0; row < numRows; ++row)
3967
channels.push_back(row);
3968
3969
std::vector<Id> matrixColumns;
3970
for (unsigned int col = 0; col < numCols; ++col) {
3971
std::vector<unsigned> indexes;
3972
indexes.push_back(col);
3973
Id colv = createCompositeExtract(matrix, sourceColumnTypeId, indexes);
3974
setPrecision(colv, precision);
3975
3976
if (numRows != getNumRows(matrix)) {
3977
matrixColumns.push_back(createRvalueSwizzle(precision, columnTypeId, colv, channels));
3978
} else {
3979
matrixColumns.push_back(colv);
3980
}
3981
}
3982
3983
return setPrecision(createCompositeConstruct(resultTypeId, matrixColumns), precision);
3984
}
3985
3986
// Detect a matrix being constructed from a repeated vector of the correct size.
3987
// Create the composite directly from it.
3988
if (sources.size() == numCols && isVector(sources[0]) && getNumComponents(sources[0]) == numRows &&
3989
std::equal(sources.begin() + 1, sources.end(), sources.begin())) {
3990
return setPrecision(createCompositeConstruct(resultTypeId, sources), precision);
3991
}
3992
3993
// Otherwise, will use a two step process
3994
// 1. make a compile-time 2D array of values
3995
// 2. construct a matrix from that array
3996
3997
// Step 1.
3998
3999
// initialize the array to the identity matrix
4000
Id ids[maxMatrixSize][maxMatrixSize];
4001
Id one = (bitCount == 64 ? makeDoubleConstant(1.0) : makeFloatConstant(1.0));
4002
Id zero = (bitCount == 64 ? makeDoubleConstant(0.0) : makeFloatConstant(0.0));
4003
for (int col = 0; col < 4; ++col) {
4004
for (int row = 0; row < 4; ++row) {
4005
if (col == row)
4006
ids[col][row] = one;
4007
else
4008
ids[col][row] = zero;
4009
}
4010
}
4011
4012
// modify components as dictated by the arguments
4013
if (sources.size() == 1 && isScalar(sources[0])) {
4014
// a single scalar; resets the diagonals
4015
for (int col = 0; col < 4; ++col)
4016
ids[col][col] = sources[0];
4017
} else if (isMatrix(sources[0])) {
4018
// constructing from another matrix; copy over the parts that exist in both the argument and constructee
4019
Id matrix = sources[0];
4020
unsigned int minCols = std::min(numCols, getNumColumns(matrix));
4021
unsigned int minRows = std::min(numRows, getNumRows(matrix));
4022
for (unsigned int col = 0; col < minCols; ++col) {
4023
std::vector<unsigned> indexes;
4024
indexes.push_back(col);
4025
for (unsigned int row = 0; row < minRows; ++row) {
4026
indexes.push_back(row);
4027
ids[col][row] = createCompositeExtract(matrix, componentTypeId, indexes);
4028
indexes.pop_back();
4029
setPrecision(ids[col][row], precision);
4030
}
4031
}
4032
} else {
4033
// fill in the matrix in column-major order with whatever argument components are available
4034
unsigned int row = 0;
4035
unsigned int col = 0;
4036
4037
for (unsigned int arg = 0; arg < sources.size() && col < numCols; ++arg) {
4038
Id argComp = sources[arg];
4039
for (unsigned int comp = 0; comp < getNumComponents(sources[arg]); ++comp) {
4040
if (getNumComponents(sources[arg]) > 1) {
4041
argComp = createCompositeExtract(sources[arg], componentTypeId, comp);
4042
setPrecision(argComp, precision);
4043
}
4044
ids[col][row++] = argComp;
4045
if (row == numRows) {
4046
row = 0;
4047
col++;
4048
}
4049
if (col == numCols) {
4050
// If more components are provided than fit the matrix, discard the rest.
4051
break;
4052
}
4053
}
4054
}
4055
}
4056
4057
// Step 2: Construct a matrix from that array.
4058
// First make the column vectors, then make the matrix.
4059
4060
// make the column vectors
4061
Id columnTypeId = getContainedTypeId(resultTypeId);
4062
std::vector<Id> matrixColumns;
4063
for (unsigned int col = 0; col < numCols; ++col) {
4064
std::vector<Id> vectorComponents;
4065
for (unsigned int row = 0; row < numRows; ++row)
4066
vectorComponents.push_back(ids[col][row]);
4067
Id column = createCompositeConstruct(columnTypeId, vectorComponents);
4068
setPrecision(column, precision);
4069
matrixColumns.push_back(column);
4070
}
4071
4072
// make the matrix
4073
return setPrecision(createCompositeConstruct(resultTypeId, matrixColumns), precision);
4074
}
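// Illustrative note: "mat3(aMat4)" hits the truncation fast path above (extract each source
// column, shuffle it down to three rows, reassemble), while "mat4(aMat3)" falls through to the
// two-step path, starting from an identity matrix and copying over the overlapping 3x3 region.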
4075
4076
// Comments in header
4077
Builder::If::If(Id cond, SelectionControlMask ctrl, Builder& gb) :
4078
builder(gb),
4079
condition(cond),
4080
control(ctrl),
4081
elseBlock(nullptr)
4082
{
4083
function = &builder.getBuildPoint()->getParent();
4084
4085
// make the blocks, but only put the then-block into the function,
4086
// the else-block and merge-block will be added later, in order, after
4087
// earlier code is emitted
4088
thenBlock = new Block(builder.getUniqueId(), *function);
4089
mergeBlock = new Block(builder.getUniqueId(), *function);
4090
4091
// Save the current block, so that we can add in the flow control split when
4092
// makeEndIf is called.
4093
headerBlock = builder.getBuildPoint();
4094
builder.createSelectionMerge(mergeBlock, control);
4095
4096
function->addBlock(thenBlock);
4097
builder.setBuildPoint(thenBlock);
4098
}
4099
4100
// Comments in header
4101
void Builder::If::makeBeginElse()
4102
{
4103
// Close out the "then" by having it jump to the mergeBlock
4104
builder.createBranch(true, mergeBlock);
4105
4106
// Make the first else block and add it to the function
4107
elseBlock = new Block(builder.getUniqueId(), *function);
4108
function->addBlock(elseBlock);
4109
4110
// Start building the else block
4111
builder.setBuildPoint(elseBlock);
4112
}
4113
4114
// Comments in header
4115
void Builder::If::makeEndIf()
4116
{
4117
// jump to the merge block
4118
builder.createBranch(true, mergeBlock);
4119
4120
// Go back to the headerBlock and make the flow control split
4121
builder.setBuildPoint(headerBlock);
4122
if (elseBlock)
4123
builder.createConditionalBranch(condition, thenBlock, elseBlock);
4124
else
4125
builder.createConditionalBranch(condition, thenBlock, mergeBlock);
4126
4127
// add the merge block to the function
4128
function->addBlock(mergeBlock);
4129
builder.setBuildPoint(mergeBlock);
4130
}
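// Illustrative usage sketch for the If helper, assuming the declarations in SpvBuilder.h:
//
//   spv::Builder::If ifBuilder(conditionId, spv::SelectionControlMask::MaskNone, builder);
//   // ... emit the then-side instructions at the current build point ...
//   ifBuilder.makeBeginElse();
//   // ... emit the else-side instructions ...
//   ifBuilder.makeEndIf();  // goes back to the header block and emits the OpBranchConditional split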
4131
4132
// Comments in header
4133
void Builder::makeSwitch(Id selector, SelectionControlMask control, int numSegments, const std::vector<int>& caseValues,
4134
const std::vector<int>& valueIndexToSegment, int defaultSegment,
4135
std::vector<Block*>& segmentBlocks)
4136
{
4137
Function& function = buildPoint->getParent();
4138
4139
// make all the blocks
4140
for (int s = 0; s < numSegments; ++s)
4141
segmentBlocks.push_back(new Block(getUniqueId(), function));
4142
4143
Block* mergeBlock = new Block(getUniqueId(), function);
4144
4145
// make and insert the switch's selection-merge instruction
4146
createSelectionMerge(mergeBlock, control);
4147
4148
// make the switch instruction
4149
Instruction* switchInst = new Instruction(NoResult, NoType, Op::OpSwitch);
4150
switchInst->reserveOperands((caseValues.size() * 2) + 2);
4151
switchInst->addIdOperand(selector);
4152
auto defaultOrMerge = (defaultSegment >= 0) ? segmentBlocks[defaultSegment] : mergeBlock;
4153
switchInst->addIdOperand(defaultOrMerge->getId());
4154
defaultOrMerge->addPredecessor(buildPoint);
4155
for (int i = 0; i < (int)caseValues.size(); ++i) {
4156
switchInst->addImmediateOperand(caseValues[i]);
4157
switchInst->addIdOperand(segmentBlocks[valueIndexToSegment[i]]->getId());
4158
segmentBlocks[valueIndexToSegment[i]]->addPredecessor(buildPoint);
4159
}
4160
addInstruction(std::unique_ptr<Instruction>(switchInst));
4161
4162
// push the merge block
4163
switchMerges.push(mergeBlock);
4164
}
4165
4166
// Comments in header
4167
void Builder::addSwitchBreak(bool implicit)
4168
{
4169
// branch to the top of the merge block stack
4170
createBranch(implicit, switchMerges.top());
4171
createAndSetNoPredecessorBlock("post-switch-break");
4172
}
4173
4174
// Comments in header
4175
void Builder::nextSwitchSegment(std::vector<Block*>& segmentBlock, int nextSegment)
4176
{
4177
int lastSegment = nextSegment - 1;
4178
if (lastSegment >= 0) {
4179
// Close out previous segment by jumping, if necessary, to next segment
4180
if (! buildPoint->isTerminated())
4181
createBranch(true, segmentBlock[nextSegment]);
4182
}
4183
Block* block = segmentBlock[nextSegment];
4184
block->getParent().addBlock(block);
4185
setBuildPoint(block);
4186
}
4187
4188
// Comments in header
4189
void Builder::endSwitch(std::vector<Block*>& /*segmentBlock*/)
4190
{
4191
// Close out the final segment by jumping, if necessary, to the merge block
4192
if (! buildPoint->isTerminated())
4193
addSwitchBreak(true);
4194
4195
switchMerges.top()->getParent().addBlock(switchMerges.top());
4196
setBuildPoint(switchMerges.top());
4197
4198
switchMerges.pop();
4199
}
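// Illustrative usage sketch for the switch helpers, assuming the declarations in SpvBuilder.h:
//
//   std::vector<spv::Block*> segments;
//   builder.makeSwitch(selectorId, spv::SelectionControlMask::MaskNone, numSegments, caseValues,
//                      valueIndexToSegment, defaultSegment, segments);
//   for (int s = 0; s < numSegments; ++s) {
//       builder.nextSwitchSegment(segments, s);
//       // ... emit the segment's code; addSwitchBreak(false) emits an explicit 'break' ...
//   }
//   builder.endSwitch(segments);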
4200
4201
Block& Builder::makeNewBlock()
4202
{
4203
Function& function = buildPoint->getParent();
4204
auto block = new Block(getUniqueId(), function);
4205
function.addBlock(block);
4206
return *block;
4207
}
4208
4209
Builder::LoopBlocks& Builder::makeNewLoop()
4210
{
4211
// This verbosity is needed to simultaneously get the same behavior
4212
// everywhere (id's in the same order), have a syntax that works
4213
// across lots of versions of C++, have no warnings from pedantic
4214
// compilation modes, and leave the rest of the code alone.
4215
Block& head = makeNewBlock();
4216
Block& body = makeNewBlock();
4217
Block& merge = makeNewBlock();
4218
Block& continue_target = makeNewBlock();
4219
LoopBlocks blocks(head, body, merge, continue_target);
4220
loops.push(blocks);
4221
return loops.top();
4222
}
4223
4224
void Builder::createLoopContinue()
4225
{
4226
createBranch(false, &loops.top().continue_target);
4227
// Set up a block for dead code.
4228
createAndSetNoPredecessorBlock("post-loop-continue");
4229
}
4230
4231
void Builder::createLoopExit()
4232
{
4233
createBranch(false, &loops.top().merge);
4234
// Set up a block for dead code.
4235
createAndSetNoPredecessorBlock("post-loop-break");
4236
}
4237
4238
void Builder::closeLoop()
4239
{
4240
loops.pop();
4241
}
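// Illustrative usage sketch for the loop helpers, assuming the LoopBlocks members declared in
// SpvBuilder.h (head, body, merge, continue_target):
//
//   spv::Builder::LoopBlocks& blocks = builder.makeNewLoop();
//   builder.createBranch(true, &blocks.head);
//   builder.setBuildPoint(&blocks.head);
//   builder.createLoopMerge(&blocks.merge, &blocks.continue_target, spv::LoopControlMask::MaskNone, {});
//   // ... emit the condition, createConditionalBranch(cond, &blocks.body, &blocks.merge),
//   // build the body (createLoopContinue()/createLoopExit() handle 'continue'/'break'),
//   // then branch to blocks.continue_target and from there back to blocks.head ...
//   builder.closeLoop();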
4242
4243
void Builder::clearAccessChain()
4244
{
4245
accessChain.base = NoResult;
4246
accessChain.indexChain.clear();
4247
accessChain.instr = NoResult;
4248
accessChain.swizzle.clear();
4249
accessChain.component = NoResult;
4250
accessChain.preSwizzleBaseType = NoType;
4251
accessChain.isRValue = false;
4252
accessChain.coherentFlags.clear();
4253
accessChain.alignment = 0;
4254
}
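// Note on the access-chain workflow, assuming the setters declared in SpvBuilder.h: a caller
// typically does clearAccessChain(), establishes the base (e.g. setAccessChainLValue()), pushes
// indexes and swizzles, and then consumes the chain with accessChainLoad(), accessChainStore(),
// or accessChainGetLValue(); collapseAccessChain() further below is what finally emits the
// OpAccessChain when one is needed.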
4255
4256
// Comments in header
4257
void Builder::accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType,
4258
AccessChain::CoherentFlags coherentFlags, unsigned int alignment)
4259
{
4260
accessChain.coherentFlags |= coherentFlags;
4261
accessChain.alignment |= alignment;
4262
4263
// swizzles can be stacked in GLSL, but simplified to a single
4264
// one here; the base type doesn't change
4265
if (accessChain.preSwizzleBaseType == NoType)
4266
accessChain.preSwizzleBaseType = preSwizzleBaseType;
4267
4268
// if needed, propagate the swizzle for the current access chain
4269
if (accessChain.swizzle.size() > 0) {
4270
std::vector<unsigned> oldSwizzle = accessChain.swizzle;
4271
accessChain.swizzle.resize(0);
4272
for (unsigned int i = 0; i < swizzle.size(); ++i) {
4273
assert(swizzle[i] < oldSwizzle.size());
4274
accessChain.swizzle.push_back(oldSwizzle[swizzle[i]]);
4275
}
4276
} else
4277
accessChain.swizzle = swizzle;
4278
4279
// determine if we need to track this swizzle anymore
4280
simplifyAccessChainSwizzle();
4281
}
4282
4283
// Comments in header
4284
void Builder::accessChainStore(Id rvalue, Decoration nonUniform, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
4285
{
4286
assert(accessChain.isRValue == false);
4287
4288
transferAccessChainSwizzle(true);
4289
4290
// If a swizzle exists and is not full and is not dynamic, then the swizzle will be broken into individual stores.
4291
if (accessChain.swizzle.size() > 0 &&
4292
getNumTypeComponents(getResultingAccessChainType()) != accessChain.swizzle.size() &&
4293
accessChain.component == NoResult) {
4294
for (unsigned int i = 0; i < accessChain.swizzle.size(); ++i) {
4295
accessChain.indexChain.push_back(makeUintConstant(accessChain.swizzle[i]));
4296
accessChain.instr = NoResult;
4297
4298
Id base = collapseAccessChain();
4299
addDecoration(base, nonUniform);
4300
4301
accessChain.indexChain.pop_back();
4302
accessChain.instr = NoResult;
4303
4304
// dynamic component should be gone
4305
assert(accessChain.component == NoResult);
4306
4307
Id source = createCompositeExtract(rvalue, getContainedTypeId(getTypeId(rvalue)), i);
4308
4309
// take LSB of alignment
4310
alignment = alignment & ~(alignment & (alignment-1));
4311
if (getStorageClass(base) == StorageClass::PhysicalStorageBufferEXT) {
4312
memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessMask::Aligned);
4313
}
4314
4315
createStore(source, base, memoryAccess, scope, alignment);
4316
}
4317
}
4318
else {
4319
Id base = collapseAccessChain();
4320
addDecoration(base, nonUniform);
4321
4322
Id source = rvalue;
4323
4324
// dynamic component should be gone
4325
assert(accessChain.component == NoResult);
4326
4327
// If a swizzle still exists, it may be out of order, so we must load the target vector,
4328
// then extract and insert elements to perform the write mask and/or swizzle.
4329
if (accessChain.swizzle.size() > 0) {
4330
Id tempBaseId = createLoad(base, spv::NoPrecision);
4331
source = createLvalueSwizzle(getTypeId(tempBaseId), tempBaseId, source, accessChain.swizzle);
4332
}
4333
4334
// take LSB of alignment
4335
alignment = alignment & ~(alignment & (alignment-1));
4336
if (getStorageClass(base) == StorageClass::PhysicalStorageBufferEXT) {
4337
memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessMask::Aligned);
4338
}
4339
4340
createStore(source, base, memoryAccess, scope, alignment);
4341
}
4342
}
4343
4344
// Comments in header
4345
Id Builder::accessChainLoad(Decoration precision, Decoration l_nonUniform,
4346
Decoration r_nonUniform, Id resultType, spv::MemoryAccessMask memoryAccess,
4347
spv::Scope scope, unsigned int alignment)
4348
{
4349
Id id;
4350
4351
if (accessChain.isRValue) {
4352
// transfer access chain, but try to stay in registers
4353
transferAccessChainSwizzle(false);
4354
if (accessChain.indexChain.size() > 0) {
4355
Id swizzleBase = accessChain.preSwizzleBaseType != NoType ? accessChain.preSwizzleBaseType : resultType;
4356
4357
// if all the accesses are constants, we can use OpCompositeExtract
4358
std::vector<unsigned> indexes;
4359
bool constant = true;
4360
for (int i = 0; i < (int)accessChain.indexChain.size(); ++i) {
4361
if (isConstantScalar(accessChain.indexChain[i]))
4362
indexes.push_back(getConstantScalar(accessChain.indexChain[i]));
4363
else {
4364
constant = false;
4365
break;
4366
}
4367
}
4368
4369
if (constant) {
4370
id = createCompositeExtract(accessChain.base, swizzleBase, indexes);
4371
setPrecision(id, precision);
4372
} else if (isCooperativeVector(accessChain.base)) {
4373
assert(accessChain.indexChain.size() == 1);
4374
id = createVectorExtractDynamic(accessChain.base, resultType, accessChain.indexChain[0]);
4375
} else {
4376
Id lValue = NoResult;
4377
if (spvVersion >= Spv_1_4 && isValidInitializer(accessChain.base)) {
4378
// make a new function variable for this r-value, using an initializer,
4379
// and mark it as NonWritable so that downstream it can be detected as a lookup
4380
// table
4381
lValue = createVariable(NoPrecision, StorageClass::Function, getTypeId(accessChain.base),
4382
"indexable", accessChain.base);
4383
addDecoration(lValue, Decoration::NonWritable);
4384
} else {
4385
lValue = createVariable(NoPrecision, StorageClass::Function, getTypeId(accessChain.base),
4386
"indexable");
4387
// store into it
4388
createStore(accessChain.base, lValue);
4389
}
4390
// move base to the new variable
4391
accessChain.base = lValue;
4392
accessChain.isRValue = false;
4393
4394
// load through the access chain
4395
id = createLoad(collapseAccessChain(), precision);
4396
}
4397
} else
4398
id = accessChain.base; // no precision, it was set when this was defined
4399
} else {
4400
transferAccessChainSwizzle(true);
4401
4402
// take LSB of alignment
4403
alignment = alignment & ~(alignment & (alignment-1));
4404
if (getStorageClass(accessChain.base) == StorageClass::PhysicalStorageBufferEXT) {
4405
memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessMask::Aligned);
4406
}
4407
4408
// load through the access chain
4409
id = collapseAccessChain();
4410
// Apply nonuniform both to the access chain and the loaded value.
4411
// Buffer accesses need the access chain decorated, and this is where
4412
// loaded image types get decorated. TODO: This should maybe move to
4413
// createImageTextureFunctionCall.
4414
addDecoration(id, l_nonUniform);
4415
id = createLoad(id, precision, memoryAccess, scope, alignment);
4416
addDecoration(id, r_nonUniform);
4417
}
4418
4419
// Done, unless there are swizzles to do
4420
if (accessChain.swizzle.size() == 0 && accessChain.component == NoResult)
4421
return id;
4422
4423
// Do remaining swizzling
4424
4425
// Do the basic swizzle
4426
if (accessChain.swizzle.size() > 0) {
4427
Id swizzledType = getScalarTypeId(getTypeId(id));
4428
if (accessChain.swizzle.size() > 1)
4429
swizzledType = makeVectorType(swizzledType, (int)accessChain.swizzle.size());
4430
id = createRvalueSwizzle(precision, swizzledType, id, accessChain.swizzle);
4431
}
4432
4433
// Do the dynamic component
4434
if (accessChain.component != NoResult)
4435
id = setPrecision(createVectorExtractDynamic(id, resultType, accessChain.component), precision);
4436
4437
addDecoration(id, r_nonUniform);
4438
return id;
4439
}
4440
4441
Id Builder::accessChainGetLValue()
4442
{
4443
assert(accessChain.isRValue == false);
4444
4445
transferAccessChainSwizzle(true);
4446
Id lvalue = collapseAccessChain();
4447
4448
// If a swizzle exists here, it is out of order or not full, and we would have to load the
4449
// target vector and extract and insert elements to perform the write mask and/or swizzle.
4450
// That does not work when a direct l-value pointer is being requested.
4451
assert(accessChain.swizzle.size() == 0);
4452
assert(accessChain.component == NoResult);
4453
4454
return lvalue;
4455
}
4456
4457
// comment in header
4458
Id Builder::accessChainGetInferredType()
4459
{
4460
// anything to operate on?
4461
if (accessChain.base == NoResult)
4462
return NoType;
4463
Id type = getTypeId(accessChain.base);
4464
4465
// do initial dereference
4466
if (! accessChain.isRValue)
4467
type = getContainedTypeId(type);
4468
4469
// dereference each index
4470
for (auto it = accessChain.indexChain.cbegin(); it != accessChain.indexChain.cend(); ++it) {
4471
if (isStructType(type))
4472
type = getContainedTypeId(type, getConstantScalar(*it));
4473
else
4474
type = getContainedTypeId(type);
4475
}
4476
4477
// dereference swizzle
4478
if (accessChain.swizzle.size() == 1)
4479
type = getContainedTypeId(type);
4480
else if (accessChain.swizzle.size() > 1)
4481
type = makeVectorType(getContainedTypeId(type), (int)accessChain.swizzle.size());
4482
4483
// dereference component selection
4484
if (accessChain.component)
4485
type = getContainedTypeId(type);
4486
4487
return type;
4488
}
4489
4490
void Builder::dump(std::vector<unsigned int>& out) const
4491
{
4492
// Header, before first instructions:
4493
out.push_back(MagicNumber);
4494
out.push_back(spvVersion);
4495
out.push_back(builderNumber);
4496
out.push_back(uniqueId + 1);
4497
out.push_back(0);
4498
4499
// Capabilities
4500
for (auto it = capabilities.cbegin(); it != capabilities.cend(); ++it) {
4501
Instruction capInst(0, 0, Op::OpCapability);
4502
capInst.addImmediateOperand(*it);
4503
capInst.dump(out);
4504
}
4505
4506
for (auto it = extensions.cbegin(); it != extensions.cend(); ++it) {
4507
Instruction extInst(0, 0, Op::OpExtension);
4508
extInst.addStringOperand(it->c_str());
4509
extInst.dump(out);
4510
}
4511
4512
dumpInstructions(out, imports);
4513
Instruction memInst(0, 0, Op::OpMemoryModel);
4514
memInst.addImmediateOperand(addressModel);
4515
memInst.addImmediateOperand(memoryModel);
4516
memInst.dump(out);
4517
4518
// Instructions saved up while building:
4519
dumpInstructions(out, entryPoints);
4520
dumpInstructions(out, executionModes);
4521
4522
// Debug instructions
4523
dumpInstructions(out, strings);
4524
dumpSourceInstructions(out);
4525
for (int e = 0; e < (int)sourceExtensions.size(); ++e) {
4526
Instruction sourceExtInst(0, 0, Op::OpSourceExtension);
4527
sourceExtInst.addStringOperand(sourceExtensions[e]);
4528
sourceExtInst.dump(out);
4529
}
4530
dumpInstructions(out, names);
4531
dumpModuleProcesses(out);
4532
4533
// Annotation instructions
4534
dumpInstructions(out, decorations);
4535
4536
dumpInstructions(out, constantsTypesGlobals);
4537
dumpInstructions(out, externals);
4538
4539
// The functions
4540
module.dump(out);
4541
}
4542
4543
//
4544
// Protected methods.
4545
//
4546
4547
// Turn the access chain described in 'accessChain' into the instruction(s)
4548
// computing its address. This *cannot* include complex swizzles, which must
4549
// be handled after this is called.
4550
//
4551
// Can generate code.
4552
Id Builder::collapseAccessChain()
4553
{
4554
assert(accessChain.isRValue == false);
4555
4556
// did we already emit an access chain for this?
4557
if (accessChain.instr != NoResult)
4558
return accessChain.instr;
4559
4560
// If we have a dynamic component, we can still transfer
4561
// that into a final operand to the access chain. We need to remap the
4562
// dynamic component through the swizzle to get a new dynamic component to
4563
// update.
4564
//
4565
// This was not done in transferAccessChainSwizzle() because it might
4566
// generate code.
4567
remapDynamicSwizzle();
4568
if (accessChain.component != NoResult) {
4569
// transfer the dynamic component to the access chain
4570
accessChain.indexChain.push_back(accessChain.component);
4571
accessChain.component = NoResult;
4572
}
4573
4574
// note that non-trivial swizzling is left pending
4575
4576
// do we have an access chain?
4577
if (accessChain.indexChain.size() == 0)
4578
return accessChain.base;
4579
4580
// emit the access chain
4581
StorageClass storageClass = (StorageClass)module.getStorageClass(getTypeId(accessChain.base));
4582
accessChain.instr = createAccessChain(storageClass, accessChain.base, accessChain.indexChain);
4583
4584
return accessChain.instr;
4585
}
4586
4587
// For a dynamic component selection of a swizzle.
4588
//
4589
// Turn the swizzle and dynamic component into just a dynamic component.
4590
//
4591
// Generates code.
4592
void Builder::remapDynamicSwizzle()
4593
{
4594
// do we have a swizzle to remap a dynamic component through?
4595
if (accessChain.component != NoResult && accessChain.swizzle.size() > 1) {
4596
// build a vector of the swizzle for the component to map into
4597
std::vector<Id> components;
4598
for (int c = 0; c < (int)accessChain.swizzle.size(); ++c)
4599
components.push_back(makeUintConstant(accessChain.swizzle[c]));
4600
Id mapType = makeVectorType(makeUintType(32), (int)accessChain.swizzle.size());
4601
Id map = makeCompositeConstant(mapType, components);
4602
4603
// use it
4604
accessChain.component = createVectorExtractDynamic(map, makeUintType(32), accessChain.component);
4605
accessChain.swizzle.clear();
4606
}
4607
}
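// Illustrative note: for something like "v.yzx[i]", the code above builds the constant uvec3
// (1, 2, 0) from the swizzle, remaps i with createVectorExtractDynamic on that map, keeps the
// remapped index as the single remaining dynamic component, and drops the swizzle.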
4608
4609
// Clear out the swizzle if it is redundant, that is, if it reselects the same components
4610
// that would be present without the swizzle.
4611
void Builder::simplifyAccessChainSwizzle()
4612
{
4613
// If the swizzle has fewer components than the vector, it is subsetting, and must stay
4614
// to preserve that fact.
4615
if (getNumTypeComponents(accessChain.preSwizzleBaseType) > accessChain.swizzle.size())
4616
return;
4617
4618
// if components are out of order, it is a swizzle
4619
for (unsigned int i = 0; i < accessChain.swizzle.size(); ++i) {
4620
if (i != accessChain.swizzle[i])
4621
return;
4622
}
4623
4624
// otherwise, there is no need to track this swizzle
4625
accessChain.swizzle.clear();
4626
if (accessChain.component == NoResult)
4627
accessChain.preSwizzleBaseType = NoType;
4628
}
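// Illustrative note: ".xyzw" on a vec4 (or ".xy" on a vec2) is an identity selection of the full
// vector and gets dropped here, while ".xy" on a vec4 subsets the vector and ".yx" reorders it,
// so both of those are kept.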

// To the extent any swizzling can become part of the chain
// of accesses instead of a post operation, make it so.
// If 'dynamic' is true, include transferring the dynamic component,
// otherwise, leave it pending.
//
// Does not generate code; just updates the access chain.
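//
// Example (illustrative): a single-component selection like '.y' becomes the
// constant index 1 appended to indexChain, so a later load/store can go
// through the access chain rather than a separate extract/shuffle.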
void Builder::transferAccessChainSwizzle(bool dynamic)
{
    // nonexistent?
    if (accessChain.swizzle.size() == 0 && accessChain.component == NoResult)
        return;

    // too complex?
    // (this requires either a swizzle, or generating code for a dynamic component)
    if (accessChain.swizzle.size() > 1)
        return;

    // a single component, in the swizzle and/or the dynamic component
    if (accessChain.swizzle.size() == 1) {
        assert(accessChain.component == NoResult);
        // handle static component selection
        accessChain.indexChain.push_back(makeUintConstant(accessChain.swizzle.front()));
        accessChain.swizzle.clear();
        accessChain.preSwizzleBaseType = NoType;
    } else if (dynamic && accessChain.component != NoResult) {
        assert(accessChain.swizzle.size() == 0);
        // handle dynamic component
        accessChain.indexChain.push_back(accessChain.component);
        accessChain.preSwizzleBaseType = NoType;
        accessChain.component = NoResult;
    }
}

// Utility method for creating a new block and setting the insert point to
// be in it. This is useful for flow-control operations that need a "dummy"
// block following them (e.g. instructions after a discard, etc.).
void Builder::createAndSetNoPredecessorBlock(const char* /*name*/)
{
    Block* block = new Block(getUniqueId(), buildPoint->getParent());
    block->setUnreachable();
    buildPoint->getParent().addBlock(block);
    setBuildPoint(block);

    // if (name)
    //     addName(block->getId(), name);
}

// Comments in header
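// 'implicit' marks branches the builder adds on its own (for example, falling
// through to a merge block) rather than ones that map to a source statement,
// so they are emitted via addInstructionNoDebugInfo() and carry no debug line
// info.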
void Builder::createBranch(bool implicit, Block* block)
{
    Instruction* branch = new Instruction(Op::OpBranch);
    branch->addIdOperand(block->getId());
    if (implicit) {
        addInstructionNoDebugInfo(std::unique_ptr<Instruction>(branch));
    }
    else {
        addInstruction(std::unique_ptr<Instruction>(branch));
    }
    block->addPredecessor(buildPoint);
}

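// Note: SPIR-V requires OpSelectionMerge to immediately precede the
// OpBranchConditional or OpSwitch that it structures, so the caller is
// expected to emit that branch as the very next instruction.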
void Builder::createSelectionMerge(Block* mergeBlock, SelectionControlMask control)
{
    Instruction* merge = new Instruction(Op::OpSelectionMerge);
    merge->reserveOperands(2);
    merge->addIdOperand(mergeBlock->getId());
    merge->addImmediateOperand(control);
    addInstruction(std::unique_ptr<Instruction>(merge));
}

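// The extra 'operands' are the literal parameters required by some
// LoopControl bits (for example, a literal dependency length for
// DependencyLength); they are appended after the control mask.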
void Builder::createLoopMerge(Block* mergeBlock, Block* continueBlock, LoopControlMask control,
                              const std::vector<unsigned int>& operands)
{
    Instruction* merge = new Instruction(Op::OpLoopMerge);
    merge->reserveOperands(operands.size() + 3);
    merge->addIdOperand(mergeBlock->getId());
    merge->addIdOperand(continueBlock->getId());
    merge->addImmediateOperand(control);
    for (int op = 0; op < (int)operands.size(); ++op)
        merge->addImmediateOperand(operands[op]);
    addInstruction(std::unique_ptr<Instruction>(merge));
}

void Builder::createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock)
{
    Instruction* branch = new Instruction(Op::OpBranchConditional);
    branch->reserveOperands(3);
    branch->addIdOperand(condition);
    branch->addIdOperand(thenBlock->getId());
    branch->addIdOperand(elseBlock->getId());

    // A conditional branch is always attached to a condition expression
    addInstructionNoDebugInfo(std::unique_ptr<Instruction>(branch));

    thenBlock->addPredecessor(buildPoint);
    elseBlock->addPredecessor(buildPoint);
}

// OpSource
// [OpSourceContinued]
// ...
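//
// A single SPIR-V instruction is limited to 0xFFFF words. OpSource itself
// uses 4 of them, so each instruction can carry at most
// 4 * (0xFFFF - 4) - 1 = 262,123 non-null string bytes (the -1 leaves room
// for the null terminator); any remainder is emitted as OpSourceContinued.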
void Builder::dumpSourceInstructions(const spv::Id fileId, const std::string& text,
                                     std::vector<unsigned int>& out) const
{
    const int maxWordCount = 0xFFFF;
    const int opSourceWordCount = 4;
    const int nonNullBytesPerInstruction = 4 * (maxWordCount - opSourceWordCount) - 1;

    if (sourceLang != SourceLanguage::Unknown) {
        // OpSource Language Version File Source
        Instruction sourceInst(NoResult, NoType, Op::OpSource);
        sourceInst.reserveOperands(3);
        sourceInst.addImmediateOperand(sourceLang);
        sourceInst.addImmediateOperand(sourceVersion);
        // File operand
        if (fileId != NoResult) {
            sourceInst.addIdOperand(fileId);
            // Source operand
            if (text.size() > 0) {
                int nextByte = 0;
                std::string subString;
                while ((int)text.size() - nextByte > 0) {
                    subString = text.substr(nextByte, nonNullBytesPerInstruction);
                    if (nextByte == 0) {
                        // OpSource
                        sourceInst.addStringOperand(subString.c_str());
                        sourceInst.dump(out);
                    } else {
                        // OpSourceContinued
                        Instruction sourceContinuedInst(Op::OpSourceContinued);
                        sourceContinuedInst.addStringOperand(subString.c_str());
                        sourceContinuedInst.dump(out);
                    }
                    nextByte += nonNullBytesPerInstruction;
                }
            } else
                sourceInst.dump(out);
        } else
            sourceInst.dump(out);
    }
}
4771
4772
// Dump an OpSource[Continued] sequence for the source and every include file
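// (Skipped when non-semantic shader debug info is enabled, since in that mode
// the source text is expected to travel with the debug-info instructions
// instead.)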
void Builder::dumpSourceInstructions(std::vector<unsigned int>& out) const
{
    if (emitNonSemanticShaderDebugInfo) return;
    dumpSourceInstructions(mainFileId, sourceText, out);
    for (auto iItr = includeFiles.begin(); iItr != includeFiles.end(); ++iItr)
        dumpSourceInstructions(iItr->first, *iItr->second, out);
}

template <class Range> void Builder::dumpInstructions(std::vector<unsigned int>& out, const Range& instructions) const
{
    for (const auto& inst : instructions) {
        inst->dump(out);
    }
}

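// Emit one OpModuleProcessed per recorded process string; these simply
// document transforms that were applied to the module.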
void Builder::dumpModuleProcesses(std::vector<unsigned int>& out) const
{
    for (int i = 0; i < (int)moduleProcesses.size(); ++i) {
        Instruction moduleProcessed(Op::OpModuleProcessed);
        moduleProcessed.addStringOperand(moduleProcesses[i]);
        moduleProcessed.dump(out);
    }
}

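// Strict weak ordering over decoration instructions: compare by the decorated
// id first, then by opcode, then operand by operand, so decorations keep a
// stable, canonical order.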
bool Builder::DecorationInstructionLessThan::operator()(const std::unique_ptr<Instruction>& lhs,
                                                        const std::unique_ptr<Instruction>& rhs) const
{
    // Order by the id to which the decoration applies first. This is more intuitive.
    assert(lhs->isIdOperand(0) && rhs->isIdOperand(0));
    if (lhs->getIdOperand(0) != rhs->getIdOperand(0)) {
        return lhs->getIdOperand(0) < rhs->getIdOperand(0);
    }

    if (lhs->getOpCode() != rhs->getOpCode())
        return lhs->getOpCode() < rhs->getOpCode();

    // Now compare the operands.
    int minSize = std::min(lhs->getNumOperands(), rhs->getNumOperands());
    for (int i = 1; i < minSize; ++i) {
        if (lhs->isIdOperand(i) != rhs->isIdOperand(i)) {
            return lhs->isIdOperand(i) < rhs->isIdOperand(i);
        }

        if (lhs->isIdOperand(i)) {
            if (lhs->getIdOperand(i) != rhs->getIdOperand(i)) {
                return lhs->getIdOperand(i) < rhs->getIdOperand(i);
            }
        } else {
            if (lhs->getImmediateOperand(i) != rhs->getImmediateOperand(i)) {
                return lhs->getImmediateOperand(i) < rhs->getImmediateOperand(i);
            }
        }
    }

    if (lhs->getNumOperands() != rhs->getNumOperands())
        return lhs->getNumOperands() < rhs->getNumOperands();

    // In this case they are equal.
    return false;
}
} // end spv namespace