GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Karol Herbst <[email protected]>
 */

#include "compiler/nir/nir.h"

#include "util/u_debug.h"

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_from_common.h"
#include "codegen/nv50_ir_lowering_helper.h"
#include "codegen/nv50_ir_util.h"
#include "tgsi/tgsi_from_mesa.h"

#if __cplusplus >= 201103L
#include <unordered_map>
#else
#include <tr1/unordered_map>
#endif
#include <cstring>
#include <list>
#include <vector>

namespace {

#if __cplusplus >= 201103L
using std::hash;
using std::unordered_map;
#else
using std::tr1::hash;
using std::tr1::unordered_map;
#endif

using namespace nv50_ir;

int
type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

static void
function_temp_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   unsigned comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);

   *size = comp_size * length;
   *align = 0x10;
}
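
// type_size() and function_temp_type_info() match the size/align callback
// shapes that NIR lowering passes take (e.g. nir_lower_io and
// nir_lower_vars_to_explicit_types); where they actually get hooked up is
// outside this excerpt, so treat that pairing as an assumption.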

class Converter : public ConverterCommon
{
public:
   Converter(Program *, nir_shader *, nv50_ir_prog_info *, nv50_ir_prog_info_out *);

   bool run();
private:
   typedef std::vector<LValue*> LValues;
   typedef unordered_map<unsigned, LValues> NirDefMap;
   typedef unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
   typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;

   CacheMode convert(enum gl_access_qualifier);
   TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
   LValues& convert(nir_alu_dest *);
   BasicBlock* convert(nir_block *);
   LValues& convert(nir_dest *);
   SVSemantic convert(nir_intrinsic_op);
   Value* convert(nir_load_const_instr*, uint8_t);
   LValues& convert(nir_register *);
   LValues& convert(nir_ssa_def *);

   Value* getSrc(nir_alu_src *, uint8_t component = 0);
   Value* getSrc(nir_register *, uint8_t);
   Value* getSrc(nir_src *, uint8_t, bool indirect = false);
   Value* getSrc(nir_ssa_def *, uint8_t);

   // returned value is the constant part of the given source (either the
   // nir_src or the selected source component of an intrinsic). Even though
   // this is mostly an optimization to be able to skip indirects in a few
   // cases, sometimes we require immediate values or set some fields on
   // instructions (e.g. tex) in order for codegen to consume those.
   // If the found value has no constant part, the Value gets returned
   // through the Value parameter.
   uint32_t getIndirect(nir_src *, uint8_t, Value *&);
   // isScalar indicates that the addressing is scalar; vec4 addressing is
   // assumed otherwise
   uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
                        bool isScalar = false);

   uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);

   void setInterpolate(nv50_ir_varying *,
                       uint8_t,
                       bool centroid,
                       unsigned semantics);

   Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
                         uint8_t c, Value *indirect0 = NULL,
                         Value *indirect1 = NULL, bool patch = false);
   void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
                Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
                Value *indirect1 = NULL);

   bool isFloatType(nir_alu_type);
   bool isSignedType(nir_alu_type);
   bool isResultFloat(nir_op);
   bool isResultSigned(nir_op);

   DataType getDType(nir_alu_instr *);
   DataType getDType(nir_intrinsic_instr *);
   DataType getDType(nir_intrinsic_instr *, bool isSigned);
   DataType getDType(nir_op, uint8_t);

   DataFile getFile(nir_intrinsic_op);

   std::vector<DataType> getSTypes(nir_alu_instr *);
   DataType getSType(nir_src &, bool isFloat, bool isSigned);

   operation getOperation(nir_intrinsic_op);
   operation getOperation(nir_op);
   operation getOperation(nir_texop);
   operation preOperationNeeded(nir_op);

   int getSubOp(nir_intrinsic_op);
   int getSubOp(nir_op);

   CondCode getCondCode(nir_op);

   bool assignSlots();
   bool parseNIR();

   bool visit(nir_alu_instr *);
   bool visit(nir_block *);
   bool visit(nir_cf_node *);
   bool visit(nir_function *);
   bool visit(nir_if *);
   bool visit(nir_instr *);
   bool visit(nir_intrinsic_instr *);
   bool visit(nir_jump_instr *);
   bool visit(nir_load_const_instr*);
   bool visit(nir_loop *);
   bool visit(nir_ssa_undef_instr *);
   bool visit(nir_tex_instr *);

   // tex stuff
   Value* applyProjection(Value *src, Value *proj);
   unsigned int getNIRArgCount(TexInstruction::Target&);

   nir_shader *nir;

   NirDefMap ssaDefs;
   NirDefMap regDefs;
   ImmediateMap immediates;
   NirBlockMap blocks;
   unsigned int curLoopDepth;
   unsigned int curIfDepth;

   BasicBlock *exit;
   Value *zero;
   Instruction *immInsertPos;

   int clipVertexOutput;

   union {
      struct {
         Value *position;
      } fp;
   };
};
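
// Rough flow, inferred from the declarations above rather than stated
// anywhere in this excerpt: run() presumably calls parseNIR() to pull shader
// properties out of the nir_shader, assignSlots() to lay out i/o, and then
// walks the CFG through the visit() overloads, emitting nv50 IR via the
// mk*() helpers inherited from ConverterCommon.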

Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info,
                     nv50_ir_prog_info_out *info_out)
   : ConverterCommon(prog, info, info_out),
     nir(nir),
     curLoopDepth(0),
     curIfDepth(0),
     exit(NULL),
     immInsertPos(NULL),
     clipVertexOutput(-1)
{
   zero = mkImm((uint32_t)0);
}

BasicBlock *
Converter::convert(nir_block *block)
{
   NirBlockMap::iterator it = blocks.find(block->index);
   if (it != blocks.end())
      return it->second;

   BasicBlock *bb = new BasicBlock(func);
   blocks[block->index] = bb;
   return bb;
}

bool
Converter::isFloatType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_float;
}

bool
Converter::isSignedType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_int;
}

bool
Converter::isResultFloat(nir_op op)
{
   const nir_op_info &info = nir_op_infos[op];
   if (info.output_type != nir_type_invalid)
      return isFloatType(info.output_type);

   ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);
   assert(false);
   return true;
}

bool
Converter::isResultSigned(nir_op op)
{
   switch (op) {
   // there is no umul and we get wrong results if we treat all muls as signed
   case nir_op_imul:
   case nir_op_inot:
      return false;
   default:
      const nir_op_info &info = nir_op_infos[op];
      if (info.output_type != nir_type_invalid)
         return isSignedType(info.output_type);
      ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
      assert(false);
      return true;
   }
}

DataType
Converter::getDType(nir_alu_instr *insn)
{
   if (insn->dest.dest.is_ssa)
      return getDType(insn->op, insn->dest.dest.ssa.bit_size);
   else
      return getDType(insn->op, insn->dest.dest.reg.reg->bit_size);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn)
{
   bool isSigned;
   switch (insn->intrinsic) {
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:
      isSigned = true;
      break;
   default:
      isSigned = false;
      break;
   }

   return getDType(insn, isSigned);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn, bool isSigned)
{
   if (insn->dest.is_ssa)
      return typeOfSize(insn->dest.ssa.bit_size / 8, false, isSigned);
   else
      return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, isSigned);
}

DataType
Converter::getDType(nir_op op, uint8_t bitSize)
{
   DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
   if (ty == TYPE_NONE) {
      ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
      assert(false);
   }
   return ty;
}
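
// Example of the mapping above (a sketch, not exhaustive):
// getDType(nir_op_fadd, 32) resolves to TYPE_F32, while
// getDType(nir_op_iadd, 64) resolves to the 64-bit signed type, since iadd's
// NIR output_type is nir_type_int.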

std::vector<DataType>
Converter::getSTypes(nir_alu_instr *insn)
{
   const nir_op_info &info = nir_op_infos[insn->op];
   std::vector<DataType> res(info.num_inputs);

   for (uint8_t i = 0; i < info.num_inputs; ++i) {
      if (info.input_types[i] != nir_type_invalid) {
         res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
      } else {
         ERROR("getSType not implemented for %s idx %u\n", info.name, i);
         assert(false);
         res[i] = TYPE_NONE;
         break;
      }
   }

   return res;
}

DataType
Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
{
   uint8_t bitSize;
   if (src.is_ssa)
      bitSize = src.ssa->bit_size;
   else
      bitSize = src.reg.reg->bit_size;

   DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
   if (ty == TYPE_NONE) {
      const char *str;
      if (isFloat)
         str = "float";
      else if (isSigned)
         str = "int";
      else
         str = "uint";
      ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
      assert(false);
   }
   return ty;
}

DataFile
Converter::getFile(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_load_global:
   case nir_intrinsic_store_global:
   case nir_intrinsic_load_global_constant:
      return FILE_MEMORY_GLOBAL;
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_store_scratch:
      return FILE_MEMORY_LOCAL;
   case nir_intrinsic_load_shared:
   case nir_intrinsic_store_shared:
      return FILE_MEMORY_SHARED;
   case nir_intrinsic_load_kernel_input:
      return FILE_SHADER_INPUT;
   default:
      ERROR("couldn't get DataFile for op %s\n", nir_intrinsic_infos[op].name);
      assert(false);
   }
   return FILE_NULL;
}

operation
Converter::getOperation(nir_op op)
{
   switch (op) {
   // basic ops with float and int variants
   case nir_op_fabs:
   case nir_op_iabs:
      return OP_ABS;
   case nir_op_fadd:
   case nir_op_iadd:
      return OP_ADD;
   case nir_op_iand:
      return OP_AND;
   case nir_op_ifind_msb:
   case nir_op_ufind_msb:
      return OP_BFIND;
   case nir_op_fceil:
      return OP_CEIL;
   case nir_op_fcos:
      return OP_COS;
   case nir_op_f2f32:
   case nir_op_f2f64:
   case nir_op_f2i32:
   case nir_op_f2i64:
   case nir_op_f2u32:
   case nir_op_f2u64:
   case nir_op_i2f32:
   case nir_op_i2f64:
   case nir_op_i2i32:
   case nir_op_i2i64:
   case nir_op_u2f32:
   case nir_op_u2f64:
   case nir_op_u2u32:
   case nir_op_u2u64:
      return OP_CVT;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      return OP_DFDX;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      return OP_DFDY;
   case nir_op_fdiv:
   case nir_op_idiv:
   case nir_op_udiv:
      return OP_DIV;
   case nir_op_fexp2:
      return OP_EX2;
   case nir_op_ffloor:
      return OP_FLOOR;
   case nir_op_ffma:
      return OP_FMA;
   case nir_op_flog2:
      return OP_LG2;
   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      return OP_MAX;
   case nir_op_pack_64_2x32_split:
      return OP_MERGE;
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      return OP_MIN;
   case nir_op_fmod:
   case nir_op_imod:
   case nir_op_umod:
   case nir_op_frem:
   case nir_op_irem:
      return OP_MOD;
   case nir_op_fmul:
   case nir_op_imul:
   case nir_op_imul_high:
   case nir_op_umul_high:
      return OP_MUL;
   case nir_op_fneg:
   case nir_op_ineg:
      return OP_NEG;
   case nir_op_inot:
      return OP_NOT;
   case nir_op_ior:
      return OP_OR;
   case nir_op_fpow:
      return OP_POW;
   case nir_op_frcp:
      return OP_RCP;
   case nir_op_frsq:
      return OP_RSQ;
   case nir_op_fsat:
      return OP_SAT;
   case nir_op_feq32:
   case nir_op_ieq32:
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_fneu32:
   case nir_op_ine32:
      return OP_SET;
   case nir_op_ishl:
      return OP_SHL;
   case nir_op_ishr:
   case nir_op_ushr:
      return OP_SHR;
   case nir_op_fsin:
      return OP_SIN;
   case nir_op_fsqrt:
      return OP_SQRT;
   case nir_op_ftrunc:
      return OP_TRUNC;
   case nir_op_ixor:
      return OP_XOR;
   default:
      ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);
      assert(false);
      return OP_NOP;
   }
}
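
// Float and integer variants deliberately collapse onto a single opcode here
// (nir_op_fadd and nir_op_iadd both become OP_ADD); the distinction is
// carried by the DataType computed in getDType()/getSTypes() instead.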

operation
Converter::getOperation(nir_texop op)
{
   switch (op) {
   case nir_texop_tex:
      return OP_TEX;
   case nir_texop_lod:
      return OP_TXLQ;
   case nir_texop_txb:
      return OP_TXB;
   case nir_texop_txd:
      return OP_TXD;
   case nir_texop_txf:
   case nir_texop_txf_ms:
      return OP_TXF;
   case nir_texop_tg4:
      return OP_TXG;
   case nir_texop_txl:
      return OP_TXL;
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_txs:
      return OP_TXQ;
   default:
      ERROR("couldn't get operation for nir_texop %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_emit_vertex:
      return OP_EMIT;
   case nir_intrinsic_end_primitive:
      return OP_RESTART;
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_inc_wrap:
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_dec_wrap:
      return OP_SUREDP;
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_image_load:
      return OP_SULDP;
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_image_samples:
   case nir_intrinsic_bindless_image_size:
   case nir_intrinsic_image_size:
      return OP_SUQ;
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_image_store:
      return OP_SUSTP;
   default:
      ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::preOperationNeeded(nir_op op)
{
   switch (op) {
   case nir_op_fcos:
   case nir_op_fsin:
      return OP_PRESIN;
   default:
      return OP_NOP;
   }
}

int
Converter::getSubOp(nir_op op)
{
   switch (op) {
   case nir_op_imul_high:
   case nir_op_umul_high:
      return NV50_IR_SUBOP_MUL_HIGH;
   case nir_op_ishl:
   case nir_op_ishr:
   case nir_op_ushr:
      return NV50_IR_SUBOP_SHIFT_WRAP;
   default:
      return 0;
   }
}
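
// NV50_IR_SUBOP_SHIFT_WRAP presumably selects the wrapping shift behaviour
// (shift count taken modulo the register width), which matches NIR's
// definition of ishl/ishr/ushr masking the count to bitsize - 1.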

int
Converter::getSubOp(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_ssbo_atomic_add:
      return NV50_IR_SUBOP_ATOM_ADD;
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_ssbo_atomic_and:
      return NV50_IR_SUBOP_ATOM_AND;
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_shared_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return NV50_IR_SUBOP_ATOM_CAS;
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_exchange:
      return NV50_IR_SUBOP_ATOM_EXCH;
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_ssbo_atomic_or:
      return NV50_IR_SUBOP_ATOM_OR;
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
      return NV50_IR_SUBOP_ATOM_MAX;
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
      return NV50_IR_SUBOP_ATOM_MIN;
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_ssbo_atomic_xor:
      return NV50_IR_SUBOP_ATOM_XOR;
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_inc_wrap:
      return NV50_IR_SUBOP_ATOM_INC;
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_dec_wrap:
      return NV50_IR_SUBOP_ATOM_DEC;

   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      return NV50_IR_SUBOP_MEMBAR(M, GL);
   case nir_intrinsic_memory_barrier_shared:
      return NV50_IR_SUBOP_MEMBAR(M, CTA);

   case nir_intrinsic_vote_all:
      return NV50_IR_SUBOP_VOTE_ALL;
   case nir_intrinsic_vote_any:
      return NV50_IR_SUBOP_VOTE_ANY;
   case nir_intrinsic_vote_ieq:
      return NV50_IR_SUBOP_VOTE_UNI;
   default:
      return 0;
   }
}

CondCode
Converter::getCondCode(nir_op op)
{
   switch (op) {
   case nir_op_feq32:
   case nir_op_ieq32:
      return CC_EQ;
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
      return CC_GE;
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
      return CC_LT;
   case nir_op_fneu32:
      return CC_NEU;
   case nir_op_ine32:
      return CC_NE;
   default:
      ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
      assert(false);
      return CC_FL;
   }
}

Converter::LValues&
Converter::convert(nir_alu_dest *dest)
{
   return convert(&dest->dest);
}

Converter::LValues&
Converter::convert(nir_dest *dest)
{
   if (dest->is_ssa)
      return convert(&dest->ssa);
   if (dest->reg.indirect) {
      ERROR("no support for indirects.");
      assert(false);
   }
   return convert(dest->reg.reg);
}

Converter::LValues&
Converter::convert(nir_register *reg)
{
   assert(!reg->num_array_elems);

   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it != regDefs.end())
      return it->second;

   LValues newDef(reg->num_components);
   for (uint8_t i = 0; i < reg->num_components; i++)
      newDef[i] = getScratch(std::max(4, reg->bit_size / 8));
   return regDefs[reg->index] = newDef;
}

Converter::LValues&
Converter::convert(nir_ssa_def *def)
{
   NirDefMap::iterator it = ssaDefs.find(def->index);
   if (it != ssaDefs.end())
      return it->second;

   LValues newDef(def->num_components);
   for (uint8_t i = 0; i < def->num_components; i++)
      newDef[i] = getSSA(std::max(4, def->bit_size / 8));
   return ssaDefs[def->index] = newDef;
}
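
// Both convert() overloads above round small types up via std::max(4, ...),
// so sub-32-bit NIR values still occupy a full 32-bit register in nv50 IR.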

Value*
Converter::getSrc(nir_alu_src *src, uint8_t component)
{
   if (src->abs || src->negate) {
      ERROR("modifiers currently not supported on nir_alu_src\n");
      assert(false);
   }
   return getSrc(&src->src, src->swizzle[component]);
}

Value*
Converter::getSrc(nir_register *reg, uint8_t idx)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it == regDefs.end())
      return convert(reg)[idx];
   return it->second[idx];
}

Value*
Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
{
   if (src->is_ssa)
      return getSrc(src->ssa, idx);

   if (src->reg.indirect) {
      if (indirect)
         return getSrc(src->reg.indirect, idx);
      ERROR("no support for indirects.");
      assert(false);
      return NULL;
   }

   return getSrc(src->reg.reg, idx);
}

Value*
Converter::getSrc(nir_ssa_def *src, uint8_t idx)
{
   ImmediateMap::iterator iit = immediates.find(src->index);
   if (iit != immediates.end())
      return convert((*iit).second, idx);

   NirDefMap::iterator it = ssaDefs.find(src->index);
   if (it == ssaDefs.end()) {
      ERROR("SSA value %u not found\n", src->index);
      assert(false);
      return NULL;
   }
   return it->second[idx];
}

uint32_t
Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
{
   nir_const_value *offset = nir_src_as_const_value(*src);

   if (offset) {
      indirect = NULL;
      return offset[0].u32;
   }

   indirect = getSrc(src, idx, true);
   return 0;
}

uint32_t
Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect, bool isScalar)
{
   int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
   if (indirect && !isScalar)
      indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
   return idx;
}
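
// The OP_SHL by 4 above scales a vec4-slot index to a byte offset (16 bytes
// per slot); isScalar callers skip it, presumably because scalar addressing
// is already in the right units.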

static void
vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
{
   assert(name && index);

   if (slot >= VERT_ATTRIB_MAX) {
      ERROR("invalid varying slot %u\n", slot);
      assert(false);
      return;
   }

   if (slot >= VERT_ATTRIB_GENERIC0 &&
       slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VERT_ATTRIB_GENERIC0;
      return;
   }

   if (slot >= VERT_ATTRIB_TEX0 &&
       slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VERT_ATTRIB_TEX0;
      return;
   }

   switch (slot) {
   case VERT_ATTRIB_COLOR0:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case VERT_ATTRIB_COLOR1:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 1;
      break;
   case VERT_ATTRIB_EDGEFLAG:
      *name = TGSI_SEMANTIC_EDGEFLAG;
      *index = 0;
      break;
   case VERT_ATTRIB_FOG:
      *name = TGSI_SEMANTIC_FOG;
      *index = 0;
      break;
   case VERT_ATTRIB_NORMAL:
      *name = TGSI_SEMANTIC_NORMAL;
      *index = 0;
      break;
   case VERT_ATTRIB_POS:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case VERT_ATTRIB_POINT_SIZE:
      *name = TGSI_SEMANTIC_PSIZE;
      *index = 0;
      break;
   default:
      ERROR("unknown vert attrib slot %u\n", slot);
      assert(false);
      break;
   }
}

void
Converter::setInterpolate(nv50_ir_varying *var,
                          uint8_t mode,
                          bool centroid,
                          unsigned semantic)
{
   switch (mode) {
   case INTERP_MODE_FLAT:
      var->flat = 1;
      break;
   case INTERP_MODE_NONE:
      if (semantic == TGSI_SEMANTIC_COLOR)
         var->sc = 1;
      else if (semantic == TGSI_SEMANTIC_POSITION)
         var->linear = 1;
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      var->linear = 1;
      break;
   case INTERP_MODE_SMOOTH:
      break;
   }
   var->centroid = centroid;
}

static uint16_t
calcSlots(const glsl_type *type, Program::Type stage, const shader_info &info,
          bool input, const nir_variable *var)
{
   if (!type->is_array())
      return type->count_attribute_slots(false);

   uint16_t slots;
   switch (stage) {
   case Program::TYPE_GEOMETRY:
      slots = type->count_attribute_slots(false);
      if (input)
         slots /= info.gs.vertices_in;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      // remove first dimension
      if (var->data.patch || (!input && stage == Program::TYPE_TESSELLATION_EVAL))
         slots = type->count_attribute_slots(false);
      else
         slots = type->fields.array->count_attribute_slots(false);
      break;
   default:
      slots = type->count_attribute_slots(false);
      break;
   }

   return slots;
}

static uint8_t
getMaskForType(const glsl_type *type, uint8_t slot) {
   uint16_t comp = type->without_array()->components();
   comp = comp ? comp : 4;

   if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
      comp *= 2;
      if (comp > 4) {
         if (slot % 2)
            comp -= 4;
         else
            comp = 4;
      }
   }

   return (1 << comp) - 1;
}
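
// Worked example for getMaskForType(): a dvec3 has 3 components, doubled to
// 6 32-bit words for the 64-bit type; the even slot is capped at 4 words
// (mask 0xf) and the odd follow-up slot keeps the remaining 2 (mask 0x3).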

bool Converter::assignSlots() {
   unsigned name;
   unsigned index;

   info->io.viewportId = -1;
   info_out->numInputs = 0;
   info_out->numOutputs = 0;
   info_out->numSysVals = 0;

   uint8_t i;
   BITSET_FOREACH_SET(i, nir->info.system_values_read, SYSTEM_VALUE_MAX) {
      info_out->sv[info_out->numSysVals].sn = tgsi_get_sysval_semantic(i);
      info_out->sv[info_out->numSysVals].si = 0;
      info_out->sv[info_out->numSysVals].input = 0; // TODO inferSysValDirection(sn);

      switch (i) {
      case SYSTEM_VALUE_INSTANCE_ID:
         info_out->io.instanceId = info_out->numSysVals;
         break;
      case SYSTEM_VALUE_TESS_LEVEL_INNER:
      case SYSTEM_VALUE_TESS_LEVEL_OUTER:
         info_out->sv[info_out->numSysVals].patch = 1;
         break;
      case SYSTEM_VALUE_VERTEX_ID:
         info_out->io.vertexId = info_out->numSysVals;
         break;
      default:
         break;
      }

      info_out->numSysVals += 1;
   }

   if (prog->getType() == Program::TYPE_COMPUTE)
      return true;

   nir_foreach_shader_in_variable(var, nir) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, true, var);
      uint32_t vary = var->data.driver_location;

      assert(vary + slots <= PIPE_MAX_SHADER_INPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         for (uint16_t i = 0; i < slots; ++i) {
            setInterpolate(&info_out->in[vary + i], var->data.interpolation,
                           var->data.centroid | var->data.sample, name);
         }
         break;
      case Program::TYPE_GEOMETRY:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         break;
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         if (var->data.patch && name == TGSI_SEMANTIC_PATCH)
            info_out->numPatchConstants = MAX2(info_out->numPatchConstants, index + slots);
         break;
      case Program::TYPE_VERTEX:
         if (slot >= VERT_ATTRIB_GENERIC0)
            slot = VERT_ATTRIB_GENERIC0 + vary;
         vert_attrib_to_tgsi_semantic((gl_vert_attrib)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_EDGEFLAG:
            info_out->io.edgeFlagIn = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         nv50_ir_varying *v = &info_out->in[vary];

         v->patch = var->data.patch;
         v->sn = name;
         v->si = index + i;
         v->mask |= getMaskForType(type, i) << var->data.location_frac;
      }
      info_out->numInputs = std::max<uint8_t>(info_out->numInputs, vary);
   }

   nir_foreach_shader_out_variable(var, nir) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, false, var);
      uint32_t vary = var->data.driver_location;

      assert(vary < PIPE_MAX_SHADER_OUTPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_frag_result_semantic((gl_frag_result)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_COLOR:
            if (!var->data.fb_fetch_output)
               info_out->prop.fp.numColourResults++;
            if (var->data.location == FRAG_RESULT_COLOR &&
                nir->info.outputs_written & BITFIELD64_BIT(var->data.location))
               info_out->prop.fp.separateFragData = true;
            // sometimes we get FRAG_RESULT_DATAX with data.index 0
            // sometimes we get FRAG_RESULT_DATA0 with data.index X
            index = index == 0 ? var->data.index : index;
            break;
         case TGSI_SEMANTIC_POSITION:
            info_out->io.fragDepth = vary;
            info_out->prop.fp.writesDepth = true;
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            info_out->io.sampleMask = vary;
            break;
         default:
            break;
         }
         break;
      case Program::TYPE_GEOMETRY:
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
      case Program::TYPE_VERTEX:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);

         if (var->data.patch && name != TGSI_SEMANTIC_TESSINNER &&
             name != TGSI_SEMANTIC_TESSOUTER)
            info_out->numPatchConstants = MAX2(info_out->numPatchConstants, index + slots);

         switch (name) {
         case TGSI_SEMANTIC_CLIPDIST:
            info_out->io.genUserClip = -1;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            clipVertexOutput = vary;
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            info_out->io.edgeFlagOut = vary;
            break;
         case TGSI_SEMANTIC_POSITION:
            if (clipVertexOutput < 0)
               clipVertexOutput = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         nv50_ir_varying *v = &info_out->out[vary];
         v->patch = var->data.patch;
         v->sn = name;
         v->si = index + i;
         v->mask |= getMaskForType(type, i) << var->data.location_frac;

         if (nir->info.outputs_read & 1ull << slot)
            v->oread = 1;
      }
      info_out->numOutputs = std::max<uint8_t>(info_out->numOutputs, vary);
   }

   if (info_out->io.genUserClip > 0) {
      info_out->io.clipDistances = info_out->io.genUserClip;

      const unsigned int nOut = (info_out->io.genUserClip + 3) / 4;

      for (unsigned int n = 0; n < nOut; ++n) {
         unsigned int i = info_out->numOutputs++;
         info_out->out[i].id = i;
         info_out->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
         info_out->out[i].si = n;
         info_out->out[i].mask = ((1 << info_out->io.clipDistances) - 1) >> (n * 4);
      }
   }

   return info->assignSlots(info_out) == 0;
}

uint32_t
Converter::getSlotAddress(nir_intrinsic_instr *insn, uint8_t idx, uint8_t slot)
{
   DataType ty;
   int offset = nir_intrinsic_component(insn);
   bool input;

   if (nir_intrinsic_infos[insn->intrinsic].has_dest)
      ty = getDType(insn);
   else
      ty = getSType(insn->src[0], false, false);

   switch (insn->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_per_vertex_input:
      input = true;
      break;
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      input = false;
      break;
   default:
      ERROR("unknown intrinsic in getSlotAddress %s",
            nir_intrinsic_infos[insn->intrinsic].name);
      input = false;
      assert(false);
      break;
   }

   if (typeSizeof(ty) == 8) {
      slot *= 2;
      slot += offset;
      if (slot >= 4) {
         idx += 1;
         slot -= 4;
      }
   } else {
      slot += offset;
   }

   assert(slot < 4);
   assert(!input || idx < PIPE_MAX_SHADER_INPUTS);
   assert(input || idx < PIPE_MAX_SHADER_OUTPUTS);

   const nv50_ir_varying *vary = input ? info_out->in : info_out->out;
   return vary[idx].slot[slot] * 4;
}

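// loadFrom() below splits 64-bit loads from const/buffer memory (or any
// indirectly addressed load) into two 32-bit loads whose halves are fused
// back together with OP_MERGE.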
Instruction *
Converter::loadFrom(DataFile file, uint8_t i, DataType ty, Value *def,
                    uint32_t base, uint8_t c, Value *indirect0,
                    Value *indirect1, bool patch)
{
   unsigned int tySize = typeSizeof(ty);

   if (tySize == 8 &&
       (file == FILE_MEMORY_CONST || file == FILE_MEMORY_BUFFER || indirect0)) {
      Value *lo = getSSA();
      Value *hi = getSSA();

      Instruction *loi =
         mkLoad(TYPE_U32, lo,
                mkSymbol(file, i, TYPE_U32, base + c * tySize),
                indirect0);
      loi->setIndirect(0, 1, indirect1);
      loi->perPatch = patch;

      Instruction *hii =
         mkLoad(TYPE_U32, hi,
                mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),
                indirect0);
      hii->setIndirect(0, 1, indirect1);
      hii->perPatch = patch;

      return mkOp2(OP_MERGE, ty, def, lo, hi);
   } else {
      Instruction *ld =
         mkLoad(ty, def, mkSymbol(file, i, ty, base + c * tySize), indirect0);
      ld->setIndirect(0, 1, indirect1);
      ld->perPatch = patch;
      return ld;
   }
}

void
Converter::storeTo(nir_intrinsic_instr *insn, DataFile file, operation op,
                   DataType ty, Value *src, uint8_t idx, uint8_t c,
                   Value *indirect0, Value *indirect1)
{
   uint8_t size = typeSizeof(ty);
   uint32_t address = getSlotAddress(insn, idx, c);

   if (size == 8 && indirect0) {
      Value *split[2];
      mkSplit(split, 4, src);

      if (op == OP_EXPORT) {
         split[0] = mkMov(getSSA(), split[0], ty)->getDef(0);
         split[1] = mkMov(getSSA(), split[1], ty)->getDef(0);
      }

      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,
              split[0])->perPatch = info_out->out[idx].patch;
      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,
              split[1])->perPatch = info_out->out[idx].patch;
   } else {
      if (op == OP_EXPORT)
         src = mkMov(getSSA(size), src, ty)->getDef(0);
      mkStore(op, ty, mkSymbol(file, 0, ty, address), indirect0,
              src)->perPatch = info_out->out[idx].patch;
   }
}

bool
Converter::parseNIR()
{
   info_out->bin.tlsSpace = nir->scratch_size;
   info_out->io.clipDistances = nir->info.clip_distance_array_size;
   info_out->io.cullDistances = nir->info.cull_distance_array_size;
   info_out->io.layer_viewport_relative = nir->info.layer_viewport_relative;

   switch(prog->getType()) {
   case Program::TYPE_COMPUTE:
      info->prop.cp.numThreads[0] = nir->info.workgroup_size[0];
      info->prop.cp.numThreads[1] = nir->info.workgroup_size[1];
      info->prop.cp.numThreads[2] = nir->info.workgroup_size[2];
      info_out->bin.smemSize = std::max(info_out->bin.smemSize, nir->info.shared_size);
      break;
   case Program::TYPE_FRAGMENT:
      info_out->prop.fp.earlyFragTests = nir->info.fs.early_fragment_tests;
      prog->persampleInvocation =
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID) ||
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS);
      info_out->prop.fp.postDepthCoverage = nir->info.fs.post_depth_coverage;
      info_out->prop.fp.readsSampleLocations =
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS);
      info_out->prop.fp.usesDiscard = nir->info.fs.uses_discard || nir->info.fs.uses_demote;
      info_out->prop.fp.usesSampleMaskIn =
         !!BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_MASK_IN);
      break;
   case Program::TYPE_GEOMETRY:
      info_out->prop.gp.instanceCount = nir->info.gs.invocations;
      info_out->prop.gp.maxVertices = nir->info.gs.vertices_out;
      info_out->prop.gp.outputPrim = nir->info.gs.output_primitive;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      if (nir->info.tess.primitive_mode == GL_ISOLINES)
         info_out->prop.tp.domain = GL_LINES;
      else
         info_out->prop.tp.domain = nir->info.tess.primitive_mode;
      info_out->prop.tp.outputPatchSize = nir->info.tess.tcs_vertices_out;
      info_out->prop.tp.outputPrim =
         nir->info.tess.point_mode ? PIPE_PRIM_POINTS : PIPE_PRIM_TRIANGLES;
      info_out->prop.tp.partitioning = (nir->info.tess.spacing + 1) % 3;
      info_out->prop.tp.winding = !nir->info.tess.ccw;
      break;
   case Program::TYPE_VERTEX:
      info_out->prop.vp.usesDrawParameters =
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_VERTEX) ||
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID);
      break;
   default:
      break;
   }

   return true;
}

bool
Converter::visit(nir_function *function)
{
   assert(function->impl);

   // usually the blocks will set everything up, but main is special
   BasicBlock *entry = new BasicBlock(prog->main);
   exit = new BasicBlock(prog->main);
   blocks[nir_start_block(function->impl)->index] = entry;
   prog->main->setEntry(entry);
   prog->main->setExit(exit);

   setPosition(entry, true);

   if (info_out->io.genUserClip > 0) {
      for (int c = 0; c < 4; ++c)
         clipVtx[c] = getScratch();
   }

   switch (prog->getType()) {
   case Program::TYPE_TESSELLATION_CONTROL:
      outBase = mkOp2v(
         OP_SUB, TYPE_U32, getSSA(),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
      break;
   case Program::TYPE_FRAGMENT: {
      Symbol *sv = mkSysVal(SV_POSITION, 3);
      fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
      fp.position = mkOp1v(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
      break;
   }
   default:
      break;
   }

   nir_index_ssa_defs(function->impl);
   foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
      if (!visit(node))
         return false;
   }

   bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
   setPosition(exit, true);

   if ((prog->getType() == Program::TYPE_VERTEX ||
        prog->getType() == Program::TYPE_TESSELLATION_EVAL)
       && info_out->io.genUserClip > 0)
      handleUserClipPlanes();

   // TODO: for non-main functions this needs to be an OP_RETURN
   mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
   return true;
}

bool
Converter::visit(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_block:
      return visit(nir_cf_node_as_block(node));
   case nir_cf_node_if:
      return visit(nir_cf_node_as_if(node));
   case nir_cf_node_loop:
      return visit(nir_cf_node_as_loop(node));
   default:
      ERROR("unknown nir_cf_node type %u\n", node->type);
      return false;
   }
}

bool
Converter::visit(nir_block *block)
{
   if (!block->predecessors->entries && block->instr_list.is_empty())
      return true;

   BasicBlock *bb = convert(block);

   setPosition(bb, true);
   nir_foreach_instr(insn, block) {
      if (!visit(insn))
         return false;
   }
   return true;
}

bool
Converter::visit(nir_if *nif)
{
   curIfDepth++;

   DataType sType = getSType(nif->condition, false, false);
   Value *src = getSrc(&nif->condition, 0);

   nir_block *lastThen = nir_if_last_then_block(nif);
   nir_block *lastElse = nir_if_last_else_block(nif);

   BasicBlock *headBB = bb;
   BasicBlock *ifBB = convert(nir_if_first_then_block(nif));
   BasicBlock *elseBB = convert(nir_if_first_else_block(nif));

   bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
   bb->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);

   bool insertJoins = lastThen->successors[0] == lastElse->successors[0];
   mkFlow(OP_BRA, elseBB, CC_EQ, src)->setType(sType);

   foreach_list_typed(nir_cf_node, node, node, &nif->then_list) {
      if (!visit(node))
         return false;
   }

   setPosition(convert(lastThen), true);
   if (!bb->isTerminated()) {
      BasicBlock *tailBB = convert(lastThen->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   } else {
      insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
   }

   foreach_list_typed(nir_cf_node, node, node, &nif->else_list) {
      if (!visit(node))
         return false;
   }

   setPosition(convert(lastElse), true);
   if (!bb->isTerminated()) {
      BasicBlock *tailBB = convert(lastElse->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   } else {
      insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
   }

   /* only insert joins for the outermost if */
   if (--curIfDepth)
      insertJoins = false;

   /* we made sure that all threads would converge at the same block */
   if (insertJoins) {
      BasicBlock *conv = convert(lastThen->successors[0]);
      setPosition(headBB->getExit(), false);
      headBB->joinAt = mkFlow(OP_JOINAT, conv, CC_ALWAYS, NULL);
      setPosition(conv, false);
      mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
   }

   return true;
}
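
// The OP_JOINAT/OP_JOIN pair emitted above re-converges divergent threads;
// it is only placed for the outermost if, and only when both branch legs are
// known to reach the same successor block.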

// TODO: add convergence
bool
Converter::visit(nir_loop *loop)
{
   curLoopDepth += 1;
   func->loopNestingBound = std::max(func->loopNestingBound, curLoopDepth);

   BasicBlock *loopBB = convert(nir_loop_first_block(loop));
   BasicBlock *tailBB = convert(nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node)));

   bb->cfg.attach(&loopBB->cfg, Graph::Edge::TREE);

   mkFlow(OP_PREBREAK, tailBB, CC_ALWAYS, NULL);
   setPosition(loopBB, false);
   mkFlow(OP_PRECONT, loopBB, CC_ALWAYS, NULL);

   foreach_list_typed(nir_cf_node, node, node, &loop->body) {
      if (!visit(node))
         return false;
   }

   if (!bb->isTerminated()) {
      mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
   }

   if (tailBB->cfg.incidentCount() == 0)
      loopBB->cfg.attach(&tailBB->cfg, Graph::Edge::TREE);

   curLoopDepth -= 1;

   return true;
}
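
// OP_PREBREAK/OP_PRECONT mark the loop's break and continue targets before
// the body is emitted; a body that does not terminate itself gets an explicit
// OP_CONT plus a BACK edge, and the extra TREE edge keeps an otherwise
// unreachable tail block attached to the CFG.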

bool
Converter::visit(nir_instr *insn)
{
   // we need an insertion point for on-the-fly generated immediate loads
   immInsertPos = bb->getExit();
   switch (insn->type) {
   case nir_instr_type_alu:
      return visit(nir_instr_as_alu(insn));
   case nir_instr_type_intrinsic:
      return visit(nir_instr_as_intrinsic(insn));
   case nir_instr_type_jump:
      return visit(nir_instr_as_jump(insn));
   case nir_instr_type_load_const:
      return visit(nir_instr_as_load_const(insn));
   case nir_instr_type_ssa_undef:
      return visit(nir_instr_as_ssa_undef(insn));
   case nir_instr_type_tex:
      return visit(nir_instr_as_tex(insn));
   default:
      ERROR("unknown nir_instr type %u\n", insn->type);
      return false;
   }
   return true;
}

SVSemantic
Converter::convert(nir_intrinsic_op intr)
{
   switch (intr) {
   case nir_intrinsic_load_base_vertex:
      return SV_BASEVERTEX;
   case nir_intrinsic_load_base_instance:
      return SV_BASEINSTANCE;
   case nir_intrinsic_load_draw_id:
      return SV_DRAWID;
   case nir_intrinsic_load_front_face:
      return SV_FACE;
   case nir_intrinsic_is_helper_invocation:
   case nir_intrinsic_load_helper_invocation:
      return SV_THREAD_KILL;
   case nir_intrinsic_load_instance_id:
      return SV_INSTANCE_ID;
   case nir_intrinsic_load_invocation_id:
      return SV_INVOCATION_ID;
   case nir_intrinsic_load_workgroup_size:
      return SV_NTID;
   case nir_intrinsic_load_local_invocation_id:
      return SV_TID;
   case nir_intrinsic_load_num_workgroups:
      return SV_NCTAID;
   case nir_intrinsic_load_patch_vertices_in:
      return SV_VERTEX_COUNT;
   case nir_intrinsic_load_primitive_id:
      return SV_PRIMITIVE_ID;
   case nir_intrinsic_load_sample_id:
      return SV_SAMPLE_INDEX;
   case nir_intrinsic_load_sample_mask_in:
      return SV_SAMPLE_MASK;
   case nir_intrinsic_load_sample_pos:
      return SV_SAMPLE_POS;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SV_LANEMASK_EQ;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SV_LANEMASK_GE;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SV_LANEMASK_GT;
   case nir_intrinsic_load_subgroup_le_mask:
      return SV_LANEMASK_LE;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SV_LANEMASK_LT;
   case nir_intrinsic_load_subgroup_invocation:
      return SV_LANEID;
   case nir_intrinsic_load_tess_coord:
      return SV_TESS_COORD;
   case nir_intrinsic_load_tess_level_inner:
      return SV_TESS_INNER;
   case nir_intrinsic_load_tess_level_outer:
      return SV_TESS_OUTER;
   case nir_intrinsic_load_vertex_id:
      return SV_VERTEX_ID;
   case nir_intrinsic_load_workgroup_id:
      return SV_CTAID;
   case nir_intrinsic_load_work_dim:
      return SV_WORK_DIM;
   default:
      ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
            nir_intrinsic_infos[intr].name);
      assert(false);
      return SV_LAST;
   }
}

bool
Converter::visit(nir_intrinsic_instr *insn)
{
   nir_intrinsic_op op = insn->intrinsic;
   const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];
   unsigned dest_components = nir_intrinsic_dest_components(insn);

   switch (op) {
   case nir_intrinsic_load_uniform: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirect;
      uint32_t coffset = getIndirect(insn, 0, 0, indirect);
      for (uint8_t i = 0; i < dest_components; ++i) {
         loadFrom(FILE_MEMORY_CONST, 0, dType, newDefs[i], 16 * coffset, i, indirect);
      }
      break;
   }
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output: {
      Value *indirect;
      DataType dType = getSType(insn->src[0], false, false);
      uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);

      for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;

         uint8_t offset = 0;
         Value *src = getSrc(&insn->src[0], i);
         switch (prog->getType()) {
         case Program::TYPE_FRAGMENT: {
            if (info_out->out[idx].sn == TGSI_SEMANTIC_POSITION) {
               // TGSI uses a different interface than NIR: TGSI stores the
               // depth value in the z component, NIR in x
               offset += 2;
               src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
            }
            break;
         }
         case Program::TYPE_GEOMETRY:
         case Program::TYPE_TESSELLATION_EVAL:
         case Program::TYPE_VERTEX: {
            if (info_out->io.genUserClip > 0 && idx == (uint32_t)clipVertexOutput) {
               mkMov(clipVtx[i], src);
               src = clipVtx[i];
            }
            break;
         }
         default:
            break;
         }

         storeTo(insn, FILE_SHADER_OUTPUT, OP_EXPORT, dType, src, idx, i + offset, indirect);
      }
      break;
   }
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output: {
      LValues &newDefs = convert(&insn->dest);

      // FBFetch
      if (prog->getType() == Program::TYPE_FRAGMENT &&
          op == nir_intrinsic_load_output) {
         std::vector<Value*> defs, srcs;
         uint8_t mask = 0;

         srcs.push_back(getSSA());
         srcs.push_back(getSSA());
         Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
         Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
         mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
         mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;

         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));

         for (uint8_t i = 0u; i < dest_components; ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
         }

         TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
         texi->tex.levelZero = 1;
         texi->tex.mask = mask;
         texi->tex.useOffsets = 0;
         texi->tex.r = 0xffff;
         texi->tex.s = 0xffff;

         info_out->prop.fp.readsFramebuffer = true;
         break;
      }

      const DataType dType = getDType(insn);
      Value *indirect;
      bool input = op != nir_intrinsic_load_output;
      operation nvirOp;
      uint32_t mode = 0;

      uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
      nv50_ir_varying& vary = input ? info_out->in[idx] : info_out->out[idx];

      // see load_barycentric_* handling
      if (prog->getType() == Program::TYPE_FRAGMENT) {
         if (op == nir_intrinsic_load_interpolated_input) {
            ImmediateValue immMode;
            if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
               mode = immMode.reg.data.u32;
         }
         if (mode == NV50_IR_INTERP_DEFAULT)
            mode |= translateInterpMode(&vary, nvirOp);
         else {
            if (vary.linear) {
               nvirOp = OP_LINTERP;
               mode |= NV50_IR_INTERP_LINEAR;
            } else {
               nvirOp = OP_PINTERP;
               mode |= NV50_IR_INTERP_PERSPECTIVE;
            }
         }
      }

      for (uint8_t i = 0u; i < dest_components; ++i) {
         uint32_t address = getSlotAddress(insn, idx, i);
         Symbol *sym = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address);
         if (prog->getType() == Program::TYPE_FRAGMENT) {
            int s = 1;
            if (typeSizeof(dType) == 8) {
               Value *lo = getSSA();
               Value *hi = getSSA();
               Instruction *interp;

               interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               Symbol *sym1 = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address + 4);
               interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
            } else {
               Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);
            }
         } else {
            mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
         }
      }
      break;
   }
   case nir_intrinsic_load_barycentric_at_offset:
   case nir_intrinsic_load_barycentric_at_sample:
   case nir_intrinsic_load_barycentric_centroid:
   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_sample: {
      LValues &newDefs = convert(&insn->dest);
      uint32_t mode;

      if (op == nir_intrinsic_load_barycentric_centroid ||
          op == nir_intrinsic_load_barycentric_sample) {
         mode = NV50_IR_INTERP_CENTROID;
      } else if (op == nir_intrinsic_load_barycentric_at_offset) {
         Value *offs[2];
         for (uint8_t c = 0; c < 2; c++) {
            offs[c] = getScratch();
            mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
            mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
            mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
            mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
         }
         mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);

         mode = NV50_IR_INTERP_OFFSET;
      } else if (op == nir_intrinsic_load_barycentric_pixel) {
         mode = NV50_IR_INTERP_DEFAULT;
      } else if (op == nir_intrinsic_load_barycentric_at_sample) {
         info_out->prop.fp.readsSampleLocations = true;
         Value *sample = getSSA();
         mkOp3(OP_SELP, TYPE_U32, sample, mkImm(0), getSrc(&insn->src[0], 0), mkImm(0))
            ->subOp = 2;
         mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], sample)->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
         mode = NV50_IR_INTERP_OFFSET;
      } else {
         unreachable("all intrinsics already handled above");
      }

      loadImm(newDefs[1], mode);
      break;
   }
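   // Note on the at_offset path above: the offsets are clamped to the range
   // the interpolation hardware accepts (here [-0.5, 0.4375]), converted to
   // fixed point, and both components are packed into one register via
   // OP_INSBF.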
   case nir_intrinsic_demote:
   case nir_intrinsic_discard:
      mkOp(OP_DISCARD, TYPE_NONE, NULL);
      break;
   case nir_intrinsic_demote_if:
   case nir_intrinsic_discard_if: {
      Value *pred = getSSA(1, FILE_PREDICATE);
      if (insn->num_components > 1) {
         ERROR("nir_intrinsic_discard_if only with 1 component supported!\n");
         assert(false);
         return false;
      }
      mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
      mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
      break;
   }
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_is_helper_invocation:
   case nir_intrinsic_load_helper_invocation:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_workgroup_size:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_num_workgroups:
   case nir_intrinsic_load_patch_vertices_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_subgroup_eq_mask:
   case nir_intrinsic_load_subgroup_ge_mask:
   case nir_intrinsic_load_subgroup_gt_mask:
   case nir_intrinsic_load_subgroup_le_mask:
   case nir_intrinsic_load_subgroup_lt_mask:
   case nir_intrinsic_load_subgroup_invocation:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_workgroup_id:
   case nir_intrinsic_load_work_dim: {
      const DataType dType = getDType(insn);
      SVSemantic sv = convert(op);
      LValues &newDefs = convert(&insn->dest);

      for (uint8_t i = 0u; i < nir_intrinsic_dest_components(insn); ++i) {
         Value *def;
         if (typeSizeof(dType) == 8)
            def = getSSA();
         else
            def = newDefs[i];

         if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
            loadImm(def, 0u);
         } else {
            Symbol *sym = mkSysVal(sv, i);
            Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
            if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
               rdsv->perPatch = 1;
         }

         if (typeSizeof(dType) == 8)
            mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
      }
      break;
   }
1891
// constants
1892
case nir_intrinsic_load_subgroup_size: {
1893
LValues &newDefs = convert(&insn->dest);
1894
loadImm(newDefs[0], 32u);
1895
break;
1896
}
1897
case nir_intrinsic_vote_all:
1898
case nir_intrinsic_vote_any:
1899
case nir_intrinsic_vote_ieq: {
1900
LValues &newDefs = convert(&insn->dest);
1901
Value *pred = getScratch(1, FILE_PREDICATE);
1902
mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1903
mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
1904
mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
1905
break;
1906
}
1907
case nir_intrinsic_ballot: {
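/* VOTE.ANY with a per-lane predicate yields the mask of active lanes for
 * which the predicate held, which is exactly ballot() on a 32-wide
 * subgroup.
 */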
LValues &newDefs = convert(&insn->dest);
Value *pred = getSSA(1, FILE_PREDICATE);
mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
break;
}
case nir_intrinsic_read_first_invocation:
case nir_intrinsic_read_invocation: {
LValues &newDefs = convert(&insn->dest);
const DataType dType = getDType(insn);
Value *tmp = getScratch();

if (op == nir_intrinsic_read_first_invocation) {
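/* VOTE.ANY on an always-true predicate returns the mask of active lanes;
 * BREV + BFIND (shift-amount mode) then yields the index of the lowest
 * set bit, i.e. the first active lane, which feeds the indexed SHFL below.
 */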
mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
mkOp1(OP_BREV, TYPE_U32, tmp, tmp);
mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
} else
tmp = getSrc(&insn->src[1], 0);

for (uint8_t i = 0; i < dest_components; ++i) {
mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
->subOp = NV50_IR_SUBOP_SHFL_IDX;
}
break;
}
case nir_intrinsic_load_per_vertex_input: {
const DataType dType = getDType(insn);
LValues &newDefs = convert(&insn->dest);
Value *indirectVertex;
Value *indirectOffset;
uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);

Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
mkImm(baseVertex), indirectVertex);
for (uint8_t i = 0u; i < dest_components; ++i) {
uint32_t address = getSlotAddress(insn, idx, i);
loadFrom(FILE_SHADER_INPUT, 0, dType, newDefs[i], address, 0,
indirectOffset, vtxBase, info_out->in[idx].patch);
}
break;
}
case nir_intrinsic_load_per_vertex_output: {
const DataType dType = getDType(insn);
LValues &newDefs = convert(&insn->dest);
Value *indirectVertex;
Value *indirectOffset;
uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
Value *vtxBase = NULL;

if (indirectVertex)
vtxBase = indirectVertex;
else
vtxBase = loadImm(NULL, baseVertex);

vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);

for (uint8_t i = 0u; i < dest_components; ++i) {
uint32_t address = getSlotAddress(insn, idx, i);
loadFrom(FILE_SHADER_OUTPUT, 0, dType, newDefs[i], address, 0,
indirectOffset, vtxBase, info_out->in[idx].patch);
}
break;
}
case nir_intrinsic_emit_vertex: {
if (info_out->io.genUserClip > 0)
handleUserClipPlanes();
uint32_t idx = nir_intrinsic_stream_id(insn);
mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
break;
}
case nir_intrinsic_end_primitive: {
uint32_t idx = nir_intrinsic_stream_id(insn);
if (idx)
break;
mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
break;
}
case nir_intrinsic_load_ubo: {
const DataType dType = getDType(insn);
LValues &newDefs = convert(&insn->dest);
Value *indirectIndex;
Value *indirectOffset;
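/* Note the + 1 below: constant buffer slot 0 is assumed to be reserved
 * for driver-internal data, so user UBO n is accessed through c[n + 1].
 */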
uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex) + 1;
uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

for (uint8_t i = 0u; i < dest_components; ++i) {
loadFrom(FILE_MEMORY_CONST, index, dType, newDefs[i], offset, i,
indirectOffset, indirectIndex);
}
break;
}
case nir_intrinsic_get_ssbo_size: {
LValues &newDefs = convert(&insn->dest);
const DataType dType = getDType(insn);
Value *indirectBuffer;
uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);

Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
break;
}
case nir_intrinsic_store_ssbo: {
DataType sType = getSType(insn->src[0], false, false);
Value *indirectBuffer;
Value *indirectOffset;
uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);

for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
if (!((1u << i) & nir_intrinsic_write_mask(insn)))
continue;
Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
offset + i * typeSizeof(sType));
mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
->setIndirect(0, 1, indirectBuffer);
}
info_out->io.globalAccess |= 0x2;
break;
}
case nir_intrinsic_load_ssbo: {
const DataType dType = getDType(insn);
LValues &newDefs = convert(&insn->dest);
Value *indirectBuffer;
Value *indirectOffset;
uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

for (uint8_t i = 0u; i < dest_components; ++i)
loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
indirectOffset, indirectBuffer);

info_out->io.globalAccess |= 0x1;
break;
}
case nir_intrinsic_shared_atomic_add:
case nir_intrinsic_shared_atomic_and:
case nir_intrinsic_shared_atomic_comp_swap:
case nir_intrinsic_shared_atomic_exchange:
case nir_intrinsic_shared_atomic_or:
case nir_intrinsic_shared_atomic_imax:
case nir_intrinsic_shared_atomic_imin:
case nir_intrinsic_shared_atomic_umax:
case nir_intrinsic_shared_atomic_umin:
case nir_intrinsic_shared_atomic_xor: {
const DataType dType = getDType(insn);
LValues &newDefs = convert(&insn->dest);
Value *indirectOffset;
uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
if (op == nir_intrinsic_shared_atomic_comp_swap)
atom->setSrc(2, getSrc(&insn->src[2], 0));
atom->setIndirect(0, 0, indirectOffset);
atom->subOp = getSubOp(op);
break;
}
case nir_intrinsic_ssbo_atomic_add:
case nir_intrinsic_ssbo_atomic_and:
case nir_intrinsic_ssbo_atomic_comp_swap:
case nir_intrinsic_ssbo_atomic_exchange:
case nir_intrinsic_ssbo_atomic_or:
case nir_intrinsic_ssbo_atomic_imax:
case nir_intrinsic_ssbo_atomic_imin:
case nir_intrinsic_ssbo_atomic_umax:
case nir_intrinsic_ssbo_atomic_umin:
case nir_intrinsic_ssbo_atomic_xor: {
const DataType dType = getDType(insn);
LValues &newDefs = convert(&insn->dest);
Value *indirectBuffer;
Value *indirectOffset;
uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
getSrc(&insn->src[2], 0));
if (op == nir_intrinsic_ssbo_atomic_comp_swap)
atom->setSrc(2, getSrc(&insn->src[3], 0));
atom->setIndirect(0, 0, indirectOffset);
atom->setIndirect(0, 1, indirectBuffer);
atom->subOp = getSubOp(op);

info_out->io.globalAccess |= 0x2;
break;
}
case nir_intrinsic_global_atomic_add:
case nir_intrinsic_global_atomic_and:
case nir_intrinsic_global_atomic_comp_swap:
case nir_intrinsic_global_atomic_exchange:
case nir_intrinsic_global_atomic_or:
case nir_intrinsic_global_atomic_imax:
case nir_intrinsic_global_atomic_imin:
case nir_intrinsic_global_atomic_umax:
case nir_intrinsic_global_atomic_umin:
case nir_intrinsic_global_atomic_xor: {
const DataType dType = getDType(insn);
LValues &newDefs = convert(&insn->dest);
Value *address;
uint32_t offset = getIndirect(&insn->src[0], 0, address);

Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, dType, offset);
Instruction *atom =
mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
if (op == nir_intrinsic_global_atomic_comp_swap)
atom->setSrc(2, getSrc(&insn->src[2], 0));
atom->setIndirect(0, 0, address);
atom->subOp = getSubOp(op);

info_out->io.globalAccess |= 0x2;
break;
}
case nir_intrinsic_bindless_image_atomic_add:
case nir_intrinsic_bindless_image_atomic_and:
case nir_intrinsic_bindless_image_atomic_comp_swap:
case nir_intrinsic_bindless_image_atomic_exchange:
case nir_intrinsic_bindless_image_atomic_imax:
case nir_intrinsic_bindless_image_atomic_umax:
case nir_intrinsic_bindless_image_atomic_imin:
case nir_intrinsic_bindless_image_atomic_umin:
case nir_intrinsic_bindless_image_atomic_or:
case nir_intrinsic_bindless_image_atomic_xor:
case nir_intrinsic_bindless_image_atomic_inc_wrap:
case nir_intrinsic_bindless_image_atomic_dec_wrap:
case nir_intrinsic_bindless_image_load:
case nir_intrinsic_bindless_image_samples:
case nir_intrinsic_bindless_image_size:
case nir_intrinsic_bindless_image_store:
case nir_intrinsic_image_atomic_add:
case nir_intrinsic_image_atomic_and:
case nir_intrinsic_image_atomic_comp_swap:
case nir_intrinsic_image_atomic_exchange:
case nir_intrinsic_image_atomic_imax:
case nir_intrinsic_image_atomic_umax:
case nir_intrinsic_image_atomic_imin:
case nir_intrinsic_image_atomic_umin:
case nir_intrinsic_image_atomic_or:
case nir_intrinsic_image_atomic_xor:
case nir_intrinsic_image_atomic_inc_wrap:
case nir_intrinsic_image_atomic_dec_wrap:
case nir_intrinsic_image_load:
case nir_intrinsic_image_samples:
case nir_intrinsic_image_size:
case nir_intrinsic_image_store: {
std::vector<Value*> srcs, defs;
Value *indirect;
DataType ty;

uint32_t mask = 0;
TexInstruction::Target target =
convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
unsigned int argCount = getNIRArgCount(target);
uint16_t location = 0;

if (opInfo.has_dest) {
LValues &newDefs = convert(&insn->dest);
for (uint8_t i = 0u; i < newDefs.size(); ++i) {
defs.push_back(newDefs[i]);
mask |= 1 << i;
}
}

int lod_src = -1;
bool bindless = false;
switch (op) {
case nir_intrinsic_bindless_image_atomic_add:
case nir_intrinsic_bindless_image_atomic_and:
case nir_intrinsic_bindless_image_atomic_comp_swap:
case nir_intrinsic_bindless_image_atomic_exchange:
case nir_intrinsic_bindless_image_atomic_imax:
case nir_intrinsic_bindless_image_atomic_umax:
case nir_intrinsic_bindless_image_atomic_imin:
case nir_intrinsic_bindless_image_atomic_umin:
case nir_intrinsic_bindless_image_atomic_or:
case nir_intrinsic_bindless_image_atomic_xor:
case nir_intrinsic_bindless_image_atomic_inc_wrap:
case nir_intrinsic_bindless_image_atomic_dec_wrap:
ty = getDType(insn);
bindless = true;
info_out->io.globalAccess |= 0x2;
mask = 0x1;
break;
case nir_intrinsic_image_atomic_add:
case nir_intrinsic_image_atomic_and:
case nir_intrinsic_image_atomic_comp_swap:
case nir_intrinsic_image_atomic_exchange:
case nir_intrinsic_image_atomic_imax:
case nir_intrinsic_image_atomic_umax:
case nir_intrinsic_image_atomic_imin:
case nir_intrinsic_image_atomic_umin:
case nir_intrinsic_image_atomic_or:
case nir_intrinsic_image_atomic_xor:
case nir_intrinsic_image_atomic_inc_wrap:
case nir_intrinsic_image_atomic_dec_wrap:
ty = getDType(insn);
bindless = false;
info_out->io.globalAccess |= 0x2;
mask = 0x1;
break;
case nir_intrinsic_bindless_image_load:
case nir_intrinsic_image_load:
ty = TYPE_U32;
bindless = op == nir_intrinsic_bindless_image_load;
info_out->io.globalAccess |= 0x1;
lod_src = 4;
break;
case nir_intrinsic_bindless_image_store:
case nir_intrinsic_image_store:
ty = TYPE_U32;
bindless = op == nir_intrinsic_bindless_image_store;
info_out->io.globalAccess |= 0x2;
lod_src = 5;
mask = 0xf;
break;
case nir_intrinsic_bindless_image_samples:
mask = 0x8;
FALLTHROUGH;
case nir_intrinsic_image_samples:
ty = TYPE_U32;
bindless = op == nir_intrinsic_bindless_image_samples;
mask = 0x8;
break;
case nir_intrinsic_bindless_image_size:
case nir_intrinsic_image_size:
assert(nir_src_as_uint(insn->src[1]) == 0);
ty = TYPE_U32;
bindless = op == nir_intrinsic_bindless_image_size;
break;
default:
unreachable("unhandled image opcode");
break;
}

if (bindless)
indirect = getSrc(&insn->src[0], 0);
else
location = getIndirect(&insn->src[0], 0, indirect);

// coords
if (opInfo.num_srcs >= 2)
for (unsigned int i = 0u; i < argCount; ++i)
srcs.push_back(getSrc(&insn->src[1], i));

// the sample index is just another src added after coords
if (opInfo.num_srcs >= 3 && target.isMS())
srcs.push_back(getSrc(&insn->src[2], 0));

if (opInfo.num_srcs >= 4 && lod_src != 4) {
unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
for (uint8_t i = 0u; i < components; ++i)
srcs.push_back(getSrc(&insn->src[3], i));
}

if (opInfo.num_srcs >= 5 && lod_src != 5)
// 1 for atomic swap
for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
srcs.push_back(getSrc(&insn->src[4], i));

TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
texi->tex.bindless = bindless;
texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(nir_intrinsic_format(insn));
texi->tex.mask = mask;
texi->cache = convert(nir_intrinsic_access(insn));
texi->setType(ty);
texi->subOp = getSubOp(op);

if (indirect)
texi->setIndirectR(indirect);

break;
}
case nir_intrinsic_store_scratch:
case nir_intrinsic_store_shared: {
DataType sType = getSType(insn->src[0], false, false);
Value *indirectOffset;
uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
if (!((1u << i) & nir_intrinsic_write_mask(insn)))
continue;
Symbol *sym = mkSymbol(getFile(op), 0, sType, offset + i * typeSizeof(sType));
mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
}
break;
}
case nir_intrinsic_load_kernel_input:
case nir_intrinsic_load_scratch:
case nir_intrinsic_load_shared: {
const DataType dType = getDType(insn);
LValues &newDefs = convert(&insn->dest);
Value *indirectOffset;
uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);

for (uint8_t i = 0u; i < dest_components; ++i)
loadFrom(getFile(op), 0, dType, newDefs[i], offset, i, indirectOffset);

break;
}
case nir_intrinsic_control_barrier: {
// TODO: add flag to shader_info
info_out->numBarriers = 1;
Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
bar->fixed = 1;
bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
break;
}
case nir_intrinsic_group_memory_barrier:
case nir_intrinsic_memory_barrier:
case nir_intrinsic_memory_barrier_buffer:
case nir_intrinsic_memory_barrier_image:
case nir_intrinsic_memory_barrier_shared: {
Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
bar->fixed = 1;
bar->subOp = getSubOp(op);
break;
}
case nir_intrinsic_memory_barrier_tcs_patch:
break;
case nir_intrinsic_shader_clock: {
const DataType dType = getDType(insn);
LValues &newDefs = convert(&insn->dest);

loadImm(newDefs[0], 0u);
mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
break;
}
case nir_intrinsic_load_global:
case nir_intrinsic_load_global_constant: {
const DataType dType = getDType(insn);
LValues &newDefs = convert(&insn->dest);
Value *indirectOffset;
uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);

for (auto i = 0u; i < dest_components; ++i)
loadFrom(FILE_MEMORY_GLOBAL, 0, dType, newDefs[i], offset, i, indirectOffset);

info_out->io.globalAccess |= 0x1;
break;
}
case nir_intrinsic_store_global: {
DataType sType = getSType(insn->src[0], false, false);

for (auto i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
if (!((1u << i) & nir_intrinsic_write_mask(insn)))
continue;
if (typeSizeof(sType) == 8) {
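/* 64-bit values are split and written as two 32-bit stores, the second
 * at a + 4 byte offset.
 */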
Value *split[2];
mkSplit(split, 4, getSrc(&insn->src[0], i));

Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType));
mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[0]);

sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType) + 4);
mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[1]);
} else {
Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, sType, i * typeSizeof(sType));
mkStore(OP_STORE, sType, sym, getSrc(&insn->src[1], 0), getSrc(&insn->src[0], i));
}
}

info_out->io.globalAccess |= 0x2;
break;
}
default:
ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
return false;
}

return true;
}

bool
Converter::visit(nir_jump_instr *insn)
{
switch (insn->type) {
case nir_jump_return:
// TODO: this only works in the main function
mkFlow(OP_BRA, exit, CC_ALWAYS, NULL);
bb->cfg.attach(&exit->cfg, Graph::Edge::CROSS);
break;
case nir_jump_break:
case nir_jump_continue: {
bool isBreak = insn->type == nir_jump_break;
nir_block *block = insn->instr.block;
BasicBlock *target = convert(block->successors[0]);
mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
break;
}
default:
ERROR("unknown nir_jump_type %u\n", insn->type);
return false;
}

return true;
}

Value*
Converter::convert(nir_load_const_instr *insn, uint8_t idx)
{
Value *val;

if (immInsertPos)
setPosition(immInsertPos, true);
else
setPosition(bb, false);

switch (insn->def.bit_size) {
case 64:
val = loadImm(getSSA(8), insn->value[idx].u64);
break;
case 32:
val = loadImm(getSSA(4), insn->value[idx].u32);
break;
case 16:
val = loadImm(getSSA(2), insn->value[idx].u16);
break;
case 8:
val = loadImm(getSSA(1), insn->value[idx].u8);
break;
default:
unreachable("unhandled bit size!\n");
}
setPosition(bb, true);
return val;
}

bool
Converter::visit(nir_load_const_instr *insn)
{
assert(insn->def.bit_size <= 64);
immediates[insn->def.index] = insn;
return true;
}

#define DEFAULT_CHECKS \
if (insn->dest.dest.ssa.num_components > 1) { \
ERROR("nir_alu_instr only supported with 1 component!\n"); \
return false; \
} \
if (insn->dest.write_mask != 1) { \
ERROR("nir_alu_instr only with write_mask of 1 supported!\n"); \
return false; \
}
bool
Converter::visit(nir_alu_instr *insn)
{
const nir_op op = insn->op;
const nir_op_info &info = nir_op_infos[op];
DataType dType = getDType(insn);
const std::vector<DataType> sTypes = getSTypes(insn);

Instruction *oldPos = this->bb->getExit();

switch (op) {
case nir_op_fabs:
case nir_op_iabs:
case nir_op_fadd:
case nir_op_iadd:
case nir_op_iand:
case nir_op_fceil:
case nir_op_fcos:
case nir_op_fddx:
case nir_op_fddx_coarse:
case nir_op_fddx_fine:
case nir_op_fddy:
case nir_op_fddy_coarse:
case nir_op_fddy_fine:
case nir_op_fdiv:
case nir_op_idiv:
case nir_op_udiv:
case nir_op_fexp2:
case nir_op_ffloor:
case nir_op_ffma:
case nir_op_flog2:
case nir_op_fmax:
case nir_op_imax:
case nir_op_umax:
case nir_op_fmin:
case nir_op_imin:
case nir_op_umin:
case nir_op_fmod:
case nir_op_imod:
case nir_op_umod:
case nir_op_fmul:
case nir_op_imul:
case nir_op_imul_high:
case nir_op_umul_high:
case nir_op_fneg:
case nir_op_ineg:
case nir_op_inot:
case nir_op_ior:
case nir_op_pack_64_2x32_split:
case nir_op_fpow:
case nir_op_frcp:
case nir_op_frem:
case nir_op_irem:
case nir_op_frsq:
case nir_op_fsat:
case nir_op_ishr:
case nir_op_ushr:
case nir_op_fsin:
case nir_op_fsqrt:
case nir_op_ftrunc:
case nir_op_ishl:
case nir_op_ixor: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
operation preOp = preOperationNeeded(op);
if (preOp != OP_NOP) {
assert(info.num_inputs < 2);
Value *tmp = getSSA(typeSizeof(dType));
Instruction *i0 = mkOp(preOp, dType, tmp);
Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
if (info.num_inputs) {
i0->setSrc(0, getSrc(&insn->src[0]));
i1->setSrc(0, tmp);
}
i1->subOp = getSubOp(op);
} else {
Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
for (unsigned s = 0u; s < info.num_inputs; ++s) {
i->setSrc(s, getSrc(&insn->src[s]));
}
i->subOp = getSubOp(op);
}
break;
}
case nir_op_ifind_msb:
case nir_op_ufind_msb: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
dType = sTypes[0];
mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
break;
}
case nir_op_fround_even: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
break;
}
// convert instructions
case nir_op_f2f32:
case nir_op_f2i32:
case nir_op_f2u32:
case nir_op_i2f32:
case nir_op_i2i32:
case nir_op_u2f32:
case nir_op_u2u32:
case nir_op_f2f64:
case nir_op_f2i64:
case nir_op_f2u64:
case nir_op_i2f64:
case nir_op_i2i64:
case nir_op_u2f64:
case nir_op_u2u64: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
if (op == nir_op_f2i32 || op == nir_op_f2i64 || op == nir_op_f2u32 || op == nir_op_f2u64)
i->rnd = ROUND_Z;
i->sType = sTypes[0];
break;
}
// compare instructions
case nir_op_feq32:
case nir_op_ieq32:
case nir_op_fge32:
case nir_op_ige32:
case nir_op_uge32:
case nir_op_flt32:
case nir_op_ilt32:
case nir_op_ult32:
case nir_op_fneu32:
case nir_op_ine32: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
Instruction *i = mkCmp(getOperation(op),
getCondCode(op),
dType,
newDefs[0],
dType,
getSrc(&insn->src[0]),
getSrc(&insn->src[1]));
if (info.num_inputs == 3)
i->setSrc(2, getSrc(&insn->src[2]));
i->sType = sTypes[0];
break;
}
case nir_op_mov:
case nir_op_vec2:
case nir_op_vec3:
case nir_op_vec4:
case nir_op_vec8:
case nir_op_vec16: {
LValues &newDefs = convert(&insn->dest);
for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
}
break;
}
// (un)pack
case nir_op_pack_64_2x32: {
LValues &newDefs = convert(&insn->dest);
Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
merge->setSrc(0, getSrc(&insn->src[0], 0));
merge->setSrc(1, getSrc(&insn->src[0], 1));
break;
}
case nir_op_pack_half_2x16_split: {
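/* Convert both halves to f16, then insert the high half at bit 16; the
 * INSBF control 0x1010 encodes (width << 8) | offset, i.e. 16 bits at
 * bit 16.
 */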
LValues &newDefs = convert(&insn->dest);
Value *tmpH = getSSA();
Value *tmpL = getSSA();

mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
break;
}
case nir_op_unpack_half_2x16_split_x:
case nir_op_unpack_half_2x16_split_y: {
LValues &newDefs = convert(&insn->dest);
Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
if (op == nir_op_unpack_half_2x16_split_y)
cvt->subOp = 1;
break;
}
case nir_op_unpack_64_2x32: {
LValues &newDefs = convert(&insn->dest);
mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
break;
}
case nir_op_unpack_64_2x32_split_x: {
LValues &newDefs = convert(&insn->dest);
mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
break;
}
case nir_op_unpack_64_2x32_split_y: {
LValues &newDefs = convert(&insn->dest);
mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
break;
}
// special instructions
case nir_op_fsign:
case nir_op_isign: {
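/* sign(x) is assembled from two compares: SET produces 1.0f/0.0f for
 * float destinations and ~0/0 for integer ones, so subtracting the
 * (x < 0) and (x > 0) results in the right order yields -1, 0 or +1 (or
 * the float equivalents); the 64-bit cases below need extra widening.
 */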
DEFAULT_CHECKS;
DataType iType;
if (::isFloatType(dType))
iType = TYPE_F32;
else
iType = TYPE_S32;

LValues &newDefs = convert(&insn->dest);
LValue *val0 = getScratch();
LValue *val1 = getScratch();
mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);

if (dType == TYPE_F64) {
mkOp2(OP_SUB, iType, val0, val0, val1);
mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
} else if (dType == TYPE_S64 || dType == TYPE_U64) {
mkOp2(OP_SUB, iType, val0, val1, val0);
mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
} else if (::isFloatType(dType))
mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
else
mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
break;
}
case nir_op_fcsel:
case nir_op_b32csel: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
break;
}
case nir_op_ibitfield_extract:
case nir_op_ubitfield_extract: {
DEFAULT_CHECKS;
Value *tmp = getSSA();
LValues &newDefs = convert(&insn->dest);
mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
break;
}
case nir_op_bfm: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
mkOp2(OP_BMSK, dType, newDefs[0], getSrc(&insn->src[1]), getSrc(&insn->src[0]))->subOp = NV50_IR_SUBOP_BMSK_W;
break;
}
case nir_op_bitfield_insert: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
LValue *temp = getSSA();
mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
break;
}
case nir_op_bit_count: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
break;
}
case nir_op_bitfield_reverse: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
mkOp1(OP_BREV, TYPE_U32, newDefs[0], getSrc(&insn->src[0]));
break;
}
case nir_op_find_lsb: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
Value *tmp = getSSA();
mkOp1(OP_BREV, TYPE_U32, tmp, getSrc(&insn->src[0]));
mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
break;
}
case nir_op_extract_u8: {
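/* PERMT picks each result byte via a nibble of the control word from the
 * pair {src0, src2}; with src2 == 0, control 0x444s moves byte s of src0
 * into the low byte and zero-fills the rest. The signed variants below
 * build controls whose high selector bits sign-replicate instead.
 */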
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
Value *prmt = getSSA();
mkOp2(OP_OR, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x4440));
mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
break;
}
case nir_op_extract_i8: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
Value *prmt = getSSA();
mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x1111), loadImm(NULL, 0x8880));
mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
break;
}
case nir_op_extract_u16: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
Value *prmt = getSSA();
mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x22), loadImm(NULL, 0x4410));
mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
break;
}
case nir_op_extract_i16: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
Value *prmt = getSSA();
mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x2222), loadImm(NULL, 0x9910));
mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
break;
}
case nir_op_urol: {
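/* Rotates are funnel shifts (SHF) with the same register as both the low
 * and the high input, so bits shifted out one end re-enter at the other.
 */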
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
getSrc(&insn->src[1]), getSrc(&insn->src[0]))
->subOp = NV50_IR_SUBOP_SHF_L |
NV50_IR_SUBOP_SHF_W |
NV50_IR_SUBOP_SHF_HI;
break;
}
case nir_op_uror: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
getSrc(&insn->src[1]), getSrc(&insn->src[0]))
->subOp = NV50_IR_SUBOP_SHF_R |
NV50_IR_SUBOP_SHF_W |
NV50_IR_SUBOP_SHF_LO;
break;
}
// boolean conversions
case nir_op_b2f32: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
break;
}
case nir_op_b2f64: {
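/* 0x3ff00000 is the high word of the IEEE-754 double 1.0, so masking the
 * boolean (all ones or zero) with it and merging with a zero low word
 * yields exactly 0.0 or 1.0.
 */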
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
Value *tmp = getSSA(4);
mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
break;
}
case nir_op_f2b32:
case nir_op_i2b32: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
Value *src1;
if (typeSizeof(sTypes[0]) == 8) {
src1 = loadImm(getSSA(8), 0.0);
} else {
src1 = zero;
}
CondCode cc = op == nir_op_f2b32 ? CC_NEU : CC_NE;
mkCmp(OP_SET, cc, TYPE_U32, newDefs[0], sTypes[0], getSrc(&insn->src[0]), src1);
break;
}
case nir_op_b2i32: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
break;
}
case nir_op_b2i64: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
LValue *def = getScratch();
mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
break;
}
default:
ERROR("unknown nir_op %s\n", info.name);
assert(false);
return false;
}

if (!oldPos) {
oldPos = this->bb->getEntry();
oldPos->precise = insn->exact;
}

if (unlikely(!oldPos))
return true;

while (oldPos->next) {
oldPos = oldPos->next;
oldPos->precise = insn->exact;
}
oldPos->saturate = insn->dest.saturate;

return true;
}
#undef DEFAULT_CHECKS

bool
Converter::visit(nir_ssa_undef_instr *insn)
{
LValues &newDefs = convert(&insn->def);
for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
}
return true;
}

#define CASE_SAMPLER(ty) \
case GLSL_SAMPLER_DIM_ ## ty : \
if (isArray && !isShadow) \
return TEX_TARGET_ ## ty ## _ARRAY; \
else if (!isArray && isShadow) \
return TEX_TARGET_## ty ## _SHADOW; \
else if (isArray && isShadow) \
return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
else \
return TEX_TARGET_ ## ty

TexTarget
Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
{
switch (dim) {
CASE_SAMPLER(1D);
CASE_SAMPLER(2D);
CASE_SAMPLER(CUBE);
case GLSL_SAMPLER_DIM_3D:
return TEX_TARGET_3D;
case GLSL_SAMPLER_DIM_MS:
if (isArray)
return TEX_TARGET_2D_MS_ARRAY;
return TEX_TARGET_2D_MS;
case GLSL_SAMPLER_DIM_RECT:
if (isShadow)
return TEX_TARGET_RECT_SHADOW;
return TEX_TARGET_RECT;
case GLSL_SAMPLER_DIM_BUF:
return TEX_TARGET_BUFFER;
case GLSL_SAMPLER_DIM_EXTERNAL:
return TEX_TARGET_2D;
default:
ERROR("unknown glsl_sampler_dim %u\n", dim);
assert(false);
return TEX_TARGET_COUNT;
}
}
#undef CASE_SAMPLER

Value*
Converter::applyProjection(Value *src, Value *proj)
{
if (!proj)
return src;
return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
}

unsigned int
Converter::getNIRArgCount(TexInstruction::Target& target)
{
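/* codegen's getArgCount() counts what the hardware expects (e.g. the
 * sample index for MS targets), while NIR passes some of those as
 * separate sources; adjust the count to what NIR actually supplies.
 */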
unsigned int result = target.getArgCount();
if (target.isCube() && target.isArray())
result--;
if (target.isMS())
result--;
return result;
}

CacheMode
Converter::convert(enum gl_access_qualifier access)
{
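/* Map GL access qualifiers onto cache modes: volatile accesses bypass
 * the caches (CV), coherent ones are cached at the global level only
 * (CG), everything else takes the default cache-all path (CA).
 */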
if (access & ACCESS_VOLATILE)
return CACHE_CV;
if (access & ACCESS_COHERENT)
return CACHE_CG;
return CACHE_CA;
}

bool
Converter::visit(nir_tex_instr *insn)
{
switch (insn->op) {
case nir_texop_lod:
case nir_texop_query_levels:
case nir_texop_tex:
case nir_texop_texture_samples:
case nir_texop_tg4:
case nir_texop_txb:
case nir_texop_txd:
case nir_texop_txf:
case nir_texop_txf_ms:
case nir_texop_txl:
case nir_texop_txs: {
LValues &newDefs = convert(&insn->dest);
std::vector<Value*> srcs;
std::vector<Value*> defs;
std::vector<nir_src*> offsets;
uint8_t mask = 0;
bool lz = false;
Value *proj = NULL;
TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
operation op = getOperation(insn->op);

int r, s;
int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);

bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
assert((sampHandleIdx != -1) == (texHandleIdx != -1));

if (projIdx != -1)
proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));

srcs.resize(insn->coord_components);
for (uint8_t i = 0u; i < insn->coord_components; ++i)
srcs[i] = applyProjection(getSrc(&insn->src[coordsIdx].src, i), proj);

// sometimes we get fewer args than target.getArgCount, but codegen expects the latter
if (insn->coord_components) {
uint32_t argCount = target.getArgCount();

if (target.isMS())
argCount -= 1;

for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
srcs.push_back(getSSA());
}

if (insn->op == nir_texop_texture_samples)
srcs.push_back(zero);
else if (!insn->num_srcs)
srcs.push_back(loadImm(NULL, 0));
if (biasIdx != -1)
srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
if (lodIdx != -1)
srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
else if (op == OP_TXF)
lz = true;
if (msIdx != -1)
srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
if (offsetIdx != -1)
offsets.push_back(&insn->src[offsetIdx].src);
if (compIdx != -1)
srcs.push_back(applyProjection(getSrc(&insn->src[compIdx].src, 0), proj));
if (texOffIdx != -1) {
srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
texOffIdx = srcs.size() - 1;
}
if (sampOffIdx != -1) {
srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
sampOffIdx = srcs.size() - 1;
}
if (bindless) {
// currently we use the lower bits
Value *split[2];
Value *handle = getSrc(&insn->src[sampHandleIdx].src, 0);

mkSplit(split, 4, handle);

srcs.push_back(split[0]);
texOffIdx = srcs.size() - 1;
}

r = bindless ? 0xff : insn->texture_index;
s = bindless ? 0x1f : insn->sampler_index;

defs.resize(newDefs.size());
for (uint8_t d = 0u; d < newDefs.size(); ++d) {
defs[d] = newDefs[d];
mask |= 1 << d;
}
if (target.isMS() || (op == OP_TEX && prog->getType() != Program::TYPE_FRAGMENT))
lz = true;

TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
texi->tex.levelZero = lz;
texi->tex.mask = mask;
texi->tex.bindless = bindless;

if (texOffIdx != -1)
texi->tex.rIndirectSrc = texOffIdx;
if (sampOffIdx != -1)
texi->tex.sIndirectSrc = sampOffIdx;

switch (insn->op) {
case nir_texop_tg4:
if (!target.isShadow())
texi->tex.gatherComp = insn->component;
break;
case nir_texop_txs:
texi->tex.query = TXQ_DIMS;
break;
case nir_texop_texture_samples:
texi->tex.mask = 0x4;
texi->tex.query = TXQ_TYPE;
break;
case nir_texop_query_levels:
texi->tex.mask = 0x8;
texi->tex.query = TXQ_DIMS;
break;
default:
break;
}

texi->tex.useOffsets = offsets.size();
if (texi->tex.useOffsets) {
for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
for (uint32_t c = 0u; c < 3; ++c) {
uint8_t s2 = std::min(c, target.getDim() - 1);
texi->offset[s][c].set(getSrc(offsets[s], s2));
texi->offset[s][c].setInsn(texi);
}
}
}

if (op == OP_TXG && offsetIdx == -1) {
if (nir_tex_instr_has_explicit_tg4_offsets(insn)) {
texi->tex.useOffsets = 4;
setPosition(texi, false);
for (uint8_t i = 0; i < 4; ++i) {
for (uint8_t j = 0; j < 2; ++j) {
texi->offset[i][j].set(loadImm(NULL, insn->tg4_offsets[i][j]));
texi->offset[i][j].setInsn(texi);
}
}
setPosition(texi, true);
}
}

if (ddxIdx != -1 && ddyIdx != -1) {
for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
}
}

break;
}
default:
ERROR("unknown nir_texop %u\n", insn->op);
return false;
}
return true;
}

bool
Converter::run()
{
bool progress;

if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
nir_print_shader(nir, stderr);

struct nir_lower_subgroups_options subgroup_options = {};
subgroup_options.subgroup_size = 32;
subgroup_options.ballot_bit_size = 32;
subgroup_options.ballot_components = 1;
subgroup_options.lower_elect = true;

/* prepare for IO lowering */
NIR_PASS_V(nir, nir_opt_deref);
NIR_PASS_V(nir, nir_lower_regs_to_ssa);
NIR_PASS_V(nir, nir_lower_vars_to_ssa);

/* codegen assumes vec4 alignment for memory */
NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_function_temp, function_temp_type_info);
NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_function_temp, nir_address_format_32bit_offset);
NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);

NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
type_size, (nir_lower_io_options)0);

NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);

NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS_V(nir, nir_lower_phis_to_scalar, false);

/*TODO: improve this lowering/optimisation loop so that we can use
 * nir_opt_idiv_const effectively before this.
 */
nir_lower_idiv_options idiv_options = {
.imprecise_32bit_lowering = false,
.allow_fp16 = true,
};
NIR_PASS(progress, nir, nir_lower_idiv, &idiv_options);
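/* Run the usual NIR cleanup passes to a fixed point before leaving SSA. */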

do {
progress = false;
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_opt_trivial_continues);
NIR_PASS(progress, nir, nir_opt_cse);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_dead_cf);
} while (progress);

NIR_PASS_V(nir, nir_lower_bool_to_int32);
NIR_PASS_V(nir, nir_convert_from_ssa, true);

// Garbage collect dead instructions
nir_sweep(nir);

if (!parseNIR()) {
ERROR("Couldn't parse NIR!\n");
return false;
}

if (!assignSlots()) {
ERROR("Couldn't assign slots!\n");
return false;
}

if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
nir_print_shader(nir, stderr);

nir_foreach_function(function, nir) {
if (!visit(function))
return false;
}

return true;
}

} // unnamed namespace

namespace nv50_ir {

bool
Program::makeFromNIR(struct nv50_ir_prog_info *info,
struct nv50_ir_prog_info_out *info_out)
{
nir_shader *nir = (nir_shader*)info->bin.source;
Converter converter(this, nir, info, info_out);
bool result = converter.run();
if (!result)
return result;
LoweringHelper lowering;
lowering.run(this);
tlsSize = info_out->bin.tlsSpace;
return result;
}

} // namespace nv50_ir

static nir_shader_compiler_options
nvir_nir_shader_compiler_options(int chipset)
{
nir_shader_compiler_options op = {};
op.lower_fdiv = (chipset >= NVISA_GV100_CHIPSET);
op.lower_ffma16 = false;
op.lower_ffma32 = false;
op.lower_ffma64 = false;
op.fuse_ffma16 = false; /* nir doesn't track mad vs fma */
op.fuse_ffma32 = false; /* nir doesn't track mad vs fma */
op.fuse_ffma64 = false; /* nir doesn't track mad vs fma */
op.lower_flrp16 = (chipset >= NVISA_GV100_CHIPSET);
op.lower_flrp32 = true;
op.lower_flrp64 = true;
op.lower_fpow = false; // TODO: nir's lowering is broken, or we could use it
op.lower_fsat = false;
op.lower_fsqrt = false; // TODO: only before gm200
op.lower_sincos = false;
op.lower_fmod = true;
op.lower_bitfield_extract = false;
op.lower_bitfield_extract_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
op.lower_bitfield_insert = false;
op.lower_bitfield_insert_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
op.lower_bitfield_insert_to_bitfield_select = false;
op.lower_bitfield_reverse = false;
op.lower_bit_count = false;
op.lower_ifind_msb = false;
op.lower_find_lsb = false;
op.lower_uadd_carry = true; // TODO
op.lower_usub_borrow = true; // TODO
op.lower_mul_high = false;
op.lower_fneg = false;
op.lower_ineg = false;
op.lower_scmp = true; // TODO: not implemented yet
op.lower_vector_cmp = false;
op.lower_bitops = false;
op.lower_isign = (chipset >= NVISA_GV100_CHIPSET);
op.lower_fsign = (chipset >= NVISA_GV100_CHIPSET);
op.lower_fdph = false;
op.lower_fdot = false;
op.fdot_replicates = false; // TODO
op.lower_ffloor = false; // TODO
op.lower_ffract = true;
op.lower_fceil = false; // TODO
op.lower_ftrunc = false;
op.lower_ldexp = true;
op.lower_pack_half_2x16 = true;
op.lower_pack_unorm_2x16 = true;
op.lower_pack_snorm_2x16 = true;
op.lower_pack_unorm_4x8 = true;
op.lower_pack_snorm_4x8 = true;
op.lower_unpack_half_2x16 = true;
op.lower_unpack_unorm_2x16 = true;
op.lower_unpack_snorm_2x16 = true;
op.lower_unpack_unorm_4x8 = true;
op.lower_unpack_snorm_4x8 = true;
op.lower_pack_split = false;
op.lower_extract_byte = (chipset < NVISA_GM107_CHIPSET);
op.lower_extract_word = (chipset < NVISA_GM107_CHIPSET);
op.lower_insert_byte = true;
op.lower_insert_word = true;
op.lower_all_io_to_temps = false;
op.lower_all_io_to_elements = false;
op.vertex_id_zero_based = false;
op.lower_base_vertex = false;
op.lower_helper_invocation = false;
op.optimize_sample_mask_in = false;
op.lower_cs_local_index_from_id = true;
op.lower_cs_local_id_from_index = false;
op.lower_device_index_to_zero = false; // TODO
op.lower_wpos_pntc = false; // TODO
op.lower_hadd = true; // TODO
op.lower_add_sat = true; // TODO
op.vectorize_io = false;
op.lower_to_scalar = false;
op.unify_interfaces = false;
op.use_interpolated_input_intrinsics = true;
op.lower_mul_2x32_64 = true; // TODO
op.lower_rotate = (chipset < NVISA_GV100_CHIPSET);
op.has_imul24 = false;
op.intel_vec4 = false;
op.max_unroll_iterations = 32;
op.lower_int64_options = (nir_lower_int64_options) (
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul64 : 0) |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_isign64 : 0) |
nir_lower_divmod64 |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_high64 : 0) |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_mov64 : 0) |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_icmp64 : 0) |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_iabs64 : 0) |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ineg64 : 0) |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_logic64 : 0) |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_minmax64 : 0) |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_shift64 : 0) |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_2x32_64 : 0) |
((chipset >= NVISA_GM107_CHIPSET) ? nir_lower_extract64 : 0) |
nir_lower_ufind_msb64
);
op.lower_doubles_options = (nir_lower_doubles_options) (
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drcp : 0) |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsqrt : 0) |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drsq : 0) |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dfract : 0) |
nir_lower_dmod |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsub : 0) |
((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ddiv : 0)
);
return op;
}

static const nir_shader_compiler_options gf100_nir_shader_compiler_options =
nvir_nir_shader_compiler_options(NVISA_GF100_CHIPSET);
static const nir_shader_compiler_options gm107_nir_shader_compiler_options =
nvir_nir_shader_compiler_options(NVISA_GM107_CHIPSET);
static const nir_shader_compiler_options gv100_nir_shader_compiler_options =
nvir_nir_shader_compiler_options(NVISA_GV100_CHIPSET);

const nir_shader_compiler_options *
nv50_ir_nir_shader_compiler_options(int chipset)
{
if (chipset >= NVISA_GV100_CHIPSET)
return &gv100_nir_shader_compiler_options;
if (chipset >= NVISA_GM107_CHIPSET)
return &gm107_nir_shader_compiler_options;
return &gf100_nir_shader_compiler_options;
}