Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/nouveau/codegen/nv50_ir_from_tgsi.cpp
4574 views
1
/*
2
* Copyright 2011 Christoph Bumiller
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice shall be included in
12
* all copies or substantial portions of the Software.
13
*
14
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20
* OTHER DEALINGS IN THE SOFTWARE.
21
*/
22
23
#include "tgsi/tgsi_build.h"
24
#include "tgsi/tgsi_dump.h"
25
#include "tgsi/tgsi_scan.h"
26
#include "tgsi/tgsi_util.h"
27
28
#include <set>
29
30
#include "codegen/nv50_ir.h"
31
#include "codegen/nv50_ir_from_common.h"
32
#include "codegen/nv50_ir_util.h"
33
34
namespace tgsi {
35
36
class Source;
37
38
static nv50_ir::operation translateOpcode(uint opcode);
39
static nv50_ir::DataFile translateFile(uint file);
40
static nv50_ir::TexTarget translateTexture(uint texTarg);
41
static nv50_ir::SVSemantic translateSysVal(uint sysval);
42
static nv50_ir::CacheMode translateCacheMode(uint qualifier);
43
44
class Instruction
45
{
46
public:
47
Instruction(const struct tgsi_full_instruction *inst) : insn(inst) { }
48
49
class SrcRegister
50
{
51
public:
52
SrcRegister(const struct tgsi_full_src_register *src)
53
: reg(src->Register),
54
fsr(src)
55
{ }
56
57
SrcRegister(const struct tgsi_src_register& src) : reg(src), fsr(NULL) { }
58
59
SrcRegister(const struct tgsi_ind_register& ind)
60
: reg(tgsi_util_get_src_from_ind(&ind)),
61
fsr(NULL)
62
{ }
63
64
struct tgsi_src_register offsetToSrc(struct tgsi_texture_offset off)
65
{
66
struct tgsi_src_register reg;
67
memset(&reg, 0, sizeof(reg));
68
reg.Index = off.Index;
69
reg.File = off.File;
70
reg.SwizzleX = off.SwizzleX;
71
reg.SwizzleY = off.SwizzleY;
72
reg.SwizzleZ = off.SwizzleZ;
73
return reg;
74
}
75
76
SrcRegister(const struct tgsi_texture_offset& off) :
77
reg(offsetToSrc(off)),
78
fsr(NULL)
79
{ }
80
81
uint getFile() const { return reg.File; }
82
83
bool is2D() const { return reg.Dimension; }
84
85
bool isIndirect(int dim) const
86
{
87
return (dim && fsr) ? fsr->Dimension.Indirect : reg.Indirect;
88
}
89
90
int getIndex(int dim) const
91
{
92
return (dim && fsr) ? fsr->Dimension.Index : reg.Index;
93
}
94
95
int getSwizzle(int chan) const
96
{
97
return tgsi_util_get_src_register_swizzle(&reg, chan);
98
}
99
100
int getArrayId() const
101
{
102
if (isIndirect(0))
103
return fsr->Indirect.ArrayID;
104
return 0;
105
}
106
107
nv50_ir::Modifier getMod(int chan) const;
108
109
SrcRegister getIndirect(int dim) const
110
{
111
assert(fsr && isIndirect(dim));
112
if (dim)
113
return SrcRegister(fsr->DimIndirect);
114
return SrcRegister(fsr->Indirect);
115
}
116
117
uint32_t getValueU32(int c, const uint32_t *data) const
118
{
119
assert(reg.File == TGSI_FILE_IMMEDIATE);
120
assert(!reg.Absolute);
121
assert(!reg.Negate);
122
return data[reg.Index * 4 + getSwizzle(c)];
123
}
124
125
private:
126
const struct tgsi_src_register reg;
127
const struct tgsi_full_src_register *fsr;
128
};
129
130
class DstRegister
131
{
132
public:
133
DstRegister(const struct tgsi_full_dst_register *dst)
134
: reg(dst->Register),
135
fdr(dst)
136
{ }
137
138
DstRegister(const struct tgsi_dst_register& dst) : reg(dst), fdr(NULL) { }
139
140
uint getFile() const { return reg.File; }
141
142
bool is2D() const { return reg.Dimension; }
143
144
bool isIndirect(int dim) const
145
{
146
return (dim && fdr) ? fdr->Dimension.Indirect : reg.Indirect;
147
}
148
149
int getIndex(int dim) const
150
{
151
return (dim && fdr) ? fdr->Dimension.Dimension : reg.Index;
152
}
153
154
unsigned int getMask() const { return reg.WriteMask; }
155
156
bool isMasked(int chan) const { return !(getMask() & (1 << chan)); }
157
158
SrcRegister getIndirect(int dim) const
159
{
160
assert(fdr && isIndirect(dim));
161
if (dim)
162
return SrcRegister(fdr->DimIndirect);
163
return SrcRegister(fdr->Indirect);
164
}
165
166
struct tgsi_full_src_register asSrc()
167
{
168
assert(fdr);
169
return tgsi_full_src_register_from_dst(fdr);
170
}
171
172
int getArrayId() const
173
{
174
if (isIndirect(0))
175
return fdr->Indirect.ArrayID;
176
return 0;
177
}
178
179
private:
180
const struct tgsi_dst_register reg;
181
const struct tgsi_full_dst_register *fdr;
182
};
183
184
inline uint getOpcode() const { return insn->Instruction.Opcode; }
185
186
unsigned int srcCount() const { return insn->Instruction.NumSrcRegs; }
187
unsigned int dstCount() const { return insn->Instruction.NumDstRegs; }
188
189
// mask of used components of source s
190
unsigned int srcMask(unsigned int s) const;
191
unsigned int texOffsetMask() const;
192
193
SrcRegister getSrc(unsigned int s) const
194
{
195
assert(s < srcCount());
196
return SrcRegister(&insn->Src[s]);
197
}
198
199
DstRegister getDst(unsigned int d) const
200
{
201
assert(d < dstCount());
202
return DstRegister(&insn->Dst[d]);
203
}
204
205
SrcRegister getTexOffset(unsigned int i) const
206
{
207
assert(i < TGSI_FULL_MAX_TEX_OFFSETS);
208
return SrcRegister(insn->TexOffsets[i]);
209
}
210
211
unsigned int getNumTexOffsets() const { return insn->Texture.NumOffsets; }
212
213
bool checkDstSrcAliasing() const;
214
215
inline nv50_ir::operation getOP() const {
216
return translateOpcode(getOpcode()); }
217
218
nv50_ir::DataType inferSrcType() const;
219
nv50_ir::DataType inferDstType() const;
220
221
nv50_ir::CondCode getSetCond() const;
222
223
nv50_ir::TexInstruction::Target getTexture(const Source *, int s) const;
224
225
const nv50_ir::TexInstruction::ImgFormatDesc *getImageFormat() const {
226
return nv50_ir::TexInstruction::translateImgFormat((enum pipe_format)insn->Memory.Format);
227
}
228
229
nv50_ir::TexTarget getImageTarget() const {
230
return translateTexture(insn->Memory.Texture);
231
}
232
233
nv50_ir::CacheMode getCacheMode() const {
234
if (!insn->Instruction.Memory)
235
return nv50_ir::CACHE_CA;
236
return translateCacheMode(insn->Memory.Qualifier);
237
}
238
239
inline uint getLabel() { return insn->Label.Label; }
240
241
unsigned getSaturate() const { return insn->Instruction.Saturate; }
242
243
void print() const
244
{
245
tgsi_dump_instruction(insn, 1);
246
}
247
248
private:
249
const struct tgsi_full_instruction *insn;
250
};
251
252
unsigned int Instruction::texOffsetMask() const
253
{
254
const struct tgsi_instruction_texture *tex = &insn->Texture;
255
assert(insn->Instruction.Texture);
256
257
switch (tex->Texture) {
258
case TGSI_TEXTURE_BUFFER:
259
case TGSI_TEXTURE_1D:
260
case TGSI_TEXTURE_SHADOW1D:
261
case TGSI_TEXTURE_1D_ARRAY:
262
case TGSI_TEXTURE_SHADOW1D_ARRAY:
263
return 0x1;
264
case TGSI_TEXTURE_2D:
265
case TGSI_TEXTURE_SHADOW2D:
266
case TGSI_TEXTURE_2D_ARRAY:
267
case TGSI_TEXTURE_SHADOW2D_ARRAY:
268
case TGSI_TEXTURE_RECT:
269
case TGSI_TEXTURE_SHADOWRECT:
270
case TGSI_TEXTURE_2D_MSAA:
271
case TGSI_TEXTURE_2D_ARRAY_MSAA:
272
return 0x3;
273
case TGSI_TEXTURE_3D:
274
return 0x7;
275
default:
276
assert(!"Unexpected texture target");
277
return 0xf;
278
}
279
}
280
281
unsigned int Instruction::srcMask(unsigned int s) const
282
{
283
unsigned int mask = insn->Dst[0].Register.WriteMask;
284
285
switch (insn->Instruction.Opcode) {
286
case TGSI_OPCODE_COS:
287
case TGSI_OPCODE_SIN:
288
return (mask & 0x8) | ((mask & 0x7) ? 0x1 : 0x0);
289
case TGSI_OPCODE_DP2:
290
return 0x3;
291
case TGSI_OPCODE_DP3:
292
return 0x7;
293
case TGSI_OPCODE_DP4:
294
case TGSI_OPCODE_KILL_IF: /* WriteMask ignored */
295
return 0xf;
296
case TGSI_OPCODE_DST:
297
return mask & (s ? 0xa : 0x6);
298
case TGSI_OPCODE_EX2:
299
case TGSI_OPCODE_EXP:
300
case TGSI_OPCODE_LG2:
301
case TGSI_OPCODE_LOG:
302
case TGSI_OPCODE_POW:
303
case TGSI_OPCODE_RCP:
304
case TGSI_OPCODE_RSQ:
305
return 0x1;
306
case TGSI_OPCODE_IF:
307
case TGSI_OPCODE_UIF:
308
return 0x1;
309
case TGSI_OPCODE_LIT:
310
return 0xb;
311
case TGSI_OPCODE_TEX2:
312
case TGSI_OPCODE_TXB2:
313
case TGSI_OPCODE_TXL2:
314
return (s == 0) ? 0xf : 0x3;
315
case TGSI_OPCODE_TEX:
316
case TGSI_OPCODE_TXB:
317
case TGSI_OPCODE_TXD:
318
case TGSI_OPCODE_TXL:
319
case TGSI_OPCODE_TXP:
320
case TGSI_OPCODE_TXF:
321
case TGSI_OPCODE_TG4:
322
case TGSI_OPCODE_TEX_LZ:
323
case TGSI_OPCODE_TXF_LZ:
324
case TGSI_OPCODE_LODQ:
325
{
326
const struct tgsi_instruction_texture *tex = &insn->Texture;
327
328
assert(insn->Instruction.Texture);
329
330
mask = 0x7;
331
if (insn->Instruction.Opcode != TGSI_OPCODE_TEX &&
332
insn->Instruction.Opcode != TGSI_OPCODE_TEX_LZ &&
333
insn->Instruction.Opcode != TGSI_OPCODE_TXF_LZ &&
334
insn->Instruction.Opcode != TGSI_OPCODE_TXD)
335
mask |= 0x8; /* bias, lod or proj */
336
337
switch (tex->Texture) {
338
case TGSI_TEXTURE_1D:
339
mask &= 0x9;
340
break;
341
case TGSI_TEXTURE_SHADOW1D:
342
mask &= 0xd;
343
break;
344
case TGSI_TEXTURE_1D_ARRAY:
345
case TGSI_TEXTURE_2D:
346
case TGSI_TEXTURE_RECT:
347
mask &= 0xb;
348
break;
349
case TGSI_TEXTURE_CUBE_ARRAY:
350
case TGSI_TEXTURE_SHADOW2D_ARRAY:
351
case TGSI_TEXTURE_SHADOWCUBE:
352
case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
353
mask |= 0x8;
354
break;
355
default:
356
break;
357
}
358
}
359
return mask;
360
case TGSI_OPCODE_TXQ:
361
return 1;
362
case TGSI_OPCODE_D2I:
363
case TGSI_OPCODE_D2U:
364
case TGSI_OPCODE_D2F:
365
case TGSI_OPCODE_DSLT:
366
case TGSI_OPCODE_DSGE:
367
case TGSI_OPCODE_DSEQ:
368
case TGSI_OPCODE_DSNE:
369
case TGSI_OPCODE_U64SEQ:
370
case TGSI_OPCODE_U64SNE:
371
case TGSI_OPCODE_I64SLT:
372
case TGSI_OPCODE_U64SLT:
373
case TGSI_OPCODE_I64SGE:
374
case TGSI_OPCODE_U64SGE:
375
case TGSI_OPCODE_I642F:
376
case TGSI_OPCODE_U642F:
377
switch (util_bitcount(mask)) {
378
case 1: return 0x3;
379
case 2: return 0xf;
380
default:
381
assert(!"unexpected mask");
382
return 0xf;
383
}
384
case TGSI_OPCODE_I2D:
385
case TGSI_OPCODE_U2D:
386
case TGSI_OPCODE_F2D: {
387
unsigned int x = 0;
388
if ((mask & 0x3) == 0x3)
389
x |= 1;
390
if ((mask & 0xc) == 0xc)
391
x |= 2;
392
return x;
393
}
394
case TGSI_OPCODE_PK2H:
395
return 0x3;
396
case TGSI_OPCODE_UP2H:
397
return 0x1;
398
default:
399
break;
400
}
401
402
return mask;
403
}
404
405
nv50_ir::Modifier Instruction::SrcRegister::getMod(int chan) const
406
{
407
nv50_ir::Modifier m(0);
408
409
if (reg.Absolute)
410
m = m | nv50_ir::Modifier(NV50_IR_MOD_ABS);
411
if (reg.Negate)
412
m = m | nv50_ir::Modifier(NV50_IR_MOD_NEG);
413
return m;
414
}
415
416
static nv50_ir::DataFile translateFile(uint file)
417
{
418
switch (file) {
419
case TGSI_FILE_CONSTANT: return nv50_ir::FILE_MEMORY_CONST;
420
case TGSI_FILE_INPUT: return nv50_ir::FILE_SHADER_INPUT;
421
case TGSI_FILE_OUTPUT: return nv50_ir::FILE_SHADER_OUTPUT;
422
case TGSI_FILE_TEMPORARY: return nv50_ir::FILE_GPR;
423
case TGSI_FILE_ADDRESS: return nv50_ir::FILE_ADDRESS;
424
case TGSI_FILE_IMMEDIATE: return nv50_ir::FILE_IMMEDIATE;
425
case TGSI_FILE_SYSTEM_VALUE: return nv50_ir::FILE_SYSTEM_VALUE;
426
case TGSI_FILE_BUFFER: return nv50_ir::FILE_MEMORY_BUFFER;
427
case TGSI_FILE_IMAGE: return nv50_ir::FILE_MEMORY_GLOBAL;
428
case TGSI_FILE_MEMORY: return nv50_ir::FILE_MEMORY_GLOBAL;
429
case TGSI_FILE_SAMPLER:
430
case TGSI_FILE_NULL:
431
default:
432
return nv50_ir::FILE_NULL;
433
}
434
}
435
436
static nv50_ir::SVSemantic translateSysVal(uint sysval)
437
{
438
switch (sysval) {
439
case TGSI_SEMANTIC_FACE: return nv50_ir::SV_FACE;
440
case TGSI_SEMANTIC_PSIZE: return nv50_ir::SV_POINT_SIZE;
441
case TGSI_SEMANTIC_PRIMID: return nv50_ir::SV_PRIMITIVE_ID;
442
case TGSI_SEMANTIC_INSTANCEID: return nv50_ir::SV_INSTANCE_ID;
443
case TGSI_SEMANTIC_VERTEXID: return nv50_ir::SV_VERTEX_ID;
444
case TGSI_SEMANTIC_GRID_SIZE: return nv50_ir::SV_NCTAID;
445
case TGSI_SEMANTIC_BLOCK_ID: return nv50_ir::SV_CTAID;
446
case TGSI_SEMANTIC_BLOCK_SIZE: return nv50_ir::SV_NTID;
447
case TGSI_SEMANTIC_THREAD_ID: return nv50_ir::SV_TID;
448
case TGSI_SEMANTIC_SAMPLEID: return nv50_ir::SV_SAMPLE_INDEX;
449
case TGSI_SEMANTIC_SAMPLEPOS: return nv50_ir::SV_SAMPLE_POS;
450
case TGSI_SEMANTIC_SAMPLEMASK: return nv50_ir::SV_SAMPLE_MASK;
451
case TGSI_SEMANTIC_INVOCATIONID: return nv50_ir::SV_INVOCATION_ID;
452
case TGSI_SEMANTIC_TESSCOORD: return nv50_ir::SV_TESS_COORD;
453
case TGSI_SEMANTIC_TESSOUTER: return nv50_ir::SV_TESS_OUTER;
454
case TGSI_SEMANTIC_TESSINNER: return nv50_ir::SV_TESS_INNER;
455
case TGSI_SEMANTIC_VERTICESIN: return nv50_ir::SV_VERTEX_COUNT;
456
case TGSI_SEMANTIC_HELPER_INVOCATION: return nv50_ir::SV_THREAD_KILL;
457
case TGSI_SEMANTIC_BASEVERTEX: return nv50_ir::SV_BASEVERTEX;
458
case TGSI_SEMANTIC_BASEINSTANCE: return nv50_ir::SV_BASEINSTANCE;
459
case TGSI_SEMANTIC_DRAWID: return nv50_ir::SV_DRAWID;
460
case TGSI_SEMANTIC_WORK_DIM: return nv50_ir::SV_WORK_DIM;
461
case TGSI_SEMANTIC_SUBGROUP_INVOCATION: return nv50_ir::SV_LANEID;
462
case TGSI_SEMANTIC_SUBGROUP_EQ_MASK: return nv50_ir::SV_LANEMASK_EQ;
463
case TGSI_SEMANTIC_SUBGROUP_LT_MASK: return nv50_ir::SV_LANEMASK_LT;
464
case TGSI_SEMANTIC_SUBGROUP_LE_MASK: return nv50_ir::SV_LANEMASK_LE;
465
case TGSI_SEMANTIC_SUBGROUP_GT_MASK: return nv50_ir::SV_LANEMASK_GT;
466
case TGSI_SEMANTIC_SUBGROUP_GE_MASK: return nv50_ir::SV_LANEMASK_GE;
467
default:
468
assert(0);
469
return nv50_ir::SV_CLOCK;
470
}
471
}
472
473
#define NV50_IR_TEX_TARG_CASE(a, b) \
474
case TGSI_TEXTURE_##a: return nv50_ir::TEX_TARGET_##b;
475
476
static nv50_ir::TexTarget translateTexture(uint tex)
477
{
478
switch (tex) {
479
NV50_IR_TEX_TARG_CASE(1D, 1D);
480
NV50_IR_TEX_TARG_CASE(2D, 2D);
481
NV50_IR_TEX_TARG_CASE(2D_MSAA, 2D_MS);
482
NV50_IR_TEX_TARG_CASE(3D, 3D);
483
NV50_IR_TEX_TARG_CASE(CUBE, CUBE);
484
NV50_IR_TEX_TARG_CASE(RECT, RECT);
485
NV50_IR_TEX_TARG_CASE(1D_ARRAY, 1D_ARRAY);
486
NV50_IR_TEX_TARG_CASE(2D_ARRAY, 2D_ARRAY);
487
NV50_IR_TEX_TARG_CASE(2D_ARRAY_MSAA, 2D_MS_ARRAY);
488
NV50_IR_TEX_TARG_CASE(CUBE_ARRAY, CUBE_ARRAY);
489
NV50_IR_TEX_TARG_CASE(SHADOW1D, 1D_SHADOW);
490
NV50_IR_TEX_TARG_CASE(SHADOW2D, 2D_SHADOW);
491
NV50_IR_TEX_TARG_CASE(SHADOWCUBE, CUBE_SHADOW);
492
NV50_IR_TEX_TARG_CASE(SHADOWRECT, RECT_SHADOW);
493
NV50_IR_TEX_TARG_CASE(SHADOW1D_ARRAY, 1D_ARRAY_SHADOW);
494
NV50_IR_TEX_TARG_CASE(SHADOW2D_ARRAY, 2D_ARRAY_SHADOW);
495
NV50_IR_TEX_TARG_CASE(SHADOWCUBE_ARRAY, CUBE_ARRAY_SHADOW);
496
NV50_IR_TEX_TARG_CASE(BUFFER, BUFFER);
497
498
case TGSI_TEXTURE_UNKNOWN:
499
default:
500
assert(!"invalid texture target");
501
return nv50_ir::TEX_TARGET_2D;
502
}
503
}
504
505
static nv50_ir::CacheMode translateCacheMode(uint qualifier)
506
{
507
if (qualifier & TGSI_MEMORY_VOLATILE)
508
return nv50_ir::CACHE_CV;
509
if (qualifier & TGSI_MEMORY_COHERENT)
510
return nv50_ir::CACHE_CG;
511
return nv50_ir::CACHE_CA;
512
}
513
514
nv50_ir::DataType Instruction::inferSrcType() const
515
{
516
switch (getOpcode()) {
517
case TGSI_OPCODE_UIF:
518
case TGSI_OPCODE_AND:
519
case TGSI_OPCODE_OR:
520
case TGSI_OPCODE_XOR:
521
case TGSI_OPCODE_NOT:
522
case TGSI_OPCODE_SHL:
523
case TGSI_OPCODE_U2F:
524
case TGSI_OPCODE_U2D:
525
case TGSI_OPCODE_U2I64:
526
case TGSI_OPCODE_UADD:
527
case TGSI_OPCODE_UDIV:
528
case TGSI_OPCODE_UMOD:
529
case TGSI_OPCODE_UMAD:
530
case TGSI_OPCODE_UMUL:
531
case TGSI_OPCODE_UMUL_HI:
532
case TGSI_OPCODE_UMAX:
533
case TGSI_OPCODE_UMIN:
534
case TGSI_OPCODE_USEQ:
535
case TGSI_OPCODE_USGE:
536
case TGSI_OPCODE_USLT:
537
case TGSI_OPCODE_USNE:
538
case TGSI_OPCODE_USHR:
539
case TGSI_OPCODE_ATOMUADD:
540
case TGSI_OPCODE_ATOMXCHG:
541
case TGSI_OPCODE_ATOMCAS:
542
case TGSI_OPCODE_ATOMAND:
543
case TGSI_OPCODE_ATOMOR:
544
case TGSI_OPCODE_ATOMXOR:
545
case TGSI_OPCODE_ATOMUMIN:
546
case TGSI_OPCODE_ATOMUMAX:
547
case TGSI_OPCODE_ATOMDEC_WRAP:
548
case TGSI_OPCODE_ATOMINC_WRAP:
549
case TGSI_OPCODE_UBFE:
550
case TGSI_OPCODE_UMSB:
551
case TGSI_OPCODE_UP2H:
552
case TGSI_OPCODE_VOTE_ALL:
553
case TGSI_OPCODE_VOTE_ANY:
554
case TGSI_OPCODE_VOTE_EQ:
555
return nv50_ir::TYPE_U32;
556
case TGSI_OPCODE_I2F:
557
case TGSI_OPCODE_I2D:
558
case TGSI_OPCODE_I2I64:
559
case TGSI_OPCODE_IDIV:
560
case TGSI_OPCODE_IMUL_HI:
561
case TGSI_OPCODE_IMAX:
562
case TGSI_OPCODE_IMIN:
563
case TGSI_OPCODE_IABS:
564
case TGSI_OPCODE_INEG:
565
case TGSI_OPCODE_ISGE:
566
case TGSI_OPCODE_ISHR:
567
case TGSI_OPCODE_ISLT:
568
case TGSI_OPCODE_ISSG:
569
case TGSI_OPCODE_MOD:
570
case TGSI_OPCODE_UARL:
571
case TGSI_OPCODE_ATOMIMIN:
572
case TGSI_OPCODE_ATOMIMAX:
573
case TGSI_OPCODE_IBFE:
574
case TGSI_OPCODE_IMSB:
575
return nv50_ir::TYPE_S32;
576
case TGSI_OPCODE_D2F:
577
case TGSI_OPCODE_D2I:
578
case TGSI_OPCODE_D2U:
579
case TGSI_OPCODE_D2I64:
580
case TGSI_OPCODE_D2U64:
581
case TGSI_OPCODE_DABS:
582
case TGSI_OPCODE_DNEG:
583
case TGSI_OPCODE_DADD:
584
case TGSI_OPCODE_DMUL:
585
case TGSI_OPCODE_DDIV:
586
case TGSI_OPCODE_DMAX:
587
case TGSI_OPCODE_DMIN:
588
case TGSI_OPCODE_DSLT:
589
case TGSI_OPCODE_DSGE:
590
case TGSI_OPCODE_DSEQ:
591
case TGSI_OPCODE_DSNE:
592
case TGSI_OPCODE_DRCP:
593
case TGSI_OPCODE_DSQRT:
594
case TGSI_OPCODE_DMAD:
595
case TGSI_OPCODE_DFMA:
596
case TGSI_OPCODE_DFRAC:
597
case TGSI_OPCODE_DRSQ:
598
case TGSI_OPCODE_DTRUNC:
599
case TGSI_OPCODE_DCEIL:
600
case TGSI_OPCODE_DFLR:
601
case TGSI_OPCODE_DROUND:
602
return nv50_ir::TYPE_F64;
603
case TGSI_OPCODE_U64SEQ:
604
case TGSI_OPCODE_U64SNE:
605
case TGSI_OPCODE_U64SLT:
606
case TGSI_OPCODE_U64SGE:
607
case TGSI_OPCODE_U64MIN:
608
case TGSI_OPCODE_U64MAX:
609
case TGSI_OPCODE_U64ADD:
610
case TGSI_OPCODE_U64MUL:
611
case TGSI_OPCODE_U64SHL:
612
case TGSI_OPCODE_U64SHR:
613
case TGSI_OPCODE_U64DIV:
614
case TGSI_OPCODE_U64MOD:
615
case TGSI_OPCODE_U642F:
616
case TGSI_OPCODE_U642D:
617
return nv50_ir::TYPE_U64;
618
case TGSI_OPCODE_I64ABS:
619
case TGSI_OPCODE_I64SSG:
620
case TGSI_OPCODE_I64NEG:
621
case TGSI_OPCODE_I64SLT:
622
case TGSI_OPCODE_I64SGE:
623
case TGSI_OPCODE_I64MIN:
624
case TGSI_OPCODE_I64MAX:
625
case TGSI_OPCODE_I64SHR:
626
case TGSI_OPCODE_I64DIV:
627
case TGSI_OPCODE_I64MOD:
628
case TGSI_OPCODE_I642F:
629
case TGSI_OPCODE_I642D:
630
return nv50_ir::TYPE_S64;
631
default:
632
return nv50_ir::TYPE_F32;
633
}
634
}
635
636
nv50_ir::DataType Instruction::inferDstType() const
637
{
638
switch (getOpcode()) {
639
case TGSI_OPCODE_D2U:
640
case TGSI_OPCODE_F2U: return nv50_ir::TYPE_U32;
641
case TGSI_OPCODE_D2I:
642
case TGSI_OPCODE_F2I: return nv50_ir::TYPE_S32;
643
case TGSI_OPCODE_FSEQ:
644
case TGSI_OPCODE_FSGE:
645
case TGSI_OPCODE_FSLT:
646
case TGSI_OPCODE_FSNE:
647
case TGSI_OPCODE_DSEQ:
648
case TGSI_OPCODE_DSGE:
649
case TGSI_OPCODE_DSLT:
650
case TGSI_OPCODE_DSNE:
651
case TGSI_OPCODE_I64SLT:
652
case TGSI_OPCODE_I64SGE:
653
case TGSI_OPCODE_U64SEQ:
654
case TGSI_OPCODE_U64SNE:
655
case TGSI_OPCODE_U64SLT:
656
case TGSI_OPCODE_U64SGE:
657
case TGSI_OPCODE_PK2H:
658
return nv50_ir::TYPE_U32;
659
case TGSI_OPCODE_I2F:
660
case TGSI_OPCODE_U2F:
661
case TGSI_OPCODE_D2F:
662
case TGSI_OPCODE_I642F:
663
case TGSI_OPCODE_U642F:
664
case TGSI_OPCODE_UP2H:
665
return nv50_ir::TYPE_F32;
666
case TGSI_OPCODE_I2D:
667
case TGSI_OPCODE_U2D:
668
case TGSI_OPCODE_F2D:
669
case TGSI_OPCODE_I642D:
670
case TGSI_OPCODE_U642D:
671
return nv50_ir::TYPE_F64;
672
case TGSI_OPCODE_I2I64:
673
case TGSI_OPCODE_U2I64:
674
case TGSI_OPCODE_F2I64:
675
case TGSI_OPCODE_D2I64:
676
return nv50_ir::TYPE_S64;
677
case TGSI_OPCODE_F2U64:
678
case TGSI_OPCODE_D2U64:
679
return nv50_ir::TYPE_U64;
680
default:
681
return inferSrcType();
682
}
683
}
684
685
nv50_ir::CondCode Instruction::getSetCond() const
686
{
687
using namespace nv50_ir;
688
689
switch (getOpcode()) {
690
case TGSI_OPCODE_SLT:
691
case TGSI_OPCODE_ISLT:
692
case TGSI_OPCODE_USLT:
693
case TGSI_OPCODE_FSLT:
694
case TGSI_OPCODE_DSLT:
695
case TGSI_OPCODE_I64SLT:
696
case TGSI_OPCODE_U64SLT:
697
return CC_LT;
698
case TGSI_OPCODE_SLE:
699
return CC_LE;
700
case TGSI_OPCODE_SGE:
701
case TGSI_OPCODE_ISGE:
702
case TGSI_OPCODE_USGE:
703
case TGSI_OPCODE_FSGE:
704
case TGSI_OPCODE_DSGE:
705
case TGSI_OPCODE_I64SGE:
706
case TGSI_OPCODE_U64SGE:
707
return CC_GE;
708
case TGSI_OPCODE_SGT:
709
return CC_GT;
710
case TGSI_OPCODE_SEQ:
711
case TGSI_OPCODE_USEQ:
712
case TGSI_OPCODE_FSEQ:
713
case TGSI_OPCODE_DSEQ:
714
case TGSI_OPCODE_U64SEQ:
715
return CC_EQ;
716
case TGSI_OPCODE_SNE:
717
case TGSI_OPCODE_FSNE:
718
case TGSI_OPCODE_DSNE:
719
case TGSI_OPCODE_U64SNE:
720
return CC_NEU;
721
case TGSI_OPCODE_USNE:
722
return CC_NE;
723
default:
724
return CC_ALWAYS;
725
}
726
}
727
728
#define NV50_IR_OPCODE_CASE(a, b) case TGSI_OPCODE_##a: return nv50_ir::OP_##b
729
730
static nv50_ir::operation translateOpcode(uint opcode)
731
{
732
switch (opcode) {
733
NV50_IR_OPCODE_CASE(ARL, SHL);
734
NV50_IR_OPCODE_CASE(MOV, MOV);
735
736
NV50_IR_OPCODE_CASE(RCP, RCP);
737
NV50_IR_OPCODE_CASE(RSQ, RSQ);
738
NV50_IR_OPCODE_CASE(SQRT, SQRT);
739
740
NV50_IR_OPCODE_CASE(MUL, MUL);
741
NV50_IR_OPCODE_CASE(ADD, ADD);
742
743
NV50_IR_OPCODE_CASE(MIN, MIN);
744
NV50_IR_OPCODE_CASE(MAX, MAX);
745
NV50_IR_OPCODE_CASE(SLT, SET);
746
NV50_IR_OPCODE_CASE(SGE, SET);
747
NV50_IR_OPCODE_CASE(MAD, MAD);
748
NV50_IR_OPCODE_CASE(FMA, FMA);
749
750
NV50_IR_OPCODE_CASE(FLR, FLOOR);
751
NV50_IR_OPCODE_CASE(ROUND, CVT);
752
NV50_IR_OPCODE_CASE(EX2, EX2);
753
NV50_IR_OPCODE_CASE(LG2, LG2);
754
NV50_IR_OPCODE_CASE(POW, POW);
755
756
NV50_IR_OPCODE_CASE(COS, COS);
757
NV50_IR_OPCODE_CASE(DDX, DFDX);
758
NV50_IR_OPCODE_CASE(DDX_FINE, DFDX);
759
NV50_IR_OPCODE_CASE(DDY, DFDY);
760
NV50_IR_OPCODE_CASE(DDY_FINE, DFDY);
761
NV50_IR_OPCODE_CASE(KILL, DISCARD);
762
NV50_IR_OPCODE_CASE(DEMOTE, DISCARD);
763
764
NV50_IR_OPCODE_CASE(SEQ, SET);
765
NV50_IR_OPCODE_CASE(SGT, SET);
766
NV50_IR_OPCODE_CASE(SIN, SIN);
767
NV50_IR_OPCODE_CASE(SLE, SET);
768
NV50_IR_OPCODE_CASE(SNE, SET);
769
NV50_IR_OPCODE_CASE(TEX, TEX);
770
NV50_IR_OPCODE_CASE(TXD, TXD);
771
NV50_IR_OPCODE_CASE(TXP, TEX);
772
773
NV50_IR_OPCODE_CASE(CAL, CALL);
774
NV50_IR_OPCODE_CASE(RET, RET);
775
NV50_IR_OPCODE_CASE(CMP, SLCT);
776
777
NV50_IR_OPCODE_CASE(TXB, TXB);
778
779
NV50_IR_OPCODE_CASE(DIV, DIV);
780
781
NV50_IR_OPCODE_CASE(TXL, TXL);
782
NV50_IR_OPCODE_CASE(TEX_LZ, TXL);
783
784
NV50_IR_OPCODE_CASE(CEIL, CEIL);
785
NV50_IR_OPCODE_CASE(I2F, CVT);
786
NV50_IR_OPCODE_CASE(NOT, NOT);
787
NV50_IR_OPCODE_CASE(TRUNC, TRUNC);
788
NV50_IR_OPCODE_CASE(SHL, SHL);
789
790
NV50_IR_OPCODE_CASE(AND, AND);
791
NV50_IR_OPCODE_CASE(OR, OR);
792
NV50_IR_OPCODE_CASE(MOD, MOD);
793
NV50_IR_OPCODE_CASE(XOR, XOR);
794
NV50_IR_OPCODE_CASE(TXF, TXF);
795
NV50_IR_OPCODE_CASE(TXF_LZ, TXF);
796
NV50_IR_OPCODE_CASE(TXQ, TXQ);
797
NV50_IR_OPCODE_CASE(TXQS, TXQ);
798
NV50_IR_OPCODE_CASE(TG4, TXG);
799
NV50_IR_OPCODE_CASE(LODQ, TXLQ);
800
801
NV50_IR_OPCODE_CASE(EMIT, EMIT);
802
NV50_IR_OPCODE_CASE(ENDPRIM, RESTART);
803
804
NV50_IR_OPCODE_CASE(KILL_IF, DISCARD);
805
806
NV50_IR_OPCODE_CASE(F2I, CVT);
807
NV50_IR_OPCODE_CASE(FSEQ, SET);
808
NV50_IR_OPCODE_CASE(FSGE, SET);
809
NV50_IR_OPCODE_CASE(FSLT, SET);
810
NV50_IR_OPCODE_CASE(FSNE, SET);
811
NV50_IR_OPCODE_CASE(IDIV, DIV);
812
NV50_IR_OPCODE_CASE(IMAX, MAX);
813
NV50_IR_OPCODE_CASE(IMIN, MIN);
814
NV50_IR_OPCODE_CASE(IABS, ABS);
815
NV50_IR_OPCODE_CASE(INEG, NEG);
816
NV50_IR_OPCODE_CASE(ISGE, SET);
817
NV50_IR_OPCODE_CASE(ISHR, SHR);
818
NV50_IR_OPCODE_CASE(ISLT, SET);
819
NV50_IR_OPCODE_CASE(F2U, CVT);
820
NV50_IR_OPCODE_CASE(U2F, CVT);
821
NV50_IR_OPCODE_CASE(UADD, ADD);
822
NV50_IR_OPCODE_CASE(UDIV, DIV);
823
NV50_IR_OPCODE_CASE(UMAD, MAD);
824
NV50_IR_OPCODE_CASE(UMAX, MAX);
825
NV50_IR_OPCODE_CASE(UMIN, MIN);
826
NV50_IR_OPCODE_CASE(UMOD, MOD);
827
NV50_IR_OPCODE_CASE(UMUL, MUL);
828
NV50_IR_OPCODE_CASE(USEQ, SET);
829
NV50_IR_OPCODE_CASE(USGE, SET);
830
NV50_IR_OPCODE_CASE(USHR, SHR);
831
NV50_IR_OPCODE_CASE(USLT, SET);
832
NV50_IR_OPCODE_CASE(USNE, SET);
833
834
NV50_IR_OPCODE_CASE(DABS, ABS);
835
NV50_IR_OPCODE_CASE(DNEG, NEG);
836
NV50_IR_OPCODE_CASE(DADD, ADD);
837
NV50_IR_OPCODE_CASE(DMUL, MUL);
838
NV50_IR_OPCODE_CASE(DDIV, DIV);
839
NV50_IR_OPCODE_CASE(DMAX, MAX);
840
NV50_IR_OPCODE_CASE(DMIN, MIN);
841
NV50_IR_OPCODE_CASE(DSLT, SET);
842
NV50_IR_OPCODE_CASE(DSGE, SET);
843
NV50_IR_OPCODE_CASE(DSEQ, SET);
844
NV50_IR_OPCODE_CASE(DSNE, SET);
845
NV50_IR_OPCODE_CASE(DRCP, RCP);
846
NV50_IR_OPCODE_CASE(DSQRT, SQRT);
847
NV50_IR_OPCODE_CASE(DMAD, MAD);
848
NV50_IR_OPCODE_CASE(DFMA, FMA);
849
NV50_IR_OPCODE_CASE(D2I, CVT);
850
NV50_IR_OPCODE_CASE(D2U, CVT);
851
NV50_IR_OPCODE_CASE(I2D, CVT);
852
NV50_IR_OPCODE_CASE(U2D, CVT);
853
NV50_IR_OPCODE_CASE(DRSQ, RSQ);
854
NV50_IR_OPCODE_CASE(DTRUNC, TRUNC);
855
NV50_IR_OPCODE_CASE(DCEIL, CEIL);
856
NV50_IR_OPCODE_CASE(DFLR, FLOOR);
857
NV50_IR_OPCODE_CASE(DROUND, CVT);
858
859
NV50_IR_OPCODE_CASE(U64SEQ, SET);
860
NV50_IR_OPCODE_CASE(U64SNE, SET);
861
NV50_IR_OPCODE_CASE(U64SLT, SET);
862
NV50_IR_OPCODE_CASE(U64SGE, SET);
863
NV50_IR_OPCODE_CASE(I64SLT, SET);
864
NV50_IR_OPCODE_CASE(I64SGE, SET);
865
NV50_IR_OPCODE_CASE(I2I64, CVT);
866
NV50_IR_OPCODE_CASE(U2I64, CVT);
867
NV50_IR_OPCODE_CASE(F2I64, CVT);
868
NV50_IR_OPCODE_CASE(F2U64, CVT);
869
NV50_IR_OPCODE_CASE(D2I64, CVT);
870
NV50_IR_OPCODE_CASE(D2U64, CVT);
871
NV50_IR_OPCODE_CASE(I642F, CVT);
872
NV50_IR_OPCODE_CASE(U642F, CVT);
873
NV50_IR_OPCODE_CASE(I642D, CVT);
874
NV50_IR_OPCODE_CASE(U642D, CVT);
875
876
NV50_IR_OPCODE_CASE(I64MIN, MIN);
877
NV50_IR_OPCODE_CASE(U64MIN, MIN);
878
NV50_IR_OPCODE_CASE(I64MAX, MAX);
879
NV50_IR_OPCODE_CASE(U64MAX, MAX);
880
NV50_IR_OPCODE_CASE(I64ABS, ABS);
881
NV50_IR_OPCODE_CASE(I64NEG, NEG);
882
NV50_IR_OPCODE_CASE(U64ADD, ADD);
883
NV50_IR_OPCODE_CASE(U64MUL, MUL);
884
NV50_IR_OPCODE_CASE(U64SHL, SHL);
885
NV50_IR_OPCODE_CASE(I64SHR, SHR);
886
NV50_IR_OPCODE_CASE(U64SHR, SHR);
887
888
NV50_IR_OPCODE_CASE(IMUL_HI, MUL);
889
NV50_IR_OPCODE_CASE(UMUL_HI, MUL);
890
891
NV50_IR_OPCODE_CASE(SAMPLE, TEX);
892
NV50_IR_OPCODE_CASE(SAMPLE_B, TXB);
893
NV50_IR_OPCODE_CASE(SAMPLE_C, TEX);
894
NV50_IR_OPCODE_CASE(SAMPLE_C_LZ, TEX);
895
NV50_IR_OPCODE_CASE(SAMPLE_D, TXD);
896
NV50_IR_OPCODE_CASE(SAMPLE_L, TXL);
897
NV50_IR_OPCODE_CASE(SAMPLE_I, TXF);
898
NV50_IR_OPCODE_CASE(SAMPLE_I_MS, TXF);
899
NV50_IR_OPCODE_CASE(GATHER4, TXG);
900
NV50_IR_OPCODE_CASE(SVIEWINFO, TXQ);
901
902
NV50_IR_OPCODE_CASE(ATOMUADD, ATOM);
903
NV50_IR_OPCODE_CASE(ATOMXCHG, ATOM);
904
NV50_IR_OPCODE_CASE(ATOMCAS, ATOM);
905
NV50_IR_OPCODE_CASE(ATOMAND, ATOM);
906
NV50_IR_OPCODE_CASE(ATOMOR, ATOM);
907
NV50_IR_OPCODE_CASE(ATOMXOR, ATOM);
908
NV50_IR_OPCODE_CASE(ATOMUMIN, ATOM);
909
NV50_IR_OPCODE_CASE(ATOMUMAX, ATOM);
910
NV50_IR_OPCODE_CASE(ATOMIMIN, ATOM);
911
NV50_IR_OPCODE_CASE(ATOMIMAX, ATOM);
912
NV50_IR_OPCODE_CASE(ATOMFADD, ATOM);
913
NV50_IR_OPCODE_CASE(ATOMDEC_WRAP, ATOM);
914
NV50_IR_OPCODE_CASE(ATOMINC_WRAP, ATOM);
915
916
NV50_IR_OPCODE_CASE(TEX2, TEX);
917
NV50_IR_OPCODE_CASE(TXB2, TXB);
918
NV50_IR_OPCODE_CASE(TXL2, TXL);
919
920
NV50_IR_OPCODE_CASE(IBFE, EXTBF);
921
NV50_IR_OPCODE_CASE(UBFE, EXTBF);
922
NV50_IR_OPCODE_CASE(BFI, INSBF);
923
NV50_IR_OPCODE_CASE(BREV, EXTBF);
924
NV50_IR_OPCODE_CASE(POPC, POPCNT);
925
NV50_IR_OPCODE_CASE(LSB, BFIND);
926
NV50_IR_OPCODE_CASE(IMSB, BFIND);
927
NV50_IR_OPCODE_CASE(UMSB, BFIND);
928
929
NV50_IR_OPCODE_CASE(VOTE_ALL, VOTE);
930
NV50_IR_OPCODE_CASE(VOTE_ANY, VOTE);
931
NV50_IR_OPCODE_CASE(VOTE_EQ, VOTE);
932
933
NV50_IR_OPCODE_CASE(BALLOT, VOTE);
934
NV50_IR_OPCODE_CASE(READ_INVOC, SHFL);
935
NV50_IR_OPCODE_CASE(READ_FIRST, SHFL);
936
937
NV50_IR_OPCODE_CASE(END, EXIT);
938
939
default:
940
return nv50_ir::OP_NOP;
941
}
942
}
943
944
static uint16_t opcodeToSubOp(uint opcode)
945
{
946
switch (opcode) {
947
case TGSI_OPCODE_ATOMUADD: return NV50_IR_SUBOP_ATOM_ADD;
948
case TGSI_OPCODE_ATOMXCHG: return NV50_IR_SUBOP_ATOM_EXCH;
949
case TGSI_OPCODE_ATOMCAS: return NV50_IR_SUBOP_ATOM_CAS;
950
case TGSI_OPCODE_ATOMAND: return NV50_IR_SUBOP_ATOM_AND;
951
case TGSI_OPCODE_ATOMOR: return NV50_IR_SUBOP_ATOM_OR;
952
case TGSI_OPCODE_ATOMXOR: return NV50_IR_SUBOP_ATOM_XOR;
953
case TGSI_OPCODE_ATOMUMIN: return NV50_IR_SUBOP_ATOM_MIN;
954
case TGSI_OPCODE_ATOMIMIN: return NV50_IR_SUBOP_ATOM_MIN;
955
case TGSI_OPCODE_ATOMUMAX: return NV50_IR_SUBOP_ATOM_MAX;
956
case TGSI_OPCODE_ATOMIMAX: return NV50_IR_SUBOP_ATOM_MAX;
957
case TGSI_OPCODE_ATOMFADD: return NV50_IR_SUBOP_ATOM_ADD;
958
case TGSI_OPCODE_ATOMDEC_WRAP: return NV50_IR_SUBOP_ATOM_DEC;
959
case TGSI_OPCODE_ATOMINC_WRAP: return NV50_IR_SUBOP_ATOM_INC;
960
case TGSI_OPCODE_IMUL_HI:
961
case TGSI_OPCODE_UMUL_HI:
962
return NV50_IR_SUBOP_MUL_HIGH;
963
case TGSI_OPCODE_VOTE_ALL: return NV50_IR_SUBOP_VOTE_ALL;
964
case TGSI_OPCODE_VOTE_ANY: return NV50_IR_SUBOP_VOTE_ANY;
965
case TGSI_OPCODE_VOTE_EQ: return NV50_IR_SUBOP_VOTE_UNI;
966
default:
967
return 0;
968
}
969
}
970
971
bool Instruction::checkDstSrcAliasing() const
972
{
973
if (insn->Dst[0].Register.Indirect) // no danger if indirect, using memory
974
return false;
975
976
for (int s = 0; s < TGSI_FULL_MAX_SRC_REGISTERS; ++s) {
977
if (insn->Src[s].Register.File == TGSI_FILE_NULL)
978
break;
979
if (insn->Src[s].Register.File == insn->Dst[0].Register.File &&
980
insn->Src[s].Register.Index == insn->Dst[0].Register.Index)
981
return true;
982
}
983
return false;
984
}
985
986
class Source
987
{
988
public:
989
Source(struct nv50_ir_prog_info *, struct nv50_ir_prog_info_out *, nv50_ir::Program *);
990
~Source();
991
992
public:
993
bool scanSource();
994
unsigned fileSize(unsigned file) const { return scan.file_max[file] + 1; }
995
996
public:
997
struct tgsi_shader_info scan;
998
struct tgsi_full_instruction *insns;
999
const struct tgsi_token *tokens;
1000
struct nv50_ir_prog_info *info;
1001
struct nv50_ir_prog_info_out *info_out;
1002
1003
nv50_ir::DynArray tempArrays;
1004
nv50_ir::DynArray immdArrays;
1005
1006
typedef nv50_ir::BuildUtil::Location Location;
1007
// these registers are per-subroutine, cannot be used for parameter passing
1008
std::set<Location> locals;
1009
1010
std::set<int> indirectTempArrays;
1011
std::map<int, int> indirectTempOffsets;
1012
std::map<int, std::pair<int, int> > tempArrayInfo;
1013
std::vector<int> tempArrayId;
1014
1015
std::map<int, int> bufferIds;
1016
std::map<int, int> imageIds;
1017
1018
int clipVertexOutput;
1019
1020
struct TextureView {
1021
uint8_t target; // TGSI_TEXTURE_*
1022
};
1023
std::vector<TextureView> textureViews;
1024
1025
/*
1026
struct Resource {
1027
uint8_t target; // TGSI_TEXTURE_*
1028
bool raw;
1029
uint8_t slot; // $surface index
1030
};
1031
std::vector<Resource> resources;
1032
*/
1033
1034
struct MemoryFile {
1035
uint8_t mem_type; // TGSI_MEMORY_TYPE_*
1036
};
1037
std::vector<MemoryFile> memoryFiles;
1038
1039
std::vector<bool> bufferAtomics;
1040
1041
struct {
1042
uint16_t count; /* count of inline immediates */
1043
uint32_t *data; /* inline immediate data */
1044
} immd;
1045
1046
private:
1047
int gmemSlot;
1048
nv50_ir::Program *prog;
1049
int inferSysValDirection(unsigned sn) const;
1050
bool scanDeclaration(const struct tgsi_full_declaration *);
1051
bool scanInstruction(const struct tgsi_full_instruction *);
1052
void scanInstructionSrc(const Instruction& insn,
1053
const Instruction::SrcRegister& src,
1054
unsigned mask);
1055
void scanProperty(const struct tgsi_full_property *);
1056
void scanImmediate(const struct tgsi_full_immediate *);
1057
1058
inline bool isEdgeFlagPassthrough(const Instruction&) const;
1059
};
1060
1061
Source::Source(struct nv50_ir_prog_info *info, struct nv50_ir_prog_info_out *info_out,
1062
nv50_ir::Program *prog)
1063
: insns(NULL), info(info), info_out(info_out), clipVertexOutput(-1),
1064
gmemSlot(0), prog(prog)
1065
{
1066
tokens = (const struct tgsi_token *)info->bin.source;
1067
1068
if (info->dbgFlags & NV50_IR_DEBUG_BASIC)
1069
tgsi_dump(tokens, 0);
1070
1071
tgsi_scan_shader(tokens, &scan);
1072
1073
immd.count = 0;
1074
immd.data = (uint32_t *)MALLOC(scan.immediate_count * 16);
1075
}
1076
1077
Source::~Source()
1078
{
1079
if (insns)
1080
FREE(insns);
1081
1082
if (immd.data)
1083
FREE(immd.data);
1084
}
1085
1086
bool Source::scanSource()
1087
{
1088
unsigned insnCount = 0;
1089
struct tgsi_parse_context parse;
1090
1091
insns = (struct tgsi_full_instruction *)MALLOC(scan.num_instructions *
1092
sizeof(insns[0]));
1093
if (!insns)
1094
return false;
1095
1096
textureViews.resize(scan.file_max[TGSI_FILE_SAMPLER_VIEW] + 1);
1097
//resources.resize(scan.file_max[TGSI_FILE_RESOURCE] + 1);
1098
tempArrayId.resize(scan.file_max[TGSI_FILE_TEMPORARY] + 1);
1099
memoryFiles.resize(scan.file_max[TGSI_FILE_MEMORY] + 1);
1100
bufferAtomics.resize(scan.file_max[TGSI_FILE_BUFFER] + 1);
1101
1102
info_out->numInputs = scan.file_max[TGSI_FILE_INPUT] + 1;
1103
info_out->numOutputs = scan.file_max[TGSI_FILE_OUTPUT] + 1;
1104
info_out->numSysVals = scan.file_max[TGSI_FILE_SYSTEM_VALUE] + 1;
1105
1106
if (info->type == PIPE_SHADER_FRAGMENT) {
1107
info_out->prop.fp.writesDepth = scan.writes_z;
1108
info_out->prop.fp.usesDiscard = scan.uses_kill || info->io.alphaRefBase;
1109
} else
1110
if (info->type == PIPE_SHADER_GEOMETRY) {
1111
info_out->prop.gp.instanceCount = 1; // default value
1112
}
1113
1114
info->io.viewportId = -1;
1115
1116
tgsi_parse_init(&parse, tokens);
1117
while (!tgsi_parse_end_of_tokens(&parse)) {
1118
tgsi_parse_token(&parse);
1119
1120
switch (parse.FullToken.Token.Type) {
1121
case TGSI_TOKEN_TYPE_IMMEDIATE:
1122
scanImmediate(&parse.FullToken.FullImmediate);
1123
break;
1124
case TGSI_TOKEN_TYPE_DECLARATION:
1125
scanDeclaration(&parse.FullToken.FullDeclaration);
1126
break;
1127
case TGSI_TOKEN_TYPE_INSTRUCTION:
1128
insns[insnCount++] = parse.FullToken.FullInstruction;
1129
scanInstruction(&parse.FullToken.FullInstruction);
1130
break;
1131
case TGSI_TOKEN_TYPE_PROPERTY:
1132
scanProperty(&parse.FullToken.FullProperty);
1133
break;
1134
default:
1135
INFO("unknown TGSI token type: %d\n", parse.FullToken.Token.Type);
1136
break;
1137
}
1138
}
1139
tgsi_parse_free(&parse);
1140
1141
if (indirectTempArrays.size()) {
1142
int tempBase = 0;
1143
for (std::set<int>::const_iterator it = indirectTempArrays.begin();
1144
it != indirectTempArrays.end(); ++it) {
1145
std::pair<int, int>& info = tempArrayInfo[*it];
1146
indirectTempOffsets.insert(std::make_pair(*it, tempBase - info.first));
1147
tempBase += info.second;
1148
}
1149
info_out->bin.tlsSpace += tempBase * 16;
1150
}
1151
1152
if (info_out->io.genUserClip > 0) {
1153
info_out->io.clipDistances = info_out->io.genUserClip;
1154
1155
const unsigned int nOut = (info_out->io.genUserClip + 3) / 4;
1156
1157
for (unsigned int n = 0; n < nOut; ++n) {
1158
unsigned int i = info_out->numOutputs++;
1159
info_out->out[i].id = i;
1160
info_out->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
1161
info_out->out[i].si = n;
1162
info_out->out[i].mask = ((1 << info_out->io.clipDistances) - 1) >> (n * 4);
1163
}
1164
}
1165
1166
return info->assignSlots(info_out) == 0;
1167
}
1168
1169
void Source::scanProperty(const struct tgsi_full_property *prop)
1170
{
1171
switch (prop->Property.PropertyName) {
1172
case TGSI_PROPERTY_GS_OUTPUT_PRIM:
1173
info_out->prop.gp.outputPrim = prop->u[0].Data;
1174
break;
1175
case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES:
1176
info_out->prop.gp.maxVertices = prop->u[0].Data;
1177
break;
1178
case TGSI_PROPERTY_GS_INVOCATIONS:
1179
info_out->prop.gp.instanceCount = prop->u[0].Data;
1180
break;
1181
case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
1182
info_out->prop.fp.separateFragData = true;
1183
break;
1184
case TGSI_PROPERTY_FS_COORD_ORIGIN:
1185
case TGSI_PROPERTY_FS_COORD_PIXEL_CENTER:
1186
case TGSI_PROPERTY_FS_DEPTH_LAYOUT:
1187
case TGSI_PROPERTY_GS_INPUT_PRIM:
1188
case TGSI_PROPERTY_FS_BLEND_EQUATION_ADVANCED:
1189
// we don't care
1190
break;
1191
case TGSI_PROPERTY_VS_PROHIBIT_UCPS:
1192
info_out->io.genUserClip = -1;
1193
break;
1194
case TGSI_PROPERTY_TCS_VERTICES_OUT:
1195
info_out->prop.tp.outputPatchSize = prop->u[0].Data;
1196
break;
1197
case TGSI_PROPERTY_TES_PRIM_MODE:
1198
info_out->prop.tp.domain = prop->u[0].Data;
1199
break;
1200
case TGSI_PROPERTY_TES_SPACING:
1201
info_out->prop.tp.partitioning = prop->u[0].Data;
1202
break;
1203
case TGSI_PROPERTY_TES_VERTEX_ORDER_CW:
1204
info_out->prop.tp.winding = prop->u[0].Data;
1205
break;
1206
case TGSI_PROPERTY_TES_POINT_MODE:
1207
if (prop->u[0].Data)
1208
info_out->prop.tp.outputPrim = PIPE_PRIM_POINTS;
1209
else
1210
info_out->prop.tp.outputPrim = PIPE_PRIM_TRIANGLES; /* anything but points */
1211
break;
1212
case TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH:
1213
info->prop.cp.numThreads[0] = prop->u[0].Data;
1214
break;
1215
case TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT:
1216
info->prop.cp.numThreads[1] = prop->u[0].Data;
1217
break;
1218
case TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH:
1219
info->prop.cp.numThreads[2] = prop->u[0].Data;
1220
break;
1221
case TGSI_PROPERTY_NUM_CLIPDIST_ENABLED:
1222
info_out->io.clipDistances = prop->u[0].Data;
1223
break;
1224
case TGSI_PROPERTY_NUM_CULLDIST_ENABLED:
1225
info_out->io.cullDistances = prop->u[0].Data;
1226
break;
1227
case TGSI_PROPERTY_NEXT_SHADER:
1228
/* Do not need to know the next shader stage. */
1229
break;
1230
case TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL:
1231
info_out->prop.fp.earlyFragTests = prop->u[0].Data;
1232
break;
1233
case TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE:
1234
info_out->prop.fp.postDepthCoverage = prop->u[0].Data;
1235
break;
1236
case TGSI_PROPERTY_MUL_ZERO_WINS:
1237
info->io.mul_zero_wins = prop->u[0].Data;
1238
break;
1239
case TGSI_PROPERTY_LAYER_VIEWPORT_RELATIVE:
1240
info_out->io.layer_viewport_relative = prop->u[0].Data;
1241
break;
1242
default:
1243
INFO("unhandled TGSI property %d\n", prop->Property.PropertyName);
1244
break;
1245
}
1246
}
1247
1248
void Source::scanImmediate(const struct tgsi_full_immediate *imm)
1249
{
1250
const unsigned n = immd.count++;
1251
1252
assert(n < scan.immediate_count);
1253
1254
for (int c = 0; c < 4; ++c)
1255
immd.data[n * 4 + c] = imm->u[c].Uint;
1256
}
1257
1258
int Source::inferSysValDirection(unsigned sn) const
1259
{
1260
switch (sn) {
1261
case TGSI_SEMANTIC_INSTANCEID:
1262
case TGSI_SEMANTIC_VERTEXID:
1263
return 1;
1264
case TGSI_SEMANTIC_LAYER:
1265
#if 0
1266
case TGSI_SEMANTIC_VIEWPORTINDEX:
1267
return 0;
1268
#endif
1269
case TGSI_SEMANTIC_PRIMID:
1270
return (info->type == PIPE_SHADER_FRAGMENT) ? 1 : 0;
1271
default:
1272
return 0;
1273
}
1274
}
1275
1276
bool Source::scanDeclaration(const struct tgsi_full_declaration *decl)
1277
{
1278
unsigned i, c;
1279
unsigned sn = TGSI_SEMANTIC_GENERIC;
1280
unsigned si = 0;
1281
const unsigned first = decl->Range.First, last = decl->Range.Last;
1282
const int arrayId = decl->Array.ArrayID;
1283
1284
if (decl->Declaration.Semantic) {
1285
sn = decl->Semantic.Name;
1286
si = decl->Semantic.Index;
1287
}
1288
1289
if (decl->Declaration.Local || decl->Declaration.File == TGSI_FILE_ADDRESS) {
1290
for (i = first; i <= last; ++i) {
1291
for (c = 0; c < 4; ++c) {
1292
locals.insert(
1293
Location(decl->Declaration.File, decl->Dim.Index2D, i, c));
1294
}
1295
}
1296
}
1297
1298
switch (decl->Declaration.File) {
1299
case TGSI_FILE_INPUT:
1300
if (info->type == PIPE_SHADER_VERTEX) {
1301
// all vertex attributes are equal
1302
for (i = first; i <= last; ++i) {
1303
info_out->in[i].sn = TGSI_SEMANTIC_GENERIC;
1304
info_out->in[i].si = i;
1305
}
1306
} else {
1307
for (i = first; i <= last; ++i, ++si) {
1308
info_out->in[i].id = i;
1309
info_out->in[i].sn = sn;
1310
info_out->in[i].si = si;
1311
if (info->type == PIPE_SHADER_FRAGMENT) {
1312
// translate interpolation mode
1313
switch (decl->Interp.Interpolate) {
1314
case TGSI_INTERPOLATE_CONSTANT:
1315
info_out->in[i].flat = 1;
1316
break;
1317
case TGSI_INTERPOLATE_COLOR:
1318
info_out->in[i].sc = 1;
1319
break;
1320
case TGSI_INTERPOLATE_LINEAR:
1321
info_out->in[i].linear = 1;
1322
break;
1323
default:
1324
break;
1325
}
1326
if (decl->Interp.Location)
1327
info_out->in[i].centroid = 1;
1328
}
1329
1330
if (sn == TGSI_SEMANTIC_PATCH)
1331
info_out->in[i].patch = 1;
1332
if (sn == TGSI_SEMANTIC_PATCH)
1333
info_out->numPatchConstants = MAX2(info_out->numPatchConstants, si + 1);
1334
}
1335
}
1336
break;
1337
case TGSI_FILE_OUTPUT:
1338
for (i = first; i <= last; ++i, ++si) {
1339
switch (sn) {
1340
case TGSI_SEMANTIC_POSITION:
1341
if (info->type == PIPE_SHADER_FRAGMENT)
1342
info_out->io.fragDepth = i;
1343
else
1344
if (clipVertexOutput < 0)
1345
clipVertexOutput = i;
1346
break;
1347
case TGSI_SEMANTIC_COLOR:
1348
if (info->type == PIPE_SHADER_FRAGMENT)
1349
info_out->prop.fp.numColourResults++;
1350
break;
1351
case TGSI_SEMANTIC_EDGEFLAG:
1352
info_out->io.edgeFlagOut = i;
1353
break;
1354
case TGSI_SEMANTIC_CLIPVERTEX:
1355
clipVertexOutput = i;
1356
break;
1357
case TGSI_SEMANTIC_CLIPDIST:
1358
info_out->io.genUserClip = -1;
1359
break;
1360
case TGSI_SEMANTIC_SAMPLEMASK:
1361
info_out->io.sampleMask = i;
1362
break;
1363
case TGSI_SEMANTIC_VIEWPORT_INDEX:
1364
info->io.viewportId = i;
1365
break;
1366
case TGSI_SEMANTIC_PATCH:
1367
info_out->numPatchConstants = MAX2(info_out->numPatchConstants, si + 1);
1368
FALLTHROUGH;
1369
case TGSI_SEMANTIC_TESSOUTER:
1370
case TGSI_SEMANTIC_TESSINNER:
1371
info_out->out[i].patch = 1;
1372
break;
1373
default:
1374
break;
1375
}
1376
info_out->out[i].id = i;
1377
info_out->out[i].sn = sn;
1378
info_out->out[i].si = si;
1379
}
1380
break;
1381
case TGSI_FILE_SYSTEM_VALUE:
1382
switch (sn) {
1383
case TGSI_SEMANTIC_INSTANCEID:
1384
info_out->io.instanceId = first;
1385
break;
1386
case TGSI_SEMANTIC_VERTEXID:
1387
info_out->io.vertexId = first;
1388
break;
1389
case TGSI_SEMANTIC_BASEVERTEX:
1390
case TGSI_SEMANTIC_BASEINSTANCE:
1391
case TGSI_SEMANTIC_DRAWID:
1392
info_out->prop.vp.usesDrawParameters = true;
1393
break;
1394
case TGSI_SEMANTIC_SAMPLEID:
1395
case TGSI_SEMANTIC_SAMPLEPOS:
1396
prog->persampleInvocation = true;
1397
break;
1398
case TGSI_SEMANTIC_SAMPLEMASK:
1399
info_out->prop.fp.usesSampleMaskIn = true;
1400
break;
1401
default:
1402
break;
1403
}
1404
for (i = first; i <= last; ++i, ++si) {
1405
info_out->sv[i].sn = sn;
1406
info_out->sv[i].si = si;
1407
info_out->sv[i].input = inferSysValDirection(sn);
1408
1409
switch (sn) {
1410
case TGSI_SEMANTIC_TESSOUTER:
1411
case TGSI_SEMANTIC_TESSINNER:
1412
info_out->sv[i].patch = 1;
1413
break;
1414
}
1415
}
1416
break;
1417
/*
1418
case TGSI_FILE_RESOURCE:
1419
for (i = first; i <= last; ++i) {
1420
resources[i].target = decl->Resource.Resource;
1421
resources[i].raw = decl->Resource.Raw;
1422
resources[i].slot = i;
1423
}
1424
break;
1425
*/
1426
case TGSI_FILE_SAMPLER_VIEW:
1427
for (i = first; i <= last; ++i)
1428
textureViews[i].target = decl->SamplerView.Resource;
1429
break;
1430
case TGSI_FILE_MEMORY:
1431
for (i = first; i <= last; ++i)
1432
memoryFiles[i].mem_type = decl->Declaration.MemType;
1433
break;
1434
case TGSI_FILE_NULL:
1435
case TGSI_FILE_TEMPORARY:
1436
for (i = first; i <= last; ++i)
1437
tempArrayId[i] = arrayId;
1438
if (arrayId)
1439
tempArrayInfo.insert(std::make_pair(arrayId, std::make_pair(
1440
first, last - first + 1)));
1441
break;
1442
case TGSI_FILE_BUFFER:
1443
for (i = first; i <= last; ++i)
1444
bufferAtomics[i] = decl->Declaration.Atomic;
1445
if (info->type == PIPE_SHADER_COMPUTE && info->target < NVISA_GF100_CHIPSET) {
1446
for (i = first; i <= last; i++) {
1447
bufferIds.insert(std::make_pair(i, gmemSlot));
1448
info_out->prop.cp.gmem[gmemSlot++] = {.valid = 1, .image = 0, .slot = i};
1449
assert(gmemSlot < 16);
1450
}
1451
}
1452
break;
1453
case TGSI_FILE_IMAGE:
1454
if (info->type == PIPE_SHADER_COMPUTE && info->target < NVISA_GF100_CHIPSET) {
1455
for (i = first; i <= last; i++) {
1456
imageIds.insert(std::make_pair(i, gmemSlot));
1457
info_out->prop.cp.gmem[gmemSlot++] = {.valid = 1, .image = 1, .slot = i};
1458
assert(gmemSlot < 16);
1459
}
1460
}
1461
break;
1462
case TGSI_FILE_ADDRESS:
1463
case TGSI_FILE_CONSTANT:
1464
case TGSI_FILE_IMMEDIATE:
1465
case TGSI_FILE_SAMPLER:
1466
break;
1467
default:
1468
ERROR("unhandled TGSI_FILE %d\n", decl->Declaration.File);
1469
return false;
1470
}
1471
return true;
1472
}
1473
1474
inline bool Source::isEdgeFlagPassthrough(const Instruction& insn) const
1475
{
1476
return insn.getOpcode() == TGSI_OPCODE_MOV &&
1477
insn.getDst(0).getIndex(0) == info_out->io.edgeFlagOut &&
1478
insn.getSrc(0).getFile() == TGSI_FILE_INPUT;
1479
}
1480
1481
void Source::scanInstructionSrc(const Instruction& insn,
1482
const Instruction::SrcRegister& src,
1483
unsigned mask)
1484
{
1485
if (src.getFile() == TGSI_FILE_TEMPORARY) {
1486
if (src.isIndirect(0))
1487
indirectTempArrays.insert(src.getArrayId());
1488
} else
1489
if (src.getFile() == TGSI_FILE_OUTPUT) {
1490
if (src.isIndirect(0)) {
1491
// We don't know which one is accessed, just mark everything for
1492
// reading. This is an extremely unlikely occurrence.
1493
for (unsigned i = 0; i < info_out->numOutputs; ++i)
1494
info_out->out[i].oread = 1;
1495
} else {
1496
info_out->out[src.getIndex(0)].oread = 1;
1497
}
1498
}
1499
if (src.getFile() == TGSI_FILE_SYSTEM_VALUE) {
1500
if (info_out->sv[src.getIndex(0)].sn == TGSI_SEMANTIC_SAMPLEPOS)
1501
info_out->prop.fp.readsSampleLocations = true;
1502
}
1503
if (src.getFile() != TGSI_FILE_INPUT)
1504
return;
1505
1506
if (src.isIndirect(0)) {
1507
for (unsigned i = 0; i < info_out->numInputs; ++i)
1508
info_out->in[i].mask = 0xf;
1509
} else {
1510
const int i = src.getIndex(0);
1511
for (unsigned c = 0; c < 4; ++c) {
1512
if (!(mask & (1 << c)))
1513
continue;
1514
int k = src.getSwizzle(c);
1515
if (k <= TGSI_SWIZZLE_W)
1516
info_out->in[i].mask |= 1 << k;
1517
}
1518
switch (info_out->in[i].sn) {
1519
case TGSI_SEMANTIC_PSIZE:
1520
case TGSI_SEMANTIC_PRIMID:
1521
case TGSI_SEMANTIC_FOG:
1522
info_out->in[i].mask &= 0x1;
1523
break;
1524
case TGSI_SEMANTIC_PCOORD:
1525
info_out->in[i].mask &= 0x3;
1526
break;
1527
default:
1528
break;
1529
}
1530
}
1531
}
1532
1533
bool Source::scanInstruction(const struct tgsi_full_instruction *inst)
1534
{
1535
Instruction insn(inst);
1536
1537
if (insn.getOpcode() == TGSI_OPCODE_BARRIER)
1538
info_out->numBarriers = 1;
1539
1540
if (insn.getOpcode() == TGSI_OPCODE_FBFETCH)
1541
info_out->prop.fp.readsFramebuffer = true;
1542
1543
if (insn.getOpcode() == TGSI_OPCODE_INTERP_SAMPLE)
1544
info_out->prop.fp.readsSampleLocations = true;
1545
1546
if (insn.getOpcode() == TGSI_OPCODE_DEMOTE)
1547
info_out->prop.fp.usesDiscard = true;
1548
1549
if (insn.dstCount()) {
1550
Instruction::DstRegister dst = insn.getDst(0);
1551
1552
if (insn.getOpcode() == TGSI_OPCODE_STORE &&
1553
dst.getFile() != TGSI_FILE_MEMORY) {
1554
info_out->io.globalAccess |= 0x2;
1555
1556
if (dst.getFile() == TGSI_FILE_INPUT) {
1557
// TODO: Handle indirect somehow?
1558
const int i = dst.getIndex(0);
1559
info_out->in[i].mask |= 1;
1560
}
1561
}
1562
1563
if (dst.getFile() == TGSI_FILE_OUTPUT) {
1564
if (dst.isIndirect(0))
1565
for (unsigned i = 0; i < info_out->numOutputs; ++i)
1566
info_out->out[i].mask = 0xf;
1567
else
1568
info_out->out[dst.getIndex(0)].mask |= dst.getMask();
1569
1570
if (info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_PSIZE ||
1571
info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_PRIMID ||
1572
info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_LAYER ||
1573
info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_VIEWPORT_INDEX ||
1574
info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_FOG)
1575
info_out->out[dst.getIndex(0)].mask &= 1;
1576
1577
if (isEdgeFlagPassthrough(insn))
1578
info_out->io.edgeFlagIn = insn.getSrc(0).getIndex(0);
1579
} else
1580
if (dst.getFile() == TGSI_FILE_TEMPORARY) {
1581
if (dst.isIndirect(0))
1582
indirectTempArrays.insert(dst.getArrayId());
1583
} else
1584
if (dst.getFile() == TGSI_FILE_BUFFER ||
1585
dst.getFile() == TGSI_FILE_IMAGE ||
1586
(dst.getFile() == TGSI_FILE_MEMORY &&
1587
memoryFiles[dst.getIndex(0)].mem_type == TGSI_MEMORY_TYPE_GLOBAL)) {
1588
info_out->io.globalAccess |= 0x2;
1589
}
1590
}
1591
1592
if (insn.srcCount() && (
1593
insn.getSrc(0).getFile() != TGSI_FILE_MEMORY ||
1594
memoryFiles[insn.getSrc(0).getIndex(0)].mem_type ==
1595
TGSI_MEMORY_TYPE_GLOBAL)) {
1596
switch (insn.getOpcode()) {
1597
case TGSI_OPCODE_ATOMUADD:
1598
case TGSI_OPCODE_ATOMXCHG:
1599
case TGSI_OPCODE_ATOMCAS:
1600
case TGSI_OPCODE_ATOMAND:
1601
case TGSI_OPCODE_ATOMOR:
1602
case TGSI_OPCODE_ATOMXOR:
1603
case TGSI_OPCODE_ATOMUMIN:
1604
case TGSI_OPCODE_ATOMIMIN:
1605
case TGSI_OPCODE_ATOMUMAX:
1606
case TGSI_OPCODE_ATOMIMAX:
1607
case TGSI_OPCODE_ATOMFADD:
1608
case TGSI_OPCODE_ATOMDEC_WRAP:
1609
case TGSI_OPCODE_ATOMINC_WRAP:
1610
case TGSI_OPCODE_LOAD:
1611
info_out->io.globalAccess |= (insn.getOpcode() == TGSI_OPCODE_LOAD) ?
1612
0x1 : 0x2;
1613
break;
1614
}
1615
}
1616
1617
1618
for (unsigned s = 0; s < insn.srcCount(); ++s)
1619
scanInstructionSrc(insn, insn.getSrc(s), insn.srcMask(s));
1620
1621
for (unsigned s = 0; s < insn.getNumTexOffsets(); ++s)
1622
scanInstructionSrc(insn, insn.getTexOffset(s), insn.texOffsetMask());
1623
1624
return true;
1625
}
1626
1627
nv50_ir::TexInstruction::Target
1628
Instruction::getTexture(const tgsi::Source *code, int s) const
1629
{
1630
// XXX: indirect access
1631
unsigned int r;
1632
1633
switch (getSrc(s).getFile()) {
1634
/*
1635
case TGSI_FILE_RESOURCE:
1636
r = getSrc(s).getIndex(0);
1637
return translateTexture(code->resources.at(r).target);
1638
*/
1639
case TGSI_FILE_SAMPLER_VIEW:
1640
r = getSrc(s).getIndex(0);
1641
return translateTexture(code->textureViews.at(r).target);
1642
default:
1643
return translateTexture(insn->Texture.Texture);
1644
}
1645
}
1646
1647
} // namespace tgsi
1648
1649
namespace {
1650
1651
using namespace nv50_ir;
1652
1653
class Converter : public ConverterCommon
1654
{
1655
public:
1656
Converter(Program *, const tgsi::Source *, nv50_ir_prog_info_out *);
1657
~Converter();
1658
1659
bool run();
1660
1661
private:
1662
Value *shiftAddress(Value *);
1663
Value *getVertexBase(int s);
1664
Value *getOutputBase(int s);
1665
DataArray *getArrayForFile(unsigned file, int idx);
1666
Value *fetchSrc(int s, int c);
1667
Value *fetchDst(int d, int c);
1668
Value *acquireDst(int d, int c);
1669
void storeDst(int d, int c, Value *);
1670
1671
Value *fetchSrc(const tgsi::Instruction::SrcRegister src, int c, Value *ptr);
1672
void storeDst(const tgsi::Instruction::DstRegister dst, int c,
1673
Value *val, Value *ptr);
1674
1675
void adjustTempIndex(int arrayId, int &idx, int &idx2d) const;
1676
Value *applySrcMod(Value *, int s, int c);
1677
1678
Symbol *makeSym(uint file, int fileIndex, int idx, int c, uint32_t addr);
1679
Symbol *srcToSym(tgsi::Instruction::SrcRegister, int c);
1680
Symbol *dstToSym(tgsi::Instruction::DstRegister, int c);
1681
1682
bool isSubGroupMask(uint8_t semantic);
1683
1684
bool handleInstruction(const struct tgsi_full_instruction *);
1685
void exportOutputs();
1686
inline bool isEndOfSubroutine(uint ip);
1687
1688
void loadProjTexCoords(Value *dst[4], Value *src[4], unsigned int mask);
1689
1690
// R,S,L,C,Dx,Dy encode TGSI sources for respective values (0xSf for auto)
1691
void setTexRS(TexInstruction *, unsigned int& s, int R, int S);
1692
void handleTEX(Value *dst0[4], int R, int S, int L, int C, int Dx, int Dy);
1693
void handleTXF(Value *dst0[4], int R, int L_M);
1694
void handleTXQ(Value *dst0[4], enum TexQuery, int R);
1695
void handleFBFETCH(Value *dst0[4]);
1696
void handleLIT(Value *dst0[4]);
1697
1698
// Symbol *getResourceBase(int r);
1699
void getImageCoords(std::vector<Value *>&, int s);
1700
int remapImageId(int);
1701
int remapBufferId(int);
1702
1703
void handleLOAD(Value *dst0[4]);
1704
void handleSTORE();
1705
void handleATOM(Value *dst0[4], DataType, uint16_t subOp);
1706
1707
void handleINTERP(Value *dst0[4]);
1708
1709
Value *interpolate(tgsi::Instruction::SrcRegister, int c, Value *ptr);
1710
1711
void insertConvergenceOps(BasicBlock *conv, BasicBlock *fork);
1712
1713
Value *buildDot(int dim);
1714
1715
class BindArgumentsPass : public Pass {
1716
public:
1717
BindArgumentsPass(Converter &conv) : conv(conv), sub(NULL) { }
1718
1719
private:
1720
Converter &conv;
1721
Subroutine *sub;
1722
1723
inline const Location *getValueLocation(Subroutine *, Value *);
1724
1725
template<typename T> inline void
1726
updateCallArgs(Instruction *i, void (Instruction::*setArg)(int, Value *),
1727
T (Function::*proto));
1728
1729
template<typename T> inline void
1730
updatePrototype(BitSet *set, void (Function::*updateSet)(),
1731
T (Function::*proto));
1732
1733
protected:
1734
bool visit(Function *);
1735
bool visit(BasicBlock *bb) { return false; }
1736
};
1737
1738
private:
1739
const tgsi::Source *code;
1740
1741
uint ip; // instruction pointer
1742
1743
tgsi::Instruction tgsi;
1744
1745
DataType dstTy;
1746
DataType srcTy;
1747
1748
DataArray tData; // TGSI_FILE_TEMPORARY
1749
DataArray lData; // TGSI_FILE_TEMPORARY, for indirect arrays
1750
DataArray aData; // TGSI_FILE_ADDRESS
1751
DataArray oData; // TGSI_FILE_OUTPUT (if outputs in registers)
1752
1753
Value *zero;
1754
1755
Value *vtxBase[5]; // base address of vertex in primitive (for TP/GP)
1756
uint8_t vtxBaseValid;
1757
1758
Stack condBBs; // fork BB, then else clause BB
1759
Stack joinBBs; // fork BB, for inserting join ops on ENDIF
1760
Stack loopBBs; // loop headers
1761
Stack breakBBs; // end of / after loop
1762
1763
Value *viewport;
1764
};
1765
1766
Symbol *
1767
Converter::srcToSym(tgsi::Instruction::SrcRegister src, int c)
1768
{
1769
const int swz = src.getSwizzle(c);
1770
1771
/* TODO: Use Array ID when it's available for the index */
1772
return makeSym(src.getFile(),
1773
src.is2D() ? src.getIndex(1) : 0,
1774
src.getIndex(0), swz,
1775
src.getIndex(0) * 16 + swz * 4);
1776
}
1777
1778
Symbol *
1779
Converter::dstToSym(tgsi::Instruction::DstRegister dst, int c)
1780
{
1781
/* TODO: Use Array ID when it's available for the index */
1782
return makeSym(dst.getFile(),
1783
dst.is2D() ? dst.getIndex(1) : 0,
1784
dst.getIndex(0), c,
1785
dst.getIndex(0) * 16 + c * 4);
1786
}
1787
1788
Symbol *
1789
Converter::makeSym(uint tgsiFile, int fileIdx, int idx, int c, uint32_t address)
1790
{
1791
Symbol *sym = new_Symbol(prog, tgsi::translateFile(tgsiFile));
1792
1793
sym->reg.fileIndex = fileIdx;
1794
1795
if (tgsiFile == TGSI_FILE_MEMORY) {
1796
switch (code->memoryFiles[fileIdx].mem_type) {
1797
case TGSI_MEMORY_TYPE_GLOBAL:
1798
/* No-op this is the default for TGSI_FILE_MEMORY */
1799
sym->setFile(FILE_MEMORY_GLOBAL);
1800
break;
1801
case TGSI_MEMORY_TYPE_SHARED:
1802
sym->setFile(FILE_MEMORY_SHARED);
1803
address += info->prop.cp.inputOffset;
1804
break;
1805
case TGSI_MEMORY_TYPE_INPUT:
1806
assert(prog->getType() == Program::TYPE_COMPUTE);
1807
assert(idx == -1);
1808
sym->setFile(FILE_SHADER_INPUT);
1809
address += info->prop.cp.inputOffset;
1810
break;
1811
default:
1812
assert(0); /* TODO: Add support for global and private memory */
1813
}
1814
}
1815
1816
if (idx >= 0) {
1817
if (sym->reg.file == FILE_SHADER_INPUT)
1818
sym->setOffset(info_out->in[idx].slot[c] * 4);
1819
else
1820
if (sym->reg.file == FILE_SHADER_OUTPUT)
1821
sym->setOffset(info_out->out[idx].slot[c] * 4);
1822
else
1823
if (sym->reg.file == FILE_SYSTEM_VALUE)
1824
sym->setSV(tgsi::translateSysVal(info_out->sv[idx].sn), c);
1825
else
1826
sym->setOffset(address);
1827
} else {
1828
sym->setOffset(address);
1829
}
1830
return sym;
1831
}
1832
1833
Value *
1834
Converter::interpolate(tgsi::Instruction::SrcRegister src, int c, Value *ptr)
1835
{
1836
operation op;
1837
1838
// XXX: no way to know interpolation mode if we don't know what's accessed
1839
const uint8_t mode = translateInterpMode(&info_out->in[ptr ? 0 :
1840
src.getIndex(0)], op);
1841
1842
Instruction *insn = new_Instruction(func, op, TYPE_F32);
1843
1844
insn->setDef(0, getScratch());
1845
insn->setSrc(0, srcToSym(src, c));
1846
if (op == OP_PINTERP)
1847
insn->setSrc(1, fragCoord[3]);
1848
if (ptr)
1849
insn->setIndirect(0, 0, ptr);
1850
1851
insn->setInterpolate(mode);
1852
1853
bb->insertTail(insn);
1854
return insn->getDef(0);
1855
}
1856
1857
Value *
1858
Converter::applySrcMod(Value *val, int s, int c)
1859
{
1860
Modifier m = tgsi.getSrc(s).getMod(c);
1861
DataType ty = tgsi.inferSrcType();
1862
1863
if (m & Modifier(NV50_IR_MOD_ABS))
1864
val = mkOp1v(OP_ABS, ty, getScratch(), val);
1865
1866
if (m & Modifier(NV50_IR_MOD_NEG))
1867
val = mkOp1v(OP_NEG, ty, getScratch(), val);
1868
1869
return val;
1870
}
1871
1872
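// For 2D-indexed inputs (GP/TP), fetch and cache the base address of the
// referenced vertex with OP_PFETCH; vtxBaseValid tracks which source slots
// already have a cached base.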
Value *
1873
Converter::getVertexBase(int s)
1874
{
1875
assert(s < 5);
1876
if (!(vtxBaseValid & (1 << s))) {
1877
const int index = tgsi.getSrc(s).getIndex(1);
1878
Value *rel = NULL;
1879
if (tgsi.getSrc(s).isIndirect(1))
1880
rel = fetchSrc(tgsi.getSrc(s).getIndirect(1), 0, NULL);
1881
vtxBaseValid |= 1 << s;
1882
vtxBase[s] = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
1883
mkImm(index), rel);
1884
}
1885
return vtxBase[s];
1886
}
1887
1888
Value *
1889
Converter::getOutputBase(int s)
1890
{
1891
assert(s < 5);
1892
if (!(vtxBaseValid & (1 << s))) {
1893
Value *offset = loadImm(NULL, tgsi.getSrc(s).getIndex(1));
1894
if (tgsi.getSrc(s).isIndirect(1))
1895
offset = mkOp2v(OP_ADD, TYPE_U32, getSSA(),
1896
fetchSrc(tgsi.getSrc(s).getIndirect(1), 0, NULL),
1897
offset);
1898
vtxBaseValid |= 1 << s;
1899
vtxBase[s] = mkOp2v(OP_ADD, TYPE_U32, getSSA(), outBase, offset);
1900
}
1901
return vtxBase[s];
1902
}
1903
1904
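// Top-level source fetch: resolve first-dimension indirection, set up the
// second (2D) dimension (vertex base, output base or constant buffer index),
// then apply ABS/NEG source modifiers.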
Value *
1905
Converter::fetchSrc(int s, int c)
1906
{
1907
Value *res;
1908
Value *ptr = NULL, *dimRel = NULL;
1909
1910
tgsi::Instruction::SrcRegister src = tgsi.getSrc(s);
1911
1912
if (src.isIndirect(0))
1913
ptr = fetchSrc(src.getIndirect(0), 0, NULL);
1914
1915
if (src.is2D()) {
1916
switch (src.getFile()) {
1917
case TGSI_FILE_OUTPUT:
1918
dimRel = getOutputBase(s);
1919
break;
1920
case TGSI_FILE_INPUT:
1921
dimRel = getVertexBase(s);
1922
break;
1923
case TGSI_FILE_CONSTANT:
1924
// on NVC0, this is valid and c{I+J}[k] == cI[(J << 16) + k]
1925
if (src.isIndirect(1))
1926
dimRel = fetchSrc(src.getIndirect(1), 0, 0);
1927
break;
1928
default:
1929
break;
1930
}
1931
}
1932
1933
res = fetchSrc(src, c, ptr);
1934
1935
if (dimRel)
1936
res->getInsn()->setIndirect(0, 1, dimRel);
1937
1938
return applySrcMod(res, s, c);
1939
}
1940
1941
Value *
1942
Converter::fetchDst(int d, int c)
1943
{
1944
Value *res;
1945
Value *ptr = NULL, *dimRel = NULL;
1946
1947
tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
1948
1949
if (dst.isIndirect(0))
1950
ptr = fetchSrc(dst.getIndirect(0), 0, NULL);
1951
1952
if (dst.is2D()) {
1953
switch (dst.getFile()) {
1954
case TGSI_FILE_OUTPUT:
1955
assert(0); // TODO
1956
dimRel = NULL;
1957
break;
1958
case TGSI_FILE_INPUT:
1959
assert(0); // TODO
1960
dimRel = NULL;
1961
break;
1962
case TGSI_FILE_CONSTANT:
1963
// on NVC0, this is valid and c{I+J}[k] == cI[(J << 16) + k]
1964
if (dst.isIndirect(1))
1965
dimRel = fetchSrc(dst.getIndirect(1), 0, 0);
1966
break;
1967
default:
1968
break;
1969
}
1970
}
1971
1972
struct tgsi_full_src_register fsr = dst.asSrc();
1973
tgsi::Instruction::SrcRegister src(&fsr);
1974
res = fetchSrc(src, c, ptr);
1975
1976
if (dimRel)
1977
res->getInsn()->setIndirect(0, 1, dimRel);
1978
1979
return res;
1980
}
1981
1982
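// Map a TGSI register file onto its backing DataArray: plain temporaries
// (tData), indirectly addressed temporary arrays (lData), address registers
// (aData), or fragment shader outputs kept in registers (oData).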
Converter::DataArray *
1983
Converter::getArrayForFile(unsigned file, int idx)
1984
{
1985
switch (file) {
1986
case TGSI_FILE_TEMPORARY:
1987
return idx == 0 ? &tData : &lData;
1988
case TGSI_FILE_ADDRESS:
1989
return &aData;
1990
case TGSI_FILE_OUTPUT:
1991
assert(prog->getType() == Program::TYPE_FRAGMENT);
1992
return &oData;
1993
default:
1994
assert(!"invalid/unhandled TGSI source file");
1995
return NULL;
1996
}
1997
}
1998
1999
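// Turn a vec4 register index into a byte offset (<< 4) held in an address
// register.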
Value *
2000
Converter::shiftAddress(Value *index)
2001
{
2002
if (!index)
2003
return NULL;
2004
return mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), index, mkImm(4));
2005
}
2006
2007
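// Temporaries belonging to an indirectly addressed array live in lData;
// rebase the index by the array's offset and switch to the second dimension.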
void
2008
Converter::adjustTempIndex(int arrayId, int &idx, int &idx2d) const
2009
{
2010
std::map<int, int>::const_iterator it =
2011
code->indirectTempOffsets.find(arrayId);
2012
if (it == code->indirectTempOffsets.end())
2013
return;
2014
2015
idx2d = 1;
2016
idx += it->second;
2017
}
2018
2019
bool
2020
Converter::isSubGroupMask(uint8_t semantic)
2021
{
2022
switch (semantic) {
2023
case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
2024
case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
2025
case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
2026
case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
2027
case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
2028
return true;
2029
default:
2030
return false;
2031
}
2032
}
2033
2034
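// Fetch one component for a specific register file: immediates and constants
// load directly, FS inputs are interpolated, system values use OP_RDSV, and
// register-backed files go through their DataArray.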
Value *
2035
Converter::fetchSrc(tgsi::Instruction::SrcRegister src, int c, Value *ptr)
2036
{
2037
int idx2d = src.is2D() ? src.getIndex(1) : 0;
2038
int idx = src.getIndex(0);
2039
const int swz = src.getSwizzle(c);
2040
Instruction *ld;
2041
2042
switch (src.getFile()) {
2043
case TGSI_FILE_IMMEDIATE:
2044
assert(!ptr);
2045
return loadImm(NULL, code->immd.data[idx * 4 + swz]);
2046
case TGSI_FILE_CONSTANT:
2047
return mkLoadv(TYPE_U32, srcToSym(src, c), shiftAddress(ptr));
2048
case TGSI_FILE_INPUT:
2049
if (prog->getType() == Program::TYPE_FRAGMENT) {
2050
// don't load masked inputs, won't be assigned a slot
2051
if (!ptr && !(info_out->in[idx].mask & (1 << swz)))
2052
return loadImm(NULL, swz == TGSI_SWIZZLE_W ? 1.0f : 0.0f);
2053
return interpolate(src, c, shiftAddress(ptr));
2054
} else
2055
if (prog->getType() == Program::TYPE_GEOMETRY) {
2056
if (!ptr && info_out->in[idx].sn == TGSI_SEMANTIC_PRIMID)
2057
return mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_PRIMITIVE_ID, 0));
2058
// XXX: This is going to be a problem with scalar arrays, i.e. when
2059
// we cannot assume that the address is given in units of vec4.
2060
//
2061
// nv50 and nvc0 need different things here, so let the lowering
2062
// passes decide what to do with the address
2063
if (ptr)
2064
return mkLoadv(TYPE_U32, srcToSym(src, c), ptr);
2065
}
2066
ld = mkLoad(TYPE_U32, getSSA(), srcToSym(src, c), shiftAddress(ptr));
2067
ld->perPatch = info_out->in[idx].patch;
2068
return ld->getDef(0);
2069
case TGSI_FILE_OUTPUT:
2070
assert(prog->getType() == Program::TYPE_TESSELLATION_CONTROL);
2071
ld = mkLoad(TYPE_U32, getSSA(), srcToSym(src, c), shiftAddress(ptr));
2072
ld->perPatch = info_out->out[idx].patch;
2073
return ld->getDef(0);
2074
case TGSI_FILE_SYSTEM_VALUE:
2075
assert(!ptr);
2076
if (info_out->sv[idx].sn == TGSI_SEMANTIC_THREAD_ID &&
2077
info->prop.cp.numThreads[swz] == 1)
2078
return loadImm(NULL, 0u);
2079
if (isSubGroupMask(info_out->sv[idx].sn) && swz > 0)
2080
return loadImm(NULL, 0u);
2081
if (info_out->sv[idx].sn == TGSI_SEMANTIC_SUBGROUP_SIZE)
2082
return loadImm(NULL, 32u);
2083
ld = mkOp1(OP_RDSV, TYPE_U32, getSSA(), srcToSym(src, c));
2084
ld->perPatch = info_out->sv[idx].patch;
2085
return ld->getDef(0);
2086
case TGSI_FILE_TEMPORARY: {
2087
int arrayid = src.getArrayId();
2088
if (!arrayid)
2089
arrayid = code->tempArrayId[idx];
2090
adjustTempIndex(arrayid, idx, idx2d);
2091
}
2092
FALLTHROUGH;
2093
default:
2094
return getArrayForFile(src.getFile(), idx2d)->load(
2095
sub.cur->values, idx, swz, shiftAddress(ptr));
2096
}
2097
}
2098
2099
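// Return the Value to write for (dst d, channel c): NULL for masked channels
// and memory-like files, a scratch value when the result cannot be written in
// place (indirect stores, non-FS outputs, system values), otherwise the
// DataArray slot.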
Value *
2100
Converter::acquireDst(int d, int c)
2101
{
2102
const tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
2103
const unsigned f = dst.getFile();
2104
int idx = dst.getIndex(0);
2105
int idx2d = dst.is2D() ? dst.getIndex(1) : 0;
2106
2107
if (dst.isMasked(c) || f == TGSI_FILE_BUFFER || f == TGSI_FILE_MEMORY ||
2108
f == TGSI_FILE_IMAGE)
2109
return NULL;
2110
2111
if (dst.isIndirect(0) ||
2112
f == TGSI_FILE_SYSTEM_VALUE ||
2113
(f == TGSI_FILE_OUTPUT && prog->getType() != Program::TYPE_FRAGMENT))
2114
return getScratch();
2115
2116
if (f == TGSI_FILE_TEMPORARY) {
2117
int arrayid = dst.getArrayId();
2118
if (!arrayid)
2119
arrayid = code->tempArrayId[idx];
2120
adjustTempIndex(arrayid, idx, idx2d);
2121
}
2122
2123
return getArrayForFile(f, idx2d)->acquire(sub.cur->values, idx, c);
2124
}
2125
2126
void
2127
Converter::storeDst(int d, int c, Value *val)
2128
{
2129
const tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
2130
2131
if (tgsi.getSaturate()) {
2132
mkOp1(OP_SAT, dstTy, val, val);
2133
}
2134
2135
Value *ptr = NULL;
2136
if (dst.isIndirect(0))
2137
ptr = shiftAddress(fetchSrc(dst.getIndirect(0), 0, NULL));
2138
2139
if (info_out->io.genUserClip > 0 &&
2140
dst.getFile() == TGSI_FILE_OUTPUT &&
2141
!dst.isIndirect(0) && dst.getIndex(0) == code->clipVertexOutput) {
2142
mkMov(clipVtx[c], val);
2143
val = clipVtx[c];
2144
}
2145
2146
storeDst(dst, c, val, ptr);
2147
}
2148
2149
void
2150
Converter::storeDst(const tgsi::Instruction::DstRegister dst, int c,
2151
Value *val, Value *ptr)
2152
{
2153
const unsigned f = dst.getFile();
2154
int idx = dst.getIndex(0);
2155
int idx2d = dst.is2D() ? dst.getIndex(1) : 0;
2156
2157
if (f == TGSI_FILE_SYSTEM_VALUE) {
2158
assert(!ptr);
2159
mkOp2(OP_WRSV, TYPE_U32, NULL, dstToSym(dst, c), val);
2160
} else
2161
if (f == TGSI_FILE_OUTPUT && prog->getType() != Program::TYPE_FRAGMENT) {
2162
2163
if (ptr || (info_out->out[idx].mask & (1 << c))) {
2164
/* Save the viewport index into a scratch register so that it can be
2165
exported at EMIT time */
2166
if (info_out->out[idx].sn == TGSI_SEMANTIC_VIEWPORT_INDEX &&
2167
prog->getType() == Program::TYPE_GEOMETRY &&
2168
viewport != NULL)
2169
mkOp1(OP_MOV, TYPE_U32, viewport, val);
2170
else
2171
mkStore(OP_EXPORT, TYPE_U32, dstToSym(dst, c), ptr, val)->perPatch =
2172
info_out->out[idx].patch;
2173
}
2174
} else
2175
if (f == TGSI_FILE_TEMPORARY ||
2176
f == TGSI_FILE_ADDRESS ||
2177
f == TGSI_FILE_OUTPUT) {
2178
if (f == TGSI_FILE_TEMPORARY) {
2179
int arrayid = dst.getArrayId();
2180
if (!arrayid)
2181
arrayid = code->tempArrayId[idx];
2182
adjustTempIndex(arrayid, idx, idx2d);
2183
}
2184
2185
getArrayForFile(f, idx2d)->store(sub.cur->values, idx, c, ptr, val);
2186
} else {
2187
assert(!"invalid dst file");
2188
}
2189
}
2190
2191
#define FOR_EACH_DST_ENABLED_CHANNEL(d, chan, inst) \
2192
for (chan = 0; chan < 4; ++chan) \
2193
if (!inst.getDst(d).isMasked(chan))
2194
2195
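// Expand DPn into a MUL followed by (dim - 1) MADs, e.g. DP3:
// t = x0*x1; t = y0*y1 + t; t = z0*z1 + t.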
Value *
2196
Converter::buildDot(int dim)
2197
{
2198
assert(dim > 0);
2199
2200
Value *src0 = fetchSrc(0, 0), *src1 = fetchSrc(1, 0);
2201
Value *dotp = getScratch();
2202
2203
mkOp2(OP_MUL, TYPE_F32, dotp, src0, src1)
2204
->dnz = info->io.mul_zero_wins;
2205
2206
for (int c = 1; c < dim; ++c) {
2207
src0 = fetchSrc(0, c);
2208
src1 = fetchSrc(1, c);
2209
mkOp3(OP_MAD, TYPE_F32, dotp, src0, src1, dotp)
2210
->dnz = info->io.mul_zero_wins;
2211
}
2212
return dotp;
2213
}
2214
2215
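// Insert the convergence ops for divergent control flow: a JOIN at the head
// of the convergence block and a JOINAT right before the fork block's branch.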
void
2216
Converter::insertConvergenceOps(BasicBlock *conv, BasicBlock *fork)
2217
{
2218
FlowInstruction *join = new_FlowInstruction(func, OP_JOIN, NULL);
2219
join->fixed = 1;
2220
conv->insertHead(join);
2221
2222
assert(!fork->joinAt);
2223
fork->joinAt = new_FlowInstruction(func, OP_JOINAT, conv);
2224
fork->insertBefore(fork->getExit(), fork->joinAt);
2225
}
2226
2227
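// Attach resource (R) and sampler (S) information to a texture instruction;
// bindless handles and indirect indices become extra sources, and s is
// advanced past whatever this adds.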
void
2228
Converter::setTexRS(TexInstruction *tex, unsigned int& s, int R, int S)
2229
{
2230
unsigned rIdx = 0, sIdx = 0;
2231
2232
if (R >= 0 && tgsi.getSrc(R).getFile() != TGSI_FILE_SAMPLER) {
2233
// This is the bindless case. We have to get the actual value and pass
2234
// it in. This will be the complete handle.
2235
tex->tex.rIndirectSrc = s;
2236
tex->setSrc(s++, fetchSrc(R, 0));
2237
tex->setTexture(tgsi.getTexture(code, R), 0xff, 0x1f);
2238
tex->tex.bindless = true;
2239
return;
2240
}
2241
2242
if (R >= 0)
2243
rIdx = tgsi.getSrc(R).getIndex(0);
2244
if (S >= 0)
2245
sIdx = tgsi.getSrc(S).getIndex(0);
2246
2247
tex->setTexture(tgsi.getTexture(code, R), rIdx, sIdx);
2248
2249
if (tgsi.getSrc(R).isIndirect(0)) {
2250
tex->tex.rIndirectSrc = s;
2251
tex->setSrc(s++, fetchSrc(tgsi.getSrc(R).getIndirect(0), 0, NULL));
2252
}
2253
if (S >= 0 && tgsi.getSrc(S).isIndirect(0)) {
2254
tex->tex.sIndirectSrc = s;
2255
tex->setSrc(s++, fetchSrc(tgsi.getSrc(S).getIndirect(0), 0, NULL));
2256
}
2257
}
2258
2259
void
2260
Converter::handleTXQ(Value *dst0[4], enum TexQuery query, int R)
2261
{
2262
TexInstruction *tex = new_TexInstruction(func, OP_TXQ);
2263
tex->tex.query = query;
2264
unsigned int c, d;
2265
2266
for (d = 0, c = 0; c < 4; ++c) {
2267
if (!dst0[c])
2268
continue;
2269
tex->tex.mask |= 1 << c;
2270
tex->setDef(d++, dst0[c]);
2271
}
2272
if (query == TXQ_DIMS)
2273
tex->setSrc((c = 0), fetchSrc(0, 0)); // mip level
2274
else
2275
tex->setSrc((c = 0), zero);
2276
2277
setTexRS(tex, ++c, R, -1);
2278
2279
bb->insertTail(tex);
2280
}
2281
2282
void
2283
Converter::loadProjTexCoords(Value *dst[4], Value *src[4], unsigned int mask)
2284
{
2285
Value *proj = fetchSrc(0, 3);
2286
Instruction *insn = proj->getUniqueInsn();
2287
int c;
2288
2289
if (insn->op == OP_PINTERP) {
2290
bb->insertTail(insn = cloneForward(func, insn));
2291
insn->op = OP_LINTERP;
2292
insn->setInterpolate(NV50_IR_INTERP_LINEAR | insn->getSampleMode());
2293
insn->setSrc(1, NULL);
2294
proj = insn->getDef(0);
2295
}
2296
proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), proj);
2297
2298
for (c = 0; c < 4; ++c) {
2299
if (!(mask & (1 << c)))
2300
continue;
2301
if ((insn = src[c]->getUniqueInsn())->op != OP_PINTERP)
2302
continue;
2303
mask &= ~(1 << c);
2304
2305
bb->insertTail(insn = cloneForward(func, insn));
2306
insn->setInterpolate(NV50_IR_INTERP_PERSPECTIVE | insn->getSampleMode());
2307
insn->setSrc(1, proj);
2308
dst[c] = insn->getDef(0);
2309
}
2310
if (!mask)
2311
return;
2312
2313
proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), fetchSrc(0, 3));
2314
2315
for (c = 0; c < 4; ++c)
2316
if (mask & (1 << c))
2317
dst[c] = mkOp2v(OP_MUL, TYPE_F32, getSSA(), src[c], proj);
2318
}
2319
2320
// order of nv50 ir sources: x y z layer lod/bias shadow
2321
// order of TGSI TEX sources: x y z layer shadow lod/bias
2322
// lowering will finally set the hw specific order (like array first on nvc0)
2323
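// R/S are the source indices of the resource and sampler; L, C, Dx and Dy
// pack a source selector as (sourceIndex << 4) | component, e.g. 0x10 means
// src1.x. C == 0x0f means the shadow compare location was not given and is
// guessed from the target's argument count.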
void
2324
Converter::handleTEX(Value *dst[4], int R, int S, int L, int C, int Dx, int Dy)
2325
{
2326
Value *arg[4], *src[8];
2327
Value *lod = NULL, *shd = NULL;
2328
unsigned int s, c, d;
2329
TexInstruction *texi = new_TexInstruction(func, tgsi.getOP());
2330
2331
TexInstruction::Target tgt = tgsi.getTexture(code, R);
2332
2333
for (s = 0; s < tgt.getArgCount(); ++s)
2334
arg[s] = src[s] = fetchSrc(0, s);
2335
2336
if (tgsi.getOpcode() == TGSI_OPCODE_TEX_LZ)
2337
lod = loadImm(NULL, 0);
2338
else if (texi->op == OP_TXL || texi->op == OP_TXB)
2339
lod = fetchSrc(L >> 4, L & 3);
2340
2341
if (C == 0x0f)
2342
C = 0x00 | MAX2(tgt.getArgCount(), 2); // guess DC src
2343
2344
if (tgt == TEX_TARGET_CUBE_ARRAY_SHADOW) {
2345
switch (tgsi.getOpcode()) {
2346
case TGSI_OPCODE_TG4: shd = fetchSrc(1, 0); break;
2347
case TGSI_OPCODE_TEX2: shd = fetchSrc(1, 0); break;
2348
case TGSI_OPCODE_TXB2: shd = fetchSrc(1, 1); break;
2349
case TGSI_OPCODE_TXL2: shd = fetchSrc(1, 1); break;
2350
default: assert(!"unexpected opcode with cube array shadow"); break;
2351
}
2352
}
2353
else if (tgt.isShadow())
2354
shd = fetchSrc(C >> 4, C & 3);
2355
2356
if (texi->op == OP_TXD) {
2357
for (c = 0; c < tgt.getDim() + tgt.isCube(); ++c) {
2358
texi->dPdx[c].set(fetchSrc(Dx >> 4, (Dx & 3) + c));
2359
texi->dPdy[c].set(fetchSrc(Dy >> 4, (Dy & 3) + c));
2360
}
2361
}
2362
2363
// cube textures don't care about projection value, it's divided out
2364
if (tgsi.getOpcode() == TGSI_OPCODE_TXP && !tgt.isCube() && !tgt.isArray()) {
2365
unsigned int n = tgt.getDim();
2366
if (shd) {
2367
arg[n] = shd;
2368
++n;
2369
assert(tgt.getDim() == tgt.getArgCount());
2370
}
2371
loadProjTexCoords(src, arg, (1 << n) - 1);
2372
if (shd)
2373
shd = src[n - 1];
2374
}
2375
2376
for (c = 0, d = 0; c < 4; ++c) {
2377
if (dst[c]) {
2378
texi->setDef(d++, dst[c]);
2379
texi->tex.mask |= 1 << c;
2380
} else {
2381
// NOTE: maybe hook up def too, for CSE
2382
}
2383
}
2384
for (s = 0; s < tgt.getArgCount(); ++s)
2385
texi->setSrc(s, src[s]);
2386
if (lod)
2387
texi->setSrc(s++, lod);
2388
if (shd)
2389
texi->setSrc(s++, shd);
2390
2391
setTexRS(texi, s, R, S);
2392
2393
if (tgsi.getOpcode() == TGSI_OPCODE_SAMPLE_C_LZ)
2394
texi->tex.levelZero = true;
2395
if (prog->getType() != Program::TYPE_FRAGMENT &&
2396
(tgsi.getOpcode() == TGSI_OPCODE_TEX ||
2397
tgsi.getOpcode() == TGSI_OPCODE_TEX2 ||
2398
tgsi.getOpcode() == TGSI_OPCODE_TXP))
2399
texi->tex.levelZero = true;
2400
if (tgsi.getOpcode() == TGSI_OPCODE_TG4 && !tgt.isShadow())
2401
texi->tex.gatherComp = tgsi.getSrc(1).getValueU32(0, code->immd.data);
2402
2403
texi->tex.useOffsets = tgsi.getNumTexOffsets();
2404
for (s = 0; s < tgsi.getNumTexOffsets(); ++s) {
2405
for (c = 0; c < 3; ++c) {
2406
texi->offset[s][c].set(fetchSrc(tgsi.getTexOffset(s), c, NULL));
2407
texi->offset[s][c].setInsn(texi);
2408
}
2409
}
2410
2411
bb->insertTail(texi);
2412
}
2413
2414
// 1st source: xyz = coordinates, w = lod/sample
2415
// 2nd source: offset
2416
void
2417
Converter::handleTXF(Value *dst[4], int R, int L_M)
2418
{
2419
TexInstruction *texi = new_TexInstruction(func, tgsi.getOP());
2420
int ms;
2421
unsigned int c, d, s;
2422
2423
texi->tex.target = tgsi.getTexture(code, R);
2424
2425
ms = texi->tex.target.isMS() ? 1 : 0;
2426
texi->tex.levelZero = ms; /* MS textures don't have mip-maps */
2427
2428
for (c = 0, d = 0; c < 4; ++c) {
2429
if (dst[c]) {
2430
texi->setDef(d++, dst[c]);
2431
texi->tex.mask |= 1 << c;
2432
}
2433
}
2434
for (c = 0; c < (texi->tex.target.getArgCount() - ms); ++c)
2435
texi->setSrc(c, fetchSrc(0, c));
2436
if (!ms && tgsi.getOpcode() == TGSI_OPCODE_TXF_LZ)
2437
texi->setSrc(c++, loadImm(NULL, 0));
2438
else
2439
texi->setSrc(c++, fetchSrc(L_M >> 4, L_M & 3)); // lod or ms
2440
2441
setTexRS(texi, c, R, -1);
2442
2443
texi->tex.useOffsets = tgsi.getNumTexOffsets();
2444
for (s = 0; s < tgsi.getNumTexOffsets(); ++s) {
2445
for (c = 0; c < 3; ++c) {
2446
texi->offset[s][c].set(fetchSrc(tgsi.getTexOffset(s), c, NULL));
2447
texi->offset[s][c].setInsn(texi);
2448
}
2449
}
2450
2451
bb->insertTail(texi);
2452
}
2453
2454
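// Framebuffer fetch: emit a TXF from the 2D MS array render target at the
// current fragment's position, layer and sample index.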
void
2455
Converter::handleFBFETCH(Value *dst[4])
2456
{
2457
TexInstruction *texi = new_TexInstruction(func, OP_TXF);
2458
unsigned int c, d;
2459
2460
texi->tex.target = TEX_TARGET_2D_MS_ARRAY;
2461
texi->tex.levelZero = 1;
2462
texi->tex.useOffsets = 0;
2463
2464
for (c = 0, d = 0; c < 4; ++c) {
2465
if (dst[c]) {
2466
texi->setDef(d++, dst[c]);
2467
texi->tex.mask |= 1 << c;
2468
}
2469
}
2470
2471
Value *x = mkOp1v(OP_RDSV, TYPE_F32, getScratch(), mkSysVal(SV_POSITION, 0));
2472
Value *y = mkOp1v(OP_RDSV, TYPE_F32, getScratch(), mkSysVal(SV_POSITION, 1));
2473
Value *z = mkOp1v(OP_RDSV, TYPE_U32, getScratch(), mkSysVal(SV_LAYER, 0));
2474
Value *ms = mkOp1v(OP_RDSV, TYPE_U32, getScratch(), mkSysVal(SV_SAMPLE_INDEX, 0));
2475
2476
mkCvt(OP_CVT, TYPE_U32, x, TYPE_F32, x)->rnd = ROUND_Z;
2477
mkCvt(OP_CVT, TYPE_U32, y, TYPE_F32, y)->rnd = ROUND_Z;
2478
texi->setSrc(0, x);
2479
texi->setSrc(1, y);
2480
texi->setSrc(2, z);
2481
texi->setSrc(3, ms);
2482
2483
texi->tex.r = texi->tex.s = -1;
2484
2485
bb->insertTail(texi);
2486
}
2487
2488
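// Classic LIT: x = 1, y = max(src.x, 0), w = 1, and
// z = (src.x > 0) ? max(src.y, 0)^src.w : 0, with the exponent clamped to
// roughly +/-128.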
void
2489
Converter::handleLIT(Value *dst0[4])
2490
{
2491
Value *val0 = NULL;
2492
unsigned int mask = tgsi.getDst(0).getMask();
2493
2494
if (mask & (1 << 0))
2495
loadImm(dst0[0], 1.0f);
2496
2497
if (mask & (1 << 3))
2498
loadImm(dst0[3], 1.0f);
2499
2500
if (mask & (3 << 1)) {
2501
val0 = getScratch();
2502
mkOp2(OP_MAX, TYPE_F32, val0, fetchSrc(0, 0), zero);
2503
if (mask & (1 << 1))
2504
mkMov(dst0[1], val0);
2505
}
2506
2507
if (mask & (1 << 2)) {
2508
Value *src1 = fetchSrc(0, 1), *src3 = fetchSrc(0, 3);
2509
Value *val1 = getScratch(), *val3 = getScratch();
2510
2511
Value *pos128 = loadImm(NULL, +127.999999f);
2512
Value *neg128 = loadImm(NULL, -127.999999f);
2513
2514
mkOp2(OP_MAX, TYPE_F32, val1, src1, zero);
2515
mkOp2(OP_MAX, TYPE_F32, val3, src3, neg128);
2516
mkOp2(OP_MIN, TYPE_F32, val3, val3, pos128);
2517
mkOp2(OP_POW, TYPE_F32, val3, val1, val3);
2518
2519
mkCmp(OP_SLCT, CC_GT, TYPE_F32, dst0[2], TYPE_F32, val3, zero, val0);
2520
}
2521
}
2522
2523
/* Keep this around for now as reference when adding img support
2524
static inline bool
2525
isResourceSpecial(const int r)
2526
{
2527
return (r == TGSI_RESOURCE_GLOBAL ||
2528
r == TGSI_RESOURCE_LOCAL ||
2529
r == TGSI_RESOURCE_PRIVATE ||
2530
r == TGSI_RESOURCE_INPUT);
2531
}
2532
2533
static inline bool
2534
isResourceRaw(const tgsi::Source *code, const int r)
2535
{
2536
return isResourceSpecial(r) || code->resources[r].raw;
2537
}
2538
2539
static inline nv50_ir::TexTarget
2540
getResourceTarget(const tgsi::Source *code, int r)
2541
{
2542
if (isResourceSpecial(r))
2543
return nv50_ir::TEX_TARGET_BUFFER;
2544
return tgsi::translateTexture(code->resources.at(r).target);
2545
}
2546
2547
Symbol *
2548
Converter::getResourceBase(const int r)
2549
{
2550
Symbol *sym = NULL;
2551
2552
switch (r) {
2553
case TGSI_RESOURCE_GLOBAL:
2554
sym = new_Symbol(prog, nv50_ir::FILE_MEMORY_GLOBAL,
2555
info->io.auxCBSlot);
2556
break;
2557
case TGSI_RESOURCE_LOCAL:
2558
assert(prog->getType() == Program::TYPE_COMPUTE);
2559
sym = mkSymbol(nv50_ir::FILE_MEMORY_SHARED, 0, TYPE_U32,
2560
info->prop.cp.sharedOffset);
2561
break;
2562
case TGSI_RESOURCE_PRIVATE:
2563
sym = mkSymbol(nv50_ir::FILE_MEMORY_LOCAL, 0, TYPE_U32,
2564
info->bin.tlsSpace);
2565
break;
2566
case TGSI_RESOURCE_INPUT:
2567
assert(prog->getType() == Program::TYPE_COMPUTE);
2568
sym = mkSymbol(nv50_ir::FILE_SHADER_INPUT, 0, TYPE_U32,
2569
info->prop.cp.inputOffset);
2570
break;
2571
default:
2572
sym = new_Symbol(prog,
2573
nv50_ir::FILE_MEMORY_GLOBAL, code->resources.at(r).slot);
2574
break;
2575
}
2576
return sym;
2577
}
2578
2579
void
2580
Converter::getResourceCoords(std::vector<Value *> &coords, int r, int s)
2581
{
2582
const int arg =
2583
TexInstruction::Target(getResourceTarget(code, r)).getArgCount();
2584
2585
for (int c = 0; c < arg; ++c)
2586
coords.push_back(fetchSrc(s, c));
2587
2588
// NOTE: TGSI_RESOURCE_GLOBAL needs FILE_GPR; this is an nv50 quirk
2589
if (r == TGSI_RESOURCE_LOCAL ||
2590
r == TGSI_RESOURCE_PRIVATE ||
2591
r == TGSI_RESOURCE_INPUT)
2592
coords[0] = mkOp1v(OP_MOV, TYPE_U32, getScratch(4, FILE_ADDRESS),
2593
coords[0]);
2594
}
2595
2596
static inline int
2597
partitionLoadStore(uint8_t comp[2], uint8_t size[2], uint8_t mask)
2598
{
2599
int n = 0;
2600
2601
while (mask) {
2602
if (mask & 1) {
2603
size[n]++;
2604
} else {
2605
if (size[n])
2606
comp[n = 1] = size[0] + 1;
2607
else
2608
comp[n]++;
2609
}
2610
mask >>= 1;
2611
}
2612
if (size[0] == 3) {
2613
n = 1;
2614
size[0] = (comp[0] == 1) ? 1 : 2;
2615
size[1] = 3 - size[0];
2616
comp[1] = comp[0] + size[0];
2617
}
2618
return n + 1;
2619
}
2620
*/
2621
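// Collect image coordinates from source s: getDim() components plus the
// array layer / cube face, and the sample index (src.w) for multisampled
// targets.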
void
2622
Converter::getImageCoords(std::vector<Value *> &coords, int s)
2623
{
2624
TexInstruction::Target t =
2625
TexInstruction::Target(tgsi.getImageTarget());
2626
const int arg = t.getDim() + (t.isArray() || t.isCube());
2627
2628
for (int c = 0; c < arg; ++c)
2629
coords.push_back(fetchSrc(s, c));
2630
2631
if (t.isMS())
2632
coords.push_back(fetchSrc(s, 3));
2633
}
2634
2635
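// Buffer/image indices may be remapped in the tgsi::Source tables; return
// the original id when no mapping exists.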
int
2636
Converter::remapBufferId(int id)
2637
{
2638
std::map<int, int>::const_iterator it = code->bufferIds.find(id);
2639
if (it != code->bufferIds.end())
2640
return it->second;
2641
return id;
2642
}
2643
2644
int
2645
Converter::remapImageId(int id)
2646
{
2647
std::map<int, int>::const_iterator it = code->imageIds.find(id);
2648
if (it != code->imageIds.end())
2649
return it->second;
2650
return id;
2651
}
2652
2653
// For raw loads, granularity is 4 bytes.
2654
// Usage of the texture read mask on OP_SULDP is not allowed.
2655
void
2656
Converter::handleLOAD(Value *dst0[4])
2657
{
2658
int r = tgsi.getSrc(0).getIndex(0);
2659
int c;
2660
std::vector<Value *> off, src, ldv, def;
2661
Value *ind = NULL;
2662
2663
if (tgsi.getSrc(0).isIndirect(0))
2664
ind = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
2665
2666
switch (tgsi.getSrc(0).getFile()) {
2667
case TGSI_FILE_BUFFER:
2668
r = remapBufferId(r);
2669
/* fallthrough */
2670
case TGSI_FILE_MEMORY:
2671
for (c = 0; c < 4; ++c) {
2672
if (!dst0[c])
2673
continue;
2674
2675
Value *off;
2676
Symbol *sym;
2677
uint32_t src0_component_offset = tgsi.getSrc(0).getSwizzle(c) * 4;
2678
2679
if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE) {
2680
off = NULL;
2681
sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2682
tgsi.getSrc(1).getValueU32(0, code->immd.data) +
2683
src0_component_offset);
2684
} else {
2685
// yzw are ignored for buffers
2686
off = fetchSrc(1, 0);
2687
sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2688
src0_component_offset);
2689
}
2690
2691
Instruction *ld = mkLoad(TYPE_U32, dst0[c], sym, off);
2692
if (tgsi.getSrc(0).getFile() == TGSI_FILE_BUFFER &&
2693
code->bufferAtomics[tgsi.getSrc(0).getIndex(0)])
2694
ld->cache = nv50_ir::CACHE_CG;
2695
else
2696
ld->cache = tgsi.getCacheMode();
2697
if (ind)
2698
ld->setIndirect(0, 1, ind);
2699
}
2700
break;
2701
default: {
2702
r = remapImageId(r);
2703
getImageCoords(off, 1);
2704
def.resize(4);
2705
2706
for (c = 0; c < 4; ++c) {
2707
if (!dst0[c] || tgsi.getSrc(0).getSwizzle(c) != (TGSI_SWIZZLE_X + c))
2708
def[c] = getScratch();
2709
else
2710
def[c] = dst0[c];
2711
}
2712
2713
bool bindless = tgsi.getSrc(0).getFile() != TGSI_FILE_IMAGE;
2714
if (bindless)
2715
ind = fetchSrc(0, 0);
2716
2717
TexInstruction *ld =
2718
mkTex(OP_SULDP, tgsi.getImageTarget(), 0, 0, def, off);
2719
ld->tex.mask = tgsi.getDst(0).getMask();
2720
ld->tex.format = tgsi.getImageFormat();
2721
ld->cache = tgsi.getCacheMode();
2722
ld->tex.bindless = bindless;
2723
if (!bindless)
2724
ld->tex.r = r;
2725
if (ind)
2726
ld->setIndirectR(ind);
2727
2728
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2729
if (dst0[c] != def[c])
2730
mkMov(dst0[c], def[tgsi.getSrc(0).getSwizzle(c)]);
2731
break;
2732
}
2733
}
2734
2735
2736
/* Keep this around for now as reference when adding img support
2737
getResourceCoords(off, r, 1);
2738
2739
if (isResourceRaw(code, r)) {
2740
uint8_t mask = 0;
2741
uint8_t comp[2] = { 0, 0 };
2742
uint8_t size[2] = { 0, 0 };
2743
2744
Symbol *base = getResourceBase(r);
2745
2746
// determine the base and size of the at most 2 load ops
2747
for (c = 0; c < 4; ++c)
2748
if (!tgsi.getDst(0).isMasked(c))
2749
mask |= 1 << (tgsi.getSrc(0).getSwizzle(c) - TGSI_SWIZZLE_X);
2750
2751
int n = partitionLoadStore(comp, size, mask);
2752
2753
src = off;
2754
2755
def.resize(4); // index by component, the ones we need will be non-NULL
2756
for (c = 0; c < 4; ++c) {
2757
if (dst0[c] && tgsi.getSrc(0).getSwizzle(c) == (TGSI_SWIZZLE_X + c))
2758
def[c] = dst0[c];
2759
else
2760
if (mask & (1 << c))
2761
def[c] = getScratch();
2762
}
2763
2764
const bool useLd = isResourceSpecial(r) ||
2765
(info->io.nv50styleSurfaces &&
2766
code->resources[r].target == TGSI_TEXTURE_BUFFER);
2767
2768
for (int i = 0; i < n; ++i) {
2769
ldv.assign(def.begin() + comp[i], def.begin() + comp[i] + size[i]);
2770
2771
if (comp[i]) // adjust x component of source address if necessary
2772
src[0] = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, off[0]->reg.file),
2773
off[0], mkImm(comp[i] * 4));
2774
else
2775
src[0] = off[0];
2776
2777
if (useLd) {
2778
Instruction *ld =
2779
mkLoad(typeOfSize(size[i] * 4), ldv[0], base, src[0]);
2780
for (size_t c = 1; c < ldv.size(); ++c)
2781
ld->setDef(c, ldv[c]);
2782
} else {
2783
mkTex(OP_SULDB, getResourceTarget(code, r), code->resources[r].slot,
2784
0, ldv, src)->dType = typeOfSize(size[i] * 4);
2785
}
2786
}
2787
} else {
2788
def.resize(4);
2789
for (c = 0; c < 4; ++c) {
2790
if (!dst0[c] || tgsi.getSrc(0).getSwizzle(c) != (TGSI_SWIZZLE_X + c))
2791
def[c] = getScratch();
2792
else
2793
def[c] = dst0[c];
2794
}
2795
2796
mkTex(OP_SULDP, getResourceTarget(code, r), code->resources[r].slot, 0,
2797
def, off);
2798
}
2799
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2800
if (dst0[c] != def[c])
2801
mkMov(dst0[c], def[tgsi.getSrc(0).getSwizzle(c)]);
2802
*/
2803
}
2804
2805
// For formatted stores, the write mask on OP_SUSTP can be used.
2806
// Raw stores have to be split.
2807
void
2808
Converter::handleSTORE()
2809
{
2810
int r = tgsi.getDst(0).getIndex(0);
2811
int c;
2812
std::vector<Value *> off, src, dummy;
2813
Value *ind = NULL;
2814
2815
if (tgsi.getDst(0).isIndirect(0))
2816
ind = fetchSrc(tgsi.getDst(0).getIndirect(0), 0, 0);
2817
2818
switch (tgsi.getDst(0).getFile()) {
2819
case TGSI_FILE_BUFFER:
2820
r = remapBufferId(r);
2821
/* fallthrough */
2822
case TGSI_FILE_MEMORY:
2823
for (c = 0; c < 4; ++c) {
2824
if (!(tgsi.getDst(0).getMask() & (1 << c)))
2825
continue;
2826
2827
Symbol *sym;
2828
Value *off;
2829
if (tgsi.getSrc(0).getFile() == TGSI_FILE_IMMEDIATE) {
2830
off = NULL;
2831
sym = makeSym(tgsi.getDst(0).getFile(), r, -1, c,
2832
tgsi.getSrc(0).getValueU32(0, code->immd.data) + 4 * c);
2833
} else {
2834
// yzw are ignored for buffers
2835
off = fetchSrc(0, 0);
2836
sym = makeSym(tgsi.getDst(0).getFile(), r, -1, c, 4 * c);
2837
}
2838
2839
Instruction *st = mkStore(OP_STORE, TYPE_U32, sym, off, fetchSrc(1, c));
2840
st->cache = tgsi.getCacheMode();
2841
if (ind)
2842
st->setIndirect(0, 1, ind);
2843
}
2844
break;
2845
default: {
2846
r = remapImageId(r);
2847
getImageCoords(off, 0);
2848
src = off;
2849
2850
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2851
src.push_back(fetchSrc(1, c));
2852
2853
bool bindless = tgsi.getDst(0).getFile() != TGSI_FILE_IMAGE;
2854
if (bindless)
2855
ind = fetchDst(0, 0);
2856
2857
TexInstruction *st =
2858
mkTex(OP_SUSTP, tgsi.getImageTarget(), 0, 0, dummy, src);
2859
st->tex.mask = tgsi.getDst(0).getMask();
2860
st->tex.format = tgsi.getImageFormat();
2861
st->cache = tgsi.getCacheMode();
2862
st->tex.bindless = bindless;
2863
if (!bindless)
2864
st->tex.r = r;
2865
if (ind)
2866
st->setIndirectR(ind);
2867
2868
break;
2869
}
2870
}
2871
2872
/* Keep this around for now as reference when adding img support
2873
getResourceCoords(off, r, 0);
2874
src = off;
2875
const int s = src.size();
2876
2877
if (isResourceRaw(code, r)) {
2878
uint8_t comp[2] = { 0, 0 };
2879
uint8_t size[2] = { 0, 0 };
2880
2881
int n = partitionLoadStore(comp, size, tgsi.getDst(0).getMask());
2882
2883
Symbol *base = getResourceBase(r);
2884
2885
const bool useSt = isResourceSpecial(r) ||
2886
(info->io.nv50styleSurfaces &&
2887
code->resources[r].target == TGSI_TEXTURE_BUFFER);
2888
2889
for (int i = 0; i < n; ++i) {
2890
if (comp[i]) // adjust x component of source address if necessary
2891
src[0] = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, off[0]->reg.file),
2892
off[0], mkImm(comp[i] * 4));
2893
else
2894
src[0] = off[0];
2895
2896
const DataType stTy = typeOfSize(size[i] * 4);
2897
2898
if (useSt) {
2899
Instruction *st =
2900
mkStore(OP_STORE, stTy, base, NULL, fetchSrc(1, comp[i]));
2901
for (c = 1; c < size[i]; ++c)
2902
st->setSrc(1 + c, fetchSrc(1, comp[i] + c));
2903
st->setIndirect(0, 0, src[0]);
2904
} else {
2905
// attach values to be stored
2906
src.resize(s + size[i]);
2907
for (c = 0; c < size[i]; ++c)
2908
src[s + c] = fetchSrc(1, comp[i] + c);
2909
mkTex(OP_SUSTB, getResourceTarget(code, r), code->resources[r].slot,
2910
0, dummy, src)->setType(stTy);
2911
}
2912
}
2913
} else {
2914
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2915
src.push_back(fetchSrc(1, c));
2916
2917
mkTex(OP_SUSTP, getResourceTarget(code, r), code->resources[r].slot, 0,
2918
dummy, src)->tex.mask = tgsi.getDst(0).getMask();
2919
}
2920
*/
2921
}
2922
2923
// XXX: These only work on resources with the single-component u32/s32 formats.
2924
// Therefore the result is replicated. This might not be intended by TGSI, but
2925
// operating on more than 1 component would produce undefined results because
2926
// they do not exist.
2927
void
2928
Converter::handleATOM(Value *dst0[4], DataType ty, uint16_t subOp)
2929
{
2930
int r = tgsi.getSrc(0).getIndex(0);
2931
std::vector<Value *> srcv;
2932
std::vector<Value *> defv;
2933
LValue *dst = getScratch();
2934
Value *ind = NULL;
2935
2936
if (tgsi.getSrc(0).isIndirect(0))
2937
ind = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
2938
2939
switch (tgsi.getSrc(0).getFile()) {
2940
case TGSI_FILE_BUFFER:
2941
r = remapBufferId(r);
2942
/* fallthrough */
2943
case TGSI_FILE_MEMORY:
2944
for (int c = 0; c < 4; ++c) {
2945
if (!dst0[c])
2946
continue;
2947
2948
Instruction *insn;
2949
Value *off = fetchSrc(1, c);
2950
Value *sym;
2951
if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE)
2952
sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2953
tgsi.getSrc(1).getValueU32(c, code->immd.data));
2954
else
2955
sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c, 0);
2956
if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2957
insn = mkOp3(OP_ATOM, ty, dst, sym, fetchSrc(2, c), fetchSrc(3, c));
2958
else
2959
insn = mkOp2(OP_ATOM, ty, dst, sym, fetchSrc(2, c));
2960
if (tgsi.getSrc(1).getFile() != TGSI_FILE_IMMEDIATE)
2961
insn->setIndirect(0, 0, off);
2962
if (ind)
2963
insn->setIndirect(0, 1, ind);
2964
insn->subOp = subOp;
2965
}
2966
for (int c = 0; c < 4; ++c)
2967
if (dst0[c])
2968
dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2969
break;
2970
default: {
2971
r = remapImageId(r);
2972
getImageCoords(srcv, 1);
2973
defv.push_back(dst);
2974
srcv.push_back(fetchSrc(2, 0));
2975
2976
if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2977
srcv.push_back(fetchSrc(3, 0));
2978
2979
bool bindless = tgsi.getSrc(0).getFile() != TGSI_FILE_IMAGE;
2980
if (bindless)
2981
ind = fetchSrc(0, 0);
2982
2983
TexInstruction *tex = mkTex(OP_SUREDP, tgsi.getImageTarget(),
2984
0, 0, defv, srcv);
2985
tex->subOp = subOp;
2986
tex->tex.mask = 1;
2987
tex->tex.format = tgsi.getImageFormat();
2988
tex->setType(ty);
2989
tex->tex.bindless = bindless;
2990
if (!bindless)
2991
tex->tex.r = r;
2992
if (ind)
2993
tex->setIndirectR(ind);
2994
2995
for (int c = 0; c < 4; ++c)
2996
if (dst0[c])
2997
dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2998
break;
2999
}
3000
}
3001
3002
/* Keep this around for now as reference when adding img support
3003
getResourceCoords(srcv, r, 1);
3004
3005
if (isResourceSpecial(r)) {
3006
assert(r != TGSI_RESOURCE_INPUT);
3007
Instruction *insn;
3008
insn = mkOp2(OP_ATOM, ty, dst, getResourceBase(r), fetchSrc(2, 0));
3009
insn->subOp = subOp;
3010
if (subOp == NV50_IR_SUBOP_ATOM_CAS)
3011
insn->setSrc(2, fetchSrc(3, 0));
3012
insn->setIndirect(0, 0, srcv.at(0));
3013
} else {
3014
operation op = isResourceRaw(code, r) ? OP_SUREDB : OP_SUREDP;
3015
TexTarget targ = getResourceTarget(code, r);
3016
int idx = code->resources[r].slot;
3017
defv.push_back(dst);
3018
srcv.push_back(fetchSrc(2, 0));
3019
if (subOp == NV50_IR_SUBOP_ATOM_CAS)
3020
srcv.push_back(fetchSrc(3, 0));
3021
TexInstruction *tex = mkTex(op, targ, idx, 0, defv, srcv);
3022
tex->subOp = subOp;
3023
tex->tex.mask = 1;
3024
tex->setType(ty);
3025
}
3026
3027
for (int c = 0; c < 4; ++c)
3028
if (dst0[c])
3029
dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
3030
*/
3031
}
3032
3033
void
3034
Converter::handleINTERP(Value *dst[4])
3035
{
3036
// Check whether the input is linear. All other attributes ignored.
3037
Instruction *insn;
3038
Value *offset = NULL, *ptr = NULL, *w = NULL;
3039
Symbol *sym[4] = { NULL };
3040
bool linear;
3041
operation op = OP_NOP;
3042
int c, mode = 0;
3043
3044
tgsi::Instruction::SrcRegister src = tgsi.getSrc(0);
3045
3046
// In some odd cases, in large part due to varying packing, the source
3047
// might not actually be an input. This is illegal TGSI, but it's easier to
3048
// account for it here than it is to fix it where the TGSI is being
3049
// generated. In that case, it's going to be a straight up mov (or sequence
3050
// of mov's) from the input in question. We follow the mov chain to see
3051
// which input we need to use.
3052
if (src.getFile() != TGSI_FILE_INPUT) {
3053
if (src.isIndirect(0)) {
3054
ERROR("Ignoring indirect input interpolation\n");
3055
return;
3056
}
3057
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3058
Value *val = fetchSrc(0, c);
3059
assert(val->defs.size() == 1);
3060
insn = val->getInsn();
3061
while (insn->op == OP_MOV) {
3062
assert(insn->getSrc(0)->defs.size() == 1);
3063
insn = insn->getSrc(0)->getInsn();
3064
if (!insn) {
3065
ERROR("Miscompiling shader due to unhandled INTERP\n");
3066
return;
3067
}
3068
}
3069
if (insn->op != OP_LINTERP && insn->op != OP_PINTERP) {
3070
ERROR("Trying to interpolate non-input, this is not allowed.\n");
3071
return;
3072
}
3073
sym[c] = insn->getSrc(0)->asSym();
3074
assert(sym[c]);
3075
op = insn->op;
3076
mode = insn->ipa;
3077
ptr = insn->getIndirect(0, 0);
3078
}
3079
} else {
3080
if (src.isIndirect(0))
3081
ptr = shiftAddress(fetchSrc(src.getIndirect(0), 0, NULL));
3082
3083
// We can assume that the fixed index will point to an input of the same
3084
// interpolation type in case of an indirect.
3085
// TODO: Make use of ArrayID.
3086
linear = info_out->in[src.getIndex(0)].linear;
3087
if (linear) {
3088
op = OP_LINTERP;
3089
mode = NV50_IR_INTERP_LINEAR;
3090
} else {
3091
op = OP_PINTERP;
3092
mode = NV50_IR_INTERP_PERSPECTIVE;
3093
}
3094
}
3095
3096
switch (tgsi.getOpcode()) {
3097
case TGSI_OPCODE_INTERP_CENTROID:
3098
mode |= NV50_IR_INTERP_CENTROID;
3099
break;
3100
case TGSI_OPCODE_INTERP_SAMPLE: {
3101
// When using a non-MS buffer, we're supposed to always use the center
3102
// (i.e. sample 0). This adds a SELP which will always be true or false
3103
// based on a data fixup.
3104
Value *sample = getScratch();
3105
mkOp3(OP_SELP, TYPE_U32, sample, mkImm(0), fetchSrc(1, 0), mkImm(0))
3106
->subOp = 2;
3107
3108
insn = mkOp1(OP_PIXLD, TYPE_U32, (offset = getScratch()), sample);
3109
insn->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
3110
mode |= NV50_IR_INTERP_OFFSET;
3111
break;
3112
}
3113
case TGSI_OPCODE_INTERP_OFFSET: {
3114
// The input in src1.xy is float, but we need a single 32-bit value
3115
// where the upper and lower 16 bits are encoded in S0.12 format. We need
3116
// to clamp the input coordinates to (-0.5, 0.4375), multiply by 4096,
3117
// and then convert to s32.
3118
Value *offs[2];
3119
for (c = 0; c < 2; c++) {
3120
offs[c] = getScratch();
3121
mkOp2(OP_MIN, TYPE_F32, offs[c], fetchSrc(1, c), loadImm(NULL, 0.4375f));
3122
mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
3123
mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
3124
mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
3125
}
3126
offset = mkOp3v(OP_INSBF, TYPE_U32, getScratch(),
3127
offs[1], mkImm(0x1010), offs[0]);
3128
mode |= NV50_IR_INTERP_OFFSET;
3129
break;
3130
}
3131
}
3132
3133
if (op == OP_PINTERP) {
3134
if (offset) {
3135
w = mkOp2v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 3), offset);
3136
mkOp1(OP_RCP, TYPE_F32, w, w);
3137
} else {
3138
w = fragCoord[3];
3139
}
3140
}
3141
3142
3143
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3144
insn = mkOp1(op, TYPE_F32, dst[c], sym[c] ? sym[c] : srcToSym(src, c));
3145
if (op == OP_PINTERP)
3146
insn->setSrc(1, w);
3147
if (offset)
3148
insn->setSrc(op == OP_PINTERP ? 2 : 1, offset);
3149
if (ptr)
3150
insn->setIndirect(0, 0, ptr);
3151
3152
insn->setInterpolate(mode);
3153
}
3154
}
3155
3156
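// True if the instruction at ip ends the current subroutine: END, ENDSUB, or
// the BGNSUB of the next subroutine.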
bool
3157
Converter::isEndOfSubroutine(uint ip)
3158
{
3159
assert(ip < code->scan.num_instructions);
3160
tgsi::Instruction insn(&code->insns[ip]);
3161
return (insn.getOpcode() == TGSI_OPCODE_END ||
3162
insn.getOpcode() == TGSI_OPCODE_ENDSUB ||
3163
// does END occur at end of main or the very end ?
3164
insn.getOpcode() == TGSI_OPCODE_BGNSUB);
3165
}
3166
3167
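// Main per-instruction translation: destination values are acquired up front
// (scratch copies when sources alias destinations), then the TGSI opcode is
// dispatched in the switch below.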
bool
3168
Converter::handleInstruction(const struct tgsi_full_instruction *insn)
3169
{
3170
Instruction *geni;
3171
3172
Value *dst0[4], *rDst0[4];
3173
Value *src0, *src1, *src2, *src3;
3174
Value *val0 = NULL, *val1 = NULL;
3175
int c;
3176
3177
tgsi = tgsi::Instruction(insn);
3178
3179
bool useScratchDst = tgsi.checkDstSrcAliasing();
3180
3181
operation op = tgsi.getOP();
3182
dstTy = tgsi.inferDstType();
3183
srcTy = tgsi.inferSrcType();
3184
3185
unsigned int mask = tgsi.dstCount() ? tgsi.getDst(0).getMask() : 0;
3186
3187
if (tgsi.dstCount() && tgsi.getOpcode() != TGSI_OPCODE_STORE) {
3188
for (c = 0; c < 4; ++c) {
3189
rDst0[c] = acquireDst(0, c);
3190
dst0[c] = (useScratchDst && rDst0[c]) ? getScratch() : rDst0[c];
3191
}
3192
}
3193
3194
switch (tgsi.getOpcode()) {
3195
case TGSI_OPCODE_ADD:
3196
case TGSI_OPCODE_UADD:
3197
case TGSI_OPCODE_AND:
3198
case TGSI_OPCODE_DIV:
3199
case TGSI_OPCODE_IDIV:
3200
case TGSI_OPCODE_UDIV:
3201
case TGSI_OPCODE_MAX:
3202
case TGSI_OPCODE_MIN:
3203
case TGSI_OPCODE_IMAX:
3204
case TGSI_OPCODE_IMIN:
3205
case TGSI_OPCODE_UMAX:
3206
case TGSI_OPCODE_UMIN:
3207
case TGSI_OPCODE_MOD:
3208
case TGSI_OPCODE_UMOD:
3209
case TGSI_OPCODE_MUL:
3210
case TGSI_OPCODE_UMUL:
3211
case TGSI_OPCODE_IMUL_HI:
3212
case TGSI_OPCODE_UMUL_HI:
3213
case TGSI_OPCODE_OR:
3214
case TGSI_OPCODE_SHL:
3215
case TGSI_OPCODE_ISHR:
3216
case TGSI_OPCODE_USHR:
3217
case TGSI_OPCODE_XOR:
3218
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3219
src0 = fetchSrc(0, c);
3220
src1 = fetchSrc(1, c);
3221
geni = mkOp2(op, dstTy, dst0[c], src0, src1);
3222
geni->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
3223
if (op == OP_MUL && dstTy == TYPE_F32)
3224
geni->dnz = info->io.mul_zero_wins;
3225
geni->precise = insn->Instruction.Precise;
3226
}
3227
break;
3228
case TGSI_OPCODE_MAD:
3229
case TGSI_OPCODE_UMAD:
3230
case TGSI_OPCODE_FMA:
3231
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3232
src0 = fetchSrc(0, c);
3233
src1 = fetchSrc(1, c);
3234
src2 = fetchSrc(2, c);
3235
geni = mkOp3(op, dstTy, dst0[c], src0, src1, src2);
3236
if (dstTy == TYPE_F32)
3237
geni->dnz = info->io.mul_zero_wins;
3238
geni->precise = insn->Instruction.Precise;
3239
}
3240
break;
3241
case TGSI_OPCODE_MOV:
3242
case TGSI_OPCODE_CEIL:
3243
case TGSI_OPCODE_FLR:
3244
case TGSI_OPCODE_TRUNC:
3245
case TGSI_OPCODE_RCP:
3246
case TGSI_OPCODE_SQRT:
3247
case TGSI_OPCODE_IABS:
3248
case TGSI_OPCODE_INEG:
3249
case TGSI_OPCODE_NOT:
3250
case TGSI_OPCODE_DDX:
3251
case TGSI_OPCODE_DDY:
3252
case TGSI_OPCODE_DDX_FINE:
3253
case TGSI_OPCODE_DDY_FINE:
3254
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3255
mkOp1(op, dstTy, dst0[c], fetchSrc(0, c));
3256
break;
3257
case TGSI_OPCODE_RSQ:
3258
src0 = fetchSrc(0, 0);
3259
val0 = getScratch();
3260
mkOp1(OP_ABS, TYPE_F32, val0, src0);
3261
mkOp1(OP_RSQ, TYPE_F32, val0, val0);
3262
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3263
mkMov(dst0[c], val0);
3264
break;
3265
case TGSI_OPCODE_ARL:
3266
case TGSI_OPCODE_ARR:
3267
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3268
const RoundMode rnd =
3269
tgsi.getOpcode() == TGSI_OPCODE_ARR ? ROUND_N : ROUND_M;
3270
src0 = fetchSrc(0, c);
3271
mkCvt(OP_CVT, TYPE_S32, dst0[c], TYPE_F32, src0)->rnd = rnd;
3272
}
3273
break;
3274
case TGSI_OPCODE_UARL:
3275
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3276
mkOp1(OP_MOV, TYPE_U32, dst0[c], fetchSrc(0, c));
3277
break;
3278
case TGSI_OPCODE_POW:
3279
val0 = mkOp2v(op, TYPE_F32, getScratch(), fetchSrc(0, 0), fetchSrc(1, 0));
3280
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3281
mkOp1(OP_MOV, TYPE_F32, dst0[c], val0);
3282
break;
3283
case TGSI_OPCODE_EX2:
3284
case TGSI_OPCODE_LG2:
3285
val0 = mkOp1(op, TYPE_F32, getScratch(), fetchSrc(0, 0))->getDef(0);
3286
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3287
mkOp1(OP_MOV, TYPE_F32, dst0[c], val0);
3288
break;
3289
case TGSI_OPCODE_COS:
3290
case TGSI_OPCODE_SIN:
3291
val0 = getScratch();
3292
if (mask & 7) {
3293
mkOp1(OP_PRESIN, TYPE_F32, val0, fetchSrc(0, 0));
3294
mkOp1(op, TYPE_F32, val0, val0);
3295
for (c = 0; c < 3; ++c)
3296
if (dst0[c])
3297
mkMov(dst0[c], val0);
3298
}
3299
if (dst0[3]) {
3300
mkOp1(OP_PRESIN, TYPE_F32, val0, fetchSrc(0, 3));
3301
mkOp1(op, TYPE_F32, dst0[3], val0);
3302
}
3303
break;
3304
case TGSI_OPCODE_EXP:
3305
src0 = fetchSrc(0, 0);
3306
val0 = mkOp1v(OP_FLOOR, TYPE_F32, getSSA(), src0);
3307
if (dst0[1])
3308
mkOp2(OP_SUB, TYPE_F32, dst0[1], src0, val0);
3309
if (dst0[0])
3310
mkOp1(OP_EX2, TYPE_F32, dst0[0], val0);
3311
if (dst0[2])
3312
mkOp1(OP_EX2, TYPE_F32, dst0[2], src0);
3313
if (dst0[3])
3314
loadImm(dst0[3], 1.0f);
3315
break;
3316
case TGSI_OPCODE_LOG:
3317
src0 = mkOp1v(OP_ABS, TYPE_F32, getSSA(), fetchSrc(0, 0));
3318
val0 = mkOp1v(OP_LG2, TYPE_F32, dst0[2] ? dst0[2] : getSSA(), src0);
3319
if (dst0[0] || dst0[1])
3320
val1 = mkOp1v(OP_FLOOR, TYPE_F32, dst0[0] ? dst0[0] : getSSA(), val0);
3321
if (dst0[1]) {
3322
mkOp1(OP_EX2, TYPE_F32, dst0[1], val1);
3323
mkOp1(OP_RCP, TYPE_F32, dst0[1], dst0[1]);
3324
mkOp2(OP_MUL, TYPE_F32, dst0[1], dst0[1], src0)
3325
->dnz = info->io.mul_zero_wins;
3326
}
3327
if (dst0[3])
3328
loadImm(dst0[3], 1.0f);
3329
break;
3330
case TGSI_OPCODE_DP2:
3331
val0 = buildDot(2);
3332
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3333
mkMov(dst0[c], val0);
3334
break;
3335
case TGSI_OPCODE_DP3:
3336
val0 = buildDot(3);
3337
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3338
mkMov(dst0[c], val0);
3339
break;
3340
case TGSI_OPCODE_DP4:
3341
val0 = buildDot(4);
3342
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3343
mkMov(dst0[c], val0);
3344
break;
3345
case TGSI_OPCODE_DST:
3346
if (dst0[0])
3347
loadImm(dst0[0], 1.0f);
3348
if (dst0[1]) {
3349
src0 = fetchSrc(0, 1);
3350
src1 = fetchSrc(1, 1);
3351
mkOp2(OP_MUL, TYPE_F32, dst0[1], src0, src1)
3352
->dnz = info->io.mul_zero_wins;
3353
}
3354
if (dst0[2])
3355
mkMov(dst0[2], fetchSrc(0, 2));
3356
if (dst0[3])
3357
mkMov(dst0[3], fetchSrc(1, 3));
3358
break;
3359
case TGSI_OPCODE_LRP:
3360
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3361
src0 = fetchSrc(0, c);
3362
src1 = fetchSrc(1, c);
3363
src2 = fetchSrc(2, c);
3364
mkOp3(OP_MAD, TYPE_F32, dst0[c],
3365
mkOp2v(OP_SUB, TYPE_F32, getSSA(), src1, src2), src0, src2)
3366
->dnz = info->io.mul_zero_wins;
3367
}
3368
break;
3369
case TGSI_OPCODE_LIT:
3370
handleLIT(dst0);
3371
break;
3372
case TGSI_OPCODE_ISSG:
3373
case TGSI_OPCODE_SSG:
3374
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3375
src0 = fetchSrc(0, c);
3376
val0 = getScratch();
3377
val1 = getScratch();
3378
mkCmp(OP_SET, CC_GT, srcTy, val0, srcTy, src0, zero);
3379
mkCmp(OP_SET, CC_LT, srcTy, val1, srcTy, src0, zero);
3380
if (srcTy == TYPE_F32)
3381
mkOp2(OP_SUB, TYPE_F32, dst0[c], val0, val1);
3382
else
3383
mkOp2(OP_SUB, TYPE_S32, dst0[c], val1, val0);
3384
}
3385
break;
3386
case TGSI_OPCODE_UCMP:
3387
srcTy = TYPE_U32;
3388
FALLTHROUGH;
3389
case TGSI_OPCODE_CMP:
3390
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3391
src0 = fetchSrc(0, c);
3392
src1 = fetchSrc(1, c);
3393
src2 = fetchSrc(2, c);
3394
if (src1 == src2)
3395
mkMov(dst0[c], src1);
3396
else
3397
mkCmp(OP_SLCT, (srcTy == TYPE_F32) ? CC_LT : CC_NE,
3398
srcTy, dst0[c], srcTy, src1, src2, src0);
3399
}
3400
break;
3401
case TGSI_OPCODE_FRC:
3402
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3403
src0 = fetchSrc(0, c);
3404
val0 = getScratch();
3405
mkOp1(OP_FLOOR, TYPE_F32, val0, src0);
3406
mkOp2(OP_SUB, TYPE_F32, dst0[c], src0, val0);
3407
}
3408
break;
3409
case TGSI_OPCODE_ROUND:
3410
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3411
mkCvt(OP_CVT, TYPE_F32, dst0[c], TYPE_F32, fetchSrc(0, c))
3412
->rnd = ROUND_NI;
3413
break;
3414
case TGSI_OPCODE_SLT:
3415
case TGSI_OPCODE_SGE:
3416
case TGSI_OPCODE_SEQ:
3417
case TGSI_OPCODE_SGT:
3418
case TGSI_OPCODE_SLE:
3419
case TGSI_OPCODE_SNE:
3420
case TGSI_OPCODE_FSEQ:
3421
case TGSI_OPCODE_FSGE:
3422
case TGSI_OPCODE_FSLT:
3423
case TGSI_OPCODE_FSNE:
3424
case TGSI_OPCODE_ISGE:
3425
case TGSI_OPCODE_ISLT:
3426
case TGSI_OPCODE_USEQ:
3427
case TGSI_OPCODE_USGE:
3428
case TGSI_OPCODE_USLT:
3429
case TGSI_OPCODE_USNE:
3430
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3431
src0 = fetchSrc(0, c);
3432
src1 = fetchSrc(1, c);
3433
mkCmp(op, tgsi.getSetCond(), dstTy, dst0[c], srcTy, src0, src1);
3434
}
3435
break;
3436
case TGSI_OPCODE_VOTE_ALL:
3437
case TGSI_OPCODE_VOTE_ANY:
3438
case TGSI_OPCODE_VOTE_EQ:
3439
val0 = new_LValue(func, FILE_PREDICATE);
3440
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3441
mkCmp(OP_SET, CC_NE, TYPE_U32, val0, TYPE_U32, fetchSrc(0, c), zero);
3442
mkOp1(op, dstTy, val0, val0)
3443
->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
3444
mkCvt(OP_CVT, TYPE_U32, dst0[c], TYPE_U8, val0);
3445
}
3446
break;
3447
case TGSI_OPCODE_BALLOT:
3448
if (!tgsi.getDst(0).isMasked(0)) {
3449
val0 = new_LValue(func, FILE_PREDICATE);
3450
mkCmp(OP_SET, CC_NE, TYPE_U32, val0, TYPE_U32, fetchSrc(0, 0), zero);
3451
mkOp1(op, TYPE_U32, dst0[0], val0)->subOp = NV50_IR_SUBOP_VOTE_ANY;
3452
}
3453
if (!tgsi.getDst(0).isMasked(1))
3454
mkMov(dst0[1], zero, TYPE_U32);
3455
break;
3456
case TGSI_OPCODE_READ_FIRST:
3457
// ReadFirstInvocationARB(src) is implemented as
3458
// ReadInvocationARB(src, findLSB(ballot(true)))
3459
val0 = getScratch();
3460
mkOp1(OP_VOTE, TYPE_U32, val0, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
3461
mkOp1(OP_BREV, TYPE_U32, val0, val0);
3462
mkOp1(OP_BFIND, TYPE_U32, val0, val0)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
3463
src1 = val0;
3464
FALLTHROUGH;
3465
case TGSI_OPCODE_READ_INVOC:
3466
if (tgsi.getOpcode() == TGSI_OPCODE_READ_INVOC)
3467
src1 = fetchSrc(1, 0);
3468
else
3469
src1 = val0;
3470
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3471
geni = mkOp3(op, dstTy, dst0[c], fetchSrc(0, c), src1, mkImm(0x1f));
3472
geni->subOp = NV50_IR_SUBOP_SHFL_IDX;
3473
}
3474
break;
3475
case TGSI_OPCODE_CLOCK:
3476
// Stick the 32-bit clock into the high dword of the logical result.
3477
if (!tgsi.getDst(0).isMasked(0))
3478
mkOp1(OP_MOV, TYPE_U32, dst0[0], zero);
3479
if (!tgsi.getDst(0).isMasked(1))
3480
mkOp1(OP_RDSV, TYPE_U32, dst0[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
3481
break;
3482
case TGSI_OPCODE_READ_HELPER:
3483
if (!tgsi.getDst(0).isMasked(0))
3484
mkOp1(OP_RDSV, TYPE_U32, dst0[0], mkSysVal(SV_THREAD_KILL, 0))
3485
->fixed = 1;
3486
break;
3487
case TGSI_OPCODE_KILL_IF:
3488
val0 = new_LValue(func, FILE_PREDICATE);
3489
mask = 0;
3490
for (c = 0; c < 4; ++c) {
3491
const int s = tgsi.getSrc(0).getSwizzle(c);
3492
if (mask & (1 << s))
3493
continue;
3494
mask |= 1 << s;
3495
mkCmp(OP_SET, CC_LT, TYPE_F32, val0, TYPE_F32, fetchSrc(0, c), zero);
3496
mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, val0);
3497
}
3498
break;
3499
case TGSI_OPCODE_KILL:
3500
case TGSI_OPCODE_DEMOTE:
3501
// TODO: Should we make KILL exit that invocation? Some old shaders
3502
// don't like that.
3503
mkOp(OP_DISCARD, TYPE_NONE, NULL);
3504
break;
3505
case TGSI_OPCODE_TEX:
3506
case TGSI_OPCODE_TEX_LZ:
3507
case TGSI_OPCODE_TXB:
3508
case TGSI_OPCODE_TXL:
3509
case TGSI_OPCODE_TXP:
3510
case TGSI_OPCODE_LODQ:
3511
// R S L C Dx Dy
3512
handleTEX(dst0, 1, 1, 0x03, 0x0f, 0x00, 0x00);
3513
break;
3514
case TGSI_OPCODE_TXD:
3515
handleTEX(dst0, 3, 3, 0x03, 0x0f, 0x10, 0x20);
3516
break;
3517
case TGSI_OPCODE_TG4:
3518
handleTEX(dst0, 2, 2, 0x03, 0x0f, 0x00, 0x00);
3519
break;
3520
case TGSI_OPCODE_TEX2:
3521
handleTEX(dst0, 2, 2, 0x03, 0x10, 0x00, 0x00);
3522
break;
3523
case TGSI_OPCODE_TXB2:
3524
case TGSI_OPCODE_TXL2:
3525
handleTEX(dst0, 2, 2, 0x10, 0x0f, 0x00, 0x00);
3526
break;
3527
case TGSI_OPCODE_SAMPLE:
3528
case TGSI_OPCODE_SAMPLE_B:
3529
case TGSI_OPCODE_SAMPLE_D:
3530
case TGSI_OPCODE_SAMPLE_L:
3531
case TGSI_OPCODE_SAMPLE_C:
3532
case TGSI_OPCODE_SAMPLE_C_LZ:
3533
handleTEX(dst0, 1, 2, 0x30, 0x30, 0x30, 0x40);
3534
break;
3535
case TGSI_OPCODE_TXF_LZ:
3536
case TGSI_OPCODE_TXF:
3537
handleTXF(dst0, 1, 0x03);
3538
break;
3539
case TGSI_OPCODE_SAMPLE_I:
3540
handleTXF(dst0, 1, 0x03);
3541
break;
3542
case TGSI_OPCODE_SAMPLE_I_MS:
3543
handleTXF(dst0, 1, 0x20);
3544
break;
3545
case TGSI_OPCODE_TXQ:
3546
case TGSI_OPCODE_SVIEWINFO:
3547
handleTXQ(dst0, TXQ_DIMS, 1);
3548
break;
3549
case TGSI_OPCODE_TXQS:
3550
// The TXQ_TYPE query returns samples in its 3rd arg, but we need it to
3551
// be in .x
3552
dst0[1] = dst0[2] = dst0[3] = NULL;
3553
std::swap(dst0[0], dst0[2]);
3554
handleTXQ(dst0, TXQ_TYPE, 0);
3555
std::swap(dst0[0], dst0[2]);
3556
break;
3557
case TGSI_OPCODE_FBFETCH:
3558
handleFBFETCH(dst0);
3559
break;
3560
case TGSI_OPCODE_F2I:
3561
case TGSI_OPCODE_F2U:
3562
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3563
mkCvt(OP_CVT, dstTy, dst0[c], srcTy, fetchSrc(0, c))->rnd = ROUND_Z;
3564
break;
3565
case TGSI_OPCODE_I2F:
3566
case TGSI_OPCODE_U2F:
3567
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3568
mkCvt(OP_CVT, dstTy, dst0[c], srcTy, fetchSrc(0, c));
3569
break;
3570
case TGSI_OPCODE_PK2H:
3571
val0 = getScratch();
3572
val1 = getScratch();
3573
mkCvt(OP_CVT, TYPE_F16, val0, TYPE_F32, fetchSrc(0, 0));
3574
mkCvt(OP_CVT, TYPE_F16, val1, TYPE_F32, fetchSrc(0, 1));
3575
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3576
mkOp3(OP_INSBF, TYPE_U32, dst0[c], val1, mkImm(0x1010), val0);
3577
break;
3578
case TGSI_OPCODE_UP2H:
3579
src0 = fetchSrc(0, 0);
3580
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3581
geni = mkCvt(OP_CVT, TYPE_F32, dst0[c], TYPE_F16, src0);
3582
geni->subOp = c & 1;
3583
}
3584
break;
3585
case TGSI_OPCODE_EMIT:
3586
/* export the saved viewport index */
3587
if (viewport != NULL) {
3588
Symbol *vpSym = mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_U32,
3589
info_out->out[info->io.viewportId].slot[0] * 4);
3590
mkStore(OP_EXPORT, TYPE_U32, vpSym, NULL, viewport);
3591
}
3592
/* handle user clip planes for each emitted vertex */
3593
if (info_out->io.genUserClip > 0)
3594
handleUserClipPlanes();
3595
FALLTHROUGH;
3596
case TGSI_OPCODE_ENDPRIM:
3597
{
3598
// get vertex stream (must be immediate)
3599
unsigned int stream = tgsi.getSrc(0).getValueU32(0, code->immd.data);
3600
if (stream && op == OP_RESTART)
3601
break;
3602
if (info_out->prop.gp.maxVertices == 0)
3603
break;
3604
src0 = mkImm(stream);
3605
mkOp1(op, TYPE_U32, NULL, src0)->fixed = 1;
3606
break;
3607
}
3608
case TGSI_OPCODE_IF:
3609
case TGSI_OPCODE_UIF:
3610
{
3611
BasicBlock *ifBB = new BasicBlock(func);
3612
3613
bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
3614
condBBs.push(bb);
3615
joinBBs.push(bb);
3616
3617
mkFlow(OP_BRA, NULL, CC_NOT_P, fetchSrc(0, 0))->setType(srcTy);
3618
3619
setPosition(ifBB, true);
3620
}
3621
break;
3622
case TGSI_OPCODE_ELSE:
3623
{
3624
BasicBlock *elseBB = new BasicBlock(func);
3625
BasicBlock *forkBB = reinterpret_cast<BasicBlock *>(condBBs.pop().u.p);
3626
3627
forkBB->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);
3628
condBBs.push(bb);
3629
3630
forkBB->getExit()->asFlow()->target.bb = elseBB;
3631
if (!bb->isTerminated())
3632
mkFlow(OP_BRA, NULL, CC_ALWAYS, NULL);
3633
3634
setPosition(elseBB, true);
3635
}
3636
break;
3637
case TGSI_OPCODE_ENDIF:
3638
{
3639
BasicBlock *convBB = new BasicBlock(func);
3640
BasicBlock *prevBB = reinterpret_cast<BasicBlock *>(condBBs.pop().u.p);
3641
BasicBlock *forkBB = reinterpret_cast<BasicBlock *>(joinBBs.pop().u.p);
3642
3643
if (!bb->isTerminated()) {
3644
// we only want join if none of the clauses ended with CONT/BREAK/RET
3645
if (prevBB->getExit()->op == OP_BRA && joinBBs.getSize() < 6)
3646
insertConvergenceOps(convBB, forkBB);
3647
mkFlow(OP_BRA, convBB, CC_ALWAYS, NULL);
3648
bb->cfg.attach(&convBB->cfg, Graph::Edge::FORWARD);
3649
}
3650
3651
if (prevBB->getExit()->op == OP_BRA) {
3652
prevBB->cfg.attach(&convBB->cfg, Graph::Edge::FORWARD);
3653
prevBB->getExit()->asFlow()->target.bb = convBB;
3654
}
3655
setPosition(convBB, true);
3656
}
3657
break;
3658
case TGSI_OPCODE_BGNLOOP:
3659
{
3660
BasicBlock *lbgnBB = new BasicBlock(func);
3661
BasicBlock *lbrkBB = new BasicBlock(func);
3662
3663
loopBBs.push(lbgnBB);
3664
breakBBs.push(lbrkBB);
3665
if (loopBBs.getSize() > func->loopNestingBound)
3666
func->loopNestingBound++;
3667
3668
mkFlow(OP_PREBREAK, lbrkBB, CC_ALWAYS, NULL);
3669
3670
bb->cfg.attach(&lbgnBB->cfg, Graph::Edge::TREE);
3671
setPosition(lbgnBB, true);
3672
mkFlow(OP_PRECONT, lbgnBB, CC_ALWAYS, NULL);
3673
}
3674
break;
3675
case TGSI_OPCODE_ENDLOOP:
3676
{
3677
BasicBlock *loopBB = reinterpret_cast<BasicBlock *>(loopBBs.pop().u.p);
3678
3679
if (!bb->isTerminated()) {
3680
mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
3681
bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
3682
}
3683
setPosition(reinterpret_cast<BasicBlock *>(breakBBs.pop().u.p), true);
3684
3685
// If the loop never breaks (e.g. only has RET's inside), then there
3686
// will be no way to get to the break bb. However BGNLOOP will have
3687
// already made a PREBREAK to it, so it must be in the CFG.
3688
if (getBB()->cfg.incidentCount() == 0)
3689
loopBB->cfg.attach(&getBB()->cfg, Graph::Edge::TREE);
3690
}
3691
break;
3692
case TGSI_OPCODE_BRK:
3693
{
3694
if (bb->isTerminated())
3695
break;
3696
BasicBlock *brkBB = reinterpret_cast<BasicBlock *>(breakBBs.peek().u.p);
3697
mkFlow(OP_BREAK, brkBB, CC_ALWAYS, NULL);
3698
bb->cfg.attach(&brkBB->cfg, Graph::Edge::CROSS);
3699
}
3700
break;
3701
case TGSI_OPCODE_CONT:
3702
{
3703
if (bb->isTerminated())
3704
break;
3705
BasicBlock *contBB = reinterpret_cast<BasicBlock *>(loopBBs.peek().u.p);
3706
mkFlow(OP_CONT, contBB, CC_ALWAYS, NULL);
3707
contBB->explicitCont = true;
3708
bb->cfg.attach(&contBB->cfg, Graph::Edge::BACK);
3709
}
3710
break;
3711
case TGSI_OPCODE_BGNSUB:
{
Subroutine *s = getSubroutine(ip);
BasicBlock *entry = new BasicBlock(s->f);
BasicBlock *leave = new BasicBlock(s->f);

// multiple entrypoints possible, keep the graph connected
if (prog->getType() == Program::TYPE_COMPUTE)
prog->main->call.attach(&s->f->call, Graph::Edge::TREE);

sub.cur = s;
s->f->setEntry(entry);
s->f->setExit(leave);
setPosition(entry, true);
return true;
}
case TGSI_OPCODE_ENDSUB:
{
sub.cur = getSubroutine(prog->main);
setPosition(BasicBlock::get(sub.cur->f->cfg.getRoot()), true);
return true;
}
case TGSI_OPCODE_CAL:
{
Subroutine *s = getSubroutine(tgsi.getLabel());
mkFlow(OP_CALL, s->f, CC_ALWAYS, NULL);
func->call.attach(&s->f->call, Graph::Edge::TREE);
return true;
}
case TGSI_OPCODE_RET:
{
if (bb->isTerminated())
return true;
BasicBlock *leave = BasicBlock::get(func->cfgExit);

if (!isEndOfSubroutine(ip + 1)) {
// insert a PRERET at the entry if this is an early return
// (only needed for sharing code in the epilogue)
BasicBlock *root = BasicBlock::get(func->cfg.getRoot());
if (root->getEntry() == NULL || root->getEntry()->op != OP_PRERET) {
BasicBlock *pos = getBB();
setPosition(root, false);
mkFlow(OP_PRERET, leave, CC_ALWAYS, NULL)->fixed = 1;
setPosition(pos, true);
}
}
mkFlow(OP_RET, NULL, CC_ALWAYS, NULL)->fixed = 1;
bb->cfg.attach(&leave->cfg, Graph::Edge::CROSS);
}
break;
case TGSI_OPCODE_END:
{
// attach and generate epilogue code
BasicBlock *epilogue = BasicBlock::get(func->cfgExit);
bb->cfg.attach(&epilogue->cfg, Graph::Edge::TREE);
setPosition(epilogue, true);
if (prog->getType() == Program::TYPE_FRAGMENT)
exportOutputs();
if ((prog->getType() == Program::TYPE_VERTEX ||
prog->getType() == Program::TYPE_TESSELLATION_EVAL
) && info_out->io.genUserClip > 0)
handleUserClipPlanes();
mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
}
break;
case TGSI_OPCODE_SWITCH:
case TGSI_OPCODE_CASE:
ERROR("switch/case opcode encountered, should have been lowered\n");
abort();
break;
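// Memory and image opcodes: LOAD/STORE and the ATOM* family are delegated to
// the handleLOAD()/handleSTORE()/handleATOM() helpers (for atomics only the
// opcode-to-subOp mapping happens here); BARRIER and MEMBAR are emitted
// inline as OP_BAR/OP_MEMBAR with the matching subOp.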
case TGSI_OPCODE_LOAD:
handleLOAD(dst0);
break;
case TGSI_OPCODE_STORE:
handleSTORE();
break;
case TGSI_OPCODE_BARRIER:
geni = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
geni->fixed = 1;
geni->subOp = NV50_IR_SUBOP_BAR_SYNC;
break;
case TGSI_OPCODE_MEMBAR:
{
uint32_t level = tgsi.getSrc(0).getValueU32(0, code->immd.data);
geni = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
geni->fixed = 1;
if (!(level & ~(TGSI_MEMBAR_THREAD_GROUP | TGSI_MEMBAR_SHARED)))
geni->subOp = NV50_IR_SUBOP_MEMBAR(M, CTA);
else
geni->subOp = NV50_IR_SUBOP_MEMBAR(M, GL);
}
break;
case TGSI_OPCODE_ATOMUADD:
case TGSI_OPCODE_ATOMXCHG:
case TGSI_OPCODE_ATOMCAS:
case TGSI_OPCODE_ATOMAND:
case TGSI_OPCODE_ATOMOR:
case TGSI_OPCODE_ATOMXOR:
case TGSI_OPCODE_ATOMUMIN:
case TGSI_OPCODE_ATOMIMIN:
case TGSI_OPCODE_ATOMUMAX:
case TGSI_OPCODE_ATOMIMAX:
case TGSI_OPCODE_ATOMFADD:
case TGSI_OPCODE_ATOMDEC_WRAP:
case TGSI_OPCODE_ATOMINC_WRAP:
handleATOM(dst0, dstTy, tgsi::opcodeToSubOp(tgsi.getOpcode()));
break;
case TGSI_OPCODE_RESQ:
if (tgsi.getSrc(0).getFile() == TGSI_FILE_BUFFER) {
Value *ind = NULL;
if (tgsi.getSrc(0).isIndirect(0))
ind = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
geni = mkOp1(OP_BUFQ, TYPE_U32, dst0[0],
makeSym(tgsi.getSrc(0).getFile(),
tgsi.getSrc(0).getIndex(0), -1, 0, 0));
if (ind)
geni->setIndirect(0, 1, ind);
} else {
TexInstruction *texi = new_TexInstruction(func, OP_SUQ);
for (int c = 0, d = 0; c < 4; ++c) {
if (dst0[c]) {
texi->setDef(d++, dst0[c]);
texi->tex.mask |= 1 << c;
}
}
if (tgsi.getSrc(0).getFile() == TGSI_FILE_IMAGE) {
texi->tex.r = tgsi.getSrc(0).getIndex(0);
if (tgsi.getSrc(0).isIndirect(0))
texi->setIndirectR(fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, NULL));
} else {
texi->tex.bindless = true;
texi->setIndirectR(fetchSrc(0, 0));
}
texi->tex.target = tgsi.getImageTarget();

bb->insertTail(texi);
}
break;
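// Bitfield extract: EXTBF takes the field as a packed (width << 8) | offset
// operand. With immediate offset/width the pack is folded into a constant;
// otherwise INSBF with the 0x808 descriptor (insert 8 bits at bit 8) builds
// it at runtime. Roughly, per enabled channel of UBFE dst, val, off, wid:
//   tmp = (wid << 8) | off   (immediate, or INSBF at runtime)
//   dst = EXTBF(val, tmp)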
case TGSI_OPCODE_IBFE:
case TGSI_OPCODE_UBFE:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = fetchSrc(0, c);
val0 = getScratch();
if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE &&
tgsi.getSrc(2).getFile() == TGSI_FILE_IMMEDIATE) {
loadImm(val0, (tgsi.getSrc(2).getValueU32(c, code->immd.data) << 8) |
tgsi.getSrc(1).getValueU32(c, code->immd.data));
} else {
src1 = fetchSrc(1, c);
src2 = fetchSrc(2, c);
mkOp3(OP_INSBF, TYPE_U32, val0, src2, mkImm(0x808), src1);
}
mkOp2(OP_EXTBF, dstTy, dst0[c], src0, val0);
}
break;
case TGSI_OPCODE_BFI:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = fetchSrc(0, c);
src1 = fetchSrc(1, c);
src2 = fetchSrc(2, c);
src3 = fetchSrc(3, c);
val0 = getScratch();
mkOp3(OP_INSBF, TYPE_U32, val0, src3, mkImm(0x808), src2);
mkOp3(OP_INSBF, TYPE_U32, dst0[c], src1, val0, src0);
}
break;
case TGSI_OPCODE_LSB:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = fetchSrc(0, c);
val0 = getScratch();
mkOp1(OP_BREV, TYPE_U32, val0, src0);
geni = mkOp1(OP_BFIND, TYPE_U32, dst0[c], val0);
geni->subOp = NV50_IR_SUBOP_BFIND_SAMT;
}
break;
case TGSI_OPCODE_IMSB:
case TGSI_OPCODE_UMSB:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = fetchSrc(0, c);
mkOp1(OP_BFIND, srcTy, dst0[c], src0);
}
break;
case TGSI_OPCODE_BREV:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = fetchSrc(0, c);
mkOp1(OP_BREV, TYPE_U32, dst0[c], src0);
}
break;
case TGSI_OPCODE_POPC:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = fetchSrc(0, c);
mkOp2(OP_POPCNT, TYPE_U32, dst0[c], src0, src0);
}
break;
case TGSI_OPCODE_INTERP_CENTROID:
case TGSI_OPCODE_INTERP_SAMPLE:
case TGSI_OPCODE_INTERP_OFFSET:
handleINTERP(dst0);
break;
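// 64-bit and double opcodes: TGSI stores a 64-bit value as two consecutive
// 32-bit channels (x/y and z/w). The cases below MERGE a channel pair into a
// single 64-bit SSA value, apply the operation, and SPLIT the result back
// into two 32-bit halves. As a rough sketch, a DADD on the xy pair becomes:
//   a = merge(src0.x, src0.y)   b = merge(src1.x, src1.y)
//   r = add f64 a, b
//   (dst.x, dst.y) = split(r)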
case TGSI_OPCODE_I642F:
case TGSI_OPCODE_U642F:
case TGSI_OPCODE_D2I:
case TGSI_OPCODE_D2U:
case TGSI_OPCODE_D2F: {
int pos = 0;
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
Value *dreg = getSSA(8);
src0 = fetchSrc(0, pos);
src1 = fetchSrc(0, pos + 1);
mkOp2(OP_MERGE, TYPE_U64, dreg, src0, src1);
Instruction *cvt = mkCvt(OP_CVT, dstTy, dst0[c], srcTy, dreg);
if (!isFloatType(dstTy))
cvt->rnd = ROUND_Z;
pos += 2;
}
break;
}
case TGSI_OPCODE_I2I64:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
dst0[c] = fetchSrc(0, c / 2);
mkOp2(OP_SHR, TYPE_S32, dst0[c + 1], dst0[c], loadImm(NULL, 31));
c++;
}
break;
case TGSI_OPCODE_U2I64:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
dst0[c] = fetchSrc(0, c / 2);
dst0[c + 1] = zero;
c++;
}
break;
case TGSI_OPCODE_F2I64:
case TGSI_OPCODE_F2U64:
case TGSI_OPCODE_I2D:
case TGSI_OPCODE_U2D:
case TGSI_OPCODE_F2D:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
Value *dreg = getSSA(8);
Instruction *cvt = mkCvt(OP_CVT, dstTy, dreg, srcTy, fetchSrc(0, c / 2));
if (!isFloatType(dstTy))
cvt->rnd = ROUND_Z;
mkSplit(&dst0[c], 4, dreg);
c++;
}
break;
case TGSI_OPCODE_D2I64:
case TGSI_OPCODE_D2U64:
case TGSI_OPCODE_I642D:
case TGSI_OPCODE_U642D:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = getSSA(8);
Value *dst = getSSA(8), *tmp[2];
tmp[0] = fetchSrc(0, c);
tmp[1] = fetchSrc(0, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
Instruction *cvt = mkCvt(OP_CVT, dstTy, dst, srcTy, src0);
if (!isFloatType(dstTy))
cvt->rnd = ROUND_Z;
mkSplit(&dst0[c], 4, dst);
c++;
}
break;
case TGSI_OPCODE_I64NEG:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = getSSA(8);
Value *dst = getSSA(8), *tmp[2];
tmp[0] = fetchSrc(0, c);
tmp[1] = fetchSrc(0, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
mkOp2(OP_SUB, dstTy, dst, zero, src0);
mkSplit(&dst0[c], 4, dst);
c++;
}
break;
case TGSI_OPCODE_I64ABS:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = getSSA(8);
Value *neg = getSSA(8), *srcComp[2], *negComp[2];
srcComp[0] = fetchSrc(0, c);
srcComp[1] = fetchSrc(0, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src0, srcComp[0], srcComp[1]);
mkOp2(OP_SUB, dstTy, neg, zero, src0);
mkSplit(negComp, 4, neg);
mkCmp(OP_SLCT, CC_LT, TYPE_S32, dst0[c], TYPE_S32,
negComp[0], srcComp[0], srcComp[1]);
mkCmp(OP_SLCT, CC_LT, TYPE_S32, dst0[c + 1], TYPE_S32,
negComp[1], srcComp[1], srcComp[1]);
c++;
}
break;
case TGSI_OPCODE_DABS:
case TGSI_OPCODE_DNEG:
case TGSI_OPCODE_DRCP:
case TGSI_OPCODE_DSQRT:
case TGSI_OPCODE_DRSQ:
case TGSI_OPCODE_DTRUNC:
case TGSI_OPCODE_DCEIL:
case TGSI_OPCODE_DFLR:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = getSSA(8);
Value *dst = getSSA(8), *tmp[2];
tmp[0] = fetchSrc(0, c);
tmp[1] = fetchSrc(0, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
mkOp1(op, dstTy, dst, src0);
mkSplit(&dst0[c], 4, dst);
c++;
}
break;
case TGSI_OPCODE_DFRAC:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = getSSA(8);
Value *dst = getSSA(8), *tmp[2];
tmp[0] = fetchSrc(0, c);
tmp[1] = fetchSrc(0, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
mkOp1(OP_FLOOR, TYPE_F64, dst, src0);
mkOp2(OP_SUB, TYPE_F64, dst, src0, dst);
mkSplit(&dst0[c], 4, dst);
c++;
}
break;
case TGSI_OPCODE_U64SEQ:
case TGSI_OPCODE_U64SNE:
case TGSI_OPCODE_U64SLT:
case TGSI_OPCODE_U64SGE:
case TGSI_OPCODE_I64SLT:
case TGSI_OPCODE_I64SGE:
case TGSI_OPCODE_DSLT:
case TGSI_OPCODE_DSGE:
case TGSI_OPCODE_DSEQ:
case TGSI_OPCODE_DSNE: {
int pos = 0;
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
Value *tmp[2];

src0 = getSSA(8);
src1 = getSSA(8);
tmp[0] = fetchSrc(0, pos);
tmp[1] = fetchSrc(0, pos + 1);
mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
tmp[0] = fetchSrc(1, pos);
tmp[1] = fetchSrc(1, pos + 1);
mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
mkCmp(op, tgsi.getSetCond(), dstTy, dst0[c], srcTy, src0, src1);
pos += 2;
}
break;
}
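// 64-bit min/max avoids the merge/split dance: the high words are compared
// first (SUBOP_MINMAX_HIGH) and the result is forwarded through a flags
// register to the low-word op (SUBOP_MINMAX_LOW), presumably so that equal
// high words defer the decision to the low words.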
case TGSI_OPCODE_U64MIN:
case TGSI_OPCODE_U64MAX:
case TGSI_OPCODE_I64MIN:
case TGSI_OPCODE_I64MAX: {
dstTy = isSignedIntType(dstTy) ? TYPE_S32 : TYPE_U32;
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
Value *flag = getSSA(1, FILE_FLAGS);
src0 = fetchSrc(0, c + 1);
src1 = fetchSrc(1, c + 1);
geni = mkOp2(op, dstTy, dst0[c + 1], src0, src1);
geni->subOp = NV50_IR_SUBOP_MINMAX_HIGH;
geni->setFlagsDef(1, flag);

src0 = fetchSrc(0, c);
src1 = fetchSrc(1, c);
geni = mkOp2(op, TYPE_U32, dst0[c], src0, src1);
geni->subOp = NV50_IR_SUBOP_MINMAX_LOW;
geni->setFlagsSrc(2, flag);

c++;
}
break;
}
case TGSI_OPCODE_U64SHL:
case TGSI_OPCODE_I64SHR:
case TGSI_OPCODE_U64SHR:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = getSSA(8);
Value *dst = getSSA(8), *tmp[2];
tmp[0] = fetchSrc(0, c);
tmp[1] = fetchSrc(0, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
// Theoretically src1 is a 64-bit value but in practice only the low
// bits matter. The IR expects this to be a 32-bit value.
src1 = fetchSrc(1, c);
mkOp2(op, dstTy, dst, src0, src1);
mkSplit(&dst0[c], 4, dst);
c++;
}
break;
case TGSI_OPCODE_U64ADD:
case TGSI_OPCODE_U64MUL:
case TGSI_OPCODE_DADD:
case TGSI_OPCODE_DMUL:
case TGSI_OPCODE_DDIV:
case TGSI_OPCODE_DMAX:
case TGSI_OPCODE_DMIN:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = getSSA(8);
src1 = getSSA(8);
Value *dst = getSSA(8), *tmp[2];
tmp[0] = fetchSrc(0, c);
tmp[1] = fetchSrc(0, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
tmp[0] = fetchSrc(1, c);
tmp[1] = fetchSrc(1, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
mkOp2(op, dstTy, dst, src0, src1);
mkSplit(&dst0[c], 4, dst);
c++;
}
break;
case TGSI_OPCODE_DMAD:
case TGSI_OPCODE_DFMA:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = getSSA(8);
src1 = getSSA(8);
src2 = getSSA(8);
Value *dst = getSSA(8), *tmp[2];
tmp[0] = fetchSrc(0, c);
tmp[1] = fetchSrc(0, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
tmp[0] = fetchSrc(1, c);
tmp[1] = fetchSrc(1, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
tmp[0] = fetchSrc(2, c);
tmp[1] = fetchSrc(2, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src2, tmp[0], tmp[1]);
mkOp3(op, dstTy, dst, src0, src1, src2);
mkSplit(&dst0[c], 4, dst);
c++;
}
break;
case TGSI_OPCODE_DROUND:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = getSSA(8);
Value *dst = getSSA(8), *tmp[2];
tmp[0] = fetchSrc(0, c);
tmp[1] = fetchSrc(0, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
mkCvt(OP_CVT, TYPE_F64, dst, TYPE_F64, src0)
->rnd = ROUND_NI;
mkSplit(&dst0[c], 4, dst);
c++;
}
break;
case TGSI_OPCODE_DSSG:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = getSSA(8);
Value *dst = getSSA(8), *dstF32 = getSSA(), *tmp[2];
tmp[0] = fetchSrc(0, c);
tmp[1] = fetchSrc(0, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);

val0 = getScratch();
val1 = getScratch();
// The zero is wrong here since it's only 32-bit, but it works out in
// the end since it gets replaced with $r63.
mkCmp(OP_SET, CC_GT, TYPE_F32, val0, TYPE_F64, src0, zero);
mkCmp(OP_SET, CC_LT, TYPE_F32, val1, TYPE_F64, src0, zero);
mkOp2(OP_SUB, TYPE_F32, dstF32, val0, val1);
mkCvt(OP_CVT, TYPE_F64, dst, TYPE_F32, dstF32);
mkSplit(&dst0[c], 4, dst);
c++;
}
break;
case TGSI_OPCODE_I64SSG:
FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
src0 = getSSA(8);
Value *tmp[2];
tmp[0] = fetchSrc(0, c);
tmp[1] = fetchSrc(0, c + 1);
mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);

val0 = getScratch();
val1 = getScratch();
mkCmp(OP_SET, CC_GT, TYPE_U32, val0, TYPE_S64, src0, zero);
mkCmp(OP_SET, CC_LT, TYPE_U32, val1, TYPE_S64, src0, zero);
mkOp2(OP_SUB, TYPE_S32, dst0[c], val1, val0);
mkOp2(OP_SHR, TYPE_S32, dst0[c + 1], dst0[c], loadImm(0, 31));
c++;
}
break;
default:
ERROR("unhandled TGSI opcode: %u\n", tgsi.getOpcode());
assert(0);
break;
}

if (tgsi.dstCount() && tgsi.getOpcode() != TGSI_OPCODE_STORE) {
for (c = 0; c < 4; ++c) {
if (!dst0[c])
continue;
if (dst0[c] != rDst0[c])
mkMov(rDst0[c], dst0[c]);
storeDst(0, c, rDst0[c]);
}
}
vtxBaseValid = 0;

return true;
}

void
Converter::exportOutputs()
{
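// Fragment shader epilogue: with alpha-test emulation requested
// (alphaRefBase != 0), COLOR0.w is compared against the reference value read
// from the driver's auxiliary constant buffer and the fragment is DISCARDed
// when the predicate fails. All collected outputs are then written with
// EXPORT stores; the depth output (POSITION semantic) is saturated first.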
if (info->io.alphaRefBase) {
for (unsigned int i = 0; i < info_out->numOutputs; ++i) {
if (info_out->out[i].sn != TGSI_SEMANTIC_COLOR ||
info_out->out[i].si != 0)
continue;
const unsigned int c = 3;
if (!oData.exists(sub.cur->values, i, c))
continue;
Value *val = oData.load(sub.cur->values, i, c, NULL);
if (!val)
continue;

Symbol *ref = mkSymbol(FILE_MEMORY_CONST, info->io.auxCBSlot,
TYPE_U32, info->io.alphaRefBase);
Value *pred = new_LValue(func, FILE_PREDICATE);
mkCmp(OP_SET, CC_TR, TYPE_U32, pred, TYPE_F32, val,
mkLoadv(TYPE_U32, ref, NULL))
->subOp = 1;
mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_NOT_P, pred);
}
}

for (unsigned int i = 0; i < info_out->numOutputs; ++i) {
for (unsigned int c = 0; c < 4; ++c) {
if (!oData.exists(sub.cur->values, i, c))
continue;
Symbol *sym = mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_F32,
info_out->out[i].slot[c] * 4);
Value *val = oData.load(sub.cur->values, i, c, NULL);
if (val) {
if (info_out->out[i].sn == TGSI_SEMANTIC_POSITION)
mkOp1(OP_SAT, TYPE_F32, val, val);
mkStore(OP_EXPORT, TYPE_F32, sym, NULL, val);
}
}
}
}

Converter::Converter(Program *ir, const tgsi::Source *code, nv50_ir_prog_info_out *info_out)
: ConverterCommon(ir, code->info, info_out),
code(code),
tgsi(NULL),
tData(this), lData(this), aData(this), oData(this)
{
const unsigned tSize = code->fileSize(TGSI_FILE_TEMPORARY);
const unsigned aSize = code->fileSize(TGSI_FILE_ADDRESS);
const unsigned oSize = code->fileSize(TGSI_FILE_OUTPUT);

tData.setup(TGSI_FILE_TEMPORARY, 0, 0, tSize, 4, 4, FILE_GPR, 0);
lData.setup(TGSI_FILE_TEMPORARY, 1, 0, tSize, 4, 4, FILE_MEMORY_LOCAL, 0);
aData.setup(TGSI_FILE_ADDRESS, 0, 0, aSize, 4, 4, FILE_GPR, 0);
oData.setup(TGSI_FILE_OUTPUT, 0, 0, oSize, 4, 4, FILE_GPR, 0);

zero = mkImm((uint32_t)0);

vtxBaseValid = 0;
}

Converter::~Converter()
{
}

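// BindArgumentsPass runs over the finished program: for every non-builtin
// OP_CALL it binds the caller-side values of the callee's TGSI-backed
// registers as call sources/defs, and for every function other than main it
// derives the in/out prototypes from the entry live set and the exit def set.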
inline const Converter::Location *
Converter::BindArgumentsPass::getValueLocation(Subroutine *s, Value *v)
{
ValueMap::l_iterator it = s->values.l.find(v);
return it == s->values.l.end() ? NULL : &it->second;
}

template<typename T> inline void
Converter::BindArgumentsPass::updateCallArgs(
Instruction *i, void (Instruction::*setArg)(int, Value *),
T (Function::*proto))
{
Function *g = i->asFlow()->target.fn;
Subroutine *subg = conv.getSubroutine(g);

for (unsigned a = 0; a < (g->*proto).size(); ++a) {
Value *v = (g->*proto)[a].get();
const Converter::Location &l = *getValueLocation(subg, v);
Converter::DataArray *array = conv.getArrayForFile(l.array, l.arrayIdx);

(i->*setArg)(a, array->acquire(sub->values, l.i, l.c));
}
}

template<typename T> inline void
Converter::BindArgumentsPass::updatePrototype(
BitSet *set, void (Function::*updateSet)(), T (Function::*proto))
{
(func->*updateSet)();

for (unsigned i = 0; i < set->getSize(); ++i) {
Value *v = func->getLValue(i);
const Converter::Location *l = getValueLocation(sub, v);

// only include values with a matching TGSI register
if (set->test(i) && l && !conv.code->locals.count(*l))
(func->*proto).push_back(v);
}
}

bool
Converter::BindArgumentsPass::visit(Function *f)
{
sub = conv.getSubroutine(f);

for (ArrayList::Iterator bi = f->allBBlocks.iterator();
!bi.end(); bi.next()) {
for (Instruction *i = BasicBlock::get(bi)->getFirst();
i; i = i->next) {
if (i->op == OP_CALL && !i->asFlow()->builtin) {
updateCallArgs(i, &Instruction::setSrc, &Function::ins);
updateCallArgs(i, &Instruction::setDef, &Function::outs);
}
}
}

if (func == prog->main /* && prog->getType() != Program::TYPE_COMPUTE */)
return true;
updatePrototype(&BasicBlock::get(f->cfg.getRoot())->liveSet,
&Function::buildLiveSets, &Function::ins);
updatePrototype(&BasicBlock::get(f->cfgExit)->defSet,
&Function::buildDefSets, &Function::outs);

return true;
}

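// Top-level driver: create the main function's entry/exit blocks, emit the
// per-stage prologue (TCS derives outBase from laneid - invocation id, FS
// precomputes 1/position.w, presumably for perspective-correct input
// interpolation), then translate each TGSI instruction in order and finally
// run BindArgumentsPass over the call graph.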
bool
Converter::run()
{
BasicBlock *entry = new BasicBlock(prog->main);
BasicBlock *leave = new BasicBlock(prog->main);

prog->main->setEntry(entry);
prog->main->setExit(leave);

setPosition(entry, true);
sub.cur = getSubroutine(prog->main);

if (info_out->io.genUserClip > 0) {
for (int c = 0; c < 4; ++c)
clipVtx[c] = getScratch();
}

switch (prog->getType()) {
case Program::TYPE_TESSELLATION_CONTROL:
outBase = mkOp2v(
OP_SUB, TYPE_U32, getSSA(),
mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
break;
case Program::TYPE_FRAGMENT: {
Symbol *sv = mkSysVal(SV_POSITION, 3);
fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
mkOp1(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
break;
}
default:
break;
}

if (info->io.viewportId >= 0)
viewport = getScratch();
else
viewport = NULL;

for (ip = 0; ip < code->scan.num_instructions; ++ip) {
if (!handleInstruction(&code->insns[ip]))
return false;
}

if (!BindArgumentsPass(*this).run(prog))
return false;

return true;
}

} // unnamed namespace

namespace nv50_ir {

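// Public entry point: scan the TGSI source, record the required TLS space,
// then run the Converter to build the nv50 IR program.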
bool
Program::makeFromTGSI(struct nv50_ir_prog_info *info,
struct nv50_ir_prog_info_out *info_out)
{
tgsi::Source src(info, info_out, this);
if (!src.scanSource())
return false;
tlsSize = info_out->bin.tlsSpace;

Converter builder(this, &src, info_out);
return builder.run();
}

} // namespace nv50_ir