GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/nouveau/nvc0/nvc0_program.c
/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pipe/p_defines.h"

#include "compiler/nir/nir.h"
#include "tgsi/tgsi_ureg.h"
#include "util/blob.h"

#include "nvc0/nvc0_context.h"

#include "codegen/nv50_ir_driver.h"
#include "nvc0/nve4_compute.h"
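
/* Descriptive note: the tables below map TGSI semantics to byte offsets in
 * the fixed per-vertex attribute space shared by the VTG stages and the FP.
 * Each vec4 varying occupies 0x10 bytes, and the "slot" indices used by the
 * slot-assignment helpers are always offset / 4, i.e. 32-bit words.
 */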
/* NOTE: Using a[0x270] in FP may cause an error even if we're using less than
 * 124 scalar varying values.
 */
static uint32_t
nvc0_shader_input_address(unsigned sn, unsigned si)
{
   switch (sn) {
   case TGSI_SEMANTIC_TESSOUTER:     return 0x000 + si * 0x4;
   case TGSI_SEMANTIC_TESSINNER:     return 0x010 + si * 0x4;
   case TGSI_SEMANTIC_PATCH:         return 0x020 + si * 0x10;
   case TGSI_SEMANTIC_PRIMID:        return 0x060;
   case TGSI_SEMANTIC_LAYER:         return 0x064;
   case TGSI_SEMANTIC_VIEWPORT_INDEX:return 0x068;
   case TGSI_SEMANTIC_PSIZE:         return 0x06c;
   case TGSI_SEMANTIC_POSITION:      return 0x070;
   case TGSI_SEMANTIC_GENERIC:       return 0x080 + si * 0x10;
   case TGSI_SEMANTIC_FOG:           return 0x2e8;
   case TGSI_SEMANTIC_COLOR:         return 0x280 + si * 0x10;
   case TGSI_SEMANTIC_BCOLOR:        return 0x2a0 + si * 0x10;
   case TGSI_SEMANTIC_CLIPDIST:      return 0x2c0 + si * 0x10;
   case TGSI_SEMANTIC_CLIPVERTEX:    return 0x270;
   case TGSI_SEMANTIC_PCOORD:        return 0x2e0;
   case TGSI_SEMANTIC_TESSCOORD:     return 0x2f0;
   case TGSI_SEMANTIC_INSTANCEID:    return 0x2f8;
   case TGSI_SEMANTIC_VERTEXID:      return 0x2fc;
   case TGSI_SEMANTIC_TEXCOORD:      return 0x300 + si * 0x10;
   default:
      assert(!"invalid TGSI input semantic");
      return ~0;
   }
}

static uint32_t
nvc0_shader_output_address(unsigned sn, unsigned si)
{
   switch (sn) {
   case TGSI_SEMANTIC_TESSOUTER:     return 0x000 + si * 0x4;
   case TGSI_SEMANTIC_TESSINNER:     return 0x010 + si * 0x4;
   case TGSI_SEMANTIC_PATCH:         return 0x020 + si * 0x10;
   case TGSI_SEMANTIC_PRIMID:        return 0x060;
   case TGSI_SEMANTIC_LAYER:         return 0x064;
   case TGSI_SEMANTIC_VIEWPORT_INDEX:return 0x068;
   case TGSI_SEMANTIC_PSIZE:         return 0x06c;
   case TGSI_SEMANTIC_POSITION:      return 0x070;
   case TGSI_SEMANTIC_GENERIC:       return 0x080 + si * 0x10;
   case TGSI_SEMANTIC_FOG:           return 0x2e8;
   case TGSI_SEMANTIC_COLOR:         return 0x280 + si * 0x10;
   case TGSI_SEMANTIC_BCOLOR:        return 0x2a0 + si * 0x10;
   case TGSI_SEMANTIC_CLIPDIST:      return 0x2c0 + si * 0x10;
   case TGSI_SEMANTIC_CLIPVERTEX:    return 0x270;
   case TGSI_SEMANTIC_TEXCOORD:      return 0x300 + si * 0x10;
   case TGSI_SEMANTIC_VIEWPORT_MASK: return 0x3a0;
   case TGSI_SEMANTIC_EDGEFLAG:      return ~0;
   default:
      assert(!"invalid TGSI output semantic");
      return ~0;
   }
}
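
/* The assign_*_slots() helpers below are invoked by the code generator
 * (through info->assignSlots, set in nvc0_program_translate()) to bind each
 * shader input/output to one of the attribute slots defined above.
 */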

static int
nvc0_vp_assign_input_slots(struct nv50_ir_prog_info_out *info)
{
   unsigned i, c, n;

   for (n = 0, i = 0; i < info->numInputs; ++i) {
      switch (info->in[i].sn) {
      case TGSI_SEMANTIC_INSTANCEID: /* for SM4 only, in TGSI they're SVs */
      case TGSI_SEMANTIC_VERTEXID:
         info->in[i].mask = 0x1;
         info->in[i].slot[0] =
            nvc0_shader_input_address(info->in[i].sn, 0) / 4;
         continue;
      default:
         break;
      }
      for (c = 0; c < 4; ++c)
         info->in[i].slot[c] = (0x80 + n * 0x10 + c * 0x4) / 4;
      ++n;
   }

   return 0;
}

static int
nvc0_sp_assign_input_slots(struct nv50_ir_prog_info_out *info)
{
   unsigned offset;
   unsigned i, c;

   for (i = 0; i < info->numInputs; ++i) {
      offset = nvc0_shader_input_address(info->in[i].sn, info->in[i].si);

      for (c = 0; c < 4; ++c)
         info->in[i].slot[c] = (offset + c * 0x4) / 4;
   }

   return 0;
}

static int
nvc0_fp_assign_output_slots(struct nv50_ir_prog_info_out *info)
{
   unsigned count = info->prop.fp.numColourResults * 4;
   unsigned i, c;

   /* Compute the relative position of each color output, since skipped MRT
    * positions will not have registers allocated to them.
    */
   unsigned colors[8] = {0};
   for (i = 0; i < info->numOutputs; ++i)
      if (info->out[i].sn == TGSI_SEMANTIC_COLOR)
         colors[info->out[i].si] = 1;
   for (i = 0, c = 0; i < 8; i++)
      if (colors[i])
         colors[i] = c++;
   for (i = 0; i < info->numOutputs; ++i)
      if (info->out[i].sn == TGSI_SEMANTIC_COLOR)
         for (c = 0; c < 4; ++c)
            info->out[i].slot[c] = colors[info->out[i].si] * 4 + c;

   if (info->io.sampleMask < PIPE_MAX_SHADER_OUTPUTS)
      info->out[info->io.sampleMask].slot[0] = count++;
   else
   if (info->target >= 0xe0)
      count++; /* on Kepler, depth is always last colour reg + 2 */

   if (info->io.fragDepth < PIPE_MAX_SHADER_OUTPUTS)
      info->out[info->io.fragDepth].slot[2] = count;

   return 0;
}

static int
nvc0_sp_assign_output_slots(struct nv50_ir_prog_info_out *info)
{
   unsigned offset;
   unsigned i, c;

   for (i = 0; i < info->numOutputs; ++i) {
      offset = nvc0_shader_output_address(info->out[i].sn, info->out[i].si);

      for (c = 0; c < 4; ++c)
         info->out[i].slot[c] = (offset + c * 0x4) / 4;
   }

   return 0;
}

static int
nvc0_program_assign_varying_slots(struct nv50_ir_prog_info_out *info)
{
   int ret;

   if (info->type == PIPE_SHADER_VERTEX)
      ret = nvc0_vp_assign_input_slots(info);
   else
      ret = nvc0_sp_assign_input_slots(info);
   if (ret)
      return ret;

   if (info->type == PIPE_SHADER_FRAGMENT)
      ret = nvc0_fp_assign_output_slots(info);
   else
      ret = nvc0_sp_assign_output_slots(info);
   return ret;
}
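
/* hdr[4] tracks the range of output slots a downstream stage may read back:
 * bits 12..19 hold the lowest such slot and bits 24..31 the highest. It is
 * seeded with 0xff000 (min > max) so the first update latches real values.
 */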

static inline void
nvc0_vtgp_hdr_update_oread(struct nvc0_program *vp, uint8_t slot)
{
   uint8_t min = (vp->hdr[4] >> 12) & 0xff;
   uint8_t max = (vp->hdr[4] >> 24);

   min = MIN2(min, slot);
   max = MAX2(max, slot);

   vp->hdr[4] = (max << 24) | (min << 12);
}

/* Common part of header generation for VP, TCP, TEP and GP. */
static int
nvc0_vtgp_gen_header(struct nvc0_program *vp, struct nv50_ir_prog_info_out *info)
{
   unsigned i, c, a;

   for (i = 0; i < info->numInputs; ++i) {
      if (info->in[i].patch)
         continue;
      for (c = 0; c < 4; ++c) {
         a = info->in[i].slot[c];
         if (info->in[i].mask & (1 << c))
            vp->hdr[5 + a / 32] |= 1 << (a % 32);
      }
   }

   for (i = 0; i < info->numOutputs; ++i) {
      if (info->out[i].patch)
         continue;
      for (c = 0; c < 4; ++c) {
         if (!(info->out[i].mask & (1 << c)))
            continue;
         assert(info->out[i].slot[c] >= 0x40 / 4);
         a = info->out[i].slot[c] - 0x40 / 4;
         vp->hdr[13 + a / 32] |= 1 << (a % 32);
         if (info->out[i].oread)
            nvc0_vtgp_hdr_update_oread(vp, info->out[i].slot[c]);
      }
   }

   for (i = 0; i < info->numSysVals; ++i) {
      switch (info->sv[i].sn) {
      case TGSI_SEMANTIC_PRIMID:
         vp->hdr[5] |= 1 << 24;
         break;
      case TGSI_SEMANTIC_INSTANCEID:
         vp->hdr[10] |= 1 << 30;
         break;
      case TGSI_SEMANTIC_VERTEXID:
         vp->hdr[10] |= 1 << 31;
         break;
      case TGSI_SEMANTIC_TESSCOORD:
         /* We don't have the mask, nor the slots populated. While this could
          * be achieved, the vast majority of the time if either of the coords
          * is read, then both will be read.
          */
         nvc0_vtgp_hdr_update_oread(vp, 0x2f0 / 4);
         nvc0_vtgp_hdr_update_oread(vp, 0x2f4 / 4);
         break;
      default:
         break;
      }
   }

   vp->vp.clip_enable = (1 << info->io.clipDistances) - 1;
   vp->vp.cull_enable =
      ((1 << info->io.cullDistances) - 1) << info->io.clipDistances;
   for (i = 0; i < info->io.cullDistances; ++i)
      vp->vp.clip_mode |= 1 << ((info->io.clipDistances + i) * 4);

   if (info->io.genUserClip < 0)
      vp->vp.num_ucps = PIPE_MAX_CLIP_PLANES + 1; /* prevent rebuilding */

   vp->vp.layer_viewport_relative = info->io.layer_viewport_relative;

   return 0;
}
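
/* hdr[0]'s low bits (0x20061, or 0x20062 for the FP) appear to select the
 * SPH format/version, while bits 10+ encode the program type:
 * 1 = VP, 2 = TCP, 3 = TEP, 4 = GP, 5 = FP (see the gen_header functions).
 */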

static int
nvc0_vp_gen_header(struct nvc0_program *vp, struct nv50_ir_prog_info_out *info)
{
   vp->hdr[0] = 0x20061 | (1 << 10);
   vp->hdr[4] = 0xff000;

   return nvc0_vtgp_gen_header(vp, info);
}

static void
nvc0_tp_get_tess_mode(struct nvc0_program *tp, struct nv50_ir_prog_info_out *info)
{
   if (info->prop.tp.outputPrim == PIPE_PRIM_MAX) {
      tp->tp.tess_mode = ~0;
      return;
   }
   switch (info->prop.tp.domain) {
   case PIPE_PRIM_LINES:
      tp->tp.tess_mode = NVC0_3D_TESS_MODE_PRIM_ISOLINES;
      break;
   case PIPE_PRIM_TRIANGLES:
      tp->tp.tess_mode = NVC0_3D_TESS_MODE_PRIM_TRIANGLES;
      break;
   case PIPE_PRIM_QUADS:
      tp->tp.tess_mode = NVC0_3D_TESS_MODE_PRIM_QUADS;
      break;
   default:
      tp->tp.tess_mode = ~0;
      return;
   }

   /* It seems like lines want the "CW" bit to indicate they're connected, and
    * spit out errors in dmesg when the "CONNECTED" bit is set.
    */
   if (info->prop.tp.outputPrim != PIPE_PRIM_POINTS) {
      if (info->prop.tp.domain == PIPE_PRIM_LINES)
         tp->tp.tess_mode |= NVC0_3D_TESS_MODE_CW;
      else
         tp->tp.tess_mode |= NVC0_3D_TESS_MODE_CONNECTED;
   }

   /* Winding only matters for triangles/quads, not lines. */
   if (info->prop.tp.domain != PIPE_PRIM_LINES &&
       info->prop.tp.outputPrim != PIPE_PRIM_POINTS &&
       info->prop.tp.winding > 0)
      tp->tp.tess_mode |= NVC0_3D_TESS_MODE_CW;

   switch (info->prop.tp.partitioning) {
   case PIPE_TESS_SPACING_EQUAL:
      tp->tp.tess_mode |= NVC0_3D_TESS_MODE_SPACING_EQUAL;
      break;
   case PIPE_TESS_SPACING_FRACTIONAL_ODD:
      tp->tp.tess_mode |= NVC0_3D_TESS_MODE_SPACING_FRACTIONAL_ODD;
      break;
   case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
      tp->tp.tess_mode |= NVC0_3D_TESS_MODE_SPACING_FRACTIONAL_EVEN;
      break;
   default:
      assert(!"invalid tessellator partitioning");
      break;
   }
}

static int
nvc0_tcp_gen_header(struct nvc0_program *tcp, struct nv50_ir_prog_info_out *info)
{
   unsigned opcs = 6; /* output patch constants (at least the TessFactors) */

   if (info->numPatchConstants)
      opcs = 8 + info->numPatchConstants * 4;

   tcp->hdr[0] = 0x20061 | (2 << 10);

   tcp->hdr[1] = opcs << 24;
   tcp->hdr[2] = info->prop.tp.outputPatchSize << 24;

   tcp->hdr[4] = 0xff000; /* initial min/max parallel output read address */

   nvc0_vtgp_gen_header(tcp, info);

   if (info->target >= NVISA_GM107_CHIPSET) {
      /* On GM107+, the number of output patch components has moved in the TCP
       * header, but it seems like the blob still also uses the old position.
       * Also, the high 8 bits are located in between the min/max parallel
       * fields and have to be set after updating the outputs. */
      tcp->hdr[3] = (opcs & 0x0f) << 28;
      tcp->hdr[4] |= (opcs & 0xf0) << 16;
   }

   nvc0_tp_get_tess_mode(tcp, info);

   return 0;
}

static int
nvc0_tep_gen_header(struct nvc0_program *tep, struct nv50_ir_prog_info_out *info)
{
   tep->hdr[0] = 0x20061 | (3 << 10);
   tep->hdr[4] = 0xff000;

   nvc0_vtgp_gen_header(tep, info);

   nvc0_tp_get_tess_mode(tep, info);

   tep->hdr[18] |= 0x3 << 12; /* ? */

   return 0;
}

static int
nvc0_gp_gen_header(struct nvc0_program *gp, struct nv50_ir_prog_info_out *info)
{
   gp->hdr[0] = 0x20061 | (4 << 10);

   gp->hdr[2] = MIN2(info->prop.gp.instanceCount, 32) << 24;

   switch (info->prop.gp.outputPrim) {
   case PIPE_PRIM_POINTS:
      gp->hdr[3] = 0x01000000;
      gp->hdr[0] |= 0xf0000000;
      break;
   case PIPE_PRIM_LINE_STRIP:
      gp->hdr[3] = 0x06000000;
      gp->hdr[0] |= 0x10000000;
      break;
   case PIPE_PRIM_TRIANGLE_STRIP:
      gp->hdr[3] = 0x07000000;
      gp->hdr[0] |= 0x10000000;
      break;
   default:
      assert(0);
      break;
   }

   gp->hdr[4] = CLAMP(info->prop.gp.maxVertices, 1, 1024);

   return nvc0_vtgp_gen_header(gp, info);
}
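
/* Two-bit per-component interpolation codes as written into the FP header's
 * attribute map (see nvc0_fp_gen_header() below).
 */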

#define NVC0_INTERP_FLAT          (1 << 0)
#define NVC0_INTERP_PERSPECTIVE   (2 << 0)
#define NVC0_INTERP_LINEAR        (3 << 0)
#define NVC0_INTERP_CENTROID      (1 << 2)

static uint8_t
nvc0_hdr_interp_mode(const struct nv50_ir_varying *var)
{
   if (var->linear)
      return NVC0_INTERP_LINEAR;
   if (var->flat)
      return NVC0_INTERP_FLAT;
   return NVC0_INTERP_PERSPECTIVE;
}

static int
nvc0_fp_gen_header(struct nvc0_program *fp, struct nv50_ir_prog_info_out *info)
{
   unsigned i, c, a, m;

   /* just 00062 on Kepler */
   fp->hdr[0] = 0x20062 | (5 << 10);
   fp->hdr[5] = 0x80000000; /* getting a trap if FRAG_COORD_UMASK.w = 0 */

   if (info->prop.fp.usesDiscard)
      fp->hdr[0] |= 0x8000;
   if (!info->prop.fp.separateFragData)
      fp->hdr[0] |= 0x4000;
   if (info->io.sampleMask < PIPE_MAX_SHADER_OUTPUTS)
      fp->hdr[19] |= 0x1;
   if (info->prop.fp.writesDepth) {
      fp->hdr[19] |= 0x2;
      fp->flags[0] = 0x11; /* deactivate ZCULL */
   }

   for (i = 0; i < info->numInputs; ++i) {
      m = nvc0_hdr_interp_mode(&info->in[i]);
      if (info->in[i].sn == TGSI_SEMANTIC_COLOR) {
         fp->fp.colors |= 1 << info->in[i].si;
         if (info->in[i].sc)
            fp->fp.color_interp[info->in[i].si] = m | (info->in[i].mask << 4);
      }
      for (c = 0; c < 4; ++c) {
         if (!(info->in[i].mask & (1 << c)))
            continue;
         a = info->in[i].slot[c];
         if (info->in[i].slot[0] >= (0x060 / 4) &&
             info->in[i].slot[0] <= (0x07c / 4)) {
            fp->hdr[5] |= 1 << (24 + (a - 0x060 / 4));
         } else
         if (info->in[i].slot[0] >= (0x2c0 / 4) &&
             info->in[i].slot[0] <= (0x2fc / 4)) {
            fp->hdr[14] |= (1 << (a - 0x280 / 4)) & 0x07ff0000;
         } else {
            if (info->in[i].slot[c] < (0x040 / 4) ||
                info->in[i].slot[c] > (0x380 / 4))
               continue;
            a *= 2;
            if (info->in[i].slot[0] >= (0x300 / 4))
               a -= 32;
            fp->hdr[4 + a / 32] |= m << (a % 32);
         }
      }
   }
   /* GM20x+ needs TGSI_SEMANTIC_POSITION to access sample locations */
   if (info->prop.fp.readsSampleLocations && info->target >= NVISA_GM200_CHIPSET)
      fp->hdr[5] |= 0x30000000;

   for (i = 0; i < info->numOutputs; ++i) {
      if (info->out[i].sn == TGSI_SEMANTIC_COLOR)
         fp->hdr[18] |= 0xf << (4 * info->out[i].si);
   }

   /* There are no "regular" attachments, but the shader still needs to be
    * executed. It seems like it wants to think that it has some color
    * outputs in order to actually run.
    */
   if (info->prop.fp.numColourResults == 0 && !info->prop.fp.writesDepth)
      fp->hdr[18] |= 0xf;

   fp->fp.early_z = info->prop.fp.earlyFragTests;
   fp->fp.sample_mask_in = info->prop.fp.usesSampleMaskIn;
   fp->fp.reads_framebuffer = info->prop.fp.readsFramebuffer;
   fp->fp.post_depth_coverage = info->prop.fp.postDepthCoverage;

   /* Mark position xy and layer as read */
   if (fp->fp.reads_framebuffer)
      fp->hdr[5] |= 0x32000000;

   return 0;
}
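
/* Translate gallium stream-output state into the hardware layout: strides
 * are stored in bytes (gallium's are in dwords), and varying_index[] maps
 * each dword position within a buffer to an output slot, with 0xff meaning
 * "skip this position".
 */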

static struct nvc0_transform_feedback_state *
nvc0_program_create_tfb_state(const struct nv50_ir_prog_info_out *info,
                              const struct pipe_stream_output_info *pso)
{
   struct nvc0_transform_feedback_state *tfb;
   unsigned b, i, c;

   tfb = MALLOC_STRUCT(nvc0_transform_feedback_state);
   if (!tfb)
      return NULL;
   for (b = 0; b < 4; ++b) {
      tfb->stride[b] = pso->stride[b] * 4;
      tfb->varying_count[b] = 0;
   }
   memset(tfb->varying_index, 0xff, sizeof(tfb->varying_index)); /* = skip */

   for (i = 0; i < pso->num_outputs; ++i) {
      unsigned s = pso->output[i].start_component;
      unsigned p = pso->output[i].dst_offset;
      const unsigned r = pso->output[i].register_index;
      b = pso->output[i].output_buffer;

      if (r >= info->numOutputs)
         continue;

      for (c = 0; c < pso->output[i].num_components; ++c)
         tfb->varying_index[b][p++] = info->out[r].slot[s + c];

      tfb->varying_count[b] = MAX2(tfb->varying_count[b], p);
      tfb->stream[b] = pso->output[i].stream;
   }
   for (b = 0; b < 4; ++b) // zero unused indices (looks nicer)
      for (c = tfb->varying_count[b]; c & 3; ++c)
         tfb->varying_index[b][c] = 0;

   return tfb;
}

#ifndef NDEBUG
static void
nvc0_program_dump(struct nvc0_program *prog)
{
   unsigned pos;

   if (prog->type != PIPE_SHADER_COMPUTE) {
      _debug_printf("dumping HDR for type %i\n", prog->type);
      for (pos = 0; pos < ARRAY_SIZE(prog->hdr); ++pos)
         _debug_printf("HDR[%02"PRIxPTR"] = 0x%08x\n",
                       pos * sizeof(prog->hdr[0]), prog->hdr[pos]);
   }
   _debug_printf("shader binary code (0x%x bytes):", prog->code_size);
   for (pos = 0; pos < prog->code_size / 4; ++pos) {
      if ((pos % 8) == 0)
         _debug_printf("\n");
      _debug_printf("%08x ", prog->code[pos]);
   }
   _debug_printf("\n");
}
#endif
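
/* Compile a shader (TGSI or NIR) to GPU code. The serialized codegen input
 * doubles as the disk-cache key: on a hit, the previously generated
 * info_out is deserialized instead of running nv50_ir_generate_code().
 * Afterwards, the stage-specific shader header is derived from the results.
 */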
bool
nvc0_program_translate(struct nvc0_program *prog, uint16_t chipset,
                       struct disk_cache *disk_shader_cache,
                       struct pipe_debug_callback *debug)
{
   struct blob blob;
   size_t cache_size;
   struct nv50_ir_prog_info *info;
   struct nv50_ir_prog_info_out info_out = {};

   int ret = 0;
   cache_key key;
   bool shader_loaded = false;

   info = CALLOC_STRUCT(nv50_ir_prog_info);
   if (!info)
      return false;

   info->type = prog->type;
   info->target = chipset;

   info->bin.sourceRep = prog->pipe.type;
   switch (prog->pipe.type) {
   case PIPE_SHADER_IR_TGSI:
      info->bin.source = (void *)prog->pipe.tokens;
      break;
   case PIPE_SHADER_IR_NIR:
      info->bin.source = (void *)nir_shader_clone(NULL, prog->pipe.ir.nir);
      break;
   default:
      assert(!"unsupported IR!");
      free(info);
      return false;
   }

#ifndef NDEBUG
   info->target = debug_get_num_option("NV50_PROG_CHIPSET", chipset);
   info->optLevel = debug_get_num_option("NV50_PROG_OPTIMIZE", 3);
   info->dbgFlags = debug_get_num_option("NV50_PROG_DEBUG", 0);
   info->omitLineNum = debug_get_num_option("NV50_PROG_DEBUG_OMIT_LINENUM", 0);
#else
   info->optLevel = 3;
#endif
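
/* Locations of driver-maintained data in the auxiliary constant buffer
 * (bound to slot 15 for graphics; compute on Kepler+ uses slot 7 instead,
 * as set up below).
 */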
   info->bin.smemSize = prog->cp.smem_size;
   info->io.genUserClip = prog->vp.num_ucps;
   info->io.auxCBSlot = 15;
   info->io.msInfoCBSlot = 15;
   info->io.ucpBase = NVC0_CB_AUX_UCP_INFO;
   info->io.drawInfoBase = NVC0_CB_AUX_DRAW_INFO;
   info->io.msInfoBase = NVC0_CB_AUX_MS_INFO;
   info->io.bufInfoBase = NVC0_CB_AUX_BUF_INFO(0);
   info->io.suInfoBase = NVC0_CB_AUX_SU_INFO(0);
   if (info->target >= NVISA_GK104_CHIPSET) {
      info->io.texBindBase = NVC0_CB_AUX_TEX_INFO(0);
      info->io.fbtexBindBase = NVC0_CB_AUX_FB_TEX_INFO;
      info->io.bindlessBase = NVC0_CB_AUX_BINDLESS_INFO(0);
   }

   if (prog->type == PIPE_SHADER_COMPUTE) {
      if (info->target >= NVISA_GK104_CHIPSET) {
         info->io.auxCBSlot = 7;
         info->io.msInfoCBSlot = 7;
         info->io.uboInfoBase = NVC0_CB_AUX_UBO_INFO(0);
      }
      info->prop.cp.gridInfoBase = NVC0_CB_AUX_GRID_INFO(0);
   } else {
      info->io.sampleInfoBase = NVC0_CB_AUX_SAMPLE_INFO;
   }

   info->assignSlots = nvc0_program_assign_varying_slots;
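
   /* In the disk cache, the serialized "info" (the key material) is stored
    * first, immediately followed by the serialized "info_out", so a cached
    * entry is deserialized starting at offset blob.size.
    */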
   blob_init(&blob);

   if (disk_shader_cache) {
      if (nv50_ir_prog_info_serialize(&blob, info)) {
         void *cached_data = NULL;

         disk_cache_compute_key(disk_shader_cache, blob.data, blob.size, key);
         cached_data = disk_cache_get(disk_shader_cache, key, &cache_size);

         if (cached_data && cache_size >= blob.size) { // blob.size is the size of serialized "info"
            /* Blob contains only "info". In disk cache, "info_out" comes right after it */
            size_t offset = blob.size;
            if (nv50_ir_prog_info_out_deserialize(cached_data, cache_size, offset, &info_out))
               shader_loaded = true;
            else
               debug_printf("WARNING: Couldn't deserialize shaders");
         }
         free(cached_data);
      } else {
         debug_printf("WARNING: Couldn't serialize input shaders");
      }
   }
   if (!shader_loaded) {
      cache_size = 0;
      ret = nv50_ir_generate_code(info, &info_out);
      if (ret) {
         NOUVEAU_ERR("shader translation failed: %i\n", ret);
         goto out;
      }
      if (disk_shader_cache) {
         if (nv50_ir_prog_info_out_serialize(&blob, &info_out)) {
            disk_cache_put(disk_shader_cache, key, blob.data, blob.size, NULL);
            cache_size = blob.size;
         } else {
            debug_printf("WARNING: Couldn't serialize shaders");
         }
      }
   }
   blob_finish(&blob);

   prog->code = info_out.bin.code;
   prog->code_size = info_out.bin.codeSize;
   prog->relocs = info_out.bin.relocData;
   prog->fixups = info_out.bin.fixupData;
   if (info_out.target >= NVISA_GV100_CHIPSET)
      prog->num_gprs = MIN2(info_out.bin.maxGPR + 5, 256); //XXX: why?
   else
      prog->num_gprs = MAX2(4, (info_out.bin.maxGPR + 1));
   prog->cp.smem_size = info_out.bin.smemSize;
   prog->num_barriers = info_out.numBarriers;

   prog->vp.need_vertex_id = info_out.io.vertexId < PIPE_MAX_SHADER_INPUTS;
   prog->vp.need_draw_parameters = info_out.prop.vp.usesDrawParameters;

   if (info_out.io.edgeFlagOut < PIPE_MAX_ATTRIBS)
      info_out.out[info_out.io.edgeFlagOut].mask = 0; /* for headergen */
   prog->vp.edgeflag = info_out.io.edgeFlagIn;

   switch (prog->type) {
   case PIPE_SHADER_VERTEX:
      ret = nvc0_vp_gen_header(prog, &info_out);
      break;
   case PIPE_SHADER_TESS_CTRL:
      ret = nvc0_tcp_gen_header(prog, &info_out);
      break;
   case PIPE_SHADER_TESS_EVAL:
      ret = nvc0_tep_gen_header(prog, &info_out);
      break;
   case PIPE_SHADER_GEOMETRY:
      ret = nvc0_gp_gen_header(prog, &info_out);
      break;
   case PIPE_SHADER_FRAGMENT:
      ret = nvc0_fp_gen_header(prog, &info_out);
      break;
   case PIPE_SHADER_COMPUTE:
      break;
   default:
      ret = -1;
      NOUVEAU_ERR("unknown program type: %u\n", prog->type);
      break;
   }
   if (ret)
      goto out;

   if (info_out.bin.tlsSpace) {
      assert(info_out.bin.tlsSpace < (1 << 24));
      prog->hdr[0] |= 1 << 26;
      prog->hdr[1] |= align(info_out.bin.tlsSpace, 0x10); /* l[] size */
      prog->need_tls = true;
   }
   /* TODO: factor 2 only needed where joinat/precont is used,
    * and we only have to count non-uniform branches
    */
   /*
   if ((info->maxCFDepth * 2) > 16) {
      prog->hdr[2] |= (((info->maxCFDepth * 2) + 47) / 48) * 0x200;
      prog->need_tls = true;
   }
   */
   if (info_out.io.globalAccess)
      prog->hdr[0] |= 1 << 26;
   if (info_out.io.globalAccess & 0x2)
      prog->hdr[0] |= 1 << 16;
   if (info_out.io.fp64)
      prog->hdr[0] |= 1 << 27;

   if (prog->pipe.stream_output.num_outputs)
      prog->tfb = nvc0_program_create_tfb_state(&info_out,
                                                &prog->pipe.stream_output);

   pipe_debug_message(debug, SHADER_INFO,
                      "type: %d, local: %d, shared: %d, gpr: %d, inst: %d, bytes: %d, cached: %zd",
                      prog->type, info_out.bin.tlsSpace, info_out.bin.smemSize,
                      prog->num_gprs, info_out.bin.instructions,
                      info_out.bin.codeSize, cache_size);

#ifndef NDEBUG
   if (debug_get_option("NV50_PROG_CHIPSET", NULL) && info->dbgFlags)
      nvc0_program_dump(prog);
#endif

out:
   if (info->bin.sourceRep == PIPE_SHADER_IR_NIR)
      ralloc_free((void *)info->bin.source);
   FREE(info);
   return !ret;
}
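
/* Reserve space for the program in the text heap. For non-compute shaders
 * the shader header is stored in front of the code; on Kepler through
 * pre-Turing parts, code_base is then nudged so that code_base plus the
 * header size lands on the 0x80-byte boundary the hardware expects
 * instructions at.
 */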

static inline int
nvc0_program_alloc_code(struct nvc0_context *nvc0, struct nvc0_program *prog)
{
   struct nvc0_screen *screen = nvc0->screen;
   const bool is_cp = prog->type == PIPE_SHADER_COMPUTE;
   int ret;
   uint32_t size = prog->code_size;

   if (!is_cp) {
      if (screen->eng3d->oclass < TU102_3D_CLASS)
         size += GF100_SHADER_HEADER_SIZE;
      else
         size += TU102_SHADER_HEADER_SIZE;
   }

   /* On Fermi, SP_START_ID must be aligned to 0x40.
    * On Kepler, the first instruction must be aligned to 0x80 because
    * latency information is expected only at certain positions.
    */
   if (screen->base.class_3d >= NVE4_3D_CLASS)
      size = size + (is_cp ? 0x40 : 0x70);
   size = align(size, 0x40);

   ret = nouveau_heap_alloc(screen->text_heap, size, prog, &prog->mem);
   if (ret)
      return ret;
   prog->code_base = prog->mem->start;

   if (!is_cp) {
      if (screen->base.class_3d >= NVE4_3D_CLASS &&
          screen->base.class_3d < TU102_3D_CLASS) {
         switch (prog->mem->start & 0xff) {
         case 0x40: prog->code_base += 0x70; break;
         case 0x80: prog->code_base += 0x30; break;
         case 0xc0: prog->code_base += 0x70; break;
         default:
            prog->code_base += 0x30;
            assert((prog->mem->start & 0xff) == 0x00);
            break;
         }
      }
   } else {
      if (screen->base.class_3d >= NVE4_3D_CLASS) {
         if (prog->mem->start & 0x40)
            prog->code_base += 0x40;
         assert((prog->code_base & 0x7f) == 0x00);
      }
   }

   return 0;
}
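
/* Copy the header and code into the text segment. Fixups patch FP code in
 * place for state that is baked into the binary (per-sample interpolation,
 * flatshading, MSAA), and hdr[14]'s color interpolation map is rewritten to
 * match.
 */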

static inline void
nvc0_program_upload_code(struct nvc0_context *nvc0, struct nvc0_program *prog)
{
   struct nvc0_screen *screen = nvc0->screen;
   const bool is_cp = prog->type == PIPE_SHADER_COMPUTE;
   uint32_t code_pos = prog->code_base;
   uint32_t size_sph = 0;

   if (!is_cp) {
      if (screen->eng3d->oclass < TU102_3D_CLASS)
         size_sph = GF100_SHADER_HEADER_SIZE;
      else
         size_sph = TU102_SHADER_HEADER_SIZE;
   }
   code_pos += size_sph;

   if (prog->relocs)
      nv50_ir_relocate_code(prog->relocs, prog->code, code_pos,
                            screen->lib_code->start, 0);
   if (prog->fixups) {
      nv50_ir_apply_fixups(prog->fixups, prog->code,
                           prog->fp.force_persample_interp,
                           prog->fp.flatshade,
                           0 /* alphatest */,
                           prog->fp.msaa);
      for (int i = 0; i < 2; i++) {
         unsigned mask = prog->fp.color_interp[i] >> 4;
         unsigned interp = prog->fp.color_interp[i] & 3;
         if (!mask)
            continue;
         prog->hdr[14] &= ~(0xff << (8 * i));
         if (prog->fp.flatshade)
            interp = NVC0_INTERP_FLAT;
         for (int c = 0; c < 4; c++)
            if (mask & (1 << c))
               prog->hdr[14] |= interp << (2 * (4 * i + c));
      }
   }

   if (!is_cp)
      nvc0->base.push_data(&nvc0->base, screen->text, prog->code_base,
                           NV_VRAM_DOMAIN(&screen->base), size_sph, prog->hdr);

   nvc0->base.push_data(&nvc0->base, screen->text, code_pos,
                        NV_VRAM_DOMAIN(&screen->base), prog->code_size,
                        prog->code);
}
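
/* Upload a program, making room if necessary: when the text heap is full,
 * evict all shaders, try to double the code segment (up to 8 MiB), then
 * re-upload the built-in library and every currently bound program before
 * retrying.
 */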
bool
nvc0_program_upload(struct nvc0_context *nvc0, struct nvc0_program *prog)
{
   struct nvc0_screen *screen = nvc0->screen;
   const bool is_cp = prog->type == PIPE_SHADER_COMPUTE;
   int ret;
   uint32_t size = prog->code_size;

   if (!is_cp) {
      if (screen->eng3d->oclass < TU102_3D_CLASS)
         size += GF100_SHADER_HEADER_SIZE;
      else
         size += TU102_SHADER_HEADER_SIZE;
   }

   ret = nvc0_program_alloc_code(nvc0, prog);
   if (ret) {
      struct nouveau_heap *heap = screen->text_heap;
      struct nvc0_program *progs[] = { /* Sorted according to SP_START_ID */
         nvc0->compprog, nvc0->vertprog, nvc0->tctlprog,
         nvc0->tevlprog, nvc0->gmtyprog, nvc0->fragprog
      };

      /* Note that the code library, which is allocated before anything else,
       * does not have a priv pointer. We can stop once we hit it.
       */
      while (heap->next && heap->next->priv) {
         struct nvc0_program *evict = heap->next->priv;
         nouveau_heap_free(&evict->mem);
      }
      debug_printf("WARNING: out of code space, evicting all shaders.\n");

      /* Make sure to synchronize before deleting the code segment. */
      IMMED_NVC0(nvc0->base.pushbuf, NVC0_3D(SERIALIZE), 0);

      if ((screen->text->size << 1) <= (1 << 23)) {
         ret = nvc0_screen_resize_text_area(screen, screen->text->size << 1);
         if (ret) {
            NOUVEAU_ERR("Error allocating TEXT area: %d\n", ret);
            return false;
         }

         /* Re-upload the builtin function into the new code segment. */
         nvc0_program_library_upload(nvc0);
      }

      ret = nvc0_program_alloc_code(nvc0, prog);
      if (ret) {
         NOUVEAU_ERR("shader too large (0x%x) to fit in code space?\n", size);
         return false;
      }

      /* All currently bound shaders have to be reuploaded. */
      for (int i = 0; i < ARRAY_SIZE(progs); i++) {
         if (!progs[i] || progs[i] == prog)
            continue;

         ret = nvc0_program_alloc_code(nvc0, progs[i]);
         if (ret) {
            NOUVEAU_ERR("failed to re-upload a shader after code eviction.\n");
            return false;
         }
         nvc0_program_upload_code(nvc0, progs[i]);

         if (progs[i]->type == PIPE_SHADER_COMPUTE) {
            /* Caches have to be invalidated but the CP_START_ID will be
             * updated in the launch_grid functions. */
            BEGIN_NVC0(nvc0->base.pushbuf, NVC0_CP(FLUSH), 1);
            PUSH_DATA (nvc0->base.pushbuf, NVC0_COMPUTE_FLUSH_CODE);
         } else {
            nvc0_program_sp_start_id(nvc0, i, progs[i]);
         }
      }
   }

   nvc0_program_upload_code(nvc0, prog);

#ifndef NDEBUG
   if (debug_get_bool_option("NV50_PROG_DEBUG", false))
      nvc0_program_dump(prog);
#endif

   BEGIN_NVC0(nvc0->base.pushbuf, NVC0_3D(MEM_BARRIER), 1);
   PUSH_DATA (nvc0->base.pushbuf, 0x1011);

   return true;
}

/* Upload code for builtin functions like integer division emulation. */
void
nvc0_program_library_upload(struct nvc0_context *nvc0)
{
   struct nvc0_screen *screen = nvc0->screen;
   int ret;
   uint32_t size;
   const uint32_t *code;

   if (screen->lib_code)
      return;

   nv50_ir_get_target_library(screen->base.device->chipset, &code, &size);
   if (!size)
      return;

   ret = nouveau_heap_alloc(screen->text_heap, align(size, 0x100), NULL,
                            &screen->lib_code);
   if (ret)
      return;

   nvc0->base.push_data(&nvc0->base,
                        screen->text, screen->lib_code->start, NV_VRAM_DOMAIN(&screen->base),
                        size, code);
   /* no need for a memory barrier, will be emitted with first program */
}

void
nvc0_program_destroy(struct nvc0_context *nvc0, struct nvc0_program *prog)
{
   const struct pipe_shader_state pipe = prog->pipe;
   const ubyte type = prog->type;

   if (prog->mem)
      nouveau_heap_free(&prog->mem);
   FREE(prog->code); /* may be 0 for hardcoded shaders */
   FREE(prog->relocs);
   FREE(prog->fixups);
   if (prog->tfb) {
      if (nvc0->state.tfb == prog->tfb)
         nvc0->state.tfb = NULL;
      FREE(prog->tfb);
   }

   memset(prog, 0, sizeof(*prog));

   prog->pipe = pipe;
   prog->type = type;
}
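
/* Build a minimal TCS (one output vertex, no body) with TGSI ureg;
 * presumably used as a stand-in when a TES is bound without an
 * application-provided TCS.
 */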

void
nvc0_program_init_tcp_empty(struct nvc0_context *nvc0)
{
   struct ureg_program *ureg;

   ureg = ureg_create(PIPE_SHADER_TESS_CTRL);
   if (!ureg)
      return;

   ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT, 1);
   ureg_END(ureg);

   nvc0->tcp_empty = ureg_create_shader_and_destroy(ureg, &nvc0->base.pipe);
}