PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/broadcom/compiler/vir.c
1
/*
2
* Copyright © 2016-2017 Broadcom
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*/
23
24
#include "broadcom/common/v3d_device_info.h"
25
#include "v3d_compiler.h"
26
#include "util/u_prim.h"
27
#include "compiler/nir/nir_schedule.h"
28
#include "compiler/nir/nir_builder.h"
29
30
int
31
vir_get_nsrc(struct qinst *inst)
32
{
33
switch (inst->qpu.type) {
34
case V3D_QPU_INSTR_TYPE_BRANCH:
35
return 0;
36
case V3D_QPU_INSTR_TYPE_ALU:
37
if (inst->qpu.alu.add.op != V3D_QPU_A_NOP)
38
return v3d_qpu_add_op_num_src(inst->qpu.alu.add.op);
39
else
40
return v3d_qpu_mul_op_num_src(inst->qpu.alu.mul.op);
41
}
42
43
return 0;
44
}
45
46
/**
47
* Returns whether the instruction has any side effects that must be
48
* preserved.
49
*/
50
bool
51
vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
52
{
53
switch (inst->qpu.type) {
54
case V3D_QPU_INSTR_TYPE_BRANCH:
55
return true;
56
case V3D_QPU_INSTR_TYPE_ALU:
57
switch (inst->qpu.alu.add.op) {
58
case V3D_QPU_A_SETREVF:
59
case V3D_QPU_A_SETMSF:
60
case V3D_QPU_A_VPMSETUP:
61
case V3D_QPU_A_STVPMV:
62
case V3D_QPU_A_STVPMD:
63
case V3D_QPU_A_STVPMP:
64
case V3D_QPU_A_VPMWT:
65
case V3D_QPU_A_TMUWT:
66
return true;
67
default:
68
break;
69
}
70
71
switch (inst->qpu.alu.mul.op) {
72
case V3D_QPU_M_MULTOP:
73
return true;
74
default:
75
break;
76
}
77
}
78
79
if (inst->qpu.sig.ldtmu ||
80
inst->qpu.sig.ldvary ||
81
inst->qpu.sig.ldtlbu ||
82
inst->qpu.sig.ldtlb ||
83
inst->qpu.sig.wrtmuc ||
84
inst->qpu.sig.thrsw) {
85
return true;
86
}
87
88
/* ldunifa works like ldunif: it reads an element and advances the
* pointer, so each read has a side effect (we don't care about this for
* ldunif because we reconstruct the uniform stream buffer after compiling
* with the surviving uniforms). Allowing DCE to remove one ldunifa would
* therefore break follow-up loads. We could fix this by emitting a unifa
* for each ldunifa, but each unifa requires 3 delay slots before a
* ldunifa, so that would be quite expensive.
*/
96
if (inst->qpu.sig.ldunifa || inst->qpu.sig.ldunifarf)
97
return true;
98
99
return false;
100
}
101
102
bool
103
vir_is_raw_mov(struct qinst *inst)
104
{
105
if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
106
(inst->qpu.alu.mul.op != V3D_QPU_M_FMOV &&
107
inst->qpu.alu.mul.op != V3D_QPU_M_MOV)) {
108
return false;
109
}
110
111
if (inst->qpu.alu.add.output_pack != V3D_QPU_PACK_NONE ||
112
inst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE) {
113
return false;
114
}
115
116
if (inst->qpu.alu.add.a_unpack != V3D_QPU_UNPACK_NONE ||
117
inst->qpu.alu.add.b_unpack != V3D_QPU_UNPACK_NONE ||
118
inst->qpu.alu.mul.a_unpack != V3D_QPU_UNPACK_NONE ||
119
inst->qpu.alu.mul.b_unpack != V3D_QPU_UNPACK_NONE) {
120
return false;
121
}
122
123
if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
124
inst->qpu.flags.mc != V3D_QPU_COND_NONE)
125
return false;
126
127
return true;
128
}
129
130
bool
131
vir_is_add(struct qinst *inst)
132
{
133
return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
134
inst->qpu.alu.add.op != V3D_QPU_A_NOP);
135
}
136
137
bool
138
vir_is_mul(struct qinst *inst)
139
{
140
return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
141
inst->qpu.alu.mul.op != V3D_QPU_M_NOP);
142
}
143
144
bool
145
vir_is_tex(const struct v3d_device_info *devinfo, struct qinst *inst)
146
{
147
if (inst->dst.file == QFILE_MAGIC)
148
return v3d_qpu_magic_waddr_is_tmu(devinfo, inst->dst.index);
149
150
if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
151
inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
152
return true;
153
}
154
155
return false;
156
}
157
158
bool
159
vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)
160
{
161
for (int i = 0; i < vir_get_nsrc(inst); i++) {
162
switch (inst->src[i].file) {
163
case QFILE_VPM:
164
return true;
165
default:
166
break;
167
}
168
}
169
170
if (devinfo->ver < 41 && (inst->qpu.sig.ldvary ||
171
inst->qpu.sig.ldtlb ||
172
inst->qpu.sig.ldtlbu ||
173
inst->qpu.sig.ldvpm)) {
174
return true;
175
}
176
177
return false;
178
}
179
180
bool
181
vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst)
182
{
183
switch (inst->dst.file) {
184
case QFILE_MAGIC:
185
switch (inst->dst.index) {
186
case V3D_QPU_WADDR_RECIP:
187
case V3D_QPU_WADDR_RSQRT:
188
case V3D_QPU_WADDR_EXP:
189
case V3D_QPU_WADDR_LOG:
190
case V3D_QPU_WADDR_SIN:
191
return true;
192
}
193
break;
194
default:
195
break;
196
}
197
198
if (devinfo->ver < 41 && inst->qpu.sig.ldtmu)
199
return true;
200
201
return false;
202
}
203
204
void
205
vir_set_unpack(struct qinst *inst, int src,
206
enum v3d_qpu_input_unpack unpack)
207
{
208
assert(src == 0 || src == 1);
209
210
if (vir_is_add(inst)) {
211
if (src == 0)
212
inst->qpu.alu.add.a_unpack = unpack;
213
else
214
inst->qpu.alu.add.b_unpack = unpack;
215
} else {
216
assert(vir_is_mul(inst));
217
if (src == 0)
218
inst->qpu.alu.mul.a_unpack = unpack;
219
else
220
inst->qpu.alu.mul.b_unpack = unpack;
221
}
222
}
223
224
void
225
vir_set_pack(struct qinst *inst, enum v3d_qpu_output_pack pack)
226
{
227
if (vir_is_add(inst)) {
228
inst->qpu.alu.add.output_pack = pack;
229
} else {
230
assert(vir_is_mul(inst));
231
inst->qpu.alu.mul.output_pack = pack;
232
}
233
}
234
235
void
236
vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond)
237
{
238
if (vir_is_add(inst)) {
239
inst->qpu.flags.ac = cond;
240
} else {
241
assert(vir_is_mul(inst));
242
inst->qpu.flags.mc = cond;
243
}
244
}
245
246
void
247
vir_set_pf(struct v3d_compile *c, struct qinst *inst, enum v3d_qpu_pf pf)
248
{
249
c->flags_temp = -1;
250
if (vir_is_add(inst)) {
251
inst->qpu.flags.apf = pf;
252
} else {
253
assert(vir_is_mul(inst));
254
inst->qpu.flags.mpf = pf;
255
}
256
}
257
258
void
259
vir_set_uf(struct v3d_compile *c, struct qinst *inst, enum v3d_qpu_uf uf)
260
{
261
c->flags_temp = -1;
262
if (vir_is_add(inst)) {
263
inst->qpu.flags.auf = uf;
264
} else {
265
assert(vir_is_mul(inst));
266
inst->qpu.flags.muf = uf;
267
}
268
}
269
270
#if 0
271
uint8_t
272
vir_channels_written(struct qinst *inst)
273
{
274
if (vir_is_mul(inst)) {
275
switch (inst->dst.pack) {
276
case QPU_PACK_MUL_NOP:
277
case QPU_PACK_MUL_8888:
278
return 0xf;
279
case QPU_PACK_MUL_8A:
280
return 0x1;
281
case QPU_PACK_MUL_8B:
282
return 0x2;
283
case QPU_PACK_MUL_8C:
284
return 0x4;
285
case QPU_PACK_MUL_8D:
286
return 0x8;
287
}
288
} else {
289
switch (inst->dst.pack) {
290
case QPU_PACK_A_NOP:
291
case QPU_PACK_A_8888:
292
case QPU_PACK_A_8888_SAT:
293
case QPU_PACK_A_32_SAT:
294
return 0xf;
295
case QPU_PACK_A_8A:
296
case QPU_PACK_A_8A_SAT:
297
return 0x1;
298
case QPU_PACK_A_8B:
299
case QPU_PACK_A_8B_SAT:
300
return 0x2;
301
case QPU_PACK_A_8C:
302
case QPU_PACK_A_8C_SAT:
303
return 0x4;
304
case QPU_PACK_A_8D:
305
case QPU_PACK_A_8D_SAT:
306
return 0x8;
307
case QPU_PACK_A_16A:
308
case QPU_PACK_A_16A_SAT:
309
return 0x3;
310
case QPU_PACK_A_16B:
311
case QPU_PACK_A_16B_SAT:
312
return 0xc;
313
}
314
}
315
unreachable("Bad pack field");
316
}
317
#endif
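/* Allocates a fresh temporary register. The defs[] and spillable arrays
* grow on demand: when num_temps exceeds defs_array_size the arrays are
* doubled (starting at 16 entries), new def slots are zero-initialized and
* new temps are marked spillable by default.
*/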
318
319
struct qreg
320
vir_get_temp(struct v3d_compile *c)
321
{
322
struct qreg reg;
323
324
reg.file = QFILE_TEMP;
325
reg.index = c->num_temps++;
326
327
if (c->num_temps > c->defs_array_size) {
328
uint32_t old_size = c->defs_array_size;
329
c->defs_array_size = MAX2(old_size * 2, 16);
330
331
c->defs = reralloc(c, c->defs, struct qinst *,
332
c->defs_array_size);
333
memset(&c->defs[old_size], 0,
334
sizeof(c->defs[0]) * (c->defs_array_size - old_size));
335
336
c->spillable = reralloc(c, c->spillable,
337
BITSET_WORD,
338
BITSET_WORDS(c->defs_array_size));
339
for (int i = old_size; i < c->defs_array_size; i++)
340
BITSET_SET(c->spillable, i);
341
}
342
343
return reg;
344
}
345
346
struct qinst *
347
vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst, struct qreg src0, struct qreg src1)
348
{
349
struct qinst *inst = calloc(1, sizeof(*inst));
350
351
inst->qpu = v3d_qpu_nop();
352
inst->qpu.alu.add.op = op;
353
354
inst->dst = dst;
355
inst->src[0] = src0;
356
inst->src[1] = src1;
357
inst->uniform = ~0;
358
359
return inst;
360
}
361
362
struct qinst *
363
vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct qreg src1)
364
{
365
struct qinst *inst = calloc(1, sizeof(*inst));
366
367
inst->qpu = v3d_qpu_nop();
368
inst->qpu.alu.mul.op = op;
369
370
inst->dst = dst;
371
inst->src[0] = src0;
372
inst->src[1] = src1;
373
inst->uniform = ~0;
374
375
return inst;
376
}
377
378
struct qinst *
379
vir_branch_inst(struct v3d_compile *c, enum v3d_qpu_branch_cond cond)
380
{
381
struct qinst *inst = calloc(1, sizeof(*inst));
382
383
inst->qpu = v3d_qpu_nop();
384
inst->qpu.type = V3D_QPU_INSTR_TYPE_BRANCH;
385
inst->qpu.branch.cond = cond;
386
inst->qpu.branch.msfign = V3D_QPU_MSFIGN_NONE;
387
inst->qpu.branch.bdi = V3D_QPU_BRANCH_DEST_REL;
388
inst->qpu.branch.ub = true;
389
inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;
390
391
inst->dst = vir_nop_reg();
392
inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT, 0);
393
394
return inst;
395
}
396
397
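/* Inserts inst at the compiler's current cursor: vir_cursor_add places it
* right after the cursor's link, vir_cursor_addtail places it right before
* it. Either way the cursor is then moved to just after the new
* instruction and the live intervals are invalidated.
*/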
static void
398
vir_emit(struct v3d_compile *c, struct qinst *inst)
399
{
400
switch (c->cursor.mode) {
401
case vir_cursor_add:
402
list_add(&inst->link, c->cursor.link);
403
break;
404
case vir_cursor_addtail:
405
list_addtail(&inst->link, c->cursor.link);
406
break;
407
}
408
409
c->cursor = vir_after_inst(inst);
410
c->live_intervals_valid = false;
411
}
412
413
/* Updates inst to write to a new temporary, emits it, and notes the def. */
414
struct qreg
415
vir_emit_def(struct v3d_compile *c, struct qinst *inst)
416
{
417
assert(inst->dst.file == QFILE_NULL);
418
419
/* If we're emitting an instruction that's a def, it had better be
420
* writing a register.
421
*/
422
if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
423
assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP ||
424
v3d_qpu_add_op_has_dst(inst->qpu.alu.add.op));
425
assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP ||
426
v3d_qpu_mul_op_has_dst(inst->qpu.alu.mul.op));
427
}
428
429
inst->dst = vir_get_temp(c);
430
431
if (inst->dst.file == QFILE_TEMP)
432
c->defs[inst->dst.index] = inst;
433
434
vir_emit(c, inst);
435
436
return inst->dst;
437
}
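/* Illustrative use (a sketch, assuming the usual c->undef placeholder
* destination used by the VIR_* emit helpers):
*
*     struct qreg sum =
*             vir_emit_def(c, vir_add_inst(V3D_QPU_A_FADD, c->undef, a, b));
*
* vir_emit_def() then replaces the placeholder destination with a fresh
* temp and records the instruction as that temp's def.
*/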
438
439
struct qinst *
440
vir_emit_nondef(struct v3d_compile *c, struct qinst *inst)
441
{
442
if (inst->dst.file == QFILE_TEMP)
443
c->defs[inst->dst.index] = NULL;
444
445
vir_emit(c, inst);
446
447
return inst;
448
}
449
450
struct qblock *
451
vir_new_block(struct v3d_compile *c)
452
{
453
struct qblock *block = rzalloc(c, struct qblock);
454
455
list_inithead(&block->instructions);
456
457
block->predecessors = _mesa_set_create(block,
458
_mesa_hash_pointer,
459
_mesa_key_pointer_equal);
460
461
block->index = c->next_block_index++;
462
463
return block;
464
}
465
466
void
467
vir_set_emit_block(struct v3d_compile *c, struct qblock *block)
468
{
469
c->cur_block = block;
470
c->cursor = vir_after_block(block);
471
list_addtail(&block->link, &c->blocks);
472
}
473
474
struct qblock *
475
vir_entry_block(struct v3d_compile *c)
476
{
477
return list_first_entry(&c->blocks, struct qblock, link);
478
}
479
480
struct qblock *
481
vir_exit_block(struct v3d_compile *c)
482
{
483
return list_last_entry(&c->blocks, struct qblock, link);
484
}
485
486
void
487
vir_link_blocks(struct qblock *predecessor, struct qblock *successor)
488
{
489
_mesa_set_add(successor->predecessors, predecessor);
490
if (predecessor->successors[0]) {
491
assert(!predecessor->successors[1]);
492
predecessor->successors[1] = successor;
493
} else {
494
predecessor->successors[0] = successor;
495
}
496
}
497
498
const struct v3d_compiler *
499
v3d_compiler_init(const struct v3d_device_info *devinfo)
500
{
501
struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);
502
if (!compiler)
503
return NULL;
504
505
compiler->devinfo = devinfo;
506
507
if (!vir_init_reg_sets(compiler)) {
508
ralloc_free(compiler);
509
return NULL;
510
}
511
512
return compiler;
513
}
514
515
void
516
v3d_compiler_free(const struct v3d_compiler *compiler)
517
{
518
ralloc_free((void *)compiler);
519
}
520
521
static struct v3d_compile *
522
vir_compile_init(const struct v3d_compiler *compiler,
523
struct v3d_key *key,
524
nir_shader *s,
525
void (*debug_output)(const char *msg,
526
void *debug_output_data),
527
void *debug_output_data,
528
int program_id, int variant_id,
529
uint32_t max_threads,
530
uint32_t min_threads_for_reg_alloc,
531
bool tmu_spilling_allowed,
532
bool disable_loop_unrolling,
533
bool disable_constant_ubo_load_sorting,
534
bool disable_tmu_pipelining,
535
bool fallback_scheduler)
536
{
537
struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);
538
539
c->compiler = compiler;
540
c->devinfo = compiler->devinfo;
541
c->key = key;
542
c->program_id = program_id;
543
c->variant_id = variant_id;
544
c->threads = max_threads;
545
c->debug_output = debug_output;
546
c->debug_output_data = debug_output_data;
547
c->compilation_result = V3D_COMPILATION_SUCCEEDED;
548
c->min_threads_for_reg_alloc = min_threads_for_reg_alloc;
549
c->tmu_spilling_allowed = tmu_spilling_allowed;
550
c->fallback_scheduler = fallback_scheduler;
551
c->disable_tmu_pipelining = disable_tmu_pipelining;
552
c->disable_constant_ubo_load_sorting = disable_constant_ubo_load_sorting;
553
c->disable_loop_unrolling = disable_loop_unrolling;
554
555
s = nir_shader_clone(c, s);
556
c->s = s;
557
558
list_inithead(&c->blocks);
559
vir_set_emit_block(c, vir_new_block(c));
560
561
c->output_position_index = -1;
562
c->output_sample_mask_index = -1;
563
564
c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,
565
_mesa_key_pointer_equal);
566
567
c->tmu.outstanding_regs = _mesa_pointer_set_create(c);
568
c->flags_temp = -1;
569
570
return c;
571
}
572
573
static int
574
type_size_vec4(const struct glsl_type *type, bool bindless)
575
{
576
return glsl_count_attribute_slots(type, false);
577
}
578
579
static void
580
v3d_lower_nir(struct v3d_compile *c)
581
{
582
struct nir_lower_tex_options tex_options = {
583
.lower_txd = true,
584
.lower_tg4_broadcom_swizzle = true,
585
586
.lower_rect = false, /* XXX: Use this on V3D 3.x */
587
.lower_txp = ~0,
588
/* Apply swizzles to all samplers. */
589
.swizzle_result = ~0,
590
};
591
592
/* Lower the format swizzle and (for 32-bit returns)
593
* ARB_texture_swizzle-style swizzle.
594
*/
595
assert(c->key->num_tex_used <= ARRAY_SIZE(c->key->tex));
596
for (int i = 0; i < c->key->num_tex_used; i++) {
597
for (int j = 0; j < 4; j++)
598
tex_options.swizzles[i][j] = c->key->tex[i].swizzle[j];
599
}
600
601
assert(c->key->num_samplers_used <= ARRAY_SIZE(c->key->sampler));
602
for (int i = 0; i < c->key->num_samplers_used; i++) {
603
if (c->key->sampler[i].return_size == 16) {
604
tex_options.lower_tex_packing[i] =
605
nir_lower_tex_packing_16;
606
}
607
}
608
609
/* CS textures may not have return_size reflecting the shadow state. */
610
nir_foreach_uniform_variable(var, c->s) {
611
const struct glsl_type *type = glsl_without_array(var->type);
612
unsigned array_len = MAX2(glsl_get_length(var->type), 1);
613
614
if (!glsl_type_is_sampler(type) ||
615
!glsl_sampler_type_is_shadow(type))
616
continue;
617
618
for (int i = 0; i < array_len; i++) {
619
tex_options.lower_tex_packing[var->data.binding + i] =
620
nir_lower_tex_packing_16;
621
}
622
}
623
624
NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
625
NIR_PASS_V(c->s, nir_lower_system_values);
626
NIR_PASS_V(c->s, nir_lower_compute_system_values, NULL);
627
628
NIR_PASS_V(c->s, nir_lower_vars_to_scratch,
629
nir_var_function_temp,
630
0,
631
glsl_get_natural_size_align_bytes);
632
NIR_PASS_V(c->s, v3d_nir_lower_scratch);
633
}
634
635
static void
636
v3d_set_prog_data_uniforms(struct v3d_compile *c,
637
struct v3d_prog_data *prog_data)
638
{
639
int count = c->num_uniforms;
640
struct v3d_uniform_list *ulist = &prog_data->uniforms;
641
642
ulist->count = count;
643
ulist->data = ralloc_array(prog_data, uint32_t, count);
644
memcpy(ulist->data, c->uniform_data,
645
count * sizeof(*ulist->data));
646
ulist->contents = ralloc_array(prog_data, enum quniform_contents, count);
647
memcpy(ulist->contents, c->uniform_contents,
648
count * sizeof(*ulist->contents));
649
}
650
651
static void
652
v3d_vs_set_prog_data(struct v3d_compile *c,
653
struct v3d_vs_prog_data *prog_data)
654
{
655
/* The vertex data gets format converted by the VPM so that
656
* each attribute channel takes up a VPM column. Precompute
657
* the sizes for the shader record.
658
*/
659
for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {
660
prog_data->vattr_sizes[i] = c->vattr_sizes[i];
661
prog_data->vpm_input_size += c->vattr_sizes[i];
662
}
663
664
memset(prog_data->driver_location_map, -1,
665
sizeof(prog_data->driver_location_map));
666
667
nir_foreach_shader_in_variable(var, c->s) {
668
prog_data->driver_location_map[var->data.location] =
669
var->data.driver_location;
670
}
671
672
prog_data->uses_vid = BITSET_TEST(c->s->info.system_values_read,
673
SYSTEM_VALUE_VERTEX_ID) ||
674
BITSET_TEST(c->s->info.system_values_read,
675
SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
676
677
prog_data->uses_biid = BITSET_TEST(c->s->info.system_values_read,
678
SYSTEM_VALUE_BASE_INSTANCE);
679
680
prog_data->uses_iid = BITSET_TEST(c->s->info.system_values_read,
681
SYSTEM_VALUE_INSTANCE_ID) ||
682
BITSET_TEST(c->s->info.system_values_read,
683
SYSTEM_VALUE_INSTANCE_INDEX);
684
685
if (prog_data->uses_vid)
686
prog_data->vpm_input_size++;
687
if (prog_data->uses_biid)
688
prog_data->vpm_input_size++;
689
if (prog_data->uses_iid)
690
prog_data->vpm_input_size++;
691
692
/* Input/output segment sizes are in sectors (8 rows of 32 bits per
* channel).
694
*/
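/* One sector is 8 rows of one 32-bit word per channel, i.e.
* 8 * 4 bytes * V3D_CHANNELS (16) = 512 bytes, so the row counts
* accumulated above are rounded up to a multiple of 8 and divided by 8.
*/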
695
prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;
696
prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;
697
698
/* Set us up for shared input/output segments. This is apparently
699
* necessary for our VCM setup to avoid varying corruption.
700
*/
701
prog_data->separate_segments = false;
702
prog_data->vpm_output_size = MAX2(prog_data->vpm_output_size,
703
prog_data->vpm_input_size);
704
prog_data->vpm_input_size = 0;
705
706
/* Compute VCM cache size. We set up our program to take up less than
707
* half of the VPM, so that any set of bin and render programs won't
708
* run out of space. We need space for at least one input segment,
709
* and then allocate the rest to output segments (one for the current
710
* program, the rest to VCM). The valid range of the VCM cache size
711
* field is 1-4 16-vertex batches, but GFXH-1744 limits us to 2-4
712
* batches.
713
*/
714
assert(c->devinfo->vpm_size);
715
int sector_size = V3D_CHANNELS * sizeof(uint32_t) * 8;
716
int vpm_size_in_sectors = c->devinfo->vpm_size / sector_size;
717
int half_vpm = vpm_size_in_sectors / 2;
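/* Worked example (hypothetical 16 KB VPM): sector_size is
* 16 * 4 * 8 = 512 bytes, so vpm_size_in_sectors = 32 and half_vpm = 16,
* leaving 16 sectors minus the input segment for output batches.
*/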
718
int vpm_output_sectors = half_vpm - prog_data->vpm_input_size;
719
int vpm_output_batches = vpm_output_sectors / prog_data->vpm_output_size;
720
assert(vpm_output_batches >= 2);
721
prog_data->vcm_cache_size = CLAMP(vpm_output_batches - 1, 2, 4);
722
}
723
724
static void
725
v3d_gs_set_prog_data(struct v3d_compile *c,
726
struct v3d_gs_prog_data *prog_data)
727
{
728
prog_data->num_inputs = c->num_inputs;
729
memcpy(prog_data->input_slots, c->input_slots,
730
c->num_inputs * sizeof(*c->input_slots));
731
732
/* gl_PrimitiveIdIn is written by the GBG into the first word of the
733
* VPM output header automatically and the shader will overwrite
734
* it after reading it if necessary, so it doesn't add to the VPM
735
* size requirements.
736
*/
737
prog_data->uses_pid = BITSET_TEST(c->s->info.system_values_read,
738
SYSTEM_VALUE_PRIMITIVE_ID);
739
740
/* Output segment size is in sectors (8 rows of 32 bits per channel) */
741
prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;
742
743
/* Compute SIMD dispatch width and update VPM output size accordingly
744
* to ensure we can fit our program in memory. Available widths are
745
* 16, 8, 4, 1.
746
*
747
* Notice that at draw time we will have to consider VPM memory
748
* requirements from other stages and choose a smaller dispatch
749
* width if needed to fit the program in VPM memory.
750
*/
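/* A width of 2 is not a valid dispatch width: the simd_width == 2 term
* in the loop condition below forces one extra halving down to 1 in that
* case, which the asserts after the loop double-check.
*/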
751
prog_data->simd_width = 16;
752
while ((prog_data->simd_width > 1 && prog_data->vpm_output_size > 16) ||
753
prog_data->simd_width == 2) {
754
prog_data->simd_width >>= 1;
755
prog_data->vpm_output_size =
756
align(prog_data->vpm_output_size, 2) / 2;
757
}
758
assert(prog_data->vpm_output_size <= 16);
759
assert(prog_data->simd_width != 2);
760
761
prog_data->out_prim_type = c->s->info.gs.output_primitive;
762
prog_data->num_invocations = c->s->info.gs.invocations;
763
764
prog_data->writes_psiz =
765
c->s->info.outputs_written & (1 << VARYING_SLOT_PSIZ);
766
}
767
768
static void
769
v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
770
struct v3d_fs_prog_data *prog_data)
771
{
772
prog_data->num_inputs = c->num_inputs;
773
memcpy(prog_data->input_slots, c->input_slots,
774
c->num_inputs * sizeof(*c->input_slots));
775
776
STATIC_ASSERT(ARRAY_SIZE(prog_data->flat_shade_flags) >
777
(V3D_MAX_FS_INPUTS - 1) / 24);
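/* The flag words pack 24 inputs each (bit i % 24 of word i / 24), which
* is what the STATIC_ASSERT above sizes the arrays against.
*/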
778
for (int i = 0; i < V3D_MAX_FS_INPUTS; i++) {
779
if (BITSET_TEST(c->flat_shade_flags, i))
780
prog_data->flat_shade_flags[i / 24] |= 1 << (i % 24);
781
782
if (BITSET_TEST(c->noperspective_flags, i))
783
prog_data->noperspective_flags[i / 24] |= 1 << (i % 24);
784
785
if (BITSET_TEST(c->centroid_flags, i))
786
prog_data->centroid_flags[i / 24] |= 1 << (i % 24);
787
}
788
}
789
790
static void
791
v3d_fs_set_prog_data(struct v3d_compile *c,
792
struct v3d_fs_prog_data *prog_data)
793
{
794
v3d_set_fs_prog_data_inputs(c, prog_data);
795
prog_data->writes_z = c->writes_z;
796
prog_data->disable_ez = !c->s->info.fs.early_fragment_tests;
797
prog_data->uses_center_w = c->uses_center_w;
798
prog_data->uses_implicit_point_line_varyings =
799
c->uses_implicit_point_line_varyings;
800
prog_data->lock_scoreboard_on_first_thrsw =
801
c->lock_scoreboard_on_first_thrsw;
802
prog_data->force_per_sample_msaa = c->force_per_sample_msaa;
803
prog_data->uses_pid = c->fs_uses_primitive_id;
804
}
805
806
static void
807
v3d_cs_set_prog_data(struct v3d_compile *c,
808
struct v3d_compute_prog_data *prog_data)
809
{
810
prog_data->shared_size = c->s->info.shared_size;
811
812
prog_data->local_size[0] = c->s->info.workgroup_size[0];
813
prog_data->local_size[1] = c->s->info.workgroup_size[1];
814
prog_data->local_size[2] = c->s->info.workgroup_size[2];
815
816
prog_data->has_subgroups = c->has_subgroups;
817
}
818
819
static void
820
v3d_set_prog_data(struct v3d_compile *c,
821
struct v3d_prog_data *prog_data)
822
{
823
prog_data->threads = c->threads;
824
prog_data->single_seg = !c->last_thrsw;
825
prog_data->spill_size = c->spill_size;
826
prog_data->tmu_dirty_rcl = c->tmu_dirty_rcl;
827
prog_data->has_control_barrier = c->s->info.uses_control_barrier;
828
829
v3d_set_prog_data_uniforms(c, prog_data);
830
831
switch (c->s->info.stage) {
832
case MESA_SHADER_VERTEX:
833
v3d_vs_set_prog_data(c, (struct v3d_vs_prog_data *)prog_data);
834
break;
835
case MESA_SHADER_GEOMETRY:
836
v3d_gs_set_prog_data(c, (struct v3d_gs_prog_data *)prog_data);
837
break;
838
case MESA_SHADER_FRAGMENT:
839
v3d_fs_set_prog_data(c, (struct v3d_fs_prog_data *)prog_data);
840
break;
841
case MESA_SHADER_COMPUTE:
842
v3d_cs_set_prog_data(c, (struct v3d_compute_prog_data *)prog_data);
843
break;
844
default:
845
unreachable("unsupported shader stage");
846
}
847
}
848
849
static uint64_t *
850
v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
851
{
852
*final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);
853
854
uint64_t *qpu_insts = malloc(*final_assembly_size);
855
if (!qpu_insts)
856
return NULL;
857
858
memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);
859
860
vir_compile_destroy(c);
861
862
return qpu_insts;
863
}
864
865
static void
866
v3d_nir_lower_vs_early(struct v3d_compile *c)
867
{
868
/* Split our I/O vars and dead code eliminate the unused
869
* components.
870
*/
871
NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
872
nir_var_shader_in | nir_var_shader_out);
873
uint64_t used_outputs[4] = {0};
874
for (int i = 0; i < c->vs_key->num_used_outputs; i++) {
875
int slot = v3d_slot_get_slot(c->vs_key->used_outputs[i]);
876
int comp = v3d_slot_get_component(c->vs_key->used_outputs[i]);
877
used_outputs[comp] |= 1ull << slot;
878
}
879
NIR_PASS_V(c->s, nir_remove_unused_io_vars,
880
nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
881
NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
882
v3d_optimize_nir(c, c->s);
883
NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
884
885
/* This must go before nir_lower_io */
886
if (c->vs_key->per_vertex_point_size)
887
NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);
888
889
NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
890
type_size_vec4,
891
(nir_lower_io_options)0);
892
/* clean up nir_lower_io's deref_var remains and do a constant folding pass
893
* on the code it generated.
894
*/
895
NIR_PASS_V(c->s, nir_opt_dce);
896
NIR_PASS_V(c->s, nir_opt_constant_folding);
897
}
898
899
static void
900
v3d_nir_lower_gs_early(struct v3d_compile *c)
901
{
902
/* Split our I/O vars and dead code eliminate the unused
903
* components.
904
*/
905
NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
906
nir_var_shader_in | nir_var_shader_out);
907
uint64_t used_outputs[4] = {0};
908
for (int i = 0; i < c->gs_key->num_used_outputs; i++) {
909
int slot = v3d_slot_get_slot(c->gs_key->used_outputs[i]);
910
int comp = v3d_slot_get_component(c->gs_key->used_outputs[i]);
911
used_outputs[comp] |= 1ull << slot;
912
}
913
NIR_PASS_V(c->s, nir_remove_unused_io_vars,
914
nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
915
NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
916
v3d_optimize_nir(c, c->s);
917
NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
918
919
/* This must go before nir_lower_io */
920
if (c->gs_key->per_vertex_point_size)
921
NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);
922
923
NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
924
type_size_vec4,
925
(nir_lower_io_options)0);
926
/* clean up nir_lower_io's deref_var remains */
927
NIR_PASS_V(c->s, nir_opt_dce);
928
}
929
930
static void
931
v3d_fixup_fs_output_types(struct v3d_compile *c)
932
{
933
nir_foreach_shader_out_variable(var, c->s) {
934
uint32_t mask = 0;
935
936
switch (var->data.location) {
937
case FRAG_RESULT_COLOR:
938
mask = ~0;
939
break;
940
case FRAG_RESULT_DATA0:
941
case FRAG_RESULT_DATA1:
942
case FRAG_RESULT_DATA2:
943
case FRAG_RESULT_DATA3:
944
mask = 1 << (var->data.location - FRAG_RESULT_DATA0);
945
break;
946
}
947
948
if (c->fs_key->int_color_rb & mask) {
949
var->type =
950
glsl_vector_type(GLSL_TYPE_INT,
951
glsl_get_components(var->type));
952
} else if (c->fs_key->uint_color_rb & mask) {
953
var->type =
954
glsl_vector_type(GLSL_TYPE_UINT,
955
glsl_get_components(var->type));
956
}
957
}
958
}
959
960
static void
961
v3d_nir_lower_fs_early(struct v3d_compile *c)
962
{
963
if (c->fs_key->int_color_rb || c->fs_key->uint_color_rb)
964
v3d_fixup_fs_output_types(c);
965
966
NIR_PASS_V(c->s, v3d_nir_lower_logic_ops, c);
967
968
if (c->fs_key->line_smoothing) {
969
v3d_nir_lower_line_smooth(c->s);
970
NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
971
/* The lowering pass can introduce new sysval reads */
972
nir_shader_gather_info(c->s, nir_shader_get_entrypoint(c->s));
973
}
974
975
/* If the shader has no non-TLB side effects, we can promote it to
976
* enabling early_fragment_tests even if the user didn't.
977
*/
978
if (!(c->s->info.num_images ||
979
c->s->info.num_ssbos)) {
980
c->s->info.fs.early_fragment_tests = true;
981
}
982
}
983
984
static void
985
v3d_nir_lower_gs_late(struct v3d_compile *c)
986
{
987
if (c->key->ucp_enables) {
988
NIR_PASS_V(c->s, nir_lower_clip_gs, c->key->ucp_enables,
989
false, NULL);
990
}
991
992
/* Note: GS output scalarizing must happen after nir_lower_clip_gs. */
993
NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
994
}
995
996
static void
997
v3d_nir_lower_vs_late(struct v3d_compile *c)
998
{
999
if (c->key->ucp_enables) {
1000
NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables,
1001
false, false, NULL);
1002
NIR_PASS_V(c->s, nir_lower_io_to_scalar,
1003
nir_var_shader_out);
1004
}
1005
1006
/* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
1007
NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
1008
}
1009
1010
static void
1011
v3d_nir_lower_fs_late(struct v3d_compile *c)
1012
{
1013
/* In OpenGL the fragment shader can't read gl_ClipDistance[], but
* Vulkan allows it, in which case the SPIR-V compiler will declare
* VARYING_SLOT_CLIP_DIST0 as a compact array variable. Pass true as
* the last parameter to always operate with a compact array in both
* OpenGL and Vulkan so we don't have to care about which API we
* are using.
*/
1020
if (c->key->ucp_enables)
1021
NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables, true);
1022
1023
NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
1024
}
1025
1026
static uint32_t
1027
vir_get_max_temps(struct v3d_compile *c)
1028
{
1029
int max_ip = 0;
1030
vir_for_each_inst_inorder(inst, c)
1031
max_ip++;
1032
1033
uint32_t *pressure = rzalloc_array(NULL, uint32_t, max_ip);
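/* pressure[i] will count how many temps are live at instruction index i;
* the maximum over all instructions is the peak temp pressure reported.
*/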
1034
1035
for (int t = 0; t < c->num_temps; t++) {
1036
for (int i = c->temp_start[t]; (i < c->temp_end[t] &&
1037
i < max_ip); i++) {
1038
if (i > max_ip)
1039
break;
1040
pressure[i]++;
1041
}
1042
}
1043
1044
uint32_t max_temps = 0;
1045
for (int i = 0; i < max_ip; i++)
1046
max_temps = MAX2(max_temps, pressure[i]);
1047
1048
ralloc_free(pressure);
1049
1050
return max_temps;
1051
}
1052
1053
enum v3d_dependency_class {
1054
V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0
1055
};
1056
1057
static bool
1058
v3d_intrinsic_dependency_cb(nir_intrinsic_instr *intr,
1059
nir_schedule_dependency *dep,
1060
void *user_data)
1061
{
1062
struct v3d_compile *c = user_data;
1063
1064
switch (intr->intrinsic) {
1065
case nir_intrinsic_store_output:
1066
/* Writing to location 0 overwrites the value passed in for
1067
* gl_PrimitiveID on geometry shaders
1068
*/
1069
if (c->s->info.stage != MESA_SHADER_GEOMETRY ||
1070
nir_intrinsic_base(intr) != 0)
1071
break;
1072
1073
nir_const_value *const_value =
1074
nir_src_as_const_value(intr->src[1]);
1075
1076
if (const_value == NULL)
1077
break;
1078
1079
uint64_t offset =
1080
nir_const_value_as_uint(*const_value,
1081
nir_src_bit_size(intr->src[1]));
1082
if (offset != 0)
1083
break;
1084
1085
dep->klass = V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0;
1086
dep->type = NIR_SCHEDULE_WRITE_DEPENDENCY;
1087
return true;
1088
1089
case nir_intrinsic_load_primitive_id:
1090
if (c->s->info.stage != MESA_SHADER_GEOMETRY)
1091
break;
1092
1093
dep->klass = V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0;
1094
dep->type = NIR_SCHEDULE_READ_DEPENDENCY;
1095
return true;
1096
1097
default:
1098
break;
1099
}
1100
1101
return false;
1102
}
1103
1104
static bool
1105
should_split_wrmask(const nir_instr *instr, const void *data)
1106
{
1107
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1108
switch (intr->intrinsic) {
1109
case nir_intrinsic_store_ssbo:
1110
case nir_intrinsic_store_shared:
1111
case nir_intrinsic_store_global:
1112
case nir_intrinsic_store_scratch:
1113
return true;
1114
default:
1115
return false;
1116
}
1117
}
1118
1119
static nir_intrinsic_instr *
1120
nir_instr_as_constant_ubo_load(nir_instr *inst)
1121
{
1122
if (inst->type != nir_instr_type_intrinsic)
1123
return NULL;
1124
1125
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(inst);
1126
if (intr->intrinsic != nir_intrinsic_load_ubo)
1127
return NULL;
1128
1129
assert(nir_src_is_const(intr->src[0]));
1130
if (!nir_src_is_const(intr->src[1]))
1131
return NULL;
1132
1133
return intr;
1134
}
1135
1136
static bool
1137
v3d_nir_sort_constant_ubo_load(nir_block *block, nir_intrinsic_instr *ref)
1138
{
1139
bool progress = false;
1140
1141
nir_instr *ref_inst = &ref->instr;
1142
uint32_t ref_offset = nir_src_as_uint(ref->src[1]);
1143
uint32_t ref_index = nir_src_as_uint(ref->src[0]);
1144
1145
/* Go through all instructions after ref searching for constant UBO
1146
* loads for the same UBO index.
1147
*/
1148
bool seq_break = false;
1149
nir_instr *inst = &ref->instr;
1150
nir_instr *next_inst = NULL;
1151
while (true) {
1152
inst = next_inst ? next_inst : nir_instr_next(inst);
1153
if (!inst)
1154
break;
1155
1156
next_inst = NULL;
1157
1158
if (inst->type != nir_instr_type_intrinsic)
1159
continue;
1160
1161
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(inst);
1162
if (intr->intrinsic != nir_intrinsic_load_ubo)
1163
continue;
1164
1165
/* We only produce unifa sequences for non-divergent loads */
1166
if (nir_src_is_divergent(intr->src[1]))
1167
continue;
1168
1169
/* If there are any UBO loads that are not constant, or that use a
* different UBO index, in between the reference load and any other
* constant load for the same index, they would break the unifa
* sequence. We flag that case so we can then move all constant UBO
* loads for the reference index before these sequence breakers, not
* just the ones that are out of order, to avoid breaking the sequence
* and to reduce unifa writes.
*/
1177
if (!nir_src_is_const(intr->src[1])) {
1178
seq_break = true;
1179
continue;
1180
}
1181
uint32_t offset = nir_src_as_uint(intr->src[1]);
1182
1183
assert(nir_src_is_const(intr->src[0]));
1184
uint32_t index = nir_src_as_uint(intr->src[0]);
1185
if (index != ref_index) {
1186
seq_break = true;
1187
continue;
1188
}
1189
1190
/* Only move loads with an offset that is close enough to the
1191
* reference offset, since otherwise we would not be able to
1192
* skip the unifa write for them. See ntq_emit_load_ubo_unifa.
1193
*/
1194
if (abs(ref_offset - offset) > MAX_UNIFA_SKIP_DISTANCE)
1195
continue;
1196
1197
/* We will move this load if its offset is smaller than ref's
1198
* (in which case we will move it before ref) or if the offset
1199
* is larger than ref's but there are sequence breakers in
* between (in which case we will move it after ref and
1201
* before the sequence breakers).
1202
*/
1203
if (!seq_break && offset >= ref_offset)
1204
continue;
1205
1206
/* Find where exactly we want to move this load:
1207
*
1208
* If we are moving it before ref, we want to check any other
1209
* UBO loads we placed before ref and make sure we insert this
1210
* one properly ordered with them. Likewise, if we are moving
1211
* it after ref.
1212
*/
1213
nir_instr *pos = ref_inst;
1214
nir_instr *tmp = pos;
1215
do {
1216
if (offset < ref_offset)
1217
tmp = nir_instr_prev(tmp);
1218
else
1219
tmp = nir_instr_next(tmp);
1220
1221
if (!tmp || tmp == inst)
1222
break;
1223
1224
/* Ignore non-unifa UBO loads */
1225
if (tmp->type != nir_instr_type_intrinsic)
1226
continue;
1227
1228
nir_intrinsic_instr *tmp_intr =
1229
nir_instr_as_intrinsic(tmp);
1230
if (tmp_intr->intrinsic != nir_intrinsic_load_ubo)
1231
continue;
1232
1233
if (nir_src_is_divergent(tmp_intr->src[1]))
1234
continue;
1235
1236
/* Stop if we find a unifa UBO load that breaks the
1237
* sequence.
1238
*/
1239
if (!nir_src_is_const(tmp_intr->src[1]))
1240
break;
1241
1242
if (nir_src_as_uint(tmp_intr->src[0]) != index)
1243
break;
1244
1245
uint32_t tmp_offset = nir_src_as_uint(tmp_intr->src[1]);
1246
if (offset < ref_offset) {
1247
if (tmp_offset < offset ||
1248
tmp_offset >= ref_offset) {
1249
break;
1250
} else {
1251
pos = tmp;
1252
}
1253
} else {
1254
if (tmp_offset > offset ||
1255
tmp_offset <= ref_offset) {
1256
break;
1257
} else {
1258
pos = tmp;
1259
}
1260
}
1261
} while (true);
1262
1263
/* We can't move the UBO load before the instruction that
1264
* defines its constant offset. If that instruction is placed
1265
* in between the new location (pos) and the current location
1266
* of this load, we will have to move that instruction too.
1267
*
1268
* We don't care about the UBO index definition because that
1269
* is optimized to be reused by all UBO loads for the same
1270
* index and therefore is certain to be defined before the
1271
* first UBO load that uses it.
1272
*/
1273
nir_instr *offset_inst = NULL;
1274
tmp = inst;
1275
while ((tmp = nir_instr_prev(tmp)) != NULL) {
1276
if (pos == tmp) {
1277
/* We reached the target location without
1278
* finding the instruction that defines the
1279
* offset, so that instruction must be before
1280
* the new position and we don't have to fix it.
1281
*/
1282
break;
1283
}
1284
if (intr->src[1].ssa->parent_instr == tmp) {
1285
offset_inst = tmp;
1286
break;
1287
}
1288
}
1289
1290
if (offset_inst) {
1291
exec_node_remove(&offset_inst->node);
1292
exec_node_insert_node_before(&pos->node,
1293
&offset_inst->node);
1294
}
1295
1296
/* Since we are moving the instruction before its current
1297
* location, grab its successor before the move so that
1298
* we can continue the next iteration of the main loop from
1299
* that instruction.
1300
*/
1301
next_inst = nir_instr_next(inst);
1302
1303
/* Move this load to the selected location */
1304
exec_node_remove(&inst->node);
1305
if (offset < ref_offset)
1306
exec_node_insert_node_before(&pos->node, &inst->node);
1307
else
1308
exec_node_insert_after(&pos->node, &inst->node);
1309
1310
progress = true;
1311
}
1312
1313
return progress;
1314
}
1315
1316
static bool
1317
v3d_nir_sort_constant_ubo_loads_block(struct v3d_compile *c,
1318
nir_block *block)
1319
{
1320
bool progress = false;
1321
bool local_progress;
1322
do {
1323
local_progress = false;
1324
nir_foreach_instr_safe(inst, block) {
1325
nir_intrinsic_instr *intr =
1326
nir_instr_as_constant_ubo_load(inst);
1327
if (intr) {
1328
local_progress |=
1329
v3d_nir_sort_constant_ubo_load(block, intr);
1330
}
1331
}
1332
progress |= local_progress;
1333
} while (local_progress);
1334
1335
return progress;
1336
}
1337
1338
/**
1339
* Sorts constant UBO loads in each block by offset to maximize chances of
1340
* skipping unifa writes when converting to VIR. This can increase register
1341
* pressure.
1342
*/
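/* Illustrative example: three loads from the same UBO at constant offsets
* 12, 0 and 4 would be reordered to 0, 4, 12 within the block, so
* consecutive loads can often reuse the unifa pointer set up by the first
* one instead of each emitting their own unifa write.
*/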
1343
static bool
1344
v3d_nir_sort_constant_ubo_loads(nir_shader *s, struct v3d_compile *c)
1345
{
1346
nir_foreach_function(function, s) {
1347
if (function->impl) {
1348
nir_foreach_block(block, function->impl) {
1349
c->sorted_any_ubo_loads |=
1350
v3d_nir_sort_constant_ubo_loads_block(c, block);
1351
}
1352
nir_metadata_preserve(function->impl,
1353
nir_metadata_block_index |
1354
nir_metadata_dominance);
1355
}
1356
}
1357
return c->sorted_any_ubo_loads;
1358
}
1359
1360
static void
1361
lower_load_num_subgroups(struct v3d_compile *c,
1362
nir_builder *b,
1363
nir_intrinsic_instr *intr)
1364
{
1365
assert(c->s->info.stage == MESA_SHADER_COMPUTE);
1366
assert(intr->intrinsic == nir_intrinsic_load_num_subgroups);
1367
1368
b->cursor = nir_after_instr(&intr->instr);
1369
uint32_t num_subgroups =
1370
DIV_ROUND_UP(c->s->info.workgroup_size[0] *
1371
c->s->info.workgroup_size[1] *
1372
c->s->info.workgroup_size[2], V3D_CHANNELS);
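/* e.g. an 8x8x1 workgroup with 16-wide subgroups (V3D_CHANNELS) gives
* DIV_ROUND_UP(64, 16) = 4 subgroups.
*/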
1373
nir_ssa_def *result = nir_imm_int(b, num_subgroups);
1374
nir_ssa_def_rewrite_uses(&intr->dest.ssa, result);
1375
nir_instr_remove(&intr->instr);
1376
}
1377
1378
static bool
1379
lower_subgroup_intrinsics(struct v3d_compile *c,
1380
nir_block *block, nir_builder *b)
1381
{
1382
bool progress = false;
1383
nir_foreach_instr_safe(inst, block) {
1384
if (inst->type != nir_instr_type_intrinsic)
1385
continue;
1386
1387
nir_intrinsic_instr *intr =
1388
nir_instr_as_intrinsic(inst);
1389
if (!intr)
1390
continue;
1391
1392
switch (intr->intrinsic) {
1393
case nir_intrinsic_load_num_subgroups:
1394
lower_load_num_subgroups(c, b, intr);
1395
progress = true;
1396
FALLTHROUGH;
1397
case nir_intrinsic_load_subgroup_id:
1398
case nir_intrinsic_load_subgroup_size:
1399
case nir_intrinsic_load_subgroup_invocation:
1400
case nir_intrinsic_elect:
1401
c->has_subgroups = true;
1402
break;
1403
default:
1404
break;
1405
}
1406
}
1407
1408
return progress;
1409
}
1410
1411
static bool
1412
v3d_nir_lower_subgroup_intrinsics(nir_shader *s, struct v3d_compile *c)
1413
{
1414
bool progress = false;
1415
nir_foreach_function(function, s) {
1416
if (function->impl) {
1417
nir_builder b;
1418
nir_builder_init(&b, function->impl);
1419
1420
nir_foreach_block(block, function->impl)
1421
progress |= lower_subgroup_intrinsics(c, block, &b);
1422
1423
nir_metadata_preserve(function->impl,
1424
nir_metadata_block_index |
1425
nir_metadata_dominance);
1426
}
1427
}
1428
return progress;
1429
}
1430
1431
static void
1432
v3d_attempt_compile(struct v3d_compile *c)
1433
{
1434
switch (c->s->info.stage) {
1435
case MESA_SHADER_VERTEX:
1436
c->vs_key = (struct v3d_vs_key *) c->key;
1437
break;
1438
case MESA_SHADER_GEOMETRY:
1439
c->gs_key = (struct v3d_gs_key *) c->key;
1440
break;
1441
case MESA_SHADER_FRAGMENT:
1442
c->fs_key = (struct v3d_fs_key *) c->key;
1443
break;
1444
case MESA_SHADER_COMPUTE:
1445
break;
1446
default:
1447
unreachable("unsupported shader stage");
1448
}
1449
1450
switch (c->s->info.stage) {
1451
case MESA_SHADER_VERTEX:
1452
v3d_nir_lower_vs_early(c);
1453
break;
1454
case MESA_SHADER_GEOMETRY:
1455
v3d_nir_lower_gs_early(c);
1456
break;
1457
case MESA_SHADER_FRAGMENT:
1458
v3d_nir_lower_fs_early(c);
1459
break;
1460
default:
1461
break;
1462
}
1463
1464
v3d_lower_nir(c);
1465
1466
switch (c->s->info.stage) {
1467
case MESA_SHADER_VERTEX:
1468
v3d_nir_lower_vs_late(c);
1469
break;
1470
case MESA_SHADER_GEOMETRY:
1471
v3d_nir_lower_gs_late(c);
1472
break;
1473
case MESA_SHADER_FRAGMENT:
1474
v3d_nir_lower_fs_late(c);
1475
break;
1476
default:
1477
break;
1478
}
1479
1480
NIR_PASS_V(c->s, v3d_nir_lower_io, c);
1481
NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
1482
NIR_PASS_V(c->s, v3d_nir_lower_image_load_store);
1483
nir_lower_idiv_options idiv_options = {
1484
.imprecise_32bit_lowering = true,
1485
.allow_fp16 = true,
1486
};
1487
NIR_PASS_V(c->s, nir_lower_idiv, &idiv_options);
1488
1489
if (c->key->robust_buffer_access) {
1490
/* v3d_nir_lower_robust_buffer_access assumes constant buffer
1491
* indices on ubo/ssbo intrinsics so run copy propagation and
1492
* constant folding passes before we run the lowering to warrant
1493
* this. We also want to run the lowering before v3d_optimize to
1494
* clean-up redundant get_buffer_size calls produced in the pass.
1495
*/
1496
NIR_PASS_V(c->s, nir_copy_prop);
1497
NIR_PASS_V(c->s, nir_opt_constant_folding);
1498
NIR_PASS_V(c->s, v3d_nir_lower_robust_buffer_access, c);
1499
}
1500
1501
NIR_PASS_V(c->s, nir_lower_wrmasks, should_split_wrmask, c->s);
1502
1503
NIR_PASS_V(c->s, v3d_nir_lower_subgroup_intrinsics, c);
1504
1505
v3d_optimize_nir(c, c->s);
1506
1507
/* Do late algebraic optimization to turn add(a, neg(b)) back into
1508
* subs, then the mandatory cleanup after algebraic. Note that it may
1509
* produce fnegs, and if so then we need to keep running to squash
1510
* fneg(fneg(a)).
1511
*/
1512
bool more_late_algebraic = true;
1513
while (more_late_algebraic) {
1514
more_late_algebraic = false;
1515
NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
1516
NIR_PASS_V(c->s, nir_opt_constant_folding);
1517
NIR_PASS_V(c->s, nir_copy_prop);
1518
NIR_PASS_V(c->s, nir_opt_dce);
1519
NIR_PASS_V(c->s, nir_opt_cse);
1520
}
1521
1522
NIR_PASS_V(c->s, nir_lower_bool_to_int32);
1523
nir_convert_to_lcssa(c->s, true, true);
1524
NIR_PASS_V(c->s, nir_divergence_analysis);
1525
NIR_PASS_V(c->s, nir_convert_from_ssa, true);
1526
1527
struct nir_schedule_options schedule_options = {
1528
/* Schedule for about half our register space, to enable more
1529
* shaders to hit 4 threads.
1530
*/
1531
.threshold = 24,
1532
1533
/* Vertex shaders share the same memory for inputs and outputs,
1534
* fragment and geometry shaders do not.
1535
*/
1536
.stages_with_shared_io_memory =
1537
(((1 << MESA_ALL_SHADER_STAGES) - 1) &
1538
~((1 << MESA_SHADER_FRAGMENT) |
1539
(1 << MESA_SHADER_GEOMETRY))),
1540
1541
.fallback = c->fallback_scheduler,
1542
1543
.intrinsic_cb = v3d_intrinsic_dependency_cb,
1544
.intrinsic_cb_data = c,
1545
};
1546
NIR_PASS_V(c->s, nir_schedule, &schedule_options);
1547
1548
if (!c->disable_constant_ubo_load_sorting)
1549
NIR_PASS_V(c->s, v3d_nir_sort_constant_ubo_loads, c);
1550
1551
v3d_nir_to_vir(c);
1552
}
1553
1554
uint32_t
1555
v3d_prog_data_size(gl_shader_stage stage)
1556
{
1557
static const int prog_data_size[] = {
1558
[MESA_SHADER_VERTEX] = sizeof(struct v3d_vs_prog_data),
1559
[MESA_SHADER_GEOMETRY] = sizeof(struct v3d_gs_prog_data),
1560
[MESA_SHADER_FRAGMENT] = sizeof(struct v3d_fs_prog_data),
1561
[MESA_SHADER_COMPUTE] = sizeof(struct v3d_compute_prog_data),
1562
};
1563
1564
assert(stage >= 0 &&
1565
stage < ARRAY_SIZE(prog_data_size) &&
1566
prog_data_size[stage]);
1567
1568
return prog_data_size[stage];
1569
}
1570
1571
int v3d_shaderdb_dump(struct v3d_compile *c,
1572
char **shaderdb_str)
1573
{
1574
if (c == NULL || c->compilation_result != V3D_COMPILATION_SUCCEEDED)
1575
return -1;
1576
1577
return asprintf(shaderdb_str,
1578
"%s shader: %d inst, %d threads, %d loops, "
1579
"%d uniforms, %d max-temps, %d:%d spills:fills, "
1580
"%d sfu-stalls, %d inst-and-stalls, %d nops",
1581
vir_get_stage_name(c),
1582
c->qpu_inst_count,
1583
c->threads,
1584
c->loops,
1585
c->num_uniforms,
1586
vir_get_max_temps(c),
1587
c->spills,
1588
c->fills,
1589
c->qpu_inst_stalled_count,
1590
c->qpu_inst_count + c->qpu_inst_stalled_count,
1591
c->nop_count);
1592
}
1593
1594
/* This is a list of incremental changes to the compilation strategy
* that will be used to try to compile the shader successfully. The
* default strategy enables all optimizations, which gives the highest
* register pressure but is expected to produce the most optimal code.
* Subsequent strategies incrementally disable specific optimizations
* that are known to contribute to register pressure, in order to be
* able to compile the shader successfully while meeting thread count
* requirements.
*
* V3D 4.1+ has a min thread count of 2, but we can use 1 here to cover
* previous hardware as well (meaning that we are not limiting register
* allocation to any particular thread count). This is fine because
* v3d_nir_to_vir will cap this to the actual minimum.
*/
1608
struct v3d_compiler_strategy {
1609
const char *name;
1610
uint32_t max_threads;
1611
uint32_t min_threads;
1612
bool disable_loop_unrolling;
1613
bool disable_ubo_load_sorting;
1614
bool disable_tmu_pipelining;
1615
bool tmu_spilling_allowed;
1616
} static const strategies[] = {
1617
/*0*/ { "default", 4, 4, false, false, false, false },
1618
/*1*/ { "disable loop unrolling", 4, 4, true, false, false, false },
1619
/*2*/ { "disable UBO load sorting", 4, 4, true, true, false, false },
1620
/*3*/ { "disable TMU pipelining", 4, 4, true, true, true, false },
1621
/*4*/ { "lower thread count", 2, 1, false, false, false, false },
1622
/*5*/ { "disable loop unrolling (ltc)", 2, 1, true, false, false, false },
1623
/*6*/ { "disable UBO load sorting (ltc)", 2, 1, true, true, false, false },
1624
/*7*/ { "disable TMU pipelining (ltc)", 2, 1, true, true, true, true },
1625
/*8*/ { "fallback scheduler", 2, 1, true, true, true, true }
1626
};
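/* Columns follow the struct above: name, max_threads, min_threads,
* disable_loop_unrolling, disable_ubo_load_sorting,
* disable_tmu_pipelining, tmu_spilling_allowed.
*/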
1627
1628
/**
* If a particular optimization didn't make any progress during a compile
* attempt, disabling it alone won't allow us to compile the shader
* successfully, since we'll end up with the same code. Detect these
* scenarios so we can avoid wasting time with useless compiles. We should
* also consider whether the strategy changes other aspects of the
* compilation process, like spilling, and not skip it in that case.
*/
1636
static bool
1637
skip_compile_strategy(struct v3d_compile *c, uint32_t idx)
1638
{
1639
/* We decide if we can skip a strategy based on the optimizations that
1640
* were active in the previous strategy, so we should only be calling this
1641
* for strategies after the first.
1642
*/
1643
assert(idx > 0);
1644
1645
/* Don't skip a strategy that changes spilling behavior */
1646
if (strategies[idx].tmu_spilling_allowed !=
1647
strategies[idx - 1].tmu_spilling_allowed) {
1648
return false;
1649
}
1650
1651
switch (idx) {
1652
/* Loop unrolling: skip if we didn't unroll any loops */
1653
case 1:
1654
case 5:
1655
return !c->unrolled_any_loops;
1656
/* UBO load sorting: skip if we didn't sort any loads */
1657
case 2:
1658
case 6:
1659
return !c->sorted_any_ubo_loads;
1660
/* TMU pipelining: skip if we didn't pipeline any TMU ops */
1661
case 3:
1662
case 7:
1663
return !c->pipelined_any_tmu;
1664
/* Lower thread count: skip if we already tried fewer than 4 threads */
1665
case 4:
1666
return c->threads < 4;
1667
default:
1668
return false;
1669
}
1670
}
1671
uint64_t *v3d_compile(const struct v3d_compiler *compiler,
1672
struct v3d_key *key,
1673
struct v3d_prog_data **out_prog_data,
1674
nir_shader *s,
1675
void (*debug_output)(const char *msg,
1676
void *debug_output_data),
1677
void *debug_output_data,
1678
int program_id, int variant_id,
1679
uint32_t *final_assembly_size)
1680
{
1681
struct v3d_compile *c = NULL;
1682
for (int i = 0; i < ARRAY_SIZE(strategies); i++) {
1683
/* Fallback strategy */
1684
if (i > 0) {
1685
assert(c);
1686
if (skip_compile_strategy(c, i))
1687
continue;
1688
1689
char *debug_msg;
1690
int ret = asprintf(&debug_msg,
1691
"Falling back to strategy '%s' for %s",
1692
strategies[i].name,
1693
vir_get_stage_name(c));
1694
1695
if (ret >= 0) {
1696
if (unlikely(V3D_DEBUG & V3D_DEBUG_PERF))
1697
fprintf(stderr, "%s\n", debug_msg);
1698
1699
c->debug_output(debug_msg, c->debug_output_data);
1700
free(debug_msg);
1701
}
1702
1703
vir_compile_destroy(c);
1704
}
1705
1706
c = vir_compile_init(compiler, key, s,
1707
debug_output, debug_output_data,
1708
program_id, variant_id,
1709
strategies[i].max_threads,
1710
strategies[i].min_threads,
1711
strategies[i].tmu_spilling_allowed,
1712
strategies[i].disable_loop_unrolling,
1713
strategies[i].disable_ubo_load_sorting,
1714
strategies[i].disable_tmu_pipelining,
1715
i == ARRAY_SIZE(strategies) - 1);
1716
1717
v3d_attempt_compile(c);
1718
1719
if (i >= ARRAY_SIZE(strategies) - 1 ||
1720
c->compilation_result !=
1721
V3D_COMPILATION_FAILED_REGISTER_ALLOCATION) {
1722
break;
1723
}
1724
}
1725
1726
if (unlikely(V3D_DEBUG & V3D_DEBUG_PERF) &&
1727
c->compilation_result !=
1728
V3D_COMPILATION_FAILED_REGISTER_ALLOCATION &&
1729
c->spills > 0) {
1730
char *debug_msg;
1731
int ret = asprintf(&debug_msg,
1732
"Compiled %s with %d spills and %d fills",
1733
vir_get_stage_name(c),
1734
c->spills, c->fills);
1735
fprintf(stderr, "%s\n", debug_msg);
1736
1737
if (ret >= 0) {
1738
c->debug_output(debug_msg, c->debug_output_data);
1739
free(debug_msg);
1740
}
1741
}
1742
1743
if (c->compilation_result != V3D_COMPILATION_SUCCEEDED) {
1744
fprintf(stderr, "Failed to compile %s with any strategy.\n",
1745
vir_get_stage_name(c));
1746
}
1747
1748
struct v3d_prog_data *prog_data;
1749
1750
prog_data = rzalloc_size(NULL, v3d_prog_data_size(c->s->info.stage));
1751
1752
v3d_set_prog_data(c, prog_data);
1753
1754
*out_prog_data = prog_data;
1755
1756
char *shaderdb;
1757
int ret = v3d_shaderdb_dump(c, &shaderdb);
1758
if (ret >= 0) {
1759
if (V3D_DEBUG & V3D_DEBUG_SHADERDB)
1760
fprintf(stderr, "SHADER-DB: %s\n", shaderdb);
1761
1762
c->debug_output(shaderdb, c->debug_output_data);
1763
free(shaderdb);
1764
}
1765
1766
return v3d_return_qpu_insts(c, final_assembly_size);
1767
}
1768
1769
void
1770
vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst)
1771
{
1772
if (qinst->dst.file == QFILE_TEMP)
1773
c->defs[qinst->dst.index] = NULL;
1774
1775
assert(&qinst->link != c->cursor.link);
1776
1777
list_del(&qinst->link);
1778
free(qinst);
1779
1780
c->live_intervals_valid = false;
1781
}
1782
1783
struct qreg
1784
vir_follow_movs(struct v3d_compile *c, struct qreg reg)
1785
{
1786
/* XXX
1787
int pack = reg.pack;
1788
1789
while (reg.file == QFILE_TEMP &&
1790
c->defs[reg.index] &&
1791
(c->defs[reg.index]->op == QOP_MOV ||
1792
c->defs[reg.index]->op == QOP_FMOV) &&
1793
!c->defs[reg.index]->dst.pack &&
1794
!c->defs[reg.index]->src[0].pack) {
1795
reg = c->defs[reg.index]->src[0];
1796
}
1797
1798
reg.pack = pack;
1799
*/
1800
return reg;
1801
}
1802
1803
void
1804
vir_compile_destroy(struct v3d_compile *c)
1805
{
1806
/* Defuse the assert that we aren't removing the cursor's instruction.
1807
*/
1808
c->cursor.link = NULL;
1809
1810
vir_for_each_block(block, c) {
1811
while (!list_is_empty(&block->instructions)) {
1812
struct qinst *qinst =
1813
list_first_entry(&block->instructions,
1814
struct qinst, link);
1815
vir_remove_instruction(c, qinst);
1816
}
1817
}
1818
1819
ralloc_free(c);
1820
}
1821
1822
uint32_t
1823
vir_get_uniform_index(struct v3d_compile *c,
1824
enum quniform_contents contents,
1825
uint32_t data)
1826
{
1827
for (int i = 0; i < c->num_uniforms; i++) {
1828
if (c->uniform_contents[i] == contents &&
1829
c->uniform_data[i] == data) {
1830
return i;
1831
}
1832
}
1833
1834
uint32_t uniform = c->num_uniforms++;
1835
1836
if (uniform >= c->uniform_array_size) {
1837
c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
1838
c->uniform_array_size * 2);
1839
1840
c->uniform_data = reralloc(c, c->uniform_data,
1841
uint32_t,
1842
c->uniform_array_size);
1843
c->uniform_contents = reralloc(c, c->uniform_contents,
1844
enum quniform_contents,
1845
c->uniform_array_size);
1846
}
1847
1848
c->uniform_contents[uniform] = contents;
1849
c->uniform_data[uniform] = data;
1850
1851
return uniform;
1852
}
1853
1854
/* Looks back into the current block to find the ldunif that wrote the uniform
1855
* at the requested index. If it finds it, it returns true and writes the
1856
* destination register of the ldunif instruction to 'unif'.
1857
*
1858
* This can impact register pressure and end up leading to worse code, so we
1859
* limit the number of instructions we are willing to look back through to
1860
* strike a good balance.
1861
*/
1862
static bool
1863
try_opt_ldunif(struct v3d_compile *c, uint32_t index, struct qreg *unif)
1864
{
1865
uint32_t count = 20;
1866
struct qinst *prev_inst = NULL;
1867
list_for_each_entry_from_rev(struct qinst, inst, c->cursor.link->prev,
1868
&c->cur_block->instructions, link) {
1869
if ((inst->qpu.sig.ldunif || inst->qpu.sig.ldunifrf) &&
1870
inst->uniform == index) {
1871
prev_inst = inst;
1872
break;
1873
}
1874
1875
if (--count == 0)
1876
break;
1877
}
1878
1879
if (!prev_inst)
1880
return false;
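/* prev_inst loads the uniform we want; now make sure its destination has
* not been overwritten by any later instruction in the block, otherwise
* we cannot reuse it.
*/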
1881
1882
1883
list_for_each_entry_from(struct qinst, inst, prev_inst->link.next,
1884
&c->cur_block->instructions, link) {
1885
if (inst->dst.file == prev_inst->dst.file &&
1886
inst->dst.index == prev_inst->dst.index) {
1887
return false;
1888
}
1889
}
1890
1891
*unif = prev_inst->dst;
1892
return true;
1893
}

struct qreg
vir_uniform(struct v3d_compile *c,
            enum quniform_contents contents,
            uint32_t data)
{
        const int num_uniforms = c->num_uniforms;
        const int index = vir_get_uniform_index(c, contents, data);

        /* If this is not the first time we see this uniform, try to reuse
         * the result of the last ldunif that loaded it.
         */
        const bool is_new_uniform = num_uniforms != c->num_uniforms;
        if (!is_new_uniform && !c->disable_ldunif_opt) {
                struct qreg ldunif_dst;
                if (try_opt_ldunif(c, index, &ldunif_dst))
                        return ldunif_dst;
        }

        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.ldunif = true;
        inst->uniform = index;
        inst->dst = vir_get_temp(c);
        c->defs[inst->dst.index] = inst;
        return inst->dst;
}
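
/* Usage sketch (hypothetical, not from the original source): loading a
 * constant through the uniform stream and consuming it in an ALU op, e.g.
 *
 *    struct qreg one = vir_uniform(c, QUNIFORM_CONSTANT, 0x3f800000);
 *    struct qreg sum = vir_FADD(c, x, one);
 *
 * where 0x3f800000 is the bit pattern of 1.0f and 'x' is an assumed operand.
 * Repeated requests for the same uniform within a block may be satisfied by
 * try_opt_ldunif() instead of emitting another ldunif.
 */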

#define OPTPASS(func)                                                   \
        do {                                                            \
                bool stage_progress = func(c);                          \
                if (stage_progress) {                                   \
                        progress = true;                                \
                        if (print_opt_debug) {                          \
                                fprintf(stderr,                         \
                                        "VIR opt pass %2d: %s progress\n", \
                                        pass, #func);                   \
                        }                                               \
                        /*XXX vir_validate(c);*/                        \
                }                                                       \
        } while (0)
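
/* Editorial note: the do { ... } while (0) wrapper is the usual C idiom for
 * making a multi-statement macro behave like a single statement, so that
 * OPTPASS(pass); can sit safely inside unbraced if/else bodies.
 */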

void
vir_optimize(struct v3d_compile *c)
{
        bool print_opt_debug = false;
        int pass = 1;

        while (true) {
                bool progress = false;

                OPTPASS(vir_opt_copy_propagate);
                OPTPASS(vir_opt_redundant_flags);
                OPTPASS(vir_opt_dead_code);
                OPTPASS(vir_opt_small_immediates);
                OPTPASS(vir_opt_constant_alu);

                if (!progress)
                        break;

                pass++;
        }
}
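
/* Editorial note: the passes are re-run as a group until a full sweep makes
 * no progress, i.e. optimization proceeds to a fixed point. Flipping the
 * print_opt_debug local above reports which pass made progress on each sweep.
 */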

const char *
vir_get_stage_name(struct v3d_compile *c)
{
        if (c->vs_key && c->vs_key->is_coord)
                return "MESA_SHADER_VERTEX_BIN";
        else if (c->gs_key && c->gs_key->is_coord)
                return "MESA_SHADER_GEOMETRY_BIN";
        else
                return gl_shader_stage_name(c->s->info.stage);
}

static inline uint32_t
compute_vpm_size_in_sectors(const struct v3d_device_info *devinfo)
{
        assert(devinfo->vpm_size > 0);
        const uint32_t sector_size = V3D_CHANNELS * sizeof(uint32_t) * 8;
        return devinfo->vpm_size / sector_size;
}
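
/* Worked example (device size is hypothetical): with V3D_CHANNELS == 16 lanes
 * a sector is 16 * sizeof(uint32_t) * 8 = 512 bytes, so a 16 KB VPM
 * (devinfo->vpm_size == 16384) corresponds to 16384 / 512 = 32 sectors.
 */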

/* Computes various parameters affecting VPM memory configuration for programs
 * involving geometry shaders to ensure the program fits in memory and honors
 * the requirements described in the "VPM usage" section of the programming
 * manual.
 */
static bool
compute_vpm_config_gs(struct v3d_device_info *devinfo,
                      struct v3d_vs_prog_data *vs,
                      struct v3d_gs_prog_data *gs,
                      struct vpm_config *vpm_cfg_out)
{
        const uint32_t A = vs->separate_segments ? 1 : 0;
        const uint32_t Ad = vs->vpm_input_size;
        const uint32_t Vd = vs->vpm_output_size;

        const uint32_t vpm_size = compute_vpm_size_in_sectors(devinfo);

        /* Try to fit the program into our VPM memory budget by adjusting
         * configurable parameters iteratively. We do this in two phases:
         * the first phase tries to fit the program into the total available
         * VPM memory. If we succeed at that, then the second phase attempts
         * to fit the program into half of that budget so we can run bin and
         * render programs in parallel.
         */
        struct vpm_config vpm_cfg[2];
        struct vpm_config *final_vpm_cfg = NULL;
        uint32_t phase = 0;

        vpm_cfg[phase].As = 1;
        vpm_cfg[phase].Gs = 1;
        vpm_cfg[phase].Gd = gs->vpm_output_size;
        vpm_cfg[phase].gs_width = gs->simd_width;

        /* While there is a requirement that Vc >= [Vn / 16], this is
         * always the case when tessellation is not present because in that
         * case Vn can be at most 6 (when the input primitive is triangles
         * with adjacency).
         *
         * We always choose Vc=2. We can't go lower than this due to GFXH-1744,
         * and Broadcom has not found it worth it to increase it beyond this
         * in general. Increasing Vc also increases VPM memory pressure, which
         * can turn out to be detrimental to performance in some scenarios.
         */
        vpm_cfg[phase].Vc = 2;

        /* Gv is a constraint on the hardware not to exceed the
         * specified number of vertex segments per GS batch. If adding a
         * new primitive to a GS batch would result in a range of more
         * than Gv vertex segments being referenced by the batch, then
         * the hardware will flush the batch and start a new one. This
         * means that we can choose any value we want; we just need to
         * be aware that larger values improve GS batch utilization
         * at the expense of more VPM memory pressure (which can affect
         * other performance aspects, such as GS dispatch width).
         * We start with the largest value, and will reduce it if we
         * find that total memory pressure is too high.
         */
        vpm_cfg[phase].Gv = 3;
        do {
                /* When GS is present in the absence of TES, we need to
                 * satisfy Ve >= Gv. We go with the smallest value of Ve to
                 * avoid increasing memory pressure.
                 */
                vpm_cfg[phase].Ve = vpm_cfg[phase].Gv;

                uint32_t vpm_sectors =
                        A * vpm_cfg[phase].As * Ad +
                        (vpm_cfg[phase].Vc + vpm_cfg[phase].Ve) * Vd +
                        vpm_cfg[phase].Gs * vpm_cfg[phase].Gd;

                /* Ideally we want to use no more than half of the available
                 * memory so we can execute a bin and render program in
                 * parallel without stalls. If we achieved that then we are
                 * done.
                 */
                if (vpm_sectors <= vpm_size / 2) {
                        final_vpm_cfg = &vpm_cfg[phase];
                        break;
                }

                /* At the very least, we should not allocate more than the
                 * total available VPM memory. If we have a configuration that
                 * succeeds at this, we save it and continue to see if we can
                 * meet the half-memory-use criterion too.
                 */
                if (phase == 0 && vpm_sectors <= vpm_size) {
                        vpm_cfg[1] = vpm_cfg[0];
                        phase = 1;
                }

                /* Try lowering Gv */
                if (vpm_cfg[phase].Gv > 0) {
                        vpm_cfg[phase].Gv--;
                        continue;
                }

                /* Try lowering GS dispatch width */
                if (vpm_cfg[phase].gs_width > 1) {
                        do {
                                vpm_cfg[phase].gs_width >>= 1;
                                vpm_cfg[phase].Gd = align(vpm_cfg[phase].Gd, 2) / 2;
                        } while (vpm_cfg[phase].gs_width == 2);

                        /* Reset Gv to max after dropping dispatch width */
                        vpm_cfg[phase].Gv = 3;
                        continue;
                }

                /* We ran out of options to reduce memory pressure. If we
                 * are at phase 1 we have at least a valid configuration, so
                 * we use that.
                 */
                if (phase == 1)
                        final_vpm_cfg = &vpm_cfg[0];
                break;
        } while (true);

        if (!final_vpm_cfg)
                return false;

        assert(final_vpm_cfg);
        assert(final_vpm_cfg->Gd <= 16);
        assert(final_vpm_cfg->Gv < 4);
        assert(final_vpm_cfg->Ve < 4);
        assert(final_vpm_cfg->Vc >= 2 && final_vpm_cfg->Vc <= 4);
        assert(final_vpm_cfg->gs_width == 1 ||
               final_vpm_cfg->gs_width == 4 ||
               final_vpm_cfg->gs_width == 8 ||
               final_vpm_cfg->gs_width == 16);

        *vpm_cfg_out = *final_vpm_cfg;
        return true;
}
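
/* Illustrative trace (made-up sizes, not from the original source): with
 * A = 1, Ad = 2, Vd = 2 and Gd = 4, the first iteration uses As = 1, Gs = 1,
 * Vc = 2, Gv = 3 and hence Ve = 3, so
 *
 *    vpm_sectors = 1*1*2 + (2 + 3)*2 + 1*4 = 16
 *
 * and on a 32-sector VPM that already satisfies the half-memory target
 * (16 <= 32 / 2), so the configuration is accepted in phase 0.
 */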

bool
v3d_compute_vpm_config(struct v3d_device_info *devinfo,
                       struct v3d_vs_prog_data *vs_bin,
                       struct v3d_vs_prog_data *vs,
                       struct v3d_gs_prog_data *gs_bin,
                       struct v3d_gs_prog_data *gs,
                       struct vpm_config *vpm_cfg_bin,
                       struct vpm_config *vpm_cfg)
{
        assert(vs && vs_bin);
        assert((gs != NULL) == (gs_bin != NULL));

        if (!gs) {
                vpm_cfg_bin->As = 1;
                vpm_cfg_bin->Ve = 0;
                vpm_cfg_bin->Vc = vs_bin->vcm_cache_size;

                vpm_cfg->As = 1;
                vpm_cfg->Ve = 0;
                vpm_cfg->Vc = vs->vcm_cache_size;
        } else {
                if (!compute_vpm_config_gs(devinfo, vs_bin, gs_bin, vpm_cfg_bin))
                        return false;

                if (!compute_vpm_config_gs(devinfo, vs, gs, vpm_cfg))
                        return false;
        }

        return true;
}
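
/* Usage sketch (hypothetical caller, not from this file): the driver asks for
 * a binning and a rendering configuration in one call and must handle the
 * case where the geometry shader does not fit in the VPM, e.g.
 *
 *    struct vpm_config vpm_cfg_bin, vpm_cfg;
 *    if (!v3d_compute_vpm_config(devinfo, vs_bin_prog_data, vs_prog_data,
 *                                gs_bin_prog_data, gs_prog_data,
 *                                &vpm_cfg_bin, &vpm_cfg)) {
 *            // report: pipeline exceeds available VPM memory
 *    }
 */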