GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c
/*
 * Copyright (c) 2012-2019 Etnaviv Project
 * Copyright (c) 2019 Zodiac Inflight Innovations
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jonathan Marek <[email protected]>
 *    Wladimir J. van der Laan <[email protected]>
 */

#include "etnaviv_compiler.h"
30
#include "etnaviv_compiler_nir.h"
31
#include "etnaviv_asm.h"
32
#include "etnaviv_context.h"
33
#include "etnaviv_debug.h"
34
#include "etnaviv_nir.h"
35
#include "etnaviv_uniforms.h"
36
#include "etnaviv_util.h"
37
38
#include <math.h>
39
#include "util/u_memory.h"
40
#include "util/register_allocate.h"
41
#include "compiler/nir/nir_builder.h"
42
43
#include "tgsi/tgsi_strings.h"
44
#include "util/compiler.h"
45
#include "util/half_float.h"
46
47
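/* Filter callback for nir_lower_alu_to_scalar: return true for ALU ops that
 * are handled as scalars here -- transcendentals, fdiv, imul, the vector
 * compares (for now), and fdot2 on parts without the HALTI2 instruction set.
 */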
static bool
etna_alu_to_scalar_filter_cb(const nir_instr *instr, const void *data)
{
   const struct etna_specs *specs = data;

   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   switch (alu->op) {
   case nir_op_frsq:
   case nir_op_frcp:
   case nir_op_flog2:
   case nir_op_fexp2:
   case nir_op_fsqrt:
   case nir_op_fcos:
   case nir_op_fsin:
   case nir_op_fdiv:
   case nir_op_imul:
      return true;
   /* TODO: can do better than alu_to_scalar for vector compares */
   case nir_op_b32all_fequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_fequal4:
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_fnequal4:
   case nir_op_b32all_iequal2:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_iequal4:
   case nir_op_b32any_inequal2:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_inequal4:
      return true;
   case nir_op_fdot2:
      if (!specs->has_halti2_instructions)
         return true;
      break;
   default:
      break;
   }

   return false;
}

static void
etna_emit_block_start(struct etna_compile *c, unsigned block)
{
   c->block_ptr[block] = c->inst_ptr;
}

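/* Record where a shader output ended up: fragment color/depth and vertex
 * position/point size go to dedicated fields in the variant, everything else
 * is appended to the outfile table used later for VS/FS linking.
 */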
static void
etna_emit_output(struct etna_compile *c, nir_variable *var, struct etna_inst_src src)
{
   struct etna_shader_io_file *sf = &c->variant->outfile;

   if (is_fs(c)) {
      switch (var->data.location) {
      case FRAG_RESULT_COLOR:
      case FRAG_RESULT_DATA0: /* DATA0 is used by gallium shaders for color */
         c->variant->ps_color_out_reg = src.reg;
         break;
      case FRAG_RESULT_DEPTH:
         c->variant->ps_depth_out_reg = src.reg;
         break;
      default:
         unreachable("Unsupported fs output");
      }
      return;
   }

   switch (var->data.location) {
   case VARYING_SLOT_POS:
      c->variant->vs_pos_out_reg = src.reg;
      break;
   case VARYING_SLOT_PSIZ:
      c->variant->vs_pointsize_out_reg = src.reg;
      break;
   default:
      sf->reg[sf->num_reg].reg = src.reg;
      sf->reg[sf->num_reg].slot = var->data.location;
      sf->reg[sf->num_reg].num_components = glsl_get_components(var->type);
      sf->num_reg++;
      break;
   }
}

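/* OPT() wraps NIR_PASS and evaluates to whether the pass reported progress,
 * so the optimization loop below can keep iterating until no pass changes
 * the shader any more.
 */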
#define OPT(nir, pass, ...) ({                           \
   bool this_progress = false;                           \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);    \
   this_progress;                                        \
})

static void
etna_optimize_loop(nir_shader *s)
{
   bool progress;
   do {
      progress = false;

      NIR_PASS_V(s, nir_lower_vars_to_ssa);
      progress |= OPT(s, nir_opt_copy_prop_vars);
      progress |= OPT(s, nir_opt_shrink_vectors, true);
      progress |= OPT(s, nir_copy_prop);
      progress |= OPT(s, nir_opt_dce);
      progress |= OPT(s, nir_opt_cse);
      progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
      progress |= OPT(s, nir_opt_intrinsics);
      progress |= OPT(s, nir_opt_algebraic);
      progress |= OPT(s, nir_opt_constant_folding);
      progress |= OPT(s, nir_opt_dead_cf);
      if (OPT(s, nir_opt_trivial_continues)) {
         progress = true;
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(s, nir_copy_prop);
         OPT(s, nir_opt_dce);
      }
      progress |= OPT(s, nir_opt_loop_unroll, nir_var_all);
      progress |= OPT(s, nir_opt_if, false);
      progress |= OPT(s, nir_opt_remove_phis);
      progress |= OPT(s, nir_opt_undef);
   } while (progress);
}

static int
etna_glsl_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

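/* Copy the compiler's constant table into the shader variant.  Each 64-bit
 * entry packs the 32-bit value in the low half and its ETNA_UNIFORM_* kind
 * in the high half (see CONST_VAL below); they are split back into the data
 * and contents arrays here.
 */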
static void
copy_uniform_state_to_shader(struct etna_shader_variant *sobj, uint64_t *consts, unsigned count)
{
   struct etna_shader_uniform_info *uinfo = &sobj->uniforms;

   uinfo->count = count * 4;
   uinfo->data = MALLOC(uinfo->count * sizeof(*uinfo->data));
   uinfo->contents = MALLOC(uinfo->count * sizeof(*uinfo->contents));

   for (unsigned i = 0; i < uinfo->count; i++) {
      uinfo->data[i] = consts[i];
      uinfo->contents[i] = consts[i] >> 32;
   }

   etna_set_shader_uniforms_dirty_flags(sobj);
}

#define ALU_SWIZ(s) INST_SWIZ((s)->swizzle[0], (s)->swizzle[1], (s)->swizzle[2], (s)->swizzle[3])
#define SRC_DISABLE ((hw_src){})
#define SRC_CONST(idx, s) ((hw_src){.use=1, .rgroup = INST_RGROUP_UNIFORM_0, .reg=idx, .swiz=s})
#define SRC_REG(idx, s) ((hw_src){.use=1, .rgroup = INST_RGROUP_TEMP, .reg=idx, .swiz=s})

typedef struct etna_inst_dst hw_dst;
typedef struct etna_inst_src hw_src;

static inline hw_src
src_swizzle(hw_src src, unsigned swizzle)
{
   if (src.rgroup != INST_RGROUP_IMMEDIATE)
      src.swiz = inst_swiz_compose(src.swiz, swizzle);

   return src;
}

/* constants are represented as 64-bit ints
 * 32-bit for the value and 32-bit for the type (imm, uniform, etc)
 */

#define CONST_VAL(a, b) (nir_const_value) {.u64 = (uint64_t)(a) << 32 | (uint64_t)(b)}
#define CONST(x) CONST_VAL(ETNA_UNIFORM_CONSTANT, x)
#define UNIFORM(x) CONST_VAL(ETNA_UNIFORM_UNIFORM, x)
#define TEXSCALE(x, i) CONST_VAL(ETNA_UNIFORM_TEXRECT_SCALE_X + (i), x)

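/* Add a value to one 4-component row of the constant file, reusing a
 * component that already holds the same value or an unused slot.  Returns
 * the component index, or -1 if the row is full.
 */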
static int
const_add(uint64_t *c, uint64_t value)
{
   for (unsigned i = 0; i < 4; i++) {
      if (c[i] == value || !c[i]) {
         c[i] = value;
         return i;
      }
   }
   return -1;
}

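/* Turn a NIR constant into a hardware source: small scalar constants become
 * inline immediates on HALTI2+, everything else is packed into the uniform
 * constant file (sharing components where possible) and read back through a
 * swizzle.
 */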
static hw_src
const_src(struct etna_compile *c, nir_const_value *value, unsigned num_components)
{
   /* use inline immediates if possible */
   if (c->specs->halti >= 2 && num_components == 1 &&
       value[0].u64 >> 32 == ETNA_UNIFORM_CONSTANT) {
      uint32_t bits = value[0].u32;

      /* "float" - shifted by 12 */
      if ((bits & 0xfff) == 0)
         return etna_immediate_src(0, bits >> 12);

      /* "unsigned" - raw 20 bit value */
      if (bits < (1 << 20))
         return etna_immediate_src(2, bits);

      /* "signed" - sign extended 20-bit (sign included) value */
      if (bits >= 0xfff80000)
         return etna_immediate_src(1, bits);
   }

   unsigned i;
   int swiz = -1;
   for (i = 0; swiz < 0; i++) {
      uint64_t *a = &c->consts[i*4];
      uint64_t save[4];
      memcpy(save, a, sizeof(save));
      swiz = 0;
      for (unsigned j = 0; j < num_components; j++) {
         int c = const_add(a, value[j].u64);
         if (c < 0) {
            memcpy(a, save, sizeof(save));
            swiz = -1;
            break;
         }
         swiz |= c << j * 2;
      }
   }

   assert(i <= ETNA_MAX_IMM / 4);
   c->const_count = MAX2(c->const_count, i);

   return SRC_CONST(i - 1, swiz);
}

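/* The register allocator packs small values into register classes that alias
 * parts of a vec4 temp (a scalar in .y, a vec2 in .zw, and so on).  The two
 * tables below give the swizzle needed to read such a class as a source and
 * the swizzle to apply when writing it as a destination.
 */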
/* how to swizzle when used as a src */
static const uint8_t
reg_swiz[NUM_REG_TYPES] = {
   [REG_TYPE_VEC4] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_X] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Y] = SWIZZLE(Y, Y, Y, Y),
   [REG_TYPE_VIRT_VEC2_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2T_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2C_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Z] = SWIZZLE(Z, Z, Z, Z),
   [REG_TYPE_VIRT_VEC2_XZ] = SWIZZLE(X, Z, X, Z),
   [REG_TYPE_VIRT_VEC2_YZ] = SWIZZLE(Y, Z, Y, Z),
   [REG_TYPE_VIRT_VEC2C_YZ] = SWIZZLE(Y, Z, Y, Z),
   [REG_TYPE_VIRT_VEC3_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC3C_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_W] = SWIZZLE(W, W, W, W),
   [REG_TYPE_VIRT_VEC2_XW] = SWIZZLE(X, W, X, W),
   [REG_TYPE_VIRT_VEC2_YW] = SWIZZLE(Y, W, Y, W),
   [REG_TYPE_VIRT_VEC3_XYW] = SWIZZLE(X, Y, W, X),
   [REG_TYPE_VIRT_VEC2_ZW] = SWIZZLE(Z, W, Z, W),
   [REG_TYPE_VIRT_VEC2T_ZW] = SWIZZLE(Z, W, Z, W),
   [REG_TYPE_VIRT_VEC2C_ZW] = SWIZZLE(Z, W, Z, W),
   [REG_TYPE_VIRT_VEC3_XZW] = SWIZZLE(X, Z, W, X),
   [REG_TYPE_VIRT_VEC3_YZW] = SWIZZLE(Y, Z, W, X),
   [REG_TYPE_VIRT_VEC3C_YZW] = SWIZZLE(Y, Z, W, X),
};

/* how to swizzle when used as a dest */
static const uint8_t
reg_dst_swiz[NUM_REG_TYPES] = {
   [REG_TYPE_VEC4] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_X] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Y] = SWIZZLE(X, X, X, X),
   [REG_TYPE_VIRT_VEC2_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2T_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2C_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Z] = SWIZZLE(X, X, X, X),
   [REG_TYPE_VIRT_VEC2_XZ] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC2_YZ] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC2C_YZ] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC3_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC3C_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_W] = SWIZZLE(X, X, X, X),
   [REG_TYPE_VIRT_VEC2_XW] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC2_YW] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC3_XYW] = SWIZZLE(X, Y, Z, Z),
   [REG_TYPE_VIRT_VEC2_ZW] = SWIZZLE(X, X, X, Y),
   [REG_TYPE_VIRT_VEC2T_ZW] = SWIZZLE(X, X, X, Y),
   [REG_TYPE_VIRT_VEC2C_ZW] = SWIZZLE(X, X, X, Y),
   [REG_TYPE_VIRT_VEC3_XZW] = SWIZZLE(X, Y, Y, Z),
   [REG_TYPE_VIRT_VEC3_YZW] = SWIZZLE(X, X, Y, Z),
   [REG_TYPE_VIRT_VEC3C_YZW] = SWIZZLE(X, X, Y, Z),
};

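/* Translate a nir_src into a hardware source operand.  Register sources come
 * from the register-allocation results (ra_src); constants go through
 * const_src; a few intrinsics map to fixed hardware registers (front face,
 * frag coord).
 */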
/* nir_src to allocated register */
static hw_src
ra_src(struct etna_compile *c, nir_src *src)
{
   unsigned reg = ra_get_node_reg(c->g, c->live_map[src_index(c->impl, src)]);
   return SRC_REG(reg_get_base(c, reg), reg_swiz[reg_get_type(reg)]);
}

static hw_src
get_src(struct etna_compile *c, nir_src *src)
{
   if (!src->is_ssa)
      return ra_src(c, src);

   nir_instr *instr = src->ssa->parent_instr;

   if (instr->pass_flags & BYPASS_SRC) {
      assert(instr->type == nir_instr_type_alu);
      nir_alu_instr *alu = nir_instr_as_alu(instr);
      assert(alu->op == nir_op_mov);
      return src_swizzle(get_src(c, &alu->src[0].src), ALU_SWIZ(&alu->src[0]));
   }

   switch (instr->type) {
   case nir_instr_type_load_const:
      return const_src(c, nir_instr_as_load_const(instr)->value, src->ssa->num_components);
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      switch (intr->intrinsic) {
      case nir_intrinsic_load_input:
      case nir_intrinsic_load_instance_id:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
         return ra_src(c, src);
      case nir_intrinsic_load_front_face:
         return (hw_src) { .use = 1, .rgroup = INST_RGROUP_INTERNAL };
      case nir_intrinsic_load_frag_coord:
         return SRC_REG(0, INST_SWIZ_IDENTITY);
      case nir_intrinsic_load_texture_rect_scaling: {
         int sampler = nir_src_as_int(intr->src[0]);
         nir_const_value values[] = {
            TEXSCALE(sampler, 0),
            TEXSCALE(sampler, 1),
         };

         return src_swizzle(const_src(c, values, 2), SWIZZLE(X,Y,X,X));
      }
      default:
         compile_error(c, "Unhandled NIR intrinsic type: %s\n",
                       nir_intrinsic_infos[intr->intrinsic].name);
         break;
      }
   } break;
   case nir_instr_type_alu:
   case nir_instr_type_tex:
      return ra_src(c, src);
   case nir_instr_type_ssa_undef: {
      /* return zero to deal with broken Blur demo */
      nir_const_value value = CONST(0);
      return src_swizzle(const_src(c, &value, 1), SWIZZLE(X,X,X,X));
   }
   default:
      compile_error(c, "Unhandled NIR instruction type: %d\n", instr->type);
      break;
   }

   return SRC_DISABLE;
}

static bool
vec_dest_has_swizzle(nir_alu_instr *vec, nir_ssa_def *ssa)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)) || vec->src[i].src.ssa != ssa)
         continue;

      if (vec->src[i].swizzle[0] != i)
         return true;
   }

   /* don't deal with possible bypassed vec/mov chain */
   nir_foreach_use(use_src, ssa) {
      nir_instr *instr = use_src->parent_instr;
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *alu = nir_instr_as_alu(instr);

      switch (alu->op) {
      case nir_op_mov:
      case nir_op_vec2:
      case nir_op_vec3:
      case nir_op_vec4:
         return true;
      default:
         break;
      }
   }
   return false;
}

/* get allocated dest register for nir_dest
 * *p_swiz tells how the components need to be placed into register
 */
static hw_dst
ra_dest(struct etna_compile *c, nir_dest *dest, unsigned *p_swiz)
{
   unsigned swiz = INST_SWIZ_IDENTITY, mask = 0xf;
   dest = real_dest(dest, &swiz, &mask);

   unsigned r = ra_get_node_reg(c->g, c->live_map[dest_index(c->impl, dest)]);
   unsigned t = reg_get_type(r);

   *p_swiz = inst_swiz_compose(swiz, reg_dst_swiz[t]);

   return (hw_dst) {
      .use = 1,
      .reg = reg_get_base(c, r),
      .write_mask = inst_write_mask_compose(mask, reg_writemask[t]),
   };
}

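/* Emit one hardware ALU instruction for a NIR ALU op.  Each source gets its
 * own swizzle composed with the destination swizzle chosen by register
 * allocation; negate/abs/saturate are folded into source and instruction
 * modifiers.
 */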
static void
emit_alu(struct etna_compile *c, nir_alu_instr * alu)
{
   const nir_op_info *info = &nir_op_infos[alu->op];

   /* marked as dead instruction (vecN and other bypassed instr) */
   if (alu->instr.pass_flags)
      return;

   assert(!(alu->op >= nir_op_vec2 && alu->op <= nir_op_vec4));

   unsigned dst_swiz;
   hw_dst dst = ra_dest(c, &alu->dest.dest, &dst_swiz);

   /* compose alu write_mask with RA write mask */
   if (!alu->dest.dest.is_ssa)
      dst.write_mask = inst_write_mask_compose(alu->dest.write_mask, dst.write_mask);

   switch (alu->op) {
   case nir_op_fdot2:
   case nir_op_fdot3:
   case nir_op_fdot4:
      /* not per-component - don't compose dst_swiz */
      dst_swiz = INST_SWIZ_IDENTITY;
      break;
   default:
      break;
   }

   hw_src srcs[3];

   for (int i = 0; i < info->num_inputs; i++) {
      nir_alu_src *asrc = &alu->src[i];
      hw_src src;

      src = src_swizzle(get_src(c, &asrc->src), ALU_SWIZ(asrc));
      src = src_swizzle(src, dst_swiz);

      if (src.rgroup != INST_RGROUP_IMMEDIATE) {
         src.neg = asrc->negate || (alu->op == nir_op_fneg);
         src.abs = asrc->abs || (alu->op == nir_op_fabs);
      } else {
         assert(!asrc->negate && alu->op != nir_op_fneg);
         assert(!asrc->abs && alu->op != nir_op_fabs);
      }

      srcs[i] = src;
   }

   etna_emit_alu(c, alu->op, dst, srcs, alu->dest.saturate || (alu->op == nir_op_fsat));
}

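/* Emit a texture instruction: collect the coordinate, LOD/bias and compare
 * sources from the NIR tex instruction and hand them to the backend.
 */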
static void
emit_tex(struct etna_compile *c, nir_tex_instr * tex)
{
   unsigned dst_swiz;
   hw_dst dst = ra_dest(c, &tex->dest, &dst_swiz);
   nir_src *coord = NULL, *lod_bias = NULL, *compare = NULL;

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_coord:
         coord = &tex->src[i].src;
         break;
      case nir_tex_src_bias:
      case nir_tex_src_lod:
         assert(!lod_bias);
         lod_bias = &tex->src[i].src;
         break;
      case nir_tex_src_comparator:
         compare = &tex->src[i].src;
         break;
      default:
         compile_error(c, "Unhandled NIR tex src type: %d\n",
                       tex->src[i].src_type);
         break;
      }
   }

   etna_emit_tex(c, tex->op, tex->sampler_index, dst_swiz, dst, get_src(c, coord),
                 lod_bias ? get_src(c, lod_bias) : SRC_DISABLE,
                 compare ? get_src(c, compare) : SRC_DISABLE);
}

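/* Emit code for a NIR intrinsic.  Outputs and discards go straight to the
 * backend emit helpers; an indirect load_uniform becomes a MOVAR that loads
 * the address register followed by a relatively-addressed MOV from the
 * uniform file; load_ubo becomes a memory LOAD using the UBO base address
 * uniform.
 */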
static void
emit_intrinsic(struct etna_compile *c, nir_intrinsic_instr * intr)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_store_deref:
      etna_emit_output(c, nir_src_as_deref(intr->src[0])->var, get_src(c, &intr->src[1]));
      break;
   case nir_intrinsic_discard_if:
      etna_emit_discard(c, get_src(c, &intr->src[0]));
      break;
   case nir_intrinsic_discard:
      etna_emit_discard(c, SRC_DISABLE);
      break;
   case nir_intrinsic_load_uniform: {
      unsigned dst_swiz;
      struct etna_inst_dst dst = ra_dest(c, &intr->dest, &dst_swiz);

      /* TODO: rework so extra MOV isn't required, load up to 4 addresses at once */
      emit_inst(c, &(struct etna_inst) {
         .opcode = INST_OPCODE_MOVAR,
         .dst.write_mask = 0x1,
         .src[2] = get_src(c, &intr->src[0]),
      });
      emit_inst(c, &(struct etna_inst) {
         .opcode = INST_OPCODE_MOV,
         .dst = dst,
         .src[2] = {
            .use = 1,
            .rgroup = INST_RGROUP_UNIFORM_0,
            .reg = nir_intrinsic_base(intr),
            .swiz = dst_swiz,
            .amode = INST_AMODE_ADD_A_X,
         },
      });
   } break;
   case nir_intrinsic_load_ubo: {
      /* TODO: if offset is of the form (x + C) then add C to the base instead */
      unsigned idx = nir_src_as_const_value(intr->src[0])[0].u32;
      unsigned dst_swiz;
      emit_inst(c, &(struct etna_inst) {
         .opcode = INST_OPCODE_LOAD,
         .type = INST_TYPE_U32,
         .dst = ra_dest(c, &intr->dest, &dst_swiz),
         .src[0] = get_src(c, &intr->src[1]),
         .src[1] = const_src(c, &CONST_VAL(ETNA_UNIFORM_UBO0_ADDR + idx, 0), 1),
      });
   } break;
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_frag_coord:
      assert(intr->dest.is_ssa); /* TODO - lower phis could cause this */
      break;
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_texture_rect_scaling:
      break;
   default:
      compile_error(c, "Unhandled NIR intrinsic type: %s\n",
                    nir_intrinsic_infos[intr->intrinsic].name);
   }
}

static void
emit_instr(struct etna_compile *c, nir_instr * instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      emit_alu(c, nir_instr_as_alu(instr));
      break;
   case nir_instr_type_tex:
      emit_tex(c, nir_instr_as_tex(instr));
      break;
   case nir_instr_type_intrinsic:
      emit_intrinsic(c, nir_instr_as_intrinsic(instr));
      break;
   case nir_instr_type_jump:
      assert(nir_instr_is_last(instr));
      break;
   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_deref:
      break;
   default:
      compile_error(c, "Unhandled NIR instruction type: %d\n", instr->type);
      break;
   }
}

static void
emit_block(struct etna_compile *c, nir_block * block)
{
   etna_emit_block_start(c, block->index);

   nir_foreach_instr(instr, block)
      emit_instr(c, instr);

   /* succs->index < block->index is for the loop case */
   nir_block *succs = block->successors[0];
   if (nir_block_ends_in_jump(block) || succs->index < block->index)
      etna_emit_jump(c, succs->index, SRC_DISABLE);
}

static void
emit_cf_list(struct etna_compile *c, struct exec_list *list);

static void
emit_if(struct etna_compile *c, nir_if * nif)
{
   etna_emit_jump(c, nir_if_first_else_block(nif)->index, get_src(c, &nif->condition));
   emit_cf_list(c, &nif->then_list);

   /* jump at end of then_list to skip else_list
    * not needed if then_list already ends with a jump or else_list is empty
    */
   if (!nir_block_ends_in_jump(nir_if_last_then_block(nif)) &&
       !nir_cf_list_is_empty_block(&nif->else_list))
      etna_emit_jump(c, nir_if_last_else_block(nif)->successors[0]->index, SRC_DISABLE);

   emit_cf_list(c, &nif->else_list);
}

static void
emit_cf_list(struct etna_compile *c, struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_block:
         emit_block(c, nir_cf_node_as_block(node));
         break;
      case nir_cf_node_if:
         emit_if(c, nir_cf_node_as_if(node));
         break;
      case nir_cf_node_loop:
         emit_cf_list(c, &nir_cf_node_as_loop(node)->body);
         break;
      default:
         compile_error(c, "Unknown NIR node type\n");
         break;
      }
   }
}

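/* Insert a mov for source start_idx of a vecN (and for any later sources that
 * read the same value with the same modifiers), then rewrite those vecN
 * sources to read the mov.  Returns the vecN write mask now covered by it.
 */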
/* based on nir_lower_vec_to_movs */
static unsigned
insert_vec_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
{
   assert(start_idx < nir_op_infos[vec->op].num_inputs);
   unsigned write_mask = (1u << start_idx);

   nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
   nir_alu_src_copy(&mov->src[0], &vec->src[start_idx], mov);

   mov->src[0].swizzle[0] = vec->src[start_idx].swizzle[0];
   mov->src[0].negate = vec->src[start_idx].negate;
   mov->src[0].abs = vec->src[start_idx].abs;

   unsigned num_components = 1;

   for (unsigned i = start_idx + 1; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)))
         continue;

      if (nir_srcs_equal(vec->src[i].src, vec->src[start_idx].src) &&
          vec->src[i].negate == vec->src[start_idx].negate &&
          vec->src[i].abs == vec->src[start_idx].abs) {
         write_mask |= (1 << i);
         mov->src[0].swizzle[num_components] = vec->src[i].swizzle[0];
         num_components++;
      }
   }

   mov->dest.write_mask = (1 << num_components) - 1;
   nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components, 32, NULL);

   /* replace vec srcs with inserted mov */
   for (unsigned i = 0, j = 0; i < 4; i++) {
      if (!(write_mask & (1 << i)))
         continue;

      nir_instr_rewrite_src(&vec->instr, &vec->src[i].src, nir_src_for_ssa(&mov->dest.dest.ssa));
      vec->src[i].swizzle[0] = j++;
   }

   nir_instr_insert_before(&vec->instr, &mov->instr);

   return write_mask;
}

/*
 * for vecN instructions:
 * -merge constant sources into a single src
 * -insert movs (nir_lower_vec_to_movs equivalent)
 * for non-vecN instructions:
 * -try to merge constants as single constant
 * -insert movs for multiple constants (pre-HALTI5)
 */
static void
lower_alu(struct etna_compile *c, nir_alu_instr *alu)
{
   const nir_op_info *info = &nir_op_infos[alu->op];

   nir_builder b;
   nir_builder_init(&b, c->impl);
   b.cursor = nir_before_instr(&alu->instr);

   switch (alu->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      break;
   default:
      /* pre-GC7000L can only have 1 uniform src per instruction */
      if (c->specs->halti >= 5)
         return;

      nir_const_value value[4] = {};
      uint8_t swizzle[4][4] = {};
      unsigned swiz_max = 0, num_const = 0;

      for (unsigned i = 0; i < info->num_inputs; i++) {
         nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
         if (!cv)
            continue;

         unsigned num_components = info->input_sizes[i] ?: alu->dest.dest.ssa.num_components;
         for (unsigned j = 0; j < num_components; j++) {
            int idx = const_add(&value[0].u64, cv[alu->src[i].swizzle[j]].u64);
            swizzle[i][j] = idx;
            swiz_max = MAX2(swiz_max, (unsigned) idx);
         }
         num_const++;
      }

      /* nothing to do */
      if (num_const <= 1)
         return;

      /* resolve with single combined const src */
      if (swiz_max < 4) {
         nir_ssa_def *def = nir_build_imm(&b, swiz_max + 1, 32, value);

         for (unsigned i = 0; i < info->num_inputs; i++) {
            nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
            if (!cv)
               continue;

            nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(def));

            for (unsigned j = 0; j < 4; j++)
               alu->src[i].swizzle[j] = swizzle[i][j];
         }
         return;
      }

      /* resolve with movs */
      num_const = 0;
      for (unsigned i = 0; i < info->num_inputs; i++) {
         nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
         if (!cv)
            continue;

         num_const++;
         if (num_const == 1)
            continue;

         nir_ssa_def *mov = nir_mov(&b, alu->src[i].src.ssa);
         nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(mov));
      }
      return;
   }

   nir_const_value value[4];
   unsigned num_components = 0;

   for (unsigned i = 0; i < info->num_inputs; i++) {
      nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
      if (cv)
         value[num_components++] = cv[alu->src[i].swizzle[0]];
   }

   /* if there is more than one constant source to the vecN, combine them
    * into a single load_const (removing the vecN completely if all components
    * are constant)
    */
   if (num_components > 1) {
      nir_ssa_def *def = nir_build_imm(&b, num_components, 32, value);

      if (num_components == info->num_inputs) {
         nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, def);
         nir_instr_remove(&alu->instr);
         return;
      }

      for (unsigned i = 0, j = 0; i < info->num_inputs; i++) {
         nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
         if (!cv)
            continue;

         nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(def));
         alu->src[i].swizzle[0] = j++;
      }
   }

   unsigned finished_write_mask = 0;
   for (unsigned i = 0; i < 4; i++) {
      if (!(alu->dest.write_mask & (1 << i)))
         continue;

      nir_ssa_def *ssa = alu->src[i].src.ssa;

      /* check that vecN instruction is only user of this */
      bool need_mov = list_length(&ssa->if_uses) != 0;
      nir_foreach_use(use_src, ssa) {
         if (use_src->parent_instr != &alu->instr)
            need_mov = true;
      }

      nir_instr *instr = ssa->parent_instr;
      switch (instr->type) {
      case nir_instr_type_alu:
      case nir_instr_type_tex:
         break;
      case nir_instr_type_intrinsic:
         if (nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_input) {
            need_mov = vec_dest_has_swizzle(alu, &nir_instr_as_intrinsic(instr)->dest.ssa);
            break;
         }
         FALLTHROUGH;
      default:
         need_mov = true;
      }

      if (need_mov && !(finished_write_mask & (1 << i)))
         finished_write_mask |= insert_vec_mov(alu, i, c->nir);
   }
}

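/* Lower the remaining NIR into hardware instructions: fold constant uniform
 * loads into the constant file, lower vecN/constant sources (lower_alu),
 * insert movs for outputs written from constants or system values, leave SSA
 * form, run register allocation, and finally walk the control-flow list
 * emitting instructions.
 */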
static bool
emit_shader(struct etna_compile *c, unsigned *num_temps, unsigned *num_consts)
{
   nir_shader *shader = c->nir;
   c->impl = nir_shader_get_entrypoint(shader);

   bool have_indirect_uniform = false;
   unsigned indirect_max = 0;

   nir_builder b;
   nir_builder_init(&b, c->impl);

   /* convert non-dynamic uniform loads to constants, etc */
   nir_foreach_block(block, c->impl) {
      nir_foreach_instr_safe(instr, block) {
         switch(instr->type) {
         case nir_instr_type_alu:
            /* deals with vecN and const srcs */
            lower_alu(c, nir_instr_as_alu(instr));
            break;
         case nir_instr_type_load_const: {
            nir_load_const_instr *load_const = nir_instr_as_load_const(instr);
            for (unsigned i = 0; i < load_const->def.num_components; i++)
               load_const->value[i] = CONST(load_const->value[i].u32);
         } break;
         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            /* TODO: load_ubo can also become a constant in some cases
             * (at the moment it can end up emitting a LOAD with two
             * uniform sources, which could be a problem on HALTI2)
             */
            if (intr->intrinsic != nir_intrinsic_load_uniform)
               break;
            nir_const_value *off = nir_src_as_const_value(intr->src[0]);
            if (!off || off[0].u64 >> 32 != ETNA_UNIFORM_CONSTANT) {
               have_indirect_uniform = true;
               indirect_max = nir_intrinsic_base(intr) + nir_intrinsic_range(intr);
               break;
            }

            unsigned base = nir_intrinsic_base(intr);
            /* pre halti2 uniform offset will be float */
            if (c->specs->halti < 2)
               base += (unsigned) off[0].f32;
            else
               base += off[0].u32;
            nir_const_value value[4];

            for (unsigned i = 0; i < intr->dest.ssa.num_components; i++)
               value[i] = UNIFORM(base * 4 + i);

            b.cursor = nir_after_instr(instr);
            nir_ssa_def *def = nir_build_imm(&b, intr->dest.ssa.num_components, 32, value);

            nir_ssa_def_rewrite_uses(&intr->dest.ssa, def);
            nir_instr_remove(instr);
         } break;
         default:
            break;
         }
      }
   }

   /* TODO: only emit required indirect uniform ranges */
   if (have_indirect_uniform) {
      for (unsigned i = 0; i < indirect_max * 4; i++)
         c->consts[i] = UNIFORM(i).u64;
      c->const_count = indirect_max;
   }

   /* add mov for any store output using sysval/const */
   nir_foreach_block(block, c->impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

         switch (intr->intrinsic) {
         case nir_intrinsic_store_deref: {
            nir_src *src = &intr->src[1];
            if (nir_src_is_const(*src) || is_sysval(src->ssa->parent_instr)) {
               b.cursor = nir_before_instr(instr);
               nir_instr_rewrite_src(instr, src, nir_src_for_ssa(nir_mov(&b, src->ssa)));
            }
         } break;
         default:
            break;
         }
      }
   }

   /* call directly to avoid validation (load_const don't pass validation at this point) */
   nir_convert_from_ssa(shader, true);
   nir_opt_dce(shader);

   etna_ra_assign(c, shader);

   emit_cf_list(c, &nir_shader_get_entrypoint(shader)->body);

   *num_temps = etna_ra_finish(c);
   *num_consts = c->const_count;
   return true;
}

static bool
etna_compile_check_limits(struct etna_shader_variant *v)
{
   const struct etna_specs *specs = v->shader->specs;
   int max_uniforms = (v->stage == MESA_SHADER_VERTEX)
                         ? specs->max_vs_uniforms
                         : specs->max_ps_uniforms;

   if (!specs->has_icache && v->needs_icache) {
      DBG("Number of instructions (%d) exceeds maximum %d", v->code_size / 4,
          specs->max_instructions);
      return false;
   }

   if (v->num_temps > specs->max_registers) {
      DBG("Number of registers (%d) exceeds maximum %d", v->num_temps,
          specs->max_registers);
      return false;
   }

   if (v->uniforms.count / 4 > max_uniforms) {
      DBG("Number of uniforms (%d) exceeds maximum %d",
          v->uniforms.count / 4, max_uniforms);
      return false;
   }

   return true;
}

static void
fill_vs_mystery(struct etna_shader_variant *v)
{
   const struct etna_specs *specs = v->shader->specs;

   v->input_count_unk8 = DIV_ROUND_UP(v->infile.num_reg + 4, 16); /* XXX what is this */

   /* fill in "mystery meat" load balancing value. This value determines how
    * work is scheduled between VS and PS in the unified shader architecture.
    * More precisely, it is determined from the number of VS outputs, as well
    * as chip-specific vertex output buffer size, vertex cache size, and the
    * number of shader cores.
    *
    * XXX this is a conservative estimate, the "optimal" value is only known
    * for sure at link time because some outputs may be unused and thus
    * unmapped. Then again, in the general use case with GLSL the vertex and
    * fragment shaders are linked already before submitting to Gallium, thus
    * all outputs are used.
    *
    * note: TGSI compiler counts all outputs (including position and
    * pointsize), here v->outfile.num_reg only counts varyings, +1 to
    * compensate for the position output
    * TODO: might have a problem that we don't count pointsize when it is used
    */

   int half_out = v->outfile.num_reg / 2 + 1;
   assert(half_out);

   uint32_t b = ((20480 / (specs->vertex_output_buffer_size -
                           2 * half_out * specs->vertex_cache_size)) +
                 9) /
                10;
   uint32_t a = (b + 256 / (specs->shader_core_count * half_out)) / 2;
   v->vs_load_balancing = VIVS_VS_LOAD_BALANCING_A(MIN2(a, 255)) |
                          VIVS_VS_LOAD_BALANCING_B(MIN2(b, 255)) |
                          VIVS_VS_LOAD_BALANCING_C(0x3f) |
                          VIVS_VS_LOAD_BALANCING_D(0x0f);
}

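/* Main compile entry point: clone the NIR, run the etnaviv lowering and
 * optimization passes (scalarizing what the hardware cannot do as vectors,
 * lowering booleans and, pre-HALTI2, integers to float), emit and assemble
 * the machine code, then check it against the hardware limits.
 */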
bool
etna_compile_shader_nir(struct etna_shader_variant *v)
{
   if (unlikely(!v))
      return false;

   struct etna_compile *c = CALLOC_STRUCT(etna_compile);
   if (!c)
      return false;

   c->variant = v;
   c->specs = v->shader->specs;
   c->nir = nir_shader_clone(NULL, v->shader->nir);

   nir_shader *s = c->nir;
   const struct etna_specs *specs = c->specs;

   v->stage = s->info.stage;
   v->uses_discard = s->info.fs.uses_discard;
   v->num_loops = 0; /* TODO */
   v->vs_id_in_reg = -1;
   v->vs_pos_out_reg = -1;
   v->vs_pointsize_out_reg = -1;
   v->ps_color_out_reg = 0; /* 0 for shader that doesn't write fragcolor.. */
   v->ps_depth_out_reg = -1;

   /*
    * Lower glTexCoord, fixes e.g. neverball point sprite (exit cylinder stars)
    * and gl4es pointsprite.trace apitrace
    */
   if (s->info.stage == MESA_SHADER_FRAGMENT && v->key.sprite_coord_enable) {
      NIR_PASS_V(s, nir_lower_texcoord_replace, v->key.sprite_coord_enable,
                 false, v->key.sprite_coord_yinvert);
   }

   /* setup input linking */
   struct etna_shader_io_file *sf = &v->infile;
   if (s->info.stage == MESA_SHADER_VERTEX) {
      nir_foreach_shader_in_variable(var, s) {
         unsigned idx = var->data.driver_location;
         sf->reg[idx].reg = idx;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
      }
   } else {
      unsigned count = 0;
      nir_foreach_shader_in_variable(var, s) {
         unsigned idx = var->data.driver_location;
         sf->reg[idx].reg = idx + 1;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
         count++;
      }
      assert(sf->num_reg == count);
   }

   NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_uniform, etna_glsl_type_size,
              (nir_lower_io_options)0);

   NIR_PASS_V(s, nir_lower_regs_to_ssa);
   NIR_PASS_V(s, nir_lower_vars_to_ssa);
   NIR_PASS_V(s, nir_lower_indirect_derefs, nir_var_all, UINT32_MAX);
   NIR_PASS_V(s, nir_lower_tex, &(struct nir_lower_tex_options) { .lower_txp = ~0u });
   NIR_PASS_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);
   nir_lower_idiv_options idiv_options = {
      .imprecise_32bit_lowering = true,
      .allow_fp16 = true,
   };
   NIR_PASS_V(s, nir_lower_idiv, &idiv_options);

   etna_optimize_loop(s);

   /* TODO: remove this extra run if nir_opt_peephole_select is able to handle ubo's. */
   if (OPT(s, etna_nir_lower_ubo_to_uniform))
      etna_optimize_loop(s);

   NIR_PASS_V(s, etna_lower_io, v);

   if (v->shader->specs->vs_need_z_div)
      NIR_PASS_V(s, nir_lower_clip_halfz);

   /* lower pre-halti2 to float (halti0 has integers, but only scalar..) */
   if (c->specs->halti < 2) {
      /* use opt_algebraic between int_to_float and bool_to_float because
       * int_to_float emits ftrunc, and ftrunc lowering generates bool ops
       */
      NIR_PASS_V(s, nir_lower_int_to_float);
      NIR_PASS_V(s, nir_opt_algebraic);
      NIR_PASS_V(s, nir_lower_bool_to_float);
   } else {
      NIR_PASS_V(s, nir_lower_bool_to_int32);
   }

   while (OPT(s, nir_opt_vectorize, NULL, NULL));
   NIR_PASS_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);

   NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);
   NIR_PASS_V(s, nir_opt_algebraic_late);

   NIR_PASS_V(s, nir_move_vec_src_uses_to_dest);
   NIR_PASS_V(s, nir_copy_prop);
   /* only HW supported integer source mod is ineg for iadd instruction (?) */
   NIR_PASS_V(s, nir_lower_to_source_mods, ~nir_lower_int_source_mods);
   /* need copy prop after uses_to_dest, and before src mods: see
    * dEQP-GLES2.functional.shaders.random.all_features.fragment.95
    */

   NIR_PASS_V(s, nir_opt_dce);

   NIR_PASS_V(s, nir_lower_bool_to_bitsize);
   NIR_PASS_V(s, etna_lower_alu, c->specs->has_new_transcendentals);

   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
      nir_print_shader(s, stdout);

   unsigned block_ptr[nir_shader_get_entrypoint(s)->num_blocks];
   c->block_ptr = block_ptr;

   unsigned num_consts;
   ASSERTED bool ok = emit_shader(c, &v->num_temps, &num_consts);
   assert(ok);

   /* empty shader, emit NOP */
   if (!c->inst_ptr)
      emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_NOP });

   /* assemble instructions, fixing up labels */
   uint32_t *code = MALLOC(c->inst_ptr * 16);
   for (unsigned i = 0; i < c->inst_ptr; i++) {
      struct etna_inst *inst = &c->code[i];
      if (inst->opcode == INST_OPCODE_BRANCH)
         inst->imm = block_ptr[inst->imm];

      inst->halti5 = specs->halti >= 5;
      etna_assemble(&code[i * 4], inst);
   }

   v->code_size = c->inst_ptr * 4;
   v->code = code;
   v->needs_icache = c->inst_ptr > specs->max_instructions;

   copy_uniform_state_to_shader(v, c->consts, num_consts);

   if (s->info.stage == MESA_SHADER_FRAGMENT) {
      v->input_count_unk8 = 31; /* XXX what is this */
      assert(v->ps_depth_out_reg <= 0);
   } else {
      fill_vs_mystery(v);
   }

   bool result = etna_compile_check_limits(v);
   ralloc_free(c->nir);
   FREE(c);
   return result;
}

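/* Find the vertex shader output that feeds a given fragment shader input by
 * matching varying slots; returns NULL if the VS does not write that slot.
 */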
static const struct etna_shader_inout *
etna_shader_vs_lookup(const struct etna_shader_variant *sobj,
                      const struct etna_shader_inout *in)
{
   for (int i = 0; i < sobj->outfile.num_reg; i++)
      if (sobj->outfile.reg[i].slot == in->slot)
         return &sobj->outfile.reg[i];

   return NULL;
}

bool
etna_link_shader_nir(struct etna_shader_link_info *info,
                     const struct etna_shader_variant *vs,
                     const struct etna_shader_variant *fs)
{
   int comp_ofs = 0;
   /* For each fragment input we need to find the associated vertex shader
    * output, which can be found by matching on semantic name and index. A
    * binary search could be used because the vs outputs are sorted by their
    * semantic index and grouped by semantic type by fill_in_vs_outputs.
    */
   assert(fs->infile.num_reg < ETNA_NUM_INPUTS);
   info->pcoord_varying_comp_ofs = -1;

   for (int idx = 0; idx < fs->infile.num_reg; ++idx) {
      const struct etna_shader_inout *fsio = &fs->infile.reg[idx];
      const struct etna_shader_inout *vsio = etna_shader_vs_lookup(vs, fsio);
      struct etna_varying *varying;
      bool interpolate_always = true;

      assert(fsio->reg > 0 && fsio->reg <= ARRAY_SIZE(info->varyings));

      if (fsio->reg > info->num_varyings)
         info->num_varyings = fsio->reg;

      varying = &info->varyings[fsio->reg - 1];
      varying->num_components = fsio->num_components;

      if (!interpolate_always) /* colors affected by flat shading */
         varying->pa_attributes = 0x200;
      else /* texture coord or other bypasses flat shading */
         varying->pa_attributes = 0x2f1;

      varying->use[0] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[1] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[2] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[3] = VARYING_COMPONENT_USE_UNUSED;

      /* point/tex coord is an input to the PS without matching VS output,
       * so it gets a varying slot without being assigned a VS register.
       */
      if (fsio->slot == VARYING_SLOT_PNTC) {
         varying->use[0] = VARYING_COMPONENT_USE_POINTCOORD_X;
         varying->use[1] = VARYING_COMPONENT_USE_POINTCOORD_Y;

         info->pcoord_varying_comp_ofs = comp_ofs;
      } else if (util_varying_is_point_coord(fsio->slot, fs->key.sprite_coord_enable)) {
         /*
          * Do nothing, TexCoord is lowered to PointCoord above
          * and the TexCoord here is just a remnant. This needs
          * to be removed with some nir_remove_dead_variables(),
          * but that one removes all FS inputs ... why?
          */
      } else {
         if (vsio == NULL) { /* not found -- link error */
            BUG("Semantic value not found in vertex shader outputs\n");
            return true;
         }
         varying->reg = vsio->reg;
      }

      comp_ofs += varying->num_components;
   }

   assert(info->num_varyings == fs->infile.num_reg);

   return false;
}