GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/panfrost/midgard/midgard_compile.c
1
/*
2
* Copyright (C) 2018-2019 Alyssa Rosenzweig <[email protected]>
3
* Copyright (C) 2019-2020 Collabora, Ltd.
4
*
5
* Permission is hereby granted, free of charge, to any person obtaining a
6
* copy of this software and associated documentation files (the "Software"),
7
* to deal in the Software without restriction, including without limitation
8
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
9
* and/or sell copies of the Software, and to permit persons to whom the
10
* Software is furnished to do so, subject to the following conditions:
11
*
12
* The above copyright notice and this permission notice (including the next
13
* paragraph) shall be included in all copies or substantial portions of the
14
* Software.
15
*
16
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22
* SOFTWARE.
23
*/
24
25
#include <sys/types.h>
26
#include <sys/stat.h>
27
#include <sys/mman.h>
28
#include <fcntl.h>
29
#include <stdint.h>
30
#include <stdlib.h>
31
#include <stdio.h>
32
#include <err.h>
33
34
#include "main/mtypes.h"
35
#include "compiler/glsl/glsl_to_nir.h"
36
#include "compiler/nir_types.h"
37
#include "compiler/nir/nir_builder.h"
38
#include "util/half_float.h"
39
#include "util/u_math.h"
40
#include "util/u_debug.h"
41
#include "util/u_dynarray.h"
42
#include "util/list.h"
43
#include "main/mtypes.h"
44
45
#include "midgard.h"
46
#include "midgard_nir.h"
47
#include "midgard_compile.h"
48
#include "midgard_ops.h"
49
#include "helpers.h"
50
#include "compiler.h"
51
#include "midgard_quirks.h"
52
#include "panfrost-quirks.h"
53
#include "panfrost/util/pan_lower_framebuffer.h"
54
55
#include "disassemble.h"
56
57
static const struct debug_named_value midgard_debug_options[] = {
58
{"msgs", MIDGARD_DBG_MSGS, "Print debug messages"},
59
{"shaders", MIDGARD_DBG_SHADERS, "Dump shaders in NIR and MIR"},
60
{"shaderdb", MIDGARD_DBG_SHADERDB, "Prints shader-db statistics"},
61
{"inorder", MIDGARD_DBG_INORDER, "Disables out-of-order scheduling"},
62
{"verbose", MIDGARD_DBG_VERBOSE, "Dump shaders verbosely"},
63
{"internal", MIDGARD_DBG_INTERNAL, "Dump internal shaders"},
64
DEBUG_NAMED_VALUE_END
65
};
66
67
DEBUG_GET_ONCE_FLAGS_OPTION(midgard_debug, "MIDGARD_MESA_DEBUG", midgard_debug_options, 0)
68
69
int midgard_debug = 0;
70
71
#define DBG(fmt, ...) \
72
do { if (midgard_debug & MIDGARD_DBG_MSGS) \
73
fprintf(stderr, "%s:%d: "fmt, \
74
__FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
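/* The flags above are parsed from the MIDGARD_MESA_DEBUG environment
 * variable as a comma-separated list of the option names in the table
 * (e.g. MIDGARD_MESA_DEBUG=msgs,shaders), via the accessor that
 * DEBUG_GET_ONCE_FLAGS_OPTION generates. DBG() itself only prints when
 * the "msgs" flag is set. */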
75
static midgard_block *
76
create_empty_block(compiler_context *ctx)
77
{
78
midgard_block *blk = rzalloc(ctx, midgard_block);
79
80
blk->base.predecessors = _mesa_set_create(blk,
81
_mesa_hash_pointer,
82
_mesa_key_pointer_equal);
83
84
blk->base.name = ctx->block_source_count++;
85
86
return blk;
87
}
88
89
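/* Ends the current basic block: creates a fresh empty block, links it as a
 * successor of the current one, and redirects emission to it, while
 * preserving the original ctx->after_block. Used below after instructions
 * that must terminate a block (e.g. writeout branches and discards). */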
static void
90
schedule_barrier(compiler_context *ctx)
91
{
92
midgard_block *temp = ctx->after_block;
93
ctx->after_block = create_empty_block(ctx);
94
ctx->block_count++;
95
list_addtail(&ctx->after_block->base.link, &ctx->blocks);
96
list_inithead(&ctx->after_block->base.instructions);
97
pan_block_add_successor(&ctx->current_block->base, &ctx->after_block->base);
98
ctx->current_block = ctx->after_block;
99
ctx->after_block = temp;
100
}
101
102
/* Helpers to generate midgard_instruction's using macro magic, since every
103
* driver seems to do it that way */
104
105
#define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
106
107
#define M_LOAD_STORE(name, store, T) \
108
static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
109
midgard_instruction i = { \
110
.type = TAG_LOAD_STORE_4, \
111
.mask = 0xF, \
112
.dest = ~0, \
113
.src = { ~0, ~0, ~0, ~0 }, \
114
.swizzle = SWIZZLE_IDENTITY_4, \
115
.op = midgard_op_##name, \
116
.load_store = { \
117
.signed_offset = address \
118
} \
119
}; \
120
\
121
if (store) { \
122
i.src[0] = ssa; \
123
i.src_types[0] = T; \
124
i.dest_type = T; \
125
} else { \
126
i.dest = ssa; \
127
i.dest_type = T; \
128
} \
129
return i; \
130
}
131
132
#define M_LOAD(name, T) M_LOAD_STORE(name, false, T)
133
#define M_STORE(name, T) M_LOAD_STORE(name, true, T)
134
135
M_LOAD(ld_attr_32, nir_type_uint32);
136
M_LOAD(ld_vary_32, nir_type_uint32);
137
M_LOAD(ld_ubo_32, nir_type_uint32);
138
M_LOAD(ld_ubo_64, nir_type_uint32);
139
M_LOAD(ld_ubo_128, nir_type_uint32);
140
M_LOAD(ld_32, nir_type_uint32);
141
M_LOAD(ld_64, nir_type_uint32);
142
M_LOAD(ld_128, nir_type_uint32);
143
M_STORE(st_32, nir_type_uint32);
144
M_STORE(st_64, nir_type_uint32);
145
M_STORE(st_128, nir_type_uint32);
146
M_LOAD(ld_tilebuffer_raw, nir_type_uint32);
147
M_LOAD(ld_tilebuffer_16f, nir_type_float16);
148
M_LOAD(ld_tilebuffer_32f, nir_type_float32);
149
M_STORE(st_vary_32, nir_type_uint32);
150
M_LOAD(ld_cubemap_coords, nir_type_uint32);
151
M_LOAD(ldst_mov, nir_type_uint32);
152
M_LOAD(ld_image_32f, nir_type_float32);
153
M_LOAD(ld_image_16f, nir_type_float16);
154
M_LOAD(ld_image_32u, nir_type_uint32);
155
M_LOAD(ld_image_32i, nir_type_int32);
156
M_STORE(st_image_32f, nir_type_float32);
157
M_STORE(st_image_16f, nir_type_float16);
158
M_STORE(st_image_32u, nir_type_uint32);
159
M_STORE(st_image_32i, nir_type_int32);
160
M_LOAD(lea_image, nir_type_uint64);
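/* As a rough sketch of the macro magic above, M_LOAD(ld_attr_32,
 * nir_type_uint32) defines something like:
 *
 *     static midgard_instruction m_ld_attr_32(unsigned ssa, unsigned address)
 *     {
 *             midgard_instruction i = {
 *                     .type = TAG_LOAD_STORE_4,
 *                     .mask = 0xF,
 *                     .dest = ssa,
 *                     .dest_type = nir_type_uint32,
 *                     .src = { ~0, ~0, ~0, ~0 },
 *                     .swizzle = SWIZZLE_IDENTITY_4,
 *                     .op = midgard_op_ld_attr_32,
 *                     .load_store = { .signed_offset = address },
 *             };
 *             return i;
 *     }
 *
 * with the store variants taking the value in src[0] instead of dest. */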
161
162
#define M_IMAGE(op) \
163
static midgard_instruction \
164
op ## _image(nir_alu_type type, unsigned val, unsigned address) \
165
{ \
166
switch (type) { \
167
case nir_type_float32: \
168
return m_ ## op ## _image_32f(val, address); \
169
case nir_type_float16: \
170
return m_ ## op ## _image_16f(val, address); \
171
case nir_type_uint32: \
172
return m_ ## op ## _image_32u(val, address); \
173
case nir_type_int32: \
174
return m_ ## op ## _image_32i(val, address); \
175
default: \
176
unreachable("Invalid image type"); \
177
} \
178
}
179
180
M_IMAGE(ld);
181
M_IMAGE(st);
182
183
static midgard_instruction
184
v_branch(bool conditional, bool invert)
185
{
186
midgard_instruction ins = {
187
.type = TAG_ALU_4,
188
.unit = ALU_ENAB_BRANCH,
189
.compact_branch = true,
190
.branch = {
191
.conditional = conditional,
192
.invert_conditional = invert
193
},
194
.dest = ~0,
195
.src = { ~0, ~0, ~0, ~0 },
196
};
197
198
return ins;
199
}
200
201
static void
202
attach_constants(compiler_context *ctx, midgard_instruction *ins, void *constants, int name)
203
{
204
ins->has_constants = true;
205
memcpy(&ins->constants, constants, 16);
206
}
207
208
static int
209
glsl_type_size(const struct glsl_type *type, bool bindless)
210
{
211
return glsl_count_attribute_slots(type, false);
212
}
213
214
/* Lower fdot2 to a vector multiplication followed by channel addition */
215
static bool
216
midgard_nir_lower_fdot2_instr(nir_builder *b, nir_instr *instr, void *data)
217
{
218
if (instr->type != nir_instr_type_alu)
219
return false;
220
221
nir_alu_instr *alu = nir_instr_as_alu(instr);
222
if (alu->op != nir_op_fdot2)
223
return false;
224
225
b->cursor = nir_before_instr(&alu->instr);
226
227
nir_ssa_def *src0 = nir_ssa_for_alu_src(b, alu, 0);
228
nir_ssa_def *src1 = nir_ssa_for_alu_src(b, alu, 1);
229
230
nir_ssa_def *product = nir_fmul(b, src0, src1);
231
232
nir_ssa_def *sum = nir_fadd(b,
233
nir_channel(b, product, 0),
234
nir_channel(b, product, 1));
235
236
/* Replace the fdot2 with this sum */
237
nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, sum);
238
239
return true;
240
}
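/* I.e. fdot2(a, b) becomes:
 *
 *     p = fmul(a, b);
 *     sum = fadd(p.x, p.y);
 *
 * and all uses of the fdot2 result are rewritten to use the sum. */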
241
242
static bool
243
midgard_nir_lower_fdot2(nir_shader *shader)
244
{
245
return nir_shader_instructions_pass(shader,
246
midgard_nir_lower_fdot2_instr,
247
nir_metadata_block_index | nir_metadata_dominance,
248
NULL);
249
}
250
251
static bool
252
mdg_is_64(const nir_instr *instr, const void *_unused)
253
{
254
const nir_alu_instr *alu = nir_instr_as_alu(instr);
255
256
if (nir_dest_bit_size(alu->dest.dest) == 64)
257
return true;
258
259
switch (alu->op) {
260
case nir_op_umul_high:
261
case nir_op_imul_high:
262
return true;
263
default:
264
return false;
265
}
266
}
267
268
/* Only vectorize int64 up to vec2 */
269
static bool
270
midgard_vectorize_filter(const nir_instr *instr, void *data)
271
{
272
if (instr->type != nir_instr_type_alu)
273
return true;
274
275
const nir_alu_instr *alu = nir_instr_as_alu(instr);
276
277
unsigned num_components = alu->dest.dest.ssa.num_components;
278
279
int src_bit_size = nir_src_bit_size(alu->src[0].src);
280
int dst_bit_size = nir_dest_bit_size(alu->dest.dest);
281
282
if (src_bit_size == 64 || dst_bit_size == 64) {
283
if (num_components > 1)
284
return false;
285
}
286
287
return true;
288
}
289
290
291
/* Flushes undefined values to zero */
292
293
static void
294
optimise_nir(nir_shader *nir, unsigned quirks, bool is_blend)
295
{
296
bool progress;
297
unsigned lower_flrp =
298
(nir->options->lower_flrp16 ? 16 : 0) |
299
(nir->options->lower_flrp32 ? 32 : 0) |
300
(nir->options->lower_flrp64 ? 64 : 0);
301
302
NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
303
nir_lower_idiv_options idiv_options = {
304
.imprecise_32bit_lowering = true,
305
.allow_fp16 = true,
306
};
307
NIR_PASS(progress, nir, nir_lower_idiv, &idiv_options);
308
309
nir_lower_tex_options lower_tex_options = {
310
.lower_txs_lod = true,
311
.lower_txp = ~0,
312
.lower_tg4_broadcom_swizzle = true,
313
/* TODO: we have native gradient.. */
314
.lower_txd = true,
315
};
316
317
NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
318
319
/* Must lower fdot2 after tex is lowered */
320
NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
321
322
/* T720 is broken. */
323
324
if (quirks & MIDGARD_BROKEN_LOD)
325
NIR_PASS_V(nir, midgard_nir_lod_errata);
326
327
/* Midgard image ops coordinates are 16-bit instead of 32-bit */
328
NIR_PASS(progress, nir, midgard_nir_lower_image_bitsize);
329
NIR_PASS(progress, nir, midgard_nir_lower_helper_writes);
330
NIR_PASS(progress, nir, pan_lower_helper_invocation);
331
NIR_PASS(progress, nir, pan_lower_sample_pos);
332
333
NIR_PASS(progress, nir, midgard_nir_lower_algebraic_early);
334
335
do {
336
progress = false;
337
338
NIR_PASS(progress, nir, nir_lower_var_copies);
339
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
340
341
NIR_PASS(progress, nir, nir_copy_prop);
342
NIR_PASS(progress, nir, nir_opt_remove_phis);
343
NIR_PASS(progress, nir, nir_opt_dce);
344
NIR_PASS(progress, nir, nir_opt_dead_cf);
345
NIR_PASS(progress, nir, nir_opt_cse);
346
NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
347
NIR_PASS(progress, nir, nir_opt_algebraic);
348
NIR_PASS(progress, nir, nir_opt_constant_folding);
349
350
if (lower_flrp != 0) {
351
bool lower_flrp_progress = false;
352
NIR_PASS(lower_flrp_progress,
353
nir,
354
nir_lower_flrp,
355
lower_flrp,
356
false /* always_precise */);
357
if (lower_flrp_progress) {
358
NIR_PASS(progress, nir,
359
nir_opt_constant_folding);
360
progress = true;
361
}
362
363
/* Nothing should rematerialize any flrps, so we only
364
* need to do this lowering once.
365
*/
366
lower_flrp = 0;
367
}
368
369
NIR_PASS(progress, nir, nir_opt_undef);
370
NIR_PASS(progress, nir, nir_lower_undef_to_zero);
371
372
NIR_PASS(progress, nir, nir_opt_loop_unroll,
373
nir_var_shader_in |
374
nir_var_shader_out |
375
nir_var_function_temp);
376
377
NIR_PASS(progress, nir, nir_opt_vectorize,
378
midgard_vectorize_filter, NULL);
379
} while (progress);
380
381
NIR_PASS_V(nir, nir_lower_alu_to_scalar, mdg_is_64, NULL);
382
383
/* Run after opts so it can hit more */
384
if (!is_blend)
385
NIR_PASS(progress, nir, nir_fuse_io_16);
386
387
/* Must be run at the end to prevent creation of fsin/fcos ops */
388
NIR_PASS(progress, nir, midgard_nir_scale_trig);
389
390
do {
391
progress = false;
392
393
NIR_PASS(progress, nir, nir_opt_dce);
394
NIR_PASS(progress, nir, nir_opt_algebraic);
395
NIR_PASS(progress, nir, nir_opt_constant_folding);
396
NIR_PASS(progress, nir, nir_copy_prop);
397
} while (progress);
398
399
NIR_PASS(progress, nir, nir_opt_algebraic_late);
400
NIR_PASS(progress, nir, nir_opt_algebraic_distribute_src_mods);
401
402
/* We implement booleans as 32-bit 0/~0 */
403
NIR_PASS(progress, nir, nir_lower_bool_to_int32);
404
405
/* Now that booleans are lowered, we can run out late opts */
406
NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);
407
NIR_PASS(progress, nir, midgard_nir_cancel_inot);
408
409
NIR_PASS(progress, nir, nir_copy_prop);
410
NIR_PASS(progress, nir, nir_opt_dce);
411
412
/* Backend scheduler is purely local, so do some global optimizations
413
* to reduce register pressure. */
414
nir_move_options move_all =
415
nir_move_const_undef | nir_move_load_ubo | nir_move_load_input |
416
nir_move_comparisons | nir_move_copies | nir_move_load_ssbo;
417
418
NIR_PASS_V(nir, nir_opt_sink, move_all);
419
NIR_PASS_V(nir, nir_opt_move, move_all);
420
421
/* Take us out of SSA */
422
NIR_PASS(progress, nir, nir_lower_locals_to_regs);
423
NIR_PASS(progress, nir, nir_convert_from_ssa, true);
424
425
/* We are a vector architecture; write combine where possible */
426
NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
427
NIR_PASS(progress, nir, nir_lower_vec_to_movs, NULL, NULL);
428
429
NIR_PASS(progress, nir, nir_opt_dce);
430
}
431
432
/* Do not actually emit a load; instead, cache the constant for inlining */
433
434
static void
435
emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
436
{
437
nir_ssa_def def = instr->def;
438
439
midgard_constants *consts = rzalloc(ctx, midgard_constants);
440
441
assert(instr->def.num_components * instr->def.bit_size <= sizeof(*consts) * 8);
442
443
#define RAW_CONST_COPY(bits) \
444
nir_const_value_to_array(consts->u##bits, instr->value, \
445
instr->def.num_components, u##bits)
446
447
switch (instr->def.bit_size) {
448
case 64:
449
RAW_CONST_COPY(64);
450
break;
451
case 32:
452
RAW_CONST_COPY(32);
453
break;
454
case 16:
455
RAW_CONST_COPY(16);
456
break;
457
case 8:
458
RAW_CONST_COPY(8);
459
break;
460
default:
461
unreachable("Invalid bit_size for load_const instruction\n");
462
}
463
464
/* Shifted for SSA, +1 for off-by-one */
465
_mesa_hash_table_u64_insert(ctx->ssa_constants, (def.index << 1) + 1, consts);
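/* emit_explicit_constant() below searches this table with the same
 * node + 1 key; the << 1 mirrors the SSA-index-to-node mapping used by
 * nir_src_index()/nir_dest_index(). */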
466
}
467
468
/* Normally constants are embedded implicitly, but for I/O and such we have to
469
* explicitly emit a move with the constant source */
470
471
static void
472
emit_explicit_constant(compiler_context *ctx, unsigned node, unsigned to)
473
{
474
void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, node + 1);
475
476
if (constant_value) {
477
midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), to);
478
attach_constants(ctx, &ins, constant_value, node + 1);
479
emit_mir_instruction(ctx, ins);
480
}
481
}
482
483
static bool
484
nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
485
{
486
unsigned comp = src->swizzle[0];
487
488
for (unsigned c = 1; c < nr_components; ++c) {
489
if (src->swizzle[c] != comp)
490
return true;
491
}
492
493
return false;
494
}
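/* E.g. a swizzle of .xxxx counts as scalar here (returns false), while
 * .xyzw or .xyxx does not (returns true). */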
495
496
#define ATOMIC_CASE_IMPL(ctx, instr, nir, op, is_shared) \
497
case nir_intrinsic_##nir: \
498
emit_atomic(ctx, instr, is_shared, midgard_op_##op, ~0); \
499
break;
500
501
#define ATOMIC_CASE(ctx, instr, nir, op) \
502
ATOMIC_CASE_IMPL(ctx, instr, shared_atomic_##nir, atomic_##op, true); \
503
ATOMIC_CASE_IMPL(ctx, instr, global_atomic_##nir, atomic_##op, false);
504
505
#define IMAGE_ATOMIC_CASE(ctx, instr, nir, op) \
506
case nir_intrinsic_image_atomic_##nir: { \
507
midgard_instruction ins = emit_image_op(ctx, instr, true); \
508
emit_atomic(ctx, instr, false, midgard_op_atomic_##op, ins.dest); \
509
break; \
510
}
511
512
#define ALU_CASE(nir, _op) \
513
case nir_op_##nir: \
514
op = midgard_alu_op_##_op; \
515
assert(src_bitsize == dst_bitsize); \
516
break;
517
518
#define ALU_CASE_RTZ(nir, _op) \
519
case nir_op_##nir: \
520
op = midgard_alu_op_##_op; \
521
roundmode = MIDGARD_RTZ; \
522
break;
523
524
#define ALU_CHECK_CMP() \
525
assert(src_bitsize == 16 || src_bitsize == 32 || src_bitsize == 64); \
526
assert(dst_bitsize == 16 || dst_bitsize == 32); \
527
528
#define ALU_CASE_BCAST(nir, _op, count) \
529
case nir_op_##nir: \
530
op = midgard_alu_op_##_op; \
531
broadcast_swizzle = count; \
532
ALU_CHECK_CMP(); \
533
break;
534
535
#define ALU_CASE_CMP(nir, _op) \
536
case nir_op_##nir: \
537
op = midgard_alu_op_##_op; \
538
ALU_CHECK_CMP(); \
539
break;
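/* For illustration, ALU_CASE(fadd, fadd) in the switch below expands to:
 *
 *     case nir_op_fadd:
 *             op = midgard_alu_op_fadd;
 *             assert(src_bitsize == dst_bitsize);
 *             break;
 */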
540
541
/* Compare mir_lower_invert */
542
static bool
543
nir_accepts_inot(nir_op op, unsigned src)
544
{
545
switch (op) {
546
case nir_op_ior:
547
case nir_op_iand: /* TODO: b2f16 */
548
case nir_op_ixor:
549
return true;
550
case nir_op_b32csel:
551
/* Only the condition */
552
return (src == 0);
553
default:
554
return false;
555
}
556
}
557
558
static bool
559
mir_accept_dest_mod(compiler_context *ctx, nir_dest **dest, nir_op op)
560
{
561
if (pan_has_dest_mod(dest, op)) {
562
assert((*dest)->is_ssa);
563
BITSET_SET(ctx->already_emitted, (*dest)->ssa.index);
564
return true;
565
}
566
567
return false;
568
}
569
570
/* Look for floating point mods. We have the mods clamp_m1_1, clamp_0_1,
571
* and clamp_0_inf. We also have the relations (note 3 * 2 = 6 cases):
572
*
573
* clamp_0_1(clamp_0_inf(x)) = clamp_0_1(x)
574
* clamp_0_1(clamp_m1_1(x)) = clamp_0_1(x)
575
* clamp_0_inf(clamp_0_1(x)) = clamp_0_1(x)
576
* clamp_0_inf(clamp_m1_1(x)) = clamp_0_1(x)
577
* clamp_m1_1(clamp_0_1(x)) = clamp_0_1(x)
578
* clamp_m1_1(clamp_0_inf(x)) = clamp_0_1(x)
579
*
580
* So by cases any composition of output modifiers is equivalent to
581
* clamp_0_1 alone.
582
*/
583
static unsigned
584
mir_determine_float_outmod(compiler_context *ctx, nir_dest **dest, unsigned prior_outmod)
585
{
586
bool clamp_0_inf = mir_accept_dest_mod(ctx, dest, nir_op_fclamp_pos_mali);
587
bool clamp_0_1 = mir_accept_dest_mod(ctx, dest, nir_op_fsat);
588
bool clamp_m1_1 = mir_accept_dest_mod(ctx, dest, nir_op_fsat_signed_mali);
589
bool prior = (prior_outmod != midgard_outmod_none);
590
int count = (int) prior + (int) clamp_0_inf + (int) clamp_0_1 + (int) clamp_m1_1;
591
592
return ((count > 1) || clamp_0_1) ? midgard_outmod_clamp_0_1 :
593
clamp_0_inf ? midgard_outmod_clamp_0_inf :
594
clamp_m1_1 ? midgard_outmod_clamp_m1_1 :
595
prior_outmod;
596
}
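/* E.g. for fsat(fclamp_pos_mali(x)) both destination mods are accepted
 * above, so count == 2 and the producing instruction ends up with a single
 * midgard_outmod_clamp_0_1, per the equivalences noted above. */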
597
598
static void
599
mir_copy_src(midgard_instruction *ins, nir_alu_instr *instr, unsigned i, unsigned to, bool *abs, bool *neg, bool *not, enum midgard_roundmode *roundmode, bool is_int, unsigned bcast_count)
600
{
601
nir_alu_src src = instr->src[i];
602
603
if (!is_int) {
604
if (pan_has_source_mod(&src, nir_op_fneg))
605
*neg = !(*neg);
606
607
if (pan_has_source_mod(&src, nir_op_fabs))
608
*abs = true;
609
}
610
611
if (nir_accepts_inot(instr->op, i) && pan_has_source_mod(&src, nir_op_inot))
612
*not = true;
613
614
if (roundmode) {
615
if (pan_has_source_mod(&src, nir_op_fround_even))
616
*roundmode = MIDGARD_RTE;
617
618
if (pan_has_source_mod(&src, nir_op_ftrunc))
619
*roundmode = MIDGARD_RTZ;
620
621
if (pan_has_source_mod(&src, nir_op_ffloor))
622
*roundmode = MIDGARD_RTN;
623
624
if (pan_has_source_mod(&src, nir_op_fceil))
625
*roundmode = MIDGARD_RTP;
626
}
627
628
unsigned bits = nir_src_bit_size(src.src);
629
630
ins->src[to] = nir_src_index(NULL, &src.src);
631
ins->src_types[to] = nir_op_infos[instr->op].input_types[i] | bits;
632
633
for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; ++c) {
634
ins->swizzle[to][c] = src.swizzle[
635
(!bcast_count || c < bcast_count) ? c :
636
(bcast_count - 1)];
637
}
638
}
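/* E.g. with bcast_count == 3, an identity .xyzw source swizzle becomes
 * .xyzz: components past the broadcast count are pinned to the last
 * checked one. */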
639
640
/* Midgard features both fcsel and icsel, depending on whether you want int or
641
* float modifiers. NIR's csel is typeless, so we want a heuristic to guess if
642
* we should emit an int or float csel depending on what modifiers could be
643
* placed. In the absence of modifiers, this is probably arbitrary. */
644
645
static bool
646
mir_is_bcsel_float(nir_alu_instr *instr)
647
{
648
nir_op intmods[] = {
649
nir_op_i2i8, nir_op_i2i16,
650
nir_op_i2i32, nir_op_i2i64
651
};
652
653
nir_op floatmods[] = {
654
nir_op_fabs, nir_op_fneg,
655
nir_op_f2f16, nir_op_f2f32,
656
nir_op_f2f64
657
};
658
659
nir_op floatdestmods[] = {
660
nir_op_fsat, nir_op_fsat_signed_mali, nir_op_fclamp_pos_mali,
661
nir_op_f2f16, nir_op_f2f32
662
};
663
664
signed score = 0;
665
666
for (unsigned i = 1; i < 3; ++i) {
667
nir_alu_src s = instr->src[i];
668
for (unsigned q = 0; q < ARRAY_SIZE(intmods); ++q) {
669
if (pan_has_source_mod(&s, intmods[q]))
670
score--;
671
}
672
}
673
674
for (unsigned i = 1; i < 3; ++i) {
675
nir_alu_src s = instr->src[i];
676
for (unsigned q = 0; q < ARRAY_SIZE(floatmods); ++q) {
677
if (pan_has_source_mod(&s, floatmods[q]))
678
score++;
679
}
680
}
681
682
for (unsigned q = 0; q < ARRAY_SIZE(floatdestmods); ++q) {
683
nir_dest *dest = &instr->dest.dest;
684
if (pan_has_dest_mod(&dest, floatdestmods[q]))
685
score++;
686
}
687
688
return (score > 0);
689
}
690
691
static void
692
emit_alu(compiler_context *ctx, nir_alu_instr *instr)
693
{
694
nir_dest *dest = &instr->dest.dest;
695
696
if (dest->is_ssa && BITSET_TEST(ctx->already_emitted, dest->ssa.index))
697
return;
698
699
/* Derivatives end up emitted on the texture pipe, not the ALUs. This
700
* is handled elsewhere */
701
702
if (instr->op == nir_op_fddx || instr->op == nir_op_fddy) {
703
midgard_emit_derivatives(ctx, instr);
704
return;
705
}
706
707
bool is_ssa = dest->is_ssa;
708
709
unsigned nr_components = nir_dest_num_components(*dest);
710
unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;
711
unsigned op = 0;
712
713
/* Number of components valid to check for the instruction (the rest
714
* will be forced to the last), or 0 to use as-is. Relevant as
715
* ball-type instructions have a channel count in NIR but are all vec4
716
* in Midgard */
717
718
unsigned broadcast_swizzle = 0;
719
720
/* Should we swap arguments? */
721
bool flip_src12 = false;
722
723
ASSERTED unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
724
ASSERTED unsigned dst_bitsize = nir_dest_bit_size(*dest);
725
726
enum midgard_roundmode roundmode = MIDGARD_RTE;
727
728
switch (instr->op) {
729
ALU_CASE(fadd, fadd);
730
ALU_CASE(fmul, fmul);
731
ALU_CASE(fmin, fmin);
732
ALU_CASE(fmax, fmax);
733
ALU_CASE(imin, imin);
734
ALU_CASE(imax, imax);
735
ALU_CASE(umin, umin);
736
ALU_CASE(umax, umax);
737
ALU_CASE(ffloor, ffloor);
738
ALU_CASE(fround_even, froundeven);
739
ALU_CASE(ftrunc, ftrunc);
740
ALU_CASE(fceil, fceil);
741
ALU_CASE(fdot3, fdot3);
742
ALU_CASE(fdot4, fdot4);
743
ALU_CASE(iadd, iadd);
744
ALU_CASE(isub, isub);
745
ALU_CASE(iadd_sat, iaddsat);
746
ALU_CASE(isub_sat, isubsat);
747
ALU_CASE(uadd_sat, uaddsat);
748
ALU_CASE(usub_sat, usubsat);
749
ALU_CASE(imul, imul);
750
ALU_CASE(imul_high, imul);
751
ALU_CASE(umul_high, imul);
752
ALU_CASE(uclz, iclz);
753
754
/* Zero shoved as second-arg */
755
ALU_CASE(iabs, iabsdiff);
756
757
ALU_CASE(uabs_isub, iabsdiff);
758
ALU_CASE(uabs_usub, uabsdiff);
759
760
ALU_CASE(mov, imov);
761
762
ALU_CASE_CMP(feq32, feq);
763
ALU_CASE_CMP(fneu32, fne);
764
ALU_CASE_CMP(flt32, flt);
765
ALU_CASE_CMP(ieq32, ieq);
766
ALU_CASE_CMP(ine32, ine);
767
ALU_CASE_CMP(ilt32, ilt);
768
ALU_CASE_CMP(ult32, ult);
769
770
/* We don't have a native b2f32 instruction. Instead, like many
771
* GPUs, we exploit booleans as 0/~0 for false/true, and
772
* correspondingly AND
773
* by 1.0 to do the type conversion. For the moment, prime us
774
* to emit:
775
*
776
* iand [whatever], #0
777
*
778
* At the end of emit_alu (as MIR), we'll fix-up the constant
779
*/
780
781
ALU_CASE_CMP(b2f32, iand);
782
ALU_CASE_CMP(b2f16, iand);
783
ALU_CASE_CMP(b2i32, iand);
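/* E.g. once the 1.0f constant is patched in at the end of emit_alu,
 * b2f32 of true (~0) computes ~0 & 0x3f800000 = 1.0f and false (0) gives
 * 0.0f; b2i32 ANDs with an integer 1 instead. */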
784
785
/* Likewise, we don't have a dedicated f2b32 instruction, but
786
* we can do a "not equal to 0.0" test. */
787
788
ALU_CASE_CMP(f2b32, fne);
789
ALU_CASE_CMP(i2b32, ine);
790
791
ALU_CASE(frcp, frcp);
792
ALU_CASE(frsq, frsqrt);
793
ALU_CASE(fsqrt, fsqrt);
794
ALU_CASE(fexp2, fexp2);
795
ALU_CASE(flog2, flog2);
796
797
ALU_CASE_RTZ(f2i64, f2i_rte);
798
ALU_CASE_RTZ(f2u64, f2u_rte);
799
ALU_CASE_RTZ(i2f64, i2f_rte);
800
ALU_CASE_RTZ(u2f64, u2f_rte);
801
802
ALU_CASE_RTZ(f2i32, f2i_rte);
803
ALU_CASE_RTZ(f2u32, f2u_rte);
804
ALU_CASE_RTZ(i2f32, i2f_rte);
805
ALU_CASE_RTZ(u2f32, u2f_rte);
806
807
ALU_CASE_RTZ(f2i8, f2i_rte);
808
ALU_CASE_RTZ(f2u8, f2u_rte);
809
810
ALU_CASE_RTZ(f2i16, f2i_rte);
811
ALU_CASE_RTZ(f2u16, f2u_rte);
812
ALU_CASE_RTZ(i2f16, i2f_rte);
813
ALU_CASE_RTZ(u2f16, u2f_rte);
814
815
ALU_CASE(fsin, fsinpi);
816
ALU_CASE(fcos, fcospi);
817
818
/* We'll get 0 in the second arg, so:
819
* ~a = ~(a | 0) = nor(a, 0) */
820
ALU_CASE(inot, inor);
821
ALU_CASE(iand, iand);
822
ALU_CASE(ior, ior);
823
ALU_CASE(ixor, ixor);
824
ALU_CASE(ishl, ishl);
825
ALU_CASE(ishr, iasr);
826
ALU_CASE(ushr, ilsr);
827
828
ALU_CASE_BCAST(b32all_fequal2, fball_eq, 2);
829
ALU_CASE_BCAST(b32all_fequal3, fball_eq, 3);
830
ALU_CASE_CMP(b32all_fequal4, fball_eq);
831
832
ALU_CASE_BCAST(b32any_fnequal2, fbany_neq, 2);
833
ALU_CASE_BCAST(b32any_fnequal3, fbany_neq, 3);
834
ALU_CASE_CMP(b32any_fnequal4, fbany_neq);
835
836
ALU_CASE_BCAST(b32all_iequal2, iball_eq, 2);
837
ALU_CASE_BCAST(b32all_iequal3, iball_eq, 3);
838
ALU_CASE_CMP(b32all_iequal4, iball_eq);
839
840
ALU_CASE_BCAST(b32any_inequal2, ibany_neq, 2);
841
ALU_CASE_BCAST(b32any_inequal3, ibany_neq, 3);
842
ALU_CASE_CMP(b32any_inequal4, ibany_neq);
843
844
/* Source mods will be shoved in later */
845
ALU_CASE(fabs, fmov);
846
ALU_CASE(fneg, fmov);
847
ALU_CASE(fsat, fmov);
848
ALU_CASE(fsat_signed_mali, fmov);
849
ALU_CASE(fclamp_pos_mali, fmov);
850
851
/* For size conversion, we use a move. Ideally though we would squash
852
* these ops together; maybe that has to happen after in NIR as part of
853
* propagation...? An earlier algebraic pass ensured we step down by
854
* only / exactly one size. If stepping down, we use a dest override to
855
* reduce the size; if stepping up, we use a larger-sized move with a
856
* half source and a sign/zero-extension modifier */
857
858
case nir_op_i2i8:
859
case nir_op_i2i16:
860
case nir_op_i2i32:
861
case nir_op_i2i64:
862
case nir_op_u2u8:
863
case nir_op_u2u16:
864
case nir_op_u2u32:
865
case nir_op_u2u64:
866
case nir_op_f2f16:
867
case nir_op_f2f32:
868
case nir_op_f2f64: {
869
if (instr->op == nir_op_f2f16 || instr->op == nir_op_f2f32 ||
870
instr->op == nir_op_f2f64)
871
op = midgard_alu_op_fmov;
872
else
873
op = midgard_alu_op_imov;
874
875
break;
876
}
877
878
/* For greater-or-equal, we lower to less-or-equal and flip the
879
* arguments */
880
881
case nir_op_fge:
882
case nir_op_fge32:
883
case nir_op_ige32:
884
case nir_op_uge32: {
885
op =
886
instr->op == nir_op_fge ? midgard_alu_op_fle :
887
instr->op == nir_op_fge32 ? midgard_alu_op_fle :
888
instr->op == nir_op_ige32 ? midgard_alu_op_ile :
889
instr->op == nir_op_uge32 ? midgard_alu_op_ule :
890
0;
891
892
flip_src12 = true;
893
ALU_CHECK_CMP();
894
break;
895
}
896
897
case nir_op_b32csel: {
898
bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
899
bool is_float = mir_is_bcsel_float(instr);
900
op = is_float ?
901
(mixed ? midgard_alu_op_fcsel_v : midgard_alu_op_fcsel) :
902
(mixed ? midgard_alu_op_icsel_v : midgard_alu_op_icsel);
903
904
break;
905
}
906
907
case nir_op_unpack_32_2x16:
908
case nir_op_unpack_32_4x8:
909
case nir_op_pack_32_2x16:
910
case nir_op_pack_32_4x8: {
911
op = midgard_alu_op_imov;
912
break;
913
}
914
915
default:
916
DBG("Unhandled ALU op %s\n", nir_op_infos[instr->op].name);
917
assert(0);
918
return;
919
}
920
921
/* Promote imov to fmov if it might help inline a constant */
922
if (op == midgard_alu_op_imov && nir_src_is_const(instr->src[0].src)
923
&& nir_src_bit_size(instr->src[0].src) == 32
924
&& nir_is_same_comp_swizzle(instr->src[0].swizzle,
925
nir_src_num_components(instr->src[0].src))) {
926
op = midgard_alu_op_fmov;
927
}
928
929
/* Midgard can perform certain modifiers on output of an ALU op */
930
931
unsigned outmod = 0;
932
bool is_int = midgard_is_integer_op(op);
933
934
if (instr->op == nir_op_umul_high || instr->op == nir_op_imul_high) {
935
outmod = midgard_outmod_keephi;
936
} else if (midgard_is_integer_out_op(op)) {
937
outmod = midgard_outmod_keeplo;
938
} else if (instr->op == nir_op_fsat) {
939
outmod = midgard_outmod_clamp_0_1;
940
} else if (instr->op == nir_op_fsat_signed_mali) {
941
outmod = midgard_outmod_clamp_m1_1;
942
} else if (instr->op == nir_op_fclamp_pos_mali) {
943
outmod = midgard_outmod_clamp_0_inf;
944
}
945
946
/* Fetch unit, quirks, etc information */
947
unsigned opcode_props = alu_opcode_props[op].props;
948
bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;
949
950
if (!midgard_is_integer_out_op(op)) {
951
outmod = mir_determine_float_outmod(ctx, &dest, outmod);
952
}
953
954
midgard_instruction ins = {
955
.type = TAG_ALU_4,
956
.dest = nir_dest_index(dest),
957
.dest_type = nir_op_infos[instr->op].output_type
958
| nir_dest_bit_size(*dest),
959
.roundmode = roundmode,
960
};
961
962
enum midgard_roundmode *roundptr = (opcode_props & MIDGARD_ROUNDS) ?
963
&ins.roundmode : NULL;
964
965
for (unsigned i = nr_inputs; i < ARRAY_SIZE(ins.src); ++i)
966
ins.src[i] = ~0;
967
968
if (quirk_flipped_r24) {
969
ins.src[0] = ~0;
970
mir_copy_src(&ins, instr, 0, 1, &ins.src_abs[1], &ins.src_neg[1], &ins.src_invert[1], roundptr, is_int, broadcast_swizzle);
971
} else {
972
for (unsigned i = 0; i < nr_inputs; ++i) {
973
unsigned to = i;
974
975
if (instr->op == nir_op_b32csel) {
976
/* The condition is the first argument; move
977
* the other arguments up one to be a binary
978
* instruction for Midgard with the condition
979
* last */
980
981
if (i == 0)
982
to = 2;
983
else if (flip_src12)
984
to = 2 - i;
985
else
986
to = i - 1;
987
} else if (flip_src12) {
988
to = 1 - to;
989
}
990
991
mir_copy_src(&ins, instr, i, to, &ins.src_abs[to], &ins.src_neg[to], &ins.src_invert[to], roundptr, is_int, broadcast_swizzle);
992
993
/* (!c) ? a : b = c ? b : a */
994
if (instr->op == nir_op_b32csel && ins.src_invert[2]) {
995
ins.src_invert[2] = false;
996
flip_src12 ^= true;
997
}
998
}
999
}
1000
1001
if (instr->op == nir_op_fneg || instr->op == nir_op_fabs) {
1002
/* Lowered to move */
1003
if (instr->op == nir_op_fneg)
1004
ins.src_neg[1] ^= true;
1005
1006
if (instr->op == nir_op_fabs)
1007
ins.src_abs[1] = true;
1008
}
1009
1010
ins.mask = mask_of(nr_components);
1011
1012
/* Apply writemask if non-SSA, keeping in mind that we can't write to
1013
* components that don't exist. Note modifier => SSA => !reg => no
1014
* writemask, so we don't have to worry about writemasks here. */
1015
1016
if (!is_ssa)
1017
ins.mask &= instr->dest.write_mask;
1018
1019
ins.op = op;
1020
ins.outmod = outmod;
1021
1022
/* Late fixup for emulated instructions */
1023
1024
if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
1025
/* Presently, our second argument is an inline #0 constant.
1026
* Switch over to an embedded 1.0 constant (that can't fit
1027
* inline, since we're 32-bit, not 16-bit like the inline
1028
* constants) */
1029
1030
ins.has_inline_constant = false;
1031
ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1032
ins.src_types[1] = nir_type_float32;
1033
ins.has_constants = true;
1034
1035
if (instr->op == nir_op_b2f32)
1036
ins.constants.f32[0] = 1.0f;
1037
else
1038
ins.constants.i32[0] = 1;
1039
1040
for (unsigned c = 0; c < 16; ++c)
1041
ins.swizzle[1][c] = 0;
1042
} else if (instr->op == nir_op_b2f16) {
1043
ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1044
ins.src_types[1] = nir_type_float16;
1045
ins.has_constants = true;
1046
ins.constants.i16[0] = _mesa_float_to_half(1.0);
1047
1048
for (unsigned c = 0; c < 16; ++c)
1049
ins.swizzle[1][c] = 0;
1050
} else if (nr_inputs == 1 && !quirk_flipped_r24) {
1051
/* Lots of instructions need a 0 plonked in */
1052
ins.has_inline_constant = false;
1053
ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1054
ins.src_types[1] = ins.src_types[0];
1055
ins.has_constants = true;
1056
ins.constants.u32[0] = 0;
1057
1058
for (unsigned c = 0; c < 16; ++c)
1059
ins.swizzle[1][c] = 0;
1060
} else if (instr->op == nir_op_pack_32_2x16) {
1061
ins.dest_type = nir_type_uint16;
1062
ins.mask = mask_of(nr_components * 2);
1063
ins.is_pack = true;
1064
} else if (instr->op == nir_op_pack_32_4x8) {
1065
ins.dest_type = nir_type_uint8;
1066
ins.mask = mask_of(nr_components * 4);
1067
ins.is_pack = true;
1068
} else if (instr->op == nir_op_unpack_32_2x16) {
1069
ins.dest_type = nir_type_uint32;
1070
ins.mask = mask_of(nr_components >> 1);
1071
ins.is_pack = true;
1072
} else if (instr->op == nir_op_unpack_32_4x8) {
1073
ins.dest_type = nir_type_uint32;
1074
ins.mask = mask_of(nr_components >> 2);
1075
ins.is_pack = true;
1076
}
1077
1078
if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
1079
/* To avoid duplicating the lookup tables (probably), true LUT
1080
* instructions can only operate as if they were scalars. Lower
1081
* them here by changing the component. */
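/* E.g. a vec2 frcp whose source swizzle is .yy is emitted as one scalar
 * frcp reading component y with mask xy, since the loop below merges
 * components that pull from the same source channel. */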
1082
1083
unsigned orig_mask = ins.mask;
1084
1085
unsigned swizzle_back[MIR_VEC_COMPONENTS];
1086
memcpy(&swizzle_back, ins.swizzle[0], sizeof(swizzle_back));
1087
1088
midgard_instruction ins_split[MIR_VEC_COMPONENTS];
1089
unsigned ins_count = 0;
1090
1091
for (int i = 0; i < nr_components; ++i) {
1092
/* Mask the associated component, dropping the
1093
* instruction if needed */
1094
1095
ins.mask = 1 << i;
1096
ins.mask &= orig_mask;
1097
1098
for (unsigned j = 0; j < ins_count; ++j) {
1099
if (swizzle_back[i] == ins_split[j].swizzle[0][0]) {
1100
ins_split[j].mask |= ins.mask;
1101
ins.mask = 0;
1102
break;
1103
}
1104
}
1105
1106
if (!ins.mask)
1107
continue;
1108
1109
for (unsigned j = 0; j < MIR_VEC_COMPONENTS; ++j)
1110
ins.swizzle[0][j] = swizzle_back[i]; /* Pull from the correct component */
1111
1112
ins_split[ins_count] = ins;
1113
1114
++ins_count;
1115
}
1116
1117
for (unsigned i = 0; i < ins_count; ++i) {
1118
emit_mir_instruction(ctx, ins_split[i]);
1119
}
1120
} else {
1121
emit_mir_instruction(ctx, ins);
1122
}
1123
}
1124
1125
#undef ALU_CASE
1126
1127
static void
1128
mir_set_intr_mask(nir_instr *instr, midgard_instruction *ins, bool is_read)
1129
{
1130
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1131
unsigned nir_mask = 0;
1132
unsigned dsize = 0;
1133
1134
if (is_read) {
1135
nir_mask = mask_of(nir_intrinsic_dest_components(intr));
1136
dsize = nir_dest_bit_size(intr->dest);
1137
} else {
1138
nir_mask = nir_intrinsic_write_mask(intr);
1139
dsize = 32;
1140
}
1141
1142
/* Once we have the NIR mask, we need to normalize to work in 32-bit space */
1143
unsigned bytemask = pan_to_bytemask(dsize, nir_mask);
1144
ins->dest_type = nir_type_uint | dsize;
1145
mir_set_bytemask(ins, bytemask);
1146
}
1147
1148
/* Uniforms and UBOs use a shared code path, as uniforms are just (slightly
1149
* optimized) versions of UBO #0 */
1150
1151
static midgard_instruction *
1152
emit_ubo_read(
1153
compiler_context *ctx,
1154
nir_instr *instr,
1155
unsigned dest,
1156
unsigned offset,
1157
nir_src *indirect_offset,
1158
unsigned indirect_shift,
1159
unsigned index,
1160
unsigned nr_comps)
1161
{
1162
midgard_instruction ins;
1163
1164
unsigned dest_size = (instr->type == nir_instr_type_intrinsic) ?
1165
nir_dest_bit_size(nir_instr_as_intrinsic(instr)->dest) : 32;
1166
1167
unsigned bitsize = dest_size * nr_comps;
1168
1169
/* Pick the smallest intrinsic to avoid out-of-bounds reads */
1170
if (bitsize <= 32)
1171
ins = m_ld_ubo_32(dest, 0);
1172
else if (bitsize <= 64)
1173
ins = m_ld_ubo_64(dest, 0);
1174
else if (bitsize <= 128)
1175
ins = m_ld_ubo_128(dest, 0);
1176
else
1177
unreachable("Invalid UBO read size");
1178
1179
ins.constants.u32[0] = offset;
1180
1181
if (instr->type == nir_instr_type_intrinsic)
1182
mir_set_intr_mask(instr, &ins, true);
1183
1184
if (indirect_offset) {
1185
ins.src[2] = nir_src_index(ctx, indirect_offset);
1186
ins.src_types[2] = nir_type_uint32;
1187
ins.load_store.index_shift = indirect_shift;
1188
1189
/* X component for the whole swizzle to prevent register
1190
* pressure from ballooning from the extra components */
1191
for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[2]); ++i)
1192
ins.swizzle[2][i] = 0;
1193
} else {
1194
ins.load_store.index_reg = REGISTER_LDST_ZERO;
1195
}
1196
1197
if (indirect_offset && indirect_offset->is_ssa && !indirect_shift)
1198
mir_set_ubo_offset(&ins, indirect_offset, offset);
1199
1200
midgard_pack_ubo_index_imm(&ins.load_store, index);
1201
1202
return emit_mir_instruction(ctx, ins);
1203
}
1204
1205
/* Globals are like UBOs if you squint. And shared memory is like globals if
1206
* you squint even harder */
1207
1208
static void
1209
emit_global(
1210
compiler_context *ctx,
1211
nir_instr *instr,
1212
bool is_read,
1213
unsigned srcdest,
1214
nir_src *offset,
1215
unsigned seg)
1216
{
1217
midgard_instruction ins;
1218
1219
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1220
if (is_read) {
1221
unsigned bitsize = nir_dest_bit_size(intr->dest) *
1222
nir_dest_num_components(intr->dest);
1223
1224
if (bitsize <= 32)
1225
ins = m_ld_32(srcdest, 0);
1226
else if (bitsize <= 64)
1227
ins = m_ld_64(srcdest, 0);
1228
else if (bitsize <= 128)
1229
ins = m_ld_128(srcdest, 0);
1230
else
1231
unreachable("Invalid global read size");
1232
} else {
1233
unsigned bitsize = nir_src_bit_size(intr->src[0]) *
1234
nir_src_num_components(intr->src[0]);
1235
1236
if (bitsize <= 32)
1237
ins = m_st_32(srcdest, 0);
1238
else if (bitsize <= 64)
1239
ins = m_st_64(srcdest, 0);
1240
else if (bitsize <= 128)
1241
ins = m_st_128(srcdest, 0);
1242
else
1243
unreachable("Invalid global store size");
1244
}
1245
1246
mir_set_offset(ctx, &ins, offset, seg);
1247
mir_set_intr_mask(instr, &ins, is_read);
1248
1249
/* Set a valid swizzle for masked out components */
1250
assert(ins.mask);
1251
unsigned first_component = __builtin_ffs(ins.mask) - 1;
1252
1253
for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[0]); ++i) {
1254
if (!(ins.mask & (1 << i)))
1255
ins.swizzle[0][i] = first_component;
1256
}
1257
1258
emit_mir_instruction(ctx, ins);
1259
}
1260
1261
/* If is_shared is off, the only other possible values are globals, since
1262
* SSBO's are being lowered to globals through a NIR pass.
1263
* `image_direct_address` should be ~0 when instr is not an image_atomic
1264
* and the destination register of a lea_image op when it is an image_atomic. */
1265
static void
1266
emit_atomic(
1267
compiler_context *ctx,
1268
nir_intrinsic_instr *instr,
1269
bool is_shared,
1270
midgard_load_store_op op,
1271
unsigned image_direct_address)
1272
{
1273
nir_alu_type type =
1274
(op == midgard_op_atomic_imin || op == midgard_op_atomic_imax) ?
1275
nir_type_int : nir_type_uint;
1276
1277
bool is_image = image_direct_address != ~0;
1278
1279
unsigned dest = nir_dest_index(&instr->dest);
1280
unsigned val_src = is_image ? 3 : 1;
1281
unsigned val = nir_src_index(ctx, &instr->src[val_src]);
1282
unsigned bitsize = nir_src_bit_size(instr->src[val_src]);
1283
emit_explicit_constant(ctx, val, val);
1284
1285
midgard_instruction ins = {
1286
.type = TAG_LOAD_STORE_4,
1287
.mask = 0xF,
1288
.dest = dest,
1289
.src = { ~0, ~0, ~0, val },
1290
.src_types = { 0, 0, 0, type | bitsize },
1291
.op = op
1292
};
1293
1294
nir_src *src_offset = nir_get_io_offset_src(instr);
1295
1296
if (op == midgard_op_atomic_cmpxchg) {
1297
unsigned xchg_val_src = is_image ? 4 : 2;
1298
unsigned xchg_val = nir_src_index(ctx, &instr->src[xchg_val_src]);
1299
emit_explicit_constant(ctx, xchg_val, xchg_val);
1300
1301
ins.src[2] = val;
1302
ins.src_types[2] = type | bitsize;
1303
ins.src[3] = xchg_val;
1304
1305
if (is_shared) {
1306
ins.load_store.arg_reg = REGISTER_LDST_LOCAL_STORAGE_PTR;
1307
ins.load_store.arg_comp = COMPONENT_Z;
1308
ins.load_store.bitsize_toggle = true;
1309
} else {
1310
for(unsigned i = 0; i < 2; ++i)
1311
ins.swizzle[1][i] = i;
1312
1313
ins.src[1] = is_image ? image_direct_address :
1314
nir_src_index(ctx, src_offset);
1315
ins.src_types[1] = nir_type_uint64;
1316
}
1317
} else if (is_image) {
1318
for(unsigned i = 0; i < 2; ++i)
1319
ins.swizzle[2][i] = i;
1320
1321
ins.src[2] = image_direct_address;
1322
ins.src_types[2] = nir_type_uint64;
1323
1324
ins.load_store.arg_reg = REGISTER_LDST_ZERO;
1325
ins.load_store.bitsize_toggle = true;
1326
ins.load_store.index_format = midgard_index_address_u64;
1327
} else
1328
mir_set_offset(ctx, &ins, src_offset, is_shared ? LDST_SHARED : LDST_GLOBAL);
1329
1330
mir_set_intr_mask(&instr->instr, &ins, true);
1331
1332
emit_mir_instruction(ctx, ins);
1333
}
1334
1335
static void
1336
emit_varying_read(
1337
compiler_context *ctx,
1338
unsigned dest, unsigned offset,
1339
unsigned nr_comp, unsigned component,
1340
nir_src *indirect_offset, nir_alu_type type, bool flat)
1341
{
1342
/* XXX: Half-floats? */
1343
/* TODO: swizzle, mask */
1344
1345
midgard_instruction ins = m_ld_vary_32(dest, PACK_LDST_ATTRIB_OFS(offset));
1346
ins.mask = mask_of(nr_comp);
1347
ins.dest_type = type;
1348
1349
if (type == nir_type_float16) {
1350
/* Ensure we are aligned so we can pack it later */
1351
ins.mask = mask_of(ALIGN_POT(nr_comp, 2));
1352
}
1353
1354
for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[0]); ++i)
1355
ins.swizzle[0][i] = MIN2(i + component, COMPONENT_W);
1356
1357
1358
midgard_varying_params p = {
1359
.flat_shading = flat,
1360
.perspective_correction = 1,
1361
.interpolate_sample = true,
1362
};
1363
midgard_pack_varying_params(&ins.load_store, p);
1364
1365
if (indirect_offset) {
1366
ins.src[2] = nir_src_index(ctx, indirect_offset);
1367
ins.src_types[2] = nir_type_uint32;
1368
} else
1369
ins.load_store.index_reg = REGISTER_LDST_ZERO;
1370
1371
ins.load_store.arg_reg = REGISTER_LDST_ZERO;
1372
ins.load_store.index_format = midgard_index_address_u32;
1373
1374
/* Use the type appropriate load */
1375
switch (type) {
1376
case nir_type_uint32:
1377
case nir_type_bool32:
1378
ins.op = midgard_op_ld_vary_32u;
1379
break;
1380
case nir_type_int32:
1381
ins.op = midgard_op_ld_vary_32i;
1382
break;
1383
case nir_type_float32:
1384
ins.op = midgard_op_ld_vary_32;
1385
break;
1386
case nir_type_float16:
1387
ins.op = midgard_op_ld_vary_16;
1388
break;
1389
default:
1390
unreachable("Attempted to load unknown type");
1391
break;
1392
}
1393
1394
emit_mir_instruction(ctx, ins);
1395
}
1396
1397
1398
/* If `is_atomic` is true, we emit a `lea_image` since midgard doesn't have special
1399
* image_atomic opcodes. The caller can then use that address to emit a normal atomic opcode. */
1400
static midgard_instruction
1401
emit_image_op(compiler_context *ctx, nir_intrinsic_instr *instr, bool is_atomic)
1402
{
1403
enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
1404
unsigned nr_attr = ctx->stage == MESA_SHADER_VERTEX ?
1405
util_bitcount64(ctx->nir->info.inputs_read) : 0;
1406
unsigned nr_dim = glsl_get_sampler_dim_coordinate_components(dim);
1407
bool is_array = nir_intrinsic_image_array(instr);
1408
bool is_store = instr->intrinsic == nir_intrinsic_image_store;
1409
1410
/* TODO: MSAA */
1411
assert(dim != GLSL_SAMPLER_DIM_MS && "MSAA'd images not supported");
1412
1413
unsigned coord_reg = nir_src_index(ctx, &instr->src[1]);
1414
emit_explicit_constant(ctx, coord_reg, coord_reg);
1415
1416
nir_src *index = &instr->src[0];
1417
bool is_direct = nir_src_is_const(*index);
1418
1419
/* For image opcodes, address is used as an index into the attribute descriptor */
1420
unsigned address = nr_attr;
1421
if (is_direct)
1422
address += nir_src_as_uint(*index);
1423
1424
midgard_instruction ins;
1425
if (is_store) { /* emit st_image_* */
1426
unsigned val = nir_src_index(ctx, &instr->src[3]);
1427
emit_explicit_constant(ctx, val, val);
1428
1429
nir_alu_type type = nir_intrinsic_src_type(instr);
1430
ins = st_image(type, val, PACK_LDST_ATTRIB_OFS(address));
1431
nir_alu_type base_type = nir_alu_type_get_base_type(type);
1432
ins.src_types[0] = base_type | nir_src_bit_size(instr->src[3]);
1433
} else if (is_atomic) { /* emit lea_image */
1434
unsigned dest = make_compiler_temp_reg(ctx);
1435
ins = m_lea_image(dest, PACK_LDST_ATTRIB_OFS(address));
1436
ins.mask = mask_of(2); /* 64-bit memory address */
1437
} else { /* emit ld_image_* */
1438
nir_alu_type type = nir_intrinsic_dest_type(instr);
1439
ins = ld_image(type, nir_dest_index(&instr->dest), PACK_LDST_ATTRIB_OFS(address));
1440
ins.mask = mask_of(nir_intrinsic_dest_components(instr));
1441
ins.dest_type = type;
1442
}
1443
1444
/* Coord reg */
1445
ins.src[1] = coord_reg;
1446
ins.src_types[1] = nir_type_uint16;
1447
if (nr_dim == 3 || is_array) {
1448
ins.load_store.bitsize_toggle = true;
1449
}
1450
1451
/* Image index reg */
1452
if (!is_direct) {
1453
ins.src[2] = nir_src_index(ctx, index);
1454
ins.src_types[2] = nir_type_uint32;
1455
} else
1456
ins.load_store.index_reg = REGISTER_LDST_ZERO;
1457
1458
emit_mir_instruction(ctx, ins);
1459
1460
return ins;
1461
}
1462
1463
static void
1464
emit_attr_read(
1465
compiler_context *ctx,
1466
unsigned dest, unsigned offset,
1467
unsigned nr_comp, nir_alu_type t)
1468
{
1469
midgard_instruction ins = m_ld_attr_32(dest, PACK_LDST_ATTRIB_OFS(offset));
1470
ins.load_store.arg_reg = REGISTER_LDST_ZERO;
1471
ins.load_store.index_reg = REGISTER_LDST_ZERO;
1472
ins.mask = mask_of(nr_comp);
1473
1474
/* Use the type appropriate load */
1475
switch (t) {
1476
case nir_type_uint:
1477
case nir_type_bool:
1478
ins.op = midgard_op_ld_attr_32u;
1479
break;
1480
case nir_type_int:
1481
ins.op = midgard_op_ld_attr_32i;
1482
break;
1483
case nir_type_float:
1484
ins.op = midgard_op_ld_attr_32;
1485
break;
1486
default:
1487
unreachable("Attempted to load unknown type");
1488
break;
1489
}
1490
1491
emit_mir_instruction(ctx, ins);
1492
}
1493
1494
static void
1495
emit_sysval_read(compiler_context *ctx, nir_instr *instr,
1496
unsigned nr_components, unsigned offset)
1497
{
1498
nir_dest nir_dest;
1499
1500
/* Figure out which uniform this is */
1501
unsigned sysval_ubo =
1502
MAX2(ctx->inputs->sysval_ubo, ctx->nir->info.num_ubos);
1503
int sysval = panfrost_sysval_for_instr(instr, &nir_dest);
1504
unsigned dest = nir_dest_index(&nir_dest);
1505
unsigned uniform =
1506
pan_lookup_sysval(ctx->sysval_to_id, &ctx->info->sysvals, sysval);
1507
1508
/* Emit the read itself -- this is never indirect */
1509
midgard_instruction *ins =
1510
emit_ubo_read(ctx, instr, dest, (uniform * 16) + offset, NULL, 0,
1511
sysval_ubo, nr_components);
1512
1513
ins->mask = mask_of(nr_components);
1514
}
1515
1516
static unsigned
1517
compute_builtin_arg(nir_intrinsic_op op)
1518
{
1519
switch (op) {
1520
case nir_intrinsic_load_workgroup_id:
1521
return REGISTER_LDST_GROUP_ID;
1522
case nir_intrinsic_load_local_invocation_id:
1523
return REGISTER_LDST_LOCAL_THREAD_ID;
1524
case nir_intrinsic_load_global_invocation_id:
1525
case nir_intrinsic_load_global_invocation_id_zero_base:
1526
return REGISTER_LDST_GLOBAL_THREAD_ID;
1527
default:
1528
unreachable("Invalid compute paramater loaded");
1529
}
1530
}
1531
1532
static void
1533
emit_fragment_store(compiler_context *ctx, unsigned src, unsigned src_z, unsigned src_s,
1534
enum midgard_rt_id rt, unsigned sample_iter)
1535
{
1536
assert(rt < ARRAY_SIZE(ctx->writeout_branch));
1537
assert(sample_iter < ARRAY_SIZE(ctx->writeout_branch[0]));
1538
1539
midgard_instruction *br = ctx->writeout_branch[rt][sample_iter];
1540
1541
assert(!br);
1542
1543
emit_explicit_constant(ctx, src, src);
1544
1545
struct midgard_instruction ins =
1546
v_branch(false, false);
1547
1548
bool depth_only = (rt == MIDGARD_ZS_RT);
1549
1550
ins.writeout = depth_only ? 0 : PAN_WRITEOUT_C;
1551
1552
/* Add dependencies */
1553
ins.src[0] = src;
1554
ins.src_types[0] = nir_type_uint32;
1555
1556
if (depth_only)
1557
ins.constants.u32[0] = 0xFF;
1558
else
1559
ins.constants.u32[0] = ((rt - MIDGARD_COLOR_RT0) << 8) | sample_iter;
1560
1561
for (int i = 0; i < 4; ++i)
1562
ins.swizzle[0][i] = i;
1563
1564
if (~src_z) {
1565
emit_explicit_constant(ctx, src_z, src_z);
1566
ins.src[2] = src_z;
1567
ins.src_types[2] = nir_type_uint32;
1568
ins.writeout |= PAN_WRITEOUT_Z;
1569
}
1570
if (~src_s) {
1571
emit_explicit_constant(ctx, src_s, src_s);
1572
ins.src[3] = src_s;
1573
ins.src_types[3] = nir_type_uint32;
1574
ins.writeout |= PAN_WRITEOUT_S;
1575
}
1576
1577
/* Emit the branch */
1578
br = emit_mir_instruction(ctx, ins);
1579
schedule_barrier(ctx);
1580
ctx->writeout_branch[rt][sample_iter] = br;
1581
1582
/* Push our current location = current block count - 1 = where we'll
1583
* jump to. Maybe a bit too clever for my own good */
1584
1585
br->branch.target_block = ctx->block_count - 1;
1586
}
1587
1588
static void
1589
emit_compute_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
1590
{
1591
unsigned reg = nir_dest_index(&instr->dest);
1592
midgard_instruction ins = m_ldst_mov(reg, 0);
1593
ins.mask = mask_of(3);
1594
ins.swizzle[0][3] = COMPONENT_X; /* xyzx */
1595
ins.load_store.arg_reg = compute_builtin_arg(instr->intrinsic);
1596
emit_mir_instruction(ctx, ins);
1597
}
1598
1599
static unsigned
1600
vertex_builtin_arg(nir_intrinsic_op op)
1601
{
1602
switch (op) {
1603
case nir_intrinsic_load_vertex_id_zero_base:
1604
return PAN_VERTEX_ID;
1605
case nir_intrinsic_load_instance_id:
1606
return PAN_INSTANCE_ID;
1607
default:
1608
unreachable("Invalid vertex builtin");
1609
}
1610
}
1611
1612
static void
1613
emit_vertex_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
1614
{
1615
unsigned reg = nir_dest_index(&instr->dest);
1616
emit_attr_read(ctx, reg, vertex_builtin_arg(instr->intrinsic), 1, nir_type_int);
1617
}
1618
1619
static void
1620
emit_special(compiler_context *ctx, nir_intrinsic_instr *instr, unsigned idx)
1621
{
1622
unsigned reg = nir_dest_index(&instr->dest);
1623
1624
midgard_instruction ld = m_ld_tilebuffer_raw(reg, 0);
1625
ld.op = midgard_op_ld_special_32u;
1626
ld.load_store.signed_offset = PACK_LDST_SELECTOR_OFS(idx);
1627
ld.load_store.index_reg = REGISTER_LDST_ZERO;
1628
1629
for (int i = 0; i < 4; ++i)
1630
ld.swizzle[0][i] = COMPONENT_X;
1631
1632
emit_mir_instruction(ctx, ld);
1633
}
1634
1635
static void
1636
emit_control_barrier(compiler_context *ctx)
1637
{
1638
midgard_instruction ins = {
1639
.type = TAG_TEXTURE_4,
1640
.dest = ~0,
1641
.src = { ~0, ~0, ~0, ~0 },
1642
.op = midgard_tex_op_barrier,
1643
};
1644
1645
emit_mir_instruction(ctx, ins);
1646
}
1647
1648
static unsigned
1649
mir_get_branch_cond(nir_src *src, bool *invert)
1650
{
1651
/* Wrap it. No swizzle since it's a scalar */
1652
1653
nir_alu_src alu = {
1654
.src = *src
1655
};
1656
1657
*invert = pan_has_source_mod(&alu, nir_op_inot);
1658
return nir_src_index(NULL, &alu.src);
1659
}
1660
1661
static uint8_t
1662
output_load_rt_addr(compiler_context *ctx, nir_intrinsic_instr *instr)
1663
{
1664
if (ctx->inputs->is_blend)
1665
return MIDGARD_COLOR_RT0 + ctx->inputs->blend.rt;
1666
1667
const nir_variable *var;
1668
var = nir_find_variable_with_driver_location(ctx->nir, nir_var_shader_out, nir_intrinsic_base(instr));
1669
assert(var);
1670
1671
unsigned loc = var->data.location;
1672
1673
if (loc >= FRAG_RESULT_DATA0)
1674
return loc - FRAG_RESULT_DATA0;
1675
1676
if (loc == FRAG_RESULT_DEPTH)
1677
return 0x1F;
1678
if (loc == FRAG_RESULT_STENCIL)
1679
return 0x1E;
1680
1681
unreachable("Invalid RT to load from");
1682
}
1683
1684
static void
1685
emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
1686
{
1687
unsigned offset = 0, reg;
1688
1689
switch (instr->intrinsic) {
1690
case nir_intrinsic_discard_if:
1691
case nir_intrinsic_discard: {
1692
bool conditional = instr->intrinsic == nir_intrinsic_discard_if;
1693
struct midgard_instruction discard = v_branch(conditional, false);
1694
discard.branch.target_type = TARGET_DISCARD;
1695
1696
if (conditional) {
1697
discard.src[0] = mir_get_branch_cond(&instr->src[0],
1698
&discard.branch.invert_conditional);
1699
discard.src_types[0] = nir_type_uint32;
1700
}
1701
1702
emit_mir_instruction(ctx, discard);
1703
schedule_barrier(ctx);
1704
1705
break;
1706
}
1707
1708
case nir_intrinsic_image_load:
1709
case nir_intrinsic_image_store:
1710
emit_image_op(ctx, instr, false);
1711
break;
1712
1713
case nir_intrinsic_image_size: {
1714
unsigned nr_comp = nir_intrinsic_dest_components(instr);
1715
emit_sysval_read(ctx, &instr->instr, nr_comp, 0);
1716
break;
1717
}
1718
1719
case nir_intrinsic_load_ubo:
1720
case nir_intrinsic_load_global:
1721
case nir_intrinsic_load_global_constant:
1722
case nir_intrinsic_load_shared:
1723
case nir_intrinsic_load_scratch:
1724
case nir_intrinsic_load_input:
1725
case nir_intrinsic_load_kernel_input:
1726
case nir_intrinsic_load_interpolated_input: {
1727
bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo;
1728
bool is_global = instr->intrinsic == nir_intrinsic_load_global ||
1729
instr->intrinsic == nir_intrinsic_load_global_constant;
1730
bool is_shared = instr->intrinsic == nir_intrinsic_load_shared;
1731
bool is_scratch = instr->intrinsic == nir_intrinsic_load_scratch;
1732
bool is_flat = instr->intrinsic == nir_intrinsic_load_input;
1733
bool is_kernel = instr->intrinsic == nir_intrinsic_load_kernel_input;
1734
bool is_interp = instr->intrinsic == nir_intrinsic_load_interpolated_input;
1735
1736
/* Get the base type of the intrinsic */
1737
/* TODO: Infer type? Does it matter? */
1738
nir_alu_type t =
1739
(is_interp) ? nir_type_float :
1740
(is_flat) ? nir_intrinsic_dest_type(instr) :
1741
nir_type_uint;
1742
1743
t = nir_alu_type_get_base_type(t);
1744
1745
if (!(is_ubo || is_global || is_scratch)) {
1746
offset = nir_intrinsic_base(instr);
1747
}
1748
1749
unsigned nr_comp = nir_intrinsic_dest_components(instr);
1750
1751
nir_src *src_offset = nir_get_io_offset_src(instr);
1752
1753
bool direct = nir_src_is_const(*src_offset);
1754
nir_src *indirect_offset = direct ? NULL : src_offset;
1755
1756
if (direct)
1757
offset += nir_src_as_uint(*src_offset);
1758
1759
/* We may need to apply a fractional offset */
1760
int component = (is_flat || is_interp) ?
1761
nir_intrinsic_component(instr) : 0;
1762
reg = nir_dest_index(&instr->dest);
1763
1764
if (is_kernel) {
1765
emit_ubo_read(ctx, &instr->instr, reg, offset, indirect_offset, 0, 0, nr_comp);
1766
} else if (is_ubo) {
1767
nir_src index = instr->src[0];
1768
1769
/* TODO: Is indirect block number possible? */
1770
assert(nir_src_is_const(index));
1771
1772
uint32_t uindex = nir_src_as_uint(index);
1773
emit_ubo_read(ctx, &instr->instr, reg, offset, indirect_offset, 0, uindex, nr_comp);
1774
} else if (is_global || is_shared || is_scratch) {
1775
unsigned seg = is_global ? LDST_GLOBAL : (is_shared ? LDST_SHARED : LDST_SCRATCH);
1776
emit_global(ctx, &instr->instr, true, reg, src_offset, seg);
1777
} else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->inputs->is_blend) {
1778
emit_varying_read(ctx, reg, offset, nr_comp, component, indirect_offset, t | nir_dest_bit_size(instr->dest), is_flat);
1779
} else if (ctx->inputs->is_blend) {
1780
/* ctx->blend_input will be precoloured to r0/r2, where
1781
* the input is preloaded */
1782
1783
unsigned *input = offset ? &ctx->blend_src1 : &ctx->blend_input;
1784
1785
if (*input == ~0)
1786
*input = reg;
1787
else
1788
emit_mir_instruction(ctx, v_mov(*input, reg));
1789
} else if (ctx->stage == MESA_SHADER_VERTEX) {
1790
emit_attr_read(ctx, reg, offset, nr_comp, t);
1791
} else {
1792
DBG("Unknown load\n");
1793
assert(0);
1794
}
1795
1796
break;
1797
}
1798
1799
/* Handled together with load_interpolated_input */
1800
case nir_intrinsic_load_barycentric_pixel:
1801
case nir_intrinsic_load_barycentric_centroid:
1802
case nir_intrinsic_load_barycentric_sample:
1803
break;
1804
1805
/* Reads 128-bit value raw off the tilebuffer during blending, tasty */
1806
1807
case nir_intrinsic_load_raw_output_pan: {
1808
reg = nir_dest_index(&instr->dest);
1809
1810
/* T720 and below use different blend opcodes with slightly
1811
* different semantics than T760 and up */
1812
1813
midgard_instruction ld = m_ld_tilebuffer_raw(reg, 0);
1814
1815
unsigned target = output_load_rt_addr(ctx, instr);
1816
ld.load_store.index_comp = target & 0x3;
1817
ld.load_store.index_reg = target >> 2;
1818
1819
if (nir_src_is_const(instr->src[0])) {
1820
unsigned sample = nir_src_as_uint(instr->src[0]);
1821
ld.load_store.arg_comp = sample & 0x3;
1822
ld.load_store.arg_reg = sample >> 2;
1823
} else {
1824
/* Enable sample index via register. */
1825
ld.load_store.signed_offset |= 1;
1826
ld.src[1] = nir_src_index(ctx, &instr->src[0]);
1827
ld.src_types[1] = nir_type_int32;
1828
}
1829
1830
if (ctx->quirks & MIDGARD_OLD_BLEND) {
1831
ld.op = midgard_op_ld_special_32u;
1832
ld.load_store.signed_offset = PACK_LDST_SELECTOR_OFS(16);
1833
ld.load_store.index_reg = REGISTER_LDST_ZERO;
1834
}
1835
1836
emit_mir_instruction(ctx, ld);
1837
break;
1838
}
1839
1840
case nir_intrinsic_load_output: {
1841
reg = nir_dest_index(&instr->dest);
1842
1843
unsigned bits = nir_dest_bit_size(instr->dest);
1844
1845
midgard_instruction ld;
1846
if (bits == 16)
1847
ld = m_ld_tilebuffer_16f(reg, 0);
1848
else
1849
ld = m_ld_tilebuffer_32f(reg, 0);
1850
1851
unsigned index = output_load_rt_addr(ctx, instr);
1852
ld.load_store.index_comp = index & 0x3;
1853
ld.load_store.index_reg = index >> 2;
1854
1855
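/* Only a vec4 is read back; zero the unused upper swizzle slots */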
for (unsigned c = 4; c < 16; ++c)
1856
ld.swizzle[0][c] = 0;
1857
1858
if (ctx->quirks & MIDGARD_OLD_BLEND) {
1859
if (bits == 16)
1860
ld.op = midgard_op_ld_special_16f;
1861
else
1862
ld.op = midgard_op_ld_special_32f;
1863
ld.load_store.signed_offset = PACK_LDST_SELECTOR_OFS(1);
1864
ld.load_store.index_reg = REGISTER_LDST_ZERO;
1865
}
1866
1867
emit_mir_instruction(ctx, ld);
1868
break;
1869
}
1870
1871
case nir_intrinsic_store_output:
1872
case nir_intrinsic_store_combined_output_pan:
1873
assert(nir_src_is_const(instr->src[1]) && "no indirect outputs");
1874
1875
offset = nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[1]);
1876
1877
reg = nir_src_index(ctx, &instr->src[0]);
1878
1879
if (ctx->stage == MESA_SHADER_FRAGMENT) {
1880
bool combined = instr->intrinsic ==
1881
nir_intrinsic_store_combined_output_pan;
1882
1883
const nir_variable *var;
1884
var = nir_find_variable_with_driver_location(ctx->nir, nir_var_shader_out,
1885
nir_intrinsic_base(instr));
1886
assert(var);
1887
1888
/* Dual-source blend writeout is done by leaving the
1889
* value in r2 for the blend shader to use. */
1890
if (var->data.index) {
1891
if (instr->src[0].is_ssa) {
1892
emit_explicit_constant(ctx, reg, reg);
1893
1894
unsigned out = make_compiler_temp(ctx);
1895
1896
midgard_instruction ins = v_mov(reg, out);
1897
emit_mir_instruction(ctx, ins);
1898
1899
ctx->blend_src1 = out;
1900
} else {
1901
ctx->blend_src1 = reg;
1902
}
1903
1904
break;
1905
}
1906
1907
enum midgard_rt_id rt;
1908
if (var->data.location >= FRAG_RESULT_DATA0)
1909
rt = MIDGARD_COLOR_RT0 + var->data.location -
1910
FRAG_RESULT_DATA0;
1911
else if (combined)
1912
rt = MIDGARD_ZS_RT;
1913
else
1914
unreachable("bad rt");
1915
1916
unsigned reg_z = ~0, reg_s = ~0;
1917
if (combined) {
1918
unsigned writeout = nir_intrinsic_component(instr);
1919
if (writeout & PAN_WRITEOUT_Z)
1920
reg_z = nir_src_index(ctx, &instr->src[2]);
1921
if (writeout & PAN_WRITEOUT_S)
1922
reg_s = nir_src_index(ctx, &instr->src[3]);
1923
}
1924
1925
emit_fragment_store(ctx, reg, reg_z, reg_s, rt, 0);
1926
} else if (ctx->stage == MESA_SHADER_VERTEX) {
1927
assert(instr->intrinsic == nir_intrinsic_store_output);
1928
1929
/* We should have been vectorized, though we don't
1930
* currently check that st_vary is emitted only once
1931
* per slot (this is relevant, since there's not a mask
1932
* parameter available on the store [set to 0 by the
1933
* blob]). We do respect the component by adjusting the
1934
* swizzle. If this is a constant source, we'll need to
1935
* emit that explicitly. */
1936
1937
emit_explicit_constant(ctx, reg, reg);
1938
1939
unsigned dst_component = nir_intrinsic_component(instr);
1940
unsigned nr_comp = nir_src_num_components(instr->src[0]);
1941
1942
midgard_instruction st = m_st_vary_32(reg, PACK_LDST_ATTRIB_OFS(offset));
1943
st.load_store.arg_reg = REGISTER_LDST_ZERO;
1944
st.load_store.index_format = midgard_index_address_u32;
1945
st.load_store.index_reg = REGISTER_LDST_ZERO;
1946
1947
switch (nir_alu_type_get_base_type(nir_intrinsic_src_type(instr))) {
1948
case nir_type_uint:
1949
case nir_type_bool:
1950
st.op = midgard_op_st_vary_32u;
1951
break;
1952
case nir_type_int:
1953
st.op = midgard_op_st_vary_32i;
1954
break;
1955
case nir_type_float:
1956
st.op = midgard_op_st_vary_32;
1957
break;
1958
default:
1959
unreachable("Attempted to store unknown type");
1960
break;
1961
}
1962
1963
/* nir_intrinsic_component(store_intr) encodes the
1964
* destination component start. Source component offset
1965
* adjustment is taken care of in
1966
* install_registers_instr(), when offset_swizzle() is
1967
* called.
1968
*/
1969
unsigned src_component = COMPONENT_X;
1970
1971
assert(nr_comp > 0);
1972
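/* Map destination components [dst_component, dst_component + nr_comp)
 * to consecutive source components starting at X; slots outside that
 * window just replicate a neighbouring source component. */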
for (unsigned i = 0; i < ARRAY_SIZE(st.swizzle); ++i) {
1973
st.swizzle[0][i] = src_component;
1974
if (i >= dst_component && i < dst_component + nr_comp - 1)
1975
src_component++;
1976
}
1977
1978
emit_mir_instruction(ctx, st);
1979
} else {
1980
DBG("Unknown store\n");
1981
assert(0);
1982
}
1983
1984
break;
1985
1986
/* Special case of store_output for lowered blend shaders */
1987
case nir_intrinsic_store_raw_output_pan:
1988
assert (ctx->stage == MESA_SHADER_FRAGMENT);
1989
reg = nir_src_index(ctx, &instr->src[0]);
1990
for (unsigned s = 0; s < ctx->blend_sample_iterations; s++)
1991
emit_fragment_store(ctx, reg, ~0, ~0,
1992
ctx->inputs->blend.rt + MIDGARD_COLOR_RT0,
1993
s);
1994
break;
1995
1996
case nir_intrinsic_store_global:
1997
case nir_intrinsic_store_shared:
1998
case nir_intrinsic_store_scratch:
1999
reg = nir_src_index(ctx, &instr->src[0]);
2000
emit_explicit_constant(ctx, reg, reg);
2001
2002
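/* Pick the load/store segment matching the intrinsic's address space */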
unsigned seg;
2003
if (instr->intrinsic == nir_intrinsic_store_global)
2004
seg = LDST_GLOBAL;
2005
else if (instr->intrinsic == nir_intrinsic_store_shared)
2006
seg = LDST_SHARED;
2007
else
2008
seg = LDST_SCRATCH;
2009
2010
emit_global(ctx, &instr->instr, false, reg, &instr->src[1], seg);
2011
break;
2012
2013
case nir_intrinsic_load_first_vertex:
2014
case nir_intrinsic_load_ssbo_address:
2015
case nir_intrinsic_load_work_dim:
2016
emit_sysval_read(ctx, &instr->instr, 1, 0);
2017
break;
2018
2019
case nir_intrinsic_load_base_vertex:
2020
emit_sysval_read(ctx, &instr->instr, 1, 4);
2021
break;
2022
2023
case nir_intrinsic_load_base_instance:
2024
emit_sysval_read(ctx, &instr->instr, 1, 8);
2025
break;
2026
2027
case nir_intrinsic_load_sample_positions_pan:
2028
emit_sysval_read(ctx, &instr->instr, 2, 0);
2029
break;
2030
2031
case nir_intrinsic_get_ssbo_size:
2032
emit_sysval_read(ctx, &instr->instr, 1, 8);
2033
break;
2034
2035
case nir_intrinsic_load_viewport_scale:
2036
case nir_intrinsic_load_viewport_offset:
2037
case nir_intrinsic_load_num_workgroups:
2038
case nir_intrinsic_load_sampler_lod_parameters_pan:
2039
case nir_intrinsic_load_workgroup_size:
2040
emit_sysval_read(ctx, &instr->instr, 3, 0);
2041
break;
2042
2043
case nir_intrinsic_load_workgroup_id:
2044
case nir_intrinsic_load_local_invocation_id:
2045
case nir_intrinsic_load_global_invocation_id:
2046
case nir_intrinsic_load_global_invocation_id_zero_base:
2047
emit_compute_builtin(ctx, instr);
2048
break;
2049
2050
case nir_intrinsic_load_vertex_id_zero_base:
2051
case nir_intrinsic_load_instance_id:
2052
emit_vertex_builtin(ctx, instr);
2053
break;
2054
2055
case nir_intrinsic_load_sample_mask_in:
2056
emit_special(ctx, instr, 96);
2057
break;
2058
2059
case nir_intrinsic_load_sample_id:
2060
emit_special(ctx, instr, 97);
2061
break;
2062
2063
/* Midgard doesn't seem to want special handling */
2064
case nir_intrinsic_memory_barrier:
2065
case nir_intrinsic_memory_barrier_buffer:
2066
case nir_intrinsic_memory_barrier_image:
2067
case nir_intrinsic_memory_barrier_shared:
2068
case nir_intrinsic_group_memory_barrier:
2069
break;
2070
2071
case nir_intrinsic_control_barrier:
2072
schedule_barrier(ctx);
2073
emit_control_barrier(ctx);
2074
schedule_barrier(ctx);
2075
break;
2076
2077
ATOMIC_CASE(ctx, instr, add, add);
2078
ATOMIC_CASE(ctx, instr, and, and);
2079
ATOMIC_CASE(ctx, instr, comp_swap, cmpxchg);
2080
ATOMIC_CASE(ctx, instr, exchange, xchg);
2081
ATOMIC_CASE(ctx, instr, imax, imax);
2082
ATOMIC_CASE(ctx, instr, imin, imin);
2083
ATOMIC_CASE(ctx, instr, or, or);
2084
ATOMIC_CASE(ctx, instr, umax, umax);
2085
ATOMIC_CASE(ctx, instr, umin, umin);
2086
ATOMIC_CASE(ctx, instr, xor, xor);
2087
2088
IMAGE_ATOMIC_CASE(ctx, instr, add, add);
2089
IMAGE_ATOMIC_CASE(ctx, instr, and, and);
2090
IMAGE_ATOMIC_CASE(ctx, instr, comp_swap, cmpxchg);
2091
IMAGE_ATOMIC_CASE(ctx, instr, exchange, xchg);
2092
IMAGE_ATOMIC_CASE(ctx, instr, imax, imax);
2093
IMAGE_ATOMIC_CASE(ctx, instr, imin, imin);
2094
IMAGE_ATOMIC_CASE(ctx, instr, or, or);
2095
IMAGE_ATOMIC_CASE(ctx, instr, umax, umax);
2096
IMAGE_ATOMIC_CASE(ctx, instr, umin, umin);
2097
IMAGE_ATOMIC_CASE(ctx, instr, xor, xor);
2098
2099
default:
2100
fprintf(stderr, "Unhandled intrinsic %s\n", nir_intrinsic_infos[instr->intrinsic].name);
2101
assert(0);
2102
break;
2103
}
2104
}
2105
2106
/* Returns the dimension, with 0 special-casing cubemaps */
2107
static unsigned
2108
midgard_tex_format(enum glsl_sampler_dim dim)
2109
{
2110
switch (dim) {
2111
case GLSL_SAMPLER_DIM_1D:
2112
case GLSL_SAMPLER_DIM_BUF:
2113
return 1;
2114
2115
case GLSL_SAMPLER_DIM_2D:
2116
case GLSL_SAMPLER_DIM_MS:
2117
case GLSL_SAMPLER_DIM_EXTERNAL:
2118
case GLSL_SAMPLER_DIM_RECT:
2119
return 2;
2120
2121
case GLSL_SAMPLER_DIM_3D:
2122
return 3;
2123
2124
case GLSL_SAMPLER_DIM_CUBE:
2125
return 0;
2126
2127
default:
2128
DBG("Unknown sampler dim type\n");
2129
assert(0);
2130
return 0;
2131
}
2132
}
2133
2134
/* Tries to attach an explicit LOD or bias as a constant. Returns whether this
2135
* was successful */
2136
2137
static bool
2138
pan_attach_constant_bias(
2139
compiler_context *ctx,
2140
nir_src lod,
2141
midgard_texture_word *word)
2142
{
2143
/* To attach as constant, it has to *be* constant */
2144
2145
if (!nir_src_is_const(lod))
2146
return false;
2147
2148
float f = nir_src_as_float(lod);
2149
2150
/* Break into fixed-point */
2151
signed lod_int = f;
2152
float lod_frac = f - lod_int;
2153
2154
/* Carry over negative fractions */
2155
if (lod_frac < 0.0) {
2156
lod_int--;
2157
lod_frac += 1.0;
2158
}
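/* e.g. an LOD of -0.5 splits into lod_int = -1 and lod_frac = 0.5 */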
2159
2160
/* Encode */
2161
word->bias = float_to_ubyte(lod_frac);
2162
word->bias_int = lod_int;
2163
2164
return true;
2165
}
2166
2167
static enum mali_texture_mode
2168
mdg_texture_mode(nir_tex_instr *instr)
2169
{
2170
if (instr->op == nir_texop_tg4 && instr->is_shadow)
2171
return TEXTURE_GATHER_SHADOW;
2172
else if (instr->op == nir_texop_tg4)
2173
return TEXTURE_GATHER_X + instr->component;
2174
else if (instr->is_shadow)
2175
return TEXTURE_SHADOW;
2176
else
2177
return TEXTURE_NORMAL;
2178
}
2179
2180
static void
2181
set_tex_coord(compiler_context *ctx, nir_tex_instr *instr,
2182
midgard_instruction *ins)
2183
{
2184
int coord_idx = nir_tex_instr_src_index(instr, nir_tex_src_coord);
2185
2186
assert(coord_idx >= 0);
2187
2188
int comparator_idx = nir_tex_instr_src_index(instr, nir_tex_src_comparator);
2189
int ms_idx = nir_tex_instr_src_index(instr, nir_tex_src_ms_index);
2190
assert(comparator_idx < 0 || ms_idx < 0);
2191
int ms_or_comparator_idx = ms_idx >= 0 ? ms_idx : comparator_idx;
2192
2193
unsigned coords = nir_src_index(ctx, &instr->src[coord_idx].src);
2194
2195
emit_explicit_constant(ctx, coords, coords);
2196
2197
ins->src_types[1] = nir_tex_instr_src_type(instr, coord_idx) |
2198
nir_src_bit_size(instr->src[coord_idx].src);
2199
2200
unsigned nr_comps = instr->coord_components;
2201
unsigned written_mask = 0, write_mask = 0;
2202
2203
/* Initialize all components to coord.x which is expected to always be
2204
* present. Swizzle is updated below based on the texture dimension
2205
* and extra attributes that are packed in the coordinate argument.
2206
*/
2207
for (unsigned c = 0; c < MIR_VEC_COMPONENTS; c++)
2208
ins->swizzle[1][c] = COMPONENT_X;
2209
2210
/* Shadow ref value is part of the coordinates if there's no comparator
2211
* source; in that case it's always placed in the last component.
2212
* Midgard wants the ref value in coord.z.
2213
*/
2214
if (instr->is_shadow && comparator_idx < 0) {
2215
ins->swizzle[1][COMPONENT_Z] = --nr_comps;
2216
write_mask |= 1 << COMPONENT_Z;
2217
}
2218
2219
/* The array index is the last component if there's no shadow ref value
2220
* or second last if there's one. We already decremented the number of
2221
* components to account for the shadow ref value above.
2222
* Midgard wants the array index in coord.w.
2223
*/
2224
if (instr->is_array) {
2225
ins->swizzle[1][COMPONENT_W] = --nr_comps;
2226
write_mask |= 1 << COMPONENT_W;
2227
}
2228
2229
if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
2230
/* texelFetch is undefined on samplerCube */
2231
assert(ins->op != midgard_tex_op_fetch);
2232
2233
ins->src[1] = make_compiler_temp_reg(ctx);
2234
2235
/* For cubemaps, we use a special ld/st op to select the face
2236
* and copy the xy into the texture register
2237
*/
2238
midgard_instruction ld = m_ld_cubemap_coords(ins->src[1], 0);
2239
ld.src[1] = coords;
2240
ld.src_types[1] = ins->src_types[1];
2241
ld.mask = 0x3; /* xy */
2242
ld.load_store.bitsize_toggle = true;
2243
ld.swizzle[1][3] = COMPONENT_X;
2244
emit_mir_instruction(ctx, ld);
2245
2246
/* We packed cube coordinates (X,Y,Z) into (X,Y), update the
2247
* written mask accordingly and decrement the number of
2248
* components
2249
*/
2250
nr_comps--;
2251
written_mask |= 3;
2252
}
2253
2254
/* Now flag tex coord components that have not been written yet */
2255
write_mask |= mask_of(nr_comps) & ~written_mask;
2256
for (unsigned c = 0; c < nr_comps; c++)
2257
ins->swizzle[1][c] = c;
2258
2259
/* Sample index and shadow ref are expected in coord.z */
2260
if (ms_or_comparator_idx >= 0) {
2261
assert(!((write_mask | written_mask) & (1 << COMPONENT_Z)));
2262
2263
unsigned sample_or_ref =
2264
nir_src_index(ctx, &instr->src[ms_or_comparator_idx].src);
2265
2266
emit_explicit_constant(ctx, sample_or_ref, sample_or_ref);
2267
2268
if (ins->src[1] == ~0)
2269
ins->src[1] = make_compiler_temp_reg(ctx);
2270
2271
midgard_instruction mov = v_mov(sample_or_ref, ins->src[1]);
2272
2273
for (unsigned c = 0; c < MIR_VEC_COMPONENTS; c++)
2274
mov.swizzle[1][c] = COMPONENT_X;
2275
2276
mov.mask = 1 << COMPONENT_Z;
2277
written_mask |= 1 << COMPONENT_Z;
2278
ins->swizzle[1][COMPONENT_Z] = COMPONENT_Z;
2279
emit_mir_instruction(ctx, mov);
2280
}
2281
2282
/* Texelfetch coordinates use all four elements (xyz/index) regardless
2283
* of texture dimensionality, which means it's necessary to zero the
2284
* unused components to keep everything happy.
2285
*/
2286
if (ins->op == midgard_tex_op_fetch &&
2287
(written_mask | write_mask) != 0xF) {
2288
if (ins->src[1] == ~0)
2289
ins->src[1] = make_compiler_temp_reg(ctx);
2290
2291
/* mov index.zw, #0, or generalized */
2292
midgard_instruction mov =
2293
v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), ins->src[1]);
2294
mov.has_constants = true;
2295
mov.mask = (written_mask | write_mask) ^ 0xF;
2296
emit_mir_instruction(ctx, mov);
2297
for (unsigned c = 0; c < MIR_VEC_COMPONENTS; c++) {
2298
if (mov.mask & (1 << c))
2299
ins->swizzle[1][c] = c;
2300
}
2301
}
2302
2303
if (ins->src[1] == ~0) {
2304
/* No temporary reg created, use the src coords directly */
2305
ins->src[1] = coords;
2306
} else if (write_mask) {
2307
/* Move the remaining coordinates to the temporary reg */
2308
midgard_instruction mov = v_mov(coords, ins->src[1]);
2309
2310
for (unsigned c = 0; c < MIR_VEC_COMPONENTS; c++) {
2311
if ((1 << c) & write_mask) {
2312
mov.swizzle[1][c] = ins->swizzle[1][c];
2313
ins->swizzle[1][c] = c;
2314
} else {
2315
mov.swizzle[1][c] = COMPONENT_X;
2316
}
2317
}
2318
2319
mov.mask = write_mask;
2320
emit_mir_instruction(ctx, mov);
2321
}
2322
}
2323
2324
static void
2325
emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
2326
unsigned midgard_texop)
2327
{
2328
/* TODO */
2329
//assert (!instr->sampler);
2330
2331
nir_dest *dest = &instr->dest;
2332
2333
int texture_index = instr->texture_index;
2334
int sampler_index = instr->sampler_index;
2335
2336
nir_alu_type dest_base = nir_alu_type_get_base_type(instr->dest_type);
2337
2338
/* texture instructions support float outmods */
2339
unsigned outmod = midgard_outmod_none;
2340
if (dest_base == nir_type_float) {
2341
outmod = mir_determine_float_outmod(ctx, &dest, 0);
2342
}
2343
2344
midgard_instruction ins = {
2345
.type = TAG_TEXTURE_4,
2346
.mask = 0xF,
2347
.dest = nir_dest_index(dest),
2348
.src = { ~0, ~0, ~0, ~0 },
2349
.dest_type = instr->dest_type,
2350
.swizzle = SWIZZLE_IDENTITY_4,
2351
.outmod = outmod,
2352
.op = midgard_texop,
2353
.texture = {
2354
.format = midgard_tex_format(instr->sampler_dim),
2355
.texture_handle = texture_index,
2356
.sampler_handle = sampler_index,
2357
.mode = mdg_texture_mode(instr)
2358
}
2359
};
2360
2361
if (instr->is_shadow && !instr->is_new_style_shadow && instr->op != nir_texop_tg4)
2362
for (int i = 0; i < 4; ++i)
2363
ins.swizzle[0][i] = COMPONENT_X;
2364
2365
for (unsigned i = 0; i < instr->num_srcs; ++i) {
2366
int index = nir_src_index(ctx, &instr->src[i].src);
2367
unsigned sz = nir_src_bit_size(instr->src[i].src);
2368
nir_alu_type T = nir_tex_instr_src_type(instr, i) | sz;
2369
2370
switch (instr->src[i].src_type) {
2371
case nir_tex_src_coord:
2372
set_tex_coord(ctx, instr, &ins);
2373
break;
2374
2375
case nir_tex_src_bias:
2376
case nir_tex_src_lod: {
2377
/* Try as a constant if we can */
2378
2379
bool is_txf = midgard_texop == midgard_tex_op_fetch;
2380
if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
2381
break;
2382
2383
ins.texture.lod_register = true;
2384
ins.src[2] = index;
2385
ins.src_types[2] = T;
2386
2387
for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
2388
ins.swizzle[2][c] = COMPONENT_X;
2389
2390
emit_explicit_constant(ctx, index, index);
2391
2392
break;
2393
};
2394
2395
case nir_tex_src_offset: {
2396
ins.texture.offset_register = true;
2397
ins.src[3] = index;
2398
ins.src_types[3] = T;
2399
2400
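/* Keep an identity swizzle for the offset lanes; components past Z fall back to X */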
for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
2401
ins.swizzle[3][c] = (c > COMPONENT_Z) ? 0 : c;
2402
2403
emit_explicit_constant(ctx, index, index);
2404
break;
2405
};
2406
2407
case nir_tex_src_comparator:
2408
case nir_tex_src_ms_index:
2409
/* Nothing to do, handled in set_tex_coord() */
2410
break;
2411
2412
default: {
2413
fprintf(stderr, "Unknown texture source type: %d\n", instr->src[i].src_type);
2414
assert(0);
2415
}
2416
}
2417
}
2418
2419
emit_mir_instruction(ctx, ins);
2420
}
2421
2422
static void
2423
emit_tex(compiler_context *ctx, nir_tex_instr *instr)
2424
{
2425
switch (instr->op) {
2426
case nir_texop_tex:
2427
case nir_texop_txb:
2428
emit_texop_native(ctx, instr, midgard_tex_op_normal);
2429
break;
2430
case nir_texop_txl:
2431
case nir_texop_tg4:
2432
emit_texop_native(ctx, instr, midgard_tex_op_gradient);
2433
break;
2434
case nir_texop_txf:
2435
case nir_texop_txf_ms:
2436
emit_texop_native(ctx, instr, midgard_tex_op_fetch);
2437
break;
2438
case nir_texop_txs:
2439
emit_sysval_read(ctx, &instr->instr, 4, 0);
2440
break;
2441
default: {
2442
fprintf(stderr, "Unhandled texture op: %d\n", instr->op);
2443
assert(0);
2444
}
2445
}
2446
}
2447
2448
static void
2449
emit_jump(compiler_context *ctx, nir_jump_instr *instr)
2450
{
2451
switch (instr->type) {
2452
case nir_jump_break: {
2453
/* Emit a branch out of the loop */
2454
struct midgard_instruction br = v_branch(false, false);
2455
br.branch.target_type = TARGET_BREAK;
2456
br.branch.target_break = ctx->current_loop_depth;
2457
emit_mir_instruction(ctx, br);
2458
break;
2459
}
2460
2461
default:
2462
DBG("Unknown jump type %d\n", instr->type);
2463
break;
2464
}
2465
}
2466
2467
static void
2468
emit_instr(compiler_context *ctx, struct nir_instr *instr)
2469
{
2470
switch (instr->type) {
2471
case nir_instr_type_load_const:
2472
emit_load_const(ctx, nir_instr_as_load_const(instr));
2473
break;
2474
2475
case nir_instr_type_intrinsic:
2476
emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
2477
break;
2478
2479
case nir_instr_type_alu:
2480
emit_alu(ctx, nir_instr_as_alu(instr));
2481
break;
2482
2483
case nir_instr_type_tex:
2484
emit_tex(ctx, nir_instr_as_tex(instr));
2485
break;
2486
2487
case nir_instr_type_jump:
2488
emit_jump(ctx, nir_instr_as_jump(instr));
2489
break;
2490
2491
case nir_instr_type_ssa_undef:
2492
/* Spurious */
2493
break;
2494
2495
default:
2496
DBG("Unhandled instruction type\n");
2497
break;
2498
}
2499
}
2500
2501
2502
/* ALU instructions can inline or embed constants, which decreases register
2503
* pressure and saves space. */
2504
2505
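/* If src[idx] has a known SSA constant, attach it as this instruction's
 * embedded constant and rewrite the source to the constant register. */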
#define CONDITIONAL_ATTACH(idx) { \
2506
void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[idx] + 1); \
2507
\
2508
if (entry) { \
2509
attach_constants(ctx, alu, entry, alu->src[idx] + 1); \
2510
alu->src[idx] = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
2511
} \
2512
}
2513
2514
static void
2515
inline_alu_constants(compiler_context *ctx, midgard_block *block)
2516
{
2517
mir_foreach_instr_in_block(block, alu) {
2518
/* Other instructions cannot inline constants */
2519
if (alu->type != TAG_ALU_4) continue;
2520
if (alu->compact_branch) continue;
2521
2522
/* If there is already a constant here, we can do nothing */
2523
if (alu->has_constants) continue;
2524
2525
CONDITIONAL_ATTACH(0);
2526
2527
if (!alu->has_constants) {
2528
CONDITIONAL_ATTACH(1)
2529
} else if (!alu->inline_constant) {
2530
/* Corner case: _two_ vec4 constants, for instance with a
2531
* csel. For this case, we can only use a constant
2532
* register for one, we'll have to emit a move for the
2533
* other. */
2534
2535
void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[1] + 1);
2536
unsigned scratch = make_compiler_temp(ctx);
2537
2538
if (entry) {
2539
midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), scratch);
2540
attach_constants(ctx, &ins, entry, alu->src[1] + 1);
2541
2542
/* Set the source */
2543
alu->src[1] = scratch;
2544
2545
/* Inject us -before- the last instruction which set r31 */
2546
mir_insert_instruction_before(ctx, mir_prev_op(alu), ins);
2547
}
2548
}
2549
}
2550
}
2551
2552
unsigned
2553
max_bitsize_for_alu(midgard_instruction *ins)
2554
{
2555
unsigned max_bitsize = 0;
2556
for (int i = 0; i < MIR_SRC_COUNT; i++) {
2557
if (ins->src[i] == ~0) continue;
2558
unsigned src_bitsize = nir_alu_type_get_type_size(ins->src_types[i]);
2559
max_bitsize = MAX2(src_bitsize, max_bitsize);
2560
}
2561
unsigned dst_bitsize = nir_alu_type_get_type_size(ins->dest_type);
2562
max_bitsize = MAX2(dst_bitsize, max_bitsize);
2563
2564
/* We don't have fp16 LUTs, so we'll want to emit code like:
2565
*
2566
* vlut.fsinr hr0, hr0
2567
*
2568
* where both input and output are 16-bit but the operation is carried
2569
* out in 32-bit
2570
*/
2571
2572
switch (ins->op) {
2573
case midgard_alu_op_fsqrt:
2574
case midgard_alu_op_frcp:
2575
case midgard_alu_op_frsqrt:
2576
case midgard_alu_op_fsinpi:
2577
case midgard_alu_op_fcospi:
2578
case midgard_alu_op_fexp2:
2579
case midgard_alu_op_flog2:
2580
max_bitsize = MAX2(max_bitsize, 32);
2581
break;
2582
2583
default:
2584
break;
2585
}
2586
2587
/* High implies computing at a higher bitsize, e.g umul_high of 32-bit
2588
* requires computing at 64-bit */
2589
if (midgard_is_integer_out_op(ins->op) && ins->outmod == midgard_outmod_keephi) {
2590
max_bitsize *= 2;
2591
assert(max_bitsize <= 64);
2592
}
2593
2594
return max_bitsize;
2595
}
2596
2597
midgard_reg_mode
2598
reg_mode_for_bitsize(unsigned bitsize)
2599
{
2600
switch (bitsize) {
2601
/* use the 16-bit pipe for 8-bit since we don't support vec16 yet */
2602
case 8:
2603
case 16:
2604
return midgard_reg_mode_16;
2605
case 32:
2606
return midgard_reg_mode_32;
2607
case 64:
2608
return midgard_reg_mode_64;
2609
default:
2610
unreachable("invalid bit size");
2611
}
2612
}
2613
2614
/* Midgard supports two types of constants, embedded constants (128-bit) and
2615
* inline constants (16-bit). Sometimes, especially with scalar ops, embedded
2616
* constants can be demoted to inline constants, for space savings and
2617
* sometimes a performance boost */
2618
2619
static void
2620
embedded_to_inline_constant(compiler_context *ctx, midgard_block *block)
2621
{
2622
mir_foreach_instr_in_block(block, ins) {
2623
if (!ins->has_constants) continue;
2624
if (ins->has_inline_constant) continue;
2625
2626
unsigned max_bitsize = max_bitsize_for_alu(ins);
2627
2628
/* We can inline 32-bit (sometimes) or 16-bit (usually) */
2629
bool is_16 = max_bitsize == 16;
2630
bool is_32 = max_bitsize == 32;
2631
2632
if (!(is_16 || is_32))
2633
continue;
2634
2635
/* src1 cannot be an inline constant due to encoding
2636
* restrictions. So, if possible we try to flip the arguments
2637
* in that case */
2638
2639
int op = ins->op;
2640
2641
if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT) &&
2642
alu_opcode_props[op].props & OP_COMMUTES) {
2643
mir_flip(ins);
2644
}
2645
2646
if (ins->src[1] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
2647
/* The component comes from the swizzle; take one that's actually written */
2648
assert(ins->mask);
2649
unsigned first_comp = ffs(ins->mask) - 1;
2650
unsigned component = ins->swizzle[1][first_comp];
2651
2652
/* Scale constant appropriately, if we can legally */
2653
int16_t scaled_constant = 0;
2654
2655
if (is_16) {
2656
scaled_constant = ins->constants.u16[component];
2657
} else if (midgard_is_integer_op(op)) {
2658
scaled_constant = ins->constants.u32[component];
2659
2660
/* Constant overflow after resize */
2661
if (scaled_constant != ins->constants.u32[component])
2662
continue;
2663
} else {
2664
float original = ins->constants.f32[component];
2665
scaled_constant = _mesa_float_to_half(original);
2666
2667
/* Check for loss of precision. If this is
2668
* mediump, we don't care, but for a highp
2669
* shader, we need to pay attention. NIR
2670
* doesn't yet tell us which mode we're in!
2671
* Practically this prevents most constants
2672
* from being inlined, sadly. */
2673
2674
float fp32 = _mesa_half_to_float(scaled_constant);
2675
2676
if (fp32 != original)
2677
continue;
2678
}
2679
2680
/* Should've been const folded */
2681
if (ins->src_abs[1] || ins->src_neg[1])
2682
continue;
2683
2684
/* Make sure that the constant is not itself a vector
2685
* by checking if all accessed values are the same. */
2686
2687
const midgard_constants *cons = &ins->constants;
2688
uint32_t value = is_16 ? cons->u16[component] : cons->u32[component];
2689
2690
bool is_vector = false;
2691
unsigned mask = effective_writemask(ins->op, ins->mask);
2692
2693
for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
2694
/* We only care if this component is actually used */
2695
if (!(mask & (1 << c)))
2696
continue;
2697
2698
uint32_t test = is_16 ?
2699
cons->u16[ins->swizzle[1][c]] :
2700
cons->u32[ins->swizzle[1][c]];
2701
2702
if (test != value) {
2703
is_vector = true;
2704
break;
2705
}
2706
}
2707
2708
if (is_vector)
2709
continue;
2710
2711
/* Get rid of the embedded constant */
2712
ins->has_constants = false;
2713
ins->src[1] = ~0;
2714
ins->has_inline_constant = true;
2715
ins->inline_constant = scaled_constant;
2716
}
2717
}
2718
}
2719
2720
/* Dead code elimination for branches at the end of a block - only one branch
2721
* per block is legal semantically */
2722
2723
static void
2724
midgard_cull_dead_branch(compiler_context *ctx, midgard_block *block)
2725
{
2726
bool branched = false;
2727
2728
mir_foreach_instr_in_block_safe(block, ins) {
2729
if (!midgard_is_branch_unit(ins->unit)) continue;
2730
2731
if (branched)
2732
mir_remove_instruction(ins);
2733
2734
branched = true;
2735
}
2736
}
2737
2738
/* We want to force the invert on AND/OR to the second slot to legalize into
2739
* iandnot/iornot. The relevant patterns are for AND (and OR respectively)
2740
*
2741
* ~a & #b = ~a & ~(#~b)
2742
* ~a & b = b & ~a
2743
*/
2744
2745
static void
2746
midgard_legalize_invert(compiler_context *ctx, midgard_block *block)
2747
{
2748
mir_foreach_instr_in_block(block, ins) {
2749
if (ins->type != TAG_ALU_4) continue;
2750
2751
if (ins->op != midgard_alu_op_iand &&
2752
ins->op != midgard_alu_op_ior) continue;
2753
2754
if (ins->src_invert[1] || !ins->src_invert[0]) continue;
2755
2756
if (ins->has_inline_constant) {
2757
/* ~(#~a) = ~(~#a) = a, so valid, and forces both
2758
* inverts on */
2759
ins->inline_constant = ~ins->inline_constant;
2760
ins->src_invert[1] = true;
2761
} else {
2762
/* Flip to the right invert order. Note
2763
* has_inline_constant false by assumption on the
2764
* branch, so flipping makes sense. */
2765
mir_flip(ins);
2766
}
2767
}
2768
}
2769
2770
static unsigned
2771
emit_fragment_epilogue(compiler_context *ctx, unsigned rt, unsigned sample_iter)
2772
{
2773
/* Loop to ourselves */
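/* Re-emit the writeout branch in the epilogue, copying the original
 * branch's writeout flags, constants and source types; it targets the
 * most recent block, hence the self-loop. */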
2774
midgard_instruction *br = ctx->writeout_branch[rt][sample_iter];
2775
struct midgard_instruction ins = v_branch(false, false);
2776
ins.writeout = br->writeout;
2777
ins.branch.target_block = ctx->block_count - 1;
2778
ins.constants.u32[0] = br->constants.u32[0];
2779
memcpy(&ins.src_types, &br->src_types, sizeof(ins.src_types));
2780
emit_mir_instruction(ctx, ins);
2781
2782
ctx->current_block->epilogue = true;
2783
schedule_barrier(ctx);
2784
return ins.branch.target_block;
2785
}
2786
2787
static midgard_block *
2788
emit_block_init(compiler_context *ctx)
2789
{
2790
midgard_block *this_block = ctx->after_block;
2791
ctx->after_block = NULL;
2792
2793
if (!this_block)
2794
this_block = create_empty_block(ctx);
2795
2796
list_addtail(&this_block->base.link, &ctx->blocks);
2797
2798
this_block->scheduled = false;
2799
++ctx->block_count;
2800
2801
/* Set up current block */
2802
list_inithead(&this_block->base.instructions);
2803
ctx->current_block = this_block;
2804
2805
return this_block;
2806
}
2807
2808
static midgard_block *
2809
emit_block(compiler_context *ctx, nir_block *block)
2810
{
2811
midgard_block *this_block = emit_block_init(ctx);
2812
2813
nir_foreach_instr(instr, block) {
2814
emit_instr(ctx, instr);
2815
++ctx->instruction_count;
2816
}
2817
2818
return this_block;
2819
}
2820
2821
static midgard_block *emit_cf_list(struct compiler_context *ctx, struct exec_list *list);
2822
2823
static void
2824
emit_if(struct compiler_context *ctx, nir_if *nif)
2825
{
2826
midgard_block *before_block = ctx->current_block;
2827
2828
/* Speculatively emit the branch, but we can't fill it in until later */
2829
bool inv = false;
2830
EMIT(branch, true, true);
2831
midgard_instruction *then_branch = mir_last_in_block(ctx->current_block);
2832
then_branch->src[0] = mir_get_branch_cond(&nif->condition, &inv);
2833
then_branch->src_types[0] = nir_type_uint32;
2834
then_branch->branch.invert_conditional = !inv;
2835
2836
/* Emit the two subblocks. */
2837
midgard_block *then_block = emit_cf_list(ctx, &nif->then_list);
2838
midgard_block *end_then_block = ctx->current_block;
2839
2840
/* Emit a jump from the end of the then block to the end of the else */
2841
EMIT(branch, false, false);
2842
midgard_instruction *then_exit = mir_last_in_block(ctx->current_block);
2843
2844
/* Emit second block, and check if it's empty */
2845
2846
int else_idx = ctx->block_count;
2847
int count_in = ctx->instruction_count;
2848
midgard_block *else_block = emit_cf_list(ctx, &nif->else_list);
2849
midgard_block *end_else_block = ctx->current_block;
2850
int after_else_idx = ctx->block_count;
2851
2852
/* Now that we have the subblocks emitted, fix up the branches */
2853
2854
assert(then_block);
2855
assert(else_block);
2856
2857
if (ctx->instruction_count == count_in) {
2858
/* The else block is empty, so don't emit an exit jump */
2859
mir_remove_instruction(then_exit);
2860
then_branch->branch.target_block = after_else_idx;
2861
} else {
2862
then_branch->branch.target_block = else_idx;
2863
then_exit->branch.target_block = after_else_idx;
2864
}
2865
2866
/* Wire up the successors */
2867
2868
ctx->after_block = create_empty_block(ctx);
2869
2870
pan_block_add_successor(&before_block->base, &then_block->base);
2871
pan_block_add_successor(&before_block->base, &else_block->base);
2872
2873
pan_block_add_successor(&end_then_block->base, &ctx->after_block->base);
2874
pan_block_add_successor(&end_else_block->base, &ctx->after_block->base);
2875
}
2876
2877
static void
2878
emit_loop(struct compiler_context *ctx, nir_loop *nloop)
2879
{
2880
/* Remember where we are */
2881
midgard_block *start_block = ctx->current_block;
2882
2883
/* Allocate a loop number, growing the current inner loop depth */
2884
int loop_idx = ++ctx->current_loop_depth;
2885
2886
/* Get index from before the body so we can loop back later */
2887
int start_idx = ctx->block_count;
2888
2889
/* Emit the body itself */
2890
midgard_block *loop_block = emit_cf_list(ctx, &nloop->body);
2891
2892
/* Branch back to the top of the loop */
2893
struct midgard_instruction br_back = v_branch(false, false);
2894
br_back.branch.target_block = start_idx;
2895
emit_mir_instruction(ctx, br_back);
2896
2897
/* Mark down that branch in the graph. */
2898
pan_block_add_successor(&start_block->base, &loop_block->base);
2899
pan_block_add_successor(&ctx->current_block->base, &loop_block->base);
2900
2901
/* Find the index of the block about to follow us (note: we don't add
2902
* one; blocks are 0-indexed so we get a fencepost problem) */
2903
int break_block_idx = ctx->block_count;
2904
2905
/* Fix up the break statements we emitted to point to the right place,
2906
* now that we can allocate a block number for them */
2907
ctx->after_block = create_empty_block(ctx);
2908
2909
mir_foreach_block_from(ctx, start_block, _block) {
2910
mir_foreach_instr_in_block(((midgard_block *) _block), ins) {
2911
if (ins->type != TAG_ALU_4) continue;
2912
if (!ins->compact_branch) continue;
2913
2914
/* We found a branch -- check the type to see if we need to do anything */
2915
if (ins->branch.target_type != TARGET_BREAK) continue;
2916
2917
/* It's a break! Check if it's our break */
2918
if (ins->branch.target_break != loop_idx) continue;
2919
2920
/* Okay, cool, we're breaking out of this loop.
2921
* Rewrite from a break to a goto */
2922
2923
ins->branch.target_type = TARGET_GOTO;
2924
ins->branch.target_block = break_block_idx;
2925
2926
pan_block_add_successor(_block, &ctx->after_block->base);
2927
}
2928
}
2929
2930
/* Now that we've finished emitting the loop, free up the depth again
2931
* so we play nice with recursion amid nested loops */
2932
--ctx->current_loop_depth;
2933
2934
/* Dump loop stats */
2935
++ctx->loop_count;
2936
}
2937
2938
static midgard_block *
2939
emit_cf_list(struct compiler_context *ctx, struct exec_list *list)
2940
{
2941
midgard_block *start_block = NULL;
2942
2943
foreach_list_typed(nir_cf_node, node, node, list) {
2944
switch (node->type) {
2945
case nir_cf_node_block: {
2946
midgard_block *block = emit_block(ctx, nir_cf_node_as_block(node));
2947
2948
if (!start_block)
2949
start_block = block;
2950
2951
break;
2952
}
2953
2954
case nir_cf_node_if:
2955
emit_if(ctx, nir_cf_node_as_if(node));
2956
break;
2957
2958
case nir_cf_node_loop:
2959
emit_loop(ctx, nir_cf_node_as_loop(node));
2960
break;
2961
2962
case nir_cf_node_function:
2963
assert(0);
2964
break;
2965
}
2966
}
2967
2968
return start_block;
2969
}
2970
2971
/* Due to lookahead, we need to report the first tag executed in the command
2972
* stream and in branch targets. An initial block might be empty, so iterate
2973
* until we find one that 'works' */
2974
2975
unsigned
2976
midgard_get_first_tag_from_block(compiler_context *ctx, unsigned block_idx)
2977
{
2978
midgard_block *initial_block = mir_get_block(ctx, block_idx);
2979
2980
mir_foreach_block_from(ctx, initial_block, _v) {
2981
midgard_block *v = (midgard_block *) _v;
2982
if (v->quadword_count) {
2983
midgard_bundle *initial_bundle =
2984
util_dynarray_element(&v->bundles, midgard_bundle, 0);
2985
2986
return initial_bundle->tag;
2987
}
2988
}
2989
2990
/* Default to tag 1, which will break from the shader, in case we jump
2991
* to the exit block (i.e. `return` in a compute shader) */
2992
2993
return 1;
2994
}
2995
2996
/* For each fragment writeout instruction, generate a writeout loop to
2997
* associate with it */
2998
2999
static void
3000
mir_add_writeout_loops(compiler_context *ctx)
3001
{
3002
for (unsigned rt = 0; rt < ARRAY_SIZE(ctx->writeout_branch); ++rt) {
3003
for (unsigned s = 0; s < MIDGARD_MAX_SAMPLE_ITER; ++s) {
3004
midgard_instruction *br = ctx->writeout_branch[rt][s];
3005
if (!br) continue;
3006
3007
unsigned popped = br->branch.target_block;
3008
pan_block_add_successor(&(mir_get_block(ctx, popped - 1)->base),
3009
&ctx->current_block->base);
3010
br->branch.target_block = emit_fragment_epilogue(ctx, rt, s);
3011
br->branch.target_type = TARGET_GOTO;
3012
3013
/* If we have more RTs, we'll need to restore back after our
3014
* loop terminates */
3015
midgard_instruction *next_br = NULL;
3016
3017
if ((s + 1) < MIDGARD_MAX_SAMPLE_ITER)
3018
next_br = ctx->writeout_branch[rt][s + 1];
3019
3020
if (!next_br && (rt + 1) < ARRAY_SIZE(ctx->writeout_branch))
3021
next_br = ctx->writeout_branch[rt + 1][0];
3022
3023
if (next_br) {
3024
midgard_instruction uncond = v_branch(false, false);
3025
uncond.branch.target_block = popped;
3026
uncond.branch.target_type = TARGET_GOTO;
3027
emit_mir_instruction(ctx, uncond);
3028
pan_block_add_successor(&ctx->current_block->base,
3029
&(mir_get_block(ctx, popped)->base));
3030
schedule_barrier(ctx);
3031
} else {
3032
/* We're last, so we can terminate here */
3033
br->last_writeout = true;
3034
}
3035
}
3036
}
3037
}
3038
3039
void
3040
midgard_compile_shader_nir(nir_shader *nir,
3041
const struct panfrost_compile_inputs *inputs,
3042
struct util_dynarray *binary,
3043
struct pan_shader_info *info)
3044
{
3045
midgard_debug = debug_get_option_midgard_debug();
3046
3047
/* TODO: Bound against what? */
3048
compiler_context *ctx = rzalloc(NULL, compiler_context);
3049
ctx->sysval_to_id = panfrost_init_sysvals(&info->sysvals, ctx);
3050
3051
ctx->inputs = inputs;
3052
ctx->nir = nir;
3053
ctx->info = info;
3054
ctx->stage = nir->info.stage;
3055
3056
if (inputs->is_blend) {
3057
unsigned nr_samples = MAX2(inputs->blend.nr_samples, 1);
3058
const struct util_format_description *desc =
3059
util_format_description(inputs->rt_formats[inputs->blend.rt]);
3060
3061
/* We have to split writeout in 128 bit chunks */
3062
ctx->blend_sample_iterations =
3063
DIV_ROUND_UP(desc->block.bits * nr_samples, 128);
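/* e.g. four samples of a 64-bit RGBA16F tile need 256 bits, i.e. two iterations */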
3064
}
3065
ctx->blend_input = ~0;
3066
ctx->blend_src1 = ~0;
3067
ctx->quirks = midgard_get_quirks(inputs->gpu_id);
3068
3069
/* Initialize hash tables at a global (not per-block) level */
3070
3071
ctx->ssa_constants = _mesa_hash_table_u64_create(ctx);
3072
3073
/* Lower gl_Position pre-optimisation, but after lowering vars to ssa
3074
* (so we don't accidentally duplicate the epilogue since mesa/st has
3075
* messed with our I/O quite a bit already) */
3076
3077
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
3078
3079
if (ctx->stage == MESA_SHADER_VERTEX) {
3080
NIR_PASS_V(nir, nir_lower_viewport_transform);
3081
NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
3082
}
3083
3084
NIR_PASS_V(nir, nir_lower_var_copies);
3085
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
3086
NIR_PASS_V(nir, nir_split_var_copies);
3087
NIR_PASS_V(nir, nir_lower_var_copies);
3088
NIR_PASS_V(nir, nir_lower_global_vars_to_local);
3089
NIR_PASS_V(nir, nir_lower_var_copies);
3090
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
3091
3092
unsigned pan_quirks = panfrost_get_quirks(inputs->gpu_id, 0);
3093
NIR_PASS_V(nir, pan_lower_framebuffer,
3094
inputs->rt_formats, inputs->is_blend, pan_quirks);
3095
3096
NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
3097
glsl_type_size, 0);
3098
NIR_PASS_V(nir, nir_lower_ssbo);
3099
NIR_PASS_V(nir, pan_nir_lower_zs_store);
3100
3101
NIR_PASS_V(nir, pan_nir_lower_64bit_intrin);
3102
3103
/* Optimisation passes */
3104
3105
optimise_nir(nir, ctx->quirks, inputs->is_blend);
3106
3107
NIR_PASS_V(nir, pan_nir_reorder_writeout);
3108
3109
if ((midgard_debug & MIDGARD_DBG_SHADERS) &&
3110
((midgard_debug & MIDGARD_DBG_INTERNAL) || !nir->info.internal)) {
3111
nir_print_shader(nir, stdout);
3112
}
3113
3114
info->tls_size = nir->scratch_size;
3115
3116
nir_foreach_function(func, nir) {
3117
if (!func->impl)
3118
continue;
3119
3120
list_inithead(&ctx->blocks);
3121
ctx->block_count = 0;
3122
ctx->func = func;
3123
ctx->already_emitted = calloc(BITSET_WORDS(func->impl->ssa_alloc), sizeof(BITSET_WORD));
3124
3125
if (nir->info.outputs_read && !inputs->is_blend) {
3126
emit_block_init(ctx);
3127
3128
struct midgard_instruction wait = v_branch(false, false);
3129
wait.branch.target_type = TARGET_TILEBUF_WAIT;
3130
3131
emit_mir_instruction(ctx, wait);
3132
3133
++ctx->instruction_count;
3134
}
3135
3136
emit_cf_list(ctx, &func->impl->body);
3137
free(ctx->already_emitted);
3138
break; /* TODO: Multi-function shaders */
3139
}
3140
3141
/* Per-block lowering before opts */
3142
3143
mir_foreach_block(ctx, _block) {
3144
midgard_block *block = (midgard_block *) _block;
3145
inline_alu_constants(ctx, block);
3146
embedded_to_inline_constant(ctx, block);
3147
}
3148
/* MIR-level optimizations */
3149
3150
bool progress = false;
3151
3152
do {
3153
progress = false;
3154
progress |= midgard_opt_dead_code_eliminate(ctx);
3155
3156
mir_foreach_block(ctx, _block) {
3157
midgard_block *block = (midgard_block *) _block;
3158
progress |= midgard_opt_copy_prop(ctx, block);
3159
progress |= midgard_opt_combine_projection(ctx, block);
3160
progress |= midgard_opt_varying_projection(ctx, block);
3161
}
3162
} while (progress);
3163
3164
mir_foreach_block(ctx, _block) {
3165
midgard_block *block = (midgard_block *) _block;
3166
midgard_lower_derivatives(ctx, block);
3167
midgard_legalize_invert(ctx, block);
3168
midgard_cull_dead_branch(ctx, block);
3169
}
3170
3171
if (ctx->stage == MESA_SHADER_FRAGMENT)
3172
mir_add_writeout_loops(ctx);
3173
3174
/* Analyze now that the code is known but before scheduling creates
3175
* pipeline registers which are harder to track */
3176
mir_analyze_helper_requirements(ctx);
3177
3178
/* Schedule! */
3179
midgard_schedule_program(ctx);
3180
mir_ra(ctx);
3181
3182
/* Analyze after scheduling since this is order-dependent */
3183
mir_analyze_helper_terminate(ctx);
3184
3185
/* Emit flat binary from the instruction arrays. Iterate each block in
3186
* sequence. Save instruction boundaries such that lookahead tags can
3187
* be assigned easily */
3188
3189
/* Cache _all_ bundles in source order for lookahead across failed branches */
3190
3191
int bundle_count = 0;
3192
mir_foreach_block(ctx, _block) {
3193
midgard_block *block = (midgard_block *) _block;
3194
bundle_count += block->bundles.size / sizeof(midgard_bundle);
3195
}
3196
midgard_bundle **source_order_bundles = malloc(sizeof(midgard_bundle *) * bundle_count);
3197
int bundle_idx = 0;
3198
mir_foreach_block(ctx, _block) {
3199
midgard_block *block = (midgard_block *) _block;
3200
util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
3201
source_order_bundles[bundle_idx++] = bundle;
3202
}
3203
}
3204
3205
int current_bundle = 0;
3206
3207
/* Midgard prefetches instruction types, so during emission we
3208
* need to look ahead. Unless this is the last instruction, in
3209
* which case we return 1. */
3210
3211
mir_foreach_block(ctx, _block) {
3212
midgard_block *block = (midgard_block *) _block;
3213
mir_foreach_bundle_in_block(block, bundle) {
3214
int lookahead = 1;
3215
3216
if (!bundle->last_writeout && (current_bundle + 1 < bundle_count))
3217
lookahead = source_order_bundles[current_bundle + 1]->tag;
3218
3219
emit_binary_bundle(ctx, block, bundle, binary, lookahead);
3220
++current_bundle;
3221
}
3222
3223
/* TODO: Free deeper */
3224
//util_dynarray_fini(&block->instructions);
3225
}
3226
3227
free(source_order_bundles);
3228
3229
/* Report the very first tag executed */
3230
info->midgard.first_tag = midgard_get_first_tag_from_block(ctx, 0);
3231
3232
info->ubo_mask = ctx->ubo_mask & BITSET_MASK(ctx->nir->info.num_ubos);
3233
3234
if ((midgard_debug & MIDGARD_DBG_SHADERS) &&
3235
((midgard_debug & MIDGARD_DBG_INTERNAL) || !nir->info.internal)) {
3236
disassemble_midgard(stdout, binary->data,
3237
binary->size, inputs->gpu_id,
3238
midgard_debug & MIDGARD_DBG_VERBOSE);
3239
fflush(stdout);
3240
}
3241
3242
/* A shader ending on a 16MB boundary causes INSTR_INVALID_PC faults,
3243
* workaround by adding some padding to the end of the shader. (The
3244
* kernel makes sure shader BOs can't cross 16MB boundaries.) */
3245
if (binary->size)
3246
memset(util_dynarray_grow(binary, uint8_t, 16), 0, 16);
3247
3248
if ((midgard_debug & MIDGARD_DBG_SHADERDB || inputs->shaderdb) &&
3249
!nir->info.internal) {
3250
unsigned nr_bundles = 0, nr_ins = 0;
3251
3252
/* Count instructions and bundles */
3253
3254
mir_foreach_block(ctx, _block) {
3255
midgard_block *block = (midgard_block *) _block;
3256
nr_bundles += util_dynarray_num_elements(
3257
&block->bundles, midgard_bundle);
3258
3259
mir_foreach_bundle_in_block(block, bun)
3260
nr_ins += bun->instruction_count;
3261
}
3262
3263
/* Calculate thread count. There are certain cutoffs by
3264
* register count for thread count */
3265
3266
unsigned nr_registers = info->work_reg_count;
3267
3268
unsigned nr_threads =
3269
(nr_registers <= 4) ? 4 :
3270
(nr_registers <= 8) ? 2 :
3271
1;
3272
3273
/* Dump stats */
3274
3275
fprintf(stderr, "%s - %s shader: "
3276
"%u inst, %u bundles, %u quadwords, "
3277
"%u registers, %u threads, %u loops, "
3278
"%u:%u spills:fills\n",
3279
ctx->nir->info.label ?: "",
3280
ctx->inputs->is_blend ? "PAN_SHADER_BLEND" :
3281
gl_shader_stage_name(ctx->stage),
3282
nr_ins, nr_bundles, ctx->quadword_count,
3283
nr_registers, nr_threads,
3284
ctx->loop_count,
3285
ctx->spills, ctx->fills);
3286
}
3287
3288
ralloc_free(ctx);
3289
}
3290
3291