GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/intel/compiler/brw_fs_nir.cpp
1
/*
2
* Copyright © 2010 Intel Corporation
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*/
23
24
#include "compiler/glsl/ir.h"
25
#include "brw_fs.h"
26
#include "brw_nir.h"
27
#include "brw_rt.h"
28
#include "brw_eu.h"
29
#include "nir_search_helpers.h"
30
#include "util/u_math.h"
31
#include "util/bitscan.h"
32
33
using namespace brw;
34
35
void
36
fs_visitor::emit_nir_code()
37
{
38
emit_shader_float_controls_execution_mode();
39
40
/* emit the arrays used for inputs and outputs - load/store intrinsics will
41
* be converted to reads/writes of these arrays
42
*/
43
nir_setup_outputs();
44
nir_setup_uniforms();
45
nir_emit_system_values();
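/* Scratch is allocated per SIMD channel, so the 4-byte-aligned NIR scratch size is scaled by the dispatch width. */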
46
last_scratch = ALIGN(nir->scratch_size, 4) * dispatch_width;
47
48
nir_emit_impl(nir_shader_get_entrypoint((nir_shader *)nir));
49
50
bld.emit(SHADER_OPCODE_HALT_TARGET);
51
}
52
53
void
54
fs_visitor::nir_setup_outputs()
55
{
56
if (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_FRAGMENT)
57
return;
58
59
unsigned vec4s[VARYING_SLOT_TESS_MAX] = { 0, };
60
61
/* Calculate the size of output registers in a separate pass, before
62
* allocating them. With ARB_enhanced_layouts, multiple output variables
63
* may occupy the same slot, but have different type sizes.
64
*/
65
nir_foreach_shader_out_variable(var, nir) {
66
const int loc = var->data.driver_location;
67
const unsigned var_vec4s =
68
var->data.compact ? DIV_ROUND_UP(glsl_get_length(var->type), 4)
69
: type_size_vec4(var->type, true);
70
vec4s[loc] = MAX2(vec4s[loc], var_vec4s);
71
}
72
73
for (unsigned loc = 0; loc < ARRAY_SIZE(vec4s);) {
74
if (vec4s[loc] == 0) {
75
loc++;
76
continue;
77
}
78
79
unsigned reg_size = vec4s[loc];
80
81
/* Check if there are any ranges that start within this range and extend
82
* past it. If so, include them in this allocation.
83
*/
84
for (unsigned i = 1; i < reg_size; i++) {
85
assert(i + loc < ARRAY_SIZE(vec4s));
86
reg_size = MAX2(vec4s[i + loc] + i, reg_size);
87
}
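/* Allocate a single VGRF covering the merged range (4 floats per vec4 slot); the loop below points each location at its offset within it. */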
88
89
fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_F, 4 * reg_size);
90
for (unsigned i = 0; i < reg_size; i++) {
91
assert(loc + i < ARRAY_SIZE(outputs));
92
outputs[loc + i] = offset(reg, bld, 4 * i);
93
}
94
95
loc += reg_size;
96
}
97
}
98
99
void
100
fs_visitor::nir_setup_uniforms()
101
{
102
/* Only the first compile gets to set up uniforms. */
103
if (push_constant_loc) {
104
assert(pull_constant_loc);
105
return;
106
}
107
108
uniforms = nir->num_uniforms / 4;
109
110
if ((stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL) &&
111
devinfo->verx10 < 125) {
112
/* Add uniforms for builtins after regular NIR uniforms. */
113
assert(uniforms == prog_data->nr_params);
114
115
uint32_t *param;
116
if (nir->info.workgroup_size_variable &&
117
compiler->lower_variable_group_size) {
118
param = brw_stage_prog_data_add_params(prog_data, 3);
119
for (unsigned i = 0; i < 3; i++) {
120
param[i] = (BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i);
121
group_size[i] = fs_reg(UNIFORM, uniforms++, BRW_REGISTER_TYPE_UD);
122
}
123
}
124
125
/* Subgroup ID must be the last uniform on the list. This makes it
126
* easier later to split between cross-thread and per-thread
127
* uniforms.
128
*/
129
param = brw_stage_prog_data_add_params(prog_data, 1);
130
*param = BRW_PARAM_BUILTIN_SUBGROUP_ID;
131
subgroup_id = fs_reg(UNIFORM, uniforms++, BRW_REGISTER_TYPE_UD);
132
}
133
}
134
135
static bool
136
emit_system_values_block(nir_block *block, fs_visitor *v)
137
{
138
fs_reg *reg;
139
140
nir_foreach_instr(instr, block) {
141
if (instr->type != nir_instr_type_intrinsic)
142
continue;
143
144
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
145
switch (intrin->intrinsic) {
146
case nir_intrinsic_load_vertex_id:
147
case nir_intrinsic_load_base_vertex:
148
unreachable("should be lowered by nir_lower_system_values().");
149
150
case nir_intrinsic_load_vertex_id_zero_base:
151
case nir_intrinsic_load_is_indexed_draw:
152
case nir_intrinsic_load_first_vertex:
153
case nir_intrinsic_load_instance_id:
154
case nir_intrinsic_load_base_instance:
155
case nir_intrinsic_load_draw_id:
156
unreachable("should be lowered by brw_nir_lower_vs_inputs().");
157
158
case nir_intrinsic_load_invocation_id:
159
if (v->stage == MESA_SHADER_TESS_CTRL)
160
break;
161
assert(v->stage == MESA_SHADER_GEOMETRY);
162
reg = &v->nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
163
if (reg->file == BAD_FILE) {
164
const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
165
fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
166
fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
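/* The invocation ID is packed into the topmost bits of g1, so a logical shift right by 27 leaves just that value. */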
167
abld.SHR(iid, g1, brw_imm_ud(27u));
168
*reg = iid;
169
}
170
break;
171
172
case nir_intrinsic_load_sample_pos:
173
assert(v->stage == MESA_SHADER_FRAGMENT);
174
reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
175
if (reg->file == BAD_FILE)
176
*reg = *v->emit_samplepos_setup();
177
break;
178
179
case nir_intrinsic_load_sample_id:
180
assert(v->stage == MESA_SHADER_FRAGMENT);
181
reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
182
if (reg->file == BAD_FILE)
183
*reg = *v->emit_sampleid_setup();
184
break;
185
186
case nir_intrinsic_load_sample_mask_in:
187
assert(v->stage == MESA_SHADER_FRAGMENT);
188
assert(v->devinfo->ver >= 7);
189
reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
190
if (reg->file == BAD_FILE)
191
*reg = *v->emit_samplemaskin_setup();
192
break;
193
194
case nir_intrinsic_load_workgroup_id:
195
assert(v->stage == MESA_SHADER_COMPUTE ||
196
v->stage == MESA_SHADER_KERNEL);
197
reg = &v->nir_system_values[SYSTEM_VALUE_WORKGROUP_ID];
198
if (reg->file == BAD_FILE)
199
*reg = *v->emit_cs_work_group_id_setup();
200
break;
201
202
case nir_intrinsic_load_helper_invocation:
203
assert(v->stage == MESA_SHADER_FRAGMENT);
204
reg = &v->nir_system_values[SYSTEM_VALUE_HELPER_INVOCATION];
205
if (reg->file == BAD_FILE) {
206
const fs_builder abld =
207
v->bld.annotate("gl_HelperInvocation", NULL);
208
209
/* On Gfx6+ (gl_HelperInvocation is only exposed on Gfx7+) the
210
* pixel mask is in g1.7 of the thread payload.
211
*
212
* We move the per-channel pixel enable bit to the low bit of each
213
* channel by shifting the byte containing the pixel mask by the
214
* vector immediate 0x76543210UV.
215
*
216
* The region of <1,8,0> reads only 1 byte (the pixel masks for
217
* subspans 0 and 1) in SIMD8 and an additional byte (the pixel
218
* masks for 2 and 3) in SIMD16.
219
*/
220
fs_reg shifted = abld.vgrf(BRW_REGISTER_TYPE_UW, 1);
221
222
for (unsigned i = 0; i < DIV_ROUND_UP(v->dispatch_width, 16); i++) {
223
const fs_builder hbld = abld.group(MIN2(16, v->dispatch_width), i);
224
hbld.SHR(offset(shifted, hbld, i),
225
stride(retype(brw_vec1_grf(1 + i, 7),
226
BRW_REGISTER_TYPE_UB),
227
1, 8, 0),
228
brw_imm_v(0x76543210));
229
}
230
231
/* A set bit in the pixel mask means the channel is enabled, but
232
* that is the opposite of gl_HelperInvocation so we need to invert
233
* the mask.
234
*
235
* The negate source-modifier bit of logical instructions on Gfx8+
236
* performs 1's complement negation, so we can use that instead of
237
* a NOT instruction.
238
*/
239
fs_reg inverted = negate(shifted);
240
if (v->devinfo->ver < 8) {
241
inverted = abld.vgrf(BRW_REGISTER_TYPE_UW);
242
abld.NOT(inverted, shifted);
243
}
244
245
/* We then resolve the 0/1 result to 0/~0 boolean values by ANDing
246
* with 1 and negating.
247
*/
248
fs_reg anded = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
249
abld.AND(anded, inverted, brw_imm_uw(1));
250
251
fs_reg dst = abld.vgrf(BRW_REGISTER_TYPE_D, 1);
252
abld.MOV(dst, negate(retype(anded, BRW_REGISTER_TYPE_D)));
253
*reg = dst;
254
}
255
break;
256
257
case nir_intrinsic_load_frag_shading_rate:
258
reg = &v->nir_system_values[SYSTEM_VALUE_FRAG_SHADING_RATE];
259
if (reg->file == BAD_FILE)
260
*reg = *v->emit_shading_rate_setup();
261
break;
262
263
default:
264
break;
265
}
266
}
267
268
return true;
269
}
270
271
void
272
fs_visitor::nir_emit_system_values()
273
{
274
nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
275
for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
276
nir_system_values[i] = fs_reg();
277
}
278
279
/* Always emit SUBGROUP_INVOCATION. Dead code will clean it up if we
280
* never end up using it.
281
*/
282
{
283
const fs_builder abld = bld.annotate("gl_SubgroupInvocation", NULL);
284
fs_reg &reg = nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION];
285
reg = abld.vgrf(BRW_REGISTER_TYPE_UW);
286
287
const fs_builder allbld8 = abld.group(8, 0).exec_all();
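/* brw_imm_v() is a vector immediate of eight 4-bit values, so 0x76543210 writes 0..7 into the first eight channels; the ADDs below extend this to 16 and 32 channels. */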
288
allbld8.MOV(reg, brw_imm_v(0x76543210));
289
if (dispatch_width > 8)
290
allbld8.ADD(byte_offset(reg, 16), reg, brw_imm_uw(8u));
291
if (dispatch_width > 16) {
292
const fs_builder allbld16 = abld.group(16, 0).exec_all();
293
allbld16.ADD(byte_offset(reg, 32), reg, brw_imm_uw(16u));
294
}
295
}
296
297
nir_function_impl *impl = nir_shader_get_entrypoint((nir_shader *)nir);
298
nir_foreach_block(block, impl)
299
emit_system_values_block(block, this);
300
}
301
302
void
303
fs_visitor::nir_emit_impl(nir_function_impl *impl)
304
{
305
nir_locals = ralloc_array(mem_ctx, fs_reg, impl->reg_alloc);
306
for (unsigned i = 0; i < impl->reg_alloc; i++) {
307
nir_locals[i] = fs_reg();
308
}
309
310
foreach_list_typed(nir_register, reg, node, &impl->registers) {
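/* num_array_elems == 0 means the register is not an array, but it still needs storage for one element. */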
311
unsigned array_elems =
312
reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
313
unsigned size = array_elems * reg->num_components;
314
const brw_reg_type reg_type = reg->bit_size == 8 ? BRW_REGISTER_TYPE_B :
315
brw_reg_type_from_bit_size(reg->bit_size, BRW_REGISTER_TYPE_F);
316
nir_locals[reg->index] = bld.vgrf(reg_type, size);
317
}
318
319
nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
320
impl->ssa_alloc);
321
322
nir_emit_cf_list(&impl->body);
323
}
324
325
void
326
fs_visitor::nir_emit_cf_list(exec_list *list)
327
{
328
exec_list_validate(list);
329
foreach_list_typed(nir_cf_node, node, node, list) {
330
switch (node->type) {
331
case nir_cf_node_if:
332
nir_emit_if(nir_cf_node_as_if(node));
333
break;
334
335
case nir_cf_node_loop:
336
nir_emit_loop(nir_cf_node_as_loop(node));
337
break;
338
339
case nir_cf_node_block:
340
nir_emit_block(nir_cf_node_as_block(node));
341
break;
342
343
default:
344
unreachable("Invalid CFG node block");
345
}
346
}
347
}
348
349
void
350
fs_visitor::nir_emit_if(nir_if *if_stmt)
351
{
352
bool invert;
353
fs_reg cond_reg;
354
355
/* If the condition has the form !other_condition, use other_condition as
356
* the source, but invert the predicate on the if instruction.
357
*/
358
nir_alu_instr *cond = nir_src_as_alu_instr(if_stmt->condition);
359
if (cond != NULL && cond->op == nir_op_inot) {
360
invert = true;
361
cond_reg = get_nir_src(cond->src[0].src);
362
cond_reg = offset(cond_reg, bld, cond->src[0].swizzle[0]);
363
} else {
364
invert = false;
365
cond_reg = get_nir_src(if_stmt->condition);
366
}
367
368
/* first, put the condition into f0 */
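/* A MOV to the null register with a .nz conditional mod updates the flag register without needing a real destination. */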
369
fs_inst *inst = bld.MOV(bld.null_reg_d(),
370
retype(cond_reg, BRW_REGISTER_TYPE_D));
371
inst->conditional_mod = BRW_CONDITIONAL_NZ;
372
373
bld.IF(BRW_PREDICATE_NORMAL)->predicate_inverse = invert;
374
375
nir_emit_cf_list(&if_stmt->then_list);
376
377
if (!nir_cf_list_is_empty_block(&if_stmt->else_list)) {
378
bld.emit(BRW_OPCODE_ELSE);
379
nir_emit_cf_list(&if_stmt->else_list);
380
}
381
382
bld.emit(BRW_OPCODE_ENDIF);
383
384
if (devinfo->ver < 7)
385
limit_dispatch_width(16, "Non-uniform control flow unsupported "
386
"in SIMD32 mode.");
387
}
388
389
void
390
fs_visitor::nir_emit_loop(nir_loop *loop)
391
{
392
bld.emit(BRW_OPCODE_DO);
393
394
nir_emit_cf_list(&loop->body);
395
396
bld.emit(BRW_OPCODE_WHILE);
397
398
if (devinfo->ver < 7)
399
limit_dispatch_width(16, "Non-uniform control flow unsupported "
400
"in SIMD32 mode.");
401
}
402
403
void
404
fs_visitor::nir_emit_block(nir_block *block)
405
{
406
nir_foreach_instr(instr, block) {
407
nir_emit_instr(instr);
408
}
409
}
410
411
void
412
fs_visitor::nir_emit_instr(nir_instr *instr)
413
{
414
const fs_builder abld = bld.annotate(NULL, instr);
415
416
switch (instr->type) {
417
case nir_instr_type_alu:
418
nir_emit_alu(abld, nir_instr_as_alu(instr), true);
419
break;
420
421
case nir_instr_type_deref:
422
unreachable("All derefs should've been lowered");
423
break;
424
425
case nir_instr_type_intrinsic:
426
switch (stage) {
427
case MESA_SHADER_VERTEX:
428
nir_emit_vs_intrinsic(abld, nir_instr_as_intrinsic(instr));
429
break;
430
case MESA_SHADER_TESS_CTRL:
431
nir_emit_tcs_intrinsic(abld, nir_instr_as_intrinsic(instr));
432
break;
433
case MESA_SHADER_TESS_EVAL:
434
nir_emit_tes_intrinsic(abld, nir_instr_as_intrinsic(instr));
435
break;
436
case MESA_SHADER_GEOMETRY:
437
nir_emit_gs_intrinsic(abld, nir_instr_as_intrinsic(instr));
438
break;
439
case MESA_SHADER_FRAGMENT:
440
nir_emit_fs_intrinsic(abld, nir_instr_as_intrinsic(instr));
441
break;
442
case MESA_SHADER_COMPUTE:
443
case MESA_SHADER_KERNEL:
444
nir_emit_cs_intrinsic(abld, nir_instr_as_intrinsic(instr));
445
break;
446
case MESA_SHADER_RAYGEN:
447
case MESA_SHADER_ANY_HIT:
448
case MESA_SHADER_CLOSEST_HIT:
449
case MESA_SHADER_MISS:
450
case MESA_SHADER_INTERSECTION:
451
case MESA_SHADER_CALLABLE:
452
nir_emit_bs_intrinsic(abld, nir_instr_as_intrinsic(instr));
453
break;
454
default:
455
unreachable("unsupported shader stage");
456
}
457
break;
458
459
case nir_instr_type_tex:
460
nir_emit_texture(abld, nir_instr_as_tex(instr));
461
break;
462
463
case nir_instr_type_load_const:
464
nir_emit_load_const(abld, nir_instr_as_load_const(instr));
465
break;
466
467
case nir_instr_type_ssa_undef:
468
/* We create a new VGRF for undefs on every use (by handling
469
* them in get_nir_src()), rather than for each definition.
470
* This helps register coalescing eliminate MOVs from undef.
471
*/
472
break;
473
474
case nir_instr_type_jump:
475
nir_emit_jump(abld, nir_instr_as_jump(instr));
476
break;
477
478
default:
479
unreachable("unknown instruction type");
480
}
481
}
482
483
/**
484
* Recognizes a parent instruction of nir_op_extract_* and changes the type to
485
* match instr.
486
*/
487
bool
488
fs_visitor::optimize_extract_to_float(nir_alu_instr *instr,
489
const fs_reg &result)
490
{
491
if (!instr->src[0].src.is_ssa ||
492
!instr->src[0].src.ssa->parent_instr)
493
return false;
494
495
if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
496
return false;
497
498
nir_alu_instr *src0 =
499
nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
500
501
if (src0->op != nir_op_extract_u8 && src0->op != nir_op_extract_u16 &&
502
src0->op != nir_op_extract_i8 && src0->op != nir_op_extract_i16)
503
return false;
504
505
unsigned element = nir_src_as_uint(src0->src[1].src);
506
507
/* Element type to extract. */
508
const brw_reg_type type = brw_int_type(
509
src0->op == nir_op_extract_u16 || src0->op == nir_op_extract_i16 ? 2 : 1,
510
src0->op == nir_op_extract_i16 || src0->op == nir_op_extract_i8);
511
512
fs_reg op0 = get_nir_src(src0->src[0].src);
513
op0.type = brw_type_for_nir_type(devinfo,
514
(nir_alu_type)(nir_op_infos[src0->op].input_types[0] |
515
nir_src_bit_size(src0->src[0].src)));
516
op0 = offset(op0, bld, src0->src[0].swizzle[0]);
517
518
bld.MOV(result, subscript(op0, type, element));
519
return true;
520
}
521
522
bool
523
fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
524
const fs_reg &result)
525
{
526
nir_intrinsic_instr *src0 = nir_src_as_intrinsic(instr->src[0].src);
527
if (src0 == NULL || src0->intrinsic != nir_intrinsic_load_front_face)
528
return false;
529
530
if (!nir_src_is_const(instr->src[1].src) ||
531
!nir_src_is_const(instr->src[2].src))
532
return false;
533
534
const float value1 = nir_src_as_float(instr->src[1].src);
535
const float value2 = nir_src_as_float(instr->src[2].src);
536
if (fabsf(value1) != 1.0f || fabsf(value2) != 1.0f)
537
return false;
538
539
/* nir_opt_algebraic should have gotten rid of bcsel(b, a, a) */
540
assert(value1 == -value2);
541
542
fs_reg tmp = vgrf(glsl_type::int_type);
543
544
if (devinfo->ver >= 12) {
545
/* Bit 15 of g1.1 is 0 if the polygon is front facing. */
546
fs_reg g1 = fs_reg(retype(brw_vec1_grf(1, 1), BRW_REGISTER_TYPE_W));
547
548
/* For (gl_FrontFacing ? 1.0 : -1.0), emit:
549
*
550
* or(8) tmp.1<2>W g1.1<0,1,0>W 0x00003f80W
551
* and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
552
*
553
* and negate the result for (gl_FrontFacing ? -1.0 : 1.0).
554
*/
555
bld.OR(subscript(tmp, BRW_REGISTER_TYPE_W, 1),
556
g1, brw_imm_uw(0x3f80));
557
558
if (value1 == -1.0f)
559
bld.MOV(tmp, negate(tmp));
560
561
} else if (devinfo->ver >= 6) {
562
/* Bit 15 of g0.0 is 0 if the polygon is front facing. */
563
fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
564
565
/* For (gl_FrontFacing ? 1.0 : -1.0), emit:
566
*
567
* or(8) tmp.1<2>W g0.0<0,1,0>W 0x00003f80W
568
* and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
569
*
570
* and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
571
*
572
* This negation looks like it's safe in practice, because bits 0:4 will
573
* surely be TRIANGLES
574
*/
575
576
if (value1 == -1.0f) {
577
g0.negate = true;
578
}
579
580
bld.OR(subscript(tmp, BRW_REGISTER_TYPE_W, 1),
581
g0, brw_imm_uw(0x3f80));
582
} else {
583
/* Bit 31 of g1.6 is 0 if the polygon is front facing. */
584
fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
585
586
/* For (gl_FrontFacing ? 1.0 : -1.0), emit:
587
*
588
* or(8) tmp<1>D g1.6<0,1,0>D 0x3f800000D
589
* and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
590
*
591
* and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
592
*
593
* This negation looks like it's safe in practice, because bits 0:4 will
594
* surely be TRIANGLES
595
*/
596
597
if (value1 == -1.0f) {
598
g1_6.negate = true;
599
}
600
601
bld.OR(tmp, g1_6, brw_imm_d(0x3f800000));
602
}
603
bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, brw_imm_d(0xbf800000));
604
605
return true;
606
}
607
608
static void
609
emit_find_msb_using_lzd(const fs_builder &bld,
610
const fs_reg &result,
611
const fs_reg &src,
612
bool is_signed)
613
{
614
fs_inst *inst;
615
fs_reg temp = src;
616
617
if (is_signed) {
618
/* LZD of an absolute value source almost always does the right
619
* thing. There are three problem values:
620
*
621
* * 0x80000000. Since abs(0x80000000) == 0x80000000, LZD returns
622
* 0. However, findMSB(int(0x80000000)) == 30.
623
*
624
* * 0xffffffff. Since abs(0xffffffff) == 1, LZD returns
625
* 31. Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
626
*
627
* For a value of zero or negative one, -1 will be returned.
628
*
629
* * Negative powers of two. LZD(abs(-(1<<x))) returns x, but
630
* findMSB(-(1<<x)) should return x-1.
631
*
632
* For all negative number cases, including 0x80000000 and
633
* 0xffffffff, the correct value is obtained from LZD if instead of
634
* negating the (already negative) value the logical-not is used. A
635
* conditional logical-not can be achieved in two instructions.
636
*/
637
temp = bld.vgrf(BRW_REGISTER_TYPE_D);
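/* ASR by 31 broadcasts the sign bit (0 for non-negative values, ~0 for negative ones); XOR with the source then performs the conditional logical-not described above. */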
638
639
bld.ASR(temp, src, brw_imm_d(31));
640
bld.XOR(temp, temp, src);
641
}
642
643
bld.LZD(retype(result, BRW_REGISTER_TYPE_UD),
644
retype(temp, BRW_REGISTER_TYPE_UD));
645
646
/* LZD counts from the MSB side, while GLSL's findMSB() wants the count
647
* from the LSB side. Subtract the result from 31 to convert the MSB
648
* count into an LSB count. If no bits are set, LZD will return 32.
649
* 31-32 = -1, which is exactly what findMSB() is supposed to return.
650
*/
651
inst = bld.ADD(result, retype(result, BRW_REGISTER_TYPE_D), brw_imm_d(31));
652
inst->src[0].negate = true;
653
}
654
655
static brw_rnd_mode
656
brw_rnd_mode_from_nir_op (const nir_op op) {
657
switch (op) {
658
case nir_op_f2f16_rtz:
659
return BRW_RND_MODE_RTZ;
660
case nir_op_f2f16_rtne:
661
return BRW_RND_MODE_RTNE;
662
default:
663
unreachable("Operation doesn't support rounding mode");
664
}
665
}
666
667
static brw_rnd_mode
668
brw_rnd_mode_from_execution_mode(unsigned execution_mode)
669
{
670
if (nir_has_any_rounding_mode_rtne(execution_mode))
671
return BRW_RND_MODE_RTNE;
672
if (nir_has_any_rounding_mode_rtz(execution_mode))
673
return BRW_RND_MODE_RTZ;
674
return BRW_RND_MODE_UNSPECIFIED;
675
}
676
677
fs_reg
678
fs_visitor::prepare_alu_destination_and_sources(const fs_builder &bld,
679
nir_alu_instr *instr,
680
fs_reg *op,
681
bool need_dest)
682
{
683
fs_reg result =
684
need_dest ? get_nir_dest(instr->dest.dest) : bld.null_reg_ud();
685
686
result.type = brw_type_for_nir_type(devinfo,
687
(nir_alu_type)(nir_op_infos[instr->op].output_type |
688
nir_dest_bit_size(instr->dest.dest)));
689
690
assert(!instr->dest.saturate);
691
692
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
693
/* We don't lower to source modifiers so they should not exist. */
694
assert(!instr->src[i].abs);
695
assert(!instr->src[i].negate);
696
697
op[i] = get_nir_src(instr->src[i].src);
698
op[i].type = brw_type_for_nir_type(devinfo,
699
(nir_alu_type)(nir_op_infos[instr->op].input_types[i] |
700
nir_src_bit_size(instr->src[i].src)));
701
}
702
703
/* Move and vecN instructions may still be vectored. Return the raw,
704
* vectored source and destination so that fs_visitor::nir_emit_alu can
705
* handle it. Other callers should not have to handle these kinds of
706
* instructions.
707
*/
708
switch (instr->op) {
709
case nir_op_mov:
710
case nir_op_vec2:
711
case nir_op_vec3:
712
case nir_op_vec4:
713
case nir_op_vec8:
714
case nir_op_vec16:
715
return result;
716
default:
717
break;
718
}
719
720
/* At this point, we have dealt with any instruction that operates on
721
* more than a single channel. Therefore, we can just adjust the source
722
* and destination registers for that channel and emit the instruction.
723
*/
724
unsigned channel = 0;
725
if (nir_op_infos[instr->op].output_size == 0) {
726
/* Since NIR is doing the scalarizing for us, we should only ever see
727
* vectorized operations with a single channel.
728
*/
729
assert(util_bitcount(instr->dest.write_mask) == 1);
730
channel = ffs(instr->dest.write_mask) - 1;
731
732
result = offset(result, bld, channel);
733
}
734
735
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
736
assert(nir_op_infos[instr->op].input_sizes[i] < 2);
737
op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
738
}
739
740
return result;
741
}
742
743
void
744
fs_visitor::resolve_inot_sources(const fs_builder &bld, nir_alu_instr *instr,
745
fs_reg *op)
746
{
747
for (unsigned i = 0; i < 2; i++) {
748
nir_alu_instr *inot_instr = nir_src_as_alu_instr(instr->src[i].src);
749
750
if (inot_instr != NULL && inot_instr->op == nir_op_inot) {
751
/* The source of the inot is now the source of instr. */
752
prepare_alu_destination_and_sources(bld, inot_instr, &op[i], false);
753
754
assert(!op[i].negate);
755
op[i].negate = true;
756
} else {
757
op[i] = resolve_source_modifiers(op[i]);
758
}
759
}
760
}
761
762
bool
763
fs_visitor::try_emit_b2fi_of_inot(const fs_builder &bld,
764
fs_reg result,
765
nir_alu_instr *instr)
766
{
767
if (devinfo->ver < 6 || devinfo->ver >= 12)
768
return false;
769
770
nir_alu_instr *inot_instr = nir_src_as_alu_instr(instr->src[0].src);
771
772
if (inot_instr == NULL || inot_instr->op != nir_op_inot)
773
return false;
774
775
/* HF is also possible as a destination on BDW+. For nir_op_b2i, the set
776
* of valid size-changing combinations is a bit more complex.
777
*
778
* The source restriction is just because I was lazy about generating the
779
* constant below.
780
*/
781
if (nir_dest_bit_size(instr->dest.dest) != 32 ||
782
nir_src_bit_size(inot_instr->src[0].src) != 32)
783
return false;
784
785
/* b2[fi](inot(a)) maps a=0 => 1, a=-1 => 0. Since a can only be 0 or -1,
786
* this is float(1 + a).
787
*/
788
fs_reg op;
789
790
prepare_alu_destination_and_sources(bld, inot_instr, &op, false);
791
792
/* Ignore the saturate modifier, if there is one. The result of the
793
* arithmetic can only be 0 or 1, so the clamping will do nothing anyway.
794
*/
795
bld.ADD(result, op, brw_imm_d(1));
796
797
return true;
798
}
799
800
/**
801
* Emit code for nir_op_fsign possibly fused with a nir_op_fmul
802
*
803
* If \c instr is not the \c nir_op_fsign, then \c fsign_src is the index of
804
* the source of \c instr that is a \c nir_op_fsign.
805
*/
806
void
807
fs_visitor::emit_fsign(const fs_builder &bld, const nir_alu_instr *instr,
808
fs_reg result, fs_reg *op, unsigned fsign_src)
809
{
810
fs_inst *inst;
811
812
assert(instr->op == nir_op_fsign || instr->op == nir_op_fmul);
813
assert(fsign_src < nir_op_infos[instr->op].num_inputs);
814
815
if (instr->op != nir_op_fsign) {
816
const nir_alu_instr *const fsign_instr =
817
nir_src_as_alu_instr(instr->src[fsign_src].src);
818
819
/* op[fsign_src] has the nominal result of the fsign, and op[1 -
820
* fsign_src] has the other multiply source. This must be rearranged so
821
* that op[0] is the source of the fsign op[1] is the other multiply
822
* source.
823
*/
824
if (fsign_src != 0)
825
op[1] = op[0];
826
827
op[0] = get_nir_src(fsign_instr->src[0].src);
828
829
const nir_alu_type t =
830
(nir_alu_type)(nir_op_infos[instr->op].input_types[0] |
831
nir_src_bit_size(fsign_instr->src[0].src));
832
833
op[0].type = brw_type_for_nir_type(devinfo, t);
834
835
unsigned channel = 0;
836
if (nir_op_infos[instr->op].output_size == 0) {
837
/* Since NIR is doing the scalarizing for us, we should only ever see
838
* vectorized operations with a single channel.
839
*/
840
assert(util_bitcount(instr->dest.write_mask) == 1);
841
channel = ffs(instr->dest.write_mask) - 1;
842
}
843
844
op[0] = offset(op[0], bld, fsign_instr->src[0].swizzle[channel]);
845
}
846
847
if (type_sz(op[0].type) == 2) {
848
/* AND(val, 0x8000) gives the sign bit.
849
*
850
* Predicated OR ORs 1.0 (0x3c00) with the sign bit if val is not zero.
851
*/
852
fs_reg zero = retype(brw_imm_uw(0), BRW_REGISTER_TYPE_HF);
853
bld.CMP(bld.null_reg_f(), op[0], zero, BRW_CONDITIONAL_NZ);
854
855
op[0].type = BRW_REGISTER_TYPE_UW;
856
result.type = BRW_REGISTER_TYPE_UW;
857
bld.AND(result, op[0], brw_imm_uw(0x8000u));
858
859
if (instr->op == nir_op_fsign)
860
inst = bld.OR(result, result, brw_imm_uw(0x3c00u));
861
else {
862
/* Use XOR here to get the result sign correct. */
863
inst = bld.XOR(result, result, retype(op[1], BRW_REGISTER_TYPE_UW));
864
}
865
866
inst->predicate = BRW_PREDICATE_NORMAL;
867
} else if (type_sz(op[0].type) == 4) {
868
/* AND(val, 0x80000000) gives the sign bit.
869
*
870
* Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
871
* zero.
872
*/
873
bld.CMP(bld.null_reg_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);
874
875
op[0].type = BRW_REGISTER_TYPE_UD;
876
result.type = BRW_REGISTER_TYPE_UD;
877
bld.AND(result, op[0], brw_imm_ud(0x80000000u));
878
879
if (instr->op == nir_op_fsign)
880
inst = bld.OR(result, result, brw_imm_ud(0x3f800000u));
881
else {
882
/* Use XOR here to get the result sign correct. */
883
inst = bld.XOR(result, result, retype(op[1], BRW_REGISTER_TYPE_UD));
884
}
885
886
inst->predicate = BRW_PREDICATE_NORMAL;
887
} else {
888
/* For doubles we do the same but we need to consider:
889
*
890
* - 2-src instructions can't operate with 64-bit immediates
891
* - The sign is encoded in the high 32-bit of each DF
892
* - We need to produce a DF result.
893
*/
894
895
fs_reg zero = vgrf(glsl_type::double_type);
896
bld.MOV(zero, setup_imm_df(bld, 0.0));
897
bld.CMP(bld.null_reg_df(), op[0], zero, BRW_CONDITIONAL_NZ);
898
899
bld.MOV(result, zero);
900
901
fs_reg r = subscript(result, BRW_REGISTER_TYPE_UD, 1);
902
bld.AND(r, subscript(op[0], BRW_REGISTER_TYPE_UD, 1),
903
brw_imm_ud(0x80000000u));
904
905
if (instr->op == nir_op_fsign) {
906
set_predicate(BRW_PREDICATE_NORMAL,
907
bld.OR(r, r, brw_imm_ud(0x3ff00000u)));
908
} else {
909
/* This could be done better in some cases. If the scale is an
910
* immediate with the low 32-bits all 0, emitting a separate XOR and
911
* OR would allow an algebraic optimization to remove the OR. There
912
* are currently zero instances of fsign(double(x))*IMM in shader-db
913
* or any test suite, so it is hard to care at this time.
914
*/
915
fs_reg result_int64 = retype(result, BRW_REGISTER_TYPE_UQ);
916
inst = bld.XOR(result_int64, result_int64,
917
retype(op[1], BRW_REGISTER_TYPE_UQ));
918
}
919
}
920
}
921
922
/**
923
* Determine whether sources of a nir_op_fmul can be fused with a nir_op_fsign
924
*
925
* Checks the operands of a \c nir_op_fmul to determine whether or not
926
* \c emit_fsign could fuse the multiplication with the \c sign() calculation.
927
*
928
* \param instr The multiplication instruction
929
*
930
* \param fsign_src The source of \c instr that may or may not be a
931
* \c nir_op_fsign
932
*/
933
static bool
934
can_fuse_fmul_fsign(nir_alu_instr *instr, unsigned fsign_src)
935
{
936
assert(instr->op == nir_op_fmul);
937
938
nir_alu_instr *const fsign_instr =
939
nir_src_as_alu_instr(instr->src[fsign_src].src);
940
941
/* Rules:
942
*
943
* 1. instr->src[fsign_src] must be a nir_op_fsign.
944
* 2. The nir_op_fsign can only be used by this multiplication.
945
* 3. The source that is the nir_op_fsign does not have source modifiers.
946
* \c emit_fsign only examines the source modifiers of the source of the
947
* \c nir_op_fsign.
948
*
949
* The nir_op_fsign must also not have the saturate modifier, but steps
950
* have already been taken (in nir_opt_algebraic) to ensure that.
951
*/
952
return fsign_instr != NULL && fsign_instr->op == nir_op_fsign &&
953
is_used_once(fsign_instr);
954
}
955
956
void
957
fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
958
bool need_dest)
959
{
960
struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
961
fs_inst *inst;
962
unsigned execution_mode =
963
bld.shader->nir->info.float_controls_execution_mode;
964
965
fs_reg op[NIR_MAX_VEC_COMPONENTS];
966
fs_reg result = prepare_alu_destination_and_sources(bld, instr, op, need_dest);
967
968
switch (instr->op) {
969
case nir_op_mov:
970
case nir_op_vec2:
971
case nir_op_vec3:
972
case nir_op_vec4:
973
case nir_op_vec8:
974
case nir_op_vec16: {
975
fs_reg temp = result;
976
bool need_extra_copy = false;
977
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
978
if (!instr->src[i].src.is_ssa &&
979
instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
980
need_extra_copy = true;
981
temp = bld.vgrf(result.type, 4);
982
break;
983
}
984
}
985
986
for (unsigned i = 0; i < 4; i++) {
987
if (!(instr->dest.write_mask & (1 << i)))
988
continue;
989
990
if (instr->op == nir_op_mov) {
991
bld.MOV(offset(temp, bld, i),
992
offset(op[0], bld, instr->src[0].swizzle[i]));
993
} else {
994
bld.MOV(offset(temp, bld, i),
995
offset(op[i], bld, instr->src[i].swizzle[0]));
996
}
997
}
998
999
/* In this case the source and destination registers were the same,
1000
* so we need to insert an extra set of moves in order to deal with
1001
* any swizzling.
1002
*/
1003
if (need_extra_copy) {
1004
for (unsigned i = 0; i < 4; i++) {
1005
if (!(instr->dest.write_mask & (1 << i)))
1006
continue;
1007
1008
bld.MOV(offset(result, bld, i), offset(temp, bld, i));
1009
}
1010
}
1011
return;
1012
}
1013
1014
case nir_op_i2f32:
1015
case nir_op_u2f32:
1016
if (optimize_extract_to_float(instr, result))
1017
return;
1018
inst = bld.MOV(result, op[0]);
1019
break;
1020
1021
case nir_op_f2f16_rtne:
1022
case nir_op_f2f16_rtz:
1023
case nir_op_f2f16: {
1024
brw_rnd_mode rnd = BRW_RND_MODE_UNSPECIFIED;
1025
1026
if (nir_op_f2f16 == instr->op)
1027
rnd = brw_rnd_mode_from_execution_mode(execution_mode);
1028
else
1029
rnd = brw_rnd_mode_from_nir_op(instr->op);
1030
1031
if (BRW_RND_MODE_UNSPECIFIED != rnd)
1032
bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(), brw_imm_d(rnd));
1033
1034
/* In theory, it would be better to use BRW_OPCODE_F32TO16. Depending
1035
* on the HW gen, it is a special hw opcode or just a MOV, and
1036
* brw_F32TO16 (at brw_eu_emit) would do the work to choose.
1037
*
1038
* But if we want to use that opcode, we need to add support to the
1039
* different optimizations and lowerings. Since HF support is currently
1040
* gfx8+ only, it is better to use the MOV directly, and use
1041
* BRW_OPCODE_F32TO16 when/if we add HF support on gfx7.
1042
*/
1043
assert(type_sz(op[0].type) < 8); /* brw_nir_lower_conversions */
1044
inst = bld.MOV(result, op[0]);
1045
break;
1046
}
1047
1048
case nir_op_b2i8:
1049
case nir_op_b2i16:
1050
case nir_op_b2i32:
1051
case nir_op_b2i64:
1052
case nir_op_b2f16:
1053
case nir_op_b2f32:
1054
case nir_op_b2f64:
1055
if (try_emit_b2fi_of_inot(bld, result, instr))
1056
break;
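/* NIR booleans are 0/-1; retyping the source to D and flipping its negate modifier turns them into 0/1 for the conversion MOV below. */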
1057
op[0].type = BRW_REGISTER_TYPE_D;
1058
op[0].negate = !op[0].negate;
1059
FALLTHROUGH;
1060
case nir_op_i2f64:
1061
case nir_op_i2i64:
1062
case nir_op_u2f64:
1063
case nir_op_u2u64:
1064
case nir_op_f2f64:
1065
case nir_op_f2i64:
1066
case nir_op_f2u64:
1067
case nir_op_i2i32:
1068
case nir_op_u2u32:
1069
case nir_op_f2i32:
1070
case nir_op_f2u32:
1071
case nir_op_i2f16:
1072
case nir_op_i2i16:
1073
case nir_op_u2f16:
1074
case nir_op_u2u16:
1075
case nir_op_f2i16:
1076
case nir_op_f2u16:
1077
case nir_op_i2i8:
1078
case nir_op_u2u8:
1079
case nir_op_f2i8:
1080
case nir_op_f2u8:
1081
if (result.type == BRW_REGISTER_TYPE_B ||
1082
result.type == BRW_REGISTER_TYPE_UB ||
1083
result.type == BRW_REGISTER_TYPE_HF)
1084
assert(type_sz(op[0].type) < 8); /* brw_nir_lower_conversions */
1085
1086
if (op[0].type == BRW_REGISTER_TYPE_B ||
1087
op[0].type == BRW_REGISTER_TYPE_UB ||
1088
op[0].type == BRW_REGISTER_TYPE_HF)
1089
assert(type_sz(result.type) < 8); /* brw_nir_lower_conversions */
1090
1091
inst = bld.MOV(result, op[0]);
1092
break;
1093
1094
case nir_op_fsat:
1095
inst = bld.MOV(result, op[0]);
1096
inst->saturate = true;
1097
break;
1098
1099
case nir_op_fneg:
1100
case nir_op_ineg:
1101
op[0].negate = true;
1102
inst = bld.MOV(result, op[0]);
1103
break;
1104
1105
case nir_op_fabs:
1106
case nir_op_iabs:
1107
op[0].negate = false;
1108
op[0].abs = true;
1109
inst = bld.MOV(result, op[0]);
1110
break;
1111
1112
case nir_op_f2f32:
1113
if (nir_has_any_rounding_mode_enabled(execution_mode)) {
1114
brw_rnd_mode rnd =
1115
brw_rnd_mode_from_execution_mode(execution_mode);
1116
bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
1117
brw_imm_d(rnd));
1118
}
1119
1120
if (op[0].type == BRW_REGISTER_TYPE_HF)
1121
assert(type_sz(result.type) < 8); /* brw_nir_lower_conversions */
1122
1123
inst = bld.MOV(result, op[0]);
1124
break;
1125
1126
case nir_op_fsign:
1127
emit_fsign(bld, instr, result, op, 0);
1128
break;
1129
1130
case nir_op_frcp:
1131
inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
1132
break;
1133
1134
case nir_op_fexp2:
1135
inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
1136
break;
1137
1138
case nir_op_flog2:
1139
inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
1140
break;
1141
1142
case nir_op_fsin:
1143
inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
1144
break;
1145
1146
case nir_op_fcos:
1147
inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
1148
break;
1149
1150
case nir_op_fddx:
1151
if (fs_key->high_quality_derivatives) {
1152
inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
1153
} else {
1154
inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
1155
}
1156
break;
1157
case nir_op_fddx_fine:
1158
inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
1159
break;
1160
case nir_op_fddx_coarse:
1161
inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
1162
break;
1163
case nir_op_fddy:
1164
if (fs_key->high_quality_derivatives) {
1165
inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
1166
} else {
1167
inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
1168
}
1169
break;
1170
case nir_op_fddy_fine:
1171
inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
1172
break;
1173
case nir_op_fddy_coarse:
1174
inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
1175
break;
1176
1177
case nir_op_fadd:
1178
if (nir_has_any_rounding_mode_enabled(execution_mode)) {
1179
brw_rnd_mode rnd =
1180
brw_rnd_mode_from_execution_mode(execution_mode);
1181
bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
1182
brw_imm_d(rnd));
1183
}
1184
FALLTHROUGH;
1185
case nir_op_iadd:
1186
inst = bld.ADD(result, op[0], op[1]);
1187
break;
1188
1189
case nir_op_iadd_sat:
1190
case nir_op_uadd_sat:
1191
inst = bld.ADD(result, op[0], op[1]);
1192
inst->saturate = true;
1193
break;
1194
1195
case nir_op_isub_sat:
1196
bld.emit(SHADER_OPCODE_ISUB_SAT, result, op[0], op[1]);
1197
break;
1198
1199
case nir_op_usub_sat:
1200
bld.emit(SHADER_OPCODE_USUB_SAT, result, op[0], op[1]);
1201
break;
1202
1203
case nir_op_irhadd:
1204
case nir_op_urhadd:
1205
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1206
inst = bld.AVG(result, op[0], op[1]);
1207
break;
1208
1209
case nir_op_ihadd:
1210
case nir_op_uhadd: {
1211
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1212
fs_reg tmp = bld.vgrf(result.type);
1213
1214
if (devinfo->ver >= 8) {
1215
op[0] = resolve_source_modifiers(op[0]);
1216
op[1] = resolve_source_modifiers(op[1]);
1217
}
1218
1219
/* AVG(x, y) - ((x ^ y) & 1) */
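/* AVG rounds up ((x + y + 1) >> 1), so subtracting the low bit of x ^ y yields the rounded-down halving add that [iu]hadd requires. */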
1220
bld.XOR(tmp, op[0], op[1]);
1221
bld.AND(tmp, tmp, retype(brw_imm_ud(1), result.type));
1222
bld.AVG(result, op[0], op[1]);
1223
inst = bld.ADD(result, result, tmp);
1224
inst->src[1].negate = true;
1225
break;
1226
}
1227
1228
case nir_op_fmul:
1229
for (unsigned i = 0; i < 2; i++) {
1230
if (can_fuse_fmul_fsign(instr, i)) {
1231
emit_fsign(bld, instr, result, op, i);
1232
return;
1233
}
1234
}
1235
1236
/* We emit the rounding mode after the previous fsign optimization since
1237
* that path doesn't result in a MUL, but instead negates the value by other
1238
* means.
1239
*/
1240
if (nir_has_any_rounding_mode_enabled(execution_mode)) {
1241
brw_rnd_mode rnd =
1242
brw_rnd_mode_from_execution_mode(execution_mode);
1243
bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
1244
brw_imm_d(rnd));
1245
}
1246
1247
inst = bld.MUL(result, op[0], op[1]);
1248
break;
1249
1250
case nir_op_imul_2x32_64:
1251
case nir_op_umul_2x32_64:
1252
bld.MUL(result, op[0], op[1]);
1253
break;
1254
1255
case nir_op_imul_32x16:
1256
case nir_op_umul_32x16: {
1257
const bool ud = instr->op == nir_op_umul_32x16;
1258
1259
assert(nir_dest_bit_size(instr->dest.dest) == 32);
1260
1261
/* Before Gfx7, the order of the 32-bit source and the 16-bit source was
1262
* swapped. The extension isn't enabled on those platforms, so don't
1263
* pretend to support the differences.
1264
*/
1265
assert(devinfo->ver >= 7);
1266
1267
if (op[1].file == IMM)
1268
op[1] = ud ? brw_imm_uw(op[1].ud) : brw_imm_w(op[1].d);
1269
else {
1270
const enum brw_reg_type word_type =
1271
ud ? BRW_REGISTER_TYPE_UW : BRW_REGISTER_TYPE_W;
1272
1273
op[1] = subscript(op[1], word_type, 0);
1274
}
1275
1276
const enum brw_reg_type dword_type =
1277
ud ? BRW_REGISTER_TYPE_UD : BRW_REGISTER_TYPE_D;
1278
1279
bld.MUL(result, retype(op[0], dword_type), op[1]);
1280
break;
1281
}
1282
1283
case nir_op_imul:
1284
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1285
bld.MUL(result, op[0], op[1]);
1286
break;
1287
1288
case nir_op_imul_high:
1289
case nir_op_umul_high:
1290
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1291
bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
1292
break;
1293
1294
case nir_op_idiv:
1295
case nir_op_udiv:
1296
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1297
bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
1298
break;
1299
1300
case nir_op_uadd_carry:
1301
unreachable("Should have been lowered by carry_to_arith().");
1302
1303
case nir_op_usub_borrow:
1304
unreachable("Should have been lowered by borrow_to_arith().");
1305
1306
case nir_op_umod:
1307
case nir_op_irem:
1308
/* According to the sign table for INT DIV in the Ivy Bridge PRM, it
1309
* appears that our hardware just does the right thing for signed
1310
* remainder.
1311
*/
1312
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1313
bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
1314
break;
1315
1316
case nir_op_imod: {
1317
/* Get a regular C-style remainder. If a % b == 0, set the predicate. */
1318
bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
1319
1320
/* Math instructions don't support conditional mod */
1321
inst = bld.MOV(bld.null_reg_d(), result);
1322
inst->conditional_mod = BRW_CONDITIONAL_NZ;
1323
1324
/* Now, we need to determine if signs of the sources are different.
1325
* When we XOR the sources, the top bit is 0 if they are the same and 1
1326
* if they are different. We can then use a conditional modifier to
1327
* turn that into a predicate. This leads us to an XOR.l instruction.
1328
*
1329
* Technically, according to the PRM, you're not allowed to use .l on a
1330
* XOR instruction. However, empirical experiments and Curro's reading
1331
* of the simulator source both indicate that it's safe.
1332
*/
1333
fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_D);
1334
inst = bld.XOR(tmp, op[0], op[1]);
1335
inst->predicate = BRW_PREDICATE_NORMAL;
1336
inst->conditional_mod = BRW_CONDITIONAL_L;
1337
1338
/* If the result of the initial remainder operation is non-zero and the
1339
* two sources have different signs, add in a copy of op[1] to get the
1340
* final integer modulus value.
1341
*/
1342
inst = bld.ADD(result, result, op[1]);
1343
inst->predicate = BRW_PREDICATE_NORMAL;
1344
break;
1345
}
1346
1347
case nir_op_flt32:
1348
case nir_op_fge32:
1349
case nir_op_feq32:
1350
case nir_op_fneu32: {
1351
fs_reg dest = result;
1352
1353
const uint32_t bit_size = nir_src_bit_size(instr->src[0].src);
1354
if (bit_size != 32)
1355
dest = bld.vgrf(op[0].type, 1);
1356
1357
bld.CMP(dest, op[0], op[1], brw_cmod_for_nir_comparison(instr->op));
1358
1359
if (bit_size > 32) {
1360
bld.MOV(result, subscript(dest, BRW_REGISTER_TYPE_UD, 0));
1361
} else if (bit_size < 32) {
1362
/* When we convert the result to 32-bit we need to be careful and do
1363
* it as a signed conversion to get sign extension (for 32-bit true)
1364
*/
1365
const brw_reg_type src_type =
1366
brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_D);
1367
1368
bld.MOV(retype(result, BRW_REGISTER_TYPE_D), retype(dest, src_type));
1369
}
1370
break;
1371
}
1372
1373
case nir_op_ilt32:
1374
case nir_op_ult32:
1375
case nir_op_ige32:
1376
case nir_op_uge32:
1377
case nir_op_ieq32:
1378
case nir_op_ine32: {
1379
fs_reg dest = result;
1380
1381
const uint32_t bit_size = type_sz(op[0].type) * 8;
1382
if (bit_size != 32)
1383
dest = bld.vgrf(op[0].type, 1);
1384
1385
bld.CMP(dest, op[0], op[1],
1386
brw_cmod_for_nir_comparison(instr->op));
1387
1388
if (bit_size > 32) {
1389
bld.MOV(result, subscript(dest, BRW_REGISTER_TYPE_UD, 0));
1390
} else if (bit_size < 32) {
1391
/* When we convert the result to 32-bit we need to be careful and do
1392
* it as a signed conversion to get sign extension (for 32-bit true)
1393
*/
1394
const brw_reg_type src_type =
1395
brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_D);
1396
1397
bld.MOV(retype(result, BRW_REGISTER_TYPE_D), retype(dest, src_type));
1398
}
1399
break;
1400
}
1401
1402
case nir_op_inot:
1403
if (devinfo->ver >= 8) {
1404
nir_alu_instr *inot_src_instr = nir_src_as_alu_instr(instr->src[0].src);
1405
1406
if (inot_src_instr != NULL &&
1407
(inot_src_instr->op == nir_op_ior ||
1408
inot_src_instr->op == nir_op_ixor ||
1409
inot_src_instr->op == nir_op_iand)) {
1410
/* The sources of the source logical instruction are now the
1411
* sources of the instruction that will be generated.
1412
*/
1413
prepare_alu_destination_and_sources(bld, inot_src_instr, op, false);
1414
resolve_inot_sources(bld, inot_src_instr, op);
1415
1416
/* Smash all of the sources and destination to be signed. This
1417
* doesn't matter for the operation of the instruction, but cmod
1418
* propagation fails on unsigned sources with negation (due to
1419
* fs_inst::can_do_cmod returning false).
1420
*/
1421
result.type =
1422
brw_type_for_nir_type(devinfo,
1423
(nir_alu_type)(nir_type_int |
1424
nir_dest_bit_size(instr->dest.dest)));
1425
op[0].type =
1426
brw_type_for_nir_type(devinfo,
1427
(nir_alu_type)(nir_type_int |
1428
nir_src_bit_size(inot_src_instr->src[0].src)));
1429
op[1].type =
1430
brw_type_for_nir_type(devinfo,
1431
(nir_alu_type)(nir_type_int |
1432
nir_src_bit_size(inot_src_instr->src[1].src)));
1433
1434
/* For XOR, only invert one of the sources. Arbitrarily choose
1435
* the first source.
1436
*/
1437
op[0].negate = !op[0].negate;
1438
if (inot_src_instr->op != nir_op_ixor)
1439
op[1].negate = !op[1].negate;
1440
1441
switch (inot_src_instr->op) {
1442
case nir_op_ior:
1443
bld.AND(result, op[0], op[1]);
1444
return;
1445
1446
case nir_op_iand:
1447
bld.OR(result, op[0], op[1]);
1448
return;
1449
1450
case nir_op_ixor:
1451
bld.XOR(result, op[0], op[1]);
1452
return;
1453
1454
default:
1455
unreachable("impossible opcode");
1456
}
1457
}
1458
op[0] = resolve_source_modifiers(op[0]);
1459
}
1460
bld.NOT(result, op[0]);
1461
break;
1462
case nir_op_ixor:
1463
if (devinfo->ver >= 8) {
1464
resolve_inot_sources(bld, instr, op);
1465
}
1466
bld.XOR(result, op[0], op[1]);
1467
break;
1468
case nir_op_ior:
1469
if (devinfo->ver >= 8) {
1470
resolve_inot_sources(bld, instr, op);
1471
}
1472
bld.OR(result, op[0], op[1]);
1473
break;
1474
case nir_op_iand:
1475
if (devinfo->ver >= 8) {
1476
resolve_inot_sources(bld, instr, op);
1477
}
1478
bld.AND(result, op[0], op[1]);
1479
break;
1480
1481
case nir_op_fdot2:
1482
case nir_op_fdot3:
1483
case nir_op_fdot4:
1484
case nir_op_b32all_fequal2:
1485
case nir_op_b32all_iequal2:
1486
case nir_op_b32all_fequal3:
1487
case nir_op_b32all_iequal3:
1488
case nir_op_b32all_fequal4:
1489
case nir_op_b32all_iequal4:
1490
case nir_op_b32any_fnequal2:
1491
case nir_op_b32any_inequal2:
1492
case nir_op_b32any_fnequal3:
1493
case nir_op_b32any_inequal3:
1494
case nir_op_b32any_fnequal4:
1495
case nir_op_b32any_inequal4:
1496
unreachable("Lowered by nir_lower_alu_reductions");
1497
1498
case nir_op_ldexp:
1499
unreachable("not reached: should be handled by ldexp_to_arith()");
1500
1501
case nir_op_fsqrt:
1502
inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
1503
break;
1504
1505
case nir_op_frsq:
1506
inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
1507
break;
1508
1509
case nir_op_i2b32:
1510
case nir_op_f2b32: {
1511
uint32_t bit_size = nir_src_bit_size(instr->src[0].src);
1512
if (bit_size == 64) {
1513
/* two-argument instructions can't take 64-bit immediates */
1514
fs_reg zero;
1515
fs_reg tmp;
1516
1517
if (instr->op == nir_op_f2b32) {
1518
zero = vgrf(glsl_type::double_type);
1519
tmp = vgrf(glsl_type::double_type);
1520
bld.MOV(zero, setup_imm_df(bld, 0.0));
1521
} else {
1522
zero = vgrf(glsl_type::int64_t_type);
1523
tmp = vgrf(glsl_type::int64_t_type);
1524
bld.MOV(zero, brw_imm_q(0));
1525
}
1526
1527
/* A SIMD16 execution needs to be split in two instructions, so use
1528
* a vgrf instead of the flag register as dst so instruction splitting
1529
* works
1530
*/
1531
bld.CMP(tmp, op[0], zero, BRW_CONDITIONAL_NZ);
1532
bld.MOV(result, subscript(tmp, BRW_REGISTER_TYPE_UD, 0));
1533
} else {
1534
fs_reg zero;
1535
if (bit_size == 32) {
1536
zero = instr->op == nir_op_f2b32 ? brw_imm_f(0.0f) : brw_imm_d(0);
1537
} else {
1538
assert(bit_size == 16);
1539
zero = instr->op == nir_op_f2b32 ?
1540
retype(brw_imm_w(0), BRW_REGISTER_TYPE_HF) : brw_imm_w(0);
1541
}
1542
bld.CMP(result, op[0], zero, BRW_CONDITIONAL_NZ);
1543
}
1544
break;
1545
}
1546
1547
case nir_op_ftrunc:
1548
inst = bld.RNDZ(result, op[0]);
1549
if (devinfo->ver < 6) {
1550
set_condmod(BRW_CONDITIONAL_R, inst);
1551
set_predicate(BRW_PREDICATE_NORMAL,
1552
bld.ADD(result, result, brw_imm_f(1.0f)));
1553
inst = bld.MOV(result, result); /* for potential saturation */
1554
}
1555
break;
1556
1557
case nir_op_fceil: {
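/* ceil(x) is computed as -floor(-x): negate the source, round down with RNDD, then negate the result. */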
1558
op[0].negate = !op[0].negate;
1559
fs_reg temp = vgrf(glsl_type::float_type);
1560
bld.RNDD(temp, op[0]);
1561
temp.negate = true;
1562
inst = bld.MOV(result, temp);
1563
break;
1564
}
1565
case nir_op_ffloor:
1566
inst = bld.RNDD(result, op[0]);
1567
break;
1568
case nir_op_ffract:
1569
inst = bld.FRC(result, op[0]);
1570
break;
1571
case nir_op_fround_even:
1572
inst = bld.RNDE(result, op[0]);
1573
if (devinfo->ver < 6) {
1574
set_condmod(BRW_CONDITIONAL_R, inst);
1575
set_predicate(BRW_PREDICATE_NORMAL,
1576
bld.ADD(result, result, brw_imm_f(1.0f)));
1577
inst = bld.MOV(result, result); /* for potential saturation */
1578
}
1579
break;
1580
1581
case nir_op_fquantize2f16: {
1582
fs_reg tmp16 = bld.vgrf(BRW_REGISTER_TYPE_D);
1583
fs_reg tmp32 = bld.vgrf(BRW_REGISTER_TYPE_F);
1584
fs_reg zero = bld.vgrf(BRW_REGISTER_TYPE_F);
1585
1586
/* The destination stride must be at least as big as the source stride. */
1587
tmp16.type = BRW_REGISTER_TYPE_W;
1588
tmp16.stride = 2;
1589
1590
/* Check for denormal */
1591
fs_reg abs_src0 = op[0];
1592
abs_src0.abs = true;
1593
bld.CMP(bld.null_reg_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
1594
BRW_CONDITIONAL_L);
1595
/* Get the appropriately signed zero */
1596
bld.AND(retype(zero, BRW_REGISTER_TYPE_UD),
1597
retype(op[0], BRW_REGISTER_TYPE_UD),
1598
brw_imm_ud(0x80000000));
1599
/* Do the actual F32 -> F16 -> F32 conversion */
1600
bld.emit(BRW_OPCODE_F32TO16, tmp16, op[0]);
1601
bld.emit(BRW_OPCODE_F16TO32, tmp32, tmp16);
1602
/* Select that or zero based on normal status */
1603
inst = bld.SEL(result, zero, tmp32);
1604
inst->predicate = BRW_PREDICATE_NORMAL;
1605
break;
1606
}
1607
1608
case nir_op_imin:
1609
case nir_op_umin:
1610
case nir_op_fmin:
1611
inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_L);
1612
break;
1613
1614
case nir_op_imax:
1615
case nir_op_umax:
1616
case nir_op_fmax:
1617
inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_GE);
1618
break;
1619
1620
case nir_op_pack_snorm_2x16:
1621
case nir_op_pack_snorm_4x8:
1622
case nir_op_pack_unorm_2x16:
1623
case nir_op_pack_unorm_4x8:
1624
case nir_op_unpack_snorm_2x16:
1625
case nir_op_unpack_snorm_4x8:
1626
case nir_op_unpack_unorm_2x16:
1627
case nir_op_unpack_unorm_4x8:
1628
case nir_op_unpack_half_2x16:
1629
case nir_op_pack_half_2x16:
1630
unreachable("not reached: should be handled by lower_packing_builtins");
1631
1632
case nir_op_unpack_half_2x16_split_x_flush_to_zero:
1633
assert(FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16 & execution_mode);
1634
FALLTHROUGH;
1635
case nir_op_unpack_half_2x16_split_x:
1636
inst = bld.emit(BRW_OPCODE_F16TO32, result,
1637
subscript(op[0], BRW_REGISTER_TYPE_UW, 0));
1638
break;
1639
1640
case nir_op_unpack_half_2x16_split_y_flush_to_zero:
1641
assert(FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16 & execution_mode);
1642
FALLTHROUGH;
1643
case nir_op_unpack_half_2x16_split_y:
1644
inst = bld.emit(BRW_OPCODE_F16TO32, result,
1645
subscript(op[0], BRW_REGISTER_TYPE_UW, 1));
1646
break;
1647
1648
case nir_op_pack_64_2x32_split:
1649
case nir_op_pack_32_2x16_split:
1650
bld.emit(FS_OPCODE_PACK, result, op[0], op[1]);
1651
break;
1652
1653
case nir_op_unpack_64_2x32_split_x:
1654
case nir_op_unpack_64_2x32_split_y: {
1655
if (instr->op == nir_op_unpack_64_2x32_split_x)
1656
bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 0));
1657
else
1658
bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 1));
1659
break;
1660
}
1661
1662
case nir_op_unpack_32_2x16_split_x:
1663
case nir_op_unpack_32_2x16_split_y: {
1664
if (instr->op == nir_op_unpack_32_2x16_split_x)
1665
bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UW, 0));
1666
else
1667
bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UW, 1));
1668
break;
1669
}
1670
1671
case nir_op_fpow:
1672
inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
1673
break;
1674
1675
case nir_op_bitfield_reverse:
1676
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1677
bld.BFREV(result, op[0]);
1678
break;
1679
1680
case nir_op_bit_count:
1681
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1682
bld.CBIT(result, op[0]);
1683
break;
1684
1685
case nir_op_ufind_msb: {
1686
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1687
emit_find_msb_using_lzd(bld, result, op[0], false);
1688
break;
1689
}
1690
1691
case nir_op_uclz:
1692
assert(nir_dest_bit_size(instr->dest.dest) == 32);
1693
bld.LZD(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
1694
break;
1695
1696
case nir_op_ifind_msb: {
1697
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1698
1699
if (devinfo->ver < 7) {
1700
emit_find_msb_using_lzd(bld, result, op[0], true);
1701
} else {
1702
bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
1703
1704
/* FBH counts from the MSB side, while GLSL's findMSB() wants the
1705
* count from the LSB side. If FBH didn't return an error
1706
* (0xFFFFFFFF), then subtract the result from 31 to convert the MSB
1707
* count into an LSB count.
1708
*/
1709
bld.CMP(bld.null_reg_d(), result, brw_imm_d(-1), BRW_CONDITIONAL_NZ);
1710
1711
inst = bld.ADD(result, result, brw_imm_d(31));
1712
inst->predicate = BRW_PREDICATE_NORMAL;
1713
inst->src[0].negate = true;
1714
}
1715
break;
1716
}
1717
1718
case nir_op_find_lsb:
1719
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1720
1721
if (devinfo->ver < 7) {
1722
fs_reg temp = vgrf(glsl_type::int_type);
1723
1724
/* (x & -x) generates a value that consists of only the LSB of x.
1725
* For all powers of 2, findMSB(y) == findLSB(y).
1726
*/
1727
fs_reg src = retype(op[0], BRW_REGISTER_TYPE_D);
1728
fs_reg negated_src = src;
1729
1730
/* One must be negated, and the other must be non-negated. It
1731
* doesn't matter which is which.
1732
*/
1733
negated_src.negate = true;
1734
src.negate = false;
1735
1736
bld.AND(temp, src, negated_src);
1737
emit_find_msb_using_lzd(bld, result, temp, false);
1738
} else {
1739
bld.FBL(result, op[0]);
1740
}
1741
break;
1742
1743
case nir_op_ubitfield_extract:
1744
case nir_op_ibitfield_extract:
1745
unreachable("should have been lowered");
1746
case nir_op_ubfe:
1747
case nir_op_ibfe:
1748
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1749
bld.BFE(result, op[2], op[1], op[0]);
1750
break;
1751
case nir_op_bfm:
1752
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1753
bld.BFI1(result, op[0], op[1]);
1754
break;
1755
case nir_op_bfi:
1756
assert(nir_dest_bit_size(instr->dest.dest) < 64);
1757
bld.BFI2(result, op[0], op[1], op[2]);
1758
break;
1759
1760
case nir_op_bitfield_insert:
1761
unreachable("not reached: should have been lowered");
1762
1763
case nir_op_ishl:
1764
bld.SHL(result, op[0], op[1]);
1765
break;
1766
case nir_op_ishr:
1767
bld.ASR(result, op[0], op[1]);
1768
break;
1769
case nir_op_ushr:
1770
bld.SHR(result, op[0], op[1]);
1771
break;
1772
1773
case nir_op_urol:
1774
bld.ROL(result, op[0], op[1]);
1775
break;
1776
case nir_op_uror:
1777
bld.ROR(result, op[0], op[1]);
1778
break;
1779
1780
case nir_op_pack_half_2x16_split:
1781
bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
1782
break;
1783
1784
case nir_op_ffma:
1785
if (nir_has_any_rounding_mode_enabled(execution_mode)) {
1786
brw_rnd_mode rnd =
1787
brw_rnd_mode_from_execution_mode(execution_mode);
1788
bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
1789
brw_imm_d(rnd));
1790
}
1791
1792
inst = bld.MAD(result, op[2], op[1], op[0]);
1793
break;
1794
1795
case nir_op_flrp:
1796
if (nir_has_any_rounding_mode_enabled(execution_mode)) {
1797
brw_rnd_mode rnd =
1798
brw_rnd_mode_from_execution_mode(execution_mode);
1799
bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
1800
brw_imm_d(rnd));
1801
}
1802
1803
inst = bld.LRP(result, op[0], op[1], op[2]);
1804
break;
1805
1806
case nir_op_b32csel:
1807
if (optimize_frontfacing_ternary(instr, result))
1808
return;
1809
1810
bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
1811
inst = bld.SEL(result, op[1], op[2]);
1812
inst->predicate = BRW_PREDICATE_NORMAL;
1813
break;
1814
1815
case nir_op_extract_u8:
1816
case nir_op_extract_i8: {
1817
unsigned byte = nir_src_as_uint(instr->src[1].src);
1818
1819
/* The PRMs say:
1820
*
1821
* BDW+
1822
* There is no direct conversion from B/UB to Q/UQ or Q/UQ to B/UB.
1823
* Use two instructions and a word or DWord intermediate integer type.
1824
*/
1825
if (nir_dest_bit_size(instr->dest.dest) == 64) {
1826
const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
1827
1828
if (instr->op == nir_op_extract_i8) {
1829
/* If we need to sign extend, extract to a word first */
1830
fs_reg w_temp = bld.vgrf(BRW_REGISTER_TYPE_W);
1831
bld.MOV(w_temp, subscript(op[0], type, byte));
1832
bld.MOV(result, w_temp);
1833
} else if (byte & 1) {
1834
/* Extract the high byte from the word containing the desired byte
1835
* offset.
1836
*/
1837
bld.SHR(result,
1838
subscript(op[0], BRW_REGISTER_TYPE_UW, byte / 2),
1839
brw_imm_uw(8));
1840
} else {
1841
/* Otherwise use an AND with 0xff and a word type */
1842
bld.AND(result,
1843
subscript(op[0], BRW_REGISTER_TYPE_UW, byte / 2),
1844
brw_imm_uw(0xff));
1845
}
1846
} else {
1847
const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
1848
bld.MOV(result, subscript(op[0], type, byte));
1849
}
1850
break;
1851
}
1852
1853
case nir_op_extract_u16:
1854
case nir_op_extract_i16: {
1855
const brw_reg_type type = brw_int_type(2, instr->op == nir_op_extract_i16);
1856
unsigned word = nir_src_as_uint(instr->src[1].src);
1857
bld.MOV(result, subscript(op[0], type, word));
1858
break;
1859
}
1860
1861
default:
1862
unreachable("unhandled instruction");
1863
}
1864
1865
/* If we need to do a boolean resolve, replace the result with -(x & 1)
1866
* to sign extend the low bit to 0/~0
1867
*/
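/* e.g. a result of 1 becomes -(1 & 1) == 0xFFFFFFFF (~0), while a result
 * of 0 becomes -(0 & 1) == 0; only the low bit decides the resolved value.
 */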
1868
if (devinfo->ver <= 5 &&
1869
!result.is_null() &&
1870
(instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
1871
fs_reg masked = vgrf(glsl_type::int_type);
1872
bld.AND(masked, result, brw_imm_d(1));
1873
masked.negate = true;
1874
bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
1875
}
1876
}
1877
1878
void
1879
fs_visitor::nir_emit_load_const(const fs_builder &bld,
1880
nir_load_const_instr *instr)
1881
{
1882
const brw_reg_type reg_type =
1883
brw_reg_type_from_bit_size(instr->def.bit_size, BRW_REGISTER_TYPE_D);
1884
fs_reg reg = bld.vgrf(reg_type, instr->def.num_components);
1885
1886
switch (instr->def.bit_size) {
1887
case 8:
1888
for (unsigned i = 0; i < instr->def.num_components; i++)
1889
bld.MOV(offset(reg, bld, i), setup_imm_b(bld, instr->value[i].i8));
1890
break;
1891
1892
case 16:
1893
for (unsigned i = 0; i < instr->def.num_components; i++)
1894
bld.MOV(offset(reg, bld, i), brw_imm_w(instr->value[i].i16));
1895
break;
1896
1897
case 32:
1898
for (unsigned i = 0; i < instr->def.num_components; i++)
1899
bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value[i].i32));
1900
break;
1901
1902
case 64:
1903
assert(devinfo->ver >= 7);
1904
if (devinfo->ver == 7) {
1905
/* We don't get 64-bit integer types until gfx8 */
1906
for (unsigned i = 0; i < instr->def.num_components; i++) {
1907
bld.MOV(retype(offset(reg, bld, i), BRW_REGISTER_TYPE_DF),
1908
setup_imm_df(bld, instr->value[i].f64));
1909
}
1910
} else {
1911
for (unsigned i = 0; i < instr->def.num_components; i++)
1912
bld.MOV(offset(reg, bld, i), brw_imm_q(instr->value[i].i64));
1913
}
1914
break;
1915
1916
default:
1917
unreachable("Invalid bit size");
1918
}
1919
1920
nir_ssa_values[instr->def.index] = reg;
1921
}
1922
1923
fs_reg
1924
fs_visitor::get_nir_src(const nir_src &src)
1925
{
1926
fs_reg reg;
1927
if (src.is_ssa) {
1928
if (nir_src_is_undef(src)) {
1929
const brw_reg_type reg_type =
1930
brw_reg_type_from_bit_size(src.ssa->bit_size, BRW_REGISTER_TYPE_D);
1931
reg = bld.vgrf(reg_type, src.ssa->num_components);
1932
} else {
1933
reg = nir_ssa_values[src.ssa->index];
1934
}
1935
} else {
1936
/* We don't handle indirects on locals */
1937
assert(src.reg.indirect == NULL);
1938
reg = offset(nir_locals[src.reg.reg->index], bld,
1939
src.reg.base_offset * src.reg.reg->num_components);
1940
}
1941
1942
if (nir_src_bit_size(src) == 64 && devinfo->ver == 7) {
1943
/* The only 64-bit type available on gfx7 is DF, so use that. */
1944
reg.type = BRW_REGISTER_TYPE_DF;
1945
} else {
1946
/* To avoid floating-point denorm flushing problems, set the type by
1947
* default to an integer type - instructions that need floating point
1948
* semantics will set this to F if they need to
1949
*/
1950
reg.type = brw_reg_type_from_bit_size(nir_src_bit_size(src),
1951
BRW_REGISTER_TYPE_D);
1952
}
1953
1954
return reg;
1955
}
1956
1957
/**
1958
* Return an IMM for constants; otherwise call get_nir_src() as normal.
1959
*
1960
* This function should not be called on any value which may be 64 bits.
1961
* We could theoretically support 64-bit on gfx8+ but we choose not to
1962
* because it wouldn't work in general (no gfx7 support) and there are
1963
* enough restrictions in 64-bit immediates that you can't take the return
1964
* value and treat it the same as the result of get_nir_src().
1965
*/
1966
fs_reg
1967
fs_visitor::get_nir_src_imm(const nir_src &src)
1968
{
1969
assert(nir_src_bit_size(src) == 32);
1970
return nir_src_is_const(src) ?
1971
fs_reg(brw_imm_d(nir_src_as_int(src))) : get_nir_src(src);
1972
}
1973
1974
fs_reg
1975
fs_visitor::get_nir_dest(const nir_dest &dest)
1976
{
1977
if (dest.is_ssa) {
1978
const brw_reg_type reg_type =
1979
brw_reg_type_from_bit_size(dest.ssa.bit_size,
1980
dest.ssa.bit_size == 8 ?
1981
BRW_REGISTER_TYPE_D :
1982
BRW_REGISTER_TYPE_F);
1983
nir_ssa_values[dest.ssa.index] =
1984
bld.vgrf(reg_type, dest.ssa.num_components);
1985
bld.UNDEF(nir_ssa_values[dest.ssa.index]);
1986
return nir_ssa_values[dest.ssa.index];
1987
} else {
1988
/* We don't handle indirects on locals */
1989
assert(dest.reg.indirect == NULL);
1990
return offset(nir_locals[dest.reg.reg->index], bld,
1991
dest.reg.base_offset * dest.reg.reg->num_components);
1992
}
1993
}
1994
1995
void
1996
fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
1997
unsigned wr_mask)
1998
{
1999
for (unsigned i = 0; i < 4; i++) {
2000
if (!((wr_mask >> i) & 1))
2001
continue;
2002
2003
fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
2004
new_inst->dst = offset(new_inst->dst, bld, i);
2005
for (unsigned j = 0; j < new_inst->sources; j++)
2006
if (new_inst->src[j].file == VGRF)
2007
new_inst->src[j] = offset(new_inst->src[j], bld, i);
2008
2009
bld.emit(new_inst);
2010
}
2011
}
2012
2013
static fs_inst *
2014
emit_pixel_interpolater_send(const fs_builder &bld,
2015
enum opcode opcode,
2016
const fs_reg &dst,
2017
const fs_reg &src,
2018
const fs_reg &desc,
2019
glsl_interp_mode interpolation)
2020
{
2021
struct brw_wm_prog_data *wm_prog_data =
2022
brw_wm_prog_data(bld.shader->stage_prog_data);
2023
2024
fs_inst *inst = bld.emit(opcode, dst, src, desc);
2025
/* 2 floats per slot returned */
2026
inst->size_written = 2 * dst.component_size(inst->exec_size);
2027
inst->pi_noperspective = interpolation == INTERP_MODE_NOPERSPECTIVE;
2028
2029
wm_prog_data->pulls_bary = true;
2030
2031
return inst;
2032
}
2033
2034
/**
2035
* Computes 1 << x, given a D/UD register containing some value x.
2036
*/
2037
static fs_reg
2038
intexp2(const fs_builder &bld, const fs_reg &x)
2039
{
2040
assert(x.type == BRW_REGISTER_TYPE_UD || x.type == BRW_REGISTER_TYPE_D);
2041
2042
fs_reg result = bld.vgrf(x.type, 1);
2043
fs_reg one = bld.vgrf(x.type, 1);
2044
2045
bld.MOV(one, retype(brw_imm_d(1), one.type));
2046
bld.SHL(result, one, x);
2047
return result;
2048
}
2049
2050
void
2051
fs_visitor::emit_gs_end_primitive(const nir_src &vertex_count_nir_src)
2052
{
2053
assert(stage == MESA_SHADER_GEOMETRY);
2054
2055
struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
2056
2057
if (gs_compile->control_data_header_size_bits == 0)
2058
return;
2059
2060
/* We can only do EndPrimitive() functionality when the control data
2061
* consists of cut bits. Fortunately, the only time it isn't is when the
2062
* output type is points, in which case EndPrimitive() is a no-op.
2063
*/
2064
if (gs_prog_data->control_data_format !=
2065
GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
2066
return;
2067
}
2068
2069
/* Cut bits use one bit per vertex. */
2070
assert(gs_compile->control_data_bits_per_vertex == 1);
2071
2072
fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
2073
vertex_count.type = BRW_REGISTER_TYPE_UD;
2074
2075
/* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
2076
* vertex n, 0 otherwise. So all we need to do here is mark bit
2077
* (vertex_count - 1) % 32 in the cut_bits register to indicate that
2078
* EndPrimitive() was called after emitting vertex (vertex_count - 1);
2079
* vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
2080
*
2081
* Note that if EndPrimitive() is called before emitting any vertices, this
2082
* will cause us to set bit 31 of the control_data_bits register to 1.
2083
* That's fine because:
2084
*
2085
* - If max_vertices < 32, then vertex number 31 (zero-based) will never be
2086
* output, so the hardware will ignore cut bit 31.
2087
*
2088
* - If max_vertices == 32, then vertex number 31 is guaranteed to be the
2089
* last vertex, so setting cut bit 31 has no effect (since the primitive
2090
* is automatically ended when the GS terminates).
2091
*
2092
* - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
2093
* control_data_bits register to 0 when the first vertex is emitted.
2094
*/
2095
2096
const fs_builder abld = bld.annotate("end primitive");
2097
2098
/* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
2099
fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2100
abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
2101
fs_reg mask = intexp2(abld, prev_count);
2102
/* Note: we're relying on the fact that the GEN SHL instruction only pays
2103
* attention to the lower 5 bits of its second source argument, so on this
2104
* architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
2105
* ((vertex_count - 1) % 32).
2106
*/
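/* e.g. for vertex_count == 33, prev_count == 32 and the SHL sees only
 * 32 & 31 == 0, so mask == 1 << 0, exactly 1 << ((33 - 1) % 32).
 */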
2107
abld.OR(this->control_data_bits, this->control_data_bits, mask);
2108
}
2109
2110
void
2111
fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
2112
{
2113
assert(stage == MESA_SHADER_GEOMETRY);
2114
assert(gs_compile->control_data_bits_per_vertex != 0);
2115
2116
struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
2117
2118
const fs_builder abld = bld.annotate("emit control data bits");
2119
const fs_builder fwa_bld = bld.exec_all();
2120
2121
/* We use a single UD register to accumulate control data bits (32 bits
2122
* for each of the SIMD8 channels). So we need to write a DWord (32 bits)
2123
* at a time.
2124
*
2125
* Unfortunately, the URB_WRITE_SIMD8 message uses 128-bit (OWord) offsets.
2126
* We have to select a 128-bit group via the Global and Per-Slot Offsets, then
2127
* use the Channel Mask phase to enable/disable which DWord within that
2128
* group to write. (Remember, different SIMD8 channels may have emitted
2129
* different numbers of vertices, so we may need per-slot offsets.)
2130
*
2131
* Channel masking presents an annoying problem: we may have to replicate
2132
* the data up to 4 times:
2133
*
2134
* Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data, Data.
2135
*
2136
* To avoid penalizing shaders that emit a small number of vertices, we
2137
* can avoid these sometimes: if the size of the control data header is
2138
* <= 128 bits, then there is only 1 OWord. All SIMD8 channels will land
* in the same 128-bit group, so we can skip per-slot offsets.
2140
*
2141
* Similarly, if the control data header is <= 32 bits, there is only one
2142
* DWord, so we can skip channel masks.
2143
*/
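/* e.g. a GS using cut bits (1 bit per vertex) with max_vertices == 96 has
 * a 96-bit control data header: more than 32 bits, so channel masks are
 * needed, but no more than 128 bits, so per-slot offsets can be skipped.
 */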
2144
enum opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
2145
2146
fs_reg channel_mask, per_slot_offset;
2147
2148
if (gs_compile->control_data_header_size_bits > 32) {
2149
opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
2150
channel_mask = vgrf(glsl_type::uint_type);
2151
}
2152
2153
if (gs_compile->control_data_header_size_bits > 128) {
2154
opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT;
2155
per_slot_offset = vgrf(glsl_type::uint_type);
2156
}
2157
2158
/* Figure out which DWord we're trying to write to using the formula:
2159
*
2160
* dword_index = (vertex_count - 1) * bits_per_vertex / 32
2161
*
2162
* Since bits_per_vertex is a power of two, and is known at compile
2163
* time, this can be optimized to:
2164
*
2165
* dword_index = (vertex_count - 1) >> (5 - log2(bits_per_vertex))
*
* (the code below computes the shift as 6 - util_last_bit(bits_per_vertex),
* which is the same thing, since util_last_bit(x) == log2(x) + 1 for
* powers of two).
*/
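/* For example, with bits_per_vertex == 2 (stream IDs),
 * util_last_bit(2) == 2, so the shift below is 6 - 2 == 4 and
 * dword_index == (vertex_count - 1) / 16, i.e. a new DWord every
 * 16 vertices, matching 2 bits per vertex.
 */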
2167
if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) {
2168
fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2169
fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2170
abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
2171
unsigned log2_bits_per_vertex =
2172
util_last_bit(gs_compile->control_data_bits_per_vertex);
2173
abld.SHR(dword_index, prev_count, brw_imm_ud(6u - log2_bits_per_vertex));
2174
2175
if (per_slot_offset.file != BAD_FILE) {
2176
/* Set the per-slot offset to dword_index / 4, so that we'll write to
2177
* the appropriate OWord within the control data header.
2178
*/
2179
abld.SHR(per_slot_offset, dword_index, brw_imm_ud(2u));
2180
}
2181
2182
/* Set the channel masks to 1 << (dword_index % 4), so that we'll
2183
* write to the appropriate DWORD within the OWORD.
2184
*/
2185
fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2186
fwa_bld.AND(channel, dword_index, brw_imm_ud(3u));
2187
channel_mask = intexp2(fwa_bld, channel);
2188
/* Then the channel masks need to be in bits 23:16. */
2189
fwa_bld.SHL(channel_mask, channel_mask, brw_imm_ud(16u));
2190
}
2191
2192
/* Store the control data bits in the message payload and send it. */
2193
unsigned mlen = 2;
2194
if (channel_mask.file != BAD_FILE)
2195
mlen += 4; /* channel masks, plus 3 extra copies of the data */
2196
if (per_slot_offset.file != BAD_FILE)
2197
mlen++;
2198
2199
fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
2200
fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
2201
unsigned i = 0;
2202
sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
2203
if (per_slot_offset.file != BAD_FILE)
2204
sources[i++] = per_slot_offset;
2205
if (channel_mask.file != BAD_FILE)
2206
sources[i++] = channel_mask;
2207
while (i < mlen) {
2208
sources[i++] = this->control_data_bits;
2209
}
2210
2211
abld.LOAD_PAYLOAD(payload, sources, mlen, mlen);
2212
fs_inst *inst = abld.emit(opcode, reg_undef, payload);
2213
inst->mlen = mlen;
2214
/* We need to increment Global Offset by 256 bits to make room for
2215
* Broadwell's extra "Vertex Count" payload at the beginning of the
2216
* URB entry. Since this is an OWord message, Global Offset is counted
2217
* in 128-bit units, so we must set it to 2.
2218
*/
2219
if (gs_prog_data->static_vertex_count == -1)
2220
inst->offset = 2;
2221
}
2222
2223
void
2224
fs_visitor::set_gs_stream_control_data_bits(const fs_reg &vertex_count,
2225
unsigned stream_id)
2226
{
2227
/* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */
2228
2229
/* Note: we are calling this *before* increasing vertex_count, so
2230
* this->vertex_count == vertex_count - 1 in the formula above.
2231
*/
2232
2233
/* Stream mode uses 2 bits per vertex */
2234
assert(gs_compile->control_data_bits_per_vertex == 2);
2235
2236
/* Must be a valid stream */
2237
assert(stream_id < MAX_VERTEX_STREAMS);
2238
2239
/* Control data bits are initialized to 0 so we don't have to set any
2240
* bits when sending vertices to stream 0.
2241
*/
2242
if (stream_id == 0)
2243
return;
2244
2245
const fs_builder abld = bld.annotate("set stream control data bits", NULL);
2246
2247
/* reg::sid = stream_id */
2248
fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2249
abld.MOV(sid, brw_imm_ud(stream_id));
2250
2251
/* reg:shift_count = 2 * (vertex_count - 1) */
2252
fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2253
abld.SHL(shift_count, vertex_count, brw_imm_ud(1u));
2254
2255
/* Note: we're relying on the fact that the GEN SHL instruction only pays
2256
* attention to the lower 5 bits of its second source argument, so on this
2257
* architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
2258
* stream_id << ((2 * (vertex_count - 1)) % 32).
2259
*/
2260
fs_reg mask = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2261
abld.SHL(mask, sid, shift_count);
2262
abld.OR(this->control_data_bits, this->control_data_bits, mask);
2263
}
2264
2265
void
2266
fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
2267
unsigned stream_id)
2268
{
2269
assert(stage == MESA_SHADER_GEOMETRY);
2270
2271
struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
2272
2273
fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
2274
vertex_count.type = BRW_REGISTER_TYPE_UD;
2275
2276
/* Haswell and later hardware ignores the "Render Stream Select" bits
2277
* from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
2278
* and instead sends all primitives down the pipeline for rasterization.
2279
* If the SOL stage is enabled, "Render Stream Select" is honored and
2280
* primitives bound to non-zero streams are discarded after stream output.
2281
*
2282
* Since the only purpose of primitives sent to non-zero streams is to
2283
* be recorded by transform feedback, we can simply discard all geometry
2284
* bound to these streams when transform feedback is disabled.
2285
*/
2286
if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
2287
return;
2288
2289
/* If we're outputting 32 control data bits or less, then we can wait
2290
* until the shader is over to output them all. Otherwise we need to
2291
* output them as we go. Now is the time to do it, since we're about to
2292
* output the vertex_count'th vertex, so it's guaranteed that the
2293
* control data bits associated with the (vertex_count - 1)th vertex are
2294
* correct.
2295
*/
2296
if (gs_compile->control_data_header_size_bits > 32) {
2297
const fs_builder abld =
2298
bld.annotate("emit vertex: emit control data bits");
2299
2300
/* Only emit control data bits if we've finished accumulating a batch
2301
* of 32 bits. This is the case when:
2302
*
2303
* (vertex_count * bits_per_vertex) % 32 == 0
2304
*
2305
* (in other words, when the last 5 bits of vertex_count *
2306
* bits_per_vertex are 0). Assuming bits_per_vertex == 2^n for some
2307
* integer n (which is always the case, since bits_per_vertex is
2308
* always 1 or 2), this is equivalent to requiring that the last 5-n
2309
* bits of vertex_count are 0:
2310
*
2311
* vertex_count & (2^(5-n) - 1) == 0
2312
*
2313
* 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
2314
* equivalent to:
2315
*
2316
* vertex_count & (32 / bits_per_vertex - 1) == 0
2317
*
2318
* TODO: If vertex_count is an immediate, we could do some of this math
2319
* at compile time...
2320
*/
2321
fs_inst *inst =
2322
abld.AND(bld.null_reg_d(), vertex_count,
2323
brw_imm_ud(32u / gs_compile->control_data_bits_per_vertex - 1u));
2324
inst->conditional_mod = BRW_CONDITIONAL_Z;
2325
2326
abld.IF(BRW_PREDICATE_NORMAL);
2327
/* If vertex_count is 0, then no control data bits have been
2328
* accumulated yet, so we can skip emitting them.
2329
*/
2330
abld.CMP(bld.null_reg_d(), vertex_count, brw_imm_ud(0u),
2331
BRW_CONDITIONAL_NEQ);
2332
abld.IF(BRW_PREDICATE_NORMAL);
2333
emit_gs_control_data_bits(vertex_count);
2334
abld.emit(BRW_OPCODE_ENDIF);
2335
2336
/* Reset control_data_bits to 0 so we can start accumulating a new
2337
* batch.
2338
*
2339
* Note: in the case where vertex_count == 0, this neutralizes the
2340
* effect of any call to EndPrimitive() that the shader may have
2341
* made before outputting its first vertex.
2342
*/
2343
inst = abld.MOV(this->control_data_bits, brw_imm_ud(0u));
2344
inst->force_writemask_all = true;
2345
abld.emit(BRW_OPCODE_ENDIF);
2346
}
2347
2348
emit_urb_writes(vertex_count);
2349
2350
/* In stream mode we have to set control data bits for all vertices
2351
* unless we have disabled control data bits completely (which we do
* for GL_POINTS outputs that don't use streams).
2353
*/
2354
if (gs_compile->control_data_header_size_bits > 0 &&
2355
gs_prog_data->control_data_format ==
2356
GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
2357
set_gs_stream_control_data_bits(vertex_count, stream_id);
2358
}
2359
}
2360
2361
void
2362
fs_visitor::emit_gs_input_load(const fs_reg &dst,
2363
const nir_src &vertex_src,
2364
unsigned base_offset,
2365
const nir_src &offset_src,
2366
unsigned num_components,
2367
unsigned first_component)
2368
{
2369
assert(type_sz(dst.type) == 4);
2370
struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
2371
const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
2372
2373
/* TODO: figure out push input layout for invocations == 1 */
2374
if (gs_prog_data->invocations == 1 &&
2375
nir_src_is_const(offset_src) && nir_src_is_const(vertex_src) &&
2376
4 * (base_offset + nir_src_as_uint(offset_src)) < push_reg_count) {
2377
int imm_offset = (base_offset + nir_src_as_uint(offset_src)) * 4 +
2378
nir_src_as_uint(vertex_src) * push_reg_count;
2379
for (unsigned i = 0; i < num_components; i++) {
2380
bld.MOV(offset(dst, bld, i),
2381
fs_reg(ATTR, imm_offset + i + first_component, dst.type));
2382
}
2383
return;
2384
}
2385
2386
/* Resort to the pull model. Ensure the VUE handles are provided. */
2387
assert(gs_prog_data->base.include_vue_handles);
2388
2389
unsigned first_icp_handle = gs_prog_data->include_primitive_id ? 3 : 2;
2390
fs_reg icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2391
2392
if (gs_prog_data->invocations == 1) {
2393
if (nir_src_is_const(vertex_src)) {
2394
/* The vertex index is constant; just select the proper URB handle. */
2395
icp_handle =
2396
retype(brw_vec8_grf(first_icp_handle + nir_src_as_uint(vertex_src), 0),
2397
BRW_REGISTER_TYPE_UD);
2398
} else {
2399
/* The vertex index is non-constant. We need to use indirect
2400
* addressing to fetch the proper URB handle.
2401
*
2402
* First, we start with the sequence <7, 6, 5, 4, 3, 2, 1, 0>
2403
* indicating that channel <n> should read the handle from
2404
* DWord <n>. We convert that to bytes by multiplying by 4.
2405
*
2406
* Next, we convert the vertex index to bytes by multiplying
2407
* by 32 (shifting by 5), and add the two together. This is
2408
* the final indirect byte offset.
2409
*/
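/* e.g. SIMD channel 3 reading vertex 2 uses an indirect offset of
 * 2 * 32 + 3 * 4 == 76 bytes, i.e. DWord 3 of the third GRF of URB
 * handles.
 */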
2410
fs_reg sequence = bld.vgrf(BRW_REGISTER_TYPE_UW, 1);
2411
fs_reg channel_offsets = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2412
fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2413
fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2414
2415
/* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
2416
bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
2417
/* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
2418
bld.SHL(channel_offsets, sequence, brw_imm_ud(2u));
2419
/* Convert vertex_index to bytes (multiply by 32) */
2420
bld.SHL(vertex_offset_bytes,
2421
retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2422
brw_imm_ud(5u));
2423
bld.ADD(icp_offset_bytes, vertex_offset_bytes, channel_offsets);
2424
2425
/* Use first_icp_handle as the base offset. There is one register
2426
* of URB handles per vertex, so inform the register allocator that
2427
* we might read up to nir->info.gs.vertices_in registers.
2428
*/
2429
bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2430
retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
2431
fs_reg(icp_offset_bytes),
2432
brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE));
2433
}
2434
} else {
2435
assert(gs_prog_data->invocations > 1);
2436
2437
if (nir_src_is_const(vertex_src)) {
2438
unsigned vertex = nir_src_as_uint(vertex_src);
2439
assert(devinfo->ver >= 9 || vertex <= 5);
2440
bld.MOV(icp_handle,
2441
retype(brw_vec1_grf(first_icp_handle + vertex / 8, vertex % 8),
2442
BRW_REGISTER_TYPE_UD));
2443
} else {
2444
/* The vertex index is non-constant. We need to use indirect
2445
* addressing to fetch the proper URB handle.
2446
*/
2448
fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2449
2450
/* Convert vertex_index to bytes (multiply by 4) */
2451
bld.SHL(icp_offset_bytes,
2452
retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2453
brw_imm_ud(2u));
2454
2455
/* Use first_icp_handle as the base offset. There is one DWord
2456
* of URB handles per vertex, so inform the register allocator that
2457
* we might read up to ceil(nir->info.gs.vertices_in / 8) registers.
2458
*/
2459
bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2460
retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
2461
fs_reg(icp_offset_bytes),
2462
brw_imm_ud(DIV_ROUND_UP(nir->info.gs.vertices_in, 8) *
2463
REG_SIZE));
2464
}
2465
}
2466
2467
fs_inst *inst;
2468
fs_reg indirect_offset = get_nir_src(offset_src);
2469
2470
if (nir_src_is_const(offset_src)) {
2471
/* Constant indexing - use global offset. */
2472
if (first_component != 0) {
2473
unsigned read_components = num_components + first_component;
2474
fs_reg tmp = bld.vgrf(dst.type, read_components);
2475
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, icp_handle);
2476
inst->size_written = read_components *
2477
tmp.component_size(inst->exec_size);
2478
for (unsigned i = 0; i < num_components; i++) {
2479
bld.MOV(offset(dst, bld, i),
2480
offset(tmp, bld, i + first_component));
2481
}
2482
} else {
2483
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
2484
inst->size_written = num_components *
2485
dst.component_size(inst->exec_size);
2486
}
2487
inst->offset = base_offset + nir_src_as_uint(offset_src);
2488
inst->mlen = 1;
2489
} else {
2490
/* Indirect indexing - use per-slot offsets as well. */
2491
const fs_reg srcs[] = { icp_handle, indirect_offset };
2492
unsigned read_components = num_components + first_component;
2493
fs_reg tmp = bld.vgrf(dst.type, read_components);
2494
fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2495
bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2496
if (first_component != 0) {
2497
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2498
payload);
2499
inst->size_written = read_components *
2500
tmp.component_size(inst->exec_size);
2501
for (unsigned i = 0; i < num_components; i++) {
2502
bld.MOV(offset(dst, bld, i),
2503
offset(tmp, bld, i + first_component));
2504
}
2505
} else {
2506
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
2507
inst->size_written = num_components *
2508
dst.component_size(inst->exec_size);
2509
}
2510
inst->offset = base_offset;
2511
inst->mlen = 2;
2512
}
2513
}
2514
2515
fs_reg
2516
fs_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
2517
{
2518
nir_src *offset_src = nir_get_io_offset_src(instr);
2519
2520
if (nir_src_is_const(*offset_src)) {
2521
/* The only constant offset we should find is 0. brw_nir.c's
2522
* add_const_offset_to_base() will fold other constant offsets
2523
* into instr->const_index[0].
2524
*/
2525
assert(nir_src_as_uint(*offset_src) == 0);
2526
return fs_reg();
2527
}
2528
2529
return get_nir_src(*offset_src);
2530
}
2531
2532
void
2533
fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
2534
nir_intrinsic_instr *instr)
2535
{
2536
assert(stage == MESA_SHADER_VERTEX);
2537
2538
fs_reg dest;
2539
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2540
dest = get_nir_dest(instr->dest);
2541
2542
switch (instr->intrinsic) {
2543
case nir_intrinsic_load_vertex_id:
2544
case nir_intrinsic_load_base_vertex:
2545
unreachable("should be lowered by nir_lower_system_values()");
2546
2547
case nir_intrinsic_load_input: {
2548
assert(nir_dest_bit_size(instr->dest) == 32);
2549
fs_reg src = fs_reg(ATTR, nir_intrinsic_base(instr) * 4, dest.type);
2550
src = offset(src, bld, nir_intrinsic_component(instr));
2551
src = offset(src, bld, nir_src_as_uint(instr->src[0]));
2552
2553
for (unsigned i = 0; i < instr->num_components; i++)
2554
bld.MOV(offset(dest, bld, i), offset(src, bld, i));
2555
break;
2556
}
2557
2558
case nir_intrinsic_load_vertex_id_zero_base:
2559
case nir_intrinsic_load_instance_id:
2560
case nir_intrinsic_load_base_instance:
2561
case nir_intrinsic_load_draw_id:
2562
case nir_intrinsic_load_first_vertex:
2563
case nir_intrinsic_load_is_indexed_draw:
2564
unreachable("lowered by brw_nir_lower_vs_inputs");
2565
2566
default:
2567
nir_emit_intrinsic(bld, instr);
2568
break;
2569
}
2570
}
2571
2572
fs_reg
2573
fs_visitor::get_tcs_single_patch_icp_handle(const fs_builder &bld,
2574
nir_intrinsic_instr *instr)
2575
{
2576
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
2577
const nir_src &vertex_src = instr->src[0];
2578
nir_intrinsic_instr *vertex_intrin = nir_src_as_intrinsic(vertex_src);
2579
fs_reg icp_handle;
2580
2581
if (nir_src_is_const(vertex_src)) {
2582
/* Emit a MOV to resolve <0,1,0> regioning. */
2583
icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2584
unsigned vertex = nir_src_as_uint(vertex_src);
2585
bld.MOV(icp_handle,
2586
retype(brw_vec1_grf(1 + (vertex >> 3), vertex & 7),
2587
BRW_REGISTER_TYPE_UD));
2588
} else if (tcs_prog_data->instances == 1 && vertex_intrin &&
2589
vertex_intrin->intrinsic == nir_intrinsic_load_invocation_id) {
2590
/* For the common case of only 1 instance, an array index of
2591
* gl_InvocationID means reading g1. Skip all the indirect work.
2592
*/
2593
icp_handle = retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD);
2594
} else {
2595
/* The vertex index is non-constant. We need to use indirect
2596
* addressing to fetch the proper URB handle.
2597
*/
2598
icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2599
2600
/* Each ICP handle is a single DWord (4 bytes) */
2601
fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2602
bld.SHL(vertex_offset_bytes,
2603
retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2604
brw_imm_ud(2u));
2605
2606
/* Start at g1. We might read up to 4 registers. */
2607
bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2608
retype(brw_vec8_grf(1, 0), icp_handle.type), vertex_offset_bytes,
2609
brw_imm_ud(4 * REG_SIZE));
2610
}
2611
2612
return icp_handle;
2613
}
2614
2615
fs_reg
2616
fs_visitor::get_tcs_eight_patch_icp_handle(const fs_builder &bld,
2617
nir_intrinsic_instr *instr)
2618
{
2619
struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) key;
2620
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
2621
const nir_src &vertex_src = instr->src[0];
2622
2623
unsigned first_icp_handle = tcs_prog_data->include_primitive_id ? 3 : 2;
2624
2625
if (nir_src_is_const(vertex_src)) {
2626
return fs_reg(retype(brw_vec8_grf(first_icp_handle +
2627
nir_src_as_uint(vertex_src), 0),
2628
BRW_REGISTER_TYPE_UD));
2629
}
2630
2631
/* The vertex index is non-constant. We need to use indirect
2632
* addressing to fetch the proper URB handle.
2633
*
2634
* First, we start with the sequence <7, 6, 5, 4, 3, 2, 1, 0>
2635
* indicating that channel <n> should read the handle from
2636
* DWord <n>. We convert that to bytes by multiplying by 4.
2637
*
2638
* Next, we convert the vertex index to bytes by multiplying
2639
* by 32 (shifting by 5), and add the two together. This is
2640
* the final indirect byte offset.
2641
*/
2642
fs_reg icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2643
fs_reg sequence = bld.vgrf(BRW_REGISTER_TYPE_UW, 1);
2644
fs_reg channel_offsets = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2645
fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2646
fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2647
2648
/* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
2649
bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
2650
/* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
2651
bld.SHL(channel_offsets, sequence, brw_imm_ud(2u));
2652
/* Convert vertex_index to bytes (multiply by 32) */
2653
bld.SHL(vertex_offset_bytes,
2654
retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2655
brw_imm_ud(5u));
2656
bld.ADD(icp_offset_bytes, vertex_offset_bytes, channel_offsets);
2657
2658
/* Use first_icp_handle as the base offset. There is one register
2659
* of URB handles per vertex, so inform the register allocator that
2660
* we might read up to tcs_key->input_vertices registers.
2661
*/
2662
bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2663
retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
2664
icp_offset_bytes, brw_imm_ud(tcs_key->input_vertices * REG_SIZE));
2665
2666
return icp_handle;
2667
}
2668
2669
struct brw_reg
2670
fs_visitor::get_tcs_output_urb_handle()
2671
{
2672
struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
2673
2674
if (vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH) {
2675
return retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD);
2676
} else {
2677
assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH);
2678
return retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD);
2679
}
2680
}
2681
2682
void
2683
fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
2684
nir_intrinsic_instr *instr)
2685
{
2686
assert(stage == MESA_SHADER_TESS_CTRL);
2687
struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) key;
2688
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
2689
struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
2690
2691
bool eight_patch =
2692
vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH;
2693
2694
fs_reg dst;
2695
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2696
dst = get_nir_dest(instr->dest);
2697
2698
switch (instr->intrinsic) {
2699
case nir_intrinsic_load_primitive_id:
2700
bld.MOV(dst, fs_reg(eight_patch ? brw_vec8_grf(2, 0)
2701
: brw_vec1_grf(0, 1)));
2702
break;
2703
case nir_intrinsic_load_invocation_id:
2704
bld.MOV(retype(dst, invocation_id.type), invocation_id);
2705
break;
2706
case nir_intrinsic_load_patch_vertices_in:
2707
bld.MOV(retype(dst, BRW_REGISTER_TYPE_D),
2708
brw_imm_d(tcs_key->input_vertices));
2709
break;
2710
2711
case nir_intrinsic_control_barrier: {
2712
if (tcs_prog_data->instances == 1)
2713
break;
2714
2715
fs_reg m0 = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2716
fs_reg m0_2 = component(m0, 2);
2717
2718
const fs_builder chanbld = bld.exec_all().group(1, 0);
2719
2720
/* Zero the message header */
2721
bld.exec_all().MOV(m0, brw_imm_ud(0u));
2722
2723
if (devinfo->ver < 11) {
2724
/* Copy "Barrier ID" from r0.2, bits 16:13 */
2725
chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
2726
brw_imm_ud(INTEL_MASK(16, 13)));
2727
2728
/* Shift it up to bits 27:24. */
2729
chanbld.SHL(m0_2, m0_2, brw_imm_ud(11));
2730
} else {
2731
chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
2732
brw_imm_ud(INTEL_MASK(30, 24)));
2733
}
2734
2735
/* Set the Barrier Count and the enable bit */
2736
if (devinfo->ver < 11) {
2737
chanbld.OR(m0_2, m0_2,
2738
brw_imm_ud(tcs_prog_data->instances << 9 | (1 << 15)));
2739
} else {
2740
chanbld.OR(m0_2, m0_2,
2741
brw_imm_ud(tcs_prog_data->instances << 8 | (1 << 15)));
2742
}
2743
2744
bld.emit(SHADER_OPCODE_BARRIER, bld.null_reg_ud(), m0);
2745
break;
2746
}
2747
2748
case nir_intrinsic_load_input:
2749
unreachable("nir_lower_io should never give us these.");
2750
break;
2751
2752
case nir_intrinsic_load_per_vertex_input: {
2753
assert(nir_dest_bit_size(instr->dest) == 32);
2754
fs_reg indirect_offset = get_indirect_offset(instr);
2755
unsigned imm_offset = instr->const_index[0];
2756
fs_inst *inst;
2757
2758
fs_reg icp_handle =
2759
eight_patch ? get_tcs_eight_patch_icp_handle(bld, instr)
2760
: get_tcs_single_patch_icp_handle(bld, instr);
2761
2762
/* We can only read two double components with each URB read, so
2763
* we send two read messages in that case, each one loading up to
2764
* two double components.
2765
*/
2766
unsigned num_components = instr->num_components;
2767
unsigned first_component = nir_intrinsic_component(instr);
2768
2769
if (indirect_offset.file == BAD_FILE) {
2770
/* Constant indexing - use global offset. */
2771
if (first_component != 0) {
2772
unsigned read_components = num_components + first_component;
2773
fs_reg tmp = bld.vgrf(dst.type, read_components);
2774
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, icp_handle);
2775
for (unsigned i = 0; i < num_components; i++) {
2776
bld.MOV(offset(dst, bld, i),
2777
offset(tmp, bld, i + first_component));
2778
}
2779
} else {
2780
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
2781
}
2782
inst->offset = imm_offset;
2783
inst->mlen = 1;
2784
} else {
2785
/* Indirect indexing - use per-slot offsets as well. */
2786
const fs_reg srcs[] = { icp_handle, indirect_offset };
2787
fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2788
bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2789
if (first_component != 0) {
2790
unsigned read_components = num_components + first_component;
2791
fs_reg tmp = bld.vgrf(dst.type, read_components);
2792
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2793
payload);
2794
for (unsigned i = 0; i < num_components; i++) {
2795
bld.MOV(offset(dst, bld, i),
2796
offset(tmp, bld, i + first_component));
2797
}
2798
} else {
2799
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst,
2800
payload);
2801
}
2802
inst->offset = imm_offset;
2803
inst->mlen = 2;
2804
}
2805
inst->size_written = (num_components + first_component) *
2806
inst->dst.component_size(inst->exec_size);
2807
2808
/* Copy the temporary to the destination to deal with writemasking.
2809
*
2810
* Also attempt to deal with gl_PointSize being in the .w component.
2811
*/
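/* Concretely: a constant-offset read of slot 0 (the incoming vertex's VUE
 * header) is widened to a full vec4 and only component 3 is copied out,
 * since that is where the VUE header stores gl_PointSize.
 */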
2812
if (inst->offset == 0 && indirect_offset.file == BAD_FILE) {
2813
assert(type_sz(dst.type) == 4);
2814
inst->dst = bld.vgrf(dst.type, 4);
2815
inst->size_written = 4 * REG_SIZE;
2816
bld.MOV(dst, offset(inst->dst, bld, 3));
2817
}
2818
break;
2819
}
2820
2821
case nir_intrinsic_load_output:
2822
case nir_intrinsic_load_per_vertex_output: {
2823
assert(nir_dest_bit_size(instr->dest) == 32);
2824
fs_reg indirect_offset = get_indirect_offset(instr);
2825
unsigned imm_offset = instr->const_index[0];
2826
unsigned first_component = nir_intrinsic_component(instr);
2827
2828
struct brw_reg output_handles = get_tcs_output_urb_handle();
2829
2830
fs_inst *inst;
2831
if (indirect_offset.file == BAD_FILE) {
2832
/* This MOV replicates the output handle to all enabled channels
2833
* in SINGLE_PATCH mode.
2834
*/
2835
fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2836
bld.MOV(patch_handle, output_handles);
2837
2838
{
2839
if (first_component != 0) {
2840
unsigned read_components =
2841
instr->num_components + first_component;
2842
fs_reg tmp = bld.vgrf(dst.type, read_components);
2843
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp,
2844
patch_handle);
2845
inst->size_written = read_components * REG_SIZE;
2846
for (unsigned i = 0; i < instr->num_components; i++) {
2847
bld.MOV(offset(dst, bld, i),
2848
offset(tmp, bld, i + first_component));
2849
}
2850
} else {
2851
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst,
2852
patch_handle);
2853
inst->size_written = instr->num_components * REG_SIZE;
2854
}
2855
inst->offset = imm_offset;
2856
inst->mlen = 1;
2857
}
2858
} else {
2859
/* Indirect indexing - use per-slot offsets as well. */
2860
const fs_reg srcs[] = { output_handles, indirect_offset };
2861
fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2862
bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2863
if (first_component != 0) {
2864
unsigned read_components =
2865
instr->num_components + first_component;
2866
fs_reg tmp = bld.vgrf(dst.type, read_components);
2867
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2868
payload);
2869
inst->size_written = read_components * REG_SIZE;
2870
for (unsigned i = 0; i < instr->num_components; i++) {
2871
bld.MOV(offset(dst, bld, i),
2872
offset(tmp, bld, i + first_component));
2873
}
2874
} else {
2875
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst,
2876
payload);
2877
inst->size_written = instr->num_components * REG_SIZE;
2878
}
2879
inst->offset = imm_offset;
2880
inst->mlen = 2;
2881
}
2882
break;
2883
}
2884
2885
case nir_intrinsic_store_output:
2886
case nir_intrinsic_store_per_vertex_output: {
2887
assert(nir_src_bit_size(instr->src[0]) == 32);
2888
fs_reg value = get_nir_src(instr->src[0]);
2889
fs_reg indirect_offset = get_indirect_offset(instr);
2890
unsigned imm_offset = instr->const_index[0];
2891
unsigned mask = instr->const_index[1];
2892
unsigned header_regs = 0;
2893
struct brw_reg output_handles = get_tcs_output_urb_handle();
2894
2895
fs_reg srcs[7];
2896
srcs[header_regs++] = output_handles;
2897
2898
if (indirect_offset.file != BAD_FILE) {
2899
srcs[header_regs++] = indirect_offset;
2900
}
2901
2902
if (mask == 0)
2903
break;
2904
2905
unsigned num_components = util_last_bit(mask);
2906
enum opcode opcode;
2907
2908
/* We can only pack two 64-bit components in a single message, so send
2909
* 2 messages if we have more components
2910
*/
2911
unsigned first_component = nir_intrinsic_component(instr);
2912
mask = mask << first_component;
2913
2914
if (mask != WRITEMASK_XYZW) {
2915
srcs[header_regs++] = brw_imm_ud(mask << 16);
2916
opcode = indirect_offset.file != BAD_FILE ?
2917
SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
2918
SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
2919
} else {
2920
opcode = indirect_offset.file != BAD_FILE ?
2921
SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT :
2922
SHADER_OPCODE_URB_WRITE_SIMD8;
2923
}
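/* e.g. a two-component store starting at component 1 has mask == 0b0110
 * after the shift above; that is not WRITEMASK_XYZW, so the channel-mask
 * DWord (0b0110 << 16) was added to the header and a *_MASKED opcode is
 * used.
 */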
2924
2925
for (unsigned i = 0; i < num_components; i++) {
2926
if (!(mask & (1 << (i + first_component))))
2927
continue;
2928
2929
srcs[header_regs + i + first_component] = offset(value, bld, i);
2930
}
2931
2932
unsigned mlen = header_regs + num_components + first_component;
2933
fs_reg payload =
2934
bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
2935
bld.LOAD_PAYLOAD(payload, srcs, mlen, header_regs);
2936
2937
fs_inst *inst = bld.emit(opcode, bld.null_reg_ud(), payload);
2938
inst->offset = imm_offset;
2939
inst->mlen = mlen;
2940
break;
2941
}
2942
2943
default:
2944
nir_emit_intrinsic(bld, instr);
2945
break;
2946
}
2947
}
2948
2949
void
2950
fs_visitor::nir_emit_tes_intrinsic(const fs_builder &bld,
2951
nir_intrinsic_instr *instr)
2952
{
2953
assert(stage == MESA_SHADER_TESS_EVAL);
2954
struct brw_tes_prog_data *tes_prog_data = brw_tes_prog_data(prog_data);
2955
2956
fs_reg dest;
2957
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2958
dest = get_nir_dest(instr->dest);
2959
2960
switch (instr->intrinsic) {
2961
case nir_intrinsic_load_primitive_id:
2962
bld.MOV(dest, fs_reg(brw_vec1_grf(0, 1)));
2963
break;
2964
case nir_intrinsic_load_tess_coord:
2965
/* gl_TessCoord is part of the payload in g1-3 */
2966
for (unsigned i = 0; i < 3; i++) {
2967
bld.MOV(offset(dest, bld, i), fs_reg(brw_vec8_grf(1 + i, 0)));
2968
}
2969
break;
2970
2971
case nir_intrinsic_load_input:
2972
case nir_intrinsic_load_per_vertex_input: {
2973
assert(nir_dest_bit_size(instr->dest) == 32);
2974
fs_reg indirect_offset = get_indirect_offset(instr);
2975
unsigned imm_offset = instr->const_index[0];
2976
unsigned first_component = nir_intrinsic_component(instr);
2977
2978
fs_inst *inst;
2979
if (indirect_offset.file == BAD_FILE) {
2980
/* Arbitrarily only push up to 32 vec4 slots worth of data,
2981
* which is 16 registers (since each holds 2 vec4 slots).
2982
*/
2983
const unsigned max_push_slots = 32;
2984
if (imm_offset < max_push_slots) {
2985
fs_reg src = fs_reg(ATTR, imm_offset / 2, dest.type);
2986
for (int i = 0; i < instr->num_components; i++) {
2987
unsigned comp = 4 * (imm_offset % 2) + i + first_component;
2988
bld.MOV(offset(dest, bld, i), component(src, comp));
2989
}
2990
2991
tes_prog_data->base.urb_read_length =
2992
MAX2(tes_prog_data->base.urb_read_length,
2993
(imm_offset / 2) + 1);
2994
} else {
2995
/* Replicate the patch handle to all enabled channels */
2996
const fs_reg srcs[] = {
2997
retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)
2998
};
2999
fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
3000
bld.LOAD_PAYLOAD(patch_handle, srcs, ARRAY_SIZE(srcs), 0);
3001
3002
if (first_component != 0) {
3003
unsigned read_components =
3004
instr->num_components + first_component;
3005
fs_reg tmp = bld.vgrf(dest.type, read_components);
3006
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp,
3007
patch_handle);
3008
inst->size_written = read_components * REG_SIZE;
3009
for (unsigned i = 0; i < instr->num_components; i++) {
3010
bld.MOV(offset(dest, bld, i),
3011
offset(tmp, bld, i + first_component));
3012
}
3013
} else {
3014
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dest,
3015
patch_handle);
3016
inst->size_written = instr->num_components * REG_SIZE;
3017
}
3018
inst->mlen = 1;
3019
inst->offset = imm_offset;
3020
}
3021
} else {
3022
/* Indirect indexing - use per-slot offsets as well. */
3023
3024
/* We can only read two double components with each URB read, so
3025
* we send two read messages in that case, each one loading up to
3026
* two double components.
3027
*/
3028
unsigned num_components = instr->num_components;
3029
const fs_reg srcs[] = {
3030
retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
3031
indirect_offset
3032
};
3033
fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
3034
bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
3035
3036
if (first_component != 0) {
3037
unsigned read_components =
3038
num_components + first_component;
3039
fs_reg tmp = bld.vgrf(dest.type, read_components);
3040
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
3041
payload);
3042
for (unsigned i = 0; i < num_components; i++) {
3043
bld.MOV(offset(dest, bld, i),
3044
offset(tmp, bld, i + first_component));
3045
}
3046
} else {
3047
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dest,
3048
payload);
3049
}
3050
inst->mlen = 2;
3051
inst->offset = imm_offset;
3052
inst->size_written = (num_components + first_component) *
3053
inst->dst.component_size(inst->exec_size);
3054
}
3055
break;
3056
}
3057
default:
3058
nir_emit_intrinsic(bld, instr);
3059
break;
3060
}
3061
}
3062
3063
void
3064
fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld,
3065
nir_intrinsic_instr *instr)
3066
{
3067
assert(stage == MESA_SHADER_GEOMETRY);
3068
fs_reg indirect_offset;
3069
3070
fs_reg dest;
3071
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3072
dest = get_nir_dest(instr->dest);
3073
3074
switch (instr->intrinsic) {
3075
case nir_intrinsic_load_primitive_id:
3076
assert(stage == MESA_SHADER_GEOMETRY);
3077
assert(brw_gs_prog_data(prog_data)->include_primitive_id);
3078
bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
3079
retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
3080
break;
3081
3082
case nir_intrinsic_load_input:
3083
unreachable("load_input intrinsics are invalid for the GS stage");
3084
3085
case nir_intrinsic_load_per_vertex_input:
3086
emit_gs_input_load(dest, instr->src[0], instr->const_index[0],
3087
instr->src[1], instr->num_components,
3088
nir_intrinsic_component(instr));
3089
break;
3090
3091
case nir_intrinsic_emit_vertex_with_counter:
3092
emit_gs_vertex(instr->src[0], instr->const_index[0]);
3093
break;
3094
3095
case nir_intrinsic_end_primitive_with_counter:
3096
emit_gs_end_primitive(instr->src[0]);
3097
break;
3098
3099
case nir_intrinsic_set_vertex_and_primitive_count:
3100
bld.MOV(this->final_gs_vertex_count, get_nir_src(instr->src[0]));
3101
break;
3102
3103
case nir_intrinsic_load_invocation_id: {
3104
fs_reg val = nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
3105
assert(val.file != BAD_FILE);
3106
dest.type = val.type;
3107
bld.MOV(dest, val);
3108
break;
3109
}
3110
3111
default:
3112
nir_emit_intrinsic(bld, instr);
3113
break;
3114
}
3115
}
3116
3117
/**
3118
* Fetch the current render target layer index.
3119
*/
3120
static fs_reg
3121
fetch_render_target_array_index(const fs_builder &bld)
3122
{
3123
if (bld.shader->devinfo->ver >= 12) {
3124
/* The render target array index is provided in the thread payload as
3125
* bits 26:16 of r1.1.
3126
*/
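/* i.e. the AND below reads the high word of r1.1 (bits 31:16) as a UW and
 * keeps its low 11 bits, which are bits 26:16 of the DWord.
 */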
3127
const fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_UD);
3128
bld.AND(idx, brw_uw1_reg(BRW_GENERAL_REGISTER_FILE, 1, 3),
3129
brw_imm_uw(0x7ff));
3130
return idx;
3131
} else if (bld.shader->devinfo->ver >= 6) {
3132
/* The render target array index is provided in the thread payload as
3133
* bits 26:16 of r0.0.
3134
*/
3135
const fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_UD);
3136
bld.AND(idx, brw_uw1_reg(BRW_GENERAL_REGISTER_FILE, 0, 1),
3137
brw_imm_uw(0x7ff));
3138
return idx;
3139
} else {
3140
/* Pre-SNB we only ever render into the first layer of the framebuffer
3141
* since layered rendering is not implemented.
3142
*/
3143
return brw_imm_ud(0);
3144
}
3145
}
3146
3147
/**
3148
* Fake non-coherent framebuffer read implemented using TXF to fetch from the
3149
* framebuffer at the current fragment coordinates and sample index.
3150
*/
3151
fs_inst *
3152
fs_visitor::emit_non_coherent_fb_read(const fs_builder &bld, const fs_reg &dst,
3153
unsigned target)
3154
{
3155
const struct intel_device_info *devinfo = bld.shader->devinfo;
3156
3157
assert(bld.shader->stage == MESA_SHADER_FRAGMENT);
3158
const brw_wm_prog_key *wm_key =
3159
reinterpret_cast<const brw_wm_prog_key *>(key);
3160
assert(!wm_key->coherent_fb_fetch);
3161
const struct brw_wm_prog_data *wm_prog_data =
3162
brw_wm_prog_data(stage_prog_data);
3163
3164
/* Calculate the surface index relative to the start of the texture binding
3165
* table block, since that's what the texturing messages expect.
3166
*/
3167
const unsigned surface = target +
3168
wm_prog_data->binding_table.render_target_read_start -
3169
wm_prog_data->base.binding_table.texture_start;
3170
3171
/* Calculate the fragment coordinates. */
3172
const fs_reg coords = bld.vgrf(BRW_REGISTER_TYPE_UD, 3);
3173
bld.MOV(offset(coords, bld, 0), pixel_x);
3174
bld.MOV(offset(coords, bld, 1), pixel_y);
3175
bld.MOV(offset(coords, bld, 2), fetch_render_target_array_index(bld));
3176
3177
/* Calculate the sample index and MCS payload when multisampling. Luckily
3178
* the MCS fetch message behaves deterministically for UMS surfaces, so it
3179
* shouldn't be necessary to recompile based on whether the framebuffer is
3180
* CMS or UMS.
3181
*/
3182
if (wm_key->multisample_fbo &&
3183
nir_system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
3184
nir_system_values[SYSTEM_VALUE_SAMPLE_ID] = *emit_sampleid_setup();
3185
3186
const fs_reg sample = nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
3187
const fs_reg mcs = wm_key->multisample_fbo ?
3188
emit_mcs_fetch(coords, 3, brw_imm_ud(surface), fs_reg()) : fs_reg();
3189
3190
/* Use either a normal or a CMS texel fetch message depending on whether
3191
* the framebuffer is single or multisample. On SKL+ use the wide CMS
3192
* message just in case the framebuffer uses 16x multisampling, it should
3193
* be equivalent to the normal CMS fetch for lower multisampling modes.
3194
*/
3195
const opcode op = !wm_key->multisample_fbo ? SHADER_OPCODE_TXF_LOGICAL :
3196
devinfo->ver >= 9 ? SHADER_OPCODE_TXF_CMS_W_LOGICAL :
3197
SHADER_OPCODE_TXF_CMS_LOGICAL;
3198
3199
/* Emit the instruction. */
3200
fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
3201
srcs[TEX_LOGICAL_SRC_COORDINATE] = coords;
3202
srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_ud(0);
3203
srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX] = sample;
3204
srcs[TEX_LOGICAL_SRC_MCS] = mcs;
3205
srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(surface);
3206
srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(0);
3207
srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_ud(3);
3208
srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_ud(0);
3209
3210
fs_inst *inst = bld.emit(op, dst, srcs, ARRAY_SIZE(srcs));
3211
inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
3212
3213
return inst;
3214
}
3215
3216
/**
3217
* Actual coherent framebuffer read implemented using the native render target
3218
* read message. Requires SKL+.
3219
*/
3220
static fs_inst *
3221
emit_coherent_fb_read(const fs_builder &bld, const fs_reg &dst, unsigned target)
3222
{
3223
assert(bld.shader->devinfo->ver >= 9);
3224
fs_inst *inst = bld.emit(FS_OPCODE_FB_READ_LOGICAL, dst);
3225
inst->target = target;
3226
inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
3227
3228
return inst;
3229
}
3230
3231
static fs_reg
3232
alloc_temporary(const fs_builder &bld, unsigned size, fs_reg *regs, unsigned n)
3233
{
3234
if (n && regs[0].file != BAD_FILE) {
3235
return regs[0];
3236
3237
} else {
3238
const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, size);
3239
3240
for (unsigned i = 0; i < n; i++)
3241
regs[i] = tmp;
3242
3243
return tmp;
3244
}
3245
}
3246
3247
static fs_reg
3248
alloc_frag_output(fs_visitor *v, unsigned location)
3249
{
3250
assert(v->stage == MESA_SHADER_FRAGMENT);
3251
const brw_wm_prog_key *const key =
3252
reinterpret_cast<const brw_wm_prog_key *>(v->key);
3253
const unsigned l = GET_FIELD(location, BRW_NIR_FRAG_OUTPUT_LOCATION);
3254
const unsigned i = GET_FIELD(location, BRW_NIR_FRAG_OUTPUT_INDEX);
3255
3256
if (i > 0 || (key->force_dual_color_blend && l == FRAG_RESULT_DATA1))
3257
return alloc_temporary(v->bld, 4, &v->dual_src_output, 1);
3258
3259
else if (l == FRAG_RESULT_COLOR)
3260
return alloc_temporary(v->bld, 4, v->outputs,
3261
MAX2(key->nr_color_regions, 1));
3262
3263
else if (l == FRAG_RESULT_DEPTH)
3264
return alloc_temporary(v->bld, 1, &v->frag_depth, 1);
3265
3266
else if (l == FRAG_RESULT_STENCIL)
3267
return alloc_temporary(v->bld, 1, &v->frag_stencil, 1);
3268
3269
else if (l == FRAG_RESULT_SAMPLE_MASK)
3270
return alloc_temporary(v->bld, 1, &v->sample_mask, 1);
3271
3272
else if (l >= FRAG_RESULT_DATA0 &&
3273
l < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS)
3274
return alloc_temporary(v->bld, 4,
3275
&v->outputs[l - FRAG_RESULT_DATA0], 1);
3276
3277
else
3278
unreachable("Invalid location");
3279
}
3280
3281
void
3282
fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
3283
nir_intrinsic_instr *instr)
3284
{
3285
assert(stage == MESA_SHADER_FRAGMENT);
3286
3287
fs_reg dest;
3288
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3289
dest = get_nir_dest(instr->dest);
3290
3291
switch (instr->intrinsic) {
3292
case nir_intrinsic_load_front_face:
3293
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
3294
*emit_frontfacing_interpolation());
3295
break;
3296
3297
case nir_intrinsic_load_sample_pos: {
3298
fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
3299
assert(sample_pos.file != BAD_FILE);
3300
dest.type = sample_pos.type;
3301
bld.MOV(dest, sample_pos);
3302
bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
3303
break;
3304
}
3305
3306
case nir_intrinsic_load_layer_id:
3307
dest.type = BRW_REGISTER_TYPE_UD;
3308
bld.MOV(dest, fetch_render_target_array_index(bld));
3309
break;
3310
3311
case nir_intrinsic_is_helper_invocation: {
3312
/* Unlike the regular gl_HelperInvocation, that is defined at dispatch,
3313
* the helperInvocationEXT() (aka SpvOpIsHelperInvocationEXT) takes into
3314
* consideration demoted invocations. That information is stored in
3315
* f0.1.
3316
*/
3317
dest.type = BRW_REGISTER_TYPE_UD;
3318
3319
bld.MOV(dest, brw_imm_ud(0));
3320
3321
fs_inst *mov = bld.MOV(dest, brw_imm_ud(~0));
3322
mov->predicate = BRW_PREDICATE_NORMAL;
3323
mov->predicate_inverse = true;
3324
mov->flag_subreg = sample_mask_flag_subreg(this);
3325
break;
3326
}
3327
3328
case nir_intrinsic_load_helper_invocation:
3329
case nir_intrinsic_load_sample_mask_in:
3330
case nir_intrinsic_load_sample_id:
3331
case nir_intrinsic_load_frag_shading_rate: {
3332
gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
3333
fs_reg val = nir_system_values[sv];
3334
assert(val.file != BAD_FILE);
3335
dest.type = val.type;
3336
bld.MOV(dest, val);
3337
break;
3338
}
3339
3340
case nir_intrinsic_store_output: {
3341
const fs_reg src = get_nir_src(instr->src[0]);
3342
const unsigned store_offset = nir_src_as_uint(instr->src[1]);
3343
const unsigned location = nir_intrinsic_base(instr) +
3344
SET_FIELD(store_offset, BRW_NIR_FRAG_OUTPUT_LOCATION);
3345
const fs_reg new_dest = retype(alloc_frag_output(this, location),
3346
src.type);
3347
3348
for (unsigned j = 0; j < instr->num_components; j++)
3349
bld.MOV(offset(new_dest, bld, nir_intrinsic_component(instr) + j),
3350
offset(src, bld, j));
3351
3352
break;
3353
}
3354
3355
case nir_intrinsic_load_output: {
3356
const unsigned l = GET_FIELD(nir_intrinsic_base(instr),
3357
BRW_NIR_FRAG_OUTPUT_LOCATION);
3358
assert(l >= FRAG_RESULT_DATA0);
3359
const unsigned load_offset = nir_src_as_uint(instr->src[0]);
3360
const unsigned target = l - FRAG_RESULT_DATA0 + load_offset;
3361
const fs_reg tmp = bld.vgrf(dest.type, 4);
3362
3363
if (reinterpret_cast<const brw_wm_prog_key *>(key)->coherent_fb_fetch)
3364
emit_coherent_fb_read(bld, tmp, target);
3365
else
3366
emit_non_coherent_fb_read(bld, tmp, target);
3367
3368
for (unsigned j = 0; j < instr->num_components; j++) {
3369
bld.MOV(offset(dest, bld, j),
3370
offset(tmp, bld, nir_intrinsic_component(instr) + j));
3371
}
3372
3373
break;
3374
}
3375
3376
case nir_intrinsic_demote:
3377
case nir_intrinsic_discard:
3378
case nir_intrinsic_terminate:
3379
case nir_intrinsic_demote_if:
3380
case nir_intrinsic_discard_if:
3381
case nir_intrinsic_terminate_if: {
3382
/* We track our discarded pixels in f0.1/f1.0. By predicating on it, we
3383
* can update just the flag bits that aren't yet discarded. If there's
3384
* no condition, we emit a CMP of g0 != g0, so all currently executing
3385
* channels will get turned off.
3386
*/
3387
fs_inst *cmp = NULL;
3388
if (instr->intrinsic == nir_intrinsic_demote_if ||
3389
instr->intrinsic == nir_intrinsic_discard_if ||
3390
instr->intrinsic == nir_intrinsic_terminate_if) {
3391
nir_alu_instr *alu = nir_src_as_alu_instr(instr->src[0]);
3392
3393
if (alu != NULL &&
3394
alu->op != nir_op_bcsel &&
3395
(devinfo->ver > 5 ||
3396
(alu->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) != BRW_NIR_BOOLEAN_NEEDS_RESOLVE ||
3397
alu->op == nir_op_fneu32 || alu->op == nir_op_feq32 ||
3398
alu->op == nir_op_flt32 || alu->op == nir_op_fge32 ||
3399
alu->op == nir_op_ine32 || alu->op == nir_op_ieq32 ||
3400
alu->op == nir_op_ilt32 || alu->op == nir_op_ige32 ||
3401
alu->op == nir_op_ult32 || alu->op == nir_op_uge32)) {
3402
/* Re-emit the instruction that generated the Boolean value, but
3403
* do not store it. Since this instruction will be conditional,
3404
* other instructions that want to use the real Boolean value may
3405
* get garbage. This was a problem for piglit's fs-discard-exit-2
3406
* test.
3407
*
3408
* Ideally we'd detect that the instruction cannot have a
3409
* conditional modifier before emitting the instructions. Alas,
3410
* that is nigh impossible. Instead, we're going to assume the
3411
* instruction (or last instruction) generated can have a
3412
* conditional modifier. If it cannot, fall back to the old-style
3413
* compare, and hope dead code elimination will clean up the
3414
* extra instructions generated.
3415
*/
3416
nir_emit_alu(bld, alu, false);
3417
3418
cmp = (fs_inst *) instructions.get_tail();
3419
if (cmp->conditional_mod == BRW_CONDITIONAL_NONE) {
3420
if (cmp->can_do_cmod())
3421
cmp->conditional_mod = BRW_CONDITIONAL_Z;
3422
else
3423
cmp = NULL;
3424
} else {
3425
/* The old sequence that would have been generated is,
3426
* basically, bool_result == false. This is equivalent to
3427
* !bool_result, so negate the old modifier.
3428
*/
3429
cmp->conditional_mod = brw_negate_cmod(cmp->conditional_mod);
3430
}
3431
}
3432
3433
if (cmp == NULL) {
3434
cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
3435
brw_imm_d(0), BRW_CONDITIONAL_Z);
3436
}
3437
} else {
3438
fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
3439
BRW_REGISTER_TYPE_UW));
3440
cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
3441
}
3442
3443
cmp->predicate = BRW_PREDICATE_NORMAL;
3444
cmp->flag_subreg = sample_mask_flag_subreg(this);
3445
3446
fs_inst *jump = bld.emit(BRW_OPCODE_HALT);
3447
jump->flag_subreg = sample_mask_flag_subreg(this);
3448
jump->predicate_inverse = true;
3449
3450
if (instr->intrinsic == nir_intrinsic_terminate ||
3451
instr->intrinsic == nir_intrinsic_terminate_if) {
3452
jump->predicate = BRW_PREDICATE_NORMAL;
3453
} else {
3454
/* Only jump when the whole quad is demoted. For historical
3455
* reasons this is also used for discard.
3456
*/
3457
jump->predicate = BRW_PREDICATE_ALIGN1_ANY4H;
3458
}
3459
3460
if (devinfo->ver < 7)
3461
limit_dispatch_width(
3462
16, "Fragment discard/demote not implemented in SIMD32 mode.\n");
3463
break;
3464
}
3465
3466
case nir_intrinsic_load_input: {
3467
/* load_input is only used for flat inputs */
3468
assert(nir_dest_bit_size(instr->dest) == 32);
3469
unsigned base = nir_intrinsic_base(instr);
3470
unsigned comp = nir_intrinsic_component(instr);
3471
unsigned num_components = instr->num_components;
3472
3473
/* Special case fields in the VUE header */
3474
if (base == VARYING_SLOT_LAYER)
3475
comp = 1;
3476
else if (base == VARYING_SLOT_VIEWPORT)
3477
comp = 2;
3478
3479
for (unsigned int i = 0; i < num_components; i++) {
3480
bld.MOV(offset(dest, bld, i),
3481
retype(component(interp_reg(base, comp + i), 3), dest.type));
3482
}
3483
break;
3484
}
3485
3486
case nir_intrinsic_load_fs_input_interp_deltas: {
3487
assert(stage == MESA_SHADER_FRAGMENT);
3488
assert(nir_src_as_uint(instr->src[0]) == 0);
3489
fs_reg interp = interp_reg(nir_intrinsic_base(instr),
3490
nir_intrinsic_component(instr));
3491
dest.type = BRW_REGISTER_TYPE_F;
3492
bld.MOV(offset(dest, bld, 0), component(interp, 3));
3493
bld.MOV(offset(dest, bld, 1), component(interp, 1));
3494
bld.MOV(offset(dest, bld, 2), component(interp, 0));
3495
break;
3496
}
3497
3498
case nir_intrinsic_load_barycentric_pixel:
3499
case nir_intrinsic_load_barycentric_centroid:
3500
case nir_intrinsic_load_barycentric_sample: {
3501
/* Use the delta_xy values computed from the payload */
3502
const glsl_interp_mode interp_mode =
3503
(enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
3504
enum brw_barycentric_mode bary =
3505
brw_barycentric_mode(interp_mode, instr->intrinsic);
3506
const fs_reg srcs[] = { offset(this->delta_xy[bary], bld, 0),
3507
offset(this->delta_xy[bary], bld, 1) };
3508
bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
3509
break;
3510
}
3511
3512
case nir_intrinsic_load_barycentric_at_sample: {
3513
const glsl_interp_mode interpolation =
3514
(enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
3515
3516
if (nir_src_is_const(instr->src[0])) {
3517
unsigned msg_data = nir_src_as_uint(instr->src[0]) << 4;
3518
3519
emit_pixel_interpolater_send(bld,
3520
FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3521
dest,
3522
fs_reg(), /* src */
3523
brw_imm_ud(msg_data),
3524
interpolation);
3525
} else {
3526
const fs_reg sample_src = retype(get_nir_src(instr->src[0]),
3527
BRW_REGISTER_TYPE_UD);
3528
3529
if (nir_src_is_dynamically_uniform(instr->src[0])) {
3530
const fs_reg sample_id = bld.emit_uniformize(sample_src);
3531
const fs_reg msg_data = vgrf(glsl_type::uint_type);
3532
bld.exec_all().group(1, 0)
3533
.SHL(msg_data, sample_id, brw_imm_ud(4u));
3534
emit_pixel_interpolater_send(bld,
3535
FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3536
dest,
3537
fs_reg(), /* src */
3538
component(msg_data, 0),
3539
interpolation);
3540
} else {
3541
/* Make a loop that sends a message to the pixel interpolater
3542
* for the sample number in each live channel. If there are
3543
* multiple channels with the same sample number then these
3544
* will be handled simultaneously with a single iteration of
3545
* the loop.
3546
*/
3547
bld.emit(BRW_OPCODE_DO);
3548
3549
/* Get the next live sample number into sample_id */
3550
const fs_reg sample_id = bld.emit_uniformize(sample_src);
3551
3552
/* Set the flag register so that we can perform the send
3553
* message on all channels that have the same sample number
3554
*/
3555
bld.CMP(bld.null_reg_ud(),
3556
sample_src, sample_id,
3557
BRW_CONDITIONAL_EQ);
3558
const fs_reg msg_data = vgrf(glsl_type::uint_type);
3559
bld.exec_all().group(1, 0)
3560
.SHL(msg_data, sample_id, brw_imm_ud(4u));
3561
fs_inst *inst =
3562
emit_pixel_interpolater_send(bld,
3563
FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3564
dest,
3565
fs_reg(), /* src */
3566
component(msg_data, 0),
3567
interpolation);
3568
set_predicate(BRW_PREDICATE_NORMAL, inst);
3569
3570
/* Continue the loop if there are any live channels left */
3571
set_predicate_inv(BRW_PREDICATE_NORMAL,
3572
true, /* inverse */
3573
bld.emit(BRW_OPCODE_WHILE));
3574
}
3575
}
3576
break;
3577
}
3578
3579
case nir_intrinsic_load_barycentric_at_offset: {
3580
const glsl_interp_mode interpolation =
3581
(enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
3582
3583
nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3584
3585
if (const_offset) {
3586
assert(nir_src_bit_size(instr->src[0]) == 32);
3587
unsigned off_x = const_offset[0].u32 & 0xf;
3588
unsigned off_y = const_offset[1].u32 & 0xf;
3589
3590
emit_pixel_interpolater_send(bld,
3591
FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
3592
dest,
3593
fs_reg(), /* src */
3594
brw_imm_ud(off_x | (off_y << 4)),
3595
interpolation);
3596
} else {
3597
fs_reg src = retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_D);
3598
const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET;
3599
emit_pixel_interpolater_send(bld,
3600
opcode,
3601
dest,
3602
src,
3603
brw_imm_ud(0u),
3604
interpolation);
3605
}
3606
break;
3607
}
3608
3609
case nir_intrinsic_load_frag_coord:
3610
emit_fragcoord_interpolation(dest);
3611
break;
3612
3613
case nir_intrinsic_load_interpolated_input: {
3614
assert(instr->src[0].ssa &&
3615
instr->src[0].ssa->parent_instr->type == nir_instr_type_intrinsic);
3616
nir_intrinsic_instr *bary_intrinsic =
3617
nir_instr_as_intrinsic(instr->src[0].ssa->parent_instr);
3618
nir_intrinsic_op bary_intrin = bary_intrinsic->intrinsic;
3619
enum glsl_interp_mode interp_mode =
3620
(enum glsl_interp_mode) nir_intrinsic_interp_mode(bary_intrinsic);
3621
fs_reg dst_xy;
3622
3623
if (bary_intrin == nir_intrinsic_load_barycentric_at_offset ||
3624
bary_intrin == nir_intrinsic_load_barycentric_at_sample) {
3625
/* Use the result of the PI message. */
3626
dst_xy = retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_F);
3627
} else {
3628
/* Use the delta_xy values computed from the payload */
3629
enum brw_barycentric_mode bary =
3630
brw_barycentric_mode(interp_mode, bary_intrin);
3631
dst_xy = this->delta_xy[bary];
3632
}
3633
3634
for (unsigned int i = 0; i < instr->num_components; i++) {
3635
fs_reg interp =
3636
component(interp_reg(nir_intrinsic_base(instr),
3637
nir_intrinsic_component(instr) + i), 0);
3638
interp.type = BRW_REGISTER_TYPE_F;
3639
dest.type = BRW_REGISTER_TYPE_F;
3640
3641
if (devinfo->ver < 6 && interp_mode == INTERP_MODE_SMOOTH) {
3642
fs_reg tmp = vgrf(glsl_type::float_type);
3643
bld.emit(FS_OPCODE_LINTERP, tmp, dst_xy, interp);
3644
bld.MUL(offset(dest, bld, i), tmp, this->pixel_w);
3645
} else {
3646
bld.emit(FS_OPCODE_LINTERP, offset(dest, bld, i), dst_xy, interp);
3647
}
3648
}
3649
break;
3650
}
3651
3652
default:
3653
nir_emit_intrinsic(bld, instr);
3654
break;
3655
}
3656
}
3657
3658
void
3659
fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
3660
nir_intrinsic_instr *instr)
3661
{
3662
assert(stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL);
3663
struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
3664
3665
fs_reg dest;
3666
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3667
dest = get_nir_dest(instr->dest);
3668
3669
switch (instr->intrinsic) {
3670
case nir_intrinsic_control_barrier:
3671
/* The whole workgroup fits in a single HW thread, so all the
3672
* invocations are already executed lock-step. Instead of an actual
3673
* barrier just emit a scheduling fence, that will generate no code.
3674
*/
3675
if (!nir->info.workgroup_size_variable &&
3676
workgroup_size() <= dispatch_width) {
3677
bld.exec_all().group(1, 0).emit(FS_OPCODE_SCHEDULING_FENCE);
3678
break;
3679
}
3680
3681
emit_barrier();
3682
cs_prog_data->uses_barrier = true;
3683
break;
3684
3685
case nir_intrinsic_load_subgroup_id:
3686
if (devinfo->verx10 >= 125)
3687
bld.AND(retype(dest, BRW_REGISTER_TYPE_UD),
3688
retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
3689
brw_imm_ud(INTEL_MASK(7, 0)));
3690
else
3691
bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), subgroup_id);
3692
break;
3693
3694
case nir_intrinsic_load_local_invocation_id:
3695
case nir_intrinsic_load_workgroup_id: {
3696
gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
3697
fs_reg val = nir_system_values[sv];
3698
assert(val.file != BAD_FILE);
3699
dest.type = val.type;
3700
for (unsigned i = 0; i < 3; i++)
3701
bld.MOV(offset(dest, bld, i), offset(val, bld, i));
3702
break;
3703
}
3704
3705
case nir_intrinsic_load_num_workgroups: {
3706
assert(nir_dest_bit_size(instr->dest) == 32);
3707
const unsigned surface =
3708
cs_prog_data->binding_table.work_groups_start;
3709
3710
cs_prog_data->uses_num_work_groups = true;
3711
3712
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
3713
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(surface);
3714
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
3715
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(3); /* num components */
3716
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = brw_imm_ud(0);
3717
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(0);
3718
fs_inst *inst =
3719
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
3720
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
3721
inst->size_written = 3 * dispatch_width * 4;
3722
break;
3723
}
3724
3725
case nir_intrinsic_shared_atomic_add:
3726
case nir_intrinsic_shared_atomic_imin:
3727
case nir_intrinsic_shared_atomic_umin:
3728
case nir_intrinsic_shared_atomic_imax:
3729
case nir_intrinsic_shared_atomic_umax:
3730
case nir_intrinsic_shared_atomic_and:
3731
case nir_intrinsic_shared_atomic_or:
3732
case nir_intrinsic_shared_atomic_xor:
3733
case nir_intrinsic_shared_atomic_exchange:
3734
case nir_intrinsic_shared_atomic_comp_swap:
3735
nir_emit_shared_atomic(bld, brw_aop_for_nir_intrinsic(instr), instr);
3736
break;
3737
case nir_intrinsic_shared_atomic_fmin:
3738
case nir_intrinsic_shared_atomic_fmax:
3739
case nir_intrinsic_shared_atomic_fcomp_swap:
3740
nir_emit_shared_atomic_float(bld, brw_aop_for_nir_intrinsic(instr), instr);
3741
break;
3742
3743
case nir_intrinsic_load_shared: {
3744
assert(devinfo->ver >= 7);
3745
assert(stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL);
3746
3747
const unsigned bit_size = nir_dest_bit_size(instr->dest);
3748
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
3749
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GFX7_BTI_SLM);
3750
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[0]);
3751
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
3752
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(0);
3753
3754
/* Make dest unsigned because that's what the temporary will be */
3755
dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
3756
3757
/* Read the vector */
3758
assert(nir_dest_bit_size(instr->dest) <= 32);
3759
assert(nir_intrinsic_align(instr) > 0);
3760
if (nir_dest_bit_size(instr->dest) == 32 &&
3761
nir_intrinsic_align(instr) >= 4) {
3762
assert(nir_dest_num_components(instr->dest) <= 4);
3763
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
3764
fs_inst *inst =
3765
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
3766
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
3767
inst->size_written = instr->num_components * dispatch_width * 4;
3768
} else {
3769
assert(nir_dest_num_components(instr->dest) == 1);
3770
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
3771
3772
fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
3773
bld.emit(SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
3774
read_result, srcs, SURFACE_LOGICAL_NUM_SRCS);
3775
bld.MOV(dest, subscript(read_result, dest.type, 0));
3776
}
3777
break;
3778
}
3779
3780
case nir_intrinsic_store_shared: {
3781
assert(devinfo->ver >= 7);
3782
assert(stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL);
3783
3784
const unsigned bit_size = nir_src_bit_size(instr->src[0]);
3785
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
3786
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GFX7_BTI_SLM);
3787
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
3788
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
3789
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(1);
3790
3791
fs_reg data = get_nir_src(instr->src[0]);
3792
data.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
3793
3794
assert(nir_src_bit_size(instr->src[0]) <= 32);
3795
assert(nir_intrinsic_write_mask(instr) ==
3796
(1u << instr->num_components) - 1);
3797
assert(nir_intrinsic_align(instr) > 0);
3798
if (nir_src_bit_size(instr->src[0]) == 32 &&
3799
nir_intrinsic_align(instr) >= 4) {
3800
assert(nir_src_num_components(instr->src[0]) <= 4);
3801
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
3802
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
3803
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
3804
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
3805
} else {
3806
assert(nir_src_num_components(instr->src[0]) == 1);
3807
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
3808
3809
srcs[SURFACE_LOGICAL_SRC_DATA] = bld.vgrf(BRW_REGISTER_TYPE_UD);
3810
bld.MOV(srcs[SURFACE_LOGICAL_SRC_DATA], data);
3811
3812
bld.emit(SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
3813
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
3814
}
3815
break;
3816
}
3817
3818
case nir_intrinsic_load_workgroup_size: {
3819
assert(compiler->lower_variable_group_size);
3820
assert(nir->info.workgroup_size_variable);
3821
for (unsigned i = 0; i < 3; i++) {
3822
bld.MOV(retype(offset(dest, bld, i), BRW_REGISTER_TYPE_UD),
3823
group_size[i]);
3824
}
3825
break;
3826
}
3827
3828
default:
3829
nir_emit_intrinsic(bld, instr);
3830
break;
3831
}
3832
}
3833
3834
void
3835
fs_visitor::nir_emit_bs_intrinsic(const fs_builder &bld,
3836
nir_intrinsic_instr *instr)
3837
{
3838
assert(brw_shader_stage_is_bindless(stage));
3839
3840
fs_reg dest;
3841
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3842
dest = get_nir_dest(instr->dest);
3843
3844
switch (instr->intrinsic) {
3845
case nir_intrinsic_load_btd_global_arg_addr_intel:
3846
bld.MOV(dest, retype(brw_vec1_grf(2, 0), dest.type));
3847
break;
3848
3849
case nir_intrinsic_load_btd_local_arg_addr_intel:
3850
bld.MOV(dest, retype(brw_vec1_grf(2, 2), dest.type));
3851
break;
3852
3853
case nir_intrinsic_trace_ray_initial_intel:
3854
bld.emit(RT_OPCODE_TRACE_RAY_LOGICAL,
3855
bld.null_reg_ud(),
3856
brw_imm_ud(BRW_RT_BVH_LEVEL_WORLD),
3857
brw_imm_ud(GEN_RT_TRACE_RAY_INITAL));
3858
break;
3859
3860
case nir_intrinsic_trace_ray_commit_intel:
3861
bld.emit(RT_OPCODE_TRACE_RAY_LOGICAL,
3862
bld.null_reg_ud(),
3863
brw_imm_ud(BRW_RT_BVH_LEVEL_OBJECT),
3864
brw_imm_ud(GEN_RT_TRACE_RAY_COMMIT));
3865
break;
3866
3867
case nir_intrinsic_trace_ray_continue_intel:
3868
bld.emit(RT_OPCODE_TRACE_RAY_LOGICAL,
3869
bld.null_reg_ud(),
3870
brw_imm_ud(BRW_RT_BVH_LEVEL_OBJECT),
3871
brw_imm_ud(GEN_RT_TRACE_RAY_CONTINUE));
3872
break;
3873
3874
default:
3875
nir_emit_intrinsic(bld, instr);
3876
break;
3877
}
3878
}
3879
3880
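/* Fetch the identity value NIR defines for the reduction operation (0 for
* iadd, 1 for imul, and so on) as an immediate of a register type the EU
* can encode; byte-sized values are widened to word immediates.
*/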
static fs_reg
3881
brw_nir_reduction_op_identity(const fs_builder &bld,
3882
nir_op op, brw_reg_type type)
3883
{
3884
nir_const_value value = nir_alu_binop_identity(op, type_sz(type) * 8);
3885
switch (type_sz(type)) {
3886
case 1:
3887
if (type == BRW_REGISTER_TYPE_UB) {
3888
return brw_imm_uw(value.u8);
3889
} else {
3890
assert(type == BRW_REGISTER_TYPE_B);
3891
return brw_imm_w(value.i8);
3892
}
3893
case 2:
3894
return retype(brw_imm_uw(value.u16), type);
3895
case 4:
3896
return retype(brw_imm_ud(value.u32), type);
3897
case 8:
3898
if (type == BRW_REGISTER_TYPE_DF)
3899
return setup_imm_df(bld, value.f64);
3900
else
3901
return retype(brw_imm_u64(value.u64), type);
3902
default:
3903
unreachable("Invalid type size");
3904
}
3905
}
3906
3907
static opcode
3908
brw_op_for_nir_reduction_op(nir_op op)
3909
{
3910
switch (op) {
3911
case nir_op_iadd: return BRW_OPCODE_ADD;
3912
case nir_op_fadd: return BRW_OPCODE_ADD;
3913
case nir_op_imul: return BRW_OPCODE_MUL;
3914
case nir_op_fmul: return BRW_OPCODE_MUL;
3915
case nir_op_imin: return BRW_OPCODE_SEL;
3916
case nir_op_umin: return BRW_OPCODE_SEL;
3917
case nir_op_fmin: return BRW_OPCODE_SEL;
3918
case nir_op_imax: return BRW_OPCODE_SEL;
3919
case nir_op_umax: return BRW_OPCODE_SEL;
3920
case nir_op_fmax: return BRW_OPCODE_SEL;
3921
case nir_op_iand: return BRW_OPCODE_AND;
3922
case nir_op_ior: return BRW_OPCODE_OR;
3923
case nir_op_ixor: return BRW_OPCODE_XOR;
3924
default:
3925
unreachable("Invalid reduction operation");
3926
}
3927
}
3928
3929
static brw_conditional_mod
3930
brw_cond_mod_for_nir_reduction_op(nir_op op)
3931
{
3932
switch (op) {
3933
case nir_op_iadd: return BRW_CONDITIONAL_NONE;
3934
case nir_op_fadd: return BRW_CONDITIONAL_NONE;
3935
case nir_op_imul: return BRW_CONDITIONAL_NONE;
3936
case nir_op_fmul: return BRW_CONDITIONAL_NONE;
3937
case nir_op_imin: return BRW_CONDITIONAL_L;
3938
case nir_op_umin: return BRW_CONDITIONAL_L;
3939
case nir_op_fmin: return BRW_CONDITIONAL_L;
3940
case nir_op_imax: return BRW_CONDITIONAL_GE;
3941
case nir_op_umax: return BRW_CONDITIONAL_GE;
3942
case nir_op_fmax: return BRW_CONDITIONAL_GE;
3943
case nir_op_iand: return BRW_CONDITIONAL_NONE;
3944
case nir_op_ior: return BRW_CONDITIONAL_NONE;
3945
case nir_op_ixor: return BRW_CONDITIONAL_NONE;
3946
default:
3947
unreachable("Invalid reduction operation");
3948
}
3949
}
3950
3951
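/* Resolve an image intrinsic's surface source to a single scalar surface
* index: apply the stage's image binding-table offset when one is in use,
* then uniformize the result so every channel uses the same descriptor.
*/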
fs_reg
3952
fs_visitor::get_nir_image_intrinsic_image(const brw::fs_builder &bld,
3953
nir_intrinsic_instr *instr)
3954
{
3955
fs_reg image = retype(get_nir_src_imm(instr->src[0]), BRW_REGISTER_TYPE_UD);
3956
fs_reg surf_index = image;
3957
3958
if (stage_prog_data->binding_table.image_start > 0) {
3959
if (image.file == BRW_IMMEDIATE_VALUE) {
3960
surf_index =
3961
brw_imm_ud(image.d + stage_prog_data->binding_table.image_start);
3962
} else {
3963
surf_index = vgrf(glsl_type::uint_type);
3964
bld.ADD(surf_index, image,
3965
brw_imm_d(stage_prog_data->binding_table.image_start));
3966
}
3967
}
3968
3969
return bld.emit_uniformize(surf_index);
3970
}
3971
3972
fs_reg
3973
fs_visitor::get_nir_ssbo_intrinsic_index(const brw::fs_builder &bld,
3974
nir_intrinsic_instr *instr)
3975
{
3976
/* SSBO stores are weird in that their index is in src[1] */
3977
const bool is_store =
3978
instr->intrinsic == nir_intrinsic_store_ssbo ||
3979
instr->intrinsic == nir_intrinsic_store_ssbo_block_intel;
3980
const unsigned src = is_store ? 1 : 0;
3981
3982
if (nir_src_is_const(instr->src[src])) {
3983
unsigned index = stage_prog_data->binding_table.ssbo_start +
3984
nir_src_as_uint(instr->src[src]);
3985
return brw_imm_ud(index);
3986
} else {
3987
fs_reg surf_index = vgrf(glsl_type::uint_type);
3988
bld.ADD(surf_index, get_nir_src(instr->src[src]),
3989
brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
3990
return bld.emit_uniformize(surf_index);
3991
}
3992
}
3993
3994
/**
3995
* The offsets we get from NIR act as if each SIMD channel has its own blob
3996
* of contiguous space. However, if we actually place each SIMD channel in
3997
* its own space, we end up with terrible cache performance because each SIMD
3998
* channel accesses a different cache line even when they're all accessing the
3999
* same byte offset. To deal with this problem, we swizzle the address using
4000
* a simple algorithm which ensures that any time a SIMD message reads or
4001
* writes the same address, it's all in the same cache line. We have to keep
4002
* the bottom two bits fixed so that we can read/write up to a dword at a time
4003
* and the individual element is contiguous. We do this by splitting the
4004
* address as follows:
4005
*
4006
*  31                           4-6           2          0
4007
* +-------------------------------+------------+----------+
4008
* |        Hi address bits        | chan index | addr low |
4009
* +-------------------------------+------------+----------+
4010
*
4011
* In other words, the bottom two address bits stay, and the top 30 get
4012
* shifted up so that we can stick the SIMD channel index in the middle. This
4013
* way, we can access 8, 16, or 32-bit elements and, when accessing a 32-bit element
4014
* at the same logical offset, the scratch read/write instruction acts on
4015
* contiguous elements and we get good cache locality.
4016
*/
4017
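/* Worked example (hypothetical values): in a SIMD16 dispatch,
* chan_index_bits == ffs(16) - 1 == 4.  For byte address 0x1234 in
* channel 5, the byte-addressed path below produces
*
*    ((0x1234 & ~3) << 4) | (5 << 2) | (0x1234 & 3) == 0x12354
*
* so all sixteen channels touching logical offset 0x1234 land in the same
* 64-byte span of scratch.
*/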
fs_reg
4018
fs_visitor::swizzle_nir_scratch_addr(const brw::fs_builder &bld,
4019
const fs_reg &nir_addr,
4020
bool in_dwords)
4021
{
4022
const fs_reg &chan_index =
4023
nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION];
4024
const unsigned chan_index_bits = ffs(dispatch_width) - 1;
4025
4026
fs_reg addr = bld.vgrf(BRW_REGISTER_TYPE_UD);
4027
if (in_dwords) {
4028
/* In this case, we know the address is aligned to a DWORD and we want
4029
* the final address in DWORDs.
4030
*/
4031
bld.SHL(addr, nir_addr, brw_imm_ud(chan_index_bits - 2));
4032
bld.OR(addr, addr, chan_index);
4033
} else {
4034
/* This case is substantially more annoying because we have to pay
4035
* attention to those pesky two bottom bits.
4036
*/
4037
fs_reg addr_hi = bld.vgrf(BRW_REGISTER_TYPE_UD);
4038
bld.AND(addr_hi, nir_addr, brw_imm_ud(~0x3u));
4039
bld.SHL(addr_hi, addr_hi, brw_imm_ud(chan_index_bits));
4040
fs_reg chan_addr = bld.vgrf(BRW_REGISTER_TYPE_UD);
4041
bld.SHL(chan_addr, chan_index, brw_imm_ud(2));
4042
bld.AND(addr, nir_addr, brw_imm_ud(0x3u));
4043
bld.OR(addr, addr, addr_hi);
4044
bld.OR(addr, addr, chan_addr);
4045
}
4046
return addr;
4047
}
4048
4049
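/* Pick the largest OWord block message size (32, 16, or 8 dwords) that
* does not exceed the number of dwords still to be transferred.
*/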
static unsigned
4050
choose_oword_block_size_dwords(unsigned dwords)
4051
{
4052
unsigned block;
4053
if (dwords >= 32) {
4054
block = 32;
4055
} else if (dwords >= 16) {
4056
block = 16;
4057
} else {
4058
block = 8;
4059
}
4060
assert(block <= dwords);
4061
return block;
4062
}
4063
4064
static void
4065
increment_a64_address(const fs_builder &bld, fs_reg address, uint32_t v)
4066
{
4067
if (bld.shader->devinfo->has_64bit_int) {
4068
bld.ADD(address, address, brw_imm_ud(v));
4069
} else {
4070
fs_reg low = retype(address, BRW_REGISTER_TYPE_UD);
4071
fs_reg high = offset(low, bld, 1);
4072
4073
/* Add low and if that overflows, add carry to high. */
4074
bld.ADD(low, low, brw_imm_ud(v))->conditional_mod = BRW_CONDITIONAL_O;
4075
bld.ADD(high, high, brw_imm_ud(0x1))->predicate = BRW_PREDICATE_NORMAL;
4076
}
4077
}
4078
4079
void
4080
fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
4081
{
4082
fs_reg dest;
4083
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
4084
dest = get_nir_dest(instr->dest);
4085
4086
switch (instr->intrinsic) {
4087
case nir_intrinsic_image_load:
4088
case nir_intrinsic_image_store:
4089
case nir_intrinsic_image_atomic_add:
4090
case nir_intrinsic_image_atomic_imin:
4091
case nir_intrinsic_image_atomic_umin:
4092
case nir_intrinsic_image_atomic_imax:
4093
case nir_intrinsic_image_atomic_umax:
4094
case nir_intrinsic_image_atomic_and:
4095
case nir_intrinsic_image_atomic_or:
4096
case nir_intrinsic_image_atomic_xor:
4097
case nir_intrinsic_image_atomic_exchange:
4098
case nir_intrinsic_image_atomic_comp_swap:
4099
case nir_intrinsic_bindless_image_load:
4100
case nir_intrinsic_bindless_image_store:
4101
case nir_intrinsic_bindless_image_atomic_add:
4102
case nir_intrinsic_bindless_image_atomic_imin:
4103
case nir_intrinsic_bindless_image_atomic_umin:
4104
case nir_intrinsic_bindless_image_atomic_imax:
4105
case nir_intrinsic_bindless_image_atomic_umax:
4106
case nir_intrinsic_bindless_image_atomic_and:
4107
case nir_intrinsic_bindless_image_atomic_or:
4108
case nir_intrinsic_bindless_image_atomic_xor:
4109
case nir_intrinsic_bindless_image_atomic_exchange:
4110
case nir_intrinsic_bindless_image_atomic_comp_swap: {
4111
/* Get some metadata from the image intrinsic. */
4112
const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
4113
4114
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
4115
4116
switch (instr->intrinsic) {
4117
case nir_intrinsic_image_load:
4118
case nir_intrinsic_image_store:
4119
case nir_intrinsic_image_atomic_add:
4120
case nir_intrinsic_image_atomic_imin:
4121
case nir_intrinsic_image_atomic_umin:
4122
case nir_intrinsic_image_atomic_imax:
4123
case nir_intrinsic_image_atomic_umax:
4124
case nir_intrinsic_image_atomic_and:
4125
case nir_intrinsic_image_atomic_or:
4126
case nir_intrinsic_image_atomic_xor:
4127
case nir_intrinsic_image_atomic_exchange:
4128
case nir_intrinsic_image_atomic_comp_swap:
4129
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
4130
get_nir_image_intrinsic_image(bld, instr);
4131
break;
4132
4133
default:
4134
/* Bindless */
4135
srcs[SURFACE_LOGICAL_SRC_SURFACE_HANDLE] =
4136
bld.emit_uniformize(get_nir_src(instr->src[0]));
4137
break;
4138
}
4139
4140
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
4141
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] =
4142
brw_imm_ud(nir_image_intrinsic_coord_components(instr));
4143
4144
/* Emit an image load, store or atomic op. */
4145
if (instr->intrinsic == nir_intrinsic_image_load ||
4146
instr->intrinsic == nir_intrinsic_bindless_image_load) {
4147
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
4148
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(0);
4149
fs_inst *inst =
4150
bld.emit(SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL,
4151
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
4152
inst->size_written = instr->num_components * dispatch_width * 4;
4153
} else if (instr->intrinsic == nir_intrinsic_image_store ||
4154
instr->intrinsic == nir_intrinsic_bindless_image_store) {
4155
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
4156
srcs[SURFACE_LOGICAL_SRC_DATA] = get_nir_src(instr->src[3]);
4157
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(1);
4158
bld.emit(SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL,
4159
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
4160
} else {
4161
unsigned num_srcs = info->num_srcs;
4162
int op = brw_aop_for_nir_intrinsic(instr);
4163
if (op == BRW_AOP_INC || op == BRW_AOP_DEC) {
4164
assert(num_srcs == 4);
4165
num_srcs = 3;
4166
}
4167
4168
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
4169
4170
fs_reg data;
4171
if (num_srcs >= 4)
4172
data = get_nir_src(instr->src[3]);
4173
if (num_srcs >= 5) {
4174
fs_reg tmp = bld.vgrf(data.type, 2);
4175
fs_reg sources[2] = { data, get_nir_src(instr->src[4]) };
4176
bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
4177
data = tmp;
4178
}
4179
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
4180
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(1);
4181
4182
bld.emit(SHADER_OPCODE_TYPED_ATOMIC_LOGICAL,
4183
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
4184
}
4185
break;
4186
}
4187
4188
case nir_intrinsic_image_size:
4189
case nir_intrinsic_bindless_image_size: {
4190
/* Unlike the [un]typed load and store opcodes, the TXS that this turns
4191
* into will handle the binding table index for us in the generator.
4192
* Incidentally, this means that we can handle bindless with exactly the
4193
* same code.
4194
*/
4195
fs_reg image = retype(get_nir_src_imm(instr->src[0]),
4196
BRW_REGISTER_TYPE_UD);
4197
image = bld.emit_uniformize(image);
4198
4199
assert(nir_src_as_uint(instr->src[1]) == 0);
4200
4201
fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
4202
if (instr->intrinsic == nir_intrinsic_image_size)
4203
srcs[TEX_LOGICAL_SRC_SURFACE] = image;
4204
else
4205
srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE] = image;
4206
srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_d(0);
4207
srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(0);
4208
srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(0);
4209
4210
/* Since the image size is always uniform, we can just emit a SIMD8
4211
* query instruction and splat the result out.
4212
*/
4213
const fs_builder ubld = bld.exec_all().group(8, 0);
4214
4215
fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 4);
4216
fs_inst *inst = ubld.emit(SHADER_OPCODE_IMAGE_SIZE_LOGICAL,
4217
tmp, srcs, ARRAY_SIZE(srcs));
4218
inst->size_written = 4 * REG_SIZE;
4219
4220
for (unsigned c = 0; c < instr->dest.ssa.num_components; ++c) {
4221
if (c == 2 && nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_CUBE) {
4222
bld.emit(SHADER_OPCODE_INT_QUOTIENT,
4223
offset(retype(dest, tmp.type), bld, c),
4224
component(offset(tmp, ubld, c), 0), brw_imm_ud(6));
4225
} else {
4226
bld.MOV(offset(retype(dest, tmp.type), bld, c),
4227
component(offset(tmp, ubld, c), 0));
4228
}
4229
}
4230
break;
4231
}
4232
4233
case nir_intrinsic_image_load_raw_intel: {
4234
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
4235
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
4236
get_nir_image_intrinsic_image(bld, instr);
4237
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
4238
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
4239
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
4240
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(0);
4241
4242
fs_inst *inst =
4243
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
4244
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
4245
inst->size_written = instr->num_components * dispatch_width * 4;
4246
break;
4247
}
4248
4249
case nir_intrinsic_image_store_raw_intel: {
4250
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
4251
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
4252
get_nir_image_intrinsic_image(bld, instr);
4253
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
4254
srcs[SURFACE_LOGICAL_SRC_DATA] = get_nir_src(instr->src[2]);
4255
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
4256
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
4257
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(1);
4258
4259
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
4260
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
4261
break;
4262
}
4263
4264
case nir_intrinsic_scoped_barrier:
4265
assert(nir_intrinsic_execution_scope(instr) == NIR_SCOPE_NONE);
4266
FALLTHROUGH;
4267
case nir_intrinsic_group_memory_barrier:
4268
case nir_intrinsic_memory_barrier_shared:
4269
case nir_intrinsic_memory_barrier_buffer:
4270
case nir_intrinsic_memory_barrier_image:
4271
case nir_intrinsic_memory_barrier:
4272
case nir_intrinsic_begin_invocation_interlock:
4273
case nir_intrinsic_end_invocation_interlock: {
4274
bool l3_fence, slm_fence, tgm_fence = false;
4275
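/* Up to three independent fences can be emitted below: l3_fence for the
* data cache / UGM path, slm_fence for shared local memory, and tgm_fence
* for typed (image) accesses on LSC platforms.
*/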
const enum opcode opcode =
4276
instr->intrinsic == nir_intrinsic_begin_invocation_interlock ?
4277
SHADER_OPCODE_INTERLOCK : SHADER_OPCODE_MEMORY_FENCE;
4278
4279
switch (instr->intrinsic) {
4280
case nir_intrinsic_scoped_barrier: {
4281
nir_variable_mode modes = nir_intrinsic_memory_modes(instr);
4282
l3_fence = modes & (nir_var_shader_out |
4283
nir_var_mem_ssbo |
4284
nir_var_mem_global);
4285
slm_fence = modes & nir_var_mem_shared;
4286
4287
/* NIR currently doesn't have an image mode */
4288
if (devinfo->has_lsc)
4289
tgm_fence = modes & nir_var_mem_ssbo;
4290
break;
4291
}
4292
4293
case nir_intrinsic_begin_invocation_interlock:
4294
case nir_intrinsic_end_invocation_interlock:
4295
/* For beginInvocationInterlockARB(), we will generate a memory fence
4296
* but with a different opcode so that generator can pick SENDC
4297
* instead of SEND.
4298
*
4299
* For endInvocationInterlockARB(), we need to insert a memory fence which
4300
* stalls in the shader until the memory transactions prior to that
4301
* fence are complete. This ensures that the shader does not end before
4302
* any writes from its critical section have landed. Otherwise, you can
4303
* end up with a case where the next invocation on that pixel properly
4304
* stalls for previous FS invocation on its pixel to complete but
4305
* doesn't actually wait for the dataport memory transactions from that
4306
* thread to land before submitting its own.
4307
*
4308
* Handling them here will allow the logic for IVB render cache (see
4309
* below) to be reused.
4310
*/
4311
l3_fence = true;
4312
slm_fence = false;
4313
break;
4314
4315
default:
4316
l3_fence = instr->intrinsic != nir_intrinsic_memory_barrier_shared;
4317
slm_fence = instr->intrinsic == nir_intrinsic_group_memory_barrier ||
4318
instr->intrinsic == nir_intrinsic_memory_barrier ||
4319
instr->intrinsic == nir_intrinsic_memory_barrier_shared;
4320
tgm_fence = instr->intrinsic == nir_intrinsic_memory_barrier_image;
4321
break;
4322
}
4323
4324
if (stage != MESA_SHADER_COMPUTE && stage != MESA_SHADER_KERNEL)
4325
slm_fence = false;
4326
4327
/* If the workgroup fits in a single HW thread, the messages for SLM are
4328
* processed in-order and the shader itself is already synchronized so
4329
* the memory fence is not necessary.
4330
*
4331
* TODO: Check whether this applies when many HW threads share the same Data Port.
4332
*/
4333
if (!nir->info.workgroup_size_variable &&
4334
slm_fence && workgroup_size() <= dispatch_width)
4335
slm_fence = false;
4336
4337
/* Prior to Gfx11, there's only L3 fence, so emit that instead. */
4338
if (slm_fence && devinfo->ver < 11) {
4339
slm_fence = false;
4340
l3_fence = true;
4341
}
4342
4343
/* IVB does typed surface access through the render cache, so we need
4344
* to flush it too.
4345
*/
4346
const bool needs_render_fence =
4347
devinfo->verx10 == 70;
4348
4349
/* Be conservative in Gfx11+ and always stall in a fence, since there
4350
* are two different fences and the shader might want to synchronize
4351
* between them.
4352
*
4353
* TODO: Use scope and visibility information for the barriers from NIR
4354
* to make a better decision on whether we need to stall.
4355
*/
4356
const bool stall = devinfo->ver >= 11 || needs_render_fence ||
4357
instr->intrinsic == nir_intrinsic_end_invocation_interlock;
4358
4359
const bool commit_enable = stall ||
4360
devinfo->ver >= 10; /* HSD ES # 1404612949 */
4361
4362
unsigned fence_regs_count = 0;
4363
fs_reg fence_regs[3] = {};
4364
4365
const fs_builder ubld = bld.group(8, 0);
4366
4367
if (l3_fence) {
4368
fs_inst *fence =
4369
ubld.emit(opcode,
4370
ubld.vgrf(BRW_REGISTER_TYPE_UD),
4371
brw_vec8_grf(0, 0),
4372
brw_imm_ud(commit_enable),
4373
brw_imm_ud(0 /* BTI; ignored for LSC */));
4374
4375
fence->sfid = devinfo->has_lsc ?
4376
GFX12_SFID_UGM :
4377
GFX7_SFID_DATAPORT_DATA_CACHE;
4378
4379
fence_regs[fence_regs_count++] = fence->dst;
4380
4381
if (needs_render_fence) {
4382
fs_inst *render_fence =
4383
ubld.emit(opcode,
4384
ubld.vgrf(BRW_REGISTER_TYPE_UD),
4385
brw_vec8_grf(0, 0),
4386
brw_imm_ud(commit_enable),
4387
brw_imm_ud(/* bti */ 0));
4388
render_fence->sfid = GFX6_SFID_DATAPORT_RENDER_CACHE;
4389
4390
fence_regs[fence_regs_count++] = render_fence->dst;
4391
}
4392
4393
/* Translate l3_fence into untyped and typed fence on XeHP */
4394
if (devinfo->has_lsc && tgm_fence) {
4395
fs_inst *fence =
4396
ubld.emit(opcode,
4397
ubld.vgrf(BRW_REGISTER_TYPE_UD),
4398
brw_vec8_grf(0, 0),
4399
brw_imm_ud(commit_enable),
4400
brw_imm_ud(/* ignored */0));
4401
4402
fence->sfid = GFX12_SFID_TGM;
4403
fence_regs[fence_regs_count++] = fence->dst;
4404
}
4405
}
4406
4407
if (slm_fence) {
4408
assert(opcode == SHADER_OPCODE_MEMORY_FENCE);
4409
fs_inst *fence =
4410
ubld.emit(opcode,
4411
ubld.vgrf(BRW_REGISTER_TYPE_UD),
4412
brw_vec8_grf(0, 0),
4413
brw_imm_ud(commit_enable),
4414
brw_imm_ud(GFX7_BTI_SLM /* ignored for LSC */));
4415
if (devinfo->has_lsc)
4416
fence->sfid = GFX12_SFID_SLM;
4417
else
4418
fence->sfid = GFX7_SFID_DATAPORT_DATA_CACHE;
4419
4420
fence_regs[fence_regs_count++] = fence->dst;
4421
}
4422
4423
assert(fence_regs_count <= 3);
4424
4425
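/* The scheduling fence takes the fence writeback registers as sources, so
* later instructions pick up a dependency on those writebacks and cannot
* be scheduled ahead of the fences.
*/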
if (stall || fence_regs_count == 0) {
4426
ubld.exec_all().group(1, 0).emit(
4427
FS_OPCODE_SCHEDULING_FENCE, ubld.null_reg_ud(),
4428
fence_regs, fence_regs_count);
4429
}
4430
4431
break;
4432
}
4433
4434
case nir_intrinsic_memory_barrier_tcs_patch:
4435
break;
4436
4437
case nir_intrinsic_shader_clock: {
4438
/* We cannot do anything if there is an event, so ignore it for now */
4439
const fs_reg shader_clock = get_timestamp(bld);
4440
const fs_reg srcs[] = { component(shader_clock, 0),
4441
component(shader_clock, 1) };
4442
bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
4443
break;
4444
}
4445
4446
case nir_intrinsic_image_samples:
4447
/* The driver does not support multi-sampled images. */
4448
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(1));
4449
break;
4450
4451
case nir_intrinsic_load_reloc_const_intel: {
4452
uint32_t id = nir_intrinsic_param_idx(instr);
4453
bld.emit(SHADER_OPCODE_MOV_RELOC_IMM,
4454
dest, brw_imm_ud(id));
4455
break;
4456
}
4457
4458
case nir_intrinsic_load_uniform: {
4459
/* Offsets are in bytes but they should always be aligned to
4460
* the type size
4461
*/
4462
assert(instr->const_index[0] % 4 == 0 ||
4463
instr->const_index[0] % type_sz(dest.type) == 0);
4464
4465
fs_reg src(UNIFORM, instr->const_index[0] / 4, dest.type);
4466
4467
if (nir_src_is_const(instr->src[0])) {
4468
unsigned load_offset = nir_src_as_uint(instr->src[0]);
4469
assert(load_offset % type_sz(dest.type) == 0);
4470
/* For 16-bit types we add const_index[0] modulo 4 to the load
4471
* offset so we can access elements that are not 32-bit aligned.
4472
*/
4473
src.offset = load_offset + instr->const_index[0] % 4;
4474
4475
for (unsigned j = 0; j < instr->num_components; j++) {
4476
bld.MOV(offset(dest, bld, j), offset(src, bld, j));
4477
}
4478
} else {
4479
fs_reg indirect = retype(get_nir_src(instr->src[0]),
4480
BRW_REGISTER_TYPE_UD);
4481
4482
/* We need to pass a size to the MOV_INDIRECT but we don't want it to
4483
* go past the end of the uniform. In order to keep the n'th
4484
* component from running past, we subtract off the size of all but
4485
* one component of the vector.
4486
*/
4487
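/* Example (hypothetical sizes): for an indirectly addressed 32-bit vec4
* with const_index[1] == 32, read_size == 32 - 3 * 4 == 20; component 3's
* source starts 12 bytes in and 12 + 20 == 32, so the MOV_INDIRECT stops
* exactly at the end of the uniform.
*/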
assert(instr->const_index[1] >=
4488
instr->num_components * (int) type_sz(dest.type));
4489
unsigned read_size = instr->const_index[1] -
4490
(instr->num_components - 1) * type_sz(dest.type);
4491
4492
bool supports_64bit_indirects =
4493
!devinfo->is_cherryview && !intel_device_info_is_9lp(devinfo);
4494
4495
if (type_sz(dest.type) != 8 || supports_64bit_indirects) {
4496
for (unsigned j = 0; j < instr->num_components; j++) {
4497
bld.emit(SHADER_OPCODE_MOV_INDIRECT,
4498
offset(dest, bld, j), offset(src, bld, j),
4499
indirect, brw_imm_ud(read_size));
4500
}
4501
} else {
4502
const unsigned num_mov_indirects =
4503
type_sz(dest.type) / type_sz(BRW_REGISTER_TYPE_UD);
4504
/* We read a little bit less per MOV INDIRECT, as they are now
4505
* 32-bit ones instead of 64-bit ones, so adjust read_size accordingly.
4506
*/
4507
const unsigned read_size_32bit = read_size -
4508
(num_mov_indirects - 1) * type_sz(BRW_REGISTER_TYPE_UD);
4509
for (unsigned j = 0; j < instr->num_components; j++) {
4510
for (unsigned i = 0; i < num_mov_indirects; i++) {
4511
bld.emit(SHADER_OPCODE_MOV_INDIRECT,
4512
subscript(offset(dest, bld, j), BRW_REGISTER_TYPE_UD, i),
4513
subscript(offset(src, bld, j), BRW_REGISTER_TYPE_UD, i),
4514
indirect, brw_imm_ud(read_size_32bit));
4515
}
4516
}
4517
}
4518
}
4519
break;
4520
}
4521
4522
case nir_intrinsic_load_ubo: {
4523
fs_reg surf_index;
4524
if (nir_src_is_const(instr->src[0])) {
4525
const unsigned index = stage_prog_data->binding_table.ubo_start +
4526
nir_src_as_uint(instr->src[0]);
4527
surf_index = brw_imm_ud(index);
4528
} else {
4529
/* The block index is not a constant. Evaluate the index expression
4530
* per-channel and add the base UBO index; we have to select a value
4531
* from any live channel.
4532
*/
4533
surf_index = vgrf(glsl_type::uint_type);
4534
bld.ADD(surf_index, get_nir_src(instr->src[0]),
4535
brw_imm_ud(stage_prog_data->binding_table.ubo_start));
4536
surf_index = bld.emit_uniformize(surf_index);
4537
}
4538
4539
if (!nir_src_is_const(instr->src[1])) {
4540
fs_reg base_offset = retype(get_nir_src(instr->src[1]),
4541
BRW_REGISTER_TYPE_UD);
4542
4543
for (int i = 0; i < instr->num_components; i++)
4544
VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
4545
base_offset, i * type_sz(dest.type),
4546
nir_dest_bit_size(instr->dest) / 8);
4547
4548
prog_data->has_ubo_pull = true;
4549
} else {
4550
/* Even if we are loading doubles, a pull constant load will load
4551
* a 32-bit vec4, so we should only reserve vgrf space for that. If we
4552
* need to load a full dvec4 we will have to emit 2 loads. This is
4553
* similar to demote_pull_constants(), except that in that case we
4554
* see individual accesses to each component of the vector and then
4555
* we let CSE deal with duplicate loads. Here we see a vector access
4556
* and we have to split it if necessary.
4557
*/
4558
const unsigned type_size = type_sz(dest.type);
4559
const unsigned load_offset = nir_src_as_uint(instr->src[1]);
4560
4561
/* See if we've selected this as a push constant candidate */
4562
if (nir_src_is_const(instr->src[0])) {
4563
const unsigned ubo_block = nir_src_as_uint(instr->src[0]);
4564
const unsigned offset_256b = load_offset / 32;
4565
4566
fs_reg push_reg;
4567
for (int i = 0; i < 4; i++) {
4568
const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
4569
if (range->block == ubo_block &&
4570
offset_256b >= range->start &&
4571
offset_256b < range->start + range->length) {
4572
4573
push_reg = fs_reg(UNIFORM, UBO_START + i, dest.type);
4574
push_reg.offset = load_offset - 32 * range->start;
4575
break;
4576
}
4577
}
4578
4579
if (push_reg.file != BAD_FILE) {
4580
for (unsigned i = 0; i < instr->num_components; i++) {
4581
bld.MOV(offset(dest, bld, i),
4582
byte_offset(push_reg, i * type_size));
4583
}
4584
break;
4585
}
4586
}
4587
4588
prog_data->has_ubo_pull = true;
4589
4590
const unsigned block_sz = 64; /* Fetch one cacheline at a time. */
4591
const fs_builder ubld = bld.exec_all().group(block_sz / 4, 0);
4592
const fs_reg packed_consts = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4593
4594
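/* Example (hypothetical layout): a 32-bit vec4 at load_offset 56 takes two
* pull loads: the first block (bytes 0..63) supplies components 0-1 from
* byte 56, the second (bytes 64..127) supplies components 2-3 from byte
* 64, so count is 2 on each iteration.
*/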
for (unsigned c = 0; c < instr->num_components;) {
4595
const unsigned base = load_offset + c * type_size;
4596
/* Number of usable components in the next block-aligned load. */
4597
const unsigned count = MIN2(instr->num_components - c,
4598
(block_sz - base % block_sz) / type_size);
4599
4600
ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
4601
packed_consts, surf_index,
4602
brw_imm_ud(base & ~(block_sz - 1)));
4603
4604
const fs_reg consts =
4605
retype(byte_offset(packed_consts, base & (block_sz - 1)),
4606
dest.type);
4607
4608
for (unsigned d = 0; d < count; d++)
4609
bld.MOV(offset(dest, bld, c + d), component(consts, d));
4610
4611
c += count;
4612
}
4613
}
4614
break;
4615
}
4616
4617
case nir_intrinsic_load_global:
4618
case nir_intrinsic_load_global_constant: {
4619
assert(devinfo->ver >= 8);
4620
4621
assert(nir_dest_bit_size(instr->dest) <= 32);
4622
assert(nir_intrinsic_align(instr) > 0);
4623
if (nir_dest_bit_size(instr->dest) == 32 &&
4624
nir_intrinsic_align(instr) >= 4) {
4625
assert(nir_dest_num_components(instr->dest) <= 4);
4626
fs_inst *inst = bld.emit(SHADER_OPCODE_A64_UNTYPED_READ_LOGICAL,
4627
dest,
4628
get_nir_src(instr->src[0]), /* Address */
4629
fs_reg(), /* No source data */
4630
brw_imm_ud(instr->num_components));
4631
inst->size_written = instr->num_components *
4632
inst->dst.component_size(inst->exec_size);
4633
} else {
4634
const unsigned bit_size = nir_dest_bit_size(instr->dest);
4635
assert(nir_dest_num_components(instr->dest) == 1);
4636
fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
4637
bld.emit(SHADER_OPCODE_A64_BYTE_SCATTERED_READ_LOGICAL,
4638
tmp,
4639
get_nir_src(instr->src[0]), /* Address */
4640
fs_reg(), /* No source data */
4641
brw_imm_ud(bit_size));
4642
bld.MOV(dest, subscript(tmp, dest.type, 0));
4643
}
4644
break;
4645
}
4646
4647
case nir_intrinsic_store_global:
4648
assert(devinfo->ver >= 8);
4649
4650
assert(nir_src_bit_size(instr->src[0]) <= 32);
4651
assert(nir_intrinsic_write_mask(instr) ==
4652
(1u << instr->num_components) - 1);
4653
assert(nir_intrinsic_align(instr) > 0);
4654
if (nir_src_bit_size(instr->src[0]) == 32 &&
4655
nir_intrinsic_align(instr) >= 4) {
4656
assert(nir_src_num_components(instr->src[0]) <= 4);
4657
bld.emit(SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL,
4658
fs_reg(),
4659
get_nir_src(instr->src[1]), /* Address */
4660
get_nir_src(instr->src[0]), /* Data */
4661
brw_imm_ud(instr->num_components));
4662
} else {
4663
assert(nir_src_num_components(instr->src[0]) == 1);
4664
const unsigned bit_size = nir_src_bit_size(instr->src[0]);
4665
brw_reg_type data_type =
4666
brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
4667
fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
4668
bld.MOV(tmp, retype(get_nir_src(instr->src[0]), data_type));
4669
bld.emit(SHADER_OPCODE_A64_BYTE_SCATTERED_WRITE_LOGICAL,
4670
fs_reg(),
4671
get_nir_src(instr->src[1]), /* Address */
4672
tmp, /* Data */
4673
brw_imm_ud(nir_src_bit_size(instr->src[0])));
4674
}
4675
break;
4676
4677
case nir_intrinsic_global_atomic_add:
4678
case nir_intrinsic_global_atomic_imin:
4679
case nir_intrinsic_global_atomic_umin:
4680
case nir_intrinsic_global_atomic_imax:
4681
case nir_intrinsic_global_atomic_umax:
4682
case nir_intrinsic_global_atomic_and:
4683
case nir_intrinsic_global_atomic_or:
4684
case nir_intrinsic_global_atomic_xor:
4685
case nir_intrinsic_global_atomic_exchange:
4686
case nir_intrinsic_global_atomic_comp_swap:
4687
nir_emit_global_atomic(bld, brw_aop_for_nir_intrinsic(instr), instr);
4688
break;
4689
case nir_intrinsic_global_atomic_fmin:
4690
case nir_intrinsic_global_atomic_fmax:
4691
case nir_intrinsic_global_atomic_fcomp_swap:
4692
nir_emit_global_atomic_float(bld, brw_aop_for_nir_intrinsic(instr), instr);
4693
break;
4694
4695
case nir_intrinsic_load_global_const_block_intel: {
4696
assert(nir_dest_bit_size(instr->dest) == 32);
4697
assert(instr->num_components == 8 || instr->num_components == 16);
4698
4699
const fs_builder ubld = bld.exec_all().group(instr->num_components, 0);
4700
fs_reg load_val;
4701
4702
bool is_pred_const = nir_src_is_const(instr->src[1]);
4703
if (is_pred_const && nir_src_as_uint(instr->src[1]) == 0) {
4704
/* In this case, we don't want the UBO load at all. We really
4705
* shouldn't get here but it's possible.
4706
*/
4707
load_val = brw_imm_ud(0);
4708
} else {
4709
/* The uniform process may stomp the flag so do this first */
4710
fs_reg addr = bld.emit_uniformize(get_nir_src(instr->src[0]));
4711
4712
load_val = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4713
4714
/* If the predicate is constant and we got here, then it's non-zero
4715
* and we don't need the predicate at all.
4716
*/
4717
if (!is_pred_const) {
4718
/* Load the predicate */
4719
fs_reg pred = bld.emit_uniformize(get_nir_src(instr->src[1]));
4720
fs_inst *mov = ubld.MOV(bld.null_reg_d(), pred);
4721
mov->conditional_mod = BRW_CONDITIONAL_NZ;
4722
4723
/* Stomp the destination with 0 if we're OOB */
4724
mov = ubld.MOV(load_val, brw_imm_ud(0));
4725
mov->predicate = BRW_PREDICATE_NORMAL;
4726
mov->predicate_inverse = true;
4727
}
4728
4729
fs_inst *load = ubld.emit(SHADER_OPCODE_A64_OWORD_BLOCK_READ_LOGICAL,
4730
load_val, addr,
4731
fs_reg(), /* No source data */
4732
brw_imm_ud(instr->num_components));
4733
4734
if (!is_pred_const)
4735
load->predicate = BRW_PREDICATE_NORMAL;
4736
}
4737
4738
/* From the HW perspective, we just did a single SIMD16 instruction
4739
* which loaded a dword in each SIMD channel. From NIR's perspective,
4740
* this instruction returns a vec16. Any users of this data in the
4741
* back-end will expect a vec16 per SIMD channel so we have to emit a
4742
* pile of MOVs to resolve this discrepancy. Fortunately, copy-prop
4743
* will generally clean them up for us.
4744
*/
4745
for (unsigned i = 0; i < instr->num_components; i++) {
4746
bld.MOV(retype(offset(dest, bld, i), BRW_REGISTER_TYPE_UD),
4747
component(load_val, i));
4748
}
4749
break;
4750
}
4751
4752
case nir_intrinsic_load_ssbo: {
4753
assert(devinfo->ver >= 7);
4754
4755
const unsigned bit_size = nir_dest_bit_size(instr->dest);
4756
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
4757
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
4758
get_nir_ssbo_intrinsic_index(bld, instr);
4759
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
4760
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
4761
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(0);
4762
4763
/* Make dest unsigned because that's what the temporary will be */
4764
dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
4765
4766
/* Read the vector */
4767
assert(nir_dest_bit_size(instr->dest) <= 32);
4768
assert(nir_intrinsic_align(instr) > 0);
4769
if (nir_dest_bit_size(instr->dest) == 32 &&
4770
nir_intrinsic_align(instr) >= 4) {
4771
assert(nir_dest_num_components(instr->dest) <= 4);
4772
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
4773
fs_inst *inst =
4774
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
4775
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
4776
inst->size_written = instr->num_components * dispatch_width * 4;
4777
} else {
4778
assert(nir_dest_num_components(instr->dest) == 1);
4779
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
4780
4781
fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
4782
bld.emit(SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
4783
read_result, srcs, SURFACE_LOGICAL_NUM_SRCS);
4784
bld.MOV(dest, subscript(read_result, dest.type, 0));
4785
}
4786
break;
4787
}
4788
4789
case nir_intrinsic_store_ssbo: {
4790
assert(devinfo->ver >= 7);
4791
4792
const unsigned bit_size = nir_src_bit_size(instr->src[0]);
4793
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
4794
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
4795
get_nir_ssbo_intrinsic_index(bld, instr);
4796
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[2]);
4797
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
4798
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(1);
4799
4800
fs_reg data = get_nir_src(instr->src[0]);
4801
data.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
4802
4803
assert(nir_src_bit_size(instr->src[0]) <= 32);
4804
assert(nir_intrinsic_write_mask(instr) ==
4805
(1u << instr->num_components) - 1);
4806
assert(nir_intrinsic_align(instr) > 0);
4807
if (nir_src_bit_size(instr->src[0]) == 32 &&
4808
nir_intrinsic_align(instr) >= 4) {
4809
assert(nir_src_num_components(instr->src[0]) <= 4);
4810
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
4811
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
4812
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
4813
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
4814
} else {
4815
assert(nir_src_num_components(instr->src[0]) == 1);
4816
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
4817
4818
srcs[SURFACE_LOGICAL_SRC_DATA] = bld.vgrf(BRW_REGISTER_TYPE_UD);
4819
bld.MOV(srcs[SURFACE_LOGICAL_SRC_DATA], data);
4820
4821
bld.emit(SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
4822
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
4823
}
4824
break;
4825
}
4826
4827
case nir_intrinsic_store_output: {
4828
assert(nir_src_bit_size(instr->src[0]) == 32);
4829
fs_reg src = get_nir_src(instr->src[0]);
4830
4831
unsigned store_offset = nir_src_as_uint(instr->src[1]);
4832
unsigned num_components = instr->num_components;
4833
unsigned first_component = nir_intrinsic_component(instr);
4834
4835
fs_reg new_dest = retype(offset(outputs[instr->const_index[0]], bld,
4836
4 * store_offset), src.type);
4837
for (unsigned j = 0; j < num_components; j++) {
4838
bld.MOV(offset(new_dest, bld, j + first_component),
4839
offset(src, bld, j));
4840
}
4841
break;
4842
}
4843
4844
case nir_intrinsic_ssbo_atomic_add:
4845
case nir_intrinsic_ssbo_atomic_imin:
4846
case nir_intrinsic_ssbo_atomic_umin:
4847
case nir_intrinsic_ssbo_atomic_imax:
4848
case nir_intrinsic_ssbo_atomic_umax:
4849
case nir_intrinsic_ssbo_atomic_and:
4850
case nir_intrinsic_ssbo_atomic_or:
4851
case nir_intrinsic_ssbo_atomic_xor:
4852
case nir_intrinsic_ssbo_atomic_exchange:
4853
case nir_intrinsic_ssbo_atomic_comp_swap:
4854
nir_emit_ssbo_atomic(bld, brw_aop_for_nir_intrinsic(instr), instr);
4855
break;
4856
case nir_intrinsic_ssbo_atomic_fmin:
4857
case nir_intrinsic_ssbo_atomic_fmax:
4858
case nir_intrinsic_ssbo_atomic_fcomp_swap:
4859
nir_emit_ssbo_atomic_float(bld, brw_aop_for_nir_intrinsic(instr), instr);
4860
break;
4861
4862
case nir_intrinsic_get_ssbo_size: {
4863
assert(nir_src_num_components(instr->src[0]) == 1);
4864
unsigned ssbo_index = nir_src_is_const(instr->src[0]) ?
4865
nir_src_as_uint(instr->src[0]) : 0;
4866
4867
/* A resinfo's sampler message is used to get the buffer size. The
4868
* SIMD8's writeback message consists of four registers and SIMD16's
4869
* writeback message consists of 8 destination registers (two per
4870
* component). Because we are only interested in the first channel of
4871
* the first returned component, where resinfo returns the buffer size
4872
* for SURFTYPE_BUFFER, we can just use the SIMD8 variant regardless of
4873
* the dispatch width.
4874
*/
4875
const fs_builder ubld = bld.exec_all().group(8, 0);
4876
fs_reg src_payload = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4877
fs_reg ret_payload = ubld.vgrf(BRW_REGISTER_TYPE_UD, 4);
4878
4879
/* Set LOD = 0 */
4880
ubld.MOV(src_payload, brw_imm_d(0));
4881
4882
const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index;
4883
fs_inst *inst = ubld.emit(SHADER_OPCODE_GET_BUFFER_SIZE, ret_payload,
4884
src_payload, brw_imm_ud(index));
4885
inst->header_size = 0;
4886
inst->mlen = 1;
4887
inst->size_written = 4 * REG_SIZE;
4888
4889
/* SKL PRM, vol07, 3D Media GPGPU Engine, Bounds Checking and Faulting:
4890
*
4891
* "Out-of-bounds checking is always performed at a DWord granularity. If
4892
* any part of the DWord is out-of-bounds then the whole DWord is
4893
* considered out-of-bounds."
4894
*
4895
* This implies that types with a size smaller than 4 bytes need to be
4896
* padded if they don't complete the last dword of the buffer. But as we
4897
* need to maintain the original size, we have to reverse the padding
4898
* calculation to return the correct size to know the number of elements
4899
* of an unsized array. As we stored in the last two bits of the surface
4900
* size the needed padding for the buffer, we calculate here the
4901
* original buffer_size by reversing the surface_size calculation:
4902
*
4903
* surface_size = isl_align(buffer_size, 4) +
4904
* (isl_align(buffer_size, 4) - buffer_size)
4905
*
4906
* buffer_size = (surface_size & ~3) - (surface_size & 3)
4907
*/
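/* A worked example of the reversal above (illustrative numbers only): for
 * buffer_size = 7, surface_size = isl_align(7, 4) + (isl_align(7, 4) - 7)
 * = 8 + 1 = 9, and reversing it gives (9 & ~3) - (9 & 3) = 8 - 1 = 7, the
 * original size. A buffer that is already dword-aligned (e.g. 8) carries no
 * padding bits and passes through unchanged.
 */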
4908
4909
fs_reg size_aligned4 = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4910
fs_reg size_padding = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4911
fs_reg buffer_size = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4912
4913
ubld.AND(size_padding, ret_payload, brw_imm_ud(3));
4914
ubld.AND(size_aligned4, ret_payload, brw_imm_ud(~3));
4915
ubld.ADD(buffer_size, size_aligned4, negate(size_padding));
4916
4917
bld.MOV(retype(dest, ret_payload.type), component(buffer_size, 0));
4918
break;
4919
}
4920
4921
case nir_intrinsic_load_scratch: {
4922
assert(devinfo->ver >= 7);
4923
4924
assert(nir_dest_num_components(instr->dest) == 1);
4925
const unsigned bit_size = nir_dest_bit_size(instr->dest);
4926
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
4927
4928
if (devinfo->verx10 >= 125) {
4929
const fs_builder ubld = bld.exec_all().group(1, 0);
4930
fs_reg handle = component(ubld.vgrf(BRW_REGISTER_TYPE_UD), 0);
4931
ubld.AND(handle, retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
4932
brw_imm_ud(~0x3ffu));
4933
srcs[SURFACE_LOGICAL_SRC_SURFACE_HANDLE] = handle;
4934
} else if (devinfo->ver >= 8) {
4935
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
4936
brw_imm_ud(GFX8_BTI_STATELESS_NON_COHERENT);
4937
} else {
4938
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(BRW_BTI_STATELESS);
4939
}
4940
4941
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
4942
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
4943
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(0);
4944
const fs_reg nir_addr = get_nir_src(instr->src[0]);
4945
4946
/* Make dest unsigned because that's what the temporary will be */
4947
dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
4948
4949
/* Read the vector */
4950
assert(nir_dest_num_components(instr->dest) == 1);
4951
assert(nir_dest_bit_size(instr->dest) <= 32);
4952
assert(nir_intrinsic_align(instr) > 0);
4953
if (devinfo->verx10 >= 125) {
4954
assert(nir_dest_bit_size(instr->dest) == 32 &&
4955
nir_intrinsic_align(instr) >= 4);
4956
4957
srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
4958
swizzle_nir_scratch_addr(bld, nir_addr, false);
4959
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(1);
4960
4961
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
4962
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
4963
} else if (nir_dest_bit_size(instr->dest) >= 4 &&
4964
nir_intrinsic_align(instr) >= 4) {
4965
/* The offset for a DWORD scattered message is in dwords. */
4966
srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
4967
swizzle_nir_scratch_addr(bld, nir_addr, true);
4968
4969
bld.emit(SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL,
4970
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
4971
} else {
4972
srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
4973
swizzle_nir_scratch_addr(bld, nir_addr, false);
4974
4975
fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
4976
bld.emit(SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
4977
read_result, srcs, SURFACE_LOGICAL_NUM_SRCS);
4978
bld.MOV(dest, read_result);
4979
}
4980
break;
4981
}
4982
4983
case nir_intrinsic_store_scratch: {
4984
assert(devinfo->ver >= 7);
4985
4986
assert(nir_src_num_components(instr->src[0]) == 1);
4987
const unsigned bit_size = nir_src_bit_size(instr->src[0]);
4988
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
4989
4990
if (devinfo->verx10 >= 125) {
4991
const fs_builder ubld = bld.exec_all().group(1, 0);
4992
fs_reg handle = component(ubld.vgrf(BRW_REGISTER_TYPE_UD), 0);
4993
ubld.AND(handle, retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
4994
brw_imm_ud(~0x3ffu));
4995
srcs[SURFACE_LOGICAL_SRC_SURFACE_HANDLE] = handle;
4996
} else if (devinfo->ver >= 8) {
4997
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
4998
brw_imm_ud(GFX8_BTI_STATELESS_NON_COHERENT);
4999
} else {
5000
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(BRW_BTI_STATELESS);
5001
}
5002
5003
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
5004
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
5005
/**
5006
* While this instruction has side-effects, it should not be predicated
5007
* on sample mask, because otherwise fs helper invocations would
5008
* load undefined values from scratch memory. And scratch memory
5009
* load-stores are produced from operations without side-effects, thus
5010
* they should not have different behaviour in the helper invocations.
5011
*/
5012
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(0);
5013
const fs_reg nir_addr = get_nir_src(instr->src[1]);
5014
5015
fs_reg data = get_nir_src(instr->src[0]);
5016
data.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
5017
5018
assert(nir_src_num_components(instr->src[0]) == 1);
5019
assert(nir_src_bit_size(instr->src[0]) <= 32);
5020
assert(nir_intrinsic_write_mask(instr) == 1);
5021
assert(nir_intrinsic_align(instr) > 0);
5022
if (devinfo->verx10 >= 125) {
5023
assert(nir_src_bit_size(instr->src[0]) == 32 &&
5024
nir_intrinsic_align(instr) >= 4);
5025
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
5026
5027
srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
5028
swizzle_nir_scratch_addr(bld, nir_addr, false);
5029
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(1);
5030
5031
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
5032
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
5033
} else if (nir_src_bit_size(instr->src[0]) == 32 &&
5034
nir_intrinsic_align(instr) >= 4) {
5035
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
5036
5037
/* The offset for a DWORD scattered message is in dwords. */
5038
srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
5039
swizzle_nir_scratch_addr(bld, nir_addr, true);
5040
5041
bld.emit(SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL,
5042
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
5043
} else {
5044
srcs[SURFACE_LOGICAL_SRC_DATA] = bld.vgrf(BRW_REGISTER_TYPE_UD);
5045
bld.MOV(srcs[SURFACE_LOGICAL_SRC_DATA], data);
5046
5047
srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
5048
swizzle_nir_scratch_addr(bld, nir_addr, false);
5049
5050
bld.emit(SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
5051
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
5052
}
5053
break;
5054
}
5055
5056
case nir_intrinsic_load_subgroup_size:
5057
/* This should only happen for fragment shaders because every other case
5058
* is lowered in NIR so we can optimize on it.
5059
*/
5060
assert(stage == MESA_SHADER_FRAGMENT);
5061
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(dispatch_width));
5062
break;
5063
5064
case nir_intrinsic_load_subgroup_invocation:
5065
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
5066
nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION]);
5067
break;
5068
5069
case nir_intrinsic_load_subgroup_eq_mask:
5070
case nir_intrinsic_load_subgroup_ge_mask:
5071
case nir_intrinsic_load_subgroup_gt_mask:
5072
case nir_intrinsic_load_subgroup_le_mask:
5073
case nir_intrinsic_load_subgroup_lt_mask:
5074
unreachable("not reached");
5075
5076
case nir_intrinsic_vote_any: {
5077
const fs_builder ubld = bld.exec_all().group(1, 0);
5078
5079
/* The any/all predicates do not consider channel enables. To prevent
5080
* dead channels from affecting the result, we initialize the flag with
5081
* the identity value for the logical operation.
5082
*/
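/* Concretely (a reading of the code, not part of the original comment): for
 * vote_any the identity is 0, so a disabled channel can never force the ANY
 * predicate true; in the vote_all case below the identity is all-ones, so a
 * disabled channel can never force the ALL predicate false.
 */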
5083
if (dispatch_width == 32) {
5084
/* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
5085
ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
5086
brw_imm_ud(0));
5087
} else {
5088
ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0));
5089
}
5090
bld.CMP(bld.null_reg_d(), get_nir_src(instr->src[0]), brw_imm_d(0), BRW_CONDITIONAL_NZ);
5091
5092
/* For some reason, the any/all predicates don't work properly with
5093
* SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
5094
* doesn't read the correct subset of the flag register and you end up
5095
* getting garbage in the second half. Work around this by using a pair
5096
* of 1-wide MOVs and scattering the result.
5097
*/
5098
fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
5099
ubld.MOV(res1, brw_imm_d(0));
5100
set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ANY8H :
5101
dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ANY16H :
5102
BRW_PREDICATE_ALIGN1_ANY32H,
5103
ubld.MOV(res1, brw_imm_d(-1)));
5104
5105
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
5106
break;
5107
}
5108
case nir_intrinsic_vote_all: {
5109
const fs_builder ubld = bld.exec_all().group(1, 0);
5110
5111
/* The any/all predicates do not consider channel enables. To prevent
5112
* dead channels from affecting the result, we initialize the flag with
5113
* the identity value for the logical operation.
5114
*/
5115
if (dispatch_width == 32) {
5116
/* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
5117
ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
5118
brw_imm_ud(0xffffffff));
5119
} else {
5120
ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0xffff));
5121
}
5122
bld.CMP(bld.null_reg_d(), get_nir_src(instr->src[0]), brw_imm_d(0), BRW_CONDITIONAL_NZ);
5123
5124
/* For some reason, the any/all predicates don't work properly with
5125
* SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
5126
* doesn't read the correct subset of the flag register and you end up
5127
* getting garbage in the second half. Work around this by using a pair
5128
* of 1-wide MOVs and scattering the result.
5129
*/
5130
fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
5131
ubld.MOV(res1, brw_imm_d(0));
5132
set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ALL8H :
5133
dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ALL16H :
5134
BRW_PREDICATE_ALIGN1_ALL32H,
5135
ubld.MOV(res1, brw_imm_d(-1)));
5136
5137
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
5138
break;
5139
}
5140
case nir_intrinsic_vote_feq:
5141
case nir_intrinsic_vote_ieq: {
5142
fs_reg value = get_nir_src(instr->src[0]);
5143
if (instr->intrinsic == nir_intrinsic_vote_feq) {
5144
const unsigned bit_size = nir_src_bit_size(instr->src[0]);
5145
value.type = bit_size == 8 ? BRW_REGISTER_TYPE_B :
5146
brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_F);
5147
}
5148
5149
fs_reg uniformized = bld.emit_uniformize(value);
5150
const fs_builder ubld = bld.exec_all().group(1, 0);
5151
5152
/* The any/all predicates do not consider channel enables. To prevent
5153
* dead channels from affecting the result, we initialize the flag with
5154
* with the identity value for the logical operation.
5155
*/
5156
if (dispatch_width == 32) {
5157
/* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
5158
ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
5159
brw_imm_ud(0xffffffff));
5160
} else {
5161
ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0xffff));
5162
}
5163
bld.CMP(bld.null_reg_d(), value, uniformized, BRW_CONDITIONAL_Z);
5164
5165
/* For some reason, the any/all predicates don't work properly with
5166
* SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
5167
* doesn't read the correct subset of the flag register and you end up
5168
* getting garbage in the second half. Work around this by using a pair
5169
* of 1-wide MOVs and scattering the result.
5170
*/
5171
fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
5172
ubld.MOV(res1, brw_imm_d(0));
5173
set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ALL8H :
5174
dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ALL16H :
5175
BRW_PREDICATE_ALIGN1_ALL32H,
5176
ubld.MOV(res1, brw_imm_d(-1)));
5177
5178
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
5179
break;
5180
}
5181
5182
case nir_intrinsic_ballot: {
5183
const fs_reg value = retype(get_nir_src(instr->src[0]),
5184
BRW_REGISTER_TYPE_UD);
5185
struct brw_reg flag = brw_flag_reg(0, 0);
5186
/* FIXME: For SIMD32 programs, this causes us to stomp on f0.1 as well
5187
* as f0.0. This is a problem for fragment programs as we currently use
5188
* f0.1 for discards. Fortunately, we don't support SIMD32 fragment
5189
* programs yet so this isn't a problem. When we do, something will
5190
* have to change.
5191
*/
5192
if (dispatch_width == 32)
5193
flag.type = BRW_REGISTER_TYPE_UD;
5194
5195
bld.exec_all().group(1, 0).MOV(flag, brw_imm_ud(0u));
5196
bld.CMP(bld.null_reg_ud(), value, brw_imm_ud(0u), BRW_CONDITIONAL_NZ);
5197
5198
if (instr->dest.ssa.bit_size > 32) {
5199
dest.type = BRW_REGISTER_TYPE_UQ;
5200
} else {
5201
dest.type = BRW_REGISTER_TYPE_UD;
5202
}
5203
bld.MOV(dest, flag);
5204
break;
5205
}
5206
5207
case nir_intrinsic_read_invocation: {
5208
const fs_reg value = get_nir_src(instr->src[0]);
5209
const fs_reg invocation = get_nir_src(instr->src[1]);
5210
fs_reg tmp = bld.vgrf(value.type);
5211
5212
bld.exec_all().emit(SHADER_OPCODE_BROADCAST, tmp, value,
5213
bld.emit_uniformize(invocation));
5214
5215
bld.MOV(retype(dest, value.type), fs_reg(component(tmp, 0)));
5216
break;
5217
}
5218
5219
case nir_intrinsic_read_first_invocation: {
5220
const fs_reg value = get_nir_src(instr->src[0]);
5221
bld.MOV(retype(dest, value.type), bld.emit_uniformize(value));
5222
break;
5223
}
5224
5225
case nir_intrinsic_shuffle: {
5226
const fs_reg value = get_nir_src(instr->src[0]);
5227
const fs_reg index = get_nir_src(instr->src[1]);
5228
5229
bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, index);
5230
break;
5231
}
5232
5233
case nir_intrinsic_first_invocation: {
5234
fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
5235
bld.exec_all().emit(SHADER_OPCODE_FIND_LIVE_CHANNEL, tmp);
5236
bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
5237
fs_reg(component(tmp, 0)));
5238
break;
5239
}
5240
5241
case nir_intrinsic_quad_broadcast: {
5242
const fs_reg value = get_nir_src(instr->src[0]);
5243
const unsigned index = nir_src_as_uint(instr->src[1]);
5244
5245
bld.emit(SHADER_OPCODE_CLUSTER_BROADCAST, retype(dest, value.type),
5246
value, brw_imm_ud(index), brw_imm_ud(4));
5247
break;
5248
}
5249
5250
case nir_intrinsic_quad_swap_horizontal: {
5251
const fs_reg value = get_nir_src(instr->src[0]);
5252
const fs_reg tmp = bld.vgrf(value.type);
5253
if (devinfo->ver <= 7) {
5254
/* The hardware doesn't seem to support these crazy regions with
5255
* compressed instructions on gfx7 and earlier so we fall back to
5256
* using quad swizzles. Fortunately, we don't support 64-bit
5257
* anything in Vulkan on gfx7.
5258
*/
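/* For reference (not in the original comment): BRW_SWIZZLE4(1,0,3,2) maps
 * quad channels (0,1,2,3) to (1,0,3,2), i.e. it swaps each horizontal pair
 * within a 2x2 quad, the same cross-channel pattern the strided MOVs in the
 * gfx8+ branch below implement.
 */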
5259
assert(nir_src_bit_size(instr->src[0]) == 32);
5260
const fs_builder ubld = bld.exec_all();
5261
ubld.emit(SHADER_OPCODE_QUAD_SWIZZLE, tmp, value,
5262
brw_imm_ud(BRW_SWIZZLE4(1,0,3,2)));
5263
bld.MOV(retype(dest, value.type), tmp);
5264
} else {
5265
const fs_builder ubld = bld.exec_all().group(dispatch_width / 2, 0);
5266
5267
const fs_reg src_left = horiz_stride(value, 2);
5268
const fs_reg src_right = horiz_stride(horiz_offset(value, 1), 2);
5269
const fs_reg tmp_left = horiz_stride(tmp, 2);
5270
const fs_reg tmp_right = horiz_stride(horiz_offset(tmp, 1), 2);
5271
5272
ubld.MOV(tmp_left, src_right);
5273
ubld.MOV(tmp_right, src_left);
5274
5275
}
5276
bld.MOV(retype(dest, value.type), tmp);
5277
break;
5278
}
5279
5280
case nir_intrinsic_quad_swap_vertical: {
5281
const fs_reg value = get_nir_src(instr->src[0]);
5282
if (nir_src_bit_size(instr->src[0]) == 32) {
5283
/* For 32-bit, we can use a SIMD4x2 instruction to do this easily */
5284
const fs_reg tmp = bld.vgrf(value.type);
5285
const fs_builder ubld = bld.exec_all();
5286
ubld.emit(SHADER_OPCODE_QUAD_SWIZZLE, tmp, value,
5287
brw_imm_ud(BRW_SWIZZLE4(2,3,0,1)));
5288
bld.MOV(retype(dest, value.type), tmp);
5289
} else {
5290
/* For larger data types, we have to either emit dispatch_width many
5291
* MOVs or else fall back to doing indirects.
5292
*/
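/* As a small illustration (not from the original comment): XOR-ing the
 * subgroup invocation with 0x2 pairs lanes 0<->2 and 1<->3 within every
 * quad, so the SHUFFLE below reads each channel's vertical neighbour.
 */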
5293
fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_W);
5294
bld.XOR(idx, nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION],
5295
brw_imm_w(0x2));
5296
bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, idx);
5297
}
5298
break;
5299
}
5300
5301
case nir_intrinsic_quad_swap_diagonal: {
5302
const fs_reg value = get_nir_src(instr->src[0]);
5303
if (nir_src_bit_size(instr->src[0]) == 32) {
5304
/* For 32-bit, we can use a SIMD4x2 instruction to do this easily */
5305
const fs_reg tmp = bld.vgrf(value.type);
5306
const fs_builder ubld = bld.exec_all();
5307
ubld.emit(SHADER_OPCODE_QUAD_SWIZZLE, tmp, value,
5308
brw_imm_ud(BRW_SWIZZLE4(3,2,1,0)));
5309
bld.MOV(retype(dest, value.type), tmp);
5310
} else {
5311
/* For larger data types, we have to either emit dispatch_width many
5312
* MOVs or else fall back to doing indirects.
5313
*/
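/* Similarly (illustrative only): XOR with 0x3 flips both quad bits, pairing
 * lanes 0<->3 and 1<->2, so each channel reads its diagonal neighbour.
 */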
5314
fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_W);
5315
bld.XOR(idx, nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION],
5316
brw_imm_w(0x3));
5317
bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, idx);
5318
}
5319
break;
5320
}
5321
5322
case nir_intrinsic_reduce: {
5323
fs_reg src = get_nir_src(instr->src[0]);
5324
nir_op redop = (nir_op)nir_intrinsic_reduction_op(instr);
5325
unsigned cluster_size = nir_intrinsic_cluster_size(instr);
5326
if (cluster_size == 0 || cluster_size > dispatch_width)
5327
cluster_size = dispatch_width;
5328
5329
/* Figure out the source type */
5330
src.type = brw_type_for_nir_type(devinfo,
5331
(nir_alu_type)(nir_op_infos[redop].input_types[0] |
5332
nir_src_bit_size(instr->src[0])));
5333
5334
fs_reg identity = brw_nir_reduction_op_identity(bld, redop, src.type);
5335
opcode brw_op = brw_op_for_nir_reduction_op(redop);
5336
brw_conditional_mod cond_mod = brw_cond_mod_for_nir_reduction_op(redop);
5337
5338
/* Set up a register for all of our scratching around and initialize it
5339
* to the reduction operation's identity value.
5340
*/
5341
fs_reg scan = bld.vgrf(src.type);
5342
bld.exec_all().emit(SHADER_OPCODE_SEL_EXEC, scan, src, identity);
5343
5344
bld.emit_scan(brw_op, scan, cluster_size, cond_mod);
5345
5346
dest.type = src.type;
5347
if (cluster_size * type_sz(src.type) >= REG_SIZE * 2) {
5348
/* In this case, the CLUSTER_BROADCAST instruction isn't needed because
5349
* the distance between clusters is at least 2 GRFs. As a result,
5350
* we don't need the weird striding of the CLUSTER_BROADCAST
5351
* instruction and can just do regular MOVs.
5352
*/
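/* A hedged example of the arithmetic below (assuming REG_SIZE == 32 bytes):
 * with SIMD16 and a 64-bit type, cluster_size * type_sz = 16 * 8 = 128 >= 64,
 * so groups = (16 * 8) / 64 = 2 and group_size = 8; both 8-wide groups copy
 * component 15 of the scan (the last channel of the single cluster) into
 * their half of dest.
 */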
5353
assert((cluster_size * type_sz(src.type)) % (REG_SIZE * 2) == 0);
5354
const unsigned groups =
5355
(dispatch_width * type_sz(src.type)) / (REG_SIZE * 2);
5356
const unsigned group_size = dispatch_width / groups;
5357
for (unsigned i = 0; i < groups; i++) {
5358
const unsigned cluster = (i * group_size) / cluster_size;
5359
const unsigned comp = cluster * cluster_size + (cluster_size - 1);
5360
bld.group(group_size, i).MOV(horiz_offset(dest, i * group_size),
5361
component(scan, comp));
5362
}
5363
} else {
5364
bld.emit(SHADER_OPCODE_CLUSTER_BROADCAST, dest, scan,
5365
brw_imm_ud(cluster_size - 1), brw_imm_ud(cluster_size));
5366
}
5367
break;
5368
}
5369
5370
case nir_intrinsic_inclusive_scan:
5371
case nir_intrinsic_exclusive_scan: {
5372
fs_reg src = get_nir_src(instr->src[0]);
5373
nir_op redop = (nir_op)nir_intrinsic_reduction_op(instr);
5374
5375
/* Figure out the source type */
5376
src.type = brw_type_for_nir_type(devinfo,
5377
(nir_alu_type)(nir_op_infos[redop].input_types[0] |
5378
nir_src_bit_size(instr->src[0])));
5379
5380
fs_reg identity = brw_nir_reduction_op_identity(bld, redop, src.type);
5381
opcode brw_op = brw_op_for_nir_reduction_op(redop);
5382
brw_conditional_mod cond_mod = brw_cond_mod_for_nir_reduction_op(redop);
5383
5384
/* Set up a register for all of our scratching around and initialize it
5385
* to the reduction operation's identity value.
5386
*/
5387
fs_reg scan = bld.vgrf(src.type);
5388
const fs_builder allbld = bld.exec_all();
5389
allbld.emit(SHADER_OPCODE_SEL_EXEC, scan, src, identity);
5390
5391
if (instr->intrinsic == nir_intrinsic_exclusive_scan) {
5392
/* Exclusive scan is a bit harder because we have to do an annoying
5393
* shift of the contents before we can begin. To make things worse,
5394
* we can't do this with a normal stride; we have to use indirects.
5395
*/
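/* In other words (illustrative, not from the original comment): channel i is
 * made to read scan value i-1 through the SHUFFLE below, and channel 0 is
 * overwritten with the identity, so a scan over [a, b, c, d] starts from the
 * shifted input [identity, a, b, c].
 */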
5396
fs_reg shifted = bld.vgrf(src.type);
5397
fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_W);
5398
allbld.ADD(idx, nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION],
5399
brw_imm_w(-1));
5400
allbld.emit(SHADER_OPCODE_SHUFFLE, shifted, scan, idx);
5401
allbld.group(1, 0).MOV(component(shifted, 0), identity);
5402
scan = shifted;
5403
}
5404
5405
bld.emit_scan(brw_op, scan, dispatch_width, cond_mod);
5406
5407
bld.MOV(retype(dest, src.type), scan);
5408
break;
5409
}
5410
5411
case nir_intrinsic_load_global_block_intel: {
5412
assert(nir_dest_bit_size(instr->dest) == 32);
5413
5414
fs_reg address = bld.emit_uniformize(get_nir_src(instr->src[0]));
5415
5416
const fs_builder ubld1 = bld.exec_all().group(1, 0);
5417
const fs_builder ubld8 = bld.exec_all().group(8, 0);
5418
const fs_builder ubld16 = bld.exec_all().group(16, 0);
5419
5420
const unsigned total = instr->num_components * dispatch_width;
5421
unsigned loaded = 0;
5422
5423
while (loaded < total) {
5424
const unsigned block =
5425
choose_oword_block_size_dwords(total - loaded);
5426
const unsigned block_bytes = block * 4;
5427
5428
const fs_builder &ubld = block == 8 ? ubld8 : ubld16;
5429
ubld.emit(SHADER_OPCODE_A64_UNALIGNED_OWORD_BLOCK_READ_LOGICAL,
5430
retype(byte_offset(dest, loaded * 4), BRW_REGISTER_TYPE_UD),
5431
address,
5432
fs_reg(), /* No source data */
5433
brw_imm_ud(block))->size_written = block_bytes;
5434
5435
increment_a64_address(ubld1, address, block_bytes);
5436
loaded += block;
5437
}
5438
5439
assert(loaded == total);
5440
break;
5441
}
5442
5443
case nir_intrinsic_store_global_block_intel: {
5444
assert(nir_src_bit_size(instr->src[0]) == 32);
5445
5446
fs_reg address = bld.emit_uniformize(get_nir_src(instr->src[1]));
5447
fs_reg src = get_nir_src(instr->src[0]);
5448
5449
const fs_builder ubld1 = bld.exec_all().group(1, 0);
5450
const fs_builder ubld8 = bld.exec_all().group(8, 0);
5451
const fs_builder ubld16 = bld.exec_all().group(16, 0);
5452
5453
const unsigned total = instr->num_components * dispatch_width;
5454
unsigned written = 0;
5455
5456
while (written < total) {
5457
const unsigned block =
5458
choose_oword_block_size_dwords(total - written);
5459
5460
const fs_builder &ubld = block == 8 ? ubld8 : ubld16;
5461
ubld.emit(SHADER_OPCODE_A64_OWORD_BLOCK_WRITE_LOGICAL,
5462
fs_reg(),
5463
address,
5464
retype(byte_offset(src, written * 4), BRW_REGISTER_TYPE_UD),
5465
brw_imm_ud(block));
5466
5467
const unsigned block_bytes = block * 4;
5468
increment_a64_address(ubld1, address, block_bytes);
5469
written += block;
5470
}
5471
5472
assert(written == total);
5473
break;
5474
}
5475
5476
case nir_intrinsic_load_shared_block_intel:
5477
case nir_intrinsic_load_ssbo_block_intel: {
5478
assert(nir_dest_bit_size(instr->dest) == 32);
5479
5480
const bool is_ssbo =
5481
instr->intrinsic == nir_intrinsic_load_ssbo_block_intel;
5482
fs_reg address = bld.emit_uniformize(get_nir_src(instr->src[is_ssbo ? 1 : 0]));
5483
5484
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
5485
srcs[SURFACE_LOGICAL_SRC_SURFACE] = is_ssbo ?
5486
get_nir_ssbo_intrinsic_index(bld, instr) : fs_reg(brw_imm_ud(GFX7_BTI_SLM));
5487
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = address;
5488
5489
const fs_builder ubld1 = bld.exec_all().group(1, 0);
5490
const fs_builder ubld8 = bld.exec_all().group(8, 0);
5491
const fs_builder ubld16 = bld.exec_all().group(16, 0);
5492
5493
const unsigned total = instr->num_components * dispatch_width;
5494
unsigned loaded = 0;
5495
5496
while (loaded < total) {
5497
const unsigned block =
5498
choose_oword_block_size_dwords(total - loaded);
5499
const unsigned block_bytes = block * 4;
5500
5501
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(block);
5502
5503
const fs_builder &ubld = block == 8 ? ubld8 : ubld16;
5504
ubld.emit(SHADER_OPCODE_UNALIGNED_OWORD_BLOCK_READ_LOGICAL,
5505
retype(byte_offset(dest, loaded * 4), BRW_REGISTER_TYPE_UD),
5506
srcs, SURFACE_LOGICAL_NUM_SRCS)->size_written = block_bytes;
5507
5508
ubld1.ADD(address, address, brw_imm_ud(block_bytes));
5509
loaded += block;
5510
}
5511
5512
assert(loaded == total);
5513
break;
5514
}
5515
5516
case nir_intrinsic_store_shared_block_intel:
5517
case nir_intrinsic_store_ssbo_block_intel: {
5518
assert(nir_src_bit_size(instr->src[0]) == 32);
5519
5520
const bool is_ssbo =
5521
instr->intrinsic == nir_intrinsic_store_ssbo_block_intel;
5522
5523
fs_reg address = bld.emit_uniformize(get_nir_src(instr->src[is_ssbo ? 2 : 1]));
5524
fs_reg src = get_nir_src(instr->src[0]);
5525
5526
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
5527
srcs[SURFACE_LOGICAL_SRC_SURFACE] = is_ssbo ?
5528
get_nir_ssbo_intrinsic_index(bld, instr) : fs_reg(brw_imm_ud(GFX7_BTI_SLM));
5529
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = address;
5530
5531
const fs_builder ubld1 = bld.exec_all().group(1, 0);
5532
const fs_builder ubld8 = bld.exec_all().group(8, 0);
5533
const fs_builder ubld16 = bld.exec_all().group(16, 0);
5534
5535
const unsigned total = instr->num_components * dispatch_width;
5536
unsigned written = 0;
5537
5538
while (written < total) {
5539
const unsigned block =
5540
choose_oword_block_size_dwords(total - written);
5541
5542
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(block);
5543
srcs[SURFACE_LOGICAL_SRC_DATA] =
5544
retype(byte_offset(src, written * 4), BRW_REGISTER_TYPE_UD);
5545
5546
const fs_builder &ubld = block == 8 ? ubld8 : ubld16;
5547
ubld.emit(SHADER_OPCODE_OWORD_BLOCK_WRITE_LOGICAL,
5548
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
5549
5550
const unsigned block_bytes = block * 4;
5551
ubld1.ADD(address, address, brw_imm_ud(block_bytes));
5552
written += block;
5553
}
5554
5555
assert(written == total);
5556
break;
5557
}
5558
5559
case nir_intrinsic_load_btd_dss_id_intel:
5560
bld.emit(SHADER_OPCODE_GET_DSS_ID,
5561
retype(dest, BRW_REGISTER_TYPE_UD));
5562
break;
5563
5564
case nir_intrinsic_load_btd_stack_id_intel:
5565
if (stage == MESA_SHADER_COMPUTE) {
5566
assert(brw_cs_prog_data(prog_data)->uses_btd_stack_ids);
5567
} else {
5568
assert(brw_shader_stage_is_bindless(stage));
5569
}
5570
/* Stack IDs are always in R1 regardless of whether we're coming from a
5571
* bindless shader or a regular compute shader.
5572
*/
5573
bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
5574
retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UW));
5575
break;
5576
5577
case nir_intrinsic_btd_spawn_intel:
5578
if (stage == MESA_SHADER_COMPUTE) {
5579
assert(brw_cs_prog_data(prog_data)->uses_btd_stack_ids);
5580
} else {
5581
assert(brw_shader_stage_is_bindless(stage));
5582
}
5583
bld.emit(SHADER_OPCODE_BTD_SPAWN_LOGICAL, bld.null_reg_ud(),
5584
bld.emit_uniformize(get_nir_src(instr->src[0])),
5585
get_nir_src(instr->src[1]));
5586
break;
5587
5588
case nir_intrinsic_btd_retire_intel:
5589
if (stage == MESA_SHADER_COMPUTE) {
5590
assert(brw_cs_prog_data(prog_data)->uses_btd_stack_ids);
5591
} else {
5592
assert(brw_shader_stage_is_bindless(stage));
5593
}
5594
bld.emit(SHADER_OPCODE_BTD_RETIRE_LOGICAL);
5595
break;
5596
5597
default:
5598
unreachable("unknown intrinsic");
5599
}
5600
}
5601
5602
void
5603
fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
5604
int op, nir_intrinsic_instr *instr)
5605
{
5606
/* The BTI untyped atomic messages only support 32-bit atomics. If you
5607
* just look at the big table of messages in Vol 7 of the SKL PRM, they
5608
* appear to exist. However, if you look at Vol 2a, there are no message
5609
* descriptors provided for Qword atomic ops except for A64 messages.
5610
*/
5611
assert(nir_dest_bit_size(instr->dest) == 32);
5612
5613
fs_reg dest;
5614
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
5615
dest = get_nir_dest(instr->dest);
5616
5617
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
5618
srcs[SURFACE_LOGICAL_SRC_SURFACE] = get_nir_ssbo_intrinsic_index(bld, instr);
5619
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
5620
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
5621
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
5622
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(1);
5623
5624
fs_reg data;
5625
if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
5626
data = get_nir_src(instr->src[2]);
5627
5628
if (op == BRW_AOP_CMPWR) {
5629
fs_reg tmp = bld.vgrf(data.type, 2);
5630
fs_reg sources[2] = { data, get_nir_src(instr->src[3]) };
5631
bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
5632
data = tmp;
5633
}
5634
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
5635
5636
/* Emit the actual atomic operation */
5637
5638
bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
5639
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
5640
}
5641
5642
void
5643
fs_visitor::nir_emit_ssbo_atomic_float(const fs_builder &bld,
5644
int op, nir_intrinsic_instr *instr)
5645
{
5646
fs_reg dest;
5647
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
5648
dest = get_nir_dest(instr->dest);
5649
5650
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
5651
srcs[SURFACE_LOGICAL_SRC_SURFACE] = get_nir_ssbo_intrinsic_index(bld, instr);
5652
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
5653
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
5654
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
5655
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(1);
5656
5657
fs_reg data = get_nir_src(instr->src[2]);
5658
if (op == BRW_AOP_FCMPWR) {
5659
fs_reg tmp = bld.vgrf(data.type, 2);
5660
fs_reg sources[2] = { data, get_nir_src(instr->src[3]) };
5661
bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
5662
data = tmp;
5663
}
5664
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
5665
5666
/* Emit the actual atomic operation */
5667
5668
bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL,
5669
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
5670
}
5671
5672
void
5673
fs_visitor::nir_emit_shared_atomic(const fs_builder &bld,
5674
int op, nir_intrinsic_instr *instr)
5675
{
5676
fs_reg dest;
5677
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
5678
dest = get_nir_dest(instr->dest);
5679
5680
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
5681
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GFX7_BTI_SLM);
5682
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
5683
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
5684
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(1);
5685
5686
fs_reg data;
5687
if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
5688
data = get_nir_src(instr->src[1]);
5689
if (op == BRW_AOP_CMPWR) {
5690
fs_reg tmp = bld.vgrf(data.type, 2);
5691
fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
5692
bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
5693
data = tmp;
5694
}
5695
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
5696
5697
/* Get the offset */
5698
if (nir_src_is_const(instr->src[0])) {
5699
srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
5700
brw_imm_ud(instr->const_index[0] + nir_src_as_uint(instr->src[0]));
5701
} else {
5702
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = vgrf(glsl_type::uint_type);
5703
bld.ADD(srcs[SURFACE_LOGICAL_SRC_ADDRESS],
5704
retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
5705
brw_imm_ud(instr->const_index[0]));
5706
}
5707
5708
/* Emit the actual atomic operation */
5709
5710
bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
5711
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
5712
}
5713
5714
void
5715
fs_visitor::nir_emit_shared_atomic_float(const fs_builder &bld,
5716
int op, nir_intrinsic_instr *instr)
5717
{
5718
fs_reg dest;
5719
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
5720
dest = get_nir_dest(instr->dest);
5721
5722
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
5723
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GFX7_BTI_SLM);
5724
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
5725
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
5726
srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(1);
5727
5728
fs_reg data = get_nir_src(instr->src[1]);
5729
if (op == BRW_AOP_FCMPWR) {
5730
fs_reg tmp = bld.vgrf(data.type, 2);
5731
fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
5732
bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
5733
data = tmp;
5734
}
5735
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
5736
5737
/* Get the offset */
5738
if (nir_src_is_const(instr->src[0])) {
5739
srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
5740
brw_imm_ud(instr->const_index[0] + nir_src_as_uint(instr->src[0]));
5741
} else {
5742
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = vgrf(glsl_type::uint_type);
5743
bld.ADD(srcs[SURFACE_LOGICAL_SRC_ADDRESS],
5744
retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
5745
brw_imm_ud(instr->const_index[0]));
5746
}
5747
5748
/* Emit the actual atomic operation */
5749
5750
bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL,
5751
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
5752
}
5753
5754
static fs_reg
5755
expand_to_32bit(const fs_builder &bld, const fs_reg &src)
5756
{
5757
if (type_sz(src.type) == 2) {
5758
fs_reg src32 = bld.vgrf(BRW_REGISTER_TYPE_UD);
5759
bld.MOV(src32, retype(src, BRW_REGISTER_TYPE_UW));
5760
return src32;
5761
} else {
5762
return src;
5763
}
5764
}
5765
5766
void
5767
fs_visitor::nir_emit_global_atomic(const fs_builder &bld,
5768
int op, nir_intrinsic_instr *instr)
5769
{
5770
fs_reg dest;
5771
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
5772
dest = get_nir_dest(instr->dest);
5773
5774
fs_reg addr = get_nir_src(instr->src[0]);
5775
5776
fs_reg data;
5777
if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
5778
data = expand_to_32bit(bld, get_nir_src(instr->src[1]));
5779
5780
if (op == BRW_AOP_CMPWR) {
5781
fs_reg tmp = bld.vgrf(data.type, 2);
5782
fs_reg sources[2] = {
5783
data,
5784
expand_to_32bit(bld, get_nir_src(instr->src[2]))
5785
};
5786
bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
5787
data = tmp;
5788
}
5789
5790
switch (nir_dest_bit_size(instr->dest)) {
5791
case 16: {
5792
fs_reg dest32 = bld.vgrf(BRW_REGISTER_TYPE_UD);
5793
bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT16_LOGICAL,
5794
dest32, addr, data, brw_imm_ud(op));
5795
bld.MOV(retype(dest, BRW_REGISTER_TYPE_UW), dest32);
5796
break;
5797
}
5798
case 32:
5799
bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
5800
dest, addr, data, brw_imm_ud(op));
5801
break;
5802
case 64:
5803
bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL,
5804
dest, addr, data, brw_imm_ud(op));
5805
break;
5806
default:
5807
unreachable("Unsupported bit size");
5808
}
5809
}
5810
5811
void
5812
fs_visitor::nir_emit_global_atomic_float(const fs_builder &bld,
5813
int op, nir_intrinsic_instr *instr)
5814
{
5815
assert(nir_intrinsic_infos[instr->intrinsic].has_dest);
5816
fs_reg dest = get_nir_dest(instr->dest);
5817
5818
fs_reg addr = get_nir_src(instr->src[0]);
5819
5820
assert(op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC);
5821
fs_reg data = expand_to_32bit(bld, get_nir_src(instr->src[1]));
5822
5823
if (op == BRW_AOP_FCMPWR) {
5824
fs_reg tmp = bld.vgrf(data.type, 2);
5825
fs_reg sources[2] = {
5826
data,
5827
expand_to_32bit(bld, get_nir_src(instr->src[2]))
5828
};
5829
bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
5830
data = tmp;
5831
}
5832
5833
switch (nir_dest_bit_size(instr->dest)) {
5834
case 16: {
5835
fs_reg dest32 = bld.vgrf(BRW_REGISTER_TYPE_UD);
5836
bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT16_LOGICAL,
5837
dest32, addr, data, brw_imm_ud(op));
5838
bld.MOV(retype(dest, BRW_REGISTER_TYPE_UW), dest32);
5839
break;
5840
}
5841
case 32:
5842
bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT32_LOGICAL,
5843
dest, addr, data, brw_imm_ud(op));
5844
break;
5845
default:
5846
unreachable("Unsupported bit size");
5847
}
5848
}
5849
5850
void
5851
fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
5852
{
5853
unsigned texture = instr->texture_index;
5854
unsigned sampler = instr->sampler_index;
5855
5856
fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
5857
5858
srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture);
5859
srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(sampler);
5860
5861
int lod_components = 0;
5862
5863
/* The hardware requires a LOD for buffer textures */
5864
if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
5865
srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_d(0);
5866
5867
uint32_t header_bits = 0;
5868
for (unsigned i = 0; i < instr->num_srcs; i++) {
5869
fs_reg src = get_nir_src(instr->src[i].src);
5870
switch (instr->src[i].src_type) {
5871
case nir_tex_src_bias:
5872
srcs[TEX_LOGICAL_SRC_LOD] =
5873
retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
5874
break;
5875
case nir_tex_src_comparator:
5876
srcs[TEX_LOGICAL_SRC_SHADOW_C] = retype(src, BRW_REGISTER_TYPE_F);
5877
break;
5878
case nir_tex_src_coord:
5879
switch (instr->op) {
5880
case nir_texop_txf:
5881
case nir_texop_txf_ms:
5882
case nir_texop_txf_ms_mcs:
5883
case nir_texop_samples_identical:
5884
srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_D);
5885
break;
5886
default:
5887
srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_F);
5888
break;
5889
}
5890
5891
/* Wa_14013363432:
5892
*
5893
* Compiler should send U,V,R parameters even if V,R are 0.
5894
*/
5895
if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && devinfo->verx10 == 125)
5896
assert(instr->coord_components == 3u + instr->is_array);
5897
break;
5898
case nir_tex_src_ddx:
5899
srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_F);
5900
lod_components = nir_tex_instr_src_size(instr, i);
5901
break;
5902
case nir_tex_src_ddy:
5903
srcs[TEX_LOGICAL_SRC_LOD2] = retype(src, BRW_REGISTER_TYPE_F);
5904
break;
5905
case nir_tex_src_lod:
5906
switch (instr->op) {
5907
case nir_texop_txs:
5908
srcs[TEX_LOGICAL_SRC_LOD] =
5909
retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_UD);
5910
break;
5911
case nir_texop_txf:
5912
srcs[TEX_LOGICAL_SRC_LOD] =
5913
retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_D);
5914
break;
5915
default:
5916
srcs[TEX_LOGICAL_SRC_LOD] =
5917
retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
5918
break;
5919
}
5920
break;
5921
case nir_tex_src_min_lod:
5922
srcs[TEX_LOGICAL_SRC_MIN_LOD] =
5923
retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
5924
break;
5925
case nir_tex_src_ms_index:
5926
srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX] = retype(src, BRW_REGISTER_TYPE_UD);
5927
break;
5928
5929
case nir_tex_src_offset: {
5930
uint32_t offset_bits = 0;
5931
if (brw_texture_offset(instr, i, &offset_bits)) {
5932
header_bits |= offset_bits;
5933
} else {
5934
srcs[TEX_LOGICAL_SRC_TG4_OFFSET] =
5935
retype(src, BRW_REGISTER_TYPE_D);
5936
}
5937
break;
5938
}
5939
5940
case nir_tex_src_projector:
5941
unreachable("should be lowered");
5942
5943
case nir_tex_src_texture_offset: {
5944
/* Emit code to evaluate the actual indexing expression */
5945
fs_reg tmp = vgrf(glsl_type::uint_type);
5946
bld.ADD(tmp, src, brw_imm_ud(texture));
5947
srcs[TEX_LOGICAL_SRC_SURFACE] = bld.emit_uniformize(tmp);
5948
break;
5949
}
5950
5951
case nir_tex_src_sampler_offset: {
5952
/* Emit code to evaluate the actual indexing expression */
5953
fs_reg tmp = vgrf(glsl_type::uint_type);
5954
bld.ADD(tmp, src, brw_imm_ud(sampler));
5955
srcs[TEX_LOGICAL_SRC_SAMPLER] = bld.emit_uniformize(tmp);
5956
break;
5957
}
5958
5959
case nir_tex_src_texture_handle:
5960
assert(nir_tex_instr_src_index(instr, nir_tex_src_texture_offset) == -1);
5961
srcs[TEX_LOGICAL_SRC_SURFACE] = fs_reg();
5962
srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE] = bld.emit_uniformize(src);
5963
break;
5964
5965
case nir_tex_src_sampler_handle:
5966
assert(nir_tex_instr_src_index(instr, nir_tex_src_sampler_offset) == -1);
5967
srcs[TEX_LOGICAL_SRC_SAMPLER] = fs_reg();
5968
srcs[TEX_LOGICAL_SRC_SAMPLER_HANDLE] = bld.emit_uniformize(src);
5969
break;
5970
5971
case nir_tex_src_ms_mcs:
5972
assert(instr->op == nir_texop_txf_ms);
5973
srcs[TEX_LOGICAL_SRC_MCS] = retype(src, BRW_REGISTER_TYPE_D);
5974
break;
5975
5976
case nir_tex_src_plane: {
5977
const uint32_t plane = nir_src_as_uint(instr->src[i].src);
5978
const uint32_t texture_index =
5979
instr->texture_index +
5980
stage_prog_data->binding_table.plane_start[plane] -
5981
stage_prog_data->binding_table.texture_start;
5982
5983
srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture_index);
5984
break;
5985
}
5986
5987
default:
5988
unreachable("unknown texture source");
5989
}
5990
}
5991
5992
if (srcs[TEX_LOGICAL_SRC_MCS].file == BAD_FILE &&
5993
(instr->op == nir_texop_txf_ms ||
5994
instr->op == nir_texop_samples_identical)) {
5995
if (devinfo->ver >= 7 &&
5996
key_tex->compressed_multisample_layout_mask & (1 << texture)) {
5997
srcs[TEX_LOGICAL_SRC_MCS] =
5998
emit_mcs_fetch(srcs[TEX_LOGICAL_SRC_COORDINATE],
5999
instr->coord_components,
6000
srcs[TEX_LOGICAL_SRC_SURFACE],
6001
srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE]);
6002
} else {
6003
srcs[TEX_LOGICAL_SRC_MCS] = brw_imm_ud(0u);
6004
}
6005
}
6006
6007
srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(instr->coord_components);
6008
srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(lod_components);
6009
6010
enum opcode opcode;
6011
switch (instr->op) {
6012
case nir_texop_tex:
6013
opcode = SHADER_OPCODE_TEX_LOGICAL;
6014
break;
6015
case nir_texop_txb:
6016
opcode = FS_OPCODE_TXB_LOGICAL;
6017
break;
6018
case nir_texop_txl:
6019
opcode = SHADER_OPCODE_TXL_LOGICAL;
6020
break;
6021
case nir_texop_txd:
6022
opcode = SHADER_OPCODE_TXD_LOGICAL;
6023
break;
6024
case nir_texop_txf:
6025
opcode = SHADER_OPCODE_TXF_LOGICAL;
6026
break;
6027
case nir_texop_txf_ms:
6028
if ((key_tex->msaa_16 & (1 << sampler)))
6029
opcode = SHADER_OPCODE_TXF_CMS_W_LOGICAL;
6030
else
6031
opcode = SHADER_OPCODE_TXF_CMS_LOGICAL;
6032
break;
6033
case nir_texop_txf_ms_mcs:
6034
opcode = SHADER_OPCODE_TXF_MCS_LOGICAL;
6035
break;
6036
case nir_texop_query_levels:
6037
case nir_texop_txs:
6038
opcode = SHADER_OPCODE_TXS_LOGICAL;
6039
break;
6040
case nir_texop_lod:
6041
opcode = SHADER_OPCODE_LOD_LOGICAL;
6042
break;
6043
case nir_texop_tg4:
6044
if (srcs[TEX_LOGICAL_SRC_TG4_OFFSET].file != BAD_FILE)
6045
opcode = SHADER_OPCODE_TG4_OFFSET_LOGICAL;
6046
else
6047
opcode = SHADER_OPCODE_TG4_LOGICAL;
6048
break;
6049
case nir_texop_texture_samples:
6050
opcode = SHADER_OPCODE_SAMPLEINFO_LOGICAL;
6051
break;
6052
case nir_texop_samples_identical: {
6053
fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);
6054
6055
/* If mcs is an immediate value, it means there is no MCS. In that case
6056
* just return false.
6057
*/
6058
if (srcs[TEX_LOGICAL_SRC_MCS].file == BRW_IMMEDIATE_VALUE) {
6059
bld.MOV(dst, brw_imm_ud(0u));
6060
} else if ((key_tex->msaa_16 & (1 << sampler))) {
6061
fs_reg tmp = vgrf(glsl_type::uint_type);
6062
bld.OR(tmp, srcs[TEX_LOGICAL_SRC_MCS],
6063
offset(srcs[TEX_LOGICAL_SRC_MCS], bld, 1));
6064
bld.CMP(dst, tmp, brw_imm_ud(0u), BRW_CONDITIONAL_EQ);
6065
} else {
6066
bld.CMP(dst, srcs[TEX_LOGICAL_SRC_MCS], brw_imm_ud(0u),
6067
BRW_CONDITIONAL_EQ);
6068
}
6069
return;
6070
}
6071
default:
6072
unreachable("unknown texture opcode");
6073
}
6074
6075
if (instr->op == nir_texop_tg4) {
6076
if (instr->component == 1 &&
6077
key_tex->gather_channel_quirk_mask & (1 << texture)) {
6078
/* gather4 sampler is broken for green channel on RG32F --
6079
* we must ask for blue instead.
6080
*/
6081
header_bits |= 2 << 16;
6082
} else {
6083
header_bits |= instr->component << 16;
6084
}
6085
}
6086
6087
fs_reg dst = bld.vgrf(brw_type_for_nir_type(devinfo, instr->dest_type), 4);
6088
fs_inst *inst = bld.emit(opcode, dst, srcs, ARRAY_SIZE(srcs));
6089
inst->offset = header_bits;
6090
6091
const unsigned dest_size = nir_tex_instr_dest_size(instr);
6092
if (devinfo->ver >= 9 &&
6093
instr->op != nir_texop_tg4 && instr->op != nir_texop_query_levels) {
6094
unsigned write_mask = instr->dest.is_ssa ?
6095
nir_ssa_def_components_read(&instr->dest.ssa):
6096
(1 << dest_size) - 1;
6097
assert(write_mask != 0); /* dead code should have been eliminated */
6098
inst->size_written = util_last_bit(write_mask) *
6099
inst->dst.component_size(inst->exec_size);
6100
} else {
6101
inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
6102
}
6103
6104
if (srcs[TEX_LOGICAL_SRC_SHADOW_C].file != BAD_FILE)
6105
inst->shadow_compare = true;
6106
6107
if (instr->op == nir_texop_tg4 && devinfo->ver == 6)
6108
emit_gfx6_gather_wa(key_tex->gfx6_gather_wa[texture], dst);
6109
6110
fs_reg nir_dest[5];
6111
for (unsigned i = 0; i < dest_size; i++)
6112
nir_dest[i] = offset(dst, bld, i);
6113
6114
if (instr->op == nir_texop_query_levels) {
6115
/* # levels is in .w */
6116
if (devinfo->ver <= 9) {
6117
/**
6118
* Wa_1940217:
6119
*
6120
* When a surface of type SURFTYPE_NULL is accessed by resinfo, the
6121
* MIPCount returned is undefined instead of 0.
6122
*/
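/* How the workaround below plays out (a reading of the code, not the PRM):
 * the NZ-conditional MOV sets the flag wherever resinfo returned a non-zero
 * first channel, and the predicated SEL then picks either the real MIPCount
 * in .w or the constant 0 for the NULL-surface case.
 */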
6123
fs_inst *mov = bld.MOV(bld.null_reg_d(), dst);
6124
mov->conditional_mod = BRW_CONDITIONAL_NZ;
6125
nir_dest[0] = bld.vgrf(BRW_REGISTER_TYPE_D);
6126
fs_inst *sel = bld.SEL(nir_dest[0], offset(dst, bld, 3), brw_imm_d(0));
6127
sel->predicate = BRW_PREDICATE_NORMAL;
6128
} else {
6129
nir_dest[0] = offset(dst, bld, 3);
6130
}
6131
} else if (instr->op == nir_texop_txs &&
6132
dest_size >= 3 && devinfo->ver < 7) {
6133
/* Gfx4-6 return 0 instead of 1 for single layer surfaces. */
6134
fs_reg depth = offset(dst, bld, 2);
6135
nir_dest[2] = vgrf(glsl_type::int_type);
6136
bld.emit_minmax(nir_dest[2], depth, brw_imm_d(1), BRW_CONDITIONAL_GE);
6137
}
6138
6139
bld.LOAD_PAYLOAD(get_nir_dest(instr->dest), nir_dest, dest_size, 0);
6140
}
6141
6142
void
6143
fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
6144
{
6145
switch (instr->type) {
6146
case nir_jump_break:
6147
bld.emit(BRW_OPCODE_BREAK);
6148
break;
6149
case nir_jump_continue:
6150
bld.emit(BRW_OPCODE_CONTINUE);
6151
break;
6152
case nir_jump_halt:
6153
bld.emit(BRW_OPCODE_HALT);
6154
break;
6155
case nir_jump_return:
6156
default:
6157
unreachable("unknown jump");
6158
}
6159
}
6160
6161
/*
6162
* This helper takes a source register and un/shuffles it into the destination
6163
* register.
6164
*
6165
* If source type size is smaller than destination type size, the operation
6166
* needed is a component shuffle. The opposite case would be an unshuffle. If
6167
* source/destination type size is equal, a shuffle is done that would be
6168
* equivalent to a simple MOV.
6169
*
6170
* For example, if the source is a 16-bit type and the destination is 32-bit, a
6171
* 3-component .xyz 16-bit vector on SIMD8 would be:
6172
*
6173
* |x1|x2|x3|x4|x5|x6|x7|x8|y1|y2|y3|y4|y5|y6|y7|y8|
6174
* |z1|z2|z3|z4|z5|z6|z7|z8| | | | | | | | |
6175
*
6176
* This helper will return the following 2 32-bit components with the 16-bit
6177
* values shuffled:
6178
*
6179
* |x1 y1|x2 y2|x3 y3|x4 y4|x5 y5|x6 y6|x7 y7|x8 y8|
6180
* |z1 |z2 |z3 |z4 |z5 |z6 |z7 |z8 |
6181
*
6182
* For unshuffle, the example would be the opposite, a 64-bit type source
6183
* and a 32-bit destination. A 2-component .xy 64-bit vector on SIMD8
6184
* would be:
6185
*
6186
* | x1l x1h | x2l x2h | x3l x3h | x4l x4h |
6187
* | x5l x5h | x6l x6h | x7l x7h | x8l x8h |
6188
* | y1l y1h | y2l y2h | y3l y3h | y4l y4h |
6189
* | y5l y5h | y6l y6h | y7l y7h | y8l y8h |
6190
*
6191
* The returned result would be the following 4 32-bit components unshuffled:
6192
*
6193
* | x1l | x2l | x3l | x4l | x5l | x6l | x7l | x8l |
6194
* | x1h | x2h | x3h | x4h | x5h | x6h | x7h | x8h |
6195
* | y1l | y2l | y3l | y4l | y5l | y6l | y7l | y8l |
6196
* | y1h | y2h | y3h | y4h | y5h | y6h | y7h | y8h |
6197
*
6198
* - Source and destination registers must not overlap.
6199
* - component units are measured in terms of the smaller type between
6200
* source and destination because we are un/shuffling the smaller
6201
* components from/into the bigger ones.
6202
* - first_component parameter allows skipping source components.
6203
*/
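/* A hedged usage sketch (parameters chosen purely for illustration): with a
 * 64-bit dst and a 32-bit src, shuffle_src_to_dst(bld, dst, src, 0, 4) packs
 * four 32-bit source components into the low/high halves of two 64-bit
 * destination components; with the type sizes swapped, the same call spreads
 * two 64-bit components out into four 32-bit ones.
 */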
6204
void
6205
shuffle_src_to_dst(const fs_builder &bld,
6206
const fs_reg &dst,
6207
const fs_reg &src,
6208
uint32_t first_component,
6209
uint32_t components)
6210
{
6211
if (type_sz(src.type) == type_sz(dst.type)) {
6212
assert(!regions_overlap(dst,
6213
type_sz(dst.type) * bld.dispatch_width() * components,
6214
offset(src, bld, first_component),
6215
type_sz(src.type) * bld.dispatch_width() * components));
6216
for (unsigned i = 0; i < components; i++) {
6217
bld.MOV(retype(offset(dst, bld, i), src.type),
6218
offset(src, bld, i + first_component));
6219
}
6220
} else if (type_sz(src.type) < type_sz(dst.type)) {
6221
/* Source is shuffled into destination */
6222
unsigned size_ratio = type_sz(dst.type) / type_sz(src.type);
6223
assert(!regions_overlap(dst,
6224
type_sz(dst.type) * bld.dispatch_width() *
6225
DIV_ROUND_UP(components, size_ratio),
6226
offset(src, bld, first_component),
6227
type_sz(src.type) * bld.dispatch_width() * components));
6228
6229
brw_reg_type shuffle_type =
6230
brw_reg_type_from_bit_size(8 * type_sz(src.type),
6231
BRW_REGISTER_TYPE_D);
6232
for (unsigned i = 0; i < components; i++) {
6233
fs_reg shuffle_component_i =
6234
subscript(offset(dst, bld, i / size_ratio),
6235
shuffle_type, i % size_ratio);
6236
bld.MOV(shuffle_component_i,
6237
retype(offset(src, bld, i + first_component), shuffle_type));
6238
}
6239
} else {
6240
/* Source is unshuffled into destination */
6241
unsigned size_ratio = type_sz(src.type) / type_sz(dst.type);
6242
assert(!regions_overlap(dst,
6243
type_sz(dst.type) * bld.dispatch_width() * components,
6244
offset(src, bld, first_component / size_ratio),
6245
type_sz(src.type) * bld.dispatch_width() *
6246
DIV_ROUND_UP(components + (first_component % size_ratio),
6247
size_ratio)));
6248
6249
brw_reg_type shuffle_type =
6250
brw_reg_type_from_bit_size(8 * type_sz(dst.type),
6251
BRW_REGISTER_TYPE_D);
6252
for (unsigned i = 0; i < components; i++) {
6253
fs_reg shuffle_component_i =
6254
subscript(offset(src, bld, (first_component + i) / size_ratio),
6255
shuffle_type, (first_component + i) % size_ratio);
6256
bld.MOV(retype(offset(dst, bld, i), shuffle_type),
6257
shuffle_component_i);
6258
}
6259
}
6260
}
6261
6262
void
6263
shuffle_from_32bit_read(const fs_builder &bld,
6264
const fs_reg &dst,
6265
const fs_reg &src,
6266
uint32_t first_component,
6267
uint32_t components)
6268
{
6269
assert(type_sz(src.type) == 4);
6270
6271
/* This function takes components in units of the destination type while
6272
* shuffle_src_to_dst takes components in units of the smallest type
6273
*/
6274
if (type_sz(dst.type) > 4) {
6275
assert(type_sz(dst.type) == 8);
6276
first_component *= 2;
6277
components *= 2;
6278
}
6279
6280
shuffle_src_to_dst(bld, dst, src, first_component, components);
6281
}
6282
6283
fs_reg
6284
setup_imm_df(const fs_builder &bld, double v)
6285
{
6286
const struct intel_device_info *devinfo = bld.shader->devinfo;
6287
assert(devinfo->ver >= 7);
6288
6289
if (devinfo->ver >= 8)
6290
return brw_imm_df(v);
6291
6292
/* gfx7.5 does not support DF immediates straightforwardly, but the DIM
6293
* instruction allows setting the 64-bit immediate value.
6294
*/
6295
if (devinfo->is_haswell) {
6296
const fs_builder ubld = bld.exec_all().group(1, 0);
6297
fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_DF, 1);
6298
ubld.DIM(dst, brw_imm_df(v));
6299
return component(dst, 0);
6300
}
6301
6302
/* gfx7 does not support DF immediates, so we generate a 64-bit constant by
6303
* writing the low 32 bits of the constant to suboffset 0 of a VGRF and
6304
* the high 32 bits to suboffset 4 and then applying a stride of 0.
6305
*
6306
* Alternatively, we could also produce a normal VGRF (without stride 0)
6307
* by writing to all the channels in the VGRF; however, that would hit the
6308
* gfx7 bug where we have to split writes that span more than 1 register
6309
* into instructions with a width of 4 (otherwise the write to the second
6310
* register written runs into an execmask hardware bug) which isn't very
6311
* nice.
6312
*/
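/* A concrete value, for illustration only: for v = 1.0 the union below ends
 * up with di.i1 = 0x00000000 (low dword) and di.i2 = 0x3ff00000 (high dword)
 * on a little-endian host, and component(..., 0) hands that 64-bit pattern
 * back as a stride-0 DF scalar.
 */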
6313
union {
6314
double d;
6315
struct {
6316
uint32_t i1;
6317
uint32_t i2;
6318
};
6319
} di;
6320
6321
di.d = v;
6322
6323
const fs_builder ubld = bld.exec_all().group(1, 0);
6324
const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
6325
ubld.MOV(tmp, brw_imm_ud(di.i1));
6326
ubld.MOV(horiz_offset(tmp, 1), brw_imm_ud(di.i2));
6327
6328
return component(retype(tmp, BRW_REGISTER_TYPE_DF), 0);
6329
}
6330
6331
fs_reg
6332
setup_imm_b(const fs_builder &bld, int8_t v)
6333
{
6334
const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_B);
6335
bld.MOV(tmp, brw_imm_w(v));
6336
return tmp;
6337
}
6338
6339
fs_reg
6340
setup_imm_ub(const fs_builder &bld, uint8_t v)
6341
{
6342
const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UB);
6343
bld.MOV(tmp, brw_imm_uw(v));
6344
return tmp;
6345
}
6346
6347