GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/iris/iris_program.c
/*
* Copyright © 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/

/**
* @file iris_program.c
*
* This file contains the driver interface for compiling shaders.
*
* See iris_program_cache.c for the in-memory program cache where the
* compiled shaders are stored.
*/

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_atomic.h"
#include "util/u_upload_mgr.h"
#include "util/debug.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_serialize.h"
#include "intel/compiler/brw_compiler.h"
#include "intel/compiler/brw_nir.h"
#include "iris_context.h"
#include "nir/tgsi_to_nir.h"

#define KEY_ID(prefix) .prefix.program_string_id = ish->program_id
#define BRW_KEY_INIT(gen, prog_id) \
.base.program_string_id = prog_id, \
.base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM, \
.base.tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688, \
.base.tex.compressed_multisample_layout_mask = ~0, \
.base.tex.msaa_16 = (gen >= 9 ? ~0 : 0)

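/* Note on the defaults above: 0x688 packs SWIZZLE_X/Y/Z/W at three bits per
* component, i.e. the identity texture swizzle, and BRW_KEY_INIT supplies the
* brw_base_prog_key defaults shared by all of the iris_to_brw_*_key()
* helpers below.
*/
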
static unsigned
58
get_new_program_id(struct iris_screen *screen)
59
{
60
return p_atomic_inc_return(&screen->program_id);
61
}
62
63
static struct brw_vs_prog_key
64
iris_to_brw_vs_key(const struct intel_device_info *devinfo,
65
const struct iris_vs_prog_key *key)
66
{
67
return (struct brw_vs_prog_key) {
68
BRW_KEY_INIT(devinfo->ver, key->vue.base.program_string_id),
69
70
/* Don't tell the backend about our clip plane constants, we've
71
* already lowered them in NIR and don't want it doing it again.
72
*/
73
.nr_userclip_plane_consts = 0,
74
};
75
}
76
77
static struct brw_tcs_prog_key
78
iris_to_brw_tcs_key(const struct intel_device_info *devinfo,
79
const struct iris_tcs_prog_key *key)
80
{
81
return (struct brw_tcs_prog_key) {
82
BRW_KEY_INIT(devinfo->ver, key->vue.base.program_string_id),
83
.tes_primitive_mode = key->tes_primitive_mode,
84
.input_vertices = key->input_vertices,
85
.patch_outputs_written = key->patch_outputs_written,
86
.outputs_written = key->outputs_written,
87
.quads_workaround = key->quads_workaround,
88
};
89
}
90
91
static struct brw_tes_prog_key
92
iris_to_brw_tes_key(const struct intel_device_info *devinfo,
93
const struct iris_tes_prog_key *key)
94
{
95
return (struct brw_tes_prog_key) {
96
BRW_KEY_INIT(devinfo->ver, key->vue.base.program_string_id),
97
.patch_inputs_read = key->patch_inputs_read,
98
.inputs_read = key->inputs_read,
99
};
100
}
101
102
static struct brw_gs_prog_key
103
iris_to_brw_gs_key(const struct intel_device_info *devinfo,
104
const struct iris_gs_prog_key *key)
105
{
106
return (struct brw_gs_prog_key) {
107
BRW_KEY_INIT(devinfo->ver, key->vue.base.program_string_id),
108
};
109
}
110
111
static struct brw_wm_prog_key
112
iris_to_brw_fs_key(const struct intel_device_info *devinfo,
113
const struct iris_fs_prog_key *key)
114
{
115
return (struct brw_wm_prog_key) {
116
BRW_KEY_INIT(devinfo->ver, key->base.program_string_id),
117
.nr_color_regions = key->nr_color_regions,
118
.flat_shade = key->flat_shade,
119
.alpha_test_replicate_alpha = key->alpha_test_replicate_alpha,
120
.alpha_to_coverage = key->alpha_to_coverage,
121
.clamp_fragment_color = key->clamp_fragment_color,
122
.persample_interp = key->persample_interp,
123
.multisample_fbo = key->multisample_fbo,
124
.force_dual_color_blend = key->force_dual_color_blend,
125
.coherent_fb_fetch = key->coherent_fb_fetch,
126
.color_outputs_valid = key->color_outputs_valid,
127
.input_slots_valid = key->input_slots_valid,
128
.ignore_sample_mask_out = !key->multisample_fbo,
129
};
130
}
131
132
static struct brw_cs_prog_key
133
iris_to_brw_cs_key(const struct intel_device_info *devinfo,
134
const struct iris_cs_prog_key *key)
135
{
136
return (struct brw_cs_prog_key) {
137
BRW_KEY_INIT(devinfo->ver, key->base.program_string_id),
138
};
139
}
140
141
static void *
142
upload_state(struct u_upload_mgr *uploader,
143
struct iris_state_ref *ref,
144
unsigned size,
145
unsigned alignment)
146
{
147
void *p = NULL;
148
u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
149
return p;
150
}
151
152
void
153
iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
154
struct pipe_shader_buffer *buf,
155
struct iris_state_ref *surf_state,
156
isl_surf_usage_flags_t usage)
157
{
158
struct pipe_context *ctx = &ice->ctx;
159
struct iris_screen *screen = (struct iris_screen *) ctx->screen;
160
bool ssbo = usage & ISL_SURF_USAGE_STORAGE_BIT;
161
162
void *map =
163
upload_state(ice->state.surface_uploader, surf_state,
164
screen->isl_dev.ss.size, 64);
165
if (unlikely(!map)) {
166
surf_state->res = NULL;
167
return;
168
}
169
170
struct iris_resource *res = (void *) buf->buffer;
171
struct iris_bo *surf_bo = iris_resource_bo(surf_state->res);
172
surf_state->offset += iris_bo_offset_from_base_address(surf_bo);
173
174
const bool dataport = ssbo || !screen->compiler->indirect_ubos_use_sampler;
175
176
isl_buffer_fill_state(&screen->isl_dev, map,
177
.address = res->bo->gtt_offset + res->offset +
178
buf->buffer_offset,
179
.size_B = buf->buffer_size - res->offset,
180
.format = dataport ? ISL_FORMAT_RAW
181
: ISL_FORMAT_R32G32B32A32_FLOAT,
182
.swizzle = ISL_SWIZZLE_IDENTITY,
183
.stride_B = 1,
184
.mocs = iris_mocs(res->bo, &screen->isl_dev, usage));
185
}
186
187
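/* Flattens an array-of-arrays deref chain into a single scalar element
* offset. Illustrative example (hypothetical declaration): for
* "image2D img[4][8]", an access to img[i][j] yields (i * 8 + j) * elem_size,
* and the final nir_umin clamps the result to (4 * 8 - 1) * elem_size so an
* out-of-bounds index cannot select an invalid surface.
*/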
static nir_ssa_def *
188
get_aoa_deref_offset(nir_builder *b,
189
nir_deref_instr *deref,
190
unsigned elem_size)
191
{
192
unsigned array_size = elem_size;
193
nir_ssa_def *offset = nir_imm_int(b, 0);
194
195
while (deref->deref_type != nir_deref_type_var) {
196
assert(deref->deref_type == nir_deref_type_array);
197
198
/* This level's element size is the previous level's array size */
199
nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
200
assert(deref->arr.index.ssa);
201
offset = nir_iadd(b, offset,
202
nir_imul(b, index, nir_imm_int(b, array_size)));
203
204
deref = nir_deref_instr_parent(deref);
205
assert(glsl_type_is_array(deref->type));
206
array_size *= glsl_get_length(deref->type);
207
}
208
209
/* Accessing an invalid surface index with the dataport can result in a
210
* hang. According to the spec "if the index used to select an individual
211
* element is negative or greater than or equal to the size of the array,
212
* the results of the operation are undefined but may not lead to
213
* termination" -- which is one of the possible outcomes of the hang.
214
* Clamp the index to prevent access outside of the array bounds.
215
*/
216
return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
217
}
218
219
static void
220
iris_lower_storage_image_derefs(nir_shader *nir)
221
{
222
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
223
224
nir_builder b;
225
nir_builder_init(&b, impl);
226
227
nir_foreach_block(block, impl) {
228
nir_foreach_instr_safe(instr, block) {
229
if (instr->type != nir_instr_type_intrinsic)
230
continue;
231
232
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
233
switch (intrin->intrinsic) {
234
case nir_intrinsic_image_deref_load:
235
case nir_intrinsic_image_deref_store:
236
case nir_intrinsic_image_deref_atomic_add:
237
case nir_intrinsic_image_deref_atomic_imin:
238
case nir_intrinsic_image_deref_atomic_umin:
239
case nir_intrinsic_image_deref_atomic_imax:
240
case nir_intrinsic_image_deref_atomic_umax:
241
case nir_intrinsic_image_deref_atomic_and:
242
case nir_intrinsic_image_deref_atomic_or:
243
case nir_intrinsic_image_deref_atomic_xor:
244
case nir_intrinsic_image_deref_atomic_exchange:
245
case nir_intrinsic_image_deref_atomic_comp_swap:
246
case nir_intrinsic_image_deref_size:
247
case nir_intrinsic_image_deref_samples:
248
case nir_intrinsic_image_deref_load_raw_intel:
249
case nir_intrinsic_image_deref_store_raw_intel: {
250
nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
251
nir_variable *var = nir_deref_instr_get_variable(deref);
252
253
b.cursor = nir_before_instr(&intrin->instr);
254
nir_ssa_def *index =
255
nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
256
get_aoa_deref_offset(&b, deref, 1));
257
nir_rewrite_image_intrinsic(intrin, index, false);
258
break;
259
}
260
261
default:
262
break;
263
}
264
}
265
}
266
}
267
268
/**
269
* Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
270
*/
271
static bool
272
iris_fix_edge_flags(nir_shader *nir)
273
{
274
if (nir->info.stage != MESA_SHADER_VERTEX) {
275
nir_shader_preserve_all_metadata(nir);
276
return false;
277
}
278
279
nir_variable *var = nir_find_variable_with_location(nir, nir_var_shader_out,
280
VARYING_SLOT_EDGE);
281
if (!var) {
282
nir_shader_preserve_all_metadata(nir);
283
return false;
284
}
285
286
var->data.mode = nir_var_shader_temp;
287
nir->info.outputs_written &= ~VARYING_BIT_EDGE;
288
nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
289
nir_fixup_deref_modes(nir);
290
291
nir_foreach_function(f, nir) {
292
if (f->impl) {
293
nir_metadata_preserve(f->impl, nir_metadata_block_index |
294
nir_metadata_dominance |
295
nir_metadata_live_ssa_defs |
296
nir_metadata_loop_analysis);
297
} else {
298
nir_metadata_preserve(f->impl, nir_metadata_all);
299
}
300
}
301
302
return true;
303
}
304
305
/**
306
* Fix an uncompiled shader's stream output info.
307
*
308
* Core Gallium stores output->register_index as a "slot" number, where
309
* slots are assigned consecutively to all outputs in info->outputs_written.
310
* This naive packing of outputs doesn't work for us - we too have slots,
311
* but the layout is defined by the VUE map, which we won't have until we
312
* compile a specific shader variant. So, we remap these and simply store
313
* VARYING_SLOT_* in our copy's output->register_index fields.
314
*
315
* We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
316
* components of our VUE header. See brw_vue_map.c for the layout.
317
*/
318
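/* Illustrative example: if outputs_written contains only VARYING_SLOT_POS
* and VARYING_SLOT_PSIZ, Gallium will have numbered them register_index 0
* and 1; reverse_map[] turns those back into the VARYING_SLOT_* values, and
* the switch below folds LAYER/VIEWPORT/PSIZ into the .y/.z/.w components of
* the PSIZ slot to match the VUE header layout.
*/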
static void
319
update_so_info(struct pipe_stream_output_info *so_info,
320
uint64_t outputs_written)
321
{
322
uint8_t reverse_map[64] = {};
323
unsigned slot = 0;
324
while (outputs_written) {
325
reverse_map[slot++] = u_bit_scan64(&outputs_written);
326
}
327
328
for (unsigned i = 0; i < so_info->num_outputs; i++) {
329
struct pipe_stream_output *output = &so_info->output[i];
330
331
/* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
332
output->register_index = reverse_map[output->register_index];
333
334
/* The VUE header contains three scalar fields packed together:
335
* - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
336
* - gl_Layer is stored in VARYING_SLOT_PSIZ.y
337
* - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
338
*/
339
switch (output->register_index) {
340
case VARYING_SLOT_LAYER:
341
assert(output->num_components == 1);
342
output->register_index = VARYING_SLOT_PSIZ;
343
output->start_component = 1;
344
break;
345
case VARYING_SLOT_VIEWPORT:
346
assert(output->num_components == 1);
347
output->register_index = VARYING_SLOT_PSIZ;
348
output->start_component = 2;
349
break;
350
case VARYING_SLOT_PSIZ:
351
assert(output->num_components == 1);
352
output->start_component = 3;
353
break;
354
}
355
356
//info->outputs_written |= 1ull << output->register_index;
357
}
358
}
359
360
static void
361
setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
362
unsigned offset, unsigned n)
363
{
364
assert(offset % sizeof(uint32_t) == 0);
365
366
for (unsigned i = 0; i < n; ++i)
367
sysvals[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);
368
369
for (unsigned i = n; i < 4; ++i)
370
sysvals[i] = BRW_PARAM_BUILTIN_ZERO;
371
}
372
373
/**
374
* Associate NIR uniform variables with the prog_data->param[] mechanism
375
* used by the backend. Also, decide which UBOs we'd like to push in an
376
* ideal situation (though the backend can reduce this).
377
*/
378
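/* Sketch of the transformation (assuming a shader that reads
* gl_ClipPlane[0] and declares two user UBOs): the load_user_clip_plane
* intrinsic becomes a load_ubo from an extra "system value" constant buffer
* appended after the user UBOs, and system_values[] records
* BRW_PARAM_BUILTIN_CLIP_PLANE(0, 0..3) so the driver knows what to upload
* into that buffer at draw time.
*/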
static void
379
iris_setup_uniforms(const struct brw_compiler *compiler,
380
void *mem_ctx,
381
nir_shader *nir,
382
struct brw_stage_prog_data *prog_data,
383
unsigned kernel_input_size,
384
enum brw_param_builtin **out_system_values,
385
unsigned *out_num_system_values,
386
unsigned *out_num_cbufs)
387
{
388
UNUSED const struct intel_device_info *devinfo = compiler->devinfo;
389
390
unsigned system_values_start = ALIGN(kernel_input_size, sizeof(uint32_t));
391
392
const unsigned IRIS_MAX_SYSTEM_VALUES =
393
PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
394
enum brw_param_builtin *system_values =
395
rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
396
unsigned num_system_values = 0;
397
398
unsigned patch_vert_idx = -1;
399
unsigned ucp_idx[IRIS_MAX_CLIP_PLANES];
400
unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
401
unsigned variable_group_size_idx = -1;
402
unsigned work_dim_idx = -1;
403
memset(ucp_idx, -1, sizeof(ucp_idx));
404
memset(img_idx, -1, sizeof(img_idx));
405
406
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
407
408
nir_builder b;
409
nir_builder_init(&b, impl);
410
411
b.cursor = nir_before_block(nir_start_block(impl));
412
nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
413
414
/* Turn system value intrinsics into uniforms */
415
nir_foreach_block(block, impl) {
416
nir_foreach_instr_safe(instr, block) {
417
if (instr->type != nir_instr_type_intrinsic)
418
continue;
419
420
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
421
nir_ssa_def *offset;
422
423
switch (intrin->intrinsic) {
424
case nir_intrinsic_load_constant: {
425
unsigned load_size = intrin->dest.ssa.num_components *
426
intrin->dest.ssa.bit_size / 8;
427
unsigned load_align = intrin->dest.ssa.bit_size / 8;
428
429
/* This one is special because it reads from the shader constant
430
* data and not cbuf0 which gallium uploads for us.
431
*/
432
b.cursor = nir_instr_remove(&intrin->instr);
433
434
nir_ssa_def *offset =
435
nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
436
nir_intrinsic_base(intrin));
437
438
assert(load_size < b.shader->constant_data_size);
439
unsigned max_offset = b.shader->constant_data_size - load_size;
440
offset = nir_umin(&b, offset, nir_imm_int(&b, max_offset));
441
442
nir_ssa_def *const_data_base_addr = nir_pack_64_2x32_split(&b,
443
nir_load_reloc_const_intel(&b, BRW_SHADER_RELOC_CONST_DATA_ADDR_LOW),
444
nir_load_reloc_const_intel(&b, BRW_SHADER_RELOC_CONST_DATA_ADDR_HIGH));
445
446
nir_ssa_def *data =
447
nir_load_global(&b, nir_iadd(&b, const_data_base_addr,
448
nir_u2u64(&b, offset)),
449
load_align,
450
intrin->dest.ssa.num_components,
451
intrin->dest.ssa.bit_size);
452
453
nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
454
data);
455
continue;
456
}
457
case nir_intrinsic_load_user_clip_plane: {
458
unsigned ucp = nir_intrinsic_ucp_id(intrin);
459
460
if (ucp_idx[ucp] == -1) {
461
ucp_idx[ucp] = num_system_values;
462
num_system_values += 4;
463
}
464
465
for (int i = 0; i < 4; i++) {
466
system_values[ucp_idx[ucp] + i] =
467
BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
468
}
469
470
b.cursor = nir_before_instr(instr);
471
offset = nir_imm_int(&b, system_values_start +
472
ucp_idx[ucp] * sizeof(uint32_t));
473
break;
474
}
475
case nir_intrinsic_load_patch_vertices_in:
476
if (patch_vert_idx == -1)
477
patch_vert_idx = num_system_values++;
478
479
system_values[patch_vert_idx] =
480
BRW_PARAM_BUILTIN_PATCH_VERTICES_IN;
481
482
b.cursor = nir_before_instr(instr);
483
offset = nir_imm_int(&b, system_values_start +
484
patch_vert_idx * sizeof(uint32_t));
485
break;
486
case nir_intrinsic_image_deref_load_param_intel: {
487
assert(devinfo->ver < 9);
488
nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
489
nir_variable *var = nir_deref_instr_get_variable(deref);
490
491
if (img_idx[var->data.binding] == -1) {
492
/* GL only allows arrays of arrays of images. */
493
assert(glsl_type_is_image(glsl_without_array(var->type)));
494
unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));
495
496
for (int i = 0; i < num_images; i++) {
497
const unsigned img = var->data.binding + i;
498
499
img_idx[img] = num_system_values;
500
num_system_values += BRW_IMAGE_PARAM_SIZE;
501
502
uint32_t *img_sv = &system_values[img_idx[img]];
503
504
setup_vec4_image_sysval(
505
img_sv + BRW_IMAGE_PARAM_OFFSET_OFFSET, img,
506
offsetof(struct brw_image_param, offset), 2);
507
setup_vec4_image_sysval(
508
img_sv + BRW_IMAGE_PARAM_SIZE_OFFSET, img,
509
offsetof(struct brw_image_param, size), 3);
510
setup_vec4_image_sysval(
511
img_sv + BRW_IMAGE_PARAM_STRIDE_OFFSET, img,
512
offsetof(struct brw_image_param, stride), 4);
513
setup_vec4_image_sysval(
514
img_sv + BRW_IMAGE_PARAM_TILING_OFFSET, img,
515
offsetof(struct brw_image_param, tiling), 3);
516
setup_vec4_image_sysval(
517
img_sv + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, img,
518
offsetof(struct brw_image_param, swizzling), 2);
519
}
520
}
521
522
b.cursor = nir_before_instr(instr);
523
offset = nir_iadd(&b,
524
get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4),
525
nir_imm_int(&b, system_values_start +
526
img_idx[var->data.binding] * 4 +
527
nir_intrinsic_base(intrin) * 16));
528
break;
529
}
530
case nir_intrinsic_load_workgroup_size: {
531
assert(nir->info.workgroup_size_variable);
532
if (variable_group_size_idx == -1) {
533
variable_group_size_idx = num_system_values;
534
num_system_values += 3;
535
for (int i = 0; i < 3; i++) {
536
system_values[variable_group_size_idx + i] =
537
BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i;
538
}
539
}
540
541
b.cursor = nir_before_instr(instr);
542
offset = nir_imm_int(&b, system_values_start +
543
variable_group_size_idx * sizeof(uint32_t));
544
break;
545
}
546
case nir_intrinsic_load_work_dim: {
547
if (work_dim_idx == -1) {
548
work_dim_idx = num_system_values++;
549
system_values[work_dim_idx] = BRW_PARAM_BUILTIN_WORK_DIM;
550
}
551
b.cursor = nir_before_instr(instr);
552
offset = nir_imm_int(&b, system_values_start +
553
work_dim_idx * sizeof(uint32_t));
554
break;
555
}
556
case nir_intrinsic_load_kernel_input: {
557
assert(nir_intrinsic_base(intrin) +
558
nir_intrinsic_range(intrin) <= kernel_input_size);
559
b.cursor = nir_before_instr(instr);
560
offset = nir_iadd_imm(&b, intrin->src[0].ssa,
561
nir_intrinsic_base(intrin));
562
break;
563
}
564
default:
565
continue;
566
}
567
568
nir_ssa_def *load =
569
nir_load_ubo(&b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size,
570
temp_ubo_name, offset,
571
.align_mul = 4,
572
.align_offset = 0,
573
.range_base = 0,
574
.range = ~0);
575
576
nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
577
load);
578
nir_instr_remove(instr);
579
}
580
}
581
582
nir_validate_shader(nir, "before remapping");
583
584
/* Uniforms are stored in constant buffer 0, and the user-facing UBOs
* start at index one. So if any constant buffer is needed, constant
* buffer 0 will be needed as well; account for it.
*/
588
unsigned num_cbufs = nir->info.num_ubos;
589
if (num_cbufs || nir->num_uniforms)
590
num_cbufs++;
591
592
/* Place the new params in a new cbuf. */
593
if (num_system_values > 0 || kernel_input_size > 0) {
594
unsigned sysval_cbuf_index = num_cbufs;
595
num_cbufs++;
596
597
system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
598
num_system_values);
599
600
nir_foreach_block(block, impl) {
601
nir_foreach_instr_safe(instr, block) {
602
if (instr->type != nir_instr_type_intrinsic)
603
continue;
604
605
nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
606
607
if (load->intrinsic != nir_intrinsic_load_ubo)
608
continue;
609
610
b.cursor = nir_before_instr(instr);
611
612
assert(load->src[0].is_ssa);
613
614
if (load->src[0].ssa == temp_ubo_name) {
615
nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
616
nir_instr_rewrite_src(instr, &load->src[0],
617
nir_src_for_ssa(imm));
618
}
619
}
620
}
621
622
/* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
623
nir_opt_constant_folding(nir);
624
} else {
625
ralloc_free(system_values);
626
system_values = NULL;
627
}
628
629
assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
630
nir_validate_shader(nir, "after remap");
631
632
/* We don't use params[] but gallium leaves num_uniforms set. We use this
633
* to detect when cbuf0 exists but we don't need it anymore when we get
634
* here. Instead, zero it out so that the back-end doesn't get confused
635
* when nr_params * 4 != num_uniforms != nr_params * 4.
636
*/
637
nir->num_uniforms = 0;
638
639
*out_system_values = system_values;
640
*out_num_system_values = num_system_values;
641
*out_num_cbufs = num_cbufs;
642
}
643
644
static const char *surface_group_names[] = {
645
[IRIS_SURFACE_GROUP_RENDER_TARGET] = "render target",
646
[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
647
[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
648
[IRIS_SURFACE_GROUP_TEXTURE] = "texture",
649
[IRIS_SURFACE_GROUP_UBO] = "ubo",
650
[IRIS_SURFACE_GROUP_SSBO] = "ssbo",
651
[IRIS_SURFACE_GROUP_IMAGE] = "image",
652
};
653
654
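/* Debug helper; example output (illustrative, with compaction leaving all
* entries in place):
*
*    Binding table for MESA_SHADER_FRAGMENT (3 entries)
*     [0] render target #0
*     [1] texture #0
*     [2] ubo #0
*/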
static void
655
iris_print_binding_table(FILE *fp, const char *name,
656
const struct iris_binding_table *bt)
657
{
658
STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == IRIS_SURFACE_GROUP_COUNT);
659
660
uint32_t total = 0;
661
uint32_t compacted = 0;
662
663
for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
664
uint32_t size = bt->sizes[i];
665
total += size;
666
if (size)
667
compacted += util_bitcount64(bt->used_mask[i]);
668
}
669
670
if (total == 0) {
671
fprintf(fp, "Binding table for %s is empty\n\n", name);
672
return;
673
}
674
675
if (total != compacted) {
676
fprintf(fp, "Binding table for %s "
677
"(compacted to %u entries from %u entries)\n",
678
name, compacted, total);
679
} else {
680
fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
681
}
682
683
uint32_t entry = 0;
684
for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
685
uint64_t mask = bt->used_mask[i];
686
while (mask) {
687
int index = u_bit_scan64(&mask);
688
fprintf(fp, " [%u] %s #%d\n", entry++, surface_group_names[i], index);
689
}
690
}
691
fprintf(fp, "\n");
692
}
693
694
enum {
695
/* Max elements in a surface group. */
696
SURFACE_GROUP_MAX_ELEMENTS = 64,
697
};
698
699
/**
700
* Map a <group, index> pair to a binding table index.
701
*
702
* For example: <UBO, 5> => binding table index 12
703
*/
704
uint32_t
705
iris_group_index_to_bti(const struct iris_binding_table *bt,
706
enum iris_surface_group group, uint32_t index)
707
{
708
assert(index < bt->sizes[group]);
709
uint64_t mask = bt->used_mask[group];
710
uint64_t bit = 1ull << index;
711
if (bit & mask) {
712
return bt->offsets[group] + util_bitcount64((bit - 1) & mask);
713
} else {
714
return IRIS_SURFACE_NOT_USED;
715
}
716
}
717
718
/**
719
* Map a binding table index back to a <group, index> pair.
720
*
721
* For example: binding table index 12 => <UBO, 5>
722
*/
723
uint32_t
724
iris_bti_to_group_index(const struct iris_binding_table *bt,
725
enum iris_surface_group group, uint32_t bti)
726
{
727
uint64_t used_mask = bt->used_mask[group];
728
assert(bti >= bt->offsets[group]);
729
730
uint32_t c = bti - bt->offsets[group];
731
while (used_mask) {
732
int i = u_bit_scan64(&used_mask);
733
if (c == 0)
734
return i;
735
c--;
736
}
737
738
return IRIS_SURFACE_NOT_USED;
739
}
740
741
static void
742
rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
743
nir_instr *instr, nir_src *src,
744
enum iris_surface_group group)
745
{
746
assert(bt->sizes[group] > 0);
747
748
b->cursor = nir_before_instr(instr);
749
nir_ssa_def *bti;
750
if (nir_src_is_const(*src)) {
751
uint32_t index = nir_src_as_uint(*src);
752
bti = nir_imm_intN_t(b, iris_group_index_to_bti(bt, group, index),
753
src->ssa->bit_size);
754
} else {
755
/* Indirect usage makes all the surfaces of the group available,
* so we can just add the base.
*/
758
assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
759
bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
760
}
761
nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
762
}
763
764
static void
765
mark_used_with_src(struct iris_binding_table *bt, nir_src *src,
766
enum iris_surface_group group)
767
{
768
assert(bt->sizes[group] > 0);
769
770
if (nir_src_is_const(*src)) {
771
uint64_t index = nir_src_as_uint(*src);
772
assert(index < bt->sizes[group]);
773
bt->used_mask[group] |= 1ull << index;
774
} else {
775
/* There's an indirect usage, we need all the surfaces. */
776
bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
777
}
778
}
779
780
static bool
781
skip_compacting_binding_tables(void)
782
{
783
static int skip = -1;
784
if (skip < 0)
785
skip = env_var_as_boolean("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
786
return skip;
787
}
788
789
/**
790
* Set up the binding table indices and apply to the shader.
791
*/
792
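/* Illustrative layout (hypothetical fragment shader with two render
* targets, one texture and two UBOs, compaction enabled): binding table
* indices are handed out in surface-group order, so the table becomes
*    [0] render target #0   [1] render target #1
*    [2] texture #0
*    [3] ubo #0             [4] ubo #1
* and iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_UBO, 1) returns 4.
*/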
static void
793
iris_setup_binding_table(const struct intel_device_info *devinfo,
794
struct nir_shader *nir,
795
struct iris_binding_table *bt,
796
unsigned num_render_targets,
797
unsigned num_system_values,
798
unsigned num_cbufs)
799
{
800
const struct shader_info *info = &nir->info;
801
802
memset(bt, 0, sizeof(*bt));
803
804
/* Set the sizes for each surface group. For some groups, we already know
805
* upfront how many will be used, so mark them.
806
*/
807
if (info->stage == MESA_SHADER_FRAGMENT) {
808
bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
809
/* All render targets used. */
810
bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET] =
811
BITFIELD64_MASK(num_render_targets);
812
813
/* Set up the render target read surface group in order to support
* non-coherent framebuffer fetch on Gfx8.
*/
816
if (devinfo->ver == 8 && info->outputs_read) {
817
bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
818
bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] =
819
BITFIELD64_MASK(num_render_targets);
820
}
821
} else if (info->stage == MESA_SHADER_COMPUTE) {
822
bt->sizes[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
823
}
824
825
bt->sizes[IRIS_SURFACE_GROUP_TEXTURE] = BITSET_LAST_BIT(info->textures_used);
826
bt->used_mask[IRIS_SURFACE_GROUP_TEXTURE] = info->textures_used[0];
827
828
bt->sizes[IRIS_SURFACE_GROUP_IMAGE] = info->num_images;
829
830
/* Allocate an extra slot in the UBO section for NIR constants.
831
* Binding table compaction will remove it if unnecessary.
832
*
833
* We don't include them in iris_compiled_shader::num_cbufs because
834
* they are uploaded separately from shs->constbuf[], but from a shader
835
* point of view, they're another UBO (at the end of the section).
836
*/
837
bt->sizes[IRIS_SURFACE_GROUP_UBO] = num_cbufs + 1;
838
839
bt->sizes[IRIS_SURFACE_GROUP_SSBO] = info->num_ssbos;
840
841
for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
842
assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);
843
844
/* Mark surfaces used for the cases where we don't have the information
* available upfront.
*/
847
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
848
nir_foreach_block (block, impl) {
849
nir_foreach_instr (instr, block) {
850
if (instr->type != nir_instr_type_intrinsic)
851
continue;
852
853
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
854
switch (intrin->intrinsic) {
855
case nir_intrinsic_load_num_workgroups:
856
bt->used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
857
break;
858
859
case nir_intrinsic_load_output:
860
if (devinfo->ver == 8) {
861
mark_used_with_src(bt, &intrin->src[0],
862
IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
863
}
864
break;
865
866
case nir_intrinsic_image_size:
867
case nir_intrinsic_image_load:
868
case nir_intrinsic_image_store:
869
case nir_intrinsic_image_atomic_add:
870
case nir_intrinsic_image_atomic_imin:
871
case nir_intrinsic_image_atomic_umin:
872
case nir_intrinsic_image_atomic_imax:
873
case nir_intrinsic_image_atomic_umax:
874
case nir_intrinsic_image_atomic_and:
875
case nir_intrinsic_image_atomic_or:
876
case nir_intrinsic_image_atomic_xor:
877
case nir_intrinsic_image_atomic_exchange:
878
case nir_intrinsic_image_atomic_comp_swap:
879
case nir_intrinsic_image_load_raw_intel:
880
case nir_intrinsic_image_store_raw_intel:
881
mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_IMAGE);
882
break;
883
884
case nir_intrinsic_load_ubo:
885
mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_UBO);
886
break;
887
888
case nir_intrinsic_store_ssbo:
889
mark_used_with_src(bt, &intrin->src[1], IRIS_SURFACE_GROUP_SSBO);
890
break;
891
892
case nir_intrinsic_get_ssbo_size:
893
case nir_intrinsic_ssbo_atomic_add:
894
case nir_intrinsic_ssbo_atomic_imin:
895
case nir_intrinsic_ssbo_atomic_umin:
896
case nir_intrinsic_ssbo_atomic_imax:
897
case nir_intrinsic_ssbo_atomic_umax:
898
case nir_intrinsic_ssbo_atomic_and:
899
case nir_intrinsic_ssbo_atomic_or:
900
case nir_intrinsic_ssbo_atomic_xor:
901
case nir_intrinsic_ssbo_atomic_exchange:
902
case nir_intrinsic_ssbo_atomic_comp_swap:
903
case nir_intrinsic_ssbo_atomic_fmin:
904
case nir_intrinsic_ssbo_atomic_fmax:
905
case nir_intrinsic_ssbo_atomic_fcomp_swap:
906
case nir_intrinsic_load_ssbo:
907
mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_SSBO);
908
break;
909
910
default:
911
break;
912
}
913
}
914
}
915
916
/* When disabled, we just mark everything as used. */
917
if (unlikely(skip_compacting_binding_tables())) {
918
for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
919
bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
920
}
921
922
/* Calculate the offsets and the binding table size based on the used
923
* surfaces. After this point, the functions to go between "group indices"
924
* and binding table indices can be used.
925
*/
926
uint32_t next = 0;
927
for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
928
if (bt->used_mask[i] != 0) {
929
bt->offsets[i] = next;
930
next += util_bitcount64(bt->used_mask[i]);
931
}
932
}
933
bt->size_bytes = next * 4;
934
935
if (INTEL_DEBUG & DEBUG_BT) {
936
iris_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
937
}
938
939
/* Apply the binding table indices. The backend compiler is not expected
940
* to change those, as we haven't set any of the *_start entries in brw
941
* binding_table.
942
*/
943
nir_builder b;
944
nir_builder_init(&b, impl);
945
946
nir_foreach_block (block, impl) {
947
nir_foreach_instr (instr, block) {
948
if (instr->type == nir_instr_type_tex) {
949
nir_tex_instr *tex = nir_instr_as_tex(instr);
950
tex->texture_index =
951
iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_TEXTURE,
952
tex->texture_index);
953
continue;
954
}
955
956
if (instr->type != nir_instr_type_intrinsic)
957
continue;
958
959
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
960
switch (intrin->intrinsic) {
961
case nir_intrinsic_image_size:
962
case nir_intrinsic_image_load:
963
case nir_intrinsic_image_store:
964
case nir_intrinsic_image_atomic_add:
965
case nir_intrinsic_image_atomic_imin:
966
case nir_intrinsic_image_atomic_umin:
967
case nir_intrinsic_image_atomic_imax:
968
case nir_intrinsic_image_atomic_umax:
969
case nir_intrinsic_image_atomic_and:
970
case nir_intrinsic_image_atomic_or:
971
case nir_intrinsic_image_atomic_xor:
972
case nir_intrinsic_image_atomic_exchange:
973
case nir_intrinsic_image_atomic_comp_swap:
974
case nir_intrinsic_image_load_raw_intel:
975
case nir_intrinsic_image_store_raw_intel:
976
rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
977
IRIS_SURFACE_GROUP_IMAGE);
978
break;
979
980
case nir_intrinsic_load_ubo:
981
rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
982
IRIS_SURFACE_GROUP_UBO);
983
break;
984
985
case nir_intrinsic_store_ssbo:
986
rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
987
IRIS_SURFACE_GROUP_SSBO);
988
break;
989
990
case nir_intrinsic_load_output:
991
if (devinfo->ver == 8) {
992
rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
993
IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
994
}
995
break;
996
997
case nir_intrinsic_get_ssbo_size:
998
case nir_intrinsic_ssbo_atomic_add:
999
case nir_intrinsic_ssbo_atomic_imin:
1000
case nir_intrinsic_ssbo_atomic_umin:
1001
case nir_intrinsic_ssbo_atomic_imax:
1002
case nir_intrinsic_ssbo_atomic_umax:
1003
case nir_intrinsic_ssbo_atomic_and:
1004
case nir_intrinsic_ssbo_atomic_or:
1005
case nir_intrinsic_ssbo_atomic_xor:
1006
case nir_intrinsic_ssbo_atomic_exchange:
1007
case nir_intrinsic_ssbo_atomic_comp_swap:
1008
case nir_intrinsic_ssbo_atomic_fmin:
1009
case nir_intrinsic_ssbo_atomic_fmax:
1010
case nir_intrinsic_ssbo_atomic_fcomp_swap:
1011
case nir_intrinsic_load_ssbo:
1012
rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
1013
IRIS_SURFACE_GROUP_SSBO);
1014
break;
1015
1016
default:
1017
break;
1018
}
1019
}
1020
}
1021
}
1022
1023
static void
1024
iris_debug_recompile(struct iris_screen *screen,
1025
struct pipe_debug_callback *dbg,
1026
struct iris_uncompiled_shader *ish,
1027
const struct brw_base_prog_key *key)
1028
{
1029
if (!ish || list_is_empty(&ish->variants)
1030
|| list_is_singular(&ish->variants))
1031
return;
1032
1033
const struct intel_device_info *devinfo = &screen->devinfo;
1034
const struct brw_compiler *c = screen->compiler;
1035
const struct shader_info *info = &ish->nir->info;
1036
1037
c->shader_perf_log(dbg, "Recompiling %s shader for program %s: %s\n",
1038
_mesa_shader_stage_to_string(info->stage),
1039
info->name ? info->name : "(no identifier)",
1040
info->label ? info->label : "");
1041
1042
struct iris_compiled_shader *shader =
1043
list_first_entry(&ish->variants, struct iris_compiled_shader, link);
1044
const void *old_iris_key = &shader->key;
1045
1046
union brw_any_prog_key old_key;
1047
1048
switch (info->stage) {
1049
case MESA_SHADER_VERTEX:
1050
old_key.vs = iris_to_brw_vs_key(devinfo, old_iris_key);
1051
break;
1052
case MESA_SHADER_TESS_CTRL:
1053
old_key.tcs = iris_to_brw_tcs_key(devinfo, old_iris_key);
1054
break;
1055
case MESA_SHADER_TESS_EVAL:
1056
old_key.tes = iris_to_brw_tes_key(devinfo, old_iris_key);
1057
break;
1058
case MESA_SHADER_GEOMETRY:
1059
old_key.gs = iris_to_brw_gs_key(devinfo, old_iris_key);
1060
break;
1061
case MESA_SHADER_FRAGMENT:
1062
old_key.wm = iris_to_brw_fs_key(devinfo, old_iris_key);
1063
break;
1064
case MESA_SHADER_COMPUTE:
1065
old_key.cs = iris_to_brw_cs_key(devinfo, old_iris_key);
1066
break;
1067
default:
1068
unreachable("invalid shader stage");
1069
}
1070
1071
brw_debug_key_recompile(c, dbg, info->stage, &old_key.base, key);
1072
}
1073
1074
static void
1075
check_urb_size(struct iris_context *ice,
1076
unsigned needed_size,
1077
gl_shader_stage stage)
1078
{
1079
unsigned last_allocated_size = ice->shaders.urb.size[stage];
1080
1081
/* If the last URB allocation wasn't large enough for our needs,
1082
* flag it as needing to be reconfigured. Otherwise, we can use
1083
* the existing config. However, if the URB is constrained, and
1084
* we can shrink our size for this stage, we may be able to gain
1085
* extra concurrency by reconfiguring it to be smaller. Do so.
1086
*/
1087
if (last_allocated_size < needed_size ||
1088
(ice->shaders.urb.constrained && last_allocated_size > needed_size)) {
1089
ice->state.dirty |= IRIS_DIRTY_URB;
1090
}
1091
}
1092
1093
/**
1094
* Get the shader for the last enabled geometry stage.
1095
*
1096
* This stage is the one which will feed stream output and the rasterizer.
1097
*/
1098
static gl_shader_stage
1099
last_vue_stage(struct iris_context *ice)
1100
{
1101
if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
1102
return MESA_SHADER_GEOMETRY;
1103
1104
if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
1105
return MESA_SHADER_TESS_EVAL;
1106
1107
return MESA_SHADER_VERTEX;
1108
}
1109
1110
static inline struct iris_compiled_shader *
1111
find_variant(const struct iris_screen *screen,
1112
struct iris_uncompiled_shader *ish,
1113
const void *key, unsigned key_size)
1114
{
1115
struct list_head *start = ish->variants.next;
1116
1117
if (screen->precompile) {
1118
/* Check the first list entry. There will always be at least one
1119
* variant in the list (most likely the precompile variant), and
1120
* other contexts only append new variants, so we can safely check
1121
* it without locking, saving that cost in the common case.
1122
*/
1123
struct iris_compiled_shader *first =
1124
list_first_entry(&ish->variants, struct iris_compiled_shader, link);
1125
1126
if (memcmp(&first->key, key, key_size) == 0)
1127
return first;
1128
1129
/* Skip this one in the loop below */
1130
start = first->link.next;
1131
}
1132
1133
struct iris_compiled_shader *variant = NULL;
1134
1135
/* If it doesn't match, we have to walk the list; other contexts may be
1136
* concurrently appending shaders to it, so we need to lock here.
1137
*/
1138
simple_mtx_lock(&ish->lock);
1139
1140
list_for_each_entry_from(struct iris_compiled_shader, v, start,
1141
&ish->variants, link) {
1142
if (memcmp(&v->key, key, key_size) == 0) {
1143
variant = v;
1144
break;
1145
}
1146
}
1147
1148
simple_mtx_unlock(&ish->lock);
1149
1150
return variant;
1151
}
1152
1153
/**
1154
* Compile a vertex shader, and upload the assembly.
1155
*/
1156
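/* Overview of the steps below: clone the uncompiled NIR, lower user clip
* planes into the shader if the key requests them, assign uniforms and
* binding-table slots, run the brw backend compiler, then hand the assembly
* and metadata to iris_upload_shader() and the on-disk shader cache.
*/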
static struct iris_compiled_shader *
1157
iris_compile_vs(struct iris_screen *screen,
1158
struct u_upload_mgr *uploader,
1159
struct pipe_debug_callback *dbg,
1160
struct iris_uncompiled_shader *ish,
1161
const struct iris_vs_prog_key *key)
1162
{
1163
const struct brw_compiler *compiler = screen->compiler;
1164
const struct intel_device_info *devinfo = &screen->devinfo;
1165
void *mem_ctx = ralloc_context(NULL);
1166
struct brw_vs_prog_data *vs_prog_data =
1167
rzalloc(mem_ctx, struct brw_vs_prog_data);
1168
struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
1169
struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1170
enum brw_param_builtin *system_values;
1171
unsigned num_system_values;
1172
unsigned num_cbufs;
1173
1174
nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1175
1176
if (key->vue.nr_userclip_plane_consts) {
1177
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1178
nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1179
true, false, NULL);
1180
nir_lower_io_to_temporaries(nir, impl, true, false);
1181
nir_lower_global_vars_to_local(nir);
1182
nir_lower_vars_to_ssa(nir);
1183
nir_shader_gather_info(nir, impl);
1184
}
1185
1186
prog_data->use_alt_mode = ish->use_alt_mode;
1187
1188
iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1189
&num_system_values, &num_cbufs);
1190
1191
struct iris_binding_table bt;
1192
iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1193
num_system_values, num_cbufs);
1194
1195
brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1196
1197
brw_compute_vue_map(devinfo,
1198
&vue_prog_data->vue_map, nir->info.outputs_written,
1199
nir->info.separate_shader, /* pos_slots */ 1);
1200
1201
struct brw_vs_prog_key brw_key = iris_to_brw_vs_key(devinfo, key);
1202
1203
struct brw_compile_vs_params params = {
1204
.nir = nir,
1205
.key = &brw_key,
1206
.prog_data = vs_prog_data,
1207
.log_data = dbg,
1208
};
1209
1210
const unsigned *program = brw_compile_vs(compiler, mem_ctx, &params);
1211
if (program == NULL) {
1212
dbg_printf("Failed to compile vertex shader: %s\n", params.error_str);
1213
ralloc_free(mem_ctx);
1214
return NULL;
1215
}
1216
1217
iris_debug_recompile(screen, dbg, ish, &brw_key.base);
1218
1219
uint32_t *so_decls =
1220
screen->vtbl.create_so_decl_list(&ish->stream_output,
1221
&vue_prog_data->vue_map);
1222
1223
struct iris_compiled_shader *shader =
1224
iris_upload_shader(screen, ish, NULL, uploader,
1225
IRIS_CACHE_VS, sizeof(*key), key, program,
1226
prog_data, so_decls, system_values, num_system_values,
1227
0, num_cbufs, &bt);
1228
1229
iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1230
1231
ralloc_free(mem_ctx);
1232
return shader;
1233
}
1234
1235
/**
1236
* Update the current vertex shader variant.
1237
*
1238
* Fill out the key, look in the cache, compile and bind if needed.
1239
*/
1240
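/* Lookup order used below: the in-memory variant list on the uncompiled
* shader first, then the on-disk shader cache, and only then a fresh
* compile. A change of variant flags the relevant dirty bits and re-checks
* the URB size.
*/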
static void
1241
iris_update_compiled_vs(struct iris_context *ice)
1242
{
1243
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1244
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
1245
struct u_upload_mgr *uploader = ice->shaders.uploader_driver;
1246
struct iris_uncompiled_shader *ish =
1247
ice->shaders.uncompiled[MESA_SHADER_VERTEX];
1248
1249
struct iris_vs_prog_key key = { KEY_ID(vue.base) };
1250
screen->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1251
1252
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
1253
struct iris_compiled_shader *shader =
1254
find_variant(screen, ish, &key, sizeof(key));
1255
1256
if (!shader) {
1257
shader = iris_disk_cache_retrieve(screen, uploader, ish,
1258
&key, sizeof(key));
1259
}
1260
1261
if (!shader)
1262
shader = iris_compile_vs(screen, uploader, &ice->dbg, ish, &key);
1263
1264
if (old != shader) {
1265
iris_shader_variant_reference(&ice->shaders.prog[MESA_SHADER_VERTEX],
1266
shader);
1267
ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
1268
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_VS |
1269
IRIS_STAGE_DIRTY_BINDINGS_VS |
1270
IRIS_STAGE_DIRTY_CONSTANTS_VS;
1271
shs->sysvals_need_upload = true;
1272
1273
const struct brw_vue_prog_data *vue_prog_data =
1274
(void *) shader->prog_data;
1275
check_urb_size(ice, vue_prog_data->urb_entry_size, MESA_SHADER_VERTEX);
1276
}
1277
}
1278
1279
/**
1280
* Get the shader_info for a given stage, or NULL if the stage is disabled.
1281
*/
1282
const struct shader_info *
1283
iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
1284
{
1285
const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
1286
1287
if (!ish)
1288
return NULL;
1289
1290
const nir_shader *nir = ish->nir;
1291
return &nir->info;
1292
}
1293
1294
/**
1295
* Get the union of TCS output and TES input slots.
1296
*
1297
* TCS and TES need to agree on a common URB entry layout. In particular,
1298
* the data for all patch vertices is stored in a single URB entry (unlike
1299
* GS which has one entry per input vertex). This means that per-vertex
1300
* array indexing needs a stride.
1301
*
1302
* SSO requires locations to match, but doesn't require the number of
1303
* outputs/inputs to match (in fact, the TCS often has extra outputs).
1304
* So, we need to take the extra step of unifying these on the fly.
1305
*/
1306
static void
1307
get_unified_tess_slots(const struct iris_context *ice,
1308
uint64_t *per_vertex_slots,
1309
uint32_t *per_patch_slots)
1310
{
1311
const struct shader_info *tcs =
1312
iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
1313
const struct shader_info *tes =
1314
iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1315
1316
*per_vertex_slots = tes->inputs_read;
1317
*per_patch_slots = tes->patch_inputs_read;
1318
1319
if (tcs) {
1320
*per_vertex_slots |= tcs->outputs_written;
1321
*per_patch_slots |= tcs->patch_outputs_written;
1322
}
1323
}
1324
1325
/**
1326
* Compile a tessellation control shader, and upload the assembly.
1327
*/
1328
static struct iris_compiled_shader *
1329
iris_compile_tcs(struct iris_screen *screen,
1330
struct hash_table *passthrough_ht,
1331
struct u_upload_mgr *uploader,
1332
struct pipe_debug_callback *dbg,
1333
struct iris_uncompiled_shader *ish,
1334
const struct iris_tcs_prog_key *key)
1335
{
1336
const struct brw_compiler *compiler = screen->compiler;
1337
const struct nir_shader_compiler_options *options =
1338
compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
1339
void *mem_ctx = ralloc_context(NULL);
1340
struct brw_tcs_prog_data *tcs_prog_data =
1341
rzalloc(mem_ctx, struct brw_tcs_prog_data);
1342
struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
1343
struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1344
const struct intel_device_info *devinfo = &screen->devinfo;
1345
enum brw_param_builtin *system_values = NULL;
1346
unsigned num_system_values = 0;
1347
unsigned num_cbufs = 0;
1348
1349
nir_shader *nir;
1350
1351
struct iris_binding_table bt;
1352
1353
struct brw_tcs_prog_key brw_key = iris_to_brw_tcs_key(devinfo, key);
1354
1355
if (ish) {
1356
nir = nir_shader_clone(mem_ctx, ish->nir);
1357
1358
iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1359
&num_system_values, &num_cbufs);
1360
iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1361
num_system_values, num_cbufs);
1362
brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1363
} else {
1364
nir =
1365
brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, &brw_key);
1366
1367
/* Reserve space for passing the default tess levels as constants. */
1368
num_cbufs = 1;
1369
num_system_values = 8;
1370
system_values =
1371
rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
1372
prog_data->param = rzalloc_array(mem_ctx, uint32_t, num_system_values);
1373
prog_data->nr_params = num_system_values;
1374
1375
if (key->tes_primitive_mode == GL_QUADS) {
1376
for (int i = 0; i < 4; i++)
1377
system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1378
1379
system_values[3] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1380
system_values[2] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y;
1381
} else if (key->tes_primitive_mode == GL_TRIANGLES) {
1382
for (int i = 0; i < 3; i++)
1383
system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1384
1385
system_values[4] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1386
} else {
1387
assert(key->tes_primitive_mode == GL_ISOLINES);
1388
system_values[7] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y;
1389
system_values[6] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
1390
}
1391
1392
/* Manually set up the TCS binding table. */
1393
memset(&bt, 0, sizeof(bt));
1394
bt.sizes[IRIS_SURFACE_GROUP_UBO] = 1;
1395
bt.used_mask[IRIS_SURFACE_GROUP_UBO] = 1;
1396
bt.size_bytes = 4;
1397
1398
prog_data->ubo_ranges[0].length = 1;
1399
}
1400
1401
char *error_str = NULL;
1402
const unsigned *program =
1403
brw_compile_tcs(compiler, dbg, mem_ctx, &brw_key, tcs_prog_data,
1404
nir, -1, NULL, &error_str);
1405
if (program == NULL) {
1406
dbg_printf("Failed to compile control shader: %s\n", error_str);
1407
ralloc_free(mem_ctx);
1408
return NULL;
1409
}
1410
1411
iris_debug_recompile(screen, dbg, ish, &brw_key.base);
1412
1413
struct iris_compiled_shader *shader =
1414
iris_upload_shader(screen, ish, passthrough_ht, uploader,
1415
IRIS_CACHE_TCS, sizeof(*key), key, program,
1416
prog_data, NULL, system_values, num_system_values,
1417
0, num_cbufs, &bt);
1418
1419
if (ish)
1420
iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1421
1422
ralloc_free(mem_ctx);
1423
return shader;
1424
}
1425
1426
/**
1427
* Update the current tessellation control shader variant.
1428
*
1429
* Fill out the key, look in the cache, compile and bind if needed.
1430
*/
1431
static void
1432
iris_update_compiled_tcs(struct iris_context *ice)
1433
{
1434
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
1435
struct iris_uncompiled_shader *tcs =
1436
ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
1437
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1438
struct u_upload_mgr *uploader = ice->shaders.uploader_driver;
1439
const struct brw_compiler *compiler = screen->compiler;
1440
const struct intel_device_info *devinfo = &screen->devinfo;
1441
1442
const struct shader_info *tes_info =
1443
iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1444
struct iris_tcs_prog_key key = {
1445
.vue.base.program_string_id = tcs ? tcs->program_id : 0,
1446
.tes_primitive_mode = tes_info->tess.primitive_mode,
1447
.input_vertices =
1448
!tcs || compiler->use_tcs_8_patch ? ice->state.vertices_per_patch : 0,
1449
.quads_workaround = devinfo->ver < 9 &&
1450
tes_info->tess.primitive_mode == GL_QUADS &&
1451
tes_info->tess.spacing == TESS_SPACING_EQUAL,
1452
};
1453
get_unified_tess_slots(ice, &key.outputs_written,
1454
&key.patch_outputs_written);
1455
screen->vtbl.populate_tcs_key(ice, &key);
1456
1457
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
1458
struct iris_compiled_shader *shader =
1459
tcs ? find_variant(screen, tcs, &key, sizeof(key)) :
1460
iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);
1461
1462
if (tcs && !shader) {
1463
shader = iris_disk_cache_retrieve(screen, uploader, tcs,
1464
&key, sizeof(key));
1465
}
1466
1467
if (!shader) {
1468
shader = iris_compile_tcs(screen, ice->shaders.cache,
1469
uploader, &ice->dbg, tcs, &key);
1470
}
1471
1472
if (old != shader) {
1473
iris_shader_variant_reference(&ice->shaders.prog[MESA_SHADER_TESS_CTRL],
1474
shader);
1475
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TCS |
1476
IRIS_STAGE_DIRTY_BINDINGS_TCS |
1477
IRIS_STAGE_DIRTY_CONSTANTS_TCS;
1478
shs->sysvals_need_upload = true;
1479
1480
const struct brw_vue_prog_data *prog_data = (void *) shader->prog_data;
1481
check_urb_size(ice, prog_data->urb_entry_size, MESA_SHADER_TESS_CTRL);
1482
}
1483
}
1484
1485
/**
1486
* Compile a tessellation evaluation shader, and upload the assembly.
1487
*/
1488
static struct iris_compiled_shader *
1489
iris_compile_tes(struct iris_screen *screen,
1490
struct u_upload_mgr *uploader,
1491
struct pipe_debug_callback *dbg,
1492
struct iris_uncompiled_shader *ish,
1493
const struct iris_tes_prog_key *key)
1494
{
1495
const struct brw_compiler *compiler = screen->compiler;
1496
void *mem_ctx = ralloc_context(NULL);
1497
struct brw_tes_prog_data *tes_prog_data =
1498
rzalloc(mem_ctx, struct brw_tes_prog_data);
1499
struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1500
struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1501
enum brw_param_builtin *system_values;
1502
const struct intel_device_info *devinfo = &screen->devinfo;
1503
unsigned num_system_values;
1504
unsigned num_cbufs;
1505
1506
nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1507
1508
if (key->vue.nr_userclip_plane_consts) {
1509
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1510
nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1511
true, false, NULL);
1512
nir_lower_io_to_temporaries(nir, impl, true, false);
1513
nir_lower_global_vars_to_local(nir);
1514
nir_lower_vars_to_ssa(nir);
1515
nir_shader_gather_info(nir, impl);
1516
}
1517
1518
iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1519
&num_system_values, &num_cbufs);
1520
1521
struct iris_binding_table bt;
1522
iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1523
num_system_values, num_cbufs);
1524
1525
brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1526
1527
struct brw_vue_map input_vue_map;
1528
brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
1529
key->patch_inputs_read);
1530
1531
struct brw_tes_prog_key brw_key = iris_to_brw_tes_key(devinfo, key);
1532
1533
char *error_str = NULL;
1534
const unsigned *program =
1535
brw_compile_tes(compiler, dbg, mem_ctx, &brw_key, &input_vue_map,
1536
tes_prog_data, nir, -1, NULL, &error_str);
1537
if (program == NULL) {
1538
dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
1539
ralloc_free(mem_ctx);
1540
return NULL;
1541
}
1542
1543
iris_debug_recompile(screen, dbg, ish, &brw_key.base);
1544
1545
uint32_t *so_decls =
1546
screen->vtbl.create_so_decl_list(&ish->stream_output,
1547
&vue_prog_data->vue_map);
1548
1549
1550
struct iris_compiled_shader *shader =
1551
iris_upload_shader(screen, ish, NULL, uploader,
1552
IRIS_CACHE_TES, sizeof(*key), key, program,
1553
prog_data, so_decls, system_values, num_system_values,
1554
0, num_cbufs, &bt);
1555
1556
iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1557
1558
ralloc_free(mem_ctx);
1559
return shader;
1560
}
1561
1562
/**
1563
* Update the current tessellation evaluation shader variant.
1564
*
1565
* Fill out the key, look in the cache, compile and bind if needed.
1566
*/
1567
static void
1568
iris_update_compiled_tes(struct iris_context *ice)
1569
{
1570
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1571
struct u_upload_mgr *uploader = ice->shaders.uploader_driver;
1572
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
1573
struct iris_uncompiled_shader *ish =
1574
ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1575
1576
struct iris_tes_prog_key key = { KEY_ID(vue.base) };
1577
get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
1578
screen->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1579
1580
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
1581
struct iris_compiled_shader *shader =
1582
find_variant(screen, ish, &key, sizeof(key));
1583
1584
if (!shader) {
1585
shader = iris_disk_cache_retrieve(screen, uploader, ish,
1586
&key, sizeof(key));
1587
}
1588
1589
if (!shader)
1590
shader = iris_compile_tes(screen, uploader, &ice->dbg, ish, &key);
1591
1592
if (old != shader) {
1593
iris_shader_variant_reference(&ice->shaders.prog[MESA_SHADER_TESS_EVAL],
1594
shader);
1595
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TES |
1596
IRIS_STAGE_DIRTY_BINDINGS_TES |
1597
IRIS_STAGE_DIRTY_CONSTANTS_TES;
1598
shs->sysvals_need_upload = true;
1599
1600
const struct brw_vue_prog_data *prog_data = (void *) shader->prog_data;
1601
check_urb_size(ice, prog_data->urb_entry_size, MESA_SHADER_TESS_EVAL);
1602
}
1603
1604
/* TODO: Could compare and avoid flagging this. */
1605
const struct shader_info *tes_info = &ish->nir->info;
1606
if (BITSET_TEST(tes_info->system_values_read, SYSTEM_VALUE_VERTICES_IN)) {
1607
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TES;
1608
ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
1609
}
1610
}
1611
1612
/**
1613
* Compile a geometry shader, and upload the assembly.
1614
*/
1615
static struct iris_compiled_shader *
1616
iris_compile_gs(struct iris_screen *screen,
1617
struct u_upload_mgr *uploader,
1618
struct pipe_debug_callback *dbg,
1619
struct iris_uncompiled_shader *ish,
1620
const struct iris_gs_prog_key *key)
1621
{
1622
const struct brw_compiler *compiler = screen->compiler;
1623
const struct intel_device_info *devinfo = &screen->devinfo;
1624
void *mem_ctx = ralloc_context(NULL);
1625
struct brw_gs_prog_data *gs_prog_data =
1626
rzalloc(mem_ctx, struct brw_gs_prog_data);
1627
struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
1628
struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1629
enum brw_param_builtin *system_values;
1630
unsigned num_system_values;
1631
unsigned num_cbufs;
1632
1633
nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1634
1635
if (key->vue.nr_userclip_plane_consts) {
1636
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1637
nir_lower_clip_gs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1638
false, NULL);
1639
nir_lower_io_to_temporaries(nir, impl, true, false);
1640
nir_lower_global_vars_to_local(nir);
1641
nir_lower_vars_to_ssa(nir);
1642
nir_shader_gather_info(nir, impl);
1643
}
1644
1645
iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1646
&num_system_values, &num_cbufs);
1647
1648
struct iris_binding_table bt;
1649
iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1650
num_system_values, num_cbufs);
1651
1652
brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1653
1654
brw_compute_vue_map(devinfo,
1655
&vue_prog_data->vue_map, nir->info.outputs_written,
1656
nir->info.separate_shader, /* pos_slots */ 1);
1657
1658
struct brw_gs_prog_key brw_key = iris_to_brw_gs_key(devinfo, key);
1659
1660
char *error_str = NULL;
1661
const unsigned *program =
1662
brw_compile_gs(compiler, dbg, mem_ctx, &brw_key, gs_prog_data,
1663
nir, -1, NULL, &error_str);
1664
if (program == NULL) {
1665
dbg_printf("Failed to compile geometry shader: %s\n", error_str);
1666
ralloc_free(mem_ctx);
1667
return NULL;
1668
}
1669
1670
iris_debug_recompile(screen, dbg, ish, &brw_key.base);
1671
1672
uint32_t *so_decls =
1673
screen->vtbl.create_so_decl_list(&ish->stream_output,
1674
&vue_prog_data->vue_map);
1675
1676
struct iris_compiled_shader *shader =
1677
iris_upload_shader(screen, ish, NULL, uploader,
1678
IRIS_CACHE_GS, sizeof(*key), key, program,
1679
prog_data, so_decls, system_values, num_system_values,
1680
0, num_cbufs, &bt);
1681
1682
iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1683
1684
ralloc_free(mem_ctx);
1685
return shader;
1686
}
1687
1688
/**
1689
* Update the current geometry shader variant.
1690
*
1691
* Fill out the key, look in the cache, compile and bind if needed.
1692
*/
1693
static void
1694
iris_update_compiled_gs(struct iris_context *ice)
1695
{
1696
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
1697
struct u_upload_mgr *uploader = ice->shaders.uploader_driver;
1698
struct iris_uncompiled_shader *ish =
1699
ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
1700
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
1701
struct iris_compiled_shader *shader = NULL;
1702
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1703
1704
if (ish) {
1705
struct iris_gs_prog_key key = { KEY_ID(vue.base) };
1706
screen->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1707
1708
shader = find_variant(screen, ish, &key, sizeof(key));
1709
1710
if (!shader) {
1711
shader = iris_disk_cache_retrieve(screen, uploader, ish,
1712
&key, sizeof(key));
1713
}
1714
1715
if (!shader)
1716
shader = iris_compile_gs(screen, uploader, &ice->dbg, ish, &key);
1717
}
1718
1719
if (old != shader) {
1720
iris_shader_variant_reference(&ice->shaders.prog[MESA_SHADER_GEOMETRY],
1721
shader);
1722
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_GS |
1723
IRIS_STAGE_DIRTY_BINDINGS_GS |
1724
IRIS_STAGE_DIRTY_CONSTANTS_GS;
1725
shs->sysvals_need_upload = true;
1726
1727
unsigned urb_entry_size = shader ?
1728
((struct brw_vue_prog_data *) shader->prog_data)->urb_entry_size : 0;
1729
check_urb_size(ice, urb_entry_size, MESA_SHADER_GEOMETRY);
1730
}
1731
}
1732
1733
/**
1734
* Compile a fragment (pixel) shader, and upload the assembly.
1735
*/
1736
static struct iris_compiled_shader *
1737
iris_compile_fs(struct iris_screen *screen,
1738
struct u_upload_mgr *uploader,
1739
struct pipe_debug_callback *dbg,
1740
struct iris_uncompiled_shader *ish,
1741
const struct iris_fs_prog_key *key,
1742
struct brw_vue_map *vue_map)
1743
{
1744
const struct brw_compiler *compiler = screen->compiler;
1745
void *mem_ctx = ralloc_context(NULL);
1746
struct brw_wm_prog_data *fs_prog_data =
1747
rzalloc(mem_ctx, struct brw_wm_prog_data);
1748
struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
1749
enum brw_param_builtin *system_values;
1750
const struct intel_device_info *devinfo = &screen->devinfo;
1751
unsigned num_system_values;
1752
unsigned num_cbufs;
1753
1754
nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1755
1756
prog_data->use_alt_mode = ish->use_alt_mode;
1757
1758
iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1759
&num_system_values, &num_cbufs);
1760
1761
/* Lower output variables to load_output intrinsics before setting up
1762
* binding tables, so iris_setup_binding_table can map any load_output
1763
* intrinsics to IRIS_SURFACE_GROUP_RENDER_TARGET_READ on Gfx8 for
1764
* non-coherent framebuffer fetches.
1765
*/
1766
brw_nir_lower_fs_outputs(nir);
1767
1768
/* On Gfx11+, shader RT write messages have a "Null Render Target" bit
1769
* and do not need a binding table entry with a null surface. Earlier
1770
* generations need an entry for a null surface.
1771
*/
1772
int null_rts = devinfo->ver < 11 ? 1 : 0;
1773
1774
struct iris_binding_table bt;
1775
iris_setup_binding_table(devinfo, nir, &bt,
1776
MAX2(key->nr_color_regions, null_rts),
1777
num_system_values, num_cbufs);
1778
1779
brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1780
1781
struct brw_wm_prog_key brw_key = iris_to_brw_fs_key(devinfo, key);
1782
1783
struct brw_compile_fs_params params = {
1784
.nir = nir,
1785
.key = &brw_key,
1786
.prog_data = fs_prog_data,
1787
1788
.allow_spilling = true,
1789
.vue_map = vue_map,
1790
1791
.log_data = dbg,
1792
};
1793
1794
const unsigned *program = brw_compile_fs(compiler, mem_ctx, &params);
1795
if (program == NULL) {
1796
dbg_printf("Failed to compile fragment shader: %s\n", params.error_str);
1797
ralloc_free(mem_ctx);
1798
return NULL;
1799
}
1800
1801
iris_debug_recompile(screen, dbg, ish, &brw_key.base);
1802
1803
struct iris_compiled_shader *shader =
1804
iris_upload_shader(screen, ish, NULL, uploader,
1805
IRIS_CACHE_FS, sizeof(*key), key, program,
1806
prog_data, NULL, system_values, num_system_values,
1807
0, num_cbufs, &bt);
1808
1809
iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1810
1811
ralloc_free(mem_ctx);
1812
return shader;
1813
}
1814
1815
/**
1816
* Update the current fragment shader variant.
1817
*
1818
* Fill out the key, look in the cache, compile and bind if needed.
1819
*/
1820
static void
1821
iris_update_compiled_fs(struct iris_context *ice)
1822
{
1823
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
1824
struct u_upload_mgr *uploader = ice->shaders.uploader_driver;
1825
struct iris_uncompiled_shader *ish =
1826
ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
1827
struct iris_fs_prog_key key = { KEY_ID(base) };
1828
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1829
screen->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
1830
1831
struct brw_vue_map *last_vue_map =
1832
&brw_vue_prog_data(ice->shaders.last_vue_shader->prog_data)->vue_map;
1833
1834
if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
1835
key.input_slots_valid = last_vue_map->slots_valid;
1836
1837
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_FS];
1838
struct iris_compiled_shader *shader =
1839
find_variant(screen, ish, &key, sizeof(key));
1840
1841
if (!shader) {
1842
shader = iris_disk_cache_retrieve(screen, uploader, ish,
1843
&key, sizeof(key));
1844
}
1845
1846
if (!shader) {
1847
shader = iris_compile_fs(screen, uploader, &ice->dbg,
1848
ish, &key, last_vue_map);
1849
}
1850
1851
if (old != shader) {
1852
// XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
1853
// toggles. might be able to avoid flagging SBE too.
1854
iris_shader_variant_reference(&ice->shaders.prog[MESA_SHADER_FRAGMENT],
1855
shader);
1856
ice->state.dirty |= IRIS_DIRTY_WM |
1857
IRIS_DIRTY_CLIP |
1858
IRIS_DIRTY_SBE;
1859
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS |
1860
IRIS_STAGE_DIRTY_BINDINGS_FS |
1861
IRIS_STAGE_DIRTY_CONSTANTS_FS;
1862
shs->sysvals_need_upload = true;
1863
}
1864
}
1865
1866
/**
1867
* Update the last enabled stage's VUE map.
1868
*
1869
* When the shader feeding the rasterizer's output interface changes, we
1870
* need to re-emit various packets.
1871
*/
1872
static void
1873
update_last_vue_map(struct iris_context *ice,
1874
struct iris_compiled_shader *shader)
1875
{
1876
struct brw_vue_prog_data *vue_prog_data = (void *) shader->prog_data;
1877
struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1878
struct brw_vue_map *old_map = !ice->shaders.last_vue_shader ? NULL :
1879
&brw_vue_prog_data(ice->shaders.last_vue_shader->prog_data)->vue_map;
1880
const uint64_t changed_slots =
1881
(old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
1882
1883
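/* If the shader gained or lost a viewport index output, the number of
 * active viewports changes, and clip, viewport, and scissor state must
 * be re-emitted.
 */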
if (changed_slots & VARYING_BIT_VIEWPORT) {
1884
ice->state.num_viewports =
1885
(vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
1886
ice->state.dirty |= IRIS_DIRTY_CLIP |
1887
IRIS_DIRTY_SF_CL_VIEWPORT |
1888
IRIS_DIRTY_CC_VIEWPORT |
1889
IRIS_DIRTY_SCISSOR_RECT;
1890
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_FS |
1891
ice->state.stage_dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
1892
}
1893
1894
if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
1895
ice->state.dirty |= IRIS_DIRTY_SBE;
1896
}
1897
1898
iris_shader_variant_reference(&ice->shaders.last_vue_shader, shader);
1899
}
1900
1901
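/**
 * Ensure pull-constant (UBO) surface states exist for the given stage.
 *
 * If the stage's shader uses UBO pull loads, upload a surface state for
 * any bound constant buffer that does not have one yet, and flag the
 * stage's bindings dirty when new descriptors were created.
 */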
static void
1902
iris_update_pull_constant_descriptors(struct iris_context *ice,
1903
gl_shader_stage stage)
1904
{
1905
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
1906
1907
if (!shader || !shader->prog_data->has_ubo_pull)
1908
return;
1909
1910
struct iris_shader_state *shs = &ice->state.shaders[stage];
1911
bool any_new_descriptors =
1912
shader->num_system_values > 0 && shs->sysvals_need_upload;
1913
1914
unsigned bound_cbufs = shs->bound_cbufs;
1915
1916
while (bound_cbufs) {
1917
const int i = u_bit_scan(&bound_cbufs);
1918
struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
1919
struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
1920
if (!surf_state->res && cbuf->buffer) {
1921
iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state,
1922
ISL_SURF_USAGE_CONSTANT_BUFFER_BIT);
1923
any_new_descriptors = true;
1924
}
1925
}
1926
1927
if (any_new_descriptors)
1928
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
1929
}
1930
1931
/**
1932
* Update the current shader variants for the given state.
1933
*
1934
* This should be called on every draw call to ensure that the correct
1935
* shaders are bound. It will also flag any dirty state triggered by
1936
* swapping out those shaders.
1937
*/
1938
void
1939
iris_update_compiled_shaders(struct iris_context *ice)
1940
{
1941
const uint64_t stage_dirty = ice->state.stage_dirty;
1942
1943
if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
1944
IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
1945
struct iris_uncompiled_shader *tes =
1946
ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1947
if (tes) {
1948
iris_update_compiled_tcs(ice);
1949
iris_update_compiled_tes(ice);
1950
} else {
1951
iris_shader_variant_reference(&ice->shaders.prog[MESA_SHADER_TESS_CTRL], NULL);
1952
iris_shader_variant_reference(&ice->shaders.prog[MESA_SHADER_TESS_EVAL], NULL);
1953
ice->state.stage_dirty |=
1954
IRIS_STAGE_DIRTY_TCS | IRIS_STAGE_DIRTY_TES |
1955
IRIS_STAGE_DIRTY_BINDINGS_TCS | IRIS_STAGE_DIRTY_BINDINGS_TES |
1956
IRIS_STAGE_DIRTY_CONSTANTS_TCS | IRIS_STAGE_DIRTY_CONSTANTS_TES;
1957
1958
if (ice->shaders.urb.constrained)
1959
ice->state.dirty |= IRIS_DIRTY_URB;
1960
}
1961
}
1962
1963
if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_VS)
1964
iris_update_compiled_vs(ice);
1965
if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_GS)
1966
iris_update_compiled_gs(ice);
1967
1968
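/* A GS or TES change may switch the final output topology between
 * points/lines and triangles, which affects the clipper state emitted
 * later.
 */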
if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_GS |
1969
IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
1970
const struct iris_compiled_shader *gs =
1971
ice->shaders.prog[MESA_SHADER_GEOMETRY];
1972
const struct iris_compiled_shader *tes =
1973
ice->shaders.prog[MESA_SHADER_TESS_EVAL];
1974
1975
bool points_or_lines = false;
1976
1977
if (gs) {
1978
const struct brw_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
1979
points_or_lines =
1980
gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
1981
gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
1982
} else if (tes) {
1983
const struct brw_tes_prog_data *tes_data = (void *) tes->prog_data;
1984
points_or_lines =
1985
tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_LINE ||
1986
tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
1987
}
1988
1989
if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
1990
/* Outbound to XY Clip enables */
1991
ice->shaders.output_topology_is_points_or_lines = points_or_lines;
1992
ice->state.dirty |= IRIS_DIRTY_CLIP;
1993
}
1994
}
1995
1996
gl_shader_stage last_stage = last_vue_stage(ice);
1997
struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
1998
struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
1999
update_last_vue_map(ice, shader);
2000
if (ice->state.streamout != shader->streamout) {
2001
ice->state.streamout = shader->streamout;
2002
ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
2003
}
2004
2005
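/* Gallium expresses stream output strides in dwords; convert to bytes
 * for the bound stream output targets.
 */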
if (ice->state.streamout_active) {
2006
for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
2007
struct iris_stream_output_target *so =
2008
(void *) ice->state.so_target[i];
2009
if (so)
2010
so->stride = ish->stream_output.stride[i] * sizeof(uint32_t);
2011
}
2012
}
2013
2014
if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_FS)
2015
iris_update_compiled_fs(ice);
2016
2017
for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
2018
if (ice->state.stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << i))
2019
iris_update_pull_constant_descriptors(ice, i);
2020
}
2021
}
2022
2023
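/**
 * Compile a compute shader, and upload the assembly.
 */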
static struct iris_compiled_shader *
2024
iris_compile_cs(struct iris_screen *screen,
2025
struct u_upload_mgr *uploader,
2026
struct pipe_debug_callback *dbg,
2027
struct iris_uncompiled_shader *ish,
2028
const struct iris_cs_prog_key *key)
2029
{
2030
const struct brw_compiler *compiler = screen->compiler;
2031
void *mem_ctx = ralloc_context(NULL);
2032
struct brw_cs_prog_data *cs_prog_data =
2033
rzalloc(mem_ctx, struct brw_cs_prog_data);
2034
struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
2035
enum brw_param_builtin *system_values;
2036
const struct intel_device_info *devinfo = &screen->devinfo;
2037
unsigned num_system_values;
2038
unsigned num_cbufs;
2039
2040
nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
2041
2042
NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics);
2043
2044
iris_setup_uniforms(compiler, mem_ctx, nir, prog_data,
2045
ish->kernel_input_size,
2046
&system_values, &num_system_values, &num_cbufs);
2047
2048
struct iris_binding_table bt;
2049
iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
2050
num_system_values, num_cbufs);
2051
2052
struct brw_cs_prog_key brw_key = iris_to_brw_cs_key(devinfo, key);
2053
2054
struct brw_compile_cs_params params = {
2055
.nir = nir,
2056
.key = &brw_key,
2057
.prog_data = cs_prog_data,
2058
.log_data = dbg,
2059
};
2060
2061
const unsigned *program = brw_compile_cs(compiler, mem_ctx, &params);
2062
if (program == NULL) {
2063
dbg_printf("Failed to compile compute shader: %s\n", params.error_str);
2064
ralloc_free(mem_ctx);
2065
return NULL;
2066
}
2067
2068
iris_debug_recompile(screen, dbg, ish, &brw_key.base);
2069
2070
struct iris_compiled_shader *shader =
2071
iris_upload_shader(screen, ish, NULL, uploader,
2072
IRIS_CACHE_CS, sizeof(*key), key, program,
2073
prog_data, NULL, system_values, num_system_values,
2074
ish->kernel_input_size, num_cbufs, &bt);
2075
2076
iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
2077
2078
ralloc_free(mem_ctx);
2079
return shader;
2080
}
2081
2082
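/**
 * Update the current compute shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */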
static void
2083
iris_update_compiled_cs(struct iris_context *ice)
2084
{
2085
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
2086
struct u_upload_mgr *uploader = ice->shaders.uploader_driver;
2087
struct iris_uncompiled_shader *ish =
2088
ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
2089
2090
struct iris_cs_prog_key key = { KEY_ID(base) };
2091
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2092
screen->vtbl.populate_cs_key(ice, &key);
2093
2094
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
2095
struct iris_compiled_shader *shader =
2096
find_variant(screen, ish, &key, sizeof(key));
2097
2098
if (!shader) {
2099
shader = iris_disk_cache_retrieve(screen, uploader, ish,
2100
&key, sizeof(key));
2101
}
2102
2103
if (!shader)
2104
shader = iris_compile_cs(screen, uploader, &ice->dbg, ish, &key);
2105
2106
if (old != shader) {
2107
iris_shader_variant_reference(&ice->shaders.prog[MESA_SHADER_COMPUTE],
2108
shader);
2109
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CS |
2110
IRIS_STAGE_DIRTY_BINDINGS_CS |
2111
IRIS_STAGE_DIRTY_CONSTANTS_CS;
2112
shs->sysvals_need_upload = true;
2113
}
2114
}
2115
2116
void
2117
iris_update_compiled_compute_shader(struct iris_context *ice)
2118
{
2119
if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_CS)
2120
iris_update_compiled_cs(ice);
2121
2122
if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS)
2123
iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
2124
}
2125
2126
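/**
 * Fill out the per-thread push constant data (currently just the
 * subgroup ID) for a compute dispatch.
 */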
void
2127
iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
2128
unsigned threads,
2129
uint32_t *dst)
2130
{
2131
assert(brw_cs_push_const_total_size(cs_prog_data, threads) > 0);
2132
assert(cs_prog_data->push.cross_thread.size == 0);
2133
assert(cs_prog_data->push.per_thread.dwords == 1);
2134
assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
2135
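/* Each thread's push constants occupy one register (8 dwords); the only
 * per-thread value is the subgroup ID, written into dword 0 of each slot.
 */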
for (unsigned t = 0; t < threads; t++)
2136
dst[8 * t] = t;
2137
}
2138
2139
/**
2140
* Allocate scratch BOs as needed for the given per-thread size and stage.
2141
*/
2142
struct iris_bo *
2143
iris_get_scratch_space(struct iris_context *ice,
2144
unsigned per_thread_scratch,
2145
gl_shader_stage stage)
2146
{
2147
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2148
struct iris_bufmgr *bufmgr = screen->bufmgr;
2149
const struct intel_device_info *devinfo = &screen->devinfo;
2150
2151
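/* per_thread_scratch must be a power of two of at least 1KB; encode it
 * as log2(size) - 10 to index the scratch BO array, as the asserts below
 * verify.
 */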
unsigned encoded_size = ffs(per_thread_scratch) - 11;
2152
assert(encoded_size < ARRAY_SIZE(ice->shaders.scratch_bos));
2153
assert(per_thread_scratch == 1 << (encoded_size + 10));
2154
2155
/* On GFX version 12.5, scratch access changed to a surface-based model.
2156
* Instead of each shader type having its own layout based on IDs passed
2157
* from the relevant fixed-function unit, all scratch access is based on
2158
* thread IDs like it always has been for compute.
2159
*/
2160
if (devinfo->verx10 >= 125)
2161
stage = MESA_SHADER_COMPUTE;
2162
2163
struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];
2164
2165
/* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
2166
*
2167
* "Scratch Space per slice is computed based on 4 sub-slices. SW
2168
* must allocate scratch space enough so that each slice has 4
2169
* slices allowed."
2170
*
2171
* According to the other driver team, this applies to compute shaders
2172
* as well. This is not currently documented at all.
2173
*
2174
* This hack is no longer necessary on Gfx11+.
2175
*
2176
* For Gfx11+, scratch space allocation is based on the number of threads
2177
* in the base configuration.
2178
*/
2179
unsigned subslice_total = screen->subslice_total;
2180
if (devinfo->verx10 == 125)
2181
subslice_total = 32;
2182
else if (devinfo->ver == 12)
2183
subslice_total = (devinfo->is_dg1 || devinfo->gt == 2 ? 6 : 2);
2184
else if (devinfo->ver == 11)
2185
subslice_total = 8;
2186
else if (devinfo->ver < 11)
2187
subslice_total = 4 * devinfo->num_slices;
2188
assert(subslice_total >= screen->subslice_total);
2189
2190
if (!*bop) {
2191
unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;
2192
2193
if (devinfo->ver >= 12) {
2194
/* Same as ICL below, but with 16 EUs. */
2195
scratch_ids_per_subslice = 16 * 8;
2196
} else if (devinfo->ver == 11) {
2197
/* The MEDIA_VFE_STATE docs say:
2198
*
2199
* "Starting with this configuration, the Maximum Number of
2200
* Threads must be set to (#EU * 8) for GPGPU dispatches.
2201
*
2202
* Although there are only 7 threads per EU in the configuration,
2203
* the FFTID is calculated as if there are 8 threads per EU,
2204
* which in turn requires a larger amount of Scratch Space to be
2205
* allocated by the driver."
2206
*/
2207
scratch_ids_per_subslice = 8 * 8;
2208
}
2209
2210
uint32_t max_threads[] = {
2211
[MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
2212
[MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
2213
[MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
2214
[MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
2215
[MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
2216
[MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslice_total,
2217
};
2218
2219
uint32_t size = per_thread_scratch * max_threads[stage];
2220
2221
*bop = iris_bo_alloc(bufmgr, "scratch", size, 1, IRIS_MEMZONE_SHADER, 0);
2222
}
2223
2224
return *bop;
2225
}
2226
2227
const struct iris_state_ref *
2228
iris_get_scratch_surf(struct iris_context *ice,
2229
unsigned per_thread_scratch)
2230
{
2231
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2232
ASSERTED const struct intel_device_info *devinfo = &screen->devinfo;
2233
2234
assert(devinfo->verx10 >= 125);
2235
2236
unsigned encoded_size = ffs(per_thread_scratch) - 11;
2237
assert(encoded_size < ARRAY_SIZE(ice->shaders.scratch_surfs));
2238
assert(per_thread_scratch == 1 << (encoded_size + 10));
2239
2240
struct iris_state_ref *ref = &ice->shaders.scratch_surfs[encoded_size];
2241
2242
if (ref->res)
2243
return ref;
2244
2245
struct iris_bo *scratch_bo =
2246
iris_get_scratch_space(ice, per_thread_scratch, MESA_SHADER_COMPUTE);
2247
2248
void *map = upload_state(ice->state.bindless_uploader, ref,
2249
screen->isl_dev.ss.size, 64);
2250
2251
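/* Describe the scratch BO as a RAW buffer surface whose pitch is the
 * per-thread scratch size, matching the surface-based scratch model
 * described in iris_get_scratch_space().
 */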
isl_buffer_fill_state(&screen->isl_dev, map,
2252
.address = scratch_bo->gtt_offset,
2253
.size_B = scratch_bo->size,
2254
.format = ISL_FORMAT_RAW,
2255
.swizzle = ISL_SWIZZLE_IDENTITY,
2256
.mocs = iris_mocs(scratch_bo, &screen->isl_dev, 0),
2257
.stride_B = per_thread_scratch,
2258
.is_scratch = true);
2259
2260
return ref;
2261
}
2262
2263
/* ------------------------------------------------------------------- */
2264
2265
/**
2266
* The pipe->create_[stage]_state() driver hooks.
2267
*
2268
* Performs basic NIR preprocessing, records any state dependencies, and
2269
* returns an iris_uncompiled_shader as the Gallium CSO.
2270
*
2271
* Actual shader compilation to assembly happens later, at first use.
2272
*/
2273
static void *
2274
iris_create_uncompiled_shader(struct iris_screen *screen,
2275
nir_shader *nir,
2276
const struct pipe_stream_output_info *so_info)
2277
{
2278
const struct intel_device_info *devinfo = &screen->devinfo;
2279
2280
struct iris_uncompiled_shader *ish =
2281
calloc(1, sizeof(struct iris_uncompiled_shader));
2282
if (!ish)
2283
return NULL;
2284
2285
list_inithead(&ish->variants);
2286
simple_mtx_init(&ish->lock, mtx_plain);
2287
2288
NIR_PASS(ish->needs_edge_flag, nir, iris_fix_edge_flags);
2289
2290
brw_preprocess_nir(screen->compiler, nir, NULL);
2291
2292
NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo,
2293
&ish->uses_atomic_load_store);
2294
NIR_PASS_V(nir, iris_lower_storage_image_derefs);
2295
2296
nir_sweep(nir);
2297
2298
ish->program_id = get_new_program_id(screen);
2299
ish->nir = nir;
2300
if (so_info) {
2301
memcpy(&ish->stream_output, so_info, sizeof(*so_info));
2302
update_so_info(&ish->stream_output, nir->info.outputs_written);
2303
}
2304
2305
/* Save this now before potentially dropping nir->info.name */
2306
if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
2307
ish->use_alt_mode = true;
2308
2309
if (screen->disk_cache) {
2310
/* Serialize the NIR to a binary blob that we can hash for the disk
2311
* cache. Drop unnecessary information (like variable names)
2312
* so the serialized NIR is smaller, and also to let us detect more
2313
* isomorphic shaders when hashing, increasing cache hits.
2314
*/
2315
struct blob blob;
2316
blob_init(&blob);
2317
nir_serialize(&blob, nir, true);
2318
_mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
2319
blob_finish(&blob);
2320
}
2321
2322
return ish;
2323
}
2324
2325
static struct iris_uncompiled_shader *
2326
iris_create_shader_state(struct pipe_context *ctx,
2327
const struct pipe_shader_state *state)
2328
{
2329
struct iris_screen *screen = (void *) ctx->screen;
2330
struct nir_shader *nir;
2331
2332
if (state->type == PIPE_SHADER_IR_TGSI)
2333
nir = tgsi_to_nir(state->tokens, ctx->screen, false);
2334
else
2335
nir = state->ir.nir;
2336
2337
return iris_create_uncompiled_shader(screen, nir, &state->stream_output);
2338
}
2339
2340
static void *
2341
iris_create_vs_state(struct pipe_context *ctx,
2342
const struct pipe_shader_state *state)
2343
{
2344
struct iris_context *ice = (void *) ctx;
2345
struct iris_screen *screen = (void *) ctx->screen;
2346
struct u_upload_mgr *uploader = ice->shaders.uploader_unsync;
2347
struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2348
2349
/* User clip planes */
2350
if (ish->nir->info.clip_distance_array_size == 0)
2351
ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2352
2353
if (screen->precompile) {
2354
struct iris_vs_prog_key key = { KEY_ID(vue.base) };
2355
2356
if (!iris_disk_cache_retrieve(screen, uploader, ish, &key, sizeof(key)))
2357
iris_compile_vs(screen, uploader, &ice->dbg, ish, &key);
2358
}
2359
2360
return ish;
2361
}
2362
2363
static void *
2364
iris_create_tcs_state(struct pipe_context *ctx,
2365
const struct pipe_shader_state *state)
2366
{
2367
struct iris_context *ice = (void *) ctx;
2368
struct iris_screen *screen = (void *) ctx->screen;
2369
const struct brw_compiler *compiler = screen->compiler;
2370
struct u_upload_mgr *uploader = ice->shaders.uploader_unsync;
2371
struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2372
struct shader_info *info = &ish->nir->info;
2373
2374
if (screen->precompile) {
2375
const unsigned _GL_TRIANGLES = 0x0004;
2376
struct iris_tcs_prog_key key = {
2377
KEY_ID(vue.base),
2378
// XXX: make sure the linker fills this out from the TES...
2379
.tes_primitive_mode =
2380
info->tess.primitive_mode ? info->tess.primitive_mode
2381
: _GL_TRIANGLES,
2382
.outputs_written = info->outputs_written,
2383
.patch_outputs_written = info->patch_outputs_written,
2384
};
2385
2386
/* 8_PATCH mode needs the key to contain the input patch dimensionality.
2387
* We don't have that information, so we simply guess that the input
2388
* and output patches are the same size. This is a bad guess, but we
2389
* can't do much better.
2390
*/
2391
if (compiler->use_tcs_8_patch)
2392
key.input_vertices = info->tess.tcs_vertices_out;
2393
2394
if (!iris_disk_cache_retrieve(screen, uploader, ish, &key, sizeof(key)))
2395
iris_compile_tcs(screen, NULL, uploader, &ice->dbg, ish, &key);
2396
}
2397
2398
return ish;
2399
}
2400
2401
static void *
2402
iris_create_tes_state(struct pipe_context *ctx,
2403
const struct pipe_shader_state *state)
2404
{
2405
struct iris_context *ice = (void *) ctx;
2406
struct iris_screen *screen = (void *) ctx->screen;
2407
struct u_upload_mgr *uploader = ice->shaders.uploader_unsync;
2408
struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2409
struct shader_info *info = &ish->nir->info;
2410
2411
/* User clip planes */
2412
if (ish->nir->info.clip_distance_array_size == 0)
2413
ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2414
2415
if (screen->precompile) {
2416
struct iris_tes_prog_key key = {
2417
KEY_ID(vue.base),
2418
// XXX: not ideal, need TCS output/TES input unification
2419
.inputs_read = info->inputs_read,
2420
.patch_inputs_read = info->patch_inputs_read,
2421
};
2422
2423
if (!iris_disk_cache_retrieve(screen, uploader, ish, &key, sizeof(key)))
2424
iris_compile_tes(screen, uploader, &ice->dbg, ish, &key);
2425
}
2426
2427
return ish;
2428
}
2429
2430
static void *
2431
iris_create_gs_state(struct pipe_context *ctx,
2432
const struct pipe_shader_state *state)
2433
{
2434
struct iris_context *ice = (void *) ctx;
2435
struct iris_screen *screen = (void *) ctx->screen;
2436
struct u_upload_mgr *uploader = ice->shaders.uploader_unsync;
2437
struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2438
2439
/* User clip planes */
2440
if (ish->nir->info.clip_distance_array_size == 0)
2441
ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2442
2443
if (screen->precompile) {
2444
struct iris_gs_prog_key key = { KEY_ID(vue.base) };
2445
2446
if (!iris_disk_cache_retrieve(screen, uploader, ish, &key, sizeof(key)))
2447
iris_compile_gs(screen, uploader, &ice->dbg, ish, &key);
2448
}
2449
2450
return ish;
2451
}
2452
2453
static void *
2454
iris_create_fs_state(struct pipe_context *ctx,
2455
const struct pipe_shader_state *state)
2456
{
2457
struct iris_context *ice = (void *) ctx;
2458
struct iris_screen *screen = (void *) ctx->screen;
2459
struct u_upload_mgr *uploader = ice->shaders.uploader_unsync;
2460
struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2461
struct shader_info *info = &ish->nir->info;
2462
2463
ish->nos |= (1ull << IRIS_NOS_FRAMEBUFFER) |
2464
(1ull << IRIS_NOS_DEPTH_STENCIL_ALPHA) |
2465
(1ull << IRIS_NOS_RASTERIZER) |
2466
(1ull << IRIS_NOS_BLEND);
2467
2468
/* The program key needs the VUE map if there are > 16 inputs */
2469
if (util_bitcount64(ish->nir->info.inputs_read &
2470
BRW_FS_VARYING_INPUT_MASK) > 16) {
2471
ish->nos |= (1ull << IRIS_NOS_LAST_VUE_MAP);
2472
}
2473
2474
if (screen->precompile) {
2475
const uint64_t color_outputs = info->outputs_written &
2476
~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
2477
BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
2478
BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
2479
2480
bool can_rearrange_varyings =
2481
util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;
2482
2483
const struct intel_device_info *devinfo = &screen->devinfo;
2484
struct iris_fs_prog_key key = {
2485
KEY_ID(base),
2486
.nr_color_regions = util_bitcount(color_outputs),
2487
.coherent_fb_fetch = devinfo->ver >= 9,
2488
.input_slots_valid =
2489
can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
2490
};
2491
2492
if (!iris_disk_cache_retrieve(screen, uploader, ish, &key, sizeof(key)))
2493
iris_compile_fs(screen, uploader, &ice->dbg, ish, &key, NULL);
2494
}
2495
2496
return ish;
2497
}
2498
2499
static void *
2500
iris_create_compute_state(struct pipe_context *ctx,
2501
const struct pipe_compute_state *state)
2502
{
2503
struct iris_context *ice = (void *) ctx;
2504
struct iris_screen *screen = (void *) ctx->screen;
2505
struct u_upload_mgr *uploader = ice->shaders.uploader_unsync;
2506
const nir_shader_compiler_options *options =
2507
screen->compiler->glsl_compiler_options[MESA_SHADER_COMPUTE].NirOptions;
2508
2509
nir_shader *nir;
2510
switch (state->ir_type) {
2511
case PIPE_SHADER_IR_NIR:
2512
nir = (void *)state->prog;
2513
break;
2514
2515
case PIPE_SHADER_IR_NIR_SERIALIZED: {
2516
struct blob_reader reader;
2517
const struct pipe_binary_program_header *hdr = state->prog;
2518
blob_reader_init(&reader, hdr->blob, hdr->num_bytes);
2519
nir = nir_deserialize(NULL, options, &reader);
2520
break;
2521
}
2522
2523
default:
2524
unreachable("Unsupported IR");
2525
}
2526
2527
/* Most of iris doesn't really care about the difference between compute
2528
* shaders and kernels. We also tend to hard-code COMPUTE everywhere so
2529
* it's way easier if we just normalize to COMPUTE here.
2530
*/
2531
assert(nir->info.stage == MESA_SHADER_COMPUTE ||
2532
nir->info.stage == MESA_SHADER_KERNEL);
2533
nir->info.stage = MESA_SHADER_COMPUTE;
2534
2535
struct iris_uncompiled_shader *ish =
2536
iris_create_uncompiled_shader(screen, nir, NULL);
2537
ish->kernel_input_size = state->req_input_mem;
2538
ish->kernel_shared_size = state->req_local_mem;
2539
2540
// XXX: disallow more than 64KB of shared variables
2541
2542
if (screen->precompile) {
2543
struct iris_cs_prog_key key = { KEY_ID(base) };
2544
2545
if (!iris_disk_cache_retrieve(screen, uploader, ish, &key, sizeof(key)))
2546
iris_compile_cs(screen, uploader, &ice->dbg, ish, &key);
2547
}
2548
2549
return ish;
2550
}
2551
2552
/**
2553
* The pipe->delete_[stage]_state() driver hooks.
2554
*
2555
* Frees the iris_uncompiled_shader.
2556
*/
2557
static void
2558
iris_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
2559
{
2560
struct iris_uncompiled_shader *ish = state;
2561
struct iris_context *ice = (void *) ctx;
2562
2563
if (ice->shaders.uncompiled[stage] == ish) {
2564
ice->shaders.uncompiled[stage] = NULL;
2565
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2566
}
2567
2568
/* No need to take ish->lock; we hold the last reference to ish */
2569
list_for_each_entry_safe(struct iris_compiled_shader, shader,
2570
&ish->variants, link) {
2571
list_del(&shader->link);
2572
2573
iris_shader_variant_reference(&shader, NULL);
2574
}
2575
2576
simple_mtx_destroy(&ish->lock);
2577
2578
ralloc_free(ish->nir);
2579
free(ish);
2580
}
2581
2582
static void
2583
iris_delete_vs_state(struct pipe_context *ctx, void *state)
2584
{
2585
iris_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
2586
}
2587
2588
static void
2589
iris_delete_tcs_state(struct pipe_context *ctx, void *state)
2590
{
2591
iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
2592
}
2593
2594
static void
2595
iris_delete_tes_state(struct pipe_context *ctx, void *state)
2596
{
2597
iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
2598
}
2599
2600
static void
2601
iris_delete_gs_state(struct pipe_context *ctx, void *state)
2602
{
2603
iris_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
2604
}
2605
2606
static void
2607
iris_delete_fs_state(struct pipe_context *ctx, void *state)
2608
{
2609
iris_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
2610
}
2611
2612
static void
2613
iris_delete_cs_state(struct pipe_context *ctx, void *state)
2614
{
2615
iris_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
2616
}
2617
2618
/**
2619
* The pipe->bind_[stage]_state() driver hook.
2620
*
2621
* Binds an uncompiled shader as the current one for a particular stage.
2622
* Updates dirty tracking to account for the shader's NOS.
2623
*/
2624
static void
2625
bind_shader_state(struct iris_context *ice,
2626
struct iris_uncompiled_shader *ish,
2627
gl_shader_stage stage)
2628
{
2629
uint64_t stage_dirty_bit = IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2630
const uint64_t nos = ish ? ish->nos : 0;
2631
2632
const struct shader_info *old_info = iris_get_shader_info(ice, stage);
2633
const struct shader_info *new_info = ish ? &ish->nir->info : NULL;
2634
2635
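/* If the highest used texture unit changed, the stage's SAMPLER_STATE
 * table needs to be re-uploaded.
 */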
if ((old_info ? BITSET_LAST_BIT(old_info->textures_used) : 0) !=
2636
(new_info ? BITSET_LAST_BIT(new_info->textures_used) : 0)) {
2637
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
2638
}
2639
2640
ice->shaders.uncompiled[stage] = ish;
2641
ice->state.stage_dirty |= stage_dirty_bit;
2642
2643
/* Record that CSOs need to mark IRIS_DIRTY_UNCOMPILED_XS when they change
2644
* (or that they no longer need to do so).
2645
*/
2646
for (int i = 0; i < IRIS_NOS_COUNT; i++) {
2647
if (nos & (1 << i))
2648
ice->state.stage_dirty_for_nos[i] |= stage_dirty_bit;
2649
else
2650
ice->state.stage_dirty_for_nos[i] &= ~stage_dirty_bit;
2651
}
2652
}
2653
2654
static void
2655
iris_bind_vs_state(struct pipe_context *ctx, void *state)
2656
{
2657
struct iris_context *ice = (struct iris_context *)ctx;
2658
struct iris_uncompiled_shader *ish = state;
2659
2660
if (ish) {
2661
const struct shader_info *info = &ish->nir->info;
2662
if (ice->state.window_space_position != info->vs.window_space_position) {
2663
ice->state.window_space_position = info->vs.window_space_position;
2664
2665
ice->state.dirty |= IRIS_DIRTY_CLIP |
2666
IRIS_DIRTY_RASTER |
2667
IRIS_DIRTY_CC_VIEWPORT;
2668
}
2669
2670
const bool uses_draw_params =
2671
BITSET_TEST(info->system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
2672
BITSET_TEST(info->system_values_read, SYSTEM_VALUE_BASE_INSTANCE);
2673
const bool uses_derived_draw_params =
2674
BITSET_TEST(info->system_values_read, SYSTEM_VALUE_DRAW_ID) ||
2675
BITSET_TEST(info->system_values_read, SYSTEM_VALUE_IS_INDEXED_DRAW);
2676
const bool needs_sgvs_element = uses_draw_params ||
2677
BITSET_TEST(info->system_values_read, SYSTEM_VALUE_INSTANCE_ID) ||
2678
BITSET_TEST(info->system_values_read,
2679
SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
2680
2681
if (ice->state.vs_uses_draw_params != uses_draw_params ||
2682
ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
2683
ice->state.vs_needs_edge_flag != ish->needs_edge_flag) {
2684
ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
2685
IRIS_DIRTY_VERTEX_ELEMENTS;
2686
}
2687
2688
ice->state.vs_uses_draw_params = uses_draw_params;
2689
ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
2690
ice->state.vs_needs_sgvs_element = needs_sgvs_element;
2691
ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
2692
}
2693
2694
bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
2695
}
2696
2697
static void
2698
iris_bind_tcs_state(struct pipe_context *ctx, void *state)
2699
{
2700
bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
2701
}
2702
2703
static void
2704
iris_bind_tes_state(struct pipe_context *ctx, void *state)
2705
{
2706
struct iris_context *ice = (struct iris_context *)ctx;
2707
2708
/* Enabling/disabling optional stages requires a URB reconfiguration. */
2709
if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
2710
ice->state.dirty |= IRIS_DIRTY_URB;
2711
2712
bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
2713
}
2714
2715
static void
2716
iris_bind_gs_state(struct pipe_context *ctx, void *state)
2717
{
2718
struct iris_context *ice = (struct iris_context *)ctx;
2719
2720
/* Enabling/disabling optional stages requires a URB reconfiguration. */
2721
if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
2722
ice->state.dirty |= IRIS_DIRTY_URB;
2723
2724
bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
2725
}
2726
2727
static void
2728
iris_bind_fs_state(struct pipe_context *ctx, void *state)
2729
{
2730
struct iris_context *ice = (struct iris_context *) ctx;
2731
struct iris_screen *screen = (struct iris_screen *) ctx->screen;
2732
const struct intel_device_info *devinfo = &screen->devinfo;
2733
struct iris_uncompiled_shader *old_ish =
2734
ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
2735
struct iris_uncompiled_shader *new_ish = state;
2736
2737
const unsigned color_bits =
2738
BITFIELD64_BIT(FRAG_RESULT_COLOR) |
2739
BITFIELD64_RANGE(FRAG_RESULT_DATA0, BRW_MAX_DRAW_BUFFERS);
2740
2741
/* Fragment shader outputs influence HasWriteableRT */
2742
if (!old_ish || !new_ish ||
2743
(old_ish->nir->info.outputs_written & color_bits) !=
2744
(new_ish->nir->info.outputs_written & color_bits))
2745
ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
2746
2747
if (devinfo->ver == 8)
2748
ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
2749
2750
bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
2751
}
2752
2753
static void
2754
iris_bind_cs_state(struct pipe_context *ctx, void *state)
2755
{
2756
bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
2757
}
2758
2759
void
2760
iris_init_program_functions(struct pipe_context *ctx)
2761
{
2762
ctx->create_vs_state = iris_create_vs_state;
2763
ctx->create_tcs_state = iris_create_tcs_state;
2764
ctx->create_tes_state = iris_create_tes_state;
2765
ctx->create_gs_state = iris_create_gs_state;
2766
ctx->create_fs_state = iris_create_fs_state;
2767
ctx->create_compute_state = iris_create_compute_state;
2768
2769
ctx->delete_vs_state = iris_delete_vs_state;
2770
ctx->delete_tcs_state = iris_delete_tcs_state;
2771
ctx->delete_tes_state = iris_delete_tes_state;
2772
ctx->delete_gs_state = iris_delete_gs_state;
2773
ctx->delete_fs_state = iris_delete_fs_state;
2774
ctx->delete_compute_state = iris_delete_cs_state;
2775
2776
ctx->bind_vs_state = iris_bind_vs_state;
2777
ctx->bind_tcs_state = iris_bind_tcs_state;
2778
ctx->bind_tes_state = iris_bind_tes_state;
2779
ctx->bind_gs_state = iris_bind_gs_state;
2780
ctx->bind_fs_state = iris_bind_fs_state;
2781
ctx->bind_compute_state = iris_bind_cs_state;
2782
}
2783
2784