GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/intel/vulkan/anv_pipeline.c
1
/*
2
* Copyright © 2015 Intel Corporation
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*/
23
24
#include <assert.h>
25
#include <stdbool.h>
26
#include <string.h>
27
#include <unistd.h>
28
#include <fcntl.h>
29
30
#include "util/mesa-sha1.h"
31
#include "util/os_time.h"
32
#include "common/intel_l3_config.h"
33
#include "common/intel_disasm.h"
34
#include "common/intel_sample_positions.h"
35
#include "anv_private.h"
36
#include "compiler/brw_nir.h"
37
#include "compiler/brw_nir_rt.h"
38
#include "anv_nir.h"
39
#include "nir/nir_xfb_info.h"
40
#include "spirv/nir_spirv.h"
41
#include "vk_util.h"
42
43
/* Needed for SWIZZLE macros */
44
#include "program/prog_instruction.h"
45
46
// Shader functions
47
#define SPIR_V_MAGIC_NUMBER 0x07230203
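/* 0x07230203 is the magic number defined by the SPIR-V spec: it is the
* first 32-bit word of every SPIR-V module and also lets tools detect a
* byte-swapped (wrong-endianness) binary.
*/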
48
49
struct anv_spirv_debug_data {
50
struct anv_device *device;
51
const struct vk_shader_module *module;
52
};
53
54
static void anv_spirv_nir_debug(void *private_data,
55
enum nir_spirv_debug_level level,
56
size_t spirv_offset,
57
const char *message)
58
{
59
struct anv_spirv_debug_data *debug_data = private_data;
60
struct anv_instance *instance = debug_data->device->physical->instance;
61
62
static const VkDebugReportFlagsEXT vk_flags[] = {
63
[NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
64
[NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
65
[NIR_SPIRV_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
66
};
67
char buffer[256];
68
69
snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s", (unsigned long) spirv_offset, message);
70
71
vk_debug_report(&instance->vk, vk_flags[level],
72
&debug_data->module->base,
73
0, 0, "anv", buffer);
74
}
75
76
/* Eventually, this will become part of anv_CreateShader. Unfortunately,
77
* we can't do that yet because we don't have the ability to copy nir.
78
*/
79
static nir_shader *
80
anv_shader_compile_to_nir(struct anv_device *device,
81
void *mem_ctx,
82
const struct vk_shader_module *module,
83
const char *entrypoint_name,
84
gl_shader_stage stage,
85
const VkSpecializationInfo *spec_info)
86
{
87
const struct anv_physical_device *pdevice = device->physical;
88
const struct brw_compiler *compiler = pdevice->compiler;
89
const nir_shader_compiler_options *nir_options =
90
compiler->glsl_compiler_options[stage].NirOptions;
91
92
uint32_t *spirv = (uint32_t *) module->data;
93
assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
94
assert(module->size % 4 == 0);
95
96
uint32_t num_spec_entries = 0;
97
struct nir_spirv_specialization *spec_entries = NULL;
98
if (spec_info && spec_info->mapEntryCount > 0) {
99
num_spec_entries = spec_info->mapEntryCount;
100
spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
101
for (uint32_t i = 0; i < num_spec_entries; i++) {
102
VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
103
const void *data = spec_info->pData + entry.offset;
104
assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
105
106
spec_entries[i].id = spec_info->pMapEntries[i].constantID;
107
switch (entry.size) {
108
case 8:
109
spec_entries[i].value.u64 = *(const uint64_t *)data;
110
break;
111
case 4:
112
spec_entries[i].value.u32 = *(const uint32_t *)data;
113
break;
114
case 2:
115
spec_entries[i].value.u16 = *(const uint16_t *)data;
116
break;
117
case 1:
118
spec_entries[i].value.u8 = *(const uint8_t *)data;
119
break;
120
default:
121
assert(!"Invalid spec constant size");
122
break;
123
}
124
}
125
}
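/* For illustration only (client-side code, not part of this driver): a
* VkSpecializationInfo that would produce a single 4-byte entry in the
* switch above could look like
*
*    const uint32_t value = 64;
*    const VkSpecializationMapEntry entry = {
*       .constantID = 0, .offset = 0, .size = sizeof(value),
*    };
*    const VkSpecializationInfo info = {
*       .mapEntryCount = 1, .pMapEntries = &entry,
*       .dataSize = sizeof(value), .pData = &value,
*    };
*
* which ends up in spec_entries[0].value.u32.
*/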
126
127
struct anv_spirv_debug_data spirv_debug_data = {
128
.device = device,
129
.module = module,
130
};
131
struct spirv_to_nir_options spirv_options = {
132
.frag_coord_is_sysval = true,
133
.caps = {
134
.demote_to_helper_invocation = true,
135
.derivative_group = true,
136
.descriptor_array_dynamic_indexing = true,
137
.descriptor_array_non_uniform_indexing = true,
138
.descriptor_indexing = true,
139
.device_group = true,
140
.draw_parameters = true,
141
.float16 = pdevice->info.ver >= 8,
142
.float64 = pdevice->info.ver >= 8,
143
.fragment_shader_sample_interlock = pdevice->info.ver >= 9,
144
.fragment_shader_pixel_interlock = pdevice->info.ver >= 9,
145
.geometry_streams = true,
146
.image_write_without_format = true,
147
.int8 = pdevice->info.ver >= 8,
148
.int16 = pdevice->info.ver >= 8,
149
.int64 = pdevice->info.ver >= 8,
150
.int64_atomics = pdevice->info.ver >= 9 && pdevice->use_softpin,
151
.integer_functions2 = pdevice->info.ver >= 8,
152
.min_lod = true,
153
.multiview = true,
154
.physical_storage_buffer_address = pdevice->has_a64_buffer_access,
155
.post_depth_coverage = pdevice->info.ver >= 9,
156
.runtime_descriptor_array = true,
157
.float_controls = pdevice->info.ver >= 8,
158
.ray_tracing = pdevice->info.has_ray_tracing,
159
.shader_clock = true,
160
.shader_viewport_index_layer = true,
161
.stencil_export = pdevice->info.ver >= 9,
162
.storage_8bit = pdevice->info.ver >= 8,
163
.storage_16bit = pdevice->info.ver >= 8,
164
.subgroup_arithmetic = true,
165
.subgroup_basic = true,
166
.subgroup_ballot = true,
167
.subgroup_quad = true,
168
.subgroup_uniform_control_flow = true,
169
.subgroup_shuffle = true,
170
.subgroup_vote = true,
171
.tessellation = true,
172
.transform_feedback = pdevice->info.ver >= 8,
173
.variable_pointers = true,
174
.vk_memory_model = true,
175
.vk_memory_model_device_scope = true,
176
.workgroup_memory_explicit_layout = true,
177
.fragment_shading_rate = pdevice->info.ver >= 11,
178
},
179
.ubo_addr_format =
180
anv_nir_ubo_addr_format(pdevice, device->robust_buffer_access),
181
.ssbo_addr_format =
182
anv_nir_ssbo_addr_format(pdevice, device->robust_buffer_access),
183
.phys_ssbo_addr_format = nir_address_format_64bit_global,
184
.push_const_addr_format = nir_address_format_logical,
185
186
/* TODO: Consider changing this to an address format in which the NULL
* pointer equals 0. That might be a better format for playing nice
* with certain code / code generators.
*/
190
.shared_addr_format = nir_address_format_32bit_offset,
191
.debug = {
192
.func = anv_spirv_nir_debug,
193
.private_data = &spirv_debug_data,
194
},
195
};
196
197
198
nir_shader *nir =
199
spirv_to_nir(spirv, module->size / 4,
200
spec_entries, num_spec_entries,
201
stage, entrypoint_name, &spirv_options, nir_options);
202
if (!nir) {
203
free(spec_entries);
204
return NULL;
205
}
206
207
assert(nir->info.stage == stage);
208
nir_validate_shader(nir, "after spirv_to_nir");
209
nir_validate_ssa_dominance(nir, "after spirv_to_nir");
210
ralloc_steal(mem_ctx, nir);
211
212
free(spec_entries);
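/* Optionally dump the freshly translated NIR. In Mesa this is driven by
* the INTEL_DEBUG environment variable, e.g. INTEL_DEBUG=vs or
* INTEL_DEBUG=fs to dump the corresponding stage.
*/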
213
214
if (INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage)) {
215
fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
216
gl_shader_stage_name(stage));
217
nir_print_shader(nir, stderr);
218
}
219
220
/* We have to lower away local constant initializers right before we
221
* inline functions. That way they get properly initialized at the top
222
* of the function and not at the top of its caller.
223
*/
224
NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
225
NIR_PASS_V(nir, nir_lower_returns);
226
NIR_PASS_V(nir, nir_inline_functions);
227
NIR_PASS_V(nir, nir_copy_prop);
228
NIR_PASS_V(nir, nir_opt_deref);
229
230
/* Pick off the single entrypoint that we want */
231
foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
232
if (!func->is_entrypoint)
233
exec_node_remove(&func->node);
234
}
235
assert(exec_list_length(&nir->functions) == 1);
236
237
/* Now that we've deleted all but the main function, we can go ahead and
238
* lower the rest of the constant initializers. We do this here so that
239
* nir_remove_dead_variables and split_per_member_structs below see the
240
* corresponding stores.
241
*/
242
NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
243
244
/* Split member structs. We do this before lower_io_to_temporaries so that
245
* it doesn't lower system values to temporaries by accident.
246
*/
247
NIR_PASS_V(nir, nir_split_var_copies);
248
NIR_PASS_V(nir, nir_split_per_member_structs);
249
250
NIR_PASS_V(nir, nir_remove_dead_variables,
251
nir_var_shader_in | nir_var_shader_out | nir_var_system_value |
252
nir_var_shader_call_data | nir_var_ray_hit_attrib,
253
NULL);
254
255
NIR_PASS_V(nir, nir_propagate_invariant, false);
256
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
257
nir_shader_get_entrypoint(nir), true, false);
258
259
NIR_PASS_V(nir, nir_lower_frexp);
260
261
/* Vulkan uses the separate-shader linking model */
262
nir->info.separate_shader = true;
263
264
brw_preprocess_nir(compiler, nir, NULL);
265
266
return nir;
267
}
268
269
VkResult
270
anv_pipeline_init(struct anv_pipeline *pipeline,
271
struct anv_device *device,
272
enum anv_pipeline_type type,
273
VkPipelineCreateFlags flags,
274
const VkAllocationCallbacks *pAllocator)
275
{
276
VkResult result;
277
278
memset(pipeline, 0, sizeof(*pipeline));
279
280
vk_object_base_init(&device->vk, &pipeline->base,
281
VK_OBJECT_TYPE_PIPELINE);
282
pipeline->device = device;
283
284
/* It's the job of the child class to provide actual backing storage for
285
* the batch by setting batch.start, batch.next, and batch.end.
286
*/
287
pipeline->batch.alloc = pAllocator ? pAllocator : &device->vk.alloc;
288
pipeline->batch.relocs = &pipeline->batch_relocs;
289
pipeline->batch.status = VK_SUCCESS;
290
291
result = anv_reloc_list_init(&pipeline->batch_relocs,
292
pipeline->batch.alloc);
293
if (result != VK_SUCCESS)
294
return result;
295
296
pipeline->mem_ctx = ralloc_context(NULL);
297
298
pipeline->type = type;
299
pipeline->flags = flags;
300
301
util_dynarray_init(&pipeline->executables, pipeline->mem_ctx);
302
303
return VK_SUCCESS;
304
}
305
306
void
307
anv_pipeline_finish(struct anv_pipeline *pipeline,
308
struct anv_device *device,
309
const VkAllocationCallbacks *pAllocator)
310
{
311
anv_reloc_list_finish(&pipeline->batch_relocs,
312
pAllocator ? pAllocator : &device->vk.alloc);
313
ralloc_free(pipeline->mem_ctx);
314
vk_object_base_finish(&pipeline->base);
315
}
316
317
void anv_DestroyPipeline(
318
VkDevice _device,
319
VkPipeline _pipeline,
320
const VkAllocationCallbacks* pAllocator)
321
{
322
ANV_FROM_HANDLE(anv_device, device, _device);
323
ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
324
325
if (!pipeline)
326
return;
327
328
switch (pipeline->type) {
329
case ANV_PIPELINE_GRAPHICS: {
330
struct anv_graphics_pipeline *gfx_pipeline =
331
anv_pipeline_to_graphics(pipeline);
332
333
if (gfx_pipeline->blend_state.map)
334
anv_state_pool_free(&device->dynamic_state_pool, gfx_pipeline->blend_state);
335
if (gfx_pipeline->cps_state.map)
336
anv_state_pool_free(&device->dynamic_state_pool, gfx_pipeline->cps_state);
337
338
for (unsigned s = 0; s < ARRAY_SIZE(gfx_pipeline->shaders); s++) {
339
if (gfx_pipeline->shaders[s])
340
anv_shader_bin_unref(device, gfx_pipeline->shaders[s]);
341
}
342
break;
343
}
344
345
case ANV_PIPELINE_COMPUTE: {
346
struct anv_compute_pipeline *compute_pipeline =
347
anv_pipeline_to_compute(pipeline);
348
349
if (compute_pipeline->cs)
350
anv_shader_bin_unref(device, compute_pipeline->cs);
351
352
break;
353
}
354
355
case ANV_PIPELINE_RAY_TRACING: {
356
struct anv_ray_tracing_pipeline *rt_pipeline =
357
anv_pipeline_to_ray_tracing(pipeline);
358
359
util_dynarray_foreach(&rt_pipeline->shaders,
360
struct anv_shader_bin *, shader) {
361
anv_shader_bin_unref(device, *shader);
362
}
363
break;
364
}
365
366
default:
367
unreachable("invalid pipeline type");
368
}
369
370
anv_pipeline_finish(pipeline, device, pAllocator);
371
vk_free2(&device->vk.alloc, pAllocator, pipeline);
372
}
373
374
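/* Maps VkPrimitiveTopology values (used directly as array indices) to the
* hardware 3DPRIM_* values. Note that VK_PRIMITIVE_TOPOLOGY_PATCH_LIST has
* no entry here.
*/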
static const uint32_t vk_to_intel_primitive_type[] = {
375
[VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
376
[VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
377
[VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
378
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
379
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
380
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
381
[VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
382
[VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
383
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
384
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
385
};
386
387
static void
388
populate_sampler_prog_key(const struct intel_device_info *devinfo,
389
struct brw_sampler_prog_key_data *key)
390
{
391
/* Almost all multisampled textures are compressed. The only time when we
392
* don't compress a multisampled texture is for 16x MSAA with a surface
393
* width greater than 8k which is a bit of an edge case. Since the sampler
394
* just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
395
* to tell the compiler to always assume compression.
396
*/
397
key->compressed_multisample_layout_mask = ~0;
398
399
/* SkyLake added support for 16x MSAA. With this came a new message for
400
* reading from a 16x MSAA surface with compression. The new message was
401
* needed because now the MCS data is 64 bits instead of 32 or lower as is
402
* the case for 8x, 4x, and 2x. The key->msaa_16 bit-field controls which
403
* message we use. Fortunately, the 16x message works for 8x, 4x, and 2x
404
* so we can just use it unconditionally. This may not be quite as
405
* efficient but it saves us from recompiling.
406
*/
407
if (devinfo->ver >= 9)
408
key->msaa_16 = ~0;
409
410
/* XXX: Handle texture swizzle on HSW- */
411
for (int i = 0; i < MAX_SAMPLERS; i++) {
412
/* Assume color sampler, no swizzling. (Works for BDW+) */
413
key->swizzles[i] = SWIZZLE_XYZW;
414
}
415
}
416
417
static void
418
populate_base_prog_key(const struct intel_device_info *devinfo,
419
VkPipelineShaderStageCreateFlags flags,
420
bool robust_buffer_acccess,
421
struct brw_base_prog_key *key)
422
{
423
if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
424
key->subgroup_size_type = BRW_SUBGROUP_SIZE_VARYING;
425
else
426
key->subgroup_size_type = BRW_SUBGROUP_SIZE_API_CONSTANT;
427
428
key->robust_buffer_access = robust_buffer_acccess;
429
430
populate_sampler_prog_key(devinfo, &key->tex);
431
}
432
433
static void
434
populate_vs_prog_key(const struct intel_device_info *devinfo,
435
VkPipelineShaderStageCreateFlags flags,
436
bool robust_buffer_acccess,
437
struct brw_vs_prog_key *key)
438
{
439
memset(key, 0, sizeof(*key));
440
441
populate_base_prog_key(devinfo, flags, robust_buffer_acccess, &key->base);
442
443
/* XXX: Handle vertex input work-arounds */
444
445
/* XXX: Handle sampler_prog_key */
446
}
447
448
static void
449
populate_tcs_prog_key(const struct intel_device_info *devinfo,
450
VkPipelineShaderStageCreateFlags flags,
451
bool robust_buffer_acccess,
452
unsigned input_vertices,
453
struct brw_tcs_prog_key *key)
454
{
455
memset(key, 0, sizeof(*key));
456
457
populate_base_prog_key(devinfo, flags, robust_buffer_acccess, &key->base);
458
459
key->input_vertices = input_vertices;
460
}
461
462
static void
463
populate_tes_prog_key(const struct intel_device_info *devinfo,
464
VkPipelineShaderStageCreateFlags flags,
465
bool robust_buffer_acccess,
466
struct brw_tes_prog_key *key)
467
{
468
memset(key, 0, sizeof(*key));
469
470
populate_base_prog_key(devinfo, flags, robust_buffer_acccess, &key->base);
471
}
472
473
static void
474
populate_gs_prog_key(const struct intel_device_info *devinfo,
475
VkPipelineShaderStageCreateFlags flags,
476
bool robust_buffer_acccess,
477
struct brw_gs_prog_key *key)
478
{
479
memset(key, 0, sizeof(*key));
480
481
populate_base_prog_key(devinfo, flags, robust_buffer_acccess, &key->base);
482
}
483
484
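/* Helper for populate_wm_prog_key: coarse pixel (fragment shading rate)
* shading is ruled out when sample shading is enabled, when the rate is
* neither dynamic nor specified for the pipeline, or when it is
* statically 1x1 with KEEP combiner ops.
*/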
static bool
485
pipeline_has_coarse_pixel(const struct anv_graphics_pipeline *pipeline,
486
const VkPipelineFragmentShadingRateStateCreateInfoKHR *fsr_info)
487
{
488
if (pipeline->sample_shading_enable)
489
return false;
490
491
/* Not dynamic & not specified for the pipeline. */
492
if ((pipeline->dynamic_states & ANV_CMD_DIRTY_DYNAMIC_SHADING_RATE) == 0 && !fsr_info)
493
return false;
494
495
/* Not dynamic & the pipeline has a 1x1 fragment shading rate with no
* possibility for any element of the pipeline to change the value.
*/
498
if ((pipeline->dynamic_states & ANV_CMD_DIRTY_DYNAMIC_SHADING_RATE) == 0 &&
499
fsr_info->fragmentSize.width <= 1 &&
500
fsr_info->fragmentSize.height <= 1 &&
501
fsr_info->combinerOps[0] == VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR &&
502
fsr_info->combinerOps[1] == VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR)
503
return false;
504
505
return true;
506
}
507
508
static void
509
populate_wm_prog_key(const struct anv_graphics_pipeline *pipeline,
510
VkPipelineShaderStageCreateFlags flags,
511
bool robust_buffer_acccess,
512
const struct anv_subpass *subpass,
513
const VkPipelineMultisampleStateCreateInfo *ms_info,
514
const VkPipelineFragmentShadingRateStateCreateInfoKHR *fsr_info,
515
struct brw_wm_prog_key *key)
516
{
517
const struct anv_device *device = pipeline->base.device;
518
const struct intel_device_info *devinfo = &device->info;
519
520
memset(key, 0, sizeof(*key));
521
522
populate_base_prog_key(devinfo, flags, robust_buffer_acccess, &key->base);
523
524
/* We set this to 0 here and set it to the actual value before we call
* brw_compile_fs.
*/
527
key->input_slots_valid = 0;
528
529
/* Vulkan doesn't specify a default */
530
key->high_quality_derivatives = false;
531
532
/* XXX Vulkan doesn't appear to specify */
533
key->clamp_fragment_color = false;
534
535
key->ignore_sample_mask_out = false;
536
537
assert(subpass->color_count <= MAX_RTS);
538
for (uint32_t i = 0; i < subpass->color_count; i++) {
539
if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
540
key->color_outputs_valid |= (1 << i);
541
}
542
543
key->nr_color_regions = subpass->color_count;
544
545
/* To reduce possible shader recompilations we would need to know whether
* there is a SampleMask output variable, in order to compute whether we
* should emit code to work around the issue that hardware disables alpha
* to coverage when there is a SampleMask output.
*/
550
key->alpha_to_coverage = ms_info && ms_info->alphaToCoverageEnable;
551
552
/* Vulkan doesn't support fixed-function alpha test */
553
key->alpha_test_replicate_alpha = false;
554
555
if (ms_info) {
556
/* We should probably pull this out of the shader, but it's fairly
557
* harmless to compute it and then let dead-code take care of it.
558
*/
559
if (ms_info->rasterizationSamples > 1) {
560
key->persample_interp = ms_info->sampleShadingEnable &&
561
(ms_info->minSampleShading * ms_info->rasterizationSamples) > 1;
562
key->multisample_fbo = true;
563
}
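/* Worked example: sampleShadingEnable = VK_TRUE, minSampleShading = 0.5
* and 4 samples gives 0.5 * 4 = 2 > 1, so per-sample interpolation is
* required; with minSampleShading = 0.25 the product is 1 and we stay on
* the per-pixel path.
*/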
564
565
key->frag_coord_adds_sample_pos = key->persample_interp;
566
}
567
568
key->coarse_pixel =
569
device->vk.enabled_extensions.KHR_fragment_shading_rate &&
570
pipeline_has_coarse_pixel(pipeline, fsr_info);
571
}
572
573
static void
574
populate_cs_prog_key(const struct intel_device_info *devinfo,
575
VkPipelineShaderStageCreateFlags flags,
576
bool robust_buffer_acccess,
577
const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info,
578
struct brw_cs_prog_key *key)
579
{
580
memset(key, 0, sizeof(*key));
581
582
populate_base_prog_key(devinfo, flags, robust_buffer_acccess, &key->base);
583
584
if (rss_info) {
585
assert(key->base.subgroup_size_type != BRW_SUBGROUP_SIZE_VARYING);
586
587
/* These enum values are expressly chosen to be equal to the subgroup
588
* size that they require.
589
*/
590
assert(rss_info->requiredSubgroupSize == 8 ||
591
rss_info->requiredSubgroupSize == 16 ||
592
rss_info->requiredSubgroupSize == 32);
593
key->base.subgroup_size_type = rss_info->requiredSubgroupSize;
594
} else if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT) {
595
/* If the client expressly requests full subgroups and they don't
* specify a subgroup size, we need to pick one. If they're requesting
* varying subgroup sizes, we set it to UNIFORM and let the back-end
* compiler pick. Otherwise, we specify the API value of 32.
* Performance will likely be terrible in this case but there's nothing
* we can do about that. The client should have chosen a size.
*/
602
if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
603
key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM;
604
else
605
key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_REQUIRE_32;
606
}
607
}
608
609
static void
610
populate_bs_prog_key(const struct intel_device_info *devinfo,
611
VkPipelineShaderStageCreateFlags flags,
612
bool robust_buffer_access,
613
struct brw_bs_prog_key *key)
614
{
615
memset(key, 0, sizeof(*key));
616
617
populate_base_prog_key(devinfo, flags, robust_buffer_access, &key->base);
618
}
619
620
struct anv_pipeline_stage {
621
gl_shader_stage stage;
622
623
const struct vk_shader_module *module;
624
const char *entrypoint;
625
const VkSpecializationInfo *spec_info;
626
627
unsigned char shader_sha1[20];
628
629
union brw_any_prog_key key;
630
631
struct {
632
gl_shader_stage stage;
633
unsigned char sha1[20];
634
} cache_key;
635
636
nir_shader *nir;
637
638
struct anv_pipeline_binding surface_to_descriptor[256];
639
struct anv_pipeline_binding sampler_to_descriptor[256];
640
struct anv_pipeline_bind_map bind_map;
641
642
union brw_any_prog_data prog_data;
643
644
uint32_t num_stats;
645
struct brw_compile_stats stats[3];
646
char *disasm[3];
647
648
VkPipelineCreationFeedbackEXT feedback;
649
650
const unsigned *code;
651
652
struct anv_shader_bin *bin;
653
};
654
655
static void
656
anv_pipeline_hash_shader(const struct vk_shader_module *module,
657
const char *entrypoint,
658
gl_shader_stage stage,
659
const VkSpecializationInfo *spec_info,
660
unsigned char *sha1_out)
661
{
662
struct mesa_sha1 ctx;
663
_mesa_sha1_init(&ctx);
664
665
_mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
666
_mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
667
_mesa_sha1_update(&ctx, &stage, sizeof(stage));
668
if (spec_info) {
669
_mesa_sha1_update(&ctx, spec_info->pMapEntries,
670
spec_info->mapEntryCount *
671
sizeof(*spec_info->pMapEntries));
672
_mesa_sha1_update(&ctx, spec_info->pData,
673
spec_info->dataSize);
674
}
675
676
_mesa_sha1_final(&ctx, sha1_out);
677
}
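/* The resulting SHA-1 is used both as the lookup key for the NIR cache
* (see anv_pipeline_stage_get_nir) and as an input to the per-pipeline
* hashes computed below.
*/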
678
679
static void
680
anv_pipeline_hash_graphics(struct anv_graphics_pipeline *pipeline,
681
struct anv_pipeline_layout *layout,
682
struct anv_pipeline_stage *stages,
683
unsigned char *sha1_out)
684
{
685
struct mesa_sha1 ctx;
686
_mesa_sha1_init(&ctx);
687
688
_mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
689
sizeof(pipeline->subpass->view_mask));
690
691
if (layout)
692
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
693
694
const bool rba = pipeline->base.device->robust_buffer_access;
695
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
696
697
for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
698
if (stages[s].entrypoint) {
699
_mesa_sha1_update(&ctx, stages[s].shader_sha1,
700
sizeof(stages[s].shader_sha1));
701
_mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
702
}
703
}
704
705
_mesa_sha1_final(&ctx, sha1_out);
706
}
707
708
static void
709
anv_pipeline_hash_compute(struct anv_compute_pipeline *pipeline,
710
struct anv_pipeline_layout *layout,
711
struct anv_pipeline_stage *stage,
712
unsigned char *sha1_out)
713
{
714
struct mesa_sha1 ctx;
715
_mesa_sha1_init(&ctx);
716
717
if (layout)
718
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
719
720
const bool rba = pipeline->base.device->robust_buffer_access;
721
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
722
723
_mesa_sha1_update(&ctx, stage->shader_sha1,
724
sizeof(stage->shader_sha1));
725
_mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));
726
727
_mesa_sha1_final(&ctx, sha1_out);
728
}
729
730
static void
731
anv_pipeline_hash_ray_tracing_shader(struct anv_ray_tracing_pipeline *pipeline,
732
struct anv_pipeline_layout *layout,
733
struct anv_pipeline_stage *stage,
734
unsigned char *sha1_out)
735
{
736
struct mesa_sha1 ctx;
737
_mesa_sha1_init(&ctx);
738
739
if (layout != NULL)
740
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
741
742
const bool rba = pipeline->base.device->robust_buffer_access;
743
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
744
745
_mesa_sha1_update(&ctx, stage->shader_sha1, sizeof(stage->shader_sha1));
746
_mesa_sha1_update(&ctx, &stage->key, sizeof(stage->key.bs));
747
748
_mesa_sha1_final(&ctx, sha1_out);
749
}
750
751
static void
752
anv_pipeline_hash_ray_tracing_combined_shader(struct anv_ray_tracing_pipeline *pipeline,
753
struct anv_pipeline_layout *layout,
754
struct anv_pipeline_stage *intersection,
755
struct anv_pipeline_stage *any_hit,
756
unsigned char *sha1_out)
757
{
758
struct mesa_sha1 ctx;
759
_mesa_sha1_init(&ctx);
760
761
if (layout != NULL)
762
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
763
764
const bool rba = pipeline->base.device->robust_buffer_access;
765
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
766
767
_mesa_sha1_update(&ctx, intersection->shader_sha1, sizeof(intersection->shader_sha1));
768
_mesa_sha1_update(&ctx, &intersection->key, sizeof(intersection->key.bs));
769
_mesa_sha1_update(&ctx, any_hit->shader_sha1, sizeof(any_hit->shader_sha1));
770
_mesa_sha1_update(&ctx, &any_hit->key, sizeof(any_hit->key.bs));
771
772
_mesa_sha1_final(&ctx, sha1_out);
773
}
774
775
static nir_shader *
776
anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
777
struct anv_pipeline_cache *cache,
778
void *mem_ctx,
779
struct anv_pipeline_stage *stage)
780
{
781
const struct brw_compiler *compiler =
782
pipeline->device->physical->compiler;
783
const nir_shader_compiler_options *nir_options =
784
compiler->glsl_compiler_options[stage->stage].NirOptions;
785
nir_shader *nir;
786
787
nir = anv_device_search_for_nir(pipeline->device, cache,
788
nir_options,
789
stage->shader_sha1,
790
mem_ctx);
791
if (nir) {
792
assert(nir->info.stage == stage->stage);
793
return nir;
794
}
795
796
nir = anv_shader_compile_to_nir(pipeline->device,
797
mem_ctx,
798
stage->module,
799
stage->entrypoint,
800
stage->stage,
801
stage->spec_info);
802
if (nir) {
803
anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
804
return nir;
805
}
806
807
return NULL;
808
}
809
810
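/* Size/align callback passed to nir_lower_vars_to_explicit_types for
* shared memory. For example, a 32-bit vec3 reports a size of 12 bytes
* but an alignment of 16 (std430-style), and booleans are counted as 4
* bytes.
*/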
static void
811
shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
812
{
813
assert(glsl_type_is_vector_or_scalar(type));
814
815
uint32_t comp_size = glsl_type_is_boolean(type)
816
? 4 : glsl_get_bit_size(type) / 8;
817
unsigned length = glsl_get_vector_elements(type);
818
*size = comp_size * length;
819
*align = comp_size * (length == 3 ? 4 : length);
820
}
821
822
static void
823
anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
824
void *mem_ctx,
825
struct anv_pipeline_stage *stage,
826
struct anv_pipeline_layout *layout)
827
{
828
const struct anv_physical_device *pdevice = pipeline->device->physical;
829
const struct brw_compiler *compiler = pdevice->compiler;
830
831
struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
832
nir_shader *nir = stage->nir;
833
834
if (nir->info.stage == MESA_SHADER_FRAGMENT) {
835
/* Check whether sample shading is enabled in the shader and, if so,
* toggle it on for the pipeline independently of whether
* sampleShadingEnable is set.
*/
838
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
839
if (nir->info.fs.uses_sample_shading)
840
anv_pipeline_to_graphics(pipeline)->sample_shading_enable = true;
841
842
NIR_PASS_V(nir, nir_lower_wpos_center,
843
anv_pipeline_to_graphics(pipeline)->sample_shading_enable);
844
NIR_PASS_V(nir, nir_lower_input_attachments,
845
&(nir_input_attachment_options) {
846
.use_fragcoord_sysval = true,
847
.use_layer_id_sysval = true,
848
});
849
}
850
851
NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
852
853
if (pipeline->type == ANV_PIPELINE_GRAPHICS) {
854
NIR_PASS_V(nir, anv_nir_lower_multiview,
855
anv_pipeline_to_graphics(pipeline));
856
}
857
858
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
859
860
NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo, NULL);
861
862
NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
863
nir_address_format_64bit_global);
864
NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_push_const,
865
nir_address_format_32bit_offset);
866
867
/* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
868
anv_nir_apply_pipeline_layout(pdevice,
869
pipeline->device->robust_buffer_access,
870
layout, nir, &stage->bind_map);
871
872
NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
873
anv_nir_ubo_addr_format(pdevice,
874
pipeline->device->robust_buffer_access));
875
NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
876
anv_nir_ssbo_addr_format(pdevice,
877
pipeline->device->robust_buffer_access));
878
879
/* First run copy-prop to get rid of all of the vec() that address
880
* calculations often create and then constant-fold so that, when we
881
* get to anv_nir_lower_ubo_loads, we can detect constant offsets.
882
*/
883
NIR_PASS_V(nir, nir_copy_prop);
884
NIR_PASS_V(nir, nir_opt_constant_folding);
885
886
NIR_PASS_V(nir, anv_nir_lower_ubo_loads);
887
888
/* We don't support non-uniform UBOs and non-uniform SSBO access is
889
* handled naturally by falling back to A64 messages.
890
*/
891
NIR_PASS_V(nir, nir_lower_non_uniform_access,
892
&(nir_lower_non_uniform_access_options) {
893
.types = nir_lower_non_uniform_texture_access |
894
nir_lower_non_uniform_image_access,
895
.callback = NULL,
896
});
897
898
anv_nir_compute_push_layout(pdevice, pipeline->device->robust_buffer_access,
899
nir, prog_data, &stage->bind_map, mem_ctx);
900
901
if (gl_shader_stage_uses_workgroup(nir->info.stage)) {
902
if (!nir->info.shared_memory_explicit_layout) {
903
NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
904
nir_var_mem_shared, shared_type_info);
905
}
906
907
NIR_PASS_V(nir, nir_lower_explicit_io,
908
nir_var_mem_shared, nir_address_format_32bit_offset);
909
910
if (nir->info.zero_initialize_shared_memory &&
911
nir->info.shared_size > 0) {
912
/* The effective Shared Local Memory size is at least 1024 bytes and
913
* is always rounded to a power of two, so it is OK to align the size
914
* used by the shader to chunk_size -- which does simplify the logic.
915
*/
916
const unsigned chunk_size = 16;
917
const unsigned shared_size = ALIGN(nir->info.shared_size, chunk_size);
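/* For example, a shader declaring 100 bytes of shared memory is padded
* up to 112 bytes (7 chunks of 16) before being zero-initialized below.
*/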
918
assert(shared_size <=
919
intel_calculate_slm_size(compiler->devinfo->ver, nir->info.shared_size));
920
921
NIR_PASS_V(nir, nir_zero_initialize_shared_memory,
922
shared_size, chunk_size);
923
}
924
}
925
926
stage->nir = nir;
927
}
928
929
static void
930
anv_pipeline_link_vs(const struct brw_compiler *compiler,
931
struct anv_pipeline_stage *vs_stage,
932
struct anv_pipeline_stage *next_stage)
933
{
934
if (next_stage)
935
brw_nir_link_shaders(compiler, vs_stage->nir, next_stage->nir);
936
}
937
938
static void
939
anv_pipeline_compile_vs(const struct brw_compiler *compiler,
940
void *mem_ctx,
941
struct anv_graphics_pipeline *pipeline,
942
struct anv_pipeline_stage *vs_stage)
943
{
944
/* When using Primitive Replication for multiview, each view gets its own
945
* position slot.
946
*/
947
uint32_t pos_slots = pipeline->use_primitive_replication ?
948
anv_subpass_view_count(pipeline->subpass) : 1;
949
950
brw_compute_vue_map(compiler->devinfo,
951
&vs_stage->prog_data.vs.base.vue_map,
952
vs_stage->nir->info.outputs_written,
953
vs_stage->nir->info.separate_shader,
954
pos_slots);
955
956
vs_stage->num_stats = 1;
957
958
struct brw_compile_vs_params params = {
959
.nir = vs_stage->nir,
960
.key = &vs_stage->key.vs,
961
.prog_data = &vs_stage->prog_data.vs,
962
.stats = vs_stage->stats,
963
.log_data = pipeline->base.device,
964
};
965
966
vs_stage->code = brw_compile_vs(compiler, mem_ctx, &params);
967
}
968
969
static void
970
merge_tess_info(struct shader_info *tes_info,
971
const struct shader_info *tcs_info)
972
{
973
/* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
974
*
975
* "PointMode. Controls generation of points rather than triangles
976
* or lines. This functionality defaults to disabled, and is
977
* enabled if either shader stage includes the execution mode.
978
*
979
* and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
980
* PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
981
* and OutputVertices, it says:
982
*
983
* "One mode must be set in at least one of the tessellation
984
* shader stages."
985
*
986
* So, the fields can be set in either the TCS or TES, but they must
987
* agree if set in both. Our backend looks at TES, so bitwise-or in
988
* the values from the TCS.
989
*/
990
assert(tcs_info->tess.tcs_vertices_out == 0 ||
991
tes_info->tess.tcs_vertices_out == 0 ||
992
tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
993
tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
994
995
assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
996
tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
997
tcs_info->tess.spacing == tes_info->tess.spacing);
998
tes_info->tess.spacing |= tcs_info->tess.spacing;
999
1000
assert(tcs_info->tess.primitive_mode == 0 ||
1001
tes_info->tess.primitive_mode == 0 ||
1002
tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
1003
tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
1004
tes_info->tess.ccw |= tcs_info->tess.ccw;
1005
tes_info->tess.point_mode |= tcs_info->tess.point_mode;
1006
}
1007
1008
static void
1009
anv_pipeline_link_tcs(const struct brw_compiler *compiler,
1010
struct anv_pipeline_stage *tcs_stage,
1011
struct anv_pipeline_stage *tes_stage)
1012
{
1013
assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
1014
1015
brw_nir_link_shaders(compiler, tcs_stage->nir, tes_stage->nir);
1016
1017
nir_lower_patch_vertices(tes_stage->nir,
1018
tcs_stage->nir->info.tess.tcs_vertices_out,
1019
NULL);
1020
1021
/* Copy TCS info into the TES info */
1022
merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);
1023
1024
/* Whacking the key after cache lookup is a bit sketchy, but all of
1025
* this comes from the SPIR-V, which is part of the hash used for the
1026
* pipeline cache. So it should be safe.
1027
*/
1028
tcs_stage->key.tcs.tes_primitive_mode =
1029
tes_stage->nir->info.tess.primitive_mode;
1030
tcs_stage->key.tcs.quads_workaround =
1031
compiler->devinfo->ver < 9 &&
1032
tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
1033
tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
1034
}
1035
1036
static void
1037
anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
1038
void *mem_ctx,
1039
struct anv_device *device,
1040
struct anv_pipeline_stage *tcs_stage,
1041
struct anv_pipeline_stage *prev_stage)
1042
{
1043
tcs_stage->key.tcs.outputs_written =
1044
tcs_stage->nir->info.outputs_written;
1045
tcs_stage->key.tcs.patch_outputs_written =
1046
tcs_stage->nir->info.patch_outputs_written;
1047
1048
tcs_stage->num_stats = 1;
1049
tcs_stage->code = brw_compile_tcs(compiler, device, mem_ctx,
1050
&tcs_stage->key.tcs,
1051
&tcs_stage->prog_data.tcs,
1052
tcs_stage->nir, -1,
1053
tcs_stage->stats, NULL);
1054
}
1055
1056
static void
1057
anv_pipeline_link_tes(const struct brw_compiler *compiler,
1058
struct anv_pipeline_stage *tes_stage,
1059
struct anv_pipeline_stage *next_stage)
1060
{
1061
if (next_stage)
1062
brw_nir_link_shaders(compiler, tes_stage->nir, next_stage->nir);
1063
}
1064
1065
static void
1066
anv_pipeline_compile_tes(const struct brw_compiler *compiler,
1067
void *mem_ctx,
1068
struct anv_device *device,
1069
struct anv_pipeline_stage *tes_stage,
1070
struct anv_pipeline_stage *tcs_stage)
1071
{
1072
tes_stage->key.tes.inputs_read =
1073
tcs_stage->nir->info.outputs_written;
1074
tes_stage->key.tes.patch_inputs_read =
1075
tcs_stage->nir->info.patch_outputs_written;
1076
1077
tes_stage->num_stats = 1;
1078
tes_stage->code = brw_compile_tes(compiler, device, mem_ctx,
1079
&tes_stage->key.tes,
1080
&tcs_stage->prog_data.tcs.base.vue_map,
1081
&tes_stage->prog_data.tes,
1082
tes_stage->nir, -1,
1083
tes_stage->stats, NULL);
1084
}
1085
1086
static void
1087
anv_pipeline_link_gs(const struct brw_compiler *compiler,
1088
struct anv_pipeline_stage *gs_stage,
1089
struct anv_pipeline_stage *next_stage)
1090
{
1091
if (next_stage)
1092
brw_nir_link_shaders(compiler, gs_stage->nir, next_stage->nir);
1093
}
1094
1095
static void
1096
anv_pipeline_compile_gs(const struct brw_compiler *compiler,
1097
void *mem_ctx,
1098
struct anv_device *device,
1099
struct anv_pipeline_stage *gs_stage,
1100
struct anv_pipeline_stage *prev_stage)
1101
{
1102
brw_compute_vue_map(compiler->devinfo,
1103
&gs_stage->prog_data.gs.base.vue_map,
1104
gs_stage->nir->info.outputs_written,
1105
gs_stage->nir->info.separate_shader, 1);
1106
1107
gs_stage->num_stats = 1;
1108
gs_stage->code = brw_compile_gs(compiler, device, mem_ctx,
1109
&gs_stage->key.gs,
1110
&gs_stage->prog_data.gs,
1111
gs_stage->nir, -1,
1112
gs_stage->stats, NULL);
1113
}
1114
1115
static void
1116
anv_pipeline_link_fs(const struct brw_compiler *compiler,
1117
struct anv_pipeline_stage *stage)
1118
{
1119
unsigned num_rt_bindings;
1120
struct anv_pipeline_binding rt_bindings[MAX_RTS];
1121
if (stage->key.wm.nr_color_regions > 0) {
1122
assert(stage->key.wm.nr_color_regions <= MAX_RTS);
1123
for (unsigned rt = 0; rt < stage->key.wm.nr_color_regions; rt++) {
1124
if (stage->key.wm.color_outputs_valid & BITFIELD_BIT(rt)) {
1125
rt_bindings[rt] = (struct anv_pipeline_binding) {
1126
.set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
1127
.index = rt,
1128
};
1129
} else {
1130
/* Setup a null render target */
1131
rt_bindings[rt] = (struct anv_pipeline_binding) {
1132
.set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
1133
.index = UINT32_MAX,
1134
};
1135
}
1136
}
1137
num_rt_bindings = stage->key.wm.nr_color_regions;
1138
} else {
1139
/* Setup a null render target */
1140
rt_bindings[0] = (struct anv_pipeline_binding) {
1141
.set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
1142
.index = UINT32_MAX,
1143
};
1144
num_rt_bindings = 1;
1145
}
1146
1147
assert(num_rt_bindings <= MAX_RTS);
1148
assert(stage->bind_map.surface_count == 0);
1149
typed_memcpy(stage->bind_map.surface_to_descriptor,
1150
rt_bindings, num_rt_bindings);
1151
stage->bind_map.surface_count += num_rt_bindings;
1152
1153
/* Now that we've set up the color attachments, we can go through and
1154
* eliminate any shader outputs that map to VK_ATTACHMENT_UNUSED in the
1155
* hopes that dead code can clean them up in this and any earlier shader
1156
* stages.
1157
*/
1158
nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
1159
bool deleted_output = false;
1160
nir_foreach_shader_out_variable_safe(var, stage->nir) {
1161
/* TODO: We don't delete depth/stencil writes. We probably could if the
1162
* subpass doesn't have a depth/stencil attachment.
1163
*/
1164
if (var->data.location < FRAG_RESULT_DATA0)
1165
continue;
1166
1167
const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
1168
1169
/* If this is the RT at location 0 and we have alpha to coverage
1170
* enabled we still need that write because it will affect the coverage
1171
* mask even if it's never written to a color target.
1172
*/
1173
if (rt == 0 && stage->key.wm.alpha_to_coverage)
1174
continue;
1175
1176
const unsigned array_len =
1177
glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
1178
assert(rt + array_len <= MAX_RTS);
1179
1180
if (rt >= MAX_RTS || !(stage->key.wm.color_outputs_valid &
1181
BITFIELD_RANGE(rt, array_len))) {
1182
deleted_output = true;
1183
var->data.mode = nir_var_function_temp;
1184
exec_node_remove(&var->node);
1185
exec_list_push_tail(&impl->locals, &var->node);
1186
}
1187
}
1188
1189
if (deleted_output)
1190
nir_fixup_deref_modes(stage->nir);
1191
1192
/* We stored the number of subpass color attachments in nr_color_regions
1193
* when calculating the key for caching. Now that we've computed the bind
1194
* map, we can reduce this to the actual max before we go into the back-end
1195
* compiler.
1196
*/
1197
stage->key.wm.nr_color_regions =
1198
util_last_bit(stage->key.wm.color_outputs_valid);
1199
}
1200
1201
static void
1202
anv_pipeline_compile_fs(const struct brw_compiler *compiler,
1203
void *mem_ctx,
1204
struct anv_device *device,
1205
struct anv_pipeline_stage *fs_stage,
1206
struct anv_pipeline_stage *prev_stage)
1207
{
1208
/* TODO: we could set this to 0 based on the information in nir_shader, but
1209
* we need this before we call spirv_to_nir.
1210
*/
1211
assert(prev_stage);
1212
fs_stage->key.wm.input_slots_valid =
1213
prev_stage->prog_data.vue.vue_map.slots_valid;
1214
1215
struct brw_compile_fs_params params = {
1216
.nir = fs_stage->nir,
1217
.key = &fs_stage->key.wm,
1218
.prog_data = &fs_stage->prog_data.wm,
1219
1220
.allow_spilling = true,
1221
.stats = fs_stage->stats,
1222
.log_data = device,
1223
};
1224
1225
fs_stage->code = brw_compile_fs(compiler, mem_ctx, &params);
1226
1227
fs_stage->num_stats = (uint32_t)fs_stage->prog_data.wm.dispatch_8 +
1228
(uint32_t)fs_stage->prog_data.wm.dispatch_16 +
1229
(uint32_t)fs_stage->prog_data.wm.dispatch_32;
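/* One brw_compile_stats entry is produced per compiled SIMD width, e.g.
* a shader with SIMD8 and SIMD16 variants but no SIMD32 variant yields
* num_stats == 2.
*/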
1230
1231
if (fs_stage->key.wm.color_outputs_valid == 0 &&
1232
!fs_stage->prog_data.wm.has_side_effects &&
1233
!fs_stage->prog_data.wm.uses_omask &&
1234
!fs_stage->key.wm.alpha_to_coverage &&
1235
!fs_stage->prog_data.wm.uses_kill &&
1236
fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
1237
!fs_stage->prog_data.wm.computed_stencil) {
1238
/* This fragment shader has no outputs and no side effects. Go ahead
* and return the code pointer so we don't accidentally think the
* compile failed, but zero out prog_data, which will set program_size
* to zero and disable the stage.
*/
1243
memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
1244
}
1245
}
1246
1247
static void
1248
anv_pipeline_add_executable(struct anv_pipeline *pipeline,
1249
struct anv_pipeline_stage *stage,
1250
struct brw_compile_stats *stats,
1251
uint32_t code_offset)
1252
{
1253
char *nir = NULL;
1254
if (stage->nir &&
1255
(pipeline->flags &
1256
VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
1257
nir = nir_shader_as_str(stage->nir, pipeline->mem_ctx);
1258
}
1259
1260
char *disasm = NULL;
1261
if (stage->code &&
1262
(pipeline->flags &
1263
VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
1264
char *stream_data = NULL;
1265
size_t stream_size = 0;
1266
FILE *stream = open_memstream(&stream_data, &stream_size);
1267
1268
uint32_t push_size = 0;
1269
for (unsigned i = 0; i < 4; i++)
1270
push_size += stage->bind_map.push_ranges[i].length;
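/* Push range starts and lengths are counted in 32-byte units, hence the
* "* 32" conversions when printing byte sizes below.
*/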
1271
if (push_size > 0) {
1272
fprintf(stream, "Push constant ranges:\n");
1273
for (unsigned i = 0; i < 4; i++) {
1274
if (stage->bind_map.push_ranges[i].length == 0)
1275
continue;
1276
1277
fprintf(stream, " RANGE%d (%dB): ", i,
1278
stage->bind_map.push_ranges[i].length * 32);
1279
1280
switch (stage->bind_map.push_ranges[i].set) {
1281
case ANV_DESCRIPTOR_SET_NULL:
1282
fprintf(stream, "NULL");
1283
break;
1284
1285
case ANV_DESCRIPTOR_SET_PUSH_CONSTANTS:
1286
fprintf(stream, "Vulkan push constants and API params");
1287
break;
1288
1289
case ANV_DESCRIPTOR_SET_DESCRIPTORS:
1290
fprintf(stream, "Descriptor buffer for set %d (start=%dB)",
1291
stage->bind_map.push_ranges[i].index,
1292
stage->bind_map.push_ranges[i].start * 32);
1293
break;
1294
1295
case ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS:
1296
unreachable("gl_NumWorkgroups is never pushed");
1297
1298
case ANV_DESCRIPTOR_SET_SHADER_CONSTANTS:
1299
fprintf(stream, "Inline shader constant data (start=%dB)",
1300
stage->bind_map.push_ranges[i].start * 32);
1301
break;
1302
1303
case ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS:
1304
unreachable("Color attachments can't be pushed");
1305
1306
default:
1307
fprintf(stream, "UBO (set=%d binding=%d start=%dB)",
1308
stage->bind_map.push_ranges[i].set,
1309
stage->bind_map.push_ranges[i].index,
1310
stage->bind_map.push_ranges[i].start * 32);
1311
break;
1312
}
1313
fprintf(stream, "\n");
1314
}
1315
fprintf(stream, "\n");
1316
}
1317
1318
/* Creating this is far cheaper than it looks. It's perfectly fine to
1319
* do it for every binary.
1320
*/
1321
intel_disassemble(&pipeline->device->info,
1322
stage->code, code_offset, stream);
1323
1324
fclose(stream);
1325
1326
/* Copy it to a ralloc'd thing */
1327
disasm = ralloc_size(pipeline->mem_ctx, stream_size + 1);
1328
memcpy(disasm, stream_data, stream_size);
1329
disasm[stream_size] = 0;
1330
1331
free(stream_data);
1332
}
1333
1334
const struct anv_pipeline_executable exe = {
1335
.stage = stage->stage,
1336
.stats = *stats,
1337
.nir = nir,
1338
.disasm = disasm,
1339
};
1340
util_dynarray_append(&pipeline->executables,
1341
struct anv_pipeline_executable, exe);
1342
}
1343
1344
static void
1345
anv_pipeline_add_executables(struct anv_pipeline *pipeline,
1346
struct anv_pipeline_stage *stage,
1347
struct anv_shader_bin *bin)
1348
{
1349
if (stage->stage == MESA_SHADER_FRAGMENT) {
1350
/* We pull the prog data and stats out of the anv_shader_bin because
1351
* the anv_pipeline_stage may not be fully populated if we successfully
1352
* looked up the shader in a cache.
1353
*/
1354
const struct brw_wm_prog_data *wm_prog_data =
1355
(const struct brw_wm_prog_data *)bin->prog_data;
1356
struct brw_compile_stats *stats = bin->stats;
1357
1358
if (wm_prog_data->dispatch_8) {
1359
anv_pipeline_add_executable(pipeline, stage, stats++, 0);
1360
}
1361
1362
if (wm_prog_data->dispatch_16) {
1363
anv_pipeline_add_executable(pipeline, stage, stats++,
1364
wm_prog_data->prog_offset_16);
1365
}
1366
1367
if (wm_prog_data->dispatch_32) {
1368
anv_pipeline_add_executable(pipeline, stage, stats++,
1369
wm_prog_data->prog_offset_32);
1370
}
1371
} else {
1372
anv_pipeline_add_executable(pipeline, stage, bin->stats, 0);
1373
}
1374
}
1375
1376
static void
1377
anv_pipeline_init_from_cached_graphics(struct anv_graphics_pipeline *pipeline)
1378
{
1379
/* TODO: Cache this pipeline-wide information. */
1380
1381
/* Primitive replication depends on information from all the shaders.
1382
* Recover this bit from the fact that we have more than one position slot
1383
* in the vertex shader when using it.
1384
*/
1385
assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
1386
int pos_slots = 0;
1387
const struct brw_vue_prog_data *vue_prog_data =
1388
(const void *) pipeline->shaders[MESA_SHADER_VERTEX]->prog_data;
1389
const struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1390
for (int i = 0; i < vue_map->num_slots; i++) {
1391
if (vue_map->slot_to_varying[i] == VARYING_SLOT_POS)
1392
pos_slots++;
1393
}
1394
pipeline->use_primitive_replication = pos_slots > 1;
1395
}
1396
1397
static VkResult
1398
anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
1399
struct anv_pipeline_cache *cache,
1400
const VkGraphicsPipelineCreateInfo *info)
1401
{
1402
VkPipelineCreationFeedbackEXT pipeline_feedback = {
1403
.flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1404
};
1405
int64_t pipeline_start = os_time_get_nano();
1406
1407
const struct brw_compiler *compiler = pipeline->base.device->physical->compiler;
1408
struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
1409
1410
pipeline->active_stages = 0;
1411
1412
/* Information on which states are considered dynamic. */
1413
const VkPipelineDynamicStateCreateInfo *dyn_info =
1414
info->pDynamicState;
1415
uint32_t dynamic_states = 0;
1416
if (dyn_info) {
1417
for (unsigned i = 0; i < dyn_info->dynamicStateCount; i++)
1418
dynamic_states |=
1419
anv_cmd_dirty_bit_for_vk_dynamic_state(dyn_info->pDynamicStates[i]);
1420
}
1421
1422
VkResult result;
1423
for (uint32_t i = 0; i < info->stageCount; i++) {
1424
const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
1425
gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
1426
1427
pipeline->active_stages |= sinfo->stage;
1428
1429
int64_t stage_start = os_time_get_nano();
1430
1431
stages[stage].stage = stage;
1432
stages[stage].module = vk_shader_module_from_handle(sinfo->module);
1433
stages[stage].entrypoint = sinfo->pName;
1434
stages[stage].spec_info = sinfo->pSpecializationInfo;
1435
anv_pipeline_hash_shader(stages[stage].module,
1436
stages[stage].entrypoint,
1437
stage,
1438
stages[stage].spec_info,
1439
stages[stage].shader_sha1);
1440
1441
const struct intel_device_info *devinfo = &pipeline->base.device->info;
1442
switch (stage) {
1443
case MESA_SHADER_VERTEX:
1444
populate_vs_prog_key(devinfo, sinfo->flags,
1445
pipeline->base.device->robust_buffer_access,
1446
&stages[stage].key.vs);
1447
break;
1448
case MESA_SHADER_TESS_CTRL:
1449
populate_tcs_prog_key(devinfo, sinfo->flags,
1450
pipeline->base.device->robust_buffer_access,
1451
info->pTessellationState->patchControlPoints,
1452
&stages[stage].key.tcs);
1453
break;
1454
case MESA_SHADER_TESS_EVAL:
1455
populate_tes_prog_key(devinfo, sinfo->flags,
1456
pipeline->base.device->robust_buffer_access,
1457
&stages[stage].key.tes);
1458
break;
1459
case MESA_SHADER_GEOMETRY:
1460
populate_gs_prog_key(devinfo, sinfo->flags,
1461
pipeline->base.device->robust_buffer_access,
1462
&stages[stage].key.gs);
1463
break;
1464
case MESA_SHADER_FRAGMENT: {
1465
const bool raster_enabled =
1466
!info->pRasterizationState->rasterizerDiscardEnable ||
1467
dynamic_states & ANV_CMD_DIRTY_DYNAMIC_RASTERIZER_DISCARD_ENABLE;
1468
populate_wm_prog_key(pipeline, sinfo->flags,
1469
pipeline->base.device->robust_buffer_access,
1470
pipeline->subpass,
1471
raster_enabled ? info->pMultisampleState : NULL,
1472
vk_find_struct_const(info->pNext,
1473
PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR),
1474
&stages[stage].key.wm);
1475
break;
1476
}
1477
default:
1478
unreachable("Invalid graphics shader stage");
1479
}
1480
1481
stages[stage].feedback.duration += os_time_get_nano() - stage_start;
1482
stages[stage].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
1483
}
1484
1485
if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
1486
pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
1487
1488
assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
1489
1490
ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1491
1492
unsigned char sha1[20];
1493
anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
1494
1495
for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
1496
if (!stages[s].entrypoint)
1497
continue;
1498
1499
stages[s].cache_key.stage = s;
1500
memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
1501
}
1502
1503
const bool skip_cache_lookup =
1504
(pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
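/* Capturing internal representations (NIR and disassembly) requires an
* actual compile, so a cache hit would defeat the purpose; hence the
* cache bypass when that create flag is set.
*/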
1505
1506
if (!skip_cache_lookup) {
1507
unsigned found = 0;
1508
unsigned cache_hits = 0;
1509
for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
1510
if (!stages[s].entrypoint)
1511
continue;
1512
1513
int64_t stage_start = os_time_get_nano();
1514
1515
bool cache_hit;
1516
struct anv_shader_bin *bin =
1517
anv_device_search_for_kernel(pipeline->base.device, cache,
1518
&stages[s].cache_key,
1519
sizeof(stages[s].cache_key), &cache_hit);
1520
if (bin) {
1521
found++;
1522
pipeline->shaders[s] = bin;
1523
}
1524
1525
if (cache_hit) {
1526
cache_hits++;
1527
stages[s].feedback.flags |=
1528
VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1529
}
1530
stages[s].feedback.duration += os_time_get_nano() - stage_start;
1531
}
1532
1533
if (found == __builtin_popcount(pipeline->active_stages)) {
1534
if (cache_hits == found) {
1535
pipeline_feedback.flags |=
1536
VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1537
}
1538
/* We found all our shaders in the cache. We're done. */
1539
for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
1540
if (!stages[s].entrypoint)
1541
continue;
1542
1543
anv_pipeline_add_executables(&pipeline->base, &stages[s],
1544
pipeline->shaders[s]);
1545
}
1546
anv_pipeline_init_from_cached_graphics(pipeline);
1547
goto done;
1548
} else if (found > 0) {
1549
/* We found some but not all of our shaders. This shouldn't happen
1550
* most of the time but it can if we have a partially populated
1551
* pipeline cache.
1552
*/
1553
assert(found < __builtin_popcount(pipeline->active_stages));
1554
1555
vk_debug_report(&pipeline->base.device->physical->instance->vk,
1556
VK_DEBUG_REPORT_WARNING_BIT_EXT |
1557
VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
1558
&cache->base, 0, 0, "anv",
1559
"Found a partial pipeline in the cache. This is "
1560
"most likely caused by an incomplete pipeline cache "
1561
"import or export");
1562
1563
/* We're going to have to recompile anyway, so just throw away our
1564
* references to the shaders in the cache. We'll get them out of the
1565
* cache again as part of the compilation process.
1566
*/
1567
for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
1568
stages[s].feedback.flags = 0;
1569
if (pipeline->shaders[s]) {
1570
anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
1571
pipeline->shaders[s] = NULL;
1572
}
1573
}
1574
}
1575
}
1576
1577
if (info->flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT)
1578
return VK_PIPELINE_COMPILE_REQUIRED_EXT;
1579
1580
void *pipeline_ctx = ralloc_context(NULL);
1581
1582
for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
1583
if (!stages[s].entrypoint)
1584
continue;
1585
1586
int64_t stage_start = os_time_get_nano();
1587
1588
assert(stages[s].stage == s);
1589
assert(pipeline->shaders[s] == NULL);
1590
1591
stages[s].bind_map = (struct anv_pipeline_bind_map) {
1592
.surface_to_descriptor = stages[s].surface_to_descriptor,
1593
.sampler_to_descriptor = stages[s].sampler_to_descriptor
1594
};
1595
1596
stages[s].nir = anv_pipeline_stage_get_nir(&pipeline->base, cache,
1597
pipeline_ctx,
1598
&stages[s]);
1599
if (stages[s].nir == NULL) {
1600
result = vk_error(VK_ERROR_UNKNOWN);
1601
goto fail;
1602
}
1603
1604
/* This is rather ugly.
*
* Any variable annotated as interpolated by sample essentially disables
* coarse pixel shading. Unfortunately the CTS tests exercising this set
* the varying value in the previous stage using a constant. Our NIR
* infrastructure is clever enough to look up variables across stages and
* constant fold, removing the variable. So in order to comply with the
* CTS we have to check the variables here.
*/
1613
      if (s == MESA_SHADER_FRAGMENT) {
         nir_foreach_variable_in_list(var, &stages[s].nir->variables) {
            if (var->data.sample) {
               stages[s].key.wm.coarse_pixel = false;
               break;
            }
         }
      }

      stages[s].feedback.duration += os_time_get_nano() - stage_start;
   }

   /* Walk backwards to link */
   struct anv_pipeline_stage *next_stage = NULL;
   for (int s = ARRAY_SIZE(pipeline->shaders) - 1; s >= 0; s--) {
      if (!stages[s].entrypoint)
         continue;

      switch (s) {
      case MESA_SHADER_VERTEX:
         anv_pipeline_link_vs(compiler, &stages[s], next_stage);
         break;
      case MESA_SHADER_TESS_CTRL:
         anv_pipeline_link_tcs(compiler, &stages[s], next_stage);
         break;
      case MESA_SHADER_TESS_EVAL:
         anv_pipeline_link_tes(compiler, &stages[s], next_stage);
         break;
      case MESA_SHADER_GEOMETRY:
         anv_pipeline_link_gs(compiler, &stages[s], next_stage);
         break;
      case MESA_SHADER_FRAGMENT:
         anv_pipeline_link_fs(compiler, &stages[s]);
         break;
      default:
         unreachable("Invalid graphics shader stage");
      }

      next_stage = &stages[s];
   }

   if (pipeline->base.device->info.ver >= 12 &&
       pipeline->subpass->view_mask != 0) {
      /* For some pipelines HW Primitive Replication can be used instead of
       * instancing to implement Multiview. This depends on how viewIndex is
       * used in all the active shaders, so this check can't be done per
       * individual shader.
       */
      nir_shader *shaders[MESA_SHADER_STAGES] = {};
      for (unsigned s = 0; s < MESA_SHADER_STAGES; s++)
         shaders[s] = stages[s].nir;

      pipeline->use_primitive_replication =
         anv_check_for_primitive_replication(shaders, pipeline);
   } else {
      pipeline->use_primitive_replication = false;
   }

   struct anv_pipeline_stage *prev_stage = NULL;
   for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
      if (!stages[s].entrypoint)
         continue;

      int64_t stage_start = os_time_get_nano();

      void *stage_ctx = ralloc_context(NULL);

      anv_pipeline_lower_nir(&pipeline->base, stage_ctx, &stages[s], layout);

      if (prev_stage && compiler->glsl_compiler_options[s].NirOptions->unify_interfaces) {
         prev_stage->nir->info.outputs_written |= stages[s].nir->info.inputs_read &
                  ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
         stages[s].nir->info.inputs_read |= prev_stage->nir->info.outputs_written &
                  ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
         prev_stage->nir->info.patch_outputs_written |= stages[s].nir->info.patch_inputs_read;
         stages[s].nir->info.patch_inputs_read |= prev_stage->nir->info.patch_outputs_written;
      }

      ralloc_free(stage_ctx);

      stages[s].feedback.duration += os_time_get_nano() - stage_start;

      prev_stage = &stages[s];
   }

   prev_stage = NULL;
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (!stages[s].entrypoint)
         continue;

      int64_t stage_start = os_time_get_nano();

      void *stage_ctx = ralloc_context(NULL);

      nir_xfb_info *xfb_info = NULL;
      if (s == MESA_SHADER_VERTEX ||
          s == MESA_SHADER_TESS_EVAL ||
          s == MESA_SHADER_GEOMETRY)
         xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);

      switch (s) {
      case MESA_SHADER_VERTEX:
         anv_pipeline_compile_vs(compiler, stage_ctx, pipeline,
                                 &stages[s]);
         break;
      case MESA_SHADER_TESS_CTRL:
         anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->base.device,
                                  &stages[s], prev_stage);
         break;
      case MESA_SHADER_TESS_EVAL:
         anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->base.device,
                                  &stages[s], prev_stage);
         break;
      case MESA_SHADER_GEOMETRY:
         anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->base.device,
                                 &stages[s], prev_stage);
         break;
      case MESA_SHADER_FRAGMENT:
         anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->base.device,
                                 &stages[s], prev_stage);
         break;
      default:
         unreachable("Invalid graphics shader stage");
      }
      if (stages[s].code == NULL) {
         ralloc_free(stage_ctx);
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         goto fail;
      }

      anv_nir_validate_push_layout(&stages[s].prog_data.base,
                                   &stages[s].bind_map);

      struct anv_shader_bin *bin =
         anv_device_upload_kernel(pipeline->base.device, cache, s,
                                  &stages[s].cache_key,
                                  sizeof(stages[s].cache_key),
                                  stages[s].code,
                                  stages[s].prog_data.base.program_size,
                                  &stages[s].prog_data.base,
                                  brw_prog_data_size(s),
                                  stages[s].stats, stages[s].num_stats,
                                  xfb_info, &stages[s].bind_map);
      if (!bin) {
         ralloc_free(stage_ctx);
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         goto fail;
      }

      anv_pipeline_add_executables(&pipeline->base, &stages[s], bin);

      pipeline->shaders[s] = bin;
      ralloc_free(stage_ctx);

      stages[s].feedback.duration += os_time_get_nano() - stage_start;

      prev_stage = &stages[s];
   }

   ralloc_free(pipeline_ctx);

done:

   if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
       pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
      /* This can happen if we decided to implicitly disable the fragment
       * shader. See anv_pipeline_compile_fs().
       */
      anv_shader_bin_unref(pipeline->base.device,
                           pipeline->shaders[MESA_SHADER_FRAGMENT]);
      pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
      pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
   }

   pipeline_feedback.duration = os_time_get_nano() - pipeline_start;

   const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
      vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
   if (create_feedback) {
      *create_feedback->pPipelineCreationFeedback = pipeline_feedback;

      assert(info->stageCount == create_feedback->pipelineStageCreationFeedbackCount);
      for (uint32_t i = 0; i < info->stageCount; i++) {
         gl_shader_stage s = vk_to_mesa_shader_stage(info->pStages[i].stage);
         create_feedback->pPipelineStageCreationFeedbacks[i] = stages[s].feedback;
      }
   }

   return VK_SUCCESS;

fail:
   ralloc_free(pipeline_ctx);

   for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
   }

   return result;
}

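/* Compile the compute shader for a compute pipeline, or pull it from the
 * pipeline cache when a matching binary already exists, recording pipeline
 * creation feedback along the way.
 */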
VkResult
anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        const struct vk_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   VkPipelineCreationFeedbackEXT pipeline_feedback = {
      .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
   };
   int64_t pipeline_start = os_time_get_nano();

   const struct brw_compiler *compiler = pipeline->base.device->physical->compiler;

   struct anv_pipeline_stage stage = {
      .stage = MESA_SHADER_COMPUTE,
      .module = module,
      .entrypoint = entrypoint,
      .spec_info = spec_info,
      .cache_key = {
         .stage = MESA_SHADER_COMPUTE,
      },
      .feedback = {
         .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
      },
   };
   anv_pipeline_hash_shader(stage.module,
                            stage.entrypoint,
                            MESA_SHADER_COMPUTE,
                            stage.spec_info,
                            stage.shader_sha1);

   struct anv_shader_bin *bin = NULL;

   const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info =
      vk_find_struct_const(info->stage.pNext,
                           PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);

   populate_cs_prog_key(&pipeline->base.device->info, info->stage.flags,
                        pipeline->base.device->robust_buffer_access,
                        rss_info, &stage.key.cs);

   ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);

   const bool skip_cache_lookup =
      (pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);

   anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);

   bool cache_hit = false;
   if (!skip_cache_lookup) {
      bin = anv_device_search_for_kernel(pipeline->base.device, cache,
                                         &stage.cache_key,
                                         sizeof(stage.cache_key),
                                         &cache_hit);
   }

   if (bin == NULL &&
       (info->flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT))
      return VK_PIPELINE_COMPILE_REQUIRED_EXT;

   void *mem_ctx = ralloc_context(NULL);
   if (bin == NULL) {
      int64_t stage_start = os_time_get_nano();

      stage.bind_map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = stage.surface_to_descriptor,
         .sampler_to_descriptor = stage.sampler_to_descriptor
      };

      /* Set up a binding for the gl_NumWorkGroups */
      stage.bind_map.surface_count = 1;
      stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
         .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
      };

      stage.nir = anv_pipeline_stage_get_nir(&pipeline->base, cache, mem_ctx, &stage);
      if (stage.nir == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_UNKNOWN);
      }

      NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id);

      anv_pipeline_lower_nir(&pipeline->base, mem_ctx, &stage, layout);

      NIR_PASS_V(stage.nir, brw_nir_lower_cs_intrinsics);

      stage.num_stats = 1;

      struct brw_compile_cs_params params = {
         .nir = stage.nir,
         .key = &stage.key.cs,
         .prog_data = &stage.prog_data.cs,
         .stats = stage.stats,
         .log_data = pipeline->base.device,
      };

      stage.code = brw_compile_cs(compiler, mem_ctx, &params);
      if (stage.code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      anv_nir_validate_push_layout(&stage.prog_data.base, &stage.bind_map);

      if (!stage.prog_data.cs.uses_num_work_groups) {
         assert(stage.bind_map.surface_to_descriptor[0].set ==
                ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS);
         stage.bind_map.surface_to_descriptor[0].set = ANV_DESCRIPTOR_SET_NULL;
      }

      const unsigned code_size = stage.prog_data.base.program_size;
      bin = anv_device_upload_kernel(pipeline->base.device, cache,
                                     MESA_SHADER_COMPUTE,
                                     &stage.cache_key, sizeof(stage.cache_key),
                                     stage.code, code_size,
                                     &stage.prog_data.base,
                                     sizeof(stage.prog_data.cs),
                                     stage.stats, stage.num_stats,
                                     NULL, &stage.bind_map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      stage.feedback.duration = os_time_get_nano() - stage_start;
   }

   anv_pipeline_add_executables(&pipeline->base, &stage, bin);

   ralloc_free(mem_ctx);

   if (cache_hit) {
      stage.feedback.flags |=
         VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
      pipeline_feedback.flags |=
         VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
   }
   pipeline_feedback.duration = os_time_get_nano() - pipeline_start;

   const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
      vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
   if (create_feedback) {
      *create_feedback->pPipelineCreationFeedback = pipeline_feedback;

      assert(create_feedback->pipelineStageCreationFeedbackCount == 1);
      create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
   }

   pipeline->cs = bin;

   return VK_SUCCESS;
}

/**
 * Copy pipeline state not marked as dynamic.
 * Dynamic state is pipeline state which hasn't been provided at pipeline
 * creation time, but is dynamically provided afterwards using various
 * vkCmdSet* functions.
 *
 * The set of state considered "non_dynamic" is determined by the pieces of
 * state that have their corresponding VkDynamicState enums omitted from
 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
 *
 * @param[out] pipeline    Destination non_dynamic state.
 * @param[in]  pCreateInfo Source of non_dynamic state to be copied.
 */
static void
copy_non_dynamic_state(struct anv_graphics_pipeline *pipeline,
                       const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
   anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
   struct anv_subpass *subpass = pipeline->subpass;

   pipeline->dynamic_state = default_dynamic_state;

   states &= ~pipeline->dynamic_states;

   struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;

   bool raster_discard =
      pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
      !(pipeline->dynamic_states & ANV_CMD_DIRTY_DYNAMIC_RASTERIZER_DISCARD_ENABLE);

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pViewportState is [...] NULL if the pipeline
    *    has rasterization disabled.
    */
   if (!raster_discard) {
      assert(pCreateInfo->pViewportState);

      dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
      if (states & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
         typed_memcpy(dynamic->viewport.viewports,
                      pCreateInfo->pViewportState->pViewports,
                      pCreateInfo->pViewportState->viewportCount);
      }

      dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
      if (states & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
         typed_memcpy(dynamic->scissor.scissors,
                      pCreateInfo->pViewportState->pScissors,
                      pCreateInfo->pViewportState->scissorCount);
      }
   }

   if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
   }

   if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->depth_bias.bias =
         pCreateInfo->pRasterizationState->depthBiasConstantFactor;
      dynamic->depth_bias.clamp =
         pCreateInfo->pRasterizationState->depthBiasClamp;
      dynamic->depth_bias.slope =
         pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
   }

   if (states & ANV_CMD_DIRTY_DYNAMIC_CULL_MODE) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->cull_mode =
         pCreateInfo->pRasterizationState->cullMode;
   }

   if (states & ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->front_face =
         pCreateInfo->pRasterizationState->frontFace;
   }

   if (states & ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY) {
      assert(pCreateInfo->pInputAssemblyState);
      dynamic->primitive_topology = pCreateInfo->pInputAssemblyState->topology;
   }

   if (states & ANV_CMD_DIRTY_DYNAMIC_RASTERIZER_DISCARD_ENABLE) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->raster_discard =
         pCreateInfo->pRasterizationState->rasterizerDiscardEnable;
   }

   if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS_ENABLE) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->depth_bias_enable =
         pCreateInfo->pRasterizationState->depthBiasEnable;
   }

   if (states & ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_RESTART_ENABLE) {
      assert(pCreateInfo->pInputAssemblyState);
      dynamic->primitive_restart_enable =
         pCreateInfo->pInputAssemblyState->primitiveRestartEnable;
   }

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pColorBlendState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is
    *    created against does not use any color attachments.
    */
   bool uses_color_att = false;
   for (unsigned i = 0; i < subpass->color_count; ++i) {
      if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
         uses_color_att = true;
         break;
      }
   }

   if (uses_color_att && !raster_discard) {
      assert(pCreateInfo->pColorBlendState);

      if (states & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
         typed_memcpy(dynamic->blend_constants,
                      pCreateInfo->pColorBlendState->blendConstants, 4);
   }

   /* If there is no depthstencil attachment, then don't read
    * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
    * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
    * no need to override the depthstencil defaults in
    * anv_pipeline::dynamic_state when there is no depthstencil attachment.
    *
    * Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pDepthStencilState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is created
    *    against does not use a depth/stencil attachment.
    */
   if (!raster_discard && subpass->depth_stencil_attachment) {
      assert(pCreateInfo->pDepthStencilState);

      if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS) {
         dynamic->depth_bounds.min =
            pCreateInfo->pDepthStencilState->minDepthBounds;
         dynamic->depth_bounds.max =
            pCreateInfo->pDepthStencilState->maxDepthBounds;
      }

      if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) {
         dynamic->stencil_compare_mask.front =
            pCreateInfo->pDepthStencilState->front.compareMask;
         dynamic->stencil_compare_mask.back =
            pCreateInfo->pDepthStencilState->back.compareMask;
      }

      if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) {
         dynamic->stencil_write_mask.front =
            pCreateInfo->pDepthStencilState->front.writeMask;
         dynamic->stencil_write_mask.back =
            pCreateInfo->pDepthStencilState->back.writeMask;
      }

      if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) {
         dynamic->stencil_reference.front =
            pCreateInfo->pDepthStencilState->front.reference;
         dynamic->stencil_reference.back =
            pCreateInfo->pDepthStencilState->back.reference;
      }

      if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE) {
         dynamic->depth_test_enable =
            pCreateInfo->pDepthStencilState->depthTestEnable;
      }

      if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE) {
         dynamic->depth_write_enable =
            pCreateInfo->pDepthStencilState->depthWriteEnable;
      }

      if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP) {
         dynamic->depth_compare_op =
            pCreateInfo->pDepthStencilState->depthCompareOp;
      }

      if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE) {
         dynamic->depth_bounds_test_enable =
            pCreateInfo->pDepthStencilState->depthBoundsTestEnable;
      }

      if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE) {
         dynamic->stencil_test_enable =
            pCreateInfo->pDepthStencilState->stencilTestEnable;
      }

      if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP) {
         const VkPipelineDepthStencilStateCreateInfo *info =
            pCreateInfo->pDepthStencilState;
         memcpy(&dynamic->stencil_op.front, &info->front,
                sizeof(dynamic->stencil_op.front));
         memcpy(&dynamic->stencil_op.back, &info->back,
                sizeof(dynamic->stencil_op.back));
      }
   }

   const VkPipelineRasterizationLineStateCreateInfoEXT *line_state =
      vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
                           PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
   if (line_state) {
      if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE) {
         dynamic->line_stipple.factor = line_state->lineStippleFactor;
         dynamic->line_stipple.pattern = line_state->lineStipplePattern;
      }
   }

   const VkPipelineMultisampleStateCreateInfo *ms_info =
      pCreateInfo->pRasterizationState->rasterizerDiscardEnable ? NULL :
      pCreateInfo->pMultisampleState;
   if (states & ANV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS) {
      const VkPipelineSampleLocationsStateCreateInfoEXT *sl_info = ms_info ?
         vk_find_struct_const(ms_info, PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT) : NULL;

      if (sl_info) {
         dynamic->sample_locations.samples =
            sl_info->sampleLocationsInfo.sampleLocationsCount;
         const VkSampleLocationEXT *positions =
            sl_info->sampleLocationsInfo.pSampleLocations;
         for (uint32_t i = 0; i < dynamic->sample_locations.samples; i++) {
            dynamic->sample_locations.locations[i].x = positions[i].x;
            dynamic->sample_locations.locations[i].y = positions[i].y;
         }
      }
   }
   /* Ensure we always have valid values for sample_locations. */
   if (pipeline->base.device->vk.enabled_extensions.EXT_sample_locations &&
       dynamic->sample_locations.samples == 0) {
      dynamic->sample_locations.samples =
         ms_info ? ms_info->rasterizationSamples : 1;
      const struct intel_sample_position *positions =
         intel_get_sample_positions(dynamic->sample_locations.samples);
      for (uint32_t i = 0; i < dynamic->sample_locations.samples; i++) {
         dynamic->sample_locations.locations[i].x = positions[i].x;
         dynamic->sample_locations.locations[i].y = positions[i].y;
      }
   }

   if (states & ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE) {
      if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
          uses_color_att) {
         assert(pCreateInfo->pColorBlendState);
         const VkPipelineColorWriteCreateInfoEXT *color_write_info =
            vk_find_struct_const(pCreateInfo->pColorBlendState->pNext,
                                 PIPELINE_COLOR_WRITE_CREATE_INFO_EXT);

         if (color_write_info) {
            dynamic->color_writes = 0;
            for (uint32_t i = 0; i < color_write_info->attachmentCount; i++) {
               dynamic->color_writes |=
                  color_write_info->pColorWriteEnables[i] ? (1u << i) : 0;
            }
         }
      }
   }

   const VkPipelineFragmentShadingRateStateCreateInfoKHR *fsr_state =
      vk_find_struct_const(pCreateInfo->pNext,
                           PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR);
   if (fsr_state) {
      if (states & ANV_CMD_DIRTY_DYNAMIC_SHADING_RATE)
         dynamic->fragment_shading_rate = fsr_state->fragmentSize;
   }

   pipeline->dynamic_state_mask = states;

   /* Mark states that can either be dynamic or fully baked into the pipeline.
    */
   pipeline->static_state_mask = states &
      (ANV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS |
       ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE |
       ANV_CMD_DIRTY_DYNAMIC_SHADING_RATE |
       ANV_CMD_DIRTY_DYNAMIC_RASTERIZER_DISCARD_ENABLE |
       ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP |
       ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY);
}

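/* Debug-build sanity checks on VkGraphicsPipelineCreateInfo; this compiles
 * to an empty function in release builds.
 */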
static void
anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
{
#ifdef DEBUG
   struct anv_render_pass *renderpass = NULL;
   struct anv_subpass *subpass = NULL;

   /* Assert that all required members of VkGraphicsPipelineCreateInfo are
    * present. See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
    */
   assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   renderpass = anv_render_pass_from_handle(info->renderPass);
   assert(renderpass);

   assert(info->subpass < renderpass->subpass_count);
   subpass = &renderpass->subpasses[info->subpass];

   assert(info->stageCount >= 1);
   assert(info->pVertexInputState);
   assert(info->pInputAssemblyState);
   assert(info->pRasterizationState);
   if (!info->pRasterizationState->rasterizerDiscardEnable) {
      assert(info->pViewportState);
      assert(info->pMultisampleState);

      if (subpass && subpass->depth_stencil_attachment)
         assert(info->pDepthStencilState);

      if (subpass && subpass->color_count > 0) {
         bool all_color_unused = true;
         for (int i = 0; i < subpass->color_count; i++) {
            if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
               all_color_unused = false;
         }
         /* pColorBlendState is ignored if the pipeline has rasterization
          * disabled or if the subpass of the render pass the pipeline is
          * created against does not use any color attachments.
          */
         assert(info->pColorBlendState || all_color_unused);
      }
   }

   for (uint32_t i = 0; i < info->stageCount; ++i) {
      switch (info->pStages[i].stage) {
      case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
         assert(info->pTessellationState);
         break;
      default:
         break;
      }
   }
#endif
}

/**
 * Calculate the desired L3 partitioning based on the current state of the
 * pipeline. For now this simply returns the conservative defaults calculated
 * by get_default_l3_weights(), but we could probably do better by gathering
 * more statistics from the pipeline state (e.g. guess of expected URB usage
 * and bound surfaces), or by using feed-back from performance counters.
 */
void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
{
   const struct intel_device_info *devinfo = &pipeline->device->info;

   const struct intel_l3_weights w =
      intel_get_default_l3_weights(devinfo, true, needs_slm);

   pipeline->l3_config = intel_get_l3_config(devinfo, w);
}

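/* Resolve the line rasterization mode for a pipeline.  When the application
 * leaves the mode at VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT, the behaviour
 * depends on multisampling: rectangular lines with MSAA enabled, Bresenham
 * lines otherwise.
 */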
static VkLineRasterizationModeEXT
vk_line_rasterization_mode(const VkPipelineRasterizationLineStateCreateInfoEXT *line_info,
                           const VkPipelineMultisampleStateCreateInfo *ms_info)
{
   VkLineRasterizationModeEXT line_mode =
      line_info ? line_info->lineRasterizationMode :
                  VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT;

   if (line_mode == VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT) {
      if (ms_info && ms_info->rasterizationSamples > 1) {
         return VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT;
      } else {
         return VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT;
      }
   }

   return line_mode;
}

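/* Top-level initialization for a graphics pipeline: validate the create
 * info, copy the non-dynamic state, compile (or fetch from the cache) all
 * shader stages, and derive the vertex-buffer, topology and multisample
 * state that is consumed later when the pipeline is emitted.
 */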
VkResult
anv_graphics_pipeline_init(struct anv_graphics_pipeline *pipeline,
                           struct anv_device *device,
                           struct anv_pipeline_cache *cache,
                           const VkGraphicsPipelineCreateInfo *pCreateInfo,
                           const VkAllocationCallbacks *alloc)
{
   VkResult result;

   anv_pipeline_validate_create_info(pCreateInfo);

   result = anv_pipeline_init(&pipeline->base, device,
                              ANV_PIPELINE_GRAPHICS, pCreateInfo->flags,
                              alloc);
   if (result != VK_SUCCESS)
      return result;

   anv_batch_set_storage(&pipeline->base.batch, ANV_NULL_ADDRESS,
                         pipeline->batch_data, sizeof(pipeline->batch_data));

   ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
   assert(pCreateInfo->subpass < render_pass->subpass_count);
   pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];

   assert(pCreateInfo->pRasterizationState);

   pipeline->dynamic_states = 0;
   if (pCreateInfo->pDynamicState) {
      /* Remove all of the states that are marked as dynamic */
      uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
      for (uint32_t s = 0; s < count; s++) {
         pipeline->dynamic_states |= anv_cmd_dirty_bit_for_vk_dynamic_state(
            pCreateInfo->pDynamicState->pDynamicStates[s]);
      }
   }
   copy_non_dynamic_state(pipeline, pCreateInfo);

   pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState->depthClampEnable;

   /* Previously we enabled depth clipping when !depthClampEnable.
    * DepthClipStateCreateInfo now makes depth clipping explicit so if the
    * clipping info is available, use its enable value to determine clipping,
    * otherwise fall back to the previous !depthClampEnable logic.
    */
   const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip_info =
      vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
                           PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
   pipeline->depth_clip_enable = clip_info ? clip_info->depthClipEnable : !pipeline->depth_clamp_enable;

   pipeline->sample_shading_enable =
      !pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
      pCreateInfo->pMultisampleState &&
      pCreateInfo->pMultisampleState->sampleShadingEnable;

   /* When we free the pipeline, we detect stages based on the NULL status
    * of various prog_data pointers. Make them NULL by default.
    */
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
   if (result != VK_SUCCESS) {
      anv_pipeline_finish(&pipeline->base, device, alloc);
      return result;
   }

   assert(pipeline->shaders[MESA_SHADER_VERTEX]);

   anv_pipeline_setup_l3_config(&pipeline->base, false);

   const VkPipelineVertexInputStateCreateInfo *vi_info =
      pCreateInfo->pVertexInputState;

   const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;

   pipeline->vb_used = 0;
   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &vi_info->pVertexAttributeDescriptions[i];

      if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
         pipeline->vb_used |= 1 << desc->binding;
   }

   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &vi_info->pVertexBindingDescriptions[i];

      pipeline->vb[desc->binding].stride = desc->stride;

      /* Step rate is programmed per vertex element (attribute), not
       * binding. Set up a map of which bindings step per instance, for
       * reference by vertex element setup. */
      switch (desc->inputRate) {
      default:
      case VK_VERTEX_INPUT_RATE_VERTEX:
         pipeline->vb[desc->binding].instanced = false;
         break;
      case VK_VERTEX_INPUT_RATE_INSTANCE:
         pipeline->vb[desc->binding].instanced = true;
         break;
      }

      pipeline->vb[desc->binding].instance_divisor = 1;
   }

   const VkPipelineVertexInputDivisorStateCreateInfoEXT *vi_div_state =
      vk_find_struct_const(vi_info->pNext,
                           PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
   if (vi_div_state) {
      for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
         const VkVertexInputBindingDivisorDescriptionEXT *desc =
            &vi_div_state->pVertexBindingDivisors[i];

         pipeline->vb[desc->binding].instance_divisor = desc->divisor;
      }
   }

   /* Our implementation of VK_KHR_multiview uses instancing to draw the
    * different views. If the client asks for instancing, we need to multiply
    * the instance divisor by the number of views to ensure that we repeat the
    * client's per-instance data once for each view.
    */
   if (pipeline->subpass->view_mask && !pipeline->use_primitive_replication) {
      const uint32_t view_count = anv_subpass_view_count(pipeline->subpass);
      for (uint32_t vb = 0; vb < MAX_VBS; vb++) {
         if (pipeline->vb[vb].instanced)
            pipeline->vb[vb].instance_divisor *= view_count;
      }
   }

   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      pCreateInfo->pInputAssemblyState;
   const VkPipelineTessellationStateCreateInfo *tess_info =
      pCreateInfo->pTessellationState;

   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
   else
      pipeline->topology = vk_to_intel_primitive_type[ia_info->topology];

   /* If rasterization is not enabled, ms_info must be ignored. */
   const bool raster_enabled =
      !pCreateInfo->pRasterizationState->rasterizerDiscardEnable ||
      (pipeline->dynamic_states &
       ANV_CMD_DIRTY_DYNAMIC_RASTERIZER_DISCARD_ENABLE);

   const VkPipelineMultisampleStateCreateInfo *ms_info =
      raster_enabled ? pCreateInfo->pMultisampleState : NULL;

   const VkPipelineRasterizationLineStateCreateInfoEXT *line_info =
      vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
                           PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);

   /* Store line mode, polygon mode and rasterization samples, these are used
    * for dynamic primitive topology.
    */
   pipeline->line_mode = vk_line_rasterization_mode(line_info, ms_info);
   pipeline->polygon_mode = pCreateInfo->pRasterizationState->polygonMode;
   pipeline->rasterization_samples =
      ms_info ? ms_info->rasterizationSamples : 1;

   return VK_SUCCESS;
}

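/* Lower a single ray-tracing NIR shader (splitting out any resume shaders
 * produced by nir_lower_shader_calls), compile it with brw_compile_bs() and
 * upload the result to the pipeline cache.
 */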
static VkResult
compile_upload_rt_shader(struct anv_ray_tracing_pipeline *pipeline,
                         struct anv_pipeline_cache *cache,
                         nir_shader *nir,
                         struct anv_pipeline_stage *stage,
                         struct anv_shader_bin **shader_out,
                         void *mem_ctx)
{
   const struct brw_compiler *compiler =
      pipeline->base.device->physical->compiler;
   const struct intel_device_info *devinfo = compiler->devinfo;

   nir_shader **resume_shaders = NULL;
   uint32_t num_resume_shaders = 0;
   if (nir->info.stage != MESA_SHADER_COMPUTE) {
      NIR_PASS_V(nir, nir_lower_shader_calls,
                 nir_address_format_64bit_global,
                 BRW_BTD_STACK_ALIGN,
                 &resume_shaders, &num_resume_shaders, mem_ctx);
      NIR_PASS_V(nir, brw_nir_lower_shader_calls);
      NIR_PASS_V(nir, brw_nir_lower_rt_intrinsics, devinfo);
   }

   for (unsigned i = 0; i < num_resume_shaders; i++) {
      NIR_PASS_V(resume_shaders[i], brw_nir_lower_shader_calls);
      NIR_PASS_V(resume_shaders[i], brw_nir_lower_rt_intrinsics, devinfo);
   }

   stage->code =
      brw_compile_bs(compiler, pipeline->base.device, mem_ctx,
                     &stage->key.bs, &stage->prog_data.bs, nir,
                     num_resume_shaders, resume_shaders, stage->stats, NULL);
   if (stage->code == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* Ray-tracing shaders don't have a "real" bind map */
   struct anv_pipeline_bind_map empty_bind_map = {};

   const unsigned code_size = stage->prog_data.base.program_size;
   struct anv_shader_bin *bin =
      anv_device_upload_kernel(pipeline->base.device,
                               cache,
                               stage->stage,
                               &stage->cache_key, sizeof(stage->cache_key),
                               stage->code, code_size,
                               &stage->prog_data.base,
                               sizeof(stage->prog_data.bs),
                               stage->stats, 1,
                               NULL, &empty_bind_map);
   if (bin == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* TODO: Figure out executables for resume shaders */
   anv_pipeline_add_executables(&pipeline->base, stage, bin);
   util_dynarray_append(&pipeline->shaders, struct anv_shader_bin *, bin);

   *shader_out = bin;

   return VK_SUCCESS;
}

static bool
is_rt_stack_size_dynamic(const VkRayTracingPipelineCreateInfoKHR *info)
{
   if (info->pDynamicState == NULL)
      return false;

   for (unsigned i = 0; i < info->pDynamicState->dynamicStateCount; i++) {
      if (info->pDynamicState->pDynamicStates[i] ==
          VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR)
         return true;
   }

   return false;
}

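/* Derive the pipeline's scratch stack size from the per-stage maxima gathered
 * during compilation, using the default formula quoted from the Vulkan spec
 * below.  Illustrative example (made-up per-stage maxima): with
 * rayGenStackMax = 64, closestHitStackMax = 128, missStackMax = 32,
 * intersectionStackMax = anyHitStackMax = 0, callableStackMax = 16 and
 * maxPipelineRayRecursionDepth = 2, the default stack size works out to
 * 64 + 1 * 128 + 1 * 128 + 2 * 16 = 352 bytes.
 */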
static void
anv_pipeline_compute_ray_tracing_stacks(struct anv_ray_tracing_pipeline *pipeline,
                                        const VkRayTracingPipelineCreateInfoKHR *info,
                                        uint32_t *stack_max)
{
   if (is_rt_stack_size_dynamic(info)) {
      pipeline->stack_size = 0; /* 0 means dynamic */
   } else {
      /* From the Vulkan spec:
       *
       *    "If the stack size is not set explicitly, the stack size for a
       *    pipeline is:
       *
       *       rayGenStackMax +
       *       min(1, maxPipelineRayRecursionDepth) ×
       *       max(closestHitStackMax, missStackMax,
       *           intersectionStackMax + anyHitStackMax) +
       *       max(0, maxPipelineRayRecursionDepth-1) ×
       *       max(closestHitStackMax, missStackMax) +
       *       2 × callableStackMax"
       */
      pipeline->stack_size =
         stack_max[MESA_SHADER_RAYGEN] +
         MIN2(1, info->maxPipelineRayRecursionDepth) *
         MAX4(stack_max[MESA_SHADER_CLOSEST_HIT],
              stack_max[MESA_SHADER_MISS],
              stack_max[MESA_SHADER_INTERSECTION],
              stack_max[MESA_SHADER_ANY_HIT]) +
         MAX2(0, (int)info->maxPipelineRayRecursionDepth - 1) *
         MAX2(stack_max[MESA_SHADER_CLOSEST_HIT],
              stack_max[MESA_SHADER_MISS]) +
         2 * stack_max[MESA_SHADER_CALLABLE];

      /* This is an extremely unlikely case but we need to set it to some
       * non-zero value so that we don't accidentally think it's dynamic.
       * Our minimum stack size is 2KB anyway so we could set it to any small
       * value we like.
       */
      if (pipeline->stack_size == 0)
         pipeline->stack_size = 1;
   }
}

static struct anv_pipeline_stage *
anv_pipeline_init_ray_tracing_stages(struct anv_ray_tracing_pipeline *pipeline,
                                     const VkRayTracingPipelineCreateInfoKHR *info,
                                     void *pipeline_ctx)
{
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);

   /* Create enough stage entries for all shader modules plus potential
    * combinations in the groups.
    */
   struct anv_pipeline_stage *stages =
      rzalloc_array(pipeline_ctx, struct anv_pipeline_stage, info->stageCount);

   for (uint32_t i = 0; i < info->stageCount; i++) {
      const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
      if (sinfo->module == VK_NULL_HANDLE)
         continue;

      int64_t stage_start = os_time_get_nano();

      stages[i] = (struct anv_pipeline_stage) {
         .stage = vk_to_mesa_shader_stage(sinfo->stage),
         .module = vk_shader_module_from_handle(sinfo->module),
         .entrypoint = sinfo->pName,
         .spec_info = sinfo->pSpecializationInfo,
         .cache_key = {
            .stage = vk_to_mesa_shader_stage(sinfo->stage),
         },
         .feedback = {
            .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
         },
      };

      populate_bs_prog_key(&pipeline->base.device->info, sinfo->flags,
                           pipeline->base.device->robust_buffer_access,
                           &stages[i].key.bs);

      anv_pipeline_hash_shader(stages[i].module,
                               stages[i].entrypoint,
                               stages[i].stage,
                               stages[i].spec_info,
                               stages[i].shader_sha1);

      if (stages[i].stage != MESA_SHADER_INTERSECTION) {
         anv_pipeline_hash_ray_tracing_shader(pipeline, layout, &stages[i],
                                              stages[i].cache_key.sha1);
      }

      stages[i].feedback.duration += os_time_get_nano() - stage_start;
   }

   for (uint32_t i = 0; i < info->groupCount; i++) {
      const VkRayTracingShaderGroupCreateInfoKHR *ginfo = &info->pGroups[i];

      if (ginfo->type != VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR)
         continue;

      int64_t stage_start = os_time_get_nano();

      uint32_t intersection_idx = ginfo->intersectionShader;
      assert(intersection_idx < info->stageCount);

      uint32_t any_hit_idx = ginfo->anyHitShader;
      if (any_hit_idx != VK_SHADER_UNUSED_KHR) {
         assert(any_hit_idx < info->stageCount);
         anv_pipeline_hash_ray_tracing_combined_shader(pipeline,
                                                       layout,
                                                       &stages[intersection_idx],
                                                       &stages[any_hit_idx],
                                                       stages[intersection_idx].cache_key.sha1);
      } else {
         anv_pipeline_hash_ray_tracing_shader(pipeline, layout,
                                              &stages[intersection_idx],
                                              stages[intersection_idx].cache_key.sha1);
      }

      stages[intersection_idx].feedback.duration += os_time_get_nano() - stage_start;
   }

   return stages;
}

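/* Try to satisfy every ray-tracing stage from the pipeline cache.  Returns
 * true only if all stages were found, in which case compilation can be
 * skipped entirely.
 */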
static bool
anv_pipeline_load_cached_shaders(struct anv_ray_tracing_pipeline *pipeline,
                                 struct anv_pipeline_cache *cache,
                                 const VkRayTracingPipelineCreateInfoKHR *info,
                                 struct anv_pipeline_stage *stages,
                                 uint32_t *stack_max)
{
   uint32_t shaders = 0, cache_hits = 0;
   for (uint32_t i = 0; i < info->stageCount; i++) {
      if (stages[i].entrypoint == NULL)
         continue;

      shaders++;

      int64_t stage_start = os_time_get_nano();

      bool cache_hit;
      stages[i].bin = anv_device_search_for_kernel(pipeline->base.device, cache,
                                                   &stages[i].cache_key,
                                                   sizeof(stages[i].cache_key),
                                                   &cache_hit);
      if (cache_hit) {
         cache_hits++;
         stages[i].feedback.flags |=
            VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
      }

      if (stages[i].bin != NULL) {
         anv_pipeline_add_executables(&pipeline->base, &stages[i], stages[i].bin);
         util_dynarray_append(&pipeline->shaders, struct anv_shader_bin *, stages[i].bin);

         uint32_t stack_size =
            brw_bs_prog_data_const(stages[i].bin->prog_data)->max_stack_size;
         stack_max[stages[i].stage] =
            MAX2(stack_max[stages[i].stage], stack_size);
      }

      stages[i].feedback.duration += os_time_get_nano() - stage_start;
   }

   return cache_hits == shaders;
}

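/* Compile all ray-tracing stages and resolve the shader groups.  General and
 * triangle hit groups are wired up to the compiled binaries directly;
 * intersection shaders are handled per-group so they can be fused with the
 * group's any-hit shader.
 */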
static VkResult
anv_pipeline_compile_ray_tracing(struct anv_ray_tracing_pipeline *pipeline,
                                 struct anv_pipeline_cache *cache,
                                 const VkRayTracingPipelineCreateInfoKHR *info)
{
   const struct intel_device_info *devinfo = &pipeline->base.device->info;
   VkResult result;

   VkPipelineCreationFeedbackEXT pipeline_feedback = {
      .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
   };
   int64_t pipeline_start = os_time_get_nano();

   void *pipeline_ctx = ralloc_context(NULL);

   struct anv_pipeline_stage *stages =
      anv_pipeline_init_ray_tracing_stages(pipeline, info, pipeline_ctx);

   ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);

   const bool skip_cache_lookup =
      (pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);

   uint32_t stack_max[MESA_VULKAN_SHADER_STAGES] = {};

   if (!skip_cache_lookup &&
       anv_pipeline_load_cached_shaders(pipeline, cache, info, stages, stack_max)) {
      pipeline_feedback.flags |=
         VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
      goto done;
   }

   if (info->flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT) {
      ralloc_free(pipeline_ctx);
      return VK_PIPELINE_COMPILE_REQUIRED_EXT;
   }

   for (uint32_t i = 0; i < info->stageCount; i++) {
      if (stages[i].entrypoint == NULL)
         continue;

      int64_t stage_start = os_time_get_nano();

      stages[i].nir = anv_pipeline_stage_get_nir(&pipeline->base, cache,
                                                 pipeline_ctx, &stages[i]);
      if (stages[i].nir == NULL) {
         ralloc_free(pipeline_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      anv_pipeline_lower_nir(&pipeline->base, pipeline_ctx, &stages[i], layout);

      stages[i].feedback.duration += os_time_get_nano() - stage_start;
   }

   for (uint32_t i = 0; i < info->stageCount; i++) {
      if (stages[i].entrypoint == NULL)
         continue;

      /* Shader found in cache already. */
      if (stages[i].bin != NULL)
         continue;

      /* We handle intersection shaders as part of the group */
      if (stages[i].stage == MESA_SHADER_INTERSECTION)
         continue;

      int64_t stage_start = os_time_get_nano();

      void *stage_ctx = ralloc_context(pipeline_ctx);

      nir_shader *nir = nir_shader_clone(stage_ctx, stages[i].nir);
      switch (stages[i].stage) {
      case MESA_SHADER_RAYGEN:
         brw_nir_lower_raygen(nir);
         break;

      case MESA_SHADER_ANY_HIT:
         brw_nir_lower_any_hit(nir, devinfo);
         break;

      case MESA_SHADER_CLOSEST_HIT:
         brw_nir_lower_closest_hit(nir);
         break;

      case MESA_SHADER_MISS:
         brw_nir_lower_miss(nir);
         break;

      case MESA_SHADER_INTERSECTION:
         unreachable("These are handled later");

      case MESA_SHADER_CALLABLE:
         brw_nir_lower_callable(nir);
         break;

      default:
         unreachable("Invalid ray-tracing shader stage");
      }

      result = compile_upload_rt_shader(pipeline, cache, nir, &stages[i],
                                        &stages[i].bin, stage_ctx);
      if (result != VK_SUCCESS) {
         ralloc_free(pipeline_ctx);
         return result;
      }

      uint32_t stack_size =
         brw_bs_prog_data_const(stages[i].bin->prog_data)->max_stack_size;
      stack_max[stages[i].stage] = MAX2(stack_max[stages[i].stage], stack_size);

      ralloc_free(stage_ctx);

      stages[i].feedback.duration += os_time_get_nano() - stage_start;
   }

   for (uint32_t i = 0; i < info->groupCount; i++) {
      const VkRayTracingShaderGroupCreateInfoKHR *ginfo = &info->pGroups[i];
      struct anv_rt_shader_group *group = &pipeline->groups[i];
      group->type = ginfo->type;
      switch (ginfo->type) {
      case VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR:
         assert(ginfo->generalShader < info->stageCount);
         group->general = stages[ginfo->generalShader].bin;
         break;

      case VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR:
         if (ginfo->anyHitShader < info->stageCount)
            group->any_hit = stages[ginfo->anyHitShader].bin;

         if (ginfo->closestHitShader < info->stageCount)
            group->closest_hit = stages[ginfo->closestHitShader].bin;
         break;

      case VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR: {
         if (ginfo->closestHitShader < info->stageCount)
            group->closest_hit = stages[ginfo->closestHitShader].bin;

         uint32_t intersection_idx = info->pGroups[i].intersectionShader;
         assert(intersection_idx < info->stageCount);

         /* Only compile this stage if not already found in the cache. */
         if (stages[intersection_idx].bin == NULL) {
            /* The any-hit and intersection shader have to be combined */
            uint32_t any_hit_idx = info->pGroups[i].anyHitShader;
            const nir_shader *any_hit = NULL;
            if (any_hit_idx < info->stageCount)
               any_hit = stages[any_hit_idx].nir;

            void *group_ctx = ralloc_context(pipeline_ctx);
            nir_shader *intersection =
               nir_shader_clone(group_ctx, stages[intersection_idx].nir);

            brw_nir_lower_combined_intersection_any_hit(intersection, any_hit,
                                                        devinfo);

            result = compile_upload_rt_shader(pipeline, cache,
                                              intersection,
                                              &stages[intersection_idx],
                                              &group->intersection,
                                              group_ctx);
            ralloc_free(group_ctx);
            if (result != VK_SUCCESS)
               return result;
         } else {
            group->intersection = stages[intersection_idx].bin;
         }

         uint32_t stack_size =
            brw_bs_prog_data_const(group->intersection->prog_data)->max_stack_size;
         stack_max[MESA_SHADER_INTERSECTION] =
            MAX2(stack_max[MESA_SHADER_INTERSECTION], stack_size);

         break;
      }

      default:
         unreachable("Invalid ray tracing shader group type");
      }
   }

done:
   ralloc_free(pipeline_ctx);

   anv_pipeline_compute_ray_tracing_stacks(pipeline, info, stack_max);

   pipeline_feedback.duration = os_time_get_nano() - pipeline_start;

   const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
      vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
   if (create_feedback) {
      *create_feedback->pPipelineCreationFeedback = pipeline_feedback;

      assert(info->stageCount == create_feedback->pipelineStageCreationFeedbackCount);
      for (uint32_t i = 0; i < info->stageCount; i++) {
         /* Unlike the graphics path, the ray-tracing stages array is indexed
          * by the create-info stage index, not by gl_shader_stage.
          */
         create_feedback->pPipelineStageCreationFeedbacks[i] = stages[i].feedback;
      }
   }

   return VK_SUCCESS;
}

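/* Compile (or fetch from the default pipeline cache) the device-level
 * ray-tracing helper kernels: the raygen trampoline and the trivial-return
 * shader.
 */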
VkResult
anv_device_init_rt_shaders(struct anv_device *device)
{
   if (!device->vk.enabled_extensions.KHR_ray_tracing_pipeline)
      return VK_SUCCESS;

   bool cache_hit;

   struct brw_rt_trampoline {
      char name[16];
      struct brw_cs_prog_key key;
   } trampoline_key = {
      .name = "rt-trampoline",
      .key = {
         /* TODO: Other subgroup sizes? */
         .base.subgroup_size_type = BRW_SUBGROUP_SIZE_REQUIRE_8,
      },
   };
   device->rt_trampoline =
      anv_device_search_for_kernel(device, &device->default_pipeline_cache,
                                   &trampoline_key, sizeof(trampoline_key),
                                   &cache_hit);
   if (device->rt_trampoline == NULL) {

      void *tmp_ctx = ralloc_context(NULL);
      nir_shader *trampoline_nir =
         brw_nir_create_raygen_trampoline(device->physical->compiler, tmp_ctx);

      struct anv_pipeline_bind_map bind_map = {
         .surface_count = 0,
         .sampler_count = 0,
      };
      uint32_t dummy_params[4] = { 0, };
      struct brw_cs_prog_data trampoline_prog_data = {
         .base.nr_params = 4,
         .base.param = dummy_params,
         .uses_inline_data = true,
         .uses_btd_stack_ids = true,
      };
      struct brw_compile_cs_params params = {
         .nir = trampoline_nir,
         .key = &trampoline_key.key,
         .prog_data = &trampoline_prog_data,
         .log_data = device,
      };
      const unsigned *tramp_data =
         brw_compile_cs(device->physical->compiler, tmp_ctx, &params);

      device->rt_trampoline =
         anv_device_upload_kernel(device, &device->default_pipeline_cache,
                                  MESA_SHADER_COMPUTE,
                                  &trampoline_key, sizeof(trampoline_key),
                                  tramp_data,
                                  trampoline_prog_data.base.program_size,
                                  &trampoline_prog_data.base,
                                  sizeof(trampoline_prog_data),
                                  NULL, 0, NULL, &bind_map);

      ralloc_free(tmp_ctx);

      if (device->rt_trampoline == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   struct brw_rt_trivial_return {
      char name[16];
      struct brw_bs_prog_key key;
   } return_key = {
      .name = "rt-trivial-ret",
   };
   device->rt_trivial_return =
      anv_device_search_for_kernel(device, &device->default_pipeline_cache,
                                   &return_key, sizeof(return_key),
                                   &cache_hit);
   if (device->rt_trivial_return == NULL) {
      void *tmp_ctx = ralloc_context(NULL);
      nir_shader *trivial_return_nir =
         brw_nir_create_trivial_return_shader(device->physical->compiler, tmp_ctx);

      NIR_PASS_V(trivial_return_nir, brw_nir_lower_rt_intrinsics, &device->info);

      struct anv_pipeline_bind_map bind_map = {
         .surface_count = 0,
         .sampler_count = 0,
      };
      struct brw_bs_prog_data return_prog_data = { 0, };
      const unsigned *return_data =
         brw_compile_bs(device->physical->compiler, device, tmp_ctx,
                        &return_key.key, &return_prog_data, trivial_return_nir,
                        0, 0, NULL, NULL);

      device->rt_trivial_return =
         anv_device_upload_kernel(device, &device->default_pipeline_cache,
                                  MESA_SHADER_CALLABLE,
                                  &return_key, sizeof(return_key),
                                  return_data, return_prog_data.base.program_size,
                                  &return_prog_data.base, sizeof(return_prog_data),
                                  NULL, 0, NULL, &bind_map);

      ralloc_free(tmp_ctx);

      if (device->rt_trivial_return == NULL) {
         anv_shader_bin_unref(device, device->rt_trampoline);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
   }

   return VK_SUCCESS;
}

void
anv_device_finish_rt_shaders(struct anv_device *device)
{
   if (!device->vk.enabled_extensions.KHR_ray_tracing_pipeline)
      return;

   anv_shader_bin_unref(device, device->rt_trampoline);
}

VkResult
anv_ray_tracing_pipeline_init(struct anv_ray_tracing_pipeline *pipeline,
                              struct anv_device *device,
                              struct anv_pipeline_cache *cache,
                              const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                              const VkAllocationCallbacks *alloc)
{
   VkResult result;

   /* Zero things out so our clean-up works */
   memset(pipeline->groups, 0,
          pipeline->group_count * sizeof(*pipeline->groups));

   util_dynarray_init(&pipeline->shaders, pipeline->base.mem_ctx);

   result = anv_pipeline_compile_ray_tracing(pipeline, cache, pCreateInfo);
   if (result != VK_SUCCESS)
      goto fail;

   anv_pipeline_setup_l3_config(&pipeline->base, /* needs_slm */ false);

   return VK_SUCCESS;

fail:
   util_dynarray_foreach(&pipeline->shaders,
                         struct anv_shader_bin *, shader) {
      anv_shader_bin_unref(device, *shader);
   }
   return result;
}

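/* Helper for the VK_KHR_pipeline_executable_properties entrypoints below:
 * snprintf into a fixed-size Vulkan string field, asserting that the result
 * was not truncated.
 */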
#define WRITE_STR(field, ...) ({                               \
   memset(field, 0, sizeof(field));                            \
   UNUSED int i = snprintf(field, sizeof(field), __VA_ARGS__); \
   assert(i > 0 && i < sizeof(field));                         \
})

VkResult anv_GetPipelineExecutablePropertiesKHR(
    VkDevice device,
    const VkPipelineInfoKHR* pPipelineInfo,
    uint32_t* pExecutableCount,
    VkPipelineExecutablePropertiesKHR* pProperties)
{
   ANV_FROM_HANDLE(anv_pipeline, pipeline, pPipelineInfo->pipeline);
   VK_OUTARRAY_MAKE(out, pProperties, pExecutableCount);

   util_dynarray_foreach (&pipeline->executables, struct anv_pipeline_executable, exe) {
      vk_outarray_append(&out, props) {
         gl_shader_stage stage = exe->stage;
         props->stages = mesa_to_vk_shader_stage(stage);

         unsigned simd_width = exe->stats.dispatch_width;
         if (stage == MESA_SHADER_FRAGMENT) {
            WRITE_STR(props->name, "%s%d %s",
                      simd_width ? "SIMD" : "vec",
                      simd_width ? simd_width : 4,
                      _mesa_shader_stage_to_string(stage));
         } else {
            WRITE_STR(props->name, "%s", _mesa_shader_stage_to_string(stage));
         }
         WRITE_STR(props->description, "%s%d %s shader",
                   simd_width ? "SIMD" : "vec",
                   simd_width ? simd_width : 4,
                   _mesa_shader_stage_to_string(stage));

         /* The compiler gives us a dispatch width of 0 for vec4 but Vulkan
          * wants a subgroup size of 1.
          */
         props->subgroupSize = MAX2(simd_width, 1);
      }
   }

   return vk_outarray_status(&out);
}

static const struct anv_pipeline_executable *
anv_pipeline_get_executable(struct anv_pipeline *pipeline, uint32_t index)
{
   assert(index < util_dynarray_num_elements(&pipeline->executables,
                                             struct anv_pipeline_executable));
   return util_dynarray_element(
      &pipeline->executables, struct anv_pipeline_executable, index);
}

VkResult anv_GetPipelineExecutableStatisticsKHR(
3161
VkDevice device,
3162
const VkPipelineExecutableInfoKHR* pExecutableInfo,
3163
uint32_t* pStatisticCount,
3164
VkPipelineExecutableStatisticKHR* pStatistics)
3165
{
3166
   ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
   VK_OUTARRAY_MAKE(out, pStatistics, pStatisticCount);

   const struct anv_pipeline_executable *exe =
      anv_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);

   const struct brw_stage_prog_data *prog_data;
   switch (pipeline->type) {
   case ANV_PIPELINE_GRAPHICS: {
      prog_data = anv_pipeline_to_graphics(pipeline)->shaders[exe->stage]->prog_data;
      break;
   }
   case ANV_PIPELINE_COMPUTE: {
      prog_data = anv_pipeline_to_compute(pipeline)->cs->prog_data;
      break;
   }
   default:
      unreachable("invalid pipeline type");
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Instruction Count");
      WRITE_STR(stat->description,
                "Number of GEN instructions in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.instructions;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "SEND Count");
      WRITE_STR(stat->description,
                "Number of instructions in the final generated shader "
                "executable which access external units such as the "
                "constant cache or the sampler.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.sends;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Loop Count");
      WRITE_STR(stat->description,
                "Number of loops (not unrolled) in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.loops;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Cycle Count");
      WRITE_STR(stat->description,
                "Estimate of the number of EU cycles required to execute "
                "the final generated executable. This is an estimate only "
                "and may vary greatly from actual run-time performance.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.cycles;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Spill Count");
      WRITE_STR(stat->description,
                "Number of scratch spill operations. This gives a rough "
                "estimate of the cost incurred due to spilling temporary "
                "values to memory. If this is non-zero, you may want to "
                "adjust your shader to reduce register pressure.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.spills;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Fill Count");
      WRITE_STR(stat->description,
                "Number of scratch fill operations. This gives a rough "
                "estimate of the cost incurred due to spilling temporary "
                "values to memory. If this is non-zero, you may want to "
                "adjust your shader to reduce register pressure.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.fills;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Scratch Memory Size");
      WRITE_STR(stat->description,
                "Number of bytes of scratch memory required by the "
                "generated shader executable. If this is non-zero, you "
                "may want to adjust your shader to reduce register "
                "pressure.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = prog_data->total_scratch;
   }

   if (gl_shader_stage_uses_workgroup(exe->stage)) {
      vk_outarray_append(&out, stat) {
         WRITE_STR(stat->name, "Workgroup Memory Size");
         WRITE_STR(stat->description,
                   "Number of bytes of workgroup shared memory used by this "
                   "shader including any padding.");
         stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
         stat->value.u64 = prog_data->total_shared;
      }
   }

   return vk_outarray_status(&out);
}

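/* Copies one IR string into a VkPipelineExecutableInternalRepresentationKHR,
 * following the usual Vulkan "query size, then fill" idiom: if ir->pData is
 * NULL, only the required dataSize (including the NUL terminator) is
 * reported; otherwise up to ir->dataSize bytes are copied and false is
 * returned when the caller's buffer was too small to hold the whole string.
 */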
static bool
write_ir_text(VkPipelineExecutableInternalRepresentationKHR* ir,
              const char *data)
{
   ir->isText = VK_TRUE;

   size_t data_len = strlen(data) + 1;

   if (ir->pData == NULL) {
      ir->dataSize = data_len;
      return true;
   }

   strncpy(ir->pData, data, ir->dataSize);
   if (ir->dataSize < data_len)
      return false;

   ir->dataSize = data_len;
   return true;
}

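/* Reports the internal representations captured for a single pipeline
 * executable: the final NIR (when it was kept) and the final GEN assembly.
 * If a caller-provided buffer is too small for its text, the text is
 * truncated and VK_INCOMPLETE is returned.
 *
 * Illustrative app-side sketch (not part of the driver) of the multi-call
 * idiom this entry point serves; it assumes the pipeline was created with
 * VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR and omits
 * error handling:
 *
 *    uint32_t count = 0;
 *    vkGetPipelineExecutableInternalRepresentationsKHR(dev, &info, &count, NULL);
 *    VkPipelineExecutableInternalRepresentationKHR *irs =
 *       calloc(count, sizeof(*irs));
 *    for (uint32_t i = 0; i < count; i++) {
 *       irs[i].sType =
 *          VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR;
 *    }
 *    vkGetPipelineExecutableInternalRepresentationsKHR(dev, &info, &count, irs);
 *    // Each irs[i].dataSize now reports how much to allocate for irs[i].pData;
 *    // a further call with pData set retrieves the actual text.
 */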
VkResult anv_GetPipelineExecutableInternalRepresentationsKHR(
    VkDevice                                    device,
    const VkPipelineExecutableInfoKHR*          pExecutableInfo,
    uint32_t*                                   pInternalRepresentationCount,
    VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
{
   ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
   VK_OUTARRAY_MAKE(out, pInternalRepresentations,
                    pInternalRepresentationCount);
   bool incomplete_text = false;

   const struct anv_pipeline_executable *exe =
      anv_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);

   if (exe->nir) {
      vk_outarray_append(&out, ir) {
         WRITE_STR(ir->name, "Final NIR");
         WRITE_STR(ir->description,
                   "Final NIR before going into the back-end compiler");

         if (!write_ir_text(ir, exe->nir))
            incomplete_text = true;
      }
   }

   if (exe->disasm) {
      vk_outarray_append(&out, ir) {
         WRITE_STR(ir->name, "GEN Assembly");
         WRITE_STR(ir->description,
                   "Final GEN assembly for the generated shader binary");

         if (!write_ir_text(ir, exe->disasm))
            incomplete_text = true;
      }
   }

   return incomplete_text ? VK_INCOMPLETE : vk_outarray_status(&out);
}

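/* Copies the opaque shader group handles for groupCount groups, starting at
 * firstGroup, into pData. Each handle occupies sizeof(group->handle) bytes;
 * per the extension's valid usage, dataSize must be at least
 * shaderGroupHandleSize * groupCount, a requirement on the caller that is
 * not re-checked here.
 */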
VkResult
anv_GetRayTracingShaderGroupHandlesKHR(
    VkDevice                                    device,
    VkPipeline                                  _pipeline,
    uint32_t                                    firstGroup,
    uint32_t                                    groupCount,
    size_t                                      dataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
   if (pipeline->type != ANV_PIPELINE_RAY_TRACING)
      return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);

   struct anv_ray_tracing_pipeline *rt_pipeline =
      anv_pipeline_to_ray_tracing(pipeline);

   for (uint32_t i = 0; i < groupCount; i++) {
      struct anv_rt_shader_group *group = &rt_pipeline->groups[firstGroup + i];
      memcpy(pData, group->handle, sizeof(group->handle));
      pData += sizeof(group->handle);
   }

   return VK_SUCCESS;
}

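/* Capture/replay shader group handles are not implemented on this branch;
 * this entry point is a stub and is not expected to be reached.
 */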
VkResult
anv_GetRayTracingCaptureReplayShaderGroupHandlesKHR(
    VkDevice                                    device,
    VkPipeline                                  pipeline,
    uint32_t                                    firstGroup,
    uint32_t                                    groupCount,
    size_t                                      dataSize,
    void*                                       pData)
{
   unreachable("Unimplemented");
   return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
}

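/* Returns the maximum stack size the back-end computed for the selected
 * shader of one ray tracing shader group, or 0 if the group has no shader
 * of that kind.
 *
 * Illustrative app-side sketch (not part of the driver, names are
 * hypothetical): applications typically query these sizes to derive a
 * pipeline stack size and then set it on the command buffer:
 *
 *    VkDeviceSize rgen_size = vkGetRayTracingShaderGroupStackSizeKHR(
 *       dev, pipe, raygen_group_index, VK_SHADER_GROUP_SHADER_GENERAL_KHR);
 *    ...
 *    vkCmdSetRayTracingPipelineStackSizeKHR(cmd, computed_stack_size);
 */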
VkDeviceSize
anv_GetRayTracingShaderGroupStackSizeKHR(
    VkDevice                                    device,
    VkPipeline                                  _pipeline,
    uint32_t                                    group,
    VkShaderGroupShaderKHR                      groupShader)
{
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
   assert(pipeline->type == ANV_PIPELINE_RAY_TRACING);

   struct anv_ray_tracing_pipeline *rt_pipeline =
      anv_pipeline_to_ray_tracing(pipeline);

   assert(group < rt_pipeline->group_count);

   struct anv_shader_bin *bin;
   switch (groupShader) {
   case VK_SHADER_GROUP_SHADER_GENERAL_KHR:
      bin = rt_pipeline->groups[group].general;
      break;

   case VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR:
      bin = rt_pipeline->groups[group].closest_hit;
      break;

   case VK_SHADER_GROUP_SHADER_ANY_HIT_KHR:
      bin = rt_pipeline->groups[group].any_hit;
      break;

   case VK_SHADER_GROUP_SHADER_INTERSECTION_KHR:
      bin = rt_pipeline->groups[group].intersection;
      break;

   default:
      unreachable("Invalid VkShaderGroupShader enum");
   }

   if (bin == NULL)
      return 0;

   return brw_bs_prog_data_const(bin->prog_data)->max_stack_size;
}