GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/zink/zink_program.c
/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_program.h"

#include "zink_compiler.h"
#include "zink_context.h"
#include "zink_descriptors.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_state.h"

#include "util/hash_table.h"
#include "util/set.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "tgsi/tgsi_from_mesa.h"

/* for pipeline cache */
#define XXH_INLINE_ALL
#include "util/xxhash.h"

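/* a pipeline cache entry pairs a full pipeline-state key with the VkPipeline
 * baked from it; entries live in the per-program pipeline hash tables
 */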
struct gfx_pipeline_cache_entry {
   struct zink_gfx_pipeline_state state;
   VkPipeline pipeline;
};

struct compute_pipeline_cache_entry {
   struct zink_compute_pipeline_state state;
   VkPipeline pipeline;
};

void
debug_describe_zink_gfx_program(char *buf, const struct zink_gfx_program *ptr)
{
   sprintf(buf, "zink_gfx_program");
}

void
debug_describe_zink_compute_program(char *buf, const struct zink_compute_program *ptr)
{
   sprintf(buf, "zink_compute_program");
}

/* copied from iris */
struct keybox {
   uint16_t size;
   gl_shader_stage stage;
   uint8_t data[0];
};

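/* a keybox is a variable-length hash key: the shader stage plus the
 * stage-specific shader key data, optionally followed by inlined-uniform
 * values
 */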
static struct keybox *
make_keybox(void *mem_ctx, gl_shader_stage stage, const void *key, uint32_t key_size, void *base, uint32_t base_size)
{
   struct keybox *keybox =
      ralloc_size(mem_ctx, sizeof(struct keybox) + key_size + base_size);

   keybox->stage = stage;
   keybox->size = key_size + base_size;
   memcpy(keybox->data, key, key_size);
   if (base_size)
      memcpy(&keybox->data[key_size], base, base_size);
   return keybox;
}

static uint32_t
keybox_hash(const void *void_key)
{
   const struct keybox *key = void_key;
   return _mesa_hash_data(&key->stage, key->size + sizeof(key->stage));
}

static bool
keybox_equals(const void *void_a, const void *void_b)
{
   const struct keybox *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

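/* per-stage shader key generators: each snapshots the bits of context state
 * that require a different compiled variant of the given stage
 */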
static void
shader_key_vs_gen(struct zink_context *ctx, struct zink_shader *zs,
                  struct zink_shader *shaders[ZINK_SHADER_COUNT], struct zink_shader_key *key)
{
   struct zink_vs_key *vs_key = &key->key.vs;
   key->size = sizeof(struct zink_vs_key);

   vs_key->clip_halfz = ctx->rast_state->base.clip_halfz;
   switch (zs->nir->info.stage) {
   case MESA_SHADER_VERTEX:
      vs_key->last_vertex_stage = !shaders[PIPE_SHADER_TESS_EVAL] && !shaders[PIPE_SHADER_GEOMETRY];
      vs_key->push_drawid = ctx->drawid_broken;
      break;
   case MESA_SHADER_TESS_EVAL:
      vs_key->last_vertex_stage = !shaders[PIPE_SHADER_GEOMETRY];
      break;
   case MESA_SHADER_GEOMETRY:
      vs_key->last_vertex_stage = true;
      break;
   default:
      unreachable("impossible case");
   }
}

static void
shader_key_fs_gen(struct zink_context *ctx, struct zink_shader *zs,
                  struct zink_shader *shaders[ZINK_SHADER_COUNT], struct zink_shader_key *key)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_fs_key *fs_key = &key->key.fs;
   key->size = sizeof(struct zink_fs_key);

   /* if gl_SampleMask[] is written to, we have to ensure that we get a shader with the same sample count:
    * in GL, rast_samples==1 means ignore gl_SampleMask[]
    * in VK, gl_SampleMask[] is never ignored
    */
   if (zs->nir->info.outputs_written & (1 << FRAG_RESULT_SAMPLE_MASK))
      fs_key->samples = !!ctx->fb_state.samples;
   fs_key->force_dual_color_blend = screen->driconf.dual_color_blend_by_location &&
                                    ctx->gfx_pipeline_state.blend_state->dual_src_blend &&
                                    ctx->gfx_pipeline_state.blend_state->attachments[1].blendEnable;
   if (((shaders[PIPE_SHADER_GEOMETRY] && shaders[PIPE_SHADER_GEOMETRY]->nir->info.gs.output_primitive == GL_POINTS) ||
        ctx->gfx_prim_mode == PIPE_PRIM_POINTS) && ctx->rast_state->base.point_quad_rasterization && ctx->rast_state->base.sprite_coord_enable) {
      fs_key->coord_replace_bits = ctx->rast_state->base.sprite_coord_enable;
      fs_key->coord_replace_yinvert = !!ctx->rast_state->base.sprite_coord_mode;
   }
}

static void
shader_key_tcs_gen(struct zink_context *ctx, struct zink_shader *zs,
                   struct zink_shader *shaders[ZINK_SHADER_COUNT], struct zink_shader_key *key)
{
   struct zink_tcs_key *tcs_key = &key->key.tcs;
   key->size = sizeof(struct zink_tcs_key);

   tcs_key->vertices_per_patch = ctx->gfx_pipeline_state.vertices_per_patch;
   tcs_key->vs_outputs_written = shaders[PIPE_SHADER_VERTEX]->nir->info.outputs_written;
}

typedef void (*zink_shader_key_gen)(struct zink_context *ctx, struct zink_shader *zs,
                                    struct zink_shader *shaders[ZINK_SHADER_COUNT],
                                    struct zink_shader_key *key);
static zink_shader_key_gen shader_key_vtbl[] =
{
   [MESA_SHADER_VERTEX] = shader_key_vs_gen,
   [MESA_SHADER_TESS_CTRL] = shader_key_tcs_gen,
   /* reusing vs key for now since we're only using clip_halfz */
   [MESA_SHADER_TESS_EVAL] = shader_key_vs_gen,
   [MESA_SHADER_GEOMETRY] = shader_key_vs_gen,
   [MESA_SHADER_FRAGMENT] = shader_key_fs_gen,
};

/* return pointer to make function reusable */
static inline struct zink_shader_module **
get_default_shader_module_ptr(struct zink_gfx_program *prog, struct zink_shader *zs, struct zink_shader_key *key)
{
   if (zs->nir->info.stage == MESA_SHADER_VERTEX ||
       zs->nir->info.stage == MESA_SHADER_TESS_EVAL) {
      /* no streamout or halfz */
      if (!zink_vs_key(key)->last_vertex_stage)
         return &prog->default_variants[zs->nir->info.stage][1];
   }
   return &prog->default_variants[zs->nir->info.stage][0];
}

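/* find or create the shader module matching the current context state for one
 * stage: generate the stage key, try the cheap default-variant pointer first,
 * then fall back to the keybox-hashed shader cache, compiling on a miss
 */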
static struct zink_shader_module *
get_shader_module_for_stage(struct zink_context *ctx, struct zink_shader *zs, struct zink_gfx_program *prog)
{
   gl_shader_stage stage = zs->nir->info.stage;
   enum pipe_shader_type pstage = pipe_shader_type_from_mesa(stage);
   struct zink_shader_key key = {0};
   VkShaderModule mod;
   struct zink_shader_module *zm;
   struct zink_shader_module **default_zm = NULL;
   struct keybox *keybox;
   uint32_t hash;
   unsigned base_size = 0;

   shader_key_vtbl[stage](ctx, zs, ctx->gfx_stages, &key);
   /* this is the default variant if there is no default or it matches the default */
   if (prog->default_variant_key[pstage]) {
      const struct keybox *tmp = prog->default_variant_key[pstage];
      /* if comparing against the existing default, use the base variant key size since
       * we're only checking the stage-specific data
       */
      key.is_default_variant = !memcmp(tmp->data, &key, key.size);
   } else
      key.is_default_variant = true;

   /* inlined uniform values become part of the key, which always forces a
    * non-default variant
    */
   if (zs->nir->info.num_inlinable_uniforms &&
       ctx->inlinable_uniforms_valid_mask & BITFIELD64_BIT(pstage)) {
      key.inline_uniforms = true;
      memcpy(key.base.inlined_uniform_values,
             ctx->inlinable_uniforms[pstage],
             zs->nir->info.num_inlinable_uniforms * 4);
      base_size = zs->nir->info.num_inlinable_uniforms * sizeof(uint32_t);
      key.is_default_variant = false;
   }
   if (key.is_default_variant) {
      default_zm = get_default_shader_module_ptr(prog, zs, &key);
      if (*default_zm)
         return *default_zm;
   }
   keybox = make_keybox(prog, stage, &key, key.size, &key.base, base_size);
   hash = keybox_hash(keybox);
   struct hash_entry *entry = _mesa_hash_table_search_pre_hashed(&prog->base.shader_cache[pstage],
                                                                 hash, keybox);

   if (entry) {
      ralloc_free(keybox);
      zm = entry->data;
   } else {
      zm = CALLOC_STRUCT(zink_shader_module);
      if (!zm) {
         ralloc_free(keybox);
         return NULL;
      }
      mod = zink_shader_compile(zink_screen(ctx->base.screen), zs, prog->nir[stage], &key);
      if (!mod) {
         ralloc_free(keybox);
         FREE(zm);
         return NULL;
      }
      zm->shader = mod;

      _mesa_hash_table_insert_pre_hashed(&prog->base.shader_cache[pstage], hash, keybox, zm);
      if (key.is_default_variant) {
         /* previously returned */
         *default_zm = zm;
         zm->default_variant = true;
         prog->default_variant_key[pstage] = keybox;
      }
   }
   return zm;
}

static void
zink_destroy_shader_module(struct zink_screen *screen, struct zink_shader_module *zm)
{
   vkDestroyShaderModule(screen->dev, zm->shader, NULL);
   free(zm);
}

static void
destroy_shader_cache(struct zink_screen *screen, struct hash_table *sc)
{
   hash_table_foreach(sc, entry) {
      struct zink_shader_module *zm = entry->data;
      zink_destroy_shader_module(screen, zm);
   }
}

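/* (re)resolve the shader modules for all gfx stages flagged dirty, then
 * rehash the module array so pipeline lookup notices the change
 */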
static void
update_shader_modules(struct zink_context *ctx, struct zink_shader *stages[ZINK_SHADER_COUNT], struct zink_gfx_program *prog, bool disallow_reuse)
{
   struct zink_shader *dirty[ZINK_SHADER_COUNT] = {NULL};

   unsigned gfx_bits = u_bit_consecutive(PIPE_SHADER_VERTEX, 5);
   unsigned dirty_shader_stages = ctx->dirty_shader_stages & gfx_bits;
   if (!dirty_shader_stages)
      return;
   /* we need to map pipe_shader_type -> gl_shader_stage so we can ensure that we're compiling
    * the shaders in pipeline order and have builtin input/output locations match up after being compacted
    */
   while (dirty_shader_stages) {
      unsigned type = u_bit_scan(&dirty_shader_stages);
      dirty[tgsi_processor_to_shader_stage(type)] = stages[type];
   }

   for (int i = 0; i < ZINK_SHADER_COUNT; ++i) {
      /* we need to iterate over the stages in pipeline-order here */
      enum pipe_shader_type type = pipe_shader_type_from_mesa(i);
      assert(type < ZINK_SHADER_COUNT);
      if (dirty[i] || (stages[type] && !prog->modules[type])) {
         struct zink_shader_module *zm;
         zm = get_shader_module_for_stage(ctx, dirty[i] ? dirty[i] : stages[type], prog);
         prog->modules[type] = zm;
         ctx->gfx_pipeline_state.combined_dirty |= zm->shader != ctx->gfx_pipeline_state.modules[type];
         ctx->gfx_pipeline_state.modules[type] = zm->shader;
      } else if (!stages[type]) {
         ctx->gfx_pipeline_state.combined_dirty |= ctx->gfx_pipeline_state.modules[type] != VK_NULL_HANDLE;
         ctx->gfx_pipeline_state.modules[type] = VK_NULL_HANDLE;
      }
   }
   ctx->gfx_pipeline_state.module_hash = _mesa_hash_data(ctx->gfx_pipeline_state.modules, sizeof(ctx->gfx_pipeline_state.modules));
   unsigned clean = u_bit_consecutive(PIPE_SHADER_VERTEX, 5);
   ctx->dirty_shader_stages &= ~clean;
}

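/* pipeline-state hashing: all members up to 'hash' are hashed directly; the
 * depth/stencil/alpha state pointer only contributes when
 * EXT_extended_dynamic_state is unavailable, since otherwise that state is
 * set dynamically at draw time
 */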
static uint32_t
hash_gfx_pipeline_state(const void *key)
{
   const struct zink_gfx_pipeline_state *state = key;
   uint32_t hash = _mesa_hash_data(key, offsetof(struct zink_gfx_pipeline_state, hash));
   if (state->have_EXT_extended_dynamic_state)
      return hash;
   return XXH32(&state->depth_stencil_alpha_state, sizeof(void*), hash);
}

static bool
equals_gfx_pipeline_state(const void *a, const void *b)
{
   const struct zink_gfx_pipeline_state *sa = a;
   const struct zink_gfx_pipeline_state *sb = b;
   if (!sa->have_EXT_extended_dynamic_state) {
      if (sa->vertex_buffers_enabled_mask != sb->vertex_buffers_enabled_mask)
         return false;
      /* if we don't have dynamic states, we have to compare the enabled vertex buffer bindings */
      uint32_t mask_a = sa->vertex_buffers_enabled_mask;
      uint32_t mask_b = sb->vertex_buffers_enabled_mask;
      while (mask_a || mask_b) {
         unsigned idx_a = u_bit_scan(&mask_a);
         unsigned idx_b = u_bit_scan(&mask_b);
         if (sa->vertex_strides[idx_a] != sb->vertex_strides[idx_b])
            return false;
      }
      if (sa->front_face != sb->front_face)
         return false;
      /* guard the memcmp on the dsa pointers themselves; the original
       * 'sa && sb' check was always true and could memcmp through NULL
       */
      if (!!sa->depth_stencil_alpha_state != !!sb->depth_stencil_alpha_state ||
          (sa->depth_stencil_alpha_state && sb->depth_stencil_alpha_state &&
           memcmp(sa->depth_stencil_alpha_state, sb->depth_stencil_alpha_state, sizeof(struct zink_depth_stencil_alpha_hw_state))))
         return false;
   }
   return !memcmp(sa->modules, sb->modules, sizeof(sa->modules)) &&
          !memcmp(a, b, offsetof(struct zink_gfx_pipeline_state, hash));
}

void
zink_update_gfx_program(struct zink_context *ctx, struct zink_gfx_program *prog)
{
   update_shader_modules(ctx, ctx->gfx_stages, prog, true);
}

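/* create the VkPipelineLayout for a program: descriptor set layouts plus the
 * push constant ranges zink reserves (a compute range for CL-style kernels,
 * and for gfx a vertex range for draw parameters and a tess-control range
 * for default tess levels)
 */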
VkPipelineLayout
zink_pipeline_layout_create(struct zink_screen *screen, struct zink_program *pg)
{
   VkPipelineLayoutCreateInfo plci = {0};
   plci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;

   plci.pSetLayouts = pg->dsl;
   plci.setLayoutCount = pg->num_dsl;

   VkPushConstantRange pcr[2] = {0};
   if (pg->is_compute) {
      if (((struct zink_compute_program*)pg)->shader->nir->info.stage == MESA_SHADER_KERNEL) {
         pcr[0].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
         pcr[0].offset = 0;
         pcr[0].size = sizeof(struct zink_cs_push_constant);
         plci.pushConstantRangeCount = 1;
      }
   } else {
      pcr[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
      pcr[0].offset = offsetof(struct zink_gfx_push_constant, draw_mode_is_indexed);
      pcr[0].size = 2 * sizeof(unsigned);
      pcr[1].stageFlags = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
      pcr[1].offset = offsetof(struct zink_gfx_push_constant, default_inner_level);
      pcr[1].size = sizeof(float) * 6;
      plci.pushConstantRangeCount = 2;
   }
   plci.pPushConstantRanges = &pcr[0];

   VkPipelineLayout layout;
   if (vkCreatePipelineLayout(screen->dev, &plci, NULL, &layout) != VK_SUCCESS) {
      debug_printf("vkCreatePipelineLayout failed!\n");
      return VK_NULL_HANDLE;
   }

   return layout;
}

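/* clone each stage's nir into the program and let the compiler match up
 * producer/consumer IO locations in pipeline order; the outer loop jumps
 * directly to the consumer once one is found
 */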
static void
assign_io(struct zink_gfx_program *prog, struct zink_shader *stages[ZINK_SHADER_COUNT])
{
   struct zink_shader *shaders[PIPE_SHADER_TYPES];

   /* build array in pipeline order */
   for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++)
      shaders[tgsi_processor_to_shader_stage(i)] = stages[i];

   for (unsigned i = 0; i < MESA_SHADER_FRAGMENT;) {
      nir_shader *producer = shaders[i]->nir;
      for (unsigned j = i + 1; j < ZINK_SHADER_COUNT; i++, j++) {
         struct zink_shader *consumer = shaders[j];
         if (!consumer)
            continue;
         if (!prog->nir[producer->info.stage])
            prog->nir[producer->info.stage] = nir_shader_clone(prog, producer);
         if (!prog->nir[j])
            prog->nir[j] = nir_shader_clone(prog, consumer->nir);
         zink_compiler_assign_io(prog->nir[producer->info.stage], prog->nir[j]);
         i = j;
         break;
      }
   }
}

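/* create a gfx program from a full set of bound shaders: set up per-stage
 * shader caches, generate a TCS when a TES is present without one, compile
 * the initial modules, and allocate the per-topology pipeline tables
 */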
struct zink_gfx_program *
zink_create_gfx_program(struct zink_context *ctx,
                        struct zink_shader *stages[ZINK_SHADER_COUNT])
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_gfx_program *prog = rzalloc(NULL, struct zink_gfx_program);
   if (!prog)
      goto fail;

   pipe_reference_init(&prog->base.reference, 1);

   for (int i = 0; i < ZINK_SHADER_COUNT; ++i) {
      if (stages[i]) {
         _mesa_hash_table_init(&prog->base.shader_cache[i], prog, keybox_hash, keybox_equals);
         prog->shaders[i] = stages[i];
         prog->stages_present |= BITFIELD_BIT(i);
      }
   }
   if (stages[PIPE_SHADER_TESS_EVAL] && !stages[PIPE_SHADER_TESS_CTRL]) {
      prog->shaders[PIPE_SHADER_TESS_EVAL]->generated =
      prog->shaders[PIPE_SHADER_TESS_CTRL] =
         zink_shader_tcs_create(ctx, stages[PIPE_SHADER_VERTEX]);
      _mesa_hash_table_init(&prog->base.shader_cache[PIPE_SHADER_TESS_CTRL], prog, keybox_hash, keybox_equals);
      prog->stages_present |= BITFIELD_BIT(PIPE_SHADER_TESS_CTRL);
   }

   /* always force shader creation during init */
   ctx->dirty_shader_stages |= prog->stages_present;
   assign_io(prog, prog->shaders);

   update_shader_modules(ctx, prog->shaders, prog, false);
   prog->default_variant_hash = ctx->gfx_pipeline_state.module_hash;

   for (int i = 0; i < ARRAY_SIZE(prog->pipelines); ++i) {
      prog->pipelines[i] = _mesa_hash_table_create(NULL,
                                                   NULL,
                                                   equals_gfx_pipeline_state);
      if (!prog->pipelines[i])
         goto fail;
   }

   if (stages[PIPE_SHADER_GEOMETRY])
      prog->last_vertex_stage = stages[PIPE_SHADER_GEOMETRY];
   else if (stages[PIPE_SHADER_TESS_EVAL])
      prog->last_vertex_stage = stages[PIPE_SHADER_TESS_EVAL];
   else
      prog->last_vertex_stage = stages[PIPE_SHADER_VERTEX];

   struct mesa_sha1 sctx;
   _mesa_sha1_init(&sctx);
   for (int i = 0; i < ZINK_SHADER_COUNT; ++i) {
      if (prog->modules[i]) {
         _mesa_set_add(prog->shaders[i]->programs, prog);
         zink_gfx_program_reference(screen, NULL, prog);
         _mesa_sha1_update(&sctx, prog->shaders[i]->base.sha1, sizeof(prog->shaders[i]->base.sha1));
      }
   }
   _mesa_sha1_final(&sctx, prog->base.sha1);
   p_atomic_dec(&prog->base.reference.count);

   if (!screen->descriptor_program_init(ctx, &prog->base))
      goto fail;

   zink_screen_get_pipeline_cache(screen, &prog->base);
   return prog;

fail:
   if (prog)
      zink_destroy_gfx_program(screen, prog);
   return NULL;
}

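/* compute pipeline-state hashing: when the shader has no compile-time
 * workgroup size, the launch grid's block dimensions are folded into the
 * hash as well
 */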
static uint32_t
hash_compute_pipeline_state(const void *key)
{
   const struct zink_compute_pipeline_state *state = key;
   uint32_t hash = _mesa_hash_data(state, offsetof(struct zink_compute_pipeline_state, hash));
   if (state->use_local_size)
      hash = XXH32(&state->local_size[0], sizeof(state->local_size), hash);
   return hash;
}

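/* track whether the dispatch-time block size affects the pipeline: only
 * shaders without a fixed workgroup_size depend on it
 */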
void
zink_program_update_compute_pipeline_state(struct zink_context *ctx, struct zink_compute_program *comp, const uint block[3])
{
   struct zink_shader *zs = comp->shader;
   bool use_local_size = !(zs->nir->info.workgroup_size[0] ||
                           zs->nir->info.workgroup_size[1] ||
                           zs->nir->info.workgroup_size[2]);
   if (ctx->compute_pipeline_state.use_local_size != use_local_size)
      ctx->compute_pipeline_state.dirty = true;
   ctx->compute_pipeline_state.use_local_size = use_local_size;

   if (ctx->compute_pipeline_state.use_local_size) {
      for (int i = 0; i < ARRAY_SIZE(ctx->compute_pipeline_state.local_size); i++) {
         if (ctx->compute_pipeline_state.local_size[i] != block[i])
            ctx->compute_pipeline_state.dirty = true;
         ctx->compute_pipeline_state.local_size[i] = block[i];
      }
   } else
      ctx->compute_pipeline_state.local_size[0] =
      ctx->compute_pipeline_state.local_size[1] =
      ctx->compute_pipeline_state.local_size[2] = 0;
}

static bool
equals_compute_pipeline_state(const void *a, const void *b)
{
   return memcmp(a, b, offsetof(struct zink_compute_pipeline_state, hash)) == 0;
}

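/* compute programs have a single shader and (for now) no variants, so the
 * module is compiled up front and the program registered with its shader
 */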
struct zink_compute_program *
zink_create_compute_program(struct zink_context *ctx, struct zink_shader *shader)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_compute_program *comp = rzalloc(NULL, struct zink_compute_program);
   if (!comp)
      goto fail;

   pipe_reference_init(&comp->base.reference, 1);
   comp->base.is_compute = true;
   /* TODO: cs shader keys placeholder for now */
   _mesa_hash_table_init(&comp->base.shader_cache[0], comp, _mesa_hash_pointer, _mesa_key_pointer_equal);

   comp->module = CALLOC_STRUCT(zink_shader_module);
   assert(comp->module);
   comp->module->shader = zink_shader_compile(screen, shader, shader->nir, NULL);
   assert(comp->module->shader);
   _mesa_hash_table_insert(&comp->base.shader_cache[0], shader, comp->module);

   ctx->dirty_shader_stages &= ~(1 << PIPE_SHADER_COMPUTE);

   comp->pipelines = _mesa_hash_table_create(NULL, hash_compute_pipeline_state,
                                             equals_compute_pipeline_state);

   _mesa_set_add(shader->programs, comp);
   comp->shader = shader;
   memcpy(comp->base.sha1, shader->base.sha1, sizeof(shader->base.sha1));

   if (!screen->descriptor_program_init(ctx, &comp->base))
      goto fail;

   zink_screen_get_pipeline_cache(screen, &comp->base);
   return comp;

fail:
   if (comp)
      zink_destroy_compute_program(screen, comp);
   return NULL;
}

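/* return the bitmask of slots that the shader bound to a stage actually uses
 * for a given descriptor type, so unused bindings can be skipped
 */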
uint32_t
zink_program_get_descriptor_usage(struct zink_context *ctx, enum pipe_shader_type stage, enum zink_descriptor_type type)
{
   struct zink_shader *zs = NULL;
   switch (stage) {
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_TESS_CTRL:
   case PIPE_SHADER_TESS_EVAL:
   case PIPE_SHADER_GEOMETRY:
   case PIPE_SHADER_FRAGMENT:
      zs = ctx->gfx_stages[stage];
      break;
   case PIPE_SHADER_COMPUTE: {
      zs = ctx->compute_stage;
      break;
   }
   default:
      unreachable("unknown shader type");
   }
   if (!zs)
      return 0;
   switch (type) {
   case ZINK_DESCRIPTOR_TYPE_UBO:
      return zs->ubos_used;
   case ZINK_DESCRIPTOR_TYPE_SSBO:
      return zs->ssbos_used;
   case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
      return BITSET_TEST_RANGE(zs->nir->info.textures_used, 0, PIPE_MAX_SAMPLERS - 1);
   case ZINK_DESCRIPTOR_TYPE_IMAGE:
      return zs->nir->info.images_used;
   default:
      unreachable("unknown descriptor type!");
   }
   return 0;
}

bool
zink_program_descriptor_is_buffer(struct zink_context *ctx, enum pipe_shader_type stage, enum zink_descriptor_type type, unsigned i)
{
   struct zink_shader *zs = NULL;
   switch (stage) {
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_TESS_CTRL:
   case PIPE_SHADER_TESS_EVAL:
   case PIPE_SHADER_GEOMETRY:
   case PIPE_SHADER_FRAGMENT:
      zs = ctx->gfx_stages[stage];
      break;
   case PIPE_SHADER_COMPUTE: {
      zs = ctx->compute_stage;
      break;
   }
   default:
      unreachable("unknown shader type");
   }
   if (!zs)
      return false;
   return zink_shader_descriptor_is_buffer(zs, type, i);
}

static unsigned
get_num_bindings(struct zink_shader *zs, enum zink_descriptor_type type)
{
   switch (type) {
   case ZINK_DESCRIPTOR_TYPE_UBO:
   case ZINK_DESCRIPTOR_TYPE_SSBO:
      return zs->num_bindings[type];
   default:
      break;
   }
   unsigned num_bindings = 0;
   for (int i = 0; i < zs->num_bindings[type]; i++)
      num_bindings += zs->bindings[type][i].size;
   return num_bindings;
}

unsigned
zink_program_num_bindings_typed(const struct zink_program *pg, enum zink_descriptor_type type, bool is_compute)
{
   unsigned num_bindings = 0;
   if (is_compute) {
      struct zink_compute_program *comp = (void*)pg;
      return get_num_bindings(comp->shader, type);
   }
   struct zink_gfx_program *prog = (void*)pg;
   for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
      if (prog->shaders[i])
         num_bindings += get_num_bindings(prog->shaders[i], type);
   }
   return num_bindings;
}

unsigned
zink_program_num_bindings(const struct zink_program *pg, bool is_compute)
{
   unsigned num_bindings = 0;
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++)
      num_bindings += zink_program_num_bindings_typed(pg, i, is_compute);
   return num_bindings;
}

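/* program teardown: unregister from each shader, destroy all cached
 * pipelines and shader modules, then release descriptor state and the
 * pipeline cache
 */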
void
zink_destroy_gfx_program(struct zink_screen *screen,
                         struct zink_gfx_program *prog)
{
   if (prog->base.layout)
      vkDestroyPipelineLayout(screen->dev, prog->base.layout, NULL);

   for (int i = 0; i < ZINK_SHADER_COUNT; ++i) {
      if (prog->shaders[i]) {
         _mesa_set_remove_key(prog->shaders[i]->programs, prog);
         prog->shaders[i] = NULL;
         destroy_shader_cache(screen, &prog->base.shader_cache[i]);
      }
   }

   for (int i = 0; i < ARRAY_SIZE(prog->pipelines); ++i) {
      hash_table_foreach(prog->pipelines[i], entry) {
         struct gfx_pipeline_cache_entry *pc_entry = entry->data;

         vkDestroyPipeline(screen->dev, pc_entry->pipeline, NULL);
         free(pc_entry);
      }
      _mesa_hash_table_destroy(prog->pipelines[i], NULL);
   }
   if (prog->base.pipeline_cache)
      vkDestroyPipelineCache(screen->dev, prog->base.pipeline_cache, NULL);
   screen->descriptor_program_deinit(screen, &prog->base);

   ralloc_free(prog);
}

void
zink_destroy_compute_program(struct zink_screen *screen,
                             struct zink_compute_program *comp)
{
   if (comp->base.layout)
      vkDestroyPipelineLayout(screen->dev, comp->base.layout, NULL);

   if (comp->shader)
      _mesa_set_remove_key(comp->shader->programs, comp);

   hash_table_foreach(comp->pipelines, entry) {
      struct compute_pipeline_cache_entry *pc_entry = entry->data;

      vkDestroyPipeline(screen->dev, pc_entry->pipeline, NULL);
      free(pc_entry);
   }
   _mesa_hash_table_destroy(comp->pipelines, NULL);
   destroy_shader_cache(screen, &comp->base.shader_cache[0]);
   if (comp->base.pipeline_cache)
      vkDestroyPipelineCache(screen->dev, comp->base.pipeline_cache, NULL);
   screen->descriptor_program_deinit(screen, &comp->base);

   ralloc_free(comp);
}

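/* 1:1 mapping from gallium primitive types to Vulkan topologies; any mode
 * not listed here is expected to have been lowered before reaching zink
 * (hence the unreachable default)
 */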
static VkPrimitiveTopology
primitive_topology(enum pipe_prim_type mode)
{
   switch (mode) {
   case PIPE_PRIM_POINTS:
      return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;

   case PIPE_PRIM_LINES:
      return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;

   case PIPE_PRIM_LINE_STRIP:
      return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;

   case PIPE_PRIM_TRIANGLES:
      return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;

   case PIPE_PRIM_TRIANGLE_STRIP:
      return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;

   case PIPE_PRIM_TRIANGLE_FAN:
      return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;

   case PIPE_PRIM_LINE_STRIP_ADJACENCY:
      return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY;

   case PIPE_PRIM_LINES_ADJACENCY:
      return VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY;

   case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY:
      return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY;

   case PIPE_PRIM_TRIANGLES_ADJACENCY:
      return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY;

   case PIPE_PRIM_PATCHES:
      return VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;

   default:
      unreachable("unexpected enum pipe_prim_type");
   }
}

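/* pipeline lookup is layered: 'dirty' rehashes the raw state, 'combined_dirty'
 * folds in the shader module hash, and 'vertex_state_dirty' folds in vertex
 * buffer strides when EXT_extended_dynamic_state can't supply them
 * dynamically; only then is the per-topology hash table searched
 */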
VkPipeline
zink_get_gfx_pipeline(struct zink_context *ctx,
                      struct zink_gfx_program *prog,
                      struct zink_gfx_pipeline_state *state,
                      enum pipe_prim_type mode)
{
   if (!state->dirty && !state->combined_dirty && !state->vertex_state_dirty && mode == state->mode)
      return state->pipeline;

   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VkPrimitiveTopology vkmode = primitive_topology(mode);
   assert(vkmode <= ARRAY_SIZE(prog->pipelines));

   struct hash_entry *entry = NULL;

   if (state->dirty) {
      state->vertex_state_dirty = state->combined_dirty = true;
      state->hash = hash_gfx_pipeline_state(state);
      state->dirty = false;
   }
   if (state->combined_dirty) {
      state->vertex_state_dirty = true;
      state->combined_hash = XXH32(&state->module_hash, sizeof(uint32_t), state->hash);
      state->combined_dirty = false;
   }
   if (state->vertex_state_dirty) {
      uint32_t hash = state->combined_hash;
      if (!state->have_EXT_extended_dynamic_state) {
         /* if we don't have dynamic states, we have to hash the enabled vertex buffer bindings */
         uint32_t vertex_buffers_enabled_mask = state->vertex_buffers_enabled_mask;
         hash = XXH32(&vertex_buffers_enabled_mask, sizeof(uint32_t), hash);

         for (unsigned i = 0; i < state->element_state->num_bindings; i++) {
            struct pipe_vertex_buffer *vb = ctx->vertex_buffers + ctx->element_state->binding_map[i];
            state->vertex_strides[i] = vb->buffer.resource ? vb->stride : 0;
            hash = XXH32(&state->vertex_strides[i], sizeof(uint32_t), hash);
         }
      }
      state->final_hash = XXH32(&state->element_state, sizeof(void*), hash);
      state->vertex_state_dirty = false;
   }
   entry = _mesa_hash_table_search_pre_hashed(prog->pipelines[vkmode], state->final_hash, state);

   if (!entry) {
      util_queue_fence_wait(&prog->base.cache_fence);
      VkPipeline pipeline = zink_create_gfx_pipeline(screen, prog,
                                                     state, vkmode);
      if (pipeline == VK_NULL_HANDLE)
         return VK_NULL_HANDLE;

      struct gfx_pipeline_cache_entry *pc_entry = CALLOC_STRUCT(gfx_pipeline_cache_entry);
      if (!pc_entry)
         return VK_NULL_HANDLE;

      memcpy(&pc_entry->state, state, sizeof(*state));
      pc_entry->pipeline = pipeline;

      entry = _mesa_hash_table_insert_pre_hashed(prog->pipelines[vkmode], state->final_hash, state, pc_entry);
      assert(entry);
   }

   struct gfx_pipeline_cache_entry *cache_entry = entry->data;
   state->pipeline = cache_entry->pipeline;
   state->mode = mode;
   return state->pipeline;
}

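/* the compute path is simpler: a single hash over the compute pipeline state
 * and one pipeline table per program
 */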
VkPipeline
zink_get_compute_pipeline(struct zink_screen *screen,
                          struct zink_compute_program *comp,
                          struct zink_compute_pipeline_state *state)
{
   struct hash_entry *entry = NULL;

   if (!state->dirty)
      return state->pipeline;
   /* the early-out above guarantees the state is dirty here: rehash before
    * the cache lookup
    */
   state->hash = hash_compute_pipeline_state(state);
   state->dirty = false;
   entry = _mesa_hash_table_search_pre_hashed(comp->pipelines, state->hash, state);

   if (!entry) {
      util_queue_fence_wait(&comp->base.cache_fence);
      VkPipeline pipeline = zink_create_compute_pipeline(screen, comp, state);

      if (pipeline == VK_NULL_HANDLE)
         return VK_NULL_HANDLE;

      struct compute_pipeline_cache_entry *pc_entry = CALLOC_STRUCT(compute_pipeline_cache_entry);
      if (!pc_entry)
         return VK_NULL_HANDLE;

      memcpy(&pc_entry->state, state, sizeof(*state));
      pc_entry->pipeline = pipeline;

      entry = _mesa_hash_table_insert_pre_hashed(comp->pipelines, state->hash, state, pc_entry);
      assert(entry);
   }

   struct compute_pipeline_cache_entry *cache_entry = entry->data;
   state->pipeline = cache_entry->pipeline;
   return state->pipeline;
}

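/* common bind helper: store the shader on the context, mark the stage dirty,
 * and track which bound stages have uniforms that can be inlined
 */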
static inline void
bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
           struct zink_shader *shader)
{
   if (stage == PIPE_SHADER_COMPUTE)
      ctx->compute_stage = shader;
   else
      ctx->gfx_stages[stage] = shader;
   ctx->dirty_shader_stages |= 1 << stage;
   if (shader && shader->nir->info.num_inlinable_uniforms)
      ctx->shader_has_inlinable_uniforms_mask |= 1 << stage;
   else
      ctx->shader_has_inlinable_uniforms_mask &= ~(1 << stage);
}

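/* the bind_*_state hooks below also keep ctx->last_vertex_stage pointing at
 * whichever of GS > TES > VS is currently the final pre-rasterization stage
 */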
static void
zink_bind_vs_state(struct pipe_context *pctx,
                   void *cso)
{
   struct zink_context *ctx = zink_context(pctx);
   bind_stage(ctx, PIPE_SHADER_VERTEX, cso);
   if (!ctx->gfx_stages[PIPE_SHADER_GEOMETRY] &&
       !ctx->gfx_stages[PIPE_SHADER_TESS_EVAL]) {
      ctx->last_vertex_stage = cso;
   }
}

static void
zink_bind_fs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
}

static void
zink_bind_gs_state(struct pipe_context *pctx,
                   void *cso)
{
   struct zink_context *ctx = zink_context(pctx);
   if (!!ctx->gfx_stages[PIPE_SHADER_GEOMETRY] != !!cso)
      ctx->dirty_shader_stages |= BITFIELD_BIT(PIPE_SHADER_VERTEX) |
                                  BITFIELD_BIT(PIPE_SHADER_TESS_EVAL);
   bind_stage(ctx, PIPE_SHADER_GEOMETRY, cso);
   if (cso)
      ctx->last_vertex_stage = cso;
   else {
      if (ctx->gfx_stages[PIPE_SHADER_TESS_EVAL])
         ctx->last_vertex_stage = ctx->gfx_stages[PIPE_SHADER_TESS_EVAL];
      else
         ctx->last_vertex_stage = ctx->gfx_stages[PIPE_SHADER_VERTEX];
   }
}

static void
zink_bind_tcs_state(struct pipe_context *pctx,
                    void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_TESS_CTRL, cso);
}

static void
zink_bind_tes_state(struct pipe_context *pctx,
                    void *cso)
{
   struct zink_context *ctx = zink_context(pctx);
   if (!!ctx->gfx_stages[PIPE_SHADER_TESS_EVAL] != !!cso) {
      if (!cso) {
         /* if unsetting a TES that uses a generated TCS, ensure the TCS is unset */
         if (ctx->gfx_stages[PIPE_SHADER_TESS_EVAL]->generated)
            ctx->gfx_stages[PIPE_SHADER_TESS_CTRL] = NULL;
      }
      ctx->dirty_shader_stages |= BITFIELD_BIT(PIPE_SHADER_VERTEX);
   }
   bind_stage(ctx, PIPE_SHADER_TESS_EVAL, cso);
   if (!ctx->gfx_stages[PIPE_SHADER_GEOMETRY]) {
      if (cso)
         ctx->last_vertex_stage = cso;
      else
         ctx->last_vertex_stage = ctx->gfx_stages[PIPE_SHADER_VERTEX];
   }
}

static void *
zink_create_cs_state(struct pipe_context *pctx,
                     const struct pipe_compute_state *shader)
{
   struct nir_shader *nir;
   if (shader->ir_type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->prog);
   else
      nir = (struct nir_shader *)shader->prog;

   return zink_shader_create(zink_screen(pctx->screen), nir, NULL);
}

static void
zink_bind_cs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_COMPUTE, cso);
}

void
zink_delete_shader_state(struct pipe_context *pctx, void *cso)
{
   zink_shader_free(zink_context(pctx), cso);
}

void *
zink_create_gfx_shader_state(struct pipe_context *pctx, const struct pipe_shader_state *shader)
{
   nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_shader_create(zink_screen(pctx->screen), nir, &shader->stream_output);
}

static void
zink_delete_cached_shader_state(struct pipe_context *pctx, void *cso)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   util_shader_reference(pctx, &screen->shaders, &cso, NULL);
}

static void *
zink_create_cached_shader_state(struct pipe_context *pctx, const struct pipe_shader_state *shader)
{
   bool cache_hit;
   struct zink_screen *screen = zink_screen(pctx->screen);
   return util_live_shader_cache_get(pctx, &screen->shaders, shader, &cache_hit);
}

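/* wire up the gallium shader CSO hooks; gfx stages go through the live
 * shader cache, which dedupes identical shaders, while compute state is
 * created directly
 */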
void
zink_program_init(struct zink_context *ctx)
{
   ctx->base.create_vs_state = zink_create_cached_shader_state;
   ctx->base.bind_vs_state = zink_bind_vs_state;
   ctx->base.delete_vs_state = zink_delete_cached_shader_state;

   ctx->base.create_fs_state = zink_create_cached_shader_state;
   ctx->base.bind_fs_state = zink_bind_fs_state;
   ctx->base.delete_fs_state = zink_delete_cached_shader_state;

   ctx->base.create_gs_state = zink_create_cached_shader_state;
   ctx->base.bind_gs_state = zink_bind_gs_state;
   ctx->base.delete_gs_state = zink_delete_cached_shader_state;

   ctx->base.create_tcs_state = zink_create_cached_shader_state;
   ctx->base.bind_tcs_state = zink_bind_tcs_state;
   ctx->base.delete_tcs_state = zink_delete_cached_shader_state;

   ctx->base.create_tes_state = zink_create_cached_shader_state;
   ctx->base.bind_tes_state = zink_bind_tes_state;
   ctx->base.delete_tes_state = zink_delete_cached_shader_state;

   ctx->base.create_compute_state = zink_create_cs_state;
   ctx->base.bind_compute_state = zink_bind_cs_state;
   ctx->base.delete_compute_state = zink_delete_shader_state;
}