GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/zink/zink_descriptors.c
1
/*
2
* Copyright © 2020 Mike Blumenkrantz
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*
23
* Authors:
24
* Mike Blumenkrantz <[email protected]>
25
*/
26
27
#include "tgsi/tgsi_from_mesa.h"
28
29
30
31
#include "zink_context.h"
32
#include "zink_descriptors.h"
33
#include "zink_program.h"
34
#include "zink_resource.h"
35
#include "zink_screen.h"
36
37
#define XXH_INLINE_ALL
38
#include "util/xxhash.h"
39
40
41
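/* a cache of descriptor sets for a single layout+type: sets are allocated from the
 * VkDescriptorPool, looked up by descriptor-state key through the in-use (desc_sets)
 * and free (free_desc_sets) tables, and spares sit in alloc_desc_sets; mtx guards all of it
 */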
struct zink_descriptor_pool {
42
struct pipe_reference reference;
43
enum zink_descriptor_type type;
44
struct hash_table *desc_sets;
45
struct hash_table *free_desc_sets;
46
struct util_dynarray alloc_desc_sets;
47
VkDescriptorPool descpool;
48
struct zink_descriptor_pool_key key;
49
unsigned num_resources;
50
unsigned num_sets_allocated;
51
simple_mtx_t mtx;
52
};
53
54
struct zink_descriptor_set {
55
struct zink_descriptor_pool *pool;
56
struct pipe_reference reference; //incremented for batch usage
57
VkDescriptorSet desc_set;
58
uint32_t hash;
59
bool invalid;
60
bool punted;
61
bool recycled;
62
struct zink_descriptor_state_key key;
63
struct zink_batch_usage *batch_uses;
64
#ifndef NDEBUG
65
/* for extra debug asserts */
66
unsigned num_resources;
67
#endif
68
union {
69
struct zink_resource_object **res_objs;
70
struct {
71
struct zink_descriptor_surface *surfaces;
72
struct zink_sampler_state **sampler_states;
73
};
74
};
75
};
76
77
union zink_program_descriptor_refs {
78
struct zink_resource **res;
79
struct zink_descriptor_surface *dsurf;
80
struct {
81
struct zink_descriptor_surface *dsurf;
82
struct zink_sampler_state **sampler_state;
83
} sampler;
84
};
85
86
struct zink_program_descriptor_data_cached {
87
struct zink_program_descriptor_data base;
88
struct zink_descriptor_pool *pool[ZINK_DESCRIPTOR_TYPES];
89
struct zink_descriptor_set *last_set[ZINK_DESCRIPTOR_TYPES];
90
unsigned num_refs[ZINK_DESCRIPTOR_TYPES];
91
union zink_program_descriptor_refs *refs[ZINK_DESCRIPTOR_TYPES];
92
};
93
94
95
static inline struct zink_program_descriptor_data_cached *
96
pdd_cached(struct zink_program *pg)
97
{
98
return (struct zink_program_descriptor_data_cached*)pg->dd;
99
}
100
101
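/* track a set in the current batch state and take a reference on it;
 * returns false if this batch is already using the set or it couldn't be tracked
 */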
static bool
102
batch_add_desc_set(struct zink_batch *batch, struct zink_descriptor_set *zds)
103
{
104
if (zink_batch_usage_matches(zds->batch_uses, batch->state) ||
105
!batch_ptr_add_usage(batch, batch->state->dd->desc_sets, zds))
106
return false;
107
pipe_reference(NULL, &zds->reference);
108
zink_batch_usage_set(&zds->batch_uses, batch->state);
109
return true;
110
}
111
112
static void
113
debug_describe_zink_descriptor_pool(char *buf, const struct zink_descriptor_pool *ptr)
114
{
115
sprintf(buf, "zink_descriptor_pool");
116
}
117
118
static inline uint32_t
119
get_sampler_view_hash(const struct zink_sampler_view *sampler_view)
120
{
121
if (!sampler_view)
122
return 0;
123
return sampler_view->base.target == PIPE_BUFFER ?
124
sampler_view->buffer_view->hash : sampler_view->image_view->hash;
125
}
126
127
static inline uint32_t
128
get_image_view_hash(const struct zink_image_view *image_view)
129
{
130
if (!image_view || !image_view->base.resource)
131
return 0;
132
return image_view->base.resource->target == PIPE_BUFFER ?
133
image_view->buffer_view->hash : image_view->surface->hash;
134
}
135
136
uint32_t
137
zink_get_sampler_view_hash(struct zink_context *ctx, struct zink_sampler_view *sampler_view, bool is_buffer)
138
{
139
return get_sampler_view_hash(sampler_view) ? get_sampler_view_hash(sampler_view) :
140
(is_buffer ? zink_screen(ctx->base.screen)->null_descriptor_hashes.buffer_view :
141
zink_screen(ctx->base.screen)->null_descriptor_hashes.image_view);
142
}
143
144
uint32_t
145
zink_get_image_view_hash(struct zink_context *ctx, struct zink_image_view *image_view, bool is_buffer)
146
{
147
return get_image_view_hash(image_view) ? get_image_view_hash(image_view) :
148
(is_buffer ? zink_screen(ctx->base.screen)->null_descriptor_hashes.buffer_view :
149
zink_screen(ctx->base.screen)->null_descriptor_hashes.image_view);
150
}
151
152
#ifndef NDEBUG
153
static uint32_t
154
get_descriptor_surface_hash(struct zink_context *ctx, struct zink_descriptor_surface *dsurf)
155
{
156
return dsurf->is_buffer ? (dsurf->bufferview ? dsurf->bufferview->hash : zink_screen(ctx->base.screen)->null_descriptor_hashes.buffer_view) :
157
(dsurf->surface ? dsurf->surface->hash : zink_screen(ctx->base.screen)->null_descriptor_hashes.image_view);
158
}
159
#endif
160
161
static bool
162
desc_state_equal(const void *a, const void *b)
163
{
164
const struct zink_descriptor_state_key *a_k = (void*)a;
165
const struct zink_descriptor_state_key *b_k = (void*)b;
166
167
for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
168
if (a_k->exists[i] != b_k->exists[i])
169
return false;
170
if (a_k->exists[i] && b_k->exists[i] &&
171
a_k->state[i] != b_k->state[i])
172
return false;
173
}
174
return true;
175
}
176
177
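/* combine the valid per-stage states into one lookup hash; the first valid
 * state seeds the hash directly instead of being hashed against zero
 */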
static uint32_t
178
desc_state_hash(const void *key)
179
{
180
const struct zink_descriptor_state_key *d_key = (void*)key;
181
uint32_t hash = 0;
182
bool first = true;
183
for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
184
if (d_key->exists[i]) {
185
if (!first)
186
hash = XXH32(&d_key->state[i], sizeof(uint32_t), hash);
187
else
188
hash = d_key->state[i];
189
first = false;
190
}
191
}
192
return hash;
193
}
194
195
static void
196
pop_desc_set_ref(struct zink_descriptor_set *zds, struct util_dynarray *refs)
197
{
198
size_t size = sizeof(struct zink_descriptor_reference);
199
unsigned num_elements = refs->size / size;
200
for (unsigned i = 0; i < num_elements; i++) {
201
struct zink_descriptor_reference *ref = util_dynarray_element(refs, struct zink_descriptor_reference, i);
202
if (&zds->invalid == ref->invalid) {
203
memcpy(util_dynarray_element(refs, struct zink_descriptor_reference, i),
204
util_dynarray_pop_ptr(refs, struct zink_descriptor_reference), size);
205
break;
206
}
207
}
208
}
209
210
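/* mark a set invalid and drop every resource/surface/sampler reference it holds
 * so those objects no longer point back at this set
 */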
static void
211
descriptor_set_invalidate(struct zink_descriptor_set *zds)
212
{
213
zds->invalid = true;
214
for (unsigned i = 0; i < zds->pool->key.layout->num_descriptors; i++) {
215
switch (zds->pool->type) {
216
case ZINK_DESCRIPTOR_TYPE_UBO:
217
case ZINK_DESCRIPTOR_TYPE_SSBO:
218
if (zds->res_objs[i])
219
pop_desc_set_ref(zds, &zds->res_objs[i]->desc_set_refs.refs);
220
zds->res_objs[i] = NULL;
221
break;
222
case ZINK_DESCRIPTOR_TYPE_IMAGE:
223
if (zds->surfaces[i].is_buffer) {
224
if (zds->surfaces[i].bufferview)
225
pop_desc_set_ref(zds, &zds->surfaces[i].bufferview->desc_set_refs.refs);
226
zds->surfaces[i].bufferview = NULL;
227
} else {
228
if (zds->surfaces[i].surface)
229
pop_desc_set_ref(zds, &zds->surfaces[i].surface->desc_set_refs.refs);
230
zds->surfaces[i].surface = NULL;
231
}
232
break;
233
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
234
if (zds->surfaces[i].is_buffer) {
235
if (zds->surfaces[i].bufferview)
236
pop_desc_set_ref(zds, &zds->surfaces[i].bufferview->desc_set_refs.refs);
237
zds->surfaces[i].bufferview = NULL;
238
} else {
239
if (zds->surfaces[i].surface)
240
pop_desc_set_ref(zds, &zds->surfaces[i].surface->desc_set_refs.refs);
241
zds->surfaces[i].surface = NULL;
242
}
243
if (zds->sampler_states[i])
244
pop_desc_set_ref(zds, &zds->sampler_states[i]->desc_set_refs.refs);
245
zds->sampler_states[i] = NULL;
246
break;
247
default:
248
break;
249
}
250
}
251
}
252
253
#ifndef NDEBUG
254
static void
255
descriptor_pool_clear(struct hash_table *ht)
256
{
257
_mesa_hash_table_clear(ht, NULL);
258
}
259
#endif
260
261
static void
262
descriptor_pool_free(struct zink_screen *screen, struct zink_descriptor_pool *pool)
263
{
264
if (!pool)
265
return;
266
if (pool->descpool)
267
vkDestroyDescriptorPool(screen->dev, pool->descpool, NULL);
268
269
simple_mtx_lock(&pool->mtx);
270
#ifndef NDEBUG
271
if (pool->desc_sets)
272
descriptor_pool_clear(pool->desc_sets);
273
if (pool->free_desc_sets)
274
descriptor_pool_clear(pool->free_desc_sets);
275
#endif
276
if (pool->desc_sets)
277
_mesa_hash_table_destroy(pool->desc_sets, NULL);
278
if (pool->free_desc_sets)
279
_mesa_hash_table_destroy(pool->free_desc_sets, NULL);
280
281
simple_mtx_unlock(&pool->mtx);
282
util_dynarray_fini(&pool->alloc_desc_sets);
283
simple_mtx_destroy(&pool->mtx);
284
ralloc_free(pool);
285
}
286
287
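/* create a pool for one layout/type: copy the pool key's sizes, set up the in-use
 * and free tables, and create a VkDescriptorPool able to hold ZINK_DEFAULT_MAX_DESCS sets
 */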
static struct zink_descriptor_pool *
288
descriptor_pool_create(struct zink_screen *screen, enum zink_descriptor_type type,
289
struct zink_descriptor_layout_key *layout_key, VkDescriptorPoolSize *sizes, unsigned num_type_sizes)
290
{
291
struct zink_descriptor_pool *pool = rzalloc(NULL, struct zink_descriptor_pool);
292
if (!pool)
293
return NULL;
294
pipe_reference_init(&pool->reference, 1);
295
pool->type = type;
296
pool->key.layout = layout_key;
297
pool->key.num_type_sizes = num_type_sizes;
298
size_t types_size = num_type_sizes * sizeof(VkDescriptorPoolSize);
299
pool->key.sizes = ralloc_size(pool, types_size);
300
if (!pool->key.sizes) {
301
ralloc_free(pool);
302
return NULL;
303
}
304
memcpy(pool->key.sizes, sizes, types_size);
305
simple_mtx_init(&pool->mtx, mtx_plain);
306
for (unsigned i = 0; i < layout_key->num_descriptors; i++) {
307
pool->num_resources += layout_key->bindings[i].descriptorCount;
308
}
309
pool->desc_sets = _mesa_hash_table_create(NULL, desc_state_hash, desc_state_equal);
310
if (!pool->desc_sets)
311
goto fail;
312
313
pool->free_desc_sets = _mesa_hash_table_create(NULL, desc_state_hash, desc_state_equal);
314
if (!pool->free_desc_sets)
315
goto fail;
316
317
util_dynarray_init(&pool->alloc_desc_sets, NULL);
318
319
VkDescriptorPoolCreateInfo dpci = {0};
320
dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
321
dpci.pPoolSizes = sizes;
322
dpci.poolSizeCount = num_type_sizes;
323
dpci.flags = 0;
324
dpci.maxSets = ZINK_DEFAULT_MAX_DESCS;
325
if (vkCreateDescriptorPool(screen->dev, &dpci, 0, &pool->descpool) != VK_SUCCESS) {
326
debug_printf("vkCreateDescriptorPool failed\n");
327
goto fail;
328
}
329
330
return pool;
331
fail:
332
descriptor_pool_free(screen, pool);
333
return NULL;
334
}
335
336
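/* create a VkDescriptorSetLayout; in lazy mode a binding-flags struct is chained
 * (and the push-set type gets the KHR push-descriptor flag), and the layout is
 * validated with GetDescriptorSetLayoutSupport when the driver provides it
 */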
static VkDescriptorSetLayout
337
descriptor_layout_create(struct zink_screen *screen, enum zink_descriptor_type t, VkDescriptorSetLayoutBinding *bindings, unsigned num_bindings)
338
{
339
VkDescriptorSetLayout dsl;
340
VkDescriptorSetLayoutCreateInfo dcslci = {0};
341
dcslci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
342
dcslci.pNext = NULL;
343
VkDescriptorSetLayoutBindingFlagsCreateInfo fci = {0};
344
VkDescriptorBindingFlags flags[ZINK_MAX_DESCRIPTORS_PER_TYPE];
345
if (screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY) {
346
dcslci.pNext = &fci;
347
if (t == ZINK_DESCRIPTOR_TYPES)
348
dcslci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
349
fci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO;
350
fci.bindingCount = num_bindings;
351
fci.pBindingFlags = flags;
352
for (unsigned i = 0; i < num_bindings; i++) {
353
flags[i] = 0;
354
}
355
}
356
dcslci.bindingCount = num_bindings;
357
dcslci.pBindings = bindings;
358
VkDescriptorSetLayoutSupport supp;
359
supp.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT;
360
supp.pNext = NULL;
361
supp.supported = VK_FALSE;
362
if (screen->vk.GetDescriptorSetLayoutSupport) {
363
screen->vk.GetDescriptorSetLayoutSupport(screen->dev, &dcslci, &supp);
364
if (supp.supported == VK_FALSE) {
365
debug_printf("vkGetDescriptorSetLayoutSupport claims layout is unsupported\n");
366
return VK_NULL_HANDLE;
367
}
368
}
369
if (vkCreateDescriptorSetLayout(screen->dev, &dcslci, 0, &dsl) != VK_SUCCESS)
370
debug_printf("vkCreateDescriptorSetLayout failed\n");
371
return dsl;
372
}
373
374
static uint32_t
375
hash_descriptor_layout(const void *key)
376
{
377
uint32_t hash = 0;
378
const struct zink_descriptor_layout_key *k = key;
379
hash = XXH32(&k->num_descriptors, sizeof(unsigned), hash);
380
hash = XXH32(k->bindings, k->num_descriptors * sizeof(VkDescriptorSetLayoutBinding), hash);
381
382
return hash;
383
}
384
385
static bool
386
equals_descriptor_layout(const void *a, const void *b)
387
{
388
const struct zink_descriptor_layout_key *a_k = a;
389
const struct zink_descriptor_layout_key *b_k = b;
390
return a_k->num_descriptors == b_k->num_descriptors &&
391
!memcmp(a_k->bindings, b_k->bindings, a_k->num_descriptors * sizeof(VkDescriptorSetLayoutBinding));
392
}
393
394
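/* get a cached descriptor set layout for these bindings, creating and caching one on miss;
 * with no bindings a single dummy UBO binding is substituted, and push layouts
 * (type == ZINK_DESCRIPTOR_TYPES) skip the cache entirely
 */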
struct zink_descriptor_layout *
395
zink_descriptor_util_layout_get(struct zink_context *ctx, enum zink_descriptor_type type,
396
VkDescriptorSetLayoutBinding *bindings, unsigned num_bindings,
397
struct zink_descriptor_layout_key **layout_key)
398
{
399
struct zink_screen *screen = zink_screen(ctx->base.screen);
400
uint32_t hash = 0;
401
struct zink_descriptor_layout_key key = {
402
.num_descriptors = num_bindings,
403
.bindings = bindings,
404
};
405
406
VkDescriptorSetLayoutBinding null_binding;
407
if (!bindings) {
408
null_binding.binding = 0;
409
null_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
410
null_binding.descriptorCount = 1;
411
null_binding.pImmutableSamplers = NULL;
412
null_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT |
413
VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
414
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT | VK_SHADER_STAGE_COMPUTE_BIT;
415
key.bindings = &null_binding;
416
}
417
418
if (type != ZINK_DESCRIPTOR_TYPES) {
419
hash = hash_descriptor_layout(&key);
420
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(&ctx->desc_set_layouts[type], hash, &key);
421
if (he) {
422
*layout_key = (void*)he->key;
423
return he->data;
424
}
425
}
426
427
VkDescriptorSetLayout dsl = descriptor_layout_create(screen, type, key.bindings, MAX2(num_bindings, 1));
428
if (!dsl)
429
return VK_NULL_HANDLE;
430
431
struct zink_descriptor_layout_key *k = ralloc(ctx, struct zink_descriptor_layout_key);
432
k->num_descriptors = num_bindings;
433
size_t bindings_size = MAX2(num_bindings, 1) * sizeof(VkDescriptorSetLayoutBinding);
434
k->bindings = ralloc_size(k, bindings_size);
435
if (!k->bindings) {
436
ralloc_free(k);
437
vkDestroyDescriptorSetLayout(screen->dev, dsl, NULL);
438
return VK_NULL_HANDLE;
439
}
440
memcpy(k->bindings, key.bindings, bindings_size);
441
442
struct zink_descriptor_layout *layout = rzalloc(ctx, struct zink_descriptor_layout);
443
layout->layout = dsl;
444
if (type != ZINK_DESCRIPTOR_TYPES) {
445
_mesa_hash_table_insert_pre_hashed(&ctx->desc_set_layouts[type], hash, k, layout);
446
}
447
*layout_key = k;
448
return layout;
449
}
450
451
bool
452
zink_descriptor_util_push_layouts_get(struct zink_context *ctx, struct zink_descriptor_layout **dsls, struct zink_descriptor_layout_key **layout_keys)
453
{
454
struct zink_screen *screen = zink_screen(ctx->base.screen);
455
VkDescriptorSetLayoutBinding bindings[PIPE_SHADER_TYPES];
456
for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
457
bindings[i].binding = tgsi_processor_to_shader_stage(i);
458
bindings[i].descriptorType = screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY ?
459
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
460
bindings[i].descriptorCount = 1;
461
bindings[i].stageFlags = zink_shader_stage(i);
462
bindings[i].pImmutableSamplers = NULL;
463
}
464
enum zink_descriptor_type dsl_type = screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY &&
465
screen->info.have_KHR_push_descriptor ? ZINK_DESCRIPTOR_TYPES : ZINK_DESCRIPTOR_TYPE_UBO;
466
dsls[0] = zink_descriptor_util_layout_get(ctx, dsl_type, bindings, ZINK_SHADER_COUNT, &layout_keys[0]);
467
dsls[1] = zink_descriptor_util_layout_get(ctx, dsl_type, &bindings[PIPE_SHADER_COMPUTE], 1, &layout_keys[1]);
468
return dsls[0] && dsls[1];
469
}
470
471
void
472
zink_descriptor_util_init_null_set(struct zink_context *ctx, VkDescriptorSet desc_set)
473
{
474
struct zink_screen *screen = zink_screen(ctx->base.screen);
475
VkDescriptorBufferInfo push_info;
476
VkWriteDescriptorSet push_wd;
477
push_wd.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
478
push_wd.pNext = NULL;
479
push_wd.dstBinding = 0;
480
push_wd.dstArrayElement = 0;
481
push_wd.descriptorCount = 1;
482
push_wd.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
483
push_wd.dstSet = desc_set;
484
push_wd.pBufferInfo = &push_info;
485
push_info.buffer = screen->info.rb2_feats.nullDescriptor ?
486
VK_NULL_HANDLE :
487
zink_resource(ctx->dummy_vertex_buffer)->obj->buffer;
488
push_info.offset = 0;
489
push_info.range = VK_WHOLE_SIZE;
490
vkUpdateDescriptorSets(screen->dev, 1, &push_wd, 0, NULL);
491
}
492
493
VkImageLayout
494
zink_descriptor_util_image_layout_eval(const struct zink_resource *res, bool is_compute)
495
{
496
return res->image_bind_count[is_compute] ? VK_IMAGE_LAYOUT_GENERAL :
497
res->aspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) ?
498
//Vulkan-Docs#1490
499
//(res->aspect == VK_IMAGE_ASPECT_DEPTH_BIT ? VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL :
500
//res->aspect == VK_IMAGE_ASPECT_STENCIL_BIT ? VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL :
501
(res->aspect == VK_IMAGE_ASPECT_DEPTH_BIT ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL :
502
res->aspect == VK_IMAGE_ASPECT_STENCIL_BIT ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL :
503
VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) :
504
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
505
}
506
507
static uint32_t
508
hash_descriptor_pool(const void *key)
509
{
510
uint32_t hash = 0;
511
const struct zink_descriptor_pool_key *k = key;
512
hash = XXH32(&k->num_type_sizes, sizeof(unsigned), hash);
513
hash = XXH32(&k->layout, sizeof(k->layout), hash);
514
hash = XXH32(k->sizes, k->num_type_sizes * sizeof(VkDescriptorPoolSize), hash);
515
516
return hash;
517
}
518
519
static bool
520
equals_descriptor_pool(const void *a, const void *b)
521
{
522
const struct zink_descriptor_pool_key *a_k = a;
523
const struct zink_descriptor_pool_key *b_k = b;
524
return a_k->num_type_sizes == b_k->num_type_sizes &&
525
a_k->layout == b_k->layout &&
526
!memcmp(a_k->sizes, b_k->sizes, a_k->num_type_sizes * sizeof(VkDescriptorPoolSize));
527
}
528
529
static struct zink_descriptor_pool *
530
descriptor_pool_get(struct zink_context *ctx, enum zink_descriptor_type type,
531
struct zink_descriptor_layout_key *layout_key, VkDescriptorPoolSize *sizes, unsigned num_type_sizes)
532
{
533
uint32_t hash = 0;
534
if (type != ZINK_DESCRIPTOR_TYPES) {
535
struct zink_descriptor_pool_key key = {
536
.layout = layout_key,
537
.num_type_sizes = num_type_sizes,
538
.sizes = sizes,
539
};
540
541
hash = hash_descriptor_pool(&key);
542
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(ctx->dd->descriptor_pools[type], hash, &key);
543
if (he)
544
return (void*)he->data;
545
}
546
struct zink_descriptor_pool *pool = descriptor_pool_create(zink_screen(ctx->base.screen), type, layout_key, sizes, num_type_sizes);
547
if (type != ZINK_DESCRIPTOR_TYPES)
548
_mesa_hash_table_insert_pre_hashed(ctx->dd->descriptor_pools[type], hash, &pool->key, pool);
549
return pool;
550
}
551
552
static bool
553
get_invalidated_desc_set(struct zink_descriptor_set *zds)
554
{
555
if (!zds->invalid)
556
return false;
557
return p_atomic_read(&zds->reference.count) == 1;
558
}
559
560
bool
561
zink_descriptor_util_alloc_sets(struct zink_screen *screen, VkDescriptorSetLayout dsl, VkDescriptorPool pool, VkDescriptorSet *sets, unsigned num_sets)
562
{
563
VkDescriptorSetAllocateInfo dsai;
564
VkDescriptorSetLayout *layouts = alloca(sizeof(*layouts) * num_sets);
565
memset((void *)&dsai, 0, sizeof(dsai));
566
dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
567
dsai.pNext = NULL;
568
dsai.descriptorPool = pool;
569
dsai.descriptorSetCount = num_sets;
570
for (unsigned i = 0; i < num_sets; i ++)
571
layouts[i] = dsl;
572
dsai.pSetLayouts = layouts;
573
574
if (vkAllocateDescriptorSets(screen->dev, &dsai, sets) != VK_SUCCESS) {
575
debug_printf("ZINK: %" PRIu64 " failed to allocate descriptor set :/\n", (uint64_t)dsl);
576
return false;
577
}
578
return true;
579
}
580
581
unsigned
582
zink_descriptor_program_num_sizes(struct zink_program *pg, enum zink_descriptor_type type)
583
{
584
switch (type) {
585
case ZINK_DESCRIPTOR_TYPE_UBO:
586
return 1;
587
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
588
return !!pg->dd->sizes[ZDS_INDEX_COMBINED_SAMPLER].descriptorCount +
589
!!pg->dd->sizes[ZDS_INDEX_UNIFORM_TEXELS].descriptorCount;
590
case ZINK_DESCRIPTOR_TYPE_SSBO:
591
return 1;
592
case ZINK_DESCRIPTOR_TYPE_IMAGE:
593
return !!pg->dd->sizes[ZDS_INDEX_STORAGE_IMAGE].descriptorCount +
594
!!pg->dd->sizes[ZDS_INDEX_STORAGE_TEXELS].descriptorCount;
595
default: break;
596
}
597
unreachable("unknown type");
598
}
599
600
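/* allocate a bucket of sets from the pool (bucket size scales by DESC_BUCKET_FACTOR with use)
 * along with the arrays used to track the resources each set references;
 * every set after the first is stashed in alloc_desc_sets for later reuse
 */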
static struct zink_descriptor_set *
601
allocate_desc_set(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, unsigned descs_used, bool is_compute)
602
{
603
struct zink_screen *screen = zink_screen(ctx->base.screen);
604
bool push_set = type == ZINK_DESCRIPTOR_TYPES;
605
struct zink_descriptor_pool *pool = push_set ? ctx->dd->push_pool[is_compute] : pdd_cached(pg)->pool[type];
606
#define DESC_BUCKET_FACTOR 10
607
unsigned bucket_size = pool->key.layout->num_descriptors ? DESC_BUCKET_FACTOR : 1;
608
if (pool->key.layout->num_descriptors) {
609
for (unsigned desc_factor = DESC_BUCKET_FACTOR; desc_factor < descs_used; desc_factor *= DESC_BUCKET_FACTOR)
610
bucket_size = desc_factor;
611
}
612
VkDescriptorSet *desc_set = alloca(sizeof(*desc_set) * bucket_size);
613
if (!zink_descriptor_util_alloc_sets(screen, push_set ? ctx->dd->push_dsl[is_compute]->layout : pg->dsl[type + 1], pool->descpool, desc_set, bucket_size))
614
return VK_NULL_HANDLE;
615
616
struct zink_descriptor_set *alloc = ralloc_array(pool, struct zink_descriptor_set, bucket_size);
617
assert(alloc);
618
unsigned num_resources = pool->num_resources;
619
struct zink_resource_object **res_objs = NULL;
620
void **samplers = NULL;
621
struct zink_descriptor_surface *surfaces = NULL;
622
switch (type) {
623
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
624
samplers = rzalloc_array(pool, void*, num_resources * bucket_size);
625
assert(samplers);
626
FALLTHROUGH;
627
case ZINK_DESCRIPTOR_TYPE_IMAGE:
628
surfaces = rzalloc_array(pool, struct zink_descriptor_surface, num_resources * bucket_size);
629
assert(surfaces);
630
break;
631
default:
632
res_objs = rzalloc_array(pool, struct zink_resource_object*, num_resources * bucket_size);
633
assert(res_objs);
634
break;
635
}
636
for (unsigned i = 0; i < bucket_size; i ++) {
637
struct zink_descriptor_set *zds = &alloc[i];
638
pipe_reference_init(&zds->reference, 1);
639
zds->pool = pool;
640
zds->hash = 0;
641
zds->batch_uses = NULL;
642
zds->invalid = true;
643
zds->punted = zds->recycled = false;
644
#ifndef NDEBUG
645
zds->num_resources = num_resources;
646
#endif
647
switch (type) {
648
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
649
zds->sampler_states = (struct zink_sampler_state**)&samplers[i * pool->key.layout->num_descriptors];
650
FALLTHROUGH;
651
case ZINK_DESCRIPTOR_TYPE_IMAGE:
652
zds->surfaces = &surfaces[i * pool->key.layout->num_descriptors];
653
break;
654
default:
655
zds->res_objs = (struct zink_resource_object**)&res_objs[i * pool->key.layout->num_descriptors];
656
break;
657
}
658
zds->desc_set = desc_set[i];
659
if (i > 0)
660
util_dynarray_append(&pool->alloc_desc_sets, struct zink_descriptor_set *, zds);
661
}
662
pool->num_sets_allocated += bucket_size;
663
return alloc;
664
}
665
666
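/* build the set lookup key from the current descriptor states: compute only uses slot 0,
 * the gfx push set uses the per-stage push states masked by push_usage, and all other
 * gfx types use each stage's cached state
 */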
static void
667
populate_zds_key(struct zink_context *ctx, enum zink_descriptor_type type, bool is_compute,
668
struct zink_descriptor_state_key *key, uint32_t push_usage)
669
{
670
if (is_compute) {
671
for (unsigned i = 1; i < ZINK_SHADER_COUNT; i++)
672
key->exists[i] = false;
673
key->exists[0] = true;
674
if (type == ZINK_DESCRIPTOR_TYPES)
675
key->state[0] = ctx->dd->push_state[is_compute];
676
else
677
key->state[0] = ctx->dd->descriptor_states[is_compute].state[type];
678
} else if (type == ZINK_DESCRIPTOR_TYPES) {
679
/* gfx only */
680
for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
681
if (push_usage & BITFIELD_BIT(i)) {
682
key->exists[i] = true;
683
key->state[i] = ctx->dd->gfx_push_state[i];
684
} else
685
key->exists[i] = false;
686
}
687
} else {
688
for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
689
key->exists[i] = ctx->dd->gfx_descriptor_states[i].valid[type];
690
key->state[i] = ctx->dd->gfx_descriptor_states[i].state[type];
691
}
692
}
693
}
694
695
static void
696
punt_invalid_set(struct zink_descriptor_set *zds, struct hash_entry *he)
697
{
698
/* this is no longer usable, so we punt it for now until it gets recycled */
699
assert(!zds->recycled);
700
if (!he)
701
he = _mesa_hash_table_search_pre_hashed(zds->pool->desc_sets, zds->hash, &zds->key);
702
_mesa_hash_table_remove(zds->pool->desc_sets, he);
703
zds->punted = true;
704
}
705
706
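/* find a set matching the current descriptor state: try the last-used set, then the
 * in-use and free tables, then a spare allocated set, then scavenge an invalidated
 * free set; if the pool is at its cap, wait for the gpu and retry before allocating
 * a new bucket
 */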
static struct zink_descriptor_set *
707
zink_descriptor_set_get(struct zink_context *ctx,
708
enum zink_descriptor_type type,
709
bool is_compute,
710
bool *cache_hit)
711
{
712
*cache_hit = false;
713
struct zink_descriptor_set *zds;
714
struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
715
struct zink_batch *batch = &ctx->batch;
716
bool push_set = type == ZINK_DESCRIPTOR_TYPES;
717
struct zink_descriptor_pool *pool = push_set ? ctx->dd->push_pool[is_compute] : pdd_cached(pg)->pool[type];
718
unsigned descs_used = 1;
719
assert(type <= ZINK_DESCRIPTOR_TYPES);
720
721
assert(pool->key.layout->num_descriptors);
722
uint32_t hash = push_set ? ctx->dd->push_state[is_compute] :
723
ctx->dd->descriptor_states[is_compute].state[type];
724
725
struct zink_descriptor_set *last_set = push_set ? ctx->dd->last_set[is_compute] : pdd_cached(pg)->last_set[type];
726
/* if the current state hasn't changed since the last time it was used,
727
* it's impossible for this set to not be valid, which means that an
728
* early return here can be done safely and with no locking
729
*/
730
if (last_set && ((push_set && !ctx->dd->changed[is_compute][ZINK_DESCRIPTOR_TYPES]) ||
731
(!push_set && !ctx->dd->changed[is_compute][type]))) {
732
*cache_hit = true;
733
return last_set;
734
}
735
736
struct zink_descriptor_state_key key;
737
populate_zds_key(ctx, type, is_compute, &key, pg->dd->push_usage);
738
739
simple_mtx_lock(&pool->mtx);
740
if (last_set && last_set->hash == hash && desc_state_equal(&last_set->key, &key)) {
741
zds = last_set;
742
*cache_hit = !zds->invalid;
743
if (zds->recycled) {
744
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pool->free_desc_sets, hash, &key);
745
if (he)
746
_mesa_hash_table_remove(pool->free_desc_sets, he);
747
zds->recycled = false;
748
}
749
if (zds->invalid) {
750
if (zink_batch_usage_exists(zds->batch_uses))
751
punt_invalid_set(zds, NULL);
752
else
753
/* this set is guaranteed to be in pool->alloc_desc_sets */
754
goto skip_hash_tables;
755
zds = NULL;
756
}
757
if (zds)
758
goto out;
759
}
760
761
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pool->desc_sets, hash, &key);
762
bool recycled = false, punted = false;
763
if (he) {
764
zds = (void*)he->data;
765
if (zds->invalid && zink_batch_usage_exists(zds->batch_uses)) {
766
punt_invalid_set(zds, he);
767
zds = NULL;
768
punted = true;
769
}
770
}
771
if (!he) {
772
he = _mesa_hash_table_search_pre_hashed(pool->free_desc_sets, hash, &key);
773
recycled = true;
774
}
775
if (he && !punted) {
776
zds = (void*)he->data;
777
*cache_hit = !zds->invalid;
778
if (recycled) {
779
/* need to migrate this entry back to the in-use hash */
780
_mesa_hash_table_remove(pool->free_desc_sets, he);
781
goto out;
782
}
783
goto quick_out;
784
}
785
skip_hash_tables:
786
if (util_dynarray_num_elements(&pool->alloc_desc_sets, struct zink_descriptor_set *)) {
787
/* grab one off the allocated array */
788
zds = util_dynarray_pop(&pool->alloc_desc_sets, struct zink_descriptor_set *);
789
goto out;
790
}
791
792
if (_mesa_hash_table_num_entries(pool->free_desc_sets)) {
793
/* try for an invalidated set first */
794
unsigned count = 0;
795
hash_table_foreach(pool->free_desc_sets, he) {
796
struct zink_descriptor_set *tmp = he->data;
797
if ((count++ >= 100 && tmp->reference.count == 1) || get_invalidated_desc_set(he->data)) {
798
zds = tmp;
799
assert(p_atomic_read(&zds->reference.count) == 1);
800
descriptor_set_invalidate(zds);
801
_mesa_hash_table_remove(pool->free_desc_sets, he);
802
goto out;
803
}
804
}
805
}
806
807
if (pool->num_sets_allocated + pool->key.layout->num_descriptors > ZINK_DEFAULT_MAX_DESCS) {
808
simple_mtx_unlock(&pool->mtx);
809
zink_fence_wait(&ctx->base);
810
zink_batch_reference_program(batch, pg);
811
return zink_descriptor_set_get(ctx, type, is_compute, cache_hit);
812
}
813
814
zds = allocate_desc_set(ctx, pg, type, descs_used, is_compute);
815
out:
816
zds->hash = hash;
817
populate_zds_key(ctx, type, is_compute, &zds->key, pg->dd->push_usage);
818
zds->recycled = false;
819
_mesa_hash_table_insert_pre_hashed(pool->desc_sets, hash, &zds->key, zds);
820
quick_out:
821
zds->punted = zds->invalid = false;
822
batch_add_desc_set(batch, zds);
823
if (push_set)
824
ctx->dd->last_set[is_compute] = zds;
825
else
826
pdd_cached(pg)->last_set[type] = zds;
827
simple_mtx_unlock(&pool->mtx);
828
829
return zds;
830
}
831
832
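/* called when a set's last batch reference is dropped: punted/invalidated sets go back
 * onto alloc_desc_sets, valid ones move from the in-use table to the free table
 */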
void
833
zink_descriptor_set_recycle(struct zink_descriptor_set *zds)
834
{
835
struct zink_descriptor_pool *pool = zds->pool;
836
/* if desc set is still in use by a batch, don't recache */
837
uint32_t refcount = p_atomic_read(&zds->reference.count);
838
if (refcount != 1)
839
return;
840
/* this is a null set */
841
if (!pool->key.layout->num_descriptors)
842
return;
843
simple_mtx_lock(&pool->mtx);
844
if (zds->punted)
845
zds->invalid = true;
846
else {
847
/* if we've previously punted this set, then it won't have a hash or be in either of the tables */
848
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pool->desc_sets, zds->hash, &zds->key);
849
if (!he) {
850
/* desc sets can be used multiple times in the same batch */
851
simple_mtx_unlock(&pool->mtx);
852
return;
853
}
854
_mesa_hash_table_remove(pool->desc_sets, he);
855
}
856
857
if (zds->invalid) {
858
descriptor_set_invalidate(zds);
859
util_dynarray_append(&pool->alloc_desc_sets, struct zink_descriptor_set *, zds);
860
} else {
861
zds->recycled = true;
862
_mesa_hash_table_insert_pre_hashed(pool->free_desc_sets, zds->hash, &zds->key, zds);
863
}
864
simple_mtx_unlock(&pool->mtx);
865
}
866
867
868
static void
869
desc_set_ref_add(struct zink_descriptor_set *zds, struct zink_descriptor_refs *refs, void **ref_ptr, void *ptr)
870
{
871
struct zink_descriptor_reference ref = {ref_ptr, &zds->invalid};
872
*ref_ptr = ptr;
873
if (ptr)
874
util_dynarray_append(&refs->refs, struct zink_descriptor_reference, ref);
875
}
876
877
static void
878
zink_descriptor_surface_desc_set_add(struct zink_descriptor_surface *dsurf, struct zink_descriptor_set *zds, unsigned idx)
879
{
880
assert(idx < zds->num_resources);
881
zds->surfaces[idx].is_buffer = dsurf->is_buffer;
882
if (dsurf->is_buffer)
883
desc_set_ref_add(zds, &dsurf->bufferview->desc_set_refs, (void**)&zds->surfaces[idx].bufferview, dsurf->bufferview);
884
else
885
desc_set_ref_add(zds, &dsurf->surface->desc_set_refs, (void**)&zds->surfaces[idx].surface, dsurf->surface);
886
}
887
888
static void
889
zink_image_view_desc_set_add(struct zink_image_view *image_view, struct zink_descriptor_set *zds, unsigned idx, bool is_buffer)
890
{
891
assert(idx < zds->num_resources);
892
if (is_buffer)
893
desc_set_ref_add(zds, &image_view->buffer_view->desc_set_refs, (void**)&zds->surfaces[idx].bufferview, image_view->buffer_view);
894
else
895
desc_set_ref_add(zds, &image_view->surface->desc_set_refs, (void**)&zds->surfaces[idx].surface, image_view->surface);
896
}
897
898
static void
899
zink_sampler_state_desc_set_add(struct zink_sampler_state *sampler_state, struct zink_descriptor_set *zds, unsigned idx)
900
{
901
assert(idx < zds->num_resources);
902
if (sampler_state)
903
desc_set_ref_add(zds, &sampler_state->desc_set_refs, (void**)&zds->sampler_states[idx], sampler_state);
904
else
905
zds->sampler_states[idx] = NULL;
906
}
907
908
static void
909
zink_resource_desc_set_add(struct zink_resource *res, struct zink_descriptor_set *zds, unsigned idx)
910
{
911
assert(idx < zds->num_resources);
912
desc_set_ref_add(zds, res ? &res->obj->desc_set_refs : NULL, (void**)&zds->res_objs[idx], res ? res->obj : NULL);
913
}
914
915
void
916
zink_descriptor_set_refs_clear(struct zink_descriptor_refs *refs, void *ptr)
917
{
918
util_dynarray_foreach(&refs->refs, struct zink_descriptor_reference, ref) {
919
if (*ref->ref == ptr) {
920
*ref->invalid = true;
921
*ref->ref = NULL;
922
}
923
}
924
util_dynarray_fini(&refs->refs);
925
}
926
927
static inline void
928
zink_descriptor_pool_reference(struct zink_screen *screen,
929
struct zink_descriptor_pool **dst,
930
struct zink_descriptor_pool *src)
931
{
932
struct zink_descriptor_pool *old_dst = dst ? *dst : NULL;
933
934
if (pipe_reference_described(old_dst ? &old_dst->reference : NULL, &src->reference,
935
(debug_reference_descriptor)debug_describe_zink_descriptor_pool))
936
descriptor_pool_free(screen, old_dst);
937
if (dst) *dst = src;
938
}
939
940
static void
941
create_descriptor_ref_template(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type)
942
{
943
struct zink_shader **stages;
944
if (pg->is_compute)
945
stages = &((struct zink_compute_program*)pg)->shader;
946
else
947
stages = ((struct zink_gfx_program*)pg)->shaders;
948
unsigned num_shaders = pg->is_compute ? 1 : ZINK_SHADER_COUNT;
949
950
for (int i = 0; i < num_shaders; i++) {
951
struct zink_shader *shader = stages[i];
952
if (!shader)
953
continue;
954
955
for (int j = 0; j < shader->num_bindings[type]; j++) {
956
int index = shader->bindings[type][j].index;
957
if (type == ZINK_DESCRIPTOR_TYPE_UBO && !index)
958
continue;
959
pdd_cached(pg)->num_refs[type] += shader->bindings[type][j].size;
960
}
961
}
962
963
pdd_cached(pg)->refs[type] = ralloc_array(pg->dd, union zink_program_descriptor_refs, pdd_cached(pg)->num_refs[type]);
964
if (!pdd_cached(pg)->refs[type])
965
return;
966
967
unsigned ref_idx = 0;
968
for (int i = 0; i < num_shaders; i++) {
969
struct zink_shader *shader = stages[i];
970
if (!shader)
971
continue;
972
973
enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
974
for (int j = 0; j < shader->num_bindings[type]; j++) {
975
int index = shader->bindings[type][j].index;
976
for (unsigned k = 0; k < shader->bindings[type][j].size; k++) {
977
switch (type) {
978
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
979
pdd_cached(pg)->refs[type][ref_idx].sampler.sampler_state = (struct zink_sampler_state**)&ctx->sampler_states[stage][index + k];
980
pdd_cached(pg)->refs[type][ref_idx].sampler.dsurf = &ctx->di.sampler_surfaces[stage][index + k];
981
break;
982
case ZINK_DESCRIPTOR_TYPE_IMAGE:
983
pdd_cached(pg)->refs[type][ref_idx].dsurf = &ctx->di.image_surfaces[stage][index + k];
984
break;
985
case ZINK_DESCRIPTOR_TYPE_UBO:
986
if (!index)
987
continue;
988
FALLTHROUGH;
989
default:
990
pdd_cached(pg)->refs[type][ref_idx].res = &ctx->di.descriptor_res[type][stage][index + k];
991
break;
992
}
993
assert(ref_idx < pdd_cached(pg)->num_refs[type]);
994
ref_idx++;
995
}
996
}
997
}
998
}
999
1000
bool
1001
zink_descriptor_program_init(struct zink_context *ctx, struct zink_program *pg)
1002
{
1003
struct zink_screen *screen = zink_screen(ctx->base.screen);
1004
1005
pg->dd = (void*)rzalloc(pg, struct zink_program_descriptor_data_cached);
1006
if (!pg->dd)
1007
return false;
1008
1009
if (!zink_descriptor_program_init_lazy(ctx, pg))
1010
return false;
1011
1012
/* no descriptors */
1013
if (!pg->dd)
1014
return true;
1015
1016
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
1017
if (!pg->dd->layout_key[i])
1018
continue;
1019
1020
unsigned idx = zink_descriptor_type_to_size_idx(i);
1021
VkDescriptorPoolSize *size = &pg->dd->sizes[idx];
1022
/* this is a sampler/image set with no images, only texel buffers */
1023
if (!size->descriptorCount)
1024
size++;
1025
unsigned num_sizes = zink_descriptor_program_num_sizes(pg, i);
1026
struct zink_descriptor_pool *pool = descriptor_pool_get(ctx, i, pg->dd->layout_key[i], size, num_sizes);
1027
if (!pool)
1028
return false;
1029
zink_descriptor_pool_reference(screen, &pdd_cached(pg)->pool[i], pool);
1030
1031
if (screen->info.have_KHR_descriptor_update_template &&
1032
screen->descriptor_mode != ZINK_DESCRIPTOR_MODE_NOTEMPLATES)
1033
create_descriptor_ref_template(ctx, pg, i);
1034
}
1035
1036
return true;
1037
}
1038
1039
void
1040
zink_descriptor_program_deinit(struct zink_screen *screen, struct zink_program *pg)
1041
{
1042
if (!pg->dd)
1043
return;
1044
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++)
1045
zink_descriptor_pool_reference(screen, &pdd_cached(pg)->pool[i], NULL);
1046
1047
zink_descriptor_program_deinit_lazy(screen, pg);
1048
}
1049
1050
static void
1051
zink_descriptor_pool_deinit(struct zink_context *ctx)
1052
{
1053
struct zink_screen *screen = zink_screen(ctx->base.screen);
1054
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
1055
hash_table_foreach(ctx->dd->descriptor_pools[i], entry) {
1056
struct zink_descriptor_pool *pool = (void*)entry->data;
1057
zink_descriptor_pool_reference(screen, &pool, NULL);
1058
}
1059
_mesa_hash_table_destroy(ctx->dd->descriptor_pools[i], NULL);
1060
}
1061
}
1062
1063
static bool
1064
zink_descriptor_pool_init(struct zink_context *ctx)
1065
{
1066
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
1067
ctx->dd->descriptor_pools[i] = _mesa_hash_table_create(ctx, hash_descriptor_pool, equals_descriptor_pool);
1068
if (!ctx->dd->descriptor_pools[i])
1069
return false;
1070
}
1071
struct zink_screen *screen = zink_screen(ctx->base.screen);
1072
VkDescriptorPoolSize sizes;
1073
sizes.type = screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
1074
sizes.descriptorCount = ZINK_SHADER_COUNT * ZINK_DEFAULT_MAX_DESCS;
1075
ctx->dd->push_pool[0] = descriptor_pool_get(ctx, 0, ctx->dd->push_layout_keys[0], &sizes, 1);
1076
sizes.descriptorCount = ZINK_DEFAULT_MAX_DESCS;
1077
ctx->dd->push_pool[1] = descriptor_pool_get(ctx, 0, ctx->dd->push_layout_keys[1], &sizes, 1);
1078
return ctx->dd->push_pool[0] && ctx->dd->push_pool[1];
1079
}
1080
1081
1082
static void
1083
desc_set_res_add(struct zink_descriptor_set *zds, struct zink_resource *res, unsigned int i, bool cache_hit)
1084
{
1085
/* if we got a cache hit, we have to verify that the cached set is still valid;
1086
* we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
1087
* hash table on every resource with the associated descriptor sets that then needs to be iterated through
1088
* whenever a resource is destroyed
1089
*/
1090
assert(!cache_hit || zds->res_objs[i] == (res ? res->obj : NULL));
1091
if (!cache_hit)
1092
zink_resource_desc_set_add(res, zds, i);
1093
}
1094
1095
static void
1096
desc_set_sampler_add(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_descriptor_surface *dsurf,
1097
struct zink_sampler_state *state, unsigned int i, bool cache_hit)
1098
{
1099
/* if we got a cache hit, we have to verify that the cached set is still valid;
1100
* we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
1101
* hash table on every resource with the associated descriptor sets that then needs to be iterated through
1102
* whenever a resource is destroyed
1103
*/
1104
#ifndef NDEBUG
1105
uint32_t cur_hash = get_descriptor_surface_hash(ctx, &zds->surfaces[i]);
1106
uint32_t new_hash = get_descriptor_surface_hash(ctx, dsurf);
1107
#endif
1108
assert(!cache_hit || cur_hash == new_hash);
1109
assert(!cache_hit || zds->sampler_states[i] == state);
1110
if (!cache_hit) {
1111
zink_descriptor_surface_desc_set_add(dsurf, zds, i);
1112
zink_sampler_state_desc_set_add(state, zds, i);
1113
}
1114
}
1115
1116
static void
1117
desc_set_image_add(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_image_view *image_view,
1118
unsigned int i, bool is_buffer, bool cache_hit)
1119
{
1120
/* if we got a cache hit, we have to verify that the cached set is still valid;
1121
* we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
1122
* hash table on every resource with the associated descriptor sets that then needs to be iterated through
1123
* whenever a resource is destroyed
1124
*/
1125
#ifndef NDEBUG
1126
uint32_t cur_hash = get_descriptor_surface_hash(ctx, &zds->surfaces[i]);
1127
uint32_t new_hash = zink_get_image_view_hash(ctx, image_view, is_buffer);
1128
#endif
1129
assert(!cache_hit || cur_hash == new_hash);
1130
if (!cache_hit)
1131
zink_image_view_desc_set_add(image_view, zds, i, is_buffer);
1132
}
1133
1134
static void
1135
desc_set_descriptor_surface_add(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_descriptor_surface *dsurf,
1136
unsigned int i, bool cache_hit)
1137
{
1138
/* if we got a cache hit, we have to verify that the cached set is still valid;
1139
* we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
1140
* hash table on every resource with the associated descriptor sets that then needs to be iterated through
1141
* whenever a resource is destroyed
1142
*/
1143
#ifndef NDEBUG
1144
uint32_t cur_hash = get_descriptor_surface_hash(ctx, &zds->surfaces[i]);
1145
uint32_t new_hash = get_descriptor_surface_hash(ctx, dsurf);
1146
#endif
1147
assert(!cache_hit || cur_hash == new_hash);
1148
if (!cache_hit)
1149
zink_descriptor_surface_desc_set_add(dsurf, zds, i);
1150
}
1151
1152
static unsigned
1153
init_write_descriptor(struct zink_shader *shader, struct zink_descriptor_set *zds, enum zink_descriptor_type type, int idx, VkWriteDescriptorSet *wd, unsigned num_wds)
1154
{
1155
wd->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
1156
wd->pNext = NULL;
1157
wd->dstBinding = shader ? shader->bindings[type][idx].binding : idx;
1158
wd->dstArrayElement = 0;
1159
wd->descriptorCount = shader ? shader->bindings[type][idx].size : 1;
1160
wd->descriptorType = shader ? shader->bindings[type][idx].type : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
1161
wd->dstSet = zds->desc_set;
1162
return num_wds + 1;
1163
}
1164
1165
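/* write the push-set ubo descriptor for each stage and record its dynamic offset
 * (ordered by shader stage to match the vkCmdBindDescriptorSets ordering rules);
 * returns the number of dynamic offsets filled
 */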
static unsigned
1166
update_push_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
1167
bool is_compute, bool cache_hit, uint32_t *dynamic_offsets)
1168
{
1169
struct zink_screen *screen = zink_screen(ctx->base.screen);
1170
VkWriteDescriptorSet wds[ZINK_SHADER_COUNT];
1171
VkDescriptorBufferInfo buffer_infos[ZINK_SHADER_COUNT];
1172
struct zink_shader **stages;
1173
1174
unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
1175
if (is_compute)
1176
stages = &ctx->curr_compute->shader;
1177
else
1178
stages = &ctx->gfx_stages[0];
1179
1180
for (int i = 0; i < num_stages; i++) {
1181
struct zink_shader *shader = stages[i];
1182
enum pipe_shader_type pstage = shader ? pipe_shader_type_from_mesa(shader->nir->info.stage) : i;
1183
VkDescriptorBufferInfo *info = &ctx->di.ubos[pstage][0];
1184
unsigned dynamic_idx = is_compute ? 0 : tgsi_processor_to_shader_stage(pstage);
1185
1186
/* Values are taken from pDynamicOffsets in an order such that all entries for set N come before set N+1;
1187
* within a set, entries are ordered by the binding numbers in the descriptor set layouts
1188
* - vkCmdBindDescriptorSets spec
1189
*
1190
* because of this, we have to populate the dynamic offsets by their shader stage to ensure they
1191
* match what the driver expects
1192
*/
1193
dynamic_offsets[dynamic_idx] = info->offset;
1194
if (!cache_hit) {
1195
struct zink_resource *res = zink_get_resource_for_descriptor(ctx, ZINK_DESCRIPTOR_TYPE_UBO, pstage, 0);
1196
init_write_descriptor(NULL, zds, ZINK_DESCRIPTOR_TYPE_UBO, tgsi_processor_to_shader_stage(pstage), &wds[i], 0);
1197
desc_set_res_add(zds, res, i, cache_hit);
1198
/* these are dynamic UBO descriptors, so we have to always set 0 as the descriptor offset */
1199
buffer_infos[i] = *info;
1200
buffer_infos[i].offset = 0;
1201
wds[i].pBufferInfo = &buffer_infos[i];
1202
}
1203
}
1204
1205
if (!cache_hit)
1206
vkUpdateDescriptorSets(screen->dev, num_stages, wds, 0, NULL);
1207
return num_stages;
1208
}
1209
1210
static void
1211
set_descriptor_set_refs(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_program *pg, bool cache_hit)
1212
{
1213
enum zink_descriptor_type type = zds->pool->type;
1214
for (unsigned i = 0; i < pdd_cached(pg)->num_refs[type]; i++) {
1215
switch (type) {
1216
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
1217
desc_set_sampler_add(ctx, zds, pdd_cached(pg)->refs[type][i].sampler.dsurf,
1218
*pdd_cached(pg)->refs[type][i].sampler.sampler_state, i, cache_hit);
1219
break;
1220
case ZINK_DESCRIPTOR_TYPE_IMAGE:
1221
desc_set_descriptor_surface_add(ctx, zds, pdd_cached(pg)->refs[type][i].dsurf, i, cache_hit);
1222
break;
1223
default:
1224
desc_set_res_add(zds, *pdd_cached(pg)->refs[type][i].res, i, cache_hit);
1225
break;
1226
}
1227
}
1228
}
1229
1230
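/* flush descriptor writes for every type that missed the cache: use the templated
 * lazy path when update templates are available, otherwise walk each stage's bindings
 * and build the VkWriteDescriptorSet array by hand
 */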
static void
1231
update_descriptors_internal(struct zink_context *ctx, struct zink_descriptor_set **zds, struct zink_program *pg, bool *cache_hit)
1232
{
1233
struct zink_screen *screen = zink_screen(ctx->base.screen);
1234
struct zink_shader **stages;
1235
1236
unsigned num_stages = pg->is_compute ? 1 : ZINK_SHADER_COUNT;
1237
if (pg->is_compute)
1238
stages = &ctx->curr_compute->shader;
1239
else
1240
stages = &ctx->gfx_stages[0];
1241
1242
for (unsigned h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
1243
if (cache_hit[h] || !zds[h])
1244
continue;
1245
1246
if (screen->info.have_KHR_descriptor_update_template &&
1247
screen->descriptor_mode != ZINK_DESCRIPTOR_MODE_NOTEMPLATES) {
1248
set_descriptor_set_refs(ctx, zds[h], pg, cache_hit[h]);
1249
zink_descriptor_set_update_lazy(ctx, pg, h, zds[h]->desc_set);
1250
continue;
1251
}
1252
1253
unsigned num_resources = 0;
1254
ASSERTED unsigned num_bindings = zds[h]->pool->num_resources;
1255
VkWriteDescriptorSet wds[ZINK_MAX_DESCRIPTORS_PER_TYPE];
1256
unsigned num_wds = 0;
1257
1258
for (int i = 0; i < num_stages; i++) {
1259
struct zink_shader *shader = stages[i];
1260
if (!shader)
1261
continue;
1262
enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
1263
for (int j = 0; j < shader->num_bindings[h]; j++) {
1264
int index = shader->bindings[h][j].index;
1265
switch (h) {
1266
case ZINK_DESCRIPTOR_TYPE_UBO:
1267
if (!index)
1268
continue;
1269
FALLTHROUGH;
1270
case ZINK_DESCRIPTOR_TYPE_SSBO: {
1271
VkDescriptorBufferInfo *info;
1272
struct zink_resource *res = zink_get_resource_for_descriptor(ctx, h, stage, index);
1273
if (h == ZINK_DESCRIPTOR_TYPE_UBO)
1274
info = &ctx->di.ubos[stage][index];
1275
else
1276
info = &ctx->di.ssbos[stage][index];
1277
assert(num_resources < num_bindings);
1278
desc_set_res_add(zds[h], res, num_resources++, cache_hit[h]);
1279
wds[num_wds].pBufferInfo = info;
1280
}
1281
break;
1282
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
1283
case ZINK_DESCRIPTOR_TYPE_IMAGE: {
1284
VkDescriptorImageInfo *image_info;
1285
VkBufferView *buffer_info;
1286
if (h == ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW) {
1287
image_info = &ctx->di.textures[stage][index];
1288
buffer_info = &ctx->di.tbos[stage][index];
1289
} else {
1290
image_info = &ctx->di.images[stage][index];
1291
buffer_info = &ctx->di.texel_images[stage][index];
1292
}
1293
bool is_buffer = zink_shader_descriptor_is_buffer(shader, h, j);
1294
for (unsigned k = 0; k < shader->bindings[h][j].size; k++) {
1295
assert(num_resources < num_bindings);
1296
if (h == ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW) {
1297
struct zink_sampler_state *sampler = NULL;
1298
if (!is_buffer && image_info->imageView)
1299
sampler = ctx->sampler_states[stage][index + k];
1300
1301
desc_set_sampler_add(ctx, zds[h], &ctx->di.sampler_surfaces[stage][index + k], sampler, num_resources++, cache_hit[h]);
1302
} else {
1303
struct zink_image_view *image_view = &ctx->image_views[stage][index + k];
1304
desc_set_image_add(ctx, zds[h], image_view, num_resources++, is_buffer, cache_hit[h]);
1305
}
1306
}
1307
if (is_buffer)
1308
wds[num_wds].pTexelBufferView = buffer_info;
1309
else
1310
wds[num_wds].pImageInfo = image_info;
1311
}
1312
break;
1313
default:
1314
unreachable("unknown descriptor type");
1315
}
1316
num_wds = init_write_descriptor(shader, zds[h], h, j, &wds[num_wds], num_wds);
1317
}
1318
}
1319
if (num_wds)
1320
vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);
1321
}
1322
}
1323
1324
static void
1325
zink_context_update_descriptor_states(struct zink_context *ctx, struct zink_program *pg);
1326
1327
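/* per-draw/dispatch entry point: refresh the descriptor state hashes, fetch the push set
 * and one set per descriptor type (falling back to the dummy set), update their contents,
 * and bind everything with a single vkCmdBindDescriptorSets call
 */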
void
1328
zink_descriptors_update(struct zink_context *ctx, bool is_compute)
1329
{
1330
struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
1331
1332
zink_context_update_descriptor_states(ctx, pg);
1333
bool cache_hit[ZINK_DESCRIPTOR_TYPES + 1];
1334
VkDescriptorSet sets[ZINK_DESCRIPTOR_TYPES + 1];
1335
struct zink_descriptor_set *zds[ZINK_DESCRIPTOR_TYPES + 1];
1336
/* push set is indexed in vulkan as 0 but isn't in the general pool array */
1337
ctx->dd->changed[is_compute][ZINK_DESCRIPTOR_TYPES] |= ctx->dd->pg[is_compute] != pg;
1338
if (pg->dd->push_usage)
1339
zds[ZINK_DESCRIPTOR_TYPES] = zink_descriptor_set_get(ctx, ZINK_DESCRIPTOR_TYPES, is_compute, &cache_hit[ZINK_DESCRIPTOR_TYPES]);
1340
else {
1341
zds[ZINK_DESCRIPTOR_TYPES] = NULL;
1342
cache_hit[ZINK_DESCRIPTOR_TYPES] = false;
1343
}
1344
ctx->dd->changed[is_compute][ZINK_DESCRIPTOR_TYPES] = false;
1345
sets[0] = zds[ZINK_DESCRIPTOR_TYPES] ? zds[ZINK_DESCRIPTOR_TYPES]->desc_set : ctx->dd->dummy_set;
1346
for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
1347
ctx->dd->changed[is_compute][h] |= ctx->dd->pg[is_compute] != pg;
1348
if (pg->dsl[h + 1]) {
1349
/* null set has null pool */
1350
if (pdd_cached(pg)->pool[h])
1351
zds[h] = zink_descriptor_set_get(ctx, h, is_compute, &cache_hit[h]);
1352
else
1353
zds[h] = NULL;
1354
/* reuse dummy set for bind */
1355
sets[h + 1] = zds[h] ? zds[h]->desc_set : ctx->dd->dummy_set;
1356
} else {
1357
zds[h] = NULL;
1358
}
1359
if (!zds[h])
1360
cache_hit[h] = false;
1361
ctx->dd->changed[is_compute][h] = false;
1362
}
1363
struct zink_batch *batch = &ctx->batch;
1364
zink_batch_reference_program(batch, pg);
1365
1366
uint32_t dynamic_offsets[PIPE_MAX_CONSTANT_BUFFERS];
1367
unsigned dynamic_offset_idx = 0;
1368
1369
if (pg->dd->push_usage) // push set
1370
dynamic_offset_idx = update_push_ubo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPES],
1371
is_compute, cache_hit[ZINK_DESCRIPTOR_TYPES], dynamic_offsets);
1372
1373
update_descriptors_internal(ctx, zds, pg, cache_hit);
1374
1375
vkCmdBindDescriptorSets(batch->state->cmdbuf, is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
1376
pg->layout, 0, pg->num_dsl, sets,
1377
dynamic_offset_idx, dynamic_offsets);
1378
ctx->dd->pg[is_compute] = pg;
1379
}
1380
1381
void
1382
zink_batch_descriptor_deinit(struct zink_screen *screen, struct zink_batch_state *bs)
1383
{
1384
if (!bs->dd)
1385
return;
1386
_mesa_set_destroy(bs->dd->desc_sets, NULL);
1387
zink_batch_descriptor_deinit_lazy(screen, bs);
1388
}
1389
1390
void
1391
zink_batch_descriptor_reset(struct zink_screen *screen, struct zink_batch_state *bs)
1392
{
1393
set_foreach(bs->dd->desc_sets, entry) {
1394
struct zink_descriptor_set *zds = (void*)entry->key;
1395
zink_batch_usage_unset(&zds->batch_uses, bs);
1396
/* reset descriptor pools when no bs is using this program to avoid
1397
* having some inactive program hogging a billion descriptors
1398
*/
1399
pipe_reference(&zds->reference, NULL);
1400
zink_descriptor_set_recycle(zds);
1401
_mesa_set_remove(bs->dd->desc_sets, entry);
1402
}
1403
zink_batch_descriptor_reset_lazy(screen, bs);
1404
}
1405
1406
bool
1407
zink_batch_descriptor_init(struct zink_screen *screen, struct zink_batch_state *bs)
1408
{
1409
if (!zink_batch_descriptor_init_lazy(screen, bs))
1410
return false;
1411
bs->dd->desc_sets = _mesa_pointer_set_create(bs);
1412
return !!bs->dd->desc_sets;
1413
}
1414
1415
struct zink_resource *
1416
zink_get_resource_for_descriptor(struct zink_context *ctx, enum zink_descriptor_type type, enum pipe_shader_type shader, int idx)
1417
{
1418
switch (type) {
1419
case ZINK_DESCRIPTOR_TYPE_UBO:
1420
return zink_resource(ctx->ubos[shader][idx].buffer);
1421
case ZINK_DESCRIPTOR_TYPE_SSBO:
1422
return zink_resource(ctx->ssbos[shader][idx].buffer);
1423
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
1424
return ctx->sampler_views[shader][idx] ? zink_resource(ctx->sampler_views[shader][idx]->texture) : NULL;
1425
case ZINK_DESCRIPTOR_TYPE_IMAGE:
1426
return zink_resource(ctx->image_views[shader][idx].base.resource);
1427
default:
1428
break;
1429
}
1430
unreachable("unknown descriptor type!");
1431
return NULL;
1432
}
1433
1434
static uint32_t
1435
calc_descriptor_state_hash_ubo(struct zink_context *ctx, enum pipe_shader_type shader, int idx, uint32_t hash, bool need_offset)
1436
{
1437
struct zink_resource *res = zink_get_resource_for_descriptor(ctx, ZINK_DESCRIPTOR_TYPE_UBO, shader, idx);
1438
struct zink_resource_object *obj = res ? res->obj : NULL;
1439
hash = XXH32(&obj, sizeof(void*), hash);
1440
void *hash_data = &ctx->ubos[shader][idx].buffer_size;
1441
size_t data_size = sizeof(unsigned);
1442
hash = XXH32(hash_data, data_size, hash);
1443
if (need_offset)
1444
hash = XXH32(&ctx->ubos[shader][idx].buffer_offset, sizeof(unsigned), hash);
1445
return hash;
1446
}
1447
1448
static uint32_t
1449
calc_descriptor_state_hash_ssbo(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
1450
{
1451
struct zink_resource *res = zink_get_resource_for_descriptor(ctx, ZINK_DESCRIPTOR_TYPE_SSBO, shader, idx);
1452
struct zink_resource_object *obj = res ? res->obj : NULL;
1453
hash = XXH32(&obj, sizeof(void*), hash);
1454
if (obj) {
1455
struct pipe_shader_buffer *ssbo = &ctx->ssbos[shader][idx];
1456
hash = XXH32(&ssbo->buffer_offset, sizeof(ssbo->buffer_offset), hash);
1457
hash = XXH32(&ssbo->buffer_size, sizeof(ssbo->buffer_size), hash);
1458
}
1459
return hash;
1460
}
1461
1462
static uint32_t
1463
calc_descriptor_state_hash_sampler(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
1464
{
1465
for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW][i].size; k++) {
1466
struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->sampler_views[shader][idx + k]);
1467
bool is_buffer = zink_shader_descriptor_is_buffer(zs, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, i);
1468
ctx->di.sampler_surfaces[shader][idx + k].is_buffer = is_buffer;
1469
uint32_t val = zink_get_sampler_view_hash(ctx, sampler_view, is_buffer);
1470
hash = XXH32(&val, sizeof(uint32_t), hash);
1471
if (is_buffer)
1472
continue;
1473
1474
struct zink_sampler_state *sampler_state = ctx->sampler_states[shader][idx + k];
1475
1476
if (sampler_state)
1477
hash = XXH32(&sampler_state->hash, sizeof(uint32_t), hash);
1478
}
1479
return hash;
1480
}
1481
1482
static uint32_t
calc_descriptor_state_hash_image(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
   for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_IMAGE][i].size; k++) {
      bool is_buffer = zink_shader_descriptor_is_buffer(zs, ZINK_DESCRIPTOR_TYPE_IMAGE, i);
      uint32_t val = zink_get_image_view_hash(ctx, &ctx->image_views[shader][idx + k], is_buffer);
      ctx->di.image_surfaces[shader][idx + k].is_buffer = is_buffer;
      hash = XXH32(&val, sizeof(uint32_t), hash);
   }
   return hash;
}

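/* recompute the descriptor state hash of a given type for one shader stage,
 * skipping bindings that belong to the push set
 */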
static uint32_t
update_descriptor_stage_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type)
{
   struct zink_shader *zs = shader == PIPE_SHADER_COMPUTE ? ctx->compute_stage : ctx->gfx_stages[shader];

   uint32_t hash = 0;
   for (int i = 0; i < zs->num_bindings[type]; i++) {
      /* skip push set members */
      if (zs->bindings[type][i].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
         continue;

      int idx = zs->bindings[type][i].index;
      switch (type) {
      case ZINK_DESCRIPTOR_TYPE_UBO:
         hash = calc_descriptor_state_hash_ubo(ctx, shader, idx, hash, true);
         break;
      case ZINK_DESCRIPTOR_TYPE_SSBO:
         hash = calc_descriptor_state_hash_ssbo(ctx, zs, shader, i, idx, hash);
         break;
      case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
         hash = calc_descriptor_state_hash_sampler(ctx, zs, shader, i, idx, hash);
         break;
      case ZINK_DESCRIPTOR_TYPE_IMAGE:
         hash = calc_descriptor_state_hash_image(ctx, zs, shader, i, idx, hash);
         break;
      default:
         unreachable("unknown descriptor type");
      }
   }
   return hash;
}

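/* recompute the combined descriptor state hash for one descriptor type:
 * compute uses its single stage directly, while gfx folds each stage's
 * (incrementally cached) hash into one value
 */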
static void
update_descriptor_state(struct zink_context *ctx, enum zink_descriptor_type type, bool is_compute)
{
   /* we shouldn't be calling this if we don't have to */
   assert(!ctx->dd->descriptor_states[is_compute].valid[type]);
   bool has_any_usage = false;

   if (is_compute) {
      /* just update compute state */
      bool has_usage = zink_program_get_descriptor_usage(ctx, PIPE_SHADER_COMPUTE, type);
      if (has_usage)
         ctx->dd->descriptor_states[is_compute].state[type] = update_descriptor_stage_state(ctx, PIPE_SHADER_COMPUTE, type);
      else
         ctx->dd->descriptor_states[is_compute].state[type] = 0;
      has_any_usage = has_usage;
   } else {
      /* update all gfx states */
      bool first = true;
      for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
         bool has_usage = false;
         /* this is the incremental update for the shader stage */
         if (!ctx->dd->gfx_descriptor_states[i].valid[type]) {
            ctx->dd->gfx_descriptor_states[i].state[type] = 0;
            if (ctx->gfx_stages[i]) {
               has_usage = zink_program_get_descriptor_usage(ctx, i, type);
               if (has_usage)
                  ctx->dd->gfx_descriptor_states[i].state[type] = update_descriptor_stage_state(ctx, i, type);
               ctx->dd->gfx_descriptor_states[i].valid[type] = has_usage;
            }
         }
         if (ctx->dd->gfx_descriptor_states[i].valid[type]) {
            /* this is the overall state update for the descriptor set hash */
            if (first) {
               /* no need to double hash the first state */
               ctx->dd->descriptor_states[is_compute].state[type] = ctx->dd->gfx_descriptor_states[i].state[type];
               first = false;
            } else {
               ctx->dd->descriptor_states[is_compute].state[type] = XXH32(&ctx->dd->gfx_descriptor_states[i].state[type],
                                                                          sizeof(uint32_t),
                                                                          ctx->dd->descriptor_states[is_compute].state[type]);
            }
         }
         has_any_usage |= has_usage;
      }
   }
   ctx->dd->descriptor_states[is_compute].valid[type] = has_any_usage;
}

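/* refresh the push set (ubo slot 0) hash and any stale per-type descriptor
 * state hashes for the program being used
 */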
static void
zink_context_update_descriptor_states(struct zink_context *ctx, struct zink_program *pg)
{
   if (pg->dd->push_usage && (!ctx->dd->push_valid[pg->is_compute] ||
                              pg->dd->push_usage != ctx->dd->last_push_usage[pg->is_compute])) {
      uint32_t hash = 0;
      if (pg->is_compute) {
         hash = calc_descriptor_state_hash_ubo(ctx, PIPE_SHADER_COMPUTE, 0, 0, false);
      } else {
         bool first = true;
         u_foreach_bit(stage, pg->dd->push_usage) {
            if (!ctx->dd->gfx_push_valid[stage]) {
               ctx->dd->gfx_push_state[stage] = calc_descriptor_state_hash_ubo(ctx, stage, 0, 0, false);
               ctx->dd->gfx_push_valid[stage] = true;
            }
            if (first)
               hash = ctx->dd->gfx_push_state[stage];
            else
               hash = XXH32(&ctx->dd->gfx_push_state[stage], sizeof(uint32_t), hash);
            first = false;
         }
      }
      ctx->dd->push_state[pg->is_compute] = hash;
      ctx->dd->push_valid[pg->is_compute] = true;
      ctx->dd->last_push_usage[pg->is_compute] = pg->dd->push_usage;
   }
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
      if (pdd_cached(pg)->pool[i] && !ctx->dd->descriptor_states[pg->is_compute].valid[i])
         update_descriptor_state(ctx, i, pg->is_compute);
   }
}

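/* mark cached descriptor state dirty after a binding change; ubo slot 0 is
 * the push set and gets its own invalidation path
 */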
void
zink_context_invalidate_descriptor_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type, unsigned start, unsigned count)
{
   if (type == ZINK_DESCRIPTOR_TYPE_UBO && !start) {
      /* ubo 0 is the push set */
      ctx->dd->push_state[shader == PIPE_SHADER_COMPUTE] = 0;
      ctx->dd->push_valid[shader == PIPE_SHADER_COMPUTE] = false;
      if (shader != PIPE_SHADER_COMPUTE) {
         ctx->dd->gfx_push_state[shader] = 0;
         ctx->dd->gfx_push_valid[shader] = false;
      }
      ctx->dd->changed[shader == PIPE_SHADER_COMPUTE][ZINK_DESCRIPTOR_TYPES] = true;
      return;
   }
   if (shader != PIPE_SHADER_COMPUTE) {
      ctx->dd->gfx_descriptor_states[shader].valid[type] = false;
      ctx->dd->gfx_descriptor_states[shader].state[type] = 0;
   }
   ctx->dd->descriptor_states[shader == PIPE_SHADER_COMPUTE].valid[type] = false;
   ctx->dd->descriptor_states[shader == PIPE_SHADER_COMPUTE].state[type] = 0;
   ctx->dd->changed[shader == PIPE_SHADER_COMPUTE][type] = true;
}

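/* the cached descriptor backend is layered on top of the lazy backend's data,
 * so that is initialized first
 */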
bool
zink_descriptors_init(struct zink_context *ctx)
{
   zink_descriptors_init_lazy(ctx);
   if (!ctx->dd)
      return false;
   return zink_descriptor_pool_init(ctx);
}

void
zink_descriptors_deinit(struct zink_context *ctx)
{
   zink_descriptor_pool_deinit(ctx);
   zink_descriptors_deinit_lazy(ctx);
}

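/* per-type hash tables that deduplicate descriptor set layouts for this context */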
bool
zink_descriptor_layouts_init(struct zink_context *ctx)
{
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++)
      if (!_mesa_hash_table_init(&ctx->desc_set_layouts[i], ctx, hash_descriptor_layout, equals_descriptor_layout))
         return false;
   return true;
}

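/* destroy every cached layout (and its update template, if present) and drain the tables */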
void
zink_descriptor_layouts_deinit(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
      hash_table_foreach(&ctx->desc_set_layouts[i], he) {
         struct zink_descriptor_layout *layout = he->data;
         vkDestroyDescriptorSetLayout(screen->dev, layout->layout, NULL);
         if (layout->desc_template)
            screen->vk.DestroyDescriptorUpdateTemplate(screen->dev, layout->desc_template, NULL);
         ralloc_free(layout);
         _mesa_hash_table_remove(&ctx->desc_set_layouts[i], he);
      }
   }
}