GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/amd/vulkan/radv_pipeline_cache.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/macros.h"
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "vulkan/util/vk_util.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"

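/* An in-memory cache entry, keyed by the pipeline SHA-1. The per-stage shader
 * binaries are stored back to back in the flexible `code` array, with
 * `binary_sizes[i]` giving the size of stage i's binary (0 if absent). The
 * `variants` pointers are only meaningful in memory and are cleared whenever
 * an entry is written to the disk cache or serialized.
 */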
struct cache_entry {
   union {
      unsigned char sha1[20];
      uint32_t sha1_dw[5];
   };
   uint32_t binary_sizes[MESA_SHADER_STAGES];
   struct radv_shader_variant *variants[MESA_SHADER_STAGES];
   char code[0];
};

static void
radv_pipeline_cache_lock(struct radv_pipeline_cache *cache)
{
   if (cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT)
      return;

   mtx_lock(&cache->mutex);
}

static void
radv_pipeline_cache_unlock(struct radv_pipeline_cache *cache)
{
   if (cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT)
      return;

   mtx_unlock(&cache->mutex);
}

void
radv_pipeline_cache_init(struct radv_pipeline_cache *cache, struct radv_device *device)
{
   cache->device = device;
   mtx_init(&cache->mutex, mtx_plain);
   cache->flags = 0;

   cache->modified = false;
   cache->kernel_count = 0;
   cache->total_size = 0;
   cache->table_size = 1024;
   const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]);
   cache->hash_table = malloc(byte_size);

   /* We don't consider allocation failure fatal, we just start with a 0-sized
    * cache. Disable caching when we want to keep shader debug info, since
    * we don't get the debug info on cached shaders. */
   if (cache->hash_table == NULL || (device->instance->debug_flags & RADV_DEBUG_NO_CACHE))
      cache->table_size = 0;
   else
      memset(cache->hash_table, 0, byte_size);
}

void
radv_pipeline_cache_finish(struct radv_pipeline_cache *cache)
{
   for (unsigned i = 0; i < cache->table_size; ++i)
      if (cache->hash_table[i]) {
         for (int j = 0; j < MESA_SHADER_STAGES; ++j) {
            if (cache->hash_table[i]->variants[j])
               radv_shader_variant_destroy(cache->device, cache->hash_table[i]->variants[j]);
         }
         vk_free(&cache->alloc, cache->hash_table[i]);
      }
   mtx_destroy(&cache->mutex);
   free(cache->hash_table);
}

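/* Size of an entry including its trailing shader binaries, rounded up to the
 * alignment of struct cache_entry so entries can be packed back to back when
 * the cache is serialized.
 */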
static uint32_t
entry_size(struct cache_entry *entry)
{
   size_t ret = sizeof(*entry);
   for (int i = 0; i < MESA_SHADER_STAGES; ++i)
      if (entry->binary_sizes[i])
         ret += entry->binary_sizes[i];
   ret = align(ret, alignof(struct cache_entry));
   return ret;
}

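/* Compute the pipeline hash: a SHA-1 over the pipeline key, the pipeline
 * layout hash and, for each active stage, the shader module hash, entry point
 * name and specialization constants, plus the hash flags.
 */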
void
radv_hash_shaders(unsigned char *hash, const VkPipelineShaderStageCreateInfo **stages,
                  const struct radv_pipeline_layout *layout, const struct radv_pipeline_key *key,
                  uint32_t flags)
{
   struct mesa_sha1 ctx;

   _mesa_sha1_init(&ctx);
   if (key)
      _mesa_sha1_update(&ctx, key, sizeof(*key));
   if (layout)
      _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));

   for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
      if (stages[i]) {
         RADV_FROM_HANDLE(vk_shader_module, module, stages[i]->module);
         const VkSpecializationInfo *spec_info = stages[i]->pSpecializationInfo;

         _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
         _mesa_sha1_update(&ctx, stages[i]->pName, strlen(stages[i]->pName));
         if (spec_info && spec_info->mapEntryCount) {
            _mesa_sha1_update(&ctx, spec_info->pMapEntries,
                              spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
            _mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize);
         }
      }
   }
   _mesa_sha1_update(&ctx, &flags, 4);
   _mesa_sha1_final(&ctx, hash);
}

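/* The hash table is open-addressed with linear probing; table_size is always
 * a power of two (or 0), so the first 32 bits of the SHA-1 masked by
 * table_size - 1 pick the starting slot. The table is kept at most half full,
 * so probing always terminates at an empty slot or a matching entry.
 */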
static struct cache_entry *
radv_pipeline_cache_search_unlocked(struct radv_pipeline_cache *cache, const unsigned char *sha1)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = (*(uint32_t *)sha1);

   if (cache->table_size == 0)
      return NULL;

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      struct cache_entry *entry = cache->hash_table[index];

      if (!entry)
         return NULL;

      if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
         return entry;
      }
   }

   unreachable("hash table should never be full");
}

static struct cache_entry *
radv_pipeline_cache_search(struct radv_pipeline_cache *cache, const unsigned char *sha1)
{
   struct cache_entry *entry;

   radv_pipeline_cache_lock(cache);

   entry = radv_pipeline_cache_search_unlocked(cache, sha1);

   radv_pipeline_cache_unlock(cache);

   return entry;
}

static void
radv_pipeline_cache_set_entry(struct radv_pipeline_cache *cache, struct cache_entry *entry)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = entry->sha1_dw[0];

   /* We'll always be able to insert when we get here. */
   assert(cache->kernel_count < cache->table_size / 2);

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      if (!cache->hash_table[index]) {
         cache->hash_table[index] = entry;
         break;
      }
   }

   cache->total_size += entry_size(entry);
   cache->kernel_count++;
}

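/* Double the table size and rehash all existing entries into the new table. */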
static VkResult
radv_pipeline_cache_grow(struct radv_pipeline_cache *cache)
{
   const uint32_t table_size = cache->table_size * 2;
   const uint32_t old_table_size = cache->table_size;
   const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
   struct cache_entry **table;
   struct cache_entry **old_table = cache->hash_table;

   table = malloc(byte_size);
   if (table == NULL)
      return vk_error(cache->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cache->hash_table = table;
   cache->table_size = table_size;
   cache->kernel_count = 0;
   cache->total_size = 0;

   memset(cache->hash_table, 0, byte_size);
   for (uint32_t i = 0; i < old_table_size; i++) {
      struct cache_entry *entry = old_table[i];
      if (!entry)
         continue;

      radv_pipeline_cache_set_entry(cache, entry);
   }

   free(old_table);

   return VK_SUCCESS;
}

static void
radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache, struct cache_entry *entry)
{
   if (cache->kernel_count == cache->table_size / 2)
      radv_pipeline_cache_grow(cache);

   /* Failing to grow the hash table isn't fatal, but it may mean we don't
    * have enough space to add this new kernel. Only add it if there's room.
    */
   if (cache->kernel_count < cache->table_size / 2)
      radv_pipeline_cache_set_entry(cache, entry);
}

static bool
radv_is_cache_disabled(struct radv_device *device)
{
   /* Pipeline caches can be disabled with RADV_DEBUG=nocache, with
    * MESA_GLSL_CACHE_DISABLE=1, and when VK_AMD_shader_info is requested.
    */
   return (device->instance->debug_flags & RADV_DEBUG_NO_CACHE);
}

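/* Look up the pipeline hash in the given cache (or the device's in-memory
 * cache if none is provided), falling back to the on-disk cache, and create
 * the shader variants from the cached binaries. Returns false on a miss so
 * the caller can compile from scratch.
 */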
bool
radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
                                                struct radv_pipeline_cache *cache,
                                                const unsigned char *sha1,
                                                struct radv_shader_variant **variants,
                                                bool *found_in_application_cache)
{
   struct cache_entry *entry;

   if (!cache) {
      cache = device->mem_cache;
      *found_in_application_cache = false;
   }

   radv_pipeline_cache_lock(cache);

   entry = radv_pipeline_cache_search_unlocked(cache, sha1);

   if (!entry) {
      *found_in_application_cache = false;

      /* Don't cache when we want debug info, since this isn't
       * present in the cache.
       */
      if (radv_is_cache_disabled(device) || !device->physical_device->disk_cache) {
         radv_pipeline_cache_unlock(cache);
         return false;
      }

      uint8_t disk_sha1[20];
      disk_cache_compute_key(device->physical_device->disk_cache, sha1, 20, disk_sha1);

      entry =
         (struct cache_entry *)disk_cache_get(device->physical_device->disk_cache, disk_sha1, NULL);
      if (!entry) {
         radv_pipeline_cache_unlock(cache);
         return false;
      } else {
         size_t size = entry_size(entry);
         struct cache_entry *new_entry =
            vk_alloc(&cache->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
         if (!new_entry) {
            free(entry);
            radv_pipeline_cache_unlock(cache);
            return false;
         }

         memcpy(new_entry, entry, entry_size(entry));
         free(entry);
         entry = new_entry;

         if (!(device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE) ||
             cache != device->mem_cache)
            radv_pipeline_cache_add_entry(cache, new_entry);
      }
   }

   char *p = entry->code;
   for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
      if (!entry->variants[i] && entry->binary_sizes[i]) {
         struct radv_shader_binary *binary = calloc(1, entry->binary_sizes[i]);
         memcpy(binary, p, entry->binary_sizes[i]);
         p += entry->binary_sizes[i];

         entry->variants[i] = radv_shader_variant_create(device, binary, false);
         free(binary);
      } else if (entry->binary_sizes[i]) {
         p += entry->binary_sizes[i];
      }
   }

   memcpy(variants, entry->variants, sizeof(entry->variants));

   if (device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE && cache == device->mem_cache)
      vk_free(&cache->alloc, entry);
   else {
      for (int i = 0; i < MESA_SHADER_STAGES; ++i)
         if (entry->variants[i])
            p_atomic_inc(&entry->variants[i]->ref_count);
   }

   radv_pipeline_cache_unlock(cache);
   return true;
}

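/* Add freshly compiled shader variants to the cache. If an entry for this
 * hash already exists (e.g. from a concurrent compile), any variants it
 * already holds win and the caller's variants array is updated to point at
 * them. New entries are also written to the on-disk cache before the variant
 * pointers are filled in.
 */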
void
radv_pipeline_cache_insert_shaders(struct radv_device *device, struct radv_pipeline_cache *cache,
                                   const unsigned char *sha1, struct radv_shader_variant **variants,
                                   struct radv_shader_binary *const *binaries)
{
   if (!cache)
      cache = device->mem_cache;

   radv_pipeline_cache_lock(cache);
   struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
   if (entry) {
      for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
         if (entry->variants[i]) {
            radv_shader_variant_destroy(cache->device, variants[i]);
            variants[i] = entry->variants[i];
         } else {
            entry->variants[i] = variants[i];
         }
         if (variants[i])
            p_atomic_inc(&variants[i]->ref_count);
      }
      radv_pipeline_cache_unlock(cache);
      return;
   }

   /* Don't cache when we want debug info, since this isn't
    * present in the cache.
    */
   if (radv_is_cache_disabled(device)) {
      radv_pipeline_cache_unlock(cache);
      return;
   }

   size_t size = sizeof(*entry);
   for (int i = 0; i < MESA_SHADER_STAGES; ++i)
      if (variants[i])
         size += binaries[i]->total_size;
   const size_t size_without_align = size;
   size = align(size_without_align, alignof(struct cache_entry));

   entry = vk_alloc(&cache->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
   if (!entry) {
      radv_pipeline_cache_unlock(cache);
      return;
   }

   memset(entry, 0, sizeof(*entry));
   memcpy(entry->sha1, sha1, 20);

   char *p = entry->code;

   for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
      if (!variants[i])
         continue;

      entry->binary_sizes[i] = binaries[i]->total_size;

      memcpy(p, binaries[i], binaries[i]->total_size);
      p += binaries[i]->total_size;
   }

   // Make valgrind happy by filling the alignment hole at the end.
   assert(p == (char *)entry + size_without_align);
   assert(sizeof(*entry) + (p - entry->code) == size_without_align);
   memset((char *)entry + size_without_align, 0, size - size_without_align);

   /* Always add cache items to disk. This will allow collection of
    * compiled shaders by third parties such as steam, even if the app
    * implements its own pipeline cache.
    */
   if (device->physical_device->disk_cache) {
      uint8_t disk_sha1[20];
      disk_cache_compute_key(device->physical_device->disk_cache, sha1, 20, disk_sha1);

      disk_cache_put(device->physical_device->disk_cache, disk_sha1, entry, entry_size(entry),
                     NULL);
   }

   if (device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE && cache == device->mem_cache) {
      vk_free2(&cache->alloc, NULL, entry);
      radv_pipeline_cache_unlock(cache);
      return;
   }

   /* We delay setting the variant so we have reproducible disk cache
    * items.
    */
   for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
      if (!variants[i])
         continue;

      entry->variants[i] = variants[i];
      p_atomic_inc(&variants[i]->ref_count);
   }

   radv_pipeline_cache_add_entry(cache, entry);

   cache->modified = true;
   radv_pipeline_cache_unlock(cache);
   return;
}

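/* Import a serialized pipeline cache blob (VkPipelineCacheCreateInfo initial
 * data). The Vulkan cache header is validated against this device before any
 * entries are copied in; variant pointers are cleared since they are only
 * meaningful in memory.
 */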
bool
radv_pipeline_cache_load(struct radv_pipeline_cache *cache, const void *data, size_t size)
{
   struct radv_device *device = cache->device;
   struct vk_pipeline_cache_header header;

   if (size < sizeof(header))
      return false;
   memcpy(&header, data, sizeof(header));
   if (header.header_size < sizeof(header))
      return false;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return false;
   if (header.vendor_id != ATI_VENDOR_ID)
      return false;
   if (header.device_id != device->physical_device->rad_info.pci_id)
      return false;
   if (memcmp(header.uuid, device->physical_device->cache_uuid, VK_UUID_SIZE) != 0)
      return false;

   char *end = (char *)data + size;
   char *p = (char *)data + header.header_size;

   while (end - p >= sizeof(struct cache_entry)) {
      struct cache_entry *entry = (struct cache_entry *)p;
      struct cache_entry *dest_entry;
      size_t size_of_entry = entry_size(entry);
      if (end - p < size_of_entry)
         break;

      dest_entry = vk_alloc(&cache->alloc, size_of_entry, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
      if (dest_entry) {
         memcpy(dest_entry, entry, size_of_entry);
         for (int i = 0; i < MESA_SHADER_STAGES; ++i)
            dest_entry->variants[i] = NULL;
         radv_pipeline_cache_add_entry(cache, dest_entry);
      }
      p += size_of_entry;
   }

   return true;
}

VkResult
radv_CreatePipelineCache(VkDevice _device, const VkPipelineCacheCreateInfo *pCreateInfo,
                         const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);

   cache = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &cache->base, VK_OBJECT_TYPE_PIPELINE_CACHE);

   if (pAllocator)
      cache->alloc = *pAllocator;
   else
      cache->alloc = device->vk.alloc;

   radv_pipeline_cache_init(cache, device);
   cache->flags = pCreateInfo->flags;

   if (pCreateInfo->initialDataSize > 0) {
      radv_pipeline_cache_load(cache, pCreateInfo->pInitialData, pCreateInfo->initialDataSize);
   }

   *pPipelineCache = radv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void
radv_DestroyPipelineCache(VkDevice _device, VkPipelineCache _cache,
                          const VkAllocationCallbacks *pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);

   if (!cache)
      return;
   radv_pipeline_cache_finish(cache);

   vk_object_base_finish(&cache->base);
   vk_free2(&device->vk.alloc, pAllocator, cache);
}

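/* Serialize the cache: a Vulkan pipeline cache header followed by every
 * entry, with the in-memory variant pointers zeroed out. When pData is NULL,
 * only the required size is returned.
 */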
VkResult
radv_GetPipelineCacheData(VkDevice _device, VkPipelineCache _cache, size_t *pDataSize, void *pData)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
   struct vk_pipeline_cache_header *header;
   VkResult result = VK_SUCCESS;

   radv_pipeline_cache_lock(cache);

   const size_t size = sizeof(*header) + cache->total_size;
   if (pData == NULL) {
      radv_pipeline_cache_unlock(cache);
      *pDataSize = size;
      return VK_SUCCESS;
   }
   if (*pDataSize < sizeof(*header)) {
      radv_pipeline_cache_unlock(cache);
      *pDataSize = 0;
      return VK_INCOMPLETE;
   }
   void *p = pData, *end = (char *)pData + *pDataSize;
   header = p;
   header->header_size = align(sizeof(*header), alignof(struct cache_entry));
   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
   header->vendor_id = ATI_VENDOR_ID;
   header->device_id = device->physical_device->rad_info.pci_id;
   memcpy(header->uuid, device->physical_device->cache_uuid, VK_UUID_SIZE);
   p = (char *)p + header->header_size;

   struct cache_entry *entry;
   for (uint32_t i = 0; i < cache->table_size; i++) {
      if (!cache->hash_table[i])
         continue;
      entry = cache->hash_table[i];
      const uint32_t size_of_entry = entry_size(entry);
      if ((char *)end < (char *)p + size_of_entry) {
         result = VK_INCOMPLETE;
         break;
      }

      memcpy(p, entry, size_of_entry);
      for (int j = 0; j < MESA_SHADER_STAGES; ++j)
         ((struct cache_entry *)p)->variants[j] = NULL;
      p = (char *)p + size_of_entry;
   }
   *pDataSize = (char *)p - (char *)pData;

   radv_pipeline_cache_unlock(cache);
   return result;
}

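/* Move entries from src into dst, skipping hashes dst already has. Moved
 * slots are cleared in src so ownership transfers to dst.
 */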
static void
radv_pipeline_cache_merge(struct radv_pipeline_cache *dst, struct radv_pipeline_cache *src)
{
   for (uint32_t i = 0; i < src->table_size; i++) {
      struct cache_entry *entry = src->hash_table[i];
      if (!entry || radv_pipeline_cache_search(dst, entry->sha1))
         continue;

      radv_pipeline_cache_add_entry(dst, entry);

      src->hash_table[i] = NULL;
   }
}

VkResult
radv_MergePipelineCaches(VkDevice _device, VkPipelineCache destCache, uint32_t srcCacheCount,
                         const VkPipelineCache *pSrcCaches)
{
   RADV_FROM_HANDLE(radv_pipeline_cache, dst, destCache);

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      RADV_FROM_HANDLE(radv_pipeline_cache, src, pSrcCaches[i]);

      radv_pipeline_cache_merge(dst, src);
   }

   return VK_SUCCESS;
}