PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/amd/vulkan/radv_descriptor_set.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>

#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "sid.h"
#include "vk_descriptors.h"
#include "vk_format.h"
#include "vk_util.h"

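/* Returns true when all immutable samplers in a binding share identical
 * hardware sampler state, so the layout can drop the per-element copies. */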
static bool
has_equal_immutable_samplers(const VkSampler *samplers, uint32_t count)
{
   if (!samplers)
      return false;
   for (uint32_t i = 1; i < count; ++i) {
      if (memcmp(radv_sampler_from_handle(samplers[0])->state,
                 radv_sampler_from_handle(samplers[i])->state, 16)) {
         return false;
      }
   }
   return true;
}

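/* Worst-case descriptor size/alignment for a VK_VALVE_mutable_descriptor_type
 * list; returns false if the list contains a type that cannot live in a
 * mutable slot. */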
static bool
radv_mutable_descriptor_type_size_alignment(const VkMutableDescriptorTypeListVALVE *list,
                                            uint64_t *out_size, uint64_t *out_align)
{
   uint32_t max_size = 0;
   uint32_t max_align = 0;

   for (uint32_t i = 0; i < list->descriptorTypeCount; i++) {
      uint32_t size = 0;
      uint32_t align = 0;

      switch (list->pDescriptorTypes[i]) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         size = 16;
         align = 16;
         break;
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         size = 64;
         align = 32;
         break;
      default:
         return false;
      }

      max_size = MAX2(max_size, size);
      max_align = MAX2(max_align, align);
   }

   *out_size = max_size;
   *out_align = max_align;
   return true;
}

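/* The set layout is a single allocation: the binding array, then the
 * immutable sampler words, then (optionally) per-binding YCbCr offsets and
 * conversion structs, as sized below. */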
VkResult
radv_CreateDescriptorSetLayout(VkDevice _device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator,
                               VkDescriptorSetLayout *pSetLayout)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_descriptor_set_layout *set_layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
   const VkDescriptorSetLayoutBindingFlagsCreateInfo *variable_flags =
      vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);
   const VkMutableDescriptorTypeCreateInfoVALVE *mutable_info =
      vk_find_struct_const(pCreateInfo->pNext, MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE);

   uint32_t num_bindings = 0;
   uint32_t immutable_sampler_count = 0;
   uint32_t ycbcr_sampler_count = 0;
   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      num_bindings = MAX2(num_bindings, pCreateInfo->pBindings[j].binding + 1);
      if ((pCreateInfo->pBindings[j].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
           pCreateInfo->pBindings[j].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) &&
          pCreateInfo->pBindings[j].pImmutableSamplers) {
         immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;

         bool has_ycbcr_sampler = false;
         for (unsigned i = 0; i < pCreateInfo->pBindings[j].descriptorCount; ++i) {
            if (radv_sampler_from_handle(pCreateInfo->pBindings[j].pImmutableSamplers[i])
                   ->ycbcr_sampler)
               has_ycbcr_sampler = true;
         }

         if (has_ycbcr_sampler)
            ycbcr_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
      }
   }

   uint32_t samplers_offset = offsetof(struct radv_descriptor_set_layout, binding[num_bindings]);
   size_t size = samplers_offset + immutable_sampler_count * 4 * sizeof(uint32_t);
   if (ycbcr_sampler_count > 0) {
      /* Store block of offsets first, followed by the conversion descriptors (padded to the struct
       * alignment) */
      size += num_bindings * sizeof(uint32_t);
      size = ALIGN(size, alignof(struct radv_sampler_ycbcr_conversion));
      size += ycbcr_sampler_count * sizeof(struct radv_sampler_ycbcr_conversion);
   }

   set_layout =
      vk_zalloc2(&device->vk.alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!set_layout)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &set_layout->base, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);

   set_layout->flags = pCreateInfo->flags;
   set_layout->layout_size = size;

   /* We just allocate all the samplers at the end of the struct */
   uint32_t *samplers = (uint32_t *)&set_layout->binding[num_bindings];
   struct radv_sampler_ycbcr_conversion *ycbcr_samplers = NULL;
   uint32_t *ycbcr_sampler_offsets = NULL;

   if (ycbcr_sampler_count > 0) {
      ycbcr_sampler_offsets = samplers + 4 * immutable_sampler_count;
      set_layout->ycbcr_sampler_offsets_offset = (char *)ycbcr_sampler_offsets - (char *)set_layout;

      uintptr_t first_ycbcr_sampler_offset =
         (uintptr_t)ycbcr_sampler_offsets + sizeof(uint32_t) * num_bindings;
      first_ycbcr_sampler_offset =
         ALIGN(first_ycbcr_sampler_offset, alignof(struct radv_sampler_ycbcr_conversion));
      ycbcr_samplers = (struct radv_sampler_ycbcr_conversion *)first_ycbcr_sampler_offset;
   } else
      set_layout->ycbcr_sampler_offsets_offset = 0;

   VkDescriptorSetLayoutBinding *bindings = NULL;
   VkResult result =
      vk_create_sorted_bindings(pCreateInfo->pBindings, pCreateInfo->bindingCount, &bindings);
   if (result != VK_SUCCESS) {
      vk_object_base_finish(&set_layout->base);
      vk_free2(&device->vk.alloc, pAllocator, set_layout);
      return vk_error(device->instance, result);
   }

   set_layout->binding_count = num_bindings;
   set_layout->shader_stages = 0;
   set_layout->dynamic_shader_stages = 0;
   set_layout->has_immutable_samplers = false;
   set_layout->size = 0;

   uint32_t buffer_count = 0;
   uint32_t dynamic_offset_count = 0;

   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      const VkDescriptorSetLayoutBinding *binding = bindings + j;
      uint32_t b = binding->binding;
      uint32_t alignment = 0;
      unsigned binding_buffer_count = 0;
      uint32_t descriptor_count = binding->descriptorCount;
      bool has_ycbcr_sampler = false;

      /* main image + fmask */
      uint32_t max_sampled_image_descriptors = 2;

      if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
          binding->pImmutableSamplers) {
         for (unsigned i = 0; i < binding->descriptorCount; ++i) {
            struct radv_sampler_ycbcr_conversion *conversion =
               radv_sampler_from_handle(binding->pImmutableSamplers[i])->ycbcr_sampler;

            if (conversion) {
               has_ycbcr_sampler = true;
               max_sampled_image_descriptors = MAX2(max_sampled_image_descriptors,
                                                    vk_format_get_plane_count(conversion->format));
            }
         }
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         assert(!(pCreateInfo->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
         set_layout->binding[b].dynamic_offset_count = 1;
         set_layout->dynamic_shader_stages |= binding->stageFlags;
         set_layout->binding[b].size = 0;
         binding_buffer_count = 1;
         alignment = 1;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         set_layout->binding[b].size = 16;
         binding_buffer_count = 1;
         alignment = 16;
         break;
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         /* main descriptor + fmask descriptor */
         set_layout->binding[b].size = 64;
         binding_buffer_count = 1;
         alignment = 32;
         break;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         /* main descriptor + fmask descriptor + sampler */
         set_layout->binding[b].size = 96;
         binding_buffer_count = 1;
         alignment = 32;
         break;
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         set_layout->binding[b].size = 16;
         alignment = 16;
         break;
      case VK_DESCRIPTOR_TYPE_MUTABLE_VALVE: {
         uint64_t mutable_size = 0, mutable_align = 0;
         radv_mutable_descriptor_type_size_alignment(&mutable_info->pMutableDescriptorTypeLists[j],
                                                     &mutable_size, &mutable_align);
         assert(mutable_size && mutable_align);
         set_layout->binding[b].size = mutable_size;
         binding_buffer_count = 1;
         alignment = mutable_align;
         break;
      }
      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
         alignment = 16;
         set_layout->binding[b].size = descriptor_count;
         descriptor_count = 1;
         break;
      case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
         set_layout->binding[b].size = 16;
         alignment = 16;
         break;
      default:
         break;
      }

      set_layout->size = align(set_layout->size, alignment);
      set_layout->binding[b].type = binding->descriptorType;
      set_layout->binding[b].array_size = descriptor_count;
      set_layout->binding[b].offset = set_layout->size;
      set_layout->binding[b].buffer_offset = buffer_count;
      set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;

      if (variable_flags && binding->binding < variable_flags->bindingCount &&
          (variable_flags->pBindingFlags[binding->binding] &
           VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
         assert(
            !binding->pImmutableSamplers); /* Terribly ill defined how many samplers are valid */
         assert(binding->binding == num_bindings - 1);

         set_layout->has_variable_descriptors = true;
      }

      if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
           binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) &&
          binding->pImmutableSamplers) {
         set_layout->binding[b].immutable_samplers_offset = samplers_offset;
         set_layout->binding[b].immutable_samplers_equal =
            has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount);
         set_layout->has_immutable_samplers = true;

         for (uint32_t i = 0; i < binding->descriptorCount; i++)
            memcpy(samplers + 4 * i,
                   &radv_sampler_from_handle(binding->pImmutableSamplers[i])->state, 16);

         /* Don't reserve space for the samplers if they're not accessed. */
         if (set_layout->binding[b].immutable_samplers_equal) {
            if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
                max_sampled_image_descriptors <= 2)
               set_layout->binding[b].size -= 32;
            else if (binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
               set_layout->binding[b].size -= 16;
         }
         samplers += 4 * binding->descriptorCount;
         samplers_offset += 4 * sizeof(uint32_t) * binding->descriptorCount;

         if (has_ycbcr_sampler) {
            ycbcr_sampler_offsets[b] = (const char *)ycbcr_samplers - (const char *)set_layout;
            for (uint32_t i = 0; i < binding->descriptorCount; i++) {
               if (radv_sampler_from_handle(binding->pImmutableSamplers[i])->ycbcr_sampler)
                  ycbcr_samplers[i] =
                     *radv_sampler_from_handle(binding->pImmutableSamplers[i])->ycbcr_sampler;
               else
                  ycbcr_samplers[i].format = VK_FORMAT_UNDEFINED;
            }
            ycbcr_samplers += binding->descriptorCount;
         }
      }

      set_layout->size += descriptor_count * set_layout->binding[b].size;
      buffer_count += descriptor_count * binding_buffer_count;
      dynamic_offset_count += descriptor_count * set_layout->binding[b].dynamic_offset_count;
      set_layout->shader_stages |= binding->stageFlags;
   }

   free(bindings);

   set_layout->buffer_count = buffer_count;
   set_layout->dynamic_offset_count = dynamic_offset_count;

   *pSetLayout = radv_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}

void
radv_DestroyDescriptorSetLayout(VkDevice _device, VkDescriptorSetLayout _set_layout,
                                const VkAllocationCallbacks *pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, _set_layout);

   if (!set_layout)
      return;

   vk_object_base_finish(&set_layout->base);
   vk_free2(&device->vk.alloc, pAllocator, set_layout);
}

void
radv_GetDescriptorSetLayoutSupport(VkDevice device,
                                   const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                   VkDescriptorSetLayoutSupport *pSupport)
{
   VkDescriptorSetLayoutBinding *bindings = NULL;
   VkResult result =
      vk_create_sorted_bindings(pCreateInfo->pBindings, pCreateInfo->bindingCount, &bindings);
   if (result != VK_SUCCESS) {
      pSupport->supported = false;
      return;
   }

   const VkDescriptorSetLayoutBindingFlagsCreateInfo *variable_flags =
      vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);
   VkDescriptorSetVariableDescriptorCountLayoutSupport *variable_count = vk_find_struct(
      (void *)pCreateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT);
   const VkMutableDescriptorTypeCreateInfoVALVE *mutable_info =
      vk_find_struct_const(pCreateInfo->pNext, MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE);
   if (variable_count) {
      variable_count->maxVariableDescriptorCount = 0;
   }

   bool supported = true;
   uint64_t size = 0;
   for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
      const VkDescriptorSetLayoutBinding *binding = bindings + i;

      uint64_t descriptor_size = 0;
      uint64_t descriptor_alignment = 1;
      uint32_t descriptor_count = binding->descriptorCount;
      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         descriptor_size = 16;
         descriptor_alignment = 16;
         break;
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         descriptor_size = 64;
         descriptor_alignment = 32;
         break;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         if (!has_equal_immutable_samplers(binding->pImmutableSamplers, descriptor_count)) {
            descriptor_size = 64;
         } else {
            descriptor_size = 96;
         }
         descriptor_alignment = 32;
         break;
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         if (!has_equal_immutable_samplers(binding->pImmutableSamplers, descriptor_count)) {
            descriptor_size = 16;
            descriptor_alignment = 16;
         }
         break;
      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
         descriptor_alignment = 16;
         descriptor_size = descriptor_count;
         descriptor_count = 1;
         break;
      case VK_DESCRIPTOR_TYPE_MUTABLE_VALVE:
         if (!radv_mutable_descriptor_type_size_alignment(
                &mutable_info->pMutableDescriptorTypeLists[i], &descriptor_size,
                &descriptor_alignment)) {
            supported = false;
         }
         break;
      case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
         descriptor_size = 16;
         descriptor_alignment = 16;
         break;
      default:
         break;
      }

      if (size && !align_u64(size, descriptor_alignment)) {
         supported = false;
      }
      size = align_u64(size, descriptor_alignment);

      uint64_t max_count = INT32_MAX;
      if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
         max_count = INT32_MAX - size;
      else if (descriptor_size)
         max_count = (INT32_MAX - size) / descriptor_size;

      if (max_count < descriptor_count) {
         supported = false;
      }
      if (variable_flags && binding->binding < variable_flags->bindingCount && variable_count &&
          (variable_flags->pBindingFlags[binding->binding] &
           VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
         variable_count->maxVariableDescriptorCount = MIN2(UINT32_MAX, max_count);
      }
      size += descriptor_count * descriptor_size;
   }

   free(bindings);

   pSupport->supported = supported;
}

/*
 * Pipeline layouts. These have nothing to do with the pipeline. They are
 * just multiple descriptor set layouts pasted together.
 */

VkResult
radv_CreatePipelineLayout(VkDevice _device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                          const VkAllocationCallbacks *pAllocator,
                          VkPipelineLayout *pPipelineLayout)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_pipeline_layout *layout;
   struct mesa_sha1 ctx;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   layout = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*layout), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (layout == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &layout->base, VK_OBJECT_TYPE_PIPELINE_LAYOUT);

   layout->num_sets = pCreateInfo->setLayoutCount;

   unsigned dynamic_offset_count = 0;
   uint16_t dynamic_shader_stages = 0;

   _mesa_sha1_init(&ctx);
   for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
      RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->pSetLayouts[set]);
      layout->set[set].layout = set_layout;

      layout->set[set].dynamic_offset_start = dynamic_offset_count;

      for (uint32_t b = 0; b < set_layout->binding_count; b++) {
         dynamic_offset_count += set_layout->binding[b].array_size * set_layout->binding[b].dynamic_offset_count;
         dynamic_shader_stages |= set_layout->dynamic_shader_stages;
      }

      /* Hash the entire set layout except for the vk_object_base. The
       * rest of the set layout is carefully constructed to not have
       * pointers so a full hash instead of a per-field hash should be ok. */
      _mesa_sha1_update(&ctx, (const char *)set_layout + sizeof(struct vk_object_base),
                        set_layout->layout_size - sizeof(struct vk_object_base));
   }

   layout->dynamic_offset_count = dynamic_offset_count;
   layout->dynamic_shader_stages = dynamic_shader_stages;
   layout->push_constant_size = 0;

   for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
      const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
      layout->push_constant_size = MAX2(layout->push_constant_size, range->offset + range->size);
   }

   layout->push_constant_size = align(layout->push_constant_size, 16);
   _mesa_sha1_update(&ctx, &layout->push_constant_size, sizeof(layout->push_constant_size));
   _mesa_sha1_final(&ctx, layout->sha1);
   *pPipelineLayout = radv_pipeline_layout_to_handle(layout);

   return VK_SUCCESS;
}

void
radv_DestroyPipelineLayout(VkDevice _device, VkPipelineLayout _pipelineLayout,
                           const VkAllocationCallbacks *pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);

   if (!pipeline_layout)
      return;

   vk_object_base_finish(&pipeline_layout->base);
   vk_free2(&device->vk.alloc, pAllocator, pipeline_layout);
}

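/* Sets come either from the pool's host memory block (pools created without
 * FREE_DESCRIPTOR_SET_BIT) or from a heap allocation; GPU space is carved out
 * of the pool BO linearly when possible, otherwise by scanning for a gap. */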
static VkResult
radv_descriptor_set_create(struct radv_device *device, struct radv_descriptor_pool *pool,
                           const struct radv_descriptor_set_layout *layout,
                           const uint32_t *variable_count, struct radv_descriptor_set **out_set)
{
   struct radv_descriptor_set *set;
   uint32_t buffer_count = layout->buffer_count;
   if (variable_count) {
      unsigned stride = 1;
      if (layout->binding[layout->binding_count - 1].type == VK_DESCRIPTOR_TYPE_SAMPLER ||
          layout->binding[layout->binding_count - 1].type ==
             VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
         stride = 0;
      buffer_count =
         layout->binding[layout->binding_count - 1].buffer_offset + *variable_count * stride;
   }
   unsigned range_offset =
      sizeof(struct radv_descriptor_set_header) + sizeof(struct radeon_winsys_bo *) * buffer_count;
   const unsigned dynamic_offset_count = layout->dynamic_offset_count;
   unsigned mem_size =
      range_offset + sizeof(struct radv_descriptor_range) * dynamic_offset_count;

   if (pool->host_memory_base) {
      if (pool->host_memory_end - pool->host_memory_ptr < mem_size)
         return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);

      set = (struct radv_descriptor_set *)pool->host_memory_ptr;
      pool->host_memory_ptr += mem_size;
      memset(set->descriptors, 0, sizeof(struct radeon_winsys_bo *) * buffer_count);
   } else {
      set = vk_alloc2(&device->vk.alloc, NULL, mem_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

      if (!set)
         return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   memset(set, 0, mem_size);

   vk_object_base_init(&device->vk, &set->header.base, VK_OBJECT_TYPE_DESCRIPTOR_SET);

   if (dynamic_offset_count) {
      set->header.dynamic_descriptors =
         (struct radv_descriptor_range *)((uint8_t *)set + range_offset);
   }

   set->header.layout = layout;
   set->header.buffer_count = buffer_count;
   uint32_t layout_size = layout->size;
   if (variable_count) {
      uint32_t stride = layout->binding[layout->binding_count - 1].size;
      if (layout->binding[layout->binding_count - 1].type ==
          VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
         stride = 1;

      layout_size = layout->binding[layout->binding_count - 1].offset + *variable_count * stride;
   }
   layout_size = align_u32(layout_size, 32);
   set->header.size = layout_size;

   if (!pool->host_memory_base && pool->entry_count == pool->max_entry_count) {
      vk_free2(&device->vk.alloc, NULL, set);
      return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
   }

   /* try to allocate linearly first, so that we don't spend
    * time looking for gaps if the app only allocates &
    * resets via the pool. */
   if (pool->current_offset + layout_size <= pool->size) {
      set->header.bo = pool->bo;
      set->header.mapped_ptr = (uint32_t *)(pool->mapped_ptr + pool->current_offset);
      set->header.va = pool->bo ? (radv_buffer_get_va(set->header.bo) + pool->current_offset) : 0;
      if (!pool->host_memory_base) {
         pool->entries[pool->entry_count].offset = pool->current_offset;
         pool->entries[pool->entry_count].size = layout_size;
         pool->entries[pool->entry_count].set = set;
         pool->entry_count++;
      }
      pool->current_offset += layout_size;
   } else if (!pool->host_memory_base) {
      uint64_t offset = 0;
      int index;

      for (index = 0; index < pool->entry_count; ++index) {
         if (pool->entries[index].offset - offset >= layout_size)
            break;
         offset = pool->entries[index].offset + pool->entries[index].size;
      }

      if (pool->size - offset < layout_size) {
         vk_free2(&device->vk.alloc, NULL, set);
         return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
      }
      set->header.bo = pool->bo;
      set->header.mapped_ptr = (uint32_t *)(pool->mapped_ptr + offset);
      set->header.va = pool->bo ? (radv_buffer_get_va(set->header.bo) + offset) : 0;
      memmove(&pool->entries[index + 1], &pool->entries[index],
              sizeof(pool->entries[0]) * (pool->entry_count - index));
      pool->entries[index].offset = offset;
      pool->entries[index].size = layout_size;
      pool->entries[index].set = set;
      pool->entry_count++;
   } else
      return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);

   if (layout->has_immutable_samplers) {
      for (unsigned i = 0; i < layout->binding_count; ++i) {
         if (!layout->binding[i].immutable_samplers_offset ||
             layout->binding[i].immutable_samplers_equal)
            continue;

         unsigned offset = layout->binding[i].offset / 4;
         if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
            offset += radv_combined_image_descriptor_sampler_offset(layout->binding + i) / 4;

         const uint32_t *samplers =
            (const uint32_t *)((const char *)layout + layout->binding[i].immutable_samplers_offset);
         for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
            memcpy(set->header.mapped_ptr + offset, samplers + 4 * j, 16);
            offset += layout->binding[i].size / 4;
         }
      }
   }
   *out_set = set;
   return VK_SUCCESS;
}

static void
radv_descriptor_set_destroy(struct radv_device *device, struct radv_descriptor_pool *pool,
                            struct radv_descriptor_set *set, bool free_bo)
{
   assert(!pool->host_memory_base);

   if (free_bo && !pool->host_memory_base) {
      for (int i = 0; i < pool->entry_count; ++i) {
         if (pool->entries[i].set == set) {
            memmove(&pool->entries[i], &pool->entries[i + 1],
                    sizeof(pool->entries[i]) * (pool->entry_count - i - 1));
            --pool->entry_count;
            break;
         }
      }
   }
   vk_object_base_finish(&set->header.base);
   vk_free2(&device->vk.alloc, NULL, set);
}

static void
radv_destroy_descriptor_pool(struct radv_device *device, const VkAllocationCallbacks *pAllocator,
                             struct radv_descriptor_pool *pool)
{
   if (!pool->host_memory_base) {
      for (int i = 0; i < pool->entry_count; ++i) {
         radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
      }
   }

   if (pool->bo)
      device->ws->buffer_destroy(device->ws, pool->bo);
   if (pool->host_bo)
      vk_free2(&device->vk.alloc, pAllocator, pool->host_bo);

   vk_object_base_finish(&pool->base);
   vk_free2(&device->vk.alloc, pAllocator, pool);
}

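/* Pool creation sizes a single BO (or a host allocation for host-only pools)
 * for the worst-case descriptor sizes implied by pPoolSizes. */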
VkResult
radv_CreateDescriptorPool(VkDevice _device, const VkDescriptorPoolCreateInfo *pCreateInfo,
                          const VkAllocationCallbacks *pAllocator,
                          VkDescriptorPool *pDescriptorPool)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_descriptor_pool *pool;
   uint64_t size = sizeof(struct radv_descriptor_pool);
   uint64_t bo_size = 0, bo_count = 0, range_count = 0;

   const VkMutableDescriptorTypeCreateInfoVALVE *mutable_info =
      vk_find_struct_const(pCreateInfo->pNext, MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE);

   vk_foreach_struct(ext, pCreateInfo->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT: {
         const struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT *info =
            (const struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT *)ext;
         /* the sizes are 4 aligned, and we need to align to at
          * most 32, which needs at most 28 bytes extra per
          * binding. */
         bo_size += 28llu * info->maxInlineUniformBlockBindings;
         break;
      }
      default:
         break;
      }
   }

   for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
      if (pCreateInfo->pPoolSizes[i].type != VK_DESCRIPTOR_TYPE_SAMPLER)
         bo_count += pCreateInfo->pPoolSizes[i].descriptorCount;

      switch (pCreateInfo->pPoolSizes[i].type) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         range_count += pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
         /* 32 as we may need to align for images */
         bo_size += 32 * pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_MUTABLE_VALVE:
         /* Per spec, if a mutable descriptor type list is provided for the pool entry, we
          * allocate enough memory to hold any subset of that list.
          * If there is no mutable descriptor type list available,
          * we must allocate enough for any supported mutable descriptor type, i.e. 64 bytes. */
         if (mutable_info && i < mutable_info->mutableDescriptorTypeListCount) {
            uint64_t mutable_size, mutable_alignment;
            if (radv_mutable_descriptor_type_size_alignment(
                   &mutable_info->pMutableDescriptorTypeLists[i], &mutable_size,
                   &mutable_alignment)) {
               /* 32 as we may need to align for images */
               mutable_size = align(mutable_size, 32);
               bo_size += mutable_size * pCreateInfo->pPoolSizes[i].descriptorCount;
            }
         } else {
            bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
         }
         break;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
         bo_size += pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      default:
         break;
      }
   }

   if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
      uint64_t host_size = pCreateInfo->maxSets * sizeof(struct radv_descriptor_set);
      host_size += sizeof(struct radeon_winsys_bo *) * bo_count;
      host_size += sizeof(struct radv_descriptor_range) * range_count;
      size += host_size;
   } else {
      size += sizeof(struct radv_descriptor_pool_entry) * pCreateInfo->maxSets;
   }

   pool = vk_alloc2(&device->vk.alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   memset(pool, 0, sizeof(*pool));

   vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_DESCRIPTOR_POOL);

   if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
      pool->host_memory_base = (uint8_t *)pool + sizeof(struct radv_descriptor_pool);
      pool->host_memory_ptr = pool->host_memory_base;
      pool->host_memory_end = (uint8_t *)pool + size;
   }

   if (bo_size) {
      if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE)) {
         VkResult result = device->ws->buffer_create(
            device->ws, bo_size, 32, RADEON_DOMAIN_VRAM,
            RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT,
            RADV_BO_PRIORITY_DESCRIPTOR, 0, &pool->bo);
         if (result != VK_SUCCESS) {
            radv_destroy_descriptor_pool(device, pAllocator, pool);
            return vk_error(device->instance, result);
         }
         pool->mapped_ptr = (uint8_t *)device->ws->buffer_map(pool->bo);
         if (!pool->mapped_ptr) {
            radv_destroy_descriptor_pool(device, pAllocator, pool);
            return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
         }
      } else {
         pool->host_bo =
            vk_alloc2(&device->vk.alloc, pAllocator, bo_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (!pool->host_bo) {
            radv_destroy_descriptor_pool(device, pAllocator, pool);
            return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
         }
         pool->mapped_ptr = pool->host_bo;
      }
   }
   pool->size = bo_size;
   pool->max_entry_count = pCreateInfo->maxSets;

   *pDescriptorPool = radv_descriptor_pool_to_handle(pool);
   return VK_SUCCESS;
}

void
radv_DestroyDescriptorPool(VkDevice _device, VkDescriptorPool _pool,
                           const VkAllocationCallbacks *pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

   if (!pool)
      return;

   radv_destroy_descriptor_pool(device, pAllocator, pool);
}

VkResult
radv_ResetDescriptorPool(VkDevice _device, VkDescriptorPool descriptorPool,
                         VkDescriptorPoolResetFlags flags)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

   if (!pool->host_memory_base) {
      for (int i = 0; i < pool->entry_count; ++i) {
         radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
      }
      pool->entry_count = 0;
   }

   pool->current_offset = 0;
   pool->host_memory_ptr = pool->host_memory_base;

   return VK_SUCCESS;
}

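/* On any failure below, the sets allocated so far are freed and every
 * returned handle is reset to VK_NULL_HANDLE. */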
VkResult
radv_AllocateDescriptorSets(VkDevice _device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                            VkDescriptorSet *pDescriptorSets)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;
   struct radv_descriptor_set *set = NULL;

   const VkDescriptorSetVariableDescriptorCountAllocateInfo *variable_counts = vk_find_struct_const(
      pAllocateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO);
   const uint32_t zero = 0;

   /* allocate a set of buffers for each shader to contain descriptors */
   for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      RADV_FROM_HANDLE(radv_descriptor_set_layout, layout, pAllocateInfo->pSetLayouts[i]);

      const uint32_t *variable_count = NULL;
      if (layout->has_variable_descriptors && variable_counts) {
         if (i < variable_counts->descriptorSetCount)
            variable_count = variable_counts->pDescriptorCounts + i;
         else
            variable_count = &zero;
      }

      assert(!(layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));

      result = radv_descriptor_set_create(device, pool, layout, variable_count, &set);
      if (result != VK_SUCCESS)
         break;

      pDescriptorSets[i] = radv_descriptor_set_to_handle(set);
   }

   if (result != VK_SUCCESS) {
      radv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool, i, pDescriptorSets);
      for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
         pDescriptorSets[i] = VK_NULL_HANDLE;
      }
   }
   return result;
}

VkResult
radv_FreeDescriptorSets(VkDevice _device, VkDescriptorPool descriptorPool, uint32_t count,
                        const VkDescriptorSet *pDescriptorSets)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

   for (uint32_t i = 0; i < count; i++) {
      RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);

      if (set && !pool->host_memory_base)
         radv_descriptor_set_destroy(device, pool, set, true);
   }
   return VK_SUCCESS;
}

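/* The write_*_descriptor helpers below emit raw hardware descriptor words.
 * When called with a command buffer they track the BO on the submission;
 * otherwise they record it in the set's buffer list. */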
static void
write_texel_buffer_descriptor(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer,
                              unsigned *dst, struct radeon_winsys_bo **buffer_list,
                              const VkBufferView _buffer_view)
{
   RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);

   if (!buffer_view) {
      memset(dst, 0, 4 * 4);
      if (!cmd_buffer)
         *buffer_list = NULL;
      return;
   }

   memcpy(dst, buffer_view->state, 4 * 4);

   if (cmd_buffer)
      radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer_view->bo);
   else
      *buffer_list = buffer_view->bo;
}

static void
write_buffer_descriptor(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer,
                        unsigned *dst, struct radeon_winsys_bo **buffer_list,
                        const VkDescriptorBufferInfo *buffer_info)
{
   RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);

   if (!buffer) {
      memset(dst, 0, 4 * 4);
      if (!cmd_buffer)
         *buffer_list = NULL;
      return;
   }

   uint64_t va = radv_buffer_get_va(buffer->bo);
   uint32_t range = buffer_info->range;

   if (buffer_info->range == VK_WHOLE_SIZE)
      range = buffer->size - buffer_info->offset;

   /* robustBufferAccess is relaxed enough to allow this (in combination
    * with the alignment/size we return from vkGetBufferMemoryRequirements)
    * and this allows the shader compiler to create more efficient 8/16-bit
    * buffer accesses. */
   range = align(range, 4);

   va += buffer_info->offset + buffer->offset;

   uint32_t rsrc_word3 =
      S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
      S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

   if (device->physical_device->rad_info.chip_class >= GFX10) {
      rsrc_word3 |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
                    S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1);
   } else {
      rsrc_word3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                    S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
   }

   dst[0] = va;
   dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
   dst[2] = range;
   dst[3] = rsrc_word3;

   if (cmd_buffer)
      radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer->bo);
   else
      *buffer_list = buffer->bo;
}

static void
write_block_descriptor(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer, void *dst,
                       const VkWriteDescriptorSet *writeset)
{
   const VkWriteDescriptorSetInlineUniformBlockEXT *inline_ub =
      vk_find_struct_const(writeset->pNext, WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT);

   memcpy(dst, inline_ub->pData, inline_ub->dataSize);
}

static void
write_dynamic_buffer_descriptor(struct radv_device *device, struct radv_descriptor_range *range,
                                struct radeon_winsys_bo **buffer_list,
                                const VkDescriptorBufferInfo *buffer_info)
{
   RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
   uint64_t va;
   unsigned size;

   if (!buffer) {
      range->va = 0;
      *buffer_list = NULL;
      return;
   }

   va = radv_buffer_get_va(buffer->bo);
   size = buffer_info->range;

   if (buffer_info->range == VK_WHOLE_SIZE)
      size = buffer->size - buffer_info->offset;

   /* robustBufferAccess is relaxed enough to allow this (in combination
    * with the alignment/size we return from vkGetBufferMemoryRequirements)
    * and this allows the shader compiler to create more efficient 8/16-bit
    * buffer accesses. */
   size = align(size, 4);

   va += buffer_info->offset + buffer->offset;
   range->va = va;
   range->size = size;

   *buffer_list = buffer->bo;
}

static void
write_image_descriptor(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer,
                       unsigned size, unsigned *dst, struct radeon_winsys_bo **buffer_list,
                       VkDescriptorType descriptor_type, const VkDescriptorImageInfo *image_info)
{
   RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
   union radv_descriptor *descriptor;

   if (!iview) {
      memset(dst, 0, size);
      if (!cmd_buffer)
         *buffer_list = NULL;
      return;
   }

   if (descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
      descriptor = &iview->storage_descriptor;
   } else {
      descriptor = &iview->descriptor;
   }

   memcpy(dst, descriptor, size);

   if (cmd_buffer)
      radv_cs_add_buffer(device->ws, cmd_buffer->cs, iview->image->bo);
   else
      *buffer_list = iview->image->bo;
}

static void
write_combined_image_sampler_descriptor(struct radv_device *device,
                                        struct radv_cmd_buffer *cmd_buffer, unsigned sampler_offset,
                                        unsigned *dst, struct radeon_winsys_bo **buffer_list,
                                        VkDescriptorType descriptor_type,
                                        const VkDescriptorImageInfo *image_info, bool has_sampler)
{
   RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

   write_image_descriptor(device, cmd_buffer, sampler_offset, dst, buffer_list, descriptor_type,
                          image_info);
   /* copy over sampler state */
   if (has_sampler) {
      memcpy(dst + sampler_offset / sizeof(*dst), sampler->state, 16);
   }
}

static void
write_sampler_descriptor(struct radv_device *device, unsigned *dst,
                         const VkDescriptorImageInfo *image_info)
{
   RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

   memcpy(dst, sampler->state, 16);
}

static void
write_accel_struct(void *ptr, VkAccelerationStructureKHR _accel_struct)
{
   RADV_FROM_HANDLE(radv_acceleration_structure, accel_struct, _accel_struct);
   uint64_t va = radv_accel_struct_get_va(accel_struct);
   memcpy(ptr, &va, sizeof(va));
}

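/* Common path for vkUpdateDescriptorSets and push descriptors: cmd_buffer is
 * NULL when reached via vkUpdateDescriptorSets, while the push-descriptor
 * path passes the command buffer and an override set. */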
void
radv_update_descriptor_sets(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer,
                            VkDescriptorSet dstSetOverride, uint32_t descriptorWriteCount,
                            const VkWriteDescriptorSet *pDescriptorWrites,
                            uint32_t descriptorCopyCount,
                            const VkCopyDescriptorSet *pDescriptorCopies)
{
   uint32_t i, j;
   for (i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
      RADV_FROM_HANDLE(radv_descriptor_set, set,
                       dstSetOverride ? dstSetOverride : writeset->dstSet);
      const struct radv_descriptor_set_binding_layout *binding_layout =
         set->header.layout->binding + writeset->dstBinding;
      uint32_t *ptr = set->header.mapped_ptr;
      struct radeon_winsys_bo **buffer_list = set->descriptors;
      /* Immutable samplers are not copied into push descriptors when they are
       * allocated, so if we are writing push descriptors we have to copy the
       * immutable samplers into them now.
       */
      const bool copy_immutable_samplers = cmd_buffer &&
                                           binding_layout->immutable_samplers_offset &&
                                           !binding_layout->immutable_samplers_equal;
      const uint32_t *samplers = radv_immutable_samplers(set->header.layout, binding_layout);
      const VkWriteDescriptorSetAccelerationStructureKHR *accel_structs = NULL;

      ptr += binding_layout->offset / 4;

      if (writeset->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
         write_block_descriptor(device, cmd_buffer, (uint8_t *)ptr + writeset->dstArrayElement,
                                writeset);
         continue;
      } else if (writeset->descriptorType == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) {
         accel_structs =
            vk_find_struct_const(writeset->pNext, WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR);
      }

      ptr += binding_layout->size * writeset->dstArrayElement / 4;
      buffer_list += binding_layout->buffer_offset;
      buffer_list += writeset->dstArrayElement;
      for (j = 0; j < writeset->descriptorCount; ++j) {
         switch (writeset->descriptorType) {
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
            unsigned idx = writeset->dstArrayElement + j;
            idx += binding_layout->dynamic_offset_offset;
            assert(!(set->header.layout->flags &
                     VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
            write_dynamic_buffer_descriptor(device, set->header.dynamic_descriptors + idx,
                                            buffer_list, writeset->pBufferInfo + j);
            break;
         }
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            write_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
                                    writeset->pBufferInfo + j);
            break;
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            write_texel_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
                                          writeset->pTexelBufferView[j]);
            break;
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            write_image_descriptor(device, cmd_buffer, 64, ptr, buffer_list,
                                   writeset->descriptorType, writeset->pImageInfo + j);
            break;
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
            unsigned sampler_offset = radv_combined_image_descriptor_sampler_offset(binding_layout);
            write_combined_image_sampler_descriptor(
               device, cmd_buffer, sampler_offset, ptr, buffer_list, writeset->descriptorType,
               writeset->pImageInfo + j, !binding_layout->immutable_samplers_offset);
            if (copy_immutable_samplers) {
               const unsigned idx = writeset->dstArrayElement + j;
               memcpy((char *)ptr + sampler_offset, samplers + 4 * idx, 16);
            }
            break;
         }
         case VK_DESCRIPTOR_TYPE_SAMPLER:
            if (!binding_layout->immutable_samplers_offset) {
               write_sampler_descriptor(device, ptr, writeset->pImageInfo + j);
            } else if (copy_immutable_samplers) {
               unsigned idx = writeset->dstArrayElement + j;
               memcpy(ptr, samplers + 4 * idx, 16);
            }
            break;
         case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
            write_accel_struct(ptr, accel_structs->pAccelerationStructures[j]);
            break;
         default:
            break;
         }
         ptr += binding_layout->size / 4;
         ++buffer_list;
      }
   }

   for (i = 0; i < descriptorCopyCount; i++) {
      const VkCopyDescriptorSet *copyset = &pDescriptorCopies[i];
      RADV_FROM_HANDLE(radv_descriptor_set, src_set, copyset->srcSet);
      RADV_FROM_HANDLE(radv_descriptor_set, dst_set, copyset->dstSet);
      const struct radv_descriptor_set_binding_layout *src_binding_layout =
         src_set->header.layout->binding + copyset->srcBinding;
      const struct radv_descriptor_set_binding_layout *dst_binding_layout =
         dst_set->header.layout->binding + copyset->dstBinding;
      uint32_t *src_ptr = src_set->header.mapped_ptr;
      uint32_t *dst_ptr = dst_set->header.mapped_ptr;
      struct radeon_winsys_bo **src_buffer_list = src_set->descriptors;
      struct radeon_winsys_bo **dst_buffer_list = dst_set->descriptors;

      src_ptr += src_binding_layout->offset / 4;
      dst_ptr += dst_binding_layout->offset / 4;

      if (src_binding_layout->type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
         src_ptr += copyset->srcArrayElement / 4;
         dst_ptr += copyset->dstArrayElement / 4;

         memcpy(dst_ptr, src_ptr, copyset->descriptorCount);
         continue;
      }

      src_ptr += src_binding_layout->size * copyset->srcArrayElement / 4;
      dst_ptr += dst_binding_layout->size * copyset->dstArrayElement / 4;

      src_buffer_list += src_binding_layout->buffer_offset;
      src_buffer_list += copyset->srcArrayElement;

      dst_buffer_list += dst_binding_layout->buffer_offset;
      dst_buffer_list += copyset->dstArrayElement;

      /* In case of copies between mutable descriptor types
       * and non-mutable descriptor types. */
      size_t copy_size = MIN2(src_binding_layout->size, dst_binding_layout->size);

      for (j = 0; j < copyset->descriptorCount; ++j) {
         switch (src_binding_layout->type) {
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
            unsigned src_idx = copyset->srcArrayElement + j;
            unsigned dst_idx = copyset->dstArrayElement + j;
            struct radv_descriptor_range *src_range, *dst_range;
            src_idx += src_binding_layout->dynamic_offset_offset;
            dst_idx += dst_binding_layout->dynamic_offset_offset;

            src_range = src_set->header.dynamic_descriptors + src_idx;
            dst_range = dst_set->header.dynamic_descriptors + dst_idx;
            *dst_range = *src_range;
            break;
         }
         default:
            memcpy(dst_ptr, src_ptr, copy_size);
         }
         src_ptr += src_binding_layout->size / 4;
         dst_ptr += dst_binding_layout->size / 4;

         if (src_binding_layout->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
             src_binding_layout->type != VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) {
            /* Sampler descriptors don't have a buffer list. */
            dst_buffer_list[j] = src_buffer_list[j];
         }
      }
   }
}

void
radv_UpdateDescriptorSets(VkDevice _device, uint32_t descriptorWriteCount,
                          const VkWriteDescriptorSet *pDescriptorWrites,
                          uint32_t descriptorCopyCount,
                          const VkCopyDescriptorSet *pDescriptorCopies)
{
   RADV_FROM_HANDLE(radv_device, device, _device);

   radv_update_descriptor_sets(device, NULL, VK_NULL_HANDLE, descriptorWriteCount,
                               pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
}

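/* Update templates pre-compute each entry's destination offset and stride so
 * radv_update_descriptor_set_with_template can copy straight out of pData. */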
VkResult
radv_CreateDescriptorUpdateTemplate(VkDevice _device,
                                    const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
                                    const VkAllocationCallbacks *pAllocator,
                                    VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->descriptorSetLayout);
   const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount;
   const size_t size = sizeof(struct radv_descriptor_update_template) +
                       sizeof(struct radv_descriptor_update_template_entry) * entry_count;
   struct radv_descriptor_update_template *templ;
   uint32_t i;

   templ = vk_alloc2(&device->vk.alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!templ)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &templ->base, VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE);

   templ->entry_count = entry_count;

   if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
      RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->pipelineLayout);

      /* descriptorSetLayout should be ignored for push descriptors
       * and instead it refers to pipelineLayout and set.
       */
      assert(pCreateInfo->set < MAX_SETS);
      set_layout = pipeline_layout->set[pCreateInfo->set].layout;

      templ->bind_point = pCreateInfo->pipelineBindPoint;
   }

   for (i = 0; i < entry_count; i++) {
      const VkDescriptorUpdateTemplateEntry *entry = &pCreateInfo->pDescriptorUpdateEntries[i];
      const struct radv_descriptor_set_binding_layout *binding_layout =
         set_layout->binding + entry->dstBinding;
      const uint32_t buffer_offset = binding_layout->buffer_offset + entry->dstArrayElement;
      const uint32_t *immutable_samplers = NULL;
      uint32_t dst_offset;
      uint32_t dst_stride;

      /* dst_offset is an offset into dynamic_descriptors when the descriptor
         is dynamic, and an offset into mapped_ptr otherwise */
      switch (entry->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         assert(pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET);
         dst_offset = binding_layout->dynamic_offset_offset + entry->dstArrayElement;
         dst_stride = 0; /* Not used */
         break;
      default:
         switch (entry->descriptorType) {
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_SAMPLER:
            /* Immutable samplers are copied into push descriptors when they are pushed */
            if (pCreateInfo->templateType ==
                   VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR &&
                binding_layout->immutable_samplers_offset &&
                !binding_layout->immutable_samplers_equal) {
               immutable_samplers =
                  radv_immutable_samplers(set_layout, binding_layout) + entry->dstArrayElement * 4;
            }
            break;
         default:
            break;
         }
         dst_offset = binding_layout->offset / 4;
         if (entry->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
            dst_offset += entry->dstArrayElement / 4;
         else
            dst_offset += binding_layout->size * entry->dstArrayElement / 4;

         dst_stride = binding_layout->size / 4;
         break;
      }

      templ->entry[i] = (struct radv_descriptor_update_template_entry){
         .descriptor_type = entry->descriptorType,
         .descriptor_count = entry->descriptorCount,
         .src_offset = entry->offset,
         .src_stride = entry->stride,
         .dst_offset = dst_offset,
         .dst_stride = dst_stride,
         .buffer_offset = buffer_offset,
         .has_sampler = !binding_layout->immutable_samplers_offset,
         .sampler_offset = radv_combined_image_descriptor_sampler_offset(binding_layout),
         .immutable_samplers = immutable_samplers};
   }

   *pDescriptorUpdateTemplate = radv_descriptor_update_template_to_handle(templ);
   return VK_SUCCESS;
}

void
radv_DestroyDescriptorUpdateTemplate(VkDevice _device,
                                     VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                     const VkAllocationCallbacks *pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);

   if (!templ)
      return;

   vk_object_base_finish(&templ->base);
   vk_free2(&device->vk.alloc, pAllocator, templ);
}

void
radv_update_descriptor_set_with_template(struct radv_device *device,
                                         struct radv_cmd_buffer *cmd_buffer,
                                         struct radv_descriptor_set *set,
                                         VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                         const void *pData)
{
   RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
   uint32_t i;

   for (i = 0; i < templ->entry_count; ++i) {
      struct radeon_winsys_bo **buffer_list = set->descriptors + templ->entry[i].buffer_offset;
      uint32_t *pDst = set->header.mapped_ptr + templ->entry[i].dst_offset;
      const uint8_t *pSrc = ((const uint8_t *)pData) + templ->entry[i].src_offset;
      uint32_t j;

      if (templ->entry[i].descriptor_type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
         memcpy((uint8_t *)pDst, pSrc, templ->entry[i].descriptor_count);
         continue;
      }

      for (j = 0; j < templ->entry[i].descriptor_count; ++j) {
         switch (templ->entry[i].descriptor_type) {
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
            const unsigned idx = templ->entry[i].dst_offset + j;
            assert(!(set->header.layout->flags &
                     VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
            write_dynamic_buffer_descriptor(device, set->header.dynamic_descriptors + idx,
                                            buffer_list, (struct VkDescriptorBufferInfo *)pSrc);
            break;
         }
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            write_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
                                    (struct VkDescriptorBufferInfo *)pSrc);
            break;
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            write_texel_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
                                          *(VkBufferView *)pSrc);
            break;
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            write_image_descriptor(device, cmd_buffer, 64, pDst, buffer_list,
                                   templ->entry[i].descriptor_type,
                                   (struct VkDescriptorImageInfo *)pSrc);
            break;
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            write_combined_image_sampler_descriptor(
               device, cmd_buffer, templ->entry[i].sampler_offset, pDst, buffer_list,
               templ->entry[i].descriptor_type, (struct VkDescriptorImageInfo *)pSrc,
               templ->entry[i].has_sampler);
            if (templ->entry[i].immutable_samplers) {
               memcpy((char *)pDst + templ->entry[i].sampler_offset,
                      templ->entry[i].immutable_samplers + 4 * j, 16);
            }
            break;
         case VK_DESCRIPTOR_TYPE_SAMPLER:
            if (templ->entry[i].has_sampler)
               write_sampler_descriptor(device, pDst, (struct VkDescriptorImageInfo *)pSrc);
            else if (templ->entry[i].immutable_samplers)
               memcpy(pDst, templ->entry[i].immutable_samplers + 4 * j, 16);
            break;
         case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
            write_accel_struct(pDst, *(const VkAccelerationStructureKHR *)pSrc);
            break;
         default:
            break;
         }
         pSrc += templ->entry[i].src_stride;
         pDst += templ->entry[i].dst_stride;
         ++buffer_list;
      }
   }
}

void
radv_UpdateDescriptorSetWithTemplate(VkDevice _device, VkDescriptorSet descriptorSet,
                                     VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                     const void *pData)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);

   radv_update_descriptor_set_with_template(device, NULL, set, descriptorUpdateTemplate, pData);
}

VkResult
radv_CreateSamplerYcbcrConversion(VkDevice _device,
                                  const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
                                  const VkAllocationCallbacks *pAllocator,
                                  VkSamplerYcbcrConversion *pYcbcrConversion)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_sampler_ycbcr_conversion *conversion = NULL;

   conversion = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*conversion), 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (conversion == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &conversion->base, VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION);

   conversion->format = pCreateInfo->format;
   conversion->ycbcr_model = pCreateInfo->ycbcrModel;
   conversion->ycbcr_range = pCreateInfo->ycbcrRange;
   conversion->components = pCreateInfo->components;
   conversion->chroma_offsets[0] = pCreateInfo->xChromaOffset;
   conversion->chroma_offsets[1] = pCreateInfo->yChromaOffset;
   conversion->chroma_filter = pCreateInfo->chromaFilter;

   *pYcbcrConversion = radv_sampler_ycbcr_conversion_to_handle(conversion);
   return VK_SUCCESS;
}

void
radv_DestroySamplerYcbcrConversion(VkDevice _device, VkSamplerYcbcrConversion ycbcrConversion,
                                   const VkAllocationCallbacks *pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_sampler_ycbcr_conversion, ycbcr_conversion, ycbcrConversion);

   if (!ycbcr_conversion)
      return;

   vk_object_base_finish(&ycbcr_conversion->base);
   vk_free2(&device->vk.alloc, pAllocator, ycbcr_conversion);
}