GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/amd/vulkan/radv_meta_buffer.c

#include "nir/nir_builder.h"
#include "radv_meta.h"

#include "radv_cs.h"
#include "sid.h"
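
/* Shader used by the buffer-fill path: a 64-wide compute shader where each
 * invocation stores one 16-byte vec4, replicating the 32-bit fill value
 * taken from a push constant, at offset global_id * 16.  A full workgroup
 * therefore covers 1024 bytes of the destination SSBO (set 0, binding 0).
 */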
static nir_shader *
build_buffer_fill_shader(struct radv_device *dev)
{
   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_buffer_fill");
   b.shader->info.workgroup_size[0] = 64;
   b.shader->info.workgroup_size[1] = 1;
   b.shader->info.workgroup_size[2] = 1;

   nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
   nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
   nir_ssa_def *block_size =
      nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
                    b.shader->info.workgroup_size[2], 0);

   nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);

   nir_ssa_def *offset = nir_imul(&b, global_id, nir_imm_int(&b, 16));
   offset = nir_channel(&b, offset, 0);

   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);

   nir_ssa_def *load = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 4);
   nir_ssa_def *swizzled_load = nir_swizzle(&b, load, (unsigned[]){0, 0, 0, 0}, 4);

   nir_store_ssbo(&b, swizzled_load, dst_buf, offset, .write_mask = 0xf,
                  .access = ACCESS_NON_READABLE, .align_mul = 16);

   return b.shader;
}
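
/* Copy counterpart of the fill shader: each invocation loads a 16-byte
 * vec4 from the source SSBO (binding 1) and stores it to the destination
 * SSBO (binding 0) at the same offset.
 */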
static nir_shader *
build_buffer_copy_shader(struct radv_device *dev)
{
   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_buffer_copy");
   b.shader->info.workgroup_size[0] = 64;
   b.shader->info.workgroup_size[1] = 1;
   b.shader->info.workgroup_size[2] = 1;

   nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
   nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
   nir_ssa_def *block_size =
      nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
                    b.shader->info.workgroup_size[2], 0);

   nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);

   nir_ssa_def *offset = nir_imul(&b, global_id, nir_imm_int(&b, 16));
   offset = nir_channel(&b, offset, 0);

   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
   nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);

   nir_ssa_def *load = nir_load_ssbo(&b, 4, 32, src_buf, offset, .align_mul = 16);
   nir_store_ssbo(&b, load, dst_buf, offset, .write_mask = 0xf, .access = ACCESS_NON_READABLE,
                  .align_mul = 16);

   return b.shader;
}
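
/* Creates the descriptor set layouts, pipeline layouts and compute
 * pipelines backing the buffer fill/copy meta operations.  On failure,
 * whatever was created so far is destroyed via
 * radv_device_finish_meta_buffer_state() before returning the error.
 */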
VkResult
radv_device_init_meta_buffer_state(struct radv_device *device)
{
   VkResult result;
   nir_shader *fill_cs = build_buffer_fill_shader(device);
   nir_shader *copy_cs = build_buffer_copy_shader(device);

   VkDescriptorSetLayoutCreateInfo fill_ds_create_info = {
      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
      .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
      .bindingCount = 1,
      .pBindings = (VkDescriptorSetLayoutBinding[]){
         {.binding = 0,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .descriptorCount = 1,
          .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
          .pImmutableSamplers = NULL},
      }};

   result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device), &fill_ds_create_info,
                                           &device->meta_state.alloc,
                                           &device->meta_state.buffer.fill_ds_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkDescriptorSetLayoutCreateInfo copy_ds_create_info = {
      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
      .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
      .bindingCount = 2,
      .pBindings = (VkDescriptorSetLayoutBinding[]){
         {.binding = 0,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .descriptorCount = 1,
          .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
          .pImmutableSamplers = NULL},
         {.binding = 1,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .descriptorCount = 1,
          .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
          .pImmutableSamplers = NULL},
      }};

   result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device), &copy_ds_create_info,
                                           &device->meta_state.alloc,
                                           &device->meta_state.buffer.copy_ds_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineLayoutCreateInfo fill_pl_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 1,
      .pSetLayouts = &device->meta_state.buffer.fill_ds_layout,
      .pushConstantRangeCount = 1,
      .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 4},
   };

   result = radv_CreatePipelineLayout(radv_device_to_handle(device), &fill_pl_create_info,
                                      &device->meta_state.alloc,
                                      &device->meta_state.buffer.fill_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineLayoutCreateInfo copy_pl_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 1,
      .pSetLayouts = &device->meta_state.buffer.copy_ds_layout,
      .pushConstantRangeCount = 0,
   };

   result = radv_CreatePipelineLayout(radv_device_to_handle(device), &copy_pl_create_info,
                                      &device->meta_state.alloc,
                                      &device->meta_state.buffer.copy_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo fill_pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(fill_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo fill_vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = fill_pipeline_shader_stage,
      .flags = 0,
      .layout = device->meta_state.buffer.fill_p_layout,
   };

   result = radv_CreateComputePipelines(
      radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
      &fill_vk_pipeline_info, NULL, &device->meta_state.buffer.fill_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo copy_pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(copy_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo copy_vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = copy_pipeline_shader_stage,
      .flags = 0,
      .layout = device->meta_state.buffer.copy_p_layout,
   };

   result = radv_CreateComputePipelines(
      radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
      &copy_vk_pipeline_info, NULL, &device->meta_state.buffer.copy_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   ralloc_free(fill_cs);
   ralloc_free(copy_cs);
   return VK_SUCCESS;
fail:
   radv_device_finish_meta_buffer_state(device);
   ralloc_free(fill_cs);
   ralloc_free(copy_cs);
   return result;
}
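
/* Tears down the meta buffer state.  The destroy entry points follow the
 * Vulkan convention of ignoring VK_NULL_HANDLE, so this is also safe to
 * call on a partially initialized state from the fail path above.
 */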
void
radv_device_finish_meta_buffer_state(struct radv_device *device)
{
   struct radv_meta_state *state = &device->meta_state;

   radv_DestroyPipeline(radv_device_to_handle(device), state->buffer.copy_pipeline, &state->alloc);
   radv_DestroyPipeline(radv_device_to_handle(device), state->buffer.fill_pipeline, &state->alloc);
   radv_DestroyPipelineLayout(radv_device_to_handle(device), state->buffer.copy_p_layout,
                              &state->alloc);
   radv_DestroyPipelineLayout(radv_device_to_handle(device), state->buffer.fill_p_layout,
                              &state->alloc);
   radv_DestroyDescriptorSetLayout(radv_device_to_handle(device), state->buffer.copy_ds_layout,
                                   &state->alloc);
   radv_DestroyDescriptorSetLayout(radv_device_to_handle(device), state->buffer.fill_ds_layout,
                                   &state->alloc);
}
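
/* Dispatches the fill pipeline over `size` bytes of `bo` starting at
 * `offset`.  One workgroup is launched per 1024 bytes (round_up_u64() here
 * is assumed to divide rounding up), and the caller's compute pipeline,
 * push constants and descriptors are saved and restored around the
 * dispatch.
 */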
static void
fill_buffer_shader(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys_bo *bo, uint64_t offset,
                   uint64_t size, uint32_t value)
{
   struct radv_device *device = cmd_buffer->device;
   uint64_t block_count = round_up_u64(size, 1024);
   struct radv_meta_saved_state saved_state;

   radv_meta_save(
      &saved_state, cmd_buffer,
      RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS | RADV_META_SAVE_DESCRIPTORS);

   struct radv_buffer dst_buffer = {.bo = bo, .offset = offset, .size = size};

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        device->meta_state.buffer.fill_pipeline);

   radv_meta_push_descriptor_set(
      cmd_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, device->meta_state.buffer.fill_p_layout,
      0, /* set */
      1, /* descriptorWriteCount */
      (VkWriteDescriptorSet[]){
         {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
          .dstBinding = 0,
          .dstArrayElement = 0,
          .descriptorCount = 1,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .pBufferInfo = &(VkDescriptorBufferInfo){.buffer = radv_buffer_to_handle(&dst_buffer),
                                                   .offset = 0,
                                                   .range = size}}});

   radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
                         device->meta_state.buffer.fill_p_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, 4,
                         &value);

   radv_CmdDispatch(radv_cmd_buffer_to_handle(cmd_buffer), block_count, 1, 1);

   radv_meta_restore(&saved_state, cmd_buffer);
}
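
/* Compute-based copy between two BOs; mirrors fill_buffer_shader() but
 * binds both a destination and a source buffer and needs no push
 * constants.
 */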
static void
copy_buffer_shader(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys_bo *src_bo,
                   struct radeon_winsys_bo *dst_bo, uint64_t src_offset, uint64_t dst_offset,
                   uint64_t size)
{
   struct radv_device *device = cmd_buffer->device;
   uint64_t block_count = round_up_u64(size, 1024);
   struct radv_meta_saved_state saved_state;

   radv_meta_save(&saved_state, cmd_buffer,
                  RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_DESCRIPTORS);

   struct radv_buffer dst_buffer = {.bo = dst_bo, .offset = dst_offset, .size = size};

   struct radv_buffer src_buffer = {.bo = src_bo, .offset = src_offset, .size = size};

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        device->meta_state.buffer.copy_pipeline);

   radv_meta_push_descriptor_set(
      cmd_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, device->meta_state.buffer.copy_p_layout,
      0, /* set */
      2, /* descriptorWriteCount */
      (VkWriteDescriptorSet[]){
         {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
          .dstBinding = 0,
          .dstArrayElement = 0,
          .descriptorCount = 1,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .pBufferInfo = &(VkDescriptorBufferInfo){.buffer = radv_buffer_to_handle(&dst_buffer),
                                                   .offset = 0,
                                                   .range = size}},
         {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
          .dstBinding = 1,
          .dstArrayElement = 0,
          .descriptorCount = 1,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .pBufferInfo = &(VkDescriptorBufferInfo){.buffer = radv_buffer_to_handle(&src_buffer),
                                                   .offset = 0,
                                                   .range = size}}});

   radv_CmdDispatch(radv_cmd_buffer_to_handle(cmd_buffer), block_count, 1, 1);

   radv_meta_restore(&saved_state, cmd_buffer);
}
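
/* Chooses between the compute path and CP DMA: compute is preferred once
 * the transfer reaches RADV_BUFFER_OPS_CS_THRESHOLD bytes, except on
 * GFX10+ boards with dedicated VRAM when either buffer lives outside VRAM,
 * where CP DMA is kept to avoid slow PCIe traffic.
 */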
static bool
radv_prefer_compute_dma(const struct radv_device *device, uint64_t size,
                        struct radeon_winsys_bo *src_bo, struct radeon_winsys_bo *dst_bo)
{
   bool use_compute = size >= RADV_BUFFER_OPS_CS_THRESHOLD;

   if (device->physical_device->rad_info.chip_class >= GFX10 &&
       device->physical_device->rad_info.has_dedicated_vram) {
      if ((src_bo && !(src_bo->initial_domain & RADEON_DOMAIN_VRAM)) ||
          !(dst_bo->initial_domain & RADEON_DOMAIN_VRAM)) {
         /* Prefer CP DMA for GTT on dGPUs due to slow PCIe. */
         use_compute = false;
      }
   }

   return use_compute;
}
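
/* Fills a buffer with a 32-bit value; offset and size must be
 * dword-aligned.  Returns the flush bits the caller should emit before the
 * written data is consumed; the CP DMA path returns 0 since its
 * synchronization is handled elsewhere.
 */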
uint32_t
radv_fill_buffer(struct radv_cmd_buffer *cmd_buffer, const struct radv_image *image,
                 struct radeon_winsys_bo *bo, uint64_t offset, uint64_t size, uint32_t value)
{
   bool use_compute = radv_prefer_compute_dma(cmd_buffer->device, size, NULL, bo);
   uint32_t flush_bits = 0;

   assert(!(offset & 3));
   assert(!(size & 3));

   if (use_compute) {
      cmd_buffer->state.flush_bits |=
         radv_dst_access_flush(cmd_buffer, VK_ACCESS_SHADER_WRITE_BIT, image);

      fill_buffer_shader(cmd_buffer, bo, offset, size, value);

      flush_bits = RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_VCACHE |
                   radv_src_access_flush(cmd_buffer, VK_ACCESS_SHADER_WRITE_BIT, image);
   } else if (size) {
      uint64_t va = radv_buffer_get_va(bo);
      va += offset;
      radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, bo);
      si_cp_dma_clear_buffer(cmd_buffer, va, size, value);
   }

   return flush_bits;
}
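
/* Raw BO-to-BO copy: takes the compute path only when the size and both
 * offsets are dword-aligned and radv_prefer_compute_dma() agrees,
 * otherwise falls back to CP DMA.
 */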
static void
radv_copy_buffer(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys_bo *src_bo,
                 struct radeon_winsys_bo *dst_bo, uint64_t src_offset, uint64_t dst_offset,
                 uint64_t size)
{
   bool use_compute = !(size & 3) && !(src_offset & 3) && !(dst_offset & 3) &&
                      radv_prefer_compute_dma(cmd_buffer->device, size, src_bo, dst_bo);

   if (use_compute)
      copy_buffer_shader(cmd_buffer, src_bo, dst_bo, src_offset, dst_offset, size);
   else if (size) {
      uint64_t src_va = radv_buffer_get_va(src_bo);
      uint64_t dst_va = radv_buffer_get_va(dst_bo);
      src_va += src_offset;
      dst_va += dst_offset;

      radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, src_bo);
      radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_bo);

      si_cp_dma_buffer_copy(cmd_buffer, src_va, dst_va, size);
   }
}
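
/* vkCmdFillBuffer entry point.  VK_WHOLE_SIZE is clamped to the largest
 * dword-aligned size that fits between dstOffset and the end of the
 * buffer.
 */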
void
radv_CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                   VkDeviceSize fillSize, uint32_t data)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);

   if (fillSize == VK_WHOLE_SIZE)
      fillSize = (dst_buffer->size - dstOffset) & ~3ull;

   radv_fill_buffer(cmd_buffer, NULL, dst_buffer->bo, dst_buffer->offset + dstOffset, fillSize,
                    data);
}
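
/* Copies a single VkBufferCopy2KHR region.  Predication is temporarily
 * disabled because VK_EXT_conditional_rendering requires copy commands to
 * be unaffected by conditional rendering.
 */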
static void
copy_buffer(struct radv_cmd_buffer *cmd_buffer, struct radv_buffer *src_buffer,
            struct radv_buffer *dst_buffer, const VkBufferCopy2KHR *region)
{
   bool old_predicating;

   /* VK_EXT_conditional_rendering says that copy commands should not be
    * affected by conditional rendering.
    */
   old_predicating = cmd_buffer->state.predicating;
   cmd_buffer->state.predicating = false;

   radv_copy_buffer(cmd_buffer, src_buffer->bo, dst_buffer->bo,
                    src_buffer->offset + region->srcOffset, dst_buffer->offset + region->dstOffset,
                    region->size);

   /* Restore conditional rendering. */
   cmd_buffer->state.predicating = old_predicating;
}
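
/* vkCmdCopyBuffer2KHR entry point: simply loops over the regions. */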
void
radv_CmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfo)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
   RADV_FROM_HANDLE(radv_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);

   for (unsigned r = 0; r < pCopyBufferInfo->regionCount; r++) {
      copy_buffer(cmd_buffer, src_buffer, dst_buffer, &pCopyBufferInfo->pRegions[r]);
   }
}
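
/* vkCmdUpdateBuffer entry point.  Updates smaller than
 * RADV_BUFFER_UPDATE_THRESHOLD are written inline with a WRITE_DATA packet
 * after flushing caches; larger updates are staged in the command buffer's
 * upload BO and copied with radv_copy_buffer().
 */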
void
radv_CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                     VkDeviceSize dataSize, const void *pData)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
   bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
   uint64_t words = dataSize / 4;
   uint64_t va = radv_buffer_get_va(dst_buffer->bo);
   va += dstOffset + dst_buffer->offset;

   assert(!(dataSize & 3));
   assert(!(va & 3));

   if (!dataSize)
      return;

   if (dataSize < RADV_BUFFER_UPDATE_THRESHOLD) {
      si_emit_cache_flush(cmd_buffer);

      radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);

      radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, words + 4);

      radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + words, 0));
      radeon_emit(cmd_buffer->cs, S_370_DST_SEL(mec ? V_370_MEM : V_370_MEM_GRBM) |
                                     S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_ME));
      radeon_emit(cmd_buffer->cs, va);
      radeon_emit(cmd_buffer->cs, va >> 32);
      radeon_emit_array(cmd_buffer->cs, pData, words);

      if (unlikely(cmd_buffer->device->trace_bo))
         radv_cmd_buffer_trace_emit(cmd_buffer);
   } else {
      uint32_t buf_offset;
      radv_cmd_buffer_upload_data(cmd_buffer, dataSize, pData, &buf_offset);
      radv_copy_buffer(cmd_buffer, cmd_buffer->upload.upload_bo, dst_buffer->bo, buf_offset,
                       dstOffset + dst_buffer->offset, dataSize);
   }
}