PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/broadcom/vulkan/v3dv_cmd_buffer.c
1
/*
2
* Copyright © 2019 Raspberry Pi
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*/
23
24
#include "v3dv_private.h"
25
#include "util/u_pack_color.h"
26
#include "vk_format_info.h"
27
#include "vk_util.h"
28
29
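/* Baseline dynamic state, presumably used to seed a command buffer's dynamic
 * state before any pipeline bind or vkCmdSet* call overrides it: stencil
 * masks default to all bits set and line width to 1.0.
 */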
const struct v3dv_dynamic_state default_dynamic_state = {
30
.viewport = {
31
.count = 0,
32
},
33
.scissor = {
34
.count = 0,
35
},
36
.stencil_compare_mask =
37
{
38
.front = ~0u,
39
.back = ~0u,
40
},
41
.stencil_write_mask =
42
{
43
.front = ~0u,
44
.back = ~0u,
45
},
46
.stencil_reference =
47
{
48
.front = 0u,
49
.back = 0u,
50
},
51
.blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
52
.depth_bias = {
53
.constant_factor = 0.0f,
54
.depth_bias_clamp = 0.0f,
55
.slope_factor = 0.0f,
56
},
57
.line_width = 1.0f,
58
};
59
60
void
61
v3dv_job_add_bo(struct v3dv_job *job, struct v3dv_bo *bo)
62
{
63
if (!bo)
64
return;
65
66
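/* job->bo_handle_mask is only a coarse filter over BO handle bits: a set bit
 * means a BO with that bit *may* already be tracked, so we still confirm
 * with _mesa_set_search() before skipping the add.
 */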
if (job->bo_handle_mask & bo->handle_bit) {
67
if (_mesa_set_search(job->bos, bo))
68
return;
69
}
70
71
_mesa_set_add(job->bos, bo);
72
job->bo_count++;
73
job->bo_handle_mask |= bo->handle_bit;
74
}
75
76
void
77
v3dv_job_add_bo_unchecked(struct v3dv_job *job, struct v3dv_bo *bo)
78
{
79
assert(bo);
80
_mesa_set_add(job->bos, bo);
81
job->bo_count++;
82
job->bo_handle_mask |= bo->handle_bit;
83
}
84
85
VKAPI_ATTR VkResult VKAPI_CALL
86
v3dv_CreateCommandPool(VkDevice _device,
87
const VkCommandPoolCreateInfo *pCreateInfo,
88
const VkAllocationCallbacks *pAllocator,
89
VkCommandPool *pCmdPool)
90
{
91
V3DV_FROM_HANDLE(v3dv_device, device, _device);
92
struct v3dv_cmd_pool *pool;
93
94
/* We only support one queue */
95
assert(pCreateInfo->queueFamilyIndex == 0);
96
97
pool = vk_object_zalloc(&device->vk, pAllocator, sizeof(*pool),
98
VK_OBJECT_TYPE_COMMAND_POOL);
99
if (pool == NULL)
100
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
101
102
if (pAllocator)
103
pool->alloc = *pAllocator;
104
else
105
pool->alloc = device->vk.alloc;
106
107
list_inithead(&pool->cmd_buffers);
108
109
*pCmdPool = v3dv_cmd_pool_to_handle(pool);
110
111
return VK_SUCCESS;
112
}
113
114
static void
115
cmd_buffer_init(struct v3dv_cmd_buffer *cmd_buffer,
116
struct v3dv_device *device,
117
struct v3dv_cmd_pool *pool,
118
VkCommandBufferLevel level)
119
{
120
/* Do not reset the base object! If we are calling this from a command
121
* buffer reset, that would reset the loader's dispatch table for the
122
* command buffer, and any other relevant info from vk_object_base.
123
*/
124
const uint32_t base_size = sizeof(struct vk_object_base);
125
uint8_t *cmd_buffer_driver_start = ((uint8_t *) cmd_buffer) + base_size;
126
memset(cmd_buffer_driver_start, 0, sizeof(*cmd_buffer) - base_size);
127
128
cmd_buffer->device = device;
129
cmd_buffer->pool = pool;
130
cmd_buffer->level = level;
131
132
list_inithead(&cmd_buffer->private_objs);
133
list_inithead(&cmd_buffer->jobs);
134
list_inithead(&cmd_buffer->list_link);
135
136
assert(pool);
137
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
138
139
cmd_buffer->state.subpass_idx = -1;
140
cmd_buffer->state.meta.subpass_idx = -1;
141
142
cmd_buffer->status = V3DV_CMD_BUFFER_STATUS_INITIALIZED;
143
}
144
145
static VkResult
146
cmd_buffer_create(struct v3dv_device *device,
147
struct v3dv_cmd_pool *pool,
148
VkCommandBufferLevel level,
149
VkCommandBuffer *pCommandBuffer)
150
{
151
struct v3dv_cmd_buffer *cmd_buffer;
152
cmd_buffer = vk_object_zalloc(&device->vk,
153
&pool->alloc,
154
sizeof(*cmd_buffer),
155
VK_OBJECT_TYPE_COMMAND_BUFFER);
156
if (cmd_buffer == NULL)
157
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
158
159
cmd_buffer_init(cmd_buffer, device, pool, level);
160
161
*pCommandBuffer = v3dv_cmd_buffer_to_handle(cmd_buffer);
162
163
return VK_SUCCESS;
164
}
165
166
static void
167
job_destroy_gpu_cl_resources(struct v3dv_job *job)
168
{
169
assert(job->type == V3DV_JOB_TYPE_GPU_CL ||
170
job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY);
171
172
v3dv_cl_destroy(&job->bcl);
173
v3dv_cl_destroy(&job->rcl);
174
v3dv_cl_destroy(&job->indirect);
175
176
/* Since we don't ref BOs when we add them to the command buffer, don't
177
* unref them here either. BOs will be freed when their corresponding API
178
* objects are destroyed.
179
*/
180
_mesa_set_destroy(job->bos, NULL);
181
182
v3dv_bo_free(job->device, job->tile_alloc);
183
v3dv_bo_free(job->device, job->tile_state);
184
}
185
186
static void
187
job_destroy_cloned_gpu_cl_resources(struct v3dv_job *job)
188
{
189
assert(job->type == V3DV_JOB_TYPE_GPU_CL);
190
191
list_for_each_entry_safe(struct v3dv_bo, bo, &job->bcl.bo_list, list_link) {
192
list_del(&bo->list_link);
193
vk_free(&job->device->vk.alloc, bo);
194
}
195
196
list_for_each_entry_safe(struct v3dv_bo, bo, &job->rcl.bo_list, list_link) {
197
list_del(&bo->list_link);
198
vk_free(&job->device->vk.alloc, bo);
199
}
200
201
list_for_each_entry_safe(struct v3dv_bo, bo, &job->indirect.bo_list, list_link) {
202
list_del(&bo->list_link);
203
vk_free(&job->device->vk.alloc, bo);
204
}
205
}
206
207
static void
208
job_destroy_gpu_csd_resources(struct v3dv_job *job)
209
{
210
assert(job->type == V3DV_JOB_TYPE_GPU_CSD);
211
assert(job->cmd_buffer);
212
213
v3dv_cl_destroy(&job->indirect);
214
215
_mesa_set_destroy(job->bos, NULL);
216
217
if (job->csd.shared_memory)
218
v3dv_bo_free(job->device, job->csd.shared_memory);
219
}
220
221
static void
222
job_destroy_cpu_wait_events_resources(struct v3dv_job *job)
223
{
224
assert(job->type == V3DV_JOB_TYPE_CPU_WAIT_EVENTS);
225
assert(job->cmd_buffer);
226
vk_free(&job->cmd_buffer->device->vk.alloc, job->cpu.event_wait.events);
227
}
228
229
static void
230
job_destroy_cpu_csd_indirect_resources(struct v3dv_job *job)
231
{
232
assert(job->type == V3DV_JOB_TYPE_CPU_CSD_INDIRECT);
233
assert(job->cmd_buffer);
234
v3dv_job_destroy(job->cpu.csd_indirect.csd_job);
235
}
236
237
void
238
v3dv_job_destroy(struct v3dv_job *job)
239
{
240
assert(job);
241
242
list_del(&job->list_link);
243
244
/* Cloned jobs don't make deep copies of the original jobs, so they don't
245
* own any of their resources. However, they do allocate clones of BO
246
* structs, so make sure we free those.
247
*/
248
if (!job->is_clone) {
249
switch (job->type) {
250
case V3DV_JOB_TYPE_GPU_CL:
251
case V3DV_JOB_TYPE_GPU_CL_SECONDARY:
252
job_destroy_gpu_cl_resources(job);
253
break;
254
case V3DV_JOB_TYPE_GPU_CSD:
255
job_destroy_gpu_csd_resources(job);
256
break;
257
case V3DV_JOB_TYPE_CPU_WAIT_EVENTS:
258
job_destroy_cpu_wait_events_resources(job);
259
break;
260
case V3DV_JOB_TYPE_CPU_CSD_INDIRECT:
261
job_destroy_cpu_csd_indirect_resources(job);
262
break;
263
default:
264
break;
265
}
266
} else {
267
/* Cloned jobs */
268
if (job->type == V3DV_JOB_TYPE_GPU_CL)
269
job_destroy_cloned_gpu_cl_resources(job);
270
}
271
272
vk_free(&job->device->vk.alloc, job);
273
}
274
275
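/* Ties a driver-internal object to the command buffer so that it is destroyed
 * through destroy_cb when the command buffer is reset or freed (see
 * cmd_buffer_free_resources). Hypothetical usage from a meta operation:
 *
 *    v3dv_cmd_buffer_add_private_obj(cmd_buffer, (uintptr_t)image_view,
 *                                    destroy_image_view_cb);
 *
 * where destroy_image_view_cb is a callback provided by the caller.
 */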
void
276
v3dv_cmd_buffer_add_private_obj(struct v3dv_cmd_buffer *cmd_buffer,
277
uint64_t obj,
278
v3dv_cmd_buffer_private_obj_destroy_cb destroy_cb)
279
{
280
struct v3dv_cmd_buffer_private_obj *pobj =
281
vk_alloc(&cmd_buffer->device->vk.alloc, sizeof(*pobj), 8,
282
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
283
if (!pobj) {
284
v3dv_flag_oom(cmd_buffer, NULL);
285
return;
286
}
287
288
pobj->obj = obj;
289
pobj->destroy_cb = destroy_cb;
290
291
list_addtail(&pobj->list_link, &cmd_buffer->private_objs);
292
}
293
294
static void
295
cmd_buffer_destroy_private_obj(struct v3dv_cmd_buffer *cmd_buffer,
296
struct v3dv_cmd_buffer_private_obj *pobj)
297
{
298
assert(pobj && pobj->obj && pobj->destroy_cb);
299
pobj->destroy_cb(v3dv_device_to_handle(cmd_buffer->device),
300
pobj->obj,
301
&cmd_buffer->device->vk.alloc);
302
list_del(&pobj->list_link);
303
vk_free(&cmd_buffer->device->vk.alloc, pobj);
304
}
305
306
static void
307
cmd_buffer_free_resources(struct v3dv_cmd_buffer *cmd_buffer)
308
{
309
list_for_each_entry_safe(struct v3dv_job, job,
310
&cmd_buffer->jobs, list_link) {
311
v3dv_job_destroy(job);
312
}
313
314
if (cmd_buffer->state.job)
315
v3dv_job_destroy(cmd_buffer->state.job);
316
317
if (cmd_buffer->state.attachments)
318
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
319
320
if (cmd_buffer->state.query.end.alloc_count > 0)
321
vk_free(&cmd_buffer->device->vk.alloc, cmd_buffer->state.query.end.states);
322
323
if (cmd_buffer->push_constants_resource.bo)
324
v3dv_bo_free(cmd_buffer->device, cmd_buffer->push_constants_resource.bo);
325
326
list_for_each_entry_safe(struct v3dv_cmd_buffer_private_obj, pobj,
327
&cmd_buffer->private_objs, list_link) {
328
cmd_buffer_destroy_private_obj(cmd_buffer, pobj);
329
}
330
331
if (cmd_buffer->state.meta.attachments) {
332
assert(cmd_buffer->state.meta.attachment_alloc_count > 0);
333
vk_free(&cmd_buffer->device->vk.alloc, cmd_buffer->state.meta.attachments);
334
}
335
}
336
337
static void
338
cmd_buffer_destroy(struct v3dv_cmd_buffer *cmd_buffer)
339
{
340
list_del(&cmd_buffer->pool_link);
341
cmd_buffer_free_resources(cmd_buffer);
342
vk_object_free(&cmd_buffer->device->vk, &cmd_buffer->pool->alloc, cmd_buffer);
343
}
344
345
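/* Returns true if every attachment referenced by l1 (ignoring
 * VK_ATTACHMENT_UNUSED entries) is also referenced somewhere in l2.
 */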
static bool
346
attachment_list_is_subset(struct v3dv_subpass_attachment *l1, uint32_t l1_count,
347
struct v3dv_subpass_attachment *l2, uint32_t l2_count)
348
{
349
for (uint32_t i = 0; i < l1_count; i++) {
350
uint32_t attachment_idx = l1[i].attachment;
351
if (attachment_idx == VK_ATTACHMENT_UNUSED)
352
continue;
353
354
uint32_t j;
355
for (j = 0; j < l2_count; j++) {
356
if (l2[j].attachment == attachment_idx)
357
break;
358
}
359
if (j == l2_count)
360
return false;
361
}
362
363
return true;
364
}
365
366
static bool
367
cmd_buffer_can_merge_subpass(struct v3dv_cmd_buffer *cmd_buffer,
368
uint32_t subpass_idx)
369
{
370
const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
371
assert(state->pass);
372
373
const struct v3dv_physical_device *physical_device =
374
&cmd_buffer->device->instance->physicalDevice;
375
376
if (cmd_buffer->level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
377
return false;
378
379
if (!cmd_buffer->state.job)
380
return false;
381
382
if (cmd_buffer->state.job->always_flush)
383
return false;
384
385
if (!physical_device->options.merge_jobs)
386
return false;
387
388
/* Each render pass starts a new job */
389
if (subpass_idx == 0)
390
return false;
391
392
/* Two subpasses can be merged in the same job if we can emit a single RCL
393
* for them (since the RCL includes the END_OF_RENDERING command that
394
* triggers the "render job finished" interrupt). We can do this so long
395
* as both subpasses render against the same attachments.
396
*/
397
assert(state->subpass_idx == subpass_idx - 1);
398
struct v3dv_subpass *prev_subpass = &state->pass->subpasses[state->subpass_idx];
399
struct v3dv_subpass *subpass = &state->pass->subpasses[subpass_idx];
400
401
/* Because the list of subpass attachments can include VK_ATTACHMENT_UNUSED,
402
* we need to check that for each subpass all its used attachments are
403
* used by the other subpass.
404
*/
405
bool compatible =
406
attachment_list_is_subset(prev_subpass->color_attachments,
407
prev_subpass->color_count,
408
subpass->color_attachments,
409
subpass->color_count);
410
if (!compatible)
411
return false;
412
413
compatible =
414
attachment_list_is_subset(subpass->color_attachments,
415
subpass->color_count,
416
prev_subpass->color_attachments,
417
prev_subpass->color_count);
418
if (!compatible)
419
return false;
420
421
if (subpass->ds_attachment.attachment !=
422
prev_subpass->ds_attachment.attachment)
423
return false;
424
425
/* FIXME: Since some attachment formats can't be resolved using the TLB we
426
* need to emit separate resolve jobs for them and that would not be
427
* compatible with subpass merges. We could fix that by testing if any of
428
* the attachments to resolve doesn't support TLB resolves.
429
*/
430
if (prev_subpass->resolve_attachments || subpass->resolve_attachments)
431
return false;
432
433
return true;
434
}
435
436
/**
437
* Computes and sets the job frame tiling information required to set up frame
438
* binning and rendering.
439
*/
440
static struct v3dv_frame_tiling *
441
job_compute_frame_tiling(struct v3dv_job *job,
442
uint32_t width,
443
uint32_t height,
444
uint32_t layers,
445
uint32_t render_target_count,
446
uint8_t max_internal_bpp,
447
bool msaa)
448
{
449
static const uint8_t tile_sizes[] = {
450
64, 64,
451
64, 32,
452
32, 32,
453
32, 16,
454
16, 16,
455
16, 8,
456
8, 8
457
};
458
459
assert(job);
460
struct v3dv_frame_tiling *tiling = &job->frame_tiling;
461
462
tiling->width = width;
463
tiling->height = height;
464
tiling->layers = layers;
465
tiling->render_target_count = render_target_count;
466
tiling->msaa = msaa;
467
468
uint32_t tile_size_index = 0;
469
470
if (render_target_count > 2)
471
tile_size_index += 2;
472
else if (render_target_count > 1)
473
tile_size_index += 1;
474
475
if (msaa)
476
tile_size_index += 2;
477
478
tiling->internal_bpp = max_internal_bpp;
479
tile_size_index += tiling->internal_bpp;
480
assert(tile_size_index < ARRAY_SIZE(tile_sizes) / 2);
481
482
tiling->tile_width = tile_sizes[tile_size_index * 2];
483
tiling->tile_height = tile_sizes[tile_size_index * 2 + 1];
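/* Illustrative example: with more than 2 render targets (+2), MSAA (+2) and
 * a 32-bit internal format (internal_bpp 0), the index is 4, which selects
 * 16x16 tiles from the table above.
 */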
484
485
tiling->draw_tiles_x = DIV_ROUND_UP(width, tiling->tile_width);
486
tiling->draw_tiles_y = DIV_ROUND_UP(height, tiling->tile_height);
487
488
/* Size up our supertiles until we get under the limit */
489
const uint32_t max_supertiles = 256;
490
tiling->supertile_width = 1;
491
tiling->supertile_height = 1;
492
for (;;) {
493
tiling->frame_width_in_supertiles =
494
DIV_ROUND_UP(tiling->draw_tiles_x, tiling->supertile_width);
495
tiling->frame_height_in_supertiles =
496
DIV_ROUND_UP(tiling->draw_tiles_y, tiling->supertile_height);
497
const uint32_t num_supertiles = tiling->frame_width_in_supertiles *
498
tiling->frame_height_in_supertiles;
499
if (num_supertiles < max_supertiles)
500
break;
501
502
if (tiling->supertile_width < tiling->supertile_height)
503
tiling->supertile_width++;
504
else
505
tiling->supertile_height++;
506
}
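/* Illustrative example: a 1920x1080 frame with 64x64 tiles has 30x17 draw
 * tiles (510 supertiles at 1x1), so the loop grows the supertile size to
 * 2x2, giving 15x9 = 135 supertiles, which is under the 256 limit.
 */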
507
508
return tiling;
509
}
510
511
void
512
v3dv_job_start_frame(struct v3dv_job *job,
513
uint32_t width,
514
uint32_t height,
515
uint32_t layers,
516
uint32_t render_target_count,
517
uint8_t max_internal_bpp,
518
bool msaa)
519
{
520
assert(job);
521
522
/* Start by computing frame tiling spec for this job */
523
const struct v3dv_frame_tiling *tiling =
524
job_compute_frame_tiling(job,
525
width, height, layers,
526
render_target_count, max_internal_bpp, msaa);
527
528
v3dv_cl_ensure_space_with_branch(&job->bcl, 256);
529
v3dv_return_if_oom(NULL, job);
530
531
/* The PTB will request the tile alloc initial size per tile at start
532
* of tile binning.
533
*/
534
uint32_t tile_alloc_size = 64 * tiling->layers *
535
tiling->draw_tiles_x *
536
tiling->draw_tiles_y;
537
538
/* The PTB allocates in aligned 4k chunks after the initial setup. */
539
tile_alloc_size = align(tile_alloc_size, 4096);
540
541
/* Include the first two chunk allocations that the PTB does so that
542
* we definitely clear the OOM condition before triggering one (the HW
543
* won't trigger OOM during the first allocations).
544
*/
545
tile_alloc_size += 8192;
546
547
/* For performance, allocate some extra initial memory after the PTB's
548
* minimal allocations, so that we hopefully don't have to block the
549
* GPU on the kernel handling an OOM signal.
550
*/
551
tile_alloc_size += 512 * 1024;
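/* Illustrative example: a single-layer 1920x1080 frame with 64x64 tiles has
 * 30x17 draw tiles, so this is 64 * 30 * 17 = 32640 bytes, aligned up to
 * 32768, plus 8192 and the 512 KB cushion: about 552 KB in total.
 */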
552
553
job->tile_alloc = v3dv_bo_alloc(job->device, tile_alloc_size,
554
"tile_alloc", true);
555
if (!job->tile_alloc) {
556
v3dv_flag_oom(NULL, job);
557
return;
558
}
559
560
v3dv_job_add_bo_unchecked(job, job->tile_alloc);
561
562
const uint32_t tsda_per_tile_size = 256;
563
const uint32_t tile_state_size = tiling->layers *
564
tiling->draw_tiles_x *
565
tiling->draw_tiles_y *
566
tsda_per_tile_size;
567
job->tile_state = v3dv_bo_alloc(job->device, tile_state_size, "TSDA", true);
568
if (!job->tile_state) {
569
v3dv_flag_oom(NULL, job);
570
return;
571
}
572
573
v3dv_job_add_bo_unchecked(job, job->tile_state);
574
575
v3dv_X(job->device, job_emit_binning_prolog)(job, tiling, layers);
576
577
job->ez_state = V3D_EZ_UNDECIDED;
578
job->first_ez_state = V3D_EZ_UNDECIDED;
579
}
580
581
static void
582
cmd_buffer_end_render_pass_frame(struct v3dv_cmd_buffer *cmd_buffer)
583
{
584
assert(cmd_buffer->state.job);
585
586
/* Typically, we have a single job for each subpass and we emit the job's RCL
587
* here when we are ending the frame for the subpass. However, some commands
588
* such as vkCmdClearAttachments need to run in their own separate job and
589
* they emit their own RCL even if they execute inside a subpass. In this
590
* scenario, we don't want to emit subpass RCL when we end the frame for
591
* those jobs, so we only emit the subpass RCL if the job has not recorded
592
* any RCL commands of its own.
593
*/
594
if (v3dv_cl_offset(&cmd_buffer->state.job->rcl) == 0)
595
v3dv_X(cmd_buffer->device, cmd_buffer_emit_render_pass_rcl)(cmd_buffer);
596
597
v3dv_X(cmd_buffer->device, job_emit_binning_flush)(cmd_buffer->state.job);
598
}
599
600
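/* Allocates a job with one of the CPU job types (event waits, query
 * bookkeeping, indirect compute setup, ...). These jobs are processed by the
 * queue on the CPU in submission order instead of being sent to the GPU.
 */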
struct v3dv_job *
601
v3dv_cmd_buffer_create_cpu_job(struct v3dv_device *device,
602
enum v3dv_job_type type,
603
struct v3dv_cmd_buffer *cmd_buffer,
604
uint32_t subpass_idx)
605
{
606
struct v3dv_job *job = vk_zalloc(&device->vk.alloc,
607
sizeof(struct v3dv_job), 8,
608
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
609
if (!job) {
610
v3dv_flag_oom(cmd_buffer, NULL);
611
return NULL;
612
}
613
614
v3dv_job_init(job, type, device, cmd_buffer, subpass_idx);
615
return job;
616
}
617
618
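/* Emits any deferred CPU jobs (currently only end-of-query bookkeeping) that
 * must run after the last GPU job recorded in the command buffer completes.
 */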
static void
619
cmd_buffer_add_cpu_jobs_for_pending_state(struct v3dv_cmd_buffer *cmd_buffer)
620
{
621
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
622
623
if (state->query.end.used_count > 0) {
624
const uint32_t query_count = state->query.end.used_count;
625
for (uint32_t i = 0; i < query_count; i++) {
626
assert(i < state->query.end.used_count);
627
struct v3dv_job *job =
628
v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
629
V3DV_JOB_TYPE_CPU_END_QUERY,
630
cmd_buffer, -1);
631
v3dv_return_if_oom(cmd_buffer, NULL);
632
633
job->cpu.query_end = state->query.end.states[i];
634
list_addtail(&job->list_link, &cmd_buffer->jobs);
635
}
636
}
637
}
638
639
void
640
v3dv_cmd_buffer_finish_job(struct v3dv_cmd_buffer *cmd_buffer)
641
{
642
struct v3dv_job *job = cmd_buffer->state.job;
643
if (!job)
644
return;
645
646
if (cmd_buffer->state.oom) {
647
v3dv_job_destroy(job);
648
cmd_buffer->state.job = NULL;
649
return;
650
}
651
652
/* If we have created a job for a command buffer then we should have
653
* recorded something into it: if the job was started in a render pass, it
654
* should at least have the start frame commands, otherwise, it should have
655
* a transfer command. The only exception is secondary command buffers
656
* inside a render pass.
657
*/
658
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
659
v3dv_cl_offset(&job->bcl) > 0);
660
661
/* When we merge multiple subpasses into the same job we must only emit one
662
* RCL, so we do that here, when we have decided to finish the job.
663
* Any rendering that happens outside a render pass is never merged, so
664
* the RCL should have been emitted by the time we got here.
665
*/
666
assert(v3dv_cl_offset(&job->rcl) != 0 || cmd_buffer->state.pass);
667
668
/* If we are finishing a job inside a render pass we have two scenarios:
669
*
670
* 1. It is a regular CL, in which case we will submit the job to the GPU,
671
* so we may need to generate an RCL and add a binning flush.
672
*
673
* 2. It is a partial CL recorded in a secondary command buffer, in which
674
* case we are not submitting it directly to the GPU but rather branch to
675
* it from a primary command buffer. In this case we just want to end
676
* the BCL with a RETURN_FROM_SUB_LIST and the RCL and binning flush
677
* will be the primary job that branches to this CL.
678
*/
679
if (cmd_buffer->state.pass) {
680
if (job->type == V3DV_JOB_TYPE_GPU_CL) {
681
cmd_buffer_end_render_pass_frame(cmd_buffer);
682
} else {
683
assert(job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY);
684
v3dv_X(cmd_buffer->device, cmd_buffer_end_render_pass_secondary)(cmd_buffer);
685
}
686
}
687
688
list_addtail(&job->list_link, &cmd_buffer->jobs);
689
cmd_buffer->state.job = NULL;
690
691
/* If we have recorded any state with this last GPU job that requires to
692
* emit CPU jobs after the job is completed, add them now. The only
693
* exception is secondary command buffers inside a render pass, because in
694
* that case we want to defer this until we finish recording the primary
695
* job into which we execute the secondary.
696
*/
697
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY ||
698
!cmd_buffer->state.pass) {
699
cmd_buffer_add_cpu_jobs_for_pending_state(cmd_buffer);
700
}
701
}
702
703
static bool
704
job_type_is_gpu(struct v3dv_job *job)
705
{
706
switch (job->type) {
707
case V3DV_JOB_TYPE_GPU_CL:
708
case V3DV_JOB_TYPE_GPU_CL_SECONDARY:
709
case V3DV_JOB_TYPE_GPU_TFU:
710
case V3DV_JOB_TYPE_GPU_CSD:
711
return true;
712
default:
713
return false;
714
}
715
}
716
717
static void
718
cmd_buffer_serialize_job_if_needed(struct v3dv_cmd_buffer *cmd_buffer,
719
struct v3dv_job *job)
720
{
721
assert(cmd_buffer && job);
722
723
if (!cmd_buffer->state.has_barrier)
724
return;
725
726
/* Serialization only affects GPU jobs; CPU jobs are always automatically
727
* serialized.
728
*/
729
if (!job_type_is_gpu(job))
730
return;
731
732
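/* Roughly: "serialize" makes this job wait for previously submitted GPU jobs
 * to complete, while "needs_bcl_sync" additionally holds back the binning
 * (BCL) stage rather than only the render (RCL) stage.
 */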
job->serialize = true;
733
if (cmd_buffer->state.has_bcl_barrier &&
734
(job->type == V3DV_JOB_TYPE_GPU_CL ||
735
job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY)) {
736
job->needs_bcl_sync = true;
737
}
738
739
cmd_buffer->state.has_barrier = false;
740
cmd_buffer->state.has_bcl_barrier = false;
741
}
742
743
void
744
v3dv_job_init(struct v3dv_job *job,
745
enum v3dv_job_type type,
746
struct v3dv_device *device,
747
struct v3dv_cmd_buffer *cmd_buffer,
748
int32_t subpass_idx)
749
{
750
assert(job);
751
752
/* Make sure we haven't made this new job current before calling here */
753
assert(!cmd_buffer || cmd_buffer->state.job != job);
754
755
job->type = type;
756
757
job->device = device;
758
job->cmd_buffer = cmd_buffer;
759
760
list_inithead(&job->list_link);
761
762
if (type == V3DV_JOB_TYPE_GPU_CL ||
763
type == V3DV_JOB_TYPE_GPU_CL_SECONDARY ||
764
type == V3DV_JOB_TYPE_GPU_CSD) {
765
job->bos =
766
_mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
767
job->bo_count = 0;
768
769
v3dv_cl_init(job, &job->indirect);
770
771
if (V3D_DEBUG & V3D_DEBUG_ALWAYS_FLUSH)
772
job->always_flush = true;
773
}
774
775
if (type == V3DV_JOB_TYPE_GPU_CL ||
776
type == V3DV_JOB_TYPE_GPU_CL_SECONDARY) {
777
v3dv_cl_init(job, &job->bcl);
778
v3dv_cl_init(job, &job->rcl);
779
}
780
781
if (cmd_buffer) {
782
/* Flag all state as dirty. Generally, we need to re-emit state for each
783
* new job.
784
*
785
* FIXME: there may be some exceptions, in which case we could skip some
786
* bits.
787
*/
788
cmd_buffer->state.dirty = ~0;
789
cmd_buffer->state.dirty_descriptor_stages = ~0;
790
791
/* Honor inheritance of occlusion queries in secondaries if requested */
792
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
793
cmd_buffer->state.inheritance.occlusion_query_enable) {
794
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_OCCLUSION_QUERY;
795
}
796
797
/* Keep track of the first subpass that we are recording in this new job.
798
* We will use this when we emit the RCL to decide how to emit our loads
799
* and stores.
800
*/
801
if (cmd_buffer->state.pass)
802
job->first_subpass = subpass_idx;
803
804
cmd_buffer_serialize_job_if_needed(cmd_buffer, job);
805
}
806
}
807
808
struct v3dv_job *
809
v3dv_cmd_buffer_start_job(struct v3dv_cmd_buffer *cmd_buffer,
810
int32_t subpass_idx,
811
enum v3dv_job_type type)
812
{
813
/* Don't create a new job if we can merge the current subpass into
814
* the current job.
815
*/
816
if (cmd_buffer->state.pass &&
817
subpass_idx != -1 &&
818
cmd_buffer_can_merge_subpass(cmd_buffer, subpass_idx)) {
819
cmd_buffer->state.job->is_subpass_finish = false;
820
return cmd_buffer->state.job;
821
}
822
823
/* Ensure we are not starting a new job without finishing a previous one */
824
if (cmd_buffer->state.job != NULL)
825
v3dv_cmd_buffer_finish_job(cmd_buffer);
826
827
assert(cmd_buffer->state.job == NULL);
828
struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->vk.alloc,
829
sizeof(struct v3dv_job), 8,
830
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
831
832
if (!job) {
833
fprintf(stderr, "Error: failed to allocate CPU memory for job\n");
834
v3dv_flag_oom(cmd_buffer, NULL);
835
return NULL;
836
}
837
838
v3dv_job_init(job, type, cmd_buffer->device, cmd_buffer, subpass_idx);
839
cmd_buffer->state.job = job;
840
841
return job;
842
}
843
844
static VkResult
845
cmd_buffer_reset(struct v3dv_cmd_buffer *cmd_buffer,
846
VkCommandBufferResetFlags flags)
847
{
848
if (cmd_buffer->status != V3DV_CMD_BUFFER_STATUS_INITIALIZED) {
849
struct v3dv_device *device = cmd_buffer->device;
850
struct v3dv_cmd_pool *pool = cmd_buffer->pool;
851
VkCommandBufferLevel level = cmd_buffer->level;
852
853
/* cmd_buffer_init below will re-add the command buffer to the pool
854
* so remove it here to avoid adding it twice.
855
*/
856
list_del(&cmd_buffer->pool_link);
857
858
/* FIXME: For now we always free all resources as if
859
* VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT was set.
860
*/
861
if (cmd_buffer->status != V3DV_CMD_BUFFER_STATUS_NEW)
862
cmd_buffer_free_resources(cmd_buffer);
863
864
cmd_buffer_init(cmd_buffer, device, pool, level);
865
}
866
867
assert(cmd_buffer->status == V3DV_CMD_BUFFER_STATUS_INITIALIZED);
868
return VK_SUCCESS;
869
}
870
871
VKAPI_ATTR VkResult VKAPI_CALL
872
v3dv_AllocateCommandBuffers(VkDevice _device,
873
const VkCommandBufferAllocateInfo *pAllocateInfo,
874
VkCommandBuffer *pCommandBuffers)
875
{
876
V3DV_FROM_HANDLE(v3dv_device, device, _device);
877
V3DV_FROM_HANDLE(v3dv_cmd_pool, pool, pAllocateInfo->commandPool);
878
879
VkResult result = VK_SUCCESS;
880
uint32_t i;
881
882
for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
883
result = cmd_buffer_create(device, pool, pAllocateInfo->level,
884
&pCommandBuffers[i]);
885
if (result != VK_SUCCESS)
886
break;
887
}
888
889
if (result != VK_SUCCESS) {
890
v3dv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
891
i, pCommandBuffers);
892
for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
893
pCommandBuffers[i] = VK_NULL_HANDLE;
894
}
895
896
return result;
897
}
898
899
VKAPI_ATTR void VKAPI_CALL
900
v3dv_FreeCommandBuffers(VkDevice device,
901
VkCommandPool commandPool,
902
uint32_t commandBufferCount,
903
const VkCommandBuffer *pCommandBuffers)
904
{
905
for (uint32_t i = 0; i < commandBufferCount; i++) {
906
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
907
908
if (!cmd_buffer)
909
continue;
910
911
cmd_buffer_destroy(cmd_buffer);
912
}
913
}
914
915
VKAPI_ATTR void VKAPI_CALL
916
v3dv_DestroyCommandPool(VkDevice _device,
917
VkCommandPool commandPool,
918
const VkAllocationCallbacks *pAllocator)
919
{
920
V3DV_FROM_HANDLE(v3dv_device, device, _device);
921
V3DV_FROM_HANDLE(v3dv_cmd_pool, pool, commandPool);
922
923
if (!pool)
924
return;
925
926
list_for_each_entry_safe(struct v3dv_cmd_buffer, cmd_buffer,
927
&pool->cmd_buffers, pool_link) {
928
cmd_buffer_destroy(cmd_buffer);
929
}
930
931
vk_object_free(&device->vk, pAllocator, pool);
932
}
933
934
VKAPI_ATTR void VKAPI_CALL
935
v3dv_TrimCommandPool(VkDevice device,
936
VkCommandPool commandPool,
937
VkCommandPoolTrimFlags flags)
938
{
939
/* We don't need to do anything here, our command pools never hold on to
940
* any resources from command buffers that are freed or reset.
941
*/
942
}
943
944
945
static void
946
cmd_buffer_subpass_handle_pending_resolves(struct v3dv_cmd_buffer *cmd_buffer)
947
{
948
assert(cmd_buffer->state.subpass_idx < cmd_buffer->state.pass->subpass_count);
949
const struct v3dv_render_pass *pass = cmd_buffer->state.pass;
950
const struct v3dv_subpass *subpass =
951
&pass->subpasses[cmd_buffer->state.subpass_idx];
952
953
if (!subpass->resolve_attachments)
954
return;
955
956
struct v3dv_framebuffer *fb = cmd_buffer->state.framebuffer;
957
958
/* At this point we have already ended the current subpass and now we are
959
* about to emit vkCmdResolveImage calls to get the resolves we can't handle
960
* in the subpass RCL.
961
*
962
* vkCmdResolveImage is not supposed to be called inside a render pass so
963
* before we call that we need to make sure our command buffer state reflects
964
* that we are no longer in a subpass by finishing the current job and
965
* resetting the framebuffer and render pass state temporarily and then
966
* restoring it after we are done with the resolves.
967
*/
968
if (cmd_buffer->state.job)
969
v3dv_cmd_buffer_finish_job(cmd_buffer);
970
struct v3dv_framebuffer *restore_fb = cmd_buffer->state.framebuffer;
971
struct v3dv_render_pass *restore_pass = cmd_buffer->state.pass;
972
uint32_t restore_subpass_idx = cmd_buffer->state.subpass_idx;
973
cmd_buffer->state.framebuffer = NULL;
974
cmd_buffer->state.pass = NULL;
975
cmd_buffer->state.subpass_idx = -1;
976
977
VkCommandBuffer cmd_buffer_handle = v3dv_cmd_buffer_to_handle(cmd_buffer);
978
for (uint32_t i = 0; i < subpass->color_count; i++) {
979
const uint32_t src_attachment_idx =
980
subpass->color_attachments[i].attachment;
981
if (src_attachment_idx == VK_ATTACHMENT_UNUSED)
982
continue;
983
984
if (pass->attachments[src_attachment_idx].use_tlb_resolve)
985
continue;
986
987
const uint32_t dst_attachment_idx =
988
subpass->resolve_attachments[i].attachment;
989
if (dst_attachment_idx == VK_ATTACHMENT_UNUSED)
990
continue;
991
992
struct v3dv_image_view *src_iview = fb->attachments[src_attachment_idx];
993
struct v3dv_image_view *dst_iview = fb->attachments[dst_attachment_idx];
994
995
VkImageResolve2KHR region = {
996
.sType = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR,
997
.srcSubresource = {
998
VK_IMAGE_ASPECT_COLOR_BIT,
999
src_iview->base_level,
1000
src_iview->first_layer,
1001
src_iview->last_layer - src_iview->first_layer + 1,
1002
},
1003
.srcOffset = { 0, 0, 0 },
1004
.dstSubresource = {
1005
VK_IMAGE_ASPECT_COLOR_BIT,
1006
dst_iview->base_level,
1007
dst_iview->first_layer,
1008
dst_iview->last_layer - dst_iview->first_layer + 1,
1009
},
1010
.dstOffset = { 0, 0, 0 },
1011
.extent = src_iview->image->extent,
1012
};
1013
1014
VkResolveImageInfo2KHR resolve_info = {
1015
.sType = VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR,
1016
.srcImage = v3dv_image_to_handle(src_iview->image),
1017
.srcImageLayout = VK_IMAGE_LAYOUT_GENERAL,
1018
.dstImage = v3dv_image_to_handle(dst_iview->image),
1019
.dstImageLayout = VK_IMAGE_LAYOUT_GENERAL,
1020
.regionCount = 1,
1021
.pRegions = &region,
1022
};
1023
v3dv_CmdResolveImage2KHR(cmd_buffer_handle, &resolve_info);
1024
}
1025
1026
cmd_buffer->state.framebuffer = restore_fb;
1027
cmd_buffer->state.pass = restore_pass;
1028
cmd_buffer->state.subpass_idx = restore_subpass_idx;
1029
}
1030
1031
static VkResult
1032
cmd_buffer_begin_render_pass_secondary(
1033
struct v3dv_cmd_buffer *cmd_buffer,
1034
const VkCommandBufferInheritanceInfo *inheritance_info)
1035
{
1036
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
1037
assert(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT);
1038
assert(inheritance_info);
1039
1040
cmd_buffer->state.pass =
1041
v3dv_render_pass_from_handle(inheritance_info->renderPass);
1042
assert(cmd_buffer->state.pass);
1043
1044
cmd_buffer->state.framebuffer =
1045
v3dv_framebuffer_from_handle(inheritance_info->framebuffer);
1046
1047
assert(inheritance_info->subpass < cmd_buffer->state.pass->subpass_count);
1048
cmd_buffer->state.subpass_idx = inheritance_info->subpass;
1049
1050
cmd_buffer->state.inheritance.occlusion_query_enable =
1051
inheritance_info->occlusionQueryEnable;
1052
1053
/* Secondaries that execute inside a render pass won't start subpasses
1054
* so we want to create a job for them here.
1055
*/
1056
struct v3dv_job *job =
1057
v3dv_cmd_buffer_start_job(cmd_buffer, inheritance_info->subpass,
1058
V3DV_JOB_TYPE_GPU_CL_SECONDARY);
1059
if (!job) {
1060
v3dv_flag_oom(cmd_buffer, NULL);
1061
return VK_ERROR_OUT_OF_HOST_MEMORY;
1062
}
1063
1064
/* Secondary command buffers don't know about the render area, but our
1065
* scissor setup accounts for it, so let's make sure we make it large
1066
* enough that it doesn't actually constrain any rendering. This should
1067
* be fine, since the Vulkan spec states:
1068
*
1069
* "The application must ensure (using scissor if necessary) that all
1070
* rendering is contained within the render area."
1071
*
1072
* FIXME: setup constants for the max framebuffer dimensions and use them
1073
* here and when filling in VkPhysicalDeviceLimits.
1074
*/
1075
const struct v3dv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
1076
cmd_buffer->state.render_area.offset.x = 0;
1077
cmd_buffer->state.render_area.offset.y = 0;
1078
cmd_buffer->state.render_area.extent.width =
1079
framebuffer ? framebuffer->width : 4096;
1080
cmd_buffer->state.render_area.extent.height =
1081
framebuffer ? framebuffer->height : 4096;
1082
1083
return VK_SUCCESS;
1084
}
1085
1086
VKAPI_ATTR VkResult VKAPI_CALL
1087
v3dv_BeginCommandBuffer(VkCommandBuffer commandBuffer,
1088
const VkCommandBufferBeginInfo *pBeginInfo)
1089
{
1090
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1091
1092
/* If this is the first vkBeginCommandBuffer, we must initialize the
1093
* command buffer's state. Otherwise, we must reset its state. In both
1094
* cases we reset it.
1095
*/
1096
VkResult result = cmd_buffer_reset(cmd_buffer, 0);
1097
if (result != VK_SUCCESS)
1098
return result;
1099
1100
assert(cmd_buffer->status == V3DV_CMD_BUFFER_STATUS_INITIALIZED);
1101
1102
cmd_buffer->usage_flags = pBeginInfo->flags;
1103
1104
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1105
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
1106
result =
1107
cmd_buffer_begin_render_pass_secondary(cmd_buffer,
1108
pBeginInfo->pInheritanceInfo);
1109
if (result != VK_SUCCESS)
1110
return result;
1111
}
1112
}
1113
1114
cmd_buffer->status = V3DV_CMD_BUFFER_STATUS_RECORDING;
1115
1116
return VK_SUCCESS;
1117
}
1118
1119
VKAPI_ATTR VkResult VKAPI_CALL
1120
v3dv_ResetCommandBuffer(VkCommandBuffer commandBuffer,
1121
VkCommandBufferResetFlags flags)
1122
{
1123
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1124
return cmd_buffer_reset(cmd_buffer, flags);
1125
}
1126
1127
VKAPI_ATTR VkResult VKAPI_CALL
1128
v3dv_ResetCommandPool(VkDevice device,
1129
VkCommandPool commandPool,
1130
VkCommandPoolResetFlags flags)
1131
{
1132
V3DV_FROM_HANDLE(v3dv_cmd_pool, pool, commandPool);
1133
1134
VkCommandBufferResetFlags reset_flags = 0;
1135
if (flags & VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT)
1136
reset_flags = VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT;
1137
list_for_each_entry_safe(struct v3dv_cmd_buffer, cmd_buffer,
1138
&pool->cmd_buffers, pool_link) {
1139
cmd_buffer_reset(cmd_buffer, reset_flags);
1140
}
1141
1142
return VK_SUCCESS;
1143
}
1144
1145
static void
1146
cmd_buffer_update_tile_alignment(struct v3dv_cmd_buffer *cmd_buffer)
1147
{
1148
/* Render areas and scissor/viewport are only relevant inside render passes,
1149
* otherwise we are dealing with transfer operations where these elements
1150
* don't apply.
1151
*/
1152
assert(cmd_buffer->state.pass);
1153
const VkRect2D *rect = &cmd_buffer->state.render_area;
1154
1155
/* We should only call this at the beginning of a subpass so we should
1156
* always have framebuffer information available.
1157
*/
1158
assert(cmd_buffer->state.framebuffer);
1159
cmd_buffer->state.tile_aligned_render_area =
1160
v3dv_subpass_area_is_tile_aligned(cmd_buffer->device, rect,
1161
cmd_buffer->state.framebuffer,
1162
cmd_buffer->state.pass,
1163
cmd_buffer->state.subpass_idx);
1164
1165
if (!cmd_buffer->state.tile_aligned_render_area) {
1166
perf_debug("Render area for subpass %d of render pass %p doesn't "
1167
"match render pass granularity.\n",
1168
cmd_buffer->state.subpass_idx, cmd_buffer->state.pass);
1169
}
1170
}
1171
1172
static void
1173
cmd_buffer_state_set_attachment_clear_color(struct v3dv_cmd_buffer *cmd_buffer,
1174
uint32_t attachment_idx,
1175
const VkClearColorValue *color)
1176
{
1177
assert(attachment_idx < cmd_buffer->state.pass->attachment_count);
1178
1179
const struct v3dv_render_pass_attachment *attachment =
1180
&cmd_buffer->state.pass->attachments[attachment_idx];
1181
1182
uint32_t internal_type, internal_bpp;
1183
const struct v3dv_format *format =
1184
v3dv_X(cmd_buffer->device, get_format)(attachment->desc.format);
1185
1186
v3dv_X(cmd_buffer->device, get_internal_type_bpp_for_output_format)
1187
(format->rt_type, &internal_type, &internal_bpp);
1188
1189
uint32_t internal_size = 4 << internal_bpp;
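/* internal_bpp encodes 32/64/128-bit render targets as 0/1/2, so the clear
 * value size per pixel is 4, 8 or 16 bytes.
 */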
1190
1191
struct v3dv_cmd_buffer_attachment_state *attachment_state =
1192
&cmd_buffer->state.attachments[attachment_idx];
1193
1194
v3dv_X(cmd_buffer->device, get_hw_clear_color)
1195
(color, internal_type, internal_size, &attachment_state->clear_value.color[0]);
1196
1197
attachment_state->vk_clear_value.color = *color;
1198
}
1199
1200
static void
1201
cmd_buffer_state_set_attachment_clear_depth_stencil(
1202
struct v3dv_cmd_buffer *cmd_buffer,
1203
uint32_t attachment_idx,
1204
bool clear_depth, bool clear_stencil,
1205
const VkClearDepthStencilValue *ds)
1206
{
1207
struct v3dv_cmd_buffer_attachment_state *attachment_state =
1208
&cmd_buffer->state.attachments[attachment_idx];
1209
1210
if (clear_depth)
1211
attachment_state->clear_value.z = ds->depth;
1212
1213
if (clear_stencil)
1214
attachment_state->clear_value.s = ds->stencil;
1215
1216
attachment_state->vk_clear_value.depthStencil = *ds;
1217
}
1218
1219
static void
1220
cmd_buffer_state_set_clear_values(struct v3dv_cmd_buffer *cmd_buffer,
1221
uint32_t count, const VkClearValue *values)
1222
{
1223
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1224
const struct v3dv_render_pass *pass = state->pass;
1225
1226
/* There could be fewer clear values than attachments in the render pass, in
1227
* which case we only want to process as many as we have, or there could be
1228
* more, in which case we want to ignore those for which we don't have a
1229
* corresponding attachment.
1230
*/
1231
count = MIN2(count, pass->attachment_count);
1232
for (uint32_t i = 0; i < count; i++) {
1233
const struct v3dv_render_pass_attachment *attachment =
1234
&pass->attachments[i];
1235
1236
if (attachment->desc.loadOp != VK_ATTACHMENT_LOAD_OP_CLEAR)
1237
continue;
1238
1239
VkImageAspectFlags aspects = vk_format_aspects(attachment->desc.format);
1240
if (aspects & VK_IMAGE_ASPECT_COLOR_BIT) {
1241
cmd_buffer_state_set_attachment_clear_color(cmd_buffer, i,
1242
&values[i].color);
1243
} else if (aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
1244
VK_IMAGE_ASPECT_STENCIL_BIT)) {
1245
cmd_buffer_state_set_attachment_clear_depth_stencil(
1246
cmd_buffer, i,
1247
aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
1248
aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
1249
&values[i].depthStencil);
1250
}
1251
}
1252
}
1253
1254
static void
1255
cmd_buffer_init_render_pass_attachment_state(struct v3dv_cmd_buffer *cmd_buffer,
1256
const VkRenderPassBeginInfo *pRenderPassBegin)
1257
{
1258
cmd_buffer_state_set_clear_values(cmd_buffer,
1259
pRenderPassBegin->clearValueCount,
1260
pRenderPassBegin->pClearValues);
1261
}
1262
1263
static void
1264
cmd_buffer_ensure_render_pass_attachment_state(struct v3dv_cmd_buffer *cmd_buffer)
1265
{
1266
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1267
const struct v3dv_render_pass *pass = state->pass;
1268
1269
if (state->attachment_alloc_count < pass->attachment_count) {
1270
if (state->attachments) {
1271
assert(state->attachment_alloc_count > 0);
1272
vk_free(&cmd_buffer->device->vk.alloc, state->attachments);
1273
}
1274
1275
uint32_t size = sizeof(struct v3dv_cmd_buffer_attachment_state) *
1276
pass->attachment_count;
1277
state->attachments = vk_zalloc(&cmd_buffer->device->vk.alloc, size, 8,
1278
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1279
if (!state->attachments) {
1280
v3dv_flag_oom(cmd_buffer, NULL);
1281
return;
1282
}
1283
state->attachment_alloc_count = pass->attachment_count;
1284
}
1285
1286
assert(state->attachment_alloc_count >= pass->attachment_count);
1287
}
1288
1289
VKAPI_ATTR void VKAPI_CALL
1290
v3dv_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
1291
const VkRenderPassBeginInfo *pRenderPassBegin,
1292
VkSubpassContents contents)
1293
{
1294
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1295
V3DV_FROM_HANDLE(v3dv_render_pass, pass, pRenderPassBegin->renderPass);
1296
V3DV_FROM_HANDLE(v3dv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
1297
1298
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1299
state->pass = pass;
1300
state->framebuffer = framebuffer;
1301
1302
cmd_buffer_ensure_render_pass_attachment_state(cmd_buffer);
1303
v3dv_return_if_oom(cmd_buffer, NULL);
1304
1305
cmd_buffer_init_render_pass_attachment_state(cmd_buffer, pRenderPassBegin);
1306
1307
state->render_area = pRenderPassBegin->renderArea;
1308
1309
/* If our render area is smaller than the current clip window we will have
1310
* to emit a new clip window to constrain it to the render area.
1311
*/
1312
uint32_t min_render_x = state->render_area.offset.x;
1313
uint32_t min_render_y = state->render_area.offset.y;
1314
uint32_t max_render_x = min_render_x + state->render_area.extent.width - 1;
1315
uint32_t max_render_y = min_render_y + state->render_area.extent.height - 1;
1316
uint32_t min_clip_x = state->clip_window.offset.x;
1317
uint32_t min_clip_y = state->clip_window.offset.y;
1318
uint32_t max_clip_x = min_clip_x + state->clip_window.extent.width - 1;
1319
uint32_t max_clip_y = min_clip_y + state->clip_window.extent.height - 1;
1320
if (min_render_x > min_clip_x || min_render_y > min_clip_y ||
1321
max_render_x < max_clip_x || max_render_y < max_clip_y) {
1322
state->dirty |= V3DV_CMD_DIRTY_SCISSOR;
1323
}
1324
1325
/* Setup for first subpass */
1326
v3dv_cmd_buffer_subpass_start(cmd_buffer, 0);
1327
}
1328
1329
VKAPI_ATTR void VKAPI_CALL
1330
v3dv_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
1331
{
1332
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1333
1334
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1335
assert(state->subpass_idx < state->pass->subpass_count - 1);
1336
1337
/* Finish the previous subpass */
1338
v3dv_cmd_buffer_subpass_finish(cmd_buffer);
1339
cmd_buffer_subpass_handle_pending_resolves(cmd_buffer);
1340
1341
/* Start the next subpass */
1342
v3dv_cmd_buffer_subpass_start(cmd_buffer, state->subpass_idx + 1);
1343
}
1344
1345
static void
1346
cmd_buffer_emit_subpass_clears(struct v3dv_cmd_buffer *cmd_buffer)
1347
{
1348
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1349
1350
assert(cmd_buffer->state.pass);
1351
assert(cmd_buffer->state.subpass_idx < cmd_buffer->state.pass->subpass_count);
1352
const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1353
const struct v3dv_render_pass *pass = state->pass;
1354
const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
1355
1356
/* We only need to emit subpass clears as draw calls when the render
1357
* area is not aligned to tile boundaries or for GFXH-1461.
1358
*/
1359
if (cmd_buffer->state.tile_aligned_render_area &&
1360
!subpass->do_depth_clear_with_draw &&
1361
!subpass->do_stencil_clear_with_draw) {
1362
return;
1363
}
1364
1365
uint32_t att_count = 0;
1366
VkClearAttachment atts[V3D_MAX_DRAW_BUFFERS + 1]; /* 4 color + D/S */
1367
1368
/* We only need to emit subpass clears as draw calls for color attachments
1369
* if the render area is not aligned to tile boundaries.
1370
*/
1371
if (!cmd_buffer->state.tile_aligned_render_area) {
1372
for (uint32_t i = 0; i < subpass->color_count; i++) {
1373
const uint32_t att_idx = subpass->color_attachments[i].attachment;
1374
if (att_idx == VK_ATTACHMENT_UNUSED)
1375
continue;
1376
1377
struct v3dv_render_pass_attachment *att = &pass->attachments[att_idx];
1378
if (att->desc.loadOp != VK_ATTACHMENT_LOAD_OP_CLEAR)
1379
continue;
1380
1381
if (state->subpass_idx != att->first_subpass)
1382
continue;
1383
1384
atts[att_count].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1385
atts[att_count].colorAttachment = i;
1386
atts[att_count].clearValue = state->attachments[att_idx].vk_clear_value;
1387
att_count++;
1388
}
1389
}
1390
1391
/* For D/S we may also need to emit a subpass clear for GFXH-1461 */
1392
const uint32_t ds_att_idx = subpass->ds_attachment.attachment;
1393
if (ds_att_idx != VK_ATTACHMENT_UNUSED) {
1394
struct v3dv_render_pass_attachment *att = &pass->attachments[ds_att_idx];
1395
if (state->subpass_idx == att->first_subpass) {
1396
VkImageAspectFlags aspects = vk_format_aspects(att->desc.format);
1397
if (att->desc.loadOp != VK_ATTACHMENT_LOAD_OP_CLEAR ||
1398
(cmd_buffer->state.tile_aligned_render_area &&
1399
!subpass->do_depth_clear_with_draw)) {
1400
aspects &= ~VK_IMAGE_ASPECT_DEPTH_BIT;
1401
}
1402
if (att->desc.stencilLoadOp != VK_ATTACHMENT_LOAD_OP_CLEAR ||
1403
(cmd_buffer->state.tile_aligned_render_area &&
1404
!subpass->do_stencil_clear_with_draw)) {
1405
aspects &= ~VK_IMAGE_ASPECT_STENCIL_BIT;
1406
}
1407
if (aspects) {
1408
atts[att_count].aspectMask = aspects;
1409
atts[att_count].colorAttachment = 0; /* Ignored */
1410
atts[att_count].clearValue =
1411
state->attachments[ds_att_idx].vk_clear_value;
1412
att_count++;
1413
}
1414
}
1415
}
1416
1417
if (att_count == 0)
1418
return;
1419
1420
if (!cmd_buffer->state.tile_aligned_render_area) {
1421
perf_debug("Render area doesn't match render pass granularity, falling "
1422
"back to vkCmdClearAttachments for "
1423
"VK_ATTACHMENT_LOAD_OP_CLEAR.\n");
1424
} else if (subpass->do_depth_clear_with_draw ||
1425
subpass->do_stencil_clear_with_draw) {
1426
perf_debug("Subpass clears DEPTH but loads STENCIL (or viceversa), "
1427
"falling back to vkCmdClearAttachments for "
1428
"VK_ATTACHMENT_LOAD_OP_CLEAR.\n");
1429
}
1430
1431
/* From the Vulkan 1.0 spec:
1432
*
1433
* "VK_ATTACHMENT_LOAD_OP_CLEAR specifies that the contents within the
1434
* render area will be cleared to a uniform value, which is specified
1435
* when a render pass instance is begun."
1436
*
1437
* So the clear is only constrained by the render area and not by pipeline
1438
* state such as scissor or viewport, these are the semantics of
1439
* vkCmdClearAttachments as well.
1440
*/
1441
VkCommandBuffer _cmd_buffer = v3dv_cmd_buffer_to_handle(cmd_buffer);
1442
VkClearRect rect = {
1443
.rect = state->render_area,
1444
.baseArrayLayer = 0,
1445
.layerCount = 1,
1446
};
1447
v3dv_CmdClearAttachments(_cmd_buffer, att_count, atts, 1, &rect);
1448
}
1449
1450
static struct v3dv_job *
1451
cmd_buffer_subpass_create_job(struct v3dv_cmd_buffer *cmd_buffer,
1452
uint32_t subpass_idx,
1453
enum v3dv_job_type type)
1454
{
1455
assert(type == V3DV_JOB_TYPE_GPU_CL ||
1456
type == V3DV_JOB_TYPE_GPU_CL_SECONDARY);
1457
1458
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1459
assert(subpass_idx < state->pass->subpass_count);
1460
1461
/* Starting a new job can trigger a finish of the current one, so don't
1462
* change the command buffer state for the new job until we are done creating
1463
* the new job.
1464
*/
1465
struct v3dv_job *job =
1466
v3dv_cmd_buffer_start_job(cmd_buffer, subpass_idx, type);
1467
if (!job)
1468
return NULL;
1469
1470
state->subpass_idx = subpass_idx;
1471
1472
/* If we are starting a new job we need to setup binning. We only do this
1473
* for V3DV_JOB_TYPE_GPU_CL jobs because V3DV_JOB_TYPE_GPU_CL_SECONDARY
1474
* jobs are not submitted to the GPU directly, and are instead meant to be
1475
* branched to from other V3DV_JOB_TYPE_GPU_CL jobs.
1476
*/
1477
if (type == V3DV_JOB_TYPE_GPU_CL &&
1478
job->first_subpass == state->subpass_idx) {
1479
const struct v3dv_subpass *subpass =
1480
&state->pass->subpasses[state->subpass_idx];
1481
1482
const struct v3dv_framebuffer *framebuffer = state->framebuffer;
1483
1484
uint8_t internal_bpp;
1485
bool msaa;
1486
v3dv_X(job->device, framebuffer_compute_internal_bpp_msaa)
1487
(framebuffer, subpass, &internal_bpp, &msaa);
1488
1489
v3dv_job_start_frame(job,
1490
framebuffer->width,
1491
framebuffer->height,
1492
framebuffer->layers,
1493
subpass->color_count,
1494
internal_bpp,
1495
msaa);
1496
}
1497
1498
return job;
1499
}
1500
1501
struct v3dv_job *
1502
v3dv_cmd_buffer_subpass_start(struct v3dv_cmd_buffer *cmd_buffer,
1503
uint32_t subpass_idx)
1504
{
1505
assert(cmd_buffer->state.pass);
1506
assert(subpass_idx < cmd_buffer->state.pass->subpass_count);
1507
1508
struct v3dv_job *job =
1509
cmd_buffer_subpass_create_job(cmd_buffer, subpass_idx,
1510
V3DV_JOB_TYPE_GPU_CL);
1511
if (!job)
1512
return NULL;
1513
1514
/* Check if our render area is aligned to tile boundaries. We have to do
1515
* this in each subpass because the subset of attachments used can change
1516
* and with that the tile size selected by the hardware can change too.
1517
*/
1518
cmd_buffer_update_tile_alignment(cmd_buffer);
1519
1520
/* If we can't use TLB clears then we need to emit draw clears for any
1521
* LOAD_OP_CLEAR attachments in this subpass now. We might also need to emit
1522
* Depth/Stencil clears if we hit GFXH-1461.
1523
*
1524
* Secondary command buffers don't start subpasses (and may not even have
1525
* framebuffer state), so we only care about this in primaries. The only
1526
* exception could be a secondary running inside a subpass that needs to
1527
* record a meta operation (with its own render pass) that relies on
1528
* attachment load clears, but we don't have any instances of that right
1529
* now.
1530
*/
1531
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
1532
cmd_buffer_emit_subpass_clears(cmd_buffer);
1533
1534
return job;
1535
}
1536
1537
struct v3dv_job *
1538
v3dv_cmd_buffer_subpass_resume(struct v3dv_cmd_buffer *cmd_buffer,
1539
uint32_t subpass_idx)
1540
{
1541
assert(cmd_buffer->state.pass);
1542
assert(subpass_idx < cmd_buffer->state.pass->subpass_count);
1543
1544
struct v3dv_job *job;
1545
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
1546
job = cmd_buffer_subpass_create_job(cmd_buffer, subpass_idx,
1547
V3DV_JOB_TYPE_GPU_CL);
1548
} else {
1549
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
1550
job = cmd_buffer_subpass_create_job(cmd_buffer, subpass_idx,
1551
V3DV_JOB_TYPE_GPU_CL_SECONDARY);
1552
}
1553
1554
if (!job)
1555
return NULL;
1556
1557
job->is_subpass_continue = true;
1558
1559
return job;
1560
}
1561
1562
void
1563
v3dv_cmd_buffer_subpass_finish(struct v3dv_cmd_buffer *cmd_buffer)
1564
{
1565
/* We can end up here without a job if the last command recorded into the
1566
* subpass already finished the job (for example a pipeline barrier). In
1567
* that case we fail to set the is_subpass_finish flag, but that is not
1568
* required for proper behavior.
1569
*/
1570
struct v3dv_job *job = cmd_buffer->state.job;
1571
if (job)
1572
job->is_subpass_finish = true;
1573
}
1574
1575
VKAPI_ATTR void VKAPI_CALL
1576
v3dv_CmdEndRenderPass(VkCommandBuffer commandBuffer)
1577
{
1578
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1579
1580
/* Finalize last subpass */
1581
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1582
assert(state->subpass_idx == state->pass->subpass_count - 1);
1583
v3dv_cmd_buffer_subpass_finish(cmd_buffer);
1584
v3dv_cmd_buffer_finish_job(cmd_buffer);
1585
1586
cmd_buffer_subpass_handle_pending_resolves(cmd_buffer);
1587
1588
/* We are no longer inside a render pass */
1589
state->framebuffer = NULL;
1590
state->pass = NULL;
1591
state->subpass_idx = -1;
1592
}
1593
1594
VKAPI_ATTR VkResult VKAPI_CALL
1595
v3dv_EndCommandBuffer(VkCommandBuffer commandBuffer)
1596
{
1597
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1598
1599
if (cmd_buffer->state.oom)
1600
return VK_ERROR_OUT_OF_HOST_MEMORY;
1601
1602
/* Primaries should have ended any recording jobs by the time they hit
1603
* vkEndRenderPass (if we are inside a render pass). Commands outside
1604
* a render pass instance (for both primaries and secondaries) spawn
1605
* complete jobs too. So the only case where we can get here without
1606
* finishing a recording job is when we are recording a secondary
1607
* inside a render pass.
1608
*/
1609
if (cmd_buffer->state.job) {
1610
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
1611
cmd_buffer->state.pass);
1612
v3dv_cmd_buffer_finish_job(cmd_buffer);
1613
}
1614
1615
cmd_buffer->status = V3DV_CMD_BUFFER_STATUS_EXECUTABLE;
1616
1617
return VK_SUCCESS;
1618
}
1619
1620
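/* Shallow-copies each BO struct in src into dst so that the cloned job gets
 * list links of its own; the underlying GPU buffers remain owned by the
 * original job and its API objects.
 */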
static void
1621
clone_bo_list(struct v3dv_cmd_buffer *cmd_buffer,
1622
struct list_head *dst,
1623
struct list_head *src)
1624
{
1625
assert(cmd_buffer);
1626
1627
list_inithead(dst);
1628
list_for_each_entry(struct v3dv_bo, bo, src, list_link) {
1629
struct v3dv_bo *clone_bo =
1630
vk_alloc(&cmd_buffer->device->vk.alloc, sizeof(struct v3dv_bo), 8,
1631
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1632
if (!clone_bo) {
1633
v3dv_flag_oom(cmd_buffer, NULL);
1634
return;
1635
}
1636
1637
*clone_bo = *bo;
1638
list_addtail(&clone_bo->list_link, dst);
1639
}
1640
}
1641
1642
/* Clones a job for inclusion in the given command buffer. Note that this
1643
* doesn't make a deep copy, so the cloned job doesn't own any resources.
1644
* Useful when we need to have a job in more than one list, which happens
1645
* for jobs recorded in secondary command buffers when we want to execute
1646
* them in primaries.
1647
*/
1648
struct v3dv_job *
1649
v3dv_job_clone_in_cmd_buffer(struct v3dv_job *job,
1650
struct v3dv_cmd_buffer *cmd_buffer)
1651
{
1652
struct v3dv_job *clone_job = vk_alloc(&job->device->vk.alloc,
1653
sizeof(struct v3dv_job), 8,
1654
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1655
if (!clone_job) {
1656
v3dv_flag_oom(cmd_buffer, NULL);
1657
return NULL;
1658
}
1659
1660
/* Cloned jobs don't duplicate resources! */
1661
*clone_job = *job;
1662
clone_job->is_clone = true;
1663
clone_job->cmd_buffer = cmd_buffer;
1664
list_addtail(&clone_job->list_link, &cmd_buffer->jobs);
1665
1666
/* We need to regen the BO lists so that they point to the BO list in the
1667
* cloned job. Otherwise functions like list_length() will loop forever.
1668
*/
1669
if (job->type == V3DV_JOB_TYPE_GPU_CL) {
1670
clone_bo_list(cmd_buffer, &clone_job->bcl.bo_list, &job->bcl.bo_list);
1671
clone_bo_list(cmd_buffer, &clone_job->rcl.bo_list, &job->rcl.bo_list);
1672
clone_bo_list(cmd_buffer, &clone_job->indirect.bo_list,
1673
&job->indirect.bo_list);
1674
}
1675
1676
return clone_job;
1677
}
1678
1679
static void
1680
cmd_buffer_execute_outside_pass(struct v3dv_cmd_buffer *primary,
1681
uint32_t cmd_buffer_count,
1682
const VkCommandBuffer *cmd_buffers)
1683
{
1684
bool pending_barrier = false;
1685
bool pending_bcl_barrier = false;
1686
for (uint32_t i = 0; i < cmd_buffer_count; i++) {
1687
V3DV_FROM_HANDLE(v3dv_cmd_buffer, secondary, cmd_buffers[i]);
1688
1689
assert(!(secondary->usage_flags &
1690
VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));
1691
1692
/* Secondary command buffers that execute outside a render pass create
* complete jobs with an RCL and tile setup, so we simply want to merge
* their job list into the primary's. However, because they may be
* executed in multiple primaries at the same time and we only have a
* single list_link in each job, we can't just add them to the primary's
* job list; we instead have to clone them first.
*
* Alternatively, we could create an "execute secondary" CPU job that,
* when executed in a queue, would submit all the jobs in the referenced
* secondary command buffer. However, this would raise some challenges
* to make it work with the implementation of wait threads in the queue,
* which we use for event waits, for example.
*/
1705
list_for_each_entry(struct v3dv_job, secondary_job,
1706
&secondary->jobs, list_link) {
1707
/* These can only happen inside a render pass */
1708
assert(secondary_job->type != V3DV_JOB_TYPE_GPU_CL_SECONDARY);
1709
struct v3dv_job *job = v3dv_job_clone_in_cmd_buffer(secondary_job, primary);
1710
if (!job)
1711
return;
1712
1713
if (pending_barrier) {
1714
job->serialize = true;
1715
if (pending_bcl_barrier)
1716
job->needs_bcl_sync = true;
1717
pending_barrier = false;
1718
pending_bcl_barrier = false;
1719
}
1720
}
1721
1722
/* If this secondary had any pending barrier state we will need that
1723
* barrier state consumed with whatever comes after it (first job in
1724
* the next secondary or the primary, if this was the last secondary).
1725
*/
1726
assert(secondary->state.has_barrier || !secondary->state.has_bcl_barrier);
1727
pending_barrier = secondary->state.has_barrier;
1728
pending_bcl_barrier = secondary->state.has_bcl_barrier;
1729
}
1730
1731
if (pending_barrier) {
1732
primary->state.has_barrier = true;
1733
primary->state.has_bcl_barrier |= pending_bcl_barrier;
1734
}
1735
}
1736
1737
VKAPI_ATTR void VKAPI_CALL
1738
v3dv_CmdExecuteCommands(VkCommandBuffer commandBuffer,
1739
uint32_t commandBufferCount,
1740
const VkCommandBuffer *pCommandBuffers)
1741
{
1742
V3DV_FROM_HANDLE(v3dv_cmd_buffer, primary, commandBuffer);
1743
1744
if (primary->state.pass != NULL) {
1745
v3dv_X(primary->device, cmd_buffer_execute_inside_pass)
1746
(primary, commandBufferCount, pCommandBuffers);
1747
} else {
1748
cmd_buffer_execute_outside_pass(primary,
1749
commandBufferCount, pCommandBuffers);
1750
}
1751
}
1752
1753
/* This goes through the list of possible dynamic states in the pipeline and,
* for those that are not configured as dynamic, copies the relevant state into
* the command buffer.
*/
1757
static void
1758
cmd_buffer_bind_pipeline_static_state(struct v3dv_cmd_buffer *cmd_buffer,
1759
const struct v3dv_dynamic_state *src)
1760
{
1761
struct v3dv_dynamic_state *dest = &cmd_buffer->state.dynamic;
1762
uint32_t dynamic_mask = src->mask;
1763
uint32_t dirty = 0;
1764
1765
if (!(dynamic_mask & V3DV_DYNAMIC_VIEWPORT)) {
1766
dest->viewport.count = src->viewport.count;
1767
if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
1768
src->viewport.count * sizeof(VkViewport))) {
1769
typed_memcpy(dest->viewport.viewports,
1770
src->viewport.viewports,
1771
src->viewport.count);
1772
typed_memcpy(dest->viewport.scale, src->viewport.scale,
1773
src->viewport.count);
1774
typed_memcpy(dest->viewport.translate, src->viewport.translate,
1775
src->viewport.count);
1776
dirty |= V3DV_CMD_DIRTY_VIEWPORT;
1777
}
1778
}
1779
1780
if (!(dynamic_mask & V3DV_DYNAMIC_SCISSOR)) {
1781
dest->scissor.count = src->scissor.count;
1782
if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
1783
src->scissor.count * sizeof(VkRect2D))) {
1784
typed_memcpy(dest->scissor.scissors,
1785
src->scissor.scissors, src->scissor.count);
1786
dirty |= V3DV_CMD_DIRTY_SCISSOR;
1787
}
1788
}
1789
1790
if (!(dynamic_mask & V3DV_DYNAMIC_STENCIL_COMPARE_MASK)) {
1791
if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
1792
sizeof(src->stencil_compare_mask))) {
1793
dest->stencil_compare_mask = src->stencil_compare_mask;
1794
dirty |= V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK;
1795
}
1796
}
1797
1798
if (!(dynamic_mask & V3DV_DYNAMIC_STENCIL_WRITE_MASK)) {
1799
if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
1800
sizeof(src->stencil_write_mask))) {
1801
dest->stencil_write_mask = src->stencil_write_mask;
1802
dirty |= V3DV_CMD_DIRTY_STENCIL_WRITE_MASK;
1803
}
1804
}
1805
1806
if (!(dynamic_mask & V3DV_DYNAMIC_STENCIL_REFERENCE)) {
1807
if (memcmp(&dest->stencil_reference, &src->stencil_reference,
1808
sizeof(src->stencil_reference))) {
1809
dest->stencil_reference = src->stencil_reference;
1810
dirty |= V3DV_CMD_DIRTY_STENCIL_REFERENCE;
1811
}
1812
}
1813
1814
if (!(dynamic_mask & V3DV_DYNAMIC_BLEND_CONSTANTS)) {
1815
if (memcmp(dest->blend_constants, src->blend_constants,
1816
sizeof(src->blend_constants))) {
1817
memcpy(dest->blend_constants, src->blend_constants,
1818
sizeof(src->blend_constants));
1819
dirty |= V3DV_CMD_DIRTY_BLEND_CONSTANTS;
1820
}
1821
}
1822
1823
if (!(dynamic_mask & V3DV_DYNAMIC_DEPTH_BIAS)) {
1824
if (memcmp(&dest->depth_bias, &src->depth_bias,
1825
sizeof(src->depth_bias))) {
1826
memcpy(&dest->depth_bias, &src->depth_bias, sizeof(src->depth_bias));
1827
dirty |= V3DV_CMD_DIRTY_DEPTH_BIAS;
1828
}
1829
}
1830
1831
if (!(dynamic_mask & V3DV_DYNAMIC_LINE_WIDTH)) {
1832
if (dest->line_width != src->line_width) {
1833
dest->line_width = src->line_width;
1834
dirty |= V3DV_CMD_DIRTY_LINE_WIDTH;
1835
}
1836
}
1837
1838
cmd_buffer->state.dynamic.mask = dynamic_mask;
1839
cmd_buffer->state.dirty |= dirty;
1840
}
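/* Illustrative sketch (not part of the driver): if a pipeline is created
* with VK_DYNAMIC_STATE_VIEWPORT in pDynamicStates, the corresponding
* V3DV_DYNAMIC_VIEWPORT bit is expected to be set in
* pipeline->dynamic_state.mask, so the viewport copy above is skipped and
* whatever the application last programmed with vkCmdSetViewport() remains
* in effect. All state not flagged as dynamic is overwritten from the
* pipeline and the matching dirty bits are raised.
*/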
1841
1842
static void
1843
bind_graphics_pipeline(struct v3dv_cmd_buffer *cmd_buffer,
1844
struct v3dv_pipeline *pipeline)
1845
{
1846
assert(pipeline && !(pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT));
1847
if (cmd_buffer->state.gfx.pipeline == pipeline)
1848
return;
1849
1850
cmd_buffer->state.gfx.pipeline = pipeline;
1851
1852
cmd_buffer_bind_pipeline_static_state(cmd_buffer, &pipeline->dynamic_state);
1853
1854
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_PIPELINE;
1855
}
1856
1857
static void
1858
bind_compute_pipeline(struct v3dv_cmd_buffer *cmd_buffer,
1859
struct v3dv_pipeline *pipeline)
1860
{
1861
assert(pipeline && pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
1862
1863
if (cmd_buffer->state.compute.pipeline == pipeline)
1864
return;
1865
1866
cmd_buffer->state.compute.pipeline = pipeline;
1867
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_COMPUTE_PIPELINE;
1868
}
1869
1870
VKAPI_ATTR void VKAPI_CALL
1871
v3dv_CmdBindPipeline(VkCommandBuffer commandBuffer,
1872
VkPipelineBindPoint pipelineBindPoint,
1873
VkPipeline _pipeline)
1874
{
1875
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1876
V3DV_FROM_HANDLE(v3dv_pipeline, pipeline, _pipeline);
1877
1878
switch (pipelineBindPoint) {
1879
case VK_PIPELINE_BIND_POINT_COMPUTE:
1880
bind_compute_pipeline(cmd_buffer, pipeline);
1881
break;
1882
1883
case VK_PIPELINE_BIND_POINT_GRAPHICS:
1884
bind_graphics_pipeline(cmd_buffer, pipeline);
1885
break;
1886
1887
default:
1888
assert(!"invalid bind point");
1889
break;
1890
}
1891
}
1892
1893
/* FIXME: C&P from radv. tu has similar code. Perhaps common place? */
1894
void
1895
v3dv_viewport_compute_xform(const VkViewport *viewport,
1896
float scale[3],
1897
float translate[3])
1898
{
1899
float x = viewport->x;
1900
float y = viewport->y;
1901
float half_width = 0.5f * viewport->width;
1902
float half_height = 0.5f * viewport->height;
1903
double n = viewport->minDepth;
1904
double f = viewport->maxDepth;
1905
1906
scale[0] = half_width;
1907
translate[0] = half_width + x;
1908
scale[1] = half_height;
1909
translate[1] = half_height + y;
1910
1911
scale[2] = (f - n);
1912
translate[2] = n;
1913
1914
/* It seems that if the scale is small enough the hardware won't clip
* correctly, so we work around this by choosing the smallest scale that
* seems to work.
*
* This case is exercised by CTS:
* dEQP-VK.draw.inverted_depth_ranges.nodepthclamp_deltazero
*/
1921
const float min_abs_scale = 0.000009f;
1922
if (fabs(scale[2]) < min_abs_scale)
1923
scale[2] = min_abs_scale * (scale[2] < 0 ? -1.0f : 1.0f);
1924
}
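/* Illustrative sketch (not part of the driver): for a hypothetical 800x600
* viewport at the origin with a [0, 1] depth range, the transform above
* reduces to half-extent scale and offset values:
*
*   VkViewport vp = { .x = 0.0f, .y = 0.0f,
*                     .width = 800.0f, .height = 600.0f,
*                     .minDepth = 0.0f, .maxDepth = 1.0f };
*   float scale[3], translate[3];
*   v3dv_viewport_compute_xform(&vp, scale, translate);
*   // scale     = { 400.0f, 300.0f, 1.0f }
*   // translate = { 400.0f, 300.0f, 0.0f }
*/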
1925
1926
VKAPI_ATTR void VKAPI_CALL
1927
v3dv_CmdSetViewport(VkCommandBuffer commandBuffer,
1928
uint32_t firstViewport,
1929
uint32_t viewportCount,
1930
const VkViewport *pViewports)
1931
{
1932
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1933
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1934
const uint32_t total_count = firstViewport + viewportCount;
1935
1936
assert(firstViewport < MAX_VIEWPORTS);
1937
assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
1938
1939
if (state->dynamic.viewport.count < total_count)
1940
state->dynamic.viewport.count = total_count;
1941
1942
if (!memcmp(state->dynamic.viewport.viewports + firstViewport,
1943
pViewports, viewportCount * sizeof(*pViewports))) {
1944
return;
1945
}
1946
1947
memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
1948
viewportCount * sizeof(*pViewports));
1949
1950
for (uint32_t i = firstViewport; i < total_count; i++) {
1951
v3dv_viewport_compute_xform(&state->dynamic.viewport.viewports[i],
1952
state->dynamic.viewport.scale[i],
1953
state->dynamic.viewport.translate[i]);
1954
}
1955
1956
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_VIEWPORT;
1957
}
1958
1959
VKAPI_ATTR void VKAPI_CALL
1960
v3dv_CmdSetScissor(VkCommandBuffer commandBuffer,
1961
uint32_t firstScissor,
1962
uint32_t scissorCount,
1963
const VkRect2D *pScissors)
1964
{
1965
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1966
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1967
1968
assert(firstScissor < MAX_SCISSORS);
1969
assert(firstScissor + scissorCount >= 1 &&
1970
firstScissor + scissorCount <= MAX_SCISSORS);
1971
1972
if (state->dynamic.scissor.count < firstScissor + scissorCount)
1973
state->dynamic.scissor.count = firstScissor + scissorCount;
1974
1975
if (!memcmp(state->dynamic.scissor.scissors + firstScissor,
1976
pScissors, scissorCount * sizeof(*pScissors))) {
1977
return;
1978
}
1979
1980
memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
1981
scissorCount * sizeof(*pScissors));
1982
1983
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_SCISSOR;
1984
}
1985
1986
static void
1987
emit_scissor(struct v3dv_cmd_buffer *cmd_buffer)
1988
{
1989
if (cmd_buffer->state.dynamic.viewport.count == 0)
1990
return;
1991
1992
struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
1993
1994
/* FIXME: right now we only support one viewport. viewports[0] would work
* now, but would need to change if we allow multiple viewports.
*/
1997
float *vptranslate = dynamic->viewport.translate[0];
1998
float *vpscale = dynamic->viewport.scale[0];
1999
2000
float vp_minx = -fabsf(vpscale[0]) + vptranslate[0];
2001
float vp_maxx = fabsf(vpscale[0]) + vptranslate[0];
2002
float vp_miny = -fabsf(vpscale[1]) + vptranslate[1];
2003
float vp_maxy = fabsf(vpscale[1]) + vptranslate[1];
2004
2005
/* Quoting from v3dx_emit:
2006
* "Clip to the scissor if it's enabled, but still clip to the
2007
* drawable regardless since that controls where the binner
2008
* tries to put things.
2009
*
2010
* Additionally, always clip the rendering to the viewport,
2011
* since the hardware does guardband clipping, meaning
2012
* primitives would rasterize outside of the view volume."
2013
*/
2014
uint32_t minx, miny, maxx, maxy;
2015
2016
/* From the Vulkan spec:
2017
*
2018
* "The application must ensure (using scissor if necessary) that all
2019
* rendering is contained within the render area. The render area must be
2020
* contained within the framebuffer dimensions."
2021
*
2022
* So it is the application's responsibility to ensure this. Still, we can
2023
* help by automatically restricting the scissor rect to the render area.
2024
*/
2025
minx = MAX2(vp_minx, cmd_buffer->state.render_area.offset.x);
2026
miny = MAX2(vp_miny, cmd_buffer->state.render_area.offset.y);
2027
maxx = MIN2(vp_maxx, cmd_buffer->state.render_area.offset.x +
2028
cmd_buffer->state.render_area.extent.width);
2029
maxy = MIN2(vp_maxy, cmd_buffer->state.render_area.offset.y +
2030
cmd_buffer->state.render_area.extent.height);
2031
2032
minx = vp_minx;
2033
miny = vp_miny;
2034
maxx = vp_maxx;
2035
maxy = vp_maxy;
2036
2037
/* Clip against user provided scissor if needed.
2038
*
2039
* FIXME: right now we only allow one scissor. Below would need to be
2040
* updated if we support more
2041
*/
2042
if (dynamic->scissor.count > 0) {
2043
VkRect2D *scissor = &dynamic->scissor.scissors[0];
2044
minx = MAX2(minx, scissor->offset.x);
2045
miny = MAX2(miny, scissor->offset.y);
2046
maxx = MIN2(maxx, scissor->offset.x + scissor->extent.width);
2047
maxy = MIN2(maxy, scissor->offset.y + scissor->extent.height);
2048
}
2049
2050
/* If the scissor is outside the viewport area we end up with
2051
* min{x,y} > max{x,y}.
2052
*/
2053
if (minx > maxx)
2054
maxx = minx;
2055
if (miny > maxy)
2056
maxy = miny;
2057
2058
cmd_buffer->state.clip_window.offset.x = minx;
2059
cmd_buffer->state.clip_window.offset.y = miny;
2060
cmd_buffer->state.clip_window.extent.width = maxx - minx;
2061
cmd_buffer->state.clip_window.extent.height = maxy - miny;
2062
2063
v3dv_X(cmd_buffer->device, job_emit_clip_window)
2064
(cmd_buffer->state.job, &cmd_buffer->state.clip_window);
2065
2066
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_SCISSOR;
2067
}
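/* Illustrative sketch (not part of the driver): with a hypothetical 800x600
* viewport at the origin (vp_minx = vp_miny = 0, vp_maxx = 800, vp_maxy = 600)
* and a 200x200 scissor at offset (100, 100), the clamping above yields:
*
*   minx = MAX2(0, 100)   = 100      miny = MAX2(0, 100)   = 100
*   maxx = MIN2(800, 300) = 300      maxy = MIN2(600, 300) = 300
*
* so the clip window is programmed with offset (100, 100) and extent
* 200x200.
*/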
2068
2069
static void
2070
update_gfx_uniform_state(struct v3dv_cmd_buffer *cmd_buffer,
2071
uint32_t dirty_uniform_state)
2072
{
2073
/* We need to update uniform streams if any piece of state that is passed
2074
* to the shader as a uniform may have changed.
2075
*
2076
* If only descriptor sets are dirty then we can safely ignore updates
2077
* for shader stages that don't access descriptors.
2078
*/
2079
2080
struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
2081
assert(pipeline);
2082
2083
const bool has_new_pipeline = dirty_uniform_state & V3DV_CMD_DIRTY_PIPELINE;
2084
const bool has_new_viewport = dirty_uniform_state & V3DV_CMD_DIRTY_VIEWPORT;
2085
const bool has_new_push_constants = dirty_uniform_state & V3DV_CMD_DIRTY_PUSH_CONSTANTS;
2086
const bool has_new_descriptors = dirty_uniform_state & V3DV_CMD_DIRTY_DESCRIPTOR_SETS;
2087
2088
/* VK_SHADER_STAGE_FRAGMENT_BIT */
2089
const bool has_new_descriptors_fs =
2090
has_new_descriptors &&
2091
(cmd_buffer->state.dirty_descriptor_stages & VK_SHADER_STAGE_FRAGMENT_BIT);
2092
2093
const bool has_new_push_constants_fs =
2094
has_new_push_constants &&
2095
(cmd_buffer->state.dirty_push_constants_stages & VK_SHADER_STAGE_FRAGMENT_BIT);
2096
2097
const bool needs_fs_update = has_new_pipeline ||
2098
has_new_push_constants_fs ||
2099
has_new_descriptors_fs;
2100
2101
if (needs_fs_update) {
2102
struct v3dv_shader_variant *fs_variant =
2103
pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
2104
2105
cmd_buffer->state.uniforms.fs =
2106
v3dv_write_uniforms(cmd_buffer, pipeline, fs_variant);
2107
}
2108
2109
/* VK_SHADER_STAGE_GEOMETRY_BIT */
2110
if (pipeline->has_gs) {
2111
const bool has_new_descriptors_gs =
2112
has_new_descriptors &&
2113
(cmd_buffer->state.dirty_descriptor_stages &
2114
VK_SHADER_STAGE_GEOMETRY_BIT);
2115
2116
const bool has_new_push_constants_gs =
2117
has_new_push_constants &&
2118
(cmd_buffer->state.dirty_push_constants_stages &
2119
VK_SHADER_STAGE_GEOMETRY_BIT);
2120
2121
const bool needs_gs_update = has_new_viewport ||
2122
has_new_pipeline ||
2123
has_new_push_constants_gs ||
2124
has_new_descriptors_gs;
2125
2126
if (needs_gs_update) {
2127
struct v3dv_shader_variant *gs_variant =
2128
pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY];
2129
2130
struct v3dv_shader_variant *gs_bin_variant =
2131
pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY_BIN];
2132
2133
cmd_buffer->state.uniforms.gs =
2134
v3dv_write_uniforms(cmd_buffer, pipeline, gs_variant);
2135
2136
cmd_buffer->state.uniforms.gs_bin =
2137
v3dv_write_uniforms(cmd_buffer, pipeline, gs_bin_variant);
2138
}
2139
}
2140
2141
/* VK_SHADER_STAGE_VERTEX_BIT */
2142
const bool has_new_descriptors_vs =
2143
has_new_descriptors &&
2144
(cmd_buffer->state.dirty_descriptor_stages & VK_SHADER_STAGE_VERTEX_BIT);
2145
2146
const bool has_new_push_constants_vs =
2147
has_new_push_constants &&
2148
(cmd_buffer->state.dirty_push_constants_stages & VK_SHADER_STAGE_VERTEX_BIT);
2149
2150
const bool needs_vs_update = has_new_viewport ||
2151
has_new_pipeline ||
2152
has_new_push_constants_vs ||
2153
has_new_descriptors_vs;
2154
2155
if (needs_vs_update) {
2156
struct v3dv_shader_variant *vs_variant =
2157
pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX];
2158
2159
struct v3dv_shader_variant *vs_bin_variant =
2160
pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN];
2161
2162
cmd_buffer->state.uniforms.vs =
2163
v3dv_write_uniforms(cmd_buffer, pipeline, vs_variant);
2164
2165
cmd_buffer->state.uniforms.vs_bin =
2166
v3dv_write_uniforms(cmd_buffer, pipeline, vs_bin_variant);
2167
}
2168
}
2169
2170
/* This stores command buffer state that we might be about to stomp for
2171
* a meta operation.
2172
*/
2173
void
2174
v3dv_cmd_buffer_meta_state_push(struct v3dv_cmd_buffer *cmd_buffer,
2175
bool push_descriptor_state)
2176
{
2177
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
2178
2179
if (state->subpass_idx != -1) {
2180
state->meta.subpass_idx = state->subpass_idx;
2181
state->meta.framebuffer = v3dv_framebuffer_to_handle(state->framebuffer);
2182
state->meta.pass = v3dv_render_pass_to_handle(state->pass);
2183
2184
const uint32_t attachment_state_item_size =
2185
sizeof(struct v3dv_cmd_buffer_attachment_state);
2186
const uint32_t attachment_state_total_size =
2187
attachment_state_item_size * state->attachment_alloc_count;
2188
if (state->meta.attachment_alloc_count < state->attachment_alloc_count) {
2189
if (state->meta.attachment_alloc_count > 0)
2190
vk_free(&cmd_buffer->device->vk.alloc, state->meta.attachments);
2191
2192
state->meta.attachments = vk_zalloc(&cmd_buffer->device->vk.alloc,
2193
attachment_state_total_size, 8,
2194
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2195
if (!state->meta.attachments) {
2196
v3dv_flag_oom(cmd_buffer, NULL);
2197
return;
2198
}
2199
state->meta.attachment_alloc_count = state->attachment_alloc_count;
2200
}
2201
state->meta.attachment_count = state->attachment_alloc_count;
2202
memcpy(state->meta.attachments, state->attachments,
2203
attachment_state_total_size);
2204
2205
state->meta.tile_aligned_render_area = state->tile_aligned_render_area;
2206
memcpy(&state->meta.render_area, &state->render_area, sizeof(VkRect2D));
2207
}
2208
2209
/* We expect that meta operations are graphics-only, so we only take into
* account the graphics pipeline and the graphics state.
*/
2212
state->meta.gfx.pipeline = state->gfx.pipeline;
2213
memcpy(&state->meta.dynamic, &state->dynamic, sizeof(state->dynamic));
2214
2215
struct v3dv_descriptor_state *gfx_descriptor_state =
2216
&cmd_buffer->state.gfx.descriptor_state;
2217
2218
if (push_descriptor_state) {
2219
if (gfx_descriptor_state->valid != 0) {
2220
memcpy(&state->meta.gfx.descriptor_state, gfx_descriptor_state,
2221
sizeof(state->gfx.descriptor_state));
2222
}
2223
state->meta.has_descriptor_state = true;
2224
} else {
2225
state->meta.has_descriptor_state = false;
2226
}
2227
2228
/* FIXME: if we kept track of whether we have bound any push constant state
* at all, we could restrict this to only the cases where it is actually
* necessary.
*/
2232
memcpy(state->meta.push_constants, cmd_buffer->push_constants_data,
2233
sizeof(state->meta.push_constants));
2234
}
2235
2236
/* This restores command buffer state after a meta operation
2237
*/
2238
void
2239
v3dv_cmd_buffer_meta_state_pop(struct v3dv_cmd_buffer *cmd_buffer,
2240
uint32_t dirty_dynamic_state,
2241
bool needs_subpass_resume)
2242
{
2243
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
2244
2245
if (state->meta.subpass_idx != -1) {
2246
state->pass = v3dv_render_pass_from_handle(state->meta.pass);
2247
state->framebuffer = v3dv_framebuffer_from_handle(state->meta.framebuffer);
2248
2249
assert(state->meta.attachment_count <= state->attachment_alloc_count);
2250
const uint32_t attachment_state_item_size =
2251
sizeof(struct v3dv_cmd_buffer_attachment_state);
2252
const uint32_t attachment_state_total_size =
2253
attachment_state_item_size * state->meta.attachment_count;
2254
memcpy(state->attachments, state->meta.attachments,
2255
attachment_state_total_size);
2256
2257
state->tile_aligned_render_area = state->meta.tile_aligned_render_area;
2258
memcpy(&state->render_area, &state->meta.render_area, sizeof(VkRect2D));
2259
2260
/* If needs_subpass_resume is true it means that we emitted the meta
* operation in its own job (possibly with an RT config that is
* incompatible with the current subpass), so resuming subpass execution
* after it requires that we create a new job with the subpass RT setup.
*/
2265
if (needs_subpass_resume)
2266
v3dv_cmd_buffer_subpass_resume(cmd_buffer, state->meta.subpass_idx);
2267
} else {
2268
state->subpass_idx = -1;
2269
}
2270
2271
if (state->meta.gfx.pipeline != NULL) {
2272
struct v3dv_pipeline *pipeline = state->meta.gfx.pipeline;
2273
VkPipelineBindPoint pipeline_binding =
2274
v3dv_pipeline_get_binding_point(pipeline);
2275
v3dv_CmdBindPipeline(v3dv_cmd_buffer_to_handle(cmd_buffer),
2276
pipeline_binding,
2277
v3dv_pipeline_to_handle(state->meta.gfx.pipeline));
2278
} else {
2279
state->gfx.pipeline = NULL;
2280
}
2281
2282
if (dirty_dynamic_state) {
2283
memcpy(&state->dynamic, &state->meta.dynamic, sizeof(state->dynamic));
2284
state->dirty |= dirty_dynamic_state;
2285
}
2286
2287
if (state->meta.has_descriptor_state) {
2288
if (state->meta.gfx.descriptor_state.valid != 0) {
2289
memcpy(&state->gfx.descriptor_state, &state->meta.gfx.descriptor_state,
2290
sizeof(state->gfx.descriptor_state));
2291
} else {
2292
state->gfx.descriptor_state.valid = 0;
2293
}
2294
}
2295
2296
memcpy(cmd_buffer->push_constants_data, state->meta.push_constants,
2297
sizeof(state->meta.push_constants));
2298
2299
state->meta.gfx.pipeline = NULL;
2300
state->meta.framebuffer = VK_NULL_HANDLE;
2301
state->meta.pass = VK_NULL_HANDLE;
2302
state->meta.subpass_idx = -1;
2303
state->meta.has_descriptor_state = false;
2304
}
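/* Illustrative sketch (not part of the driver): a meta operation is expected
* to bracket its state changes with the two helpers above. The steps in
* between are hypothetical placeholders:
*
*   v3dv_cmd_buffer_meta_state_push(cmd_buffer, true);
*   // ... bind a private meta pipeline, descriptor state and dynamic
*   //     state, then record the meta draw or copy job ...
*   v3dv_cmd_buffer_meta_state_pop(cmd_buffer,
*                                  V3DV_CMD_DIRTY_VIEWPORT |
*                                  V3DV_CMD_DIRTY_SCISSOR,
*                                  true);
*
* where the dirty mask names the dynamic state the meta operation clobbered
* and the final argument requests a subpass resume because the meta job was
* emitted as its own job.
*/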
2305
2306
static struct v3dv_job *
2307
cmd_buffer_pre_draw_split_job(struct v3dv_cmd_buffer *cmd_buffer)
2308
{
2309
struct v3dv_job *job = cmd_buffer->state.job;
2310
assert(job);
2311
2312
/* If the job has been flagged with 'always_flush' and it has already
2313
* recorded any draw calls then we need to start a new job for it.
2314
*/
2315
if (job->always_flush && job->draw_count > 0) {
2316
assert(cmd_buffer->state.pass);
2317
/* First, flag the current job as not being the last in the
2318
* current subpass
2319
*/
2320
job->is_subpass_finish = false;
2321
2322
/* Now start a new job in the same subpass and flag it as continuing
2323
* the current subpass.
2324
*/
2325
job = v3dv_cmd_buffer_subpass_resume(cmd_buffer,
2326
cmd_buffer->state.subpass_idx);
2327
assert(job->draw_count == 0);
2328
2329
/* Inherit the 'always flush' behavior */
2330
job->always_flush = true;
2331
}
2332
2333
assert(job->draw_count == 0 || !job->always_flush);
2334
return job;
2335
}
2336
2337
/**
2338
* The Vulkan spec states:
2339
*
2340
* "It is legal for a subpass to use no color or depth/stencil
2341
* attachments (...) This kind of subpass can use shader side effects such
2342
* as image stores and atomics to produce an output. In this case, the
2343
* subpass continues to use the width, height, and layers of the framebuffer
2344
* to define the dimensions of the rendering area, and the
2345
* rasterizationSamples from each pipeline’s
2346
* VkPipelineMultisampleStateCreateInfo to define the number of samples used
2347
* in rasterization."
2348
*
2349
* We need to enable MSAA in the TILE_BINNING_MODE_CFG packet, which we
2350
* emit when we start a new frame at the beginning of a subpass. At that point,
2351
* if the framebuffer doesn't have any attachments we won't enable MSAA and
2352
* the job won't be valid in the scenario described by the spec.
2353
*
2354
* This function is intended to be called before a draw call and will test if
2355
* we are in that scenario, in which case, it will restart the current job
2356
* with MSAA enabled.
2357
*/
2358
static void
2359
cmd_buffer_restart_job_for_msaa_if_needed(struct v3dv_cmd_buffer *cmd_buffer)
2360
{
2361
assert(cmd_buffer->state.job);
2362
2363
/* We don't support variableMultisampleRate, so we know that all pipelines
* bound in the same subpass must have a matching number of samples, which
* means we only need to do this check on the first draw call.
*/
2367
if (cmd_buffer->state.job->draw_count > 0)
2368
return;
2369
2370
/* We only need to restart the frame if the pipeline requires MSAA but
2371
* our frame tiling didn't enable it.
2372
*/
2373
if (!cmd_buffer->state.gfx.pipeline->msaa ||
2374
cmd_buffer->state.job->frame_tiling.msaa) {
2375
return;
2376
}
2377
2378
/* FIXME: Secondary command buffers don't start frames. Instead, they are
2379
* recorded into primary jobs that start them. For secondaries, we should
2380
* still handle this scenario, but we should do that when we record them
2381
* into primaries by testing if any of the secondaries has multisampled
2382
* draw calls in them, and then using that info to decide if we need to
2383
* restart the primary job into which they are being recorded.
2384
*/
2385
if (cmd_buffer->level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
2386
return;
2387
2388
/* Drop the current job and restart it with MSAA enabled */
2389
struct v3dv_job *old_job = cmd_buffer->state.job;
2390
cmd_buffer->state.job = NULL;
2391
2392
struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->vk.alloc,
2393
sizeof(struct v3dv_job), 8,
2394
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2395
if (!job) {
2396
v3dv_flag_oom(cmd_buffer, NULL);
2397
return;
2398
}
2399
2400
v3dv_job_init(job, V3DV_JOB_TYPE_GPU_CL, cmd_buffer->device, cmd_buffer,
2401
cmd_buffer->state.subpass_idx);
2402
cmd_buffer->state.job = job;
2403
2404
v3dv_job_start_frame(job,
2405
old_job->frame_tiling.width,
2406
old_job->frame_tiling.height,
2407
old_job->frame_tiling.layers,
2408
old_job->frame_tiling.render_target_count,
2409
old_job->frame_tiling.internal_bpp,
2410
true /* msaa */);
2411
2412
v3dv_job_destroy(old_job);
2413
}
2414
2415
void
2416
v3dv_cmd_buffer_emit_pre_draw(struct v3dv_cmd_buffer *cmd_buffer)
2417
{
2418
assert(cmd_buffer->state.gfx.pipeline);
2419
assert(!(cmd_buffer->state.gfx.pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT));
2420
2421
/* If we emitted a pipeline barrier right before this draw we won't have
2422
* an active job. In that case, create a new job continuing the current
2423
* subpass.
2424
*/
2425
if (!cmd_buffer->state.job) {
2426
v3dv_cmd_buffer_subpass_resume(cmd_buffer,
2427
cmd_buffer->state.subpass_idx);
2428
}
2429
2430
/* Restart single sample job for MSAA pipeline if needed */
2431
cmd_buffer_restart_job_for_msaa_if_needed(cmd_buffer);
2432
2433
/* If the job is configured to flush on every draw call we need to create
2434
* a new job now.
2435
*/
2436
struct v3dv_job *job = cmd_buffer_pre_draw_split_job(cmd_buffer);
2437
job->draw_count++;
2438
2439
/* GL shader state binds shaders, uniform and vertex attribute state. The
* compiler injects uniforms to handle some descriptor types (such as
* textures), so we need to regenerate that state when descriptor state
* changes.
*
* We also need to emit new shader state if we have a dirty viewport, since
* that will require that we emit new uniform state for QUNIFORM_VIEWPORT_*.
*/
2446
uint32_t *dirty = &cmd_buffer->state.dirty;
2447
2448
const uint32_t dirty_uniform_state =
2449
*dirty & (V3DV_CMD_DIRTY_PIPELINE |
2450
V3DV_CMD_DIRTY_PUSH_CONSTANTS |
2451
V3DV_CMD_DIRTY_DESCRIPTOR_SETS |
2452
V3DV_CMD_DIRTY_VIEWPORT);
2453
2454
if (dirty_uniform_state)
2455
update_gfx_uniform_state(cmd_buffer, dirty_uniform_state);
2456
2457
struct v3dv_device *device = cmd_buffer->device;
2458
2459
if (dirty_uniform_state || (*dirty & V3DV_CMD_DIRTY_VERTEX_BUFFER))
2460
v3dv_X(device, cmd_buffer_emit_gl_shader_state)(cmd_buffer);
2461
2462
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE)) {
2463
v3dv_X(device, cmd_buffer_emit_configuration_bits)(cmd_buffer);
2464
v3dv_X(device, cmd_buffer_emit_varyings_state)(cmd_buffer);
2465
}
2466
2467
if (*dirty & (V3DV_CMD_DIRTY_VIEWPORT | V3DV_CMD_DIRTY_SCISSOR)) {
2468
emit_scissor(cmd_buffer);
2469
}
2470
2471
if (*dirty & V3DV_CMD_DIRTY_VIEWPORT) {
2472
v3dv_X(device, cmd_buffer_emit_viewport)(cmd_buffer);
2473
}
2474
2475
if (*dirty & V3DV_CMD_DIRTY_INDEX_BUFFER)
2476
v3dv_X(device, cmd_buffer_emit_index_buffer)(cmd_buffer);
2477
2478
const uint32_t dynamic_stencil_dirty_flags =
2479
V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK |
2480
V3DV_CMD_DIRTY_STENCIL_WRITE_MASK |
2481
V3DV_CMD_DIRTY_STENCIL_REFERENCE;
2482
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | dynamic_stencil_dirty_flags))
2483
v3dv_X(device, cmd_buffer_emit_stencil)(cmd_buffer);
2484
2485
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | V3DV_CMD_DIRTY_DEPTH_BIAS))
2486
v3dv_X(device, cmd_buffer_emit_depth_bias)(cmd_buffer);
2487
2488
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | V3DV_CMD_DIRTY_BLEND_CONSTANTS))
2489
v3dv_X(device, cmd_buffer_emit_blend)(cmd_buffer);
2490
2491
if (*dirty & V3DV_CMD_DIRTY_OCCLUSION_QUERY)
2492
v3dv_X(device, cmd_buffer_emit_occlusion_query)(cmd_buffer);
2493
2494
if (*dirty & V3DV_CMD_DIRTY_LINE_WIDTH)
2495
v3dv_X(device, cmd_buffer_emit_line_width)(cmd_buffer);
2496
2497
if (*dirty & V3DV_CMD_DIRTY_PIPELINE)
2498
v3dv_X(device, cmd_buffer_emit_sample_state)(cmd_buffer);
2499
2500
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_PIPELINE;
2501
}
2502
2503
static void
2504
cmd_buffer_draw(struct v3dv_cmd_buffer *cmd_buffer,
2505
struct v3dv_draw_info *info)
2506
{
2507
v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);
2508
v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw)(cmd_buffer, info);
2509
}
2510
2511
VKAPI_ATTR void VKAPI_CALL
2512
v3dv_CmdDraw(VkCommandBuffer commandBuffer,
2513
uint32_t vertexCount,
2514
uint32_t instanceCount,
2515
uint32_t firstVertex,
2516
uint32_t firstInstance)
2517
{
2518
if (vertexCount == 0 || instanceCount == 0)
2519
return;
2520
2521
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2522
struct v3dv_draw_info info = {};
2523
info.vertex_count = vertexCount;
2524
info.instance_count = instanceCount;
2525
info.first_instance = firstInstance;
2526
info.first_vertex = firstVertex;
2527
2528
cmd_buffer_draw(cmd_buffer, &info);
2529
}
2530
2531
VKAPI_ATTR void VKAPI_CALL
2532
v3dv_CmdDrawIndexed(VkCommandBuffer commandBuffer,
2533
uint32_t indexCount,
2534
uint32_t instanceCount,
2535
uint32_t firstIndex,
2536
int32_t vertexOffset,
2537
uint32_t firstInstance)
2538
{
2539
if (indexCount == 0 || instanceCount == 0)
2540
return;
2541
2542
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2543
2544
v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw_indexed)
2545
(cmd_buffer, indexCount, instanceCount,
2546
firstIndex, vertexOffset, firstInstance);
2547
}
2548
2549
VKAPI_ATTR void VKAPI_CALL
2550
v3dv_CmdDrawIndirect(VkCommandBuffer commandBuffer,
2551
VkBuffer _buffer,
2552
VkDeviceSize offset,
2553
uint32_t drawCount,
2554
uint32_t stride)
2555
{
2556
/* drawCount is the number of draws to execute, and can be zero. */
2557
if (drawCount == 0)
2558
return;
2559
2560
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2561
V3DV_FROM_HANDLE(v3dv_buffer, buffer, _buffer);
2562
2563
v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw_indirect)
2564
(cmd_buffer, buffer, offset, drawCount, stride);
2565
}
2566
2567
VKAPI_ATTR void VKAPI_CALL
2568
v3dv_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
2569
VkBuffer _buffer,
2570
VkDeviceSize offset,
2571
uint32_t drawCount,
2572
uint32_t stride)
2573
{
2574
/* drawCount is the number of draws to execute, and can be zero. */
2575
if (drawCount == 0)
2576
return;
2577
2578
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2579
V3DV_FROM_HANDLE(v3dv_buffer, buffer, _buffer);
2580
2581
v3dv_X(cmd_buffer->device, cmd_buffer_emit_indexed_indirect)
2582
(cmd_buffer, buffer, offset, drawCount, stride);
2583
}
2584
2585
VKAPI_ATTR void VKAPI_CALL
2586
v3dv_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
2587
VkPipelineStageFlags srcStageMask,
2588
VkPipelineStageFlags dstStageMask,
2589
VkDependencyFlags dependencyFlags,
2590
uint32_t memoryBarrierCount,
2591
const VkMemoryBarrier *pMemoryBarriers,
2592
uint32_t bufferBarrierCount,
2593
const VkBufferMemoryBarrier *pBufferBarriers,
2594
uint32_t imageBarrierCount,
2595
const VkImageMemoryBarrier *pImageBarriers)
2596
{
2597
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2598
2599
/* We only care about barriers between GPU jobs */
2600
if (srcStageMask == VK_PIPELINE_STAGE_HOST_BIT ||
2601
dstStageMask == VK_PIPELINE_STAGE_HOST_BIT) {
2602
return;
2603
}
2604
2605
/* If we have a recording job, finish it here */
2606
struct v3dv_job *job = cmd_buffer->state.job;
2607
if (job)
2608
v3dv_cmd_buffer_finish_job(cmd_buffer);
2609
2610
cmd_buffer->state.has_barrier = true;
2611
if (dstStageMask & (VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
2612
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
2613
VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
2614
VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
2615
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
2616
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT)) {
2617
cmd_buffer->state.has_bcl_barrier = true;
2618
}
2619
}
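/* Illustrative sketch (not part of the driver): a barrier whose destination
* is only the fragment stage, e.g.
*
*   vkCmdPipelineBarrier(commandBuffer,
*                        VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
*                        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
*                        0, 0, NULL, 0, NULL, 0, NULL);
*
* only sets has_barrier, while a destination mask that includes vertex
* input, vertex/geometry/tessellation shading or indirect draw parameters
* also sets has_bcl_barrier so that binning of the next job is serialized
* as well.
*/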
2620
2621
VKAPI_ATTR void VKAPI_CALL
2622
v3dv_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
2623
uint32_t firstBinding,
2624
uint32_t bindingCount,
2625
const VkBuffer *pBuffers,
2626
const VkDeviceSize *pOffsets)
2627
{
2628
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2629
struct v3dv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
2630
2631
/* We have to defer setting up the vertex buffer since we need the buffer
2632
* stride from the pipeline.
2633
*/
2634
2635
assert(firstBinding + bindingCount <= MAX_VBS);
2636
bool vb_state_changed = false;
2637
for (uint32_t i = 0; i < bindingCount; i++) {
2638
if (vb[firstBinding + i].buffer != v3dv_buffer_from_handle(pBuffers[i])) {
2639
vb[firstBinding + i].buffer = v3dv_buffer_from_handle(pBuffers[i]);
2640
vb_state_changed = true;
2641
}
2642
if (vb[firstBinding + i].offset != pOffsets[i]) {
2643
vb[firstBinding + i].offset = pOffsets[i];
2644
vb_state_changed = true;
2645
}
2646
}
2647
2648
if (vb_state_changed)
2649
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_VERTEX_BUFFER;
2650
}
2651
2652
static uint32_t
2653
get_index_size(VkIndexType index_type)
{
switch (index_type) {
case VK_INDEX_TYPE_UINT8_EXT:
return 1;
case VK_INDEX_TYPE_UINT16:
return 2;
case VK_INDEX_TYPE_UINT32:
return 4;
default:
unreachable("Unsupported index type");
}
}
2669
2670
VKAPI_ATTR void VKAPI_CALL
2671
v3dv_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
2672
VkBuffer buffer,
2673
VkDeviceSize offset,
2674
VkIndexType indexType)
2675
{
2676
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2677
2678
const uint32_t index_size = get_index_size(indexType);
2679
if (buffer == cmd_buffer->state.index_buffer.buffer &&
2680
offset == cmd_buffer->state.index_buffer.offset &&
2681
index_size == cmd_buffer->state.index_buffer.index_size) {
2682
return;
2683
}
2684
2685
cmd_buffer->state.index_buffer.buffer = buffer;
2686
cmd_buffer->state.index_buffer.offset = offset;
2687
cmd_buffer->state.index_buffer.index_size = index_size;
2688
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_INDEX_BUFFER;
2689
}
2690
2691
VKAPI_ATTR void VKAPI_CALL
2692
v3dv_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
2693
VkStencilFaceFlags faceMask,
2694
uint32_t compareMask)
2695
{
2696
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2697
2698
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2699
cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask & 0xff;
2700
if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2701
cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask & 0xff;
2702
2703
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK;
2704
}
2705
2706
VKAPI_ATTR void VKAPI_CALL
2707
v3dv_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
2708
VkStencilFaceFlags faceMask,
2709
uint32_t writeMask)
2710
{
2711
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2712
2713
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2714
cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask & 0xff;
2715
if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2716
cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask & 0xff;
2717
2718
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_STENCIL_WRITE_MASK;
2719
}
2720
2721
VKAPI_ATTR void VKAPI_CALL
2722
v3dv_CmdSetStencilReference(VkCommandBuffer commandBuffer,
2723
VkStencilFaceFlags faceMask,
2724
uint32_t reference)
2725
{
2726
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2727
2728
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2729
cmd_buffer->state.dynamic.stencil_reference.front = reference & 0xff;
2730
if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2731
cmd_buffer->state.dynamic.stencil_reference.back = reference & 0xff;
2732
2733
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_STENCIL_REFERENCE;
2734
}
2735
2736
VKAPI_ATTR void VKAPI_CALL
2737
v3dv_CmdSetDepthBias(VkCommandBuffer commandBuffer,
2738
float depthBiasConstantFactor,
2739
float depthBiasClamp,
2740
float depthBiasSlopeFactor)
2741
{
2742
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2743
2744
cmd_buffer->state.dynamic.depth_bias.constant_factor = depthBiasConstantFactor;
2745
cmd_buffer->state.dynamic.depth_bias.depth_bias_clamp = depthBiasClamp;
2746
cmd_buffer->state.dynamic.depth_bias.slope_factor = depthBiasSlopeFactor;
2747
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_DEPTH_BIAS;
2748
}
2749
2750
VKAPI_ATTR void VKAPI_CALL
2751
v3dv_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
2752
float minDepthBounds,
2753
float maxDepthBounds)
2754
{
2755
/* We do not support depth bounds testing so we just ignore this. We are
2756
* already asserting that pipelines don't enable the feature anyway.
2757
*/
2758
}
2759
2760
VKAPI_ATTR void VKAPI_CALL
2761
v3dv_CmdSetLineWidth(VkCommandBuffer commandBuffer,
2762
float lineWidth)
2763
{
2764
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2765
2766
cmd_buffer->state.dynamic.line_width = lineWidth;
2767
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_LINE_WIDTH;
2768
}
2769
2770
VKAPI_ATTR void VKAPI_CALL
2771
v3dv_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
2772
VkPipelineBindPoint pipelineBindPoint,
2773
VkPipelineLayout _layout,
2774
uint32_t firstSet,
2775
uint32_t descriptorSetCount,
2776
const VkDescriptorSet *pDescriptorSets,
2777
uint32_t dynamicOffsetCount,
2778
const uint32_t *pDynamicOffsets)
2779
{
2780
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2781
V3DV_FROM_HANDLE(v3dv_pipeline_layout, layout, _layout);
2782
2783
uint32_t dyn_index = 0;
2784
2785
assert(firstSet + descriptorSetCount <= MAX_SETS);
2786
2787
struct v3dv_descriptor_state *descriptor_state =
2788
pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE ?
2789
&cmd_buffer->state.compute.descriptor_state :
2790
&cmd_buffer->state.gfx.descriptor_state;
2791
2792
VkShaderStageFlags dirty_stages = 0;
2793
bool descriptor_state_changed = false;
2794
for (uint32_t i = 0; i < descriptorSetCount; i++) {
2795
V3DV_FROM_HANDLE(v3dv_descriptor_set, set, pDescriptorSets[i]);
2796
uint32_t index = firstSet + i;
2797
2798
descriptor_state->valid |= (1u << index);
2799
if (descriptor_state->descriptor_sets[index] != set) {
2800
descriptor_state->descriptor_sets[index] = set;
2801
dirty_stages |= set->layout->shader_stages;
2802
descriptor_state_changed = true;
2803
}
2804
2805
for (uint32_t j = 0; j < set->layout->dynamic_offset_count; j++, dyn_index++) {
2806
uint32_t idx = j + layout->set[i + firstSet].dynamic_offset_start;
2807
2808
if (descriptor_state->dynamic_offsets[idx] != pDynamicOffsets[dyn_index]) {
2809
descriptor_state->dynamic_offsets[idx] = pDynamicOffsets[dyn_index];
2810
dirty_stages |= set->layout->shader_stages;
2811
descriptor_state_changed = true;
2812
}
2813
}
2814
}
2815
2816
if (descriptor_state_changed) {
2817
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
2818
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_DESCRIPTOR_SETS;
2819
cmd_buffer->state.dirty_descriptor_stages |= dirty_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
2820
} else {
2821
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
2822
cmd_buffer->state.dirty_descriptor_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
2823
}
2824
}
2825
}
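/* Illustrative sketch (not part of the driver): with a hypothetical layout
* where set 0 has two dynamic uniform buffers and set 1 has one, binding
* set 1 with firstSet = 1 consumes pDynamicOffsets[0] and stores it at
* descriptor_state->dynamic_offsets[layout->set[1].dynamic_offset_start],
* i.e. dynamic offsets are kept flattened across the whole pipeline layout.
*/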
2826
2827
VKAPI_ATTR void VKAPI_CALL
2828
v3dv_CmdPushConstants(VkCommandBuffer commandBuffer,
2829
VkPipelineLayout layout,
2830
VkShaderStageFlags stageFlags,
2831
uint32_t offset,
2832
uint32_t size,
2833
const void *pValues)
2834
{
2835
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2836
2837
if (!memcmp((uint8_t *) cmd_buffer->push_constants_data + offset, pValues, size))
2838
return;
2839
2840
memcpy((uint8_t *) cmd_buffer->push_constants_data + offset, pValues, size);
2841
2842
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_PUSH_CONSTANTS;
2843
cmd_buffer->state.dirty_push_constants_stages |= stageFlags;
2844
}
2845
2846
VKAPI_ATTR void VKAPI_CALL
2847
v3dv_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
2848
const float blendConstants[4])
2849
{
2850
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2851
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
2852
2853
if (!memcmp(state->dynamic.blend_constants, blendConstants,
2854
sizeof(state->dynamic.blend_constants))) {
2855
return;
2856
}
2857
2858
memcpy(state->dynamic.blend_constants, blendConstants,
2859
sizeof(state->dynamic.blend_constants));
2860
2861
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_BLEND_CONSTANTS;
2862
}
2863
2864
void
2865
v3dv_cmd_buffer_reset_queries(struct v3dv_cmd_buffer *cmd_buffer,
2866
struct v3dv_query_pool *pool,
2867
uint32_t first,
2868
uint32_t count)
2869
{
2870
/* Resets can only happen outside a render pass instance so we should not
2871
* be in the middle of job recording.
2872
*/
2873
assert(cmd_buffer->state.pass == NULL);
2874
assert(cmd_buffer->state.job == NULL);
2875
2876
assert(first < pool->query_count);
2877
assert(first + count <= pool->query_count);
2878
2879
struct v3dv_job *job =
2880
v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
2881
V3DV_JOB_TYPE_CPU_RESET_QUERIES,
2882
cmd_buffer, -1);
2883
v3dv_return_if_oom(cmd_buffer, NULL);
2884
2885
job->cpu.query_reset.pool = pool;
2886
job->cpu.query_reset.first = first;
2887
job->cpu.query_reset.count = count;
2888
2889
list_addtail(&job->list_link, &cmd_buffer->jobs);
2890
}
2891
2892
void
2893
v3dv_cmd_buffer_ensure_array_state(struct v3dv_cmd_buffer *cmd_buffer,
2894
uint32_t slot_size,
2895
uint32_t used_count,
2896
uint32_t *alloc_count,
2897
void **ptr)
2898
{
2899
if (used_count >= *alloc_count) {
2900
const uint32_t prev_slot_count = *alloc_count;
2901
void *old_buffer = *ptr;
2902
2903
const uint32_t new_slot_count = MAX2(*alloc_count * 2, 4);
2904
const uint32_t bytes = new_slot_count * slot_size;
2905
*ptr = vk_alloc(&cmd_buffer->device->vk.alloc, bytes, 8,
2906
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2907
if (*ptr == NULL) {
2908
fprintf(stderr, "Error: failed to allocate CPU buffer for query.\n");
2909
v3dv_flag_oom(cmd_buffer, NULL);
2910
return;
2911
}
2912
2913
memcpy(*ptr, old_buffer, prev_slot_count * slot_size);
2914
*alloc_count = new_slot_count;
2915
}
2916
assert(used_count < *alloc_count);
2917
}
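/* Illustrative sketch (not part of the driver): the backing array grows
* geometrically, starting at 4 slots and doubling afterwards. For the
* query.end.states array used below that means:
*
*   used_count 0 -> alloc_count 4
*   used_count 4 -> alloc_count 8
*   used_count 8 -> alloc_count 16
*/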
2918
2919
void
2920
v3dv_cmd_buffer_begin_query(struct v3dv_cmd_buffer *cmd_buffer,
2921
struct v3dv_query_pool *pool,
2922
uint32_t query,
2923
VkQueryControlFlags flags)
2924
{
2925
/* FIXME: we only support one active query for now */
2926
assert(cmd_buffer->state.query.active_query.bo == NULL);
2927
assert(query < pool->query_count);
2928
2929
cmd_buffer->state.query.active_query.bo = pool->queries[query].bo;
2930
cmd_buffer->state.query.active_query.offset = pool->queries[query].offset;
2931
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_OCCLUSION_QUERY;
2932
}
2933
2934
void
2935
v3dv_cmd_buffer_end_query(struct v3dv_cmd_buffer *cmd_buffer,
2936
struct v3dv_query_pool *pool,
2937
uint32_t query)
2938
{
2939
assert(query < pool->query_count);
2940
assert(cmd_buffer->state.query.active_query.bo != NULL);
2941
2942
if (cmd_buffer->state.pass) {
2943
/* Queue the EndQuery in the command buffer state; we will create a CPU
* job to flag all of these queries as possibly available right after the
* render pass job in which they have been recorded.
*/
2947
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
2948
v3dv_cmd_buffer_ensure_array_state(cmd_buffer,
2949
sizeof(struct v3dv_end_query_cpu_job_info),
2950
state->query.end.used_count,
2951
&state->query.end.alloc_count,
2952
(void **) &state->query.end.states);
2953
v3dv_return_if_oom(cmd_buffer, NULL);
2954
2955
struct v3dv_end_query_cpu_job_info *info =
2956
&state->query.end.states[state->query.end.used_count++];
2957
2958
info->pool = pool;
2959
info->query = query;
2960
} else {
2961
/* Otherwise, schedule the CPU job immediately */
2962
struct v3dv_job *job =
2963
v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
2964
V3DV_JOB_TYPE_CPU_END_QUERY,
2965
cmd_buffer, -1);
2966
v3dv_return_if_oom(cmd_buffer, NULL);
2967
2968
job->cpu.query_end.pool = pool;
2969
job->cpu.query_end.query = query;
2970
list_addtail(&job->list_link, &cmd_buffer->jobs);
2971
}
2972
2973
cmd_buffer->state.query.active_query.bo = NULL;
2974
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_OCCLUSION_QUERY;
2975
}
2976
2977
void
2978
v3dv_cmd_buffer_copy_query_results(struct v3dv_cmd_buffer *cmd_buffer,
2979
struct v3dv_query_pool *pool,
2980
uint32_t first,
2981
uint32_t count,
2982
struct v3dv_buffer *dst,
2983
uint32_t offset,
2984
uint32_t stride,
2985
VkQueryResultFlags flags)
2986
{
2987
/* Copies can only happen outside a render pass instance so we should not
2988
* be in the middle of job recording.
2989
*/
2990
assert(cmd_buffer->state.pass == NULL);
2991
assert(cmd_buffer->state.job == NULL);
2992
2993
assert(first < pool->query_count);
2994
assert(first + count <= pool->query_count);
2995
2996
struct v3dv_job *job =
2997
v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
2998
V3DV_JOB_TYPE_CPU_COPY_QUERY_RESULTS,
2999
cmd_buffer, -1);
3000
v3dv_return_if_oom(cmd_buffer, NULL);
3001
3002
job->cpu.query_copy_results.pool = pool;
3003
job->cpu.query_copy_results.first = first;
3004
job->cpu.query_copy_results.count = count;
3005
job->cpu.query_copy_results.dst = dst;
3006
job->cpu.query_copy_results.offset = offset;
3007
job->cpu.query_copy_results.stride = stride;
3008
job->cpu.query_copy_results.flags = flags;
3009
3010
list_addtail(&job->list_link, &cmd_buffer->jobs);
3011
}
3012
3013
void
3014
v3dv_cmd_buffer_add_tfu_job(struct v3dv_cmd_buffer *cmd_buffer,
3015
struct drm_v3d_submit_tfu *tfu)
3016
{
3017
struct v3dv_device *device = cmd_buffer->device;
3018
struct v3dv_job *job = vk_zalloc(&device->vk.alloc,
3019
sizeof(struct v3dv_job), 8,
3020
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3021
if (!job) {
3022
v3dv_flag_oom(cmd_buffer, NULL);
3023
return;
3024
}
3025
3026
v3dv_job_init(job, V3DV_JOB_TYPE_GPU_TFU, device, cmd_buffer, -1);
3027
job->tfu = *tfu;
3028
list_addtail(&job->list_link, &cmd_buffer->jobs);
3029
}
3030
3031
VKAPI_ATTR void VKAPI_CALL
3032
v3dv_CmdSetEvent(VkCommandBuffer commandBuffer,
3033
VkEvent _event,
3034
VkPipelineStageFlags stageMask)
3035
{
3036
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
3037
V3DV_FROM_HANDLE(v3dv_event, event, _event);
3038
3039
/* Event (re)sets can only happen outside a render pass instance so we
3040
* should not be in the middle of job recording.
3041
*/
3042
assert(cmd_buffer->state.pass == NULL);
3043
assert(cmd_buffer->state.job == NULL);
3044
3045
struct v3dv_job *job =
3046
v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
3047
V3DV_JOB_TYPE_CPU_SET_EVENT,
3048
cmd_buffer, -1);
3049
v3dv_return_if_oom(cmd_buffer, NULL);
3050
3051
job->cpu.event_set.event = event;
3052
job->cpu.event_set.state = 1;
3053
3054
list_addtail(&job->list_link, &cmd_buffer->jobs);
3055
}
3056
3057
VKAPI_ATTR void VKAPI_CALL
3058
v3dv_CmdResetEvent(VkCommandBuffer commandBuffer,
3059
VkEvent _event,
3060
VkPipelineStageFlags stageMask)
3061
{
3062
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
3063
V3DV_FROM_HANDLE(v3dv_event, event, _event);
3064
3065
/* Event (re)sets can only happen outside a render pass instance so we
3066
* should not be in the middle of job recording.
3067
*/
3068
assert(cmd_buffer->state.pass == NULL);
3069
assert(cmd_buffer->state.job == NULL);
3070
3071
struct v3dv_job *job =
3072
v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
3073
V3DV_JOB_TYPE_CPU_SET_EVENT,
3074
cmd_buffer, -1);
3075
v3dv_return_if_oom(cmd_buffer, NULL);
3076
3077
job->cpu.event_set.event = event;
3078
job->cpu.event_set.state = 0;
3079
3080
list_addtail(&job->list_link, &cmd_buffer->jobs);
3081
}
3082
3083
VKAPI_ATTR void VKAPI_CALL
3084
v3dv_CmdWaitEvents(VkCommandBuffer commandBuffer,
3085
uint32_t eventCount,
3086
const VkEvent *pEvents,
3087
VkPipelineStageFlags srcStageMask,
3088
VkPipelineStageFlags dstStageMask,
3089
uint32_t memoryBarrierCount,
3090
const VkMemoryBarrier *pMemoryBarriers,
3091
uint32_t bufferMemoryBarrierCount,
3092
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3093
uint32_t imageMemoryBarrierCount,
3094
const VkImageMemoryBarrier *pImageMemoryBarriers)
3095
{
3096
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
3097
3098
assert(eventCount > 0);
3099
3100
struct v3dv_job *job =
3101
v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
3102
V3DV_JOB_TYPE_CPU_WAIT_EVENTS,
3103
cmd_buffer, -1);
3104
v3dv_return_if_oom(cmd_buffer, NULL);
3105
3106
const uint32_t event_list_size = sizeof(struct v3dv_event *) * eventCount;
3107
3108
job->cpu.event_wait.events =
3109
vk_alloc(&cmd_buffer->device->vk.alloc, event_list_size, 8,
3110
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3111
if (!job->cpu.event_wait.events) {
3112
v3dv_flag_oom(cmd_buffer, NULL);
3113
return;
3114
}
3115
job->cpu.event_wait.event_count = eventCount;
3116
3117
for (uint32_t i = 0; i < eventCount; i++)
3118
job->cpu.event_wait.events[i] = v3dv_event_from_handle(pEvents[i]);
3119
3120
/* vkCmdWaitEvents can be recorded inside a render pass, so we might have
3121
* an active job.
3122
*
3123
* If we are inside a render pass, because vkCmd(Re)SetEvent can't happen
3124
* inside a render pass, it is safe to move the wait job so it happens right
3125
* before the current job we are currently recording for the subpass, if any
3126
* (it would actually be safe to move it all the way back to right before
3127
* the start of the render pass).
3128
*
3129
* If we are outside a render pass then we should not have any on-going job
3130
* and we are free to just add the wait job without restrictions.
3131
*/
3132
assert(cmd_buffer->state.pass || !cmd_buffer->state.job);
3133
list_addtail(&job->list_link, &cmd_buffer->jobs);
3134
}
3135
3136
VKAPI_ATTR void VKAPI_CALL
3137
v3dv_CmdWriteTimestamp(VkCommandBuffer commandBuffer,
3138
VkPipelineStageFlagBits pipelineStage,
3139
VkQueryPool queryPool,
3140
uint32_t query)
3141
{
3142
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
3143
V3DV_FROM_HANDLE(v3dv_query_pool, query_pool, queryPool);
3144
3145
/* If this is called inside a render pass we need to finish the current
3146
* job here...
3147
*/
3148
if (cmd_buffer->state.pass)
3149
v3dv_cmd_buffer_finish_job(cmd_buffer);
3150
3151
struct v3dv_job *job =
3152
v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
3153
V3DV_JOB_TYPE_CPU_TIMESTAMP_QUERY,
3154
cmd_buffer, -1);
3155
v3dv_return_if_oom(cmd_buffer, NULL);
3156
3157
job->cpu.query_timestamp.pool = query_pool;
3158
job->cpu.query_timestamp.query = query;
3159
3160
list_addtail(&job->list_link, &cmd_buffer->jobs);
3161
cmd_buffer->state.job = NULL;
3162
3163
/* ...and resume the subpass after the timestamp */
3164
if (cmd_buffer->state.pass)
3165
v3dv_cmd_buffer_subpass_resume(cmd_buffer, cmd_buffer->state.subpass_idx);
3166
}
3167
3168
static void
3169
cmd_buffer_emit_pre_dispatch(struct v3dv_cmd_buffer *cmd_buffer)
3170
{
3171
assert(cmd_buffer->state.compute.pipeline);
3172
assert(cmd_buffer->state.compute.pipeline->active_stages ==
3173
VK_SHADER_STAGE_COMPUTE_BIT);
3174
3175
cmd_buffer->state.dirty &= ~(V3DV_CMD_DIRTY_COMPUTE_PIPELINE |
3176
V3DV_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS);
3177
cmd_buffer->state.dirty_descriptor_stages &= ~VK_SHADER_STAGE_COMPUTE_BIT;
3178
cmd_buffer->state.dirty_push_constants_stages &= ~VK_SHADER_STAGE_COMPUTE_BIT;
3179
}
3180
3181
#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16
3182
#define V3D_CSD_CFG012_WG_OFFSET_SHIFT 0
3183
/* Allow this dispatch to start while the last one is still running. */
3184
#define V3D_CSD_CFG3_OVERLAP_WITH_PREV (1 << 26)
3185
/* Maximum supergroup ID. 6 bits. */
3186
#define V3D_CSD_CFG3_MAX_SG_ID_SHIFT 20
3187
/* Batches per supergroup minus 1. 8 bits. */
3188
#define V3D_CSD_CFG3_BATCHES_PER_SG_M1_SHIFT 12
3189
/* Workgroups per supergroup, 0 means 16 */
3190
#define V3D_CSD_CFG3_WGS_PER_SG_SHIFT 8
3191
#define V3D_CSD_CFG3_WG_SIZE_SHIFT 0
3192
3193
#define V3D_CSD_CFG5_PROPAGATE_NANS (1 << 2)
3194
#define V3D_CSD_CFG5_SINGLE_SEG (1 << 1)
3195
#define V3D_CSD_CFG5_THREADING (1 << 0)
3196
3197
void
3198
v3dv_cmd_buffer_rewrite_indirect_csd_job(
3199
struct v3dv_csd_indirect_cpu_job_info *info,
3200
const uint32_t *wg_counts)
3201
{
3202
assert(info->csd_job);
3203
struct v3dv_job *job = info->csd_job;
3204
3205
assert(job->type == V3DV_JOB_TYPE_GPU_CSD);
3206
assert(wg_counts[0] > 0 && wg_counts[1] > 0 && wg_counts[2] > 0);
3207
3208
struct drm_v3d_submit_csd *submit = &job->csd.submit;
3209
3210
job->csd.wg_count[0] = wg_counts[0];
3211
job->csd.wg_count[1] = wg_counts[1];
3212
job->csd.wg_count[2] = wg_counts[2];
3213
3214
submit->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
3215
submit->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
3216
submit->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
3217
3218
submit->cfg[4] = DIV_ROUND_UP(info->wg_size, 16) *
3219
(wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
3220
assert(submit->cfg[4] != ~0);
3221
3222
if (info->needs_wg_uniform_rewrite) {
3223
/* Make sure the GPU is not currently accessing the indirect CL for this
3224
* job, since we are about to overwrite some of the uniform data.
3225
*/
3226
v3dv_bo_wait(job->device, job->indirect.bo, PIPE_TIMEOUT_INFINITE);
3227
3228
for (uint32_t i = 0; i < 3; i++) {
3229
if (info->wg_uniform_offsets[i]) {
3230
/* Sanity check that our uniform pointers are within the allocated
3231
* BO space for our indirect CL.
3232
*/
3233
assert(info->wg_uniform_offsets[i] >= (uint32_t *) job->indirect.base);
3234
assert(info->wg_uniform_offsets[i] < (uint32_t *) job->indirect.next);
3235
*(info->wg_uniform_offsets[i]) = wg_counts[i];
3236
}
3237
}
3238
}
3239
}
3240
3241
static struct v3dv_job *
cmd_buffer_create_csd_job(struct v3dv_cmd_buffer *cmd_buffer,
                          uint32_t base_offset_x,
                          uint32_t base_offset_y,
                          uint32_t base_offset_z,
                          uint32_t group_count_x,
                          uint32_t group_count_y,
                          uint32_t group_count_z,
                          uint32_t **wg_uniform_offsets_out,
                          uint32_t *wg_size_out)
{
   struct v3dv_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
   assert(pipeline && pipeline->shared_data->variants[BROADCOM_SHADER_COMPUTE]);
   struct v3dv_shader_variant *cs_variant =
      pipeline->shared_data->variants[BROADCOM_SHADER_COMPUTE];

   struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->vk.alloc,
                                    sizeof(struct v3dv_job), 8,
                                    VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!job) {
      v3dv_flag_oom(cmd_buffer, NULL);
      return NULL;
   }

   v3dv_job_init(job, V3DV_JOB_TYPE_GPU_CSD, cmd_buffer->device, cmd_buffer, -1);
   cmd_buffer->state.job = job;

   struct drm_v3d_submit_csd *submit = &job->csd.submit;

   job->csd.wg_count[0] = group_count_x;
   job->csd.wg_count[1] = group_count_y;
   job->csd.wg_count[2] = group_count_z;

   job->csd.wg_base[0] = base_offset_x;
   job->csd.wg_base[1] = base_offset_y;
   job->csd.wg_base[2] = base_offset_z;

   submit->cfg[0] |= group_count_x << V3D_CSD_CFG012_WG_COUNT_SHIFT;
   submit->cfg[1] |= group_count_y << V3D_CSD_CFG012_WG_COUNT_SHIFT;
   submit->cfg[2] |= group_count_z << V3D_CSD_CFG012_WG_COUNT_SHIFT;

   const struct v3d_compute_prog_data *cpd =
      cs_variant->prog_data.cs;

   const uint32_t num_wgs = group_count_x * group_count_y * group_count_z;
   const uint32_t wg_size = cpd->local_size[0] *
                            cpd->local_size[1] *
                            cpd->local_size[2];

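   /* Workgroups are packed into supergroups for execution: pick how many
    * workgroups to put in each supergroup, then express the dispatch as a
    * number of 16-invocation batches. Remainder workgroups that don't fill
    * a whole supergroup contribute their own batches.
    */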
   uint32_t wgs_per_sg =
      v3d_csd_choose_workgroups_per_supergroup(
         &cmd_buffer->device->devinfo,
         cs_variant->prog_data.cs->has_subgroups,
         cs_variant->prog_data.cs->base.has_control_barrier,
         cs_variant->prog_data.cs->base.threads,
         num_wgs, wg_size);

   uint32_t batches_per_sg = DIV_ROUND_UP(wgs_per_sg * wg_size, 16);
   uint32_t whole_sgs = num_wgs / wgs_per_sg;
   uint32_t rem_wgs = num_wgs - whole_sgs * wgs_per_sg;
   uint32_t num_batches = batches_per_sg * whole_sgs +
                          DIV_ROUND_UP(rem_wgs * wg_size, 16);

   submit->cfg[3] |= (wgs_per_sg & 0xf) << V3D_CSD_CFG3_WGS_PER_SG_SHIFT;
   submit->cfg[3] |= (batches_per_sg - 1) << V3D_CSD_CFG3_BATCHES_PER_SG_M1_SHIFT;
   submit->cfg[3] |= (wg_size & 0xff) << V3D_CSD_CFG3_WG_SIZE_SHIFT;
   if (wg_size_out)
      *wg_size_out = wg_size;

   submit->cfg[4] = num_batches - 1;
   assert(submit->cfg[4] != ~0);

   assert(pipeline->shared_data->assembly_bo);
   struct v3dv_bo *cs_assembly_bo = pipeline->shared_data->assembly_bo;

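   /* cfg[5] points at the compute shader code in the pipeline's assembly BO
    * and carries the execution flags: NaN propagation, single-segment mode
    * and 4-way threading, taken from the compiled shader variant.
    */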
   submit->cfg[5] = cs_assembly_bo->offset + cs_variant->assembly_offset;
   submit->cfg[5] |= V3D_CSD_CFG5_PROPAGATE_NANS;
   if (cs_variant->prog_data.base->single_seg)
      submit->cfg[5] |= V3D_CSD_CFG5_SINGLE_SEG;
   if (cs_variant->prog_data.base->threads == 4)
      submit->cfg[5] |= V3D_CSD_CFG5_THREADING;

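   /* If the shader uses shared (Vulkan "local") memory, back it with its own
    * BO. The allocation scales with the number of workgroups packed into a
    * supergroup; on allocation failure we flag OOM and still return the job.
    */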
   if (cs_variant->prog_data.cs->shared_size > 0) {
      job->csd.shared_memory =
         v3dv_bo_alloc(cmd_buffer->device,
                       cs_variant->prog_data.cs->shared_size * wgs_per_sg,
                       "shared_vars", true);
      if (!job->csd.shared_memory) {
         v3dv_flag_oom(cmd_buffer, NULL);
         return job;
      }
   }

   v3dv_job_add_bo_unchecked(job, cs_assembly_bo);
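   /* Emit the uniform stream for the compute shader and point cfg[6] at it.
    * For indirect dispatches, v3dv_write_uniforms_wg_offsets also returns the
    * locations of the workgroup-count uniforms so they can be patched later
    * in v3dv_cmd_buffer_rewrite_indirect_csd_job.
    */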
   struct v3dv_cl_reloc uniforms =
      v3dv_write_uniforms_wg_offsets(cmd_buffer, pipeline,
                                     cs_variant,
                                     wg_uniform_offsets_out);
   submit->cfg[6] = uniforms.bo->offset + uniforms.offset;

   v3dv_job_add_bo(job, uniforms.bo);

   return job;
}

static void
cmd_buffer_dispatch(struct v3dv_cmd_buffer *cmd_buffer,
                    uint32_t base_offset_x,
                    uint32_t base_offset_y,
                    uint32_t base_offset_z,
                    uint32_t group_count_x,
                    uint32_t group_count_y,
                    uint32_t group_count_z)
{
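   /* A dispatch with a zero group count in any dimension is valid and does
    * nothing, so don't bother creating a job for it.
    */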
   if (group_count_x == 0 || group_count_y == 0 || group_count_z == 0)
      return;

   struct v3dv_job *job =
      cmd_buffer_create_csd_job(cmd_buffer,
                                base_offset_x,
                                base_offset_y,
                                base_offset_z,
                                group_count_x,
                                group_count_y,
                                group_count_z,
                                NULL, NULL);

   list_addtail(&job->list_link, &cmd_buffer->jobs);
   cmd_buffer->state.job = NULL;
}

VKAPI_ATTR void VKAPI_CALL
v3dv_CmdDispatch(VkCommandBuffer commandBuffer,
                 uint32_t groupCountX,
                 uint32_t groupCountY,
                 uint32_t groupCountZ)
{
   V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer_emit_pre_dispatch(cmd_buffer);
   cmd_buffer_dispatch(cmd_buffer, 0, 0, 0,
                       groupCountX, groupCountY, groupCountZ);
}

VKAPI_ATTR void VKAPI_CALL
v3dv_CmdDispatchBase(VkCommandBuffer commandBuffer,
                     uint32_t baseGroupX,
                     uint32_t baseGroupY,
                     uint32_t baseGroupZ,
                     uint32_t groupCountX,
                     uint32_t groupCountY,
                     uint32_t groupCountZ)
{
   V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer_emit_pre_dispatch(cmd_buffer);
   cmd_buffer_dispatch(cmd_buffer,
                       baseGroupX, baseGroupY, baseGroupZ,
                       groupCountX, groupCountY, groupCountZ);
}


static void
cmd_buffer_dispatch_indirect(struct v3dv_cmd_buffer *cmd_buffer,
                             struct v3dv_buffer *buffer,
                             uint32_t offset)
{
   /* We can't do indirect dispatches, so instead we record a CPU job that,
    * when executed in the queue, will map the indirect buffer, read the
    * dispatch parameters, and submit a regular dispatch.
    */
   struct v3dv_job *job =
      v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
                                     V3DV_JOB_TYPE_CPU_CSD_INDIRECT,
                                     cmd_buffer, -1);
   v3dv_return_if_oom(cmd_buffer, NULL);

   /* We need to create a CSD job now, even if we still don't know the actual
    * dispatch parameters, because the job setup needs to be done using the
    * current command buffer state (i.e. pipeline, descriptor sets, push
    * constants, etc.). So we create the job with default dispatch parameters
    * and we will rewrite the parts we need at submit time if the indirect
    * parameters don't match the ones we used to setup the job.
    */
   struct v3dv_job *csd_job =
      cmd_buffer_create_csd_job(cmd_buffer,
                                0, 0, 0,
                                1, 1, 1,
                                &job->cpu.csd_indirect.wg_uniform_offsets[0],
                                &job->cpu.csd_indirect.wg_size);
   v3dv_return_if_oom(cmd_buffer, NULL);
   assert(csd_job);

   job->cpu.csd_indirect.buffer = buffer;
   job->cpu.csd_indirect.offset = offset;
   job->cpu.csd_indirect.csd_job = csd_job;

   /* If the compute shader reads the workgroup counts we will also need to
    * rewrite the corresponding uniforms.
    */
   job->cpu.csd_indirect.needs_wg_uniform_rewrite =
      job->cpu.csd_indirect.wg_uniform_offsets[0] ||
      job->cpu.csd_indirect.wg_uniform_offsets[1] ||
      job->cpu.csd_indirect.wg_uniform_offsets[2];

   list_addtail(&job->list_link, &cmd_buffer->jobs);
   cmd_buffer->state.job = NULL;
}

VKAPI_ATTR void VKAPI_CALL
v3dv_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                         VkBuffer _buffer,
                         VkDeviceSize offset)
{
   V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
   V3DV_FROM_HANDLE(v3dv_buffer, buffer, _buffer);

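   /* cmd_buffer_dispatch_indirect takes a 32-bit offset, so the VkDeviceSize
    * offset must fit in uint32_t.
    */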
   assert(offset <= UINT32_MAX);

   cmd_buffer_emit_pre_dispatch(cmd_buffer);
   cmd_buffer_dispatch_indirect(cmd_buffer, buffer, offset);
}

VKAPI_ATTR void VKAPI_CALL
v3dv_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
   /* Nothing to do here since we only support a single device */
   assert(deviceMask == 0x1);
}
