/* GitHub Repository: PojavLauncherTeam/mesa
 * Path: blob/21.2-virgl/src/gallium/drivers/zink/zink_batch.c
 */

#include "zink_batch.h"

#include "zink_context.h"
#include "zink_fence.h"
#include "zink_framebuffer.h"
#include "zink_query.h"
#include "zink_program.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_surface.h"

#include "util/hash_table.h"
#include "util/u_debug.h"
#include "util/set.h"

#ifdef VK_USE_PLATFORM_METAL_EXT
#include "QuartzCore/CAMetalLayer.h"
#endif
#include "wsi_common.h"

void
debug_describe_zink_batch_state(char *buf, const struct zink_batch_state *ptr)
{
   sprintf(buf, "zink_batch_state");
}

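/* Recycle a batch state once its command buffers are no longer in use: reset the
 * command pool and drop every reference the batch held (resources, queries,
 * surfaces, buffer views, programs, framebuffers), then clear the fence and
 * usage bookkeeping so the state can back a future batch.
 */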
void
zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   if (vkResetCommandPool(screen->dev, bs->cmdpool, 0) != VK_SUCCESS)
      debug_printf("vkResetCommandPool failed\n");

   /* unref all used resources */
   set_foreach_remove(bs->resources, entry) {
      struct zink_resource_object *obj = (struct zink_resource_object *)entry->key;
      zink_batch_usage_unset(&obj->reads, bs);
      zink_batch_usage_unset(&obj->writes, bs);
      zink_resource_object_reference(screen, &obj, NULL);
   }

   set_foreach_remove(bs->active_queries, entry) {
      struct zink_query *query = (void*)entry->key;
      zink_prune_query(screen, query);
   }

   set_foreach_remove(bs->surfaces, entry) {
      struct zink_surface *surf = (struct zink_surface *)entry->key;
      zink_batch_usage_unset(&surf->batch_uses, bs);
      zink_surface_reference(screen, &surf, NULL);
   }
   set_foreach_remove(bs->bufferviews, entry) {
      struct zink_buffer_view *buffer_view = (struct zink_buffer_view *)entry->key;
      zink_batch_usage_unset(&buffer_view->batch_uses, bs);
      zink_buffer_view_reference(screen, &buffer_view, NULL);
   }

   util_dynarray_foreach(&bs->zombie_samplers, VkSampler, samp) {
      vkDestroySampler(screen->dev, *samp, NULL);
   }
   util_dynarray_clear(&bs->zombie_samplers);
   util_dynarray_clear(&bs->persistent_resources);

   screen->batch_descriptor_reset(screen, bs);

   set_foreach_remove(bs->programs, entry) {
      struct zink_program *pg = (struct zink_program*)entry->key;
      zink_batch_usage_unset(&pg->batch_uses, bs);
      if (pg->is_compute) {
         struct zink_compute_program *comp = (struct zink_compute_program*)pg;
         bool in_use = comp == ctx->curr_compute;
         if (zink_compute_program_reference(screen, &comp, NULL) && in_use)
            ctx->curr_compute = NULL;
      } else {
         struct zink_gfx_program *prog = (struct zink_gfx_program*)pg;
         bool in_use = prog == ctx->curr_program;
         if (zink_gfx_program_reference(screen, &prog, NULL) && in_use)
            ctx->curr_program = NULL;
      }
   }

   set_foreach(bs->fbs, entry) {
      struct zink_framebuffer *fb = (void*)entry->key;
      zink_framebuffer_reference(screen, &fb, NULL);
      _mesa_set_remove(bs->fbs, entry);
   }

   pipe_resource_reference(&bs->flush_res, NULL);

   ctx->resource_size -= bs->resource_size;
   bs->resource_size = 0;

   /* only reset submitted here so that tc fence desync can pick up the 'completed' flag
    * before the state is reused
    */
   bs->fence.submitted = false;
   bs->has_barriers = false;
   bs->scanout_flush = false;
   if (bs->fence.batch_id)
      zink_screen_update_last_finished(screen, bs->fence.batch_id);
   bs->submit_count++;
   bs->fence.batch_id = 0;
   bs->usage.usage = 0;
   bs->draw_count = bs->compute_count = 0;
}

void
zink_clear_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
{
   bs->fence.completed = true;
   zink_reset_batch_state(ctx, bs);
}

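/* Reset every tracked batch state at once, marking each as completed and moving
 * it from the in-flight hash table onto the free list for reuse.
 */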
void
zink_batch_reset_all(struct zink_context *ctx)
{
   simple_mtx_lock(&ctx->batch_mtx);
   hash_table_foreach(&ctx->batch_states, entry) {
      struct zink_batch_state *bs = entry->data;
      bs->fence.completed = true;
      zink_reset_batch_state(ctx, bs);
      _mesa_hash_table_remove(&ctx->batch_states, entry);
      util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, bs);
   }
   simple_mtx_unlock(&ctx->batch_mtx);
}

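/* Fully destroy a batch state: free its Vulkan objects (fence, command buffers,
 * command pool), its tracking sets, and its descriptor state before releasing
 * the allocation itself.
 */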
void
zink_batch_state_destroy(struct zink_screen *screen, struct zink_batch_state *bs)
{
   if (!bs)
      return;

   util_queue_fence_destroy(&bs->flush_completed);

   cnd_destroy(&bs->usage.flush);
   mtx_destroy(&bs->usage.mtx);

   if (bs->fence.fence)
      vkDestroyFence(screen->dev, bs->fence.fence, NULL);

   if (bs->cmdbuf)
      vkFreeCommandBuffers(screen->dev, bs->cmdpool, 1, &bs->cmdbuf);
   if (bs->barrier_cmdbuf)
      vkFreeCommandBuffers(screen->dev, bs->cmdpool, 1, &bs->barrier_cmdbuf);
   if (bs->cmdpool)
      vkDestroyCommandPool(screen->dev, bs->cmdpool, NULL);

   _mesa_set_destroy(bs->fbs, NULL);
   util_dynarray_fini(&bs->zombie_samplers);
   _mesa_set_destroy(bs->surfaces, NULL);
   _mesa_set_destroy(bs->bufferviews, NULL);
   _mesa_set_destroy(bs->programs, NULL);
   _mesa_set_destroy(bs->active_queries, NULL);
   screen->batch_descriptor_deinit(screen, bs);
   ralloc_free(bs);
}

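/* Allocate and initialize a new batch state: a command pool with two primary
 * command buffers (main + barrier), the pointer sets used to track per-batch
 * references, per-batch descriptor state, and the fence used to detect
 * completion. Returns NULL on failure after cleaning up partial state.
 */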
static struct zink_batch_state *
create_batch_state(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_batch_state *bs = rzalloc(NULL, struct zink_batch_state);
   bs->have_timelines = ctx->have_timelines;
   VkCommandPoolCreateInfo cpci = {0};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
   if (vkCreateCommandPool(screen->dev, &cpci, NULL, &bs->cmdpool) != VK_SUCCESS)
      goto fail;

   VkCommandBufferAllocateInfo cbai = {0};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.commandPool = bs->cmdpool;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandBufferCount = 1;

   if (vkAllocateCommandBuffers(screen->dev, &cbai, &bs->cmdbuf) != VK_SUCCESS)
      goto fail;

   if (vkAllocateCommandBuffers(screen->dev, &cbai, &bs->barrier_cmdbuf) != VK_SUCCESS)
      goto fail;

#define SET_CREATE_OR_FAIL(ptr) \
   ptr = _mesa_pointer_set_create(bs); \
   if (!ptr) \
      goto fail

   bs->ctx = ctx;
   pipe_reference_init(&bs->reference, 1);

   SET_CREATE_OR_FAIL(bs->fbs);
   SET_CREATE_OR_FAIL(bs->resources);
   SET_CREATE_OR_FAIL(bs->surfaces);
   SET_CREATE_OR_FAIL(bs->bufferviews);
   SET_CREATE_OR_FAIL(bs->programs);
   SET_CREATE_OR_FAIL(bs->active_queries);
   util_dynarray_init(&bs->zombie_samplers, NULL);
   util_dynarray_init(&bs->persistent_resources, NULL);

   cnd_init(&bs->usage.flush);
   mtx_init(&bs->usage.mtx, mtx_plain);

   if (!screen->batch_descriptor_init(screen, bs))
      goto fail;

   VkFenceCreateInfo fci = {0};
   fci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;

   if (vkCreateFence(screen->dev, &fci, NULL, &bs->fence.fence) != VK_SUCCESS)
      goto fail;

   util_queue_fence_init(&bs->flush_completed);

   return bs;
fail:
   zink_batch_state_destroy(screen, bs);
   return NULL;
}

static inline bool
find_unused_state(struct hash_entry *entry)
{
   struct zink_fence *fence = entry->data;
   /* we can't reset these from fence_finish because threads */
   bool completed = p_atomic_read(&fence->completed);
   bool submitted = p_atomic_read(&fence->submitted);
   return submitted && completed;
}

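/* Pick a batch state for the next batch: prefer one from the free list, then any
 * submitted state whose work has finished; otherwise create a fresh one (plus a
 * few spares on first use). A recycled state is reset before being returned.
 */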
static struct zink_batch_state *
get_batch_state(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_batch_state *bs = NULL;

   simple_mtx_lock(&ctx->batch_mtx);
   if (util_dynarray_num_elements(&ctx->free_batch_states, struct zink_batch_state*))
      bs = util_dynarray_pop(&ctx->free_batch_states, struct zink_batch_state*);
   if (!bs) {
      hash_table_foreach(&ctx->batch_states, he) {
         struct zink_fence *fence = he->data;
         if (zink_screen_check_last_finished(zink_screen(ctx->base.screen), fence->batch_id) || find_unused_state(he)) {
            bs = he->data;
            _mesa_hash_table_remove(&ctx->batch_states, he);
            break;
         }
      }
   }
   simple_mtx_unlock(&ctx->batch_mtx);
   if (bs) {
      if (bs->fence.submitted && !bs->fence.completed)
         /* this fence is already done, so we need vulkan to release the cmdbuf */
         zink_vkfence_wait(zink_screen(ctx->base.screen), &bs->fence, PIPE_TIMEOUT_INFINITE);
      zink_reset_batch_state(ctx, bs);
   } else {
      if (!batch->state) {
         /* this is batch init, so create a few more states for later use */
         for (int i = 0; i < 3; i++) {
            struct zink_batch_state *state = create_batch_state(ctx);
            util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, state);
         }
      }
      bs = create_batch_state(ctx);
   }
   return bs;
}

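/* Prepare the batch for new work: re-create the timeline semaphore if the batch
 * id counter has wrapped, then attach a usable batch state.
 */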
void
zink_reset_batch(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   if (ctx->have_timelines && screen->last_finished > ctx->curr_batch && ctx->curr_batch == 1) {
      if (!zink_screen_init_semaphore(screen)) {
         debug_printf("timeline init failed, things are about to go dramatically wrong.");
         ctx->have_timelines = false;
      }
   }

   batch->state = get_batch_state(ctx, batch);
   assert(batch->state);

   batch->has_work = false;
}

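/* Begin recording a new batch: reset to a fresh state, begin both command
 * buffers, tag the fence with the current batch id, and resume any queries that
 * were suspended when the previous batch ended.
 */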
void
zink_start_batch(struct zink_context *ctx, struct zink_batch *batch)
{
   zink_reset_batch(ctx, batch);

   batch->state->usage.unflushed = true;

   VkCommandBufferBeginInfo cbbi = {0};
   cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
   cbbi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
   if (vkBeginCommandBuffer(batch->state->cmdbuf, &cbbi) != VK_SUCCESS)
      debug_printf("vkBeginCommandBuffer failed\n");
   if (vkBeginCommandBuffer(batch->state->barrier_cmdbuf, &cbbi) != VK_SUCCESS)
      debug_printf("vkBeginCommandBuffer failed\n");

   batch->state->fence.batch_id = ctx->curr_batch;
   batch->state->fence.completed = false;
   if (ctx->last_fence) {
      struct zink_batch_state *last_state = zink_batch_state(ctx->last_fence);
      batch->last_batch_usage = &last_state->usage;
   }
   if (!ctx->queries_disabled)
      zink_resume_queries(ctx, batch);
}

static void
post_submit(void *data, void *gdata, int thread_index)
{
   struct zink_batch_state *bs = data;

   if (bs->is_device_lost) {
      if (bs->ctx->reset.reset)
         bs->ctx->reset.reset(bs->ctx->reset.data, PIPE_GUILTY_CONTEXT_RESET);
      zink_screen(bs->ctx->base.screen)->device_lost = true;
   }
}

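/* Finalize and submit a batch; runs on the flush-queue thread when the screen is
 * threaded, or inline otherwise. Assigns a batch id, ends both command buffers,
 * flushes non-coherent persistently mapped memory, and submits to the Vulkan
 * queue, signaling the timeline semaphore (and WSI memory-signal info for
 * scanout resources) when applicable.
 */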
static void
submit_queue(void *data, void *gdata, int thread_index)
{
   struct zink_batch_state *bs = data;
   struct zink_context *ctx = bs->ctx;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VkSubmitInfo si = {0};

   simple_mtx_lock(&ctx->batch_mtx);
   while (!bs->fence.batch_id)
      bs->fence.batch_id = p_atomic_inc_return(&screen->curr_batch);
   _mesa_hash_table_insert_pre_hashed(&ctx->batch_states, bs->fence.batch_id, (void*)(uintptr_t)bs->fence.batch_id, bs);
   bs->usage.usage = bs->fence.batch_id;
   bs->usage.unflushed = false;
   simple_mtx_unlock(&ctx->batch_mtx);

   vkResetFences(screen->dev, 1, &bs->fence.fence);

   uint64_t batch_id = bs->fence.batch_id;
   si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
   si.waitSemaphoreCount = 0;
   si.pWaitSemaphores = NULL;
   si.signalSemaphoreCount = 0;
   si.pSignalSemaphores = NULL;
   si.pWaitDstStageMask = NULL;
   si.commandBufferCount = bs->has_barriers ? 2 : 1;
   VkCommandBuffer cmdbufs[2] = {
      bs->barrier_cmdbuf,
      bs->cmdbuf,
   };
   si.pCommandBuffers = bs->has_barriers ? cmdbufs : &cmdbufs[1];

   VkTimelineSemaphoreSubmitInfo tsi = {0};
   if (bs->have_timelines) {
      tsi.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO;
      si.pNext = &tsi;
      tsi.signalSemaphoreValueCount = 1;
      tsi.pSignalSemaphoreValues = &batch_id;
      si.signalSemaphoreCount = 1;
      si.pSignalSemaphores = &screen->sem;
   }

   struct wsi_memory_signal_submit_info mem_signal = {
      .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
      .pNext = si.pNext,
   };

   if (bs->flush_res && screen->needs_mesa_flush_wsi) {
      struct zink_resource *flush_res = zink_resource(bs->flush_res);
      mem_signal.memory = flush_res->scanout_obj ? flush_res->scanout_obj->mem : flush_res->obj->mem;
      si.pNext = &mem_signal;
   }

   if (vkEndCommandBuffer(bs->cmdbuf) != VK_SUCCESS) {
      debug_printf("vkEndCommandBuffer failed\n");
      bs->is_device_lost = true;
      goto end;
   }
   if (vkEndCommandBuffer(bs->barrier_cmdbuf) != VK_SUCCESS) {
      debug_printf("vkEndCommandBuffer failed\n");
      bs->is_device_lost = true;
      goto end;
   }

   while (util_dynarray_contains(&bs->persistent_resources, struct zink_resource_object*)) {
      struct zink_resource_object *obj = util_dynarray_pop(&bs->persistent_resources, struct zink_resource_object*);
      VkMappedMemoryRange range = zink_resource_init_mem_range(screen, obj, 0, obj->size);
      vkFlushMappedMemoryRanges(screen->dev, 1, &range);
   }

   if (vkQueueSubmit(bs->queue, 1, &si, bs->fence.fence) != VK_SUCCESS) {
      debug_printf("ZINK: vkQueueSubmit() failed\n");
      bs->is_device_lost = true;
   }
   bs->submit_count++;
end:
   cnd_broadcast(&bs->usage.flush);

   p_atomic_set(&bs->fence.submitted, true);
}

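/* Copy the rendered image into the separate scanout (presentable) image:
 * transition both images for transfer, copy the region, then return the scanout
 * image to PRESENT_SRC layout.
 */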
/* TODO: remove for wsi */
static void
copy_scanout(struct zink_batch_state *bs, struct zink_resource *res)
{
   if (!bs->scanout_flush)
      return;

   VkImageCopy region = {0};
   struct pipe_box box = {0, 0, 0,
                          u_minify(res->base.b.width0, 0),
                          u_minify(res->base.b.height0, 0), res->base.b.array_size};
   box.depth = util_num_layers(&res->base.b, 0);
   struct pipe_box *src_box = &box;
   unsigned dstz = 0;

   region.srcSubresource.aspectMask = res->aspect;
   region.srcSubresource.mipLevel = 0;
   switch (res->base.b.target) {
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_CUBE_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_1D_ARRAY:
      /* these use layer */
      region.srcSubresource.baseArrayLayer = src_box->z;
      region.srcSubresource.layerCount = src_box->depth;
      region.srcOffset.z = 0;
      region.extent.depth = 1;
      break;
   case PIPE_TEXTURE_3D:
      /* this uses depth */
      region.srcSubresource.baseArrayLayer = 0;
      region.srcSubresource.layerCount = 1;
      region.srcOffset.z = src_box->z;
      region.extent.depth = src_box->depth;
      break;
   default:
      /* these must only copy one layer */
      region.srcSubresource.baseArrayLayer = 0;
      region.srcSubresource.layerCount = 1;
      region.srcOffset.z = 0;
      region.extent.depth = 1;
   }

   region.srcOffset.x = src_box->x;
   region.srcOffset.y = src_box->y;

   region.dstSubresource.aspectMask = res->aspect;
   region.dstSubresource.mipLevel = 0;
   switch (res->base.b.target) {
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_CUBE_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_1D_ARRAY:
      /* these use layer */
      region.dstSubresource.baseArrayLayer = dstz;
      region.dstSubresource.layerCount = src_box->depth;
      region.dstOffset.z = 0;
      break;
   case PIPE_TEXTURE_3D:
      /* this uses depth */
      region.dstSubresource.baseArrayLayer = 0;
      region.dstSubresource.layerCount = 1;
      region.dstOffset.z = dstz;
      break;
   default:
      /* these must only copy one layer */
      region.dstSubresource.baseArrayLayer = 0;
      region.dstSubresource.layerCount = 1;
      region.dstOffset.z = 0;
   }

   region.dstOffset.x = 0;
   region.dstOffset.y = 0;
   region.extent.width = src_box->width;
   region.extent.height = src_box->height;

   VkImageMemoryBarrier imb1;
   zink_resource_image_barrier_init(&imb1, res, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
   vkCmdPipelineBarrier(
      bs->cmdbuf,
      res->access_stage ? res->access_stage : VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      0,
      0, NULL,
      0, NULL,
      1, &imb1
   );

   VkImageSubresourceRange isr = {
      res->aspect,
      0, VK_REMAINING_MIP_LEVELS,
      0, VK_REMAINING_ARRAY_LAYERS
   };
   VkImageMemoryBarrier imb = {
      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      NULL,
      0,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      res->scanout_obj_init ? VK_IMAGE_LAYOUT_PRESENT_SRC_KHR : VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
      VK_QUEUE_FAMILY_IGNORED,
      VK_QUEUE_FAMILY_IGNORED,
      res->scanout_obj->image,
      isr
   };
   vkCmdPipelineBarrier(
      bs->cmdbuf,
      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );

   vkCmdCopyImage(bs->cmdbuf, res->obj->image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                  res->scanout_obj->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                  1, &region);
   imb.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
   imb.dstAccessMask = 0;
   imb.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
   imb.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
   vkCmdPipelineBarrier(
      bs->cmdbuf,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );
   /* separate flag to avoid annoying validation errors for new scanout objs */
   res->scanout_obj_init = true;
}

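/* Finish the current batch: emit the scanout copy if one is pending, suspend
 * queries, then hand the batch state to the flush queue (threaded) or submit it
 * directly (unthreaded).
 */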
void
zink_end_batch(struct zink_context *ctx, struct zink_batch *batch)
{
   if (batch->state->flush_res)
      copy_scanout(batch->state, zink_resource(batch->state->flush_res));
   if (!ctx->queries_disabled)
      zink_suspend_queries(ctx, batch);

   tc_driver_internal_flush_notify(ctx->tc);

   struct zink_screen *screen = zink_screen(ctx->base.screen);

   ctx->resource_size += batch->state->resource_size;
   ctx->last_fence = &batch->state->fence;

   if (screen->device_lost)
      return;

   if (screen->threaded) {
      batch->state->queue = screen->thread_queue;
      util_queue_add_job(&screen->flush_queue, batch->state, &batch->state->flush_completed,
                         submit_queue, post_submit, 0);
   } else {
      batch->state->queue = screen->queue;
      submit_queue(batch->state, NULL, 0);
      post_submit(batch->state, NULL, 0);
   }
}

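/* Mark a resource as read or written by the current batch. A write to a resource
 * with a scanout image schedules the scanout copy, and non-coherent persistently
 * mapped resources are remembered so their memory can be flushed at submit time.
 */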
void
zink_batch_resource_usage_set(struct zink_batch *batch, struct zink_resource *res, bool write)
{
   if (write) {
      zink_batch_usage_set(&res->obj->writes, batch->state);
      if (res->scanout_obj)
         batch->state->scanout_flush = true;
   } else {
      zink_batch_usage_set(&res->obj->reads, batch->state);
   }
   /* multiple array entries are fine */
   if (!res->obj->coherent && res->obj->persistent_maps)
      util_dynarray_append(&batch->state->persistent_resources, struct zink_resource_object*, res->obj);

   batch->has_work = true;
}

void
zink_batch_reference_resource_rw(struct zink_batch *batch, struct zink_resource *res, bool write)
{
   /* if the resource already has usage of any sort set for this batch, we can skip hashing */
   if (!zink_batch_usage_matches(res->obj->reads, batch->state) &&
       !zink_batch_usage_matches(res->obj->writes, batch->state)) {
      zink_batch_reference_resource(batch, res);
   }
   zink_batch_resource_usage_set(batch, res, write);
}

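/* Track a pointer in one of the batch's sets; returns true only the first time
 * the pointer is added, so callers take exactly one reference per batch.
 */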
bool
batch_ptr_add_usage(struct zink_batch *batch, struct set *s, void *ptr)
{
   bool found = false;
   _mesa_set_search_or_add(s, ptr, &found);
   return !found;
}

void
zink_batch_reference_resource(struct zink_batch *batch, struct zink_resource *res)
{
   if (!batch_ptr_add_usage(batch, batch->state->resources, res->obj))
      return;
   pipe_reference(NULL, &res->obj->reference);
   batch->state->resource_size += res->obj->size;
   batch->has_work = true;
}

void
zink_batch_reference_resource_move(struct zink_batch *batch, struct zink_resource *res)
{
   if (!batch_ptr_add_usage(batch, batch->state->resources, res->obj))
      return;
   batch->state->resource_size += res->obj->size;
   batch->has_work = true;
}

void
zink_batch_reference_bufferview(struct zink_batch *batch, struct zink_buffer_view *buffer_view)
{
   if (!batch_ptr_add_usage(batch, batch->state->bufferviews, buffer_view))
      return;
   pipe_reference(NULL, &buffer_view->reference);
   batch->has_work = true;
}

void
zink_batch_reference_surface(struct zink_batch *batch, struct zink_surface *surface)
{
   if (!batch_ptr_add_usage(batch, batch->state->surfaces, surface))
      return;
   struct pipe_surface *surf = NULL;
   pipe_surface_reference(&surf, &surface->base);
   batch->has_work = true;
}

void
zink_batch_reference_sampler_view(struct zink_batch *batch,
                                  struct zink_sampler_view *sv)
{
   if (sv->base.target == PIPE_BUFFER)
      zink_batch_reference_bufferview(batch, sv->buffer_view);
   else
      zink_batch_reference_surface(batch, sv->image_view);
}

void
zink_batch_reference_framebuffer(struct zink_batch *batch,
                                 struct zink_framebuffer *fb)
{
   bool found;
   _mesa_set_search_or_add(batch->state->fbs, fb, &found);
   if (!found)
      pipe_reference(NULL, &fb->reference);
}

void
zink_batch_reference_program(struct zink_batch *batch,
                             struct zink_program *pg)
{
   if (zink_batch_usage_matches(pg->batch_uses, batch->state) ||
       !batch_ptr_add_usage(batch, batch->state->programs, pg))
      return;
   pipe_reference(NULL, &pg->reference);
   zink_batch_usage_set(&pg->batch_uses, batch->state);
   batch->has_work = true;
}

void
zink_batch_reference_image_view(struct zink_batch *batch,
                                struct zink_image_view *image_view)
{
   if (image_view->base.resource->target == PIPE_BUFFER)
      zink_batch_reference_bufferview(batch, image_view->buffer_view);
   else
      zink_batch_reference_surface(batch, image_view->surface);
}

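/* Batch-usage queries for callers that need to wait on an object's last use:
 * check whether the batch that used it has finished, or block until it has,
 * flushing first if that batch has not been submitted yet.
 */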
bool
zink_batch_usage_check_completion(struct zink_context *ctx, const struct zink_batch_usage *u)
{
   if (!zink_batch_usage_exists(u))
      return true;
   if (zink_batch_usage_is_unflushed(u))
      return false;
   return zink_check_batch_completion(ctx, u->usage);
}

void
zink_batch_usage_wait(struct zink_context *ctx, struct zink_batch_usage *u)
{
   if (!zink_batch_usage_exists(u))
      return;
   if (zink_batch_usage_is_unflushed(u)) {
      if (likely(u == &ctx->batch.state->usage))
         ctx->base.flush(&ctx->base, NULL, PIPE_FLUSH_HINT_FINISH);
      else { //multi-context
         mtx_lock(&u->mtx);
         cnd_wait(&u->flush, &u->mtx);
         mtx_unlock(&u->mtx);
      }
   }
   zink_wait_on_batch(ctx, u->usage);
}