GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/virtio/vulkan/vn_queue.c
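The listing below is the Venus (virtio-gpu Vulkan) driver's queue code: the vkQueueSubmit/vkQueueBindSparse, fence, semaphore, and event entrypoints that forward work to the host renderer.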
/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_queue.h"

#include "util/libsync.h"
#include "venus-protocol/vn_protocol_driver_event.h"
#include "venus-protocol/vn_protocol_driver_fence.h"
#include "venus-protocol/vn_protocol_driver_queue.h"
#include "venus-protocol/vn_protocol_driver_semaphore.h"

#include "vn_device.h"
#include "vn_device_memory.h"
#include "vn_renderer.h"

/* queue commands */

void
vn_GetDeviceQueue(VkDevice device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   struct vn_device *dev = vn_device_from_handle(device);

   for (uint32_t i = 0; i < dev->queue_count; i++) {
      struct vn_queue *queue = &dev->queues[i];
      if (queue->family == queueFamilyIndex && queue->index == queueIndex) {
         assert(!queue->flags);
         *pQueue = vn_queue_to_handle(queue);
         return;
      }
   }
   unreachable("bad queue family/index");
}

void
vn_GetDeviceQueue2(VkDevice device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   struct vn_device *dev = vn_device_from_handle(device);

   for (uint32_t i = 0; i < dev->queue_count; i++) {
      struct vn_queue *queue = &dev->queues[i];
      if (queue->family == pQueueInfo->queueFamilyIndex &&
          queue->index == pQueueInfo->queueIndex &&
          queue->flags == pQueueInfo->flags) {
         *pQueue = vn_queue_to_handle(queue);
         return;
      }
   }
   unreachable("bad queue family/index");
}

static void
vn_semaphore_reset_wsi(struct vn_device *dev, struct vn_semaphore *sem);

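/*
 * Helper state for vkQueueSubmit / vkQueueBindSparse.  Wait semaphores whose
 * payload is VN_SYNC_TYPE_WSI_SIGNALED were already signaled on the guest
 * side and must not be forwarded to the renderer, so the batches are
 * shallow-copied into temp storage and their wait lists rewritten without
 * those semaphores.
 */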
struct vn_queue_submission {
   VkStructureType batch_type;
   VkQueue queue;
   uint32_t batch_count;
   union {
      const void *batches;
      const VkSubmitInfo *submit_batches;
      const VkBindSparseInfo *bind_sparse_batches;
   };
   VkFence fence;

   uint32_t wait_semaphore_count;
   uint32_t wait_wsi_count;

   struct {
      void *storage;

      union {
         void *batches;
         VkSubmitInfo *submit_batches;
         VkBindSparseInfo *bind_sparse_batches;
      };
      VkSemaphore *semaphores;
   } temp;
};

static void
vn_queue_submission_count_batch_semaphores(struct vn_queue_submission *submit,
                                           uint32_t batch_index)
{
   union {
      const VkSubmitInfo *submit_batch;
      const VkBindSparseInfo *bind_sparse_batch;
   } u;
   const VkSemaphore *wait_sems;
   uint32_t wait_count;
   switch (submit->batch_type) {
   case VK_STRUCTURE_TYPE_SUBMIT_INFO:
      u.submit_batch = &submit->submit_batches[batch_index];
      wait_sems = u.submit_batch->pWaitSemaphores;
      wait_count = u.submit_batch->waitSemaphoreCount;
      break;
   case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
      u.bind_sparse_batch = &submit->bind_sparse_batches[batch_index];
      wait_sems = u.bind_sparse_batch->pWaitSemaphores;
      wait_count = u.bind_sparse_batch->waitSemaphoreCount;
      break;
   default:
      unreachable("unexpected batch type");
      break;
   }

   submit->wait_semaphore_count += wait_count;
   for (uint32_t i = 0; i < wait_count; i++) {
      struct vn_semaphore *sem = vn_semaphore_from_handle(wait_sems[i]);
      const struct vn_sync_payload *payload = sem->payload;

      if (payload->type == VN_SYNC_TYPE_WSI_SIGNALED)
         submit->wait_wsi_count++;
   }
}

static void
vn_queue_submission_count_semaphores(struct vn_queue_submission *submit)
{
   submit->wait_semaphore_count = 0;
   submit->wait_wsi_count = 0;

   for (uint32_t i = 0; i < submit->batch_count; i++)
      vn_queue_submission_count_batch_semaphores(submit, i);
}

static VkResult
vn_queue_submission_alloc_storage(struct vn_queue_submission *submit)
{
   struct vn_queue *queue = vn_queue_from_handle(submit->queue);
   const VkAllocationCallbacks *alloc = &queue->device->base.base.alloc;
   size_t alloc_size = 0;
   size_t semaphores_offset = 0;

   /* we want to filter out VN_SYNC_TYPE_WSI_SIGNALED wait semaphores */
   if (submit->wait_wsi_count) {
      switch (submit->batch_type) {
      case VK_STRUCTURE_TYPE_SUBMIT_INFO:
         alloc_size += sizeof(VkSubmitInfo) * submit->batch_count;
         break;
      case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
         alloc_size += sizeof(VkBindSparseInfo) * submit->batch_count;
         break;
      default:
         unreachable("unexpected batch type");
         break;
      }

      semaphores_offset = alloc_size;
      alloc_size += sizeof(*submit->temp.semaphores) *
                    (submit->wait_semaphore_count - submit->wait_wsi_count);
   }

   if (!alloc_size) {
      submit->temp.storage = NULL;
      return VK_SUCCESS;
   }

   submit->temp.storage = vk_alloc(alloc, alloc_size, VN_DEFAULT_ALIGN,
                                   VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!submit->temp.storage)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   submit->temp.batches = submit->temp.storage;
   submit->temp.semaphores = submit->temp.storage + semaphores_offset;

   return VK_SUCCESS;
}

static uint32_t
vn_queue_submission_filter_batch_wsi_semaphores(
   struct vn_queue_submission *submit,
   uint32_t batch_index,
   uint32_t sem_base)
{
   struct vn_queue *queue = vn_queue_from_handle(submit->queue);

   union {
      VkSubmitInfo *submit_batch;
      VkBindSparseInfo *bind_sparse_batch;
   } u;
   const VkSemaphore *src_sems;
   uint32_t src_count;
   switch (submit->batch_type) {
   case VK_STRUCTURE_TYPE_SUBMIT_INFO:
      u.submit_batch = &submit->temp.submit_batches[batch_index];
      src_sems = u.submit_batch->pWaitSemaphores;
      src_count = u.submit_batch->waitSemaphoreCount;
      break;
   case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
      u.bind_sparse_batch = &submit->temp.bind_sparse_batches[batch_index];
      src_sems = u.bind_sparse_batch->pWaitSemaphores;
      src_count = u.bind_sparse_batch->waitSemaphoreCount;
      break;
   default:
      unreachable("unexpected batch type");
      break;
   }

   VkSemaphore *dst_sems = &submit->temp.semaphores[sem_base];
   uint32_t dst_count = 0;

   /* filter out VN_SYNC_TYPE_WSI_SIGNALED wait semaphores */
   for (uint32_t i = 0; i < src_count; i++) {
      struct vn_semaphore *sem = vn_semaphore_from_handle(src_sems[i]);
      const struct vn_sync_payload *payload = sem->payload;

      if (payload->type == VN_SYNC_TYPE_WSI_SIGNALED)
         vn_semaphore_reset_wsi(queue->device, sem);
      else
         dst_sems[dst_count++] = src_sems[i];
   }

   switch (submit->batch_type) {
   case VK_STRUCTURE_TYPE_SUBMIT_INFO:
      u.submit_batch->pWaitSemaphores = dst_sems;
      u.submit_batch->waitSemaphoreCount = dst_count;
      break;
   case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
      u.bind_sparse_batch->pWaitSemaphores = dst_sems;
      u.bind_sparse_batch->waitSemaphoreCount = dst_count;
      break;
   default:
      break;
   }

   return dst_count;
}

static void
vn_queue_submission_setup_batches(struct vn_queue_submission *submit)
{
   if (!submit->temp.storage)
      return;

   /* make a copy because we need to filter out WSI semaphores */
   if (submit->wait_wsi_count) {
      switch (submit->batch_type) {
      case VK_STRUCTURE_TYPE_SUBMIT_INFO:
         memcpy(submit->temp.submit_batches, submit->submit_batches,
                sizeof(submit->submit_batches[0]) * submit->batch_count);
         submit->submit_batches = submit->temp.submit_batches;
         break;
      case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
         memcpy(submit->temp.bind_sparse_batches, submit->bind_sparse_batches,
                sizeof(submit->bind_sparse_batches[0]) * submit->batch_count);
         submit->bind_sparse_batches = submit->temp.bind_sparse_batches;
         break;
      default:
         unreachable("unexpected batch type");
         break;
      }
   }

   uint32_t wait_sem_base = 0;
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      if (submit->wait_wsi_count) {
         wait_sem_base += vn_queue_submission_filter_batch_wsi_semaphores(
            submit, i, wait_sem_base);
      }
   }
}

static VkResult
vn_queue_submission_prepare_submit(struct vn_queue_submission *submit,
                                   VkQueue queue,
                                   uint32_t batch_count,
                                   const VkSubmitInfo *submit_batches,
                                   VkFence fence)
{
   submit->batch_type = VK_STRUCTURE_TYPE_SUBMIT_INFO;
   submit->queue = queue;
   submit->batch_count = batch_count;
   submit->submit_batches = submit_batches;
   submit->fence = fence;

   vn_queue_submission_count_semaphores(submit);

   VkResult result = vn_queue_submission_alloc_storage(submit);
   if (result != VK_SUCCESS)
      return result;

   vn_queue_submission_setup_batches(submit);

   return VK_SUCCESS;
}

static VkResult
vn_queue_submission_prepare_bind_sparse(
   struct vn_queue_submission *submit,
   VkQueue queue,
   uint32_t batch_count,
   const VkBindSparseInfo *bind_sparse_batches,
   VkFence fence)
{
   submit->batch_type = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
   submit->queue = queue;
   submit->batch_count = batch_count;
   submit->bind_sparse_batches = bind_sparse_batches;
   submit->fence = fence;

   vn_queue_submission_count_semaphores(submit);

   VkResult result = vn_queue_submission_alloc_storage(submit);
   if (result != VK_SUCCESS)
      return result;

   vn_queue_submission_setup_batches(submit);

   return VK_SUCCESS;
}

static void
vn_queue_submission_cleanup(struct vn_queue_submission *submit)
{
   struct vn_queue *queue = vn_queue_from_handle(submit->queue);
   const VkAllocationCallbacks *alloc = &queue->device->base.base.alloc;

   vk_free(alloc, submit->temp.storage);
}

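/*
 * vkQueueSubmit: prepare the batches (count wait semaphores and drop the
 * WSI-signaled ones), forward the call to the renderer, then make sure any
 * wsi_memory_signal_submit_info buffer is safe to present.  Without implicit
 * fencing in the renderer this currently falls back to vkQueueWaitIdle.
 */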
VkResult
vn_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence fence)
{
   struct vn_queue *queue = vn_queue_from_handle(_queue);
   struct vn_device *dev = queue->device;

   struct vn_queue_submission submit;
   VkResult result = vn_queue_submission_prepare_submit(
      &submit, _queue, submitCount, pSubmits, fence);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   const struct vn_device_memory *wsi_mem = NULL;
   if (submit.batch_count == 1) {
      const struct wsi_memory_signal_submit_info *info = vk_find_struct_const(
         submit.submit_batches[0].pNext, WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
      if (info) {
         wsi_mem = vn_device_memory_from_handle(info->memory);
         assert(!wsi_mem->base_memory && wsi_mem->base_bo);
      }
   }

   result =
      vn_call_vkQueueSubmit(dev->instance, submit.queue, submit.batch_count,
                            submit.submit_batches, submit.fence);
   if (result != VK_SUCCESS) {
      vn_queue_submission_cleanup(&submit);
      return vn_error(dev->instance, result);
   }

   if (wsi_mem) {
      /* XXX this is always false and kills the performance */
      if (dev->instance->renderer_info.has_implicit_fencing) {
         vn_renderer_submit(dev->renderer, &(const struct vn_renderer_submit){
                                              .bos = &wsi_mem->base_bo,
                                              .bo_count = 1,
                                           });
      } else {
         if (VN_DEBUG(WSI)) {
            static uint32_t ratelimit;
            if (ratelimit < 10) {
               vn_log(dev->instance,
                      "forcing vkQueueWaitIdle before presenting");
               ratelimit++;
            }
         }

         vn_QueueWaitIdle(submit.queue);
      }
   }

   vn_queue_submission_cleanup(&submit);

   return VK_SUCCESS;
}

VkResult
vn_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence fence)
{
   struct vn_queue *queue = vn_queue_from_handle(_queue);
   struct vn_device *dev = queue->device;

   struct vn_queue_submission submit;
   VkResult result = vn_queue_submission_prepare_bind_sparse(
      &submit, _queue, bindInfoCount, pBindInfo, fence);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = vn_call_vkQueueBindSparse(
      dev->instance, submit.queue, submit.batch_count,
      submit.bind_sparse_batches, submit.fence);
   if (result != VK_SUCCESS) {
      vn_queue_submission_cleanup(&submit);
      return vn_error(dev->instance, result);
   }

   vn_queue_submission_cleanup(&submit);

   return VK_SUCCESS;
}

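/*
 * Queue idle is emulated with the queue's internal wait_fence: submit an
 * empty batch that signals it, wait for it on the CPU, then reset it.
 */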
VkResult
vn_QueueWaitIdle(VkQueue _queue)
{
   struct vn_queue *queue = vn_queue_from_handle(_queue);
   VkDevice device = vn_device_to_handle(queue->device);

   VkResult result = vn_QueueSubmit(_queue, 0, NULL, queue->wait_fence);
   if (result != VK_SUCCESS)
      return result;

   result = vn_WaitForFences(device, 1, &queue->wait_fence, true, UINT64_MAX);
   vn_ResetFences(device, 1, &queue->wait_fence);

   return vn_result(queue->device->instance, result);
}

/* fence commands */

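/*
 * Fences (and semaphores below) carry two payloads: the permanent one lives
 * on the renderer side (VN_SYNC_TYPE_DEVICE_ONLY), while the temporary one
 * can be flipped to VN_SYNC_TYPE_WSI_SIGNALED to record a signal that
 * already happened on the guest, e.g. from WSI or a sync_file import.
 */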
static void
vn_sync_payload_release(struct vn_device *dev,
                        struct vn_sync_payload *payload)
{
   payload->type = VN_SYNC_TYPE_INVALID;
}

static VkResult
vn_fence_init_payloads(struct vn_device *dev,
                       struct vn_fence *fence,
                       bool signaled,
                       const VkAllocationCallbacks *alloc)
{
   fence->permanent.type = VN_SYNC_TYPE_DEVICE_ONLY;
   fence->temporary.type = VN_SYNC_TYPE_INVALID;
   fence->payload = &fence->permanent;

   return VK_SUCCESS;
}

void
vn_fence_signal_wsi(struct vn_device *dev, struct vn_fence *fence)
{
   struct vn_sync_payload *temp = &fence->temporary;

   vn_sync_payload_release(dev, temp);
   temp->type = VN_SYNC_TYPE_WSI_SIGNALED;
   fence->payload = temp;
}

VkResult
vn_CreateFence(VkDevice device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   VkFenceCreateInfo local_create_info;
   if (vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO)) {
      local_create_info = *pCreateInfo;
      local_create_info.pNext = NULL;
      pCreateInfo = &local_create_info;
   }

   struct vn_fence *fence = vk_zalloc(alloc, sizeof(*fence), VN_DEFAULT_ALIGN,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!fence)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&fence->base, VK_OBJECT_TYPE_FENCE, &dev->base);

   VkResult result = vn_fence_init_payloads(
      dev, fence, pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT, alloc);
   if (result != VK_SUCCESS) {
      vk_free(alloc, fence);
      return vn_error(dev->instance, result);
   }

   VkFence fence_handle = vn_fence_to_handle(fence);
   vn_async_vkCreateFence(dev->instance, device, pCreateInfo, NULL,
                          &fence_handle);

   *pFence = fence_handle;

   return VK_SUCCESS;
}

void
vn_DestroyFence(VkDevice device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_fence *fence = vn_fence_from_handle(_fence);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!fence)
      return;

   vn_async_vkDestroyFence(dev->instance, device, _fence, NULL);

   vn_sync_payload_release(dev, &fence->permanent);
   vn_sync_payload_release(dev, &fence->temporary);

   vn_object_base_fini(&fence->base);
   vk_free(alloc, fence);
}

VkResult
vn_ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO if the fence is shared-by-ref, this needs to be synchronous */
   if (false)
      vn_call_vkResetFences(dev->instance, device, fenceCount, pFences);
   else
      vn_async_vkResetFences(dev->instance, device, fenceCount, pFences);

   for (uint32_t i = 0; i < fenceCount; i++) {
      struct vn_fence *fence = vn_fence_from_handle(pFences[i]);
      struct vn_sync_payload *perm = &fence->permanent;

      vn_sync_payload_release(dev, &fence->temporary);

      assert(perm->type == VN_SYNC_TYPE_DEVICE_ONLY);
      fence->payload = perm;
   }

   return VK_SUCCESS;
}

VkResult
vn_GetFenceStatus(VkDevice device, VkFence _fence)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_fence *fence = vn_fence_from_handle(_fence);
   struct vn_sync_payload *payload = fence->payload;

   VkResult result;
   switch (payload->type) {
   case VN_SYNC_TYPE_DEVICE_ONLY:
      result = vn_call_vkGetFenceStatus(dev->instance, device, _fence);
      break;
   case VN_SYNC_TYPE_WSI_SIGNALED:
      result = VK_SUCCESS;
      break;
   default:
      unreachable("unexpected fence payload type");
      break;
   }

   return vn_result(dev->instance, result);
}

static VkResult
vn_find_first_signaled_fence(VkDevice device,
                             const VkFence *fences,
                             uint32_t count)
{
   for (uint32_t i = 0; i < count; i++) {
      VkResult result = vn_GetFenceStatus(device, fences[i]);
      if (result == VK_SUCCESS || result < 0)
         return result;
   }
   return VK_NOT_READY;
}

static VkResult
vn_remove_signaled_fences(VkDevice device, VkFence *fences, uint32_t *count)
{
   uint32_t cur = 0;
   for (uint32_t i = 0; i < *count; i++) {
      VkResult result = vn_GetFenceStatus(device, fences[i]);
      if (result != VK_SUCCESS) {
         if (result < 0)
            return result;
         fences[cur++] = fences[i];
      }
   }

   *count = cur;
   return cur ? VK_NOT_READY : VK_SUCCESS;
}

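/*
 * Advance one iteration of a wait loop: on VK_NOT_READY, either time out
 * against abs_timeout or back off with vn_relax() before polling again.
 */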
static VkResult
vn_update_sync_result(VkResult result, int64_t abs_timeout, uint32_t *iter)
{
   switch (result) {
   case VK_NOT_READY:
      if (abs_timeout != OS_TIMEOUT_INFINITE &&
          os_time_get_nano() >= abs_timeout)
         result = VK_TIMEOUT;
      else
         vn_relax(iter);
      break;
   default:
      assert(result == VK_SUCCESS || result < 0);
      break;
   }

   return result;
}

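/*
 * Waits are implemented by polling fence status from the guest.  For
 * "wait all" the fence list is copied and pruned of signaled fences each
 * pass; for "wait any" each pass just scans for the first signaled fence.
 */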
VkResult
vn_WaitForFences(VkDevice device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   const int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
   VkResult result = VK_NOT_READY;
   uint32_t iter = 0;
   if (fenceCount > 1 && waitAll) {
      VkFence local_fences[8];
      VkFence *fences = local_fences;
      if (fenceCount > ARRAY_SIZE(local_fences)) {
         fences =
            vk_alloc(alloc, sizeof(*fences) * fenceCount, VN_DEFAULT_ALIGN,
                     VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (!fences)
            return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      memcpy(fences, pFences, sizeof(*fences) * fenceCount);

      while (result == VK_NOT_READY) {
         result = vn_remove_signaled_fences(device, fences, &fenceCount);
         result = vn_update_sync_result(result, abs_timeout, &iter);
      }

      if (fences != local_fences)
         vk_free(alloc, fences);
   } else {
      while (result == VK_NOT_READY) {
         result = vn_find_first_signaled_fence(device, pFences, fenceCount);
         result = vn_update_sync_result(result, abs_timeout, &iter);
      }
   }

   return vn_result(dev->instance, result);
}

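/*
 * Create a renderer sync, submit a signal on it, and export it as a
 * sync_file fd.  This backs the SYNC_FD export paths below.
 */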
static VkResult
vn_create_sync_file(struct vn_device *dev, int *out_fd)
{
   struct vn_renderer_sync *sync;
   VkResult result = vn_renderer_sync_create(dev->renderer, 0,
                                             VN_RENDERER_SYNC_BINARY, &sync);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, result);

   const struct vn_renderer_submit submit = {
      .batches =
         &(const struct vn_renderer_submit_batch){
            .syncs = &sync,
            .sync_values = &(const uint64_t){ 1 },
            .sync_count = 1,
         },
      .batch_count = 1,
   };
   result = vn_renderer_submit(dev->renderer, &submit);
   if (result != VK_SUCCESS) {
      vn_renderer_sync_destroy(dev->renderer, sync);
      return vn_error(dev->instance, result);
   }

   *out_fd = vn_renderer_sync_export_syncobj(dev->renderer, sync, true);
   vn_renderer_sync_destroy(dev->renderer, sync);

   return *out_fd >= 0 ? VK_SUCCESS : VK_ERROR_TOO_MANY_OBJECTS;
}

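/*
 * SYNC_FD import is emulated: wait on the fd right here, then mark the
 * temporary payload as WSI-signaled so later status queries succeed.
 */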
VkResult
vn_ImportFenceFdKHR(VkDevice device,
                    const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_fence *fence = vn_fence_from_handle(pImportFenceFdInfo->fence);
   ASSERTED const bool sync_file = pImportFenceFdInfo->handleType ==
                                   VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
   const int fd = pImportFenceFdInfo->fd;

   assert(dev->instance->experimental.globalFencing);
   assert(sync_file);
   if (fd >= 0) {
      if (sync_wait(fd, -1))
         return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

      close(fd);
   }

   /* abuse VN_SYNC_TYPE_WSI_SIGNALED */
   vn_fence_signal_wsi(dev, fence);

   return VK_SUCCESS;
}

VkResult
vn_GetFenceFdKHR(VkDevice device,
                 const VkFenceGetFdInfoKHR *pGetFdInfo,
                 int *pFd)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_fence *fence = vn_fence_from_handle(pGetFdInfo->fence);
   const bool sync_file =
      pGetFdInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
   struct vn_sync_payload *payload = fence->payload;

   assert(dev->instance->experimental.globalFencing);
   assert(sync_file);
   int fd = -1;
   if (payload->type == VN_SYNC_TYPE_DEVICE_ONLY) {
      VkResult result = vn_create_sync_file(dev, &fd);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);
   }

   if (sync_file) {
      vn_sync_payload_release(dev, &fence->temporary);
      fence->payload = &fence->permanent;

      /* XXX implies reset operation on the host fence */
   }

   *pFd = fd;
   return VK_SUCCESS;
}

/* semaphore commands */

static VkResult
vn_semaphore_init_payloads(struct vn_device *dev,
                           struct vn_semaphore *sem,
                           uint64_t initial_val,
                           const VkAllocationCallbacks *alloc)
{
   sem->permanent.type = VN_SYNC_TYPE_DEVICE_ONLY;
   sem->temporary.type = VN_SYNC_TYPE_INVALID;
   sem->payload = &sem->permanent;

   return VK_SUCCESS;
}

static void
vn_semaphore_reset_wsi(struct vn_device *dev, struct vn_semaphore *sem)
{
   struct vn_sync_payload *perm = &sem->permanent;

   vn_sync_payload_release(dev, &sem->temporary);

   sem->payload = perm;
}

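/*
 * Record a guest-side signal (WSI acquire or sync_file import) by switching
 * the semaphore to its temporary, WSI-signaled payload; vkQueueSubmit later
 * strips such semaphores from the wait lists it forwards to the renderer.
 */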
void
vn_semaphore_signal_wsi(struct vn_device *dev, struct vn_semaphore *sem)
{
   struct vn_sync_payload *temp = &sem->temporary;

   vn_sync_payload_release(dev, temp);
   temp->type = VN_SYNC_TYPE_WSI_SIGNALED;
   sem->payload = temp;
}

VkResult
vn_CreateSemaphore(VkDevice device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_semaphore *sem = vk_zalloc(alloc, sizeof(*sem), VN_DEFAULT_ALIGN,
                                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&sem->base, VK_OBJECT_TYPE_SEMAPHORE, &dev->base);

   const VkSemaphoreTypeCreateInfo *type_info =
      vk_find_struct_const(pCreateInfo->pNext, SEMAPHORE_TYPE_CREATE_INFO);
   uint64_t initial_val = 0;
   if (type_info && type_info->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE) {
      sem->type = VK_SEMAPHORE_TYPE_TIMELINE;
      initial_val = type_info->initialValue;
   } else {
      sem->type = VK_SEMAPHORE_TYPE_BINARY;
   }

   VkResult result = vn_semaphore_init_payloads(dev, sem, initial_val, alloc);
   if (result != VK_SUCCESS) {
      vk_free(alloc, sem);
      return vn_error(dev->instance, result);
   }

   VkSemaphore sem_handle = vn_semaphore_to_handle(sem);
   vn_async_vkCreateSemaphore(dev->instance, device, pCreateInfo, NULL,
                              &sem_handle);

   *pSemaphore = sem_handle;

   return VK_SUCCESS;
}

void
vn_DestroySemaphore(VkDevice device,
                    VkSemaphore semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_semaphore *sem = vn_semaphore_from_handle(semaphore);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!sem)
      return;

   vn_async_vkDestroySemaphore(dev->instance, device, semaphore, NULL);

   vn_sync_payload_release(dev, &sem->permanent);
   vn_sync_payload_release(dev, &sem->temporary);

   vn_object_base_fini(&sem->base);
   vk_free(alloc, sem);
}

VkResult
vn_GetSemaphoreCounterValue(VkDevice device,
                            VkSemaphore semaphore,
                            uint64_t *pValue)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_semaphore *sem = vn_semaphore_from_handle(semaphore);
   ASSERTED struct vn_sync_payload *payload = sem->payload;

   assert(payload->type == VN_SYNC_TYPE_DEVICE_ONLY);
   return vn_call_vkGetSemaphoreCounterValue(dev->instance, device, semaphore,
                                             pValue);
}

VkResult
vn_SignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO if the semaphore is shared-by-ref, this needs to be synchronous */
   if (false)
      vn_call_vkSignalSemaphore(dev->instance, device, pSignalInfo);
   else
      vn_async_vkSignalSemaphore(dev->instance, device, pSignalInfo);

   return VK_SUCCESS;
}

static VkResult
vn_find_first_signaled_semaphore(VkDevice device,
                                 const VkSemaphore *semaphores,
                                 const uint64_t *values,
                                 uint32_t count)
{
   for (uint32_t i = 0; i < count; i++) {
      uint64_t val = 0;
      VkResult result =
         vn_GetSemaphoreCounterValue(device, semaphores[i], &val);
      if (result != VK_SUCCESS || val >= values[i])
         return result;
   }
   return VK_NOT_READY;
}

static VkResult
vn_remove_signaled_semaphores(VkDevice device,
                              VkSemaphore *semaphores,
                              uint64_t *values,
                              uint32_t *count)
{
   uint32_t cur = 0;
   for (uint32_t i = 0; i < *count; i++) {
      uint64_t val = 0;
      VkResult result =
         vn_GetSemaphoreCounterValue(device, semaphores[i], &val);
      if (result != VK_SUCCESS)
         return result;
      if (val < values[i])
         semaphores[cur++] = semaphores[i];
   }

   *count = cur;
   return cur ? VK_NOT_READY : VK_SUCCESS;
}

VkResult
vn_WaitSemaphores(VkDevice device,
                  const VkSemaphoreWaitInfo *pWaitInfo,
                  uint64_t timeout)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   const int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
   VkResult result = VK_NOT_READY;
   uint32_t iter = 0;
   if (pWaitInfo->semaphoreCount > 1 &&
       !(pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT)) {
      uint32_t semaphore_count = pWaitInfo->semaphoreCount;
      VkSemaphore local_semaphores[8];
      uint64_t local_values[8];
      VkSemaphore *semaphores = local_semaphores;
      uint64_t *values = local_values;
      if (semaphore_count > ARRAY_SIZE(local_semaphores)) {
         semaphores = vk_alloc(
            alloc, (sizeof(*semaphores) + sizeof(*values)) * semaphore_count,
            VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (!semaphores)
            return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

         values = (uint64_t *)&semaphores[semaphore_count];
      }
      memcpy(semaphores, pWaitInfo->pSemaphores,
             sizeof(*semaphores) * semaphore_count);
      memcpy(values, pWaitInfo->pValues, sizeof(*values) * semaphore_count);

      while (result == VK_NOT_READY) {
         result = vn_remove_signaled_semaphores(device, semaphores, values,
                                                &semaphore_count);
         result = vn_update_sync_result(result, abs_timeout, &iter);
      }

      if (semaphores != local_semaphores)
         vk_free(alloc, semaphores);
   } else {
      while (result == VK_NOT_READY) {
         result = vn_find_first_signaled_semaphore(
            device, pWaitInfo->pSemaphores, pWaitInfo->pValues,
            pWaitInfo->semaphoreCount);
         result = vn_update_sync_result(result, abs_timeout, &iter);
      }
   }

   return vn_result(dev->instance, result);
}

VkResult
vn_ImportSemaphoreFdKHR(
   VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_semaphore *sem =
      vn_semaphore_from_handle(pImportSemaphoreFdInfo->semaphore);
   ASSERTED const bool sync_file =
      pImportSemaphoreFdInfo->handleType ==
      VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
   const int fd = pImportSemaphoreFdInfo->fd;

   assert(dev->instance->experimental.globalFencing);
   assert(sync_file);
   if (fd >= 0) {
      if (sync_wait(fd, -1))
         return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

      close(fd);
   }

   /* abuse VN_SYNC_TYPE_WSI_SIGNALED */
   vn_semaphore_signal_wsi(dev, sem);

   return VK_SUCCESS;
}

VkResult
vn_GetSemaphoreFdKHR(VkDevice device,
                     const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
                     int *pFd)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_semaphore *sem = vn_semaphore_from_handle(pGetFdInfo->semaphore);
   const bool sync_file =
      pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
   struct vn_sync_payload *payload = sem->payload;

   assert(dev->instance->experimental.globalFencing);
   assert(sync_file);
   int fd = -1;
   if (payload->type == VN_SYNC_TYPE_DEVICE_ONLY) {
      VkResult result = vn_create_sync_file(dev, &fd);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);
   }

   if (sync_file) {
      vn_sync_payload_release(dev, &sem->temporary);
      sem->payload = &sem->permanent;

      /* XXX implies wait operation on the host semaphore */
   }

   *pFd = fd;
   return VK_SUCCESS;
}

/* event commands */

VkResult
vn_CreateEvent(VkDevice device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_event *ev = vk_zalloc(alloc, sizeof(*ev), VN_DEFAULT_ALIGN,
                                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!ev)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&ev->base, VK_OBJECT_TYPE_EVENT, &dev->base);

   VkEvent ev_handle = vn_event_to_handle(ev);
   vn_async_vkCreateEvent(dev->instance, device, pCreateInfo, NULL,
                          &ev_handle);

   *pEvent = ev_handle;

   return VK_SUCCESS;
}

void
vn_DestroyEvent(VkDevice device,
                VkEvent event,
                const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_event *ev = vn_event_from_handle(event);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!ev)
      return;

   vn_async_vkDestroyEvent(dev->instance, device, event, NULL);

   vn_object_base_fini(&ev->base);
   vk_free(alloc, ev);
}

VkResult
vn_GetEventStatus(VkDevice device, VkEvent event)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO When the renderer supports it (requires a new vk extension), there
    * should be a coherent memory backing the event.
    */
   VkResult result = vn_call_vkGetEventStatus(dev->instance, device, event);

   return vn_result(dev->instance, result);
}

VkResult
vn_SetEvent(VkDevice device, VkEvent event)
{
   struct vn_device *dev = vn_device_from_handle(device);

   VkResult result = vn_call_vkSetEvent(dev->instance, device, event);

   return vn_result(dev->instance, result);
}

VkResult
vn_ResetEvent(VkDevice device, VkEvent event)
{
   struct vn_device *dev = vn_device_from_handle(device);

   VkResult result = vn_call_vkResetEvent(dev->instance, device, event);

   return vn_result(dev->instance, result);
}