GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/freedreno/vulkan/tu_kgsl.c
/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"
25
26
#include <errno.h>
27
#include <fcntl.h>
28
#include <stdint.h>
29
#include <sys/ioctl.h>
30
#include <sys/mman.h>
31
32
#include "msm_kgsl.h"
33
#include "vk_util.h"
34
35
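
/* This backend drives Qualcomm's downstream KGSL kernel interface instead of
 * upstream drm/msm.  Synchronization is built on per-context GPU timestamps:
 * a tu_syncobj simply records the timestamp a submission will retire at,
 * plus whether that timestamp is valid yet.
 */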
struct tu_syncobj {
   struct vk_object_base base;
   uint32_t timestamp;
   bool timestamp_valid;
};
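
/* Wrapper that restarts ioctls interrupted by signals (EINTR/EAGAIN), so
 * callers don't have to handle spurious failures themselves.
 */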
static int
safe_ioctl(int fd, unsigned long request, void *arg)
{
   int ret;

   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}
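
/* KGSL "draw contexts" play the role of drm/msm submitqueues: one is created
 * per Vulkan queue and identified by its drawctxt_id.  Note that the
 * requested queue priority is currently not forwarded to the kernel.
 */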
int
tu_drm_submitqueue_new(const struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id)
{
   struct kgsl_drawctxt_create req = {
      .flags = KGSL_CONTEXT_SAVE_GMEM |
               KGSL_CONTEXT_NO_GMEM_ALLOC |
               KGSL_CONTEXT_PREAMBLE,
   };

   int ret = safe_ioctl(dev->physical_device->local_fd,
                        IOCTL_KGSL_DRAWCTXT_CREATE, &req);
   if (ret)
      return ret;

   *queue_id = req.drawctxt_id;

   return 0;
}

void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
{
   struct kgsl_drawctxt_destroy req = {
      .drawctxt_id = queue_id,
   };

   safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_DRAWCTXT_DESTROY, &req);
}
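
/* BOs are allocated through IOCTL_KGSL_GPUMEM_ALLOC_ID; the kernel picks the
 * GPU virtual address and returns it together with an allocation id, which
 * stands in for a GEM handle on this backend.
 */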
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size,
               enum tu_bo_alloc_flags flags)
{
   struct kgsl_gpumem_alloc_id req = {
      .size = size,
   };

   if (flags & TU_BO_ALLOC_GPU_READ_ONLY)
      req.flags |= KGSL_MEMFLAGS_GPUREADONLY;

   int ret;

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUMEM_ALLOC_ID, &req);
   if (ret) {
      return vk_errorf(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "GPUMEM_ALLOC_ID failed (%s)", strerror(errno));
   }

   *bo = (struct tu_bo) {
      .gem_handle = req.id,
      .size = req.mmapsize,
      .iova = req.gpuaddr,
   };

   return VK_SUCCESS;
}
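
/* Importing a dma-buf is a two-step dance: IOCTL_KGSL_GPUOBJ_IMPORT wraps
 * the fd in a KGSL GPU object, then IOCTL_KGSL_GPUOBJ_INFO is queried for
 * the object's actual size and GPU address, which the import ioctl does not
 * report.  The caller-supplied size parameter is ignored.
 */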
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd)
{
   struct kgsl_gpuobj_import_dma_buf import_dmabuf = {
      .fd = fd,
   };
   struct kgsl_gpuobj_import req = {
      .priv = (uintptr_t)&import_dmabuf,
      .priv_len = sizeof(import_dmabuf),
      .flags = 0,
      .type = KGSL_USER_MEM_TYPE_DMABUF,
   };
   int ret;

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUOBJ_IMPORT, &req);
   if (ret)
      return vk_errorf(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "Failed to import dma-buf (%s)\n", strerror(errno));

   struct kgsl_gpuobj_info info_req = {
      .id = req.id,
   };

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUOBJ_INFO, &info_req);
   if (ret)
      return vk_errorf(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "Failed to get dma-buf info (%s)\n", strerror(errno));

   *bo = (struct tu_bo) {
      .gem_handle = req.id,
      .size = info_req.size,
      .iova = info_req.gpuaddr,
   };

   return VK_SUCCESS;
}

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   tu_stub();

   return -1;
}
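
/* KGSL has no separate "get mmap offset" ioctl; the allocation id itself is
 * encoded page-shifted into the mmap offset (id << 12, assuming 4 KiB
 * pages), and mapping the device fd at that offset maps the BO.
 */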
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = bo->gem_handle << 12;
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;

   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   struct kgsl_gpumem_free_id req = {
      .id = bo->gem_handle
   };

   safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_GPUMEM_FREE_ID, &req);
}
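
/* Thin wrapper around IOCTL_KGSL_DEVICE_GETPROPERTY.  It returns the raw
 * errno-style ioctl result (0 on success), not a VkResult; callers below
 * only test it for non-zero.
 */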
static int
get_kgsl_prop(int fd, unsigned int type, void *value, size_t size)
{
   struct kgsl_device_getproperty getprop = {
      .type = type,
      .value = value,
      .sizebytes = size,
   };

   return safe_ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &getprop);
}
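
/* Physical-device enumeration opens the fixed kgsl device node rather than
 * scanning DRM devices.  The Adreno model number is decoded from chip_id,
 * whose top three bytes hold the core/major/minor digits: a chip_id of
 * 0x06030001 yields gpu_id 630 (an a630), for example.
 */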
VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   static const char path[] = "/dev/kgsl-3d0";
   int fd;

   struct tu_physical_device *device = &instance->physical_devices[0];

   if (instance->vk.enabled_extensions.KHR_display)
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "I can't KHR_display");

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      instance->physical_device_count = 0;
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   struct kgsl_devinfo info;
   if (get_kgsl_prop(fd, KGSL_PROP_DEVICE_INFO, &info, sizeof(info)))
      goto fail;

   uint64_t gmem_iova;
   if (get_kgsl_prop(fd, KGSL_PROP_UCHE_GMEM_VADDR, &gmem_iova, sizeof(gmem_iova)))
      goto fail;

   /* kgsl version check? */

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      mesa_logi("Found compatible device '%s'.", path);

   device->instance = instance;
   device->master_fd = -1;
   device->local_fd = fd;

   device->gpu_id =
      ((info.chip_id >> 24) & 0xff) * 100 +
      ((info.chip_id >> 16) & 0xff) * 10 +
      ((info.chip_id >> 8) & 0xff);
   device->gmem_size = info.gmem_sizebytes;
   device->gmem_base = gmem_iova;

   device->heap.size = tu_get_system_heap_size();
   device->heap.used = 0u;
   device->heap.flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   if (tu_physical_device_init(device, instance) != VK_SUCCESS)
      goto fail;

   instance->physical_device_count = 1;

   return VK_SUCCESS;

fail:
   close(fd);
   return VK_ERROR_INITIALIZATION_FAILED;
}
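
/* Turn a (context, timestamp) pair into a sync-file fd via
 * KGSL_TIMESTAMP_EVENT_FENCE, so the retire point can be exported as a
 * pollable fd or shared with other processes.
 */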
static int
timestamp_to_fd(struct tu_queue *queue, uint32_t timestamp)
{
   int fd;
   struct kgsl_timestamp_event event = {
      .type = KGSL_TIMESTAMP_EVENT_FENCE,
      .context_id = queue->msm_queue_id,
      .timestamp = timestamp,
      .priv = &fd,
      .len = sizeof(fd),
   };

   int ret = safe_ioctl(queue->device->fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event);
   if (ret)
      return -1;

   return fd;
}

/* Return true if timestamp a is greater (more recent) than b.
 * This relies on two live timestamps never being more than (1 << 31) apart,
 * so the wrapped subtraction keeps its sign.
 */
static inline bool
timestamp_cmp(uint32_t a, uint32_t b)
{
   return (int32_t) (a - b) >= 0;
}
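
/* Example: timestamp_cmp(2, 0xfffffffe) is true, since the unsigned
 * difference wraps to 4 and casts to a positive int32_t, so the ordering
 * stays correct across 32-bit timestamp rollover.
 */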

static uint32_t
max_ts(uint32_t a, uint32_t b)
{
   return timestamp_cmp(a, b) ? a : b;
}

static uint32_t
min_ts(uint32_t a, uint32_t b)
{
   return timestamp_cmp(a, b) ? b : a;
}
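
/* Fold a set of semaphores into a single timestamp: the latest one when
 * waiting on all of them, the earliest when waiting on any.  Since the queue
 * executes submissions in order on one KGSL context, one timestamp can stand
 * in for the whole set.  With reset, the source objects are marked
 * unsignaled, matching Vulkan's auto-reset of waited binary semaphores.
 */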
static struct tu_syncobj
sync_merge(const VkSemaphore *syncobjs, uint32_t count, bool wait_all, bool reset)
{
   struct tu_syncobj ret;

   ret.timestamp_valid = false;

   for (uint32_t i = 0; i < count; ++i) {
      TU_FROM_HANDLE(tu_syncobj, sync, syncobjs[i]);

      /* TODO: this means the fence is unsignaled and will never become signaled */
      if (!sync->timestamp_valid)
         continue;

      if (!ret.timestamp_valid)
         ret.timestamp = sync->timestamp;
      else if (wait_all)
         ret.timestamp = max_ts(ret.timestamp, sync->timestamp);
      else
         ret.timestamp = min_ts(ret.timestamp, sync->timestamp);

      ret.timestamp_valid = true;
      if (reset)
         sync->timestamp_valid = false;
   }
   return ret;
}
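
/* Submission: each command buffer's IBs (plus an optional per-pass
 * performance-counter IB) are translated into kgsl_command_object entries
 * and handed to IOCTL_KGSL_GPU_COMMAND.  Wait semaphores become a single
 * timestamp syncpoint, and the timestamp the kernel returns is what signal
 * semaphores and the fence record.
 */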
VKAPI_ATTR VkResult VKAPI_CALL
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   TU_FROM_HANDLE(tu_syncobj, fence, _fence);
   VkResult result = VK_SUCCESS;

   uint32_t max_entry_count = 0;
   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;

      const VkPerformanceQuerySubmitInfoKHR *perf_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              PERFORMANCE_QUERY_SUBMIT_INFO_KHR);

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
         if (perf_info)
            entry_count++;
      }

      max_entry_count = MAX2(max_entry_count, entry_count);
   }

   struct kgsl_command_object *cmds =
      vk_alloc(&queue->device->vk.alloc,
               sizeof(cmds[0]) * max_entry_count, 8,
               VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (cmds == NULL)
      return vk_error(queue->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      uint32_t entry_idx = 0;
      const VkPerformanceQuerySubmitInfoKHR *perf_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              PERFORMANCE_QUERY_SUBMIT_INFO_KHR);

      for (uint32_t j = 0; j < submit->commandBufferCount; j++) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cs *cs = &cmdbuf->cs;

         if (perf_info) {
            struct tu_cs_entry *perf_cs_entry =
               &cmdbuf->device->perfcntrs_pass_cs_entries[perf_info->counterPassIndex];

            cmds[entry_idx++] = (struct kgsl_command_object) {
               .offset = perf_cs_entry->offset,
               .gpuaddr = perf_cs_entry->bo->iova,
               .size = perf_cs_entry->size,
               .flags = KGSL_CMDLIST_IB,
               .id = perf_cs_entry->bo->gem_handle,
            };
         }

         for (unsigned k = 0; k < cs->entry_count; k++) {
            cmds[entry_idx++] = (struct kgsl_command_object) {
               .offset = cs->entries[k].offset,
               .gpuaddr = cs->entries[k].bo->iova,
               .size = cs->entries[k].size,
               .flags = KGSL_CMDLIST_IB,
               .id = cs->entries[k].bo->gem_handle,
            };
         }
      }
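
      /* Collapse all wait semaphores into one timestamp (resetting them) and
       * express it as a single timestamp syncpoint for this batch.
       */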
      struct tu_syncobj s = sync_merge(submit->pWaitSemaphores,
                                       submit->waitSemaphoreCount,
                                       true, true);

      struct kgsl_cmd_syncpoint_timestamp ts = {
         .context_id = queue->msm_queue_id,
         .timestamp = s.timestamp,
      };
      struct kgsl_command_syncpoint sync = {
         .type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP,
         .size = sizeof(ts),
         .priv = (uintptr_t) &ts,
      };

      struct kgsl_gpu_command req = {
         .flags = KGSL_CMDBATCH_SUBMIT_IB_LIST,
         .context_id = queue->msm_queue_id,
         .cmdlist = (uint64_t) (uintptr_t) cmds,
         .numcmds = entry_idx,
         .cmdsize = sizeof(struct kgsl_command_object),
         .synclist = (uintptr_t) &sync,
         .syncsize = sizeof(struct kgsl_command_syncpoint),
         .numsyncs = s.timestamp_valid ? 1 : 0,
      };

      int ret = safe_ioctl(queue->device->physical_device->local_fd,
                           IOCTL_KGSL_GPU_COMMAND, &req);
      if (ret) {
         result = tu_device_set_lost(queue->device,
                                     "submit failed: %s\n", strerror(errno));
         goto fail;
      }

      for (uint32_t i = 0; i < submit->signalSemaphoreCount; i++) {
         TU_FROM_HANDLE(tu_syncobj, sem, submit->pSignalSemaphores[i]);
         sem->timestamp = req.timestamp;
         sem->timestamp_valid = true;
      }

      /* no need to merge fences as queue execution is serialized */
      if (i == submitCount - 1) {
         int fd = timestamp_to_fd(queue, req.timestamp);
         if (fd < 0) {
            result = tu_device_set_lost(queue->device,
                                        "Failed to create sync file for timestamp: %s\n",
                                        strerror(errno));
            goto fail;
         }

         if (queue->fence >= 0)
            close(queue->fence);
         queue->fence = fd;

         if (fence) {
            fence->timestamp = req.timestamp;
            fence->timestamp_valid = true;
         }
      }
   }
fail:
   vk_free(&queue->device->vk.alloc, cmds);

   return result;
}
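
/* Shared constructor for fences and semaphores; both are a tu_syncobj
 * underneath and start out unsignaled (creating a pre-signaled fence is
 * still a TODO, see the tu_finishme below).
 */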
static VkResult
sync_create(VkDevice _device,
            bool signaled,
            bool fence,
            const VkAllocationCallbacks *pAllocator,
            void **p_sync)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_syncobj *sync =
      vk_object_alloc(&device->vk, pAllocator, sizeof(*sync),
                      fence ? VK_OBJECT_TYPE_FENCE : VK_OBJECT_TYPE_SEMAPHORE);
   if (!sync)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (signaled)
      tu_finishme("CREATE FENCE SIGNALED");

   sync->timestamp_valid = false;
   *p_sync = sync;

   return VK_SUCCESS;
}
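
/* External semaphore/fence fd import and export are still stubs on the KGSL
 * backend: they log via tu_finishme()/tu_stub() and report success without
 * doing the work.
 */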
VKAPI_ATTR VkResult VKAPI_CALL
tu_ImportSemaphoreFdKHR(VkDevice _device,
                        const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
   tu_finishme("ImportSemaphoreFdKHR");
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_GetSemaphoreFdKHR(VkDevice _device,
                     const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
                     int *pFd)
{
   tu_finishme("GetSemaphoreFdKHR");
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateSemaphore(VkDevice device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   return sync_create(device, false, false, pAllocator, (void**) pSemaphore);
}

VKAPI_ATTR void VKAPI_CALL
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, semaphore);

   if (!sync)
      return;

   vk_object_free(&device->vk, pAllocator, sync);
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_ImportFenceFdKHR(VkDevice _device,
                    const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
   tu_stub();

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_GetFenceFdKHR(VkDevice _device,
                 const VkFenceGetFdInfoKHR *pGetFdInfo,
                 int *pFd)
{
   tu_stub();

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateFence(VkDevice device,
               const VkFenceCreateInfo *info,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   return sync_create(device, info->flags & VK_FENCE_CREATE_SIGNALED_BIT, true,
                      pAllocator, (void**) pFence);
}

VKAPI_ATTR void VKAPI_CALL
tu_DestroyFence(VkDevice _device, VkFence fence, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, fence);

   if (!sync)
      return;

   vk_object_free(&device->vk, pAllocator, sync);
}
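
/* Fence waits reduce to a single kernel timestamp wait on queue 0's context.
 * Note the Vulkan timeout is in nanoseconds while the kgsl ioctl takes
 * milliseconds, hence the division by 1000000.
 */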
VKAPI_ATTR VkResult VKAPI_CALL
tu_WaitForFences(VkDevice _device,
                 uint32_t count,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_syncobj s = sync_merge((const VkSemaphore*) pFences, count, waitAll, false);

   if (!s.timestamp_valid)
      return VK_SUCCESS;

   int ret = ioctl(device->fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
                   &(struct kgsl_device_waittimestamp_ctxtid) {
                      .context_id = device->queues[0]->msm_queue_id,
                      .timestamp = s.timestamp,
                      .timeout = timeout / 1000000,
                   });
   if (ret) {
      assert(errno == ETIME);
      return VK_TIMEOUT;
   }

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_ResetFences(VkDevice _device, uint32_t count, const VkFence *pFences)
{
   for (uint32_t i = 0; i < count; i++) {
      TU_FROM_HANDLE(tu_syncobj, sync, pFences[i]);
      sync->timestamp_valid = false;
   }
   return VK_SUCCESS;
}
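
/* A zero-timeout timestamp wait doubles as a non-blocking status query. */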
VKAPI_ATTR VkResult VKAPI_CALL
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, _fence);

   if (!sync->timestamp_valid)
      return VK_NOT_READY;

   int ret = ioctl(device->fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
                   &(struct kgsl_device_waittimestamp_ctxtid) {
                      .context_id = device->queues[0]->msm_queue_id,
                      .timestamp = sync->timestamp,
                      .timeout = 0,
                   });
   if (ret) {
      assert(errno == ETIME);
      return VK_NOT_READY;
   }

   return VK_SUCCESS;
}

int
tu_signal_fences(struct tu_device *device, struct tu_syncobj *fence1, struct tu_syncobj *fence2)
{
   tu_finishme("tu_signal_fences");
   return 0;
}

int
tu_syncobj_to_fd(struct tu_device *device, struct tu_syncobj *sync)
{
   tu_finishme("tu_syncobj_to_fd");
   return -1;
}

VkResult
tu_device_submit_deferred_locked(struct tu_device *dev)
{
   tu_finishme("tu_device_submit_deferred_locked");

   return VK_SUCCESS;
}
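
/* On Android, releasing a swapchain image hands the consumer a native fence
 * fd: the merged wait-semaphore timestamp is exported as a sync file, or -1
 * when there is nothing to wait on.
 */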
#ifdef ANDROID
VKAPI_ATTR VkResult VKAPI_CALL
tu_QueueSignalReleaseImageANDROID(VkQueue _queue,
                                  uint32_t waitSemaphoreCount,
                                  const VkSemaphore *pWaitSemaphores,
                                  VkImage image,
                                  int *pNativeFenceFd)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   if (!pNativeFenceFd)
      return VK_SUCCESS;

   struct tu_syncobj s = sync_merge(pWaitSemaphores, waitSemaphoreCount, true, true);

   if (!s.timestamp_valid) {
      *pNativeFenceFd = -1;
      return VK_SUCCESS;
   }

   *pNativeFenceFd = timestamp_to_fd(queue, s.timestamp);

   return VK_SUCCESS;
}
#endif