Path: blob/21.2-virgl/src/virtio/vulkan/vn_queue.c
4560 views
/*1* Copyright 2019 Google LLC2* SPDX-License-Identifier: MIT3*4* based in part on anv and radv which are:5* Copyright © 2015 Intel Corporation6* Copyright © 2016 Red Hat.7* Copyright © 2016 Bas Nieuwenhuizen8*/910#include "vn_queue.h"1112#include "util/libsync.h"13#include "venus-protocol/vn_protocol_driver_event.h"14#include "venus-protocol/vn_protocol_driver_fence.h"15#include "venus-protocol/vn_protocol_driver_queue.h"16#include "venus-protocol/vn_protocol_driver_semaphore.h"1718#include "vn_device.h"19#include "vn_device_memory.h"20#include "vn_renderer.h"2122/* queue commands */2324void25vn_GetDeviceQueue(VkDevice device,26uint32_t queueFamilyIndex,27uint32_t queueIndex,28VkQueue *pQueue)29{30struct vn_device *dev = vn_device_from_handle(device);3132for (uint32_t i = 0; i < dev->queue_count; i++) {33struct vn_queue *queue = &dev->queues[i];34if (queue->family == queueFamilyIndex && queue->index == queueIndex) {35assert(!queue->flags);36*pQueue = vn_queue_to_handle(queue);37return;38}39}40unreachable("bad queue family/index");41}4243void44vn_GetDeviceQueue2(VkDevice device,45const VkDeviceQueueInfo2 *pQueueInfo,46VkQueue *pQueue)47{48struct vn_device *dev = vn_device_from_handle(device);4950for (uint32_t i = 0; i < dev->queue_count; i++) {51struct vn_queue *queue = &dev->queues[i];52if (queue->family == pQueueInfo->queueFamilyIndex &&53queue->index == pQueueInfo->queueIndex &&54queue->flags == pQueueInfo->flags) {55*pQueue = vn_queue_to_handle(queue);56return;57}58}59unreachable("bad queue family/index");60}6162static void63vn_semaphore_reset_wsi(struct vn_device *dev, struct vn_semaphore *sem);6465struct vn_queue_submission {66VkStructureType batch_type;67VkQueue queue;68uint32_t batch_count;69union {70const void *batches;71const VkSubmitInfo *submit_batches;72const VkBindSparseInfo *bind_sparse_batches;73};74VkFence fence;7576uint32_t wait_semaphore_count;77uint32_t wait_wsi_count;7879struct {80void *storage;8182union {83void *batches;84VkSubmitInfo *submit_batches;85VkBindSparseInfo *bind_sparse_batches;86};87VkSemaphore *semaphores;88} temp;89};9091static void92vn_queue_submission_count_batch_semaphores(struct vn_queue_submission *submit,93uint32_t batch_index)94{95union {96const VkSubmitInfo *submit_batch;97const VkBindSparseInfo *bind_sparse_batch;98} u;99const VkSemaphore *wait_sems;100uint32_t wait_count;101switch (submit->batch_type) {102case VK_STRUCTURE_TYPE_SUBMIT_INFO:103u.submit_batch = &submit->submit_batches[batch_index];104wait_sems = u.submit_batch->pWaitSemaphores;105wait_count = u.submit_batch->waitSemaphoreCount;106break;107case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:108u.bind_sparse_batch = &submit->bind_sparse_batches[batch_index];109wait_sems = u.bind_sparse_batch->pWaitSemaphores;110wait_count = u.bind_sparse_batch->waitSemaphoreCount;111break;112default:113unreachable("unexpected batch type");114break;115}116117submit->wait_semaphore_count += wait_count;118for (uint32_t i = 0; i < wait_count; i++) {119struct vn_semaphore *sem = vn_semaphore_from_handle(wait_sems[i]);120const struct vn_sync_payload *payload = sem->payload;121122if (payload->type == VN_SYNC_TYPE_WSI_SIGNALED)123submit->wait_wsi_count++;124}125}126127static void128vn_queue_submission_count_semaphores(struct vn_queue_submission *submit)129{130submit->wait_semaphore_count = 0;131submit->wait_wsi_count = 0;132133for (uint32_t i = 0; i < submit->batch_count; i++)134vn_queue_submission_count_batch_semaphores(submit, i);135}136137static VkResult138vn_queue_submission_alloc_storage(struct vn_queue_submission *submit)139{140struct vn_queue *queue = vn_queue_from_handle(submit->queue);141const VkAllocationCallbacks *alloc = &queue->device->base.base.alloc;142size_t alloc_size = 0;143size_t semaphores_offset = 0;144145/* we want to filter out VN_SYNC_TYPE_WSI_SIGNALED wait semaphores */146if (submit->wait_wsi_count) {147switch (submit->batch_type) {148case VK_STRUCTURE_TYPE_SUBMIT_INFO:149alloc_size += sizeof(VkSubmitInfo) * submit->batch_count;150break;151case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:152alloc_size += sizeof(VkBindSparseInfo) * submit->batch_count;153break;154default:155unreachable("unexpected batch type");156break;157}158159semaphores_offset = alloc_size;160alloc_size += sizeof(*submit->temp.semaphores) *161(submit->wait_semaphore_count - submit->wait_wsi_count);162}163164if (!alloc_size) {165submit->temp.storage = NULL;166return VK_SUCCESS;167}168169submit->temp.storage = vk_alloc(alloc, alloc_size, VN_DEFAULT_ALIGN,170VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);171if (!submit->temp.storage)172return VK_ERROR_OUT_OF_HOST_MEMORY;173174submit->temp.batches = submit->temp.storage;175submit->temp.semaphores = submit->temp.storage + semaphores_offset;176177return VK_SUCCESS;178}179180static uint32_t181vn_queue_submission_filter_batch_wsi_semaphores(182struct vn_queue_submission *submit,183uint32_t batch_index,184uint32_t sem_base)185{186struct vn_queue *queue = vn_queue_from_handle(submit->queue);187188union {189VkSubmitInfo *submit_batch;190VkBindSparseInfo *bind_sparse_batch;191} u;192const VkSemaphore *src_sems;193uint32_t src_count;194switch (submit->batch_type) {195case VK_STRUCTURE_TYPE_SUBMIT_INFO:196u.submit_batch = &submit->temp.submit_batches[batch_index];197src_sems = u.submit_batch->pWaitSemaphores;198src_count = u.submit_batch->waitSemaphoreCount;199break;200case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:201u.bind_sparse_batch = &submit->temp.bind_sparse_batches[batch_index];202src_sems = u.bind_sparse_batch->pWaitSemaphores;203src_count = u.bind_sparse_batch->waitSemaphoreCount;204break;205default:206unreachable("unexpected batch type");207break;208}209210VkSemaphore *dst_sems = &submit->temp.semaphores[sem_base];211uint32_t dst_count = 0;212213/* filter out VN_SYNC_TYPE_WSI_SIGNALED wait semaphores */214for (uint32_t i = 0; i < src_count; i++) {215struct vn_semaphore *sem = vn_semaphore_from_handle(src_sems[i]);216const struct vn_sync_payload *payload = sem->payload;217218if (payload->type == VN_SYNC_TYPE_WSI_SIGNALED)219vn_semaphore_reset_wsi(queue->device, sem);220else221dst_sems[dst_count++] = src_sems[i];222}223224switch (submit->batch_type) {225case VK_STRUCTURE_TYPE_SUBMIT_INFO:226u.submit_batch->pWaitSemaphores = dst_sems;227u.submit_batch->waitSemaphoreCount = dst_count;228break;229case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:230u.bind_sparse_batch->pWaitSemaphores = dst_sems;231u.bind_sparse_batch->waitSemaphoreCount = dst_count;232break;233default:234break;235}236237return dst_count;238}239240static void241vn_queue_submission_setup_batches(struct vn_queue_submission *submit)242{243if (!submit->temp.storage)244return;245246/* make a copy because we need to filter out WSI semaphores */247if (submit->wait_wsi_count) {248switch (submit->batch_type) {249case VK_STRUCTURE_TYPE_SUBMIT_INFO:250memcpy(submit->temp.submit_batches, submit->submit_batches,251sizeof(submit->submit_batches[0]) * submit->batch_count);252submit->submit_batches = submit->temp.submit_batches;253break;254case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:255memcpy(submit->temp.bind_sparse_batches, submit->bind_sparse_batches,256sizeof(submit->bind_sparse_batches[0]) * submit->batch_count);257submit->bind_sparse_batches = submit->temp.bind_sparse_batches;258break;259default:260unreachable("unexpected batch type");261break;262}263}264265uint32_t wait_sem_base = 0;266for (uint32_t i = 0; i < submit->batch_count; i++) {267if (submit->wait_wsi_count) {268wait_sem_base += vn_queue_submission_filter_batch_wsi_semaphores(269submit, i, wait_sem_base);270}271}272}273274static VkResult275vn_queue_submission_prepare_submit(struct vn_queue_submission *submit,276VkQueue queue,277uint32_t batch_count,278const VkSubmitInfo *submit_batches,279VkFence fence)280{281submit->batch_type = VK_STRUCTURE_TYPE_SUBMIT_INFO;282submit->queue = queue;283submit->batch_count = batch_count;284submit->submit_batches = submit_batches;285submit->fence = fence;286287vn_queue_submission_count_semaphores(submit);288289VkResult result = vn_queue_submission_alloc_storage(submit);290if (result != VK_SUCCESS)291return result;292293vn_queue_submission_setup_batches(submit);294295return VK_SUCCESS;296}297298static VkResult299vn_queue_submission_prepare_bind_sparse(300struct vn_queue_submission *submit,301VkQueue queue,302uint32_t batch_count,303const VkBindSparseInfo *bind_sparse_batches,304VkFence fence)305{306submit->batch_type = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;307submit->queue = queue;308submit->batch_count = batch_count;309submit->bind_sparse_batches = bind_sparse_batches;310submit->fence = fence;311312vn_queue_submission_count_semaphores(submit);313314VkResult result = vn_queue_submission_alloc_storage(submit);315if (result != VK_SUCCESS)316return result;317318vn_queue_submission_setup_batches(submit);319320return VK_SUCCESS;321}322323static void324vn_queue_submission_cleanup(struct vn_queue_submission *submit)325{326struct vn_queue *queue = vn_queue_from_handle(submit->queue);327const VkAllocationCallbacks *alloc = &queue->device->base.base.alloc;328329vk_free(alloc, submit->temp.storage);330}331332VkResult333vn_QueueSubmit(VkQueue _queue,334uint32_t submitCount,335const VkSubmitInfo *pSubmits,336VkFence fence)337{338struct vn_queue *queue = vn_queue_from_handle(_queue);339struct vn_device *dev = queue->device;340341struct vn_queue_submission submit;342VkResult result = vn_queue_submission_prepare_submit(343&submit, _queue, submitCount, pSubmits, fence);344if (result != VK_SUCCESS)345return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);346347const struct vn_device_memory *wsi_mem = NULL;348if (submit.batch_count == 1) {349const struct wsi_memory_signal_submit_info *info = vk_find_struct_const(350submit.submit_batches[0].pNext, WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);351if (info) {352wsi_mem = vn_device_memory_from_handle(info->memory);353assert(!wsi_mem->base_memory && wsi_mem->base_bo);354}355}356357result =358vn_call_vkQueueSubmit(dev->instance, submit.queue, submit.batch_count,359submit.submit_batches, submit.fence);360if (result != VK_SUCCESS) {361vn_queue_submission_cleanup(&submit);362return vn_error(dev->instance, result);363}364365if (wsi_mem) {366/* XXX this is always false and kills the performance */367if (dev->instance->renderer_info.has_implicit_fencing) {368vn_renderer_submit(dev->renderer, &(const struct vn_renderer_submit){369.bos = &wsi_mem->base_bo,370.bo_count = 1,371});372} else {373if (VN_DEBUG(WSI)) {374static uint32_t ratelimit;375if (ratelimit < 10) {376vn_log(dev->instance,377"forcing vkQueueWaitIdle before presenting");378ratelimit++;379}380}381382vn_QueueWaitIdle(submit.queue);383}384}385386vn_queue_submission_cleanup(&submit);387388return VK_SUCCESS;389}390391VkResult392vn_QueueBindSparse(VkQueue _queue,393uint32_t bindInfoCount,394const VkBindSparseInfo *pBindInfo,395VkFence fence)396{397struct vn_queue *queue = vn_queue_from_handle(_queue);398struct vn_device *dev = queue->device;399400struct vn_queue_submission submit;401VkResult result = vn_queue_submission_prepare_bind_sparse(402&submit, _queue, bindInfoCount, pBindInfo, fence);403if (result != VK_SUCCESS)404return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);405406result = vn_call_vkQueueBindSparse(407dev->instance, submit.queue, submit.batch_count,408submit.bind_sparse_batches, submit.fence);409if (result != VK_SUCCESS) {410vn_queue_submission_cleanup(&submit);411return vn_error(dev->instance, result);412}413414vn_queue_submission_cleanup(&submit);415416return VK_SUCCESS;417}418419VkResult420vn_QueueWaitIdle(VkQueue _queue)421{422struct vn_queue *queue = vn_queue_from_handle(_queue);423VkDevice device = vn_device_to_handle(queue->device);424425VkResult result = vn_QueueSubmit(_queue, 0, NULL, queue->wait_fence);426if (result != VK_SUCCESS)427return result;428429result = vn_WaitForFences(device, 1, &queue->wait_fence, true, UINT64_MAX);430vn_ResetFences(device, 1, &queue->wait_fence);431432return vn_result(queue->device->instance, result);433}434435/* fence commands */436437static void438vn_sync_payload_release(struct vn_device *dev,439struct vn_sync_payload *payload)440{441payload->type = VN_SYNC_TYPE_INVALID;442}443444static VkResult445vn_fence_init_payloads(struct vn_device *dev,446struct vn_fence *fence,447bool signaled,448const VkAllocationCallbacks *alloc)449{450fence->permanent.type = VN_SYNC_TYPE_DEVICE_ONLY;451fence->temporary.type = VN_SYNC_TYPE_INVALID;452fence->payload = &fence->permanent;453454return VK_SUCCESS;455}456457void458vn_fence_signal_wsi(struct vn_device *dev, struct vn_fence *fence)459{460struct vn_sync_payload *temp = &fence->temporary;461462vn_sync_payload_release(dev, temp);463temp->type = VN_SYNC_TYPE_WSI_SIGNALED;464fence->payload = temp;465}466467VkResult468vn_CreateFence(VkDevice device,469const VkFenceCreateInfo *pCreateInfo,470const VkAllocationCallbacks *pAllocator,471VkFence *pFence)472{473struct vn_device *dev = vn_device_from_handle(device);474const VkAllocationCallbacks *alloc =475pAllocator ? pAllocator : &dev->base.base.alloc;476477VkFenceCreateInfo local_create_info;478if (vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO)) {479local_create_info = *pCreateInfo;480local_create_info.pNext = NULL;481pCreateInfo = &local_create_info;482}483484struct vn_fence *fence = vk_zalloc(alloc, sizeof(*fence), VN_DEFAULT_ALIGN,485VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);486if (!fence)487return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);488489vn_object_base_init(&fence->base, VK_OBJECT_TYPE_FENCE, &dev->base);490491VkResult result = vn_fence_init_payloads(492dev, fence, pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT, alloc);493if (result != VK_SUCCESS) {494vk_free(alloc, fence);495return vn_error(dev->instance, result);496}497498VkFence fence_handle = vn_fence_to_handle(fence);499vn_async_vkCreateFence(dev->instance, device, pCreateInfo, NULL,500&fence_handle);501502*pFence = fence_handle;503504return VK_SUCCESS;505}506507void508vn_DestroyFence(VkDevice device,509VkFence _fence,510const VkAllocationCallbacks *pAllocator)511{512struct vn_device *dev = vn_device_from_handle(device);513struct vn_fence *fence = vn_fence_from_handle(_fence);514const VkAllocationCallbacks *alloc =515pAllocator ? pAllocator : &dev->base.base.alloc;516517if (!fence)518return;519520vn_async_vkDestroyFence(dev->instance, device, _fence, NULL);521522vn_sync_payload_release(dev, &fence->permanent);523vn_sync_payload_release(dev, &fence->temporary);524525vn_object_base_fini(&fence->base);526vk_free(alloc, fence);527}528529VkResult530vn_ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences)531{532struct vn_device *dev = vn_device_from_handle(device);533534/* TODO if the fence is shared-by-ref, this needs to be synchronous */535if (false)536vn_call_vkResetFences(dev->instance, device, fenceCount, pFences);537else538vn_async_vkResetFences(dev->instance, device, fenceCount, pFences);539540for (uint32_t i = 0; i < fenceCount; i++) {541struct vn_fence *fence = vn_fence_from_handle(pFences[i]);542struct vn_sync_payload *perm = &fence->permanent;543544vn_sync_payload_release(dev, &fence->temporary);545546assert(perm->type == VN_SYNC_TYPE_DEVICE_ONLY);547fence->payload = perm;548}549550return VK_SUCCESS;551}552553VkResult554vn_GetFenceStatus(VkDevice device, VkFence _fence)555{556struct vn_device *dev = vn_device_from_handle(device);557struct vn_fence *fence = vn_fence_from_handle(_fence);558struct vn_sync_payload *payload = fence->payload;559560VkResult result;561switch (payload->type) {562case VN_SYNC_TYPE_DEVICE_ONLY:563result = vn_call_vkGetFenceStatus(dev->instance, device, _fence);564break;565case VN_SYNC_TYPE_WSI_SIGNALED:566result = VK_SUCCESS;567break;568default:569unreachable("unexpected fence payload type");570break;571}572573return vn_result(dev->instance, result);574}575576static VkResult577vn_find_first_signaled_fence(VkDevice device,578const VkFence *fences,579uint32_t count)580{581for (uint32_t i = 0; i < count; i++) {582VkResult result = vn_GetFenceStatus(device, fences[i]);583if (result == VK_SUCCESS || result < 0)584return result;585}586return VK_NOT_READY;587}588589static VkResult590vn_remove_signaled_fences(VkDevice device, VkFence *fences, uint32_t *count)591{592uint32_t cur = 0;593for (uint32_t i = 0; i < *count; i++) {594VkResult result = vn_GetFenceStatus(device, fences[i]);595if (result != VK_SUCCESS) {596if (result < 0)597return result;598fences[cur++] = fences[i];599}600}601602*count = cur;603return cur ? VK_NOT_READY : VK_SUCCESS;604}605606static VkResult607vn_update_sync_result(VkResult result, int64_t abs_timeout, uint32_t *iter)608{609switch (result) {610case VK_NOT_READY:611if (abs_timeout != OS_TIMEOUT_INFINITE &&612os_time_get_nano() >= abs_timeout)613result = VK_TIMEOUT;614else615vn_relax(iter);616break;617default:618assert(result == VK_SUCCESS || result < 0);619break;620}621622return result;623}624625VkResult626vn_WaitForFences(VkDevice device,627uint32_t fenceCount,628const VkFence *pFences,629VkBool32 waitAll,630uint64_t timeout)631{632struct vn_device *dev = vn_device_from_handle(device);633const VkAllocationCallbacks *alloc = &dev->base.base.alloc;634635const int64_t abs_timeout = os_time_get_absolute_timeout(timeout);636VkResult result = VK_NOT_READY;637uint32_t iter = 0;638if (fenceCount > 1 && waitAll) {639VkFence local_fences[8];640VkFence *fences = local_fences;641if (fenceCount > ARRAY_SIZE(local_fences)) {642fences =643vk_alloc(alloc, sizeof(*fences) * fenceCount, VN_DEFAULT_ALIGN,644VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);645if (!fences)646return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);647}648memcpy(fences, pFences, sizeof(*fences) * fenceCount);649650while (result == VK_NOT_READY) {651result = vn_remove_signaled_fences(device, fences, &fenceCount);652result = vn_update_sync_result(result, abs_timeout, &iter);653}654655if (fences != local_fences)656vk_free(alloc, fences);657} else {658while (result == VK_NOT_READY) {659result = vn_find_first_signaled_fence(device, pFences, fenceCount);660result = vn_update_sync_result(result, abs_timeout, &iter);661}662}663664return vn_result(dev->instance, result);665}666667static VkResult668vn_create_sync_file(struct vn_device *dev, int *out_fd)669{670struct vn_renderer_sync *sync;671VkResult result = vn_renderer_sync_create(dev->renderer, 0,672VN_RENDERER_SYNC_BINARY, &sync);673if (result != VK_SUCCESS)674return vn_error(dev->instance, result);675676const struct vn_renderer_submit submit = {677.batches =678&(const struct vn_renderer_submit_batch){679.syncs = &sync,680.sync_values = &(const uint64_t){ 1 },681.sync_count = 1,682},683.batch_count = 1,684};685result = vn_renderer_submit(dev->renderer, &submit);686if (result != VK_SUCCESS) {687vn_renderer_sync_destroy(dev->renderer, sync);688return vn_error(dev->instance, result);689}690691*out_fd = vn_renderer_sync_export_syncobj(dev->renderer, sync, true);692vn_renderer_sync_destroy(dev->renderer, sync);693694return *out_fd >= 0 ? VK_SUCCESS : VK_ERROR_TOO_MANY_OBJECTS;695}696697VkResult698vn_ImportFenceFdKHR(VkDevice device,699const VkImportFenceFdInfoKHR *pImportFenceFdInfo)700{701struct vn_device *dev = vn_device_from_handle(device);702struct vn_fence *fence = vn_fence_from_handle(pImportFenceFdInfo->fence);703ASSERTED const bool sync_file = pImportFenceFdInfo->handleType ==704VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;705const int fd = pImportFenceFdInfo->fd;706707assert(dev->instance->experimental.globalFencing);708assert(sync_file);709if (fd >= 0) {710if (sync_wait(fd, -1))711return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);712713close(fd);714}715716/* abuse VN_SYNC_TYPE_WSI_SIGNALED */717vn_fence_signal_wsi(dev, fence);718719return VK_SUCCESS;720}721722VkResult723vn_GetFenceFdKHR(VkDevice device,724const VkFenceGetFdInfoKHR *pGetFdInfo,725int *pFd)726{727struct vn_device *dev = vn_device_from_handle(device);728struct vn_fence *fence = vn_fence_from_handle(pGetFdInfo->fence);729const bool sync_file =730pGetFdInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;731struct vn_sync_payload *payload = fence->payload;732733assert(dev->instance->experimental.globalFencing);734assert(sync_file);735int fd = -1;736if (payload->type == VN_SYNC_TYPE_DEVICE_ONLY) {737VkResult result = vn_create_sync_file(dev, &fd);738if (result != VK_SUCCESS)739return vn_error(dev->instance, result);740}741742if (sync_file) {743vn_sync_payload_release(dev, &fence->temporary);744fence->payload = &fence->permanent;745746/* XXX implies reset operation on the host fence */747}748749*pFd = fd;750return VK_SUCCESS;751}752753/* semaphore commands */754755static VkResult756vn_semaphore_init_payloads(struct vn_device *dev,757struct vn_semaphore *sem,758uint64_t initial_val,759const VkAllocationCallbacks *alloc)760{761sem->permanent.type = VN_SYNC_TYPE_DEVICE_ONLY;762sem->temporary.type = VN_SYNC_TYPE_INVALID;763sem->payload = &sem->permanent;764765return VK_SUCCESS;766}767768static void769vn_semaphore_reset_wsi(struct vn_device *dev, struct vn_semaphore *sem)770{771struct vn_sync_payload *perm = &sem->permanent;772773vn_sync_payload_release(dev, &sem->temporary);774775sem->payload = perm;776}777778void779vn_semaphore_signal_wsi(struct vn_device *dev, struct vn_semaphore *sem)780{781struct vn_sync_payload *temp = &sem->temporary;782783vn_sync_payload_release(dev, temp);784temp->type = VN_SYNC_TYPE_WSI_SIGNALED;785sem->payload = temp;786}787788VkResult789vn_CreateSemaphore(VkDevice device,790const VkSemaphoreCreateInfo *pCreateInfo,791const VkAllocationCallbacks *pAllocator,792VkSemaphore *pSemaphore)793{794struct vn_device *dev = vn_device_from_handle(device);795const VkAllocationCallbacks *alloc =796pAllocator ? pAllocator : &dev->base.base.alloc;797798struct vn_semaphore *sem = vk_zalloc(alloc, sizeof(*sem), VN_DEFAULT_ALIGN,799VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);800if (!sem)801return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);802803vn_object_base_init(&sem->base, VK_OBJECT_TYPE_SEMAPHORE, &dev->base);804805const VkSemaphoreTypeCreateInfo *type_info =806vk_find_struct_const(pCreateInfo->pNext, SEMAPHORE_TYPE_CREATE_INFO);807uint64_t initial_val = 0;808if (type_info && type_info->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE) {809sem->type = VK_SEMAPHORE_TYPE_TIMELINE;810initial_val = type_info->initialValue;811} else {812sem->type = VK_SEMAPHORE_TYPE_BINARY;813}814815VkResult result = vn_semaphore_init_payloads(dev, sem, initial_val, alloc);816if (result != VK_SUCCESS) {817vk_free(alloc, sem);818return vn_error(dev->instance, result);819}820821VkSemaphore sem_handle = vn_semaphore_to_handle(sem);822vn_async_vkCreateSemaphore(dev->instance, device, pCreateInfo, NULL,823&sem_handle);824825*pSemaphore = sem_handle;826827return VK_SUCCESS;828}829830void831vn_DestroySemaphore(VkDevice device,832VkSemaphore semaphore,833const VkAllocationCallbacks *pAllocator)834{835struct vn_device *dev = vn_device_from_handle(device);836struct vn_semaphore *sem = vn_semaphore_from_handle(semaphore);837const VkAllocationCallbacks *alloc =838pAllocator ? pAllocator : &dev->base.base.alloc;839840if (!sem)841return;842843vn_async_vkDestroySemaphore(dev->instance, device, semaphore, NULL);844845vn_sync_payload_release(dev, &sem->permanent);846vn_sync_payload_release(dev, &sem->temporary);847848vn_object_base_fini(&sem->base);849vk_free(alloc, sem);850}851852VkResult853vn_GetSemaphoreCounterValue(VkDevice device,854VkSemaphore semaphore,855uint64_t *pValue)856{857struct vn_device *dev = vn_device_from_handle(device);858struct vn_semaphore *sem = vn_semaphore_from_handle(semaphore);859ASSERTED struct vn_sync_payload *payload = sem->payload;860861assert(payload->type == VN_SYNC_TYPE_DEVICE_ONLY);862return vn_call_vkGetSemaphoreCounterValue(dev->instance, device, semaphore,863pValue);864}865866VkResult867vn_SignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo)868{869struct vn_device *dev = vn_device_from_handle(device);870871/* TODO if the semaphore is shared-by-ref, this needs to be synchronous */872if (false)873vn_call_vkSignalSemaphore(dev->instance, device, pSignalInfo);874else875vn_async_vkSignalSemaphore(dev->instance, device, pSignalInfo);876877return VK_SUCCESS;878}879880static VkResult881vn_find_first_signaled_semaphore(VkDevice device,882const VkSemaphore *semaphores,883const uint64_t *values,884uint32_t count)885{886for (uint32_t i = 0; i < count; i++) {887uint64_t val = 0;888VkResult result =889vn_GetSemaphoreCounterValue(device, semaphores[i], &val);890if (result != VK_SUCCESS || val >= values[i])891return result;892}893return VK_NOT_READY;894}895896static VkResult897vn_remove_signaled_semaphores(VkDevice device,898VkSemaphore *semaphores,899uint64_t *values,900uint32_t *count)901{902uint32_t cur = 0;903for (uint32_t i = 0; i < *count; i++) {904uint64_t val = 0;905VkResult result =906vn_GetSemaphoreCounterValue(device, semaphores[i], &val);907if (result != VK_SUCCESS)908return result;909if (val < values[i])910semaphores[cur++] = semaphores[i];911}912913*count = cur;914return cur ? VK_NOT_READY : VK_SUCCESS;915}916917VkResult918vn_WaitSemaphores(VkDevice device,919const VkSemaphoreWaitInfo *pWaitInfo,920uint64_t timeout)921{922struct vn_device *dev = vn_device_from_handle(device);923const VkAllocationCallbacks *alloc = &dev->base.base.alloc;924925const int64_t abs_timeout = os_time_get_absolute_timeout(timeout);926VkResult result = VK_NOT_READY;927uint32_t iter = 0;928if (pWaitInfo->semaphoreCount > 1 &&929!(pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT)) {930uint32_t semaphore_count = pWaitInfo->semaphoreCount;931VkSemaphore local_semaphores[8];932uint64_t local_values[8];933VkSemaphore *semaphores = local_semaphores;934uint64_t *values = local_values;935if (semaphore_count > ARRAY_SIZE(local_semaphores)) {936semaphores = vk_alloc(937alloc, (sizeof(*semaphores) + sizeof(*values)) * semaphore_count,938VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);939if (!semaphores)940return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);941942values = (uint64_t *)&semaphores[semaphore_count];943}944memcpy(semaphores, pWaitInfo->pSemaphores,945sizeof(*semaphores) * semaphore_count);946memcpy(values, pWaitInfo->pValues, sizeof(*values) * semaphore_count);947948while (result == VK_NOT_READY) {949result = vn_remove_signaled_semaphores(device, semaphores, values,950&semaphore_count);951result = vn_update_sync_result(result, abs_timeout, &iter);952}953954if (semaphores != local_semaphores)955vk_free(alloc, semaphores);956} else {957while (result == VK_NOT_READY) {958result = vn_find_first_signaled_semaphore(959device, pWaitInfo->pSemaphores, pWaitInfo->pValues,960pWaitInfo->semaphoreCount);961result = vn_update_sync_result(result, abs_timeout, &iter);962}963}964965return vn_result(dev->instance, result);966}967968VkResult969vn_ImportSemaphoreFdKHR(970VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)971{972struct vn_device *dev = vn_device_from_handle(device);973struct vn_semaphore *sem =974vn_semaphore_from_handle(pImportSemaphoreFdInfo->semaphore);975ASSERTED const bool sync_file =976pImportSemaphoreFdInfo->handleType ==977VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;978const int fd = pImportSemaphoreFdInfo->fd;979980assert(dev->instance->experimental.globalFencing);981assert(sync_file);982if (fd >= 0) {983if (sync_wait(fd, -1))984return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);985986close(fd);987}988989/* abuse VN_SYNC_TYPE_WSI_SIGNALED */990vn_semaphore_signal_wsi(dev, sem);991992return VK_SUCCESS;993}994995VkResult996vn_GetSemaphoreFdKHR(VkDevice device,997const VkSemaphoreGetFdInfoKHR *pGetFdInfo,998int *pFd)999{1000struct vn_device *dev = vn_device_from_handle(device);1001struct vn_semaphore *sem = vn_semaphore_from_handle(pGetFdInfo->semaphore);1002const bool sync_file =1003pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;1004struct vn_sync_payload *payload = sem->payload;10051006assert(dev->instance->experimental.globalFencing);1007assert(sync_file);1008int fd = -1;1009if (payload->type == VN_SYNC_TYPE_DEVICE_ONLY) {1010VkResult result = vn_create_sync_file(dev, &fd);1011if (result != VK_SUCCESS)1012return vn_error(dev->instance, result);1013}10141015if (sync_file) {1016vn_sync_payload_release(dev, &sem->temporary);1017sem->payload = &sem->permanent;10181019/* XXX implies wait operation on the host semaphore */1020}10211022*pFd = fd;1023return VK_SUCCESS;1024}10251026/* event commands */10271028VkResult1029vn_CreateEvent(VkDevice device,1030const VkEventCreateInfo *pCreateInfo,1031const VkAllocationCallbacks *pAllocator,1032VkEvent *pEvent)1033{1034struct vn_device *dev = vn_device_from_handle(device);1035const VkAllocationCallbacks *alloc =1036pAllocator ? pAllocator : &dev->base.base.alloc;10371038struct vn_event *ev = vk_zalloc(alloc, sizeof(*ev), VN_DEFAULT_ALIGN,1039VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);1040if (!ev)1041return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);10421043vn_object_base_init(&ev->base, VK_OBJECT_TYPE_EVENT, &dev->base);10441045VkEvent ev_handle = vn_event_to_handle(ev);1046vn_async_vkCreateEvent(dev->instance, device, pCreateInfo, NULL,1047&ev_handle);10481049*pEvent = ev_handle;10501051return VK_SUCCESS;1052}10531054void1055vn_DestroyEvent(VkDevice device,1056VkEvent event,1057const VkAllocationCallbacks *pAllocator)1058{1059struct vn_device *dev = vn_device_from_handle(device);1060struct vn_event *ev = vn_event_from_handle(event);1061const VkAllocationCallbacks *alloc =1062pAllocator ? pAllocator : &dev->base.base.alloc;10631064if (!ev)1065return;10661067vn_async_vkDestroyEvent(dev->instance, device, event, NULL);10681069vn_object_base_fini(&ev->base);1070vk_free(alloc, ev);1071}10721073VkResult1074vn_GetEventStatus(VkDevice device, VkEvent event)1075{1076struct vn_device *dev = vn_device_from_handle(device);10771078/* TODO When the renderer supports it (requires a new vk extension), there1079* should be a coherent memory backing the event.1080*/1081VkResult result = vn_call_vkGetEventStatus(dev->instance, device, event);10821083return vn_result(dev->instance, result);1084}10851086VkResult1087vn_SetEvent(VkDevice device, VkEvent event)1088{1089struct vn_device *dev = vn_device_from_handle(device);10901091VkResult result = vn_call_vkSetEvent(dev->instance, device, event);10921093return vn_result(dev->instance, result);1094}10951096VkResult1097vn_ResetEvent(VkDevice device, VkEvent event)1098{1099struct vn_device *dev = vn_device_from_handle(device);11001101VkResult result = vn_call_vkResetEvent(dev->instance, device, event);11021103return vn_result(dev->instance, result);1104}110511061107