Path: blob/21.2-virgl/src/virtio/vulkan/vn_renderer_virtgpu.c
/*
 * Copyright 2020 Google LLC
 * SPDX-License-Identifier: MIT
 */

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <xf86drm.h>

#include "drm-uapi/virtgpu_drm.h"
#include "util/sparse_array.h"
#define VIRGL_RENDERER_UNSTABLE_APIS
#include "virtio-gpu/virglrenderer_hw.h"

#include "vn_renderer.h"

/* XXX WIP kernel uapi */
#ifndef VIRTGPU_PARAM_CONTEXT_INIT
#define VIRTGPU_PARAM_CONTEXT_INIT 6
#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
struct drm_virtgpu_context_set_param {
   __u64 param;
   __u64 value;
};
struct drm_virtgpu_context_init {
   __u32 num_params;
   __u32 pad;
   __u64 ctx_set_params;
};
#define DRM_VIRTGPU_CONTEXT_INIT 0xb
#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT                                       \
   DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT,                     \
            struct drm_virtgpu_context_init)
#endif /* VIRTGPU_PARAM_CONTEXT_INIT */
#ifndef VIRTGPU_PARAM_MAX_SYNC_QUEUE_COUNT
#define VIRTGPU_PARAM_MAX_SYNC_QUEUE_COUNT 100
#endif /* VIRTGPU_PARAM_MAX_SYNC_QUEUE_COUNT */

/* XXX comment these out to really use kernel uapi */
#define SIMULATE_BO_SIZE_FIX 1
//#define SIMULATE_CONTEXT_INIT 1
#define SIMULATE_SYNCOBJ 1
#define SIMULATE_SUBMIT 1

#define VIRTGPU_PCI_VENDOR_ID 0x1af4
#define VIRTGPU_PCI_DEVICE_ID 0x1050

struct virtgpu;

struct virtgpu_shmem {
   struct vn_renderer_shmem base;
   uint32_t gem_handle;
};

struct virtgpu_bo {
   struct vn_renderer_bo base;
   uint32_t gem_handle;
   uint32_t blob_flags;
};

struct virtgpu_sync {
   struct vn_renderer_sync base;

   /*
    * drm_syncobj is in one of these states
    *
    * - value N:      drm_syncobj has a signaled fence chain with seqno N
    * - pending N->M: drm_syncobj has an unsignaled fence chain with seqno M
    *                 (which may point to another unsignaled fence chain with
    *                 seqno between N and M, and so on)
    *
    * TODO Do we want to use binary drm_syncobjs?  They would be
    *
    * - value 0: drm_syncobj has no fence
    * - value 1: drm_syncobj has a signaled fence with seqno 0
    *
    * They are cheaper but require special care.
    */
   uint32_t syncobj_handle;
};

struct virtgpu {
   struct vn_renderer base;

   struct vn_instance *instance;

   int fd;
   int version_minor;
   drmPciBusInfo bus_info;

   uint32_t max_sync_queue_count;

   struct {
      enum virgl_renderer_capset id;
      uint32_t version;
      struct virgl_renderer_capset_venus data;
   } capset;

   /* note that we use gem_handle instead of res_id to index because
    * res_id is monotonically increasing by default (see
    * virtio_gpu_resource_id_get)
    */
   struct util_sparse_array shmem_array;
   struct util_sparse_array bo_array;

   mtx_t dma_buf_import_mutex;
};
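
/*
 * The SIMULATE_* paths below emulate kernel uapi that may not be available:
 * timeline syncobjs are simulated in userspace on top of sync_fds returned
 * by DRM_IOCTL_VIRTGPU_EXECBUFFER, and submissions go through plain
 * execbuffer calls.
 */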

#ifdef SIMULATE_SYNCOBJ

#include "util/hash_table.h"
#include "util/u_idalloc.h"

static struct {
   mtx_t mutex;
   struct hash_table *syncobjs;
   struct util_idalloc ida;

   int signaled_fd;
} sim;

struct sim_syncobj {
   mtx_t mutex;
   uint64_t point;

   int pending_fd;
   uint64_t pending_point;
   bool pending_cpu;
};

static uint32_t
sim_syncobj_create(struct virtgpu *gpu, bool signaled)
{
   struct sim_syncobj *syncobj = calloc(1, sizeof(*syncobj));
   if (!syncobj)
      return 0;

   mtx_init(&syncobj->mutex, mtx_plain);
   syncobj->pending_fd = -1;

   mtx_lock(&sim.mutex);

   /* initialize lazily */
   if (!sim.syncobjs) {
      sim.syncobjs = _mesa_pointer_hash_table_create(NULL);
      if (!sim.syncobjs) {
         mtx_unlock(&sim.mutex);
         return 0;
      }

      util_idalloc_init(&sim.ida, 32);

      struct drm_virtgpu_execbuffer args = {
         .flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
      };
      int ret = drmIoctl(gpu->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &args);
      if (ret || args.fence_fd < 0) {
         _mesa_hash_table_destroy(sim.syncobjs, NULL);
         sim.syncobjs = NULL;
         mtx_unlock(&sim.mutex);
         return 0;
      }

      sim.signaled_fd = args.fence_fd;
   }

   const unsigned syncobj_handle = util_idalloc_alloc(&sim.ida) + 1;
   _mesa_hash_table_insert(sim.syncobjs,
                           (const void *)(uintptr_t)syncobj_handle, syncobj);

   mtx_unlock(&sim.mutex);

   return syncobj_handle;
}

static void
sim_syncobj_destroy(struct virtgpu *gpu, uint32_t syncobj_handle)
{
   struct sim_syncobj *syncobj = NULL;

   mtx_lock(&sim.mutex);

   struct hash_entry *entry = _mesa_hash_table_search(
      sim.syncobjs, (const void *)(uintptr_t)syncobj_handle);
   if (entry) {
      syncobj = entry->data;
      _mesa_hash_table_remove(sim.syncobjs, entry);
      util_idalloc_free(&sim.ida, syncobj_handle - 1);
   }

   mtx_unlock(&sim.mutex);

   if (syncobj) {
      if (syncobj->pending_fd >= 0)
         close(syncobj->pending_fd);
      mtx_destroy(&syncobj->mutex);
      free(syncobj);
   }
}

static VkResult
sim_syncobj_poll(int fd, int poll_timeout)
{
   struct pollfd pollfd = {
      .fd = fd,
      .events = POLLIN,
   };
   int ret;
   do {
      ret = poll(&pollfd, 1, poll_timeout);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret < 0 || (ret > 0 && !(pollfd.revents & POLLIN))) {
      return (ret < 0 && errno == ENOMEM) ? VK_ERROR_OUT_OF_HOST_MEMORY
                                          : VK_ERROR_DEVICE_LOST;
   }

   return ret ? VK_SUCCESS : VK_TIMEOUT;
}

static void
sim_syncobj_set_point_locked(struct sim_syncobj *syncobj, uint64_t point)
{
   syncobj->point = point;

   if (syncobj->pending_fd >= 0) {
      close(syncobj->pending_fd);
      syncobj->pending_fd = -1;
      syncobj->pending_point = point;
   }
}

static void
sim_syncobj_update_point_locked(struct sim_syncobj *syncobj, int poll_timeout)
{
   if (syncobj->pending_fd >= 0) {
      VkResult result;
      if (syncobj->pending_cpu) {
         if (poll_timeout == -1) {
            const int max_cpu_timeout = 2000;
            poll_timeout = max_cpu_timeout;
            result = sim_syncobj_poll(syncobj->pending_fd, poll_timeout);
            if (result == VK_TIMEOUT) {
               vn_log(NULL, "cpu sync timed out after %dms; ignoring",
                      poll_timeout);
               result = VK_SUCCESS;
            }
         } else {
            result = sim_syncobj_poll(syncobj->pending_fd, poll_timeout);
         }
      } else {
         result = sim_syncobj_poll(syncobj->pending_fd, poll_timeout);
      }
      if (result == VK_SUCCESS) {
         close(syncobj->pending_fd);
         syncobj->pending_fd = -1;
         syncobj->point = syncobj->pending_point;
      }
   }
}

static struct sim_syncobj *
sim_syncobj_lookup(struct virtgpu *gpu, uint32_t syncobj_handle)
{
   struct sim_syncobj *syncobj = NULL;

   mtx_lock(&sim.mutex);
   struct hash_entry *entry = _mesa_hash_table_search(
      sim.syncobjs, (const void *)(uintptr_t)syncobj_handle);
   if (entry)
      syncobj = entry->data;
   mtx_unlock(&sim.mutex);

   return syncobj;
}

static int
sim_syncobj_reset(struct virtgpu *gpu, uint32_t syncobj_handle)
{
   struct sim_syncobj *syncobj = sim_syncobj_lookup(gpu, syncobj_handle);
   if (!syncobj)
      return -1;

   mtx_lock(&syncobj->mutex);
   sim_syncobj_set_point_locked(syncobj, 0);
   mtx_unlock(&syncobj->mutex);

   return 0;
}

static int
sim_syncobj_query(struct virtgpu *gpu,
                  uint32_t syncobj_handle,
                  uint64_t *point)
{
   struct sim_syncobj *syncobj = sim_syncobj_lookup(gpu, syncobj_handle);
   if (!syncobj)
      return -1;

   mtx_lock(&syncobj->mutex);
   sim_syncobj_update_point_locked(syncobj, 0);
   *point = syncobj->point;
   mtx_unlock(&syncobj->mutex);

   return 0;
}

static int
sim_syncobj_signal(struct virtgpu *gpu,
                   uint32_t syncobj_handle,
                   uint64_t point)
{
   struct sim_syncobj *syncobj = sim_syncobj_lookup(gpu, syncobj_handle);
   if (!syncobj)
      return -1;

   mtx_lock(&syncobj->mutex);
   sim_syncobj_set_point_locked(syncobj, point);
   mtx_unlock(&syncobj->mutex);

   return 0;
}

static int
sim_syncobj_submit(struct virtgpu *gpu,
                   uint32_t syncobj_handle,
                   int sync_fd,
                   uint64_t point,
                   bool cpu)
{
   struct sim_syncobj *syncobj = sim_syncobj_lookup(gpu, syncobj_handle);
   if (!syncobj)
      return -1;

   int pending_fd = dup(sync_fd);
   if (pending_fd < 0) {
      vn_log(gpu->instance, "failed to dup sync fd");
      return -1;
   }

   mtx_lock(&syncobj->mutex);

   if (syncobj->pending_fd >= 0) {
      mtx_unlock(&syncobj->mutex);

      /* TODO */
      vn_log(gpu->instance, "sorry, no simulated timeline semaphore");
      close(pending_fd);
      return -1;
   }
   if (syncobj->point >= point)
      vn_log(gpu->instance, "non-monotonic signaling");

   syncobj->pending_fd = pending_fd;
   syncobj->pending_point = point;
   syncobj->pending_cpu = cpu;

   mtx_unlock(&syncobj->mutex);

   return 0;
}
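
/* Convert a timeout in nanoseconds to a poll(2) timeout in milliseconds,
 * rounding up (e.g. 1500000ns becomes 2ms).  Values that do not fit in an
 * int map to -1, i.e. an infinite poll.
 */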
static int
timeout_to_poll_timeout(uint64_t timeout)
{
   const uint64_t ns_per_ms = 1000000;
   const uint64_t ms = (timeout + ns_per_ms - 1) / ns_per_ms;
   if (!ms && timeout)
      return -1;
   return ms <= INT_MAX ? ms : -1;
}

static int
sim_syncobj_wait(struct virtgpu *gpu,
                 const struct vn_renderer_wait *wait,
                 bool wait_avail)
{
   if (wait_avail)
      return -1;

   const int poll_timeout = timeout_to_poll_timeout(wait->timeout);

   /* TODO poll all fds at the same time */
   for (uint32_t i = 0; i < wait->sync_count; i++) {
      struct virtgpu_sync *sync = (struct virtgpu_sync *)wait->syncs[i];
      const uint64_t point = wait->sync_values[i];

      struct sim_syncobj *syncobj =
         sim_syncobj_lookup(gpu, sync->syncobj_handle);
      if (!syncobj)
         return -1;

      mtx_lock(&syncobj->mutex);

      if (syncobj->point < point)
         sim_syncobj_update_point_locked(syncobj, poll_timeout);

      if (syncobj->point < point) {
         if (wait->wait_any && i < wait->sync_count - 1 &&
             syncobj->pending_fd < 0) {
            mtx_unlock(&syncobj->mutex);
            continue;
         }
         errno = ETIME;
         mtx_unlock(&syncobj->mutex);
         return -1;
      }

      mtx_unlock(&syncobj->mutex);

      if (wait->wait_any)
         break;

      /* TODO adjust poll_timeout */
   }

   return 0;
}

static int
sim_syncobj_export(struct virtgpu *gpu, uint32_t syncobj_handle)
{
   struct sim_syncobj *syncobj = sim_syncobj_lookup(gpu, syncobj_handle);
   if (!syncobj)
      return -1;

   int fd = -1;
   mtx_lock(&syncobj->mutex);
   if (syncobj->pending_fd >= 0)
      fd = dup(syncobj->pending_fd);
   else
      fd = dup(sim.signaled_fd);
   mtx_unlock(&syncobj->mutex);

   return fd;
}

static uint32_t
sim_syncobj_import(struct virtgpu *gpu, uint32_t syncobj_handle, int fd)
{
   struct sim_syncobj *syncobj = sim_syncobj_lookup(gpu, syncobj_handle);
   if (!syncobj)
      return 0;

   if (sim_syncobj_submit(gpu, syncobj_handle, fd, 1, false))
      return 0;

   return syncobj_handle;
}

#endif /* SIMULATE_SYNCOBJ */

#ifdef SIMULATE_SUBMIT

static int
sim_submit_signal_syncs(struct virtgpu *gpu,
                        int sync_fd,
                        struct vn_renderer_sync *const *syncs,
                        const uint64_t *sync_values,
                        uint32_t sync_count,
                        bool cpu)
{
   for (uint32_t i = 0; i < sync_count; i++) {
      struct virtgpu_sync *sync = (struct virtgpu_sync *)syncs[i];
      const uint64_t pending_point = sync_values[i];

#ifdef SIMULATE_SYNCOBJ
      int ret = sim_syncobj_submit(gpu, sync->syncobj_handle, sync_fd,
                                   pending_point, cpu);
      if (ret)
         return ret;
#else
      /* we can in theory do a DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE followed by a
       * DRM_IOCTL_SYNCOBJ_TRANSFER
       */
      return -1;
#endif
   }

   return 0;
}

static uint32_t *
sim_submit_alloc_gem_handles(struct vn_renderer_bo *const *bos,
                             uint32_t bo_count)
{
   uint32_t *gem_handles = malloc(sizeof(*gem_handles) * bo_count);
   if (!gem_handles)
      return NULL;

   for (uint32_t i = 0; i < bo_count; i++) {
      struct virtgpu_bo *bo = (struct virtgpu_bo *)bos[i];
      gem_handles[i] = bo->gem_handle;
   }

   return gem_handles;
}

static int
sim_submit(struct virtgpu *gpu, const struct vn_renderer_submit *submit)
{
   /* TODO replace submit->bos by submit->gem_handles to avoid malloc/loop */
   uint32_t *gem_handles = NULL;
   if (submit->bo_count) {
      gem_handles =
         sim_submit_alloc_gem_handles(submit->bos, submit->bo_count);
      if (!gem_handles)
         return -1;
   }

   int ret = 0;
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];

      struct drm_virtgpu_execbuffer args = {
         .flags = batch->sync_count ? VIRTGPU_EXECBUF_FENCE_FD_OUT : 0,
         .size = batch->cs_size,
         .command = (uintptr_t)batch->cs_data,
         .bo_handles = (uintptr_t)gem_handles,
         .num_bo_handles = submit->bo_count,
      };

      ret = drmIoctl(gpu->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &args);
      if (ret) {
         vn_log(gpu->instance, "failed to execbuffer: %s", strerror(errno));
         break;
      }

      if (batch->sync_count) {
         ret = sim_submit_signal_syncs(gpu, args.fence_fd, batch->syncs,
                                       batch->sync_values, batch->sync_count,
                                       batch->sync_queue_cpu);
         close(args.fence_fd);
         if (ret)
            break;
      }
   }

   if (!submit->batch_count && submit->bo_count) {
      struct drm_virtgpu_execbuffer args = {
         .bo_handles = (uintptr_t)gem_handles,
         .num_bo_handles = submit->bo_count,
      };

      ret = drmIoctl(gpu->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &args);
      if (ret)
         vn_log(gpu->instance, "failed to execbuffer: %s", strerror(errno));
   }

   free(gem_handles);

   return ret;
}

#endif /* SIMULATE_SUBMIT */
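
/*
 * Thin wrappers around the DRM and virtgpu ioctls used by this renderer.
 * When a SIMULATE_* macro is defined, the corresponding wrappers return
 * early into the simulation paths above instead of calling into the kernel.
 */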
static int
virtgpu_ioctl(struct virtgpu *gpu, unsigned long request, void *args)
{
   return drmIoctl(gpu->fd, request, args);
}

static uint64_t
virtgpu_ioctl_getparam(struct virtgpu *gpu, uint64_t param)
{
#ifdef SIMULATE_CONTEXT_INIT
   if (param == VIRTGPU_PARAM_CONTEXT_INIT)
      return 1;
#endif
#ifdef SIMULATE_SUBMIT
   if (param == VIRTGPU_PARAM_MAX_SYNC_QUEUE_COUNT)
      return 16;
#endif

   /* val must be zeroed because kernel only writes the lower 32 bits */
   uint64_t val = 0;
   struct drm_virtgpu_getparam args = {
      .param = param,
      .value = (uintptr_t)&val,
   };

   const int ret = virtgpu_ioctl(gpu, DRM_IOCTL_VIRTGPU_GETPARAM, &args);
   return ret ? 0 : val;
}

static int
virtgpu_ioctl_get_caps(struct virtgpu *gpu,
                       enum virgl_renderer_capset id,
                       uint32_t version,
                       void *capset,
                       size_t capset_size)
{
#ifdef SIMULATE_CONTEXT_INIT
   if (id == VIRGL_RENDERER_CAPSET_VENUS && version == 0)
      return 0;
#endif

   struct drm_virtgpu_get_caps args = {
      .cap_set_id = id,
      .cap_set_ver = version,
      .addr = (uintptr_t)capset,
      .size = capset_size,
   };

   return virtgpu_ioctl(gpu, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
}

static int
virtgpu_ioctl_context_init(struct virtgpu *gpu,
                           enum virgl_renderer_capset capset_id)
{
#ifdef SIMULATE_CONTEXT_INIT
   if (capset_id == VIRGL_RENDERER_CAPSET_VENUS)
      return 0;
#endif

   struct drm_virtgpu_context_init args = {
      .num_params = 1,
      .ctx_set_params = (uintptr_t) &
                        (struct drm_virtgpu_context_set_param){
                           .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID,
                           .value = capset_id,
                        },
   };

   return virtgpu_ioctl(gpu, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &args);
}

static uint32_t
virtgpu_ioctl_resource_create_blob(struct virtgpu *gpu,
                                   uint32_t blob_mem,
                                   uint32_t blob_flags,
                                   size_t blob_size,
                                   uint64_t blob_id,
                                   uint32_t *res_id)
{
#ifdef SIMULATE_BO_SIZE_FIX
   blob_size = align64(blob_size, 4096);
#endif

   struct drm_virtgpu_resource_create_blob args = {
      .blob_mem = blob_mem,
      .blob_flags = blob_flags,
      .size = blob_size,
      .blob_id = blob_id,
   };

   if (virtgpu_ioctl(gpu, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &args))
      return 0;

   *res_id = args.res_handle;
   return args.bo_handle;
}

static int
virtgpu_ioctl_resource_info(struct virtgpu *gpu,
                            uint32_t gem_handle,
                            struct drm_virtgpu_resource_info *info)
{
   *info = (struct drm_virtgpu_resource_info){
      .bo_handle = gem_handle,
   };

   return virtgpu_ioctl(gpu, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, info);
}

static void
virtgpu_ioctl_gem_close(struct virtgpu *gpu, uint32_t gem_handle)
{
   struct drm_gem_close args = {
      .handle = gem_handle,
   };

   ASSERTED const int ret = virtgpu_ioctl(gpu, DRM_IOCTL_GEM_CLOSE, &args);
   assert(!ret);
}

static int
virtgpu_ioctl_prime_handle_to_fd(struct virtgpu *gpu,
                                 uint32_t gem_handle,
                                 bool mappable)
{
   struct drm_prime_handle args = {
      .handle = gem_handle,
      .flags = DRM_CLOEXEC | (mappable ? DRM_RDWR : 0),
   };

   const int ret = virtgpu_ioctl(gpu, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
   return ret ? -1 : args.fd;
}

static uint32_t
virtgpu_ioctl_prime_fd_to_handle(struct virtgpu *gpu, int fd)
{
   struct drm_prime_handle args = {
      .fd = fd,
   };

   const int ret = virtgpu_ioctl(gpu, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
   return ret ? 0 : args.handle;
}

static void *
virtgpu_ioctl_map(struct virtgpu *gpu, uint32_t gem_handle, size_t size)
{
   struct drm_virtgpu_map args = {
      .handle = gem_handle,
   };

   if (virtgpu_ioctl(gpu, DRM_IOCTL_VIRTGPU_MAP, &args))
      return NULL;

   void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, gpu->fd,
                    args.offset);
   if (ptr == MAP_FAILED)
      return NULL;

   return ptr;
}

static uint32_t
virtgpu_ioctl_syncobj_create(struct virtgpu *gpu, bool signaled)
{
#ifdef SIMULATE_SYNCOBJ
   return sim_syncobj_create(gpu, signaled);
#endif

   struct drm_syncobj_create args = {
      .flags = signaled ? DRM_SYNCOBJ_CREATE_SIGNALED : 0,
   };

   const int ret = virtgpu_ioctl(gpu, DRM_IOCTL_SYNCOBJ_CREATE, &args);
   return ret ? 0 : args.handle;
}

static void
virtgpu_ioctl_syncobj_destroy(struct virtgpu *gpu, uint32_t syncobj_handle)
{
#ifdef SIMULATE_SYNCOBJ
   sim_syncobj_destroy(gpu, syncobj_handle);
   return;
#endif

   struct drm_syncobj_destroy args = {
      .handle = syncobj_handle,
   };

   ASSERTED const int ret =
      virtgpu_ioctl(gpu, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
   assert(!ret);
}

static int
virtgpu_ioctl_syncobj_handle_to_fd(struct virtgpu *gpu,
                                   uint32_t syncobj_handle,
                                   bool sync_file)
{
#ifdef SIMULATE_SYNCOBJ
   return sync_file ? sim_syncobj_export(gpu, syncobj_handle) : -1;
#endif

   struct drm_syncobj_handle args = {
      .handle = syncobj_handle,
      .flags =
         sync_file ? DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE : 0,
   };

   int ret = virtgpu_ioctl(gpu, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
   if (ret)
      return -1;

   return args.fd;
}

static uint32_t
virtgpu_ioctl_syncobj_fd_to_handle(struct virtgpu *gpu,
                                   int fd,
                                   uint32_t syncobj_handle)
{
#ifdef SIMULATE_SYNCOBJ
   return syncobj_handle ? sim_syncobj_import(gpu, syncobj_handle, fd) : 0;
#endif

   struct drm_syncobj_handle args = {
      .handle = syncobj_handle,
      .flags =
         syncobj_handle ? DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE : 0,
      .fd = fd,
   };

   int ret = virtgpu_ioctl(gpu, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
   if (ret)
      return 0;

   return args.handle;
}

static int
virtgpu_ioctl_syncobj_reset(struct virtgpu *gpu, uint32_t syncobj_handle)
{
#ifdef SIMULATE_SYNCOBJ
   return sim_syncobj_reset(gpu, syncobj_handle);
#endif

   struct drm_syncobj_array args = {
      .handles = (uintptr_t)&syncobj_handle,
      .count_handles = 1,
   };

   return virtgpu_ioctl(gpu, DRM_IOCTL_SYNCOBJ_RESET, &args);
}

static int
virtgpu_ioctl_syncobj_query(struct virtgpu *gpu,
                            uint32_t syncobj_handle,
                            uint64_t *point)
{
#ifdef SIMULATE_SYNCOBJ
   return sim_syncobj_query(gpu, syncobj_handle, point);
#endif

   struct drm_syncobj_timeline_array args = {
      .handles = (uintptr_t)&syncobj_handle,
      .points = (uintptr_t)point,
      .count_handles = 1,
   };

   return virtgpu_ioctl(gpu, DRM_IOCTL_SYNCOBJ_QUERY, &args);
}

static int
virtgpu_ioctl_syncobj_timeline_signal(struct virtgpu *gpu,
                                      uint32_t syncobj_handle,
                                      uint64_t point)
{
#ifdef SIMULATE_SYNCOBJ
   return sim_syncobj_signal(gpu, syncobj_handle, point);
#endif

   struct drm_syncobj_timeline_array args = {
      .handles = (uintptr_t)&syncobj_handle,
      .points = (uintptr_t)&point,
      .count_handles = 1,
   };

   return virtgpu_ioctl(gpu, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
}

static int
virtgpu_ioctl_syncobj_timeline_wait(struct virtgpu *gpu,
                                    const struct vn_renderer_wait *wait,
                                    bool wait_avail)
{
#ifdef SIMULATE_SYNCOBJ
   return sim_syncobj_wait(gpu, wait, wait_avail);
#endif

   /* always enable wait-before-submit */
   uint32_t flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
   if (!wait->wait_any)
      flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
   /* wait for fences to appear instead of signaling */
   if (wait_avail)
      flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE;

   /* TODO replace wait->syncs by wait->sync_handles to avoid malloc/loop */
   uint32_t *syncobj_handles =
      malloc(sizeof(*syncobj_handles) * wait->sync_count);
   if (!syncobj_handles)
      return -1;
   for (uint32_t i = 0; i < wait->sync_count; i++) {
      struct virtgpu_sync *sync = (struct virtgpu_sync *)wait->syncs[i];
      syncobj_handles[i] = sync->syncobj_handle;
   }

   struct drm_syncobj_timeline_wait args = {
      .handles = (uintptr_t)syncobj_handles,
      .points = (uintptr_t)wait->sync_values,
      .timeout_nsec = os_time_get_absolute_timeout(wait->timeout),
      .count_handles = wait->sync_count,
      .flags = flags,
   };

   const int ret = virtgpu_ioctl(gpu, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &args);

   free(syncobj_handles);

   return ret;
}

static int
virtgpu_ioctl_submit(struct virtgpu *gpu,
                     const struct vn_renderer_submit *submit)
{
#ifdef SIMULATE_SUBMIT
   return sim_submit(gpu, submit);
#endif
   return -1;
}

static VkResult
virtgpu_sync_write(struct vn_renderer *renderer,
                   struct vn_renderer_sync *_sync,
                   uint64_t val)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;
   struct virtgpu_sync *sync = (struct virtgpu_sync *)_sync;

   const int ret =
      virtgpu_ioctl_syncobj_timeline_signal(gpu, sync->syncobj_handle, val);

   return ret ? VK_ERROR_OUT_OF_DEVICE_MEMORY : VK_SUCCESS;
}

static VkResult
virtgpu_sync_read(struct vn_renderer *renderer,
                  struct vn_renderer_sync *_sync,
                  uint64_t *val)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;
   struct virtgpu_sync *sync = (struct virtgpu_sync *)_sync;

   const int ret =
      virtgpu_ioctl_syncobj_query(gpu, sync->syncobj_handle, val);

   return ret ? VK_ERROR_OUT_OF_DEVICE_MEMORY : VK_SUCCESS;
}

static VkResult
virtgpu_sync_reset(struct vn_renderer *renderer,
                   struct vn_renderer_sync *_sync,
                   uint64_t initial_val)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;
   struct virtgpu_sync *sync = (struct virtgpu_sync *)_sync;

   int ret = virtgpu_ioctl_syncobj_reset(gpu, sync->syncobj_handle);
   if (!ret) {
      ret = virtgpu_ioctl_syncobj_timeline_signal(gpu, sync->syncobj_handle,
                                                  initial_val);
   }

   return ret ? VK_ERROR_OUT_OF_DEVICE_MEMORY : VK_SUCCESS;
}

static int
virtgpu_sync_export_syncobj(struct vn_renderer *renderer,
                            struct vn_renderer_sync *_sync,
                            bool sync_file)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;
   struct virtgpu_sync *sync = (struct virtgpu_sync *)_sync;

   return virtgpu_ioctl_syncobj_handle_to_fd(gpu, sync->syncobj_handle,
                                             sync_file);
}

static void
virtgpu_sync_destroy(struct vn_renderer *renderer,
                     struct vn_renderer_sync *_sync)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;
   struct virtgpu_sync *sync = (struct virtgpu_sync *)_sync;

   virtgpu_ioctl_syncobj_destroy(gpu, sync->syncobj_handle);

   free(sync);
}

static VkResult
virtgpu_sync_create_from_syncobj(struct vn_renderer *renderer,
                                 int fd,
                                 bool sync_file,
                                 struct vn_renderer_sync **out_sync)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;

   uint32_t syncobj_handle;
   if (sync_file) {
      syncobj_handle = virtgpu_ioctl_syncobj_create(gpu, false);
      if (!syncobj_handle)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      if (!virtgpu_ioctl_syncobj_fd_to_handle(gpu, fd, syncobj_handle)) {
         virtgpu_ioctl_syncobj_destroy(gpu, syncobj_handle);
         return VK_ERROR_INVALID_EXTERNAL_HANDLE;
      }
   } else {
      syncobj_handle = virtgpu_ioctl_syncobj_fd_to_handle(gpu, fd, 0);
      if (!syncobj_handle)
         return VK_ERROR_INVALID_EXTERNAL_HANDLE;
   }

   struct virtgpu_sync *sync = calloc(1, sizeof(*sync));
   if (!sync) {
      virtgpu_ioctl_syncobj_destroy(gpu, syncobj_handle);
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   sync->syncobj_handle = syncobj_handle;
   sync->base.sync_id = 0; /* TODO */

   *out_sync = &sync->base;

   return VK_SUCCESS;
}

static VkResult
virtgpu_sync_create(struct vn_renderer *renderer,
                    uint64_t initial_val,
                    uint32_t flags,
                    struct vn_renderer_sync **out_sync)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;

   /* TODO */
   if (flags & VN_RENDERER_SYNC_SHAREABLE)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   /* always false because we don't use binary drm_syncobjs */
   const bool signaled = false;
   const uint32_t syncobj_handle =
      virtgpu_ioctl_syncobj_create(gpu, signaled);
   if (!syncobj_handle)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   /* add a signaled fence chain with seqno initial_val */
   const int ret =
      virtgpu_ioctl_syncobj_timeline_signal(gpu, syncobj_handle, initial_val);
   if (ret) {
      virtgpu_ioctl_syncobj_destroy(gpu, syncobj_handle);
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
   }

   struct virtgpu_sync *sync = calloc(1, sizeof(*sync));
   if (!sync) {
      virtgpu_ioctl_syncobj_destroy(gpu, syncobj_handle);
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   sync->syncobj_handle = syncobj_handle;
   /* we will have a sync_id when shareable is true and virtio-gpu associates
    * a host sync object with guest drm_syncobj
    */
   sync->base.sync_id = 0;

   *out_sync = &sync->base;

   return VK_SUCCESS;
}
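
/*
 * vn_renderer_bo ops: blob resources are either created from host device
 * memory or imported from dma-bufs.  Mappings are always coherent, so flush
 * and invalidate below are no-ops.
 */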
static void
virtgpu_bo_invalidate(struct vn_renderer *renderer,
                      struct vn_renderer_bo *bo,
                      VkDeviceSize offset,
                      VkDeviceSize size)
{
   /* nop because kernel makes every mapping coherent */
}

static void
virtgpu_bo_flush(struct vn_renderer *renderer,
                 struct vn_renderer_bo *bo,
                 VkDeviceSize offset,
                 VkDeviceSize size)
{
   /* nop because kernel makes every mapping coherent */
}

static void *
virtgpu_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;
   struct virtgpu_bo *bo = (struct virtgpu_bo *)_bo;
   const bool mappable = bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

   /* not thread-safe but is fine */
   if (!bo->base.mmap_ptr && mappable) {
      bo->base.mmap_ptr =
         virtgpu_ioctl_map(gpu, bo->gem_handle, bo->base.mmap_size);
   }

   return bo->base.mmap_ptr;
}

static int
virtgpu_bo_export_dma_buf(struct vn_renderer *renderer,
                          struct vn_renderer_bo *_bo)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;
   struct virtgpu_bo *bo = (struct virtgpu_bo *)_bo;
   const bool mappable = bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
   const bool shareable = bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_SHAREABLE;

   return shareable
             ? virtgpu_ioctl_prime_handle_to_fd(gpu, bo->gem_handle, mappable)
             : -1;
}

static bool
virtgpu_bo_destroy(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;
   struct virtgpu_bo *bo = (struct virtgpu_bo *)_bo;

   mtx_lock(&gpu->dma_buf_import_mutex);

   /* Check the refcount again after the import lock is grabbed.  Yes, we use
    * the double-checked locking anti-pattern.
    */
   if (atomic_load_explicit(&bo->base.refcount, memory_order_relaxed) > 0) {
      mtx_unlock(&gpu->dma_buf_import_mutex);
      return false;
   }

   if (bo->base.mmap_ptr)
      munmap(bo->base.mmap_ptr, bo->base.mmap_size);
   virtgpu_ioctl_gem_close(gpu, bo->gem_handle);

   /* set gem_handle to 0 to indicate that the bo is invalid */
   bo->gem_handle = 0;

   mtx_unlock(&gpu->dma_buf_import_mutex);

   return true;
}
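
/* Translate Vulkan memory properties and external handle types into virtgpu
 * blob flags: HOST_VISIBLE requests a mappable blob, any external handle
 * type requests a shareable blob, and dma-buf export additionally requests
 * cross-device sharing.
 */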
static uint32_t
virtgpu_bo_blob_flags(VkMemoryPropertyFlags flags,
                      VkExternalMemoryHandleTypeFlags external_handles)
{
   uint32_t blob_flags = 0;
   if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
      blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
   if (external_handles)
      blob_flags |= VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
   if (external_handles & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
      blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;

   return blob_flags;
}

static VkResult
virtgpu_bo_create_from_dma_buf(struct vn_renderer *renderer,
                               VkDeviceSize size,
                               int fd,
                               VkMemoryPropertyFlags flags,
                               struct vn_renderer_bo **out_bo)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;
   struct drm_virtgpu_resource_info info;
   uint32_t gem_handle = 0;
   struct virtgpu_bo *bo = NULL;

   mtx_lock(&gpu->dma_buf_import_mutex);

   gem_handle = virtgpu_ioctl_prime_fd_to_handle(gpu, fd);
   if (!gem_handle)
      goto fail;
   bo = util_sparse_array_get(&gpu->bo_array, gem_handle);

   if (virtgpu_ioctl_resource_info(gpu, gem_handle, &info))
      goto fail;

   uint32_t blob_flags;
   size_t mmap_size;
   if (info.blob_mem) {
      /* must be VIRTGPU_BLOB_MEM_HOST3D */
      if (info.blob_mem != VIRTGPU_BLOB_MEM_HOST3D)
         goto fail;

      if (info.size < size)
         goto fail;

      /* blob_flags is not passed to the kernel and is only for internal use
       * on imports.  Set it to what works best for us.
       */
      blob_flags = virtgpu_bo_blob_flags(flags, 0);
      blob_flags |= VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
      mmap_size = size;
   } else {
      /* must be classic resource here
       * set blob_flags to 0 to fail virtgpu_bo_map
       * set mmap_size to 0 since mapping is not allowed
       */
      blob_flags = 0;
      mmap_size = 0;
   }

   /* we check bo->gem_handle instead of bo->refcount because bo->refcount
    * might only be memset to 0 and is not considered initialized in theory
    */
   if (bo->gem_handle == gem_handle) {
      if (bo->base.mmap_size < mmap_size)
         goto fail;
      if (blob_flags & ~bo->blob_flags)
         goto fail;

      /* we can't use vn_renderer_bo_ref as the refcount may drop to 0
       * temporarily before virtgpu_bo_destroy grabs the lock
       */
      atomic_fetch_add_explicit(&bo->base.refcount, 1, memory_order_relaxed);
   } else {
      *bo = (struct virtgpu_bo){
         .base = {
            .refcount = 1,
            .res_id = info.res_handle,
            .mmap_size = mmap_size,
         },
         .gem_handle = gem_handle,
         .blob_flags = blob_flags,
      };
   }

   mtx_unlock(&gpu->dma_buf_import_mutex);

   *out_bo = &bo->base;

   return VK_SUCCESS;

fail:
   if (gem_handle && bo->gem_handle != gem_handle)
      virtgpu_ioctl_gem_close(gpu, gem_handle);
   mtx_unlock(&gpu->dma_buf_import_mutex);
   return VK_ERROR_INVALID_EXTERNAL_HANDLE;
}

static VkResult
virtgpu_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;
   const uint32_t blob_flags = virtgpu_bo_blob_flags(flags, external_handles);

   uint32_t res_id;
   uint32_t gem_handle = virtgpu_ioctl_resource_create_blob(
      gpu, VIRTGPU_BLOB_MEM_HOST3D, blob_flags, size, mem_id, &res_id);
   if (!gem_handle)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   struct virtgpu_bo *bo = util_sparse_array_get(&gpu->bo_array, gem_handle);
   *bo = (struct virtgpu_bo){
      .base = {
         .refcount = 1,
         .res_id = res_id,
         .mmap_size = size,
      },
      .gem_handle = gem_handle,
      .blob_flags = blob_flags,
   };

   *out_bo = &bo->base;

   return VK_SUCCESS;
}

static void
virtgpu_shmem_destroy(struct vn_renderer *renderer,
                      struct vn_renderer_shmem *_shmem)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;
   struct virtgpu_shmem *shmem = (struct virtgpu_shmem *)_shmem;

   munmap(shmem->base.mmap_ptr, shmem->base.mmap_size);
   virtgpu_ioctl_gem_close(gpu, shmem->gem_handle);
}

static struct vn_renderer_shmem *
virtgpu_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;

   uint32_t res_id;
   uint32_t gem_handle = virtgpu_ioctl_resource_create_blob(
      gpu, VIRTGPU_BLOB_MEM_GUEST, VIRTGPU_BLOB_FLAG_USE_MAPPABLE, size, 0,
      &res_id);
   if (!gem_handle)
      return NULL;

   void *ptr = virtgpu_ioctl_map(gpu, gem_handle, size);
   if (!ptr) {
      virtgpu_ioctl_gem_close(gpu, gem_handle);
      return NULL;
   }

   struct virtgpu_shmem *shmem =
      util_sparse_array_get(&gpu->shmem_array, gem_handle);
   *shmem = (struct virtgpu_shmem){
      .base = {
         .refcount = 1,
         .res_id = res_id,
         .mmap_size = size,
         .mmap_ptr = ptr,
      },
      .gem_handle = gem_handle,
   };

   return &shmem->base;
}
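
/* Map a failed timeline wait to VK_TIMEOUT when errno is ETIME and to
 * VK_ERROR_DEVICE_LOST otherwise.
 */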
static VkResult
virtgpu_wait(struct vn_renderer *renderer,
             const struct vn_renderer_wait *wait)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;

   const int ret = virtgpu_ioctl_syncobj_timeline_wait(gpu, wait, false);
   if (ret && errno != ETIME)
      return VK_ERROR_DEVICE_LOST;

   return ret ? VK_TIMEOUT : VK_SUCCESS;
}

static VkResult
virtgpu_submit(struct vn_renderer *renderer,
               const struct vn_renderer_submit *submit)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;

   const int ret = virtgpu_ioctl_submit(gpu, submit);
   return ret ? VK_ERROR_DEVICE_LOST : VK_SUCCESS;
}

static void
virtgpu_get_info(struct vn_renderer *renderer, struct vn_renderer_info *info)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;

   memset(info, 0, sizeof(*info));

   info->pci.vendor_id = VIRTGPU_PCI_VENDOR_ID;
   info->pci.device_id = VIRTGPU_PCI_DEVICE_ID;

   info->pci.has_bus_info = true;
   info->pci.domain = gpu->bus_info.domain;
   info->pci.bus = gpu->bus_info.bus;
   info->pci.device = gpu->bus_info.dev;
   info->pci.function = gpu->bus_info.func;

   info->has_dma_buf_import = true;
   /* Kernel makes every mapping coherent.  We are better off filtering
    * incoherent memory types out than silently making them coherent.
    */
   info->has_cache_management = false;
   /* TODO drm_syncobj */
   info->has_external_sync = false;

   info->has_implicit_fencing = false;

   info->max_sync_queue_count = gpu->max_sync_queue_count;

   const struct virgl_renderer_capset_venus *capset = &gpu->capset.data;
   info->wire_format_version = capset->wire_format_version;
   info->vk_xml_version = capset->vk_xml_version;
   info->vk_ext_command_serialization_spec_version =
      capset->vk_ext_command_serialization_spec_version;
   info->vk_mesa_venus_protocol_spec_version =
      capset->vk_mesa_venus_protocol_spec_version;
}

static void
virtgpu_destroy(struct vn_renderer *renderer,
                const VkAllocationCallbacks *alloc)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;

   if (gpu->fd >= 0)
      close(gpu->fd);

   mtx_destroy(&gpu->dma_buf_import_mutex);

   util_sparse_array_finish(&gpu->shmem_array);
   util_sparse_array_finish(&gpu->bo_array);

   vk_free(alloc, gpu);
}

static VkResult
virtgpu_init_context(struct virtgpu *gpu)
{
   assert(!gpu->capset.version);
   const int ret = virtgpu_ioctl_context_init(gpu, gpu->capset.id);
   if (ret) {
      if (VN_DEBUG(INIT)) {
         vn_log(gpu->instance, "failed to initialize context: %s",
                strerror(errno));
      }
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   return VK_SUCCESS;
}

static VkResult
virtgpu_init_capset(struct virtgpu *gpu)
{
   gpu->capset.id = VIRGL_RENDERER_CAPSET_VENUS;
   gpu->capset.version = 0;

   const int ret =
      virtgpu_ioctl_get_caps(gpu, gpu->capset.id, gpu->capset.version,
                             &gpu->capset.data, sizeof(gpu->capset.data));
   if (ret) {
      if (VN_DEBUG(INIT)) {
         vn_log(gpu->instance, "failed to get venus v%d capset: %s",
                gpu->capset.version, strerror(errno));
      }
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   return VK_SUCCESS;
}

static VkResult
virtgpu_init_params(struct virtgpu *gpu)
{
   const uint64_t required_params[] = {
      VIRTGPU_PARAM_3D_FEATURES,   VIRTGPU_PARAM_CAPSET_QUERY_FIX,
      VIRTGPU_PARAM_RESOURCE_BLOB, VIRTGPU_PARAM_HOST_VISIBLE,
      VIRTGPU_PARAM_CROSS_DEVICE,  VIRTGPU_PARAM_CONTEXT_INIT,
   };
   uint64_t val;
   for (uint32_t i = 0; i < ARRAY_SIZE(required_params); i++) {
      val = virtgpu_ioctl_getparam(gpu, required_params[i]);
      if (!val) {
         if (VN_DEBUG(INIT)) {
            vn_log(gpu->instance, "required kernel param %d is missing",
                   (int)required_params[i]);
         }
         return VK_ERROR_INITIALIZATION_FAILED;
      }
   }

   val = virtgpu_ioctl_getparam(gpu, VIRTGPU_PARAM_MAX_SYNC_QUEUE_COUNT);
   if (!val) {
      if (VN_DEBUG(INIT))
         vn_log(gpu->instance, "no sync queue support");
      return VK_ERROR_INITIALIZATION_FAILED;
   }
   gpu->max_sync_queue_count = val;

   return VK_SUCCESS;
}

static VkResult
virtgpu_open_device(struct virtgpu *gpu, const drmDevicePtr dev)
{
   /* skip unless the device has our PCI vendor/device id and a render node */
   if (!(dev->available_nodes & (1 << DRM_NODE_RENDER)) ||
       dev->bustype != DRM_BUS_PCI ||
       dev->deviceinfo.pci->vendor_id != VIRTGPU_PCI_VENDOR_ID ||
       dev->deviceinfo.pci->device_id != VIRTGPU_PCI_DEVICE_ID) {
      if (VN_DEBUG(INIT)) {
         const char *name = "unknown";
         for (uint32_t i = 0; i < DRM_NODE_MAX; i++) {
            if (dev->available_nodes & (1 << i)) {
               name = dev->nodes[i];
               break;
            }
         }
         vn_log(gpu->instance, "skipping DRM device %s", name);
      }
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   const char *node_path = dev->nodes[DRM_NODE_RENDER];

   int fd = open(node_path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      if (VN_DEBUG(INIT))
         vn_log(gpu->instance, "failed to open %s", node_path);
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   drmVersionPtr version = drmGetVersion(fd);
   if (!version || strcmp(version->name, "virtio_gpu") ||
       version->version_major != 0) {
      if (VN_DEBUG(INIT)) {
         if (version) {
            vn_log(gpu->instance, "unknown DRM driver %s version %d",
                   version->name, version->version_major);
         } else {
            vn_log(gpu->instance, "failed to get DRM driver version");
         }
      }
      if (version)
         drmFreeVersion(version);
      close(fd);
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   gpu->fd = fd;
   gpu->version_minor = version->version_minor;
   gpu->bus_info = *dev->businfo.pci;

   drmFreeVersion(version);

   if (VN_DEBUG(INIT))
      vn_log(gpu->instance, "using DRM device %s", node_path);

   return VK_SUCCESS;
}

static VkResult
virtgpu_open(struct virtgpu *gpu)
{
   drmDevicePtr devs[8];
   int count = drmGetDevices2(0, devs, ARRAY_SIZE(devs));
   if (count < 0) {
      if (VN_DEBUG(INIT))
         vn_log(gpu->instance, "failed to enumerate DRM devices");
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   VkResult result = VK_ERROR_INITIALIZATION_FAILED;
   for (int i = 0; i < count; i++) {
      result = virtgpu_open_device(gpu, devs[i]);
      if (result == VK_SUCCESS)
         break;
   }

   drmFreeDevices(devs, count);

   return result;
}
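
/*
 * Initialization order: open the virtio-gpu render node, verify the required
 * kernel params, fetch the venus capset, initialize the context, and then
 * hook up the renderer/shmem/bo/sync ops.
 */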
static VkResult
virtgpu_init(struct virtgpu *gpu)
{
   util_sparse_array_init(&gpu->shmem_array, sizeof(struct virtgpu_shmem),
                          1024);
   util_sparse_array_init(&gpu->bo_array, sizeof(struct virtgpu_bo), 1024);

   mtx_init(&gpu->dma_buf_import_mutex, mtx_plain);

   VkResult result = virtgpu_open(gpu);
   if (result == VK_SUCCESS)
      result = virtgpu_init_params(gpu);
   if (result == VK_SUCCESS)
      result = virtgpu_init_capset(gpu);
   if (result == VK_SUCCESS)
      result = virtgpu_init_context(gpu);
   if (result != VK_SUCCESS)
      return result;

   gpu->base.ops.destroy = virtgpu_destroy;
   gpu->base.ops.get_info = virtgpu_get_info;
   gpu->base.ops.submit = virtgpu_submit;
   gpu->base.ops.wait = virtgpu_wait;

   gpu->base.shmem_ops.create = virtgpu_shmem_create;
   gpu->base.shmem_ops.destroy = virtgpu_shmem_destroy;

   gpu->base.bo_ops.create_from_device_memory =
      virtgpu_bo_create_from_device_memory;
   gpu->base.bo_ops.create_from_dma_buf = virtgpu_bo_create_from_dma_buf;
   gpu->base.bo_ops.destroy = virtgpu_bo_destroy;
   gpu->base.bo_ops.export_dma_buf = virtgpu_bo_export_dma_buf;
   gpu->base.bo_ops.map = virtgpu_bo_map;
   gpu->base.bo_ops.flush = virtgpu_bo_flush;
   gpu->base.bo_ops.invalidate = virtgpu_bo_invalidate;

   gpu->base.sync_ops.create = virtgpu_sync_create;
   gpu->base.sync_ops.create_from_syncobj = virtgpu_sync_create_from_syncobj;
   gpu->base.sync_ops.destroy = virtgpu_sync_destroy;
   gpu->base.sync_ops.export_syncobj = virtgpu_sync_export_syncobj;
   gpu->base.sync_ops.reset = virtgpu_sync_reset;
   gpu->base.sync_ops.read = virtgpu_sync_read;
   gpu->base.sync_ops.write = virtgpu_sync_write;

   return VK_SUCCESS;
}

VkResult
vn_renderer_create_virtgpu(struct vn_instance *instance,
                           const VkAllocationCallbacks *alloc,
                           struct vn_renderer **renderer)
{
   struct virtgpu *gpu = vk_zalloc(alloc, sizeof(*gpu), VN_DEFAULT_ALIGN,
                                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!gpu)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   gpu->instance = instance;
   gpu->fd = -1;

   VkResult result = virtgpu_init(gpu);
   if (result != VK_SUCCESS) {
      virtgpu_destroy(&gpu->base, alloc);
      return result;
   }

   *renderer = &gpu->base;

   return VK_SUCCESS;
}
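
/*
 * Usage sketch (illustration only; the surrounding vn_instance setup, the
 * allocator, and error handling are assumed to live elsewhere in the
 * driver):
 *
 *    struct vn_renderer *renderer;
 *    struct vn_renderer_info info;
 *    VkResult result =
 *       vn_renderer_create_virtgpu(instance, alloc, &renderer);
 *    if (result == VK_SUCCESS) {
 *       renderer->ops.get_info(renderer, &info);
 *       ...
 *       renderer->ops.destroy(renderer, alloc);
 *    }
 */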