Path: blob/21.2-virgl/src/virtio/vulkan/vn_renderer.h
/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef VN_RENDERER_H
#define VN_RENDERER_H

#include "vn_common.h"

struct vn_renderer_shmem {
   atomic_int refcount;
   uint32_t res_id;
   size_t mmap_size; /* for internal use only (i.e., munmap) */
   void *mmap_ptr;
};

struct vn_renderer_bo {
   atomic_int refcount;
   uint32_t res_id;
   /* for internal use only */
   size_t mmap_size;
   void *mmap_ptr;
};

/*
 * A sync consists of a uint64_t counter.  The counter can be updated by the
 * CPU or by the GPU.  It can also be waited on by the CPU or by the GPU
 * until it reaches certain values.
 *
 * This is modeled after timeline VkSemaphore rather than timeline
 * drm_syncobj.  The main difference is that drm_syncobj can have an
 * unsignaled value of 0.
 */
struct vn_renderer_sync {
   uint32_t sync_id;
};

struct vn_renderer_info {
   struct {
      uint16_t vendor_id;
      uint16_t device_id;

      bool has_bus_info;
      uint16_t domain;
      uint8_t bus;
      uint8_t device;
      uint8_t function;
   } pci;

   bool has_dma_buf_import;
   bool has_cache_management;
   bool has_external_sync;
   bool has_implicit_fencing;

   uint32_t max_sync_queue_count;

   /* hw capset */
   uint32_t wire_format_version;
   uint32_t vk_xml_version;
   uint32_t vk_ext_command_serialization_spec_version;
   uint32_t vk_mesa_venus_protocol_spec_version;
};

struct vn_renderer_submit_batch {
   const void *cs_data;
   size_t cs_size;

   /*
    * Submit cs to the virtual sync queue identified by sync_queue_index.
    * The virtual queue is assumed to be associated with the physical
    * VkQueue identified by vk_queue_id.  After the execution completes on
    * the VkQueue, the virtual sync queue is signaled.
    *
    * sync_queue_index must be less than max_sync_queue_count.
    *
    * vk_queue_id specifies the object id of a VkQueue.
    *
    * When sync_queue_cpu is true, it specifies the special CPU sync queue,
    * and sync_queue_index/vk_queue_id are ignored.  TODO revisit this later
    */
   uint32_t sync_queue_index;
   bool sync_queue_cpu;
   vn_object_id vk_queue_id;

   /* syncs to update when the virtual sync queue is signaled */
   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};

struct vn_renderer_submit {
   /* BOs to pin and to fence implicitly
    *
    * TODO track all bos and automatically pin them.  We don't do it yet
    * because each vn_command_buffer owns a bo.  We can probably make do by
    * returning the bos to a bo cache and excluding the bo cache from
    * pinning.
    */
   struct vn_renderer_bo *const *bos;
   uint32_t bo_count;

   const struct vn_renderer_submit_batch *batches;
   uint32_t batch_count;
};

struct vn_renderer_wait {
   bool wait_any;
   uint64_t timeout;

   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};

struct vn_renderer_ops {
   void (*destroy)(struct vn_renderer *renderer,
                   const VkAllocationCallbacks *alloc);

   void (*get_info)(struct vn_renderer *renderer,
                    struct vn_renderer_info *info);

   VkResult (*submit)(struct vn_renderer *renderer,
                      const struct vn_renderer_submit *submit);

   /*
    * On success, returns VK_SUCCESS or VK_TIMEOUT.  On failure, returns
    * VK_ERROR_DEVICE_LOST or an out-of-device/host-memory error.
    */
   VkResult (*wait)(struct vn_renderer *renderer,
                    const struct vn_renderer_wait *wait);
};

struct vn_renderer_shmem_ops {
   struct vn_renderer_shmem *(*create)(struct vn_renderer *renderer,
                                       size_t size);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_shmem *shmem);
};

struct vn_renderer_bo_ops {
   VkResult (*create_from_device_memory)(
      struct vn_renderer *renderer,
      VkDeviceSize size,
      vn_object_id mem_id,
      VkMemoryPropertyFlags flags,
      VkExternalMemoryHandleTypeFlags external_handles,
      struct vn_renderer_bo **out_bo);

   VkResult (*create_from_dma_buf)(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo);

   bool (*destroy)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   int (*export_dma_buf)(struct vn_renderer *renderer,
                         struct vn_renderer_bo *bo);

   /* map is not thread-safe */
   void *(*map)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   void (*flush)(struct vn_renderer *renderer,
                 struct vn_renderer_bo *bo,
                 VkDeviceSize offset,
                 VkDeviceSize size);
   void (*invalidate)(struct vn_renderer *renderer,
                      struct vn_renderer_bo *bo,
                      VkDeviceSize offset,
                      VkDeviceSize size);
};

enum vn_renderer_sync_flags {
   VN_RENDERER_SYNC_SHAREABLE = 1u << 0,
   VN_RENDERER_SYNC_BINARY = 1u << 1,
};

struct vn_renderer_sync_ops {
   VkResult (*create)(struct vn_renderer *renderer,
                      uint64_t initial_val,
                      uint32_t flags,
                      struct vn_renderer_sync **out_sync);

   VkResult (*create_from_syncobj)(struct vn_renderer *renderer,
                                   int fd,
                                   bool sync_file,
                                   struct vn_renderer_sync **out_sync);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_sync *sync);

   int (*export_syncobj)(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync,
                         bool sync_file);

   /* reset the counter */
   VkResult (*reset)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t initial_val);

   /* read the current value from the counter */
   VkResult (*read)(struct vn_renderer *renderer,
                    struct vn_renderer_sync *sync,
                    uint64_t *val);

   /* write a new value (larger than the current one) to the counter */
   VkResult (*write)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t val);
};

struct vn_renderer {
   struct vn_renderer_ops ops;
   struct vn_renderer_shmem_ops shmem_ops;
   struct vn_renderer_bo_ops bo_ops;
   struct vn_renderer_sync_ops sync_ops;
};

VkResult
vn_renderer_create_virtgpu(struct vn_instance *instance,
                           const VkAllocationCallbacks *alloc,
                           struct vn_renderer **renderer);

VkResult
vn_renderer_create_vtest(struct vn_instance *instance,
                         const VkAllocationCallbacks *alloc,
                         struct vn_renderer **renderer);

static inline VkResult
vn_renderer_create(struct vn_instance *instance,
                   const VkAllocationCallbacks *alloc,
                   struct vn_renderer **renderer)
{
   if (VN_DEBUG(VTEST)) {
      VkResult result = vn_renderer_create_vtest(instance, alloc, renderer);
      if (result == VK_SUCCESS)
         return VK_SUCCESS;
   }

   return vn_renderer_create_virtgpu(instance, alloc, renderer);
}

static inline void
vn_renderer_destroy(struct vn_renderer *renderer,
                    const VkAllocationCallbacks *alloc)
{
   renderer->ops.destroy(renderer, alloc);
}

static inline void
vn_renderer_get_info(struct vn_renderer *renderer,
                     struct vn_renderer_info *info)
{
   renderer->ops.get_info(renderer, info);
}

static inline VkResult
vn_renderer_submit(struct vn_renderer *renderer,
                   const struct vn_renderer_submit *submit)
{
   return renderer->ops.submit(renderer, submit);
}

static inline VkResult
vn_renderer_submit_simple(struct vn_renderer *renderer,
                          const void *cs_data,
                          size_t cs_size)
{
   const struct vn_renderer_submit submit = {
      .batches =
         &(const struct vn_renderer_submit_batch){
            .cs_data = cs_data,
            .cs_size = cs_size,
         },
      .batch_count = 1,
   };
   return vn_renderer_submit(renderer, &submit);
}
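/*
 * A hypothetical sketch, not part of this header's API: shows how a caller
 * could fill out vn_renderer_submit_batch to submit one command stream to
 * virtual sync queue 0 and have the renderer bump a single timeline sync to
 * sync_val when that queue signals.  The function name is made up for
 * illustration; vk_queue_id and the sync are assumed to have been set up
 * elsewhere, and max_sync_queue_count is assumed to be nonzero.
 */
static inline VkResult
vn_renderer_example_submit_signal(struct vn_renderer *renderer,
                                  const void *cs_data,
                                  size_t cs_size,
                                  vn_object_id vk_queue_id,
                                  struct vn_renderer_sync *sync,
                                  uint64_t sync_val)
{
   const struct vn_renderer_submit_batch batch = {
      .cs_data = cs_data,
      .cs_size = cs_size,
      .sync_queue_index = 0,
      .vk_queue_id = vk_queue_id,
      .syncs = &sync,
      .sync_values = &sync_val,
      .sync_count = 1,
   };
   const struct vn_renderer_submit submit = {
      .batches = &batch,
      .batch_count = 1,
   };
   return vn_renderer_submit(renderer, &submit);
}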
static inline VkResult
vn_renderer_wait(struct vn_renderer *renderer,
                 const struct vn_renderer_wait *wait)
{
   return renderer->ops.wait(renderer, wait);
}
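/*
 * A hypothetical sketch, not part of this header's API: a CPU-side wait for
 * a single timeline sync to reach val, with a timeout in nanoseconds.  Per
 * vn_renderer_ops.wait above, the result is VK_SUCCESS or VK_TIMEOUT on
 * success.  The function name is made up for illustration.
 */
static inline VkResult
vn_renderer_example_wait_one(struct vn_renderer *renderer,
                             struct vn_renderer_sync *sync,
                             uint64_t val,
                             uint64_t timeout_ns)
{
   const struct vn_renderer_wait wait = {
      .wait_any = false,
      .timeout = timeout_ns,
      .syncs = &sync,
      .sync_values = &val,
      .sync_count = 1,
   };
   return vn_renderer_wait(renderer, &wait);
}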
static inline struct vn_renderer_shmem *
vn_renderer_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct vn_renderer_shmem *shmem =
      renderer->shmem_ops.create(renderer, size);
   if (shmem) {
      assert(atomic_load(&shmem->refcount) == 1);
      assert(shmem->res_id);
      assert(shmem->mmap_size >= size);
      assert(shmem->mmap_ptr);
   }

   return shmem;
}

static inline struct vn_renderer_shmem *
vn_renderer_shmem_ref(struct vn_renderer *renderer,
                      struct vn_renderer_shmem *shmem)
{
   ASSERTED const int old =
      atomic_fetch_add_explicit(&shmem->refcount, 1, memory_order_relaxed);
   assert(old >= 1);

   return shmem;
}

static inline void
vn_renderer_shmem_unref(struct vn_renderer *renderer,
                        struct vn_renderer_shmem *shmem)
{
   const int old =
      atomic_fetch_sub_explicit(&shmem->refcount, 1, memory_order_release);
   assert(old >= 1);

   if (old == 1) {
      atomic_thread_fence(memory_order_acquire);
      renderer->shmem_ops.destroy(renderer, shmem);
   }
}

static inline VkResult
vn_renderer_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result = renderer->bo_ops.create_from_device_memory(
      renderer, size, mem_id, flags, external_handles, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(atomic_load(&bo->refcount) == 1);
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline VkResult
vn_renderer_bo_create_from_dma_buf(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result =
      renderer->bo_ops.create_from_dma_buf(renderer, size, fd, flags, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(atomic_load(&bo->refcount) >= 1);
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline struct vn_renderer_bo *
vn_renderer_bo_ref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   ASSERTED const int old =
      atomic_fetch_add_explicit(&bo->refcount, 1, memory_order_relaxed);
   assert(old >= 1);

   return bo;
}

static inline bool
vn_renderer_bo_unref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   const int old =
      atomic_fetch_sub_explicit(&bo->refcount, 1, memory_order_release);
   assert(old >= 1);

   if (old == 1) {
      atomic_thread_fence(memory_order_acquire);
      return renderer->bo_ops.destroy(renderer, bo);
   }

   return false;
}

static inline int
vn_renderer_bo_export_dma_buf(struct vn_renderer *renderer,
                              struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.export_dma_buf(renderer, bo);
}

static inline void *
vn_renderer_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.map(renderer, bo);
}

static inline void
vn_renderer_bo_flush(struct vn_renderer *renderer,
                     struct vn_renderer_bo *bo,
                     VkDeviceSize offset,
                     VkDeviceSize size)
{
   renderer->bo_ops.flush(renderer, bo, offset, size);
}

static inline void
vn_renderer_bo_invalidate(struct vn_renderer *renderer,
                          struct vn_renderer_bo *bo,
                          VkDeviceSize offset,
                          VkDeviceSize size)
{
   renderer->bo_ops.invalidate(renderer, bo, offset, size);
}
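/*
 * A hypothetical sketch, not part of this header's API: copies data into a
 * mapped bo and flushes the written range, the pattern required when the
 * underlying memory lacks VK_MEMORY_PROPERTY_HOST_COHERENT_BIT.  The
 * function name is made up for illustration, and memcpy is assumed to be
 * available (e.g., <string.h> pulled in via vn_common.h).
 */
static inline VkResult
vn_renderer_example_bo_write(struct vn_renderer *renderer,
                             struct vn_renderer_bo *bo,
                             VkDeviceSize offset,
                             const void *data,
                             VkDeviceSize size)
{
   void *ptr = vn_renderer_bo_map(renderer, bo);
   if (!ptr)
      return VK_ERROR_MEMORY_MAP_FAILED;

   memcpy((char *)ptr + offset, data, (size_t)size);
   vn_renderer_bo_flush(renderer, bo, offset, size);

   return VK_SUCCESS;
}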
static inline VkResult
vn_renderer_sync_create(struct vn_renderer *renderer,
                        uint64_t initial_val,
                        uint32_t flags,
                        struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create(renderer, initial_val, flags, out_sync);
}

static inline VkResult
vn_renderer_sync_create_from_syncobj(struct vn_renderer *renderer,
                                     int fd,
                                     bool sync_file,
                                     struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create_from_syncobj(renderer, fd, sync_file,
                                                 out_sync);
}

static inline void
vn_renderer_sync_destroy(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync)
{
   renderer->sync_ops.destroy(renderer, sync);
}

static inline int
vn_renderer_sync_export_syncobj(struct vn_renderer *renderer,
                                struct vn_renderer_sync *sync,
                                bool sync_file)
{
   return renderer->sync_ops.export_syncobj(renderer, sync, sync_file);
}

static inline VkResult
vn_renderer_sync_reset(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t initial_val)
{
   return renderer->sync_ops.reset(renderer, sync, initial_val);
}

static inline VkResult
vn_renderer_sync_read(struct vn_renderer *renderer,
                      struct vn_renderer_sync *sync,
                      uint64_t *val)
{
   return renderer->sync_ops.read(renderer, sync, val);
}

static inline VkResult
vn_renderer_sync_write(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t val)
{
   return renderer->sync_ops.write(renderer, sync, val);
}

#endif /* VN_RENDERER_H */