Path: blob/21.2-virgl/src/freedreno/drm/freedreno_bo.c
/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "os/os_mman.h"

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

simple_mtx_t table_lock = _SIMPLE_MTX_INITIALIZER_NP;
void bo_del(struct fd_bo *bo);

/* set buffer name, and add to name table, call w/ table_lock held: */
static void
set_name(struct fd_bo *bo, uint32_t name)
{
   bo->name = name;
   /* add ourself into the name table: */
   _mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo);
}

/* lookup a buffer, call w/ table_lock held: */
static struct fd_bo *
lookup_bo(struct hash_table *tbl, uint32_t key)
{
   struct fd_bo *bo = NULL;
   struct hash_entry *entry = _mesa_hash_table_search(tbl, &key);
   if (entry) {
      /* found, incr refcnt and return: */
      bo = fd_bo_ref(entry->data);

      /* don't break the bucket if this bo was found in one */
      list_delinit(&bo->list);
   }
   return bo;
}

/* allocate a new buffer object, call w/ table_lock held */
static struct fd_bo *
bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
{
   struct fd_bo *bo;

   simple_mtx_assert_locked(&table_lock);

   bo = dev->funcs->bo_from_handle(dev, size, handle);
   if (!bo) {
      struct drm_gem_close req = {
         .handle = handle,
      };
      drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
      return NULL;
   }
   bo->dev = dev;
   bo->size = size;
   bo->handle = handle;
   bo->iova = bo->funcs->iova(bo);
   bo->flags = FD_RELOC_FLAGS_INIT;

   p_atomic_set(&bo->refcnt, 1);
   list_inithead(&bo->list);
   /* add ourself into the handle table: */
   _mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);
   return bo;
}

static struct fd_bo *
bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
       struct fd_bo_cache *cache)
{
   struct fd_bo *bo = NULL;
   uint32_t handle;
   int ret;

   bo = fd_bo_cache_alloc(cache, &size, flags);
   if (bo)
      return bo;

   ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
   if (ret)
      return NULL;

   simple_mtx_lock(&table_lock);
   bo = bo_from_handle(dev, size, handle);
   simple_mtx_unlock(&table_lock);

   /* bo_from_handle() cleans up the GEM handle on failure: */
   if (!bo)
      return NULL;

   bo->max_fences = 1;
   bo->fences = &bo->_inline_fence;

   VG_BO_ALLOC(bo);

   return bo;
}

struct fd_bo *
_fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
   struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
   if (bo)
      bo->bo_reuse = BO_CACHE;
   return bo;
}
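
/* Example (illustrative only, not part of the driver): the typical
 * allocation flow as seen by a caller.  This assumes the fd_bo_new()
 * convenience wrapper from freedreno_drmif.h, which forwards to
 * _fd_bo_new() and then names the buffer for debugging:
 *
 *    struct fd_bo *bo = fd_bo_new(dev, 0x1000, 0, "scratch");
 *    if (bo) {
 *       ... use the buffer ...
 *       fd_bo_del(bo);   // drop the reference taken at allocation
 *    }
 *
 * A hit in fd_bo_cache_alloc() returns a recycled bo without touching
 * the kernel; only on a miss do we take the bo_new_handle() ioctl path
 * above.
 */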

void
_fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
{
   bo->funcs->set_name(bo, fmt, ap);
}

/* Internal function to allocate bo's that use the ringbuffer cache
 * instead of the normal bo_cache.  Because cmdstream bo's get vmap'd
 * on the kernel side, which is expensive, we want to re-use cmdstream
 * bo's for cmdstream and not for unrelated purposes.
 */
struct fd_bo *
fd_bo_new_ring(struct fd_device *dev, uint32_t size)
{
   uint32_t flags = FD_BO_GPUREADONLY;
   struct fd_bo *bo = bo_new(dev, size, flags, &dev->ring_cache);
   if (bo) {
      bo->bo_reuse = RING_CACHE;
      bo->flags |= FD_RELOC_DUMP;
      fd_bo_set_name(bo, "cmdstream");
   }
   return bo;
}

struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
   struct fd_bo *bo = NULL;

   simple_mtx_lock(&table_lock);

   bo = lookup_bo(dev->handle_table, handle);
   if (bo)
      goto out_unlock;

   bo = bo_from_handle(dev, size, handle);

   VG_BO_ALLOC(bo);

out_unlock:
   simple_mtx_unlock(&table_lock);

   return bo;
}

struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
   int ret, size;
   uint32_t handle;
   struct fd_bo *bo;

   simple_mtx_lock(&table_lock);
   ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
   if (ret) {
      simple_mtx_unlock(&table_lock);
      return NULL;
   }

   bo = lookup_bo(dev->handle_table, handle);
   if (bo)
      goto out_unlock;

   /* lseek() to get bo size */
   size = lseek(fd, 0, SEEK_END);
   lseek(fd, 0, SEEK_CUR);

   bo = bo_from_handle(dev, size, handle);

   VG_BO_ALLOC(bo);

out_unlock:
   simple_mtx_unlock(&table_lock);

   return bo;
}

struct fd_bo *
fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
   struct drm_gem_open req = {
      .name = name,
   };
   struct fd_bo *bo;

   simple_mtx_lock(&table_lock);

   /* check name table first, to see if bo is already open: */
   bo = lookup_bo(dev->name_table, name);
   if (bo)
      goto out_unlock;

   if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
      ERROR_MSG("gem-open failed: %s", strerror(errno));
      goto out_unlock;
   }

   bo = lookup_bo(dev->handle_table, req.handle);
   if (bo)
      goto out_unlock;

   bo = bo_from_handle(dev, req.size, req.handle);
   if (bo) {
      set_name(bo, name);
      VG_BO_ALLOC(bo);
   }

out_unlock:
   simple_mtx_unlock(&table_lock);

   return bo;
}

void
fd_bo_mark_for_dump(struct fd_bo *bo)
{
   bo->flags |= FD_RELOC_DUMP;
}

uint64_t
fd_bo_get_iova(struct fd_bo *bo)
{
   /* ancient kernels did not support this */
   assert(bo->iova != 0);
   return bo->iova;
}

struct fd_bo *
fd_bo_ref(struct fd_bo *bo)
{
   p_atomic_inc(&bo->refcnt);
   return bo;
}

static void
bo_del_or_recycle(struct fd_bo *bo)
{
   struct fd_device *dev = bo->dev;

   simple_mtx_assert_locked(&table_lock);

   if ((bo->bo_reuse == BO_CACHE) &&
       (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
      return;

   if ((bo->bo_reuse == RING_CACHE) &&
       (fd_bo_cache_free(&dev->ring_cache, bo) == 0))
      return;

   bo_del(bo);
}

void
fd_bo_del_locked(struct fd_bo *bo)
{
   simple_mtx_assert_locked(&table_lock);

   if (!p_atomic_dec_zero(&bo->refcnt))
      return;

   bo_del_or_recycle(bo);
}

void
fd_bo_del(struct fd_bo *bo)
{
   if (!p_atomic_dec_zero(&bo->refcnt))
      return;

   simple_mtx_lock(&table_lock);
   bo_del_or_recycle(bo);
   simple_mtx_unlock(&table_lock);
}
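
/* Example (illustrative only): the reference-count lifecycle implemented
 * above.  Every fd_bo_ref() must be balanced by an fd_bo_del(); the final
 * unref does not necessarily free memory, since bo_del_or_recycle() may
 * return the buffer to its cache for re-use:
 *
 *    struct fd_bo *shared = fd_bo_ref(bo);   // second owner
 *    fd_bo_del(bo);                          // refcnt 2 -> 1, bo stays alive
 *    fd_bo_del(shared);                      // refcnt 1 -> 0, cached or freed
 *
 * fd_bo_del_locked() is the variant for callers that already hold
 * table_lock (e.g. internal fence/pipe cleanup paths).
 */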

/**
 * Cleanup fences, dropping pipe references.  If 'expired' is true, only
 * clean up expired fences.
 *
 * Normally we expect at most a single fence, the exception being bo's
 * shared between contexts.
 */
static void
cleanup_fences(struct fd_bo *bo, bool expired)
{
   simple_mtx_assert_locked(&table_lock);

   for (int i = 0; i < bo->nr_fences; i++) {
      struct fd_bo_fence *f = &bo->fences[i];

      if (expired && fd_fence_before(f->pipe->control->fence, f->fence))
         continue;

      fd_pipe_del_locked(f->pipe);
      bo->nr_fences--;

      if (bo->nr_fences > 0) {
         /* Shuffle up the last entry to replace the current slot: */
         bo->fences[i] = bo->fences[bo->nr_fences];
         i--;
      }
   }
}

/* Called under table_lock */
void
bo_del(struct fd_bo *bo)
{
   VG_BO_FREE(bo);

   simple_mtx_assert_locked(&table_lock);

   cleanup_fences(bo, false);
   if (bo->fences != &bo->_inline_fence)
      free(bo->fences);

   if (bo->map)
      os_munmap(bo->map, bo->size);

   /* TODO probably bo's in bucket list get removed from
    * handle table??
    */

   if (bo->handle) {
      struct drm_gem_close req = {
         .handle = bo->handle,
      };
      _mesa_hash_table_remove_key(bo->dev->handle_table, &bo->handle);
      if (bo->name)
         _mesa_hash_table_remove_key(bo->dev->name_table, &bo->name);
      drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
   }

   bo->funcs->destroy(bo);
}

static void
bo_flush(struct fd_bo *bo)
{
   for (int i = 0; i < bo->nr_fences; i++) {
      struct fd_bo_fence *f = &bo->fences[i];
      fd_pipe_flush(f->pipe, f->fence);
   }
}

int
fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
   if (!bo->name) {
      struct drm_gem_flink req = {
         .handle = bo->handle,
      };
      int ret;

      ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
      if (ret) {
         return ret;
      }

      simple_mtx_lock(&table_lock);
      set_name(bo, req.name);
      simple_mtx_unlock(&table_lock);
      bo->bo_reuse = NO_CACHE;
      bo->shared = true;
      bo_flush(bo);
   }

   *name = bo->name;

   return 0;
}

uint32_t
fd_bo_handle(struct fd_bo *bo)
{
   bo->bo_reuse = NO_CACHE;
   bo->shared = true;
   bo_flush(bo);
   return bo->handle;
}

int
fd_bo_dmabuf(struct fd_bo *bo)
{
   int ret, prime_fd;

   ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, &prime_fd);
   if (ret) {
      ERROR_MSG("failed to get dmabuf fd: %d", ret);
      return ret;
   }

   bo->bo_reuse = NO_CACHE;
   bo->shared = true;
   bo_flush(bo);

   return prime_fd;
}

uint32_t
fd_bo_size(struct fd_bo *bo)
{
   return bo->size;
}

void *
fd_bo_map(struct fd_bo *bo)
{
   if (!bo->map) {
      uint64_t offset;
      int ret;

      ret = bo->funcs->offset(bo, &offset);
      if (ret) {
         return NULL;
      }

      bo->map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        bo->dev->fd, offset);
      if (bo->map == MAP_FAILED) {
         ERROR_MSG("mmap failed: %s", strerror(errno));
         bo->map = NULL;
      }
   }
   return bo->map;
}
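
/* Example (illustrative only): CPU access via fd_bo_map().  The mapping
 * is created lazily on first use and cached in bo->map until bo_del()
 * munmaps it, so callers never unmap the pointer themselves:
 *
 *    uint32_t *cpu = fd_bo_map(bo);
 *    if (cpu)
 *       cpu[0] = 0xdeadbeef;   // stay within fd_bo_size(bo) bytes
 */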

/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
int
fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
   if (op & (FD_BO_PREP_NOSYNC | FD_BO_PREP_FLUSH)) {
      simple_mtx_lock(&table_lock);
      enum fd_bo_state state = fd_bo_state(bo);
      simple_mtx_unlock(&table_lock);

      if (state == FD_BO_STATE_IDLE)
         return 0;

      if (op & FD_BO_PREP_FLUSH)
         bo_flush(bo);

      /* If we have *only* been asked to flush, then we aren't really
       * interested in whether shared buffers are busy, so avoid
       * the kernel ioctl.
       */
      if ((state == FD_BO_STATE_BUSY) ||
          (op == FD_BO_PREP_FLUSH))
         return -EBUSY;
   }

   /* In case the bo is referenced by a deferred submit, flush up to the
    * required fence now:
    */
   bo_flush(bo);

   /* FD_BO_PREP_FLUSH is purely a frontend flag, and is not seen/handled
    * by backend or kernel:
    */
   return bo->funcs->cpu_prep(bo, pipe, op & ~FD_BO_PREP_FLUSH);
}

void
fd_bo_cpu_fini(struct fd_bo *bo)
{
   // TODO until we have cached buffers, the kernel side ioctl does nothing,
   // so just skip it.  When we have cached buffers, we can make the
   // ioctl conditional
   //
   // bo->funcs->cpu_fini(bo);
}

void
fd_bo_add_fence(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t fence)
{
   simple_mtx_assert_locked(&table_lock);

   if (bo->nosync)
      return;

   /* The common case is bo re-used on the same pipe it had previously
    * been used on:
    */
   for (int i = 0; i < bo->nr_fences; i++) {
      struct fd_bo_fence *f = &bo->fences[i];
      if (f->pipe == pipe) {
         assert(fd_fence_before(f->fence, fence));
         f->fence = fence;
         return;
      }
   }

   cleanup_fences(bo, true);

   /* The first time we grow past a single fence, we need some special
    * handling, as we've been using the embedded _inline_fence to avoid
    * a separate allocation:
    */
   if (unlikely((bo->nr_fences == 1) &&
                (bo->fences == &bo->_inline_fence))) {
      bo->nr_fences = bo->max_fences = 0;
      bo->fences = NULL;
      APPEND(bo, fences, bo->_inline_fence);
   }

   APPEND(bo, fences, (struct fd_bo_fence){
      .pipe = fd_pipe_ref_locked(pipe),
      .fence = fence,
   });
}

enum fd_bo_state
fd_bo_state(struct fd_bo *bo)
{
   simple_mtx_assert_locked(&table_lock);

   cleanup_fences(bo, true);

   if (bo->shared || bo->nosync)
      return FD_BO_STATE_UNKNOWN;

   if (!bo->nr_fences)
      return FD_BO_STATE_IDLE;

   return FD_BO_STATE_BUSY;
}
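
/* Example (illustrative only): a try-then-wait CPU synchronization
 * pattern built on fd_bo_cpu_prep().  FD_BO_PREP_WRITE and
 * FD_BO_PREP_NOSYNC are assumed to come from freedreno_drmif.h:
 *
 *    if (fd_bo_cpu_prep(bo, pipe, FD_BO_PREP_WRITE | FD_BO_PREP_NOSYNC)) {
 *       // busy: do other work, or block until the GPU is done:
 *       fd_bo_cpu_prep(bo, pipe, FD_BO_PREP_WRITE);
 *    }
 *    ... CPU writes ...
 *    fd_bo_cpu_fini(bo);
 */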