Path: blob/21.2-virgl/src/gallium/drivers/freedreno/freedreno_fence.c
/*
 * Copyright (C) 2012 Rob Clark <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "util/os_file.h"
#include "util/u_inlines.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_util.h"
/* TODO: Use the interface drm/freedreno_drmif.h instead of calling directly */
#include <xf86drm.h>

static bool
fence_flush(struct pipe_context *pctx, struct pipe_fence_handle *fence,
            uint64_t timeout)
   /* NOTE: in the !fence_is_signalled() case we may be called from non-driver
    * thread, but we don't call fd_batch_flush() in that case
    */
   in_dt
{
   if (!util_queue_fence_is_signalled(&fence->ready)) {
      if (fence->tc_token) {
         threaded_context_flush(pctx, fence->tc_token, timeout == 0);
      }

      if (!timeout)
         return false;

      if (timeout == PIPE_TIMEOUT_INFINITE) {
         util_queue_fence_wait(&fence->ready);
      } else {
         int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
         if (!util_queue_fence_wait_timeout(&fence->ready, abs_timeout)) {
            return false;
         }
      }

      util_queue_fence_wait(&fence->submit_fence.ready);

      /* We've already waited for the batch to be flushed and fence->batch
       * to be cleared:
       */
      assert(!fence->batch);
      return true;
   }

   if (fence->batch)
      fd_batch_flush(fence->batch);

   util_queue_fence_wait(&fence->submit_fence.ready);

   debug_assert(!fence->batch);

   return true;
}

void
fd_fence_repopulate(struct pipe_fence_handle *fence, struct pipe_fence_handle *last_fence)
{
   if (last_fence->last_fence)
      fd_fence_repopulate(fence, last_fence->last_fence);

   /* The fence we are re-populating must not be an fd-fence (but last_fence
    * might have been)
    */
   assert(!fence->submit_fence.use_fence_fd);
   assert(!last_fence->batch);

   fd_fence_ref(&fence->last_fence, last_fence);

   /* We have nothing to flush, so nothing will clear the batch reference
    * (which is normally done when the batch is flushed), so do it now:
    */
   fd_fence_set_batch(fence, NULL);
}

static void
fd_fence_destroy(struct pipe_fence_handle *fence)
{
   fd_fence_ref(&fence->last_fence, NULL);

   tc_unflushed_batch_token_reference(&fence->tc_token, NULL);
   if (fence->submit_fence.use_fence_fd)
      close(fence->submit_fence.fence_fd);
   if (fence->syncobj)
      drmSyncobjDestroy(fd_device_fd(fence->screen->dev), fence->syncobj);
   fd_pipe_del(fence->pipe);

   /* TODO might be worth trying harder to avoid a potential stall here,
    * but that would require the submit somehow holding a reference to
    * the pipe_fence_handle.. and I'm not sure if it is a thing that is
    * likely to matter much.
    */
   util_queue_fence_wait(&fence->submit_fence.ready);

   FREE(fence);
}

void
fd_fence_ref(struct pipe_fence_handle **ptr, struct pipe_fence_handle *pfence)
{
   if (pipe_reference(&(*ptr)->reference, &pfence->reference))
      fd_fence_destroy(*ptr);

   *ptr = pfence;
}

bool
fd_fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
                struct pipe_fence_handle *fence, uint64_t timeout)
{
   /* Note: for TC deferred fence, pctx->flush() may not have been called
    * yet, so always do fence_flush() *first* before delegating to
    * fence->last_fence
    */
   if (!fence_flush(pctx, fence, timeout))
      return false;

   if (fence->last_fence)
      return fd_fence_finish(pscreen, pctx, fence->last_fence, timeout);

   if (fence->submit_fence.use_fence_fd) {
      int ret = sync_wait(fence->submit_fence.fence_fd, timeout / 1000000);
      return ret == 0;
   }

   if (fd_pipe_wait_timeout(fence->pipe, &fence->submit_fence.fence, timeout))
      return false;

   return true;
}

static struct pipe_fence_handle *
fence_create(struct fd_context *ctx, struct fd_batch *batch, int fence_fd,
             int syncobj)
{
   struct pipe_fence_handle *fence;

   fence = CALLOC_STRUCT(pipe_fence_handle);
   if (!fence)
      return NULL;

   pipe_reference_init(&fence->reference, 1);
   util_queue_fence_init(&fence->ready);
   util_queue_fence_init(&fence->submit_fence.ready);

   fence->ctx = ctx;
   fd_fence_set_batch(fence, batch);
   fence->pipe = fd_pipe_ref(ctx->pipe);
   fence->screen = ctx->screen;
   fence->submit_fence.fence_fd = fence_fd;
   fence->submit_fence.use_fence_fd = (fence_fd != -1);
   fence->syncobj = syncobj;

   return fence;
}

void
fd_create_fence_fd(struct pipe_context *pctx, struct pipe_fence_handle **pfence,
                   int fd, enum pipe_fd_type type)
{
   struct fd_context *ctx = fd_context(pctx);

   switch (type) {
   case PIPE_FD_TYPE_NATIVE_SYNC:
      *pfence =
         fence_create(fd_context(pctx), NULL, os_dupfd_cloexec(fd), 0);
      break;
   case PIPE_FD_TYPE_SYNCOBJ: {
      int ret;
      uint32_t syncobj;

      assert(ctx->screen->has_syncobj);
      ret = drmSyncobjFDToHandle(fd_device_fd(ctx->screen->dev), fd, &syncobj);
      if (!ret)
         close(fd);

      *pfence = fence_create(fd_context(pctx), NULL, -1, syncobj);
      break;
   }
   default:
      unreachable("Unhandled fence type");
   }
}

void
fd_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *fence)
{
   struct fd_context *ctx = fd_context(pctx);

   /* NOTE: we don't expect the combination of fence-fd + async-flush-fence,
    * so timeout==0 is ok here:
    */
   fence_flush(pctx, fence, 0);

   if (fence->last_fence) {
      fd_fence_server_sync(pctx, fence->last_fence);
      return;
   }

   /* if not an external fence, then nothing more to do without preemption: */
   if (!fence->submit_fence.use_fence_fd)
      return;

   if (sync_accumulate("freedreno", &ctx->in_fence_fd, fence->submit_fence.fence_fd)) {
      /* error */
   }
}

void
fd_fence_server_signal(struct pipe_context *pctx,
                       struct pipe_fence_handle *fence)
{
   struct fd_context *ctx = fd_context(pctx);

   if (fence->syncobj) {
      drmSyncobjSignal(fd_device_fd(ctx->screen->dev), &fence->syncobj, 1);
   }
}

int
fd_fence_get_fd(struct pipe_screen *pscreen, struct pipe_fence_handle *fence)
{
   /* We don't expect deferred flush to be combined with fence-fd: */
   assert(!fence->last_fence);

   assert(fence->submit_fence.use_fence_fd);

   /* NOTE: in the deferred fence case, the pctx we want is the threaded-ctx
    * but if TC is not used, this will be null. Which is fine, we won't call
    * threaded_context_flush() in that case
    */
   fence_flush(&fence->ctx->tc->base, fence, PIPE_TIMEOUT_INFINITE);
   return os_dupfd_cloexec(fence->submit_fence.fence_fd);
}

bool
fd_fence_is_fd(struct pipe_fence_handle *fence)
{
   return fence->submit_fence.use_fence_fd;
}

struct pipe_fence_handle *
fd_fence_create(struct fd_batch *batch)
{
   return fence_create(batch->ctx, batch, -1, 0);
}

void
fd_fence_set_batch(struct pipe_fence_handle *fence, struct fd_batch *batch)
{
   if (batch) {
      assert(!fence->batch);
      fence->batch = batch;
      fd_batch_needs_flush(batch);
   } else {
      fence->batch = NULL;

      /* When the batch is dis-associated with the fence, we can signal TC
       * that the fence is flushed
       */
      if (fence->needs_signal) {
         util_queue_fence_signal(&fence->ready);
         fence->needs_signal = false;
      }
   }
}

struct pipe_fence_handle *
fd_fence_create_unflushed(struct pipe_context *pctx,
                          struct tc_unflushed_batch_token *tc_token)
{
   struct pipe_fence_handle *fence =
      fence_create(fd_context(pctx), NULL, -1, 0);
   fence->needs_signal = true;
   util_queue_fence_reset(&fence->ready);
   tc_unflushed_batch_token_reference(&fence->tc_token, tc_token);
   return fence;
}
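For context, a minimal sketch of how a Gallium frontend typically drives the fence paths above through the generic pipe_screen/pipe_context hooks, which the driver wires up to the functions in this file. This is not part of freedreno_fence.c; it assumes an already-created pscreen/pctx pair, and submit_and_export_fence plus the fd hand-off are illustrative placeholders.

/* Usage sketch (assumed Gallium build environment; not part of this file) */
#include <unistd.h>

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"

static void
submit_and_export_fence(struct pipe_screen *pscreen, struct pipe_context *pctx)
{
   struct pipe_fence_handle *fence = NULL;

   /* Request a native fence-fd at flush time; with a threaded context this
    * may initially be a deferred fence that fence_flush() resolves later:
    */
   pctx->flush(pctx, &fence, PIPE_FLUSH_FENCE_FD);

   /* Block until the GPU work behind the fence has completed: */
   if (pscreen->fence_finish(pscreen, NULL, fence, PIPE_TIMEOUT_INFINITE)) {
      /* Export a dup'd sync-file fd for an external consumer: */
      int fd = pscreen->fence_get_fd(pscreen, fence);
      if (fd >= 0)
         close(fd);   /* placeholder: hand the fd off instead of closing it */
   }

   /* Drop our reference; the driver destroys the fence on the last unref: */
   pscreen->fence_reference(pscreen, &fence, NULL);
}

Note that fd_fence_get_fd() above asserts use_fence_fd, which is why the sketch asks for PIPE_FLUSH_FENCE_FD at flush time rather than trying to export an fd from a plain fence.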