Path: blob/21.2-virgl/src/gallium/drivers/zink/zink_fence.c
/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_batch.h"
#include "zink_context.h"
#include "zink_fence.h"

#include "zink_resource.h"
#include "zink_screen.h"

#include "util/set.h"
#include "util/u_memory.h"

static void
destroy_fence(struct zink_screen *screen, struct zink_tc_fence *mfence)
{
   struct zink_batch_state *bs = zink_batch_state(mfence->fence);
   mfence->fence = NULL;
   zink_batch_state_reference(screen, &bs, NULL);
   tc_unflushed_batch_token_reference(&mfence->tc_token, NULL);
   FREE(mfence);
}

struct zink_tc_fence *
zink_create_tc_fence(void)
{
   struct zink_tc_fence *mfence = CALLOC_STRUCT(zink_tc_fence);
   if (!mfence)
      return NULL;
   pipe_reference_init(&mfence->reference, 1);
   util_queue_fence_init(&mfence->ready);
   return mfence;
}

struct pipe_fence_handle *
zink_create_tc_fence_for_tc(struct pipe_context *pctx, struct tc_unflushed_batch_token *tc_token)
{
   struct zink_tc_fence *mfence = zink_create_tc_fence();
   if (!mfence)
      return NULL;
   util_queue_fence_reset(&mfence->ready);
   tc_unflushed_batch_token_reference(&mfence->tc_token, tc_token);
   return (struct pipe_fence_handle*)mfence;
}

void
zink_fence_reference(struct zink_screen *screen,
                     struct zink_tc_fence **ptr,
                     struct zink_tc_fence *mfence)
{
   if (pipe_reference(&(*ptr)->reference, &mfence->reference))
      destroy_fence(screen, *ptr);

   *ptr = mfence;
}

static void
fence_reference(struct pipe_screen *pscreen,
                struct pipe_fence_handle **pptr,
                struct pipe_fence_handle *pfence)
{
   zink_fence_reference(zink_screen(pscreen), (struct zink_tc_fence **)pptr,
                        zink_tc_fence(pfence));
}

static bool
tc_fence_finish(struct zink_context *ctx, struct zink_tc_fence *mfence, uint64_t *timeout_ns)
{
   if (!util_queue_fence_is_signalled(&mfence->ready)) {
      int64_t abs_timeout = os_time_get_absolute_timeout(*timeout_ns);
      if (mfence->tc_token) {
         /* Ensure that zink_flush will be called for
          * this mfence, but only if we're in the API thread
          * where the context is current.
          *
          * Note that the batch containing the flush may already
          * be in flight in the driver thread, so the mfence
          * may not be ready yet when this call returns.
          */
         threaded_context_flush(&ctx->base, mfence->tc_token, *timeout_ns == 0);
      }

      if (!timeout_ns)
         return false;

      /* this is a tc mfence, so we're just waiting on the queue mfence to complete
       * after being signaled by the real mfence
       */
      if (*timeout_ns == PIPE_TIMEOUT_INFINITE) {
         util_queue_fence_wait(&mfence->ready);
      } else {
         if (!util_queue_fence_wait_timeout(&mfence->ready, abs_timeout))
            return false;
      }
      if (*timeout_ns && *timeout_ns != PIPE_TIMEOUT_INFINITE) {
         int64_t time_ns = os_time_get_nano();
         *timeout_ns = abs_timeout > time_ns ? abs_timeout - time_ns : 0;
      }
   }

   return true;
}

bool
zink_vkfence_wait(struct zink_screen *screen, struct zink_fence *fence, uint64_t timeout_ns)
{
   if (screen->device_lost)
      return true;
   if (p_atomic_read(&fence->completed))
      return true;

   assert(fence->batch_id);
   assert(fence->submitted);

   bool success = false;

   VkResult ret;
   if (timeout_ns)
      ret = vkWaitForFences(screen->dev, 1, &fence->fence, VK_TRUE, timeout_ns);
   else
      ret = vkGetFenceStatus(screen->dev, fence->fence);
   success = zink_screen_handle_vkresult(screen, ret);

   if (success) {
      p_atomic_set(&fence->completed, true);
      zink_batch_state(fence)->usage.usage = 0;
      zink_screen_update_last_finished(screen, fence->batch_id);
   }
   return success;
}

static bool
zink_fence_finish(struct zink_screen *screen, struct pipe_context *pctx, struct zink_tc_fence *mfence,
                  uint64_t timeout_ns)
{
   pctx = threaded_context_unwrap_sync(pctx);
   struct zink_context *ctx = zink_context(pctx);

   if (screen->device_lost)
      return true;

   if (pctx && mfence->deferred_ctx == pctx) {
      if (mfence->fence == ctx->deferred_fence) {
         zink_context(pctx)->batch.has_work = true;
         /* this must be the current batch */
         pctx->flush(pctx, NULL, !timeout_ns ? PIPE_FLUSH_ASYNC : 0);
         if (!timeout_ns)
            return false;
      }
   }

   /* need to ensure the tc mfence has been flushed before we wait */
   bool tc_finish = tc_fence_finish(ctx, mfence, &timeout_ns);
   /* the submit thread hasn't finished yet */
   if (!tc_finish)
      return false;
   /* this was an invalid flush, just return completed */
   if (!mfence->fence)
      return true;

   struct zink_fence *fence = mfence->fence;

   unsigned submit_diff = zink_batch_state(mfence->fence)->submit_count - mfence->submit_count;
   /* this batch is known to have finished because it has been submitted more than 1 time
    * since the tc fence last saw it
    */
   if (submit_diff > 1)
      return true;

   if (fence->submitted && zink_screen_check_last_finished(screen, fence->batch_id))
      return true;

   return zink_vkfence_wait(screen, fence, timeout_ns);
}

static bool
fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
             struct pipe_fence_handle *pfence, uint64_t timeout_ns)
{
   return zink_fence_finish(zink_screen(pscreen), pctx, zink_tc_fence(pfence),
                            timeout_ns);
}

void
zink_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *pfence)
{
   struct zink_tc_fence *mfence = zink_tc_fence(pfence);

   if (pctx && mfence->deferred_ctx == pctx)
      return;

   if (mfence->deferred_ctx) {
      zink_context(pctx)->batch.has_work = true;
      /* this must be the current batch */
      pctx->flush(pctx, NULL, 0);
   }
   zink_fence_finish(zink_screen(pctx->screen), pctx, mfence, PIPE_TIMEOUT_INFINITE);
}

void
zink_screen_fence_init(struct pipe_screen *pscreen)
{
   pscreen->fence_reference = fence_reference;
   pscreen->fence_finish = fence_finish;
}
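The following is not part of the file above; it is a minimal sketch of how a gallium frontend typically reaches these entry points once zink_screen_fence_init() has populated the pipe_screen vtable. The helper name wait_for_gpu_idle() is hypothetical, and pctx/pscreen are assumed to be an already-created zink context and screen; only the vtable calls (pipe_context::flush, pipe_screen::fence_finish, pipe_screen::fence_reference) are standard gallium interfaces.

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"

/* Illustrative helper (not upstream code): flush the current batch, take the
 * returned fence, and block until the GPU has finished with it.
 */
static void
wait_for_gpu_idle(struct pipe_context *pctx, struct pipe_screen *pscreen)
{
   struct pipe_fence_handle *fence = NULL;

   /* flushing the context hands back a fence for the submitted batch;
    * with threaded gallium this is a zink_tc_fence, created through
    * zink_create_tc_fence_for_tc() above
    */
   pctx->flush(pctx, &fence, 0);

   if (fence) {
      /* dispatches to fence_finish() -> zink_fence_finish() above */
      pscreen->fence_finish(pscreen, NULL, fence, PIPE_TIMEOUT_INFINITE);
      /* dispatches to fence_reference(), dropping the last reference */
      pscreen->fence_reference(pscreen, &fence, NULL);
   }
}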