Path: blob/21.2-virgl/src/gallium/drivers/iris/iris_fence.c
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_fence.c
 *
 * Fences for driver and IPC serialisation, scheduling and synchronisation.
 */

#include "drm-uapi/sync_file.h"
#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "intel/common/intel_gem.h"

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"
#include "iris_screen.h"

static uint32_t
gem_syncobj_create(int fd, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);

   return args.handle;
}

static void
gem_syncobj_destroy(int fd, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

/**
 * Make a new sync-point.
 */
struct iris_syncobj *
iris_create_syncobj(struct iris_screen *screen)
{
   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));

   if (!syncobj)
      return NULL;

   syncobj->handle = gem_syncobj_create(screen->fd, 0);
   assert(syncobj->handle);

   pipe_reference_init(&syncobj->ref, 1);

   return syncobj;
}

void
iris_syncobj_destroy(struct iris_screen *screen, struct iris_syncobj *syncobj)
{
   gem_syncobj_destroy(screen->fd, syncobj->handle);
   free(syncobj);
}

/**
 * Add a sync-point to the batch, with the given flags.
 *
 * \p flags   One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
 */
void
iris_batch_add_syncobj(struct iris_batch *batch,
                       struct iris_syncobj *syncobj,
                       unsigned flags)
{
   struct drm_i915_gem_exec_fence *fence =
      util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);

   *fence = (struct drm_i915_gem_exec_fence) {
      .handle = syncobj->handle,
      .flags = flags,
   };

   struct iris_syncobj **store =
      util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);

   *store = NULL;
   iris_syncobj_reference(batch->screen, store, syncobj);
}
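/* Illustrative sketch (not part of the file): a caller that wants a batch to
 * wait on a sync-point signalled elsewhere could, assuming it already holds
 * valid screen and batch pointers, do:
 *
 *    struct iris_syncobj *point = iris_create_syncobj(screen);
 *    iris_batch_add_syncobj(batch, point, I915_EXEC_FENCE_WAIT);
 *    iris_syncobj_reference(screen, &point, NULL);
 *
 * The batch takes its own reference in iris_batch_add_syncobj(), so the
 * caller may drop its reference straight away.
 */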
/**
 * Walk through a batch's dependencies (any I915_EXEC_FENCE_WAIT syncobjs)
 * and unreference any which have already passed.
 *
 * Sometimes the compute batch is seldom used, and accumulates references
 * to stale render batches that are no longer of interest, so we can free
 * those up.
 */
static void
clear_stale_syncobjs(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;

   int n = util_dynarray_num_elements(&batch->syncobjs, struct iris_syncobj *);

   assert(n == util_dynarray_num_elements(&batch->exec_fences,
                                          struct drm_i915_gem_exec_fence));

   /* Skip the first syncobj, as it's the signalling one. */
   for (int i = n - 1; i > 0; i--) {
      struct iris_syncobj **syncobj =
         util_dynarray_element(&batch->syncobjs, struct iris_syncobj *, i);
      struct drm_i915_gem_exec_fence *fence =
         util_dynarray_element(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence, i);
      assert(fence->flags & I915_EXEC_FENCE_WAIT);

      if (iris_wait_syncobj(&screen->base, *syncobj, 0))
         continue;

      /* This sync object has already passed, there's no need to continue
       * marking it as a dependency; we can stop holding on to the reference.
       */
      iris_syncobj_reference(screen, syncobj, NULL);

      /* Remove it from the lists; move the last element here. */
      struct iris_syncobj **nth_syncobj =
         util_dynarray_pop_ptr(&batch->syncobjs, struct iris_syncobj *);
      struct drm_i915_gem_exec_fence *nth_fence =
         util_dynarray_pop_ptr(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence);

      if (syncobj != nth_syncobj) {
         *syncobj = *nth_syncobj;
         memcpy(fence, nth_fence, sizeof(*fence));
      }
   }
}

/* ------------------------------------------------------------------- */

struct pipe_fence_handle {
   struct pipe_reference ref;

   struct pipe_context *unflushed_ctx;

   struct iris_fine_fence *fine[IRIS_BATCH_COUNT];
};

static void
iris_fence_destroy(struct pipe_screen *p_screen,
                   struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++)
      iris_fine_fence_reference(screen, &fence->fine[i], NULL);

   free(fence);
}

static void
iris_fence_reference(struct pipe_screen *p_screen,
                     struct pipe_fence_handle **dst,
                     struct pipe_fence_handle *src)
{
   if (pipe_reference(*dst ? &(*dst)->ref : NULL,
                      src ? &src->ref : NULL))
      iris_fence_destroy(p_screen, *dst);

   *dst = src;
}

/* Note the inverted return convention: this returns the raw ioctl result,
 * so it is zero (false) once the syncobj has signalled, and non-zero (true)
 * on timeout or error - i.e. while the syncobj is still pending.
 */
bool
iris_wait_syncobj(struct pipe_screen *p_screen,
                  struct iris_syncobj *syncobj,
                  int64_t timeout_nsec)
{
   if (!syncobj)
      return false;

   struct iris_screen *screen = (struct iris_screen *)p_screen;
   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
      .timeout_nsec = timeout_nsec,
   };
   return intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}
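/* Illustrative sketch (not part of the file): a zero timeout turns the wait
 * into a poll, so "has this sync-point passed yet?" can be phrased as,
 * given an existing screen and syncobj:
 *
 *    bool still_pending = iris_wait_syncobj(&screen->base, syncobj, 0);
 *
 * which is exactly how clear_stale_syncobjs() above decides whether a
 * dependency reference can be dropped.
 */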
#define CSI "\e["
#define BLUE_HEADER  CSI "0;97;44m"
#define NORMAL       CSI "0m"

static void
iris_fence_flush(struct pipe_context *ctx,
                 struct pipe_fence_handle **out_fence,
                 unsigned flags)
{
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_context *ice = (struct iris_context *)ctx;

   /* We require DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (kernel 5.2+) for
    * deferred flushes.  Just ignore the request to defer on older kernels.
    */
   if (!(screen->kernel_features & KERNEL_HAS_WAIT_FOR_SUBMIT))
      flags &= ~PIPE_FLUSH_DEFERRED;

   const bool deferred = flags & PIPE_FLUSH_DEFERRED;

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      ice->frame++;

      if (INTEL_DEBUG & DEBUG_SUBMIT) {
         fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
                 (INTEL_DEBUG & DEBUG_COLOR) ? BLUE_HEADER : "",
                 ice->frame, ctx, ' ',
                 (INTEL_DEBUG & DEBUG_COLOR) ? NORMAL : "");
      }
   }

   iris_flush_dirty_dmabufs(ice);

   if (!deferred) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
         iris_batch_flush(&ice->batches[i]);
   }

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      iris_measure_frame_end(ice);
   }

   if (!out_fence)
      return;

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence)
      return;

   pipe_reference_init(&fence->ref, 1);

   if (deferred)
      fence->unflushed_ctx = ctx;

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      struct iris_batch *batch = &ice->batches[b];

      if (deferred && iris_batch_bytes_used(batch) > 0) {
         struct iris_fine_fence *fine =
            iris_fine_fence_new(batch, IRIS_FENCE_BOTTOM_OF_PIPE);
         iris_fine_fence_reference(screen, &fence->fine[b], fine);
         iris_fine_fence_reference(screen, &fine, NULL);
      } else {
         /* This batch has no commands queued up (perhaps we just flushed,
          * or all the commands are on the other batch).  Wait for the last
          * syncobj on this engine - unless it's already finished by now.
          */
         if (iris_fine_fence_signaled(batch->last_fence))
            continue;

         iris_fine_fence_reference(screen, &fence->fine[b], batch->last_fence);
      }
   }

   iris_fence_reference(ctx->screen, out_fence, NULL);
   *out_fence = fence;
}

static void
iris_fence_await(struct pipe_context *ctx,
                 struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Unflushed fences from the same context are no-ops. */
   if (ctx && ctx == fence->unflushed_ctx)
      return;

   /* XXX: We can't safely flush the other context, because it might be
    *      bound to another thread, and poking at its internals wouldn't
    *      be safe.  In the future we should use MI_SEMAPHORE_WAIT and
    *      block until the other job has been submitted, relying on
    *      kernel timeslicing to preempt us until the other job is
    *      actually flushed and the seqno finally passes.
    */
   if (fence->unflushed_ctx) {
      pipe_debug_message(&ice->dbg, CONFORMANCE, "%s",
                         "glWaitSync on unflushed fence from another context "
                         "is unlikely to work without kernel 5.8+\n");
   }

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
         struct iris_batch *batch = &ice->batches[b];

         /* We're going to make any future work in this batch wait for our
          * fence to have gone by.  But any currently queued work doesn't
          * need to wait.  Flush the batch now, so it can happen sooner.
          */
         iris_batch_flush(batch);

         /* Before adding a new reference, clean out any stale ones. */
         clear_stale_syncobjs(batch);

         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
      }
   }
}
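/* Illustrative sketch (not part of the file): iris_fence_flush() and
 * iris_fence_await() are installed as the pipe_context flush and
 * fence_server_sync hooks (see the bottom of this file), so a cross-context
 * GL-level wait boils down to roughly:
 *
 *    producer_ctx->flush(producer_ctx, &fence, 0);
 *    consumer_ctx->fence_server_sync(consumer_ctx, fence);
 *
 * after which any new work in the consumer's batches carries an
 * I915_EXEC_FENCE_WAIT on the producer's syncobjs.
 */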
#define NSEC_PER_SEC (1000 * USEC_PER_SEC)
#define USEC_PER_SEC (1000 * MSEC_PER_SEC)
#define MSEC_PER_SEC (1000)

static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

static uint64_t
rel2abs(uint64_t timeout)
{
   if (timeout == 0)
      return 0;

   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return current_time + timeout;
}

static bool
iris_fence_finish(struct pipe_screen *p_screen,
                  struct pipe_context *ctx,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   ctx = threaded_context_unwrap_sync(ctx);

   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   /* If we created the fence with PIPE_FLUSH_DEFERRED, we may not have
    * flushed yet.  Check if our syncobj is the current batch's signalling
    * syncobj - if so, we haven't flushed and need to now.
    *
    * The Gallium docs mention that a flush will occur if \p ctx matches
    * the context the fence was created with.  It may be NULL, so we check
    * that it matches first.
    */
   if (ctx && ctx == fence->unflushed_ctx) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         if (iris_fine_fence_signaled(fine))
            continue;

         if (fine->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
            iris_batch_flush(&ice->batches[i]);
      }

      /* The fence is no longer deferred. */
      fence->unflushed_ctx = NULL;
   }

   unsigned int handle_count = 0;
   uint32_t handles[ARRAY_SIZE(fence->fine)];
   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      handles[handle_count++] = fine->syncobj->handle;
   }

   if (handle_count == 0)
      return true;

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)handles,
      .count_handles = handle_count,
      .timeout_nsec = rel2abs(timeout),
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
   };

   if (fence->unflushed_ctx) {
      /* This fence had a deferred flush from another context.  We can't
       * safely flush it here, because the context might be bound to a
       * different thread, and poking at its internals wouldn't be safe.
       *
       * Instead, use the WAIT_FOR_SUBMIT flag to block and hope that
       * another thread submits the work.
       */
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
   }

   return intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
}

static int
sync_merge_fd(int sync_fd, int new_fd)
{
   if (sync_fd == -1)
      return new_fd;

   if (new_fd == -1)
      return sync_fd;

   struct sync_merge_data args = {
      .name = "iris fence",
      .fd2 = new_fd,
      .fence = -1,
   };

   intel_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
   close(new_fd);
   close(sync_fd);

   return args.fence;
}
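/* Illustrative sketch (not part of the file): sync_merge_fd() is written so
 * that it can be folded over a set of fds, with -1 as the identity value:
 *
 *    int fd = -1;
 *    for (unsigned i = 0; i < n; i++)
 *       fd = sync_merge_fd(fd, fds[i]);   (consumes fds[i])
 *
 * iris_fence_get_fd() below uses exactly this pattern to combine one
 * exported sync file per unsignalled fine fence into a single fd.
 */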
static int
iris_fence_get_fd(struct pipe_screen *p_screen,
                  struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;
   int fd = -1;

   /* Deferred fences aren't supported. */
   if (fence->unflushed_ctx)
      return -1;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      struct drm_syncobj_handle args = {
         .handle = fine->syncobj->handle,
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
         .fd = -1,
      };

      intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      fd = sync_merge_fd(fd, args.fd);
   }

   if (fd == -1) {
      /* Our fence has no syncobjs recorded.  This means that all of the
       * batches had already completed, their syncobjs had been signalled,
       * and so we didn't bother to record them.  But we're being asked to
       * export such a fence.  So export a dummy already-signalled syncobj.
       */
      struct drm_syncobj_handle args = {
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
         .fd = -1,
      };

      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
      intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      gem_syncobj_destroy(screen->fd, args.handle);
      return args.fd;
   }

   return fd;
}
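/* Illustrative sketch (not part of the file): together with
 * iris_fence_create_fd() below, this export path gives the usual sync-file
 * round trip between contexts, roughly:
 *
 *    int fd = screen->fence_get_fd(screen, fence);
 *    ctx->create_fence_fd(ctx, &imported, fd, PIPE_FD_TYPE_NATIVE_SYNC);
 *
 * where the importing context may live in a different process entirely.
 */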
So, create a fake one which always545* returns as 'not signaled' so we fall back to using the sync object.546*/547fine->seqno = UINT32_MAX;548fine->map = &zero;549fine->syncobj = syncobj;550fine->flags = IRIS_FENCE_END;551pipe_reference_init(&fine->reference, 1);552553struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));554if (!fence) {555free(fine);556free(syncobj);557*out = NULL;558return;559}560pipe_reference_init(&fence->ref, 1);561fence->fine[0] = fine;562563*out = fence;564}565566static void567iris_fence_signal(struct pipe_context *ctx,568struct pipe_fence_handle *fence)569{570struct iris_context *ice = (struct iris_context *)ctx;571572if (ctx == fence->unflushed_ctx)573return;574575for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {576for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {577struct iris_fine_fence *fine = fence->fine[i];578579/* already signaled fence skipped */580if (iris_fine_fence_signaled(fine))581continue;582583ice->batches[b].contains_fence_signal = true;584iris_batch_add_syncobj(&ice->batches[b], fine->syncobj,585I915_EXEC_FENCE_SIGNAL);586}587}588}589590void591iris_init_screen_fence_functions(struct pipe_screen *screen)592{593screen->fence_reference = iris_fence_reference;594screen->fence_finish = iris_fence_finish;595screen->fence_get_fd = iris_fence_get_fd;596}597598void599iris_init_context_fence_functions(struct pipe_context *ctx)600{601ctx->flush = iris_fence_flush;602ctx->create_fence_fd = iris_fence_create_fd;603ctx->fence_server_sync = iris_fence_await;604ctx->fence_server_signal = iris_fence_signal;605}606607608