/* Source: src/gallium/drivers/iris/iris_batch.c (Mesa 21.2, virgl branch) */
/*1* Copyright © 2017 Intel Corporation2*3* Permission is hereby granted, free of charge, to any person obtaining a4* copy of this software and associated documentation files (the "Software"),5* to deal in the Software without restriction, including without limitation6* the rights to use, copy, modify, merge, publish, distribute, sublicense,7* and/or sell copies of the Software, and to permit persons to whom the8* Software is furnished to do so, subject to the following conditions:9*10* The above copyright notice and this permission notice shall be included11* in all copies or substantial portions of the Software.12*13* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS14* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,15* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL16* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER17* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING18* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER19* DEALINGS IN THE SOFTWARE.20*/2122/**23* @file iris_batch.c24*25* Batchbuffer and command submission module.26*27* Every API draw call results in a number of GPU commands, which we28* collect into a "batch buffer". Typically, many draw calls are grouped29* into a single batch to amortize command submission overhead.30*31* We submit batches to the kernel using the I915_GEM_EXECBUFFER2 ioctl.32* One critical piece of data is the "validation list", which contains a33* list of the buffer objects (BOs) which the commands in the GPU need.34* The kernel will make sure these are resident and pinned at the correct35* virtual memory address before executing our batch. 
If a BO is not in36* the validation list, it effectively does not exist, so take care.37*/3839#include "iris_batch.h"40#include "iris_bufmgr.h"41#include "iris_context.h"42#include "iris_fence.h"4344#include "drm-uapi/i915_drm.h"4546#include "common/intel_aux_map.h"47#include "intel/common/intel_gem.h"48#include "util/hash_table.h"49#include "util/set.h"50#include "util/u_upload_mgr.h"51#include "main/macros.h"5253#include <errno.h>54#include <xf86drm.h>5556#if HAVE_VALGRIND57#include <valgrind.h>58#include <memcheck.h>59#define VG(x) x60#else61#define VG(x)62#endif6364#define FILE_DEBUG_FLAG DEBUG_BUFMGR6566static void67iris_batch_reset(struct iris_batch *batch);6869static unsigned70num_fences(struct iris_batch *batch)71{72return util_dynarray_num_elements(&batch->exec_fences,73struct drm_i915_gem_exec_fence);74}7576/**77* Debugging code to dump the fence list, used by INTEL_DEBUG=submit.78*/79static void80dump_fence_list(struct iris_batch *batch)81{82fprintf(stderr, "Fence list (length %u): ", num_fences(batch));8384util_dynarray_foreach(&batch->exec_fences,85struct drm_i915_gem_exec_fence, f) {86fprintf(stderr, "%s%u%s ",87(f->flags & I915_EXEC_FENCE_WAIT) ? "..." : "",88f->handle,89(f->flags & I915_EXEC_FENCE_SIGNAL) ? "!" : "");90}9192fprintf(stderr, "\n");93}9495/**96* Debugging code to dump the validation list, used by INTEL_DEBUG=submit.97*/98static void99dump_validation_list(struct iris_batch *batch)100{101fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);102103for (int i = 0; i < batch->exec_count; i++) {104uint64_t flags = batch->validation_list[i].flags;105assert(batch->validation_list[i].handle ==106batch->exec_bos[i]->gem_handle);107fprintf(stderr, "[%2d]: %2d %-14s @ 0x%"PRIx64" (%"PRIu64"B)\t %2d refs %s\n",108i,109batch->validation_list[i].handle,110batch->exec_bos[i]->name,111(uint64_t)batch->validation_list[i].offset,112batch->exec_bos[i]->size,113batch->exec_bos[i]->refcount,114(flags & EXEC_OBJECT_WRITE) ? 
" (write)" : "");115}116}117118/**119* Return BO information to the batch decoder (for debugging).120*/121static struct intel_batch_decode_bo122decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)123{124struct iris_batch *batch = v_batch;125126assert(ppgtt);127128for (int i = 0; i < batch->exec_count; i++) {129struct iris_bo *bo = batch->exec_bos[i];130/* The decoder zeroes out the top 16 bits, so we need to as well */131uint64_t bo_address = bo->gtt_offset & (~0ull >> 16);132133if (address >= bo_address && address < bo_address + bo->size) {134return (struct intel_batch_decode_bo) {135.addr = bo_address,136.size = bo->size,137.map = iris_bo_map(batch->dbg, bo, MAP_READ),138};139}140}141142return (struct intel_batch_decode_bo) { };143}144145static unsigned146decode_get_state_size(void *v_batch,147uint64_t address,148UNUSED uint64_t base_address)149{150struct iris_batch *batch = v_batch;151unsigned size = (uintptr_t)152_mesa_hash_table_u64_search(batch->state_sizes, address);153154return size;155}156157/**158* Decode the current batch.159*/160static void161decode_batch(struct iris_batch *batch)162{163void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);164intel_print_batch(&batch->decoder, map, batch->primary_batch_size,165batch->exec_bos[0]->gtt_offset, false);166}167168void169iris_init_batch(struct iris_context *ice,170enum iris_batch_name name,171int priority)172{173struct iris_batch *batch = &ice->batches[name];174struct iris_screen *screen = (void *) ice->ctx.screen;175176batch->screen = screen;177batch->dbg = &ice->dbg;178batch->reset = &ice->reset;179batch->state_sizes = ice->state.sizes;180batch->name = name;181batch->ice = ice;182batch->contains_fence_signal = false;183184batch->fine_fences.uploader =185u_upload_create(&ice->ctx, 4096, PIPE_BIND_CUSTOM,186PIPE_USAGE_STAGING, 0);187iris_fine_fence_init(batch);188189batch->hw_ctx_id = 
iris_create_hw_context(screen->bufmgr);190assert(batch->hw_ctx_id);191192iris_hw_context_set_priority(screen->bufmgr, batch->hw_ctx_id, priority);193194util_dynarray_init(&batch->exec_fences, ralloc_context(NULL));195util_dynarray_init(&batch->syncobjs, ralloc_context(NULL));196197batch->exec_count = 0;198batch->exec_array_size = 100;199batch->exec_bos =200malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));201batch->validation_list =202malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));203204batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,205_mesa_key_pointer_equal);206207memset(batch->other_batches, 0, sizeof(batch->other_batches));208209for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {210if (i != name)211batch->other_batches[j++] = &ice->batches[i];212}213214if (INTEL_DEBUG) {215const unsigned decode_flags =216INTEL_BATCH_DECODE_FULL |217((INTEL_DEBUG & DEBUG_COLOR) ? INTEL_BATCH_DECODE_IN_COLOR : 0) |218INTEL_BATCH_DECODE_OFFSETS |219INTEL_BATCH_DECODE_FLOATS;220221intel_batch_decode_ctx_init(&batch->decoder, &screen->devinfo,222stderr, decode_flags, NULL,223decode_get_bo, decode_get_state_size, batch);224batch->decoder.dynamic_base = IRIS_MEMZONE_DYNAMIC_START;225batch->decoder.instruction_base = IRIS_MEMZONE_SHADER_START;226batch->decoder.max_vbo_decoded_lines = 32;227}228229iris_init_batch_measure(ice, batch);230231iris_batch_reset(batch);232}233234static struct drm_i915_gem_exec_object2 *235find_validation_entry(struct iris_batch *batch, struct iris_bo *bo)236{237unsigned index = READ_ONCE(bo->index);238239if (index < batch->exec_count && batch->exec_bos[index] == bo)240return &batch->validation_list[index];241242/* May have been shared between multiple active batches */243for (index = 0; index < batch->exec_count; index++) {244if (batch->exec_bos[index] == bo)245return &batch->validation_list[index];246}247248return NULL;249}250251static void252ensure_exec_obj_space(struct iris_batch *batch, uint32_t 
count)253{254while (batch->exec_count + count > batch->exec_array_size) {255batch->exec_array_size *= 2;256batch->exec_bos =257realloc(batch->exec_bos,258batch->exec_array_size * sizeof(batch->exec_bos[0]));259batch->validation_list =260realloc(batch->validation_list,261batch->exec_array_size * sizeof(batch->validation_list[0]));262}263}264265/**266* Add a buffer to the current batch's validation list.267*268* You must call this on any BO you wish to use in this batch, to ensure269* that it's resident when the GPU commands execute.270*/271void272iris_use_pinned_bo(struct iris_batch *batch,273struct iris_bo *bo,274bool writable, enum iris_domain access)275{276assert(bo->kflags & EXEC_OBJECT_PINNED);277278/* Never mark the workaround BO with EXEC_OBJECT_WRITE. We don't care279* about the order of any writes to that buffer, and marking it writable280* would introduce data dependencies between multiple batches which share281* the buffer.282*/283if (bo == batch->screen->workaround_bo)284writable = false;285286if (access < NUM_IRIS_DOMAINS) {287assert(batch->sync_region_depth);288iris_bo_bump_seqno(bo, batch->next_seqno, access);289}290291struct drm_i915_gem_exec_object2 *existing_entry =292find_validation_entry(batch, bo);293294if (existing_entry) {295/* The BO is already in the validation list; mark it writable */296if (writable)297existing_entry->flags |= EXEC_OBJECT_WRITE;298299return;300}301302if (bo != batch->bo &&303(!batch->measure || bo != batch->measure->bo)) {304/* This is the first time our batch has seen this BO. Before we use it,305* we may need to flush and synchronize with other batches.306*/307for (int b = 0; b < ARRAY_SIZE(batch->other_batches); b++) {308struct drm_i915_gem_exec_object2 *other_entry =309find_validation_entry(batch->other_batches[b], bo);310311/* If the buffer is referenced by another batch, and either batch312* intends to write it, then flush the other batch and synchronize.313*314* Consider these cases:315*316* 1. 
They read, we read => No synchronization required.317* 2. They read, we write => Synchronize (they need the old value)318* 3. They write, we read => Synchronize (we need their new value)319* 4. They write, we write => Synchronize (order writes)320*321* The read/read case is very common, as multiple batches usually322* share a streaming state buffer or shader assembly buffer, and323* we want to avoid synchronizing in this case.324*/325if (other_entry &&326((other_entry->flags & EXEC_OBJECT_WRITE) || writable)) {327iris_batch_flush(batch->other_batches[b]);328iris_batch_add_syncobj(batch,329batch->other_batches[b]->last_fence->syncobj,330I915_EXEC_FENCE_WAIT);331}332}333}334335/* Now, take a reference and add it to the validation list. */336iris_bo_reference(bo);337338ensure_exec_obj_space(batch, 1);339340batch->validation_list[batch->exec_count] =341(struct drm_i915_gem_exec_object2) {342.handle = bo->gem_handle,343.offset = bo->gtt_offset,344.flags = bo->kflags | (writable ? EXEC_OBJECT_WRITE : 0),345};346347bo->index = batch->exec_count;348batch->exec_bos[batch->exec_count] = bo;349batch->aperture_space += bo->size;350351batch->exec_count++;352}353354static void355create_batch(struct iris_batch *batch)356{357struct iris_screen *screen = batch->screen;358struct iris_bufmgr *bufmgr = screen->bufmgr;359360batch->bo = iris_bo_alloc(bufmgr, "command buffer",361BATCH_SZ + BATCH_RESERVED, 1,362IRIS_MEMZONE_OTHER, 0);363batch->bo->kflags |= EXEC_OBJECT_CAPTURE;364batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);365batch->map_next = batch->map;366367iris_use_pinned_bo(batch, batch->bo, false, IRIS_DOMAIN_NONE);368}369370static void371iris_batch_maybe_noop(struct iris_batch *batch)372{373/* We only insert the NOOP at the beginning of the batch. 
*/374assert(iris_batch_bytes_used(batch) == 0);375376if (batch->noop_enabled) {377/* Emit MI_BATCH_BUFFER_END to prevent any further command to be378* executed.379*/380uint32_t *map = batch->map_next;381382map[0] = (0xA << 23);383384batch->map_next += 4;385}386}387388static void389iris_batch_reset(struct iris_batch *batch)390{391struct iris_screen *screen = batch->screen;392393iris_bo_unreference(batch->bo);394batch->primary_batch_size = 0;395batch->total_chained_batch_size = 0;396batch->contains_draw = false;397batch->contains_fence_signal = false;398batch->decoder.surface_base = batch->last_surface_base_address;399400create_batch(batch);401assert(batch->bo->index == 0);402403struct iris_syncobj *syncobj = iris_create_syncobj(screen);404iris_batch_add_syncobj(batch, syncobj, I915_EXEC_FENCE_SIGNAL);405iris_syncobj_reference(screen, &syncobj, NULL);406407assert(!batch->sync_region_depth);408iris_batch_sync_boundary(batch);409iris_batch_mark_reset_sync(batch);410411/* Always add the workaround BO, it contains a driver identifier at the412* beginning quite helpful to debug error states.413*/414iris_use_pinned_bo(batch, screen->workaround_bo, false, IRIS_DOMAIN_NONE);415416iris_batch_maybe_noop(batch);417}418419void420iris_batch_free(struct iris_batch *batch)421{422struct iris_screen *screen = batch->screen;423struct iris_bufmgr *bufmgr = screen->bufmgr;424425for (int i = 0; i < batch->exec_count; i++) {426iris_bo_unreference(batch->exec_bos[i]);427}428free(batch->exec_bos);429free(batch->validation_list);430431ralloc_free(batch->exec_fences.mem_ctx);432433pipe_resource_reference(&batch->fine_fences.ref.res, NULL);434435util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)436iris_syncobj_reference(screen, s, NULL);437ralloc_free(batch->syncobjs.mem_ctx);438439iris_fine_fence_reference(batch->screen, &batch->last_fence, NULL);440u_upload_destroy(batch->fine_fences.uploader);441442iris_bo_unreference(batch->bo);443batch->bo = NULL;444batch->map = 
NULL;445batch->map_next = NULL;446447iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);448449iris_destroy_batch_measure(batch->measure);450batch->measure = NULL;451452_mesa_hash_table_destroy(batch->cache.render, NULL);453454if (INTEL_DEBUG)455intel_batch_decode_ctx_finish(&batch->decoder);456}457458/**459* If we've chained to a secondary batch, or are getting near to the end,460* then flush. This should only be called between draws.461*/462void463iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate)464{465if (batch->bo != batch->exec_bos[0] ||466iris_batch_bytes_used(batch) + estimate >= BATCH_SZ) {467iris_batch_flush(batch);468}469}470471static void472record_batch_sizes(struct iris_batch *batch)473{474unsigned batch_size = iris_batch_bytes_used(batch);475476VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->map, batch_size));477478if (batch->bo == batch->exec_bos[0])479batch->primary_batch_size = batch_size;480481batch->total_chained_batch_size += batch_size;482}483484void485iris_chain_to_new_batch(struct iris_batch *batch)486{487uint32_t *cmd = batch->map_next;488uint64_t *addr = batch->map_next + 4;489batch->map_next += 12;490491record_batch_sizes(batch);492493/* No longer held by batch->bo, still held by validation list */494iris_bo_unreference(batch->bo);495create_batch(batch);496497/* Emit MI_BATCH_BUFFER_START to chain to another batch. 
*/498*cmd = (0x31 << 23) | (1 << 8) | (3 - 2);499*addr = batch->bo->gtt_offset;500}501502static void503add_aux_map_bos_to_batch(struct iris_batch *batch)504{505void *aux_map_ctx = iris_bufmgr_get_aux_map_context(batch->screen->bufmgr);506if (!aux_map_ctx)507return;508509uint32_t count = intel_aux_map_get_num_buffers(aux_map_ctx);510ensure_exec_obj_space(batch, count);511intel_aux_map_fill_bos(aux_map_ctx,512(void**)&batch->exec_bos[batch->exec_count], count);513for (uint32_t i = 0; i < count; i++) {514struct iris_bo *bo = batch->exec_bos[batch->exec_count];515iris_bo_reference(bo);516batch->validation_list[batch->exec_count] =517(struct drm_i915_gem_exec_object2) {518.handle = bo->gem_handle,519.offset = bo->gtt_offset,520.flags = bo->kflags,521};522batch->aperture_space += bo->size;523batch->exec_count++;524}525}526527static void528finish_seqno(struct iris_batch *batch)529{530struct iris_fine_fence *sq = iris_fine_fence_new(batch, IRIS_FENCE_END);531if (!sq)532return;533534iris_fine_fence_reference(batch->screen, &batch->last_fence, sq);535iris_fine_fence_reference(batch->screen, &sq, NULL);536}537538/**539* Terminate a batch with MI_BATCH_BUFFER_END.540*/541static void542iris_finish_batch(struct iris_batch *batch)543{544const struct intel_device_info *devinfo = &batch->screen->devinfo;545546if (devinfo->ver == 12 && batch->name == IRIS_BATCH_RENDER) {547/* We re-emit constants at the beginning of every batch as a hardware548* bug workaround, so invalidate indirect state pointers in order to549* save ourselves the overhead of restoring constants redundantly when550* the next render batch is executed.551*/552iris_emit_pipe_control_flush(batch, "ISP invalidate at batch end",553PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE |554PIPE_CONTROL_STALL_AT_SCOREBOARD |555PIPE_CONTROL_CS_STALL);556}557558add_aux_map_bos_to_batch(batch);559560finish_seqno(batch);561562/* Emit MI_BATCH_BUFFER_END to finish our batch. 
*/563uint32_t *map = batch->map_next;564565map[0] = (0xA << 23);566567batch->map_next += 4;568569record_batch_sizes(batch);570}571572/**573* Replace our current GEM context with a new one (in case it got banned).574*/575static bool576replace_hw_ctx(struct iris_batch *batch)577{578struct iris_screen *screen = batch->screen;579struct iris_bufmgr *bufmgr = screen->bufmgr;580581uint32_t new_ctx = iris_clone_hw_context(bufmgr, batch->hw_ctx_id);582if (!new_ctx)583return false;584585iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);586batch->hw_ctx_id = new_ctx;587588/* Notify the context that state must be re-initialized. */589iris_lost_context_state(batch);590591return true;592}593594enum pipe_reset_status595iris_batch_check_for_reset(struct iris_batch *batch)596{597struct iris_screen *screen = batch->screen;598enum pipe_reset_status status = PIPE_NO_RESET;599struct drm_i915_reset_stats stats = { .ctx_id = batch->hw_ctx_id };600601if (intel_ioctl(screen->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))602DBG("DRM_IOCTL_I915_GET_RESET_STATS failed: %s\n", strerror(errno));603604if (stats.batch_active != 0) {605/* A reset was observed while a batch from this hardware context was606* executing. Assume that this context was at fault.607*/608status = PIPE_GUILTY_CONTEXT_RESET;609} else if (stats.batch_pending != 0) {610/* A reset was observed while a batch from this context was in progress,611* but the batch was not executing. In this case, assume that the612* context was not at fault.613*/614status = PIPE_INNOCENT_CONTEXT_RESET;615}616617if (status != PIPE_NO_RESET) {618/* Our context is likely banned, or at least in an unknown state.619* Throw it away and start with a fresh context. 
Ideally this may620* catch the problem before our next execbuf fails with -EIO.621*/622replace_hw_ctx(batch);623}624625return status;626}627628/**629* Submit the batch to the GPU via execbuffer2.630*/631static int632submit_batch(struct iris_batch *batch)633{634iris_bo_unmap(batch->bo);635636/* The requirement for using I915_EXEC_NO_RELOC are:637*638* The addresses written in the objects must match the corresponding639* reloc.gtt_offset which in turn must match the corresponding640* execobject.offset.641*642* Any render targets written to in the batch must be flagged with643* EXEC_OBJECT_WRITE.644*645* To avoid stalling, execobject.offset should match the current646* address of that object within the active context.647*/648struct drm_i915_gem_execbuffer2 execbuf = {649.buffers_ptr = (uintptr_t) batch->validation_list,650.buffer_count = batch->exec_count,651.batch_start_offset = 0,652/* This must be QWord aligned. */653.batch_len = ALIGN(batch->primary_batch_size, 8),654.flags = I915_EXEC_RENDER |655I915_EXEC_NO_RELOC |656I915_EXEC_BATCH_FIRST |657I915_EXEC_HANDLE_LUT,658.rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */659};660661if (num_fences(batch)) {662execbuf.flags |= I915_EXEC_FENCE_ARRAY;663execbuf.num_cliprects = num_fences(batch);664execbuf.cliprects_ptr =665(uintptr_t)util_dynarray_begin(&batch->exec_fences);666}667668int ret = 0;669if (!batch->screen->no_hw &&670intel_ioctl(batch->screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))671ret = -errno;672673for (int i = 0; i < batch->exec_count; i++) {674struct iris_bo *bo = batch->exec_bos[i];675676bo->idle = false;677bo->index = -1;678679iris_bo_unreference(bo);680}681682return ret;683}684685static const char *686batch_name_to_string(enum iris_batch_name name)687{688const char *names[IRIS_BATCH_COUNT] = {689[IRIS_BATCH_RENDER] = "render",690[IRIS_BATCH_COMPUTE] = "compute",691};692return names[name];693}694695/**696* Flush the batch buffer, submitting it to the GPU and resetting it so697* 
we're ready to emit the next batch.698*/699void700_iris_batch_flush(struct iris_batch *batch, const char *file, int line)701{702struct iris_screen *screen = batch->screen;703704/* If a fence signals we need to flush it. */705if (iris_batch_bytes_used(batch) == 0 && !batch->contains_fence_signal)706return;707708iris_measure_batch_end(batch->ice, batch);709710iris_finish_batch(batch);711712if (INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT | DEBUG_PIPE_CONTROL)) {713const char *basefile = strstr(file, "iris/");714if (basefile)715file = basefile + 5;716717fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5db (%0.1f%%) "718"(cmds), %4d BOs (%0.1fMb aperture)\n",719file, line, batch_name_to_string(batch->name), batch->hw_ctx_id,720batch->total_chained_batch_size,721100.0f * batch->total_chained_batch_size / BATCH_SZ,722batch->exec_count,723(float) batch->aperture_space / (1024 * 1024));724725if (INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT)) {726dump_fence_list(batch);727dump_validation_list(batch);728}729730if (INTEL_DEBUG & DEBUG_BATCH) {731decode_batch(batch);732}733}734735int ret = submit_batch(batch);736737batch->exec_count = 0;738batch->aperture_space = 0;739740util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)741iris_syncobj_reference(screen, s, NULL);742util_dynarray_clear(&batch->syncobjs);743744util_dynarray_clear(&batch->exec_fences);745746if (INTEL_DEBUG & DEBUG_SYNC) {747dbg_printf("waiting for idle\n");748iris_bo_wait_rendering(batch->bo); /* if execbuf failed; this is a nop */749}750751/* Start a new batch buffer. */752iris_batch_reset(batch);753754/* EIO means our context is banned. In this case, try and replace it755* with a new logical context, and inform iris_context that all state756* has been lost and needs to be re-initialized. 
If this succeeds,757* dubiously claim success...758* Also handle ENOMEM here.759*/760if ((ret == -EIO || ret == -ENOMEM) && replace_hw_ctx(batch)) {761if (batch->reset->reset) {762/* Tell gallium frontends the device is lost and it was our fault. */763batch->reset->reset(batch->reset->data, PIPE_GUILTY_CONTEXT_RESET);764}765766ret = 0;767}768769if (ret < 0) {770#ifdef DEBUG771const bool color = INTEL_DEBUG & DEBUG_COLOR;772fprintf(stderr, "%siris: Failed to submit batchbuffer: %-80s%s\n",773color ? "\e[1;41m" : "", strerror(-ret), color ? "\e[0m" : "");774#endif775abort();776}777}778779/**780* Does the current batch refer to the given BO?781*782* (In other words, is the BO in the current batch's validation list?)783*/784bool785iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)786{787return find_validation_entry(batch, bo) != NULL;788}789790/**791* Updates the state of the noop feature. Returns true if there was a noop792* transition that led to state invalidation.793*/794bool795iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable)796{797if (batch->noop_enabled == noop_enable)798return 0;799800batch->noop_enabled = noop_enable;801802iris_batch_flush(batch);803804/* If the batch was empty, flush had no effect, so insert our noop. */805if (iris_batch_bytes_used(batch) == 0)806iris_batch_maybe_noop(batch);807808/* We only need to update the entire state if we transition from noop ->809* not-noop.810*/811return !batch->noop_enabled;812}813814815