Path: blob/21.2-virgl/src/gallium/drivers/crocus/crocus_pipe_control.c
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file crocus_pipe_control.c
 *
 * PIPE_CONTROL is the main flushing and synchronization primitive on Intel
 * GPUs.  It can invalidate caches, stall until rendering reaches various
 * stages of completion, write to memory, and other things.  In a way, it's
 * a swiss army knife command - it has all kinds of capabilities, but some
 * significant limitations as well.
 *
 * Unfortunately, it's notoriously complicated and difficult to use.  Many
 * sub-commands can't be used together.  Some are meant to be used at the
 * top of the pipeline (invalidating caches before drawing), while some are
 * meant to be used at the end (stalling or flushing after drawing).
 *
 * Also, there's a list of restrictions a mile long, which vary by generation.
 * Do this before doing that, or suffer the consequences (usually a GPU hang).
 *
 * This file contains helpers for emitting them safely.  You can simply call
 * crocus_emit_pipe_control_flush() with the desired operations (as logical
 * PIPE_CONTROL_* bits), and it will take care of splitting it into multiple
 * PIPE_CONTROL commands as necessary.  The per-generation workarounds are
 * applied in crocus_emit_raw_pipe_control() in crocus_state.c.
 */

#include "crocus_context.h"
#include "util/hash_table.h"
#include "util/set.h"

/**
 * Emit a PIPE_CONTROL with various flushing flags.
 *
 * The caller is responsible for deciding what flags are appropriate for the
 * given generation.
 */
void
crocus_emit_pipe_control_flush(struct crocus_batch *batch,
                               const char *reason,
                               uint32_t flags)
{
   const struct intel_device_info *devinfo = &batch->screen->devinfo;

   if (devinfo->ver >= 6 &&
       (flags & PIPE_CONTROL_CACHE_FLUSH_BITS) &&
       (flags & PIPE_CONTROL_CACHE_INVALIDATE_BITS)) {
      /* A pipe control command with flush and invalidate bits set
       * simultaneously is an inherently racy operation on Gen6+ if the
       * contents of the flushed caches were intended to become visible from
       * any of the invalidated caches.  Split it into two PIPE_CONTROLs; the
       * first one should stall the pipeline to make sure that the flushed R/W
       * caches are coherent with memory once the specified R/O caches are
       * invalidated.  On pre-Gen6 hardware the (implicit) R/O cache
       * invalidation seems to happen at the bottom of the pipeline together
       * with any write cache flush, so this shouldn't be a concern.  In order
       * to ensure a full stall, we do an end-of-pipe sync.
       */
      crocus_emit_end_of_pipe_sync(batch, reason,
                                   flags & PIPE_CONTROL_CACHE_FLUSH_BITS);
      flags &= ~(PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CS_STALL);
   }

   batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, NULL, 0, 0);
}
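
/* Illustrative usage sketch (hypothetical call site, not part of this file):
 * a caller may request flush and invalidate bits in a single call and let the
 * helper above split them into two PIPE_CONTROLs on Gen6+.
 *
 *    crocus_emit_pipe_control_flush(batch, "example: flush RT, invalidate tex",
 *                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
 *                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 */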

/**
 * Emit a PIPE_CONTROL that writes to a buffer object.
 *
 * \p flags should contain one of the following items:
 * - PIPE_CONTROL_WRITE_IMMEDIATE
 * - PIPE_CONTROL_WRITE_TIMESTAMP
 * - PIPE_CONTROL_WRITE_DEPTH_COUNT
 */
void
crocus_emit_pipe_control_write(struct crocus_batch *batch,
                               const char *reason, uint32_t flags,
                               struct crocus_bo *bo, uint32_t offset,
                               uint64_t imm)
{
   batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, bo, offset, imm);
}
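
/* Illustrative usage sketch (hypothetical call site; "query_bo" and
 * "snapshot_offset" are made-up names): capturing a GPU timestamp into a
 * buffer object.
 *
 *    crocus_emit_pipe_control_write(batch, "example: timestamp snapshot",
 *                                   PIPE_CONTROL_WRITE_TIMESTAMP,
 *                                   query_bo, snapshot_offset, 0);
 */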

/**
 * Restriction [DevSNB, DevIVB]:
 *
 * Prior to changing Depth/Stencil Buffer state (i.e. any combination of
 * 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS, 3DSTATE_STENCIL_BUFFER,
 * 3DSTATE_HIER_DEPTH_BUFFER) SW must first issue a pipelined depth stall
 * (PIPE_CONTROL with Depth Stall bit set), followed by a pipelined depth
 * cache flush (PIPE_CONTROL with Depth Flush Bit set), followed by
 * another pipelined depth stall (PIPE_CONTROL with Depth Stall bit set),
 * unless SW can otherwise guarantee that the pipeline from WM onwards is
 * already flushed (e.g., via a preceding MI_FLUSH).
 */
void
crocus_emit_depth_stall_flushes(struct crocus_batch *batch)
{
   UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;

   assert(devinfo->ver >= 6);

   /* Starting on BDW, these pipe controls are unnecessary.
    *
    * WM HW will internally manage the draining pipe and flushing of the caches
    * when this command is issued. The PIPE_CONTROL restrictions are removed.
    */
   if (devinfo->ver >= 8)
      return;

   crocus_emit_pipe_control_flush(batch, "depth stall", PIPE_CONTROL_DEPTH_STALL);
   crocus_emit_pipe_control_flush(batch, "depth stall", PIPE_CONTROL_DEPTH_CACHE_FLUSH);
   crocus_emit_pipe_control_flush(batch, "depth stall", PIPE_CONTROL_DEPTH_STALL);
}

/*
 * From Sandybridge PRM, volume 2, "1.7.2 End-of-Pipe Synchronization":
 *
 *    Write synchronization is a special case of end-of-pipe
 *    synchronization that requires that the render cache and/or depth
 *    related caches are flushed to memory, where the data will become
 *    globally visible.  This type of synchronization is required prior to
 *    SW (CPU) actually reading the result data from memory, or initiating
 *    an operation that will use as a read surface (such as a texture
 *    surface) a previous render target and/or depth/stencil buffer.
 *
 * From Haswell PRM, volume 2, part 1, "End-of-Pipe Synchronization":
 *
 *    Exercising the write cache flush bits (Render Target Cache Flush
 *    Enable, Depth Cache Flush Enable, DC Flush) in PIPE_CONTROL only
 *    ensures the write caches are flushed and doesn't guarantee the data
 *    is globally visible.
 *
 *    SW can track the completion of the end-of-pipe-synchronization by
 *    using "Notify Enable" and "PostSync Operation - Write Immediate
 *    Data" in the PIPE_CONTROL command.
 */
void
crocus_emit_end_of_pipe_sync(struct crocus_batch *batch,
                             const char *reason, uint32_t flags)
{
   const struct intel_device_info *devinfo = &batch->screen->devinfo;

   if (devinfo->ver >= 6) {
      /* From Sandybridge PRM, volume 2, "1.7.3.1 Writing a Value to Memory":
       *
       *    "The most common action to perform upon reaching a synchronization
       *    point is to write a value out to memory. An immediate value
       *    (included with the synchronization command) may be written."
       *
       * From Broadwell PRM, volume 7, "End-of-Pipe Synchronization":
       *
       *    "In case the data flushed out by the render engine is to be read
       *    back in to the render engine in coherent manner, then the render
       *    engine has to wait for the fence completion before accessing the
       *    flushed data. This can be achieved by following means on various
       *    products: PIPE_CONTROL command with CS Stall and the required
       *    write caches flushed with Post-Sync-Operation as Write Immediate
       *    Data.
       *
       *    Example:
       *       - Workload-1 (3D/GPGPU/MEDIA)
       *       - PIPE_CONTROL (CS Stall, Post-Sync-Operation Write Immediate
       *         Data, Required Write Cache Flush bits set)
       *       - Workload-2 (Can use the data produced or output by Workload-1)
       */
      crocus_emit_pipe_control_write(batch, reason,
                                     flags | PIPE_CONTROL_CS_STALL |
                                     PIPE_CONTROL_WRITE_IMMEDIATE,
                                     batch->ice->workaround_bo,
                                     batch->ice->workaround_offset, 0);

      if (batch->screen->devinfo.is_haswell) {
#define GEN7_3DPRIM_START_INSTANCE 0x243C
         batch->screen->vtbl.load_register_mem32(batch, GEN7_3DPRIM_START_INSTANCE,
                                                 batch->ice->workaround_bo,
                                                 batch->ice->workaround_offset);
      }
   } else {
      /* On gen4-5, a regular pipe control seems to suffice. */
      crocus_emit_pipe_control_flush(batch, reason, flags);
   }
}
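
/* Illustrative usage sketch (hypothetical call site): before the CPU maps and
 * reads back data the GPU just rendered, an end-of-pipe sync with the relevant
 * write-cache flush bit makes the results globally visible, per the PRM text
 * quoted above.
 *
 *    crocus_emit_end_of_pipe_sync(batch, "example: CPU readback",
 *                                 PIPE_CONTROL_RENDER_TARGET_FLUSH);
 */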

/* Emit a pipelined flush to either flush render and texture cache for
 * reading from a FBO-drawn texture, or flush so that frontbuffer
 * render appears on the screen in DRI1.
 *
 * This is also used for the always_flush_cache driconf debug option.
 */
void
crocus_emit_mi_flush(struct crocus_batch *batch)
{
   const struct intel_device_info *devinfo = &batch->screen->devinfo;
   int flags = PIPE_CONTROL_RENDER_TARGET_FLUSH;
   if (devinfo->ver >= 6) {
      flags |= PIPE_CONTROL_INSTRUCTION_INVALIDATE |
               PIPE_CONTROL_CONST_CACHE_INVALIDATE |
               PIPE_CONTROL_DATA_CACHE_FLUSH |
               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
               PIPE_CONTROL_VF_CACHE_INVALIDATE |
               PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_CS_STALL;
   }
   crocus_emit_pipe_control_flush(batch, "mi flush", flags);
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 *    [DevSNB-C+{W/A}] Before any depth stall flush (including those
 *    produced by non-pipelined state commands), software needs to first
 *    send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 *    0.
 *
 *    [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 *    =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 *    [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 *    BEFORE the pipe-control with a post-sync op and no write-cache
 *    flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *    "1 of the following must also be set:
 *     - Render Target Cache Flush Enable ([12] of DW1)
 *     - Depth Cache Flush Enable ([0] of DW1)
 *     - Stall at Pixel Scoreboard ([1] of DW1)
 *     - Depth Stall ([13] of DW1)
 *     - Post-Sync Operation ([13] of DW1)
 *     - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
void
crocus_emit_post_sync_nonzero_flush(struct crocus_batch *batch)
{
   crocus_emit_pipe_control_flush(batch, "nonzero",
                                  PIPE_CONTROL_CS_STALL |
                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);

   crocus_emit_pipe_control_write(batch, "nonzero",
                                  PIPE_CONTROL_WRITE_IMMEDIATE,
                                  batch->ice->workaround_bo,
                                  batch->ice->workaround_offset, 0);
}
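
/* Illustrative usage sketch (hypothetical Gen6-only call site): per the
 * workarounds quoted above, this helper would be emitted immediately before a
 * PIPE_CONTROL that sets a write-cache flush bit or Depth Stall.
 *
 *    if (devinfo->ver == 6)
 *       crocus_emit_post_sync_nonzero_flush(batch);
 *    crocus_emit_pipe_control_flush(batch, "example: RT flush after WA",
 *                                   PIPE_CONTROL_RENDER_TARGET_FLUSH);
 */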

/**
 * Flush and invalidate all caches (for debugging purposes).
 */
void
crocus_flush_all_caches(struct crocus_batch *batch)
{
   crocus_emit_pipe_control_flush(batch, "debug: flush all caches",
                                  PIPE_CONTROL_CS_STALL |
                                  PIPE_CONTROL_DATA_CACHE_FLUSH |
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}

static void
crocus_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct crocus_context *ice = (void *) ctx;
   struct crocus_batch *render_batch = &ice->batches[CROCUS_BATCH_RENDER];
   struct crocus_batch *compute_batch = &ice->batches[CROCUS_BATCH_COMPUTE];
   const struct intel_device_info *devinfo = &render_batch->screen->devinfo;

   if (devinfo->ver < 6) {
      crocus_emit_mi_flush(render_batch);
      return;
   }

   if (render_batch->contains_draw) {
      crocus_batch_maybe_flush(render_batch, 48);
      crocus_emit_pipe_control_flush(render_batch,
                                     "API: texture barrier (1/2)",
                                     (flags == 1 ? PIPE_CONTROL_DEPTH_CACHE_FLUSH : 0) |
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_CS_STALL);
      crocus_emit_pipe_control_flush(render_batch,
                                     "API: texture barrier (2/2)",
                                     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }

   if (compute_batch->contains_draw) {
      crocus_batch_maybe_flush(compute_batch, 48);
      crocus_emit_pipe_control_flush(compute_batch,
                                     "API: texture barrier (1/2)",
                                     PIPE_CONTROL_CS_STALL);
      crocus_emit_pipe_control_flush(compute_batch,
                                     "API: texture barrier (2/2)",
                                     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }
}

static void
crocus_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct crocus_context *ice = (void *) ctx;
   unsigned bits = PIPE_CONTROL_DATA_CACHE_FLUSH | PIPE_CONTROL_CS_STALL;
   const struct intel_device_info *devinfo = &ice->batches[0].screen->devinfo;

   assert(devinfo->ver >= 7);

   if (flags & (PIPE_BARRIER_VERTEX_BUFFER |
                PIPE_BARRIER_INDEX_BUFFER |
                PIPE_BARRIER_INDIRECT_BUFFER)) {
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
   }

   if (flags & PIPE_BARRIER_CONSTANT_BUFFER) {
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
              PIPE_CONTROL_CONST_CACHE_INVALIDATE;
   }

   if (flags & (PIPE_BARRIER_TEXTURE | PIPE_BARRIER_FRAMEBUFFER)) {
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
              PIPE_CONTROL_RENDER_TARGET_FLUSH;
   }

   /* Typed surface messages are handled by the render cache on IVB, so we
    * need to flush it too.
    */
   if (devinfo->verx10 < 75)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   for (int i = 0; i < ice->batch_count; i++) {
      if (ice->batches[i].contains_draw) {
         crocus_batch_maybe_flush(&ice->batches[i], 24);
         crocus_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",
                                        bits);
      }
   }
}

void
crocus_init_flush_functions(struct pipe_context *ctx)
{
   ctx->memory_barrier = crocus_memory_barrier;
   ctx->texture_barrier = crocus_texture_barrier;
}
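
/* Illustrative usage sketch (hypothetical frontend code, not part of this
 * driver): once crocus_init_flush_functions() has installed the hooks above,
 * a gallium frontend reaches them through the pipe_context vtable, e.g.:
 *
 *    ctx->texture_barrier(ctx, PIPE_TEXTURE_BARRIER_SAMPLER);
 *    ctx->memory_barrier(ctx, PIPE_BARRIER_CONSTANT_BUFFER |
 *                             PIPE_BARRIER_TEXTURE);
 */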