Path: blob/21.2-virgl/src/gallium/drivers/iris/iris_pipe_control.c
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_pipe_control.c
 *
 * PIPE_CONTROL is the main flushing and synchronization primitive on Intel
 * GPUs. It can invalidate caches, stall until rendering reaches various
 * stages of completion, write to memory, and do other things. In a way,
 * it's a Swiss army knife command - it has all kinds of capabilities, but
 * some significant limitations as well.
 *
 * Unfortunately, it's notoriously complicated and difficult to use. Many
 * sub-commands can't be used together. Some are meant to be used at the
 * top of the pipeline (invalidating caches before drawing), while some are
 * meant to be used at the end (stalling or flushing after drawing).
 *
 * Also, there's a list of restrictions a mile long, which vary by generation.
 * Do this before doing that, or suffer the consequences (usually a GPU hang).
 *
 * This file contains helpers for emitting them safely. You can simply call
 * iris_emit_pipe_control_flush() with the desired operations (as logical
 * PIPE_CONTROL_* bits), and it will take care of splitting it into multiple
 * PIPE_CONTROL commands as necessary. The per-generation workarounds are
 * applied in iris_emit_raw_pipe_control() in iris_state.c.
 */

#include "iris_context.h"
#include "util/hash_table.h"
#include "util/set.h"

/**
 * Emit a PIPE_CONTROL with various flushing flags.
 *
 * The caller is responsible for deciding what flags are appropriate for the
 * given generation.
 */
void
iris_emit_pipe_control_flush(struct iris_batch *batch,
                             const char *reason,
                             uint32_t flags)
{
   if ((flags & PIPE_CONTROL_CACHE_FLUSH_BITS) &&
       (flags & PIPE_CONTROL_CACHE_INVALIDATE_BITS)) {
      /* A pipe control command with flush and invalidate bits set
       * simultaneously is an inherently racy operation on Gfx6+ if the
       * contents of the flushed caches were intended to become visible from
       * any of the invalidated caches. Split it into two PIPE_CONTROLs;
       * the first one should stall the pipeline to make sure that the
       * flushed R/W caches are coherent with memory once the specified R/O
       * caches are invalidated. On pre-Gfx6 hardware the (implicit) R/O
       * cache invalidation seems to happen at the bottom of the pipeline
       * together with any write cache flush, so this shouldn't be a
       * concern. In order to ensure a full stall, we do an end-of-pipe
       * sync.
       */
      iris_emit_end_of_pipe_sync(batch, reason,
                                 flags & PIPE_CONTROL_CACHE_FLUSH_BITS);
      flags &= ~(PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CS_STALL);
   }

   batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, NULL, 0, 0);
}
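
/* As a usage sketch (a hypothetical call site, for illustration only): a
 * render-to-texture hazard could be resolved with a single logical request,
 * which the helper above splits into a flushing end-of-pipe sync followed
 * by the invalidation:
 *
 *    iris_emit_pipe_control_flush(batch, "example: render-to-texture",
 *                                 PIPE_CONTROL_RENDER_TARGET_FLUSH |
 *                                 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 */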

/**
 * Emit a PIPE_CONTROL that writes to a buffer object.
 *
 * \p flags should contain one of the following items:
 *  - PIPE_CONTROL_WRITE_IMMEDIATE
 *  - PIPE_CONTROL_WRITE_TIMESTAMP
 *  - PIPE_CONTROL_WRITE_DEPTH_COUNT
 */
void
iris_emit_pipe_control_write(struct iris_batch *batch,
                             const char *reason, uint32_t flags,
                             struct iris_bo *bo, uint32_t offset,
                             uint64_t imm)
{
   batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, bo,
                                             offset, imm);
}
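
/* A usage sketch (the names "fence_bo" and "seqno" are hypothetical, for
 * illustration only): a fence value that the CPU can poll for could be
 * written with
 *
 *    iris_emit_pipe_control_write(batch, "example: fence seqno",
 *                                 PIPE_CONTROL_WRITE_IMMEDIATE |
 *                                 PIPE_CONTROL_CS_STALL,
 *                                 fence_bo, 0, seqno);
 */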

/*
 * From Sandybridge PRM, volume 2, "1.7.2 End-of-Pipe Synchronization":
 *
 *    Write synchronization is a special case of end-of-pipe
 *    synchronization that requires that the render cache and/or depth
 *    related caches are flushed to memory, where the data will become
 *    globally visible. This type of synchronization is required prior to
 *    SW (CPU) actually reading the result data from memory, or initiating
 *    an operation that will use as a read surface (such as a texture
 *    surface) a previous render target and/or depth/stencil buffer
 *
 * From Haswell PRM, volume 2, part 1, "End-of-Pipe Synchronization":
 *
 *    Exercising the write cache flush bits (Render Target Cache Flush
 *    Enable, Depth Cache Flush Enable, DC Flush) in PIPE_CONTROL only
 *    ensures the write caches are flushed and doesn't guarantee the data
 *    is globally visible.
 *
 *    SW can track the completion of the end-of-pipe-synchronization by
 *    using "Notify Enable" and "PostSync Operation - Write Immediate
 *    Data" in the PIPE_CONTROL command.
 */
void
iris_emit_end_of_pipe_sync(struct iris_batch *batch,
                           const char *reason, uint32_t flags)
{
   /* From Sandybridge PRM, volume 2, "1.7.3.1 Writing a Value to Memory":
    *
    *    "The most common action to perform upon reaching a synchronization
    *    point is to write a value out to memory. An immediate value
    *    (included with the synchronization command) may be written."
    *
    * From Broadwell PRM, volume 7, "End-of-Pipe Synchronization":
    *
    *    "In case the data flushed out by the render engine is to be read
    *    back in to the render engine in coherent manner, then the render
    *    engine has to wait for the fence completion before accessing the
    *    flushed data. This can be achieved by following means on various
    *    products: PIPE_CONTROL command with CS Stall and the required
    *    write caches flushed with Post-Sync-Operation as Write Immediate
    *    Data.
    *
    *    Example:
    *       - Workload-1 (3D/GPGPU/MEDIA)
    *       - PIPE_CONTROL (CS Stall, Post-Sync-Operation Write Immediate
    *         Data, Required Write Cache Flush bits set)
    *       - Workload-2 (Can use the data produce or output by Workload-1)"
    */
   iris_emit_pipe_control_write(batch, reason,
                                flags | PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_WRITE_IMMEDIATE,
                                batch->screen->workaround_address.bo,
                                batch->screen->workaround_address.offset, 0);
}

/**
 * Emits appropriate flushes and invalidations for any previous memory
 * operations on \p bo to be strictly ordered relative to any subsequent
 * memory operations performed from the caching domain \p access.
 *
 * This is useful because the GPU has separate incoherent caches for the
 * render target, sampler, etc., which need to be explicitly invalidated or
 * flushed in order to obtain the expected memory ordering in cases where the
 * same surface is accessed through multiple caches (e.g. due to
 * render-to-texture).
 *
 * This provides the expected memory ordering guarantees whether or not the
 * previous access was performed from the same batch or a different one, but
 * only the former case needs to be handled explicitly here, since the kernel
 * already inserts implicit flushes and synchronization in order to guarantee
 * that any data dependencies between batches are satisfied.
 *
 * Even though no flushing or invalidation is required in order to account
 * for concurrent updates from other batches, we provide the guarantee that a
 * required synchronization operation due to a previous batch-local update
 * will never be omitted due to the influence of another thread accessing the
 * same buffer concurrently from the same caching domain: Such a concurrent
 * update will only ever change the seqno of the last update to a value
 * greater than the local value (see iris_bo_bump_seqno()), which means that
 * we will always emit at least as much flushing and invalidation as we would
 * have for the local seqno (see the coherent_seqnos comparisons below).
 */
void
iris_emit_buffer_barrier_for(struct iris_batch *batch,
                             struct iris_bo *bo,
                             enum iris_domain access)
{
   const uint32_t all_flush_bits = (PIPE_CONTROL_CACHE_FLUSH_BITS |
                                    PIPE_CONTROL_STALL_AT_SCOREBOARD |
                                    PIPE_CONTROL_FLUSH_ENABLE);
   const uint32_t flush_bits[NUM_IRIS_DOMAINS] = {
      [IRIS_DOMAIN_RENDER_WRITE] = PIPE_CONTROL_RENDER_TARGET_FLUSH,
      [IRIS_DOMAIN_DEPTH_WRITE] = PIPE_CONTROL_DEPTH_CACHE_FLUSH,
      [IRIS_DOMAIN_OTHER_WRITE] = PIPE_CONTROL_FLUSH_ENABLE,
      [IRIS_DOMAIN_OTHER_READ] = PIPE_CONTROL_STALL_AT_SCOREBOARD,
   };
   const uint32_t invalidate_bits[NUM_IRIS_DOMAINS] = {
      [IRIS_DOMAIN_RENDER_WRITE] = PIPE_CONTROL_RENDER_TARGET_FLUSH,
      [IRIS_DOMAIN_DEPTH_WRITE] = PIPE_CONTROL_DEPTH_CACHE_FLUSH,
      [IRIS_DOMAIN_OTHER_WRITE] = PIPE_CONTROL_FLUSH_ENABLE,
      [IRIS_DOMAIN_OTHER_READ] = (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE),
   };
   uint32_t bits = 0;
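
   /* A note on the bookkeeping used below: bo->last_seqnos[i] holds the
    * seqno of the most recent access to this BO from domain i, while
    * batch->coherent_seqnos[j][i] holds the seqno of the most recent
    * access from domain i that is already guaranteed to be visible to
    * domain j (so coherent_seqnos[i][i] effectively tracks the last flush
    * of domain i's own cache). See iris_batch.h for the authoritative
    * definitions.
    */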

   /* Iterate over all read/write domains first in order to handle RaW
    * and WaW dependencies, which might involve flushing the domain of
    * the previous access and invalidating the specified domain.
    */
   for (unsigned i = 0; i < IRIS_DOMAIN_OTHER_WRITE; i++) {
      assert(!iris_domain_is_read_only(i));
      if (i != access) {
         const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);

         /* Invalidate unless the most recent read/write access from
          * this domain is already guaranteed to be visible to the
          * specified domain. Flush if the most recent access from
          * this domain occurred after its most recent flush.
          */
         if (seqno > batch->coherent_seqnos[access][i]) {
            bits |= invalidate_bits[access];

            if (seqno > batch->coherent_seqnos[i][i])
               bits |= flush_bits[i];
         }
      }
   }

   /* All read-only domains can be considered mutually coherent since
    * the order of read-only memory operations is immaterial. If the
    * specified domain is read/write we need to iterate over them too,
    * in order to handle any WaR dependencies.
    */
   if (!iris_domain_is_read_only(access)) {
      for (unsigned i = IRIS_DOMAIN_OTHER_READ; i < NUM_IRIS_DOMAINS; i++) {
         assert(iris_domain_is_read_only(i));
         const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);

         /* Flush if the most recent access from this domain occurred
          * after its most recent flush.
          */
         if (seqno > batch->coherent_seqnos[i][i])
            bits |= flush_bits[i];
      }
   }

   /* The IRIS_DOMAIN_OTHER_WRITE kitchen-sink domain cannot be
    * considered coherent with itself since it's really a collection
    * of multiple incoherent read/write domains, so we special-case it
    * here.
    */
   const unsigned i = IRIS_DOMAIN_OTHER_WRITE;
   const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);

   /* Invalidate unless the most recent read/write access from this
    * domain is already guaranteed to be visible to the specified
    * domain. Flush if the most recent access from this domain
    * occurred after its most recent flush.
    */
   if (seqno > batch->coherent_seqnos[access][i]) {
      bits |= invalidate_bits[access];

      if (seqno > batch->coherent_seqnos[i][i])
         bits |= flush_bits[i];
   }
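
   /* At this point "bits" may mix flush and invalidate requests. Per the
    * PRM quotes above, exercising the write cache flush bits alone doesn't
    * guarantee the data becomes globally visible, so the flush-like bits
    * are emitted below through an end-of-pipe sync, while any remaining
    * invalidations can go in a plain PIPE_CONTROL.
    */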

   if (bits) {
      /* Stall-at-scoreboard is not expected to work in combination with
       * other flush bits.
       */
      if (bits & PIPE_CONTROL_CACHE_FLUSH_BITS)
         bits &= ~PIPE_CONTROL_STALL_AT_SCOREBOARD;

      /* Emit any required flushes and invalidations. */
      if (bits & all_flush_bits)
         iris_emit_end_of_pipe_sync(batch, "cache tracker: flush",
                                    bits & all_flush_bits);

      if (bits & ~all_flush_bits)
         iris_emit_pipe_control_flush(batch, "cache tracker: invalidate",
                                      bits & ~all_flush_bits);
   }
}

/**
 * Flush and invalidate all caches (for debugging purposes).
 */
void
iris_flush_all_caches(struct iris_batch *batch)
{
   iris_emit_pipe_control_flush(batch, "debug: flush all caches",
                                PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_DATA_CACHE_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_TILE_CACHE_FLUSH |
                                PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}

static void
iris_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];
   struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE];

   if (render_batch->contains_draw) {
      iris_batch_maybe_flush(render_batch, 48);
      iris_emit_pipe_control_flush(render_batch,
                                   "API: texture barrier (1/2)",
                                   PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                   PIPE_CONTROL_CS_STALL);
      iris_emit_pipe_control_flush(render_batch,
                                   "API: texture barrier (2/2)",
                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }

   if (compute_batch->contains_draw) {
      iris_batch_maybe_flush(compute_batch, 48);
      iris_emit_pipe_control_flush(compute_batch,
                                   "API: texture barrier (1/2)",
                                   PIPE_CONTROL_CS_STALL);
      iris_emit_pipe_control_flush(compute_batch,
                                   "API: texture barrier (2/2)",
                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }
}

static void
iris_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct iris_context *ice = (void *) ctx;
   unsigned bits = PIPE_CONTROL_DATA_CACHE_FLUSH | PIPE_CONTROL_CS_STALL;

   if (flags & (PIPE_BARRIER_VERTEX_BUFFER |
                PIPE_BARRIER_INDEX_BUFFER |
                PIPE_BARRIER_INDIRECT_BUFFER)) {
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
   }

   if (flags & PIPE_BARRIER_CONSTANT_BUFFER) {
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
              PIPE_CONTROL_CONST_CACHE_INVALIDATE;
   }

   if (flags & (PIPE_BARRIER_TEXTURE | PIPE_BARRIER_FRAMEBUFFER)) {
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
              PIPE_CONTROL_RENDER_TARGET_FLUSH;
   }

   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
      if (ice->batches[i].contains_draw) {
         iris_batch_maybe_flush(&ice->batches[i], 24);
         iris_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",
                                      bits);
      }
   }
}

void
iris_init_flush_functions(struct pipe_context *ctx)
{
   ctx->memory_barrier = iris_memory_barrier;
   ctx->texture_barrier = iris_texture_barrier;
}
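
/* A usage note (a sketch of the surrounding machinery, not part of this
 * file): these hooks are invoked through the Gallium pipe_context
 * interface, so e.g. glTextureBarrier() ends up in iris_texture_barrier(),
 * and glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT) ends up in
 * iris_memory_barrier() with PIPE_BARRIER_SHADER_BUFFER set in flags.
 */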