Path: blob/21.2-virgl/src/gallium/drivers/radeonsi/si_compute_blit.c
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "util/format/u_format.h"
#include "util/format_srgb.h"
#include "util/u_helpers.h"

/* Determine the cache policy. */
static enum si_cache_policy get_cache_policy(struct si_context *sctx, enum si_coherency coher,
                                             uint64_t size)
{
   if ((sctx->chip_class >= GFX9 && (coher == SI_COHERENCY_CB_META ||
                                     coher == SI_COHERENCY_DB_META ||
                                     coher == SI_COHERENCY_CP)) ||
       (sctx->chip_class >= GFX7 && coher == SI_COHERENCY_SHADER))
      return L2_LRU; /* it's faster if L2 doesn't evict anything */

   return L2_BYPASS;
}

unsigned si_get_flush_flags(struct si_context *sctx, enum si_coherency coher,
                            enum si_cache_policy cache_policy)
{
   switch (coher) {
   default:
   case SI_COHERENCY_NONE:
   case SI_COHERENCY_CP:
      return 0;
   case SI_COHERENCY_SHADER:
      return SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
             (cache_policy == L2_BYPASS ? SI_CONTEXT_INV_L2 : 0);
   case SI_COHERENCY_CB_META:
      return SI_CONTEXT_FLUSH_AND_INV_CB;
   case SI_COHERENCY_DB_META:
      return SI_CONTEXT_FLUSH_AND_INV_DB;
   }
}
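
/* Illustrative note on how the two helpers above compose: a shader-coherent
 * operation with the L2_BYPASS policy needs SI_CONTEXT_INV_SCACHE |
 * SI_CONTEXT_INV_VCACHE | SI_CONTEXT_INV_L2, while the same operation with
 * L2_LRU skips the L2 invalidation because the data stays coherent through
 * L2 (which is also why get_cache_policy() prefers L2_LRU where possible).
 */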
void si_launch_grid_internal(struct si_context *sctx, struct pipe_grid_info *info,
                             void *shader, unsigned flags)
{
   /* Wait for previous shaders to finish. */
   if (flags & SI_OP_SYNC_PS_BEFORE)
      sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;

   if (flags & SI_OP_SYNC_CS_BEFORE)
      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;

   if (!(flags & SI_OP_CS_IMAGE))
      sctx->flags |= SI_CONTEXT_PFP_SYNC_ME;

   /* Invalidate L0-L1 caches. */
   /* sL0 is never invalidated, because src resources don't use it. */
   if (!(flags & SI_OP_SKIP_CACHE_INV_BEFORE))
      sctx->flags |= SI_CONTEXT_INV_VCACHE;

   /* Set settings for driver-internal compute dispatches. */
   sctx->flags &= ~SI_CONTEXT_START_PIPELINE_STATS;
   sctx->flags |= SI_CONTEXT_STOP_PIPELINE_STATS;

   if (!(flags & SI_OP_CS_RENDER_COND_ENABLE))
      sctx->render_cond_enabled = false;

   /* Skip decompression to prevent infinite recursion. */
   sctx->blitter_running = true;

   /* Dispatch compute. */
   void *saved_cs = sctx->cs_shader_state.program;
   sctx->b.bind_compute_state(&sctx->b, shader);
   sctx->b.launch_grid(&sctx->b, info);
   sctx->b.bind_compute_state(&sctx->b, saved_cs);

   /* Restore default settings. */
   sctx->flags &= ~SI_CONTEXT_STOP_PIPELINE_STATS;
   sctx->flags |= SI_CONTEXT_START_PIPELINE_STATS;
   sctx->render_cond_enabled = sctx->render_cond;
   sctx->blitter_running = false;

   if (flags & SI_OP_SYNC_AFTER) {
      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;

      if (flags & SI_OP_CS_IMAGE) {
         /* Make sure image stores are visible to CB, which doesn't use L2 on GFX6-8. */
         sctx->flags |= sctx->chip_class <= GFX8 ? SI_CONTEXT_WB_L2 : 0;
         /* Make sure image stores are visible to all CUs. */
         sctx->flags |= SI_CONTEXT_INV_VCACHE;
      } else {
         /* Make sure buffer stores are visible to all CUs. */
         sctx->flags |= SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE | SI_CONTEXT_PFP_SYNC_ME;
      }
   }
}

void si_launch_grid_internal_ssbos(struct si_context *sctx, struct pipe_grid_info *info,
                                   void *shader, unsigned flags, enum si_coherency coher,
                                   unsigned num_buffers, const struct pipe_shader_buffer *buffers,
                                   unsigned writeable_bitmask)
{
   if (!(flags & SI_OP_SKIP_CACHE_INV_BEFORE))
      sctx->flags |= si_get_flush_flags(sctx, coher, SI_COMPUTE_DST_CACHE_POLICY);

   /* Save states. */
   struct pipe_shader_buffer saved_sb[3] = {};
   assert(num_buffers <= ARRAY_SIZE(saved_sb));
   si_get_shader_buffers(sctx, PIPE_SHADER_COMPUTE, 0, num_buffers, saved_sb);

   unsigned saved_writable_mask = 0;
   for (unsigned i = 0; i < num_buffers; i++) {
      if (sctx->const_and_shader_buffers[PIPE_SHADER_COMPUTE].writable_mask &
          (1u << si_get_shaderbuf_slot(i)))
         saved_writable_mask |= 1 << i;
   }

   /* Bind buffers and launch compute. */
   sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_buffers, buffers,
                              writeable_bitmask);
   si_launch_grid_internal(sctx, info, shader, flags);

   /* Do cache flushing at the end. */
   if (get_cache_policy(sctx, coher, 0) == L2_BYPASS) {
      if (flags & SI_OP_SYNC_AFTER)
         sctx->flags |= SI_CONTEXT_WB_L2;
   } else {
      while (writeable_bitmask)
         si_resource(buffers[u_bit_scan(&writeable_bitmask)].buffer)->TC_L2_dirty = true;
   }

   /* Restore states. */
   sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_buffers, saved_sb,
                              saved_writable_mask);
   for (int i = 0; i < num_buffers; i++)
      pipe_resource_reference(&saved_sb[i].buffer, NULL);
}
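
/* Illustrative usage sketch for the helper above (hypothetical caller,
 * assuming a compiled shader and a filled pipe_grid_info): bind one output
 * SSBO and mark it written via the bitmask, exactly as the clear/copy
 * helpers below do.
 *
 *    struct pipe_shader_buffer sb = {};
 *    sb.buffer = dst;
 *    sb.buffer_offset = 0;
 *    sb.buffer_size = size;
 *    si_launch_grid_internal_ssbos(sctx, &info, shader, SI_OP_SYNC_BEFORE_AFTER,
 *                                  SI_COHERENCY_SHADER, 1, &sb, 0x1);
 */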
/**
 * Clear a buffer using read-modify-write with a 32-bit write bitmask.
 * The clear value has 32 bits.
 */
void si_compute_clear_buffer_rmw(struct si_context *sctx, struct pipe_resource *dst,
                                 unsigned dst_offset, unsigned size,
                                 uint32_t clear_value, uint32_t writebitmask,
                                 unsigned flags, enum si_coherency coher)
{
   assert(dst_offset % 4 == 0);
   assert(size % 4 == 0);

   assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0);

   /* Use buffer_load_dwordx4 and buffer_store_dwordx4 per thread. */
   unsigned dwords_per_instruction = 4;
   unsigned wave_size = sctx->screen->compute_wave_size;
   unsigned dwords_per_wave = dwords_per_instruction * wave_size;

   unsigned num_dwords = size / 4;
   unsigned num_instructions = DIV_ROUND_UP(num_dwords, dwords_per_instruction);

   struct pipe_grid_info info = {};
   info.block[0] = MIN2(wave_size, num_instructions);
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(num_dwords, dwords_per_wave);
   info.grid[1] = 1;
   info.grid[2] = 1;

   struct pipe_shader_buffer sb = {};
   sb.buffer = dst;
   sb.buffer_offset = dst_offset;
   sb.buffer_size = size;

   sctx->cs_user_data[0] = clear_value & writebitmask;
   sctx->cs_user_data[1] = ~writebitmask;

   if (!sctx->cs_clear_buffer_rmw)
      sctx->cs_clear_buffer_rmw = si_create_clear_buffer_rmw_cs(&sctx->b);

   si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_buffer_rmw, flags, coher,
                                 1, &sb, 0x1);
}

static void si_compute_clear_12bytes_buffer(struct si_context *sctx, struct pipe_resource *dst,
                                            unsigned dst_offset, unsigned size,
                                            const uint32_t *clear_value, unsigned flags,
                                            enum si_coherency coher)
{
   struct pipe_context *ctx = &sctx->b;

   assert(dst_offset % 4 == 0);
   assert(size % 4 == 0);
   unsigned size_12 = DIV_ROUND_UP(size, 12);

   struct pipe_shader_buffer sb = {0};
   sb.buffer = dst;
   sb.buffer_offset = dst_offset;
   sb.buffer_size = size;

   memcpy(sctx->cs_user_data, clear_value, 12);

   struct pipe_grid_info info = {0};

   if (!sctx->cs_clear_12bytes_buffer)
      sctx->cs_clear_12bytes_buffer = si_clear_12bytes_buffer_shader(ctx);

   info.block[0] = 64;
   info.last_block[0] = size_12 % 64;
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(size_12, 64);
   info.grid[1] = 1;
   info.grid[2] = 1;

   si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_12bytes_buffer, flags, coher,
                                 1, &sb, 0x1);
}
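
/* Worked example for the two clears above (illustrative numbers): clearing
 * size = 3072 bytes via the 12-byte path gives size_12 =
 * DIV_ROUND_UP(3072, 12) = 256 vec3 stores, so with 64-thread blocks
 * grid[0] = 4 and last_block[0] = 0. For the RMW clear with wave_size = 64
 * and one dwordx4 per thread, a wave covers 64 * 4 = 256 dwords (1 KiB),
 * so the same 3072-byte (768-dword) clear launches grid[0] =
 * DIV_ROUND_UP(768, 256) = 3 workgroups.
 */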
static void si_compute_do_clear_or_copy(struct si_context *sctx, struct pipe_resource *dst,
                                        unsigned dst_offset, struct pipe_resource *src,
                                        unsigned src_offset, unsigned size,
                                        const uint32_t *clear_value, unsigned clear_value_size,
                                        unsigned flags, enum si_coherency coher)
{
   assert(src_offset % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(size % 4 == 0);

   assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0);
   assert(!src || src_offset + size <= src->width0);

   /* The memory accesses are coalesced, meaning that the 1st instruction writes
    * the 1st contiguous block of data for the whole wave, the 2nd instruction
    * writes the 2nd contiguous block of data, etc.
    */
   unsigned dwords_per_thread =
      src ? SI_COMPUTE_COPY_DW_PER_THREAD : SI_COMPUTE_CLEAR_DW_PER_THREAD;
   unsigned instructions_per_thread = MAX2(1, dwords_per_thread / 4);
   unsigned dwords_per_instruction = dwords_per_thread / instructions_per_thread;
   unsigned wave_size = sctx->screen->compute_wave_size;
   unsigned dwords_per_wave = dwords_per_thread * wave_size;

   unsigned num_dwords = size / 4;
   unsigned num_instructions = DIV_ROUND_UP(num_dwords, dwords_per_instruction);

   struct pipe_grid_info info = {};
   info.block[0] = MIN2(wave_size, num_instructions);
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(num_dwords, dwords_per_wave);
   info.grid[1] = 1;
   info.grid[2] = 1;

   struct pipe_shader_buffer sb[2] = {};
   sb[0].buffer = dst;
   sb[0].buffer_offset = dst_offset;
   sb[0].buffer_size = size;

   bool shader_dst_stream_policy = SI_COMPUTE_DST_CACHE_POLICY != L2_LRU;

   if (src) {
      sb[1].buffer = src;
      sb[1].buffer_offset = src_offset;
      sb[1].buffer_size = size;

      if (!sctx->cs_copy_buffer) {
         sctx->cs_copy_buffer = si_create_dma_compute_shader(
            &sctx->b, SI_COMPUTE_COPY_DW_PER_THREAD, shader_dst_stream_policy, true);
      }

      si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_copy_buffer, flags, coher,
                                    2, sb, 0x1);
   } else {
      assert(clear_value_size >= 4 && clear_value_size <= 16 &&
             util_is_power_of_two_or_zero(clear_value_size));

      for (unsigned i = 0; i < 4; i++)
         sctx->cs_user_data[i] = clear_value[i % (clear_value_size / 4)];

      if (!sctx->cs_clear_buffer) {
         sctx->cs_clear_buffer = si_create_dma_compute_shader(
            &sctx->b, SI_COMPUTE_CLEAR_DW_PER_THREAD, shader_dst_stream_policy, false);
      }

      si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_buffer, flags, coher,
                                    1, sb, 0x1);
   }
}
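
/* Worked example of the sizing above (illustrative; assumes
 * SI_COMPUTE_CLEAR_DW_PER_THREAD is 4 and wave_size is 64): each thread
 * clears 4 dwords, so one wave covers 4 * 64 = 256 dwords = 1 KiB.
 * Clearing 1 MiB (262144 dwords) thus launches grid[0] =
 * DIV_ROUND_UP(262144, 256) = 1024 workgroups of 64 threads, and because
 * the stores are coalesced, the N-th store instruction of a wave writes
 * the N-th contiguous 1 KiB block.
 */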
void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
                     uint64_t offset, uint64_t size, uint32_t *clear_value,
                     uint32_t clear_value_size, unsigned flags,
                     enum si_coherency coher, enum si_clear_method method)
{
   if (!size)
      return;

   ASSERTED unsigned clear_alignment = MIN2(clear_value_size, 4);

   assert(clear_value_size != 3 && clear_value_size != 6); /* 12 is allowed. */
   assert(offset % clear_alignment == 0);
   assert(size % clear_alignment == 0);
   assert(size < (UINT_MAX & ~0xf)); /* TODO: test 64-bit sizes in all codepaths */

   uint32_t clamped;
   if (util_lower_clearsize_to_dword(clear_value, (int*)&clear_value_size, &clamped))
      clear_value = &clamped;

   if (clear_value_size == 12) {
      si_compute_clear_12bytes_buffer(sctx, dst, offset, size, clear_value, flags, coher);
      return;
   }

   uint64_t aligned_size = size & ~3ull;
   if (aligned_size >= 4) {
      uint64_t compute_min_size;

      if (sctx->chip_class <= GFX8) {
         /* CP DMA clears are terribly slow with GTT on GFX6-8, which can always
          * happen due to BO evictions.
          */
         compute_min_size = 0;
      } else {
         /* Use a small enough size because CP DMA is slower than compute with bigger sizes. */
         compute_min_size = 4 * 1024;
      }

      if (method == SI_AUTO_SELECT_CLEAR_METHOD &&
          (clear_value_size > 4 ||
           (clear_value_size == 4 && offset % 4 == 0 && size > compute_min_size))) {
         method = SI_COMPUTE_CLEAR_METHOD;
      }
      if (method == SI_COMPUTE_CLEAR_METHOD) {
         si_compute_do_clear_or_copy(sctx, dst, offset, NULL, 0, aligned_size, clear_value,
                                     clear_value_size, flags, coher);
      } else {
         assert(clear_value_size == 4);
         si_cp_dma_clear_buffer(sctx, &sctx->gfx_cs, dst, offset, aligned_size, *clear_value,
                                flags, coher, get_cache_policy(sctx, coher, size));
      }

      offset += aligned_size;
      size -= aligned_size;
   }

   /* Handle non-dword alignment. */
   if (size) {
      assert(dst);
      assert(dst->target == PIPE_BUFFER);
      assert(size < 4);

      pipe_buffer_write(&sctx->b, dst, offset, size, clear_value);
   }
}
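
/* Worked example (illustrative): clearing size = 10 bytes with a 2-byte
 * clear value is first widened to a replicated dword by
 * util_lower_clearsize_to_dword(), then aligned_size = 8 bytes go through
 * compute or CP DMA above, and the remaining 2-byte tail falls through to
 * pipe_buffer_write(), which is why the tail path asserts size < 4.
 */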
void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst, uint64_t offset,
                            uint64_t size, unsigned value, unsigned flags)
{
   struct si_context *ctx = (struct si_context *)sscreen->aux_context;

   simple_mtx_lock(&sscreen->aux_context_lock);
   si_clear_buffer(ctx, dst, offset, size, &value, 4, flags,
                   SI_COHERENCY_SHADER, SI_AUTO_SELECT_CLEAR_METHOD);
   sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
   simple_mtx_unlock(&sscreen->aux_context_lock);
}

static void si_pipe_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
                                 unsigned offset, unsigned size, const void *clear_value,
                                 int clear_value_size)
{
   si_clear_buffer((struct si_context *)ctx, dst, offset, size, (uint32_t *)clear_value,
                   clear_value_size, SI_OP_SYNC_BEFORE_AFTER, SI_COHERENCY_SHADER,
                   SI_AUTO_SELECT_CLEAR_METHOD);
}

void si_copy_buffer(struct si_context *sctx, struct pipe_resource *dst, struct pipe_resource *src,
                    uint64_t dst_offset, uint64_t src_offset, unsigned size, unsigned flags)
{
   if (!size)
      return;

   enum si_coherency coher = SI_COHERENCY_SHADER;
   enum si_cache_policy cache_policy = get_cache_policy(sctx, coher, size);
   uint64_t compute_min_size = 8 * 1024;

   /* Only use compute for VRAM copies on dGPUs. */
   if (sctx->screen->info.has_dedicated_vram && si_resource(dst)->domains & RADEON_DOMAIN_VRAM &&
       si_resource(src)->domains & RADEON_DOMAIN_VRAM && size > compute_min_size &&
       dst_offset % 4 == 0 && src_offset % 4 == 0 && size % 4 == 0) {
      si_compute_do_clear_or_copy(sctx, dst, dst_offset, src, src_offset, size, NULL, 0,
                                  flags, coher);
   } else {
      si_cp_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset, size,
                            flags, coher, cache_policy);
   }
}
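
/* For example: a 64 KiB VRAM-to-VRAM copy with dword-aligned offsets on a
 * dGPU takes the compute path above (64 KiB > compute_min_size of 8 KiB),
 * while a 4 KiB copy, a GTT copy, or a copy with unaligned offsets falls
 * back to CP DMA.
 */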
void si_compute_copy_image(struct si_context *sctx, struct pipe_resource *dst, unsigned dst_level,
                           struct pipe_resource *src, unsigned src_level, unsigned dstx,
                           unsigned dsty, unsigned dstz, const struct pipe_box *src_box,
                           bool is_dcc_decompress, unsigned flags)
{
   struct pipe_context *ctx = &sctx->b;
   unsigned width = src_box->width;
   unsigned height = src_box->height;
   unsigned depth = src_box->depth;
   enum pipe_format src_format = util_format_linear(src->format);
   enum pipe_format dst_format = util_format_linear(dst->format);
   bool is_linear = ((struct si_texture*)src)->surface.is_linear ||
                    ((struct si_texture*)dst)->surface.is_linear;

   assert(util_format_is_subsampled_422(src_format) == util_format_is_subsampled_422(dst_format));

   if (!vi_dcc_enabled((struct si_texture*)src, src_level) &&
       !vi_dcc_enabled((struct si_texture*)dst, dst_level) &&
       src_format == dst_format &&
       util_format_is_float(src_format) &&
       !util_format_is_compressed(src_format)) {
      /* Interpret as integer values to avoid NaN issues */
      switch(util_format_get_blocksizebits(src_format)) {
      case 16:
         src_format = dst_format = PIPE_FORMAT_R16_UINT;
         break;
      case 32:
         src_format = dst_format = PIPE_FORMAT_R32_UINT;
         break;
      case 64:
         src_format = dst_format = PIPE_FORMAT_R32G32_UINT;
         break;
      case 128:
         src_format = dst_format = PIPE_FORMAT_R32G32B32A32_UINT;
         break;
      default:
         assert(false);
      }
   }

   if (util_format_is_subsampled_422(src_format)) {
      src_format = dst_format = PIPE_FORMAT_R32_UINT;
      /* Interpreting 422 subsampled format (16 bpp) as 32 bpp
       * should force us to divide src_box->x, dstx and width by 2.
       * But given that ac_surface allocates this format as 32 bpp
       * and that surf_size is then modified to pack the values
       * we must keep the original values to get the correct results.
       */
   }

   if (width == 0 || height == 0)
      return;

   /* The driver doesn't decompress resources automatically here. */
   si_decompress_subresource(ctx, dst, PIPE_MASK_RGBAZS, dst_level, dstz,
                             dstz + src_box->depth - 1);
   si_decompress_subresource(ctx, src, PIPE_MASK_RGBAZS, src_level, src_box->z,
                             src_box->z + src_box->depth - 1);

   /* src and dst have the same number of samples. */
   si_make_CB_shader_coherent(sctx, src->nr_samples, true,
                              /* Only src can have DCC. */
                              ((struct si_texture *)src)->surface.u.gfx9.color.dcc.pipe_aligned);

   struct si_images *images = &sctx->images[PIPE_SHADER_COMPUTE];
   struct pipe_image_view saved_image[2] = {0};
   util_copy_image_view(&saved_image[0], &images->views[0]);
   util_copy_image_view(&saved_image[1], &images->views[1]);

   struct pipe_image_view image[2] = {0};
   image[0].resource = src;
   image[0].shader_access = image[0].access = PIPE_IMAGE_ACCESS_READ;
   image[0].format = src_format;
   image[0].u.tex.level = src_level;
   image[0].u.tex.first_layer = 0;
   image[0].u.tex.last_layer = src->target == PIPE_TEXTURE_3D ? u_minify(src->depth0, src_level) - 1
                                                              : (unsigned)(src->array_size - 1);
   image[1].resource = dst;
   image[1].shader_access = image[1].access = PIPE_IMAGE_ACCESS_WRITE;
   image[1].format = dst_format;
   image[1].u.tex.level = dst_level;
   image[1].u.tex.first_layer = 0;
   image[1].u.tex.last_layer = dst->target == PIPE_TEXTURE_3D ? u_minify(dst->depth0, dst_level) - 1
                                                              : (unsigned)(dst->array_size - 1);

   /* SNORM8 blitting has precision issues on some chips. Use the SINT
    * equivalent instead, which doesn't force DCC decompression.
    */
   if (util_format_is_snorm8(dst->format)) {
      image[0].format = image[1].format = util_format_snorm8_to_sint8(dst->format);
   }

   if (is_dcc_decompress)
      image[1].access |= SI_IMAGE_ACCESS_DCC_OFF;
   else if (sctx->chip_class >= GFX10)
      image[1].access |= SI_IMAGE_ACCESS_DCC_WRITE;

   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 2, 0, image);

   if (!is_dcc_decompress) {
      sctx->cs_user_data[0] = src_box->x | (dstx << 16);
      sctx->cs_user_data[1] = src_box->y | (dsty << 16);
      sctx->cs_user_data[2] = src_box->z | (dstz << 16);
   }
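
   /* The packing above puts the source coordinate in the low 16 bits and
    * the destination coordinate in the high 16 bits of each user-data
    * dword. For example (illustrative values), src_box->x = 0 and
    * dstx = 16 yield cs_user_data[0] = 0 | (16 << 16) = 0x00100000.
    */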
   struct pipe_grid_info info = {0};

   if (is_dcc_decompress) {
      /* The DCC decompression is a normal blit where the load is compressed
       * and the store is uncompressed. The workgroup size is either equal to
       * the DCC block size or a multiple thereof. The shader uses a barrier
       * between loads and stores to safely overwrite each DCC block of pixels.
       */
      struct si_texture *tex = (struct si_texture*)src;
      unsigned dim[3] = {src_box->width, src_box->height, src_box->depth};

      assert(src == dst);
      assert(dst->target != PIPE_TEXTURE_1D && dst->target != PIPE_TEXTURE_1D_ARRAY);

      if (!sctx->cs_dcc_decompress)
         sctx->cs_dcc_decompress = si_create_dcc_decompress_cs(ctx);

      info.block[0] = tex->surface.u.gfx9.color.dcc_block_width;
      info.block[1] = tex->surface.u.gfx9.color.dcc_block_height;
      info.block[2] = tex->surface.u.gfx9.color.dcc_block_depth;

      /* Make sure the block size is at least the same as wave size. */
      while (info.block[0] * info.block[1] * info.block[2] <
             sctx->screen->compute_wave_size) {
         info.block[0] *= 2;
      }

      for (unsigned i = 0; i < 3; i++) {
         info.last_block[i] = dim[i] % info.block[i];
         info.grid[i] = DIV_ROUND_UP(dim[i], info.block[i]);
      }

      si_launch_grid_internal(sctx, &info, sctx->cs_dcc_decompress, flags | SI_OP_CS_IMAGE);
   } else if (dst->target == PIPE_TEXTURE_1D_ARRAY && src->target == PIPE_TEXTURE_1D_ARRAY) {
      if (!sctx->cs_copy_image_1d_array)
         sctx->cs_copy_image_1d_array = si_create_copy_image_compute_shader_1d_array(ctx);

      info.block[0] = 64;
      info.last_block[0] = width % 64;
      info.block[1] = 1;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 64);
      info.grid[1] = depth;
      info.grid[2] = 1;

      si_launch_grid_internal(sctx, &info, sctx->cs_copy_image_1d_array, flags | SI_OP_CS_IMAGE);
   } else {
      if (!sctx->cs_copy_image)
         sctx->cs_copy_image = si_create_copy_image_compute_shader(ctx);

      /* This is better for access over PCIe. */
      if (is_linear) {
         info.block[0] = 64;
         info.block[1] = 1;
      } else {
         info.block[0] = 8;
         info.block[1] = 8;
      }
      info.last_block[0] = width % info.block[0];
      info.last_block[1] = height % info.block[1];
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, info.block[0]);
      info.grid[1] = DIV_ROUND_UP(height, info.block[1]);
      info.grid[2] = depth;

      si_launch_grid_internal(sctx, &info, sctx->cs_copy_image, flags | SI_OP_CS_IMAGE);
   }

   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 2, 0, saved_image);
   for (int i = 0; i < 2; i++)
      pipe_resource_reference(&saved_image[i].resource, NULL);
}
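
/* Worked example for the grid setup above (illustrative numbers): copying
 * a 100x50 tiled region with 8x8 blocks gives grid = {13, 7, depth} and
 * last_block = {4, 2, 0}, so the last workgroup in each dimension is
 * trimmed to the 4x2 remainder instead of writing out of bounds.
 */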
void si_retile_dcc(struct si_context *sctx, struct si_texture *tex)
{
   /* Set the DCC buffer. */
   assert(tex->surface.meta_offset && tex->surface.meta_offset <= UINT_MAX);
   assert(tex->surface.display_dcc_offset && tex->surface.display_dcc_offset <= UINT_MAX);
   assert(tex->surface.display_dcc_offset < tex->surface.meta_offset);
   assert(tex->buffer.bo_size <= UINT_MAX);

   struct pipe_shader_buffer sb = {};
   sb.buffer = &tex->buffer.b.b;
   sb.buffer_offset = tex->surface.display_dcc_offset;
   sb.buffer_size = tex->buffer.bo_size - sb.buffer_offset;

   sctx->cs_user_data[0] = tex->surface.meta_offset - tex->surface.display_dcc_offset;
   sctx->cs_user_data[1] = (tex->surface.u.gfx9.color.dcc_pitch_max + 1) |
                           (tex->surface.u.gfx9.color.dcc_height << 16);
   sctx->cs_user_data[2] = (tex->surface.u.gfx9.color.display_dcc_pitch_max + 1) |
                           (tex->surface.u.gfx9.color.display_dcc_height << 16);

   /* There is only 1 shader variant because ac_surface only supports displayable DCC
    * with one swizzle mode and 32bpp.
    */
   assert(tex->surface.bpe == 4);
   assert(sctx->chip_class != GFX9 || tex->surface.u.gfx9.swizzle_mode == 25); /* 64KB_S_X */
   assert(sctx->chip_class != GFX10 || tex->surface.u.gfx9.swizzle_mode == 27); /* 64KB_R_X */
   assert(sctx->chip_class != GFX10_3 || tex->surface.u.gfx9.swizzle_mode == 27); /* 64KB_R_X */

   if (!sctx->cs_dcc_retile)
      sctx->cs_dcc_retile = si_create_dcc_retile_cs(sctx, &tex->surface);

   /* Dispatch compute. */
   unsigned width = DIV_ROUND_UP(tex->buffer.b.b.width0, tex->surface.u.gfx9.color.dcc_block_width);
   unsigned height = DIV_ROUND_UP(tex->buffer.b.b.height0, tex->surface.u.gfx9.color.dcc_block_height);

   struct pipe_grid_info info = {};
   info.block[0] = 8;
   info.block[1] = 8;
   info.block[2] = 1;
   info.last_block[0] = width % info.block[0];
   info.last_block[1] = height % info.block[1];
   info.grid[0] = DIV_ROUND_UP(width, info.block[0]);
   info.grid[1] = DIV_ROUND_UP(height, info.block[1]);
   info.grid[2] = 1;

   si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_dcc_retile, SI_OP_SYNC_BEFORE,
                                 SI_COHERENCY_CB_META, 1, &sb, 0x1);

   /* Don't flush caches. L2 will be flushed by the kernel fence. */
}

void gfx9_clear_dcc_msaa(struct si_context *sctx, struct pipe_resource *res, uint32_t clear_value,
                         unsigned flags, enum si_coherency coher)
{
   struct si_texture *tex = (struct si_texture*)res;

   /* Set the DCC buffer. */
   assert(tex->surface.meta_offset && tex->surface.meta_offset <= UINT_MAX);
   assert(tex->buffer.bo_size <= UINT_MAX);

   struct pipe_shader_buffer sb = {};
   sb.buffer = &tex->buffer.b.b;
   sb.buffer_offset = tex->surface.meta_offset;
   sb.buffer_size = tex->buffer.bo_size - sb.buffer_offset;

   sctx->cs_user_data[0] = (tex->surface.u.gfx9.color.dcc_pitch_max + 1) |
                           (tex->surface.u.gfx9.color.dcc_height << 16);
   sctx->cs_user_data[1] = (clear_value & 0xffff) |
                           ((uint32_t)tex->surface.tile_swizzle << 16);

   /* These variables identify the shader variant. */
   unsigned swizzle_mode = tex->surface.u.gfx9.swizzle_mode;
   unsigned bpe_log2 = util_logbase2(tex->surface.bpe);
   unsigned log2_samples = util_logbase2(tex->buffer.b.b.nr_samples);
   bool fragments8 = tex->buffer.b.b.nr_storage_samples == 8;
   bool is_array = tex->buffer.b.b.array_size > 1;
   void **shader = &sctx->cs_clear_dcc_msaa[swizzle_mode][bpe_log2][fragments8][log2_samples - 2][is_array];

   if (!*shader)
      *shader = gfx9_create_clear_dcc_msaa_cs(sctx, tex);

   /* Dispatch compute. */
   unsigned width = DIV_ROUND_UP(tex->buffer.b.b.width0, tex->surface.u.gfx9.color.dcc_block_width);
   unsigned height = DIV_ROUND_UP(tex->buffer.b.b.height0, tex->surface.u.gfx9.color.dcc_block_height);
   unsigned depth = DIV_ROUND_UP(tex->buffer.b.b.array_size, tex->surface.u.gfx9.color.dcc_block_depth);

   struct pipe_grid_info info = {};
   info.block[0] = 8;
   info.block[1] = 8;
   info.block[2] = 1;
   info.last_block[0] = width % info.block[0];
   info.last_block[1] = height % info.block[1];
   info.grid[0] = DIV_ROUND_UP(width, info.block[0]);
   info.grid[1] = DIV_ROUND_UP(height, info.block[1]);
   info.grid[2] = depth;

   si_launch_grid_internal_ssbos(sctx, &info, *shader, flags, coher, 1, &sb, 0x1);
}
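
/* The variant cache above is keyed on everything that changes the generated
 * shader. For example (illustrative): a 4-sample, 32bpp, non-array texture
 * with swizzle mode 27 uses cs_clear_dcc_msaa[27][2][0][0][0], since
 * bpe_log2 = log2(4) = 2, fragments8 = false, log2_samples - 2 = 0, and
 * is_array = false.
 */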
/* Expand FMASK to make it identity, so that image stores can ignore it. */
void si_compute_expand_fmask(struct pipe_context *ctx, struct pipe_resource *tex)
{
   struct si_context *sctx = (struct si_context *)ctx;
   bool is_array = tex->target == PIPE_TEXTURE_2D_ARRAY;
   unsigned log_fragments = util_logbase2(tex->nr_storage_samples);
   unsigned log_samples = util_logbase2(tex->nr_samples);
   assert(tex->nr_samples >= 2);

   /* EQAA FMASK expansion is unimplemented. */
   if (tex->nr_samples != tex->nr_storage_samples)
      return;

   si_make_CB_shader_coherent(sctx, tex->nr_samples, true,
                              true /* DCC is not possible with image stores */);

   /* Save states. */
   struct pipe_image_view saved_image = {0};
   util_copy_image_view(&saved_image, &sctx->images[PIPE_SHADER_COMPUTE].views[0]);

   /* Bind the image. */
   struct pipe_image_view image = {0};
   image.resource = tex;
   /* Don't set WRITE so as not to trigger FMASK expansion, causing
    * an infinite loop. */
   image.shader_access = image.access = PIPE_IMAGE_ACCESS_READ;
   image.format = util_format_linear(tex->format);
   if (is_array)
      image.u.tex.last_layer = tex->array_size - 1;

   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &image);

   /* Bind the shader. */
   void **shader = &sctx->cs_fmask_expand[log_samples - 1][is_array];
   if (!*shader)
      *shader = si_create_fmask_expand_cs(ctx, tex->nr_samples, is_array);

   /* Dispatch compute. */
   struct pipe_grid_info info = {0};
   info.block[0] = 8;
   info.last_block[0] = tex->width0 % 8;
   info.block[1] = 8;
   info.last_block[1] = tex->height0 % 8;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(tex->width0, 8);
   info.grid[1] = DIV_ROUND_UP(tex->height0, 8);
   info.grid[2] = is_array ? tex->array_size : 1;

   si_launch_grid_internal(sctx, &info, *shader, SI_OP_SYNC_BEFORE_AFTER);

   /* Restore previous states. */
   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &saved_image);
   pipe_resource_reference(&saved_image.resource, NULL);

   /* Array of fully expanded FMASK values, arranged by [log2(fragments)][log2(samples)-1]. */
#define INVALID 0 /* never used */
   static const uint64_t fmask_expand_values[][4] = {
      /* samples */
      /* 2 (8 bpp)  4 (8 bpp)   8 (8-32bpp) 16 (16-64bpp)         fragments */
      {0x02020202, 0x0E0E0E0E, 0xFEFEFEFE, 0xFFFEFFFE},            /* 1 */
      {0x02020202, 0xA4A4A4A4, 0xAAA4AAA4, 0xAAAAAAA4},            /* 2 */
      {INVALID,    0xE4E4E4E4, 0x44443210, 0x4444444444443210},    /* 4 */
      {INVALID,    INVALID,    0x76543210, 0x8888888876543210},    /* 8 */
   };

   /* Clear FMASK to identity. */
   struct si_texture *stex = (struct si_texture *)tex;
   si_clear_buffer(sctx, tex, stex->surface.fmask_offset, stex->surface.fmask_size,
                   (uint32_t *)&fmask_expand_values[log_fragments][log_samples - 1],
                   log_fragments >= 2 && log_samples == 4 ? 8 : 4, SI_OP_SYNC_AFTER,
                   SI_COHERENCY_SHADER, SI_AUTO_SELECT_CLEAR_METHOD);
}

void si_init_compute_blit_functions(struct si_context *sctx)
{
   sctx->b.clear_buffer = si_pipe_clear_buffer;
}
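
/* Illustrative note on the constant buffer layout used by the clear below:
 * data[0..3] hold {dstx, dsty, first_layer, 0} and data[4..7] hold the
 * clear color, so e.g. clearing at (16, 32) of layer 0 with opaque red on
 * a float RGBA format uploads {16, 32, 0, 0, 0x3f800000, 0, 0, 0x3f800000}.
 */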
/* Clear a region of a color surface to a constant value. */
void si_compute_clear_render_target(struct pipe_context *ctx, struct pipe_surface *dstsurf,
                                    const union pipe_color_union *color, unsigned dstx,
                                    unsigned dsty, unsigned width, unsigned height,
                                    bool render_condition_enabled)
{
   struct si_context *sctx = (struct si_context *)ctx;
   unsigned num_layers = dstsurf->u.tex.last_layer - dstsurf->u.tex.first_layer + 1;
   unsigned data[4 + sizeof(color->ui)] = {dstx, dsty, dstsurf->u.tex.first_layer, 0};

   if (width == 0 || height == 0)
      return;

   /* The driver doesn't decompress resources automatically here. */
   si_decompress_subresource(ctx, dstsurf->texture, PIPE_MASK_RGBA, dstsurf->u.tex.level,
                             dstsurf->u.tex.first_layer, dstsurf->u.tex.last_layer);

   if (util_format_is_srgb(dstsurf->format)) {
      union pipe_color_union color_srgb;
      for (int i = 0; i < 3; i++)
         color_srgb.f[i] = util_format_linear_to_srgb_float(color->f[i]);
      color_srgb.f[3] = color->f[3];
      memcpy(data + 4, color_srgb.ui, sizeof(color->ui));
   } else {
      memcpy(data + 4, color->ui, sizeof(color->ui));
   }

   si_make_CB_shader_coherent(sctx, dstsurf->texture->nr_samples, true,
                              true /* DCC is not possible with image stores */);

   struct pipe_constant_buffer saved_cb = {};
   si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);

   struct si_images *images = &sctx->images[PIPE_SHADER_COMPUTE];
   struct pipe_image_view saved_image = {0};
   util_copy_image_view(&saved_image, &images->views[0]);

   struct pipe_constant_buffer cb = {};
   cb.buffer_size = sizeof(data);
   cb.user_buffer = data;
   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, false, &cb);

   struct pipe_image_view image = {0};
   image.resource = dstsurf->texture;
   image.shader_access = image.access = PIPE_IMAGE_ACCESS_WRITE;
   image.format = util_format_linear(dstsurf->format);
   image.u.tex.level = dstsurf->u.tex.level;
   image.u.tex.first_layer = 0; /* 3D images ignore first_layer (BASE_ARRAY) */
   image.u.tex.last_layer = dstsurf->u.tex.last_layer;

   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &image);

   struct pipe_grid_info info = {0};
   void *shader;

   if (dstsurf->texture->target != PIPE_TEXTURE_1D_ARRAY) {
      if (!sctx->cs_clear_render_target)
         sctx->cs_clear_render_target = si_clear_render_target_shader(ctx);
      shader = sctx->cs_clear_render_target;

      info.block[0] = 8;
      info.last_block[0] = width % 8;
      info.block[1] = 8;
      info.last_block[1] = height % 8;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 8);
      info.grid[1] = DIV_ROUND_UP(height, 8);
      info.grid[2] = num_layers;
   } else {
      if (!sctx->cs_clear_render_target_1d_array)
         sctx->cs_clear_render_target_1d_array = si_clear_render_target_shader_1d_array(ctx);
      shader = sctx->cs_clear_render_target_1d_array;

      info.block[0] = 64;
      info.last_block[0] = width % 64;
      info.block[1] = 1;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 64);
      info.grid[1] = num_layers;
      info.grid[2] = 1;
   }

   si_launch_grid_internal(sctx, &info, shader, SI_OP_SYNC_BEFORE_AFTER | SI_OP_CS_IMAGE |
                           (render_condition_enabled ? SI_OP_CS_RENDER_COND_ENABLE : 0));

   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &saved_image);
   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, true, &saved_cb);
   pipe_resource_reference(&saved_image.resource, NULL);
}