Path: blob/21.2-virgl/src/freedreno/ir3/ir3_nir_lower_io_offsets.c
/*
 * Copyright © 2018-2019 Igalia S.L.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/nir/nir_builder.h"
#include "ir3_nir.h"

/**
 * This pass moves to NIR certain offset computations for different I/O
 * ops that are currently implemented on the IR3 backend compiler, to
 * give NIR a chance to optimize them:
 *
 * - Dword-offset for SSBO load, store and atomics: A new, similar intrinsic
 *   is emitted that replaces the original one, adding a new source that
 *   holds the result of the original byte-offset source divided by 4.
 */

/* Returns the ir3-specific intrinsic opcode corresponding to an SSBO
 * instruction that is handled by this pass. It also conveniently returns
 * the offset source index in @offset_src_idx.
 *
 * If @intrinsic is not SSBO, or it is not handled by the pass, -1 is
 * returned.
 */
static int
get_ir3_intrinsic_for_ssbo_intrinsic(unsigned intrinsic,
                                     uint8_t *offset_src_idx)
{
   debug_assert(offset_src_idx);

   *offset_src_idx = 1;

   switch (intrinsic) {
   case nir_intrinsic_store_ssbo:
      *offset_src_idx = 2;
      return nir_intrinsic_store_ssbo_ir3;
   case nir_intrinsic_load_ssbo:
      return nir_intrinsic_load_ssbo_ir3;
   case nir_intrinsic_ssbo_atomic_add:
      return nir_intrinsic_ssbo_atomic_add_ir3;
   case nir_intrinsic_ssbo_atomic_imin:
      return nir_intrinsic_ssbo_atomic_imin_ir3;
   case nir_intrinsic_ssbo_atomic_umin:
      return nir_intrinsic_ssbo_atomic_umin_ir3;
   case nir_intrinsic_ssbo_atomic_imax:
      return nir_intrinsic_ssbo_atomic_imax_ir3;
   case nir_intrinsic_ssbo_atomic_umax:
      return nir_intrinsic_ssbo_atomic_umax_ir3;
   case nir_intrinsic_ssbo_atomic_and:
      return nir_intrinsic_ssbo_atomic_and_ir3;
   case nir_intrinsic_ssbo_atomic_or:
      return nir_intrinsic_ssbo_atomic_or_ir3;
   case nir_intrinsic_ssbo_atomic_xor:
      return nir_intrinsic_ssbo_atomic_xor_ir3;
   case nir_intrinsic_ssbo_atomic_exchange:
      return nir_intrinsic_ssbo_atomic_exchange_ir3;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return nir_intrinsic_ssbo_atomic_comp_swap_ir3;
   default:
      break;
   }

   return -1;
}

static nir_ssa_def *
check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
                                int32_t direction, int32_t shift)
{
   debug_assert(alu_instr->src[1].src.is_ssa);
   nir_ssa_def *shift_ssa = alu_instr->src[1].src.ssa;

   /* Only propagate if the shift is a const value so we can check value range
    * statically.
    */
   nir_const_value *const_val = nir_src_as_const_value(alu_instr->src[1].src);
   if (!const_val)
      return NULL;

   int32_t current_shift = const_val[0].i32 * direction;
   int32_t new_shift = current_shift + shift;

   /* If the merge would reverse the direction, bail out.
    * e.g., 'x << 2' then 'x >> 4' is not 'x >> 2'.
    */
   if (current_shift * new_shift < 0)
      return NULL;

   /* If the propagation would overflow an int32_t, bail out too to be on the
    * safe side.
    */
   if (new_shift < -31 || new_shift > 31)
      return NULL;

   /* Add or subtract shift depending on the final direction (SHR vs. SHL). */
   if (shift * direction < 0)
      shift_ssa = nir_isub(b, shift_ssa, nir_imm_int(b, abs(shift)));
   else
      shift_ssa = nir_iadd(b, shift_ssa, nir_imm_int(b, abs(shift)));

   return shift_ssa;
}
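
/* Worked example of the merge arithmetic above (values illustrative, not
 * taken from real shaders): propagating a division by 4 (shift = -2) into
 * 'x << 4' gives current_shift = 4 and new_shift = 2, i.e. 'x << 2';
 * into 'x >> 1' it gives current_shift = -1 and new_shift = -3, i.e.
 * 'x >> 3'. For 'x << 1' the result would be new_shift = -1, which
 * reverses the direction, so the function bails out and the caller falls
 * back to emitting an explicit shift.
 */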

nir_ssa_def *
ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset,
                                int32_t shift)
{
   nir_instr *offset_instr = offset->parent_instr;
   if (offset_instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(offset_instr);
   nir_ssa_def *shift_ssa;
   nir_ssa_def *new_offset = NULL;

   /* the first src could be something like ssa_18.x, but we only want
    * the single component. Otherwise the ishl/ishr/ushr could turn
    * into a vec4 operation:
    */
   nir_ssa_def *src0 = nir_mov_alu(b, alu->src[0], 1);

   switch (alu->op) {
   case nir_op_ishl:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, 1, shift);
      if (shift_ssa)
         new_offset = nir_ishl(b, src0, shift_ssa);
      break;
   case nir_op_ishr:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
      if (shift_ssa)
         new_offset = nir_ishr(b, src0, shift_ssa);
      break;
   case nir_op_ushr:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
      if (shift_ssa)
         new_offset = nir_ushr(b, src0, shift_ssa);
      break;
   default:
      return NULL;
   }

   return new_offset;
}
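
/* Sketch of the overall lowering performed below (names and values are
 * illustrative, not actual NIR output): a byte-offset chain such as
 *
 *    offset = x << 4
 *    load_ssbo(block, offset)
 *
 * becomes
 *
 *    dword_off = x << 2          // '(x << 4) >> 2' merged into one shift
 *    load_ssbo_ir3(block, offset, dword_off)
 *
 * with the dword-offset appended as the new last source. When no shift
 * can be merged, an explicit 'dword_off = offset >> 2' is emitted instead.
 */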

static bool
lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
                      unsigned ir3_ssbo_opcode, uint8_t offset_src_idx)
{
   unsigned num_srcs = nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
   int shift = 2;

   bool has_dest = nir_intrinsic_infos[intrinsic->intrinsic].has_dest;
   nir_ssa_def *new_dest = NULL;

   /* for 16-bit ssbo access, offset is in 16-bit words instead of dwords */
   if ((has_dest && intrinsic->dest.ssa.bit_size == 16) ||
       (!has_dest && intrinsic->src[0].ssa->bit_size == 16))
      shift = 1;

   /* Here we create a new intrinsic and copy over all contents from the old
    * one. */

   nir_intrinsic_instr *new_intrinsic;
   nir_src *target_src;

   b->cursor = nir_before_instr(&intrinsic->instr);

   /* 'offset_src_idx' holds the index of the source that represents the offset. */
   new_intrinsic = nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);

   debug_assert(intrinsic->src[offset_src_idx].is_ssa);
   nir_ssa_def *offset = intrinsic->src[offset_src_idx].ssa;

   /* Since we don't have value range checking, we first try to propagate
    * the division by 4 ('offset >> 2') into another bit-shift instruction that
    * possibly defines the offset. If that's the case, we emit a similar
    * instruction adjusting (merging) the shift value.
    *
    * Here we use the convention that shifting right is negative while shifting
    * left is positive. So 'x / 4' ~ 'x >> 2' or 'x << -2'.
    */
   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -shift);

   /* The new source that will hold the dword-offset is always the last
    * one for every intrinsic.
    */
   target_src = &new_intrinsic->src[num_srcs];
   *target_src = nir_src_for_ssa(offset);

   if (has_dest) {
      debug_assert(intrinsic->dest.is_ssa);
      nir_ssa_def *dest = &intrinsic->dest.ssa;
      nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
                        dest->num_components, dest->bit_size, NULL);
      new_dest = &new_intrinsic->dest.ssa;
   }

   for (unsigned i = 0; i < num_srcs; i++)
      new_intrinsic->src[i] = nir_src_for_ssa(intrinsic->src[i].ssa);

   nir_intrinsic_copy_const_indices(new_intrinsic, intrinsic);

   new_intrinsic->num_components = intrinsic->num_components;

   /* If we managed to propagate the division by 4, just use the new offset
    * register and don't emit the SHR.
    */
   if (new_offset)
      offset = new_offset;
   else
      offset = nir_ushr(b, offset, nir_imm_int(b, shift));

   /* Insert the new intrinsic right before the old one. */
   nir_builder_instr_insert(b, &new_intrinsic->instr);

   /* Replace the last source of the new intrinsic by the result of
    * the offset divided by 4.
    */
   nir_instr_rewrite_src(&new_intrinsic->instr, target_src,
                         nir_src_for_ssa(offset));

   if (has_dest) {
      /* Replace the uses of the original destination by that
       * of the new intrinsic.
       */
      nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa, new_dest);
   }

   /* Finally remove the original intrinsic. */
   nir_instr_remove(&intrinsic->instr);

   return true;
}

static bool
lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx,
                       int gpu_id)
{
   bool progress = false;

   nir_foreach_instr_safe (instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      /* SSBO */
      int ir3_intrinsic;
      uint8_t offset_src_idx;
      ir3_intrinsic =
         get_ir3_intrinsic_for_ssbo_intrinsic(intr->intrinsic, &offset_src_idx);
      if (ir3_intrinsic != -1) {
         progress |= lower_offset_for_ssbo(intr, b, (unsigned)ir3_intrinsic,
                                           offset_src_idx);
      }
   }

   return progress;
}

static bool
lower_io_offsets_func(nir_function_impl *impl, int gpu_id)
{
   void *mem_ctx = ralloc_parent(impl);
   nir_builder b;
   nir_builder_init(&b, impl);

   bool progress = false;
   nir_foreach_block_safe (block, impl) {
      progress |= lower_io_offsets_block(block, &b, mem_ctx, gpu_id);
   }

   if (progress) {
      nir_metadata_preserve(impl,
                            nir_metadata_block_index | nir_metadata_dominance);
   }

   return progress;
}

bool
ir3_nir_lower_io_offsets(nir_shader *shader, int gpu_id)
{
   bool progress = false;

   nir_foreach_function (function, shader) {
      if (function->impl)
         progress |= lower_io_offsets_func(function->impl, gpu_id);
   }

   return progress;
}
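
/* Usage sketch (hypothetical caller; the actual ir3 compiler invokes this
 * pass through its NIR pass-running helpers):
 *
 *    if (ir3_nir_lower_io_offsets(shader, gpu_id))
 *       nir_opt_algebraic(shader);  // let NIR clean up the emitted shifts
 */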