Path: blob/21.2-virgl/src/panfrost/bifrost/bi_lower_divergent_indirects.c
/*
 * Copyright (C) 2021 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "compiler/nir/nir_builder.h"

/* Divergent attribute access is undefined behaviour. To avoid divergence,
 * lower to an if-chain like:
 *
 *    value = 0;
 *    if (lane == 0)
 *       value = ld()
 *    else if (lane == 1)
 *       value = ld()
 *    ...
 *    else if (lane == MAX_LANE)
 *       value = ld()
 */

static bool
bi_lower_divergent_indirects_impl(nir_builder *b, nir_instr *instr, void *data)
{
        if (instr->type != nir_instr_type_intrinsic)
                return false;

        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
        gl_shader_stage stage = b->shader->info.stage;
        nir_src *offset;

        /* Not all indirect access needs this workaround */
        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
        case nir_intrinsic_load_interpolated_input:
                /* Attributes and varyings */
                offset = nir_get_io_offset_src(intr);
                break;

        case nir_intrinsic_store_output:
                /* Varyings only */
                if (stage == MESA_SHADER_FRAGMENT)
                        return false;

                offset = nir_get_io_offset_src(intr);
                break;

        case nir_intrinsic_image_atomic_add:
        case nir_intrinsic_image_atomic_imin:
        case nir_intrinsic_image_atomic_umin:
        case nir_intrinsic_image_atomic_imax:
        case nir_intrinsic_image_atomic_umax:
        case nir_intrinsic_image_atomic_and:
        case nir_intrinsic_image_atomic_or:
        case nir_intrinsic_image_atomic_xor:
        case nir_intrinsic_image_load:
        case nir_intrinsic_image_store:
                /* Any image access */
                offset = &intr->src[0];
                break;
        default:
                return false;
        }

        if (!nir_src_is_divergent(*offset))
                return false;

        /* This indirect does need it */

        b->cursor = nir_before_instr(instr);
        nir_ssa_def *lane = nir_load_subgroup_invocation(b);
        unsigned *lanes = data;

        /* Write zero in a funny way to bypass lower_load_const_to_scalar */
        bool has_dest = nir_intrinsic_infos[intr->intrinsic].has_dest;
        unsigned size = has_dest ? nir_dest_bit_size(intr->dest) : 32;
        nir_ssa_def *zero = has_dest ? nir_imm_zero(b, 1, size) : NULL;
        nir_ssa_def *zeroes[4] = { zero, zero, zero, zero };
        nir_ssa_def *res = has_dest ?
                nir_vec(b, zeroes, nir_dest_num_components(intr->dest)) : NULL;

        for (unsigned i = 0; i < (*lanes); ++i) {
                nir_push_if(b, nir_ieq_imm(b, lane, i));

                nir_instr *c = nir_instr_clone(b->shader, instr);
                nir_intrinsic_instr *c_intr = nir_instr_as_intrinsic(c);
                nir_builder_instr_insert(b, c);
                nir_pop_if(b, NULL);

                if (has_dest) {
                        assert(c_intr->dest.is_ssa);
                        nir_ssa_def *c_ssa = &c_intr->dest.ssa;
                        res = nir_if_phi(b, c_ssa, res);
                }
        }

        if (has_dest)
                nir_ssa_def_rewrite_uses(&intr->dest.ssa, res);

        nir_instr_remove(instr);
        return true;
}

bool
bi_lower_divergent_indirects(nir_shader *shader, unsigned lanes)
{
        return nir_shader_instructions_pass(shader,
                        bi_lower_divergent_indirects_impl,
                        nir_metadata_none, &lanes);
}
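
For context, the pass only rewrites sources that NIR's divergence analysis has already marked divergent (via nir_src_is_divergent), so the backend has to run that analysis before invoking it. The sketch below shows one way the call sequence might look; the wrapper function run_indirect_lowering and its warp_size parameter are illustrative assumptions, not the actual bifrost_compile.c wiring.

/* Sketch only: roughly how a backend might schedule this pass. The wrapper
 * function and its parameter are assumptions, not copied from the driver. */

#include "compiler/nir/nir.h"

/* Normally declared in the backend's compiler.h */
bool bi_lower_divergent_indirects(nir_shader *shader, unsigned lanes);

static void
run_indirect_lowering(nir_shader *nir, unsigned warp_size)
{
        /* The pass keys off nir_src_is_divergent(), so divergence
         * information must be computed first. */
        nir_divergence_analysis(nir);

        /* Replace each divergent indirect access with a warp_size-way
         * if-chain, one load/store per lane. */
        NIR_PASS_V(nir, bi_lower_divergent_indirects, warp_size);
}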