Path: blob/21.2-virgl/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c
/*
 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
 * Copyright © 2014 Intel Corporation
 * Copyright © 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file
 *
 * Lower sampler and image references of (non-bindless) uniforms by removing
 * struct dereferences, and synthesizing new uniform variables without structs
 * if required.
 *
 * This will allow backends to have a simple, uniform treatment of bindless and
 * non-bindless samplers and images.
 *
 * Example:
 *
 *   struct S {
 *      sampler2D tex[2];
 *      sampler2D other;
 *   };
 *   uniform S s[2];
 *
 *   tmp = texture(s[n].tex[m], coord);
 *
 * Becomes:
 *
 *   decl_var uniform INTERP_MODE_NONE sampler2D[2][2] lower@s.tex (...)
 *
 *   vec1 32 ssa_idx = $(2 * n + m)
 *   vec4 32 ssa_out = tex ssa_coord (coord), lower@s.tex[n][m] (texture), lower@s.tex[n][m] (sampler)
 *
 * and lower@s.tex has var->data.binding set to the base index as defined by
 * the opaque uniform mapping.
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_deref.h"
#include "gl_nir.h"
#include "ir_uniform.h"

#include "util/compiler.h"
#include "main/mtypes.h"

struct lower_samplers_as_deref_state {
   nir_shader *shader;
   const struct gl_shader_program *shader_program;
   struct hash_table *remap_table;
};

/* Prepare for removing struct derefs.  This pre-pass generates the name
 * of the lowered deref, and calculates the lowered type and location.
 * After that, once looking up (or creating if needed) the lowered var,
 * constructing the new chain of deref instructions is a simple loop
 * that skips the struct derefs.
 *
 * path:     appended to as we descend down the chain of deref instrs
 *           and remove struct derefs
 * location: increased as we descend down and remove struct derefs
 * type:     updated as we recurse back up the chain of deref instrs
 *           with the resulting type after removing struct derefs
 */
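/* For example, for the deref chain s[n].tex[m] from the header comment, this
 * pre-pass appends ".tex" to the caller-provided name ("lower@s" becomes
 * "lower@s.tex"), adds the struct member's location offset, and rebuilds the
 * type bottom-up into the flattened sampler2D[2][2].
 */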
static void
remove_struct_derefs_prep(nir_deref_instr **p, char **name,
                          unsigned *location, const struct glsl_type **type)
{
   nir_deref_instr *cur = p[0], *next = p[1];

   if (!next) {
      *type = cur->type;
      return;
   }

   switch (next->deref_type) {
   case nir_deref_type_array: {
      unsigned length = glsl_get_length(cur->type);

      remove_struct_derefs_prep(&p[1], name, location, type);

      *type = glsl_array_type(*type, length,
                              glsl_get_explicit_stride(cur->type));
      break;
   }

   case nir_deref_type_struct: {
      *location += glsl_get_struct_location_offset(cur->type, next->strct.index);
      ralloc_asprintf_append(name, ".%s",
                             glsl_get_struct_elem_name(cur->type, next->strct.index));

      remove_struct_derefs_prep(&p[1], name, location, type);
      break;
   }

   default:
      unreachable("Invalid deref type");
      break;
   }
}
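
/* Record, in info->images_used, the binding range covered by the image
 * variable referenced by src[0] of this image intrinsic.
 */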
static void
record_images_used(struct shader_info *info,
                   nir_intrinsic_instr *instr)
{
   nir_variable *var =
      nir_deref_instr_get_variable(nir_src_as_deref(instr->src[0]));

   /* Structs have been lowered already, so get_aoa_size is sufficient. */
   const unsigned size =
      glsl_type_is_array(var->type) ? glsl_get_aoa_size(var->type) : 1;
   unsigned mask = ((1ull << MAX2(size, 1)) - 1) << var->data.binding;

   info->images_used |= mask;
}

static nir_deref_instr *
lower_deref(nir_builder *b, struct lower_samplers_as_deref_state *state,
            nir_deref_instr *deref)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);
   gl_shader_stage stage = state->shader->info.stage;

   if (var->data.bindless || var->data.mode != nir_var_uniform)
      return NULL;

   nir_deref_path path;
   nir_deref_path_init(&path, deref, state->remap_table);
   assert(path.path[0]->deref_type == nir_deref_type_var);

   char *name = ralloc_asprintf(state->remap_table, "lower@%s", var->name);
   unsigned location = var->data.location;
   const struct glsl_type *type = NULL;
   unsigned binding;

   /*
    * We end up needing to do this in two passes: the first generates the
    * name of the lowered var (and detects whether there even are any
    * struct derefs), and the second constructs the actual deref
    * instructions after looking up / generating a new nir_variable
    * (since we need to construct the deref_var first).
    */

   remove_struct_derefs_prep(path.path, &name, &location, &type);

   if (state->shader_program && var->data.how_declared != nir_var_hidden) {
      /* For GLSL programs, look up the bindings in the uniform storage. */
      assert(location < state->shader_program->data->NumUniformStorage &&
             state->shader_program->data->UniformStorage[location].opaque[stage].active);

      binding = state->shader_program->data->UniformStorage[location].opaque[stage].index;
   } else {
      /* For ARB programs, built-in shaders, or internally generated sampler
       * variables in GLSL programs, assume that whoever created the shader
       * set the bindings correctly already.
       */
      assert(var->data.explicit_binding);
      binding = var->data.binding;
   }

   if (var->type == type) {
      /* Fast path: We did not encounter any struct derefs. */
      var->data.binding = binding;
      return deref;
   }

   uint32_t hash = _mesa_hash_string(name);
   struct hash_entry *h =
      _mesa_hash_table_search_pre_hashed(state->remap_table, hash, name);

   if (h) {
      var = (nir_variable *)h->data;
   } else {
      var = nir_variable_create(state->shader, nir_var_uniform, type, name);
      var->data.binding = binding;

      /* Don't set var->data.location.  The old structure location could be
       * used to index into gl_uniform_storage, assuming the full structure
       * was walked in order.  With the new split variables, this invariant
       * no longer holds and there's no meaningful way to start from a base
       * location and access a particular array element.  Just leave it 0.
       */

      _mesa_hash_table_insert_pre_hashed(state->remap_table, hash, name, var);
   }

   /* Construct a new deref based on the lowered var, skipping the struct
    * derefs from the original deref:
    */
   nir_deref_instr *new_deref = nir_build_deref_var(b, var);
   for (nir_deref_instr **p = &path.path[1]; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_struct)
         continue;

      assert((*p)->deref_type == nir_deref_type_array);

      new_deref = nir_build_deref_array(b, new_deref,
                                        nir_ssa_for_src(b, (*p)->arr.index, 1));
   }

   return new_deref;
}
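
/* Record the texture binding range referenced by this tex instruction in
 * info->textures_used, and also in info->textures_used_by_txf for texel
 * fetch opcodes.
 */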
static void
record_textures_used(struct shader_info *info,
                     nir_deref_instr *deref,
                     nir_texop op)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);

   /* Structs have been lowered already, so get_aoa_size is sufficient. */
   const unsigned size =
      glsl_type_is_array(var->type) ? glsl_get_aoa_size(var->type) : 1;

   BITSET_SET_RANGE(info->textures_used, var->data.binding,
                    var->data.binding + (MAX2(size, 1) - 1));

   if (op == nir_texop_txf ||
       op == nir_texop_txf_ms ||
       op == nir_texop_txf_ms_mcs)
      BITSET_SET_RANGE(info->textures_used_by_txf, var->data.binding,
                       var->data.binding + (MAX2(size, 1) - 1));
}

static bool
lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state,
              nir_builder *b)
{
   int texture_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);
   int sampler_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);

   b->cursor = nir_before_instr(&instr->instr);

   if (texture_idx >= 0) {
      assert(instr->src[texture_idx].src.is_ssa);

      nir_deref_instr *texture_deref =
         lower_deref(b, state, nir_src_as_deref(instr->src[texture_idx].src));
      /* only lower non-bindless: */
      if (texture_deref) {
         nir_instr_rewrite_src(&instr->instr, &instr->src[texture_idx].src,
                               nir_src_for_ssa(&texture_deref->dest.ssa));
         record_textures_used(&b->shader->info, texture_deref, instr->op);
      }
   }

   if (sampler_idx >= 0) {
      assert(instr->src[sampler_idx].src.is_ssa);
      nir_deref_instr *sampler_deref =
         lower_deref(b, state, nir_src_as_deref(instr->src[sampler_idx].src));
      /* only lower non-bindless: */
      if (sampler_deref) {
         nir_instr_rewrite_src(&instr->instr, &instr->src[sampler_idx].src,
                               nir_src_for_ssa(&sampler_deref->dest.ssa));
      }
   }

   return true;
}

static bool
lower_intrinsic(nir_intrinsic_instr *instr,
                struct lower_samplers_as_deref_state *state,
                nir_builder *b)
{
   if (instr->intrinsic == nir_intrinsic_image_deref_load ||
       instr->intrinsic == nir_intrinsic_image_deref_store ||
       instr->intrinsic == nir_intrinsic_image_deref_atomic_add ||
       instr->intrinsic == nir_intrinsic_image_deref_atomic_imin ||
       instr->intrinsic == nir_intrinsic_image_deref_atomic_umin ||
       instr->intrinsic == nir_intrinsic_image_deref_atomic_imax ||
       instr->intrinsic == nir_intrinsic_image_deref_atomic_umax ||
       instr->intrinsic == nir_intrinsic_image_deref_atomic_and ||
       instr->intrinsic == nir_intrinsic_image_deref_atomic_or ||
       instr->intrinsic == nir_intrinsic_image_deref_atomic_xor ||
       instr->intrinsic == nir_intrinsic_image_deref_atomic_exchange ||
       instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap ||
       instr->intrinsic == nir_intrinsic_image_deref_atomic_fadd ||
       instr->intrinsic == nir_intrinsic_image_deref_size) {

      b->cursor = nir_before_instr(&instr->instr);
      nir_deref_instr *deref =
         lower_deref(b, state, nir_src_as_deref(instr->src[0]));

      record_images_used(&state->shader->info, instr);

      /* don't lower bindless: */
      if (!deref)
         return false;
      nir_instr_rewrite_src(&instr->instr, &instr->src[0],
                            nir_src_for_ssa(&deref->dest.ssa));
      return true;
   }

   return false;
}

static bool
lower_impl(nir_function_impl *impl, struct lower_samplers_as_deref_state *state)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type == nir_instr_type_tex)
            progress |= lower_sampler(nir_instr_as_tex(instr), state, &b);
         else if (instr->type == nir_instr_type_intrinsic)
            progress |= lower_intrinsic(nir_instr_as_intrinsic(instr), state, &b);
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}
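
/* Entry point: lower non-bindless sampler/image derefs in every function
 * implementation, then remove any derefs left dead by the rewrite.
 */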
bool
gl_nir_lower_samplers_as_deref(nir_shader *shader,
                               const struct gl_shader_program *shader_program)
{
   bool progress = false;
   struct lower_samplers_as_deref_state state;

   state.shader = shader;
   state.shader_program = shader_program;
   state.remap_table = _mesa_hash_table_create(NULL, _mesa_hash_string,
                                               _mesa_key_string_equal);

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, &state);
   }

   /* keys are freed automatically by ralloc */
   _mesa_hash_table_destroy(state.remap_table, NULL);

   if (progress)
      nir_remove_dead_derefs(shader);

   return progress;
}