/* Path: blob/21.2-virgl/src/intel/vulkan/anv_nir_lower_ubo_loads.c */
/*1* Copyright © 2020 Intel Corporation2*3* Permission is hereby granted, free of charge, to any person obtaining a4* copy of this software and associated documentation files (the "Software"),5* to deal in the Software without restriction, including without limitation6* the rights to use, copy, modify, merge, publish, distribute, sublicense,7* and/or sell copies of the Software, and to permit persons to whom the8* Software is furnished to do so, subject to the following conditions:9*10* The above copyright notice and this permission notice (including the next11* paragraph) shall be included in all copies or substantial portions of the12* Software.13*14* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR15* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,16* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL17* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER18* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING19* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS20* IN THE SOFTWARE.21*/2223#include "anv_nir.h"24#include "nir_builder.h"2526static bool27lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)28{29if (instr->type != nir_instr_type_intrinsic)30return false;3132nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);33if (load->intrinsic != nir_intrinsic_load_global_constant_offset &&34load->intrinsic != nir_intrinsic_load_global_constant_bounded)35return false;3637b->cursor = nir_before_instr(instr);3839nir_ssa_def *base_addr = load->src[0].ssa;40nir_ssa_def *bound = NULL;41if (load->intrinsic == nir_intrinsic_load_global_constant_bounded)42bound = load->src[2].ssa;4344unsigned bit_size = load->dest.ssa.bit_size;45assert(bit_size >= 8 && bit_size % 8 == 0);46unsigned byte_size = bit_size / 8;4748nir_ssa_def *val;49if (nir_src_is_const(load->src[1])) {50uint32_t offset = 
nir_src_as_uint(load->src[1]);5152/* Things should be component-aligned. */53assert(offset % byte_size == 0);5455assert(ANV_UBO_ALIGNMENT == 64);5657unsigned suboffset = offset % 64;58uint64_t aligned_offset = offset - suboffset;5960/* Load two just in case we go over a 64B boundary */61nir_ssa_def *data[2];62for (unsigned i = 0; i < 2; i++) {63nir_ssa_def *pred;64if (bound) {65pred = nir_ilt(b, nir_imm_int(b, aligned_offset + i * 64 + 63),66bound);67} else {68pred = nir_imm_true(b);69}7071nir_ssa_def *addr = nir_iadd_imm(b, base_addr,72aligned_offset + i * 64);7374data[i] = nir_load_global_const_block_intel(b, 16, addr, pred);75}7677val = nir_extract_bits(b, data, 2, suboffset * 8,78load->num_components, bit_size);79} else {80nir_ssa_def *offset = load->src[1].ssa;81nir_ssa_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset));8283if (bound) {84nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);8586unsigned load_size = byte_size * load->num_components;87nir_ssa_def *in_bounds =88nir_ilt(b, nir_iadd_imm(b, offset, load_size - 1), bound);8990nir_push_if(b, in_bounds);9192nir_ssa_def *load_val =93nir_build_load_global_constant(b, load->dest.ssa.num_components,94load->dest.ssa.bit_size, addr,95.access = nir_intrinsic_access(load),96.align_mul = nir_intrinsic_align_mul(load),97.align_offset = nir_intrinsic_align_offset(load));9899nir_pop_if(b, NULL);100101val = nir_if_phi(b, load_val, zero);102} else {103val = nir_build_load_global_constant(b, load->dest.ssa.num_components,104load->dest.ssa.bit_size, addr,105.access = nir_intrinsic_access(load),106.align_mul = nir_intrinsic_align_mul(load),107.align_offset = nir_intrinsic_align_offset(load));108}109}110111nir_ssa_def_rewrite_uses(&load->dest.ssa, val);112nir_instr_remove(&load->instr);113114return true;115}116117bool118anv_nir_lower_ubo_loads(nir_shader *shader)119{120return nir_shader_instructions_pass(shader, lower_ubo_load_instr,121nir_metadata_block_index 
|122nir_metadata_dominance,123NULL);124}125126127