Path: blob/21.2-virgl/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"

static inline bool
get_ubo_load_range(nir_shader *nir, nir_intrinsic_instr *instr,
                   uint32_t alignment, struct ir3_ubo_range *r)
{
   uint32_t offset = nir_intrinsic_range_base(instr);
   uint32_t size = nir_intrinsic_range(instr);

   /* If the offset is constant, the range is trivial (and NIR may not have
    * figured it out).
    */
   if (nir_src_is_const(instr->src[1])) {
      offset = nir_src_as_uint(instr->src[1]);
      size = nir_intrinsic_dest_components(instr) * 4;
   }

   /* If we haven't figured out the range accessed in the UBO, bail. */
   if (size == ~0)
      return false;

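   /* For example (numbers purely illustrative, assuming alignment == 1, i.e.
    * a single 16-byte vec4 upload granule): a 2-component 32-bit load at
    * constant byte offset 20 has size 8, so
    *
    *    r->start = ROUND_DOWN_TO(20, 16) = 16
    *    r->end   = ALIGN(20 + 8, 16)     = 32
    *
    * and the plan covers the whole vec4 containing the access.
    */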
   r->start = ROUND_DOWN_TO(offset, alignment * 16);
   r->end = ALIGN(offset + size, alignment * 16);

   return true;
}

static bool
get_ubo_info(nir_intrinsic_instr *instr, struct ir3_ubo_info *ubo)
{
   if (nir_src_is_const(instr->src[0])) {
      ubo->block = nir_src_as_uint(instr->src[0]);
      ubo->bindless_base = 0;
      ubo->bindless = false;
      return true;
   } else {
      nir_intrinsic_instr *rsrc = ir3_bindless_resource(instr->src[0]);
      if (rsrc && nir_src_is_const(rsrc->src[0])) {
         ubo->block = nir_src_as_uint(rsrc->src[0]);
         ubo->bindless_base = nir_intrinsic_desc_set(rsrc);
         ubo->bindless = true;
         return true;
      }
   }
   return false;
}

/**
 * Finds the given instruction's UBO load in the UBO upload plan, if any.
 */
static const struct ir3_ubo_range *
get_existing_range(nir_intrinsic_instr *instr,
                   const struct ir3_ubo_analysis_state *state,
                   struct ir3_ubo_range *r)
{
   struct ir3_ubo_info ubo = {};

   if (!get_ubo_info(instr, &ubo))
      return NULL;

   for (int i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];
      if (!memcmp(&range->ubo, &ubo, sizeof(ubo)) && r->start >= range->start &&
          r->end <= range->end) {
         return range;
      }
   }

   return NULL;
}

/**
 * Merges together neighboring/overlapping ranges in the range plan with a
 * newly updated range.
 */
static void
merge_neighbors(struct ir3_ubo_analysis_state *state, int index)
{
   struct ir3_ubo_range *a = &state->range[index];

   /* index is always the first slot that would have neighbored/overlapped with
    * the new range.
    */
   for (int i = index + 1; i < state->num_enabled; i++) {
      struct ir3_ubo_range *b = &state->range[i];
      if (memcmp(&a->ubo, &b->ubo, sizeof(a->ubo)))
         continue;

      if (a->start > b->end || a->end < b->start)
         continue;

      /* Merge B into A. */
      a->start = MIN2(a->start, b->start);
      a->end = MAX2(a->end, b->end);

      /* Swap the last enabled range into B's now unused slot. */
      *b = state->range[--state->num_enabled];
   }
}

/**
 * During the first pass over the shader, makes the plan of which UBO upload
 * should include the range covering this UBO load.
 *
 * We are passed in an upload_remaining of how much space is left for us in
 * the const file, and we make sure our plan doesn't exceed that.
 */
static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
                  struct ir3_ubo_analysis_state *state, uint32_t alignment,
                  uint32_t *upload_remaining)
{
   if (ir3_shader_debug & IR3_DBG_NOUBOOPT)
      return;

   struct ir3_ubo_info ubo = {};
   if (!get_ubo_info(instr, &ubo))
      return;

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(nir, instr, alignment, &r))
      return;

   /* See if there's an existing range for this UBO we want to merge into. */
   for (int i = 0; i < state->num_enabled; i++) {
      struct ir3_ubo_range *plan_r = &state->range[i];
      if (memcmp(&plan_r->ubo, &ubo, sizeof(ubo)))
         continue;

      /* Don't extend existing uploads unless they're
       * neighboring/overlapping.
       */
      if (r.start > plan_r->end || r.end < plan_r->start)
         continue;

      r.start = MIN2(r.start, plan_r->start);
      r.end = MAX2(r.end, plan_r->end);

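      /* For example (numbers purely illustrative): if the existing plan entry
       * covers [16, 64) and this load's range is [48, 80), the merged range
       * is [16, 80) and only the 16 newly-added bytes count against
       * upload_remaining.
       */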
      uint32_t added = (plan_r->start - r.start) + (r.end - plan_r->end);
      if (added >= *upload_remaining)
         return;

      plan_r->start = r.start;
      plan_r->end = r.end;
      *upload_remaining -= added;

      merge_neighbors(state, i);
      return;
   }

   if (state->num_enabled == ARRAY_SIZE(state->range))
      return;

   uint32_t added = r.end - r.start;
   if (added >= *upload_remaining)
      return;

   struct ir3_ubo_range *plan_r = &state->range[state->num_enabled++];
   plan_r->ubo = ubo;
   plan_r->start = r.start;
   plan_r->end = r.end;
   *upload_remaining -= added;
}

/* For indirect offset, it is common to see a pattern of multiple
 * loads with the same base, but different constant offset, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset,
 *    0, 0)
 *
 * Or similarly:
 *
 *    vec1 32 ssa_33 = imad24_ir3 a, b, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Can be converted to:
 *
 *    vec1 32 ssa_base = imul24 a, b
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset,
 *    0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking).
 */
static void
handle_partial_const(nir_builder *b, nir_ssa_def **srcp, int *offp)
{
   if ((*srcp)->parent_instr->type != nir_instr_type_alu)
      return;

   nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);

   if (alu->op == nir_op_imad24_ir3) {
      /* This case is slightly more complicated as we need to
       * replace the imad24_ir3 with an imul24:
       */
      if (!nir_src_is_const(alu->src[2].src))
         return;

      *offp += nir_src_as_uint(alu->src[2].src);
      *srcp = nir_imul24(b, nir_ssa_for_alu_src(b, alu, 0),
                         nir_ssa_for_alu_src(b, alu, 1));

      return;
   }

   if (alu->op != nir_op_iadd)
      return;

   if (!(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa))
      return;

   if (nir_src_is_const(alu->src[0].src)) {
      *offp += nir_src_as_uint(alu->src[0].src);
      *srcp = alu->src[1].src.ssa;
   } else if (nir_src_is_const(alu->src[1].src)) {
      *srcp = alu->src[0].src.ssa;
      *offp += nir_src_as_uint(alu->src[1].src);
   }
}

/* Tracks the maximum bindful UBO accessed so that we reduce the UBO
 * descriptors emitted in the fast path for GL.
 */
static void
track_ubo_use(nir_intrinsic_instr *instr, nir_builder *b, int *num_ubos)
{
   if (ir3_bindless_resource(instr->src[0])) {
      assert(!b->shader->info.first_ubo_is_default_ubo); /* only set for GL */
      return;
   }

   if (nir_src_is_const(instr->src[0])) {
      int block = nir_src_as_uint(instr->src[0]);
      *num_ubos = MAX2(*num_ubos, block + 1);
   } else {
      *num_ubos = b->shader->info.num_ubos;
   }
}

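/* Rewrites a single load_ubo whose range made it into the upload plan into a
 * load_uniform from the const file.  Roughly (SSA names/numbers below are
 * made up for illustration):
 *
 *    vec4 32 ssa_2 = intrinsic load_ubo (ssa_0, ssa_1) (...)
 *
 * becomes
 *
 *    vec1 32 ssa_3 = ushr ssa_1, 2
 *    vec4 32 ssa_4 = intrinsic load_uniform (ssa_3) (base=M, 0, 0)
 *
 * where the shift converts the byte offset into dwords and base M folds in
 * both the constant part of the offset and where the range landed in the
 * const file.  Loads that can't be lowered are left alone and only counted
 * via track_ubo_use().
 */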
static bool
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
                          const struct ir3_ubo_analysis_state *state,
                          int *num_ubos, uint32_t alignment)
{
   b->cursor = nir_before_instr(&instr->instr);

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(b->shader, instr, alignment, &r)) {
      track_ubo_use(instr, b, num_ubos);
      return false;
   }

   /* We don't lower dynamic block index UBO loads to load_uniform, but we
    * could probably, with some effort, determine a block stride in number of
    * registers.
    */
   const struct ir3_ubo_range *range = get_existing_range(instr, state, &r);
   if (!range) {
      track_ubo_use(instr, b, num_ubos);
      return false;
   }

   nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
   int const_offset = 0;

   handle_partial_const(b, &ubo_offset, &const_offset);

   /* UBO offset is in bytes, but uniform offset is in units of
    * dwords, so we need to divide by 4 (right-shift by 2). For ldc the
    * offset is in units of 16 bytes, so we need to multiply by 4. And
    * also the same for the constant part of the offset:
    */
   const int shift = -2;
   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
   nir_ssa_def *uniform_offset = NULL;
   if (new_offset) {
      uniform_offset = new_offset;
   } else {
      uniform_offset = shift > 0
                          ? nir_ishl(b, ubo_offset, nir_imm_int(b, shift))
                          : nir_ushr(b, ubo_offset, nir_imm_int(b, -shift));
   }

   debug_assert(!(const_offset & 0x3));
   const_offset >>= 2;

   const int range_offset = ((int)range->offset - (int)range->start) / 4;
   const_offset += range_offset;

   /* The range_offset could be negative: if only part of the UBO block is
    * accessed, range->start can be greater than range->offset. But we can't
    * underflow const_offset. If necessary, we need to insert nir
    * instructions to compensate (which can hopefully be optimized away).
    */
   if (const_offset < 0) {
      uniform_offset = nir_iadd_imm(b, uniform_offset, const_offset);
      const_offset = 0;
   }

   nir_ssa_def *uniform =
      nir_load_uniform(b, instr->num_components, instr->dest.ssa.bit_size,
                       uniform_offset, .base = const_offset);

   nir_ssa_def_rewrite_uses(&instr->dest.ssa, uniform);

   nir_instr_remove(&instr->instr);

   return true;
}

static bool
instr_is_load_ubo(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_op op = nir_instr_as_intrinsic(instr)->intrinsic;

   /* nir_lower_ubo_vec4 happens after this pass. */
   assert(op != nir_intrinsic_load_ubo_vec4);

   return op == nir_intrinsic_load_ubo;
}

void
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state(v);
   struct ir3_ubo_analysis_state *state = &const_state->ubo_state;
   struct ir3_compiler *compiler = v->shader->compiler;

   /* Limit our uploads to the amount of constant buffer space available in
    * the hardware, minus what the shader compiler may need for various
    * driver params. We do this UBO-to-push-constant pass before the real
    * allocation of the driver params' const space, because UBO pointers can
    * be driver params but this pass usually eliminates them.
    */
   struct ir3_const_state worst_case_const_state = {};
   ir3_setup_const_state(nir, v, &worst_case_const_state);
   const uint32_t max_upload =
      (ir3_max_const(v) - worst_case_const_state.offsets.immediate) * 16;

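   /* For example (numbers purely illustrative): with 512 vec4s of const space
    * reported by ir3_max_const() and a worst-case immediates offset of 64
    * vec4s, max_upload = (512 - 64) * 16 = 7168 bytes of UBO data would be
    * eligible for promotion to the const file.
    */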
   memset(state, 0, sizeof(*state));

   uint32_t upload_remaining = max_upload;
   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_ubo(instr))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr), state,
                                    compiler->const_upload_unit,
                                    &upload_remaining);
            }
         }
      }
   }

   /* For now, everything we upload is accessed statically and thus will be
    * used by the shader. Once we can upload dynamically indexed data, we may
    * upload sparsely accessed arrays, at which point we probably want to
    * give priority to smaller UBOs, on the assumption that big UBOs will be
    * accessed dynamically. Alternatively, we can track statically and
    * dynamically accessed ranges separately and upload static ranges
    * first.
    */

   uint32_t offset = v->shader->num_reserved_user_consts * 16;
   for (uint32_t i = 0; i < state->num_enabled; i++) {
      uint32_t range_size = state->range[i].end - state->range[i].start;

      debug_assert(offset <= max_upload);
      state->range[i].offset = offset;
      assert(offset <= max_upload);
      offset += range_size;
   }
   state->size = offset;
}

bool
ir3_nir_lower_ubo_loads(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_compiler *compiler = v->shader->compiler;
   /* For the binning pass variant, we re-use the corresponding draw-pass
    * variant's const_state and ubo state. To make this clear, in this
    * pass they are const (read-only).
    */
   const struct ir3_const_state *const_state = ir3_const_state(v);
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

   int num_ubos = 0;
   bool progress = false;
   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_builder builder;
         nir_builder_init(&builder, function->impl);
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr_safe (instr, block) {
               if (!instr_is_load_ubo(instr))
                  continue;
               progress |= lower_ubo_load_to_uniform(
                  nir_instr_as_intrinsic(instr), &builder, state, &num_ubos,
                  compiler->const_upload_unit);
            }
         }

         nir_metadata_preserve(
            function->impl, nir_metadata_block_index | nir_metadata_dominance);
      }
   }
   /* Update the num_ubos field for GL (first_ubo_is_default_ubo). With
    * Vulkan's bindless, we don't use the num_ubos field, so we can leave it
    * incremented.
    */
   if (nir->info.first_ubo_is_default_ubo)
      nir->info.num_ubos = num_ubos;

   return progress;
}

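/* A sketch of how the driver is expected to order these passes (illustrative
 * only -- the actual call sites live in the ir3/driver compile path, not in
 * this file):
 *
 *    ir3_nir_lower_load_constant(nir, v);  // load_constant -> load_ubo
 *    ir3_nir_analyze_ubo_ranges(nir, v);   // build the const upload plan
 *    ir3_nir_lower_ubo_loads(nir, v);      // load_ubo -> load_uniform
 *    ... constant folding / opt loop ...
 *    ir3_nir_fixup_load_uniform(nir);      // late: peel too-large bases out
 */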
static bool
fixup_load_uniform_filter(const nir_instr *instr, const void *arg)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;
   return nir_instr_as_intrinsic(instr)->intrinsic ==
          nir_intrinsic_load_uniform;
}

static nir_ssa_def *
fixup_load_uniform_instr(struct nir_builder *b, nir_instr *instr, void *arg)
{
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   /* We don't need to worry about the non-indirect case: */
   if (nir_src_is_const(intr->src[0]))
      return NULL;

   const unsigned base_offset_limit = (1 << 9); /* 9 bits */
   unsigned base_offset = nir_intrinsic_base(intr);

   /* Or about cases where the base offset is lower than the hw limit: */
   if (base_offset < base_offset_limit)
      return NULL;

   b->cursor = nir_before_instr(instr);

   nir_ssa_def *offset = nir_ssa_for_src(b, intr->src[0], 1);

   /* We'd like to avoid a sequence like:
    *
    *    vec4 32 ssa_18 = intrinsic load_uniform (ssa_4) (1024, 0, 0)
    *    vec4 32 ssa_19 = intrinsic load_uniform (ssa_4) (1072, 0, 0)
    *    vec4 32 ssa_20 = intrinsic load_uniform (ssa_4) (1120, 0, 0)
    *
    * from turning into a unique offset value per load (which requires
    * reloading a0.x for each instruction). So instead of just adding the
    * constant base_offset to the non-const offset, be a bit more clever and
    * only extract the part that cannot be encoded. Afterwards CSE should
    * turn the result into:
    *
    *    vec1 32 ssa_5 = load_const (1024)
    *    vec4 32 ssa_6 = iadd ssa_4, ssa_5
    *    vec4 32 ssa_18 = intrinsic load_uniform (ssa_6) (0, 0, 0)
    *    vec4 32 ssa_19 = intrinsic load_uniform (ssa_6) (48, 0, 0)
    *    vec4 32 ssa_20 = intrinsic load_uniform (ssa_6) (96, 0, 0)
    */
   unsigned new_base_offset = base_offset % base_offset_limit;

   nir_intrinsic_set_base(intr, new_base_offset);
   offset = nir_iadd_imm(b, offset, base_offset - new_base_offset);

   nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(offset));

   return NIR_LOWER_INSTR_PROGRESS;
}

/**
 * For relative CONST file access, we can only encode 10b worth of fixed offset,
 * so in cases where the base offset is larger, we need to peel it out into
 * ALU instructions.
 *
 * This should run late, after constant folding has had a chance to do its
 * thing, so we can actually know if it is an indirect uniform offset or not.
 */
bool
ir3_nir_fixup_load_uniform(nir_shader *nir)
{
   return nir_shader_lower_instructions(nir, fixup_load_uniform_filter,
                                        fixup_load_uniform_instr, NULL);
}

static nir_ssa_def *
ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data)
{
   struct ir3_const_state *const_state = data;
   nir_intrinsic_instr *instr = nir_instr_as_intrinsic(in_instr);

   /* Pick a UBO index to use as our constant data. Skip UBO 0 since that's
    * reserved for gallium's cb0.
    */
   if (const_state->constant_data_ubo == -1) {
      if (b->shader->info.num_ubos == 0)
         b->shader->info.num_ubos++;
      const_state->constant_data_ubo = b->shader->info.num_ubos++;
   }

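   /* For example, a GL shader with three bound UBOs (num_ubos == 3, UBO 0
    * being gallium's cb0) gets its constant data assigned UBO index 3 and
    * num_ubos bumped to 4; a shader with no UBOs at all still skips index 0
    * and ends up with its constant data in UBO 1.
    */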
   unsigned num_components = instr->num_components;
   if (nir_dest_bit_size(instr->dest) == 16) {
      /* We can't do 16b loads -- either from LDC (32-bit only in any of our
       * traces, and the disasm doesn't look like it really supports it) or
       * from the constant file (where CONSTANT_DEMOTION_ENABLE means we get
       * automatic 32b-to-16b conversions when we ask for 16b from it).
       * Instead, we'll load 32b from a UBO and unpack from there.
       */
      num_components = DIV_ROUND_UP(num_components, 2);
   }
   unsigned base = nir_intrinsic_base(instr);
   nir_ssa_def *index = nir_imm_int(b, const_state->constant_data_ubo);
   nir_ssa_def *offset =
      nir_iadd_imm(b, nir_ssa_for_src(b, instr->src[0], 1), base);

   nir_ssa_def *result =
      nir_load_ubo(b, num_components, 32, index, offset,
                   .align_mul = nir_intrinsic_align_mul(instr),
                   .align_offset = nir_intrinsic_align_offset(instr),
                   .range_base = base, .range = nir_intrinsic_range(instr));

   if (nir_dest_bit_size(instr->dest) == 16) {
      result = nir_bitcast_vector(b, result, 16);
      result = nir_channels(b, result, BITSET_MASK(instr->num_components));
   }

   return result;
}

static bool
ir3_lower_load_const_filter(const nir_instr *instr, const void *data)
{
   return (instr->type == nir_instr_type_intrinsic &&
           nir_instr_as_intrinsic(instr)->intrinsic ==
              nir_intrinsic_load_constant);
}

/* Lowers load_constant intrinsics to UBO accesses so we can run them through
 * the general "upload to const file or leave as UBO access" code.
 */
bool
ir3_nir_lower_load_constant(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state(v);

   const_state->constant_data_ubo = -1;

   bool progress = nir_shader_lower_instructions(
      nir, ir3_lower_load_const_filter, ir3_nir_lower_load_const_instr,
      const_state);

   if (progress) {
      struct ir3_compiler *compiler = v->shader->compiler;

      /* Save a copy of the NIR constant data to the variant for
       * inclusion in the final assembly.
       */
      v->constant_data_size =
         align(nir->constant_data_size,
               compiler->const_upload_unit * 4 * sizeof(uint32_t));
      v->constant_data = rzalloc_size(v, v->constant_data_size);
      memcpy(v->constant_data, nir->constant_data, nir->constant_data_size);
   }

   return progress;
}