Path: blob/21.2-virgl/src/broadcom/compiler/vir.c
/*1* Copyright © 2016-2017 Broadcom2*3* Permission is hereby granted, free of charge, to any person obtaining a4* copy of this software and associated documentation files (the "Software"),5* to deal in the Software without restriction, including without limitation6* the rights to use, copy, modify, merge, publish, distribute, sublicense,7* and/or sell copies of the Software, and to permit persons to whom the8* Software is furnished to do so, subject to the following conditions:9*10* The above copyright notice and this permission notice (including the next11* paragraph) shall be included in all copies or substantial portions of the12* Software.13*14* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR15* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,16* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL17* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER18* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING19* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS20* IN THE SOFTWARE.21*/2223#include "broadcom/common/v3d_device_info.h"24#include "v3d_compiler.h"25#include "util/u_prim.h"26#include "compiler/nir/nir_schedule.h"27#include "compiler/nir/nir_builder.h"2829int30vir_get_nsrc(struct qinst *inst)31{32switch (inst->qpu.type) {33case V3D_QPU_INSTR_TYPE_BRANCH:34return 0;35case V3D_QPU_INSTR_TYPE_ALU:36if (inst->qpu.alu.add.op != V3D_QPU_A_NOP)37return v3d_qpu_add_op_num_src(inst->qpu.alu.add.op);38else39return v3d_qpu_mul_op_num_src(inst->qpu.alu.mul.op);40}4142return 0;43}4445/**46* Returns whether the instruction has any side effects that must be47* preserved.48*/49bool50vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)51{52switch (inst->qpu.type) {53case V3D_QPU_INSTR_TYPE_BRANCH:54return true;55case V3D_QPU_INSTR_TYPE_ALU:56switch (inst->qpu.alu.add.op) {57case V3D_QPU_A_SETREVF:58case V3D_QPU_A_SETMSF:59case V3D_QPU_A_VPMSETUP:60case V3D_QPU_A_STVPMV:61case V3D_QPU_A_STVPMD:62case V3D_QPU_A_STVPMP:63case V3D_QPU_A_VPMWT:64case V3D_QPU_A_TMUWT:65return true;66default:67break;68}6970switch (inst->qpu.alu.mul.op) {71case V3D_QPU_M_MULTOP:72return true;73default:74break;75}76}7778if (inst->qpu.sig.ldtmu ||79inst->qpu.sig.ldvary ||80inst->qpu.sig.ldtlbu ||81inst->qpu.sig.ldtlb ||82inst->qpu.sig.wrtmuc ||83inst->qpu.sig.thrsw) {84return true;85}8687/* ldunifa works like ldunif: it reads an element and advances the88* pointer, so each read has a side effect (we don't care for ldunif89* because we reconstruct the uniform stream buffer after compiling90* with the surviving uniforms), so allowing DCE to remove91* one would break follow-up loads. 
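 * To illustrate the hazard (a sketch, not actual emitted code):
 *
 *     unifa   base        ; set the read pointer
 *     ldunifa -> a        ; reads base + 0, pointer advances to base + 4
 *     ldunifa -> b        ; reads base + 4
 *
 * If DCE dropped the first ldunifa because "a" is unused, the second
 * one would silently read base + 0 instead of base + 4.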
We could fix this by emiting a92* unifa for each ldunifa, but each unifa requires 3 delay slots93* before a ldunifa, so that would be quite expensive.94*/95if (inst->qpu.sig.ldunifa || inst->qpu.sig.ldunifarf)96return true;9798return false;99}100101bool102vir_is_raw_mov(struct qinst *inst)103{104if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||105(inst->qpu.alu.mul.op != V3D_QPU_M_FMOV &&106inst->qpu.alu.mul.op != V3D_QPU_M_MOV)) {107return false;108}109110if (inst->qpu.alu.add.output_pack != V3D_QPU_PACK_NONE ||111inst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE) {112return false;113}114115if (inst->qpu.alu.add.a_unpack != V3D_QPU_UNPACK_NONE ||116inst->qpu.alu.add.b_unpack != V3D_QPU_UNPACK_NONE ||117inst->qpu.alu.mul.a_unpack != V3D_QPU_UNPACK_NONE ||118inst->qpu.alu.mul.b_unpack != V3D_QPU_UNPACK_NONE) {119return false;120}121122if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||123inst->qpu.flags.mc != V3D_QPU_COND_NONE)124return false;125126return true;127}128129bool130vir_is_add(struct qinst *inst)131{132return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&133inst->qpu.alu.add.op != V3D_QPU_A_NOP);134}135136bool137vir_is_mul(struct qinst *inst)138{139return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&140inst->qpu.alu.mul.op != V3D_QPU_M_NOP);141}142143bool144vir_is_tex(const struct v3d_device_info *devinfo, struct qinst *inst)145{146if (inst->dst.file == QFILE_MAGIC)147return v3d_qpu_magic_waddr_is_tmu(devinfo, inst->dst.index);148149if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&150inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {151return true;152}153154return false;155}156157bool158vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)159{160for (int i = 0; i < vir_get_nsrc(inst); i++) {161switch (inst->src[i].file) {162case QFILE_VPM:163return true;164default:165break;166}167}168169if (devinfo->ver < 41 && (inst->qpu.sig.ldvary ||170inst->qpu.sig.ldtlb ||171inst->qpu.sig.ldtlbu ||172inst->qpu.sig.ldvpm)) {173return true;174}175176return false;177}178179bool180vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst)181{182switch (inst->dst.file) {183case QFILE_MAGIC:184switch (inst->dst.index) {185case V3D_QPU_WADDR_RECIP:186case V3D_QPU_WADDR_RSQRT:187case V3D_QPU_WADDR_EXP:188case V3D_QPU_WADDR_LOG:189case V3D_QPU_WADDR_SIN:190return true;191}192break;193default:194break;195}196197if (devinfo->ver < 41 && inst->qpu.sig.ldtmu)198return true;199200return false;201}202203void204vir_set_unpack(struct qinst *inst, int src,205enum v3d_qpu_input_unpack unpack)206{207assert(src == 0 || src == 1);208209if (vir_is_add(inst)) {210if (src == 0)211inst->qpu.alu.add.a_unpack = unpack;212else213inst->qpu.alu.add.b_unpack = unpack;214} else {215assert(vir_is_mul(inst));216if (src == 0)217inst->qpu.alu.mul.a_unpack = unpack;218else219inst->qpu.alu.mul.b_unpack = unpack;220}221}222223void224vir_set_pack(struct qinst *inst, enum v3d_qpu_output_pack pack)225{226if (vir_is_add(inst)) {227inst->qpu.alu.add.output_pack = pack;228} else {229assert(vir_is_mul(inst));230inst->qpu.alu.mul.output_pack = pack;231}232}233234void235vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond)236{237if (vir_is_add(inst)) {238inst->qpu.flags.ac = cond;239} else {240assert(vir_is_mul(inst));241inst->qpu.flags.mc = cond;242}243}244245void246vir_set_pf(struct v3d_compile *c, struct qinst *inst, enum v3d_qpu_pf pf)247{248c->flags_temp = -1;249if (vir_is_add(inst)) {250inst->qpu.flags.apf = pf;251} else {252assert(vir_is_mul(inst));253inst->qpu.flags.mpf = pf;254}255}256257void258vir_set_uf(struct 
v3d_compile *c, struct qinst *inst, enum v3d_qpu_uf uf)259{260c->flags_temp = -1;261if (vir_is_add(inst)) {262inst->qpu.flags.auf = uf;263} else {264assert(vir_is_mul(inst));265inst->qpu.flags.muf = uf;266}267}268269#if 0270uint8_t271vir_channels_written(struct qinst *inst)272{273if (vir_is_mul(inst)) {274switch (inst->dst.pack) {275case QPU_PACK_MUL_NOP:276case QPU_PACK_MUL_8888:277return 0xf;278case QPU_PACK_MUL_8A:279return 0x1;280case QPU_PACK_MUL_8B:281return 0x2;282case QPU_PACK_MUL_8C:283return 0x4;284case QPU_PACK_MUL_8D:285return 0x8;286}287} else {288switch (inst->dst.pack) {289case QPU_PACK_A_NOP:290case QPU_PACK_A_8888:291case QPU_PACK_A_8888_SAT:292case QPU_PACK_A_32_SAT:293return 0xf;294case QPU_PACK_A_8A:295case QPU_PACK_A_8A_SAT:296return 0x1;297case QPU_PACK_A_8B:298case QPU_PACK_A_8B_SAT:299return 0x2;300case QPU_PACK_A_8C:301case QPU_PACK_A_8C_SAT:302return 0x4;303case QPU_PACK_A_8D:304case QPU_PACK_A_8D_SAT:305return 0x8;306case QPU_PACK_A_16A:307case QPU_PACK_A_16A_SAT:308return 0x3;309case QPU_PACK_A_16B:310case QPU_PACK_A_16B_SAT:311return 0xc;312}313}314unreachable("Bad pack field");315}316#endif317318struct qreg319vir_get_temp(struct v3d_compile *c)320{321struct qreg reg;322323reg.file = QFILE_TEMP;324reg.index = c->num_temps++;325326if (c->num_temps > c->defs_array_size) {327uint32_t old_size = c->defs_array_size;328c->defs_array_size = MAX2(old_size * 2, 16);329330c->defs = reralloc(c, c->defs, struct qinst *,331c->defs_array_size);332memset(&c->defs[old_size], 0,333sizeof(c->defs[0]) * (c->defs_array_size - old_size));334335c->spillable = reralloc(c, c->spillable,336BITSET_WORD,337BITSET_WORDS(c->defs_array_size));338for (int i = old_size; i < c->defs_array_size; i++)339BITSET_SET(c->spillable, i);340}341342return reg;343}344345struct qinst *346vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst, struct qreg src0, struct qreg src1)347{348struct qinst *inst = calloc(1, sizeof(*inst));349350inst->qpu = v3d_qpu_nop();351inst->qpu.alu.add.op = op;352353inst->dst = dst;354inst->src[0] = src0;355inst->src[1] = src1;356inst->uniform = ~0;357358return inst;359}360361struct qinst *362vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct qreg src1)363{364struct qinst *inst = calloc(1, sizeof(*inst));365366inst->qpu = v3d_qpu_nop();367inst->qpu.alu.mul.op = op;368369inst->dst = dst;370inst->src[0] = src0;371inst->src[1] = src1;372inst->uniform = ~0;373374return inst;375}376377struct qinst *378vir_branch_inst(struct v3d_compile *c, enum v3d_qpu_branch_cond cond)379{380struct qinst *inst = calloc(1, sizeof(*inst));381382inst->qpu = v3d_qpu_nop();383inst->qpu.type = V3D_QPU_INSTR_TYPE_BRANCH;384inst->qpu.branch.cond = cond;385inst->qpu.branch.msfign = V3D_QPU_MSFIGN_NONE;386inst->qpu.branch.bdi = V3D_QPU_BRANCH_DEST_REL;387inst->qpu.branch.ub = true;388inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;389390inst->dst = vir_nop_reg();391inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT, 0);392393return inst;394}395396static void397vir_emit(struct v3d_compile *c, struct qinst *inst)398{399switch (c->cursor.mode) {400case vir_cursor_add:401list_add(&inst->link, c->cursor.link);402break;403case vir_cursor_addtail:404list_addtail(&inst->link, c->cursor.link);405break;406}407408c->cursor = vir_after_inst(inst);409c->live_intervals_valid = false;410}411412/* Updates inst to write to a new temporary, emits it, and notes the def. 
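 *
 * A minimal usage sketch (assuming the usual c->undef placeholder
 * destination, which has file QFILE_NULL):
 *
 *     struct qinst *fadd = vir_add_inst(V3D_QPU_A_FADD, c->undef, a, b);
 *     struct qreg sum = vir_emit_def(c, fadd);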
*/413struct qreg414vir_emit_def(struct v3d_compile *c, struct qinst *inst)415{416assert(inst->dst.file == QFILE_NULL);417418/* If we're emitting an instruction that's a def, it had better be419* writing a register.420*/421if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {422assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP ||423v3d_qpu_add_op_has_dst(inst->qpu.alu.add.op));424assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP ||425v3d_qpu_mul_op_has_dst(inst->qpu.alu.mul.op));426}427428inst->dst = vir_get_temp(c);429430if (inst->dst.file == QFILE_TEMP)431c->defs[inst->dst.index] = inst;432433vir_emit(c, inst);434435return inst->dst;436}437438struct qinst *439vir_emit_nondef(struct v3d_compile *c, struct qinst *inst)440{441if (inst->dst.file == QFILE_TEMP)442c->defs[inst->dst.index] = NULL;443444vir_emit(c, inst);445446return inst;447}448449struct qblock *450vir_new_block(struct v3d_compile *c)451{452struct qblock *block = rzalloc(c, struct qblock);453454list_inithead(&block->instructions);455456block->predecessors = _mesa_set_create(block,457_mesa_hash_pointer,458_mesa_key_pointer_equal);459460block->index = c->next_block_index++;461462return block;463}464465void466vir_set_emit_block(struct v3d_compile *c, struct qblock *block)467{468c->cur_block = block;469c->cursor = vir_after_block(block);470list_addtail(&block->link, &c->blocks);471}472473struct qblock *474vir_entry_block(struct v3d_compile *c)475{476return list_first_entry(&c->blocks, struct qblock, link);477}478479struct qblock *480vir_exit_block(struct v3d_compile *c)481{482return list_last_entry(&c->blocks, struct qblock, link);483}484485void486vir_link_blocks(struct qblock *predecessor, struct qblock *successor)487{488_mesa_set_add(successor->predecessors, predecessor);489if (predecessor->successors[0]) {490assert(!predecessor->successors[1]);491predecessor->successors[1] = successor;492} else {493predecessor->successors[0] = successor;494}495}496497const struct v3d_compiler *498v3d_compiler_init(const struct v3d_device_info *devinfo)499{500struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);501if (!compiler)502return NULL;503504compiler->devinfo = devinfo;505506if (!vir_init_reg_sets(compiler)) {507ralloc_free(compiler);508return NULL;509}510511return compiler;512}513514void515v3d_compiler_free(const struct v3d_compiler *compiler)516{517ralloc_free((void *)compiler);518}519520static struct v3d_compile *521vir_compile_init(const struct v3d_compiler *compiler,522struct v3d_key *key,523nir_shader *s,524void (*debug_output)(const char *msg,525void *debug_output_data),526void *debug_output_data,527int program_id, int variant_id,528uint32_t max_threads,529uint32_t min_threads_for_reg_alloc,530bool tmu_spilling_allowed,531bool disable_loop_unrolling,532bool disable_constant_ubo_load_sorting,533bool disable_tmu_pipelining,534bool fallback_scheduler)535{536struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);537538c->compiler = compiler;539c->devinfo = compiler->devinfo;540c->key = key;541c->program_id = program_id;542c->variant_id = variant_id;543c->threads = max_threads;544c->debug_output = debug_output;545c->debug_output_data = debug_output_data;546c->compilation_result = V3D_COMPILATION_SUCCEEDED;547c->min_threads_for_reg_alloc = min_threads_for_reg_alloc;548c->tmu_spilling_allowed = tmu_spilling_allowed;549c->fallback_scheduler = fallback_scheduler;550c->disable_tmu_pipelining = disable_tmu_pipelining;551c->disable_constant_ubo_load_sorting = disable_constant_ubo_load_sorting;552c->disable_loop_unrolling = 
disable_loop_unrolling;553554s = nir_shader_clone(c, s);555c->s = s;556557list_inithead(&c->blocks);558vir_set_emit_block(c, vir_new_block(c));559560c->output_position_index = -1;561c->output_sample_mask_index = -1;562563c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,564_mesa_key_pointer_equal);565566c->tmu.outstanding_regs = _mesa_pointer_set_create(c);567c->flags_temp = -1;568569return c;570}571572static int573type_size_vec4(const struct glsl_type *type, bool bindless)574{575return glsl_count_attribute_slots(type, false);576}577578static void579v3d_lower_nir(struct v3d_compile *c)580{581struct nir_lower_tex_options tex_options = {582.lower_txd = true,583.lower_tg4_broadcom_swizzle = true,584585.lower_rect = false, /* XXX: Use this on V3D 3.x */586.lower_txp = ~0,587/* Apply swizzles to all samplers. */588.swizzle_result = ~0,589};590591/* Lower the format swizzle and (for 32-bit returns)592* ARB_texture_swizzle-style swizzle.593*/594assert(c->key->num_tex_used <= ARRAY_SIZE(c->key->tex));595for (int i = 0; i < c->key->num_tex_used; i++) {596for (int j = 0; j < 4; j++)597tex_options.swizzles[i][j] = c->key->tex[i].swizzle[j];598}599600assert(c->key->num_samplers_used <= ARRAY_SIZE(c->key->sampler));601for (int i = 0; i < c->key->num_samplers_used; i++) {602if (c->key->sampler[i].return_size == 16) {603tex_options.lower_tex_packing[i] =604nir_lower_tex_packing_16;605}606}607608/* CS textures may not have return_size reflecting the shadow state. */609nir_foreach_uniform_variable(var, c->s) {610const struct glsl_type *type = glsl_without_array(var->type);611unsigned array_len = MAX2(glsl_get_length(var->type), 1);612613if (!glsl_type_is_sampler(type) ||614!glsl_sampler_type_is_shadow(type))615continue;616617for (int i = 0; i < array_len; i++) {618tex_options.lower_tex_packing[var->data.binding + i] =619nir_lower_tex_packing_16;620}621}622623NIR_PASS_V(c->s, nir_lower_tex, &tex_options);624NIR_PASS_V(c->s, nir_lower_system_values);625NIR_PASS_V(c->s, nir_lower_compute_system_values, NULL);626627NIR_PASS_V(c->s, nir_lower_vars_to_scratch,628nir_var_function_temp,6290,630glsl_get_natural_size_align_bytes);631NIR_PASS_V(c->s, v3d_nir_lower_scratch);632}633634static void635v3d_set_prog_data_uniforms(struct v3d_compile *c,636struct v3d_prog_data *prog_data)637{638int count = c->num_uniforms;639struct v3d_uniform_list *ulist = &prog_data->uniforms;640641ulist->count = count;642ulist->data = ralloc_array(prog_data, uint32_t, count);643memcpy(ulist->data, c->uniform_data,644count * sizeof(*ulist->data));645ulist->contents = ralloc_array(prog_data, enum quniform_contents, count);646memcpy(ulist->contents, c->uniform_contents,647count * sizeof(*ulist->contents));648}649650static void651v3d_vs_set_prog_data(struct v3d_compile *c,652struct v3d_vs_prog_data *prog_data)653{654/* The vertex data gets format converted by the VPM so that655* each attribute channel takes up a VPM column. 
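 * (For example, a vec3 attribute occupies three 32-bit VPM columns and
 * so adds 3 to vpm_input_size; the per-attribute counts come from
 * c->vattr_sizes[], filled in while translating NIR to VIR.)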
Precompute656* the sizes for the shader record.657*/658for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {659prog_data->vattr_sizes[i] = c->vattr_sizes[i];660prog_data->vpm_input_size += c->vattr_sizes[i];661}662663memset(prog_data->driver_location_map, -1,664sizeof(prog_data->driver_location_map));665666nir_foreach_shader_in_variable(var, c->s) {667prog_data->driver_location_map[var->data.location] =668var->data.driver_location;669}670671prog_data->uses_vid = BITSET_TEST(c->s->info.system_values_read,672SYSTEM_VALUE_VERTEX_ID) ||673BITSET_TEST(c->s->info.system_values_read,674SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);675676prog_data->uses_biid = BITSET_TEST(c->s->info.system_values_read,677SYSTEM_VALUE_BASE_INSTANCE);678679prog_data->uses_iid = BITSET_TEST(c->s->info.system_values_read,680SYSTEM_VALUE_INSTANCE_ID) ||681BITSET_TEST(c->s->info.system_values_read,682SYSTEM_VALUE_INSTANCE_INDEX);683684if (prog_data->uses_vid)685prog_data->vpm_input_size++;686if (prog_data->uses_biid)687prog_data->vpm_input_size++;688if (prog_data->uses_iid)689prog_data->vpm_input_size++;690691/* Input/output segment size are in sectors (8 rows of 32 bits per692* channel).693*/694prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;695prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;696697/* Set us up for shared input/output segments. This is apparently698* necessary for our VCM setup to avoid varying corruption.699*/700prog_data->separate_segments = false;701prog_data->vpm_output_size = MAX2(prog_data->vpm_output_size,702prog_data->vpm_input_size);703prog_data->vpm_input_size = 0;704705/* Compute VCM cache size. We set up our program to take up less than706* half of the VPM, so that any set of bin and render programs won't707* run out of space. We need space for at least one input segment,708* and then allocate the rest to output segments (one for the current709* program, the rest to VCM). The valid range of the VCM cache size710* field is 1-4 16-vertex batches, but GFXH-1744 limits us to 2-4711* batches.712*/713assert(c->devinfo->vpm_size);714int sector_size = V3D_CHANNELS * sizeof(uint32_t) * 8;715int vpm_size_in_sectors = c->devinfo->vpm_size / sector_size;716int half_vpm = vpm_size_in_sectors / 2;717int vpm_output_sectors = half_vpm - prog_data->vpm_input_size;718int vpm_output_batches = vpm_output_sectors / prog_data->vpm_output_size;719assert(vpm_output_batches >= 2);720prog_data->vcm_cache_size = CLAMP(vpm_output_batches - 1, 2, 4);721}722723static void724v3d_gs_set_prog_data(struct v3d_compile *c,725struct v3d_gs_prog_data *prog_data)726{727prog_data->num_inputs = c->num_inputs;728memcpy(prog_data->input_slots, c->input_slots,729c->num_inputs * sizeof(*c->input_slots));730731/* gl_PrimitiveIdIn is written by the GBG into the first word of the732* VPM output header automatically and the shader will overwrite733* it after reading it if necessary, so it doesn't add to the VPM734* size requirements.735*/736prog_data->uses_pid = BITSET_TEST(c->s->info.system_values_read,737SYSTEM_VALUE_PRIMITIVE_ID);738739/* Output segment size is in sectors (8 rows of 32 bits per channel) */740prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;741742/* Compute SIMD dispatch width and update VPM output size accordingly743* to ensure we can fit our program in memory. 
Available widths are744* 16, 8, 4, 1.745*746* Notice that at draw time we will have to consider VPM memory747* requirements from other stages and choose a smaller dispatch748* width if needed to fit the program in VPM memory.749*/750prog_data->simd_width = 16;751while ((prog_data->simd_width > 1 && prog_data->vpm_output_size > 16) ||752prog_data->simd_width == 2) {753prog_data->simd_width >>= 1;754prog_data->vpm_output_size =755align(prog_data->vpm_output_size, 2) / 2;756}757assert(prog_data->vpm_output_size <= 16);758assert(prog_data->simd_width != 2);759760prog_data->out_prim_type = c->s->info.gs.output_primitive;761prog_data->num_invocations = c->s->info.gs.invocations;762763prog_data->writes_psiz =764c->s->info.outputs_written & (1 << VARYING_SLOT_PSIZ);765}766767static void768v3d_set_fs_prog_data_inputs(struct v3d_compile *c,769struct v3d_fs_prog_data *prog_data)770{771prog_data->num_inputs = c->num_inputs;772memcpy(prog_data->input_slots, c->input_slots,773c->num_inputs * sizeof(*c->input_slots));774775STATIC_ASSERT(ARRAY_SIZE(prog_data->flat_shade_flags) >776(V3D_MAX_FS_INPUTS - 1) / 24);777for (int i = 0; i < V3D_MAX_FS_INPUTS; i++) {778if (BITSET_TEST(c->flat_shade_flags, i))779prog_data->flat_shade_flags[i / 24] |= 1 << (i % 24);780781if (BITSET_TEST(c->noperspective_flags, i))782prog_data->noperspective_flags[i / 24] |= 1 << (i % 24);783784if (BITSET_TEST(c->centroid_flags, i))785prog_data->centroid_flags[i / 24] |= 1 << (i % 24);786}787}788789static void790v3d_fs_set_prog_data(struct v3d_compile *c,791struct v3d_fs_prog_data *prog_data)792{793v3d_set_fs_prog_data_inputs(c, prog_data);794prog_data->writes_z = c->writes_z;795prog_data->disable_ez = !c->s->info.fs.early_fragment_tests;796prog_data->uses_center_w = c->uses_center_w;797prog_data->uses_implicit_point_line_varyings =798c->uses_implicit_point_line_varyings;799prog_data->lock_scoreboard_on_first_thrsw =800c->lock_scoreboard_on_first_thrsw;801prog_data->force_per_sample_msaa = c->force_per_sample_msaa;802prog_data->uses_pid = c->fs_uses_primitive_id;803}804805static void806v3d_cs_set_prog_data(struct v3d_compile *c,807struct v3d_compute_prog_data *prog_data)808{809prog_data->shared_size = c->s->info.shared_size;810811prog_data->local_size[0] = c->s->info.workgroup_size[0];812prog_data->local_size[1] = c->s->info.workgroup_size[1];813prog_data->local_size[2] = c->s->info.workgroup_size[2];814815prog_data->has_subgroups = c->has_subgroups;816}817818static void819v3d_set_prog_data(struct v3d_compile *c,820struct v3d_prog_data *prog_data)821{822prog_data->threads = c->threads;823prog_data->single_seg = !c->last_thrsw;824prog_data->spill_size = c->spill_size;825prog_data->tmu_dirty_rcl = c->tmu_dirty_rcl;826prog_data->has_control_barrier = c->s->info.uses_control_barrier;827828v3d_set_prog_data_uniforms(c, prog_data);829830switch (c->s->info.stage) {831case MESA_SHADER_VERTEX:832v3d_vs_set_prog_data(c, (struct v3d_vs_prog_data *)prog_data);833break;834case MESA_SHADER_GEOMETRY:835v3d_gs_set_prog_data(c, (struct v3d_gs_prog_data *)prog_data);836break;837case MESA_SHADER_FRAGMENT:838v3d_fs_set_prog_data(c, (struct v3d_fs_prog_data *)prog_data);839break;840case MESA_SHADER_COMPUTE:841v3d_cs_set_prog_data(c, (struct v3d_compute_prog_data *)prog_data);842break;843default:844unreachable("unsupported shader stage");845}846}847848static uint64_t *849v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)850{851*final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);852853uint64_t *qpu_insts = 
malloc(*final_assembly_size);854if (!qpu_insts)855return NULL;856857memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);858859vir_compile_destroy(c);860861return qpu_insts;862}863864static void865v3d_nir_lower_vs_early(struct v3d_compile *c)866{867/* Split our I/O vars and dead code eliminate the unused868* components.869*/870NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,871nir_var_shader_in | nir_var_shader_out);872uint64_t used_outputs[4] = {0};873for (int i = 0; i < c->vs_key->num_used_outputs; i++) {874int slot = v3d_slot_get_slot(c->vs_key->used_outputs[i]);875int comp = v3d_slot_get_component(c->vs_key->used_outputs[i]);876used_outputs[comp] |= 1ull << slot;877}878NIR_PASS_V(c->s, nir_remove_unused_io_vars,879nir_var_shader_out, used_outputs, NULL); /* demotes to globals */880NIR_PASS_V(c->s, nir_lower_global_vars_to_local);881v3d_optimize_nir(c, c->s);882NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);883884/* This must go before nir_lower_io */885if (c->vs_key->per_vertex_point_size)886NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);887888NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,889type_size_vec4,890(nir_lower_io_options)0);891/* clean up nir_lower_io's deref_var remains and do a constant folding pass892* on the code it generated.893*/894NIR_PASS_V(c->s, nir_opt_dce);895NIR_PASS_V(c->s, nir_opt_constant_folding);896}897898static void899v3d_nir_lower_gs_early(struct v3d_compile *c)900{901/* Split our I/O vars and dead code eliminate the unused902* components.903*/904NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,905nir_var_shader_in | nir_var_shader_out);906uint64_t used_outputs[4] = {0};907for (int i = 0; i < c->gs_key->num_used_outputs; i++) {908int slot = v3d_slot_get_slot(c->gs_key->used_outputs[i]);909int comp = v3d_slot_get_component(c->gs_key->used_outputs[i]);910used_outputs[comp] |= 1ull << slot;911}912NIR_PASS_V(c->s, nir_remove_unused_io_vars,913nir_var_shader_out, used_outputs, NULL); /* demotes to globals */914NIR_PASS_V(c->s, nir_lower_global_vars_to_local);915v3d_optimize_nir(c, c->s);916NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);917918/* This must go before nir_lower_io */919if (c->gs_key->per_vertex_point_size)920NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);921922NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,923type_size_vec4,924(nir_lower_io_options)0);925/* clean up nir_lower_io's deref_var remains */926NIR_PASS_V(c->s, nir_opt_dce);927}928929static void930v3d_fixup_fs_output_types(struct v3d_compile *c)931{932nir_foreach_shader_out_variable(var, c->s) {933uint32_t mask = 0;934935switch (var->data.location) {936case FRAG_RESULT_COLOR:937mask = ~0;938break;939case FRAG_RESULT_DATA0:940case FRAG_RESULT_DATA1:941case FRAG_RESULT_DATA2:942case FRAG_RESULT_DATA3:943mask = 1 << (var->data.location - FRAG_RESULT_DATA0);944break;945}946947if (c->fs_key->int_color_rb & mask) {948var->type =949glsl_vector_type(GLSL_TYPE_INT,950glsl_get_components(var->type));951} else if (c->fs_key->uint_color_rb & mask) {952var->type =953glsl_vector_type(GLSL_TYPE_UINT,954glsl_get_components(var->type));955}956}957}958959static void960v3d_nir_lower_fs_early(struct v3d_compile *c)961{962if (c->fs_key->int_color_rb || c->fs_key->uint_color_rb)963v3d_fixup_fs_output_types(c);964965NIR_PASS_V(c->s, v3d_nir_lower_logic_ops, c);966967if (c->fs_key->line_smoothing) {968v3d_nir_lower_line_smooth(c->s);969NIR_PASS_V(c->s, nir_lower_global_vars_to_local);970/* The lowering pass can introduce 
new sysval reads */971nir_shader_gather_info(c->s, nir_shader_get_entrypoint(c->s));972}973974/* If the shader has no non-TLB side effects, we can promote it to975* enabling early_fragment_tests even if the user didn't.976*/977if (!(c->s->info.num_images ||978c->s->info.num_ssbos)) {979c->s->info.fs.early_fragment_tests = true;980}981}982983static void984v3d_nir_lower_gs_late(struct v3d_compile *c)985{986if (c->key->ucp_enables) {987NIR_PASS_V(c->s, nir_lower_clip_gs, c->key->ucp_enables,988false, NULL);989}990991/* Note: GS output scalarizing must happen after nir_lower_clip_gs. */992NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);993}994995static void996v3d_nir_lower_vs_late(struct v3d_compile *c)997{998if (c->key->ucp_enables) {999NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables,1000false, false, NULL);1001NIR_PASS_V(c->s, nir_lower_io_to_scalar,1002nir_var_shader_out);1003}10041005/* Note: VS output scalarizing must happen after nir_lower_clip_vs. */1006NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);1007}10081009static void1010v3d_nir_lower_fs_late(struct v3d_compile *c)1011{1012/* In OpenGL the fragment shader can't read gl_ClipDistance[], but1013* Vulkan allows it, in which case the SPIR-V compiler will declare1014* VARING_SLOT_CLIP_DIST0 as compact array variable. Pass true as1015* the last parameter to always operate with a compact array in both1016* OpenGL and Vulkan so we do't have to care about the API we1017* are using.1018*/1019if (c->key->ucp_enables)1020NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables, true);10211022NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);1023}10241025static uint32_t1026vir_get_max_temps(struct v3d_compile *c)1027{1028int max_ip = 0;1029vir_for_each_inst_inorder(inst, c)1030max_ip++;10311032uint32_t *pressure = rzalloc_array(NULL, uint32_t, max_ip);10331034for (int t = 0; t < c->num_temps; t++) {1035for (int i = c->temp_start[t]; (i < c->temp_end[t] &&1036i < max_ip); i++) {1037if (i > max_ip)1038break;1039pressure[i]++;1040}1041}10421043uint32_t max_temps = 0;1044for (int i = 0; i < max_ip; i++)1045max_temps = MAX2(max_temps, pressure[i]);10461047ralloc_free(pressure);10481049return max_temps;1050}10511052enum v3d_dependency_class {1053V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_01054};10551056static bool1057v3d_intrinsic_dependency_cb(nir_intrinsic_instr *intr,1058nir_schedule_dependency *dep,1059void *user_data)1060{1061struct v3d_compile *c = user_data;10621063switch (intr->intrinsic) {1064case nir_intrinsic_store_output:1065/* Writing to location 0 overwrites the value passed in for1066* gl_PrimitiveID on geometry shaders1067*/1068if (c->s->info.stage != MESA_SHADER_GEOMETRY ||1069nir_intrinsic_base(intr) != 0)1070break;10711072nir_const_value *const_value =1073nir_src_as_const_value(intr->src[1]);10741075if (const_value == NULL)1076break;10771078uint64_t offset =1079nir_const_value_as_uint(*const_value,1080nir_src_bit_size(intr->src[1]));1081if (offset != 0)1082break;10831084dep->klass = V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0;1085dep->type = NIR_SCHEDULE_WRITE_DEPENDENCY;1086return true;10871088case nir_intrinsic_load_primitive_id:1089if (c->s->info.stage != MESA_SHADER_GEOMETRY)1090break;10911092dep->klass = V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0;1093dep->type = NIR_SCHEDULE_READ_DEPENDENCY;1094return true;10951096default:1097break;1098}10991100return false;1101}11021103static bool1104should_split_wrmask(const nir_instr *instr, const void *data)1105{1106nir_intrinsic_instr *intr = 
nir_instr_as_intrinsic(instr);1107switch (intr->intrinsic) {1108case nir_intrinsic_store_ssbo:1109case nir_intrinsic_store_shared:1110case nir_intrinsic_store_global:1111case nir_intrinsic_store_scratch:1112return true;1113default:1114return false;1115}1116}11171118static nir_intrinsic_instr *1119nir_instr_as_constant_ubo_load(nir_instr *inst)1120{1121if (inst->type != nir_instr_type_intrinsic)1122return NULL;11231124nir_intrinsic_instr *intr = nir_instr_as_intrinsic(inst);1125if (intr->intrinsic != nir_intrinsic_load_ubo)1126return NULL;11271128assert(nir_src_is_const(intr->src[0]));1129if (!nir_src_is_const(intr->src[1]))1130return NULL;11311132return intr;1133}11341135static bool1136v3d_nir_sort_constant_ubo_load(nir_block *block, nir_intrinsic_instr *ref)1137{1138bool progress = false;11391140nir_instr *ref_inst = &ref->instr;1141uint32_t ref_offset = nir_src_as_uint(ref->src[1]);1142uint32_t ref_index = nir_src_as_uint(ref->src[0]);11431144/* Go through all instructions after ref searching for constant UBO1145* loads for the same UBO index.1146*/1147bool seq_break = false;1148nir_instr *inst = &ref->instr;1149nir_instr *next_inst = NULL;1150while (true) {1151inst = next_inst ? next_inst : nir_instr_next(inst);1152if (!inst)1153break;11541155next_inst = NULL;11561157if (inst->type != nir_instr_type_intrinsic)1158continue;11591160nir_intrinsic_instr *intr = nir_instr_as_intrinsic(inst);1161if (intr->intrinsic != nir_intrinsic_load_ubo)1162continue;11631164/* We only produce unifa sequences for non-divergent loads */1165if (nir_src_is_divergent(intr->src[1]))1166continue;11671168/* If there are any UBO loads that are not constant or that1169* use a different UBO index in between the reference load and1170* any other constant load for the same index, they would break1171* the unifa sequence. We will flag that so we can then move1172* all constant UBO loads for the reference index before these1173* and not just the ones that are not ordered to avoid breaking1174* the sequence and reduce unifa writes.1175*/1176if (!nir_src_is_const(intr->src[1])) {1177seq_break = true;1178continue;1179}1180uint32_t offset = nir_src_as_uint(intr->src[1]);11811182assert(nir_src_is_const(intr->src[0]));1183uint32_t index = nir_src_as_uint(intr->src[0]);1184if (index != ref_index) {1185seq_break = true;1186continue;1187}11881189/* Only move loads with an offset that is close enough to the1190* reference offset, since otherwise we would not be able to1191* skip the unifa write for them. See ntq_emit_load_ubo_unifa.1192*/1193if (abs(ref_offset - offset) > MAX_UNIFA_SKIP_DISTANCE)1194continue;11951196/* We will move this load if its offset is smaller than ref's1197* (in which case we will move it before ref) or if the offset1198* is larger than ref's but there are sequence breakers in1199* in between (in which case we will move it after ref and1200* before the sequence breakers).1201*/1202if (!seq_break && offset >= ref_offset)1203continue;12041205/* Find where exactly we want to move this load:1206*1207* If we are moving it before ref, we want to check any other1208* UBO loads we placed before ref and make sure we insert this1209* one properly ordered with them. 
Likewise, if we are moving1210* it after ref.1211*/1212nir_instr *pos = ref_inst;1213nir_instr *tmp = pos;1214do {1215if (offset < ref_offset)1216tmp = nir_instr_prev(tmp);1217else1218tmp = nir_instr_next(tmp);12191220if (!tmp || tmp == inst)1221break;12221223/* Ignore non-unifa UBO loads */1224if (tmp->type != nir_instr_type_intrinsic)1225continue;12261227nir_intrinsic_instr *tmp_intr =1228nir_instr_as_intrinsic(tmp);1229if (tmp_intr->intrinsic != nir_intrinsic_load_ubo)1230continue;12311232if (nir_src_is_divergent(tmp_intr->src[1]))1233continue;12341235/* Stop if we find a unifa UBO load that breaks the1236* sequence.1237*/1238if (!nir_src_is_const(tmp_intr->src[1]))1239break;12401241if (nir_src_as_uint(tmp_intr->src[0]) != index)1242break;12431244uint32_t tmp_offset = nir_src_as_uint(tmp_intr->src[1]);1245if (offset < ref_offset) {1246if (tmp_offset < offset ||1247tmp_offset >= ref_offset) {1248break;1249} else {1250pos = tmp;1251}1252} else {1253if (tmp_offset > offset ||1254tmp_offset <= ref_offset) {1255break;1256} else {1257pos = tmp;1258}1259}1260} while (true);12611262/* We can't move the UBO load before the instruction that1263* defines its constant offset. If that instruction is placed1264* in between the new location (pos) and the current location1265* of this load, we will have to move that instruction too.1266*1267* We don't care about the UBO index definition because that1268* is optimized to be reused by all UBO loads for the same1269* index and therefore is certain to be defined before the1270* first UBO load that uses it.1271*/1272nir_instr *offset_inst = NULL;1273tmp = inst;1274while ((tmp = nir_instr_prev(tmp)) != NULL) {1275if (pos == tmp) {1276/* We reached the target location without1277* finding the instruction that defines the1278* offset, so that instruction must be before1279* the new position and we don't have to fix it.1280*/1281break;1282}1283if (intr->src[1].ssa->parent_instr == tmp) {1284offset_inst = tmp;1285break;1286}1287}12881289if (offset_inst) {1290exec_node_remove(&offset_inst->node);1291exec_node_insert_node_before(&pos->node,1292&offset_inst->node);1293}12941295/* Since we are moving the instruction before its current1296* location, grab its successor before the move so that1297* we can continue the next iteration of the main loop from1298* that instruction.1299*/1300next_inst = nir_instr_next(inst);13011302/* Move this load to the selected location */1303exec_node_remove(&inst->node);1304if (offset < ref_offset)1305exec_node_insert_node_before(&pos->node, &inst->node);1306else1307exec_node_insert_after(&pos->node, &inst->node);13081309progress = true;1310}13111312return progress;1313}13141315static bool1316v3d_nir_sort_constant_ubo_loads_block(struct v3d_compile *c,1317nir_block *block)1318{1319bool progress = false;1320bool local_progress;1321do {1322local_progress = false;1323nir_foreach_instr_safe(inst, block) {1324nir_intrinsic_instr *intr =1325nir_instr_as_constant_ubo_load(inst);1326if (intr) {1327local_progress |=1328v3d_nir_sort_constant_ubo_load(block, intr);1329}1330}1331progress |= local_progress;1332} while (local_progress);13331334return progress;1335}13361337/**1338* Sorts constant UBO loads in each block by offset to maximize chances of1339* skipping unifa writes when converting to VIR. 
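 * For instance (offsets are illustrative only), constant loads from the
 * same UBO at offsets 16, 0 and 8 within a block would be reordered to
 * 0, 8, 16, so the later loads stay within MAX_UNIFA_SKIP_DISTANCE of
 * the first unifa write and can skip emitting their own.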
This can increase register1340* pressure.1341*/1342static bool1343v3d_nir_sort_constant_ubo_loads(nir_shader *s, struct v3d_compile *c)1344{1345nir_foreach_function(function, s) {1346if (function->impl) {1347nir_foreach_block(block, function->impl) {1348c->sorted_any_ubo_loads |=1349v3d_nir_sort_constant_ubo_loads_block(c, block);1350}1351nir_metadata_preserve(function->impl,1352nir_metadata_block_index |1353nir_metadata_dominance);1354}1355}1356return c->sorted_any_ubo_loads;1357}13581359static void1360lower_load_num_subgroups(struct v3d_compile *c,1361nir_builder *b,1362nir_intrinsic_instr *intr)1363{1364assert(c->s->info.stage == MESA_SHADER_COMPUTE);1365assert(intr->intrinsic == nir_intrinsic_load_num_subgroups);13661367b->cursor = nir_after_instr(&intr->instr);1368uint32_t num_subgroups =1369DIV_ROUND_UP(c->s->info.workgroup_size[0] *1370c->s->info.workgroup_size[1] *1371c->s->info.workgroup_size[2], V3D_CHANNELS);1372nir_ssa_def *result = nir_imm_int(b, num_subgroups);1373nir_ssa_def_rewrite_uses(&intr->dest.ssa, result);1374nir_instr_remove(&intr->instr);1375}13761377static bool1378lower_subgroup_intrinsics(struct v3d_compile *c,1379nir_block *block, nir_builder *b)1380{1381bool progress = false;1382nir_foreach_instr_safe(inst, block) {1383if (inst->type != nir_instr_type_intrinsic)1384continue;;13851386nir_intrinsic_instr *intr =1387nir_instr_as_intrinsic(inst);1388if (!intr)1389continue;13901391switch (intr->intrinsic) {1392case nir_intrinsic_load_num_subgroups:1393lower_load_num_subgroups(c, b, intr);1394progress = true;1395FALLTHROUGH;1396case nir_intrinsic_load_subgroup_id:1397case nir_intrinsic_load_subgroup_size:1398case nir_intrinsic_load_subgroup_invocation:1399case nir_intrinsic_elect:1400c->has_subgroups = true;1401break;1402default:1403break;1404}1405}14061407return progress;1408}14091410static bool1411v3d_nir_lower_subgroup_intrinsics(nir_shader *s, struct v3d_compile *c)1412{1413bool progress = false;1414nir_foreach_function(function, s) {1415if (function->impl) {1416nir_builder b;1417nir_builder_init(&b, function->impl);14181419nir_foreach_block(block, function->impl)1420progress |= lower_subgroup_intrinsics(c, block, &b);14211422nir_metadata_preserve(function->impl,1423nir_metadata_block_index |1424nir_metadata_dominance);1425}1426}1427return progress;1428}14291430static void1431v3d_attempt_compile(struct v3d_compile *c)1432{1433switch (c->s->info.stage) {1434case MESA_SHADER_VERTEX:1435c->vs_key = (struct v3d_vs_key *) c->key;1436break;1437case MESA_SHADER_GEOMETRY:1438c->gs_key = (struct v3d_gs_key *) c->key;1439break;1440case MESA_SHADER_FRAGMENT:1441c->fs_key = (struct v3d_fs_key *) c->key;1442break;1443case MESA_SHADER_COMPUTE:1444break;1445default:1446unreachable("unsupported shader stage");1447}14481449switch (c->s->info.stage) {1450case MESA_SHADER_VERTEX:1451v3d_nir_lower_vs_early(c);1452break;1453case MESA_SHADER_GEOMETRY:1454v3d_nir_lower_gs_early(c);1455break;1456case MESA_SHADER_FRAGMENT:1457v3d_nir_lower_fs_early(c);1458break;1459default:1460break;1461}14621463v3d_lower_nir(c);14641465switch (c->s->info.stage) {1466case MESA_SHADER_VERTEX:1467v3d_nir_lower_vs_late(c);1468break;1469case MESA_SHADER_GEOMETRY:1470v3d_nir_lower_gs_late(c);1471break;1472case MESA_SHADER_FRAGMENT:1473v3d_nir_lower_fs_late(c);1474break;1475default:1476break;1477}14781479NIR_PASS_V(c->s, v3d_nir_lower_io, c);1480NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);1481NIR_PASS_V(c->s, v3d_nir_lower_image_load_store);1482nir_lower_idiv_options idiv_options = {1483.imprecise_32bit_lowering 
= true,1484.allow_fp16 = true,1485};1486NIR_PASS_V(c->s, nir_lower_idiv, &idiv_options);14871488if (c->key->robust_buffer_access) {1489/* v3d_nir_lower_robust_buffer_access assumes constant buffer1490* indices on ubo/ssbo intrinsics so run copy propagation and1491* constant folding passes before we run the lowering to warrant1492* this. We also want to run the lowering before v3d_optimize to1493* clean-up redundant get_buffer_size calls produced in the pass.1494*/1495NIR_PASS_V(c->s, nir_copy_prop);1496NIR_PASS_V(c->s, nir_opt_constant_folding);1497NIR_PASS_V(c->s, v3d_nir_lower_robust_buffer_access, c);1498}14991500NIR_PASS_V(c->s, nir_lower_wrmasks, should_split_wrmask, c->s);15011502NIR_PASS_V(c->s, v3d_nir_lower_subgroup_intrinsics, c);15031504v3d_optimize_nir(c, c->s);15051506/* Do late algebraic optimization to turn add(a, neg(b)) back into1507* subs, then the mandatory cleanup after algebraic. Note that it may1508* produce fnegs, and if so then we need to keep running to squash1509* fneg(fneg(a)).1510*/1511bool more_late_algebraic = true;1512while (more_late_algebraic) {1513more_late_algebraic = false;1514NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);1515NIR_PASS_V(c->s, nir_opt_constant_folding);1516NIR_PASS_V(c->s, nir_copy_prop);1517NIR_PASS_V(c->s, nir_opt_dce);1518NIR_PASS_V(c->s, nir_opt_cse);1519}15201521NIR_PASS_V(c->s, nir_lower_bool_to_int32);1522nir_convert_to_lcssa(c->s, true, true);1523NIR_PASS_V(c->s, nir_divergence_analysis);1524NIR_PASS_V(c->s, nir_convert_from_ssa, true);15251526struct nir_schedule_options schedule_options = {1527/* Schedule for about half our register space, to enable more1528* shaders to hit 4 threads.1529*/1530.threshold = 24,15311532/* Vertex shaders share the same memory for inputs and outputs,1533* fragement and geometry shaders do not.1534*/1535.stages_with_shared_io_memory =1536(((1 << MESA_ALL_SHADER_STAGES) - 1) &1537~((1 << MESA_SHADER_FRAGMENT) |1538(1 << MESA_SHADER_GEOMETRY))),15391540.fallback = c->fallback_scheduler,15411542.intrinsic_cb = v3d_intrinsic_dependency_cb,1543.intrinsic_cb_data = c,1544};1545NIR_PASS_V(c->s, nir_schedule, &schedule_options);15461547if (!c->disable_constant_ubo_load_sorting)1548NIR_PASS_V(c->s, v3d_nir_sort_constant_ubo_loads, c);15491550v3d_nir_to_vir(c);1551}15521553uint32_t1554v3d_prog_data_size(gl_shader_stage stage)1555{1556static const int prog_data_size[] = {1557[MESA_SHADER_VERTEX] = sizeof(struct v3d_vs_prog_data),1558[MESA_SHADER_GEOMETRY] = sizeof(struct v3d_gs_prog_data),1559[MESA_SHADER_FRAGMENT] = sizeof(struct v3d_fs_prog_data),1560[MESA_SHADER_COMPUTE] = sizeof(struct v3d_compute_prog_data),1561};15621563assert(stage >= 0 &&1564stage < ARRAY_SIZE(prog_data_size) &&1565prog_data_size[stage]);15661567return prog_data_size[stage];1568}15691570int v3d_shaderdb_dump(struct v3d_compile *c,1571char **shaderdb_str)1572{1573if (c == NULL || c->compilation_result != V3D_COMPILATION_SUCCEEDED)1574return -1;15751576return asprintf(shaderdb_str,1577"%s shader: %d inst, %d threads, %d loops, "1578"%d uniforms, %d max-temps, %d:%d spills:fills, "1579"%d sfu-stalls, %d inst-and-stalls, %d nops",1580vir_get_stage_name(c),1581c->qpu_inst_count,1582c->threads,1583c->loops,1584c->num_uniforms,1585vir_get_max_temps(c),1586c->spills,1587c->fills,1588c->qpu_inst_stalled_count,1589c->qpu_inst_count + c->qpu_inst_stalled_count,1590c->nop_count);1591}15921593/* This is a list of incremental changes to the compilation strategy1594* that will be used to try to compile the shader successfully. 
The
 * default strategy is to enable all optimizations, which will have
 * the highest register pressure but is expected to produce the most
 * optimal code. Following strategies incrementally disable specific
 * optimizations that are known to contribute to register pressure
 * in order to be able to compile the shader successfully while meeting
 * thread count requirements.
 *
 * V3D 4.1+ has a min thread count of 2, but we can use 1 here to also
 * cover previous hardware as well (meaning that we are not limiting
 * register allocation to any particular thread count). This is fine
 * because v3d_nir_to_vir will cap this to the actual minimum.
 */
struct v3d_compiler_strategy {
        const char *name;
        uint32_t max_threads;
        uint32_t min_threads;
        bool disable_loop_unrolling;
        bool disable_ubo_load_sorting;
        bool disable_tmu_pipelining;
        bool tmu_spilling_allowed;
} static const strategies[] = {
        /*0*/ { "default",                        4, 4, false, false, false, false },
        /*1*/ { "disable loop unrolling",         4, 4, true,  false, false, false },
        /*2*/ { "disable UBO load sorting",       4, 4, true,  true,  false, false },
        /*3*/ { "disable TMU pipelining",         4, 4, true,  true,  true,  false },
        /*4*/ { "lower thread count",             2, 1, false, false, false, false },
        /*5*/ { "disable loop unrolling (ltc)",   2, 1, true,  false, false, false },
        /*6*/ { "disable UBO load sorting (ltc)", 2, 1, true,  true,  false, false },
        /*7*/ { "disable TMU pipelining (ltc)",   2, 1, true,  true,  true,  true },
        /*8*/ { "fallback scheduler",             2, 1, true,  true,  true,  true }
};

/**
 * If a particular optimization didn't make any progress during a compile
 * attempt, disabling it alone won't allow us to compile the shader
 * successfully, since we'll end up with the same code. Detect these
 * scenarios so we can avoid wasting time with useless compiles.
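 * For example, if the default strategy did not unroll any loops
 * (c->unrolled_any_loops is false), re-compiling with loop unrolling
 * disabled would generate the same code, so that strategy is skipped.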
We should also consider if the1632* strategy changes other aspects of the compilation process though, like1633* spilling, and not skip it in that case.1634*/1635static bool1636skip_compile_strategy(struct v3d_compile *c, uint32_t idx)1637{1638/* We decide if we can skip a strategy based on the optimizations that1639* were active in the previous strategy, so we should only be calling this1640* for strategies after the first.1641*/1642assert(idx > 0);16431644/* Don't skip a strategy that changes spilling behavior */1645if (strategies[idx].tmu_spilling_allowed !=1646strategies[idx - 1].tmu_spilling_allowed) {1647return false;1648}16491650switch (idx) {1651/* Loop unrolling: skip if we didn't unroll any loops */1652case 1:1653case 5:1654return !c->unrolled_any_loops;1655/* UBO load sorting: skip if we didn't sort any loads */1656case 2:1657case 6:1658return !c->sorted_any_ubo_loads;1659/* TMU pipelining: skip if we didn't pipeline any TMU ops */1660case 3:1661case 7:1662return !c->pipelined_any_tmu;1663/* Lower thread count: skip if we already tried less that 4 threads */1664case 4:1665return c->threads < 4;1666default:1667return false;1668};1669}1670uint64_t *v3d_compile(const struct v3d_compiler *compiler,1671struct v3d_key *key,1672struct v3d_prog_data **out_prog_data,1673nir_shader *s,1674void (*debug_output)(const char *msg,1675void *debug_output_data),1676void *debug_output_data,1677int program_id, int variant_id,1678uint32_t *final_assembly_size)1679{1680struct v3d_compile *c = NULL;1681for (int i = 0; i < ARRAY_SIZE(strategies); i++) {1682/* Fallback strategy */1683if (i > 0) {1684assert(c);1685if (skip_compile_strategy(c, i))1686continue;16871688char *debug_msg;1689int ret = asprintf(&debug_msg,1690"Falling back to strategy '%s' for %s",1691strategies[i].name,1692vir_get_stage_name(c));16931694if (ret >= 0) {1695if (unlikely(V3D_DEBUG & V3D_DEBUG_PERF))1696fprintf(stderr, "%s\n", debug_msg);16971698c->debug_output(debug_msg, c->debug_output_data);1699free(debug_msg);1700}17011702vir_compile_destroy(c);1703}17041705c = vir_compile_init(compiler, key, s,1706debug_output, debug_output_data,1707program_id, variant_id,1708strategies[i].max_threads,1709strategies[i].min_threads,1710strategies[i].tmu_spilling_allowed,1711strategies[i].disable_loop_unrolling,1712strategies[i].disable_ubo_load_sorting,1713strategies[i].disable_tmu_pipelining,1714i == ARRAY_SIZE(strategies) - 1);17151716v3d_attempt_compile(c);17171718if (i >= ARRAY_SIZE(strategies) - 1 ||1719c->compilation_result !=1720V3D_COMPILATION_FAILED_REGISTER_ALLOCATION) {1721break;1722}1723}17241725if (unlikely(V3D_DEBUG & V3D_DEBUG_PERF) &&1726c->compilation_result !=1727V3D_COMPILATION_FAILED_REGISTER_ALLOCATION &&1728c->spills > 0) {1729char *debug_msg;1730int ret = asprintf(&debug_msg,1731"Compiled %s with %d spills and %d fills",1732vir_get_stage_name(c),1733c->spills, c->fills);1734fprintf(stderr, "%s\n", debug_msg);17351736if (ret >= 0) {1737c->debug_output(debug_msg, c->debug_output_data);1738free(debug_msg);1739}1740}17411742if (c->compilation_result != V3D_COMPILATION_SUCCEEDED) {1743fprintf(stderr, "Failed to compile %s with any strategy.\n",1744vir_get_stage_name(c));1745}17461747struct v3d_prog_data *prog_data;17481749prog_data = rzalloc_size(NULL, v3d_prog_data_size(c->s->info.stage));17501751v3d_set_prog_data(c, prog_data);17521753*out_prog_data = prog_data;17541755char *shaderdb;1756int ret = v3d_shaderdb_dump(c, &shaderdb);1757if (ret >= 0) {1758if (V3D_DEBUG & V3D_DEBUG_SHADERDB)1759fprintf(stderr, "SHADER-DB: %s\n", 
shaderdb);17601761c->debug_output(shaderdb, c->debug_output_data);1762free(shaderdb);1763}17641765return v3d_return_qpu_insts(c, final_assembly_size);1766}17671768void1769vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst)1770{1771if (qinst->dst.file == QFILE_TEMP)1772c->defs[qinst->dst.index] = NULL;17731774assert(&qinst->link != c->cursor.link);17751776list_del(&qinst->link);1777free(qinst);17781779c->live_intervals_valid = false;1780}17811782struct qreg1783vir_follow_movs(struct v3d_compile *c, struct qreg reg)1784{1785/* XXX1786int pack = reg.pack;17871788while (reg.file == QFILE_TEMP &&1789c->defs[reg.index] &&1790(c->defs[reg.index]->op == QOP_MOV ||1791c->defs[reg.index]->op == QOP_FMOV) &&1792!c->defs[reg.index]->dst.pack &&1793!c->defs[reg.index]->src[0].pack) {1794reg = c->defs[reg.index]->src[0];1795}17961797reg.pack = pack;1798*/1799return reg;1800}18011802void1803vir_compile_destroy(struct v3d_compile *c)1804{1805/* Defuse the assert that we aren't removing the cursor's instruction.1806*/1807c->cursor.link = NULL;18081809vir_for_each_block(block, c) {1810while (!list_is_empty(&block->instructions)) {1811struct qinst *qinst =1812list_first_entry(&block->instructions,1813struct qinst, link);1814vir_remove_instruction(c, qinst);1815}1816}18171818ralloc_free(c);1819}18201821uint32_t1822vir_get_uniform_index(struct v3d_compile *c,1823enum quniform_contents contents,1824uint32_t data)1825{1826for (int i = 0; i < c->num_uniforms; i++) {1827if (c->uniform_contents[i] == contents &&1828c->uniform_data[i] == data) {1829return i;1830}1831}18321833uint32_t uniform = c->num_uniforms++;18341835if (uniform >= c->uniform_array_size) {1836c->uniform_array_size = MAX2(MAX2(16, uniform + 1),1837c->uniform_array_size * 2);18381839c->uniform_data = reralloc(c, c->uniform_data,1840uint32_t,1841c->uniform_array_size);1842c->uniform_contents = reralloc(c, c->uniform_contents,1843enum quniform_contents,1844c->uniform_array_size);1845}18461847c->uniform_contents[uniform] = contents;1848c->uniform_data[uniform] = data;18491850return uniform;1851}18521853/* Looks back into the current block to find the ldunif that wrote the uniform1854* at the requested index. 
If it finds it, it returns true and writes the1855* destination register of the ldunif instruction to 'unif'.1856*1857* This can impact register pressure and end up leading to worse code, so we1858* limit the number of instructions we are willing to look back through to1859* strike a good balance.1860*/1861static bool1862try_opt_ldunif(struct v3d_compile *c, uint32_t index, struct qreg *unif)1863{1864uint32_t count = 20;1865struct qinst *prev_inst = NULL;1866list_for_each_entry_from_rev(struct qinst, inst, c->cursor.link->prev,1867&c->cur_block->instructions, link) {1868if ((inst->qpu.sig.ldunif || inst->qpu.sig.ldunifrf) &&1869inst->uniform == index) {1870prev_inst = inst;1871break;1872}18731874if (--count == 0)1875break;1876}18771878if (!prev_inst)1879return false;188018811882list_for_each_entry_from(struct qinst, inst, prev_inst->link.next,1883&c->cur_block->instructions, link) {1884if (inst->dst.file == prev_inst->dst.file &&1885inst->dst.index == prev_inst->dst.index) {1886return false;1887}1888}18891890*unif = prev_inst->dst;1891return true;1892}18931894struct qreg1895vir_uniform(struct v3d_compile *c,1896enum quniform_contents contents,1897uint32_t data)1898{1899const int num_uniforms = c->num_uniforms;1900const int index = vir_get_uniform_index(c, contents, data);19011902/* If this is not the first time we see this uniform try to reuse the1903* result of the last ldunif that loaded it.1904*/1905const bool is_new_uniform = num_uniforms != c->num_uniforms;1906if (!is_new_uniform && !c->disable_ldunif_opt) {1907struct qreg ldunif_dst;1908if (try_opt_ldunif(c, index, &ldunif_dst))1909return ldunif_dst;1910}19111912struct qinst *inst = vir_NOP(c);1913inst->qpu.sig.ldunif = true;1914inst->uniform = index;1915inst->dst = vir_get_temp(c);1916c->defs[inst->dst.index] = inst;1917return inst->dst;1918}19191920#define OPTPASS(func) \1921do { \1922bool stage_progress = func(c); \1923if (stage_progress) { \1924progress = true; \1925if (print_opt_debug) { \1926fprintf(stderr, \1927"VIR opt pass %2d: %s progress\n", \1928pass, #func); \1929} \1930/*XXX vir_validate(c);*/ \1931} \1932} while (0)19331934void1935vir_optimize(struct v3d_compile *c)1936{1937bool print_opt_debug = false;1938int pass = 1;19391940while (true) {1941bool progress = false;19421943OPTPASS(vir_opt_copy_propagate);1944OPTPASS(vir_opt_redundant_flags);1945OPTPASS(vir_opt_dead_code);1946OPTPASS(vir_opt_small_immediates);1947OPTPASS(vir_opt_constant_alu);19481949if (!progress)1950break;19511952pass++;1953}1954}19551956const char *1957vir_get_stage_name(struct v3d_compile *c)1958{1959if (c->vs_key && c->vs_key->is_coord)1960return "MESA_SHADER_VERTEX_BIN";1961else if (c->gs_key && c->gs_key->is_coord)1962return "MESA_SHADER_GEOMETRY_BIN";1963else1964return gl_shader_stage_name(c->s->info.stage);1965}19661967static inline uint32_t1968compute_vpm_size_in_sectors(const struct v3d_device_info *devinfo)1969{1970assert(devinfo->vpm_size > 0);1971const uint32_t sector_size = V3D_CHANNELS * sizeof(uint32_t) * 8;1972return devinfo->vpm_size / sector_size;1973}19741975/* Computes various parameters affecting VPM memory configuration for programs1976* involving geometry shaders to ensure the program fits in memory and honors1977* requirements described in section "VPM usage" of the programming manual.1978*/1979static bool1980compute_vpm_config_gs(struct v3d_device_info *devinfo,1981struct v3d_vs_prog_data *vs,1982struct v3d_gs_prog_data *gs,1983struct vpm_config *vpm_cfg_out)1984{1985const uint32_t A = vs->separate_segments ? 
1 : 0;1986const uint32_t Ad = vs->vpm_input_size;1987const uint32_t Vd = vs->vpm_output_size;19881989const uint32_t vpm_size = compute_vpm_size_in_sectors(devinfo);19901991/* Try to fit program into our VPM memory budget by adjusting1992* configurable parameters iteratively. We do this in two phases:1993* the first phase tries to fit the program into the total available1994* VPM memory. If we succeed at that, then the second phase attempts1995* to fit the program into half of that budget so we can run bin and1996* render programs in parallel.1997*/1998struct vpm_config vpm_cfg[2];1999struct vpm_config *final_vpm_cfg = NULL;2000uint32_t phase = 0;20012002vpm_cfg[phase].As = 1;2003vpm_cfg[phase].Gs = 1;2004vpm_cfg[phase].Gd = gs->vpm_output_size;2005vpm_cfg[phase].gs_width = gs->simd_width;20062007/* While there is a requirement that Vc >= [Vn / 16], this is2008* always the case when tessellation is not present because in that2009* case Vn can only be 6 at most (when input primitive is triangles2010* with adjacency).2011*2012* We always choose Vc=2. We can't go lower than this due to GFXH-1744,2013* and Broadcom has not found it worth it to increase it beyond this2014* in general. Increasing Vc also increases VPM memory pressure which2015* can turn up being detrimental for performance in some scenarios.2016*/2017vpm_cfg[phase].Vc = 2;20182019/* Gv is a constraint on the hardware to not exceed the2020* specified number of vertex segments per GS batch. If adding a2021* new primitive to a GS batch would result in a range of more2022* than Gv vertex segments being referenced by the batch, then2023* the hardware will flush the batch and start a new one. This2024* means that we can choose any value we want, we just need to2025* be aware that larger values improve GS batch utilization2026* at the expense of more VPM memory pressure (which can affect2027* other performance aspects, such as GS dispatch width).2028* We start with the largest value, and will reduce it if we2029* find that total memory pressure is too high.2030*/2031vpm_cfg[phase].Gv = 3;2032do {2033/* When GS is present in absence of TES, then we need to satisfy2034* that Ve >= Gv. We go with the smallest value of Ve to avoid2035* increasing memory pressure.2036*/2037vpm_cfg[phase].Ve = vpm_cfg[phase].Gv;20382039uint32_t vpm_sectors =2040A * vpm_cfg[phase].As * Ad +2041(vpm_cfg[phase].Vc + vpm_cfg[phase].Ve) * Vd +2042vpm_cfg[phase].Gs * vpm_cfg[phase].Gd;20432044/* Ideally we want to use no more than half of the available2045* memory so we can execute a bin and render program in parallel2046* without stalls. If we achieved that then we are done.2047*/2048if (vpm_sectors <= vpm_size / 2) {2049final_vpm_cfg = &vpm_cfg[phase];2050break;2051}20522053/* At the very least, we should not allocate more than the2054* total available VPM memory. 
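 * (Worked example, assuming a hypothetical 16 KB VPM: a sector is
 * V3D_CHANNELS * 4 bytes * 8 rows = 512 bytes, so vpm_size would be
 * 32 sectors, with 16 sectors as the half-memory target above.)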
If we have a configuration that2055* succeeds at this we save it and continue to see if we can2056* meet the half-memory-use criteria too.2057*/2058if (phase == 0 && vpm_sectors <= vpm_size) {2059vpm_cfg[1] = vpm_cfg[0];2060phase = 1;2061}20622063/* Try lowering Gv */2064if (vpm_cfg[phase].Gv > 0) {2065vpm_cfg[phase].Gv--;2066continue;2067}20682069/* Try lowering GS dispatch width */2070if (vpm_cfg[phase].gs_width > 1) {2071do {2072vpm_cfg[phase].gs_width >>= 1;2073vpm_cfg[phase].Gd = align(vpm_cfg[phase].Gd, 2) / 2;2074} while (vpm_cfg[phase].gs_width == 2);20752076/* Reset Gv to max after dropping dispatch width */2077vpm_cfg[phase].Gv = 3;2078continue;2079}20802081/* We ran out of options to reduce memory pressure. If we2082* are at phase 1 we have at least a valid configuration, so we2083* we use that.2084*/2085if (phase == 1)2086final_vpm_cfg = &vpm_cfg[0];2087break;2088} while (true);20892090if (!final_vpm_cfg)2091return false;20922093assert(final_vpm_cfg);2094assert(final_vpm_cfg->Gd <= 16);2095assert(final_vpm_cfg->Gv < 4);2096assert(final_vpm_cfg->Ve < 4);2097assert(final_vpm_cfg->Vc >= 2 && final_vpm_cfg->Vc <= 4);2098assert(final_vpm_cfg->gs_width == 1 ||2099final_vpm_cfg->gs_width == 4 ||2100final_vpm_cfg->gs_width == 8 ||2101final_vpm_cfg->gs_width == 16);21022103*vpm_cfg_out = *final_vpm_cfg;2104return true;2105}21062107bool2108v3d_compute_vpm_config(struct v3d_device_info *devinfo,2109struct v3d_vs_prog_data *vs_bin,2110struct v3d_vs_prog_data *vs,2111struct v3d_gs_prog_data *gs_bin,2112struct v3d_gs_prog_data *gs,2113struct vpm_config *vpm_cfg_bin,2114struct vpm_config *vpm_cfg)2115{2116assert(vs && vs_bin);2117assert((gs != NULL) == (gs_bin != NULL));21182119if (!gs) {2120vpm_cfg_bin->As = 1;2121vpm_cfg_bin->Ve = 0;2122vpm_cfg_bin->Vc = vs_bin->vcm_cache_size;21232124vpm_cfg->As = 1;2125vpm_cfg->Ve = 0;2126vpm_cfg->Vc = vs->vcm_cache_size;2127} else {2128if (!compute_vpm_config_gs(devinfo, vs_bin, gs_bin, vpm_cfg_bin))2129return false;21302131if (!compute_vpm_config_gs(devinfo, vs, gs, vpm_cfg))2132return false;2133}21342135return true;2136}213721382139