Path: blob/21.2-virgl/src/gallium/drivers/etnaviv/etnaviv_compiler_nir_ra.c
/*
 * Copyright (c) 2019 Zodiac Inflight Innovations
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jonathan Marek <[email protected]>
 */

#include "etnaviv_compiler_nir.h"
#include "util/register_allocate.h"

/* use "r63.z" for depth reg, it will wrap around to r0.z by reg_get_base
 * (fs registers are offset by 1 to avoid reserving r0)
 */
#define REG_FRAG_DEPTH ((ETNA_MAX_TEMPS - 1) * NUM_REG_TYPES + REG_TYPE_VIRT_SCALAR_Z)

/* precomputed by register_allocate */
static unsigned int *q_values[] = {
   (unsigned int[]) {1, 2, 3, 4, 2, 2, 3, },
   (unsigned int[]) {3, 5, 6, 6, 5, 5, 6, },
   (unsigned int[]) {3, 4, 4, 4, 4, 4, 4, },
   (unsigned int[]) {1, 1, 1, 1, 1, 1, 1, },
   (unsigned int[]) {1, 2, 2, 2, 1, 2, 2, },
   (unsigned int[]) {2, 3, 3, 3, 2, 3, 3, },
   (unsigned int[]) {2, 2, 2, 2, 2, 2, 2, },
};
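
/* A sketch of how the indices below fit together: each hardware temp rN is
 * expanded into NUM_REG_TYPES consecutive "virtual" registers, so a virtual
 * register index is temp * NUM_REG_TYPES + reg_type, covering the full VEC4
 * view of the temp plus its partial vec3/vec2/scalar views. The q_values
 * table above is the per-class matrix handed to ra_set_finalize() further
 * down; roughly, it bounds how many registers of one class a single register
 * of another class can conflict with, which the allocator uses for its
 * conservative colorability test.
 */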

static inline int reg_get_class(int virt_reg)
{
   switch (reg_get_type(virt_reg)) {
   case REG_TYPE_VEC4:
      return REG_CLASS_VEC4;
   case REG_TYPE_VIRT_VEC3_XYZ:
   case REG_TYPE_VIRT_VEC3_XYW:
   case REG_TYPE_VIRT_VEC3_XZW:
   case REG_TYPE_VIRT_VEC3_YZW:
      return REG_CLASS_VIRT_VEC3;
   case REG_TYPE_VIRT_VEC2_XY:
   case REG_TYPE_VIRT_VEC2_XZ:
   case REG_TYPE_VIRT_VEC2_XW:
   case REG_TYPE_VIRT_VEC2_YZ:
   case REG_TYPE_VIRT_VEC2_YW:
   case REG_TYPE_VIRT_VEC2_ZW:
      return REG_CLASS_VIRT_VEC2;
   case REG_TYPE_VIRT_SCALAR_X:
   case REG_TYPE_VIRT_SCALAR_Y:
   case REG_TYPE_VIRT_SCALAR_Z:
   case REG_TYPE_VIRT_SCALAR_W:
      return REG_CLASS_VIRT_SCALAR;
   case REG_TYPE_VIRT_VEC2T_XY:
   case REG_TYPE_VIRT_VEC2T_ZW:
      return REG_CLASS_VIRT_VEC2T;
   case REG_TYPE_VIRT_VEC2C_XY:
   case REG_TYPE_VIRT_VEC2C_YZ:
   case REG_TYPE_VIRT_VEC2C_ZW:
      return REG_CLASS_VIRT_VEC2C;
   case REG_TYPE_VIRT_VEC3C_XYZ:
   case REG_TYPE_VIRT_VEC3C_YZW:
      return REG_CLASS_VIRT_VEC3C;
   }

   assert(false);
   return 0;
}

struct ra_regs *
etna_ra_setup(void *mem_ctx)
{
   struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, ETNA_MAX_TEMPS *
                                                    NUM_REG_TYPES, false);

   /* classes are always created from index 0, so they are equal to the class
    * enum, which represents a register with (c+1) components
    */
   struct ra_class *classes[NUM_REG_CLASSES];
   for (int c = 0; c < NUM_REG_CLASSES; c++)
      classes[c] = ra_alloc_reg_class(regs);
   /* add each register of each class */
   for (int r = 0; r < NUM_REG_TYPES * ETNA_MAX_TEMPS; r++)
      ra_class_add_reg(classes[reg_get_class(r)], r);
   /* set conflicts */
   for (int r = 0; r < ETNA_MAX_TEMPS; r++) {
      for (int i = 0; i < NUM_REG_TYPES; i++) {
         for (int j = 0; j < i; j++) {
            if (reg_writemask[i] & reg_writemask[j]) {
               ra_add_reg_conflict(regs, NUM_REG_TYPES * r + i,
                                         NUM_REG_TYPES * r + j);
            }
         }
      }
   }
   ra_set_finalize(regs, q_values);

   return regs;
}
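
/* With the conflicts above, two views of the same temp clash only when their
 * write masks overlap: the full VEC4 view conflicts with every partial view
 * of that temp, while disjoint views (e.g. VIRT_VEC2_XY vs. VIRT_VEC2_ZW)
 * should be able to share a temp. Views of different temps never conflict
 * here; cross-node interference is added from live ranges in etna_ra_assign()
 * below.
 */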

void
etna_ra_assign(struct etna_compile *c, nir_shader *shader)
{
   struct etna_compiler *compiler = c->variant->shader->compiler;
   struct ra_regs *regs = compiler->regs;

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   /* liveness and interference */

   nir_index_blocks(impl);
   nir_index_ssa_defs(impl);
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         instr->pass_flags = 0;
   }

   /* this gives an approximation/upper limit on how many nodes are needed
    * (some ssa values do not represent an allocated register)
    */
   unsigned max_nodes = impl->ssa_alloc + impl->reg_alloc;
   unsigned *live_map = ralloc_array(NULL, unsigned, max_nodes);
   memset(live_map, 0xff, sizeof(unsigned) * max_nodes);
   struct live_def *defs = rzalloc_array(NULL, struct live_def, max_nodes);

   unsigned num_nodes = etna_live_defs(impl, defs, live_map);
   struct ra_graph *g = ra_alloc_interference_graph(regs, num_nodes);

   /* set classes from num_components */
   for (unsigned i = 0; i < num_nodes; i++) {
      nir_instr *instr = defs[i].instr;
      nir_dest *dest = defs[i].dest;
      unsigned comp = nir_dest_num_components(*dest) - 1;

      if (instr->type == nir_instr_type_alu &&
          c->specs->has_new_transcendentals) {
         switch (nir_instr_as_alu(instr)->op) {
         case nir_op_fdiv:
         case nir_op_flog2:
         case nir_op_fsin:
         case nir_op_fcos:
            assert(dest->is_ssa);
            comp = REG_CLASS_VIRT_VEC2T;
            break;
         default:
            break;
         }
      }

      if (instr->type == nir_instr_type_intrinsic) {
         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         /* can't have dst swizzle or sparse writemask on UBO loads */
         if (intr->intrinsic == nir_intrinsic_load_ubo) {
            assert(dest == &intr->dest);
            if (dest->ssa.num_components == 2)
               comp = REG_CLASS_VIRT_VEC2C;
            if (dest->ssa.num_components == 3)
               comp = REG_CLASS_VIRT_VEC3C;
         }
      }

      ra_set_node_class(g, i, ra_get_class_from_index(regs, comp));
   }

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_dest *dest = dest_for_instr(instr);
         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         unsigned reg;

         switch (intr->intrinsic) {
         case nir_intrinsic_store_deref: {
            /* don't want outputs to be swizzled
             * TODO: better would be to set the type to X/XY/XYZ/XYZW
             * TODO: what if fragcoord.z is read after writing fragdepth?
             */
            nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
            unsigned index = live_map[src_index(impl, &intr->src[1])];

            if (shader->info.stage == MESA_SHADER_FRAGMENT &&
                deref->var->data.location == FRAG_RESULT_DEPTH) {
               ra_set_node_reg(g, index, REG_FRAG_DEPTH);
            } else {
               ra_set_node_class(g, index, ra_get_class_from_index(regs, REG_CLASS_VEC4));
            }
         } continue;
         case nir_intrinsic_load_input:
            reg = nir_intrinsic_base(intr) * NUM_REG_TYPES + (unsigned[]) {
               REG_TYPE_VIRT_SCALAR_X,
               REG_TYPE_VIRT_VEC2_XY,
               REG_TYPE_VIRT_VEC3_XYZ,
               REG_TYPE_VEC4,
            }[nir_dest_num_components(*dest) - 1];
            break;
         case nir_intrinsic_load_instance_id:
            reg = c->variant->infile.num_reg * NUM_REG_TYPES + REG_TYPE_VIRT_SCALAR_Y;
            break;
         default:
            continue;
         }

         ra_set_node_reg(g, live_map[dest_index(impl, dest)], reg);
      }
   }

   /* add interference for intersecting live ranges */
   for (unsigned i = 0; i < num_nodes; i++) {
      assert(defs[i].live_start < defs[i].live_end);
      for (unsigned j = 0; j < i; j++) {
         if (defs[i].live_start >= defs[j].live_end ||
             defs[j].live_start >= defs[i].live_end)
            continue;
         ra_add_node_interference(g, i, j);
      }
   }

   ralloc_free(defs);

   /* Allocate registers */
   ASSERTED bool ok = ra_allocate(g);
   assert(ok);

   c->g = g;
   c->live_map = live_map;
   c->num_nodes = num_nodes;
}

unsigned
etna_ra_finish(struct etna_compile *c)
{
   /* TODO: better way to get number of registers used? */
   unsigned j = 0;
   for (unsigned i = 0; i < c->num_nodes; i++) {
      j = MAX2(j, reg_get_base(c, ra_get_node_reg(c->g, i)) + 1);
   }

   ralloc_free(c->g);
   ralloc_free(c->live_map);

   return j;
}
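
/* Rough call sequence, as suggested by the code above: etna_ra_setup()
 * presumably builds the shared register set once per compiler (etna_ra_assign()
 * reads it back from compiler->regs), etna_ra_assign() colors one shader's
 * interference graph, and etna_ra_finish() then frees the graph and live map
 * and reports how many hardware temps ended up being used.
 */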