Path: blob/21.2-virgl/src/panfrost/midgard/mir.c
/*
 * Copyright (C) 2019 Alyssa Rosenzweig <[email protected]>
 * Copyright (C) 2019-2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        mir_foreach_src(ins, i) {
                if (ins->src[i] == old)
                        ins->src[i] = new;
        }
}

void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        if (ins->dest == old)
                ins->dest = new;
}

static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old, unsigned new, unsigned *swizzle)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] != old) continue;

                ins->src[i] = new;
                mir_compose_swizzle(ins->swizzle[i], swizzle, ins->swizzle[i]);
        }
}

void
mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single(ins, old, new);
        }
}

void
mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned *swizzle)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single_swizzle(ins, old, new, swizzle);
        }
}

void
mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_dst_single(ins, old, new);
        }

        /* Implicitly written before the shader */
        if (ctx->blend_input == old)
                ctx->blend_input = new;

        if (ctx->blend_src1 == old)
                ctx->blend_src1 = new;
}

void
mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_rewrite_index_src(ctx, old, new);
        mir_rewrite_index_dst(ctx, old, new);
}

unsigned
mir_use_count(compiler_context *ctx, unsigned value)
{
        unsigned used_count = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (mir_has_arg(ins, value))
                        ++used_count;
        }

        if (ctx->blend_input == value)
                ++used_count;

        if (ctx->blend_src1 == value)
                ++used_count;

        return used_count;
}

/* Checks if a value is used only once (or totally dead), which is an important
 * heuristic to figure out if certain optimizations are Worth It (TM) */

bool
mir_single_use(compiler_context *ctx, unsigned value)
{
        /* We can replicate constants in places so who cares */
        if (value == SSA_FIXED_REGISTER(REGISTER_CONSTANT))
                return true;

        return mir_use_count(ctx, value) <= 1;
}

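/* Example (illustrative, derived from the checks below): with check_swizzle
 * set, "fmov r0.xy, -r1.yx" is nontrivial (a float negate, plus a .yx swizzle
 * that is not the identity on the masked components), while
 * "imov r0.xy, r1.xyzw" with matching source/dest types is trivial, since
 * only the masked components .xy are compared against the identity swizzle. */
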
bool
mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle)
{
        bool is_int = midgard_is_integer_op(ins->op);

        if (is_int) {
                if (ins->src_shift[i]) return true;
        } else {
                if (ins->src_neg[i]) return true;
                if (ins->src_abs[i]) return true;
        }

        if (ins->dest_type != ins->src_types[i]) return true;

        if (check_swizzle) {
                for (unsigned c = 0; c < 16; ++c) {
                        if (!(ins->mask & (1 << c))) continue;
                        if (ins->swizzle[i][c] != c) return true;
                }
        }

        return false;
}

bool
mir_nontrivial_outmod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->op);
        unsigned mod = ins->outmod;

        if (ins->dest_type != ins->src_types[1])
                return true;

        if (is_int)
                return mod != midgard_outmod_keeplo;
        else
                return mod != midgard_outmod_none;
}

/* 128 / sz = exp2(log2(128 / sz))
 *          = exp2(log2(128) - log2(sz))
 *          = exp2(7 - log2(sz))
 *          = 1 << (7 - log2(sz))
 */

static unsigned
mir_components_for_bits(unsigned bits)
{
        return 1 << (7 - util_logbase2(bits));
}

unsigned
mir_components_for_type(nir_alu_type T)
{
        unsigned sz = nir_alu_type_get_type_size(T);
        return mir_components_for_bits(sz);
}

uint16_t
mir_from_bytemask(uint16_t bytemask, unsigned bits)
{
        unsigned value = 0;
        unsigned count = bits / 8;

        for (unsigned c = 0, d = 0; c < 16; c += count, ++d) {
                bool a = (bytemask & (1 << c)) != 0;

                for (unsigned q = c; q < count; ++q)
                        assert(((bytemask & (1 << q)) != 0) == a);

                value |= (a << d);
        }

        return value;
}

/* Rounds up a bytemask to fill a given component count. Iterate each
 * component, and check if any bytes in the component are masked on */

uint16_t
mir_round_bytemask_up(uint16_t mask, unsigned bits)
{
        unsigned bytes = bits / 8;
        unsigned maxmask = mask_of(bytes);
        unsigned channels = mir_components_for_bits(bits);

        for (unsigned c = 0; c < channels; ++c) {
                unsigned submask = maxmask << (c * bytes);

                if (mask & submask)
                        mask |= submask;
        }

        return mask;
}

/* Grabs the per-byte mask of an instruction (as opposed to per-component) */

uint16_t
mir_bytemask(midgard_instruction *ins)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
        return pan_to_bytemask(type_size, ins->mask);
}

void
mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
        ins->mask = mir_from_bytemask(bytemask, type_size);
}

/* Checks if we should use an upper destination override, rather than the lower
 * one in the IR. Returns -1 if there is nothing to override, zero if the lower
 * half should be kept, and otherwise the number of components to shift by */

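/* For instance, a 16-bit destination type inside a 32-bit instruction gives
 * 128/16 = 8 components per vector, so the threshold is 4: a write mask of
 * 0xF0 (components 4-7) returns 4 and selects the upper override, while a
 * mask of 0x0F returns 0 and keeps the lower half. */
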
signed
mir_upper_override(midgard_instruction *ins, unsigned inst_size)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);

        /* If the sizes are the same, there's nothing to override */
        if (type_size == inst_size)
                return -1;

        /* There are 16 bytes per vector, so there are (16/bytes)
         * components per vector. So the magic half is half of
         * (16/bytes), which simplifies to 8/bytes = 8 / (bits / 8) = 64 / bits
         */

        unsigned threshold = mir_components_for_bits(type_size) >> 1;

        /* How many components did we shift over? */
        unsigned zeroes = __builtin_ctz(ins->mask);

        /* Did we hit the threshold? */
        return (zeroes >= threshold) ? threshold : 0;
}

/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 * fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2
 */

static uint16_t
mir_bytemask_of_read_components_single(unsigned *swizzle, unsigned inmask, unsigned bits)
{
        unsigned cmask = 0;

        for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                if (!(inmask & (1 << c))) continue;
                cmask |= (1 << swizzle[c]);
        }

        return pan_to_bytemask(bits, cmask);
}

uint16_t
mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i)
{
        /* Conditional branches read one 32-bit component = 4 bytes (TODO: multi branch??) */
        if (ins->compact_branch && ins->branch.conditional && (i == 0))
                return 0xF;

        /* ALU ops act componentwise so we need to pay attention to
         * their mask. Texture/ldst does not so we don't clamp source
         * readmasks based on the writemask */
        unsigned qmask = ~0;

        /* Handle dot products and things */
        if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
                unsigned props = alu_opcode_props[ins->op].props;

                unsigned channel_override = GET_CHANNEL_COUNT(props);

                if (channel_override)
                        qmask = mask_of(channel_override);
                else
                        qmask = ins->mask;
        }

        return mir_bytemask_of_read_components_single(ins->swizzle[i], qmask,
                        nir_alu_type_get_type_size(ins->src_types[i]));
}

uint16_t
mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node)
{
        uint16_t mask = 0;

        if (node == ~0)
                return 0;

        mir_foreach_src(ins, i) {
                if (ins->src[i] != node) continue;
                mask |= mir_bytemask_of_read_components_index(ins, i);
        }

        return mask;
}

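/* At the byte level (illustrative, for 32-bit sources): the Z/Y component
 * mask from the fadd example above is 0b0110, which pan_to_bytemask() widens
 * to bytes 4-11, i.e. a read bytemask of 0x0FF0 for r2. */
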
/* Register allocation occurs after instruction scheduling, which is fine until
 * we start needing to spill registers and therefore insert instructions into
 * an already-scheduled program. We don't have to be terribly efficient about
 * this, since spilling is already slow. So, semantically, we just need to
 * insert the instruction into a new bundle before/after the bundle of the
 * instruction in question */

static midgard_bundle
mir_bundle_for_op(compiler_context *ctx, midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);

        midgard_bundle bundle = {
                .tag = ins.type,
                .instruction_count = 1,
                .instructions = { u },
        };

        if (bundle.tag == TAG_ALU_4) {
                assert(OP_IS_MOVE(u->op));
                u->unit = UNIT_VMUL;

                size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
                bundle.padding = ~(bytes_emitted - 1) & 0xF;
                bundle.control = ins.type | u->unit;
        }

        return bundle;
}

static unsigned
mir_bundle_idx_for_ins(midgard_instruction *tag, midgard_block *block)
{
        midgard_bundle *bundles =
                (midgard_bundle *) block->bundles.data;

        size_t count = (block->bundles.size / sizeof(midgard_bundle));

        for (unsigned i = 0; i < count; ++i) {
                for (unsigned j = 0; j < bundles[i].instruction_count; ++j) {
                        if (bundles[i].instructions[j] == tag)
                                return i;
                }
        }

        mir_print_instruction(tag);
        unreachable("Instruction not scheduled in block");
}

void
mir_insert_instruction_before_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        unsigned before = mir_bundle_idx_for_ins(tag, block);
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + before + 1, bundles + before, (count - before) * sizeof(midgard_bundle));
        midgard_bundle *before_bundle = bundles + before + 1;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + before, &new, sizeof(new));

        list_addtail(&new.instructions[0]->link, &before_bundle->instructions[0]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;
}

void
mir_insert_instruction_after_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        /* We need to grow the bundles array to add our new bundle */
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        /* Find the bundle that we want to insert after */
        unsigned after = mir_bundle_idx_for_ins(tag, block);

        /* All the bundles after that one, we move ahead by one */
        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + after + 2, bundles + after + 1, (count - after - 1) * sizeof(midgard_bundle));
        midgard_bundle *after_bundle = bundles + after;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + after + 1, &new, sizeof(new));
        list_add(&new.instructions[0]->link, &after_bundle->instructions[after_bundle->instruction_count - 1]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;
}

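/* Sketch of typical use (the names here, including v_load_store_scratch, are
 * assumptions for illustration and are not defined in this file): spill
 * handling builds a scratch store and slots it in next to the scheduled
 * access, e.g.
 *
 *    midgard_instruction st = v_load_store_scratch(spill_node, spill_slot,
 *                                                  true, write_mask);
 *    mir_insert_instruction_after_scheduled(ctx, block, ins, st);
 */
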
/* Flip the first two arguments of a (binary) op. Currently ALU
 * only, no known uses for ldst/tex */

void
mir_flip(midgard_instruction *ins)
{
        unsigned temp = ins->src[0];
        ins->src[0] = ins->src[1];
        ins->src[1] = temp;

        assert(ins->type == TAG_ALU_4);

        temp = ins->src_types[0];
        ins->src_types[0] = ins->src_types[1];
        ins->src_types[1] = temp;

        temp = ins->src_abs[0];
        ins->src_abs[0] = ins->src_abs[1];
        ins->src_abs[1] = temp;

        temp = ins->src_neg[0];
        ins->src_neg[0] = ins->src_neg[1];
        ins->src_neg[1] = temp;

        temp = ins->src_invert[0];
        ins->src_invert[0] = ins->src_invert[1];
        ins->src_invert[1] = temp;

        unsigned temp_swizzle[16];
        memcpy(temp_swizzle, ins->swizzle[0], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[0], ins->swizzle[1], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[1], temp_swizzle, sizeof(ins->swizzle[0]));
}

/* Before squashing, calculate ctx->temp_count just by observing the MIR */

void
mir_compute_temp_count(compiler_context *ctx)
{
        if (ctx->temp_count)
                return;

        unsigned max_dest = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (ins->dest < SSA_FIXED_MINIMUM)
                        max_dest = MAX2(max_dest, ins->dest + 1);
        }

        ctx->temp_count = max_dest;
}