Path: blob/21.2-virgl/src/compiler/nir/nir_lower_alu.c
/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

/** nir_lower_alu.c
 *
 * NIR's home for miscellaneous ALU operation lowering implementations.
 *
 * Most NIR ALU lowering occurs in nir_opt_algebraic.py, since it's generally
 * easy to write them there.  However, if terms appear multiple times in the
 * lowered code, it can get very verbose and cause a lot of work for CSE, so
 * it may end up being easier to write out in C code.
 *
 * The shader must be in SSA for this pass.
 */

#define LOWER_MUL_HIGH (1 << 0)

static bool
lower_alu_instr(nir_alu_instr *instr, nir_builder *b)
{
   nir_ssa_def *lowered = NULL;

   assert(instr->dest.dest.is_ssa);

   b->cursor = nir_before_instr(&instr->instr);
   b->exact = instr->exact;

   switch (instr->op) {
   case nir_op_bitfield_reverse:
      if (b->shader->options->lower_bitfield_reverse) {
         /* For more details, see:
          *
          * http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel
          */
         nir_ssa_def *c1 = nir_imm_int(b, 1);
         nir_ssa_def *c2 = nir_imm_int(b, 2);
         nir_ssa_def *c4 = nir_imm_int(b, 4);
         nir_ssa_def *c8 = nir_imm_int(b, 8);
         nir_ssa_def *c16 = nir_imm_int(b, 16);
         nir_ssa_def *c33333333 = nir_imm_int(b, 0x33333333);
         nir_ssa_def *c55555555 = nir_imm_int(b, 0x55555555);
         nir_ssa_def *c0f0f0f0f = nir_imm_int(b, 0x0f0f0f0f);
         nir_ssa_def *c00ff00ff = nir_imm_int(b, 0x00ff00ff);

         lowered = nir_ssa_for_alu_src(b, instr, 0);

         /* Swap odd and even bits. */
         lowered = nir_ior(b,
                           nir_iand(b, nir_ushr(b, lowered, c1), c55555555),
                           nir_ishl(b, nir_iand(b, lowered, c55555555), c1));

         /* Swap consecutive pairs. */
         lowered = nir_ior(b,
                           nir_iand(b, nir_ushr(b, lowered, c2), c33333333),
                           nir_ishl(b, nir_iand(b, lowered, c33333333), c2));

         /* Swap nibbles. */
         lowered = nir_ior(b,
                           nir_iand(b, nir_ushr(b, lowered, c4), c0f0f0f0f),
                           nir_ishl(b, nir_iand(b, lowered, c0f0f0f0f), c4));
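
         /* Illustrative trace (example values are ours, not from the
          * original source): the three swaps above bit-reverse each byte in
          * place, e.g. 0x12345678 becomes 0x482c6a1e.  The byte and
          * half-word swaps below then reverse the byte order, yielding the
          * fully reversed 0x1e6a2c48.
          */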

         /* Swap bytes. */
         lowered = nir_ior(b,
                           nir_iand(b, nir_ushr(b, lowered, c8), c00ff00ff),
                           nir_ishl(b, nir_iand(b, lowered, c00ff00ff), c8));

         lowered = nir_ior(b,
                           nir_ushr(b, lowered, c16),
                           nir_ishl(b, lowered, c16));
      }
      break;

   case nir_op_bit_count:
      if (b->shader->options->lower_bit_count) {
         /* For more details, see:
          *
          * http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
          */
         nir_ssa_def *c1 = nir_imm_int(b, 1);
         nir_ssa_def *c2 = nir_imm_int(b, 2);
         nir_ssa_def *c4 = nir_imm_int(b, 4);
         nir_ssa_def *c24 = nir_imm_int(b, 24);
         nir_ssa_def *c33333333 = nir_imm_int(b, 0x33333333);
         nir_ssa_def *c55555555 = nir_imm_int(b, 0x55555555);
         nir_ssa_def *c0f0f0f0f = nir_imm_int(b, 0x0f0f0f0f);
         nir_ssa_def *c01010101 = nir_imm_int(b, 0x01010101);

         lowered = nir_ssa_for_alu_src(b, instr, 0);

         lowered = nir_isub(b, lowered,
                            nir_iand(b, nir_ushr(b, lowered, c1), c55555555));

         lowered = nir_iadd(b,
                            nir_iand(b, lowered, c33333333),
                            nir_iand(b, nir_ushr(b, lowered, c2), c33333333));
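
         /* At this point every nibble of "lowered" holds the population
          * count of the corresponding source nibble.  Worked example (values
          * are ours, not from the original source): for input 0xffff0000 the
          * masked add below produces the per-byte counts 0x08080000, the
          * multiply by 0x01010101 accumulates them into the top byte
          * (0x10 == 16), and the shift by 24 extracts that byte as the
          * result.
          */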

         lowered = nir_ushr(b,
                            nir_imul(b,
                                     nir_iand(b,
                                              nir_iadd(b,
                                                       lowered,
                                                       nir_ushr(b, lowered, c4)),
                                              c0f0f0f0f),
                                     c01010101),
                            c24);
      }
      break;

   case nir_op_imul_high:
   case nir_op_umul_high:
      if (b->shader->options->lower_mul_high) {
         nir_ssa_def *src0 = nir_ssa_for_alu_src(b, instr, 0);
         nir_ssa_def *src1 = nir_ssa_for_alu_src(b, instr, 1);
         if (src0->bit_size < 32) {
            /* Just do the math in 32-bit space and shift the result. */
            nir_alu_type base_type = nir_op_infos[instr->op].output_type;

            nir_op upcast_op =
               nir_type_conversion_op(base_type | src0->bit_size,
                                      base_type | 32,
                                      nir_rounding_mode_undef);
            nir_op downcast_op =
               nir_type_conversion_op(base_type | 32,
                                      base_type | src0->bit_size,
                                      nir_rounding_mode_undef);

            nir_ssa_def *src0_32 = nir_build_alu(b, upcast_op, src0, NULL, NULL, NULL);
            nir_ssa_def *src1_32 = nir_build_alu(b, upcast_op, src1, NULL, NULL, NULL);
            nir_ssa_def *dest_32 = nir_imul(b, src0_32, src1_32);
            nir_ssa_def *dest_shifted = nir_ishr(b, dest_32,
                                                 nir_imm_int(b, src0->bit_size));
            lowered = nir_build_alu(b, downcast_op, dest_shifted, NULL, NULL, NULL);
         } else {
            nir_ssa_def *c1 = nir_imm_intN_t(b, 1, src0->bit_size);
            nir_ssa_def *cshift = nir_imm_int(b, src0->bit_size / 2);
            nir_ssa_def *cmask = nir_imm_intN_t(b, (1ull << (src0->bit_size / 2)) - 1,
                                                src0->bit_size);
            nir_ssa_def *different_signs = NULL;
            if (instr->op == nir_op_imul_high) {
               nir_ssa_def *c0 = nir_imm_intN_t(b, 0, src0->bit_size);
               different_signs = nir_ixor(b,
                                          nir_ilt(b, src0, c0),
                                          nir_ilt(b, src1, c0));
               src0 = nir_iabs(b, src0);
               src1 = nir_iabs(b, src1);
            }

            /*   ABCD
             *  * EFGH
             *  ======
             *  (GH * CD) + (GH * AB) << 16 + (EF * CD) << 16 + (EF * AB) << 32
             *
             * Start by splitting into the 4 multiplies.
             */
            nir_ssa_def *src0l = nir_iand(b, src0, cmask);
            nir_ssa_def *src1l = nir_iand(b, src1, cmask);
            nir_ssa_def *src0h = nir_ushr(b, src0, cshift);
            nir_ssa_def *src1h = nir_ushr(b, src1, cshift);

            nir_ssa_def *lo = nir_imul(b, src0l, src1l);
            nir_ssa_def *m1 = nir_imul(b, src0l, src1h);
            nir_ssa_def *m2 = nir_imul(b, src0h, src1l);
            nir_ssa_def *hi = nir_imul(b, src0h, src1h);

            nir_ssa_def *tmp;

            tmp = nir_ishl(b, m1, cshift);
            hi = nir_iadd(b, hi, nir_iand(b, nir_uadd_carry(b, lo, tmp), c1));
            lo = nir_iadd(b, lo, tmp);
            hi = nir_iadd(b, hi, nir_ushr(b, m1, cshift));

            tmp = nir_ishl(b, m2, cshift);
            hi = nir_iadd(b, hi, nir_iand(b, nir_uadd_carry(b, lo, tmp), c1));
            lo = nir_iadd(b, lo, tmp);
            hi = nir_iadd(b, hi, nir_ushr(b, m2, cshift));

            if (instr->op == nir_op_imul_high) {
               /* For channels where different_signs is set we have to
                * perform a 64-bit negation.  This is *not* the same as just
                * negating the high 32 bits.  Consider -3 * 2.  The high 32
                * bits are 0, but the desired result is -1, not -0!  Recall
                * -x == ~x + 1.
                */
               hi = nir_bcsel(b, different_signs,
                              nir_iadd(b,
                                       nir_inot(b, hi),
                                       nir_iand(b,
                                                nir_uadd_carry(b,
                                                               nir_inot(b, lo),
                                                               c1),
                                                nir_imm_intN_t(b, 1, src0->bit_size))),
                              hi);
            }

            lowered = hi;
         }
      }
      break;

   default:
      break;
   }

   if (lowered) {
      nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, lowered);
      nir_instr_remove(&instr->instr);
      return true;
   } else {
      return false;
   }
}

bool
nir_lower_alu(nir_shader *shader)
{
   bool progress = false;

   if (!shader->options->lower_bitfield_reverse &&
       !shader->options->lower_bit_count &&
       !shader->options->lower_mul_high)
      return false;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_builder builder;
         nir_builder_init(&builder, function->impl);

         nir_foreach_block(block, function->impl) {
            nir_foreach_instr_safe(instr, block) {
               if (instr->type == nir_instr_type_alu) {
                  progress = lower_alu_instr(nir_instr_as_alu(instr),
                                             &builder) || progress;
               }
            }
         }

         if (progress) {
            nir_metadata_preserve(function->impl,
                                  nir_metadata_block_index |
                                  nir_metadata_dominance);
         }
      }
   }

   return progress;
}
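
/* Usage sketch (ours, not part of the original file): a driver opts into
 * these lowerings through fields in its nir_shader_compiler_options and then
 * runs the pass in its optimization loop, roughly:
 *
 *    static const nir_shader_compiler_options options = {
 *       .lower_bitfield_reverse = true,
 *       .lower_bit_count = true,
 *       .lower_mul_high = true,
 *    };
 *
 *    bool progress = false;
 *    NIR_PASS(progress, shader, nir_lower_alu);
 */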