Path: blob/21.2-virgl/src/compiler/nir/nir_lower_bit_size.c
4546 views
/*1* Copyright © 2018 Intel Corporation2*3* Permission is hereby granted, free of charge, to any person obtaining a4* copy of this software and associated documentation files (the "Software"),5* to deal in the Software without restriction, including without limitation6* the rights to use, copy, modify, merge, publish, distribute, sublicense,7* and/or sell copies of the Software, and to permit persons to whom the8* Software is furnished to do so, subject to the following conditions:9*10* The above copyright notice and this permission notice (including the next11* paragraph) shall be included in all copies or substantial portions of the12* Software.13*14* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR15* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,16* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL17* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER18* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING19* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS20* IN THE SOFTWARE.21*/2223#include "nir_builder.h"2425/**26* Some ALU operations may not be supported in hardware in specific bit-sizes.27* This pass allows implementations to selectively lower such operations to28* a bit-size that is supported natively and then converts the result back to29* the original bit-size.30*/3132static nir_ssa_def *convert_to_bit_size(nir_builder *bld, nir_ssa_def *src,33nir_alu_type type, unsigned bit_size)34{35/* create b2i32(a) instead of i2i32(b2i8(a))/i2i32(b2i16(a)) */36nir_alu_instr *alu = nir_src_as_alu_instr(nir_src_for_ssa(src));37if ((type & (nir_type_uint | nir_type_int)) && bit_size == 32 &&38alu && (alu->op == nir_op_b2i8 || alu->op == nir_op_b2i16)) {39nir_alu_instr *instr = nir_alu_instr_create(bld->shader, nir_op_b2i32);40nir_alu_src_copy(&instr->src[0], &alu->src[0], instr);41return nir_builder_alu_instr_finish_and_insert(bld, instr);42}4344return nir_convert_to_bit_size(bld, src, type, bit_size);45}4647static void48lower_alu_instr(nir_builder *bld, nir_alu_instr *alu, unsigned bit_size)49{50const nir_op op = alu->op;51unsigned dst_bit_size = alu->dest.dest.ssa.bit_size;5253bld->cursor = nir_before_instr(&alu->instr);5455/* Convert each source to the requested bit-size */56nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS] = { NULL };57for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {58nir_ssa_def *src = nir_ssa_for_alu_src(bld, alu, i);5960nir_alu_type type = nir_op_infos[op].input_types[i];61if (nir_alu_type_get_type_size(type) == 0)62src = convert_to_bit_size(bld, src, type, bit_size);6364if (i == 1 && (op == nir_op_ishl || op == nir_op_ishr || op == nir_op_ushr)) {65assert(util_is_power_of_two_nonzero(dst_bit_size));66src = nir_iand(bld, src, nir_imm_int(bld, dst_bit_size - 1));67}6869srcs[i] = src;70}7172/* Emit the lowered ALU instruction */73nir_ssa_def *lowered_dst = NULL;74if (op == nir_op_imul_high || op == nir_op_umul_high) {75assert(dst_bit_size * 2 <= bit_size);76nir_ssa_def *lowered_dst = nir_imul(bld, srcs[0], srcs[1]);77if (nir_op_infos[op].output_type & nir_type_uint)78lowered_dst = nir_ushr_imm(bld, lowered_dst, dst_bit_size);79else80lowered_dst = nir_ishr_imm(bld, lowered_dst, dst_bit_size);81} else {82lowered_dst = nir_build_alu_src_arr(bld, op, srcs);83}848586/* Convert result back to the original bit-size */87if (nir_alu_type_get_type_size(nir_op_infos[op].output_type) == 0 &&88dst_bit_size != bit_size) {89nir_alu_type type = nir_op_infos[op].output_type;90nir_ssa_def *dst = nir_convert_to_bit_size(bld, lowered_dst, type, dst_bit_size);91nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, dst);92} else {93nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, lowered_dst);94}95}9697static void98lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,99unsigned bit_size)100{101switch (intrin->intrinsic) {102case nir_intrinsic_read_invocation:103case nir_intrinsic_read_first_invocation:104case nir_intrinsic_vote_feq:105case nir_intrinsic_vote_ieq:106case nir_intrinsic_shuffle:107case nir_intrinsic_shuffle_xor:108case nir_intrinsic_shuffle_up:109case nir_intrinsic_shuffle_down:110case nir_intrinsic_quad_broadcast:111case nir_intrinsic_quad_swap_horizontal:112case nir_intrinsic_quad_swap_vertical:113case nir_intrinsic_quad_swap_diagonal:114case nir_intrinsic_reduce:115case nir_intrinsic_inclusive_scan:116case nir_intrinsic_exclusive_scan: {117assert(intrin->src[0].is_ssa && intrin->dest.is_ssa);118const unsigned old_bit_size = intrin->dest.ssa.bit_size;119assert(old_bit_size < bit_size);120121nir_alu_type type = nir_type_uint;122if (nir_intrinsic_has_reduction_op(intrin))123type = nir_op_infos[nir_intrinsic_reduction_op(intrin)].input_types[0];124else if (intrin->intrinsic == nir_intrinsic_vote_feq)125type = nir_type_float;126127b->cursor = nir_before_instr(&intrin->instr);128nir_intrinsic_instr *new_intrin =129nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intrin->instr));130131nir_ssa_def *new_src = nir_convert_to_bit_size(b, intrin->src[0].ssa,132type, bit_size);133new_intrin->src[0] = nir_src_for_ssa(new_src);134135if (intrin->intrinsic == nir_intrinsic_vote_feq ||136intrin->intrinsic == nir_intrinsic_vote_ieq) {137/* These return a Boolean; it's always 1-bit */138assert(new_intrin->dest.ssa.bit_size == 1);139} else {140/* These return the same bit size as the source; we need to adjust141* the size and then we'll have to emit a down-cast.142*/143assert(intrin->src[0].ssa->bit_size == intrin->dest.ssa.bit_size);144new_intrin->dest.ssa.bit_size = bit_size;145}146147nir_builder_instr_insert(b, &new_intrin->instr);148149nir_ssa_def *res = &new_intrin->dest.ssa;150if (intrin->intrinsic == nir_intrinsic_exclusive_scan) {151/* For exclusive scan, we have to be careful because the identity152* value for the higher bit size may get added into the mix by153* disabled channels. For some cases (imin/imax in particular),154* this value won't convert to the right identity value when we155* down-cast so we have to clamp it.156*/157switch (nir_intrinsic_reduction_op(intrin)) {158case nir_op_imin: {159int64_t int_max = (1ull << (old_bit_size - 1)) - 1;160res = nir_imin(b, res, nir_imm_intN_t(b, int_max, bit_size));161break;162}163case nir_op_imax: {164int64_t int_min = -(int64_t)(1ull << (old_bit_size - 1));165res = nir_imax(b, res, nir_imm_intN_t(b, int_min, bit_size));166break;167}168default:169break;170}171}172173if (intrin->intrinsic != nir_intrinsic_vote_feq &&174intrin->intrinsic != nir_intrinsic_vote_ieq)175res = nir_u2u(b, res, old_bit_size);176177nir_ssa_def_rewrite_uses(&intrin->dest.ssa, res);178break;179}180181default:182unreachable("Unsupported instruction");183}184}185186static void187lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size,188nir_phi_instr *last_phi)189{190assert(phi->dest.is_ssa);191unsigned old_bit_size = phi->dest.ssa.bit_size;192assert(old_bit_size < bit_size);193194nir_foreach_phi_src(src, phi) {195b->cursor = nir_after_block_before_jump(src->pred);196assert(src->src.is_ssa);197nir_ssa_def *new_src = nir_u2u(b, src->src.ssa, bit_size);198199nir_instr_rewrite_src(&phi->instr, &src->src, nir_src_for_ssa(new_src));200}201202phi->dest.ssa.bit_size = bit_size;203204b->cursor = nir_after_instr(&last_phi->instr);205206nir_ssa_def *new_dest = nir_u2u(b, &phi->dest.ssa, old_bit_size);207nir_ssa_def_rewrite_uses_after(&phi->dest.ssa, new_dest,208new_dest->parent_instr);209}210211static bool212lower_impl(nir_function_impl *impl,213nir_lower_bit_size_callback callback,214void *callback_data)215{216nir_builder b;217nir_builder_init(&b, impl);218bool progress = false;219220nir_foreach_block(block, impl) {221/* Stash this so we can rewrite phi destinations quickly. */222nir_phi_instr *last_phi = nir_block_last_phi_instr(block);223224nir_foreach_instr_safe(instr, block) {225unsigned lower_bit_size = callback(instr, callback_data);226if (lower_bit_size == 0)227continue;228229switch (instr->type) {230case nir_instr_type_alu:231lower_alu_instr(&b, nir_instr_as_alu(instr), lower_bit_size);232break;233234case nir_instr_type_intrinsic:235lower_intrinsic_instr(&b, nir_instr_as_intrinsic(instr),236lower_bit_size);237break;238239case nir_instr_type_phi:240lower_phi_instr(&b, nir_instr_as_phi(instr),241lower_bit_size, last_phi);242break;243244default:245unreachable("Unsupported instruction type");246}247progress = true;248}249}250251if (progress) {252nir_metadata_preserve(impl, nir_metadata_block_index |253nir_metadata_dominance);254} else {255nir_metadata_preserve(impl, nir_metadata_all);256}257258return progress;259}260261bool262nir_lower_bit_size(nir_shader *shader,263nir_lower_bit_size_callback callback,264void *callback_data)265{266bool progress = false;267268nir_foreach_function(function, shader) {269if (function->impl)270progress |= lower_impl(function->impl, callback, callback_data);271}272273return progress;274}275276static void277split_phi(nir_builder *b, nir_phi_instr *phi)278{279nir_phi_instr *lowered[2] = {280nir_phi_instr_create(b->shader),281nir_phi_instr_create(b->shader)282};283int num_components = phi->dest.ssa.num_components;284assert(phi->dest.ssa.bit_size == 64);285286nir_foreach_phi_src(src, phi) {287assert(num_components == src->src.ssa->num_components);288289b->cursor = nir_before_src(&src->src, false);290291nir_ssa_def *x = nir_unpack_64_2x32_split_x(b, src->src.ssa);292nir_ssa_def *y = nir_unpack_64_2x32_split_y(b, src->src.ssa);293294nir_phi_src *xsrc = rzalloc(lowered[0], nir_phi_src);295xsrc->pred = src->pred;296xsrc->src = nir_src_for_ssa(x);297exec_list_push_tail(&lowered[0]->srcs, &xsrc->node);298299nir_phi_src *ysrc = rzalloc(lowered[1], nir_phi_src);300ysrc->pred = src->pred;301ysrc->src = nir_src_for_ssa(y);302exec_list_push_tail(&lowered[1]->srcs, &ysrc->node);303}304305nir_ssa_dest_init(&lowered[0]->instr, &lowered[0]->dest,306num_components, 32, NULL);307nir_ssa_dest_init(&lowered[1]->instr, &lowered[1]->dest,308num_components, 32, NULL);309310b->cursor = nir_before_instr(&phi->instr);311nir_builder_instr_insert(b, &lowered[0]->instr);312nir_builder_instr_insert(b, &lowered[1]->instr);313314b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor));315nir_ssa_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->dest.ssa, &lowered[1]->dest.ssa);316nir_ssa_def_rewrite_uses(&phi->dest.ssa, merged);317nir_instr_remove(&phi->instr);318}319320static bool321lower_64bit_phi_impl(nir_function_impl *impl)322{323nir_builder b;324nir_builder_init(&b, impl);325bool progress = false;326327nir_foreach_block(block, impl) {328nir_foreach_instr_safe(instr, block) {329if (instr->type != nir_instr_type_phi)330break;331332nir_phi_instr *phi = nir_instr_as_phi(instr);333assert(phi->dest.is_ssa);334335if (phi->dest.ssa.bit_size <= 32)336continue;337338split_phi(&b, phi);339progress = true;340}341}342343if (progress) {344nir_metadata_preserve(impl, nir_metadata_block_index |345nir_metadata_dominance);346} else {347nir_metadata_preserve(impl, nir_metadata_all);348}349350return progress;351}352353bool354nir_lower_64bit_phis(nir_shader *shader)355{356bool progress = false;357358nir_foreach_function(function, shader) {359if (function->impl)360progress |= lower_64bit_phi_impl(function->impl);361}362363return progress;364}365366367