Path: blob/21.2-virgl/src/broadcom/compiler/v3d40_tex.c
/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"

static inline void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val)
{
        /* XXX perf: We should figure out how to merge ALU operations
         * producing the val with this MOV, when possible.
         */
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);
}

static inline void
vir_TMU_WRITE_or_count(struct v3d_compile *c,
                       enum v3d_qpu_waddr waddr,
                       struct qreg val,
                       uint32_t *tmu_writes)
{
        if (tmu_writes)
                (*tmu_writes)++;
        else
                vir_TMU_WRITE(c, waddr, val);
}

static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->uniform = vir_get_uniform_index(c, contents, data);
}

static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
        .per_pixel_mask_enable = true,
};

static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
        .op = V3D_TMU_OP_REGULAR,
};

/**
 * If 'tmu_writes' is not NULL, then it just counts required register writes,
 * otherwise, it emits the actual register writes.
 *
 * It is important to notice that emitting register writes for the current
 * TMU operation may trigger a TMU flush, since it is possible that any
 * of the inputs required for the register writes is the result of a pending
 * TMU operation. If that happens we need to make sure that it doesn't happen
 * in the middle of the TMU register writes for the current TMU operation,
 * which is why we always call ntq_get_src() even if we are only interested in
 * register write counts.
 */
static void
handle_tex_src(struct v3d_compile *c,
               nir_tex_instr *instr,
               unsigned src_idx,
               unsigned non_array_components,
               struct V3D41_TMU_CONFIG_PARAMETER_2 *p2_unpacked,
               struct qreg *s_out,
               unsigned *tmu_writes)
{
        /* Either we are calling this just to count required TMU writes, or we
         * are calling this to emit the actual TMU writes.
         */
        assert(tmu_writes || (s_out && p2_unpacked));

        struct qreg s;
        switch (instr->src[src_idx].src_type) {
        case nir_tex_src_coord:
                /* S triggers the lookup, so save it for the end. */
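                /* (Writing the S coordinate register is what actually fires
                 * the TMU operation, so it is deferred here and emitted by
                 * v3d40_vir_emit_tex() at the very end, as the "retiring"
                 * TMU write, once all configuration and other coordinate
                 * writes are in place.)
                 */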
                s = ntq_get_src(c, instr->src[src_idx].src, 0);
                if (tmu_writes)
                        (*tmu_writes)++;
                else
                        *s_out = s;

                if (non_array_components > 1) {
                        struct qreg src =
                                ntq_get_src(c, instr->src[src_idx].src, 1);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUT, src,
                                               tmu_writes);
                }

                if (non_array_components > 2) {
                        struct qreg src =
                                ntq_get_src(c, instr->src[src_idx].src, 2);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUR, src,
                                               tmu_writes);
                }

                if (instr->is_array) {
                        struct qreg src =
                                ntq_get_src(c, instr->src[src_idx].src,
                                            instr->coord_components - 1);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUI, src,
                                               tmu_writes);
                }
                break;

        case nir_tex_src_bias: {
                struct qreg src = ntq_get_src(c, instr->src[src_idx].src, 0);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUB, src, tmu_writes);
                break;
        }

        case nir_tex_src_lod: {
                struct qreg src = ntq_get_src(c, instr->src[src_idx].src, 0);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUB, src, tmu_writes);
                if (!tmu_writes) {
                        /* With texel fetch automatic LOD is already disabled,
                         * and disable_autolod must not be enabled. For
                         * non-cubes we can use the register TMUSLOD, that
                         * implicitly sets disable_autolod.
                         */
                        assert(p2_unpacked);
                        if (instr->op != nir_texop_txf &&
                            instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                                p2_unpacked->disable_autolod = true;
                        }
                }
                break;
        }

        case nir_tex_src_comparator: {
                struct qreg src = ntq_get_src(c, instr->src[src_idx].src, 0);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUDREF, src, tmu_writes);
                break;
        }

        case nir_tex_src_offset: {
                bool is_const_offset = nir_src_is_const(instr->src[src_idx].src);
                if (is_const_offset) {
                        if (!tmu_writes) {
                                p2_unpacked->offset_s =
                                        nir_src_comp_as_int(instr->src[src_idx].src, 0);
                                if (non_array_components >= 2)
                                        p2_unpacked->offset_t =
                                                nir_src_comp_as_int(instr->src[src_idx].src, 1);
                                if (non_array_components >= 3)
                                        p2_unpacked->offset_r =
                                                nir_src_comp_as_int(instr->src[src_idx].src, 2);
                        }
                } else {
                        struct qreg src_0 =
                                ntq_get_src(c, instr->src[src_idx].src, 0);
                        struct qreg src_1 =
                                ntq_get_src(c, instr->src[src_idx].src, 1);
                        if (!tmu_writes) {
                                struct qreg mask = vir_uniform_ui(c, 0xf);
                                struct qreg x, y, offset;

                                x = vir_AND(c, src_0, mask);
                                y = vir_AND(c, src_1, mask);
                                offset = vir_OR(c, x,
                                                vir_SHL(c, y, vir_uniform_ui(c, 4)));

                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF, offset);
                        } else {
                                (*tmu_writes)++;
                        }
                }
                break;
        }

        default:
                unreachable("unknown texture source");
        }
}

static void
vir_tex_handle_srcs(struct v3d_compile *c,
                    nir_tex_instr *instr,
                    struct V3D41_TMU_CONFIG_PARAMETER_2 *p2_unpacked,
                    struct qreg *s,
                    unsigned *tmu_writes)
{
        unsigned non_array_components = instr->op != nir_texop_lod ?
                instr->coord_components - instr->is_array :
                instr->coord_components;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                handle_tex_src(c, instr, i, non_array_components,
                               p2_unpacked, s, tmu_writes);
        }
}

static unsigned
get_required_tex_tmu_writes(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned tmu_writes = 0;
        vir_tex_handle_srcs(c, instr, NULL, NULL, &tmu_writes);
        return tmu_writes;
}

void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        assert(instr->op != nir_texop_lod || c->devinfo->ver >= 42);

        unsigned texture_idx = instr->texture_index;
        unsigned sampler_idx = instr->sampler_index;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        p0_unpacked.return_words_of_texture_data =
                instr->dest.is_ssa ?
                nir_ssa_def_components_read(&instr->dest.ssa) :
                (1 << instr->dest.reg.reg->num_components) - 1;
        assert(p0_unpacked.return_words_of_texture_data != 0);

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,
                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,
                .coefficient_mode = instr->op == nir_texop_txd,
                .disable_autolod = instr->op == nir_texop_tg4
        };

        const unsigned tmu_writes = get_required_tex_tmu_writes(c, instr);

        /* The input FIFO has 16 slots across all threads so if we require
         * more than that we need to lower thread count.
         */
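        /* (For example, at 4 threads each thread gets 16 / 4 = 4 input FIFO
         * slots, so a lookup needing 5 register writes drops us to 2 threads,
         * i.e. 8 slots per thread.)
         */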
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        /* If pipelining this TMU operation would overflow TMU fifos, we need
         * to flush any outstanding TMU operations.
         */
        const unsigned dest_components =
                util_bitcount(p0_unpacked.return_words_of_texture_data);
        if (ntq_tmu_fifo_overflow(c, dest_components))
                ntq_flush_tmu(c);

        /* Process tex sources emitting corresponding TMU writes */
        struct qreg s = { };
        vir_tex_handle_srcs(c, instr, &p2_unpacked, &s, NULL);

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* We manually set the LOD Query bit (see
         * V3D42_TMU_CONFIG_PARAMETER_2) as right now it is the only
         * V42-specific feature over V41 that we use.
         */
        if (instr->op == nir_texop_lod)
                p2_packed |= 1UL << 24;

        /* Load texture_idx number into the high bits of the texture address
         * field, which will be used by the driver to decide which texture to
         * put in the actual address field.
         */
        p0_packed |= texture_idx << 24;

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);

        /* Even if the texture operation doesn't need a sampler by
         * itself, we still need to add the sampler configuration
         * parameter if the output is 32 bit.
         */
        bool output_type_32_bit =
                c->key->sampler[sampler_idx].return_size == 32 &&
                !instr->is_shadow;

        /* p1 is optional, but we can skip it only if p2 can be skipped too */
        bool needs_p2_config =
                (instr->op == nir_texop_lod ||
                 memcmp(&p2_unpacked, &p2_unpacked_default,
                        sizeof(p2_unpacked)) != 0);

        /* To handle the cases where we can't just use p1_unpacked_default */
        bool non_default_p1_config = nir_tex_instr_need_sampler(instr) ||
                output_type_32_bit;

        if (non_default_p1_config) {
                struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                        .output_type_32_bit = output_type_32_bit,

                        .unnormalized_coordinates = (instr->sampler_dim ==
                                                     GLSL_SAMPLER_DIM_RECT),
                };

                /* Word enables can't ask for more channels than the
                 * output type could provide (2 for f16, 4 for
                 * 32-bit).
                 */
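                /* (With an f16 return type the channels come back packed two
                 * half-floats per 32-bit word, so at most 2 return words can
                 * be requested; with a 32-bit return type each channel is its
                 * own word, so up to 4.)
                 */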
                assert(!p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 4));
                assert(p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 2));

                uint32_t p1_packed;
                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                                  (uint8_t *)&p1_packed,
                                                  &p1_unpacked);

                if (nir_tex_instr_need_sampler(instr)) {
                        /* Load sampler_idx number into the high bits of the
                         * sampler address field, which will be used by the
                         * driver to decide which sampler to put in the actual
                         * address field.
                         */
                        p1_packed |= sampler_idx << 24;

                        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
                } else {
                        /* In this case, we don't need to merge in any
                         * sampler state from the API and can just use
                         * our packed bits.
                         */
                        vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
                }
        } else if (needs_p2_config) {
                /* Configuration parameters need to be set up in
                 * order, and if P2 is needed, you need to set up P1
                 * too even if sampler info is not needed by the
                 * texture operation. But we can set up default info,
                 * and avoid asking the driver for the sampler state
                 * address.
                 */
                uint32_t p1_packed_default;
                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                                  (uint8_t *)&p1_packed_default,
                                                  &p1_unpacked_default);
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed_default);
        }

        if (needs_p2_config)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        /* Emit retiring TMU write */
        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s);
        } else if (instr->op == nir_texop_txl) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSLOD, s);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s);
        }

        ntq_add_pending_tmu_flush(c, &instr->dest,
                                  p0_unpacked.return_words_of_texture_data);
}

static uint32_t
v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_image_load:
        case nir_intrinsic_image_store:
                return V3D_TMU_OP_REGULAR;
        case nir_intrinsic_image_atomic_add:
                return v3d_get_op_for_atomic_add(instr, 3);
        case nir_intrinsic_image_atomic_imin:
                return V3D_TMU_OP_WRITE_SMIN;
        case nir_intrinsic_image_atomic_umin:
                return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
        case nir_intrinsic_image_atomic_imax:
                return V3D_TMU_OP_WRITE_SMAX;
        case nir_intrinsic_image_atomic_umax:
                return V3D_TMU_OP_WRITE_UMAX;
        case nir_intrinsic_image_atomic_and:
                return V3D_TMU_OP_WRITE_AND_READ_INC;
        case nir_intrinsic_image_atomic_or:
                return V3D_TMU_OP_WRITE_OR_READ_DEC;
        case nir_intrinsic_image_atomic_xor:
                return V3D_TMU_OP_WRITE_XOR_READ_NOT;
        case nir_intrinsic_image_atomic_exchange:
                return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
        case nir_intrinsic_image_atomic_comp_swap:
                return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
        default:
                unreachable("unknown image intrinsic");
        };
}

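/* Note that for image_atomic_add the TMU op is not fixed:
 * v3d_get_op_for_atomic_add() may return the dedicated increment/decrement
 * ops (V3D_TMU_OP_WRITE_AND_READ_INC / V3D_TMU_OP_WRITE_OR_READ_DEC), and
 * when it does, v3d40_vir_emit_image_load_store() below treats the add as
 * "replaced" and skips emitting the data operand, since the amount is then
 * implicit in the op.
 */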
/**
 * If 'tmu_writes' is not NULL, then it just counts required register writes,
 * otherwise, it emits the actual register writes.
 *
 * It is important to notice that emitting register writes for the current
 * TMU operation may trigger a TMU flush, since it is possible that any
 * of the inputs required for the register writes is the result of a pending
 * TMU operation. If that happens we need to make sure that it doesn't happen
 * in the middle of the TMU register writes for the current TMU operation,
 * which is why we always call ntq_get_src() even if we are only interested in
 * register write counts.
 */
static void
vir_image_emit_register_writes(struct v3d_compile *c,
                               nir_intrinsic_instr *instr,
                               bool atomic_add_replaced,
                               uint32_t *tmu_writes)
{
        if (tmu_writes)
                *tmu_writes = 0;

        bool is_1d = false;
        switch (nir_intrinsic_image_dim(instr)) {
        case GLSL_SAMPLER_DIM_1D:
                is_1d = true;
                break;
        case GLSL_SAMPLER_DIM_BUF:
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
        case GLSL_SAMPLER_DIM_CUBE: {
                struct qreg src = ntq_get_src(c, instr->src[1], 1);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUT, src, tmu_writes);
                break;
        }
        case GLSL_SAMPLER_DIM_3D: {
                struct qreg src_1_1 = ntq_get_src(c, instr->src[1], 1);
                struct qreg src_1_2 = ntq_get_src(c, instr->src[1], 2);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUT, src_1_1, tmu_writes);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUR, src_1_2, tmu_writes);
                break;
        }
        default:
                unreachable("bad image sampler dim");
        }

        /* In order to fetch on a cube map, we need to interpret it as
         * 2D arrays, where the third coord would be the face index.
         */
        if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_CUBE ||
            nir_intrinsic_image_array(instr)) {
                struct qreg src = ntq_get_src(c, instr->src[1], is_1d ? 1 : 2);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUI, src, tmu_writes);
        }

        /* Emit the data writes for atomics or image store. */
        if (instr->intrinsic != nir_intrinsic_image_load &&
            !atomic_add_replaced) {
                for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
                        struct qreg src_3_i = ntq_get_src(c, instr->src[3], i);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUD, src_3_i,
                                               tmu_writes);
                }

                /* Second atomic argument */
                if (instr->intrinsic == nir_intrinsic_image_atomic_comp_swap) {
                        struct qreg src_4_0 = ntq_get_src(c, instr->src[4], 0);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUD, src_4_0,
                                               tmu_writes);
                }
        }

        struct qreg src_1_0 = ntq_get_src(c, instr->src[1], 0);
        if (!tmu_writes && vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                vir_set_pf(c, vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
        }

        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUSF, src_1_0, tmu_writes);

        if (!tmu_writes && vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                struct qinst *last_inst =
                        (struct qinst *)c->cur_block->instructions.prev;
                vir_set_cond(last_inst, V3D_QPU_COND_IFA);
        }
}

static unsigned
get_required_image_tmu_writes(struct v3d_compile *c,
                              nir_intrinsic_instr *instr,
                              bool atomic_add_replaced)
{
        unsigned tmu_writes;
        vir_image_emit_register_writes(c, instr, atomic_add_replaced,
                                       &tmu_writes);
        return tmu_writes;
}

void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                nir_intrinsic_instr *instr)
{
        unsigned format = nir_intrinsic_format(instr);
        unsigned unit = nir_src_as_uint(instr->src[0]);

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .per_pixel_mask_enable = true,
                .output_type_32_bit = v3d_gl_format_is_return_32(format),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << instr_return_channels) - 1;

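        /* (For a 4-channel load from a 16-bit-return format this gives
         * (4 + 1) / 2 = 2 return words, i.e. a mask of 0b11, since each
         * 32-bit word carries two half-float channels.)
         */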
        p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);

        /* If we were able to replace the atomic_add with an inc/dec, then we
         * need/can do things slightly differently, like not loading the
         * amount to add/sub, as that is implicit.
         */
        bool atomic_add_replaced =
                (instr->intrinsic == nir_intrinsic_image_atomic_add &&
                 (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
                  p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        if (instr->intrinsic != nir_intrinsic_image_load)
                c->tmu_dirty_rcl = true;

        const uint32_t tmu_writes =
                get_required_image_tmu_writes(c, instr, atomic_add_replaced);

        /* The input FIFO has 16 slots across all threads so if we require
         * more than that we need to lower thread count.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        /* If pipelining this TMU operation would overflow TMU fifos, we need
         * to flush any outstanding TMU operations.
         */
        if (ntq_tmu_fifo_overflow(c, instr_return_channels))
                ntq_flush_tmu(c);

        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
        if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)))
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)))
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        vir_image_emit_register_writes(c, instr, atomic_add_replaced, NULL);

        ntq_add_pending_tmu_flush(c, &instr->dest,
                                  p0_unpacked.return_words_of_texture_data);
}