/* Path: blob/21.2-virgl/src/freedreno/vulkan/tu_pipeline.c */
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "common/freedreno_guardband.h"
#include "tu_private.h"

#include "ir3/ir3_nir.h"
#include "main/menums.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"
#include "util/debug.h"
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "vk_format.h"
#include "vk_util.h"

#include "tu_cs.h"

/* Emit IB that preloads the descriptors that the shader uses */

/* Emit one bindless CP_LOAD_STATE6 descriptor-prefetch packet.
 *
 * @param cs     command stream to append the packet to
 * @param opcode CP_LOAD_STATE6 variant (geometry/frag flavor)
 * @param st     state type (tex/shader/UBO/IBO constants)
 * @param sb     state block (per-stage destination)
 * @param base   bindless base index (encoded in the high bits of the addr)
 * @param offset dword offset of the first descriptor within the set
 * @param count  number of descriptors to prefetch
 */
static void
emit_load_state(struct tu_cs *cs, unsigned opcode, enum a6xx_state_type st,
                enum a6xx_state_block sb, unsigned base, unsigned offset,
                unsigned count)
{
   /* Note: just emit one packet, even if count overflows NUM_UNIT. It's not
    * clear if emitting more packets will even help anything. Presumably the
    * descriptor cache is relatively small, and these packets stop doing
    * anything when there are too many descriptors.
    */
   tu_cs_emit_pkt7(cs, opcode, 3);
   tu_cs_emit(cs,
              CP_LOAD_STATE6_0_STATE_TYPE(st) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_BINDLESS) |
              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
              /* NUM_UNIT is a limited-width field; clamp rather than wrap */
              CP_LOAD_STATE6_0_NUM_UNIT(MIN2(count, 1024-1)));
   tu_cs_emit_qw(cs, offset | (base << 28));
}

/* Upper bound, in dwords, of the command stream that
 * tu6_emit_load_state() will emit for this pipeline: one 4-dword
 * CP_LOAD_STATE6 packet per prefetch. Must stay in sync with the
 * emission logic below.
 */
static unsigned
tu6_load_state_size(struct tu_pipeline *pipeline, bool compute)
{
   const unsigned load_state_size = 4;   /* dwords per CP_LOAD_STATE6 packet */
   unsigned size = 0;
   for (unsigned i = 0; i < pipeline->layout->num_sets; i++) {
      /* sets not statically used by the pipeline are never prefetched */
      if (!(pipeline->active_desc_sets & (1u << i)))
         continue;

      struct tu_descriptor_set_layout *set_layout = pipeline->layout->set[i].layout;
      for (unsigned j = 0; j < set_layout->binding_count; j++) {
         struct tu_descriptor_set_binding_layout *binding = &set_layout->binding[j];
         unsigned count = 0;
         /* Note: some users, like amber for example, pass in
          * VK_SHADER_STAGE_ALL which includes a bunch of extra bits, so
          * filter these out by using VK_SHADER_STAGE_ALL_GRAPHICS explicitly.
          */
         VkShaderStageFlags stages = compute ?
            binding->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT :
            binding->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
         unsigned stage_count = util_bitcount(stages);

         if (!binding->array_size)
            continue;

         switch (binding->type) {
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            /* IBO-backed resources only need one packet for all graphics stages */
            if (stages & ~VK_SHADER_STAGE_COMPUTE_BIT)
               count += 1;
            if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
               count += 1;
            break;
         case VK_DESCRIPTOR_TYPE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            /* Textures and UBOs need a packet for each stage */
            count = stage_count;
            break;
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            /* Because of how we pack combined images and samplers, we
             * currently can't use one packet for the whole array.
             */
            count = stage_count * binding->array_size * 2;
            break;
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            /* input attachments don't use bindless, so no prefetch */
            break;
         default:
            unreachable("bad descriptor type");
         }
         size += count * load_state_size;
      }
   }
   return size;
}

/* Build the pipeline's descriptor-prefetch draw state: walk every
 * binding of every set statically used by the pipeline and emit the
 * bindless CP_LOAD_STATE6 prefetches for it. The resulting IB is
 * stored in pipeline->load_state. The packet count here must match
 * tu6_load_state_size() above.
 */
static void
tu6_emit_load_state(struct tu_pipeline *pipeline, bool compute)
{
   unsigned size = tu6_load_state_size(pipeline, compute);
   if (size == 0)
      return;

   struct tu_cs cs;
   tu_cs_begin_sub_stream(&pipeline->cs, size, &cs);

   struct tu_pipeline_layout *layout = pipeline->layout;
   for (unsigned i = 0; i < layout->num_sets; i++) {
      /* From 13.2.7. Descriptor Set Binding:
       *
       * A compatible descriptor set must be bound for all set numbers that
       * any shaders in a pipeline access, at the time that a draw or
       * dispatch command is recorded to execute using that pipeline.
       * However, if none of the shaders in a pipeline statically use any
       * bindings with a particular set number, then no descriptor set need
       * be bound for that set number, even if the pipeline layout includes
       * a non-trivial descriptor set layout for that set number.
       *
       * This means that descriptor sets unused by the pipeline may have a
       * garbage or 0 BINDLESS_BASE register, which will cause context faults
       * when prefetching descriptors from these sets. Skip prefetching for
       * descriptors from them to avoid this. This is also an optimization,
       * since these prefetches would be useless.
       */
      if (!(pipeline->active_desc_sets & (1u << i)))
         continue;

      struct tu_descriptor_set_layout *set_layout = layout->set[i].layout;
      for (unsigned j = 0; j < set_layout->binding_count; j++) {
         struct tu_descriptor_set_binding_layout *binding = &set_layout->binding[j];
         unsigned base = i;
         unsigned offset = binding->offset / 4;
         /* Note: some users, like amber for example, pass in
          * VK_SHADER_STAGE_ALL which includes a bunch of extra bits, so
          * filter these out by using VK_SHADER_STAGE_ALL_GRAPHICS explicitly.
          */
         VkShaderStageFlags stages = compute ?
            binding->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT :
            binding->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
         unsigned count = binding->array_size;
         if (count == 0 || stages == 0)
            continue;
         switch (binding->type) {
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
            /* dynamic descriptors live in a separate reserved set at the end */
            base = MAX_SETS;
            offset = (layout->set[i].dynamic_offset_start +
                      binding->dynamic_offset_offset) * A6XX_TEX_CONST_DWORDS;
            FALLTHROUGH;
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            /* IBO-backed resources only need one packet for all graphics stages */
            if (stages & ~VK_SHADER_STAGE_COMPUTE_BIT) {
               emit_load_state(&cs, CP_LOAD_STATE6, ST6_SHADER, SB6_IBO,
                               base, offset, count);
            }
            if (stages & VK_SHADER_STAGE_COMPUTE_BIT) {
               emit_load_state(&cs, CP_LOAD_STATE6_FRAG, ST6_IBO, SB6_CS_SHADER,
                               base, offset, count);
            }
            break;
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            /* nothing - input attachment doesn't use bindless */
            break;
         case VK_DESCRIPTOR_TYPE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
            tu_foreach_stage(stage, stages) {
               emit_load_state(&cs, tu6_stage2opcode(stage),
                               /* samplers go in the shader state block,
                                * texture descriptors in constants */
                               binding->type == VK_DESCRIPTOR_TYPE_SAMPLER ?
                               ST6_SHADER : ST6_CONSTANTS,
                               tu6_stage2texsb(stage), base, offset, count);
            }
            break;
         }
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            /* dynamic descriptors live in a separate reserved set at the end */
            base = MAX_SETS;
            offset = (layout->set[i].dynamic_offset_start +
                      binding->dynamic_offset_offset) * A6XX_TEX_CONST_DWORDS;
            FALLTHROUGH;
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
            tu_foreach_stage(stage, stages) {
               emit_load_state(&cs, tu6_stage2opcode(stage), ST6_UBO,
                               tu6_stage2shadersb(stage), base, offset, count);
            }
            break;
         }
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
            tu_foreach_stage(stage, stages) {
               /* TODO: We could emit less CP_LOAD_STATE6 if we used
                * struct-of-arrays instead of array-of-structs.
                */
               /* note: inner 'i' intentionally shadows the set index;
                * it only drives tex/sam offsets within this binding */
               for (unsigned i = 0; i < count; i++) {
                  unsigned tex_offset = offset + 2 * i * A6XX_TEX_CONST_DWORDS;
                  unsigned sam_offset = offset + (2 * i + 1) * A6XX_TEX_CONST_DWORDS;
                  emit_load_state(&cs, tu6_stage2opcode(stage),
                                  ST6_CONSTANTS, tu6_stage2texsb(stage),
                                  base, tex_offset, 1);
                  emit_load_state(&cs, tu6_stage2opcode(stage),
                                  ST6_SHADER, tu6_stage2texsb(stage),
                                  base, sam_offset, 1);
               }
            }
            break;
         }
         default:
            unreachable("bad descriptor type");
         }
      }
   }

   pipeline->load_state = tu_cs_end_draw_state(&pipeline->cs, &cs);
}

/* Scratch state gathered while translating a VkGraphicsPipelineCreateInfo
 * into a tu_pipeline; filled in incrementally by the builder stages.
 */
struct tu_pipeline_builder
{
   struct tu_device *device;
   struct tu_pipeline_cache *cache;
   struct tu_pipeline_layout *layout;
   const VkAllocationCallbacks *alloc;
   const VkGraphicsPipelineCreateInfo *create_info;

   struct tu_shader *shaders[MESA_SHADER_FRAGMENT + 1];
   struct ir3_shader_variant *variants[MESA_SHADER_FRAGMENT + 1];
   struct ir3_shader_variant *binning_variant;
   uint64_t shader_iova[MESA_SHADER_FRAGMENT + 1];
   uint64_t binning_vs_iova;

   struct tu_pvtmem_config pvtmem;

   bool rasterizer_discard;
   /* these states are affected by rasterizer_discard */
   bool emit_msaa_state;
   VkSampleCountFlagBits samples;
   bool use_color_attachments;
   bool use_dual_src_blend;
   bool alpha_to_coverage;
   uint32_t color_attachment_count;
   VkFormat
color_attachment_formats[MAX_RTS];
   VkFormat depth_attachment_format;
   uint32_t render_components;
   uint32_t multiview_mask;
};

/* Returns whether the given logic op reads the destination value.
 * Ops that ignore the destination (CLEAR/COPY/COPY_INVERTED/SET)
 * don't need a framebuffer read.
 */
static bool
tu_logic_op_reads_dst(VkLogicOp op)
{
   switch (op) {
   case VK_LOGIC_OP_CLEAR:
   case VK_LOGIC_OP_COPY:
   case VK_LOGIC_OP_COPY_INVERTED:
   case VK_LOGIC_OP_SET:
      return false;
   default:
      return true;
   }
}

/* Rewrite dst-alpha blend factors for targets without an alpha channel. */
static VkBlendFactor
tu_blend_factor_no_dst_alpha(VkBlendFactor factor)
{
   /* treat dst alpha as 1.0 and avoid reading it */
   switch (factor) {
   case VK_BLEND_FACTOR_DST_ALPHA:
      return VK_BLEND_FACTOR_ONE;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
      return VK_BLEND_FACTOR_ZERO;
   default:
      return factor;
   }
}

/* Whether the blend factor consumes the second (dual-source) color output. */
static bool tu_blend_factor_is_dual_src(VkBlendFactor factor)
{
   switch (factor) {
   case VK_BLEND_FACTOR_SRC1_COLOR:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
   case VK_BLEND_FACTOR_SRC1_ALPHA:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
      return true;
   default:
      return false;
   }
}

/* Whether any attachment's blend state uses dual-source factors. */
static bool
tu_blend_state_is_dual_src(const VkPipelineColorBlendStateCreateInfo *info)
{
   if (!info)
      return false;

   for (unsigned i = 0; i < info->attachmentCount; i++) {
      const VkPipelineColorBlendAttachmentState *blend = &info->pAttachments[i];
      if (tu_blend_factor_is_dual_src(blend->srcColorBlendFactor) ||
          tu_blend_factor_is_dual_src(blend->dstColorBlendFactor) ||
          tu_blend_factor_is_dual_src(blend->srcAlphaBlendFactor) ||
          tu_blend_factor_is_dual_src(blend->dstAlphaBlendFactor))
         return true;
   }

   return false;
}

/* Per-shader-stage register addresses for the common "xs" shader setup,
 * indexed by gl_shader_stage. Lets tu6_emit_xs*() handle every stage
 * with one code path.
 */
static const struct xs_config {
   uint16_t reg_sp_xs_ctrl;
   uint16_t reg_sp_xs_config;
   uint16_t reg_sp_xs_instrlen;
   uint16_t reg_hlsq_xs_ctrl;
   uint16_t reg_sp_xs_first_exec_offset;
   uint16_t reg_sp_xs_pvt_mem_hw_stack_offset;
} xs_config[] = {
   [MESA_SHADER_VERTEX] = {
      REG_A6XX_SP_VS_CTRL_REG0,
      REG_A6XX_SP_VS_CONFIG,
      REG_A6XX_SP_VS_INSTRLEN,
      REG_A6XX_HLSQ_VS_CNTL,
      REG_A6XX_SP_VS_OBJ_FIRST_EXEC_OFFSET,
      REG_A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET,
   },
   [MESA_SHADER_TESS_CTRL] = {
      REG_A6XX_SP_HS_CTRL_REG0,
      REG_A6XX_SP_HS_CONFIG,
      REG_A6XX_SP_HS_INSTRLEN,
      REG_A6XX_HLSQ_HS_CNTL,
      REG_A6XX_SP_HS_OBJ_FIRST_EXEC_OFFSET,
      REG_A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET,
   },
   [MESA_SHADER_TESS_EVAL] = {
      REG_A6XX_SP_DS_CTRL_REG0,
      REG_A6XX_SP_DS_CONFIG,
      REG_A6XX_SP_DS_INSTRLEN,
      REG_A6XX_HLSQ_DS_CNTL,
      REG_A6XX_SP_DS_OBJ_FIRST_EXEC_OFFSET,
      REG_A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET,
   },
   [MESA_SHADER_GEOMETRY] = {
      REG_A6XX_SP_GS_CTRL_REG0,
      REG_A6XX_SP_GS_CONFIG,
      REG_A6XX_SP_GS_INSTRLEN,
      REG_A6XX_HLSQ_GS_CNTL,
      REG_A6XX_SP_GS_OBJ_FIRST_EXEC_OFFSET,
      REG_A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET,
   },
   [MESA_SHADER_FRAGMENT] = {
      REG_A6XX_SP_FS_CTRL_REG0,
      REG_A6XX_SP_FS_CONFIG,
      REG_A6XX_SP_FS_INSTRLEN,
      REG_A6XX_HLSQ_FS_CNTL,
      REG_A6XX_SP_FS_OBJ_FIRST_EXEC_OFFSET,
      REG_A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET,
   },
   [MESA_SHADER_COMPUTE] = {
      REG_A6XX_SP_CS_CTRL_REG0,
      REG_A6XX_SP_CS_CONFIG,
      REG_A6XX_SP_CS_INSTRLEN,
      REG_A6XX_HLSQ_CS_CNTL,
      REG_A6XX_SP_CS_OBJ_FIRST_EXEC_OFFSET,
      REG_A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET,
   },
};

/* Emit the per-stage SP_xS_CONFIG / HLSQ_xS_CNTL enable state.
 * The A6XX_SP_VS_* field macros are used for every stage; the field
 * layout is shared across stages, only the register address differs
 * (taken from xs_config[stage]).
 */
void
tu6_emit_xs_config(struct tu_cs *cs,
                   gl_shader_stage stage, /* xs->type, but xs may be NULL */
                   const struct ir3_shader_variant *xs)
{
   const struct xs_config *cfg = &xs_config[stage];

   if (!xs) {
      /* shader stage disabled */
      tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_config, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, cfg->reg_hlsq_xs_ctrl, 1);
      tu_cs_emit(cs, 0);
      return;
   }

   tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_config, 1);
   tu_cs_emit(cs, A6XX_SP_VS_CONFIG_ENABLED |
                  COND(xs->bindless_tex, A6XX_SP_VS_CONFIG_BINDLESS_TEX) |
                  COND(xs->bindless_samp, A6XX_SP_VS_CONFIG_BINDLESS_SAMP) |
                  COND(xs->bindless_ibo, A6XX_SP_VS_CONFIG_BINDLESS_IBO)
| COND(xs->bindless_ubo, A6XX_SP_VS_CONFIG_BINDLESS_UBO) |
                  A6XX_SP_VS_CONFIG_NTEX(xs->num_samp) |
                  A6XX_SP_VS_CONFIG_NSAMP(xs->num_samp));

   tu_cs_emit_pkt4(cs, cfg->reg_hlsq_xs_ctrl, 1);
   tu_cs_emit(cs, A6XX_HLSQ_VS_CNTL_CONSTLEN(xs->constlen) |
                  A6XX_HLSQ_VS_CNTL_ENABLED);
}

/* Emit the per-stage shader setup: CTRL_REG0 (register footprint,
 * threadsize, etc.), instruction length, program binary address,
 * private-memory layout, immediates, and the constant-data UBO.
 *
 * binary_iova must be aligned to one instrlen unit (128 bytes);
 * pvtmem->iova must be 32-byte aligned (asserted below).
 */
void
tu6_emit_xs(struct tu_cs *cs,
            gl_shader_stage stage, /* xs->type, but xs may be NULL */
            const struct ir3_shader_variant *xs,
            const struct tu_pvtmem_config *pvtmem,
            uint64_t binary_iova)
{
   const struct xs_config *cfg = &xs_config[stage];

   if (!xs) {
      /* shader stage disabled */
      return;
   }

   enum a6xx_threadsize thrsz =
      xs->info.double_threadsize ? THREAD128 : THREAD64;
   switch (stage) {
   case MESA_SHADER_VERTEX:
      tu_cs_emit_regs(cs, A6XX_SP_VS_CTRL_REG0(
               .fullregfootprint = xs->info.max_reg + 1,
               .halfregfootprint = xs->info.max_half_reg + 1,
               .branchstack = ir3_shader_branchstack_hw(xs),
               .mergedregs = xs->mergedregs,
      ));
      break;
   case MESA_SHADER_TESS_CTRL:
      tu_cs_emit_regs(cs, A6XX_SP_HS_CTRL_REG0(
               .fullregfootprint = xs->info.max_reg + 1,
               .halfregfootprint = xs->info.max_half_reg + 1,
               .branchstack = ir3_shader_branchstack_hw(xs),
      ));
      break;
   case MESA_SHADER_TESS_EVAL:
      tu_cs_emit_regs(cs, A6XX_SP_DS_CTRL_REG0(
               .fullregfootprint = xs->info.max_reg + 1,
               .halfregfootprint = xs->info.max_half_reg + 1,
               .branchstack = ir3_shader_branchstack_hw(xs),
               .mergedregs = xs->mergedregs,
      ));
      break;
   case MESA_SHADER_GEOMETRY:
      tu_cs_emit_regs(cs, A6XX_SP_GS_CTRL_REG0(
               .fullregfootprint = xs->info.max_reg + 1,
               .halfregfootprint = xs->info.max_half_reg + 1,
               .branchstack = ir3_shader_branchstack_hw(xs),
      ));
      break;
   case MESA_SHADER_FRAGMENT:
      tu_cs_emit_regs(cs, A6XX_SP_FS_CTRL_REG0(
               .fullregfootprint = xs->info.max_reg + 1,
               .halfregfootprint = xs->info.max_half_reg + 1,
               .branchstack = ir3_shader_branchstack_hw(xs),
               .mergedregs = xs->mergedregs,
               .threadsize = thrsz,
               .pixlodenable = xs->need_pixlod,
               .diff_fine = xs->need_fine_derivatives,
               .varying = xs->total_in != 0,
               /* unknown bit, seems unnecessary */
               .unk24 = true,
      ));
      break;
   case MESA_SHADER_COMPUTE:
      tu_cs_emit_regs(cs, A6XX_SP_CS_CTRL_REG0(
               .fullregfootprint = xs->info.max_reg + 1,
               .halfregfootprint = xs->info.max_half_reg + 1,
               .branchstack = ir3_shader_branchstack_hw(xs),
               .mergedregs = xs->mergedregs,
               .threadsize = thrsz,
      ));
      break;
   default:
      unreachable("bad shader stage");
   }

   tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_instrlen, 1);
   tu_cs_emit(cs, xs->instrlen);

   /* emit program binary & private memory layout
    * binary_iova should be aligned to 1 instrlen unit (128 bytes)
    */

   assert((binary_iova & 0x7f) == 0);
   assert((pvtmem->iova & 0x1f) == 0);

   tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_first_exec_offset, 7);
   tu_cs_emit(cs, 0);
   tu_cs_emit_qw(cs, binary_iova);
   tu_cs_emit(cs,
              A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(pvtmem->per_fiber_size));
   tu_cs_emit_qw(cs, pvtmem->iova);
   tu_cs_emit(cs, A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(pvtmem->per_sp_size) |
                  COND(pvtmem->per_wave, A6XX_SP_VS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT));

   tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_pvt_mem_hw_stack_offset, 1);
   tu_cs_emit(cs, A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET(pvtmem->per_sp_size));

   /* preload the shader program into the instruction cache */
   tu_cs_emit_pkt7(cs, tu6_stage2opcode(stage), 3);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                  CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                  CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                  CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(stage)) |
                  CP_LOAD_STATE6_0_NUM_UNIT(xs->instrlen));
   tu_cs_emit_qw(cs, binary_iova);

   /* emit immediates */

   const struct ir3_const_state *const_state = ir3_const_state(xs);
   uint32_t base = const_state->offsets.immediate;
   int size = DIV_ROUND_UP(const_state->immediates_count, 4);

   /* truncate size to avoid writing constants that shader
    * does not use:
    */
   size = MIN2(size + base, xs->constlen) - base;

   if (size > 0) {
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(stage), 3 + size * 4);
      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(stage)) |
                     CP_LOAD_STATE6_0_NUM_UNIT(size));
      tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
      tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

      tu_cs_emit_array(cs, const_state->immediates, size * 4);
   }

   if (const_state->constant_data_ubo != -1) {
      /* constant data is appended to the shader binary */
      uint64_t iova = binary_iova + xs->info.constant_data_offset;

      /* Upload UBO state for the constant data. */
      tu_cs_emit_pkt7(cs, tu6_stage2opcode(stage), 5);
      tu_cs_emit(cs,
                 CP_LOAD_STATE6_0_DST_OFF(const_state->constant_data_ubo) |
                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_UBO) |
                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(stage)) |
                 CP_LOAD_STATE6_0_NUM_UNIT(1));
      tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
      tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
      int size_vec4s = DIV_ROUND_UP(xs->constant_data_size, 16);
      tu_cs_emit_qw(cs,
                    iova |
                    (uint64_t)A6XX_UBO_1_SIZE(size_vec4s) << 32);

      /* Upload the constant data to the const file if needed. */
      const struct ir3_ubo_analysis_state *ubo_state = &const_state->ubo_state;

      for (int i = 0; i < ubo_state->num_enabled; i++) {
         if (ubo_state->range[i].ubo.block != const_state->constant_data_ubo ||
             ubo_state->range[i].ubo.bindless) {
            continue;
         }

         uint32_t start = ubo_state->range[i].start;
         uint32_t end = ubo_state->range[i].end;
         /* clamp to what fits in the const file (16 bytes per vec4 slot) */
         uint32_t size = MIN2(end - start,
                              (16 * xs->constlen) - ubo_state->range[i].offset);

         tu_cs_emit_pkt7(cs, tu6_stage2opcode(stage), 3);
         tu_cs_emit(cs,
                    CP_LOAD_STATE6_0_DST_OFF(ubo_state->range[i].offset / 16) |
                    CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                    CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                    CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(stage)) |
                    CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
         tu_cs_emit_qw(cs, iova + start);
      }
   }
}

/* Emit the compute-stage shader setup: invalidate stale CS state,
 * program the shader via the common xs path, shared-memory size, and
 * the workgroup/local-id sysval register assignments.
 * Note: 'shader' is currently unused here.
 */
static void
tu6_emit_cs_config(struct tu_cs *cs, const struct tu_shader *shader,
                   const struct ir3_shader_variant *v,
                   const struct tu_pvtmem_config *pvtmem,
                   uint64_t binary_iova)
{
   tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
         .cs_state = true,
         .cs_ibo = true));

   tu6_emit_xs_config(cs, MESA_SHADER_COMPUTE, v);
   tu6_emit_xs(cs, MESA_SHADER_COMPUTE, v, pvtmem, binary_iova);

   /* shared size field is in KiB, minimum value 1 */
   uint32_t shared_size = MAX2(((int)v->shared_size - 1) / 1024, 1);
   tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
   tu_cs_emit(cs, A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE(shared_size) |
                  A6XX_SP_CS_UNKNOWN_A9B1_UNK6);

   uint32_t local_invocation_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   uint32_t work_group_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_WORKGROUP_ID);

   enum a6xx_threadsize thrsz = v->info.double_threadsize ?
THREAD128 : THREAD64;
   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CS_CNTL_0, 2);
   tu_cs_emit(cs,
              /* regid(63, 0) marks the sysval as not used by the shader */
              A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
              A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(regid(63, 0)) |
              A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(regid(63, 0)) |
              A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
   tu_cs_emit(cs, A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(regid(63, 0)) |
                  A6XX_HLSQ_CS_CNTL_1_THREADSIZE(thrsz));
}

/* Program VFD_CONTROL_1..6 with the shader register ids of the
 * vertex-pipeline system values (vertex/instance id, tess coords,
 * patch/primitive ids, GS header, view index). Tess sysvals are only
 * looked up when a tessellation stage (hs/ds) is present, GS sysvals
 * only when gs is present; regid(63, 0) means "unused".
 */
static void
tu6_emit_vs_system_values(struct tu_cs *cs,
                          const struct ir3_shader_variant *vs,
                          const struct ir3_shader_variant *hs,
                          const struct ir3_shader_variant *ds,
                          const struct ir3_shader_variant *gs,
                          bool primid_passthru)
{
   const uint32_t vertexid_regid =
      ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID);
   const uint32_t instanceid_regid =
      ir3_find_sysval_regid(vs, SYSTEM_VALUE_INSTANCE_ID);
   const uint32_t tess_coord_x_regid = hs ?
      ir3_find_sysval_regid(ds, SYSTEM_VALUE_TESS_COORD) :
      regid(63, 0);
   /* tess coord y lives in the register right after x */
   const uint32_t tess_coord_y_regid = VALIDREG(tess_coord_x_regid) ?
      tess_coord_x_regid + 1 :
      regid(63, 0);
   const uint32_t hs_patch_regid = hs ?
      ir3_find_sysval_regid(hs, SYSTEM_VALUE_PRIMITIVE_ID) :
      regid(63, 0);
   const uint32_t ds_patch_regid = hs ?
      ir3_find_sysval_regid(ds, SYSTEM_VALUE_PRIMITIVE_ID) :
      regid(63, 0);
   const uint32_t hs_invocation_regid = hs ?
      ir3_find_sysval_regid(hs, SYSTEM_VALUE_TCS_HEADER_IR3) :
      regid(63, 0);
   const uint32_t primitiveid_regid = gs ?
      ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID) :
      regid(63, 0);
   const uint32_t gsheader_regid = gs ?
      ir3_find_sysval_regid(gs, SYSTEM_VALUE_GS_HEADER_IR3) :
      regid(63, 0);

   /* Note: we currently don't support multiview with tess or GS. If we did,
    * and the HW actually works, then we'd have to somehow share this across
    * stages. Note that the blob doesn't support this either.
    */
   const uint32_t viewid_regid =
      ir3_find_sysval_regid(vs, SYSTEM_VALUE_VIEW_INDEX);

   tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_1, 6);
   tu_cs_emit(cs, A6XX_VFD_CONTROL_1_REGID4VTX(vertexid_regid) |
                  A6XX_VFD_CONTROL_1_REGID4INST(instanceid_regid) |
                  A6XX_VFD_CONTROL_1_REGID4PRIMID(primitiveid_regid) |
                  A6XX_VFD_CONTROL_1_REGID4VIEWID(viewid_regid));
   tu_cs_emit(cs, A6XX_VFD_CONTROL_2_REGID_HSPATCHID(hs_patch_regid) |
                  A6XX_VFD_CONTROL_2_REGID_INVOCATIONID(hs_invocation_regid));
   tu_cs_emit(cs, A6XX_VFD_CONTROL_3_REGID_DSPATCHID(ds_patch_regid) |
                  A6XX_VFD_CONTROL_3_REGID_TESSX(tess_coord_x_regid) |
                  A6XX_VFD_CONTROL_3_REGID_TESSY(tess_coord_y_regid) |
                  0xfc);
   tu_cs_emit(cs, 0x000000fc); /* VFD_CONTROL_4 */
   tu_cs_emit(cs, A6XX_VFD_CONTROL_5_REGID_GSHEADER(gsheader_regid) |
                  0xfc00); /* VFD_CONTROL_5 */
   tu_cs_emit(cs, COND(primid_passthru, A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU)); /* VFD_CONTROL_6 */
}

/* Translate the variant's stream-output (transform feedback) info into
 * the VPC_SO_* register program, emitted via CP_CONTEXT_REG_BUNCH so it
 * can live in a draw state. With no outputs, SO is disabled instead.
 */
static void
tu6_setup_streamout(struct tu_cs *cs,
                    const struct ir3_shader_variant *v,
                    struct ir3_shader_linkage *l)
{
   const struct ir3_stream_output_info *info = &v->shader->stream_output;
   /* Note: 64 here comes from the HW layout of the program RAM. The program
    * for stream N is at DWORD 64 * N.
    */
#define A6XX_SO_PROG_DWORDS 64
   uint32_t prog[A6XX_SO_PROG_DWORDS * IR3_MAX_SO_STREAMS] = {};
   BITSET_DECLARE(valid_dwords, A6XX_SO_PROG_DWORDS * IR3_MAX_SO_STREAMS) = {0};
   uint32_t ncomp[IR3_MAX_SO_BUFFERS] = {};

   /* TODO: streamout state should be in a non-GMEM draw state */

   /* no streamout: */
   if (info->num_outputs == 0) {
      tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, REG_A6XX_VPC_SO_STREAM_CNTL);
      tu_cs_emit(cs, 0);
      return;
   }

   /* is there something to do with info->stride[i]? */

   for (unsigned i = 0; i < info->num_outputs; i++) {
      const struct ir3_stream_output *out = &info->output[i];
      unsigned k = out->register_index;
      unsigned idx;

      /* Skip it, if there's an unused reg in the middle of outputs. */
      if (v->outputs[k].regid == INVALID_REG)
         continue;

      ncomp[out->output_buffer] += out->num_components;

      /* linkage map sorted by order frag shader wants things, so
       * a bit less ideal here..
       */
      for (idx = 0; idx < l->cnt; idx++)
         if (l->var[idx].regid == v->outputs[k].regid)
            break;

      debug_assert(idx < l->cnt);

      for (unsigned j = 0; j < out->num_components; j++) {
         unsigned c = j + out->start_component;
         unsigned loc = l->var[idx].loc + c;
         unsigned off = j + out->dst_offset; /* in dwords */

         assert(loc < A6XX_SO_PROG_DWORDS * 2);
         /* each program dword describes two components (A and B halves) */
         unsigned dword = out->stream * A6XX_SO_PROG_DWORDS + loc/2;
         if (loc & 1) {
            prog[dword] |= A6XX_VPC_SO_PROG_B_EN |
                           A6XX_VPC_SO_PROG_B_BUF(out->output_buffer) |
                           A6XX_VPC_SO_PROG_B_OFF(off * 4);
         } else {
            prog[dword] |= A6XX_VPC_SO_PROG_A_EN |
                           A6XX_VPC_SO_PROG_A_BUF(out->output_buffer) |
                           A6XX_VPC_SO_PROG_A_OFF(off * 4);
         }
         BITSET_SET(valid_dwords, dword);
      }
   }

   /* count reg/value pairs: one VPC_SO_CNTL write per range plus one
    * VPC_SO_PROG write per dword in the range (hence the +1)
    */
   unsigned prog_count = 0;
   unsigned start, end;
   BITSET_FOREACH_RANGE(start, end, valid_dwords,
                        A6XX_SO_PROG_DWORDS * IR3_MAX_SO_STREAMS) {
      prog_count += end - start + 1;
   }

   tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 10 + 2 * prog_count);
   tu_cs_emit(cs, REG_A6XX_VPC_SO_STREAM_CNTL);
   tu_cs_emit(cs, A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE(info->streams_written) |
                  COND(ncomp[0] > 0,
                       A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM(1 + info->buffer_to_stream[0])) |
                  COND(ncomp[1] > 0,
                       A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM(1 + info->buffer_to_stream[1])) |
                  COND(ncomp[2] > 0,
                       A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM(1 + info->buffer_to_stream[2])) |
                  COND(ncomp[3] > 0,
                       A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM(1 + info->buffer_to_stream[3])));
   for (uint32_t i = 0; i < 4; i++) {
      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(i));
      tu_cs_emit(cs, ncomp[i]);
   }
   bool first = true;
   BITSET_FOREACH_RANGE(start, end, valid_dwords,
                        A6XX_SO_PROG_DWORDS * IR3_MAX_SO_STREAMS) {
      /* set the write address once per range, then stream the dwords */
      tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
      tu_cs_emit(cs, COND(first, A6XX_VPC_SO_CNTL_RESET) |
                     A6XX_VPC_SO_CNTL_ADDR(start));
      for (unsigned i = start; i < end; i++) {
         tu_cs_emit(cs, REG_A6XX_VPC_SO_PROG);
         tu_cs_emit(cs, prog[i]);
      }
      first = false;
   }
}

/* Emit a direct (inline payload) CP_LOAD_STATE6 constant upload.
 * 'offset' is a byte offset into 'dwords'; 'size' is in dwords and must
 * be a multiple of 4 (the const file is addressed in vec4 units).
 */
static void
tu6_emit_const(struct tu_cs *cs, uint32_t opcode, uint32_t base,
               enum a6xx_state_block block, uint32_t offset,
               uint32_t size, const uint32_t *dwords) {
   assert(size % 4 == 0);

   tu_cs_emit_pkt7(cs, opcode, 3 + size);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(block) |
              CP_LOAD_STATE6_0_NUM_UNIT(size / 4));

   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
   dwords = (uint32_t *)&((uint8_t *)dwords)[offset];

   tu_cs_emit_array(cs, dwords, size);
}

/* Upload the producer→consumer primitive map (output locations) into
 * the consumer stage's const file, truncated to its constlen.
 */
static void
tu6_emit_link_map(struct tu_cs *cs,
                  const struct ir3_shader_variant *producer,
                  const struct ir3_shader_variant *consumer,
                  enum a6xx_state_block sb)
{
   const struct ir3_const_state *const_state = ir3_const_state(consumer);
   uint32_t base = const_state->offsets.primitive_map;
   int size = DIV_ROUND_UP(consumer->input_size, 4);

   /* truncate to avoid writing constants the shader cannot hold */
   size = (MIN2(size + base, consumer->constlen) - base) * 4;
   if (size <= 0)
      return;

   tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, base, sb, 0, size,
                  producer->output_loc);
}

/* Map a GL primitive type (as produced by the GS/tess lowering) to the
 * a6xx tess output primitive enum.
 */
static uint16_t
gl_primitive_to_tess(uint16_t primitive) {
   switch (primitive) {
   case GL_POINTS:
      return TESS_POINTS;
   case GL_LINE_STRIP:
      return TESS_LINES;
   case GL_TRIANGLE_STRIP:
      return TESS_CW_TRIS;
   default:
      unreachable("");
   }
}

void
tu6_emit_vpc(struct tu_cs *cs,
             const struct ir3_shader_variant
*vs,844const struct ir3_shader_variant *hs,845const struct ir3_shader_variant *ds,846const struct ir3_shader_variant *gs,847const struct ir3_shader_variant *fs,848uint32_t patch_control_points)849{850/* note: doesn't compile as static because of the array regs.. */851const struct reg_config {852uint16_t reg_sp_xs_out_reg;853uint16_t reg_sp_xs_vpc_dst_reg;854uint16_t reg_vpc_xs_pack;855uint16_t reg_vpc_xs_clip_cntl;856uint16_t reg_gras_xs_cl_cntl;857uint16_t reg_pc_xs_out_cntl;858uint16_t reg_sp_xs_primitive_cntl;859uint16_t reg_vpc_xs_layer_cntl;860uint16_t reg_gras_xs_layer_cntl;861} reg_config[] = {862[MESA_SHADER_VERTEX] = {863REG_A6XX_SP_VS_OUT_REG(0),864REG_A6XX_SP_VS_VPC_DST_REG(0),865REG_A6XX_VPC_VS_PACK,866REG_A6XX_VPC_VS_CLIP_CNTL,867REG_A6XX_GRAS_VS_CL_CNTL,868REG_A6XX_PC_VS_OUT_CNTL,869REG_A6XX_SP_VS_PRIMITIVE_CNTL,870REG_A6XX_VPC_VS_LAYER_CNTL,871REG_A6XX_GRAS_VS_LAYER_CNTL872},873[MESA_SHADER_TESS_EVAL] = {874REG_A6XX_SP_DS_OUT_REG(0),875REG_A6XX_SP_DS_VPC_DST_REG(0),876REG_A6XX_VPC_DS_PACK,877REG_A6XX_VPC_DS_CLIP_CNTL,878REG_A6XX_GRAS_DS_CL_CNTL,879REG_A6XX_PC_DS_OUT_CNTL,880REG_A6XX_SP_DS_PRIMITIVE_CNTL,881REG_A6XX_VPC_DS_LAYER_CNTL,882REG_A6XX_GRAS_DS_LAYER_CNTL883},884[MESA_SHADER_GEOMETRY] = {885REG_A6XX_SP_GS_OUT_REG(0),886REG_A6XX_SP_GS_VPC_DST_REG(0),887REG_A6XX_VPC_GS_PACK,888REG_A6XX_VPC_GS_CLIP_CNTL,889REG_A6XX_GRAS_GS_CL_CNTL,890REG_A6XX_PC_GS_OUT_CNTL,891REG_A6XX_SP_GS_PRIMITIVE_CNTL,892REG_A6XX_VPC_GS_LAYER_CNTL,893REG_A6XX_GRAS_GS_LAYER_CNTL894},895};896897const struct ir3_shader_variant *last_shader;898if (gs) {899last_shader = gs;900} else if (hs) {901last_shader = ds;902} else {903last_shader = vs;904}905906const struct reg_config *cfg = ®_config[last_shader->type];907908struct ir3_shader_linkage linkage = {909.primid_loc = 0xff,910.clip0_loc = 0xff,911.clip1_loc = 0xff,912};913if (fs)914ir3_link_shaders(&linkage, last_shader, fs, true);915916if (last_shader->shader->stream_output.num_outputs)917ir3_link_stream_out(&linkage, 
last_shader);918919/* We do this after linking shaders in order to know whether PrimID920* passthrough needs to be enabled.921*/922bool primid_passthru = linkage.primid_loc != 0xff;923tu6_emit_vs_system_values(cs, vs, hs, ds, gs, primid_passthru);924925tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VAR_DISABLE(0), 4);926tu_cs_emit(cs, ~linkage.varmask[0]);927tu_cs_emit(cs, ~linkage.varmask[1]);928tu_cs_emit(cs, ~linkage.varmask[2]);929tu_cs_emit(cs, ~linkage.varmask[3]);930931/* a6xx finds position/pointsize at the end */932const uint32_t pointsize_regid =933ir3_find_output_regid(last_shader, VARYING_SLOT_PSIZ);934const uint32_t layer_regid =935ir3_find_output_regid(last_shader, VARYING_SLOT_LAYER);936const uint32_t view_regid =937ir3_find_output_regid(last_shader, VARYING_SLOT_VIEWPORT);938const uint32_t clip0_regid =939ir3_find_output_regid(last_shader, VARYING_SLOT_CLIP_DIST0);940const uint32_t clip1_regid =941ir3_find_output_regid(last_shader, VARYING_SLOT_CLIP_DIST1);942uint32_t primitive_regid = gs ?943ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID) : regid(63, 0);944uint32_t flags_regid = gs ?945ir3_find_output_regid(gs, VARYING_SLOT_GS_VERTEX_FLAGS_IR3) : 0;946947uint32_t pointsize_loc = 0xff, position_loc = 0xff, layer_loc = 0xff, view_loc = 0xff;948949if (layer_regid != regid(63, 0)) {950layer_loc = linkage.max_loc;951ir3_link_add(&linkage, layer_regid, 0x1, linkage.max_loc);952}953954if (view_regid != regid(63, 0)) {955view_loc = linkage.max_loc;956ir3_link_add(&linkage, view_regid, 0x1, linkage.max_loc);957}958959unsigned extra_pos = 0;960961for (unsigned i = 0; i < last_shader->outputs_count; i++) {962if (last_shader->outputs[i].slot != VARYING_SLOT_POS)963continue;964965if (position_loc == 0xff)966position_loc = linkage.max_loc;967968ir3_link_add(&linkage, last_shader->outputs[i].regid,9690xf, position_loc + 4 * last_shader->outputs[i].view);970extra_pos = MAX2(extra_pos, last_shader->outputs[i].view);971}972973if (pointsize_regid != regid(63, 0)) 
{974pointsize_loc = linkage.max_loc;975ir3_link_add(&linkage, pointsize_regid, 0x1, linkage.max_loc);976}977978uint8_t clip_cull_mask = last_shader->clip_mask | last_shader->cull_mask;979980/* Handle the case where clip/cull distances aren't read by the FS */981uint32_t clip0_loc = linkage.clip0_loc, clip1_loc = linkage.clip1_loc;982if (clip0_loc == 0xff && clip0_regid != regid(63, 0)) {983clip0_loc = linkage.max_loc;984ir3_link_add(&linkage, clip0_regid, clip_cull_mask & 0xf, linkage.max_loc);985}986if (clip1_loc == 0xff && clip1_regid != regid(63, 0)) {987clip1_loc = linkage.max_loc;988ir3_link_add(&linkage, clip1_regid, clip_cull_mask >> 4, linkage.max_loc);989}990991tu6_setup_streamout(cs, last_shader, &linkage);992993/* The GPU hangs on some models when there are no outputs (xs_pack::CNT),994* at least when a DS is the last stage, so add a dummy output to keep it995* happy if there aren't any. We do this late in order to avoid emitting996* any unused code and make sure that optimizations don't remove it.997*/998if (linkage.cnt == 0)999ir3_link_add(&linkage, 0, 0x1, linkage.max_loc);10001001/* map outputs of the last shader to VPC */1002assert(linkage.cnt <= 32);1003const uint32_t sp_out_count = DIV_ROUND_UP(linkage.cnt, 2);1004const uint32_t sp_vpc_dst_count = DIV_ROUND_UP(linkage.cnt, 4);1005uint32_t sp_out[16] = {0};1006uint32_t sp_vpc_dst[8] = {0};1007for (uint32_t i = 0; i < linkage.cnt; i++) {1008((uint16_t *) sp_out)[i] =1009A6XX_SP_VS_OUT_REG_A_REGID(linkage.var[i].regid) |1010A6XX_SP_VS_OUT_REG_A_COMPMASK(linkage.var[i].compmask);1011((uint8_t *) sp_vpc_dst)[i] =1012A6XX_SP_VS_VPC_DST_REG_OUTLOC0(linkage.var[i].loc);1013}10141015tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_out_reg, sp_out_count);1016tu_cs_emit_array(cs, sp_out, sp_out_count);10171018tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_vpc_dst_reg, sp_vpc_dst_count);1019tu_cs_emit_array(cs, sp_vpc_dst, sp_vpc_dst_count);10201021tu_cs_emit_pkt4(cs, cfg->reg_vpc_xs_pack, 1);1022tu_cs_emit(cs, 
A6XX_VPC_VS_PACK_POSITIONLOC(position_loc) |1023A6XX_VPC_VS_PACK_PSIZELOC(pointsize_loc) |1024A6XX_VPC_VS_PACK_STRIDE_IN_VPC(linkage.max_loc) |1025A6XX_VPC_VS_PACK_EXTRAPOS(extra_pos));10261027tu_cs_emit_pkt4(cs, cfg->reg_vpc_xs_clip_cntl, 1);1028tu_cs_emit(cs, A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK(clip_cull_mask) |1029A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC(clip0_loc) |1030A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC(clip1_loc));10311032tu_cs_emit_pkt4(cs, cfg->reg_gras_xs_cl_cntl, 1);1033tu_cs_emit(cs, A6XX_GRAS_VS_CL_CNTL_CLIP_MASK(last_shader->clip_mask) |1034A6XX_GRAS_VS_CL_CNTL_CULL_MASK(last_shader->cull_mask));10351036tu_cs_emit_pkt4(cs, cfg->reg_pc_xs_out_cntl, 1);1037tu_cs_emit(cs, A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC(linkage.max_loc) |1038CONDREG(pointsize_regid, A6XX_PC_VS_OUT_CNTL_PSIZE) |1039CONDREG(layer_regid, A6XX_PC_VS_OUT_CNTL_LAYER) |1040CONDREG(view_regid, A6XX_PC_VS_OUT_CNTL_VIEW) |1041CONDREG(primitive_regid, A6XX_PC_VS_OUT_CNTL_PRIMITIVE_ID) |1042A6XX_PC_VS_OUT_CNTL_CLIP_MASK(clip_cull_mask));10431044tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_primitive_cntl, 1);1045tu_cs_emit(cs, A6XX_SP_VS_PRIMITIVE_CNTL_OUT(linkage.cnt) |1046A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID(flags_regid));10471048tu_cs_emit_pkt4(cs, cfg->reg_vpc_xs_layer_cntl, 1);1049tu_cs_emit(cs, A6XX_VPC_VS_LAYER_CNTL_LAYERLOC(layer_loc) |1050A6XX_VPC_VS_LAYER_CNTL_VIEWLOC(view_loc));10511052tu_cs_emit_pkt4(cs, cfg->reg_gras_xs_layer_cntl, 1);1053tu_cs_emit(cs, CONDREG(layer_regid, A6XX_GRAS_GS_LAYER_CNTL_WRITES_LAYER) |1054CONDREG(view_regid, A6XX_GRAS_GS_LAYER_CNTL_WRITES_VIEW));10551056tu_cs_emit_regs(cs, A6XX_PC_PRIMID_PASSTHRU(primid_passthru));10571058tu_cs_emit_pkt4(cs, REG_A6XX_VPC_CNTL_0, 1);1059tu_cs_emit(cs, A6XX_VPC_CNTL_0_NUMNONPOSVAR(fs ? 
fs->total_in : 0) |1060COND(fs && fs->total_in, A6XX_VPC_CNTL_0_VARYING) |1061A6XX_VPC_CNTL_0_PRIMIDLOC(linkage.primid_loc) |1062A6XX_VPC_CNTL_0_VIEWIDLOC(linkage.viewid_loc));10631064if (hs) {1065shader_info *hs_info = &hs->shader->nir->info;10661067tu_cs_emit_pkt4(cs, REG_A6XX_PC_TESS_NUM_VERTEX, 1);1068tu_cs_emit(cs, hs_info->tess.tcs_vertices_out);10691070/* Total attribute slots in HS incoming patch. */1071tu_cs_emit_pkt4(cs, REG_A6XX_PC_HS_INPUT_SIZE, 1);1072tu_cs_emit(cs, patch_control_points * vs->output_size / 4);10731074const uint32_t wavesize = 64;1075const uint32_t max_wave_input_size = 64;10761077/* note: if HS is really just the VS extended, then this1078* should be by MAX2(patch_control_points, hs_info->tess.tcs_vertices_out)1079* however that doesn't match the blob, and fails some dEQP tests.1080*/1081uint32_t prims_per_wave = wavesize / hs_info->tess.tcs_vertices_out;1082uint32_t max_prims_per_wave =1083max_wave_input_size * wavesize / (vs->output_size * patch_control_points);1084prims_per_wave = MIN2(prims_per_wave, max_prims_per_wave);10851086uint32_t total_size = vs->output_size * patch_control_points * prims_per_wave;1087uint32_t wave_input_size = DIV_ROUND_UP(total_size, wavesize);10881089tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_WAVE_INPUT_SIZE, 1);1090tu_cs_emit(cs, wave_input_size);10911092/* In SPIR-V generated from GLSL, the tessellation primitive params are1093* are specified in the tess eval shader, but in SPIR-V generated from1094* HLSL, they are specified in the tess control shader. 
*/1095shader_info *tess_info =1096ds->shader->nir->info.tess.spacing == TESS_SPACING_UNSPECIFIED ?1097&hs->shader->nir->info : &ds->shader->nir->info;1098tu_cs_emit_pkt4(cs, REG_A6XX_PC_TESS_CNTL, 1);1099uint32_t output;1100if (tess_info->tess.point_mode)1101output = TESS_POINTS;1102else if (tess_info->tess.primitive_mode == GL_ISOLINES)1103output = TESS_LINES;1104else if (tess_info->tess.ccw)1105output = TESS_CCW_TRIS;1106else1107output = TESS_CW_TRIS;11081109enum a6xx_tess_spacing spacing;1110switch (tess_info->tess.spacing) {1111case TESS_SPACING_EQUAL:1112spacing = TESS_EQUAL;1113break;1114case TESS_SPACING_FRACTIONAL_ODD:1115spacing = TESS_FRACTIONAL_ODD;1116break;1117case TESS_SPACING_FRACTIONAL_EVEN:1118spacing = TESS_FRACTIONAL_EVEN;1119break;1120case TESS_SPACING_UNSPECIFIED:1121default:1122unreachable("invalid tess spacing");1123}1124tu_cs_emit(cs, A6XX_PC_TESS_CNTL_SPACING(spacing) |1125A6XX_PC_TESS_CNTL_OUTPUT(output));11261127tu6_emit_link_map(cs, vs, hs, SB6_HS_SHADER);1128tu6_emit_link_map(cs, hs, ds, SB6_DS_SHADER);1129}113011311132if (gs) {1133uint32_t vertices_out, invocations, output, vec4_size;1134uint32_t prev_stage_output_size = ds ? ds->output_size : vs->output_size;11351136/* this detects the tu_clear_blit path, which doesn't set ->nir */1137if (gs->shader->nir) {1138if (hs) {1139tu6_emit_link_map(cs, ds, gs, SB6_GS_SHADER);1140} else {1141tu6_emit_link_map(cs, vs, gs, SB6_GS_SHADER);1142}1143vertices_out = gs->shader->nir->info.gs.vertices_out - 1;1144output = gl_primitive_to_tess(gs->shader->nir->info.gs.output_primitive);1145invocations = gs->shader->nir->info.gs.invocations - 1;1146/* Size of per-primitive alloction in ldlw memory in vec4s. 
*/1147vec4_size = gs->shader->nir->info.gs.vertices_in *1148DIV_ROUND_UP(prev_stage_output_size, 4);1149} else {1150vertices_out = 3;1151output = TESS_CW_TRIS;1152invocations = 0;1153vec4_size = 0;1154}11551156tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_5, 1);1157tu_cs_emit(cs,1158A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(vertices_out) |1159A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(output) |1160A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(invocations));11611162tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_3, 1);1163tu_cs_emit(cs, 0);11641165tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9100, 1);1166tu_cs_emit(cs, 0xff);11671168tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 1);1169tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(vec4_size));11701171uint32_t prim_size = prev_stage_output_size;1172if (prim_size > 64)1173prim_size = 64;1174else if (prim_size == 64)1175prim_size = 63;1176tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_PRIM_SIZE, 1);1177tu_cs_emit(cs, prim_size);1178}1179}11801181static int1182tu6_vpc_varying_mode(const struct ir3_shader_variant *fs,1183uint32_t index,1184uint8_t *interp_mode,1185uint8_t *ps_repl_mode)1186{1187enum1188{1189INTERP_SMOOTH = 0,1190INTERP_FLAT = 1,1191INTERP_ZERO = 2,1192INTERP_ONE = 3,1193};1194enum1195{1196PS_REPL_NONE = 0,1197PS_REPL_S = 1,1198PS_REPL_T = 2,1199PS_REPL_ONE_MINUS_T = 3,1200};12011202const uint32_t compmask = fs->inputs[index].compmask;12031204/* NOTE: varyings are packed, so if compmask is 0xb then first, second, and1205* fourth component occupy three consecutive varying slots1206*/1207int shift = 0;1208*interp_mode = 0;1209*ps_repl_mode = 0;1210if (fs->inputs[index].slot == VARYING_SLOT_PNTC) {1211if (compmask & 0x1) {1212*ps_repl_mode |= PS_REPL_S << shift;1213shift += 2;1214}1215if (compmask & 0x2) {1216*ps_repl_mode |= PS_REPL_T << shift;1217shift += 2;1218}1219if (compmask & 0x4) {1220*interp_mode |= INTERP_ZERO << shift;1221shift += 2;1222}1223if (compmask & 0x8) {1224*interp_mode |= INTERP_ONE << 6;1225shift += 
2;1226}1227} else if (fs->inputs[index].flat) {1228for (int i = 0; i < 4; i++) {1229if (compmask & (1 << i)) {1230*interp_mode |= INTERP_FLAT << shift;1231shift += 2;1232}1233}1234}12351236return shift;1237}12381239static void1240tu6_emit_vpc_varying_modes(struct tu_cs *cs,1241const struct ir3_shader_variant *fs)1242{1243uint32_t interp_modes[8] = { 0 };1244uint32_t ps_repl_modes[8] = { 0 };12451246if (fs) {1247for (int i = -1;1248(i = ir3_next_varying(fs, i)) < (int) fs->inputs_count;) {12491250/* get the mode for input i */1251uint8_t interp_mode;1252uint8_t ps_repl_mode;1253const int bits =1254tu6_vpc_varying_mode(fs, i, &interp_mode, &ps_repl_mode);12551256/* OR the mode into the array */1257const uint32_t inloc = fs->inputs[i].inloc * 2;1258uint32_t n = inloc / 32;1259uint32_t shift = inloc % 32;1260interp_modes[n] |= interp_mode << shift;1261ps_repl_modes[n] |= ps_repl_mode << shift;1262if (shift + bits > 32) {1263n++;1264shift = 32 - shift;12651266interp_modes[n] |= interp_mode >> shift;1267ps_repl_modes[n] |= ps_repl_mode >> shift;1268}1269}1270}12711272tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_INTERP_MODE(0), 8);1273tu_cs_emit_array(cs, interp_modes, 8);12741275tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_PS_REPL_MODE(0), 8);1276tu_cs_emit_array(cs, ps_repl_modes, 8);1277}12781279void1280tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs)1281{1282uint32_t face_regid, coord_regid, zwcoord_regid, samp_id_regid;1283uint32_t ij_regid[IJ_COUNT];1284uint32_t smask_in_regid;12851286bool sample_shading = fs->per_samp | fs->key.sample_shading;1287bool enable_varyings = fs->total_in > 0;12881289samp_id_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_ID);1290smask_in_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_MASK_IN);1291face_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRONT_FACE);1292coord_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRAG_COORD);1293zwcoord_regid = VALIDREG(coord_regid) ? 
coord_regid + 2 : regid(63, 0);1294for (unsigned i = 0; i < ARRAY_SIZE(ij_regid); i++)1295ij_regid[i] = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL + i);12961297if (VALIDREG(ij_regid[IJ_LINEAR_SAMPLE]))1298tu_finishme("linear sample varying");12991300if (VALIDREG(ij_regid[IJ_LINEAR_CENTROID]))1301tu_finishme("linear centroid varying");13021303if (fs->num_sampler_prefetch > 0) {1304assert(VALIDREG(ij_regid[IJ_PERSP_PIXEL]));1305/* also, it seems like ij_pix is *required* to be r0.x */1306assert(ij_regid[IJ_PERSP_PIXEL] == regid(0, 0));1307}13081309tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_PREFETCH_CNTL, 1 + fs->num_sampler_prefetch);1310tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CNTL_COUNT(fs->num_sampler_prefetch) |1311A6XX_SP_FS_PREFETCH_CNTL_UNK4(regid(63, 0)) |13120x7000); // XXX);1313for (int i = 0; i < fs->num_sampler_prefetch; i++) {1314const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];1315tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CMD_SRC(prefetch->src) |1316A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(prefetch->samp_id) |1317A6XX_SP_FS_PREFETCH_CMD_TEX_ID(prefetch->tex_id) |1318A6XX_SP_FS_PREFETCH_CMD_DST(prefetch->dst) |1319A6XX_SP_FS_PREFETCH_CMD_WRMASK(prefetch->wrmask) |1320COND(prefetch->half_precision, A6XX_SP_FS_PREFETCH_CMD_HALF) |1321A6XX_SP_FS_PREFETCH_CMD_CMD(prefetch->cmd));1322}13231324if (fs->num_sampler_prefetch > 0) {1325tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(0), fs->num_sampler_prefetch);1326for (int i = 0; i < fs->num_sampler_prefetch; i++) {1327const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];1328tu_cs_emit(cs,1329A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(prefetch->samp_bindless_id) |1330A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(prefetch->tex_bindless_id));1331}1332}13331334tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CONTROL_1_REG, 5);1335tu_cs_emit(cs, 0x7);1336tu_cs_emit(cs, A6XX_HLSQ_CONTROL_2_REG_FACEREGID(face_regid) |1337A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(samp_id_regid) 
|1338A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(smask_in_regid) |1339A6XX_HLSQ_CONTROL_2_REG_SIZE(ij_regid[IJ_PERSP_SIZE]));1340tu_cs_emit(cs, A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(ij_regid[IJ_PERSP_PIXEL]) |1341A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(ij_regid[IJ_LINEAR_PIXEL]) |1342A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(ij_regid[IJ_PERSP_CENTROID]) |1343A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(ij_regid[IJ_LINEAR_CENTROID]));1344tu_cs_emit(cs, A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(coord_regid) |1345A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(zwcoord_regid) |1346A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(ij_regid[IJ_PERSP_SAMPLE]) |1347A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(ij_regid[IJ_LINEAR_SAMPLE]));1348tu_cs_emit(cs, 0xfc);13491350enum a6xx_threadsize thrsz = fs->info.double_threadsize ? THREAD128 : THREAD64;1351tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_FS_CNTL_0, 1);1352tu_cs_emit(cs, A6XX_HLSQ_FS_CNTL_0_THREADSIZE(thrsz) |1353COND(enable_varyings, A6XX_HLSQ_FS_CNTL_0_VARYINGS));13541355bool need_size = fs->frag_face || fs->fragcoord_compmask != 0;1356bool need_size_persamp = false;1357if (VALIDREG(ij_regid[IJ_PERSP_SIZE])) {1358if (sample_shading)1359need_size_persamp = true;1360else1361need_size = true;1362}1363if (VALIDREG(ij_regid[IJ_LINEAR_PIXEL]))1364need_size = true;13651366tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CNTL, 1);1367tu_cs_emit(cs,1368CONDREG(ij_regid[IJ_PERSP_PIXEL], A6XX_GRAS_CNTL_IJ_PERSP_PIXEL) |1369CONDREG(ij_regid[IJ_PERSP_CENTROID], A6XX_GRAS_CNTL_IJ_PERSP_CENTROID) |1370CONDREG(ij_regid[IJ_PERSP_SAMPLE], A6XX_GRAS_CNTL_IJ_PERSP_SAMPLE) |1371COND(need_size, A6XX_GRAS_CNTL_SIZE) |1372COND(need_size_persamp, A6XX_GRAS_CNTL_SIZE_PERSAMP) |1373COND(fs->fragcoord_compmask != 0, A6XX_GRAS_CNTL_COORD_MASK(fs->fragcoord_compmask)));13741375tu_cs_emit_pkt4(cs, REG_A6XX_RB_RENDER_CONTROL0, 2);1376tu_cs_emit(cs,1377CONDREG(ij_regid[IJ_PERSP_PIXEL], A6XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL) |1378CONDREG(ij_regid[IJ_PERSP_CENTROID], A6XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID) 
|1379CONDREG(ij_regid[IJ_PERSP_SAMPLE], A6XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE) |1380COND(need_size, A6XX_RB_RENDER_CONTROL0_SIZE) |1381COND(enable_varyings, A6XX_RB_RENDER_CONTROL0_UNK10) |1382COND(need_size_persamp, A6XX_RB_RENDER_CONTROL0_SIZE_PERSAMP) |1383COND(fs->fragcoord_compmask != 0,1384A6XX_RB_RENDER_CONTROL0_COORD_MASK(fs->fragcoord_compmask)));1385tu_cs_emit(cs,1386/* these two bits (UNK4/UNK5) relate to fragcoord1387* without them, fragcoord is the same for all samples1388*/1389COND(sample_shading, A6XX_RB_RENDER_CONTROL1_UNK4) |1390COND(sample_shading, A6XX_RB_RENDER_CONTROL1_UNK5) |1391CONDREG(smask_in_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEMASK) |1392CONDREG(samp_id_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEID) |1393CONDREG(ij_regid[IJ_PERSP_SIZE], A6XX_RB_RENDER_CONTROL1_SIZE) |1394COND(fs->frag_face, A6XX_RB_RENDER_CONTROL1_FACENESS));13951396tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CNTL, 1);1397tu_cs_emit(cs, COND(sample_shading, A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE));13981399tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8101, 1);1400tu_cs_emit(cs, COND(sample_shading, 0x6)); // XXX14011402tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CNTL, 1);1403tu_cs_emit(cs, COND(sample_shading, A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE));1404}14051406static void1407tu6_emit_fs_outputs(struct tu_cs *cs,1408const struct ir3_shader_variant *fs,1409uint32_t mrt_count, bool dual_src_blend,1410uint32_t render_components,1411bool no_earlyz,1412struct tu_pipeline *pipeline)1413{1414uint32_t smask_regid, posz_regid, stencilref_regid;14151416posz_regid = ir3_find_output_regid(fs, FRAG_RESULT_DEPTH);1417smask_regid = ir3_find_output_regid(fs, FRAG_RESULT_SAMPLE_MASK);1418stencilref_regid = ir3_find_output_regid(fs, FRAG_RESULT_STENCIL);14191420uint32_t fragdata_regid[8];1421if (fs->color0_mrt) {1422fragdata_regid[0] = ir3_find_output_regid(fs, FRAG_RESULT_COLOR);1423for (uint32_t i = 1; i < ARRAY_SIZE(fragdata_regid); i++)1424fragdata_regid[i] = fragdata_regid[0];1425} else {1426for (uint32_t 
i = 0; i < ARRAY_SIZE(fragdata_regid); i++)1427fragdata_regid[i] = ir3_find_output_regid(fs, FRAG_RESULT_DATA0 + i);1428}14291430tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2);1431tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(posz_regid) |1432A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(smask_regid) |1433A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID(stencilref_regid) |1434COND(dual_src_blend, A6XX_SP_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE));1435tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL1_MRT(mrt_count));14361437uint32_t fs_render_components = 0;14381439tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_REG(0), 8);1440for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++) {1441// TODO we could have a mix of half and full precision outputs,1442// we really need to figure out half-precision from IR3_REG_HALF1443tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_REG_REGID(fragdata_regid[i]) |1444(false ? A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION : 0));14451446if (VALIDREG(fragdata_regid[i])) {1447fs_render_components |= 0xf << (i * 4);1448}1449}14501451/* dual source blending has an extra fs output in the 2nd slot */1452if (dual_src_blend) {1453fs_render_components |= 0xf << 4;1454}14551456/* There is no point in having component enabled which is not written1457* by the shader. 
Per VK spec it is an UB, however a few apps depend on1458* attachment not being changed if FS doesn't have corresponding output.1459*/1460fs_render_components &= render_components;14611462tu_cs_emit_regs(cs,1463A6XX_SP_FS_RENDER_COMPONENTS(.dword = fs_render_components));14641465tu_cs_emit_pkt4(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 2);1466tu_cs_emit(cs, COND(fs->writes_pos, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z) |1467COND(fs->writes_smask, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK) |1468COND(fs->writes_stencilref, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_STENCILREF) |1469COND(dual_src_blend, A6XX_RB_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE));1470tu_cs_emit(cs, A6XX_RB_FS_OUTPUT_CNTL1_MRT(mrt_count));14711472tu_cs_emit_regs(cs,1473A6XX_RB_RENDER_COMPONENTS(.dword = fs_render_components));14741475if (pipeline) {1476pipeline->lrz.fs_has_kill = fs->has_kill;1477pipeline->lrz.early_fragment_tests = fs->shader->nir->info.fs.early_fragment_tests;14781479if ((fs->shader && !fs->shader->nir->info.fs.early_fragment_tests) &&1480(fs->no_earlyz || fs->has_kill || fs->writes_pos || fs->writes_stencilref || no_earlyz || fs->writes_smask)) {1481pipeline->lrz.force_late_z = true;1482}1483}1484}14851486static void1487tu6_emit_geom_tess_consts(struct tu_cs *cs,1488const struct ir3_shader_variant *vs,1489const struct ir3_shader_variant *hs,1490const struct ir3_shader_variant *ds,1491const struct ir3_shader_variant *gs,1492uint32_t cps_per_patch)1493{1494uint32_t num_vertices =1495hs ? 
cps_per_patch : gs->shader->nir->info.gs.vertices_in;14961497uint32_t vs_params[4] = {1498vs->output_size * num_vertices * 4, /* vs primitive stride */1499vs->output_size * 4, /* vs vertex stride */15000,15010,1502};1503uint32_t vs_base = ir3_const_state(vs)->offsets.primitive_param;1504tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, vs_base, SB6_VS_SHADER, 0,1505ARRAY_SIZE(vs_params), vs_params);15061507if (hs) {1508assert(ds->type != MESA_SHADER_NONE);1509uint32_t hs_params[4] = {1510vs->output_size * num_vertices * 4, /* hs primitive stride */1511vs->output_size * 4, /* hs vertex stride */1512hs->output_size,1513cps_per_patch,1514};15151516uint32_t hs_base = hs->const_state->offsets.primitive_param;1517tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, hs_base, SB6_HS_SHADER, 0,1518ARRAY_SIZE(hs_params), hs_params);1519if (gs)1520num_vertices = gs->shader->nir->info.gs.vertices_in;15211522uint32_t ds_params[4] = {1523ds->output_size * num_vertices * 4, /* ds primitive stride */1524ds->output_size * 4, /* ds vertex stride */1525hs->output_size, /* hs vertex stride (dwords) */1526hs->shader->nir->info.tess.tcs_vertices_out1527};15281529uint32_t ds_base = ds->const_state->offsets.primitive_param;1530tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, ds_base, SB6_DS_SHADER, 0,1531ARRAY_SIZE(ds_params), ds_params);1532}15331534if (gs) {1535const struct ir3_shader_variant *prev = ds ? 
ds : vs;1536uint32_t gs_params[4] = {1537prev->output_size * num_vertices * 4, /* gs primitive stride */1538prev->output_size * 4, /* gs vertex stride */15390,15400,1541};1542uint32_t gs_base = gs->const_state->offsets.primitive_param;1543tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, gs_base, SB6_GS_SHADER, 0,1544ARRAY_SIZE(gs_params), gs_params);1545}1546}15471548static void1549tu6_emit_program_config(struct tu_cs *cs,1550struct tu_pipeline_builder *builder)1551{1552gl_shader_stage stage = MESA_SHADER_VERTEX;15531554STATIC_ASSERT(MESA_SHADER_VERTEX == 0);15551556tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(1557.vs_state = true,1558.hs_state = true,1559.ds_state = true,1560.gs_state = true,1561.fs_state = true,1562.gfx_ibo = true));1563for (; stage < ARRAY_SIZE(builder->shaders); stage++) {1564tu6_emit_xs_config(cs, stage, builder->variants[stage]);1565}1566}15671568static void1569tu6_emit_program(struct tu_cs *cs,1570struct tu_pipeline_builder *builder,1571bool binning_pass,1572struct tu_pipeline *pipeline)1573{1574const struct ir3_shader_variant *vs = builder->variants[MESA_SHADER_VERTEX];1575const struct ir3_shader_variant *bs = builder->binning_variant;1576const struct ir3_shader_variant *hs = builder->variants[MESA_SHADER_TESS_CTRL];1577const struct ir3_shader_variant *ds = builder->variants[MESA_SHADER_TESS_EVAL];1578const struct ir3_shader_variant *gs = builder->variants[MESA_SHADER_GEOMETRY];1579const struct ir3_shader_variant *fs = builder->variants[MESA_SHADER_FRAGMENT];1580gl_shader_stage stage = MESA_SHADER_VERTEX;1581uint32_t cps_per_patch = builder->create_info->pTessellationState ?1582builder->create_info->pTessellationState->patchControlPoints : 0;1583bool multi_pos_output = builder->shaders[MESA_SHADER_VERTEX]->multi_pos_output;15841585/* Don't use the binning pass variant when GS is present because we don't1586* support compiling correct binning pass variants with GS.1587*/1588if (binning_pass && !gs) {1589vs = bs;1590tu6_emit_xs(cs, stage, bs, 
&builder->pvtmem, builder->binning_vs_iova);1591stage++;1592}15931594for (; stage < ARRAY_SIZE(builder->shaders); stage++) {1595const struct ir3_shader_variant *xs = builder->variants[stage];15961597if (stage == MESA_SHADER_FRAGMENT && binning_pass)1598fs = xs = NULL;15991600tu6_emit_xs(cs, stage, xs, &builder->pvtmem, builder->shader_iova[stage]);1601}16021603uint32_t multiview_views = util_logbase2(builder->multiview_mask) + 1;1604uint32_t multiview_cntl = builder->multiview_mask ?1605A6XX_PC_MULTIVIEW_CNTL_ENABLE |1606A6XX_PC_MULTIVIEW_CNTL_VIEWS(multiview_views) |1607COND(!multi_pos_output, A6XX_PC_MULTIVIEW_CNTL_DISABLEMULTIPOS)1608: 0;16091610/* Copy what the blob does here. This will emit an extra 0x3f1611* CP_EVENT_WRITE when multiview is disabled. I'm not exactly sure what1612* this is working around yet.1613*/1614if (builder->device->physical_device->info->a6xx.has_cp_reg_write) {1615tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);1616tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(UNK_EVENT_WRITE));1617tu_cs_emit(cs, REG_A6XX_PC_MULTIVIEW_CNTL);1618} else {1619tu_cs_emit_pkt4(cs, REG_A6XX_PC_MULTIVIEW_CNTL, 1);1620}1621tu_cs_emit(cs, multiview_cntl);16221623tu_cs_emit_pkt4(cs, REG_A6XX_VFD_MULTIVIEW_CNTL, 1);1624tu_cs_emit(cs, multiview_cntl);16251626if (multiview_cntl &&1627builder->device->physical_device->info->a6xx.supports_multiview_mask) {1628tu_cs_emit_pkt4(cs, REG_A6XX_PC_MULTIVIEW_MASK, 1);1629tu_cs_emit(cs, builder->multiview_mask);1630}16311632tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_WAVE_INPUT_SIZE, 1);1633tu_cs_emit(cs, 0);16341635tu6_emit_vpc(cs, vs, hs, ds, gs, fs, cps_per_patch);1636tu6_emit_vpc_varying_modes(cs, fs);16371638bool no_earlyz = builder->depth_attachment_format == VK_FORMAT_S8_UINT;1639uint32_t mrt_count = builder->color_attachment_count;1640uint32_t render_components = builder->render_components;16411642if (builder->alpha_to_coverage) {1643/* alpha to coverage can behave like a discard */1644no_earlyz = true;1645/* alpha value comes from first mrt 
*/1646render_components |= 0xf;1647if (!mrt_count) {1648mrt_count = 1;1649/* Disable memory write for dummy mrt because it doesn't get set otherwise */1650tu_cs_emit_regs(cs, A6XX_RB_MRT_CONTROL(0, .component_enable = 0));1651}1652}16531654if (fs) {1655tu6_emit_fs_inputs(cs, fs);1656tu6_emit_fs_outputs(cs, fs, mrt_count,1657builder->use_dual_src_blend,1658render_components,1659no_earlyz,1660pipeline);1661} else {1662/* TODO: check if these can be skipped if fs is disabled */1663struct ir3_shader_variant dummy_variant = {};1664tu6_emit_fs_inputs(cs, &dummy_variant);1665tu6_emit_fs_outputs(cs, &dummy_variant, mrt_count,1666builder->use_dual_src_blend,1667render_components,1668no_earlyz,1669NULL);1670}16711672if (gs || hs) {1673tu6_emit_geom_tess_consts(cs, vs, hs, ds, gs, cps_per_patch);1674}1675}16761677static void1678tu6_emit_vertex_input(struct tu_pipeline *pipeline,1679struct tu_cs *cs,1680const struct ir3_shader_variant *vs,1681const VkPipelineVertexInputStateCreateInfo *info)1682{1683uint32_t vfd_decode_idx = 0;1684uint32_t binding_instanced = 0; /* bitmask of instanced bindings */1685uint32_t step_rate[MAX_VBS];16861687for (uint32_t i = 0; i < info->vertexBindingDescriptionCount; i++) {1688const VkVertexInputBindingDescription *binding =1689&info->pVertexBindingDescriptions[i];16901691if (!(pipeline->dynamic_state_mask & BIT(TU_DYNAMIC_STATE_VB_STRIDE))) {1692tu_cs_emit_regs(cs,1693A6XX_VFD_FETCH_STRIDE(binding->binding, binding->stride));1694}16951696if (binding->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE)1697binding_instanced |= 1 << binding->binding;16981699step_rate[binding->binding] = 1;1700}17011702const VkPipelineVertexInputDivisorStateCreateInfoEXT *div_state =1703vk_find_struct_const(info->pNext, PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);1704if (div_state) {1705for (uint32_t i = 0; i < div_state->vertexBindingDivisorCount; i++) {1706const VkVertexInputBindingDivisorDescriptionEXT *desc 
=1707&div_state->pVertexBindingDivisors[i];1708step_rate[desc->binding] = desc->divisor;1709}1710}17111712/* TODO: emit all VFD_DECODE/VFD_DEST_CNTL in same (two) pkt4 */17131714for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {1715const VkVertexInputAttributeDescription *attr =1716&info->pVertexAttributeDescriptions[i];1717uint32_t input_idx;17181719for (input_idx = 0; input_idx < vs->inputs_count; input_idx++) {1720if ((vs->inputs[input_idx].slot - VERT_ATTRIB_GENERIC0) == attr->location)1721break;1722}17231724/* attribute not used, skip it */1725if (input_idx == vs->inputs_count)1726continue;17271728const struct tu_native_format format = tu6_format_vtx(attr->format);1729tu_cs_emit_regs(cs,1730A6XX_VFD_DECODE_INSTR(vfd_decode_idx,1731.idx = attr->binding,1732.offset = attr->offset,1733.instanced = binding_instanced & (1 << attr->binding),1734.format = format.fmt,1735.swap = format.swap,1736.unk30 = 1,1737._float = !vk_format_is_int(attr->format)),1738A6XX_VFD_DECODE_STEP_RATE(vfd_decode_idx, step_rate[attr->binding]));17391740tu_cs_emit_regs(cs,1741A6XX_VFD_DEST_CNTL_INSTR(vfd_decode_idx,1742.writemask = vs->inputs[input_idx].compmask,1743.regid = vs->inputs[input_idx].regid));17441745vfd_decode_idx++;1746}17471748tu_cs_emit_regs(cs,1749A6XX_VFD_CONTROL_0(1750.fetch_cnt = vfd_decode_idx, /* decode_cnt for binning pass ? 
*/1751.decode_cnt = vfd_decode_idx));1752}17531754void1755tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewports, uint32_t num_viewport)1756{1757VkExtent2D guardband = {511, 511};17581759tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_VPORT_XOFFSET(0), num_viewport * 6);1760for (uint32_t i = 0; i < num_viewport; i++) {1761const VkViewport *viewport = &viewports[i];1762float offsets[3];1763float scales[3];1764scales[0] = viewport->width / 2.0f;1765scales[1] = viewport->height / 2.0f;1766scales[2] = viewport->maxDepth - viewport->minDepth;1767offsets[0] = viewport->x + scales[0];1768offsets[1] = viewport->y + scales[1];1769offsets[2] = viewport->minDepth;1770for (uint32_t j = 0; j < 3; j++) {1771tu_cs_emit(cs, fui(offsets[j]));1772tu_cs_emit(cs, fui(scales[j]));1773}17741775guardband.width =1776MIN2(guardband.width, fd_calc_guardband(offsets[0], scales[0], false));1777guardband.height =1778MIN2(guardband.height, fd_calc_guardband(offsets[1], scales[1], false));1779}17801781tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL(0), num_viewport * 2);1782for (uint32_t i = 0; i < num_viewport; i++) {1783const VkViewport *viewport = &viewports[i];1784VkOffset2D min;1785VkOffset2D max;1786min.x = (int32_t) viewport->x;1787max.x = (int32_t) ceilf(viewport->x + viewport->width);1788if (viewport->height >= 0.0f) {1789min.y = (int32_t) viewport->y;1790max.y = (int32_t) ceilf(viewport->y + viewport->height);1791} else {1792min.y = (int32_t)(viewport->y + viewport->height);1793max.y = (int32_t) ceilf(viewport->y);1794}1795/* the spec allows viewport->height to be 0.0f */1796if (min.y == max.y)1797max.y++;1798/* allow viewport->width = 0.0f for un-initialized viewports: */1799if (min.x == max.x)1800max.x++;18011802min.x = MAX2(min.x, 0);1803min.y = MAX2(min.y, 0);18041805assert(min.x < max.x);1806assert(min.y < max.y);1807tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X(min.x) |1808A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y(min.y));1809tu_cs_emit(cs, 
A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X(max.x - 1) |1810A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y(max.y - 1));1811}18121813tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_Z_CLAMP(0), num_viewport * 2);1814for (uint32_t i = 0; i < num_viewport; i++) {1815const VkViewport *viewport = &viewports[i];1816tu_cs_emit(cs, fui(MIN2(viewport->minDepth, viewport->maxDepth)));1817tu_cs_emit(cs, fui(MAX2(viewport->minDepth, viewport->maxDepth)));1818}1819tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ, 1);1820tu_cs_emit(cs, A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(guardband.width) |1821A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(guardband.height));18221823/* TODO: what to do about this and multi viewport ? */1824float z_clamp_min = num_viewport ? MIN2(viewports[0].minDepth, viewports[0].maxDepth) : 0;1825float z_clamp_max = num_viewport ? MAX2(viewports[0].minDepth, viewports[0].maxDepth) : 0;18261827tu_cs_emit_regs(cs,1828A6XX_RB_Z_CLAMP_MIN(z_clamp_min),1829A6XX_RB_Z_CLAMP_MAX(z_clamp_max));1830}18311832void1833tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissors, uint32_t scissor_count)1834{1835tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL(0), scissor_count * 2);18361837for (uint32_t i = 0; i < scissor_count; i++) {1838const VkRect2D *scissor = &scissors[i];18391840uint32_t min_x = scissor->offset.x;1841uint32_t min_y = scissor->offset.y;1842uint32_t max_x = min_x + scissor->extent.width - 1;1843uint32_t max_y = min_y + scissor->extent.height - 1;18441845if (!scissor->extent.width || !scissor->extent.height) {1846min_x = min_y = 1;1847max_x = max_y = 0;1848} else {1849/* avoid overflow */1850uint32_t scissor_max = BITFIELD_MASK(15);1851min_x = MIN2(scissor_max, min_x);1852min_y = MIN2(scissor_max, min_y);1853max_x = MIN2(scissor_max, max_x);1854max_y = MIN2(scissor_max, max_y);1855}18561857tu_cs_emit(cs, A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X(min_x) |1858A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(min_y));1859tu_cs_emit(cs, A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X(max_x) 
|1860A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(max_y));1861}1862}18631864void1865tu6_emit_sample_locations(struct tu_cs *cs, const VkSampleLocationsInfoEXT *samp_loc)1866{1867if (!samp_loc) {1868tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CONFIG, 1);1869tu_cs_emit(cs, 0);18701871tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CONFIG, 1);1872tu_cs_emit(cs, 0);18731874tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_SAMPLE_CONFIG, 1);1875tu_cs_emit(cs, 0);1876return;1877}18781879assert(samp_loc->sampleLocationsPerPixel == samp_loc->sampleLocationsCount);1880assert(samp_loc->sampleLocationGridSize.width == 1);1881assert(samp_loc->sampleLocationGridSize.height == 1);18821883uint32_t sample_config =1884A6XX_RB_SAMPLE_CONFIG_LOCATION_ENABLE;1885uint32_t sample_locations = 0;1886for (uint32_t i = 0; i < samp_loc->sampleLocationsCount; i++) {1887sample_locations |=1888(A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(samp_loc->pSampleLocations[i].x) |1889A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(samp_loc->pSampleLocations[i].y)) << i*8;1890}18911892tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CONFIG, 2);1893tu_cs_emit(cs, sample_config);1894tu_cs_emit(cs, sample_locations);18951896tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CONFIG, 2);1897tu_cs_emit(cs, sample_config);1898tu_cs_emit(cs, sample_locations);18991900tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_SAMPLE_CONFIG, 2);1901tu_cs_emit(cs, sample_config);1902tu_cs_emit(cs, sample_locations);1903}19041905static uint32_t1906tu6_gras_su_cntl(const VkPipelineRasterizationStateCreateInfo *rast_info,1907VkSampleCountFlagBits samples,1908bool multiview)1909{1910uint32_t gras_su_cntl = 0;19111912if (rast_info->cullMode & VK_CULL_MODE_FRONT_BIT)1913gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_FRONT;1914if (rast_info->cullMode & VK_CULL_MODE_BACK_BIT)1915gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_BACK;19161917if (rast_info->frontFace == VK_FRONT_FACE_CLOCKWISE)1918gras_su_cntl |= A6XX_GRAS_SU_CNTL_FRONT_CW;19191920gras_su_cntl |=1921A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(rast_info->lineWidth / 2.0f);19221923if 
(rast_info->depthBiasEnable)1924gras_su_cntl |= A6XX_GRAS_SU_CNTL_POLY_OFFSET;19251926if (samples > VK_SAMPLE_COUNT_1_BIT)1927gras_su_cntl |= A6XX_GRAS_SU_CNTL_MSAA_ENABLE;19281929if (multiview) {1930gras_su_cntl |=1931A6XX_GRAS_SU_CNTL_UNK17 |1932A6XX_GRAS_SU_CNTL_MULTIVIEW_ENABLE;1933}19341935return gras_su_cntl;1936}19371938void1939tu6_emit_depth_bias(struct tu_cs *cs,1940float constant_factor,1941float clamp,1942float slope_factor)1943{1944tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE, 3);1945tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_SCALE(slope_factor).value);1946tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET(constant_factor).value);1947tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(clamp).value);1948}19491950static uint32_t1951tu6_rb_mrt_blend_control(const VkPipelineColorBlendAttachmentState *att,1952bool has_alpha)1953{1954const enum a3xx_rb_blend_opcode color_op = tu6_blend_op(att->colorBlendOp);1955const enum adreno_rb_blend_factor src_color_factor = tu6_blend_factor(1956has_alpha ? att->srcColorBlendFactor1957: tu_blend_factor_no_dst_alpha(att->srcColorBlendFactor));1958const enum adreno_rb_blend_factor dst_color_factor = tu6_blend_factor(1959has_alpha ? 
att->dstColorBlendFactor1960: tu_blend_factor_no_dst_alpha(att->dstColorBlendFactor));1961const enum a3xx_rb_blend_opcode alpha_op = tu6_blend_op(att->alphaBlendOp);1962const enum adreno_rb_blend_factor src_alpha_factor =1963tu6_blend_factor(att->srcAlphaBlendFactor);1964const enum adreno_rb_blend_factor dst_alpha_factor =1965tu6_blend_factor(att->dstAlphaBlendFactor);19661967return A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(src_color_factor) |1968A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(color_op) |1969A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(dst_color_factor) |1970A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(src_alpha_factor) |1971A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(alpha_op) |1972A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(dst_alpha_factor);1973}19741975static uint32_t1976tu6_rb_mrt_control(const VkPipelineColorBlendAttachmentState *att,1977uint32_t rb_mrt_control_rop,1978bool has_alpha)1979{1980uint32_t rb_mrt_control =1981A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(att->colorWriteMask);19821983rb_mrt_control |= rb_mrt_control_rop;19841985if (att->blendEnable) {1986rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND;19871988if (has_alpha)1989rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND2;1990}19911992return rb_mrt_control;1993}19941995static void1996tu6_emit_rb_mrt_controls(struct tu_cs *cs,1997const VkPipelineColorBlendStateCreateInfo *blend_info,1998const VkFormat attachment_formats[MAX_RTS],1999uint32_t *blend_enable_mask)2000{2001*blend_enable_mask = 0;20022003bool rop_reads_dst = false;2004uint32_t rb_mrt_control_rop = 0;2005if (blend_info->logicOpEnable) {2006rop_reads_dst = tu_logic_op_reads_dst(blend_info->logicOp);2007rb_mrt_control_rop =2008A6XX_RB_MRT_CONTROL_ROP_ENABLE |2009A6XX_RB_MRT_CONTROL_ROP_CODE(tu6_rop(blend_info->logicOp));2010}20112012for (uint32_t i = 0; i < blend_info->attachmentCount; i++) {2013const VkPipelineColorBlendAttachmentState *att =2014&blend_info->pAttachments[i];2015const VkFormat format = attachment_formats[i];20162017uint32_t 
rb_mrt_control = 0;2018uint32_t rb_mrt_blend_control = 0;2019if (format != VK_FORMAT_UNDEFINED) {2020const bool has_alpha = vk_format_has_alpha(format);20212022rb_mrt_control =2023tu6_rb_mrt_control(att, rb_mrt_control_rop, has_alpha);2024rb_mrt_blend_control = tu6_rb_mrt_blend_control(att, has_alpha);20252026if (att->blendEnable || rop_reads_dst)2027*blend_enable_mask |= 1 << i;2028}20292030tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_CONTROL(i), 2);2031tu_cs_emit(cs, rb_mrt_control);2032tu_cs_emit(cs, rb_mrt_blend_control);2033}2034}20352036static void2037tu6_emit_blend_control(struct tu_cs *cs,2038uint32_t blend_enable_mask,2039bool dual_src_blend,2040const VkPipelineMultisampleStateCreateInfo *msaa_info)2041{2042const uint32_t sample_mask =2043msaa_info->pSampleMask ? (*msaa_info->pSampleMask & 0xffff)2044: ((1 << msaa_info->rasterizationSamples) - 1);20452046tu_cs_emit_regs(cs,2047A6XX_SP_BLEND_CNTL(.enable_blend = blend_enable_mask,2048.dual_color_in_enable = dual_src_blend,2049.alpha_to_coverage = msaa_info->alphaToCoverageEnable,2050.unk8 = true));20512052/* set A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND only when enabled? 
*/2053tu_cs_emit_regs(cs,2054A6XX_RB_BLEND_CNTL(.enable_blend = blend_enable_mask,2055.independent_blend = true,2056.sample_mask = sample_mask,2057.dual_color_in_enable = dual_src_blend,2058.alpha_to_coverage = msaa_info->alphaToCoverageEnable,2059.alpha_to_one = msaa_info->alphaToOneEnable));2060}20612062static uint32_t2063calc_pvtmem_size(struct tu_device *dev, struct tu_pvtmem_config *config,2064uint32_t pvtmem_bytes)2065{2066uint32_t per_fiber_size = ALIGN(pvtmem_bytes, 512);2067uint32_t per_sp_size =2068ALIGN(per_fiber_size * dev->physical_device->info->a6xx.fibers_per_sp, 1 << 12);20692070if (config) {2071config->per_fiber_size = per_fiber_size;2072config->per_sp_size = per_sp_size;2073}20742075return dev->physical_device->info->num_sp_cores * per_sp_size;2076}20772078static VkResult2079tu_setup_pvtmem(struct tu_device *dev,2080struct tu_pipeline *pipeline,2081struct tu_pvtmem_config *config,2082uint32_t pvtmem_bytes, bool per_wave)2083{2084if (!pvtmem_bytes) {2085memset(config, 0, sizeof(*config));2086return VK_SUCCESS;2087}20882089uint32_t total_size = calc_pvtmem_size(dev, config, pvtmem_bytes);2090config->per_wave = per_wave;20912092VkResult result =2093tu_bo_init_new(dev, &pipeline->pvtmem_bo, total_size,2094TU_BO_ALLOC_NO_FLAGS);2095if (result != VK_SUCCESS)2096return result;20972098config->iova = pipeline->pvtmem_bo.iova;20992100return result;2101}210221032104static VkResult2105tu_pipeline_allocate_cs(struct tu_device *dev,2106struct tu_pipeline *pipeline,2107struct tu_pipeline_builder *builder,2108struct ir3_shader_variant *compute)2109{2110uint32_t size = 2048 + tu6_load_state_size(pipeline, compute);21112112/* graphics case: */2113if (builder) {2114uint32_t pvtmem_bytes = 0;2115for (uint32_t i = 0; i < ARRAY_SIZE(builder->variants); i++) {2116if (builder->variants[i]) {2117size += builder->variants[i]->info.size / 4;2118pvtmem_bytes = MAX2(pvtmem_bytes, builder->variants[i]->pvtmem_size);2119}2120}21212122size += builder->binning_variant->info.size 
/ 4;2123pvtmem_bytes = MAX2(pvtmem_bytes, builder->binning_variant->pvtmem_size);21242125size += calc_pvtmem_size(dev, NULL, pvtmem_bytes) / 4;2126} else {2127size += compute->info.size / 4;2128size += calc_pvtmem_size(dev, NULL, compute->pvtmem_size) / 4;2129}21302131tu_cs_init(&pipeline->cs, dev, TU_CS_MODE_SUB_STREAM, size);21322133/* Reserve the space now such that tu_cs_begin_sub_stream never fails. Note2134* that LOAD_STATE can potentially take up a large amount of space so we2135* calculate its size explicitly.2136*/2137return tu_cs_reserve_space(&pipeline->cs, size);2138}21392140static void2141tu_pipeline_shader_key_init(struct ir3_shader_key *key,2142const VkGraphicsPipelineCreateInfo *pipeline_info)2143{2144for (uint32_t i = 0; i < pipeline_info->stageCount; i++) {2145if (pipeline_info->pStages[i].stage == VK_SHADER_STAGE_GEOMETRY_BIT) {2146key->has_gs = true;2147break;2148}2149}21502151if (pipeline_info->pRasterizationState->rasterizerDiscardEnable)2152return;21532154const VkPipelineMultisampleStateCreateInfo *msaa_info = pipeline_info->pMultisampleState;2155const struct VkPipelineSampleLocationsStateCreateInfoEXT *sample_locations =2156vk_find_struct_const(msaa_info->pNext, PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);2157if (msaa_info->rasterizationSamples > 1 ||2158/* also set msaa key when sample location is not the default2159* since this affects varying interpolation */2160(sample_locations && sample_locations->sampleLocationsEnable)) {2161key->msaa = true;2162}21632164/* note: not actually used by ir3, just checked in tu6_emit_fs_inputs */2165if (msaa_info->sampleShadingEnable)2166key->sample_shading = true;21672168/* We set this after we compile to NIR because we need the prim mode */2169key->tessellation = IR3_TESS_NONE;2170}21712172static uint32_t2173tu6_get_tessmode(struct tu_shader* shader)2174{2175uint32_t primitive_mode = shader->ir3_shader->nir->info.tess.primitive_mode;2176switch (primitive_mode) {2177case GL_ISOLINES:2178return 
IR3_TESS_ISOLINES;2179case GL_TRIANGLES:2180return IR3_TESS_TRIANGLES;2181case GL_QUADS:2182return IR3_TESS_QUADS;2183case GL_NONE:2184return IR3_TESS_NONE;2185default:2186unreachable("bad tessmode");2187}2188}21892190static uint64_t2191tu_upload_variant(struct tu_pipeline *pipeline,2192const struct ir3_shader_variant *variant)2193{2194struct tu_cs_memory memory;21952196if (!variant)2197return 0;21982199/* this expects to get enough alignment because shaders are allocated first2200* and total size is always aligned correctly2201* note: an assert in tu6_emit_xs_config validates the alignment2202*/2203tu_cs_alloc(&pipeline->cs, variant->info.size / 4, 1, &memory);22042205memcpy(memory.map, variant->bin, variant->info.size);2206return memory.iova;2207}22082209static void2210tu_append_executable(struct tu_pipeline *pipeline, struct ir3_shader_variant *variant,2211char *nir_from_spirv)2212{2213ralloc_steal(pipeline->executables_mem_ctx, variant->disasm_info.nir);2214ralloc_steal(pipeline->executables_mem_ctx, variant->disasm_info.disasm);22152216struct tu_pipeline_executable exe = {2217.stage = variant->shader->type,2218.nir_from_spirv = nir_from_spirv,2219.nir_final = variant->disasm_info.nir,2220.disasm = variant->disasm_info.disasm,2221.stats = variant->info,2222.is_binning = variant->binning_pass,2223};22242225util_dynarray_append(&pipeline->executables, struct tu_pipeline_executable, exe);2226}22272228static VkResult2229tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,2230struct tu_pipeline *pipeline)2231{2232const struct ir3_compiler *compiler = builder->device->compiler;2233const VkPipelineShaderStageCreateInfo *stage_infos[MESA_SHADER_STAGES] = {2234NULL2235};2236for (uint32_t i = 0; i < builder->create_info->stageCount; i++) {2237gl_shader_stage stage =2238vk_to_mesa_shader_stage(builder->create_info->pStages[i].stage);2239stage_infos[stage] = &builder->create_info->pStages[i];2240}22412242struct ir3_shader_key key = 
{};2243tu_pipeline_shader_key_init(&key, builder->create_info);22442245nir_shader *nir[ARRAY_SIZE(builder->shaders)] = { NULL };22462247for (gl_shader_stage stage = MESA_SHADER_VERTEX;2248stage < ARRAY_SIZE(nir); stage++) {2249const VkPipelineShaderStageCreateInfo *stage_info = stage_infos[stage];2250if (!stage_info)2251continue;22522253nir[stage] = tu_spirv_to_nir(builder->device, stage_info, stage);2254if (!nir[stage])2255return VK_ERROR_OUT_OF_HOST_MEMORY;2256}22572258if (!nir[MESA_SHADER_FRAGMENT]) {2259const nir_shader_compiler_options *nir_options =2260ir3_get_compiler_options(builder->device->compiler);2261nir_builder fs_b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT,2262nir_options,2263"noop_fs");2264nir[MESA_SHADER_FRAGMENT] = fs_b.shader;2265}22662267const bool executable_info = builder->create_info->flags &2268VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR;22692270char *nir_initial_disasm[ARRAY_SIZE(builder->shaders)] = { NULL };22712272if (executable_info) {2273for (gl_shader_stage stage = MESA_SHADER_VERTEX;2274stage < ARRAY_SIZE(nir); stage++) {2275if (!nir[stage])2276continue;22772278nir_initial_disasm[stage] =2279nir_shader_as_str(nir[stage], pipeline->executables_mem_ctx);2280}2281}22822283/* TODO do intra-stage linking here */22842285uint32_t desc_sets = 0;2286for (gl_shader_stage stage = MESA_SHADER_VERTEX;2287stage < ARRAY_SIZE(nir); stage++) {2288if (!nir[stage])2289continue;22902291struct tu_shader *shader =2292tu_shader_create(builder->device, nir[stage],2293builder->multiview_mask, builder->layout,2294builder->alloc);2295if (!shader)2296return VK_ERROR_OUT_OF_HOST_MEMORY;22972298/* In SPIR-V generated from GLSL, the primitive mode is specified in the2299* tessellation evaluation shader, but in SPIR-V generated from HLSL,2300* the mode is specified in the tessellation control shader. 
*/2301if ((stage == MESA_SHADER_TESS_EVAL || stage == MESA_SHADER_TESS_CTRL) &&2302key.tessellation == IR3_TESS_NONE) {2303key.tessellation = tu6_get_tessmode(shader);2304}23052306/* Keep track of the status of each shader's active descriptor sets,2307* which is set in tu_lower_io. */2308desc_sets |= shader->active_desc_sets;23092310builder->shaders[stage] = shader;2311}2312pipeline->active_desc_sets = desc_sets;23132314struct tu_shader *last_shader = builder->shaders[MESA_SHADER_GEOMETRY];2315if (!last_shader)2316last_shader = builder->shaders[MESA_SHADER_TESS_EVAL];2317if (!last_shader)2318last_shader = builder->shaders[MESA_SHADER_VERTEX];23192320uint64_t outputs_written = last_shader->ir3_shader->nir->info.outputs_written;23212322key.layer_zero = !(outputs_written & VARYING_BIT_LAYER);2323key.view_zero = !(outputs_written & VARYING_BIT_VIEWPORT);23242325pipeline->tess.patch_type = key.tessellation;23262327for (gl_shader_stage stage = MESA_SHADER_VERTEX;2328stage < ARRAY_SIZE(builder->shaders); stage++) {2329if (!builder->shaders[stage])2330continue;23312332bool created;2333builder->variants[stage] =2334ir3_shader_get_variant(builder->shaders[stage]->ir3_shader,2335&key, false, executable_info, &created);2336if (!builder->variants[stage])2337return VK_ERROR_OUT_OF_HOST_MEMORY;2338}23392340uint32_t safe_constlens = ir3_trim_constlen(builder->variants, compiler);23412342key.safe_constlen = true;23432344for (gl_shader_stage stage = MESA_SHADER_VERTEX;2345stage < ARRAY_SIZE(builder->shaders); stage++) {2346if (!builder->shaders[stage])2347continue;23482349if (safe_constlens & (1 << stage)) {2350bool created;2351builder->variants[stage] =2352ir3_shader_get_variant(builder->shaders[stage]->ir3_shader,2353&key, false, executable_info, &created);2354if (!builder->variants[stage])2355return VK_ERROR_OUT_OF_HOST_MEMORY;2356}2357}23582359const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];2360struct ir3_shader_variant *variant;23612362if 
(vs->ir3_shader->stream_output.num_outputs ||2363!ir3_has_binning_vs(&key)) {2364variant = builder->variants[MESA_SHADER_VERTEX];2365} else {2366bool created;2367key.safe_constlen = !!(safe_constlens & (1 << MESA_SHADER_VERTEX));2368variant = ir3_shader_get_variant(vs->ir3_shader, &key,2369true, executable_info, &created);2370if (!variant)2371return VK_ERROR_OUT_OF_HOST_MEMORY;2372}23732374builder->binning_variant = variant;23752376for (gl_shader_stage stage = MESA_SHADER_VERTEX;2377stage < ARRAY_SIZE(nir); stage++) {2378if (builder->variants[stage]) {2379tu_append_executable(pipeline, builder->variants[stage],2380nir_initial_disasm[stage]);2381}2382}23832384if (builder->binning_variant != builder->variants[MESA_SHADER_VERTEX]) {2385tu_append_executable(pipeline, builder->binning_variant, NULL);2386}23872388return VK_SUCCESS;2389}23902391static void2392tu_pipeline_builder_parse_dynamic(struct tu_pipeline_builder *builder,2393struct tu_pipeline *pipeline)2394{2395const VkPipelineDynamicStateCreateInfo *dynamic_info =2396builder->create_info->pDynamicState;23972398pipeline->gras_su_cntl_mask = ~0u;2399pipeline->rb_depth_cntl_mask = ~0u;2400pipeline->rb_stencil_cntl_mask = ~0u;24012402if (!dynamic_info)2403return;24042405for (uint32_t i = 0; i < dynamic_info->dynamicStateCount; i++) {2406VkDynamicState state = dynamic_info->pDynamicStates[i];2407switch (state) {2408case VK_DYNAMIC_STATE_VIEWPORT ... 
VK_DYNAMIC_STATE_STENCIL_REFERENCE:2409if (state == VK_DYNAMIC_STATE_LINE_WIDTH)2410pipeline->gras_su_cntl_mask &= ~A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;2411pipeline->dynamic_state_mask |= BIT(state);2412break;2413case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:2414pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_SAMPLE_LOCATIONS);2415break;2416case VK_DYNAMIC_STATE_CULL_MODE_EXT:2417pipeline->gras_su_cntl_mask &=2418~(A6XX_GRAS_SU_CNTL_CULL_BACK | A6XX_GRAS_SU_CNTL_CULL_FRONT);2419pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_GRAS_SU_CNTL);2420break;2421case VK_DYNAMIC_STATE_FRONT_FACE_EXT:2422pipeline->gras_su_cntl_mask &= ~A6XX_GRAS_SU_CNTL_FRONT_CW;2423pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_GRAS_SU_CNTL);2424break;2425case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT:2426pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY);2427break;2428case VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT:2429pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_VB_STRIDE);2430break;2431case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT:2432pipeline->dynamic_state_mask |= BIT(VK_DYNAMIC_STATE_VIEWPORT);2433break;2434case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT:2435pipeline->dynamic_state_mask |= BIT(VK_DYNAMIC_STATE_SCISSOR);2436break;2437case VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT:2438pipeline->rb_depth_cntl_mask &=2439~(A6XX_RB_DEPTH_CNTL_Z_ENABLE | A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE);2440pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL);2441break;2442case VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT:2443pipeline->rb_depth_cntl_mask &= ~A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE;2444pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL);2445break;2446case VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT:2447pipeline->rb_depth_cntl_mask &= ~A6XX_RB_DEPTH_CNTL_ZFUNC__MASK;2448pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL);2449break;2450case VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT:2451pipeline->rb_depth_cntl_mask 
&=2452~(A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE | A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE);2453pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL);2454break;2455case VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT:2456pipeline->rb_stencil_cntl_mask &= ~(A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |2457A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |2458A6XX_RB_STENCIL_CONTROL_STENCIL_READ);2459pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_STENCIL_CNTL);2460break;2461case VK_DYNAMIC_STATE_STENCIL_OP_EXT:2462pipeline->rb_stencil_cntl_mask &= ~(A6XX_RB_STENCIL_CONTROL_FUNC__MASK |2463A6XX_RB_STENCIL_CONTROL_FAIL__MASK |2464A6XX_RB_STENCIL_CONTROL_ZPASS__MASK |2465A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK |2466A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK |2467A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK |2468A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK |2469A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK);2470pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_STENCIL_CNTL);2471break;2472default:2473assert(!"unsupported dynamic state");2474break;2475}2476}2477}24782479static void2480tu_pipeline_set_linkage(struct tu_program_descriptor_linkage *link,2481struct tu_shader *shader,2482struct ir3_shader_variant *v)2483{2484link->const_state = *ir3_const_state(v);2485link->constlen = v->constlen;2486link->push_consts = shader->push_consts;2487}24882489static void2490tu_pipeline_builder_parse_shader_stages(struct tu_pipeline_builder *builder,2491struct tu_pipeline *pipeline)2492{2493struct tu_cs prog_cs;24942495/* Emit HLSQ_xS_CNTL/HLSQ_SP_xS_CONFIG *first*, before emitting anything2496* else that could depend on that state (like push constants)2497*2498* Note also that this always uses the full VS even in binning pass. The2499* binning pass variant has the same const layout as the full VS, and2500* the constlen for the VS will be the same or greater than the constlen2501* for the binning pass variant. 
It is required that the constlen state2502* matches between binning and draw passes, as some parts of the push2503* consts are emitted in state groups that are shared between the binning2504* and draw passes.2505*/2506tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);2507tu6_emit_program_config(&prog_cs, builder);2508pipeline->program.config_state = tu_cs_end_draw_state(&pipeline->cs, &prog_cs);25092510tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);2511tu6_emit_program(&prog_cs, builder, false, pipeline);2512pipeline->program.state = tu_cs_end_draw_state(&pipeline->cs, &prog_cs);25132514tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);2515tu6_emit_program(&prog_cs, builder, true, pipeline);2516pipeline->program.binning_state = tu_cs_end_draw_state(&pipeline->cs, &prog_cs);25172518VkShaderStageFlags stages = 0;2519for (unsigned i = 0; i < builder->create_info->stageCount; i++) {2520stages |= builder->create_info->pStages[i].stage;2521}2522pipeline->active_stages = stages;25232524for (unsigned i = 0; i < ARRAY_SIZE(builder->shaders); i++) {2525if (!builder->shaders[i])2526continue;25272528tu_pipeline_set_linkage(&pipeline->program.link[i],2529builder->shaders[i],2530builder->variants[i]);2531}2532}25332534static void2535tu_pipeline_builder_parse_vertex_input(struct tu_pipeline_builder *builder,2536struct tu_pipeline *pipeline)2537{2538const VkPipelineVertexInputStateCreateInfo *vi_info =2539builder->create_info->pVertexInputState;2540const struct ir3_shader_variant *vs = builder->variants[MESA_SHADER_VERTEX];2541const struct ir3_shader_variant *bs = builder->binning_variant;25422543pipeline->num_vbs = vi_info->vertexBindingDescriptionCount;25442545struct tu_cs vi_cs;2546tu_cs_begin_sub_stream(&pipeline->cs,2547MAX_VERTEX_ATTRIBS * 7 + 2, &vi_cs);2548tu6_emit_vertex_input(pipeline, &vi_cs, vs, vi_info);2549pipeline->vi.state = tu_cs_end_draw_state(&pipeline->cs, &vi_cs);25502551if (bs) {2552tu_cs_begin_sub_stream(&pipeline->cs,2553MAX_VERTEX_ATTRIBS * 7 + 
2, &vi_cs);2554tu6_emit_vertex_input(pipeline, &vi_cs, bs, vi_info);2555pipeline->vi.binning_state =2556tu_cs_end_draw_state(&pipeline->cs, &vi_cs);2557}2558}25592560static void2561tu_pipeline_builder_parse_input_assembly(struct tu_pipeline_builder *builder,2562struct tu_pipeline *pipeline)2563{2564const VkPipelineInputAssemblyStateCreateInfo *ia_info =2565builder->create_info->pInputAssemblyState;25662567pipeline->ia.primtype = tu6_primtype(ia_info->topology);2568pipeline->ia.primitive_restart = ia_info->primitiveRestartEnable;2569}25702571static bool2572tu_pipeline_static_state(struct tu_pipeline *pipeline, struct tu_cs *cs,2573uint32_t id, uint32_t size)2574{2575assert(id < ARRAY_SIZE(pipeline->dynamic_state));25762577if (pipeline->dynamic_state_mask & BIT(id))2578return false;25792580pipeline->dynamic_state[id] = tu_cs_draw_state(&pipeline->cs, cs, size);2581return true;2582}25832584static void2585tu_pipeline_builder_parse_tessellation(struct tu_pipeline_builder *builder,2586struct tu_pipeline *pipeline)2587{2588if (!(pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) ||2589!(pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))2590return;25912592const VkPipelineTessellationStateCreateInfo *tess_info =2593builder->create_info->pTessellationState;25942595assert(!(pipeline->dynamic_state_mask & BIT(TU_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY)));25962597assert(pipeline->ia.primtype == DI_PT_PATCHES0);2598assert(tess_info->patchControlPoints <= 32);2599pipeline->ia.primtype += tess_info->patchControlPoints;2600const VkPipelineTessellationDomainOriginStateCreateInfo *domain_info =2601vk_find_struct_const(tess_info->pNext, PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);2602pipeline->tess.upper_left_domain_origin = !domain_info ||2603domain_info->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT;2604const struct ir3_shader_variant *hs = builder->variants[MESA_SHADER_TESS_CTRL];2605const struct ir3_shader_variant *ds = 
builder->variants[MESA_SHADER_TESS_EVAL];2606pipeline->tess.param_stride = hs->output_size * 4;2607pipeline->tess.hs_bo_regid = hs->const_state->offsets.primitive_param + 1;2608pipeline->tess.ds_bo_regid = ds->const_state->offsets.primitive_param + 1;2609}26102611static void2612tu_pipeline_builder_parse_viewport(struct tu_pipeline_builder *builder,2613struct tu_pipeline *pipeline)2614{2615/* The spec says:2616*2617* pViewportState is a pointer to an instance of the2618* VkPipelineViewportStateCreateInfo structure, and is ignored if the2619* pipeline has rasterization disabled."2620*2621* We leave the relevant registers stale in that case.2622*/2623if (builder->rasterizer_discard)2624return;26252626const VkPipelineViewportStateCreateInfo *vp_info =2627builder->create_info->pViewportState;26282629struct tu_cs cs;26302631if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_VIEWPORT, 8 + 10 * vp_info->viewportCount))2632tu6_emit_viewport(&cs, vp_info->pViewports, vp_info->viewportCount);26332634if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_SCISSOR, 1 + 2 * vp_info->scissorCount))2635tu6_emit_scissor(&cs, vp_info->pScissors, vp_info->scissorCount);2636}26372638static void2639tu_pipeline_builder_parse_rasterization(struct tu_pipeline_builder *builder,2640struct tu_pipeline *pipeline)2641{2642const VkPipelineRasterizationStateCreateInfo *rast_info =2643builder->create_info->pRasterizationState;26442645enum a6xx_polygon_mode mode = tu6_polygon_mode(rast_info->polygonMode);26462647bool depth_clip_disable = rast_info->depthClampEnable;26482649const VkPipelineRasterizationDepthClipStateCreateInfoEXT *depth_clip_state =2650vk_find_struct_const(rast_info, PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);2651if (depth_clip_state)2652depth_clip_disable = !depth_clip_state->depthClipEnable;26532654struct tu_cs cs;2655uint32_t cs_size = 13 + (builder->emit_msaa_state ? 
11 : 0);2656pipeline->rast_state = tu_cs_draw_state(&pipeline->cs, &cs, cs_size);26572658tu_cs_emit_regs(&cs,2659A6XX_GRAS_CL_CNTL(2660.znear_clip_disable = depth_clip_disable,2661.zfar_clip_disable = depth_clip_disable,2662/* TODO should this be depth_clip_disable instead? */2663.unk5 = rast_info->depthClampEnable,2664.zero_gb_scale_z = 1,2665.vp_clip_code_ignore = 1));26662667tu_cs_emit_regs(&cs,2668A6XX_VPC_POLYGON_MODE(mode));26692670tu_cs_emit_regs(&cs,2671A6XX_PC_POLYGON_MODE(mode));26722673/* move to hw ctx init? */2674tu_cs_emit_regs(&cs,2675A6XX_GRAS_SU_POINT_MINMAX(.min = 1.0f / 16.0f, .max = 4092.0f),2676A6XX_GRAS_SU_POINT_SIZE(1.0f));26772678const VkPipelineRasterizationStateStreamCreateInfoEXT *stream_info =2679vk_find_struct_const(rast_info->pNext,2680PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT);2681unsigned stream = stream_info ? stream_info->rasterizationStream : 0;2682tu_cs_emit_regs(&cs,2683A6XX_PC_RASTER_CNTL(.stream = stream,2684.discard = rast_info->rasterizerDiscardEnable));2685tu_cs_emit_regs(&cs,2686A6XX_VPC_UNKNOWN_9107(.raster_discard = rast_info->rasterizerDiscardEnable));26872688/* If samples count couldn't be devised from the subpass, we should emit it here.2689* It happens when subpass doesn't use any color/depth attachment.2690*/2691if (builder->emit_msaa_state)2692tu6_emit_msaa(&cs, builder->samples);26932694pipeline->gras_su_cntl =2695tu6_gras_su_cntl(rast_info, builder->samples, builder->multiview_mask != 0);26962697if (tu_pipeline_static_state(pipeline, &cs, TU_DYNAMIC_STATE_GRAS_SU_CNTL, 2))2698tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = pipeline->gras_su_cntl));26992700if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_DEPTH_BIAS, 4)) {2701tu6_emit_depth_bias(&cs, rast_info->depthBiasConstantFactor,2702rast_info->depthBiasClamp,2703rast_info->depthBiasSlopeFactor);2704}27052706const struct VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *provoking_vtx_state 
=
      vk_find_struct_const(rast_info->pNext, PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT);
   pipeline->provoking_vertex_last = provoking_vtx_state &&
      provoking_vtx_state->provokingVertexMode == VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT;
}

/* Translate VkPipelineDepthStencilStateCreateInfo into RB_DEPTH_CNTL /
 * RB_STENCIL_CONTROL values, emit the static draw states for depth/stencil,
 * depth bounds and the stencil compare/write/reference masks (each only when
 * not dynamic), and derive LRZ force-disable flags from the fragment shader.
 */
static void
tu_pipeline_builder_parse_depth_stencil(struct tu_pipeline_builder *builder,
                                        struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    pDepthStencilState is a pointer to an instance of the
    *    VkPipelineDepthStencilStateCreateInfo structure, and is ignored if
    *    the pipeline has rasterization disabled or if the subpass of the
    *    render pass the pipeline is created against does not use a
    *    depth/stencil attachment.
    */
   const VkPipelineDepthStencilStateCreateInfo *ds_info =
      builder->create_info->pDepthStencilState;
   const VkPipelineRasterizationStateCreateInfo *rast_info =
      builder->create_info->pRasterizationState;
   uint32_t rb_depth_cntl = 0, rb_stencil_cntl = 0;
   struct tu_cs cs;

   /* Depth test/write state only applies when there is a depth-capable
    * attachment (S8_UINT is stencil-only).
    */
   if (builder->depth_attachment_format != VK_FORMAT_UNDEFINED &&
       builder->depth_attachment_format != VK_FORMAT_S8_UINT) {
      if (ds_info->depthTestEnable) {
         rb_depth_cntl |=
            A6XX_RB_DEPTH_CNTL_Z_ENABLE |
            A6XX_RB_DEPTH_CNTL_ZFUNC(tu6_compare_func(ds_info->depthCompareOp)) |
            A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE; /* TODO: don't set for ALWAYS/NEVER */

         if (rast_info->depthClampEnable)
            rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE;

         if (ds_info->depthWriteEnable)
            rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE;
      }

      if (ds_info->depthBoundsTestEnable)
         rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE | A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE;
   } else {
      /* if RB_DEPTH_CNTL is set dynamically, we need to make sure it is set
       * to 0 when this pipeline is used, as enabling depth test when there
       * is no depth attachment is a problem (at least for the S8_UINT case)
       */
      if (pipeline->dynamic_state_mask & BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL))
         pipeline->rb_depth_cntl_disable = true;
   }

   /* Stencil state applies to any depth/stencil format, including S8_UINT. */
   if (builder->depth_attachment_format != VK_FORMAT_UNDEFINED) {
      const VkStencilOpState *front = &ds_info->front;
      const VkStencilOpState *back = &ds_info->back;

      rb_stencil_cntl |=
         A6XX_RB_STENCIL_CONTROL_FUNC(tu6_compare_func(front->compareOp)) |
         A6XX_RB_STENCIL_CONTROL_FAIL(tu6_stencil_op(front->failOp)) |
         A6XX_RB_STENCIL_CONTROL_ZPASS(tu6_stencil_op(front->passOp)) |
         A6XX_RB_STENCIL_CONTROL_ZFAIL(tu6_stencil_op(front->depthFailOp)) |
         A6XX_RB_STENCIL_CONTROL_FUNC_BF(tu6_compare_func(back->compareOp)) |
         A6XX_RB_STENCIL_CONTROL_FAIL_BF(tu6_stencil_op(back->failOp)) |
         A6XX_RB_STENCIL_CONTROL_ZPASS_BF(tu6_stencil_op(back->passOp)) |
         A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(tu6_stencil_op(back->depthFailOp));

      if (ds_info->stencilTestEnable) {
         rb_stencil_cntl |=
            A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
            A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
            A6XX_RB_STENCIL_CONTROL_STENCIL_READ;
      }
   }

   /* Emit the computed values as static draw states when not dynamic; they
    * are also stashed on the pipeline for the dynamic-state path.
    */
   if (tu_pipeline_static_state(pipeline, &cs, TU_DYNAMIC_STATE_RB_DEPTH_CNTL, 2)) {
      tu_cs_emit_pkt4(&cs, REG_A6XX_RB_DEPTH_CNTL, 1);
      tu_cs_emit(&cs, rb_depth_cntl);
   }
   pipeline->rb_depth_cntl = rb_depth_cntl;

   if (tu_pipeline_static_state(pipeline, &cs, TU_DYNAMIC_STATE_RB_STENCIL_CNTL, 2)) {
      tu_cs_emit_pkt4(&cs, REG_A6XX_RB_STENCIL_CONTROL, 1);
      tu_cs_emit(&cs, rb_stencil_cntl);
   }
   pipeline->rb_stencil_cntl = rb_stencil_cntl;

   /* the remaining draw states arent used if there is no d/s, leave them empty */
   if (builder->depth_attachment_format == VK_FORMAT_UNDEFINED)
      return;

   if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_DEPTH_BOUNDS, 3)) {
      tu_cs_emit_regs(&cs,
                      A6XX_RB_Z_BOUNDS_MIN(ds_info->minDepthBounds),
                      A6XX_RB_Z_BOUNDS_MAX(ds_info->maxDepthBounds));
   }

   if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, 2)) {
      tu_cs_emit_regs(&cs, A6XX_RB_STENCILMASK(.mask = ds_info->front.compareMask & 0xff,
                                               .bfmask = ds_info->back.compareMask & 0xff));
   }

   if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, 2)) {
      update_stencil_mask(&pipeline->stencil_wrmask, VK_STENCIL_FACE_FRONT_BIT, ds_info->front.writeMask);
      update_stencil_mask(&pipeline->stencil_wrmask, VK_STENCIL_FACE_BACK_BIT, ds_info->back.writeMask);
      tu_cs_emit_regs(&cs, A6XX_RB_STENCILWRMASK(.dword = pipeline->stencil_wrmask));
   }

   if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_STENCIL_REFERENCE, 2)) {
      tu_cs_emit_regs(&cs, A6XX_RB_STENCILREF(.ref = ds_info->front.reference & 0xff,
                                              .bfref = ds_info->back.reference & 0xff));
   }

   if (builder->shaders[MESA_SHADER_FRAGMENT]) {
      const struct ir3_shader_variant *fs = &builder->shaders[MESA_SHADER_FRAGMENT]->ir3_shader->variants[0];
      if (fs->has_kill || fs->no_earlyz || fs->writes_pos) {
         pipeline->lrz.force_disable_mask |= TU_LRZ_FORCE_DISABLE_WRITE;
      }
      if (fs->no_earlyz || fs->writes_pos) {
         /* NOTE(review): plain '=' replaces the WRITE bit OR'd in just above.
          * Presumably intentional, since fully disabling LRZ supersedes only
          * disabling LRZ writes — confirm it isn't meant to be '|='.
          */
         pipeline->lrz.force_disable_mask = TU_LRZ_FORCE_DISABLE_LRZ;
      }
   }
}

static void
tu_pipeline_builder_parse_multisample_and_color_blend(
   struct tu_pipeline_builder *builder, struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    pMultisampleState is a pointer to an instance of the
    *    VkPipelineMultisampleStateCreateInfo, and is ignored if the pipeline
    *    has rasterization disabled.
    *
    * Also,
    *
    *    pColorBlendState is a pointer to an instance of the
    *    VkPipelineColorBlendStateCreateInfo structure, and is ignored if the
    *    pipeline has rasterization disabled or if the subpass of the render
    *    pass the pipeline is created against does not use any color
    *    attachments.
    *
    * We leave the relevant registers stale when rasterization is disabled.
    */
   if (builder->rasterizer_discard)
      return;

   static const
VkPipelineColorBlendStateCreateInfo dummy_blend_info;
   const VkPipelineMultisampleStateCreateInfo *msaa_info =
      builder->create_info->pMultisampleState;
   /* When the subpass has no color attachments, substitute an all-zero blend
    * state instead of dereferencing the (possibly ignored) app pointer.
    */
   const VkPipelineColorBlendStateCreateInfo *blend_info =
      builder->use_color_attachments ? builder->create_info->pColorBlendState
                                     : &dummy_blend_info;

   struct tu_cs cs;
   pipeline->blend_state =
      tu_cs_draw_state(&pipeline->cs, &cs, blend_info->attachmentCount * 3 + 4);

   uint32_t blend_enable_mask;
   tu6_emit_rb_mrt_controls(&cs, blend_info,
                            builder->color_attachment_formats,
                            &blend_enable_mask);

   tu6_emit_blend_control(&cs, blend_enable_mask,
                          builder->use_dual_src_blend, msaa_info);

   assert(cs.cur == cs.end); /* validate draw state size */

   if (blend_enable_mask) {
      for (int i = 0; i < blend_info->attachmentCount; i++) {
         VkPipelineColorBlendAttachmentState blendAttachment = blend_info->pAttachments[i];
         /* Disable LRZ writes when blend is enabled, since the
          * resulting pixel value from the blend-draw
          * depends on an earlier draw, which LRZ in the draw pass
          * could early-reject if the previous blend-enabled draw wrote LRZ.
          *
          * From the PoV of LRZ, having masked color channels is
          * the same as having blend enabled, in that the draw will
          * care about the fragments from an earlier draw.
          *
          * TODO: We need to disable LRZ writes only for the binning pass.
          * Therefore, we need to emit it in a separate draw state. We keep
          * it disabled for sysmem path as well for the moment.
          */
         if (blendAttachment.blendEnable || blendAttachment.colorWriteMask != 0xf) {
            pipeline->lrz.force_disable_mask |= TU_LRZ_FORCE_DISABLE_WRITE;
         }
      }
   }

   if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_BLEND_CONSTANTS, 5)) {
      tu_cs_emit_pkt4(&cs, REG_A6XX_RB_BLEND_RED_F32, 4);
      tu_cs_emit_array(&cs, (const uint32_t *) blend_info->blendConstants, 4);
   }

   const struct VkPipelineSampleLocationsStateCreateInfoEXT *sample_locations =
      vk_find_struct_const(msaa_info->pNext, PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
   const VkSampleLocationsInfoEXT *samp_loc = NULL;

   if (sample_locations && sample_locations->sampleLocationsEnable)
      samp_loc = &sample_locations->sampleLocationsInfo;

   if (tu_pipeline_static_state(pipeline, &cs, TU_DYNAMIC_STATE_SAMPLE_LOCATIONS,
                                samp_loc ? 9 : 6)) {
      tu6_emit_sample_locations(&cs, samp_loc);
   }
}

/* Free everything the pipeline object owns: its command stream, the
 * private-memory BO (if one was allocated) and the executables ralloc
 * context.  The VkPipeline object itself is freed by the caller.
 */
static void
tu_pipeline_finish(struct tu_pipeline *pipeline,
                   struct tu_device *dev,
                   const VkAllocationCallbacks *alloc)
{
   tu_cs_finish(&pipeline->cs);

   if (pipeline->pvtmem_bo.size)
      tu_bo_finish(dev, &pipeline->pvtmem_bo);

   ralloc_free(pipeline->executables_mem_ctx);
}

/* Allocate the pipeline object, compile and upload all shader variants,
 * set up shared private memory, then run every fixed-function parse step to
 * record the pipeline's draw states.
 */
static VkResult
tu_pipeline_builder_build(struct tu_pipeline_builder *builder,
                          struct tu_pipeline **pipeline)
{
   VkResult result;

   *pipeline = vk_object_zalloc(&builder->device->vk, builder->alloc,
                                sizeof(**pipeline), VK_OBJECT_TYPE_PIPELINE);
   if (!*pipeline)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   (*pipeline)->layout = builder->layout;
   (*pipeline)->executables_mem_ctx = ralloc_context(NULL);
   util_dynarray_init(&(*pipeline)->executables, (*pipeline)->executables_mem_ctx);

   /* compile and upload shaders */
   result = tu_pipeline_builder_compile_shaders(builder, *pipeline);
   if (result != VK_SUCCESS) {
      vk_object_free(&builder->device->vk,
builder->alloc, *pipeline);
      return result;
   }

   result = tu_pipeline_allocate_cs(builder->device, *pipeline, builder, NULL);
   if (result != VK_SUCCESS) {
      vk_object_free(&builder->device->vk, builder->alloc, *pipeline);
      return result;
   }

   /* Upload each stage's chosen variant, plus the separate binning VS. */
   for (uint32_t i = 0; i < ARRAY_SIZE(builder->variants); i++)
      builder->shader_iova[i] = tu_upload_variant(*pipeline, builder->variants[i]);

   builder->binning_vs_iova =
      tu_upload_variant(*pipeline, builder->binning_variant);

   /* Setup private memory. Note that because we're sharing the same private
    * memory for all stages, all stages must use the same config, or else
    * fibers from one stage might overwrite fibers in another.
    */

   uint32_t pvtmem_size = 0;
   bool per_wave = true;
   for (uint32_t i = 0; i < ARRAY_SIZE(builder->variants); i++) {
      if (builder->variants[i]) {
         pvtmem_size = MAX2(pvtmem_size, builder->variants[i]->pvtmem_size);
         if (!builder->variants[i]->pvtmem_per_wave)
            per_wave = false;
      }
   }

   if (builder->binning_variant) {
      pvtmem_size = MAX2(pvtmem_size, builder->binning_variant->pvtmem_size);
      if (!builder->binning_variant->pvtmem_per_wave)
         per_wave = false;
   }

   result = tu_setup_pvtmem(builder->device, *pipeline, &builder->pvtmem,
                            pvtmem_size, per_wave);
   if (result != VK_SUCCESS) {
      vk_object_free(&builder->device->vk, builder->alloc, *pipeline);
      return result;
   }

   /* Parse all fixed-function create-info state into draw states. */
   tu_pipeline_builder_parse_dynamic(builder, *pipeline);
   tu_pipeline_builder_parse_shader_stages(builder, *pipeline);
   tu_pipeline_builder_parse_vertex_input(builder, *pipeline);
   tu_pipeline_builder_parse_input_assembly(builder, *pipeline);
   tu_pipeline_builder_parse_tessellation(builder, *pipeline);
   tu_pipeline_builder_parse_viewport(builder, *pipeline);
   tu_pipeline_builder_parse_rasterization(builder, *pipeline);
   tu_pipeline_builder_parse_depth_stencil(builder, *pipeline);
   tu_pipeline_builder_parse_multisample_and_color_blend(builder, *pipeline);
   tu6_emit_load_state(*pipeline, false);

   /* we should have reserved enough space upfront such that the CS never
    * grows
    */
   assert((*pipeline)->cs.bo_count == 1);

   return VK_SUCCESS;
}

/* Destroy the builder-owned tu_shaders; the compiled variants have already
 * been uploaded to the pipeline by the time this runs.
 */
static void
tu_pipeline_builder_finish(struct tu_pipeline_builder *builder)
{
   for (uint32_t i = 0; i < ARRAY_SIZE(builder->shaders); i++) {
      if (!builder->shaders[i])
         continue;
      tu_shader_destroy(builder->device, builder->shaders[i], builder->alloc);
   }
}

/* Initialize the builder from the graphics create-info, snapshotting the
 * subpass-derived state (samples, attachment formats, multiview mask,
 * dual-source blending) that the parse functions consume.
 */
static void
tu_pipeline_builder_init_graphics(
   struct tu_pipeline_builder *builder,
   struct tu_device *dev,
   struct tu_pipeline_cache *cache,
   const VkGraphicsPipelineCreateInfo *create_info,
   const VkAllocationCallbacks *alloc)
{
   TU_FROM_HANDLE(tu_pipeline_layout, layout, create_info->layout);

   *builder = (struct tu_pipeline_builder) {
      .device = dev,
      .cache = cache,
      .create_info = create_info,
      .alloc = alloc,
      .layout = layout,
   };

   const struct tu_render_pass *pass =
      tu_render_pass_from_handle(create_info->renderPass);
   const struct tu_subpass *subpass =
      &pass->subpasses[create_info->subpass];

   builder->multiview_mask = subpass->multiview_mask;

   builder->rasterizer_discard =
      create_info->pRasterizationState->rasterizerDiscardEnable;

   /* variableMultisampleRate support */
   builder->emit_msaa_state = (subpass->samples == 0) && !builder->rasterizer_discard;

   if (builder->rasterizer_discard) {
      builder->samples = VK_SAMPLE_COUNT_1_BIT;
   } else {
      builder->samples = create_info->pMultisampleState->rasterizationSamples;
      builder->alpha_to_coverage = create_info->pMultisampleState->alphaToCoverageEnable;

      const uint32_t a = subpass->depth_stencil_attachment.attachment;
      builder->depth_attachment_format = (a != VK_ATTACHMENT_UNUSED) ?
         pass->attachments[a].format : VK_FORMAT_UNDEFINED;

      assert(subpass->color_count == 0 ||
             !create_info->pColorBlendState ||
             subpass->color_count == create_info->pColorBlendState->attachmentCount);
      builder->color_attachment_count = subpass->color_count;
      for (uint32_t i = 0; i < subpass->color_count; i++) {
         const uint32_t a = subpass->color_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         builder->color_attachment_formats[i] = pass->attachments[a].format;
         builder->use_color_attachments = true;
         builder->render_components |= 0xf << (i * 4);
      }

      if (tu_blend_state_is_dual_src(create_info->pColorBlendState)) {
         builder->color_attachment_count++;
         builder->use_dual_src_blend = true;
         /* dual source blending has an extra fs output in the 2nd slot */
         if (subpass->color_attachments[0].attachment != VK_ATTACHMENT_UNUSED)
            builder->render_components |= 0xf << 4;
      }
   }
}

/* Build a single graphics pipeline; on failure *pPipeline is set to
 * VK_NULL_HANDLE and the error is returned.
 */
static VkResult
tu_graphics_pipeline_create(VkDevice device,
                            VkPipelineCache pipelineCache,
                            const VkGraphicsPipelineCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator,
                            VkPipeline *pPipeline)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   TU_FROM_HANDLE(tu_pipeline_cache, cache, pipelineCache);

   struct tu_pipeline_builder builder;
   tu_pipeline_builder_init_graphics(&builder, dev, cache,
                                     pCreateInfo, pAllocator);

   struct tu_pipeline *pipeline = NULL;
   VkResult result = tu_pipeline_builder_build(&builder, &pipeline);
   tu_pipeline_builder_finish(&builder);

   if (result == VK_SUCCESS)
      *pPipeline = tu_pipeline_to_handle(pipeline);
   else
      *pPipeline = VK_NULL_HANDLE;

   return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateGraphicsPipelines(VkDevice device,
                           VkPipelineCache pipelineCache,
                           uint32_t count,
                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
{
   /* Attempt every pipeline even after a failure; the last error wins. */
   VkResult final_result =
VK_SUCCESS;31163117for (uint32_t i = 0; i < count; i++) {3118VkResult result = tu_graphics_pipeline_create(device, pipelineCache,3119&pCreateInfos[i], pAllocator,3120&pPipelines[i]);31213122if (result != VK_SUCCESS)3123final_result = result;3124}31253126return final_result;3127}31283129static VkResult3130tu_compute_pipeline_create(VkDevice device,3131VkPipelineCache _cache,3132const VkComputePipelineCreateInfo *pCreateInfo,3133const VkAllocationCallbacks *pAllocator,3134VkPipeline *pPipeline)3135{3136TU_FROM_HANDLE(tu_device, dev, device);3137TU_FROM_HANDLE(tu_pipeline_layout, layout, pCreateInfo->layout);3138const VkPipelineShaderStageCreateInfo *stage_info = &pCreateInfo->stage;3139VkResult result;31403141struct tu_pipeline *pipeline;31423143*pPipeline = VK_NULL_HANDLE;31443145pipeline = vk_object_zalloc(&dev->vk, pAllocator, sizeof(*pipeline),3146VK_OBJECT_TYPE_PIPELINE);3147if (!pipeline)3148return VK_ERROR_OUT_OF_HOST_MEMORY;31493150pipeline->layout = layout;31513152pipeline->executables_mem_ctx = ralloc_context(NULL);3153util_dynarray_init(&pipeline->executables, pipeline->executables_mem_ctx);31543155struct ir3_shader_key key = {};31563157nir_shader *nir = tu_spirv_to_nir(dev, stage_info, MESA_SHADER_COMPUTE);31583159const bool executable_info = pCreateInfo->flags &3160VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR;31613162char *nir_initial_disasm = executable_info ?3163nir_shader_as_str(nir, pipeline->executables_mem_ctx) : NULL;31643165struct tu_shader *shader =3166tu_shader_create(dev, nir, 0, layout, pAllocator);3167if (!shader) {3168result = VK_ERROR_OUT_OF_HOST_MEMORY;3169goto fail;3170}31713172pipeline->active_desc_sets = shader->active_desc_sets;31733174bool created;3175struct ir3_shader_variant *v =3176ir3_shader_get_variant(shader->ir3_shader, &key, false, executable_info, &created);3177if (!v) {3178result = VK_ERROR_OUT_OF_HOST_MEMORY;3179goto 
fail;3180}31813182tu_pipeline_set_linkage(&pipeline->program.link[MESA_SHADER_COMPUTE],3183shader, v);31843185result = tu_pipeline_allocate_cs(dev, pipeline, NULL, v);3186if (result != VK_SUCCESS)3187goto fail;31883189uint64_t shader_iova = tu_upload_variant(pipeline, v);31903191struct tu_pvtmem_config pvtmem;3192tu_setup_pvtmem(dev, pipeline, &pvtmem, v->pvtmem_size, v->pvtmem_per_wave);31933194for (int i = 0; i < 3; i++)3195pipeline->compute.local_size[i] = v->local_size[i];31963197pipeline->compute.subgroup_size = v->info.double_threadsize ? 128 : 64;31983199struct tu_cs prog_cs;3200tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);3201tu6_emit_cs_config(&prog_cs, shader, v, &pvtmem, shader_iova);3202pipeline->program.state = tu_cs_end_draw_state(&pipeline->cs, &prog_cs);32033204tu6_emit_load_state(pipeline, true);32053206tu_append_executable(pipeline, v, nir_initial_disasm);32073208tu_shader_destroy(dev, shader, pAllocator);32093210*pPipeline = tu_pipeline_to_handle(pipeline);32113212return VK_SUCCESS;32133214fail:3215if (shader)3216tu_shader_destroy(dev, shader, pAllocator);32173218vk_object_free(&dev->vk, pAllocator, pipeline);32193220return result;3221}32223223VKAPI_ATTR VkResult VKAPI_CALL3224tu_CreateComputePipelines(VkDevice device,3225VkPipelineCache pipelineCache,3226uint32_t count,3227const VkComputePipelineCreateInfo *pCreateInfos,3228const VkAllocationCallbacks *pAllocator,3229VkPipeline *pPipelines)3230{3231VkResult final_result = VK_SUCCESS;32323233for (uint32_t i = 0; i < count; i++) {3234VkResult result = tu_compute_pipeline_create(device, pipelineCache,3235&pCreateInfos[i],3236pAllocator, &pPipelines[i]);3237if (result != VK_SUCCESS)3238final_result = result;3239}32403241return final_result;3242}32433244VKAPI_ATTR void VKAPI_CALL3245tu_DestroyPipeline(VkDevice _device,3246VkPipeline _pipeline,3247const VkAllocationCallbacks *pAllocator)3248{3249TU_FROM_HANDLE(tu_device, dev, _device);3250TU_FROM_HANDLE(tu_pipeline, pipeline, 
_pipeline);

   if (!_pipeline)
      return;

   tu_pipeline_finish(pipeline, dev, pAllocator);
   vk_object_free(&dev->vk, pAllocator, pipeline);
}

/* snprintf into a fixed-size char-array field, zeroing it first; asserts
 * (in debug builds) that the formatted string fit.
 */
#define WRITE_STR(field, ...) ({                                 \
   memset(field, 0, sizeof(field));                              \
   UNUSED int _i = snprintf(field, sizeof(field), __VA_ARGS__);  \
   assert(_i > 0 && _i < sizeof(field));                         \
})

/* Return the index'th executable recorded on the pipeline. */
static const struct tu_pipeline_executable *
tu_pipeline_get_executable(struct tu_pipeline *pipeline, uint32_t index)
{
   assert(index < util_dynarray_num_elements(&pipeline->executables,
                                             struct tu_pipeline_executable));
   return util_dynarray_element(
      &pipeline->executables, struct tu_pipeline_executable, index);
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_GetPipelineExecutablePropertiesKHR(
    VkDevice                                    _device,
    const VkPipelineInfoKHR*                    pPipelineInfo,
    uint32_t*                                   pExecutableCount,
    VkPipelineExecutablePropertiesKHR*          pProperties)
{
   TU_FROM_HANDLE(tu_device, dev, _device);
   TU_FROM_HANDLE(tu_pipeline, pipeline, pPipelineInfo->pipeline);
   VK_OUTARRAY_MAKE(out, pProperties, pExecutableCount);

   util_dynarray_foreach (&pipeline->executables, struct tu_pipeline_executable, exe) {
      vk_outarray_append(&out, props) {
         gl_shader_stage stage = exe->stage;
         props->stages = mesa_to_vk_shader_stage(stage);

         if (!exe->is_binning)
            WRITE_STR(props->name, "%s", _mesa_shader_stage_to_abbrev(stage));
         else
            WRITE_STR(props->name, "Binning VS");

         WRITE_STR(props->description, "%s", _mesa_shader_stage_to_string(stage));

         /* Lanes per wave: base threadsize, doubled when the variant was
          * compiled for double threadsize.
          */
         props->subgroupSize =
            dev->compiler->threadsize_base * (exe->stats.double_threadsize ? 2 : 1);
      }
   }

   return vk_outarray_status(&out);
}

/* vkGetPipelineExecutableStatisticsKHR: report the IR3 compiler statistics
 * for the selected executable via the standard outarray two-call idiom.
 */
VKAPI_ATTR VkResult VKAPI_CALL
tu_GetPipelineExecutableStatisticsKHR(
    VkDevice                                    _device,
    const VkPipelineExecutableInfoKHR*          pExecutableInfo,
    uint32_t*                                   pStatisticCount,
    VkPipelineExecutableStatisticKHR*           pStatistics)
{
   TU_FROM_HANDLE(tu_pipeline, pipeline, pExecutableInfo->pipeline);
   VK_OUTARRAY_MAKE(out, pStatistics, pStatisticCount);

   const struct tu_pipeline_executable *exe =
      tu_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Max Waves Per Core");
      WRITE_STR(stat->description,
                "Maximum number of simultaneous waves per core.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.max_waves;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Instruction Count");
      WRITE_STR(stat->description,
                "Total number of IR3 instructions in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.instrs_count;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "NOPs Count");
      WRITE_STR(stat->description,
                "Number of NOP instructions in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.nops_count;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "MOV Count");
      WRITE_STR(stat->description,
                "Number of MOV instructions in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.mov_count;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "COV Count");
      WRITE_STR(stat->description,
                "Number of COV instructions in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.cov_count;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Registers used");
      WRITE_STR(stat->description,
                "Number of registers used in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.max_reg + 1;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Half-registers used");
      WRITE_STR(stat->description,
                "Number of half-registers used in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.max_half_reg + 1;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Instructions with SS sync bit");
      WRITE_STR(stat->description,
                "SS bit is set for instructions which depend on a result "
                "of \"long\" instructions to prevent RAW hazard.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.ss;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Instructions with SY sync bit");
      WRITE_STR(stat->description,
                "SY bit is set for instructions which depend on a result "
                "of loads from global memory to prevent RAW hazard.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.sy;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Estimated cycles stalled on SS");
      WRITE_STR(stat->description,
                "A better metric to estimate the impact of SS syncs.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.sstall;
   }

   /* Per-category instruction counts (cat0..catN). */
   for (int i = 0; i < ARRAY_SIZE(exe->stats.instrs_per_cat); i++) {
      vk_outarray_append(&out, stat) {
         WRITE_STR(stat->name, "cat%d instructions",
i);3409WRITE_STR(stat->description,3410"Number of cat%d instructions.", i);3411stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;3412stat->value.u64 = exe->stats.instrs_per_cat[i];3413}3414}34153416return vk_outarray_status(&out);3417}34183419static bool3420write_ir_text(VkPipelineExecutableInternalRepresentationKHR* ir,3421const char *data)3422{3423ir->isText = VK_TRUE;34243425size_t data_len = strlen(data) + 1;34263427if (ir->pData == NULL) {3428ir->dataSize = data_len;3429return true;3430}34313432strncpy(ir->pData, data, ir->dataSize);3433if (ir->dataSize < data_len)3434return false;34353436ir->dataSize = data_len;3437return true;3438}34393440VKAPI_ATTR VkResult VKAPI_CALL3441tu_GetPipelineExecutableInternalRepresentationsKHR(3442VkDevice _device,3443const VkPipelineExecutableInfoKHR* pExecutableInfo,3444uint32_t* pInternalRepresentationCount,3445VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)3446{3447TU_FROM_HANDLE(tu_pipeline, pipeline, pExecutableInfo->pipeline);3448VK_OUTARRAY_MAKE(out, pInternalRepresentations, pInternalRepresentationCount);3449bool incomplete_text = false;34503451const struct tu_pipeline_executable *exe =3452tu_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);34533454if (exe->nir_from_spirv) {3455vk_outarray_append(&out, ir) {3456WRITE_STR(ir->name, "NIR from SPIRV");3457WRITE_STR(ir->description,3458"Initial NIR before any optimizations");34593460if (!write_ir_text(ir, exe->nir_from_spirv))3461incomplete_text = true;3462}3463}34643465if (exe->nir_final) {3466vk_outarray_append(&out, ir) {3467WRITE_STR(ir->name, "Final NIR");3468WRITE_STR(ir->description,3469"Final NIR before going into the back-end compiler");34703471if (!write_ir_text(ir, exe->nir_final))3472incomplete_text = true;3473}3474}34753476if (exe->disasm) {3477vk_outarray_append(&out, ir) {3478WRITE_STR(ir->name, "IR3 Assembly");3479WRITE_STR(ir->description,3480"Final IR3 assembly for the generated shader 
binary");34813482if (!write_ir_text(ir, exe->disasm))3483incomplete_text = true;3484}3485}34863487return incomplete_text ? VK_INCOMPLETE : vk_outarray_status(&out);3488}348934903491