Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/freedreno/vulkan/tu_pipeline.c
4565 views
1
/*
2
* Copyright © 2016 Red Hat.
3
* Copyright © 2016 Bas Nieuwenhuizen
4
*
5
* based in part on anv driver which is:
6
* Copyright © 2015 Intel Corporation
7
*
8
* Permission is hereby granted, free of charge, to any person obtaining a
9
* copy of this software and associated documentation files (the "Software"),
10
* to deal in the Software without restriction, including without limitation
11
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
12
* and/or sell copies of the Software, and to permit persons to whom the
13
* Software is furnished to do so, subject to the following conditions:
14
*
15
* The above copyright notice and this permission notice (including the next
16
* paragraph) shall be included in all copies or substantial portions of the
17
* Software.
18
*
19
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25
* DEALINGS IN THE SOFTWARE.
26
*/
27
28
#include "common/freedreno_guardband.h"
29
#include "tu_private.h"
30
31
#include "ir3/ir3_nir.h"
32
#include "main/menums.h"
33
#include "nir/nir.h"
34
#include "nir/nir_builder.h"
35
#include "spirv/nir_spirv.h"
36
#include "util/debug.h"
37
#include "util/mesa-sha1.h"
38
#include "util/u_atomic.h"
39
#include "vk_format.h"
40
#include "vk_util.h"
41
42
#include "tu_cs.h"
43
44
/* Emit IB that preloads the descriptors that the shader uses */
45
46
static void
47
emit_load_state(struct tu_cs *cs, unsigned opcode, enum a6xx_state_type st,
48
enum a6xx_state_block sb, unsigned base, unsigned offset,
49
unsigned count)
50
{
51
/* Note: just emit one packet, even if count overflows NUM_UNIT. It's not
52
* clear if emitting more packets will even help anything. Presumably the
53
* descriptor cache is relatively small, and these packets stop doing
54
* anything when there are too many descriptors.
55
*/
56
tu_cs_emit_pkt7(cs, opcode, 3);
57
tu_cs_emit(cs,
58
CP_LOAD_STATE6_0_STATE_TYPE(st) |
59
CP_LOAD_STATE6_0_STATE_SRC(SS6_BINDLESS) |
60
CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
61
CP_LOAD_STATE6_0_NUM_UNIT(MIN2(count, 1024-1)));
62
tu_cs_emit_qw(cs, offset | (base << 28));
63
}
64
65
static unsigned
66
tu6_load_state_size(struct tu_pipeline *pipeline, bool compute)
67
{
68
const unsigned load_state_size = 4;
69
unsigned size = 0;
70
for (unsigned i = 0; i < pipeline->layout->num_sets; i++) {
71
if (!(pipeline->active_desc_sets & (1u << i)))
72
continue;
73
74
struct tu_descriptor_set_layout *set_layout = pipeline->layout->set[i].layout;
75
for (unsigned j = 0; j < set_layout->binding_count; j++) {
76
struct tu_descriptor_set_binding_layout *binding = &set_layout->binding[j];
77
unsigned count = 0;
78
/* Note: some users, like amber for example, pass in
79
* VK_SHADER_STAGE_ALL which includes a bunch of extra bits, so
80
* filter these out by using VK_SHADER_STAGE_ALL_GRAPHICS explicitly.
81
*/
82
VkShaderStageFlags stages = compute ?
83
binding->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT :
84
binding->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
85
unsigned stage_count = util_bitcount(stages);
86
87
if (!binding->array_size)
88
continue;
89
90
switch (binding->type) {
91
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
92
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
93
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
94
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
95
/* IBO-backed resources only need one packet for all graphics stages */
96
if (stages & ~VK_SHADER_STAGE_COMPUTE_BIT)
97
count += 1;
98
if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
99
count += 1;
100
break;
101
case VK_DESCRIPTOR_TYPE_SAMPLER:
102
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
103
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
104
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
105
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
106
/* Textures and UBO's needs a packet for each stage */
107
count = stage_count;
108
break;
109
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
110
/* Because of how we pack combined images and samplers, we
111
* currently can't use one packet for the whole array.
112
*/
113
count = stage_count * binding->array_size * 2;
114
break;
115
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
116
break;
117
default:
118
unreachable("bad descriptor type");
119
}
120
size += count * load_state_size;
121
}
122
}
123
return size;
124
}
125
126
static void
127
tu6_emit_load_state(struct tu_pipeline *pipeline, bool compute)
128
{
129
unsigned size = tu6_load_state_size(pipeline, compute);
130
if (size == 0)
131
return;
132
133
struct tu_cs cs;
134
tu_cs_begin_sub_stream(&pipeline->cs, size, &cs);
135
136
struct tu_pipeline_layout *layout = pipeline->layout;
137
for (unsigned i = 0; i < layout->num_sets; i++) {
138
/* From 13.2.7. Descriptor Set Binding:
139
*
140
* A compatible descriptor set must be bound for all set numbers that
141
* any shaders in a pipeline access, at the time that a draw or
142
* dispatch command is recorded to execute using that pipeline.
143
* However, if none of the shaders in a pipeline statically use any
144
* bindings with a particular set number, then no descriptor set need
145
* be bound for that set number, even if the pipeline layout includes
146
* a non-trivial descriptor set layout for that set number.
147
*
148
* This means that descriptor sets unused by the pipeline may have a
149
* garbage or 0 BINDLESS_BASE register, which will cause context faults
150
* when prefetching descriptors from these sets. Skip prefetching for
151
* descriptors from them to avoid this. This is also an optimization,
152
* since these prefetches would be useless.
153
*/
154
if (!(pipeline->active_desc_sets & (1u << i)))
155
continue;
156
157
struct tu_descriptor_set_layout *set_layout = layout->set[i].layout;
158
for (unsigned j = 0; j < set_layout->binding_count; j++) {
159
struct tu_descriptor_set_binding_layout *binding = &set_layout->binding[j];
160
unsigned base = i;
161
unsigned offset = binding->offset / 4;
162
/* Note: some users, like amber for example, pass in
163
* VK_SHADER_STAGE_ALL which includes a bunch of extra bits, so
164
* filter these out by using VK_SHADER_STAGE_ALL_GRAPHICS explicitly.
165
*/
166
VkShaderStageFlags stages = compute ?
167
binding->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT :
168
binding->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
169
unsigned count = binding->array_size;
170
if (count == 0 || stages == 0)
171
continue;
172
switch (binding->type) {
173
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
174
base = MAX_SETS;
175
offset = (layout->set[i].dynamic_offset_start +
176
binding->dynamic_offset_offset) * A6XX_TEX_CONST_DWORDS;
177
FALLTHROUGH;
178
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
179
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
180
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
181
/* IBO-backed resources only need one packet for all graphics stages */
182
if (stages & ~VK_SHADER_STAGE_COMPUTE_BIT) {
183
emit_load_state(&cs, CP_LOAD_STATE6, ST6_SHADER, SB6_IBO,
184
base, offset, count);
185
}
186
if (stages & VK_SHADER_STAGE_COMPUTE_BIT) {
187
emit_load_state(&cs, CP_LOAD_STATE6_FRAG, ST6_IBO, SB6_CS_SHADER,
188
base, offset, count);
189
}
190
break;
191
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
192
/* nothing - input attachment doesn't use bindless */
193
break;
194
case VK_DESCRIPTOR_TYPE_SAMPLER:
195
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
196
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
197
tu_foreach_stage(stage, stages) {
198
emit_load_state(&cs, tu6_stage2opcode(stage),
199
binding->type == VK_DESCRIPTOR_TYPE_SAMPLER ?
200
ST6_SHADER : ST6_CONSTANTS,
201
tu6_stage2texsb(stage), base, offset, count);
202
}
203
break;
204
}
205
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
206
base = MAX_SETS;
207
offset = (layout->set[i].dynamic_offset_start +
208
binding->dynamic_offset_offset) * A6XX_TEX_CONST_DWORDS;
209
FALLTHROUGH;
210
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
211
tu_foreach_stage(stage, stages) {
212
emit_load_state(&cs, tu6_stage2opcode(stage), ST6_UBO,
213
tu6_stage2shadersb(stage), base, offset, count);
214
}
215
break;
216
}
217
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
218
tu_foreach_stage(stage, stages) {
219
/* TODO: We could emit less CP_LOAD_STATE6 if we used
220
* struct-of-arrays instead of array-of-structs.
221
*/
222
for (unsigned i = 0; i < count; i++) {
223
unsigned tex_offset = offset + 2 * i * A6XX_TEX_CONST_DWORDS;
224
unsigned sam_offset = offset + (2 * i + 1) * A6XX_TEX_CONST_DWORDS;
225
emit_load_state(&cs, tu6_stage2opcode(stage),
226
ST6_CONSTANTS, tu6_stage2texsb(stage),
227
base, tex_offset, 1);
228
emit_load_state(&cs, tu6_stage2opcode(stage),
229
ST6_SHADER, tu6_stage2texsb(stage),
230
base, sam_offset, 1);
231
}
232
}
233
break;
234
}
235
default:
236
unreachable("bad descriptor type");
237
}
238
}
239
}
240
241
pipeline->load_state = tu_cs_end_draw_state(&pipeline->cs, &cs);
242
}
243
244
struct tu_pipeline_builder
245
{
246
struct tu_device *device;
247
struct tu_pipeline_cache *cache;
248
struct tu_pipeline_layout *layout;
249
const VkAllocationCallbacks *alloc;
250
const VkGraphicsPipelineCreateInfo *create_info;
251
252
struct tu_shader *shaders[MESA_SHADER_FRAGMENT + 1];
253
struct ir3_shader_variant *variants[MESA_SHADER_FRAGMENT + 1];
254
struct ir3_shader_variant *binning_variant;
255
uint64_t shader_iova[MESA_SHADER_FRAGMENT + 1];
256
uint64_t binning_vs_iova;
257
258
struct tu_pvtmem_config pvtmem;
259
260
bool rasterizer_discard;
261
/* these states are affectd by rasterizer_discard */
262
bool emit_msaa_state;
263
VkSampleCountFlagBits samples;
264
bool use_color_attachments;
265
bool use_dual_src_blend;
266
bool alpha_to_coverage;
267
uint32_t color_attachment_count;
268
VkFormat color_attachment_formats[MAX_RTS];
269
VkFormat depth_attachment_format;
270
uint32_t render_components;
271
uint32_t multiview_mask;
272
};
273
274
static bool
275
tu_logic_op_reads_dst(VkLogicOp op)
276
{
277
switch (op) {
278
case VK_LOGIC_OP_CLEAR:
279
case VK_LOGIC_OP_COPY:
280
case VK_LOGIC_OP_COPY_INVERTED:
281
case VK_LOGIC_OP_SET:
282
return false;
283
default:
284
return true;
285
}
286
}
287
288
static VkBlendFactor
289
tu_blend_factor_no_dst_alpha(VkBlendFactor factor)
290
{
291
/* treat dst alpha as 1.0 and avoid reading it */
292
switch (factor) {
293
case VK_BLEND_FACTOR_DST_ALPHA:
294
return VK_BLEND_FACTOR_ONE;
295
case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
296
return VK_BLEND_FACTOR_ZERO;
297
default:
298
return factor;
299
}
300
}
301
302
static bool tu_blend_factor_is_dual_src(VkBlendFactor factor)
303
{
304
switch (factor) {
305
case VK_BLEND_FACTOR_SRC1_COLOR:
306
case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
307
case VK_BLEND_FACTOR_SRC1_ALPHA:
308
case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
309
return true;
310
default:
311
return false;
312
}
313
}
314
315
static bool
316
tu_blend_state_is_dual_src(const VkPipelineColorBlendStateCreateInfo *info)
317
{
318
if (!info)
319
return false;
320
321
for (unsigned i = 0; i < info->attachmentCount; i++) {
322
const VkPipelineColorBlendAttachmentState *blend = &info->pAttachments[i];
323
if (tu_blend_factor_is_dual_src(blend->srcColorBlendFactor) ||
324
tu_blend_factor_is_dual_src(blend->dstColorBlendFactor) ||
325
tu_blend_factor_is_dual_src(blend->srcAlphaBlendFactor) ||
326
tu_blend_factor_is_dual_src(blend->dstAlphaBlendFactor))
327
return true;
328
}
329
330
return false;
331
}
332
333
static const struct xs_config {
334
uint16_t reg_sp_xs_ctrl;
335
uint16_t reg_sp_xs_config;
336
uint16_t reg_sp_xs_instrlen;
337
uint16_t reg_hlsq_xs_ctrl;
338
uint16_t reg_sp_xs_first_exec_offset;
339
uint16_t reg_sp_xs_pvt_mem_hw_stack_offset;
340
} xs_config[] = {
341
[MESA_SHADER_VERTEX] = {
342
REG_A6XX_SP_VS_CTRL_REG0,
343
REG_A6XX_SP_VS_CONFIG,
344
REG_A6XX_SP_VS_INSTRLEN,
345
REG_A6XX_HLSQ_VS_CNTL,
346
REG_A6XX_SP_VS_OBJ_FIRST_EXEC_OFFSET,
347
REG_A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET,
348
},
349
[MESA_SHADER_TESS_CTRL] = {
350
REG_A6XX_SP_HS_CTRL_REG0,
351
REG_A6XX_SP_HS_CONFIG,
352
REG_A6XX_SP_HS_INSTRLEN,
353
REG_A6XX_HLSQ_HS_CNTL,
354
REG_A6XX_SP_HS_OBJ_FIRST_EXEC_OFFSET,
355
REG_A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET,
356
},
357
[MESA_SHADER_TESS_EVAL] = {
358
REG_A6XX_SP_DS_CTRL_REG0,
359
REG_A6XX_SP_DS_CONFIG,
360
REG_A6XX_SP_DS_INSTRLEN,
361
REG_A6XX_HLSQ_DS_CNTL,
362
REG_A6XX_SP_DS_OBJ_FIRST_EXEC_OFFSET,
363
REG_A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET,
364
},
365
[MESA_SHADER_GEOMETRY] = {
366
REG_A6XX_SP_GS_CTRL_REG0,
367
REG_A6XX_SP_GS_CONFIG,
368
REG_A6XX_SP_GS_INSTRLEN,
369
REG_A6XX_HLSQ_GS_CNTL,
370
REG_A6XX_SP_GS_OBJ_FIRST_EXEC_OFFSET,
371
REG_A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET,
372
},
373
[MESA_SHADER_FRAGMENT] = {
374
REG_A6XX_SP_FS_CTRL_REG0,
375
REG_A6XX_SP_FS_CONFIG,
376
REG_A6XX_SP_FS_INSTRLEN,
377
REG_A6XX_HLSQ_FS_CNTL,
378
REG_A6XX_SP_FS_OBJ_FIRST_EXEC_OFFSET,
379
REG_A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET,
380
},
381
[MESA_SHADER_COMPUTE] = {
382
REG_A6XX_SP_CS_CTRL_REG0,
383
REG_A6XX_SP_CS_CONFIG,
384
REG_A6XX_SP_CS_INSTRLEN,
385
REG_A6XX_HLSQ_CS_CNTL,
386
REG_A6XX_SP_CS_OBJ_FIRST_EXEC_OFFSET,
387
REG_A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET,
388
},
389
};
390
391
void
392
tu6_emit_xs_config(struct tu_cs *cs,
393
gl_shader_stage stage, /* xs->type, but xs may be NULL */
394
const struct ir3_shader_variant *xs)
395
{
396
const struct xs_config *cfg = &xs_config[stage];
397
398
if (!xs) {
399
/* shader stage disabled */
400
tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_config, 1);
401
tu_cs_emit(cs, 0);
402
403
tu_cs_emit_pkt4(cs, cfg->reg_hlsq_xs_ctrl, 1);
404
tu_cs_emit(cs, 0);
405
return;
406
}
407
408
tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_config, 1);
409
tu_cs_emit(cs, A6XX_SP_VS_CONFIG_ENABLED |
410
COND(xs->bindless_tex, A6XX_SP_VS_CONFIG_BINDLESS_TEX) |
411
COND(xs->bindless_samp, A6XX_SP_VS_CONFIG_BINDLESS_SAMP) |
412
COND(xs->bindless_ibo, A6XX_SP_VS_CONFIG_BINDLESS_IBO) |
413
COND(xs->bindless_ubo, A6XX_SP_VS_CONFIG_BINDLESS_UBO) |
414
A6XX_SP_VS_CONFIG_NTEX(xs->num_samp) |
415
A6XX_SP_VS_CONFIG_NSAMP(xs->num_samp));
416
417
tu_cs_emit_pkt4(cs, cfg->reg_hlsq_xs_ctrl, 1);
418
tu_cs_emit(cs, A6XX_HLSQ_VS_CNTL_CONSTLEN(xs->constlen) |
419
A6XX_HLSQ_VS_CNTL_ENABLED);
420
}
421
422
void
423
tu6_emit_xs(struct tu_cs *cs,
424
gl_shader_stage stage, /* xs->type, but xs may be NULL */
425
const struct ir3_shader_variant *xs,
426
const struct tu_pvtmem_config *pvtmem,
427
uint64_t binary_iova)
428
{
429
const struct xs_config *cfg = &xs_config[stage];
430
431
if (!xs) {
432
/* shader stage disabled */
433
return;
434
}
435
436
enum a6xx_threadsize thrsz =
437
xs->info.double_threadsize ? THREAD128 : THREAD64;
438
switch (stage) {
439
case MESA_SHADER_VERTEX:
440
tu_cs_emit_regs(cs, A6XX_SP_VS_CTRL_REG0(
441
.fullregfootprint = xs->info.max_reg + 1,
442
.halfregfootprint = xs->info.max_half_reg + 1,
443
.branchstack = ir3_shader_branchstack_hw(xs),
444
.mergedregs = xs->mergedregs,
445
));
446
break;
447
case MESA_SHADER_TESS_CTRL:
448
tu_cs_emit_regs(cs, A6XX_SP_HS_CTRL_REG0(
449
.fullregfootprint = xs->info.max_reg + 1,
450
.halfregfootprint = xs->info.max_half_reg + 1,
451
.branchstack = ir3_shader_branchstack_hw(xs),
452
));
453
break;
454
case MESA_SHADER_TESS_EVAL:
455
tu_cs_emit_regs(cs, A6XX_SP_DS_CTRL_REG0(
456
.fullregfootprint = xs->info.max_reg + 1,
457
.halfregfootprint = xs->info.max_half_reg + 1,
458
.branchstack = ir3_shader_branchstack_hw(xs),
459
.mergedregs = xs->mergedregs,
460
));
461
break;
462
case MESA_SHADER_GEOMETRY:
463
tu_cs_emit_regs(cs, A6XX_SP_GS_CTRL_REG0(
464
.fullregfootprint = xs->info.max_reg + 1,
465
.halfregfootprint = xs->info.max_half_reg + 1,
466
.branchstack = ir3_shader_branchstack_hw(xs),
467
));
468
break;
469
case MESA_SHADER_FRAGMENT:
470
tu_cs_emit_regs(cs, A6XX_SP_FS_CTRL_REG0(
471
.fullregfootprint = xs->info.max_reg + 1,
472
.halfregfootprint = xs->info.max_half_reg + 1,
473
.branchstack = ir3_shader_branchstack_hw(xs),
474
.mergedregs = xs->mergedregs,
475
.threadsize = thrsz,
476
.pixlodenable = xs->need_pixlod,
477
.diff_fine = xs->need_fine_derivatives,
478
.varying = xs->total_in != 0,
479
/* unknown bit, seems unnecessary */
480
.unk24 = true,
481
));
482
break;
483
case MESA_SHADER_COMPUTE:
484
tu_cs_emit_regs(cs, A6XX_SP_CS_CTRL_REG0(
485
.fullregfootprint = xs->info.max_reg + 1,
486
.halfregfootprint = xs->info.max_half_reg + 1,
487
.branchstack = ir3_shader_branchstack_hw(xs),
488
.mergedregs = xs->mergedregs,
489
.threadsize = thrsz,
490
));
491
break;
492
default:
493
unreachable("bad shader stage");
494
}
495
496
tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_instrlen, 1);
497
tu_cs_emit(cs, xs->instrlen);
498
499
/* emit program binary & private memory layout
500
* binary_iova should be aligned to 1 instrlen unit (128 bytes)
501
*/
502
503
assert((binary_iova & 0x7f) == 0);
504
assert((pvtmem->iova & 0x1f) == 0);
505
506
tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_first_exec_offset, 7);
507
tu_cs_emit(cs, 0);
508
tu_cs_emit_qw(cs, binary_iova);
509
tu_cs_emit(cs,
510
A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(pvtmem->per_fiber_size));
511
tu_cs_emit_qw(cs, pvtmem->iova);
512
tu_cs_emit(cs, A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(pvtmem->per_sp_size) |
513
COND(pvtmem->per_wave, A6XX_SP_VS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT));
514
515
tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_pvt_mem_hw_stack_offset, 1);
516
tu_cs_emit(cs, A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET(pvtmem->per_sp_size));
517
518
tu_cs_emit_pkt7(cs, tu6_stage2opcode(stage), 3);
519
tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
520
CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
521
CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
522
CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(stage)) |
523
CP_LOAD_STATE6_0_NUM_UNIT(xs->instrlen));
524
tu_cs_emit_qw(cs, binary_iova);
525
526
/* emit immediates */
527
528
const struct ir3_const_state *const_state = ir3_const_state(xs);
529
uint32_t base = const_state->offsets.immediate;
530
int size = DIV_ROUND_UP(const_state->immediates_count, 4);
531
532
/* truncate size to avoid writing constants that shader
533
* does not use:
534
*/
535
size = MIN2(size + base, xs->constlen) - base;
536
537
if (size > 0) {
538
tu_cs_emit_pkt7(cs, tu6_stage2opcode(stage), 3 + size * 4);
539
tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
540
CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
541
CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
542
CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(stage)) |
543
CP_LOAD_STATE6_0_NUM_UNIT(size));
544
tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
545
tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
546
547
tu_cs_emit_array(cs, const_state->immediates, size * 4);
548
}
549
550
if (const_state->constant_data_ubo != -1) {
551
uint64_t iova = binary_iova + xs->info.constant_data_offset;
552
553
/* Upload UBO state for the constant data. */
554
tu_cs_emit_pkt7(cs, tu6_stage2opcode(stage), 5);
555
tu_cs_emit(cs,
556
CP_LOAD_STATE6_0_DST_OFF(const_state->constant_data_ubo) |
557
CP_LOAD_STATE6_0_STATE_TYPE(ST6_UBO)|
558
CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
559
CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(stage)) |
560
CP_LOAD_STATE6_0_NUM_UNIT(1));
561
tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
562
tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
563
int size_vec4s = DIV_ROUND_UP(xs->constant_data_size, 16);
564
tu_cs_emit_qw(cs,
565
iova |
566
(uint64_t)A6XX_UBO_1_SIZE(size_vec4s) << 32);
567
568
/* Upload the constant data to the const file if needed. */
569
const struct ir3_ubo_analysis_state *ubo_state = &const_state->ubo_state;
570
571
for (int i = 0; i < ubo_state->num_enabled; i++) {
572
if (ubo_state->range[i].ubo.block != const_state->constant_data_ubo ||
573
ubo_state->range[i].ubo.bindless) {
574
continue;
575
}
576
577
uint32_t start = ubo_state->range[i].start;
578
uint32_t end = ubo_state->range[i].end;
579
uint32_t size = MIN2(end - start,
580
(16 * xs->constlen) - ubo_state->range[i].offset);
581
582
tu_cs_emit_pkt7(cs, tu6_stage2opcode(stage), 3);
583
tu_cs_emit(cs,
584
CP_LOAD_STATE6_0_DST_OFF(ubo_state->range[i].offset / 16) |
585
CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
586
CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
587
CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(stage)) |
588
CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
589
tu_cs_emit_qw(cs, iova + start);
590
}
591
}
592
}
593
594
static void
595
tu6_emit_cs_config(struct tu_cs *cs, const struct tu_shader *shader,
596
const struct ir3_shader_variant *v,
597
const struct tu_pvtmem_config *pvtmem,
598
uint64_t binary_iova)
599
{
600
tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
601
.cs_state = true,
602
.cs_ibo = true));
603
604
tu6_emit_xs_config(cs, MESA_SHADER_COMPUTE, v);
605
tu6_emit_xs(cs, MESA_SHADER_COMPUTE, v, pvtmem, binary_iova);
606
607
uint32_t shared_size = MAX2(((int)v->shared_size - 1) / 1024, 1);
608
tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
609
tu_cs_emit(cs, A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE(shared_size) |
610
A6XX_SP_CS_UNKNOWN_A9B1_UNK6);
611
612
uint32_t local_invocation_id =
613
ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
614
uint32_t work_group_id =
615
ir3_find_sysval_regid(v, SYSTEM_VALUE_WORKGROUP_ID);
616
617
enum a6xx_threadsize thrsz = v->info.double_threadsize ? THREAD128 : THREAD64;
618
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CS_CNTL_0, 2);
619
tu_cs_emit(cs,
620
A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
621
A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(regid(63, 0)) |
622
A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(regid(63, 0)) |
623
A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
624
tu_cs_emit(cs, A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(regid(63, 0)) |
625
A6XX_HLSQ_CS_CNTL_1_THREADSIZE(thrsz));
626
}
627
628
static void
629
tu6_emit_vs_system_values(struct tu_cs *cs,
630
const struct ir3_shader_variant *vs,
631
const struct ir3_shader_variant *hs,
632
const struct ir3_shader_variant *ds,
633
const struct ir3_shader_variant *gs,
634
bool primid_passthru)
635
{
636
const uint32_t vertexid_regid =
637
ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID);
638
const uint32_t instanceid_regid =
639
ir3_find_sysval_regid(vs, SYSTEM_VALUE_INSTANCE_ID);
640
const uint32_t tess_coord_x_regid = hs ?
641
ir3_find_sysval_regid(ds, SYSTEM_VALUE_TESS_COORD) :
642
regid(63, 0);
643
const uint32_t tess_coord_y_regid = VALIDREG(tess_coord_x_regid) ?
644
tess_coord_x_regid + 1 :
645
regid(63, 0);
646
const uint32_t hs_patch_regid = hs ?
647
ir3_find_sysval_regid(hs, SYSTEM_VALUE_PRIMITIVE_ID) :
648
regid(63, 0);
649
const uint32_t ds_patch_regid = hs ?
650
ir3_find_sysval_regid(ds, SYSTEM_VALUE_PRIMITIVE_ID) :
651
regid(63, 0);
652
const uint32_t hs_invocation_regid = hs ?
653
ir3_find_sysval_regid(hs, SYSTEM_VALUE_TCS_HEADER_IR3) :
654
regid(63, 0);
655
const uint32_t primitiveid_regid = gs ?
656
ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID) :
657
regid(63, 0);
658
const uint32_t gsheader_regid = gs ?
659
ir3_find_sysval_regid(gs, SYSTEM_VALUE_GS_HEADER_IR3) :
660
regid(63, 0);
661
662
/* Note: we currently don't support multiview with tess or GS. If we did,
663
* and the HW actually works, then we'd have to somehow share this across
664
* stages. Note that the blob doesn't support this either.
665
*/
666
const uint32_t viewid_regid =
667
ir3_find_sysval_regid(vs, SYSTEM_VALUE_VIEW_INDEX);
668
669
tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_1, 6);
670
tu_cs_emit(cs, A6XX_VFD_CONTROL_1_REGID4VTX(vertexid_regid) |
671
A6XX_VFD_CONTROL_1_REGID4INST(instanceid_regid) |
672
A6XX_VFD_CONTROL_1_REGID4PRIMID(primitiveid_regid) |
673
A6XX_VFD_CONTROL_1_REGID4VIEWID(viewid_regid));
674
tu_cs_emit(cs, A6XX_VFD_CONTROL_2_REGID_HSPATCHID(hs_patch_regid) |
675
A6XX_VFD_CONTROL_2_REGID_INVOCATIONID(hs_invocation_regid));
676
tu_cs_emit(cs, A6XX_VFD_CONTROL_3_REGID_DSPATCHID(ds_patch_regid) |
677
A6XX_VFD_CONTROL_3_REGID_TESSX(tess_coord_x_regid) |
678
A6XX_VFD_CONTROL_3_REGID_TESSY(tess_coord_y_regid) |
679
0xfc);
680
tu_cs_emit(cs, 0x000000fc); /* VFD_CONTROL_4 */
681
tu_cs_emit(cs, A6XX_VFD_CONTROL_5_REGID_GSHEADER(gsheader_regid) |
682
0xfc00); /* VFD_CONTROL_5 */
683
tu_cs_emit(cs, COND(primid_passthru, A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU)); /* VFD_CONTROL_6 */
684
}
685
686
static void
687
tu6_setup_streamout(struct tu_cs *cs,
688
const struct ir3_shader_variant *v,
689
struct ir3_shader_linkage *l)
690
{
691
const struct ir3_stream_output_info *info = &v->shader->stream_output;
692
/* Note: 64 here comes from the HW layout of the program RAM. The program
693
* for stream N is at DWORD 64 * N.
694
*/
695
#define A6XX_SO_PROG_DWORDS 64
696
uint32_t prog[A6XX_SO_PROG_DWORDS * IR3_MAX_SO_STREAMS] = {};
697
BITSET_DECLARE(valid_dwords, A6XX_SO_PROG_DWORDS * IR3_MAX_SO_STREAMS) = {0};
698
uint32_t ncomp[IR3_MAX_SO_BUFFERS] = {};
699
700
/* TODO: streamout state should be in a non-GMEM draw state */
701
702
/* no streamout: */
703
if (info->num_outputs == 0) {
704
tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
705
tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
706
tu_cs_emit(cs, 0);
707
tu_cs_emit(cs, REG_A6XX_VPC_SO_STREAM_CNTL);
708
tu_cs_emit(cs, 0);
709
return;
710
}
711
712
/* is there something to do with info->stride[i]? */
713
714
for (unsigned i = 0; i < info->num_outputs; i++) {
715
const struct ir3_stream_output *out = &info->output[i];
716
unsigned k = out->register_index;
717
unsigned idx;
718
719
/* Skip it, if there's an unused reg in the middle of outputs. */
720
if (v->outputs[k].regid == INVALID_REG)
721
continue;
722
723
ncomp[out->output_buffer] += out->num_components;
724
725
/* linkage map sorted by order frag shader wants things, so
726
* a bit less ideal here..
727
*/
728
for (idx = 0; idx < l->cnt; idx++)
729
if (l->var[idx].regid == v->outputs[k].regid)
730
break;
731
732
debug_assert(idx < l->cnt);
733
734
for (unsigned j = 0; j < out->num_components; j++) {
735
unsigned c = j + out->start_component;
736
unsigned loc = l->var[idx].loc + c;
737
unsigned off = j + out->dst_offset; /* in dwords */
738
739
assert(loc < A6XX_SO_PROG_DWORDS * 2);
740
unsigned dword = out->stream * A6XX_SO_PROG_DWORDS + loc/2;
741
if (loc & 1) {
742
prog[dword] |= A6XX_VPC_SO_PROG_B_EN |
743
A6XX_VPC_SO_PROG_B_BUF(out->output_buffer) |
744
A6XX_VPC_SO_PROG_B_OFF(off * 4);
745
} else {
746
prog[dword] |= A6XX_VPC_SO_PROG_A_EN |
747
A6XX_VPC_SO_PROG_A_BUF(out->output_buffer) |
748
A6XX_VPC_SO_PROG_A_OFF(off * 4);
749
}
750
BITSET_SET(valid_dwords, dword);
751
}
752
}
753
754
unsigned prog_count = 0;
755
unsigned start, end;
756
BITSET_FOREACH_RANGE(start, end, valid_dwords,
757
A6XX_SO_PROG_DWORDS * IR3_MAX_SO_STREAMS) {
758
prog_count += end - start + 1;
759
}
760
761
tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 10 + 2 * prog_count);
762
tu_cs_emit(cs, REG_A6XX_VPC_SO_STREAM_CNTL);
763
tu_cs_emit(cs, A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE(info->streams_written) |
764
COND(ncomp[0] > 0,
765
A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM(1 + info->buffer_to_stream[0])) |
766
COND(ncomp[1] > 0,
767
A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM(1 + info->buffer_to_stream[1])) |
768
COND(ncomp[2] > 0,
769
A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM(1 + info->buffer_to_stream[2])) |
770
COND(ncomp[3] > 0,
771
A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM(1 + info->buffer_to_stream[3])));
772
for (uint32_t i = 0; i < 4; i++) {
773
tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(i));
774
tu_cs_emit(cs, ncomp[i]);
775
}
776
bool first = true;
777
BITSET_FOREACH_RANGE(start, end, valid_dwords,
778
A6XX_SO_PROG_DWORDS * IR3_MAX_SO_STREAMS) {
779
tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
780
tu_cs_emit(cs, COND(first, A6XX_VPC_SO_CNTL_RESET) |
781
A6XX_VPC_SO_CNTL_ADDR(start));
782
for (unsigned i = start; i < end; i++) {
783
tu_cs_emit(cs, REG_A6XX_VPC_SO_PROG);
784
tu_cs_emit(cs, prog[i]);
785
}
786
first = false;
787
}
788
}
789
790
static void
791
tu6_emit_const(struct tu_cs *cs, uint32_t opcode, uint32_t base,
792
enum a6xx_state_block block, uint32_t offset,
793
uint32_t size, const uint32_t *dwords) {
794
assert(size % 4 == 0);
795
796
tu_cs_emit_pkt7(cs, opcode, 3 + size);
797
tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
798
CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
799
CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
800
CP_LOAD_STATE6_0_STATE_BLOCK(block) |
801
CP_LOAD_STATE6_0_NUM_UNIT(size / 4));
802
803
tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
804
tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
805
dwords = (uint32_t *)&((uint8_t *)dwords)[offset];
806
807
tu_cs_emit_array(cs, dwords, size);
808
}
809
810
static void
811
tu6_emit_link_map(struct tu_cs *cs,
812
const struct ir3_shader_variant *producer,
813
const struct ir3_shader_variant *consumer,
814
enum a6xx_state_block sb)
815
{
816
const struct ir3_const_state *const_state = ir3_const_state(consumer);
817
uint32_t base = const_state->offsets.primitive_map;
818
int size = DIV_ROUND_UP(consumer->input_size, 4);
819
820
size = (MIN2(size + base, consumer->constlen) - base) * 4;
821
if (size <= 0)
822
return;
823
824
tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, base, sb, 0, size,
825
producer->output_loc);
826
}
827
828
static uint16_t
829
gl_primitive_to_tess(uint16_t primitive) {
830
switch (primitive) {
831
case GL_POINTS:
832
return TESS_POINTS;
833
case GL_LINE_STRIP:
834
return TESS_LINES;
835
case GL_TRIANGLE_STRIP:
836
return TESS_CW_TRIS;
837
default:
838
unreachable("");
839
}
840
}
841
842
void
843
tu6_emit_vpc(struct tu_cs *cs,
844
const struct ir3_shader_variant *vs,
845
const struct ir3_shader_variant *hs,
846
const struct ir3_shader_variant *ds,
847
const struct ir3_shader_variant *gs,
848
const struct ir3_shader_variant *fs,
849
uint32_t patch_control_points)
850
{
851
/* note: doesn't compile as static because of the array regs.. */
852
const struct reg_config {
853
uint16_t reg_sp_xs_out_reg;
854
uint16_t reg_sp_xs_vpc_dst_reg;
855
uint16_t reg_vpc_xs_pack;
856
uint16_t reg_vpc_xs_clip_cntl;
857
uint16_t reg_gras_xs_cl_cntl;
858
uint16_t reg_pc_xs_out_cntl;
859
uint16_t reg_sp_xs_primitive_cntl;
860
uint16_t reg_vpc_xs_layer_cntl;
861
uint16_t reg_gras_xs_layer_cntl;
862
} reg_config[] = {
863
[MESA_SHADER_VERTEX] = {
864
REG_A6XX_SP_VS_OUT_REG(0),
865
REG_A6XX_SP_VS_VPC_DST_REG(0),
866
REG_A6XX_VPC_VS_PACK,
867
REG_A6XX_VPC_VS_CLIP_CNTL,
868
REG_A6XX_GRAS_VS_CL_CNTL,
869
REG_A6XX_PC_VS_OUT_CNTL,
870
REG_A6XX_SP_VS_PRIMITIVE_CNTL,
871
REG_A6XX_VPC_VS_LAYER_CNTL,
872
REG_A6XX_GRAS_VS_LAYER_CNTL
873
},
874
[MESA_SHADER_TESS_EVAL] = {
875
REG_A6XX_SP_DS_OUT_REG(0),
876
REG_A6XX_SP_DS_VPC_DST_REG(0),
877
REG_A6XX_VPC_DS_PACK,
878
REG_A6XX_VPC_DS_CLIP_CNTL,
879
REG_A6XX_GRAS_DS_CL_CNTL,
880
REG_A6XX_PC_DS_OUT_CNTL,
881
REG_A6XX_SP_DS_PRIMITIVE_CNTL,
882
REG_A6XX_VPC_DS_LAYER_CNTL,
883
REG_A6XX_GRAS_DS_LAYER_CNTL
884
},
885
[MESA_SHADER_GEOMETRY] = {
886
REG_A6XX_SP_GS_OUT_REG(0),
887
REG_A6XX_SP_GS_VPC_DST_REG(0),
888
REG_A6XX_VPC_GS_PACK,
889
REG_A6XX_VPC_GS_CLIP_CNTL,
890
REG_A6XX_GRAS_GS_CL_CNTL,
891
REG_A6XX_PC_GS_OUT_CNTL,
892
REG_A6XX_SP_GS_PRIMITIVE_CNTL,
893
REG_A6XX_VPC_GS_LAYER_CNTL,
894
REG_A6XX_GRAS_GS_LAYER_CNTL
895
},
896
};
897
898
const struct ir3_shader_variant *last_shader;
899
if (gs) {
900
last_shader = gs;
901
} else if (hs) {
902
last_shader = ds;
903
} else {
904
last_shader = vs;
905
}
906
907
const struct reg_config *cfg = &reg_config[last_shader->type];
908
909
struct ir3_shader_linkage linkage = {
910
.primid_loc = 0xff,
911
.clip0_loc = 0xff,
912
.clip1_loc = 0xff,
913
};
914
if (fs)
915
ir3_link_shaders(&linkage, last_shader, fs, true);
916
917
if (last_shader->shader->stream_output.num_outputs)
918
ir3_link_stream_out(&linkage, last_shader);
919
920
/* We do this after linking shaders in order to know whether PrimID
921
* passthrough needs to be enabled.
922
*/
923
bool primid_passthru = linkage.primid_loc != 0xff;
924
tu6_emit_vs_system_values(cs, vs, hs, ds, gs, primid_passthru);
925
926
tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VAR_DISABLE(0), 4);
927
tu_cs_emit(cs, ~linkage.varmask[0]);
928
tu_cs_emit(cs, ~linkage.varmask[1]);
929
tu_cs_emit(cs, ~linkage.varmask[2]);
930
tu_cs_emit(cs, ~linkage.varmask[3]);
931
932
/* a6xx finds position/pointsize at the end */
933
const uint32_t pointsize_regid =
934
ir3_find_output_regid(last_shader, VARYING_SLOT_PSIZ);
935
const uint32_t layer_regid =
936
ir3_find_output_regid(last_shader, VARYING_SLOT_LAYER);
937
const uint32_t view_regid =
938
ir3_find_output_regid(last_shader, VARYING_SLOT_VIEWPORT);
939
const uint32_t clip0_regid =
940
ir3_find_output_regid(last_shader, VARYING_SLOT_CLIP_DIST0);
941
const uint32_t clip1_regid =
942
ir3_find_output_regid(last_shader, VARYING_SLOT_CLIP_DIST1);
943
uint32_t primitive_regid = gs ?
944
ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID) : regid(63, 0);
945
uint32_t flags_regid = gs ?
946
ir3_find_output_regid(gs, VARYING_SLOT_GS_VERTEX_FLAGS_IR3) : 0;
947
948
uint32_t pointsize_loc = 0xff, position_loc = 0xff, layer_loc = 0xff, view_loc = 0xff;
949
950
if (layer_regid != regid(63, 0)) {
951
layer_loc = linkage.max_loc;
952
ir3_link_add(&linkage, layer_regid, 0x1, linkage.max_loc);
953
}
954
955
if (view_regid != regid(63, 0)) {
956
view_loc = linkage.max_loc;
957
ir3_link_add(&linkage, view_regid, 0x1, linkage.max_loc);
958
}
959
960
unsigned extra_pos = 0;
961
962
for (unsigned i = 0; i < last_shader->outputs_count; i++) {
963
if (last_shader->outputs[i].slot != VARYING_SLOT_POS)
964
continue;
965
966
if (position_loc == 0xff)
967
position_loc = linkage.max_loc;
968
969
ir3_link_add(&linkage, last_shader->outputs[i].regid,
970
0xf, position_loc + 4 * last_shader->outputs[i].view);
971
extra_pos = MAX2(extra_pos, last_shader->outputs[i].view);
972
}
973
974
if (pointsize_regid != regid(63, 0)) {
975
pointsize_loc = linkage.max_loc;
976
ir3_link_add(&linkage, pointsize_regid, 0x1, linkage.max_loc);
977
}
978
979
uint8_t clip_cull_mask = last_shader->clip_mask | last_shader->cull_mask;
980
981
/* Handle the case where clip/cull distances aren't read by the FS */
982
uint32_t clip0_loc = linkage.clip0_loc, clip1_loc = linkage.clip1_loc;
983
if (clip0_loc == 0xff && clip0_regid != regid(63, 0)) {
984
clip0_loc = linkage.max_loc;
985
ir3_link_add(&linkage, clip0_regid, clip_cull_mask & 0xf, linkage.max_loc);
986
}
987
if (clip1_loc == 0xff && clip1_regid != regid(63, 0)) {
988
clip1_loc = linkage.max_loc;
989
ir3_link_add(&linkage, clip1_regid, clip_cull_mask >> 4, linkage.max_loc);
990
}
991
992
tu6_setup_streamout(cs, last_shader, &linkage);
993
994
/* The GPU hangs on some models when there are no outputs (xs_pack::CNT),
995
* at least when a DS is the last stage, so add a dummy output to keep it
996
* happy if there aren't any. We do this late in order to avoid emitting
997
* any unused code and make sure that optimizations don't remove it.
998
*/
999
if (linkage.cnt == 0)
1000
ir3_link_add(&linkage, 0, 0x1, linkage.max_loc);
1001
1002
/* map outputs of the last shader to VPC */
1003
assert(linkage.cnt <= 32);
1004
const uint32_t sp_out_count = DIV_ROUND_UP(linkage.cnt, 2);
1005
const uint32_t sp_vpc_dst_count = DIV_ROUND_UP(linkage.cnt, 4);
1006
uint32_t sp_out[16] = {0};
1007
uint32_t sp_vpc_dst[8] = {0};
1008
for (uint32_t i = 0; i < linkage.cnt; i++) {
1009
((uint16_t *) sp_out)[i] =
1010
A6XX_SP_VS_OUT_REG_A_REGID(linkage.var[i].regid) |
1011
A6XX_SP_VS_OUT_REG_A_COMPMASK(linkage.var[i].compmask);
1012
((uint8_t *) sp_vpc_dst)[i] =
1013
A6XX_SP_VS_VPC_DST_REG_OUTLOC0(linkage.var[i].loc);
1014
}
1015
1016
tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_out_reg, sp_out_count);
1017
tu_cs_emit_array(cs, sp_out, sp_out_count);
1018
1019
tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_vpc_dst_reg, sp_vpc_dst_count);
1020
tu_cs_emit_array(cs, sp_vpc_dst, sp_vpc_dst_count);
1021
1022
tu_cs_emit_pkt4(cs, cfg->reg_vpc_xs_pack, 1);
1023
tu_cs_emit(cs, A6XX_VPC_VS_PACK_POSITIONLOC(position_loc) |
1024
A6XX_VPC_VS_PACK_PSIZELOC(pointsize_loc) |
1025
A6XX_VPC_VS_PACK_STRIDE_IN_VPC(linkage.max_loc) |
1026
A6XX_VPC_VS_PACK_EXTRAPOS(extra_pos));
1027
1028
tu_cs_emit_pkt4(cs, cfg->reg_vpc_xs_clip_cntl, 1);
1029
tu_cs_emit(cs, A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK(clip_cull_mask) |
1030
A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC(clip0_loc) |
1031
A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC(clip1_loc));
1032
1033
tu_cs_emit_pkt4(cs, cfg->reg_gras_xs_cl_cntl, 1);
1034
tu_cs_emit(cs, A6XX_GRAS_VS_CL_CNTL_CLIP_MASK(last_shader->clip_mask) |
1035
A6XX_GRAS_VS_CL_CNTL_CULL_MASK(last_shader->cull_mask));
1036
1037
tu_cs_emit_pkt4(cs, cfg->reg_pc_xs_out_cntl, 1);
1038
tu_cs_emit(cs, A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC(linkage.max_loc) |
1039
CONDREG(pointsize_regid, A6XX_PC_VS_OUT_CNTL_PSIZE) |
1040
CONDREG(layer_regid, A6XX_PC_VS_OUT_CNTL_LAYER) |
1041
CONDREG(view_regid, A6XX_PC_VS_OUT_CNTL_VIEW) |
1042
CONDREG(primitive_regid, A6XX_PC_VS_OUT_CNTL_PRIMITIVE_ID) |
1043
A6XX_PC_VS_OUT_CNTL_CLIP_MASK(clip_cull_mask));
1044
1045
tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_primitive_cntl, 1);
1046
tu_cs_emit(cs, A6XX_SP_VS_PRIMITIVE_CNTL_OUT(linkage.cnt) |
1047
A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID(flags_regid));
1048
1049
tu_cs_emit_pkt4(cs, cfg->reg_vpc_xs_layer_cntl, 1);
1050
tu_cs_emit(cs, A6XX_VPC_VS_LAYER_CNTL_LAYERLOC(layer_loc) |
1051
A6XX_VPC_VS_LAYER_CNTL_VIEWLOC(view_loc));
1052
1053
tu_cs_emit_pkt4(cs, cfg->reg_gras_xs_layer_cntl, 1);
1054
tu_cs_emit(cs, CONDREG(layer_regid, A6XX_GRAS_GS_LAYER_CNTL_WRITES_LAYER) |
1055
CONDREG(view_regid, A6XX_GRAS_GS_LAYER_CNTL_WRITES_VIEW));
1056
1057
tu_cs_emit_regs(cs, A6XX_PC_PRIMID_PASSTHRU(primid_passthru));
1058
1059
tu_cs_emit_pkt4(cs, REG_A6XX_VPC_CNTL_0, 1);
1060
tu_cs_emit(cs, A6XX_VPC_CNTL_0_NUMNONPOSVAR(fs ? fs->total_in : 0) |
1061
COND(fs && fs->total_in, A6XX_VPC_CNTL_0_VARYING) |
1062
A6XX_VPC_CNTL_0_PRIMIDLOC(linkage.primid_loc) |
1063
A6XX_VPC_CNTL_0_VIEWIDLOC(linkage.viewid_loc));
1064
1065
if (hs) {
1066
shader_info *hs_info = &hs->shader->nir->info;
1067
1068
tu_cs_emit_pkt4(cs, REG_A6XX_PC_TESS_NUM_VERTEX, 1);
1069
tu_cs_emit(cs, hs_info->tess.tcs_vertices_out);
1070
1071
/* Total attribute slots in HS incoming patch. */
1072
tu_cs_emit_pkt4(cs, REG_A6XX_PC_HS_INPUT_SIZE, 1);
1073
tu_cs_emit(cs, patch_control_points * vs->output_size / 4);
1074
1075
const uint32_t wavesize = 64;
1076
const uint32_t max_wave_input_size = 64;
1077
1078
/* note: if HS is really just the VS extended, then this
1079
* should be by MAX2(patch_control_points, hs_info->tess.tcs_vertices_out)
1080
* however that doesn't match the blob, and fails some dEQP tests.
1081
*/
1082
uint32_t prims_per_wave = wavesize / hs_info->tess.tcs_vertices_out;
1083
uint32_t max_prims_per_wave =
1084
max_wave_input_size * wavesize / (vs->output_size * patch_control_points);
1085
prims_per_wave = MIN2(prims_per_wave, max_prims_per_wave);
1086
1087
uint32_t total_size = vs->output_size * patch_control_points * prims_per_wave;
1088
uint32_t wave_input_size = DIV_ROUND_UP(total_size, wavesize);
1089
1090
tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_WAVE_INPUT_SIZE, 1);
1091
tu_cs_emit(cs, wave_input_size);
1092
1093
/* In SPIR-V generated from GLSL, the tessellation primitive params are
1094
* are specified in the tess eval shader, but in SPIR-V generated from
1095
* HLSL, they are specified in the tess control shader. */
1096
shader_info *tess_info =
1097
ds->shader->nir->info.tess.spacing == TESS_SPACING_UNSPECIFIED ?
1098
&hs->shader->nir->info : &ds->shader->nir->info;
1099
tu_cs_emit_pkt4(cs, REG_A6XX_PC_TESS_CNTL, 1);
1100
uint32_t output;
1101
if (tess_info->tess.point_mode)
1102
output = TESS_POINTS;
1103
else if (tess_info->tess.primitive_mode == GL_ISOLINES)
1104
output = TESS_LINES;
1105
else if (tess_info->tess.ccw)
1106
output = TESS_CCW_TRIS;
1107
else
1108
output = TESS_CW_TRIS;
1109
1110
enum a6xx_tess_spacing spacing;
1111
switch (tess_info->tess.spacing) {
1112
case TESS_SPACING_EQUAL:
1113
spacing = TESS_EQUAL;
1114
break;
1115
case TESS_SPACING_FRACTIONAL_ODD:
1116
spacing = TESS_FRACTIONAL_ODD;
1117
break;
1118
case TESS_SPACING_FRACTIONAL_EVEN:
1119
spacing = TESS_FRACTIONAL_EVEN;
1120
break;
1121
case TESS_SPACING_UNSPECIFIED:
1122
default:
1123
unreachable("invalid tess spacing");
1124
}
1125
tu_cs_emit(cs, A6XX_PC_TESS_CNTL_SPACING(spacing) |
1126
A6XX_PC_TESS_CNTL_OUTPUT(output));
1127
1128
tu6_emit_link_map(cs, vs, hs, SB6_HS_SHADER);
1129
tu6_emit_link_map(cs, hs, ds, SB6_DS_SHADER);
1130
}
1131
1132
1133
if (gs) {
1134
uint32_t vertices_out, invocations, output, vec4_size;
1135
uint32_t prev_stage_output_size = ds ? ds->output_size : vs->output_size;
1136
1137
/* this detects the tu_clear_blit path, which doesn't set ->nir */
1138
if (gs->shader->nir) {
1139
if (hs) {
1140
tu6_emit_link_map(cs, ds, gs, SB6_GS_SHADER);
1141
} else {
1142
tu6_emit_link_map(cs, vs, gs, SB6_GS_SHADER);
1143
}
1144
vertices_out = gs->shader->nir->info.gs.vertices_out - 1;
1145
output = gl_primitive_to_tess(gs->shader->nir->info.gs.output_primitive);
1146
invocations = gs->shader->nir->info.gs.invocations - 1;
1147
/* Size of per-primitive alloction in ldlw memory in vec4s. */
1148
vec4_size = gs->shader->nir->info.gs.vertices_in *
1149
DIV_ROUND_UP(prev_stage_output_size, 4);
1150
} else {
1151
vertices_out = 3;
1152
output = TESS_CW_TRIS;
1153
invocations = 0;
1154
vec4_size = 0;
1155
}
1156
1157
tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_5, 1);
1158
tu_cs_emit(cs,
1159
A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(vertices_out) |
1160
A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(output) |
1161
A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(invocations));
1162
1163
tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_3, 1);
1164
tu_cs_emit(cs, 0);
1165
1166
tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9100, 1);
1167
tu_cs_emit(cs, 0xff);
1168
1169
tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 1);
1170
tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(vec4_size));
1171
1172
uint32_t prim_size = prev_stage_output_size;
1173
if (prim_size > 64)
1174
prim_size = 64;
1175
else if (prim_size == 64)
1176
prim_size = 63;
1177
tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_PRIM_SIZE, 1);
1178
tu_cs_emit(cs, prim_size);
1179
}
1180
}
1181
1182
static int
1183
tu6_vpc_varying_mode(const struct ir3_shader_variant *fs,
1184
uint32_t index,
1185
uint8_t *interp_mode,
1186
uint8_t *ps_repl_mode)
1187
{
1188
enum
1189
{
1190
INTERP_SMOOTH = 0,
1191
INTERP_FLAT = 1,
1192
INTERP_ZERO = 2,
1193
INTERP_ONE = 3,
1194
};
1195
enum
1196
{
1197
PS_REPL_NONE = 0,
1198
PS_REPL_S = 1,
1199
PS_REPL_T = 2,
1200
PS_REPL_ONE_MINUS_T = 3,
1201
};
1202
1203
const uint32_t compmask = fs->inputs[index].compmask;
1204
1205
/* NOTE: varyings are packed, so if compmask is 0xb then first, second, and
1206
* fourth component occupy three consecutive varying slots
1207
*/
1208
int shift = 0;
1209
*interp_mode = 0;
1210
*ps_repl_mode = 0;
1211
if (fs->inputs[index].slot == VARYING_SLOT_PNTC) {
1212
if (compmask & 0x1) {
1213
*ps_repl_mode |= PS_REPL_S << shift;
1214
shift += 2;
1215
}
1216
if (compmask & 0x2) {
1217
*ps_repl_mode |= PS_REPL_T << shift;
1218
shift += 2;
1219
}
1220
if (compmask & 0x4) {
1221
*interp_mode |= INTERP_ZERO << shift;
1222
shift += 2;
1223
}
1224
if (compmask & 0x8) {
1225
*interp_mode |= INTERP_ONE << 6;
1226
shift += 2;
1227
}
1228
} else if (fs->inputs[index].flat) {
1229
for (int i = 0; i < 4; i++) {
1230
if (compmask & (1 << i)) {
1231
*interp_mode |= INTERP_FLAT << shift;
1232
shift += 2;
1233
}
1234
}
1235
}
1236
1237
return shift;
1238
}
1239
1240
static void
1241
tu6_emit_vpc_varying_modes(struct tu_cs *cs,
1242
const struct ir3_shader_variant *fs)
1243
{
1244
uint32_t interp_modes[8] = { 0 };
1245
uint32_t ps_repl_modes[8] = { 0 };
1246
1247
if (fs) {
1248
for (int i = -1;
1249
(i = ir3_next_varying(fs, i)) < (int) fs->inputs_count;) {
1250
1251
/* get the mode for input i */
1252
uint8_t interp_mode;
1253
uint8_t ps_repl_mode;
1254
const int bits =
1255
tu6_vpc_varying_mode(fs, i, &interp_mode, &ps_repl_mode);
1256
1257
/* OR the mode into the array */
1258
const uint32_t inloc = fs->inputs[i].inloc * 2;
1259
uint32_t n = inloc / 32;
1260
uint32_t shift = inloc % 32;
1261
interp_modes[n] |= interp_mode << shift;
1262
ps_repl_modes[n] |= ps_repl_mode << shift;
1263
if (shift + bits > 32) {
1264
n++;
1265
shift = 32 - shift;
1266
1267
interp_modes[n] |= interp_mode >> shift;
1268
ps_repl_modes[n] |= ps_repl_mode >> shift;
1269
}
1270
}
1271
}
1272
1273
tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_INTERP_MODE(0), 8);
1274
tu_cs_emit_array(cs, interp_modes, 8);
1275
1276
tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_PS_REPL_MODE(0), 8);
1277
tu_cs_emit_array(cs, ps_repl_modes, 8);
1278
}
1279
1280
void
1281
tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs)
1282
{
1283
uint32_t face_regid, coord_regid, zwcoord_regid, samp_id_regid;
1284
uint32_t ij_regid[IJ_COUNT];
1285
uint32_t smask_in_regid;
1286
1287
bool sample_shading = fs->per_samp | fs->key.sample_shading;
1288
bool enable_varyings = fs->total_in > 0;
1289
1290
samp_id_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_ID);
1291
smask_in_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_MASK_IN);
1292
face_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRONT_FACE);
1293
coord_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRAG_COORD);
1294
zwcoord_regid = VALIDREG(coord_regid) ? coord_regid + 2 : regid(63, 0);
1295
for (unsigned i = 0; i < ARRAY_SIZE(ij_regid); i++)
1296
ij_regid[i] = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL + i);
1297
1298
if (VALIDREG(ij_regid[IJ_LINEAR_SAMPLE]))
1299
tu_finishme("linear sample varying");
1300
1301
if (VALIDREG(ij_regid[IJ_LINEAR_CENTROID]))
1302
tu_finishme("linear centroid varying");
1303
1304
if (fs->num_sampler_prefetch > 0) {
1305
assert(VALIDREG(ij_regid[IJ_PERSP_PIXEL]));
1306
/* also, it seems like ij_pix is *required* to be r0.x */
1307
assert(ij_regid[IJ_PERSP_PIXEL] == regid(0, 0));
1308
}
1309
1310
tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_PREFETCH_CNTL, 1 + fs->num_sampler_prefetch);
1311
tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CNTL_COUNT(fs->num_sampler_prefetch) |
1312
A6XX_SP_FS_PREFETCH_CNTL_UNK4(regid(63, 0)) |
1313
0x7000); // XXX);
1314
for (int i = 0; i < fs->num_sampler_prefetch; i++) {
1315
const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];
1316
tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CMD_SRC(prefetch->src) |
1317
A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(prefetch->samp_id) |
1318
A6XX_SP_FS_PREFETCH_CMD_TEX_ID(prefetch->tex_id) |
1319
A6XX_SP_FS_PREFETCH_CMD_DST(prefetch->dst) |
1320
A6XX_SP_FS_PREFETCH_CMD_WRMASK(prefetch->wrmask) |
1321
COND(prefetch->half_precision, A6XX_SP_FS_PREFETCH_CMD_HALF) |
1322
A6XX_SP_FS_PREFETCH_CMD_CMD(prefetch->cmd));
1323
}
1324
1325
if (fs->num_sampler_prefetch > 0) {
1326
tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(0), fs->num_sampler_prefetch);
1327
for (int i = 0; i < fs->num_sampler_prefetch; i++) {
1328
const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];
1329
tu_cs_emit(cs,
1330
A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(prefetch->samp_bindless_id) |
1331
A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(prefetch->tex_bindless_id));
1332
}
1333
}
1334
1335
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CONTROL_1_REG, 5);
1336
tu_cs_emit(cs, 0x7);
1337
tu_cs_emit(cs, A6XX_HLSQ_CONTROL_2_REG_FACEREGID(face_regid) |
1338
A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(samp_id_regid) |
1339
A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(smask_in_regid) |
1340
A6XX_HLSQ_CONTROL_2_REG_SIZE(ij_regid[IJ_PERSP_SIZE]));
1341
tu_cs_emit(cs, A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(ij_regid[IJ_PERSP_PIXEL]) |
1342
A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(ij_regid[IJ_LINEAR_PIXEL]) |
1343
A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(ij_regid[IJ_PERSP_CENTROID]) |
1344
A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(ij_regid[IJ_LINEAR_CENTROID]));
1345
tu_cs_emit(cs, A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(coord_regid) |
1346
A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(zwcoord_regid) |
1347
A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(ij_regid[IJ_PERSP_SAMPLE]) |
1348
A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(ij_regid[IJ_LINEAR_SAMPLE]));
1349
tu_cs_emit(cs, 0xfc);
1350
1351
enum a6xx_threadsize thrsz = fs->info.double_threadsize ? THREAD128 : THREAD64;
1352
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_FS_CNTL_0, 1);
1353
tu_cs_emit(cs, A6XX_HLSQ_FS_CNTL_0_THREADSIZE(thrsz) |
1354
COND(enable_varyings, A6XX_HLSQ_FS_CNTL_0_VARYINGS));
1355
1356
bool need_size = fs->frag_face || fs->fragcoord_compmask != 0;
1357
bool need_size_persamp = false;
1358
if (VALIDREG(ij_regid[IJ_PERSP_SIZE])) {
1359
if (sample_shading)
1360
need_size_persamp = true;
1361
else
1362
need_size = true;
1363
}
1364
if (VALIDREG(ij_regid[IJ_LINEAR_PIXEL]))
1365
need_size = true;
1366
1367
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CNTL, 1);
1368
tu_cs_emit(cs,
1369
CONDREG(ij_regid[IJ_PERSP_PIXEL], A6XX_GRAS_CNTL_IJ_PERSP_PIXEL) |
1370
CONDREG(ij_regid[IJ_PERSP_CENTROID], A6XX_GRAS_CNTL_IJ_PERSP_CENTROID) |
1371
CONDREG(ij_regid[IJ_PERSP_SAMPLE], A6XX_GRAS_CNTL_IJ_PERSP_SAMPLE) |
1372
COND(need_size, A6XX_GRAS_CNTL_SIZE) |
1373
COND(need_size_persamp, A6XX_GRAS_CNTL_SIZE_PERSAMP) |
1374
COND(fs->fragcoord_compmask != 0, A6XX_GRAS_CNTL_COORD_MASK(fs->fragcoord_compmask)));
1375
1376
tu_cs_emit_pkt4(cs, REG_A6XX_RB_RENDER_CONTROL0, 2);
1377
tu_cs_emit(cs,
1378
CONDREG(ij_regid[IJ_PERSP_PIXEL], A6XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL) |
1379
CONDREG(ij_regid[IJ_PERSP_CENTROID], A6XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID) |
1380
CONDREG(ij_regid[IJ_PERSP_SAMPLE], A6XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE) |
1381
COND(need_size, A6XX_RB_RENDER_CONTROL0_SIZE) |
1382
COND(enable_varyings, A6XX_RB_RENDER_CONTROL0_UNK10) |
1383
COND(need_size_persamp, A6XX_RB_RENDER_CONTROL0_SIZE_PERSAMP) |
1384
COND(fs->fragcoord_compmask != 0,
1385
A6XX_RB_RENDER_CONTROL0_COORD_MASK(fs->fragcoord_compmask)));
1386
tu_cs_emit(cs,
1387
/* these two bits (UNK4/UNK5) relate to fragcoord
1388
* without them, fragcoord is the same for all samples
1389
*/
1390
COND(sample_shading, A6XX_RB_RENDER_CONTROL1_UNK4) |
1391
COND(sample_shading, A6XX_RB_RENDER_CONTROL1_UNK5) |
1392
CONDREG(smask_in_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEMASK) |
1393
CONDREG(samp_id_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEID) |
1394
CONDREG(ij_regid[IJ_PERSP_SIZE], A6XX_RB_RENDER_CONTROL1_SIZE) |
1395
COND(fs->frag_face, A6XX_RB_RENDER_CONTROL1_FACENESS));
1396
1397
tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CNTL, 1);
1398
tu_cs_emit(cs, COND(sample_shading, A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE));
1399
1400
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8101, 1);
1401
tu_cs_emit(cs, COND(sample_shading, 0x6)); // XXX
1402
1403
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CNTL, 1);
1404
tu_cs_emit(cs, COND(sample_shading, A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE));
1405
}
1406
1407
static void
1408
tu6_emit_fs_outputs(struct tu_cs *cs,
1409
const struct ir3_shader_variant *fs,
1410
uint32_t mrt_count, bool dual_src_blend,
1411
uint32_t render_components,
1412
bool no_earlyz,
1413
struct tu_pipeline *pipeline)
1414
{
1415
uint32_t smask_regid, posz_regid, stencilref_regid;
1416
1417
posz_regid = ir3_find_output_regid(fs, FRAG_RESULT_DEPTH);
1418
smask_regid = ir3_find_output_regid(fs, FRAG_RESULT_SAMPLE_MASK);
1419
stencilref_regid = ir3_find_output_regid(fs, FRAG_RESULT_STENCIL);
1420
1421
uint32_t fragdata_regid[8];
1422
if (fs->color0_mrt) {
1423
fragdata_regid[0] = ir3_find_output_regid(fs, FRAG_RESULT_COLOR);
1424
for (uint32_t i = 1; i < ARRAY_SIZE(fragdata_regid); i++)
1425
fragdata_regid[i] = fragdata_regid[0];
1426
} else {
1427
for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++)
1428
fragdata_regid[i] = ir3_find_output_regid(fs, FRAG_RESULT_DATA0 + i);
1429
}
1430
1431
tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2);
1432
tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(posz_regid) |
1433
A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(smask_regid) |
1434
A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID(stencilref_regid) |
1435
COND(dual_src_blend, A6XX_SP_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE));
1436
tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL1_MRT(mrt_count));
1437
1438
uint32_t fs_render_components = 0;
1439
1440
tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_REG(0), 8);
1441
for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++) {
1442
// TODO we could have a mix of half and full precision outputs,
1443
// we really need to figure out half-precision from IR3_REG_HALF
1444
tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_REG_REGID(fragdata_regid[i]) |
1445
(false ? A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION : 0));
1446
1447
if (VALIDREG(fragdata_regid[i])) {
1448
fs_render_components |= 0xf << (i * 4);
1449
}
1450
}
1451
1452
/* dual source blending has an extra fs output in the 2nd slot */
1453
if (dual_src_blend) {
1454
fs_render_components |= 0xf << 4;
1455
}
1456
1457
/* There is no point in having component enabled which is not written
1458
* by the shader. Per VK spec it is an UB, however a few apps depend on
1459
* attachment not being changed if FS doesn't have corresponding output.
1460
*/
1461
fs_render_components &= render_components;
1462
1463
tu_cs_emit_regs(cs,
1464
A6XX_SP_FS_RENDER_COMPONENTS(.dword = fs_render_components));
1465
1466
tu_cs_emit_pkt4(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 2);
1467
tu_cs_emit(cs, COND(fs->writes_pos, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z) |
1468
COND(fs->writes_smask, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK) |
1469
COND(fs->writes_stencilref, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_STENCILREF) |
1470
COND(dual_src_blend, A6XX_RB_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE));
1471
tu_cs_emit(cs, A6XX_RB_FS_OUTPUT_CNTL1_MRT(mrt_count));
1472
1473
tu_cs_emit_regs(cs,
1474
A6XX_RB_RENDER_COMPONENTS(.dword = fs_render_components));
1475
1476
if (pipeline) {
1477
pipeline->lrz.fs_has_kill = fs->has_kill;
1478
pipeline->lrz.early_fragment_tests = fs->shader->nir->info.fs.early_fragment_tests;
1479
1480
if ((fs->shader && !fs->shader->nir->info.fs.early_fragment_tests) &&
1481
(fs->no_earlyz || fs->has_kill || fs->writes_pos || fs->writes_stencilref || no_earlyz || fs->writes_smask)) {
1482
pipeline->lrz.force_late_z = true;
1483
}
1484
}
1485
}
1486
1487
static void
1488
tu6_emit_geom_tess_consts(struct tu_cs *cs,
1489
const struct ir3_shader_variant *vs,
1490
const struct ir3_shader_variant *hs,
1491
const struct ir3_shader_variant *ds,
1492
const struct ir3_shader_variant *gs,
1493
uint32_t cps_per_patch)
1494
{
1495
uint32_t num_vertices =
1496
hs ? cps_per_patch : gs->shader->nir->info.gs.vertices_in;
1497
1498
uint32_t vs_params[4] = {
1499
vs->output_size * num_vertices * 4, /* vs primitive stride */
1500
vs->output_size * 4, /* vs vertex stride */
1501
0,
1502
0,
1503
};
1504
uint32_t vs_base = ir3_const_state(vs)->offsets.primitive_param;
1505
tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, vs_base, SB6_VS_SHADER, 0,
1506
ARRAY_SIZE(vs_params), vs_params);
1507
1508
if (hs) {
1509
assert(ds->type != MESA_SHADER_NONE);
1510
uint32_t hs_params[4] = {
1511
vs->output_size * num_vertices * 4, /* hs primitive stride */
1512
vs->output_size * 4, /* hs vertex stride */
1513
hs->output_size,
1514
cps_per_patch,
1515
};
1516
1517
uint32_t hs_base = hs->const_state->offsets.primitive_param;
1518
tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, hs_base, SB6_HS_SHADER, 0,
1519
ARRAY_SIZE(hs_params), hs_params);
1520
if (gs)
1521
num_vertices = gs->shader->nir->info.gs.vertices_in;
1522
1523
uint32_t ds_params[4] = {
1524
ds->output_size * num_vertices * 4, /* ds primitive stride */
1525
ds->output_size * 4, /* ds vertex stride */
1526
hs->output_size, /* hs vertex stride (dwords) */
1527
hs->shader->nir->info.tess.tcs_vertices_out
1528
};
1529
1530
uint32_t ds_base = ds->const_state->offsets.primitive_param;
1531
tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, ds_base, SB6_DS_SHADER, 0,
1532
ARRAY_SIZE(ds_params), ds_params);
1533
}
1534
1535
if (gs) {
1536
const struct ir3_shader_variant *prev = ds ? ds : vs;
1537
uint32_t gs_params[4] = {
1538
prev->output_size * num_vertices * 4, /* gs primitive stride */
1539
prev->output_size * 4, /* gs vertex stride */
1540
0,
1541
0,
1542
};
1543
uint32_t gs_base = gs->const_state->offsets.primitive_param;
1544
tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, gs_base, SB6_GS_SHADER, 0,
1545
ARRAY_SIZE(gs_params), gs_params);
1546
}
1547
}
1548
1549
static void
1550
tu6_emit_program_config(struct tu_cs *cs,
1551
struct tu_pipeline_builder *builder)
1552
{
1553
gl_shader_stage stage = MESA_SHADER_VERTEX;
1554
1555
STATIC_ASSERT(MESA_SHADER_VERTEX == 0);
1556
1557
tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
1558
.vs_state = true,
1559
.hs_state = true,
1560
.ds_state = true,
1561
.gs_state = true,
1562
.fs_state = true,
1563
.gfx_ibo = true));
1564
for (; stage < ARRAY_SIZE(builder->shaders); stage++) {
1565
tu6_emit_xs_config(cs, stage, builder->variants[stage]);
1566
}
1567
}
1568
1569
static void
1570
tu6_emit_program(struct tu_cs *cs,
1571
struct tu_pipeline_builder *builder,
1572
bool binning_pass,
1573
struct tu_pipeline *pipeline)
1574
{
1575
const struct ir3_shader_variant *vs = builder->variants[MESA_SHADER_VERTEX];
1576
const struct ir3_shader_variant *bs = builder->binning_variant;
1577
const struct ir3_shader_variant *hs = builder->variants[MESA_SHADER_TESS_CTRL];
1578
const struct ir3_shader_variant *ds = builder->variants[MESA_SHADER_TESS_EVAL];
1579
const struct ir3_shader_variant *gs = builder->variants[MESA_SHADER_GEOMETRY];
1580
const struct ir3_shader_variant *fs = builder->variants[MESA_SHADER_FRAGMENT];
1581
gl_shader_stage stage = MESA_SHADER_VERTEX;
1582
uint32_t cps_per_patch = builder->create_info->pTessellationState ?
1583
builder->create_info->pTessellationState->patchControlPoints : 0;
1584
bool multi_pos_output = builder->shaders[MESA_SHADER_VERTEX]->multi_pos_output;
1585
1586
/* Don't use the binning pass variant when GS is present because we don't
1587
* support compiling correct binning pass variants with GS.
1588
*/
1589
if (binning_pass && !gs) {
1590
vs = bs;
1591
tu6_emit_xs(cs, stage, bs, &builder->pvtmem, builder->binning_vs_iova);
1592
stage++;
1593
}
1594
1595
for (; stage < ARRAY_SIZE(builder->shaders); stage++) {
1596
const struct ir3_shader_variant *xs = builder->variants[stage];
1597
1598
if (stage == MESA_SHADER_FRAGMENT && binning_pass)
1599
fs = xs = NULL;
1600
1601
tu6_emit_xs(cs, stage, xs, &builder->pvtmem, builder->shader_iova[stage]);
1602
}
1603
1604
uint32_t multiview_views = util_logbase2(builder->multiview_mask) + 1;
1605
uint32_t multiview_cntl = builder->multiview_mask ?
1606
A6XX_PC_MULTIVIEW_CNTL_ENABLE |
1607
A6XX_PC_MULTIVIEW_CNTL_VIEWS(multiview_views) |
1608
COND(!multi_pos_output, A6XX_PC_MULTIVIEW_CNTL_DISABLEMULTIPOS)
1609
: 0;
1610
1611
/* Copy what the blob does here. This will emit an extra 0x3f
1612
* CP_EVENT_WRITE when multiview is disabled. I'm not exactly sure what
1613
* this is working around yet.
1614
*/
1615
if (builder->device->physical_device->info->a6xx.has_cp_reg_write) {
1616
tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
1617
tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(UNK_EVENT_WRITE));
1618
tu_cs_emit(cs, REG_A6XX_PC_MULTIVIEW_CNTL);
1619
} else {
1620
tu_cs_emit_pkt4(cs, REG_A6XX_PC_MULTIVIEW_CNTL, 1);
1621
}
1622
tu_cs_emit(cs, multiview_cntl);
1623
1624
tu_cs_emit_pkt4(cs, REG_A6XX_VFD_MULTIVIEW_CNTL, 1);
1625
tu_cs_emit(cs, multiview_cntl);
1626
1627
if (multiview_cntl &&
1628
builder->device->physical_device->info->a6xx.supports_multiview_mask) {
1629
tu_cs_emit_pkt4(cs, REG_A6XX_PC_MULTIVIEW_MASK, 1);
1630
tu_cs_emit(cs, builder->multiview_mask);
1631
}
1632
1633
tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_WAVE_INPUT_SIZE, 1);
1634
tu_cs_emit(cs, 0);
1635
1636
tu6_emit_vpc(cs, vs, hs, ds, gs, fs, cps_per_patch);
1637
tu6_emit_vpc_varying_modes(cs, fs);
1638
1639
bool no_earlyz = builder->depth_attachment_format == VK_FORMAT_S8_UINT;
1640
uint32_t mrt_count = builder->color_attachment_count;
1641
uint32_t render_components = builder->render_components;
1642
1643
if (builder->alpha_to_coverage) {
1644
/* alpha to coverage can behave like a discard */
1645
no_earlyz = true;
1646
/* alpha value comes from first mrt */
1647
render_components |= 0xf;
1648
if (!mrt_count) {
1649
mrt_count = 1;
1650
/* Disable memory write for dummy mrt because it doesn't get set otherwise */
1651
tu_cs_emit_regs(cs, A6XX_RB_MRT_CONTROL(0, .component_enable = 0));
1652
}
1653
}
1654
1655
if (fs) {
1656
tu6_emit_fs_inputs(cs, fs);
1657
tu6_emit_fs_outputs(cs, fs, mrt_count,
1658
builder->use_dual_src_blend,
1659
render_components,
1660
no_earlyz,
1661
pipeline);
1662
} else {
1663
/* TODO: check if these can be skipped if fs is disabled */
1664
struct ir3_shader_variant dummy_variant = {};
1665
tu6_emit_fs_inputs(cs, &dummy_variant);
1666
tu6_emit_fs_outputs(cs, &dummy_variant, mrt_count,
1667
builder->use_dual_src_blend,
1668
render_components,
1669
no_earlyz,
1670
NULL);
1671
}
1672
1673
if (gs || hs) {
1674
tu6_emit_geom_tess_consts(cs, vs, hs, ds, gs, cps_per_patch);
1675
}
1676
}
1677
1678
static void
1679
tu6_emit_vertex_input(struct tu_pipeline *pipeline,
1680
struct tu_cs *cs,
1681
const struct ir3_shader_variant *vs,
1682
const VkPipelineVertexInputStateCreateInfo *info)
1683
{
1684
uint32_t vfd_decode_idx = 0;
1685
uint32_t binding_instanced = 0; /* bitmask of instanced bindings */
1686
uint32_t step_rate[MAX_VBS];
1687
1688
for (uint32_t i = 0; i < info->vertexBindingDescriptionCount; i++) {
1689
const VkVertexInputBindingDescription *binding =
1690
&info->pVertexBindingDescriptions[i];
1691
1692
if (!(pipeline->dynamic_state_mask & BIT(TU_DYNAMIC_STATE_VB_STRIDE))) {
1693
tu_cs_emit_regs(cs,
1694
A6XX_VFD_FETCH_STRIDE(binding->binding, binding->stride));
1695
}
1696
1697
if (binding->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE)
1698
binding_instanced |= 1 << binding->binding;
1699
1700
step_rate[binding->binding] = 1;
1701
}
1702
1703
const VkPipelineVertexInputDivisorStateCreateInfoEXT *div_state =
1704
vk_find_struct_const(info->pNext, PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
1705
if (div_state) {
1706
for (uint32_t i = 0; i < div_state->vertexBindingDivisorCount; i++) {
1707
const VkVertexInputBindingDivisorDescriptionEXT *desc =
1708
&div_state->pVertexBindingDivisors[i];
1709
step_rate[desc->binding] = desc->divisor;
1710
}
1711
}
1712
1713
/* TODO: emit all VFD_DECODE/VFD_DEST_CNTL in same (two) pkt4 */
1714
1715
for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
1716
const VkVertexInputAttributeDescription *attr =
1717
&info->pVertexAttributeDescriptions[i];
1718
uint32_t input_idx;
1719
1720
for (input_idx = 0; input_idx < vs->inputs_count; input_idx++) {
1721
if ((vs->inputs[input_idx].slot - VERT_ATTRIB_GENERIC0) == attr->location)
1722
break;
1723
}
1724
1725
/* attribute not used, skip it */
1726
if (input_idx == vs->inputs_count)
1727
continue;
1728
1729
const struct tu_native_format format = tu6_format_vtx(attr->format);
1730
tu_cs_emit_regs(cs,
1731
A6XX_VFD_DECODE_INSTR(vfd_decode_idx,
1732
.idx = attr->binding,
1733
.offset = attr->offset,
1734
.instanced = binding_instanced & (1 << attr->binding),
1735
.format = format.fmt,
1736
.swap = format.swap,
1737
.unk30 = 1,
1738
._float = !vk_format_is_int(attr->format)),
1739
A6XX_VFD_DECODE_STEP_RATE(vfd_decode_idx, step_rate[attr->binding]));
1740
1741
tu_cs_emit_regs(cs,
1742
A6XX_VFD_DEST_CNTL_INSTR(vfd_decode_idx,
1743
.writemask = vs->inputs[input_idx].compmask,
1744
.regid = vs->inputs[input_idx].regid));
1745
1746
vfd_decode_idx++;
1747
}
1748
1749
tu_cs_emit_regs(cs,
1750
A6XX_VFD_CONTROL_0(
1751
.fetch_cnt = vfd_decode_idx, /* decode_cnt for binning pass ? */
1752
.decode_cnt = vfd_decode_idx));
1753
}
1754
1755
void
1756
tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewports, uint32_t num_viewport)
1757
{
1758
VkExtent2D guardband = {511, 511};
1759
1760
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_VPORT_XOFFSET(0), num_viewport * 6);
1761
for (uint32_t i = 0; i < num_viewport; i++) {
1762
const VkViewport *viewport = &viewports[i];
1763
float offsets[3];
1764
float scales[3];
1765
scales[0] = viewport->width / 2.0f;
1766
scales[1] = viewport->height / 2.0f;
1767
scales[2] = viewport->maxDepth - viewport->minDepth;
1768
offsets[0] = viewport->x + scales[0];
1769
offsets[1] = viewport->y + scales[1];
1770
offsets[2] = viewport->minDepth;
1771
for (uint32_t j = 0; j < 3; j++) {
1772
tu_cs_emit(cs, fui(offsets[j]));
1773
tu_cs_emit(cs, fui(scales[j]));
1774
}
1775
1776
guardband.width =
1777
MIN2(guardband.width, fd_calc_guardband(offsets[0], scales[0], false));
1778
guardband.height =
1779
MIN2(guardband.height, fd_calc_guardband(offsets[1], scales[1], false));
1780
}
1781
1782
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL(0), num_viewport * 2);
1783
for (uint32_t i = 0; i < num_viewport; i++) {
1784
const VkViewport *viewport = &viewports[i];
1785
VkOffset2D min;
1786
VkOffset2D max;
1787
min.x = (int32_t) viewport->x;
1788
max.x = (int32_t) ceilf(viewport->x + viewport->width);
1789
if (viewport->height >= 0.0f) {
1790
min.y = (int32_t) viewport->y;
1791
max.y = (int32_t) ceilf(viewport->y + viewport->height);
1792
} else {
1793
min.y = (int32_t)(viewport->y + viewport->height);
1794
max.y = (int32_t) ceilf(viewport->y);
1795
}
1796
/* the spec allows viewport->height to be 0.0f */
1797
if (min.y == max.y)
1798
max.y++;
1799
/* allow viewport->width = 0.0f for un-initialized viewports: */
1800
if (min.x == max.x)
1801
max.x++;
1802
1803
min.x = MAX2(min.x, 0);
1804
min.y = MAX2(min.y, 0);
1805
1806
assert(min.x < max.x);
1807
assert(min.y < max.y);
1808
tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X(min.x) |
1809
A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y(min.y));
1810
tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X(max.x - 1) |
1811
A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y(max.y - 1));
1812
}
1813
1814
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_Z_CLAMP(0), num_viewport * 2);
1815
for (uint32_t i = 0; i < num_viewport; i++) {
1816
const VkViewport *viewport = &viewports[i];
1817
tu_cs_emit(cs, fui(MIN2(viewport->minDepth, viewport->maxDepth)));
1818
tu_cs_emit(cs, fui(MAX2(viewport->minDepth, viewport->maxDepth)));
1819
}
1820
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ, 1);
1821
tu_cs_emit(cs, A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(guardband.width) |
1822
A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(guardband.height));
1823
1824
/* TODO: what to do about this and multi viewport ? */
1825
float z_clamp_min = num_viewport ? MIN2(viewports[0].minDepth, viewports[0].maxDepth) : 0;
1826
float z_clamp_max = num_viewport ? MAX2(viewports[0].minDepth, viewports[0].maxDepth) : 0;
1827
1828
tu_cs_emit_regs(cs,
1829
A6XX_RB_Z_CLAMP_MIN(z_clamp_min),
1830
A6XX_RB_Z_CLAMP_MAX(z_clamp_max));
1831
}
1832
1833
void
1834
tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissors, uint32_t scissor_count)
1835
{
1836
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL(0), scissor_count * 2);
1837
1838
for (uint32_t i = 0; i < scissor_count; i++) {
1839
const VkRect2D *scissor = &scissors[i];
1840
1841
uint32_t min_x = scissor->offset.x;
1842
uint32_t min_y = scissor->offset.y;
1843
uint32_t max_x = min_x + scissor->extent.width - 1;
1844
uint32_t max_y = min_y + scissor->extent.height - 1;
1845
1846
if (!scissor->extent.width || !scissor->extent.height) {
1847
min_x = min_y = 1;
1848
max_x = max_y = 0;
1849
} else {
1850
/* avoid overflow */
1851
uint32_t scissor_max = BITFIELD_MASK(15);
1852
min_x = MIN2(scissor_max, min_x);
1853
min_y = MIN2(scissor_max, min_y);
1854
max_x = MIN2(scissor_max, max_x);
1855
max_y = MIN2(scissor_max, max_y);
1856
}
1857
1858
tu_cs_emit(cs, A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X(min_x) |
1859
A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(min_y));
1860
tu_cs_emit(cs, A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X(max_x) |
1861
A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(max_y));
1862
}
1863
}
1864
1865
void
1866
tu6_emit_sample_locations(struct tu_cs *cs, const VkSampleLocationsInfoEXT *samp_loc)
1867
{
1868
if (!samp_loc) {
1869
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CONFIG, 1);
1870
tu_cs_emit(cs, 0);
1871
1872
tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CONFIG, 1);
1873
tu_cs_emit(cs, 0);
1874
1875
tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_SAMPLE_CONFIG, 1);
1876
tu_cs_emit(cs, 0);
1877
return;
1878
}
1879
1880
assert(samp_loc->sampleLocationsPerPixel == samp_loc->sampleLocationsCount);
1881
assert(samp_loc->sampleLocationGridSize.width == 1);
1882
assert(samp_loc->sampleLocationGridSize.height == 1);
1883
1884
uint32_t sample_config =
1885
A6XX_RB_SAMPLE_CONFIG_LOCATION_ENABLE;
1886
uint32_t sample_locations = 0;
1887
for (uint32_t i = 0; i < samp_loc->sampleLocationsCount; i++) {
1888
sample_locations |=
1889
(A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(samp_loc->pSampleLocations[i].x) |
1890
A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(samp_loc->pSampleLocations[i].y)) << i*8;
1891
}
1892
1893
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CONFIG, 2);
1894
tu_cs_emit(cs, sample_config);
1895
tu_cs_emit(cs, sample_locations);
1896
1897
tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CONFIG, 2);
1898
tu_cs_emit(cs, sample_config);
1899
tu_cs_emit(cs, sample_locations);
1900
1901
tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_SAMPLE_CONFIG, 2);
1902
tu_cs_emit(cs, sample_config);
1903
tu_cs_emit(cs, sample_locations);
1904
}
1905
1906
static uint32_t
1907
tu6_gras_su_cntl(const VkPipelineRasterizationStateCreateInfo *rast_info,
1908
VkSampleCountFlagBits samples,
1909
bool multiview)
1910
{
1911
uint32_t gras_su_cntl = 0;
1912
1913
if (rast_info->cullMode & VK_CULL_MODE_FRONT_BIT)
1914
gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_FRONT;
1915
if (rast_info->cullMode & VK_CULL_MODE_BACK_BIT)
1916
gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_BACK;
1917
1918
if (rast_info->frontFace == VK_FRONT_FACE_CLOCKWISE)
1919
gras_su_cntl |= A6XX_GRAS_SU_CNTL_FRONT_CW;
1920
1921
gras_su_cntl |=
1922
A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(rast_info->lineWidth / 2.0f);
1923
1924
if (rast_info->depthBiasEnable)
1925
gras_su_cntl |= A6XX_GRAS_SU_CNTL_POLY_OFFSET;
1926
1927
if (samples > VK_SAMPLE_COUNT_1_BIT)
1928
gras_su_cntl |= A6XX_GRAS_SU_CNTL_MSAA_ENABLE;
1929
1930
if (multiview) {
1931
gras_su_cntl |=
1932
A6XX_GRAS_SU_CNTL_UNK17 |
1933
A6XX_GRAS_SU_CNTL_MULTIVIEW_ENABLE;
1934
}
1935
1936
return gras_su_cntl;
1937
}
1938
1939
void
1940
tu6_emit_depth_bias(struct tu_cs *cs,
1941
float constant_factor,
1942
float clamp,
1943
float slope_factor)
1944
{
1945
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE, 3);
1946
tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_SCALE(slope_factor).value);
1947
tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET(constant_factor).value);
1948
tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(clamp).value);
1949
}
1950
1951
static uint32_t
1952
tu6_rb_mrt_blend_control(const VkPipelineColorBlendAttachmentState *att,
1953
bool has_alpha)
1954
{
1955
const enum a3xx_rb_blend_opcode color_op = tu6_blend_op(att->colorBlendOp);
1956
const enum adreno_rb_blend_factor src_color_factor = tu6_blend_factor(
1957
has_alpha ? att->srcColorBlendFactor
1958
: tu_blend_factor_no_dst_alpha(att->srcColorBlendFactor));
1959
const enum adreno_rb_blend_factor dst_color_factor = tu6_blend_factor(
1960
has_alpha ? att->dstColorBlendFactor
1961
: tu_blend_factor_no_dst_alpha(att->dstColorBlendFactor));
1962
const enum a3xx_rb_blend_opcode alpha_op = tu6_blend_op(att->alphaBlendOp);
1963
const enum adreno_rb_blend_factor src_alpha_factor =
1964
tu6_blend_factor(att->srcAlphaBlendFactor);
1965
const enum adreno_rb_blend_factor dst_alpha_factor =
1966
tu6_blend_factor(att->dstAlphaBlendFactor);
1967
1968
return A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(src_color_factor) |
1969
A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(color_op) |
1970
A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(dst_color_factor) |
1971
A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(src_alpha_factor) |
1972
A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(alpha_op) |
1973
A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(dst_alpha_factor);
1974
}
1975
1976
static uint32_t
1977
tu6_rb_mrt_control(const VkPipelineColorBlendAttachmentState *att,
1978
uint32_t rb_mrt_control_rop,
1979
bool has_alpha)
1980
{
1981
uint32_t rb_mrt_control =
1982
A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(att->colorWriteMask);
1983
1984
rb_mrt_control |= rb_mrt_control_rop;
1985
1986
if (att->blendEnable) {
1987
rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND;
1988
1989
if (has_alpha)
1990
rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND2;
1991
}
1992
1993
return rb_mrt_control;
1994
}
1995
1996
static void
1997
tu6_emit_rb_mrt_controls(struct tu_cs *cs,
1998
const VkPipelineColorBlendStateCreateInfo *blend_info,
1999
const VkFormat attachment_formats[MAX_RTS],
2000
uint32_t *blend_enable_mask)
2001
{
2002
*blend_enable_mask = 0;
2003
2004
bool rop_reads_dst = false;
2005
uint32_t rb_mrt_control_rop = 0;
2006
if (blend_info->logicOpEnable) {
2007
rop_reads_dst = tu_logic_op_reads_dst(blend_info->logicOp);
2008
rb_mrt_control_rop =
2009
A6XX_RB_MRT_CONTROL_ROP_ENABLE |
2010
A6XX_RB_MRT_CONTROL_ROP_CODE(tu6_rop(blend_info->logicOp));
2011
}
2012
2013
for (uint32_t i = 0; i < blend_info->attachmentCount; i++) {
2014
const VkPipelineColorBlendAttachmentState *att =
2015
&blend_info->pAttachments[i];
2016
const VkFormat format = attachment_formats[i];
2017
2018
uint32_t rb_mrt_control = 0;
2019
uint32_t rb_mrt_blend_control = 0;
2020
if (format != VK_FORMAT_UNDEFINED) {
2021
const bool has_alpha = vk_format_has_alpha(format);
2022
2023
rb_mrt_control =
2024
tu6_rb_mrt_control(att, rb_mrt_control_rop, has_alpha);
2025
rb_mrt_blend_control = tu6_rb_mrt_blend_control(att, has_alpha);
2026
2027
if (att->blendEnable || rop_reads_dst)
2028
*blend_enable_mask |= 1 << i;
2029
}
2030
2031
tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_CONTROL(i), 2);
2032
tu_cs_emit(cs, rb_mrt_control);
2033
tu_cs_emit(cs, rb_mrt_blend_control);
2034
}
2035
}
2036
2037
static void
2038
tu6_emit_blend_control(struct tu_cs *cs,
2039
uint32_t blend_enable_mask,
2040
bool dual_src_blend,
2041
const VkPipelineMultisampleStateCreateInfo *msaa_info)
2042
{
2043
const uint32_t sample_mask =
2044
msaa_info->pSampleMask ? (*msaa_info->pSampleMask & 0xffff)
2045
: ((1 << msaa_info->rasterizationSamples) - 1);
2046
2047
tu_cs_emit_regs(cs,
2048
A6XX_SP_BLEND_CNTL(.enable_blend = blend_enable_mask,
2049
.dual_color_in_enable = dual_src_blend,
2050
.alpha_to_coverage = msaa_info->alphaToCoverageEnable,
2051
.unk8 = true));
2052
2053
/* set A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND only when enabled? */
2054
tu_cs_emit_regs(cs,
2055
A6XX_RB_BLEND_CNTL(.enable_blend = blend_enable_mask,
2056
.independent_blend = true,
2057
.sample_mask = sample_mask,
2058
.dual_color_in_enable = dual_src_blend,
2059
.alpha_to_coverage = msaa_info->alphaToCoverageEnable,
2060
.alpha_to_one = msaa_info->alphaToOneEnable));
2061
}
2062
2063
static uint32_t
2064
calc_pvtmem_size(struct tu_device *dev, struct tu_pvtmem_config *config,
2065
uint32_t pvtmem_bytes)
2066
{
2067
uint32_t per_fiber_size = ALIGN(pvtmem_bytes, 512);
2068
uint32_t per_sp_size =
2069
ALIGN(per_fiber_size * dev->physical_device->info->a6xx.fibers_per_sp, 1 << 12);
2070
2071
if (config) {
2072
config->per_fiber_size = per_fiber_size;
2073
config->per_sp_size = per_sp_size;
2074
}
2075
2076
return dev->physical_device->info->num_sp_cores * per_sp_size;
2077
}
2078
2079
static VkResult
2080
tu_setup_pvtmem(struct tu_device *dev,
2081
struct tu_pipeline *pipeline,
2082
struct tu_pvtmem_config *config,
2083
uint32_t pvtmem_bytes, bool per_wave)
2084
{
2085
if (!pvtmem_bytes) {
2086
memset(config, 0, sizeof(*config));
2087
return VK_SUCCESS;
2088
}
2089
2090
uint32_t total_size = calc_pvtmem_size(dev, config, pvtmem_bytes);
2091
config->per_wave = per_wave;
2092
2093
VkResult result =
2094
tu_bo_init_new(dev, &pipeline->pvtmem_bo, total_size,
2095
TU_BO_ALLOC_NO_FLAGS);
2096
if (result != VK_SUCCESS)
2097
return result;
2098
2099
config->iova = pipeline->pvtmem_bo.iova;
2100
2101
return result;
2102
}
2103
2104
2105
static VkResult
2106
tu_pipeline_allocate_cs(struct tu_device *dev,
2107
struct tu_pipeline *pipeline,
2108
struct tu_pipeline_builder *builder,
2109
struct ir3_shader_variant *compute)
2110
{
2111
uint32_t size = 2048 + tu6_load_state_size(pipeline, compute);
2112
2113
/* graphics case: */
2114
if (builder) {
2115
uint32_t pvtmem_bytes = 0;
2116
for (uint32_t i = 0; i < ARRAY_SIZE(builder->variants); i++) {
2117
if (builder->variants[i]) {
2118
size += builder->variants[i]->info.size / 4;
2119
pvtmem_bytes = MAX2(pvtmem_bytes, builder->variants[i]->pvtmem_size);
2120
}
2121
}
2122
2123
size += builder->binning_variant->info.size / 4;
2124
pvtmem_bytes = MAX2(pvtmem_bytes, builder->binning_variant->pvtmem_size);
2125
2126
size += calc_pvtmem_size(dev, NULL, pvtmem_bytes) / 4;
2127
} else {
2128
size += compute->info.size / 4;
2129
size += calc_pvtmem_size(dev, NULL, compute->pvtmem_size) / 4;
2130
}
2131
2132
tu_cs_init(&pipeline->cs, dev, TU_CS_MODE_SUB_STREAM, size);
2133
2134
/* Reserve the space now such that tu_cs_begin_sub_stream never fails. Note
2135
* that LOAD_STATE can potentially take up a large amount of space so we
2136
* calculate its size explicitly.
2137
*/
2138
return tu_cs_reserve_space(&pipeline->cs, size);
2139
}
2140
2141
static void
2142
tu_pipeline_shader_key_init(struct ir3_shader_key *key,
2143
const VkGraphicsPipelineCreateInfo *pipeline_info)
2144
{
2145
for (uint32_t i = 0; i < pipeline_info->stageCount; i++) {
2146
if (pipeline_info->pStages[i].stage == VK_SHADER_STAGE_GEOMETRY_BIT) {
2147
key->has_gs = true;
2148
break;
2149
}
2150
}
2151
2152
if (pipeline_info->pRasterizationState->rasterizerDiscardEnable)
2153
return;
2154
2155
const VkPipelineMultisampleStateCreateInfo *msaa_info = pipeline_info->pMultisampleState;
2156
const struct VkPipelineSampleLocationsStateCreateInfoEXT *sample_locations =
2157
vk_find_struct_const(msaa_info->pNext, PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
2158
if (msaa_info->rasterizationSamples > 1 ||
2159
/* also set msaa key when sample location is not the default
2160
* since this affects varying interpolation */
2161
(sample_locations && sample_locations->sampleLocationsEnable)) {
2162
key->msaa = true;
2163
}
2164
2165
/* note: not actually used by ir3, just checked in tu6_emit_fs_inputs */
2166
if (msaa_info->sampleShadingEnable)
2167
key->sample_shading = true;
2168
2169
/* We set this after we compile to NIR because we need the prim mode */
2170
key->tessellation = IR3_TESS_NONE;
2171
}
2172
2173
static uint32_t
2174
tu6_get_tessmode(struct tu_shader* shader)
2175
{
2176
uint32_t primitive_mode = shader->ir3_shader->nir->info.tess.primitive_mode;
2177
switch (primitive_mode) {
2178
case GL_ISOLINES:
2179
return IR3_TESS_ISOLINES;
2180
case GL_TRIANGLES:
2181
return IR3_TESS_TRIANGLES;
2182
case GL_QUADS:
2183
return IR3_TESS_QUADS;
2184
case GL_NONE:
2185
return IR3_TESS_NONE;
2186
default:
2187
unreachable("bad tessmode");
2188
}
2189
}
2190
2191
static uint64_t
2192
tu_upload_variant(struct tu_pipeline *pipeline,
2193
const struct ir3_shader_variant *variant)
2194
{
2195
struct tu_cs_memory memory;
2196
2197
if (!variant)
2198
return 0;
2199
2200
/* this expects to get enough alignment because shaders are allocated first
2201
* and total size is always aligned correctly
2202
* note: an assert in tu6_emit_xs_config validates the alignment
2203
*/
2204
tu_cs_alloc(&pipeline->cs, variant->info.size / 4, 1, &memory);
2205
2206
memcpy(memory.map, variant->bin, variant->info.size);
2207
return memory.iova;
2208
}
2209
2210
static void
2211
tu_append_executable(struct tu_pipeline *pipeline, struct ir3_shader_variant *variant,
2212
char *nir_from_spirv)
2213
{
2214
ralloc_steal(pipeline->executables_mem_ctx, variant->disasm_info.nir);
2215
ralloc_steal(pipeline->executables_mem_ctx, variant->disasm_info.disasm);
2216
2217
struct tu_pipeline_executable exe = {
2218
.stage = variant->shader->type,
2219
.nir_from_spirv = nir_from_spirv,
2220
.nir_final = variant->disasm_info.nir,
2221
.disasm = variant->disasm_info.disasm,
2222
.stats = variant->info,
2223
.is_binning = variant->binning_pass,
2224
};
2225
2226
util_dynarray_append(&pipeline->executables, struct tu_pipeline_executable, exe);
2227
}
2228
2229
static VkResult
2230
tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
2231
struct tu_pipeline *pipeline)
2232
{
2233
const struct ir3_compiler *compiler = builder->device->compiler;
2234
const VkPipelineShaderStageCreateInfo *stage_infos[MESA_SHADER_STAGES] = {
2235
NULL
2236
};
2237
for (uint32_t i = 0; i < builder->create_info->stageCount; i++) {
2238
gl_shader_stage stage =
2239
vk_to_mesa_shader_stage(builder->create_info->pStages[i].stage);
2240
stage_infos[stage] = &builder->create_info->pStages[i];
2241
}
2242
2243
struct ir3_shader_key key = {};
2244
tu_pipeline_shader_key_init(&key, builder->create_info);
2245
2246
nir_shader *nir[ARRAY_SIZE(builder->shaders)] = { NULL };
2247
2248
for (gl_shader_stage stage = MESA_SHADER_VERTEX;
2249
stage < ARRAY_SIZE(nir); stage++) {
2250
const VkPipelineShaderStageCreateInfo *stage_info = stage_infos[stage];
2251
if (!stage_info)
2252
continue;
2253
2254
nir[stage] = tu_spirv_to_nir(builder->device, stage_info, stage);
2255
if (!nir[stage])
2256
return VK_ERROR_OUT_OF_HOST_MEMORY;
2257
}
2258
2259
if (!nir[MESA_SHADER_FRAGMENT]) {
2260
const nir_shader_compiler_options *nir_options =
2261
ir3_get_compiler_options(builder->device->compiler);
2262
nir_builder fs_b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT,
2263
nir_options,
2264
"noop_fs");
2265
nir[MESA_SHADER_FRAGMENT] = fs_b.shader;
2266
}
2267
2268
const bool executable_info = builder->create_info->flags &
2269
VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR;
2270
2271
char *nir_initial_disasm[ARRAY_SIZE(builder->shaders)] = { NULL };
2272
2273
if (executable_info) {
2274
for (gl_shader_stage stage = MESA_SHADER_VERTEX;
2275
stage < ARRAY_SIZE(nir); stage++) {
2276
if (!nir[stage])
2277
continue;
2278
2279
nir_initial_disasm[stage] =
2280
nir_shader_as_str(nir[stage], pipeline->executables_mem_ctx);
2281
}
2282
}
2283
2284
/* TODO do intra-stage linking here */
2285
2286
uint32_t desc_sets = 0;
2287
for (gl_shader_stage stage = MESA_SHADER_VERTEX;
2288
stage < ARRAY_SIZE(nir); stage++) {
2289
if (!nir[stage])
2290
continue;
2291
2292
struct tu_shader *shader =
2293
tu_shader_create(builder->device, nir[stage],
2294
builder->multiview_mask, builder->layout,
2295
builder->alloc);
2296
if (!shader)
2297
return VK_ERROR_OUT_OF_HOST_MEMORY;
2298
2299
/* In SPIR-V generated from GLSL, the primitive mode is specified in the
2300
* tessellation evaluation shader, but in SPIR-V generated from HLSL,
2301
* the mode is specified in the tessellation control shader. */
2302
if ((stage == MESA_SHADER_TESS_EVAL || stage == MESA_SHADER_TESS_CTRL) &&
2303
key.tessellation == IR3_TESS_NONE) {
2304
key.tessellation = tu6_get_tessmode(shader);
2305
}
2306
2307
/* Keep track of the status of each shader's active descriptor sets,
2308
* which is set in tu_lower_io. */
2309
desc_sets |= shader->active_desc_sets;
2310
2311
builder->shaders[stage] = shader;
2312
}
2313
pipeline->active_desc_sets = desc_sets;
2314
2315
struct tu_shader *last_shader = builder->shaders[MESA_SHADER_GEOMETRY];
2316
if (!last_shader)
2317
last_shader = builder->shaders[MESA_SHADER_TESS_EVAL];
2318
if (!last_shader)
2319
last_shader = builder->shaders[MESA_SHADER_VERTEX];
2320
2321
uint64_t outputs_written = last_shader->ir3_shader->nir->info.outputs_written;
2322
2323
key.layer_zero = !(outputs_written & VARYING_BIT_LAYER);
2324
key.view_zero = !(outputs_written & VARYING_BIT_VIEWPORT);
2325
2326
pipeline->tess.patch_type = key.tessellation;
2327
2328
for (gl_shader_stage stage = MESA_SHADER_VERTEX;
2329
stage < ARRAY_SIZE(builder->shaders); stage++) {
2330
if (!builder->shaders[stage])
2331
continue;
2332
2333
bool created;
2334
builder->variants[stage] =
2335
ir3_shader_get_variant(builder->shaders[stage]->ir3_shader,
2336
&key, false, executable_info, &created);
2337
if (!builder->variants[stage])
2338
return VK_ERROR_OUT_OF_HOST_MEMORY;
2339
}
2340
2341
uint32_t safe_constlens = ir3_trim_constlen(builder->variants, compiler);
2342
2343
key.safe_constlen = true;
2344
2345
for (gl_shader_stage stage = MESA_SHADER_VERTEX;
2346
stage < ARRAY_SIZE(builder->shaders); stage++) {
2347
if (!builder->shaders[stage])
2348
continue;
2349
2350
if (safe_constlens & (1 << stage)) {
2351
bool created;
2352
builder->variants[stage] =
2353
ir3_shader_get_variant(builder->shaders[stage]->ir3_shader,
2354
&key, false, executable_info, &created);
2355
if (!builder->variants[stage])
2356
return VK_ERROR_OUT_OF_HOST_MEMORY;
2357
}
2358
}
2359
2360
const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
2361
struct ir3_shader_variant *variant;
2362
2363
if (vs->ir3_shader->stream_output.num_outputs ||
2364
!ir3_has_binning_vs(&key)) {
2365
variant = builder->variants[MESA_SHADER_VERTEX];
2366
} else {
2367
bool created;
2368
key.safe_constlen = !!(safe_constlens & (1 << MESA_SHADER_VERTEX));
2369
variant = ir3_shader_get_variant(vs->ir3_shader, &key,
2370
true, executable_info, &created);
2371
if (!variant)
2372
return VK_ERROR_OUT_OF_HOST_MEMORY;
2373
}
2374
2375
builder->binning_variant = variant;
2376
2377
for (gl_shader_stage stage = MESA_SHADER_VERTEX;
2378
stage < ARRAY_SIZE(nir); stage++) {
2379
if (builder->variants[stage]) {
2380
tu_append_executable(pipeline, builder->variants[stage],
2381
nir_initial_disasm[stage]);
2382
}
2383
}
2384
2385
if (builder->binning_variant != builder->variants[MESA_SHADER_VERTEX]) {
2386
tu_append_executable(pipeline, builder->binning_variant, NULL);
2387
}
2388
2389
return VK_SUCCESS;
2390
}
2391
2392
static void
2393
tu_pipeline_builder_parse_dynamic(struct tu_pipeline_builder *builder,
2394
struct tu_pipeline *pipeline)
2395
{
2396
const VkPipelineDynamicStateCreateInfo *dynamic_info =
2397
builder->create_info->pDynamicState;
2398
2399
pipeline->gras_su_cntl_mask = ~0u;
2400
pipeline->rb_depth_cntl_mask = ~0u;
2401
pipeline->rb_stencil_cntl_mask = ~0u;
2402
2403
if (!dynamic_info)
2404
return;
2405
2406
for (uint32_t i = 0; i < dynamic_info->dynamicStateCount; i++) {
2407
VkDynamicState state = dynamic_info->pDynamicStates[i];
2408
switch (state) {
2409
case VK_DYNAMIC_STATE_VIEWPORT ... VK_DYNAMIC_STATE_STENCIL_REFERENCE:
2410
if (state == VK_DYNAMIC_STATE_LINE_WIDTH)
2411
pipeline->gras_su_cntl_mask &= ~A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
2412
pipeline->dynamic_state_mask |= BIT(state);
2413
break;
2414
case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
2415
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_SAMPLE_LOCATIONS);
2416
break;
2417
case VK_DYNAMIC_STATE_CULL_MODE_EXT:
2418
pipeline->gras_su_cntl_mask &=
2419
~(A6XX_GRAS_SU_CNTL_CULL_BACK | A6XX_GRAS_SU_CNTL_CULL_FRONT);
2420
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_GRAS_SU_CNTL);
2421
break;
2422
case VK_DYNAMIC_STATE_FRONT_FACE_EXT:
2423
pipeline->gras_su_cntl_mask &= ~A6XX_GRAS_SU_CNTL_FRONT_CW;
2424
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_GRAS_SU_CNTL);
2425
break;
2426
case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT:
2427
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY);
2428
break;
2429
case VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT:
2430
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_VB_STRIDE);
2431
break;
2432
case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT:
2433
pipeline->dynamic_state_mask |= BIT(VK_DYNAMIC_STATE_VIEWPORT);
2434
break;
2435
case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT:
2436
pipeline->dynamic_state_mask |= BIT(VK_DYNAMIC_STATE_SCISSOR);
2437
break;
2438
case VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT:
2439
pipeline->rb_depth_cntl_mask &=
2440
~(A6XX_RB_DEPTH_CNTL_Z_ENABLE | A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE);
2441
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL);
2442
break;
2443
case VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT:
2444
pipeline->rb_depth_cntl_mask &= ~A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE;
2445
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL);
2446
break;
2447
case VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT:
2448
pipeline->rb_depth_cntl_mask &= ~A6XX_RB_DEPTH_CNTL_ZFUNC__MASK;
2449
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL);
2450
break;
2451
case VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT:
2452
pipeline->rb_depth_cntl_mask &=
2453
~(A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE | A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE);
2454
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL);
2455
break;
2456
case VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT:
2457
pipeline->rb_stencil_cntl_mask &= ~(A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
2458
A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
2459
A6XX_RB_STENCIL_CONTROL_STENCIL_READ);
2460
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_STENCIL_CNTL);
2461
break;
2462
case VK_DYNAMIC_STATE_STENCIL_OP_EXT:
2463
pipeline->rb_stencil_cntl_mask &= ~(A6XX_RB_STENCIL_CONTROL_FUNC__MASK |
2464
A6XX_RB_STENCIL_CONTROL_FAIL__MASK |
2465
A6XX_RB_STENCIL_CONTROL_ZPASS__MASK |
2466
A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK |
2467
A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK |
2468
A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK |
2469
A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK |
2470
A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK);
2471
pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_RB_STENCIL_CNTL);
2472
break;
2473
default:
2474
assert(!"unsupported dynamic state");
2475
break;
2476
}
2477
}
2478
}
2479
2480
static void
2481
tu_pipeline_set_linkage(struct tu_program_descriptor_linkage *link,
2482
struct tu_shader *shader,
2483
struct ir3_shader_variant *v)
2484
{
2485
link->const_state = *ir3_const_state(v);
2486
link->constlen = v->constlen;
2487
link->push_consts = shader->push_consts;
2488
}
2489
2490
static void
2491
tu_pipeline_builder_parse_shader_stages(struct tu_pipeline_builder *builder,
2492
struct tu_pipeline *pipeline)
2493
{
2494
struct tu_cs prog_cs;
2495
2496
/* Emit HLSQ_xS_CNTL/HLSQ_SP_xS_CONFIG *first*, before emitting anything
2497
* else that could depend on that state (like push constants)
2498
*
2499
* Note also that this always uses the full VS even in binning pass. The
2500
* binning pass variant has the same const layout as the full VS, and
2501
* the constlen for the VS will be the same or greater than the constlen
2502
* for the binning pass variant. It is required that the constlen state
2503
* matches between binning and draw passes, as some parts of the push
2504
* consts are emitted in state groups that are shared between the binning
2505
* and draw passes.
2506
*/
2507
tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
2508
tu6_emit_program_config(&prog_cs, builder);
2509
pipeline->program.config_state = tu_cs_end_draw_state(&pipeline->cs, &prog_cs);
2510
2511
tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
2512
tu6_emit_program(&prog_cs, builder, false, pipeline);
2513
pipeline->program.state = tu_cs_end_draw_state(&pipeline->cs, &prog_cs);
2514
2515
tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
2516
tu6_emit_program(&prog_cs, builder, true, pipeline);
2517
pipeline->program.binning_state = tu_cs_end_draw_state(&pipeline->cs, &prog_cs);
2518
2519
VkShaderStageFlags stages = 0;
2520
for (unsigned i = 0; i < builder->create_info->stageCount; i++) {
2521
stages |= builder->create_info->pStages[i].stage;
2522
}
2523
pipeline->active_stages = stages;
2524
2525
for (unsigned i = 0; i < ARRAY_SIZE(builder->shaders); i++) {
2526
if (!builder->shaders[i])
2527
continue;
2528
2529
tu_pipeline_set_linkage(&pipeline->program.link[i],
2530
builder->shaders[i],
2531
builder->variants[i]);
2532
}
2533
}
2534
2535
static void
2536
tu_pipeline_builder_parse_vertex_input(struct tu_pipeline_builder *builder,
2537
struct tu_pipeline *pipeline)
2538
{
2539
const VkPipelineVertexInputStateCreateInfo *vi_info =
2540
builder->create_info->pVertexInputState;
2541
const struct ir3_shader_variant *vs = builder->variants[MESA_SHADER_VERTEX];
2542
const struct ir3_shader_variant *bs = builder->binning_variant;
2543
2544
pipeline->num_vbs = vi_info->vertexBindingDescriptionCount;
2545
2546
struct tu_cs vi_cs;
2547
tu_cs_begin_sub_stream(&pipeline->cs,
2548
MAX_VERTEX_ATTRIBS * 7 + 2, &vi_cs);
2549
tu6_emit_vertex_input(pipeline, &vi_cs, vs, vi_info);
2550
pipeline->vi.state = tu_cs_end_draw_state(&pipeline->cs, &vi_cs);
2551
2552
if (bs) {
2553
tu_cs_begin_sub_stream(&pipeline->cs,
2554
MAX_VERTEX_ATTRIBS * 7 + 2, &vi_cs);
2555
tu6_emit_vertex_input(pipeline, &vi_cs, bs, vi_info);
2556
pipeline->vi.binning_state =
2557
tu_cs_end_draw_state(&pipeline->cs, &vi_cs);
2558
}
2559
}
2560
2561
static void
2562
tu_pipeline_builder_parse_input_assembly(struct tu_pipeline_builder *builder,
2563
struct tu_pipeline *pipeline)
2564
{
2565
const VkPipelineInputAssemblyStateCreateInfo *ia_info =
2566
builder->create_info->pInputAssemblyState;
2567
2568
pipeline->ia.primtype = tu6_primtype(ia_info->topology);
2569
pipeline->ia.primitive_restart = ia_info->primitiveRestartEnable;
2570
}
2571
2572
static bool
2573
tu_pipeline_static_state(struct tu_pipeline *pipeline, struct tu_cs *cs,
2574
uint32_t id, uint32_t size)
2575
{
2576
assert(id < ARRAY_SIZE(pipeline->dynamic_state));
2577
2578
if (pipeline->dynamic_state_mask & BIT(id))
2579
return false;
2580
2581
pipeline->dynamic_state[id] = tu_cs_draw_state(&pipeline->cs, cs, size);
2582
return true;
2583
}
2584
2585
static void
2586
tu_pipeline_builder_parse_tessellation(struct tu_pipeline_builder *builder,
2587
struct tu_pipeline *pipeline)
2588
{
2589
if (!(pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) ||
2590
!(pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
2591
return;
2592
2593
const VkPipelineTessellationStateCreateInfo *tess_info =
2594
builder->create_info->pTessellationState;
2595
2596
assert(!(pipeline->dynamic_state_mask & BIT(TU_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY)));
2597
2598
assert(pipeline->ia.primtype == DI_PT_PATCHES0);
2599
assert(tess_info->patchControlPoints <= 32);
2600
pipeline->ia.primtype += tess_info->patchControlPoints;
2601
const VkPipelineTessellationDomainOriginStateCreateInfo *domain_info =
2602
vk_find_struct_const(tess_info->pNext, PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);
2603
pipeline->tess.upper_left_domain_origin = !domain_info ||
2604
domain_info->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT;
2605
const struct ir3_shader_variant *hs = builder->variants[MESA_SHADER_TESS_CTRL];
2606
const struct ir3_shader_variant *ds = builder->variants[MESA_SHADER_TESS_EVAL];
2607
pipeline->tess.param_stride = hs->output_size * 4;
2608
pipeline->tess.hs_bo_regid = hs->const_state->offsets.primitive_param + 1;
2609
pipeline->tess.ds_bo_regid = ds->const_state->offsets.primitive_param + 1;
2610
}
2611
2612
static void
2613
tu_pipeline_builder_parse_viewport(struct tu_pipeline_builder *builder,
2614
struct tu_pipeline *pipeline)
2615
{
2616
/* The spec says:
2617
*
2618
* pViewportState is a pointer to an instance of the
2619
* VkPipelineViewportStateCreateInfo structure, and is ignored if the
2620
* pipeline has rasterization disabled."
2621
*
2622
* We leave the relevant registers stale in that case.
2623
*/
2624
if (builder->rasterizer_discard)
2625
return;
2626
2627
const VkPipelineViewportStateCreateInfo *vp_info =
2628
builder->create_info->pViewportState;
2629
2630
struct tu_cs cs;
2631
2632
if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_VIEWPORT, 8 + 10 * vp_info->viewportCount))
2633
tu6_emit_viewport(&cs, vp_info->pViewports, vp_info->viewportCount);
2634
2635
if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_SCISSOR, 1 + 2 * vp_info->scissorCount))
2636
tu6_emit_scissor(&cs, vp_info->pScissors, vp_info->scissorCount);
2637
}
2638
2639
static void
2640
tu_pipeline_builder_parse_rasterization(struct tu_pipeline_builder *builder,
2641
struct tu_pipeline *pipeline)
2642
{
2643
const VkPipelineRasterizationStateCreateInfo *rast_info =
2644
builder->create_info->pRasterizationState;
2645
2646
enum a6xx_polygon_mode mode = tu6_polygon_mode(rast_info->polygonMode);
2647
2648
bool depth_clip_disable = rast_info->depthClampEnable;
2649
2650
const VkPipelineRasterizationDepthClipStateCreateInfoEXT *depth_clip_state =
2651
vk_find_struct_const(rast_info, PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
2652
if (depth_clip_state)
2653
depth_clip_disable = !depth_clip_state->depthClipEnable;
2654
2655
struct tu_cs cs;
2656
uint32_t cs_size = 13 + (builder->emit_msaa_state ? 11 : 0);
2657
pipeline->rast_state = tu_cs_draw_state(&pipeline->cs, &cs, cs_size);
2658
2659
tu_cs_emit_regs(&cs,
2660
A6XX_GRAS_CL_CNTL(
2661
.znear_clip_disable = depth_clip_disable,
2662
.zfar_clip_disable = depth_clip_disable,
2663
/* TODO should this be depth_clip_disable instead? */
2664
.unk5 = rast_info->depthClampEnable,
2665
.zero_gb_scale_z = 1,
2666
.vp_clip_code_ignore = 1));
2667
2668
tu_cs_emit_regs(&cs,
2669
A6XX_VPC_POLYGON_MODE(mode));
2670
2671
tu_cs_emit_regs(&cs,
2672
A6XX_PC_POLYGON_MODE(mode));
2673
2674
/* move to hw ctx init? */
2675
tu_cs_emit_regs(&cs,
2676
A6XX_GRAS_SU_POINT_MINMAX(.min = 1.0f / 16.0f, .max = 4092.0f),
2677
A6XX_GRAS_SU_POINT_SIZE(1.0f));
2678
2679
const VkPipelineRasterizationStateStreamCreateInfoEXT *stream_info =
2680
vk_find_struct_const(rast_info->pNext,
2681
PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT);
2682
unsigned stream = stream_info ? stream_info->rasterizationStream : 0;
2683
tu_cs_emit_regs(&cs,
2684
A6XX_PC_RASTER_CNTL(.stream = stream,
2685
.discard = rast_info->rasterizerDiscardEnable));
2686
tu_cs_emit_regs(&cs,
2687
A6XX_VPC_UNKNOWN_9107(.raster_discard = rast_info->rasterizerDiscardEnable));
2688
2689
/* If samples count couldn't be devised from the subpass, we should emit it here.
2690
* It happens when subpass doesn't use any color/depth attachment.
2691
*/
2692
if (builder->emit_msaa_state)
2693
tu6_emit_msaa(&cs, builder->samples);
2694
2695
pipeline->gras_su_cntl =
2696
tu6_gras_su_cntl(rast_info, builder->samples, builder->multiview_mask != 0);
2697
2698
if (tu_pipeline_static_state(pipeline, &cs, TU_DYNAMIC_STATE_GRAS_SU_CNTL, 2))
2699
tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = pipeline->gras_su_cntl));
2700
2701
if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_DEPTH_BIAS, 4)) {
2702
tu6_emit_depth_bias(&cs, rast_info->depthBiasConstantFactor,
2703
rast_info->depthBiasClamp,
2704
rast_info->depthBiasSlopeFactor);
2705
}
2706
2707
const struct VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *provoking_vtx_state =
2708
vk_find_struct_const(rast_info->pNext, PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT);
2709
pipeline->provoking_vertex_last = provoking_vtx_state &&
2710
provoking_vtx_state->provokingVertexMode == VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT;
2711
}
2712
2713
static void
2714
tu_pipeline_builder_parse_depth_stencil(struct tu_pipeline_builder *builder,
2715
struct tu_pipeline *pipeline)
2716
{
2717
/* The spec says:
2718
*
2719
* pDepthStencilState is a pointer to an instance of the
2720
* VkPipelineDepthStencilStateCreateInfo structure, and is ignored if
2721
* the pipeline has rasterization disabled or if the subpass of the
2722
* render pass the pipeline is created against does not use a
2723
* depth/stencil attachment.
2724
*/
2725
const VkPipelineDepthStencilStateCreateInfo *ds_info =
2726
builder->create_info->pDepthStencilState;
2727
const VkPipelineRasterizationStateCreateInfo *rast_info =
2728
builder->create_info->pRasterizationState;
2729
uint32_t rb_depth_cntl = 0, rb_stencil_cntl = 0;
2730
struct tu_cs cs;
2731
2732
if (builder->depth_attachment_format != VK_FORMAT_UNDEFINED &&
2733
builder->depth_attachment_format != VK_FORMAT_S8_UINT) {
2734
if (ds_info->depthTestEnable) {
2735
rb_depth_cntl |=
2736
A6XX_RB_DEPTH_CNTL_Z_ENABLE |
2737
A6XX_RB_DEPTH_CNTL_ZFUNC(tu6_compare_func(ds_info->depthCompareOp)) |
2738
A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE; /* TODO: don't set for ALWAYS/NEVER */
2739
2740
if (rast_info->depthClampEnable)
2741
rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE;
2742
2743
if (ds_info->depthWriteEnable)
2744
rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE;
2745
}
2746
2747
if (ds_info->depthBoundsTestEnable)
2748
rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE | A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE;
2749
} else {
2750
/* if RB_DEPTH_CNTL is set dynamically, we need to make sure it is set
2751
* to 0 when this pipeline is used, as enabling depth test when there
2752
* is no depth attachment is a problem (at least for the S8_UINT case)
2753
*/
2754
if (pipeline->dynamic_state_mask & BIT(TU_DYNAMIC_STATE_RB_DEPTH_CNTL))
2755
pipeline->rb_depth_cntl_disable = true;
2756
}
2757
2758
if (builder->depth_attachment_format != VK_FORMAT_UNDEFINED) {
2759
const VkStencilOpState *front = &ds_info->front;
2760
const VkStencilOpState *back = &ds_info->back;
2761
2762
rb_stencil_cntl |=
2763
A6XX_RB_STENCIL_CONTROL_FUNC(tu6_compare_func(front->compareOp)) |
2764
A6XX_RB_STENCIL_CONTROL_FAIL(tu6_stencil_op(front->failOp)) |
2765
A6XX_RB_STENCIL_CONTROL_ZPASS(tu6_stencil_op(front->passOp)) |
2766
A6XX_RB_STENCIL_CONTROL_ZFAIL(tu6_stencil_op(front->depthFailOp)) |
2767
A6XX_RB_STENCIL_CONTROL_FUNC_BF(tu6_compare_func(back->compareOp)) |
2768
A6XX_RB_STENCIL_CONTROL_FAIL_BF(tu6_stencil_op(back->failOp)) |
2769
A6XX_RB_STENCIL_CONTROL_ZPASS_BF(tu6_stencil_op(back->passOp)) |
2770
A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(tu6_stencil_op(back->depthFailOp));
2771
2772
if (ds_info->stencilTestEnable) {
2773
rb_stencil_cntl |=
2774
A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
2775
A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
2776
A6XX_RB_STENCIL_CONTROL_STENCIL_READ;
2777
}
2778
}
2779
2780
if (tu_pipeline_static_state(pipeline, &cs, TU_DYNAMIC_STATE_RB_DEPTH_CNTL, 2)) {
2781
tu_cs_emit_pkt4(&cs, REG_A6XX_RB_DEPTH_CNTL, 1);
2782
tu_cs_emit(&cs, rb_depth_cntl);
2783
}
2784
pipeline->rb_depth_cntl = rb_depth_cntl;
2785
2786
if (tu_pipeline_static_state(pipeline, &cs, TU_DYNAMIC_STATE_RB_STENCIL_CNTL, 2)) {
2787
tu_cs_emit_pkt4(&cs, REG_A6XX_RB_STENCIL_CONTROL, 1);
2788
tu_cs_emit(&cs, rb_stencil_cntl);
2789
}
2790
pipeline->rb_stencil_cntl = rb_stencil_cntl;
2791
2792
/* the remaining draw states arent used if there is no d/s, leave them empty */
2793
if (builder->depth_attachment_format == VK_FORMAT_UNDEFINED)
2794
return;
2795
2796
if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_DEPTH_BOUNDS, 3)) {
2797
tu_cs_emit_regs(&cs,
2798
A6XX_RB_Z_BOUNDS_MIN(ds_info->minDepthBounds),
2799
A6XX_RB_Z_BOUNDS_MAX(ds_info->maxDepthBounds));
2800
}
2801
2802
if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, 2)) {
2803
tu_cs_emit_regs(&cs, A6XX_RB_STENCILMASK(.mask = ds_info->front.compareMask & 0xff,
2804
.bfmask = ds_info->back.compareMask & 0xff));
2805
}
2806
2807
if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, 2)) {
2808
update_stencil_mask(&pipeline->stencil_wrmask, VK_STENCIL_FACE_FRONT_BIT, ds_info->front.writeMask);
2809
update_stencil_mask(&pipeline->stencil_wrmask, VK_STENCIL_FACE_BACK_BIT, ds_info->back.writeMask);
2810
tu_cs_emit_regs(&cs, A6XX_RB_STENCILWRMASK(.dword = pipeline->stencil_wrmask));
2811
}
2812
2813
if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_STENCIL_REFERENCE, 2)) {
2814
tu_cs_emit_regs(&cs, A6XX_RB_STENCILREF(.ref = ds_info->front.reference & 0xff,
2815
.bfref = ds_info->back.reference & 0xff));
2816
}
2817
2818
if (builder->shaders[MESA_SHADER_FRAGMENT]) {
2819
const struct ir3_shader_variant *fs = &builder->shaders[MESA_SHADER_FRAGMENT]->ir3_shader->variants[0];
2820
if (fs->has_kill || fs->no_earlyz || fs->writes_pos) {
2821
pipeline->lrz.force_disable_mask |= TU_LRZ_FORCE_DISABLE_WRITE;
2822
}
2823
if (fs->no_earlyz || fs->writes_pos) {
2824
pipeline->lrz.force_disable_mask = TU_LRZ_FORCE_DISABLE_LRZ;
2825
}
2826
}
2827
}
2828
2829
static void
2830
tu_pipeline_builder_parse_multisample_and_color_blend(
2831
struct tu_pipeline_builder *builder, struct tu_pipeline *pipeline)
2832
{
2833
/* The spec says:
2834
*
2835
* pMultisampleState is a pointer to an instance of the
2836
* VkPipelineMultisampleStateCreateInfo, and is ignored if the pipeline
2837
* has rasterization disabled.
2838
*
2839
* Also,
2840
*
2841
* pColorBlendState is a pointer to an instance of the
2842
* VkPipelineColorBlendStateCreateInfo structure, and is ignored if the
2843
* pipeline has rasterization disabled or if the subpass of the render
2844
* pass the pipeline is created against does not use any color
2845
* attachments.
2846
*
2847
* We leave the relevant registers stale when rasterization is disabled.
2848
*/
2849
if (builder->rasterizer_discard)
2850
return;
2851
2852
static const VkPipelineColorBlendStateCreateInfo dummy_blend_info;
2853
const VkPipelineMultisampleStateCreateInfo *msaa_info =
2854
builder->create_info->pMultisampleState;
2855
const VkPipelineColorBlendStateCreateInfo *blend_info =
2856
builder->use_color_attachments ? builder->create_info->pColorBlendState
2857
: &dummy_blend_info;
2858
2859
struct tu_cs cs;
2860
pipeline->blend_state =
2861
tu_cs_draw_state(&pipeline->cs, &cs, blend_info->attachmentCount * 3 + 4);
2862
2863
uint32_t blend_enable_mask;
2864
tu6_emit_rb_mrt_controls(&cs, blend_info,
2865
builder->color_attachment_formats,
2866
&blend_enable_mask);
2867
2868
tu6_emit_blend_control(&cs, blend_enable_mask,
2869
builder->use_dual_src_blend, msaa_info);
2870
2871
assert(cs.cur == cs.end); /* validate draw state size */
2872
2873
if (blend_enable_mask) {
2874
for (int i = 0; i < blend_info->attachmentCount; i++) {
2875
VkPipelineColorBlendAttachmentState blendAttachment = blend_info->pAttachments[i];
2876
/* Disable LRZ writes when blend is enabled, since the
2877
* resulting pixel value from the blend-draw
2878
* depends on an earlier draw, which LRZ in the draw pass
2879
* could early-reject if the previous blend-enabled draw wrote LRZ.
2880
*
2881
* From the PoV of LRZ, having masked color channels is
2882
* the same as having blend enabled, in that the draw will
2883
* care about the fragments from an earlier draw.
2884
*
2885
* TODO: We need to disable LRZ writes only for the binning pass.
2886
* Therefore, we need to emit it in a separate draw state. We keep
2887
* it disabled for sysmem path as well for the moment.
2888
*/
2889
if (blendAttachment.blendEnable || blendAttachment.colorWriteMask != 0xf) {
2890
pipeline->lrz.force_disable_mask |= TU_LRZ_FORCE_DISABLE_WRITE;
2891
}
2892
}
2893
}
2894
2895
if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_BLEND_CONSTANTS, 5)) {
2896
tu_cs_emit_pkt4(&cs, REG_A6XX_RB_BLEND_RED_F32, 4);
2897
tu_cs_emit_array(&cs, (const uint32_t *) blend_info->blendConstants, 4);
2898
}
2899
2900
const struct VkPipelineSampleLocationsStateCreateInfoEXT *sample_locations =
2901
vk_find_struct_const(msaa_info->pNext, PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
2902
const VkSampleLocationsInfoEXT *samp_loc = NULL;
2903
2904
if (sample_locations && sample_locations->sampleLocationsEnable)
2905
samp_loc = &sample_locations->sampleLocationsInfo;
2906
2907
if (tu_pipeline_static_state(pipeline, &cs, TU_DYNAMIC_STATE_SAMPLE_LOCATIONS,
2908
samp_loc ? 9 : 6)) {
2909
tu6_emit_sample_locations(&cs, samp_loc);
2910
}
2911
}
2912
2913
static void
2914
tu_pipeline_finish(struct tu_pipeline *pipeline,
2915
struct tu_device *dev,
2916
const VkAllocationCallbacks *alloc)
2917
{
2918
tu_cs_finish(&pipeline->cs);
2919
2920
if (pipeline->pvtmem_bo.size)
2921
tu_bo_finish(dev, &pipeline->pvtmem_bo);
2922
2923
ralloc_free(pipeline->executables_mem_ctx);
2924
}
2925
2926
static VkResult
2927
tu_pipeline_builder_build(struct tu_pipeline_builder *builder,
2928
struct tu_pipeline **pipeline)
2929
{
2930
VkResult result;
2931
2932
*pipeline = vk_object_zalloc(&builder->device->vk, builder->alloc,
2933
sizeof(**pipeline), VK_OBJECT_TYPE_PIPELINE);
2934
if (!*pipeline)
2935
return VK_ERROR_OUT_OF_HOST_MEMORY;
2936
2937
(*pipeline)->layout = builder->layout;
2938
(*pipeline)->executables_mem_ctx = ralloc_context(NULL);
2939
util_dynarray_init(&(*pipeline)->executables, (*pipeline)->executables_mem_ctx);
2940
2941
/* compile and upload shaders */
2942
result = tu_pipeline_builder_compile_shaders(builder, *pipeline);
2943
if (result != VK_SUCCESS) {
2944
vk_object_free(&builder->device->vk, builder->alloc, *pipeline);
2945
return result;
2946
}
2947
2948
result = tu_pipeline_allocate_cs(builder->device, *pipeline, builder, NULL);
2949
if (result != VK_SUCCESS) {
2950
vk_object_free(&builder->device->vk, builder->alloc, *pipeline);
2951
return result;
2952
}
2953
2954
for (uint32_t i = 0; i < ARRAY_SIZE(builder->variants); i++)
2955
builder->shader_iova[i] = tu_upload_variant(*pipeline, builder->variants[i]);
2956
2957
builder->binning_vs_iova =
2958
tu_upload_variant(*pipeline, builder->binning_variant);
2959
2960
/* Setup private memory. Note that because we're sharing the same private
2961
* memory for all stages, all stages must use the same config, or else
2962
* fibers from one stage might overwrite fibers in another.
2963
*/
2964
2965
uint32_t pvtmem_size = 0;
2966
bool per_wave = true;
2967
for (uint32_t i = 0; i < ARRAY_SIZE(builder->variants); i++) {
2968
if (builder->variants[i]) {
2969
pvtmem_size = MAX2(pvtmem_size, builder->variants[i]->pvtmem_size);
2970
if (!builder->variants[i]->pvtmem_per_wave)
2971
per_wave = false;
2972
}
2973
}
2974
2975
if (builder->binning_variant) {
2976
pvtmem_size = MAX2(pvtmem_size, builder->binning_variant->pvtmem_size);
2977
if (!builder->binning_variant->pvtmem_per_wave)
2978
per_wave = false;
2979
}
2980
2981
result = tu_setup_pvtmem(builder->device, *pipeline, &builder->pvtmem,
2982
pvtmem_size, per_wave);
2983
if (result != VK_SUCCESS) {
2984
vk_object_free(&builder->device->vk, builder->alloc, *pipeline);
2985
return result;
2986
}
2987
2988
tu_pipeline_builder_parse_dynamic(builder, *pipeline);
2989
tu_pipeline_builder_parse_shader_stages(builder, *pipeline);
2990
tu_pipeline_builder_parse_vertex_input(builder, *pipeline);
2991
tu_pipeline_builder_parse_input_assembly(builder, *pipeline);
2992
tu_pipeline_builder_parse_tessellation(builder, *pipeline);
2993
tu_pipeline_builder_parse_viewport(builder, *pipeline);
2994
tu_pipeline_builder_parse_rasterization(builder, *pipeline);
2995
tu_pipeline_builder_parse_depth_stencil(builder, *pipeline);
2996
tu_pipeline_builder_parse_multisample_and_color_blend(builder, *pipeline);
2997
tu6_emit_load_state(*pipeline, false);
2998
2999
/* we should have reserved enough space upfront such that the CS never
3000
* grows
3001
*/
3002
assert((*pipeline)->cs.bo_count == 1);
3003
3004
return VK_SUCCESS;
3005
}
3006
3007
static void
3008
tu_pipeline_builder_finish(struct tu_pipeline_builder *builder)
3009
{
3010
for (uint32_t i = 0; i < ARRAY_SIZE(builder->shaders); i++) {
3011
if (!builder->shaders[i])
3012
continue;
3013
tu_shader_destroy(builder->device, builder->shaders[i], builder->alloc);
3014
}
3015
}
3016
3017
static void
3018
tu_pipeline_builder_init_graphics(
3019
struct tu_pipeline_builder *builder,
3020
struct tu_device *dev,
3021
struct tu_pipeline_cache *cache,
3022
const VkGraphicsPipelineCreateInfo *create_info,
3023
const VkAllocationCallbacks *alloc)
3024
{
3025
TU_FROM_HANDLE(tu_pipeline_layout, layout, create_info->layout);
3026
3027
*builder = (struct tu_pipeline_builder) {
3028
.device = dev,
3029
.cache = cache,
3030
.create_info = create_info,
3031
.alloc = alloc,
3032
.layout = layout,
3033
};
3034
3035
const struct tu_render_pass *pass =
3036
tu_render_pass_from_handle(create_info->renderPass);
3037
const struct tu_subpass *subpass =
3038
&pass->subpasses[create_info->subpass];
3039
3040
builder->multiview_mask = subpass->multiview_mask;
3041
3042
builder->rasterizer_discard =
3043
create_info->pRasterizationState->rasterizerDiscardEnable;
3044
3045
/* variableMultisampleRate support */
3046
builder->emit_msaa_state = (subpass->samples == 0) && !builder->rasterizer_discard;
3047
3048
if (builder->rasterizer_discard) {
3049
builder->samples = VK_SAMPLE_COUNT_1_BIT;
3050
} else {
3051
builder->samples = create_info->pMultisampleState->rasterizationSamples;
3052
builder->alpha_to_coverage = create_info->pMultisampleState->alphaToCoverageEnable;
3053
3054
const uint32_t a = subpass->depth_stencil_attachment.attachment;
3055
builder->depth_attachment_format = (a != VK_ATTACHMENT_UNUSED) ?
3056
pass->attachments[a].format : VK_FORMAT_UNDEFINED;
3057
3058
assert(subpass->color_count == 0 ||
3059
!create_info->pColorBlendState ||
3060
subpass->color_count == create_info->pColorBlendState->attachmentCount);
3061
builder->color_attachment_count = subpass->color_count;
3062
for (uint32_t i = 0; i < subpass->color_count; i++) {
3063
const uint32_t a = subpass->color_attachments[i].attachment;
3064
if (a == VK_ATTACHMENT_UNUSED)
3065
continue;
3066
3067
builder->color_attachment_formats[i] = pass->attachments[a].format;
3068
builder->use_color_attachments = true;
3069
builder->render_components |= 0xf << (i * 4);
3070
}
3071
3072
if (tu_blend_state_is_dual_src(create_info->pColorBlendState)) {
3073
builder->color_attachment_count++;
3074
builder->use_dual_src_blend = true;
3075
/* dual source blending has an extra fs output in the 2nd slot */
3076
if (subpass->color_attachments[0].attachment != VK_ATTACHMENT_UNUSED)
3077
builder->render_components |= 0xf << 4;
3078
}
3079
}
3080
}
3081
3082
static VkResult
tu_graphics_pipeline_create(VkDevice device,
                            VkPipelineCache pipelineCache,
                            const VkGraphicsPipelineCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator,
                            VkPipeline *pPipeline)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   TU_FROM_HANDLE(tu_pipeline_cache, cache, pipelineCache);

   struct tu_pipeline_builder builder;
   tu_pipeline_builder_init_graphics(&builder, dev, cache,
                                     pCreateInfo, pAllocator);

   struct tu_pipeline *pipeline = NULL;
   VkResult result = tu_pipeline_builder_build(&builder, &pipeline);
   tu_pipeline_builder_finish(&builder);

   if (result == VK_SUCCESS)
      *pPipeline = tu_pipeline_to_handle(pipeline);
   else
      *pPipeline = VK_NULL_HANDLE;

   return result;
}

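/* Each element of pCreateInfos is created independently; a failing result is
 * recorded but does not stop the remaining pipelines from being created.
 */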
VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateGraphicsPipelines(VkDevice device,
                           VkPipelineCache pipelineCache,
                           uint32_t count,
                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
{
   VkResult final_result = VK_SUCCESS;

   for (uint32_t i = 0; i < count; i++) {
      VkResult result = tu_graphics_pipeline_create(device, pipelineCache,
                                                    &pCreateInfos[i], pAllocator,
                                                    &pPipelines[i]);

      if (result != VK_SUCCESS)
         final_result = result;
   }

   return final_result;
}

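/* Compute pipelines skip the graphics builder: translate the single compute
 * stage from SPIR-V to NIR, compile an ir3 variant, upload it, and emit the
 * CS program state plus the descriptor preload state into the pipeline's CS.
 */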
static VkResult
tu_compute_pipeline_create(VkDevice device,
                           VkPipelineCache _cache,
                           const VkComputePipelineCreateInfo *pCreateInfo,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipeline)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, pCreateInfo->layout);
   const VkPipelineShaderStageCreateInfo *stage_info = &pCreateInfo->stage;
   VkResult result;

   struct tu_pipeline *pipeline;

   *pPipeline = VK_NULL_HANDLE;

   pipeline = vk_object_zalloc(&dev->vk, pAllocator, sizeof(*pipeline),
                               VK_OBJECT_TYPE_PIPELINE);
   if (!pipeline)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   pipeline->layout = layout;

   pipeline->executables_mem_ctx = ralloc_context(NULL);
   util_dynarray_init(&pipeline->executables, pipeline->executables_mem_ctx);

   struct ir3_shader_key key = {};

   nir_shader *nir = tu_spirv_to_nir(dev, stage_info, MESA_SHADER_COMPUTE);

   const bool executable_info = pCreateInfo->flags &
      VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR;

   char *nir_initial_disasm = executable_info ?
      nir_shader_as_str(nir, pipeline->executables_mem_ctx) : NULL;

   struct tu_shader *shader =
      tu_shader_create(dev, nir, 0, layout, pAllocator);
   if (!shader) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   pipeline->active_desc_sets = shader->active_desc_sets;

   bool created;
   struct ir3_shader_variant *v =
      ir3_shader_get_variant(shader->ir3_shader, &key, false, executable_info, &created);
   if (!v) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   tu_pipeline_set_linkage(&pipeline->program.link[MESA_SHADER_COMPUTE],
                           shader, v);

   result = tu_pipeline_allocate_cs(dev, pipeline, NULL, v);
   if (result != VK_SUCCESS)
      goto fail;

   uint64_t shader_iova = tu_upload_variant(pipeline, v);

   struct tu_pvtmem_config pvtmem;
   tu_setup_pvtmem(dev, pipeline, &pvtmem, v->pvtmem_size, v->pvtmem_per_wave);

   for (int i = 0; i < 3; i++)
      pipeline->compute.local_size[i] = v->local_size[i];

   pipeline->compute.subgroup_size = v->info.double_threadsize ? 128 : 64;

   struct tu_cs prog_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
   tu6_emit_cs_config(&prog_cs, shader, v, &pvtmem, shader_iova);
   pipeline->program.state = tu_cs_end_draw_state(&pipeline->cs, &prog_cs);

   tu6_emit_load_state(pipeline, true);

   tu_append_executable(pipeline, v, nir_initial_disasm);

   tu_shader_destroy(dev, shader, pAllocator);

   *pPipeline = tu_pipeline_to_handle(pipeline);

   return VK_SUCCESS;

fail:
   if (shader)
      tu_shader_destroy(dev, shader, pAllocator);

   vk_object_free(&dev->vk, pAllocator, pipeline);

   return result;
}

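/* Same pattern as tu_CreateGraphicsPipelines: create each entry, remembering
 * the last failing result.
 */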
VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateComputePipelines(VkDevice device,
                          VkPipelineCache pipelineCache,
                          uint32_t count,
                          const VkComputePipelineCreateInfo *pCreateInfos,
                          const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines)
{
   VkResult final_result = VK_SUCCESS;

   for (uint32_t i = 0; i < count; i++) {
      VkResult result = tu_compute_pipeline_create(device, pipelineCache,
                                                   &pCreateInfos[i],
                                                   pAllocator, &pPipelines[i]);
      if (result != VK_SUCCESS)
         final_result = result;
   }

   return final_result;
}

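/* Destroying a VK_NULL_HANDLE pipeline is a no-op. */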
VKAPI_ATTR void VKAPI_CALL
tu_DestroyPipeline(VkDevice _device,
                   VkPipeline _pipeline,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, dev, _device);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   if (!_pipeline)
      return;

   tu_pipeline_finish(pipeline, dev, pAllocator);
   vk_object_free(&dev->vk, pAllocator, pipeline);
}

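/* Fill a fixed-size string field of a Vulkan struct, zeroing it first and
 * asserting that the formatted text was not truncated.
 */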
#define WRITE_STR(field, ...) ({                                \
   memset(field, 0, sizeof(field));                             \
   UNUSED int _i = snprintf(field, sizeof(field), __VA_ARGS__); \
   assert(_i > 0 && _i < sizeof(field));                        \
})

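/* Look up one of the tu_pipeline_executable entries recorded by
 * tu_append_executable() during pipeline creation.
 */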
static const struct tu_pipeline_executable *
tu_pipeline_get_executable(struct tu_pipeline *pipeline, uint32_t index)
{
   assert(index < util_dynarray_num_elements(&pipeline->executables,
                                             struct tu_pipeline_executable));
   return util_dynarray_element(
      &pipeline->executables, struct tu_pipeline_executable, index);
}

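/* VK_KHR_pipeline_executable_properties: report one executable per compiled
 * shader variant, including the separate binning variant of the vertex
 * shader when present.
 */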
VKAPI_ATTR VkResult VKAPI_CALL
tu_GetPipelineExecutablePropertiesKHR(
      VkDevice _device,
      const VkPipelineInfoKHR* pPipelineInfo,
      uint32_t* pExecutableCount,
      VkPipelineExecutablePropertiesKHR* pProperties)
{
   TU_FROM_HANDLE(tu_device, dev, _device);
   TU_FROM_HANDLE(tu_pipeline, pipeline, pPipelineInfo->pipeline);
   VK_OUTARRAY_MAKE(out, pProperties, pExecutableCount);

   util_dynarray_foreach (&pipeline->executables, struct tu_pipeline_executable, exe) {
      vk_outarray_append(&out, props) {
         gl_shader_stage stage = exe->stage;
         props->stages = mesa_to_vk_shader_stage(stage);

         if (!exe->is_binning)
            WRITE_STR(props->name, "%s", _mesa_shader_stage_to_abbrev(stage));
         else
            WRITE_STR(props->name, "Binning VS");

         WRITE_STR(props->description, "%s", _mesa_shader_stage_to_string(stage));

         props->subgroupSize =
            dev->compiler->threadsize_base * (exe->stats.double_threadsize ? 2 : 1);
      }
   }

   return vk_outarray_status(&out);
}

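/* Expose the ir3 compiler statistics (instruction counts, register usage,
 * sync-bit counts, etc.) for the selected executable.
 */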
VKAPI_ATTR VkResult VKAPI_CALL
tu_GetPipelineExecutableStatisticsKHR(
      VkDevice _device,
      const VkPipelineExecutableInfoKHR* pExecutableInfo,
      uint32_t* pStatisticCount,
      VkPipelineExecutableStatisticKHR* pStatistics)
{
   TU_FROM_HANDLE(tu_pipeline, pipeline, pExecutableInfo->pipeline);
   VK_OUTARRAY_MAKE(out, pStatistics, pStatisticCount);

   const struct tu_pipeline_executable *exe =
      tu_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Max Waves Per Core");
      WRITE_STR(stat->description,
                "Maximum number of simultaneous waves per core.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.max_waves;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Instruction Count");
      WRITE_STR(stat->description,
                "Total number of IR3 instructions in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.instrs_count;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "NOPs Count");
      WRITE_STR(stat->description,
                "Number of NOP instructions in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.nops_count;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "MOV Count");
      WRITE_STR(stat->description,
                "Number of MOV instructions in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.mov_count;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "COV Count");
      WRITE_STR(stat->description,
                "Number of COV instructions in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.cov_count;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Registers used");
      WRITE_STR(stat->description,
                "Number of registers used in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.max_reg + 1;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Half-registers used");
      WRITE_STR(stat->description,
                "Number of half-registers used in the final generated "
                "shader executable.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.max_half_reg + 1;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Instructions with SS sync bit");
      WRITE_STR(stat->description,
                "SS bit is set for instructions which depend on a result "
                "of \"long\" instructions to prevent RAW hazard.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.ss;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Instructions with SY sync bit");
      WRITE_STR(stat->description,
                "SY bit is set for instructions which depend on a result "
                "of loads from global memory to prevent RAW hazard.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.sy;
   }

   vk_outarray_append(&out, stat) {
      WRITE_STR(stat->name, "Estimated cycles stalled on SS");
      WRITE_STR(stat->description,
                "A better metric to estimate the impact of SS syncs.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = exe->stats.sstall;
   }

   for (int i = 0; i < ARRAY_SIZE(exe->stats.instrs_per_cat); i++) {
      vk_outarray_append(&out, stat) {
         WRITE_STR(stat->name, "cat%d instructions", i);
         WRITE_STR(stat->description,
                   "Number of cat%d instructions.", i);
         stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
         stat->value.u64 = exe->stats.instrs_per_cat[i];
      }
   }

   return vk_outarray_status(&out);
}

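/* Copy a NUL-terminated IR string into the caller-provided buffer, following
 * the standard Vulkan two-call idiom: with pData == NULL only the required
 * dataSize is reported; otherwise the text is copied and false is returned
 * if it did not fit.
 */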
static bool
write_ir_text(VkPipelineExecutableInternalRepresentationKHR* ir,
              const char *data)
{
   ir->isText = VK_TRUE;

   size_t data_len = strlen(data) + 1;

   if (ir->pData == NULL) {
      ir->dataSize = data_len;
      return true;
   }

   strncpy(ir->pData, data, ir->dataSize);
   if (ir->dataSize < data_len)
      return false;

   ir->dataSize = data_len;
   return true;
}

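/* Return the internal representations captured for an executable: the NIR
 * straight out of SPIR-V, the final NIR handed to the ir3 backend, and the
 * IR3 disassembly, whichever of these were recorded at pipeline creation.
 */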
VKAPI_ATTR VkResult VKAPI_CALL
tu_GetPipelineExecutableInternalRepresentationsKHR(
      VkDevice _device,
      const VkPipelineExecutableInfoKHR* pExecutableInfo,
      uint32_t* pInternalRepresentationCount,
      VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
{
   TU_FROM_HANDLE(tu_pipeline, pipeline, pExecutableInfo->pipeline);
   VK_OUTARRAY_MAKE(out, pInternalRepresentations, pInternalRepresentationCount);
   bool incomplete_text = false;

   const struct tu_pipeline_executable *exe =
      tu_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);

   if (exe->nir_from_spirv) {
      vk_outarray_append(&out, ir) {
         WRITE_STR(ir->name, "NIR from SPIRV");
         WRITE_STR(ir->description,
                   "Initial NIR before any optimizations");

         if (!write_ir_text(ir, exe->nir_from_spirv))
            incomplete_text = true;
      }
   }

   if (exe->nir_final) {
      vk_outarray_append(&out, ir) {
         WRITE_STR(ir->name, "Final NIR");
         WRITE_STR(ir->description,
                   "Final NIR before going into the back-end compiler");

         if (!write_ir_text(ir, exe->nir_final))
            incomplete_text = true;
      }
   }

   if (exe->disasm) {
      vk_outarray_append(&out, ir) {
         WRITE_STR(ir->name, "IR3 Assembly");
         WRITE_STR(ir->description,
                   "Final IR3 assembly for the generated shader binary");

         if (!write_ir_text(ir, exe->disasm))
            incomplete_text = true;
      }
   }

   return incomplete_text ? VK_INCOMPLETE : vk_outarray_status(&out);
}