GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/radeonsi/si_shader_llvm.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "ac_nir_to_llvm.h"
#include "ac_rtld.h"
#include "si_pipe.h"
#include "si_shader_internal.h"
#include "sid.h"
#include "tgsi/tgsi_from_mesa.h"
#include "util/u_memory.h"

struct si_llvm_diagnostics {
   struct pipe_debug_callback *debug;
   unsigned retval;
};

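/**
 * LLVM diagnostic callback: forward errors and warnings to the pipe debug
 * callback. Errors are also printed to stderr and recorded in diag->retval.
 */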
static void si_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
   struct si_llvm_diagnostics *diag = (struct si_llvm_diagnostics *)context;
   LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
   const char *severity_str = NULL;

   switch (severity) {
   case LLVMDSError:
      severity_str = "error";
      break;
   case LLVMDSWarning:
      severity_str = "warning";
      break;
   case LLVMDSRemark:
   case LLVMDSNote:
   default:
      return;
   }

   char *description = LLVMGetDiagInfoDescription(di);

   pipe_debug_message(diag->debug, SHADER_INFO, "LLVM diagnostic (%s): %s", severity_str,
                      description);

   if (severity == LLVMDSError) {
      diag->retval = 1;
      fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n", description);
   }

   LLVMDisposeMessage(description);
}

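/**
 * Compile the LLVM module to a binary: run the (optionally less optimized)
 * compiler passes, emit an ELF into the shader binary, and read the shader
 * config back via ac_rtld. Returns false on failure.
 */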
bool si_compile_llvm(struct si_screen *sscreen, struct si_shader_binary *binary,
                     struct ac_shader_config *conf, struct ac_llvm_compiler *compiler,
                     struct ac_llvm_context *ac, struct pipe_debug_callback *debug,
                     gl_shader_stage stage, const char *name, bool less_optimized)
{
   unsigned count = p_atomic_inc_return(&sscreen->num_compilations);

   if (si_can_dump_shader(sscreen, stage)) {
      fprintf(stderr, "radeonsi: Compiling shader %d\n", count);

      if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
         fprintf(stderr, "%s LLVM IR:\n\n", name);
         ac_dump_module(ac->module);
         fprintf(stderr, "\n");
      }
   }

   if (sscreen->record_llvm_ir) {
      char *ir = LLVMPrintModuleToString(ac->module);
      binary->llvm_ir_string = strdup(ir);
      LLVMDisposeMessage(ir);
   }

   if (!si_replace_shader(count, binary)) {
      struct ac_compiler_passes *passes = compiler->passes;

      if (less_optimized && compiler->low_opt_passes)
         passes = compiler->low_opt_passes;

      struct si_llvm_diagnostics diag = {debug};
      LLVMContextSetDiagnosticHandler(ac->context, si_diagnostic_handler, &diag);

      if (!ac_compile_module_to_elf(passes, ac->module, (char **)&binary->elf_buffer,
                                    &binary->elf_size))
         diag.retval = 1;

      if (diag.retval != 0) {
         pipe_debug_message(debug, SHADER_INFO, "LLVM compilation failed");
         return false;
      }
   }

   struct ac_rtld_binary rtld;
   if (!ac_rtld_open(&rtld, (struct ac_rtld_open_info){
                               .info = &sscreen->info,
                               .shader_type = stage,
                               .wave_size = ac->wave_size,
                               .num_parts = 1,
                               .elf_ptrs = &binary->elf_buffer,
                               .elf_sizes = &binary->elf_size}))
      return false;

   bool ok = ac_rtld_read_config(&sscreen->info, &rtld, conf);
   ac_rtld_close(&rtld);
   return ok;
}

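/**
 * Zero-initialize the shader context and create the underlying
 * ac_llvm_context for the given screen, compiler and wave size.
 */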
void si_llvm_context_init(struct si_shader_context *ctx, struct si_screen *sscreen,
                          struct ac_llvm_compiler *compiler, unsigned wave_size)
{
   memset(ctx, 0, sizeof(*ctx));
   ctx->screen = sscreen;
   ctx->compiler = compiler;

   ac_llvm_context_init(&ctx->ac, compiler, sscreen->info.chip_class, sscreen->info.family,
                        &sscreen->info, AC_FLOAT_MODE_DEFAULT_OPENGL, wave_size, 64);
}

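/**
 * Create the main LLVM function for the shader, using the calling convention
 * of the hardware stage it actually runs as (LS and ES are merged into HS and
 * GS on GFX9+), and set its workgroup size and target attributes.
 */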
void si_llvm_create_func(struct si_shader_context *ctx, const char *name, LLVMTypeRef *return_types,
                         unsigned num_return_elems, unsigned max_workgroup_size)
{
   LLVMTypeRef ret_type;
   enum ac_llvm_calling_convention call_conv;

   if (num_return_elems)
      ret_type = LLVMStructTypeInContext(ctx->ac.context, return_types, num_return_elems, true);
   else
      ret_type = ctx->ac.voidt;

   gl_shader_stage real_stage = ctx->stage;

   /* LS is merged into HS (TCS), and ES is merged into GS. */
   if (ctx->screen->info.chip_class >= GFX9) {
      if (ctx->shader->key.as_ls)
         real_stage = MESA_SHADER_TESS_CTRL;
      else if (ctx->shader->key.as_es || ctx->shader->key.as_ngg)
         real_stage = MESA_SHADER_GEOMETRY;
   }

   switch (real_stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_EVAL:
      call_conv = AC_LLVM_AMDGPU_VS;
      break;
   case MESA_SHADER_TESS_CTRL:
      call_conv = AC_LLVM_AMDGPU_HS;
      break;
   case MESA_SHADER_GEOMETRY:
      call_conv = AC_LLVM_AMDGPU_GS;
      break;
   case MESA_SHADER_FRAGMENT:
      call_conv = AC_LLVM_AMDGPU_PS;
      break;
   case MESA_SHADER_COMPUTE:
      call_conv = AC_LLVM_AMDGPU_CS;
      break;
   default:
      unreachable("Unhandled shader type");
   }

   /* Set up the function. */
   ctx->return_type = ret_type;
   ctx->main_fn = ac_build_main(&ctx->args, &ctx->ac, call_conv, name, ret_type, ctx->ac.module);
   ctx->return_value = LLVMGetUndef(ctx->return_type);

   if (ctx->screen->info.address32_hi) {
      ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-32bit-address-high-bits",
                                           ctx->screen->info.address32_hi);
   }

   ac_llvm_set_workgroup_size(ctx->main_fn, max_workgroup_size);
   ac_llvm_set_target_features(ctx->main_fn, &ctx->ac);
}

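/**
 * Build the main function for the current shader variant: returned SGPRs are
 * declared as i32 and returned VGPRs as f32, followed by stage-specific setup
 * (PS prolog input reservations, LDS for LS/HS, VS/PS ABI arguments).
 */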
void si_llvm_create_main_func(struct si_shader_context *ctx, bool ngg_cull_shader)
{
   struct si_shader *shader = ctx->shader;
   LLVMTypeRef returns[AC_MAX_ARGS];
   unsigned i;

   si_init_shader_args(ctx, ngg_cull_shader);

   for (i = 0; i < ctx->args.num_sgprs_returned; i++)
      returns[i] = ctx->ac.i32; /* SGPR */
   for (; i < ctx->args.return_count; i++)
      returns[i] = ctx->ac.f32; /* VGPR */

   si_llvm_create_func(ctx, ngg_cull_shader ? "ngg_cull_main" : "main", returns,
                       ctx->args.return_count, si_get_max_workgroup_size(shader));

   /* Reserve register locations for VGPR inputs the PS prolog may need. */
   if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
      ac_llvm_add_target_dep_function_attr(
         ctx->main_fn, "InitialPSInputAddr",
         S_0286D0_PERSP_SAMPLE_ENA(1) | S_0286D0_PERSP_CENTER_ENA(1) |
            S_0286D0_PERSP_CENTROID_ENA(1) | S_0286D0_LINEAR_SAMPLE_ENA(1) |
            S_0286D0_LINEAR_CENTER_ENA(1) | S_0286D0_LINEAR_CENTROID_ENA(1) |
            S_0286D0_FRONT_FACE_ENA(1) | S_0286D0_ANCILLARY_ENA(1) | S_0286D0_POS_FIXED_PT_ENA(1));
   }

   if (shader->key.as_ls || ctx->stage == MESA_SHADER_TESS_CTRL) {
      if (USE_LDS_SYMBOLS) {
         /* The LSHS size is not known until draw time, so we append it
          * at the end of whatever LDS use there may be in the rest of
          * the shader (currently none, unless LLVM decides to do its
          * own LDS-based lowering).
          */
         ctx->ac.lds = LLVMAddGlobalInAddressSpace(ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
                                                   "__lds_end", AC_ADDR_SPACE_LDS);
         LLVMSetAlignment(ctx->ac.lds, 256);
      } else {
         ac_declare_lds_as_pointer(&ctx->ac);
      }
   }

   /* Unlike radv, we override these arguments in the prolog, so to the
    * API shader they appear as normal arguments.
    */
   if (ctx->stage == MESA_SHADER_VERTEX) {
      ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
      ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
   } else if (ctx->stage == MESA_SHADER_FRAGMENT) {
      ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
      ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
   }
}

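/**
 * Optimize the module with the compiler's pass manager, optionally dumping
 * the pre-optimization IR first, and dispose of the builder afterwards.
 */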
void si_llvm_optimize_module(struct si_shader_context *ctx)
{
   /* Dump LLVM IR before any optimization passes. */
   if (ctx->screen->debug_flags & DBG(PREOPT_IR) && si_can_dump_shader(ctx->screen, ctx->stage))
      LLVMDumpModule(ctx->ac.module);

   /* Run the passes. */
   LLVMRunPassManager(ctx->compiler->passmgr, ctx->ac.module);
   LLVMDisposeBuilder(ctx->ac.builder);
}

void si_llvm_dispose(struct si_shader_context *ctx)
{
   LLVMDisposeModule(ctx->ac.module);
   LLVMContextDispose(ctx->ac.context);
   ac_llvm_context_dispose(&ctx->ac);
}

/**
 * Load a dword from a constant buffer.
 */
LLVMValueRef si_buffer_load_const(struct si_shader_context *ctx, LLVMValueRef resource,
                                  LLVMValueRef offset)
{
   return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL, 0, ctx->ac.f32,
                               0, true, true);
}

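/**
 * Emit a return instruction; void-typed values produce "ret void".
 */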
void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
{
   if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
      LLVMBuildRetVoid(ctx->ac.builder);
   else
      LLVMBuildRet(ctx->ac.builder, ret);
}

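/* Helpers that insert an input argument into the struct return value,
 * optionally converting it to float or from pointer to i32 first.
 */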
LLVMValueRef si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
                                 struct ac_arg param, unsigned return_index)
{
   return LLVMBuildInsertValue(ctx->ac.builder, ret, ac_get_arg(&ctx->ac, param), return_index, "");
}

LLVMValueRef si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
                                       struct ac_arg param, unsigned return_index)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef p = ac_get_arg(&ctx->ac, param);

   return LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, p), return_index, "");
}

LLVMValueRef si_insert_input_ptr(struct si_shader_context *ctx, LLVMValueRef ret,
                                 struct ac_arg param, unsigned return_index)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef ptr = ac_get_arg(&ctx->ac, param);
   ptr = LLVMBuildPtrToInt(builder, ptr, ctx->ac.i32, "");
   return LLVMBuildInsertValue(builder, ret, ptr, return_index, "");
}

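/**
 * Return the internal bindings descriptor list, loaded from the SGPR whose
 * location depends on whether this is a merged shader.
 */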
LLVMValueRef si_prolog_get_internal_bindings(struct si_shader_context *ctx)
{
   LLVMValueRef ptr[2], list;
   bool merged_shader = si_is_merged_shader(ctx->shader);

   ptr[0] = LLVMGetParam(ctx->main_fn, (merged_shader ? 8 : 0) + SI_SGPR_INTERNAL_BINDINGS);
   list =
      LLVMBuildIntToPtr(ctx->ac.builder, ptr[0], ac_array_in_const32_addr_space(ctx->ac.v4i32), "");
   return list;
}

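/**
 * Emit a workgroup barrier, or only a wait on outstanding memory operations
 * where the full barrier is unnecessary (GFX6 tessellation control shaders).
 */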
void si_llvm_emit_barrier(struct si_shader_context *ctx)
{
   /* GFX6 only (thanks to a hw bug workaround):
    * The real barrier instruction isn't needed, because an entire patch
    * always fits into a single wave.
    */
   if (ctx->screen->info.chip_class == GFX6 && ctx->stage == MESA_SHADER_TESS_CTRL) {
      ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
      return;
   }

   ac_build_s_barrier(&ctx->ac);
}

/* Ensure that the esgs ring is declared.
 *
 * We declare it with 64KB alignment as a hint that the
 * pointer value will always be 0.
 */
void si_llvm_declare_esgs_ring(struct si_shader_context *ctx)
{
   if (ctx->esgs_ring)
      return;

   assert(!LLVMGetNamedGlobal(ctx->ac.module, "esgs_ring"));

   ctx->esgs_ring = LLVMAddGlobalInAddressSpace(ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
                                                "esgs_ring", AC_ADDR_SPACE_LDS);
   LLVMSetLinkage(ctx->esgs_ring, LLVMExternalLinkage);
   LLVMSetAlignment(ctx->esgs_ring, 64 * 1024);
}

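/**
 * Set the EXEC mask from a bitfield of the given input SGPR using the
 * llvm.amdgcn.init.exec.from.input intrinsic.
 */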
static void si_init_exec_from_input(struct si_shader_context *ctx, struct ac_arg param,
                                    unsigned bitoffset)
{
   LLVMValueRef args[] = {
      ac_get_arg(&ctx->ac, param),
      LLVMConstInt(ctx->ac.i32, bitoffset, 0),
   };
   ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.init.exec.from.input", ctx->ac.voidt, args, 2,
                      AC_FUNC_ATTR_CONVERGENT);
}

/**
 * Get the value of a shader input parameter and extract a bitfield.
 */
static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx, LLVMValueRef value,
                                      unsigned rshift, unsigned bitwidth)
{
   if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
      value = ac_to_integer(&ctx->ac, value);

   if (rshift)
      value = LLVMBuildLShr(ctx->ac.builder, value, LLVMConstInt(ctx->ac.i32, rshift, 0), "");

   if (rshift + bitwidth < 32) {
      unsigned mask = (1 << bitwidth) - 1;
      value = LLVMBuildAnd(ctx->ac.builder, value, LLVMConstInt(ctx->ac.i32, mask, 0), "");
   }

   return value;
}

LLVMValueRef si_unpack_param(struct si_shader_context *ctx, struct ac_arg param, unsigned rshift,
                             unsigned bitwidth)
{
   LLVMValueRef value = ac_get_arg(&ctx->ac, param);

   return unpack_llvm_param(ctx, value, rshift, bitwidth);
}

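/**
 * Return the primitive/patch ID for the current stage. Only component 0 is
 * meaningful; other swizzle components return 0.
 */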
LLVMValueRef si_get_primitive_id(struct si_shader_context *ctx, unsigned swizzle)
{
   if (swizzle > 0)
      return ctx->ac.i32_0;

   switch (ctx->stage) {
   case MESA_SHADER_VERTEX:
      return ac_get_arg(&ctx->ac, ctx->args.vs_prim_id);
   case MESA_SHADER_TESS_CTRL:
      return ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id);
   case MESA_SHADER_TESS_EVAL:
      return ac_get_arg(&ctx->ac, ctx->args.tes_patch_id);
   case MESA_SHADER_GEOMETRY:
      return ac_get_arg(&ctx->ac, ctx->args.gs_prim_id);
   default:
      assert(0);
      return ctx->ac.i32_0;
   }
}

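/**
 * ABI callback for compute shaders with a variable workgroup size: unpack
 * the block size as three 10-bit components of one SGPR.
 */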
static LLVMValueRef si_llvm_get_block_size(struct ac_shader_abi *abi)
{
   struct si_shader_context *ctx = si_shader_context_from_abi(abi);

   assert(ctx->shader->selector->info.base.workgroup_size_variable &&
          ctx->shader->selector->info.uses_variable_block_size);

   LLVMValueRef chan[3] = {
      si_unpack_param(ctx, ctx->block_size, 0, 10),
      si_unpack_param(ctx, ctx->block_size, 10, 10),
      si_unpack_param(ctx, ctx->block_size, 20, 10),
   };
   return ac_build_gather_values(&ctx->ac, chan, 3);
}

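/**
 * Declare the LDS block used for statically sized compute shared memory
 * and store a pointer to it in ctx->ac.lds.
 */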
static void si_llvm_declare_compute_memory(struct si_shader_context *ctx)
{
   struct si_shader_selector *sel = ctx->shader->selector;
   unsigned lds_size = sel->info.base.shared_size;

   LLVMTypeRef i8p = LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_LDS);
   LLVMValueRef var;

   assert(!ctx->ac.lds);

   var = LLVMAddGlobalInAddressSpace(ctx->ac.module, LLVMArrayType(ctx->ac.i8, lds_size),
                                     "compute_lds", AC_ADDR_SPACE_LDS);
   LLVMSetAlignment(var, 64 * 1024);

   ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
}

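/**
 * Set up stage-specific ABI state (VS inputs, PS color inputs, CS user data
 * and shared memory), allocate output temporaries, and translate the NIR
 * shader into the LLVM module.
 */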
static bool si_nir_build_llvm(struct si_shader_context *ctx, struct nir_shader *nir)
{
   if (nir->info.stage == MESA_SHADER_VERTEX) {
      si_llvm_load_vs_inputs(ctx, nir);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      unsigned colors_read = ctx->shader->selector->info.colors_read;
      LLVMValueRef main_fn = ctx->main_fn;

      LLVMValueRef undef = LLVMGetUndef(ctx->ac.f32);

      unsigned offset = SI_PARAM_POS_FIXED_PT + 1;

      if (colors_read & 0x0f) {
         unsigned mask = colors_read & 0x0f;
         LLVMValueRef values[4];
         values[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : undef;
         values[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : undef;
         values[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : undef;
         values[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : undef;
         ctx->abi.color0 = ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, 4));
      }
      if (colors_read & 0xf0) {
         unsigned mask = (colors_read & 0xf0) >> 4;
         LLVMValueRef values[4];
         values[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : undef;
         values[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : undef;
         values[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : undef;
         values[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : undef;
         ctx->abi.color1 = ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, 4));
      }

      ctx->abi.interp_at_sample_force_center =
         ctx->shader->key.mono.u.ps.interpolate_at_sample_force_center;

      ctx->abi.kill_ps_if_inf_interp =
         ctx->screen->options.no_infinite_interp &&
         (ctx->shader->selector->info.uses_persp_center ||
          ctx->shader->selector->info.uses_persp_centroid ||
          ctx->shader->selector->info.uses_persp_sample);
   } else if (nir->info.stage == MESA_SHADER_COMPUTE) {
      if (nir->info.cs.user_data_components_amd) {
         ctx->abi.user_data = ac_get_arg(&ctx->ac, ctx->cs_user_data);
         ctx->abi.user_data = ac_build_expand_to_vec4(&ctx->ac, ctx->abi.user_data,
                                                      nir->info.cs.user_data_components_amd);
      }

      if (ctx->shader->selector->info.base.shared_size)
         si_llvm_declare_compute_memory(ctx);
   }

   ctx->abi.inputs = &ctx->inputs[0];
   ctx->abi.clamp_shadow_reference = true;
   ctx->abi.robust_buffer_access = true;
   ctx->abi.convert_undef_to_zero = true;
   ctx->abi.clamp_div_by_zero = ctx->screen->options.clamp_div_by_zero;
   ctx->abi.adjust_frag_coord_z = false;

   const struct si_shader_info *info = &ctx->shader->selector->info;
   for (unsigned i = 0; i < info->num_outputs; i++) {
      LLVMTypeRef type = ctx->ac.f32;

      /* Only FS uses unpacked f16. Other stages pack 16-bit outputs into low and high bits of f32. */
      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          nir_alu_type_get_type_size(ctx->shader->selector->info.output_type[i]) == 16)
         type = ctx->ac.f16;

      for (unsigned j = 0; j < 4; j++)
         ctx->abi.outputs[i * 4 + j] = ac_build_alloca_undef(&ctx->ac, type, "");
   }

   ac_nir_translate(&ctx->ac, &ctx->abi, &ctx->args, nir);

   return true;
}

/**
 * Given a list of shader part functions, build a wrapper function that
 * runs them in sequence to form a monolithic shader.
 */
void si_build_wrapper_function(struct si_shader_context *ctx, LLVMValueRef *parts,
                               unsigned num_parts, unsigned main_part,
                               unsigned next_shader_first_part, bool same_thread_count)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   /* PS epilog has one arg per color component; gfx9 merged shader
    * prologs need to forward 40 SGPRs.
    */
   LLVMValueRef initial[AC_MAX_ARGS], out[AC_MAX_ARGS];
   LLVMTypeRef function_type;
   unsigned num_first_params;
   unsigned num_out, initial_num_out;
   ASSERTED unsigned num_out_sgpr;         /* used in debug checks */
   ASSERTED unsigned initial_num_out_sgpr; /* used in debug checks */
   unsigned num_sgprs, num_vgprs;
   unsigned gprs;

   memset(&ctx->args, 0, sizeof(ctx->args));

   for (unsigned i = 0; i < num_parts; ++i) {
      ac_add_function_attr(ctx->ac.context, parts[i], -1, AC_FUNC_ATTR_ALWAYSINLINE);
      LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
   }

   /* The parameters of the wrapper function correspond to those of the
    * first part in terms of SGPRs and VGPRs, but we use the types of the
    * main part to get the right types. This is relevant for the
    * dereferenceable attribute on descriptor table pointers.
    */
   num_sgprs = 0;
   num_vgprs = 0;

   function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
   num_first_params = LLVMCountParamTypes(function_type);

   for (unsigned i = 0; i < num_first_params; ++i) {
      LLVMValueRef param = LLVMGetParam(parts[0], i);

      if (ac_is_sgpr_param(param)) {
         assert(num_vgprs == 0);
         num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
      } else {
         num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
      }
   }

   gprs = 0;
   while (gprs < num_sgprs + num_vgprs) {
      LLVMValueRef param = LLVMGetParam(parts[main_part], ctx->args.arg_count);
      LLVMTypeRef type = LLVMTypeOf(param);
      unsigned size = ac_get_type_size(type) / 4;

      /* This is going to get cast anyway, so we don't have to
       * have the exact same type. But we do have to preserve the
       * pointer-ness so that LLVM knows about it.
       */
      enum ac_arg_type arg_type = AC_ARG_INT;
      if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
         type = LLVMGetElementType(type);

         if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
            if (LLVMGetVectorSize(type) == 4)
               arg_type = AC_ARG_CONST_DESC_PTR;
            else if (LLVMGetVectorSize(type) == 8)
               arg_type = AC_ARG_CONST_IMAGE_PTR;
            else
               assert(0);
         } else if (type == ctx->ac.f32) {
            arg_type = AC_ARG_CONST_FLOAT_PTR;
         } else {
            assert(0);
         }
      }

      ac_add_arg(&ctx->args, gprs < num_sgprs ? AC_ARG_SGPR : AC_ARG_VGPR, size, arg_type, NULL);

      assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
      assert(gprs + size <= num_sgprs + num_vgprs &&
             (gprs >= num_sgprs || gprs + size <= num_sgprs));

      gprs += size;
   }

   /* Prepare the return type. */
   unsigned num_returns = 0;
   LLVMTypeRef returns[AC_MAX_ARGS], last_func_type, return_type;

   last_func_type = LLVMGetElementType(LLVMTypeOf(parts[num_parts - 1]));
   return_type = LLVMGetReturnType(last_func_type);

   switch (LLVMGetTypeKind(return_type)) {
   case LLVMStructTypeKind:
      num_returns = LLVMCountStructElementTypes(return_type);
      assert(num_returns <= ARRAY_SIZE(returns));
      LLVMGetStructElementTypes(return_type, returns);
      break;
   case LLVMVoidTypeKind:
      break;
   default:
      unreachable("unexpected type");
   }

   si_llvm_create_func(ctx, "wrapper", returns, num_returns,
                       si_get_max_workgroup_size(ctx->shader));

   if (si_is_merged_shader(ctx->shader) && !same_thread_count)
      ac_init_exec_full_mask(&ctx->ac);

   /* Record the arguments of the function as if they were an output of
    * a previous part.
    */
   num_out = 0;
   num_out_sgpr = 0;

   for (unsigned i = 0; i < ctx->args.arg_count; ++i) {
      LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
      LLVMTypeRef param_type = LLVMTypeOf(param);
      LLVMTypeRef out_type = ctx->args.args[i].file == AC_ARG_SGPR ? ctx->ac.i32 : ctx->ac.f32;
      unsigned size = ac_get_type_size(param_type) / 4;

      if (size == 1) {
         if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
            param = LLVMBuildPtrToInt(builder, param, ctx->ac.i32, "");
            param_type = ctx->ac.i32;
         }

         if (param_type != out_type)
            param = LLVMBuildBitCast(builder, param, out_type, "");
         out[num_out++] = param;
      } else {
         LLVMTypeRef vector_type = LLVMVectorType(out_type, size);

         if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
            param = LLVMBuildPtrToInt(builder, param, ctx->ac.i64, "");
            param_type = ctx->ac.i64;
         }

         if (param_type != vector_type)
            param = LLVMBuildBitCast(builder, param, vector_type, "");

         for (unsigned j = 0; j < size; ++j)
            out[num_out++] =
               LLVMBuildExtractElement(builder, param, LLVMConstInt(ctx->ac.i32, j, 0), "");
      }

      if (ctx->args.args[i].file == AC_ARG_SGPR)
         num_out_sgpr = num_out;
   }

   memcpy(initial, out, sizeof(out));
   initial_num_out = num_out;
   initial_num_out_sgpr = num_out_sgpr;

   /* Now chain the parts. */
   LLVMValueRef ret = NULL;
   for (unsigned part = 0; part < num_parts; ++part) {
      LLVMValueRef in[AC_MAX_ARGS];
      LLVMTypeRef ret_type;
      unsigned out_idx = 0;
      unsigned num_params = LLVMCountParams(parts[part]);

      /* Merged shaders are executed conditionally depending
       * on the number of enabled threads passed in the input SGPRs. */
      if (si_is_multi_part_shader(ctx->shader) && part == 0) {
         if (same_thread_count) {
            struct ac_arg arg;
            arg.arg_index = 3;
            arg.used = true;

            si_init_exec_from_input(ctx, arg, 0);
         } else {
            LLVMValueRef ena, count = initial[3];

            count = LLVMBuildAnd(builder, count, LLVMConstInt(ctx->ac.i32, 0x7f, 0), "");
            ena = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), count, "");
            ac_build_ifcc(&ctx->ac, ena, 6506);
         }
      }

      /* Derive arguments for the next part from outputs of the
       * previous one.
       */
      for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
         LLVMValueRef param;
         LLVMTypeRef param_type;
         bool is_sgpr;
         unsigned param_size;
         LLVMValueRef arg = NULL;

         param = LLVMGetParam(parts[part], param_idx);
         param_type = LLVMTypeOf(param);
         param_size = ac_get_type_size(param_type) / 4;
         is_sgpr = ac_is_sgpr_param(param);

         if (is_sgpr) {
            ac_add_function_attr(ctx->ac.context, parts[part], param_idx + 1, AC_FUNC_ATTR_INREG);
         } else if (out_idx < num_out_sgpr) {
            /* Skip returned SGPRs the current part doesn't
             * declare on the input. */
            out_idx = num_out_sgpr;
         }

         assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));

         if (param_size == 1)
            arg = out[out_idx];
         else
            arg = ac_build_gather_values(&ctx->ac, &out[out_idx], param_size);

         if (LLVMTypeOf(arg) != param_type) {
            if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
               if (LLVMGetPointerAddressSpace(param_type) == AC_ADDR_SPACE_CONST_32BIT) {
                  arg = LLVMBuildBitCast(builder, arg, ctx->ac.i32, "");
                  arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
               } else {
                  arg = LLVMBuildBitCast(builder, arg, ctx->ac.i64, "");
                  arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
               }
            } else {
               arg = LLVMBuildBitCast(builder, arg, param_type, "");
            }
         }

         in[param_idx] = arg;
         out_idx += param_size;
      }

      ret = ac_build_call(&ctx->ac, parts[part], in, num_params);

      if (!same_thread_count &&
          si_is_multi_part_shader(ctx->shader) && part + 1 == next_shader_first_part) {
         ac_build_endif(&ctx->ac, 6506);

         /* The second half of the merged shader should use
          * the inputs from the toplevel (wrapper) function,
          * not the return value from the last call.
          *
          * That's because the last call was executed conditionally,
          * so we can't consume it in the main block.
          */
         memcpy(out, initial, sizeof(initial));
         num_out = initial_num_out;
         num_out_sgpr = initial_num_out_sgpr;

         /* Execute the second shader conditionally based on the number of
          * enabled threads there.
          */
         if (ctx->stage == MESA_SHADER_TESS_CTRL) {
            LLVMValueRef ena, count = initial[3];

            count = LLVMBuildLShr(builder, count, LLVMConstInt(ctx->ac.i32, 8, 0), "");
            count = LLVMBuildAnd(builder, count, LLVMConstInt(ctx->ac.i32, 0x7f, 0), "");
            ena = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), count, "");
            ac_build_ifcc(&ctx->ac, ena, 6507);
         }
         continue;
      }

      /* Extract the returned GPRs. */
      ret_type = LLVMTypeOf(ret);
      num_out = 0;
      num_out_sgpr = 0;

      if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
         assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);

         unsigned ret_size = LLVMCountStructElementTypes(ret_type);

         for (unsigned i = 0; i < ret_size; ++i) {
            LLVMValueRef val = LLVMBuildExtractValue(builder, ret, i, "");

            assert(num_out < ARRAY_SIZE(out));
            out[num_out++] = val;

            if (LLVMTypeOf(val) == ctx->ac.i32) {
               assert(num_out_sgpr + 1 == num_out);
               num_out_sgpr = num_out;
            }
         }
      }
   }

   /* Close the conditional wrapping the second shader. */
   if (ctx->stage == MESA_SHADER_TESS_CTRL &&
       !same_thread_count && si_is_multi_part_shader(ctx->shader))
      ac_build_endif(&ctx->ac, 6507);

   /* Return the value from the last part. It's non-void only for the prim
    * discard compute shader.
    */
   if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
      LLVMBuildRetVoid(builder);
   else
      LLVMBuildRet(builder, ret);
}

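/**
 * Translate one NIR shader into LLVM IR in the given context: initialize the
 * per-stage callbacks, build the main function, declare rings and LDS
 * objects, emit merged-shader EXEC setup and barriers, and run the
 * NIR-to-LLVM translation.
 */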
bool si_llvm_translate_nir(struct si_shader_context *ctx, struct si_shader *shader,
                           struct nir_shader *nir, bool free_nir, bool ngg_cull_shader)
{
   struct si_shader_selector *sel = shader->selector;
   const struct si_shader_info *info = &sel->info;

   ctx->shader = shader;
   ctx->stage = sel->info.stage;

   ctx->num_const_buffers = info->base.num_ubos;
   ctx->num_shader_buffers = info->base.num_ssbos;

   ctx->num_samplers = BITSET_LAST_BIT(info->base.textures_used);
   ctx->num_images = info->base.num_images;

   si_llvm_init_resource_callbacks(ctx);

   switch (ctx->stage) {
   case MESA_SHADER_VERTEX:
      si_llvm_init_vs_callbacks(ctx, ngg_cull_shader);
      break;
   case MESA_SHADER_TESS_CTRL:
      si_llvm_init_tcs_callbacks(ctx);
      break;
   case MESA_SHADER_TESS_EVAL:
      si_llvm_init_tes_callbacks(ctx, ngg_cull_shader);
      break;
   case MESA_SHADER_GEOMETRY:
      si_llvm_init_gs_callbacks(ctx);
      break;
   case MESA_SHADER_FRAGMENT:
      si_llvm_init_ps_callbacks(ctx);
      break;
   case MESA_SHADER_COMPUTE:
      ctx->abi.load_local_group_size = si_llvm_get_block_size;
      break;
   default:
      assert(!"Unsupported shader type");
      return false;
   }

   si_llvm_create_main_func(ctx, ngg_cull_shader);

   if (ctx->shader->key.as_es || ctx->stage == MESA_SHADER_GEOMETRY)
      si_preload_esgs_ring(ctx);

   if (ctx->stage == MESA_SHADER_GEOMETRY)
      si_preload_gs_rings(ctx);
   else if (ctx->stage == MESA_SHADER_TESS_EVAL)
      si_llvm_preload_tes_rings(ctx);

   if (ctx->stage == MESA_SHADER_TESS_CTRL && sel->info.tessfactors_are_def_in_all_invocs) {
      for (unsigned i = 0; i < 6; i++) {
         ctx->invoc0_tess_factors[i] = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
      }
   }

   if (ctx->stage == MESA_SHADER_GEOMETRY) {
      for (unsigned i = 0; i < 4; i++) {
         ctx->gs_next_vertex[i] = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
      }
      if (shader->key.as_ngg) {
         for (unsigned i = 0; i < 4; ++i) {
            ctx->gs_curprim_verts[i] = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
            ctx->gs_generated_prims[i] = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
         }

         assert(!ctx->gs_ngg_scratch);
         LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, gfx10_ngg_get_scratch_dw_size(shader));
         ctx->gs_ngg_scratch =
            LLVMAddGlobalInAddressSpace(ctx->ac.module, ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
         LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
         LLVMSetAlignment(ctx->gs_ngg_scratch, 4);

         ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(
            ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
         LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
         LLVMSetAlignment(ctx->gs_ngg_emit, 4);
      }
   }

   if (ctx->stage != MESA_SHADER_GEOMETRY && (shader->key.as_ngg && !shader->key.as_es)) {
      /* Unconditionally declare scratch space base for streamout and
       * vertex compaction. Whether space is actually allocated is
       * determined during linking / PM4 creation.
       *
       * Add an extra dword per vertex to ensure an odd stride, which
       * avoids bank conflicts for SoA accesses.
       */
      if (!gfx10_is_ngg_passthrough(shader))
         si_llvm_declare_esgs_ring(ctx);

      /* This is really only needed when streamout and / or vertex
       * compaction is enabled.
       */
      if (!ctx->gs_ngg_scratch && (sel->so.num_outputs || shader->key.opt.ngg_culling)) {
         LLVMTypeRef asi32 = LLVMArrayType(ctx->ac.i32, gfx10_ngg_get_scratch_dw_size(shader));
         ctx->gs_ngg_scratch =
            LLVMAddGlobalInAddressSpace(ctx->ac.module, asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
         LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
         LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
      }
   }

   /* For merged shaders (VS-TCS, VS-GS, TES-GS): */
   if (ctx->screen->info.chip_class >= GFX9 && si_is_merged_shader(shader)) {
      LLVMValueRef thread_enabled = NULL;

      /* TES is special because it has only 1 shader part if NGG shader culling is disabled,
       * and therefore it doesn't use the wrapper function.
       */
      bool no_wrapper_func = ctx->stage == MESA_SHADER_TESS_EVAL && !shader->key.as_es &&
                             !shader->key.opt.ngg_culling;

      /* Set EXEC = ~0 before the first shader. If the prolog is present, EXEC is set there
       * instead. For monolithic shaders, the wrapper function does this.
       */
      if ((!shader->is_monolithic || no_wrapper_func) &&
          (ctx->stage == MESA_SHADER_TESS_EVAL ||
           (ctx->stage == MESA_SHADER_VERTEX &&
            !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog, &shader->key, ngg_cull_shader))))
         ac_init_exec_full_mask(&ctx->ac);

      /* NGG VS and NGG TES: Send gs_alloc_req and the prim export at the beginning to decrease
       * register usage.
       */
      if ((ctx->stage == MESA_SHADER_VERTEX || ctx->stage == MESA_SHADER_TESS_EVAL) &&
          shader->key.as_ngg && !shader->key.as_es && !shader->key.opt.ngg_culling) {
         /* GFX10 requires a barrier before gs_alloc_req due to a hw bug. */
         if (ctx->screen->info.chip_class == GFX10)
            ac_build_s_barrier(&ctx->ac);

         gfx10_ngg_build_sendmsg_gs_alloc_req(ctx);

         /* Build the primitive export at the beginning
          * of the shader if possible.
          */
         if (gfx10_ngg_export_prim_early(shader))
            gfx10_ngg_build_export_prim(ctx, NULL, NULL);
      }

      /* NGG GS: Initialize LDS and insert s_barrier, which must not be inside the if statement. */
      if (ctx->stage == MESA_SHADER_GEOMETRY && shader->key.as_ngg)
         gfx10_ngg_gs_emit_prologue(ctx);

      if (ctx->stage == MESA_SHADER_GEOMETRY ||
          (ctx->stage == MESA_SHADER_TESS_CTRL && !shader->is_monolithic)) {
         /* Wrap both shaders in an if statement according to the number of enabled threads
          * there. For monolithic TCS, the if statement is inserted by the wrapper function,
          * not here.
          */
         thread_enabled = si_is_gs_thread(ctx); /* 2nd shader: thread enabled bool */
      } else if (((shader->key.as_ls || shader->key.as_es) && !shader->is_monolithic) ||
                 (shader->key.as_ngg && !shader->key.as_es)) {
         /* This is NGG VS or NGG TES or VS before GS or TES before GS or VS before TCS.
          * For monolithic LS (VS before TCS) and ES (VS before GS and TES before GS),
          * the if statement is inserted by the wrapper function.
          */
         thread_enabled = si_is_es_thread(ctx); /* 1st shader: thread enabled bool */
      }

      if (thread_enabled) {
         ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
         ctx->merged_wrap_if_label = 11500;
         ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);
      }

      /* Execute a barrier before the second shader in
       * a merged shader.
       *
       * Execute the barrier inside the conditional block,
       * so that empty waves can jump directly to s_endpgm,
       * which will also signal the barrier.
       *
       * This is possible in gfx9, because an empty wave
       * for the second shader does not participate in
       * the epilogue. With NGG, empty waves may still
       * be required to export data (e.g. GS output vertices),
       * so we cannot let them exit early.
       *
       * If the shader is TCS and the TCS epilog is present
       * and contains a barrier, it will wait there and then
       * reach s_endpgm.
       */
      if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         /* We need the barrier only if TCS inputs are read from LDS. */
         if (!shader->key.opt.same_patch_vertices ||
             shader->selector->info.base.inputs_read &
                ~shader->selector->tcs_vgpr_only_inputs)
            ac_build_s_barrier(&ctx->ac);
      } else if (ctx->stage == MESA_SHADER_GEOMETRY && !shader->key.as_ngg) {
         /* gfx10_ngg_gs_emit_prologue inserts the barrier for NGG. */
         ac_build_s_barrier(&ctx->ac);
      }
   }

   bool success = si_nir_build_llvm(ctx, nir);
   if (free_nir)
      ralloc_free(nir);
   if (!success) {
      fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
      return false;
   }

   si_llvm_build_ret(ctx, ctx->return_value);
   return true;
}

static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
                                    struct si_shader_selector *sel)
{
   if (!compiler->low_opt_passes)
      return false;

   /* Assume a slow CPU. */
   assert(!sel->screen->info.has_dedicated_vram && sel->screen->info.chip_class <= GFX8);

   /* For a crazy dEQP test containing 2597 memory opcodes, mostly
    * buffer stores. */
   return sel->info.stage == MESA_SHADER_COMPUTE && sel->info.num_memory_stores > 1000;
}

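/**
 * Remove unused VS/TES output exports, except outputs that may be overridden
 * by point sprite coordinates at runtime.
 */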
static void si_optimize_vs_outputs(struct si_shader_context *ctx)
{
   struct si_shader *shader = ctx->shader;
   struct si_shader_info *info = &shader->selector->info;
   unsigned skip_vs_optim_mask = 0;

   if ((ctx->stage != MESA_SHADER_VERTEX && ctx->stage != MESA_SHADER_TESS_EVAL) ||
       shader->key.as_ls || shader->key.as_es)
      return;

   /* Optimizing these outputs is not possible, since they might be overridden
    * at runtime with S_028644_PT_SPRITE_TEX. */
   for (int i = 0; i < info->num_outputs; i++) {
      if (info->output_semantic[i] == VARYING_SLOT_PNTC ||
          (info->output_semantic[i] >= VARYING_SLOT_TEX0 &&
           info->output_semantic[i] <= VARYING_SLOT_TEX7)) {
         skip_vs_optim_mask |= 1u << shader->info.vs_output_param_offset[i];
      }
   }

   ac_optimize_vs_outputs(&ctx->ac, ctx->main_fn, shader->info.vs_output_param_offset,
                          info->num_outputs, skip_vs_optim_mask,
                          &shader->info.nr_param_exports);
}

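/**
 * Compile a shader through the LLVM backend: translate NIR (plus an optional
 * NGG-culling variant), stitch monolithic shader parts together with a
 * wrapper function, optimize the module, and compile it to a binary.
 */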
bool si_llvm_compile_shader(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
                            struct si_shader *shader, struct pipe_debug_callback *debug,
                            struct nir_shader *nir, bool free_nir)
{
   struct si_shader_selector *sel = shader->selector;
   struct si_shader_context ctx;

   si_llvm_context_init(&ctx, sscreen, compiler, si_get_shader_wave_size(shader));

   LLVMValueRef ngg_cull_main_fn = NULL;
   if (shader->key.opt.ngg_culling) {
      if (!si_llvm_translate_nir(&ctx, shader, nir, false, true)) {
         si_llvm_dispose(&ctx);
         return false;
      }
      ngg_cull_main_fn = ctx.main_fn;
      ctx.main_fn = NULL;
   }

   if (!si_llvm_translate_nir(&ctx, shader, nir, free_nir, false)) {
      si_llvm_dispose(&ctx);
      return false;
   }

   if (shader->is_monolithic && ctx.stage == MESA_SHADER_VERTEX) {
      LLVMValueRef parts[4];
      unsigned num_parts = 0;
      bool first_is_prolog = false;
      LLVMValueRef main_fn = ctx.main_fn;

      if (ngg_cull_main_fn) {
         if (si_vs_needs_prolog(sel, &shader->key.part.vs.prolog, &shader->key, true)) {
            union si_shader_part_key prolog_key;
            si_get_vs_prolog_key(&sel->info, shader->info.num_input_sgprs, true,
                                 &shader->key.part.vs.prolog, shader, &prolog_key);
            prolog_key.vs_prolog.is_monolithic = true;
            si_llvm_build_vs_prolog(&ctx, &prolog_key);
            parts[num_parts++] = ctx.main_fn;
            first_is_prolog = true;
         }
         parts[num_parts++] = ngg_cull_main_fn;
      }

      if (si_vs_needs_prolog(sel, &shader->key.part.vs.prolog, &shader->key, false)) {
         union si_shader_part_key prolog_key;
         si_get_vs_prolog_key(&sel->info, shader->info.num_input_sgprs, false,
                              &shader->key.part.vs.prolog, shader, &prolog_key);
         prolog_key.vs_prolog.is_monolithic = true;
         si_llvm_build_vs_prolog(&ctx, &prolog_key);
         parts[num_parts++] = ctx.main_fn;
         if (num_parts == 1)
            first_is_prolog = true;
      }
      parts[num_parts++] = main_fn;

      si_build_wrapper_function(&ctx, parts, num_parts, first_is_prolog ? 1 : 0, 0, false);

      if (ctx.shader->key.opt.vs_as_prim_discard_cs)
         si_build_prim_discard_compute_shader(&ctx);
   } else if (shader->is_monolithic && ctx.stage == MESA_SHADER_TESS_EVAL && ngg_cull_main_fn) {
      LLVMValueRef parts[3], prolog, main_fn = ctx.main_fn;

      /* We reuse the VS prolog code for TES just to load the input VGPRs from LDS. */
      union si_shader_part_key prolog_key;
      memset(&prolog_key, 0, sizeof(prolog_key));
      prolog_key.vs_prolog.num_input_sgprs = shader->info.num_input_sgprs;
      prolog_key.vs_prolog.num_merged_next_stage_vgprs = 5;
      prolog_key.vs_prolog.as_ngg = 1;
      prolog_key.vs_prolog.load_vgprs_after_culling = 1;
      prolog_key.vs_prolog.is_monolithic = true;
      si_llvm_build_vs_prolog(&ctx, &prolog_key);
      prolog = ctx.main_fn;

      parts[0] = ngg_cull_main_fn;
      parts[1] = prolog;
      parts[2] = main_fn;

      si_build_wrapper_function(&ctx, parts, 3, 0, 0, false);
   } else if (shader->is_monolithic && ctx.stage == MESA_SHADER_TESS_CTRL) {
      if (sscreen->info.chip_class >= GFX9) {
         struct si_shader_selector *ls = shader->key.part.tcs.ls;
         LLVMValueRef parts[4];
         bool vs_needs_prolog =
            si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog, &shader->key, false);

         /* TCS main part */
         parts[2] = ctx.main_fn;

         /* TCS epilog */
         union si_shader_part_key tcs_epilog_key;
         memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
         tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
         si_llvm_build_tcs_epilog(&ctx, &tcs_epilog_key);
         parts[3] = ctx.main_fn;

         /* VS as LS main part */
         ctx.next_shader_sel = ctx.shader->selector;
         nir = si_get_nir_shader(ls, NULL, &free_nir);
         struct si_shader shader_ls = {};
         shader_ls.selector = ls;
         shader_ls.key.as_ls = 1;
         shader_ls.key.mono = shader->key.mono;
         shader_ls.key.opt = shader->key.opt;
         shader_ls.is_monolithic = true;

         if (!si_llvm_translate_nir(&ctx, &shader_ls, nir, free_nir, false)) {
            si_llvm_dispose(&ctx);
            return false;
         }
         shader->info.uses_instanceid |= ls->info.uses_instanceid;
         parts[1] = ctx.main_fn;

         /* LS prolog */
         if (vs_needs_prolog) {
            union si_shader_part_key vs_prolog_key;
            si_get_vs_prolog_key(&ls->info, shader_ls.info.num_input_sgprs, false,
                                 &shader->key.part.tcs.ls_prolog, shader, &vs_prolog_key);
            vs_prolog_key.vs_prolog.is_monolithic = true;
            si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
            parts[0] = ctx.main_fn;
         }

         /* Reset the shader context. */
         ctx.shader = shader;
         ctx.stage = MESA_SHADER_TESS_CTRL;

         si_build_wrapper_function(&ctx, parts + !vs_needs_prolog, 4 - !vs_needs_prolog,
                                   vs_needs_prolog, vs_needs_prolog ? 2 : 1,
                                   shader->key.opt.same_patch_vertices);
      } else {
         LLVMValueRef parts[2];
         union si_shader_part_key epilog_key;

         parts[0] = ctx.main_fn;

         memset(&epilog_key, 0, sizeof(epilog_key));
         epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
         si_llvm_build_tcs_epilog(&ctx, &epilog_key);
         parts[1] = ctx.main_fn;

         si_build_wrapper_function(&ctx, parts, 2, 0, 0, false);
      }
   } else if (shader->is_monolithic && ctx.stage == MESA_SHADER_GEOMETRY) {
      if (ctx.screen->info.chip_class >= GFX9) {
         struct si_shader_selector *es = shader->key.part.gs.es;
         LLVMValueRef es_prolog = NULL;
         LLVMValueRef es_main = NULL;
         LLVMValueRef gs_prolog = NULL;
         LLVMValueRef gs_main = ctx.main_fn;

         /* GS prolog */
         union si_shader_part_key gs_prolog_key;
         memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
         gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
         gs_prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
         si_llvm_build_gs_prolog(&ctx, &gs_prolog_key);
         gs_prolog = ctx.main_fn;

         /* ES main part */
         nir = si_get_nir_shader(es, NULL, &free_nir);
         struct si_shader shader_es = {};
         shader_es.selector = es;
         shader_es.key.as_es = 1;
         shader_es.key.as_ngg = shader->key.as_ngg;
         shader_es.key.mono = shader->key.mono;
         shader_es.key.opt = shader->key.opt;
         shader_es.is_monolithic = true;

         if (!si_llvm_translate_nir(&ctx, &shader_es, nir, free_nir, false)) {
            si_llvm_dispose(&ctx);
            return false;
         }
         shader->info.uses_instanceid |= es->info.uses_instanceid;
         es_main = ctx.main_fn;

         /* ES prolog */
         if (es->info.stage == MESA_SHADER_VERTEX &&
             si_vs_needs_prolog(es, &shader->key.part.gs.vs_prolog, &shader->key, false)) {
            union si_shader_part_key vs_prolog_key;
            si_get_vs_prolog_key(&es->info, shader_es.info.num_input_sgprs, false,
                                 &shader->key.part.gs.vs_prolog, shader, &vs_prolog_key);
            vs_prolog_key.vs_prolog.is_monolithic = true;
            si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
            es_prolog = ctx.main_fn;
         }

         /* Reset the shader context. */
         ctx.shader = shader;
         ctx.stage = MESA_SHADER_GEOMETRY;

         /* Prepare the array of shader parts. */
         LLVMValueRef parts[4];
         unsigned num_parts = 0, main_part, next_first_part;

         if (es_prolog)
            parts[num_parts++] = es_prolog;

         parts[main_part = num_parts++] = es_main;
         parts[next_first_part = num_parts++] = gs_prolog;
         parts[num_parts++] = gs_main;

         si_build_wrapper_function(&ctx, parts, num_parts, main_part, next_first_part, false);
      } else {
         LLVMValueRef parts[2];
         union si_shader_part_key prolog_key;

         parts[1] = ctx.main_fn;

         memset(&prolog_key, 0, sizeof(prolog_key));
         prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
         si_llvm_build_gs_prolog(&ctx, &prolog_key);
         parts[0] = ctx.main_fn;

         si_build_wrapper_function(&ctx, parts, 2, 1, 0, false);
      }
   } else if (shader->is_monolithic && ctx.stage == MESA_SHADER_FRAGMENT) {
      si_llvm_build_monolithic_ps(&ctx, shader);
   }

   si_llvm_optimize_module(&ctx);

   /* Post-optimization transformations and analysis. */
   si_optimize_vs_outputs(&ctx);

   if ((debug && debug->debug_message) || si_can_dump_shader(sscreen, ctx.stage)) {
      ctx.shader->info.private_mem_vgprs = ac_count_scratch_private_memory(ctx.main_fn);
   }

   /* Make sure the input is a pointer and not integer followed by inttoptr. */
   if (!shader->key.opt.vs_as_prim_discard_cs)
      assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) == LLVMPointerTypeKind);

   /* Compile to bytecode. */
   if (!si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler, &ctx.ac, debug,
                        ctx.stage, si_get_shader_name(shader),
                        si_should_optimize_less(compiler, shader->selector))) {
      si_llvm_dispose(&ctx);
      fprintf(stderr, "LLVM failed to compile shader\n");
      return false;
   }

   si_llvm_dispose(&ctx);
   return true;
}