Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/compiler/glsl/gl_nir_link_uniform_blocks.c
4545 views
1
/*
2
* Copyright © 2019 Intel Corporation
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*/
23
24
#include "nir.h"
25
#include "gl_nir_linker.h"
26
#include "ir_uniform.h" /* for gl_uniform_storage */
27
#include "linker_util.h"
28
#include "main/mtypes.h"
29
30
/**
31
* This file contains code to do a nir-based linking for uniform blocks. This
32
* includes ubos and ssbos.
33
*
34
* For the case of ARB_gl_spirv there are some differences compared with GLSL:
35
*
36
* 1. Linking doesn't use names: GLSL linking use names as core concept. But
37
* on SPIR-V, uniform block name, fields names, and other names are
38
* considered optional debug information, so they might not be present. So the linking
39
* should work without it, and it is optional to not handle them at
40
* all. From ARB_gl_spirv spec.
41
*
42
* "19. How should the program interface query operations behave for program
43
* objects created from SPIR-V shaders?
44
*
45
* DISCUSSION: we previously said we didn't need reflection to work for
46
* SPIR-V shaders (at least for the first version), however we are left
47
* with specifying how it should "not work". The primary issue is that
48
* SPIR-V binaries are not required to have names associated with
49
* variables. They can be associated in debug information, but there is no
50
* requirement for that to be present, and it should not be relied upon.
51
*
52
* Options:
53
*
54
* <skip>
55
*
56
* C) Allow as much as possible to work "naturally". You can query for the
57
* number of active resources, and for details about them. Anything that
58
* doesn't query by name will work as expected. Queries for maximum length
59
* of names return one. Queries for anything "by name" return INVALID_INDEX
60
* (or -1). Querying the name property of a resource returns an empty
61
* string. This may allow many queries to work, but it's not clear how
62
* useful it would be if you can't actually know which specific variable
63
* you are retrieving information on. If everything is specified a-priori
64
* by location/binding/offset/index/component in the shader, this may be
65
* sufficient.
66
*
67
* RESOLVED. Pick (c), but also allow debug names to be returned if an
68
* implementation wants to."
69
*
70
* When linking SPIR-V shaders this implementation doesn't care about the names,
71
* as the main objective is functional, and not support optional debug
72
* features.
73
*
74
* 2. Terminology: this file handles both UBO and SSBO, including both as
75
* "uniform blocks" analogously to what is done in the GLSL (IR) path.
76
*
77
* From ARB_gl_spirv spec:
78
* "Mapping of Storage Classes:
79
* <skip>
80
* uniform blockN { ... } ...; -> Uniform, with Block decoration
81
* <skip>
82
* buffer blockN { ... } ...; -> Uniform, with BufferBlock decoration"
83
*
84
* 3. Explicit data: for the SPIR-V path the code assumes that all structure
85
* members have an Offset decoration, all arrays have an ArrayStride and
86
* all matrices have a MatrixStride, even for nested structures. That way
87
* we don’t have to worry about the different layout modes. This is
88
* explicitly required in the SPIR-V spec:
89
*
90
* "Composite objects in the UniformConstant, Uniform, and PushConstant
91
* Storage Classes must be explicitly laid out. The following apply to all
92
* the aggregate and matrix types describing such an object, recursively
93
* through their nested types:
94
*
95
* – Each structure-type member must have an Offset Decoration.
96
* – Each array type must have an ArrayStride Decoration.
97
* – Each structure-type member that is a matrix or array-of-matrices must
98
* have be decorated with a MatrixStride Decoration, and one of the
99
* RowMajor or ColMajor Decorations."
100
*
101
* Additionally, the structure members are expected to be presented in
102
* increasing offset order:
103
*
104
* "a structure has lower-numbered members appearing at smaller offsets than
105
* higher-numbered members"
106
*/
107
108
enum block_type {
109
BLOCK_UBO,
110
BLOCK_SSBO
111
};
112
113
/*
114
* It is worth noting that the ARB_gl_spirv spec doesn't require us to do this
* validation, but at the same time, it allows us to do it. The following
116
* validation is easy and a nice-to-have.
117
*/
118
static bool
119
link_blocks_are_compatible(const struct gl_uniform_block *a,
120
const struct gl_uniform_block *b)
121
{
122
/*
123
* Names on ARB_gl_spirv are optional, so we are ignoring them. So
124
* meanwhile on the equivalent GLSL method the matching is done using the
125
* name, here we use the binding, that for SPIR-V binaries is explicit, and
126
* mandatory, from OpenGL 4.6 spec, section "7.4.2. SPIR-V Shader Interface
127
* Matching":
128
* "Uniform and shader storage block variables must also be decorated
129
* with a Binding"
130
*/
131
if (a->Binding != b->Binding)
132
return false;
133
134
/* We are explicitly ignoring the names, so it would be good to check that
135
* this is happening.
136
*/
137
assert(a->Name == NULL);
138
assert(b->Name == NULL);
139
140
if (a->NumUniforms != b->NumUniforms)
141
return false;
142
143
if (a->_Packing != b->_Packing)
144
return false;
145
146
if (a->_RowMajor != b->_RowMajor)
147
return false;
148
149
for (unsigned i = 0; i < a->NumUniforms; i++) {
150
if (a->Uniforms[i].Type != b->Uniforms[i].Type)
151
return false;
152
153
if (a->Uniforms[i].RowMajor != b->Uniforms[i].RowMajor)
154
return false;
155
156
if (a->Uniforms[i].Offset != b->Uniforms[i].Offset)
157
return false;
158
159
/* See comment on previous assert */
160
assert(a->Uniforms[i].Name == NULL);
161
assert(b->Uniforms[i].Name == NULL);
162
}
163
164
return true;
165
}
166
167
/**
168
* Merges a buffer block into an array of buffer blocks that may or may not
169
* already contain a copy of it.
170
*
171
* Returns the index of the block in the array (new if it was needed, or the
172
* index of the copy of it). -1 if there are two incompatible block
173
* definitions with the same binding.
174
*
175
*/
176
static int
177
link_cross_validate_uniform_block(void *mem_ctx,
178
struct gl_uniform_block **linked_blocks,
179
unsigned int *num_linked_blocks,
180
struct gl_uniform_block *new_block)
181
{
182
/* We first check if new_block was already linked */
183
for (unsigned int i = 0; i < *num_linked_blocks; i++) {
184
struct gl_uniform_block *old_block = &(*linked_blocks)[i];
185
186
if (old_block->Binding == new_block->Binding)
187
return link_blocks_are_compatible(old_block, new_block) ? i : -1;
188
}
189
190
*linked_blocks = reralloc(mem_ctx, *linked_blocks,
191
struct gl_uniform_block,
192
*num_linked_blocks + 1);
193
int linked_block_index = (*num_linked_blocks)++;
194
struct gl_uniform_block *linked_block = &(*linked_blocks)[linked_block_index];
195
196
memcpy(linked_block, new_block, sizeof(*new_block));
197
linked_block->Uniforms = ralloc_array(*linked_blocks,
198
struct gl_uniform_buffer_variable,
199
linked_block->NumUniforms);
200
201
memcpy(linked_block->Uniforms,
202
new_block->Uniforms,
203
sizeof(*linked_block->Uniforms) * linked_block->NumUniforms);
204
205
return linked_block_index;
206
}
207
208
209
/**
210
* Accumulates the array of buffer blocks and checks that all definitions of
211
* blocks agree on their contents.
212
*/
213
static bool
214
nir_interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog,
215
enum block_type block_type)
216
{
217
int *interfaceBlockStageIndex[MESA_SHADER_STAGES];
218
struct gl_uniform_block *blks = NULL;
219
unsigned *num_blks = block_type == BLOCK_SSBO ? &prog->data->NumShaderStorageBlocks :
220
&prog->data->NumUniformBlocks;
221
222
unsigned max_num_buffer_blocks = 0;
223
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
224
if (prog->_LinkedShaders[i]) {
225
if (block_type == BLOCK_SSBO) {
226
max_num_buffer_blocks +=
227
prog->_LinkedShaders[i]->Program->info.num_ssbos;
228
} else {
229
max_num_buffer_blocks +=
230
prog->_LinkedShaders[i]->Program->info.num_ubos;
231
}
232
}
233
}
234
235
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
236
struct gl_linked_shader *sh = prog->_LinkedShaders[i];
237
238
interfaceBlockStageIndex[i] = malloc(max_num_buffer_blocks * sizeof(int));
239
for (unsigned int j = 0; j < max_num_buffer_blocks; j++)
240
interfaceBlockStageIndex[i][j] = -1;
241
242
if (sh == NULL)
243
continue;
244
245
unsigned sh_num_blocks;
246
struct gl_uniform_block **sh_blks;
247
if (block_type == BLOCK_SSBO) {
248
sh_num_blocks = prog->_LinkedShaders[i]->Program->info.num_ssbos;
249
sh_blks = sh->Program->sh.ShaderStorageBlocks;
250
} else {
251
sh_num_blocks = prog->_LinkedShaders[i]->Program->info.num_ubos;
252
sh_blks = sh->Program->sh.UniformBlocks;
253
}
254
255
for (unsigned int j = 0; j < sh_num_blocks; j++) {
256
int index = link_cross_validate_uniform_block(prog->data, &blks,
257
num_blks, sh_blks[j]);
258
259
if (index == -1) {
260
/* We use the binding as we are ignoring the names */
261
linker_error(prog, "buffer block with binding `%i' has mismatching "
262
"definitions\n", sh_blks[j]->Binding);
263
264
for (unsigned k = 0; k <= i; k++) {
265
free(interfaceBlockStageIndex[k]);
266
}
267
268
/* Reset the block count. This will help avoid various segfaults
269
* from api calls that assume the array exists due to the count
270
* being non-zero.
271
*/
272
*num_blks = 0;
273
return false;
274
}
275
276
interfaceBlockStageIndex[i][index] = j;
277
}
278
}
279
280
/* Update per stage block pointers to point to the program list.
281
*/
282
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
283
for (unsigned j = 0; j < *num_blks; j++) {
284
int stage_index = interfaceBlockStageIndex[i][j];
285
286
if (stage_index != -1) {
287
struct gl_linked_shader *sh = prog->_LinkedShaders[i];
288
289
struct gl_uniform_block **sh_blks = block_type == BLOCK_SSBO ?
290
sh->Program->sh.ShaderStorageBlocks :
291
sh->Program->sh.UniformBlocks;
292
293
blks[j].stageref |= sh_blks[stage_index]->stageref;
294
sh_blks[stage_index] = &blks[j];
295
}
296
}
297
}
298
299
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
300
free(interfaceBlockStageIndex[i]);
301
}
302
303
if (block_type == BLOCK_SSBO)
304
prog->data->ShaderStorageBlocks = blks;
305
else {
306
prog->data->NumUniformBlocks = *num_blks;
307
prog->data->UniformBlocks = blks;
308
}
309
310
return true;
311
}
312
313
/*
314
* Iterates over @type in order to compute how many individual leaf variables
* it contains.
316
*/
317
static void
318
iterate_type_count_variables(const struct glsl_type *type,
319
unsigned int *num_variables)
320
{
321
for (unsigned i = 0; i < glsl_get_length(type); i++) {
322
const struct glsl_type *field_type;
323
324
if (glsl_type_is_struct_or_ifc(type))
325
field_type = glsl_get_struct_field(type, i);
326
else
327
field_type = glsl_get_array_element(type);
328
329
if (glsl_type_is_leaf(field_type))
330
(*num_variables)++;
331
else
332
iterate_type_count_variables(field_type, num_variables);
333
}
334
}
335
336
337
static void
338
fill_individual_variable(const struct glsl_type *type,
339
struct gl_uniform_buffer_variable *variables,
340
unsigned int *variable_index,
341
unsigned int *offset,
342
struct gl_shader_program *prog,
343
struct gl_uniform_block *block)
344
{
345
/* ARB_gl_spirv: allowed to ignore names. Thus, we don't need to initialize
346
* the variable's Name or IndexName.
347
*/
348
variables[*variable_index].Type = type;
349
350
if (glsl_type_is_matrix(type)) {
351
variables[*variable_index].RowMajor = glsl_matrix_type_is_row_major(type);
352
} else {
353
/* default value, better that potential meaningless garbage */
354
variables[*variable_index].RowMajor = false;
355
}
356
357
/**
358
* Although ARB_gl_spirv points that the offsets need to be included (see
359
* "Mappings of layouts"), in the end those are only valid for
360
* root-variables, and we would need to recompute offsets when we iterate
361
* over non-trivial types, like aoa. So we compute the offset always.
362
*/
363
variables[*variable_index].Offset = *offset;
364
(*offset) += glsl_get_explicit_size(type, true);
365
366
(*variable_index)++;
367
}
368
369
static void
370
iterate_type_fill_variables(const struct glsl_type *type,
371
struct gl_uniform_buffer_variable *variables,
372
unsigned int *variable_index,
373
unsigned int *offset,
374
struct gl_shader_program *prog,
375
struct gl_uniform_block *block)
376
{
377
unsigned int struct_base_offset;
378
379
for (unsigned i = 0; i < glsl_get_length(type); i++) {
380
const struct glsl_type *field_type;
381
382
if (glsl_type_is_struct_or_ifc(type)) {
383
field_type = glsl_get_struct_field(type, i);
384
385
if (i == 0) {
386
struct_base_offset = *offset;
387
}
388
389
*offset = struct_base_offset + glsl_get_struct_field_offset(type, i);
390
} else {
391
field_type = glsl_get_array_element(type);
392
}
393
394
if (glsl_type_is_leaf(field_type)) {
395
fill_individual_variable(field_type, variables, variable_index,
396
offset, prog, block);
397
} else {
398
iterate_type_fill_variables(field_type, variables, variable_index,
399
offset, prog, block);
400
}
401
}
402
}
403
404
/*
405
* In contrast to the equivalent GLSL one, this one only allocates the needed
* space. We do an initial count here, just to avoid re-allocating for each one
407
* we find.
408
*/
409
static void
410
allocate_uniform_blocks(void *mem_ctx,
411
struct gl_linked_shader *shader,
412
struct gl_uniform_block **out_blks, unsigned *num_blocks,
413
struct gl_uniform_buffer_variable **out_variables,
414
unsigned *num_variables,
415
enum block_type block_type)
416
{
417
*num_variables = 0;
418
*num_blocks = 0;
419
420
nir_foreach_variable_in_shader(var, shader->Program->nir) {
421
if (block_type == BLOCK_UBO && !nir_variable_is_in_ubo(var))
422
continue;
423
424
if (block_type == BLOCK_SSBO && !nir_variable_is_in_ssbo(var))
425
continue;
426
427
const struct glsl_type *type = glsl_without_array(var->type);
428
unsigned aoa_size = glsl_get_aoa_size(var->type);
429
unsigned buffer_count = aoa_size == 0 ? 1 : aoa_size;
430
431
*num_blocks += buffer_count;
432
433
unsigned int block_variables = 0;
434
iterate_type_count_variables(type, &block_variables);
435
436
*num_variables += block_variables * buffer_count;
437
}
438
439
if (*num_blocks == 0) {
440
assert(*num_variables == 0);
441
return;
442
}
443
444
assert(*num_variables != 0);
445
446
struct gl_uniform_block *blocks =
447
rzalloc_array(mem_ctx, struct gl_uniform_block, *num_blocks);
448
449
struct gl_uniform_buffer_variable *variables =
450
rzalloc_array(blocks, struct gl_uniform_buffer_variable, *num_variables);
451
452
*out_blks = blocks;
453
*out_variables = variables;
454
}
455
456
static void
457
fill_block(struct gl_uniform_block *block,
458
nir_variable *var,
459
struct gl_uniform_buffer_variable *variables,
460
unsigned *variable_index,
461
unsigned array_index,
462
struct gl_shader_program *prog,
463
const gl_shader_stage stage)
464
{
465
const struct glsl_type *type = glsl_without_array(var->type);
466
467
block->Name = NULL; /* ARB_gl_spirv: allowed to ignore names */
468
/* From ARB_gl_spirv spec:
469
* "Vulkan uses only one binding point for a resource array,
470
* while OpenGL still uses multiple binding points, so binding
471
* numbers are counted differently for SPIR-V used in Vulkan
472
* and OpenGL
473
*/
474
block->Binding = var->data.binding + array_index;
475
block->Uniforms = &variables[*variable_index];
476
block->stageref = 1U << stage;
477
478
/* From SPIR-V 1.0 spec, 3.20, Decoration:
479
* "RowMajor
480
* Applies only to a member of a structure type.
481
* Only valid on a matrix or array whose most basic
482
* element is a matrix. Indicates that components
483
* within a row are contiguous in memory."
484
*
485
* So the SPIR-V binary doesn't report if the block was defined as RowMajor
486
* or not. In any case, for the components it is mandatory to set it, so it
487
* is not needed a default RowMajor value to know it.
488
*
489
* Setting to the default, but it should be ignored.
490
*/
491
block->_RowMajor = false;
492
493
/* From ARB_gl_spirv spec:
494
* "Mapping of layouts
495
*
496
* std140/std430 -> explicit *Offset*, *ArrayStride*, and
497
* *MatrixStride* Decoration on struct members
498
* shared/packed -> not allowed"
499
*
500
* So we would not have a value for _Packing, and in fact it would be
501
* useless so far. Using a default value. It should be ignored.
502
*/
503
block->_Packing = 0;
504
block->linearized_array_index = array_index;
505
506
unsigned old_variable_index = *variable_index;
507
unsigned offset = 0;
508
iterate_type_fill_variables(type, variables, variable_index, &offset, prog, block);
509
block->NumUniforms = *variable_index - old_variable_index;
510
511
block->UniformBufferSize = glsl_get_explicit_size(type, false);
512
513
/* From OpenGL 4.6 spec, section 7.6.2.3, "SPIR-V Uniform Offsets and
514
* strides"
515
*
516
* "If the variable is decorated as a BufferBlock , its offsets and
517
* strides must not contradict std430 alignment and minimum offset
518
* requirements. Otherwise, its offsets and strides must not contradict
519
* std140 alignment and minimum offset requirements."
520
*
521
* So although we are computing the size based on the offsets and
522
* array/matrix strides, at the end we need to ensure that the alignment is
523
* the same that with std140. From ARB_uniform_buffer_object spec:
524
*
525
* "For uniform blocks laid out according to [std140] rules, the minimum
526
* buffer object size returned by the UNIFORM_BLOCK_DATA_SIZE query is
527
* derived by taking the offset of the last basic machine unit consumed
528
* by the last uniform of the uniform block (including any end-of-array
529
* or end-of-structure padding), adding one, and rounding up to the next
530
* multiple of the base alignment required for a vec4."
531
*/
532
block->UniformBufferSize = glsl_align(block->UniformBufferSize, 16);
533
}
534
535
/*
536
* Link ubos/ssbos for a given linked_shader/stage.
537
*/
538
static void
539
link_linked_shader_uniform_blocks(void *mem_ctx,
540
struct gl_context *ctx,
541
struct gl_shader_program *prog,
542
struct gl_linked_shader *shader,
543
struct gl_uniform_block **blocks,
544
unsigned *num_blocks,
545
enum block_type block_type)
546
{
547
struct gl_uniform_buffer_variable *variables = NULL;
548
unsigned num_variables = 0;
549
550
allocate_uniform_blocks(mem_ctx, shader,
551
blocks, num_blocks,
552
&variables, &num_variables,
553
block_type);
554
555
/* Fill the content of uniforms and variables */
556
unsigned block_index = 0;
557
unsigned variable_index = 0;
558
struct gl_uniform_block *blks = *blocks;
559
560
nir_foreach_variable_in_shader(var, shader->Program->nir) {
561
if (block_type == BLOCK_UBO && !nir_variable_is_in_ubo(var))
562
continue;
563
564
if (block_type == BLOCK_SSBO && !nir_variable_is_in_ssbo(var))
565
continue;
566
567
unsigned aoa_size = glsl_get_aoa_size(var->type);
568
unsigned buffer_count = aoa_size == 0 ? 1 : aoa_size;
569
570
for (unsigned array_index = 0; array_index < buffer_count; array_index++) {
571
fill_block(&blks[block_index], var, variables, &variable_index,
572
array_index, prog, shader->Stage);
573
block_index++;
574
}
575
}
576
577
assert(block_index == *num_blocks);
578
assert(variable_index == num_variables);
579
}
580
581
bool
582
gl_nir_link_uniform_blocks(struct gl_context *ctx,
583
struct gl_shader_program *prog)
584
{
585
void *mem_ctx = ralloc_context(NULL);
586
bool ret = false;
587
for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
588
struct gl_linked_shader *const linked = prog->_LinkedShaders[stage];
589
struct gl_uniform_block *ubo_blocks = NULL;
590
unsigned num_ubo_blocks = 0;
591
struct gl_uniform_block *ssbo_blocks = NULL;
592
unsigned num_ssbo_blocks = 0;
593
594
if (!linked)
595
continue;
596
597
link_linked_shader_uniform_blocks(mem_ctx, ctx, prog, linked,
598
&ubo_blocks, &num_ubo_blocks,
599
BLOCK_UBO);
600
601
link_linked_shader_uniform_blocks(mem_ctx, ctx, prog, linked,
602
&ssbo_blocks, &num_ssbo_blocks,
603
BLOCK_SSBO);
604
605
if (!prog->data->LinkStatus) {
606
goto out;
607
}
608
609
prog->data->linked_stages |= 1 << stage;
610
611
/* Copy ubo blocks to linked shader list */
612
linked->Program->sh.UniformBlocks =
613
ralloc_array(linked, struct gl_uniform_block *, num_ubo_blocks);
614
ralloc_steal(linked, ubo_blocks);
615
linked->Program->sh.NumUniformBlocks = num_ubo_blocks;
616
for (unsigned i = 0; i < num_ubo_blocks; i++) {
617
linked->Program->sh.UniformBlocks[i] = &ubo_blocks[i];
618
}
619
620
/* We need to set it twice to avoid the value being overwritten by the
621
* one from nir in brw_shader_gather_info. TODO: get a way to set the
622
* info once, and being able to gather properly the info.
623
*/
624
linked->Program->nir->info.num_ubos = num_ubo_blocks;
625
linked->Program->info.num_ubos = num_ubo_blocks;
626
627
/* Copy ssbo blocks to linked shader list */
628
linked->Program->sh.ShaderStorageBlocks =
629
ralloc_array(linked, struct gl_uniform_block *, num_ssbo_blocks);
630
ralloc_steal(linked, ssbo_blocks);
631
for (unsigned i = 0; i < num_ssbo_blocks; i++) {
632
linked->Program->sh.ShaderStorageBlocks[i] = &ssbo_blocks[i];
633
}
634
635
/* See previous comment on num_ubo_blocks */
636
linked->Program->nir->info.num_ssbos = num_ssbo_blocks;
637
linked->Program->info.num_ssbos = num_ssbo_blocks;
638
}
639
640
if (!nir_interstage_cross_validate_uniform_blocks(prog, BLOCK_UBO))
641
goto out;
642
643
if (!nir_interstage_cross_validate_uniform_blocks(prog, BLOCK_SSBO))
644
goto out;
645
646
ret = true;
647
out:
648
ralloc_free(mem_ctx);
649
return ret;
650
}
651
652