GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/frontends/lavapipe/lvp_pipeline.c
/*
 * Copyright © 2019 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "lvp_private.h"
#include "vk_util.h"
#include "glsl_types.h"
#include "spirv/nir_spirv.h"
#include "nir/nir_builder.h"
#include "lvp_lower_vulkan_resource.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "nir/nir_xfb_info.h"

#define SPIR_V_MAGIC_NUMBER 0x07230203

#define LVP_PIPELINE_DUP(dst, src, type, count) do { \
   type *temp = ralloc_array(mem_ctx, type, count); \
   if (!temp) return VK_ERROR_OUT_OF_HOST_MEMORY; \
   memcpy(temp, (src), sizeof(type) * count); \
   dst = temp; \
} while(0)

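/* Editor's note: descriptive comment added. Destroying a pipeline releases
 * the per-stage gallium CSO handles that were created for it, then frees the
 * pipeline's ralloc context and the pipeline object itself.
 */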
VKAPI_ATTR void VKAPI_CALL lvp_DestroyPipeline(
   VkDevice _device,
   VkPipeline _pipeline,
   const VkAllocationCallbacks* pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_pipeline, pipeline, _pipeline);

   if (!_pipeline)
      return;

   if (pipeline->shader_cso[PIPE_SHADER_VERTEX])
      device->queue.ctx->delete_vs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_VERTEX]);
   if (pipeline->shader_cso[PIPE_SHADER_FRAGMENT])
      device->queue.ctx->delete_fs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
   if (pipeline->shader_cso[PIPE_SHADER_GEOMETRY])
      device->queue.ctx->delete_gs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_GEOMETRY]);
   if (pipeline->shader_cso[PIPE_SHADER_TESS_CTRL])
      device->queue.ctx->delete_tcs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_TESS_CTRL]);
   if (pipeline->shader_cso[PIPE_SHADER_TESS_EVAL])
      device->queue.ctx->delete_tes_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_TESS_EVAL]);
   if (pipeline->shader_cso[PIPE_SHADER_COMPUTE])
      device->queue.ctx->delete_compute_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_COMPUTE]);

   ralloc_free(pipeline->mem_ctx);
   vk_object_base_finish(&pipeline->base);
   vk_free2(&device->vk.alloc, pAllocator, pipeline);
}

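/* Editor's note: descriptive comment added. The deep_copy_* helpers below
 * clone the application's create-info structures into the pipeline's mem_ctx
 * so the driver can keep referring to them after pipeline creation returns.
 * LVP_PIPELINE_DUP performs the array copies and returns
 * VK_ERROR_OUT_OF_HOST_MEMORY on allocation failure.
 */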
static VkResult
deep_copy_shader_stage(void *mem_ctx,
                       struct VkPipelineShaderStageCreateInfo *dst,
                       const struct VkPipelineShaderStageCreateInfo *src)
{
   dst->sType = src->sType;
   dst->pNext = NULL;
   dst->flags = src->flags;
   dst->stage = src->stage;
   dst->module = src->module;
   dst->pName = src->pName;
   dst->pSpecializationInfo = NULL;
   if (src->pSpecializationInfo) {
      const VkSpecializationInfo *src_spec = src->pSpecializationInfo;
      VkSpecializationInfo *dst_spec = ralloc_size(mem_ctx, sizeof(VkSpecializationInfo) +
                                                   src_spec->mapEntryCount * sizeof(VkSpecializationMapEntry) +
                                                   src_spec->dataSize);
      VkSpecializationMapEntry *maps = (VkSpecializationMapEntry *)(dst_spec + 1);
      dst_spec->pMapEntries = maps;
      void *pdata = (void *)(dst_spec->pMapEntries + src_spec->mapEntryCount);
      dst_spec->pData = pdata;

      dst_spec->mapEntryCount = src_spec->mapEntryCount;
      dst_spec->dataSize = src_spec->dataSize;
      memcpy(pdata, src_spec->pData, src->pSpecializationInfo->dataSize);
      memcpy(maps, src_spec->pMapEntries, src_spec->mapEntryCount * sizeof(VkSpecializationMapEntry));
      dst->pSpecializationInfo = dst_spec;
   }
   return VK_SUCCESS;
}

static VkResult
deep_copy_vertex_input_state(void *mem_ctx,
                             struct VkPipelineVertexInputStateCreateInfo *dst,
                             const struct VkPipelineVertexInputStateCreateInfo *src)
{
   dst->sType = src->sType;
   dst->pNext = NULL;
   dst->flags = src->flags;
   dst->vertexBindingDescriptionCount = src->vertexBindingDescriptionCount;

   LVP_PIPELINE_DUP(dst->pVertexBindingDescriptions,
                    src->pVertexBindingDescriptions,
                    VkVertexInputBindingDescription,
                    src->vertexBindingDescriptionCount);

   dst->vertexAttributeDescriptionCount = src->vertexAttributeDescriptionCount;

   LVP_PIPELINE_DUP(dst->pVertexAttributeDescriptions,
                    src->pVertexAttributeDescriptions,
                    VkVertexInputAttributeDescription,
                    src->vertexAttributeDescriptionCount);

   if (src->pNext) {
      vk_foreach_struct(ext, src->pNext) {
         switch (ext->sType) {
         case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT: {
            VkPipelineVertexInputDivisorStateCreateInfoEXT *ext_src = (VkPipelineVertexInputDivisorStateCreateInfoEXT *)ext;
            VkPipelineVertexInputDivisorStateCreateInfoEXT *ext_dst = ralloc(mem_ctx, VkPipelineVertexInputDivisorStateCreateInfoEXT);

            ext_dst->sType = ext_src->sType;
            ext_dst->vertexBindingDivisorCount = ext_src->vertexBindingDivisorCount;

            LVP_PIPELINE_DUP(ext_dst->pVertexBindingDivisors,
                             ext_src->pVertexBindingDivisors,
                             VkVertexInputBindingDivisorDescriptionEXT,
                             ext_src->vertexBindingDivisorCount);

            dst->pNext = ext_dst;
            break;
         }
         default:
            break;
         }
      }
   }
   return VK_SUCCESS;
}

static bool
dynamic_state_contains(const VkPipelineDynamicStateCreateInfo *src, VkDynamicState state)
{
   if (!src)
      return false;

   for (unsigned i = 0; i < src->dynamicStateCount; i++)
      if (src->pDynamicStates[i] == state)
         return true;
   return false;
}

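/* Editor's note: descriptive comment added. Viewports and scissors are only
 * copied when they are not dynamic state; with the *_WITH_COUNT dynamic
 * states even the counts come from the command buffer, so they are zeroed.
 */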
static VkResult
deep_copy_viewport_state(void *mem_ctx,
                         const VkPipelineDynamicStateCreateInfo *dyn_state,
                         VkPipelineViewportStateCreateInfo *dst,
                         const VkPipelineViewportStateCreateInfo *src)
{
   dst->sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
   dst->pNext = NULL;
   dst->pViewports = NULL;
   dst->pScissors = NULL;

   if (!dynamic_state_contains(dyn_state, VK_DYNAMIC_STATE_VIEWPORT) &&
       !dynamic_state_contains(dyn_state, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT)) {
      LVP_PIPELINE_DUP(dst->pViewports,
                       src->pViewports,
                       VkViewport,
                       src->viewportCount);
   }
   if (!dynamic_state_contains(dyn_state, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT))
      dst->viewportCount = src->viewportCount;
   else
      dst->viewportCount = 0;

   if (!dynamic_state_contains(dyn_state, VK_DYNAMIC_STATE_SCISSOR) &&
       !dynamic_state_contains(dyn_state, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT)) {
      if (src->pScissors)
         LVP_PIPELINE_DUP(dst->pScissors,
                          src->pScissors,
                          VkRect2D,
                          src->scissorCount);
   }
   if (!dynamic_state_contains(dyn_state, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT))
      dst->scissorCount = src->scissorCount;
   else
      dst->scissorCount = 0;

   return VK_SUCCESS;
}

static VkResult
deep_copy_color_blend_state(void *mem_ctx,
                            VkPipelineColorBlendStateCreateInfo *dst,
                            const VkPipelineColorBlendStateCreateInfo *src)
{
   dst->sType = src->sType;
   dst->pNext = NULL;
   dst->flags = src->flags;
   dst->logicOpEnable = src->logicOpEnable;
   dst->logicOp = src->logicOp;

   LVP_PIPELINE_DUP(dst->pAttachments,
                    src->pAttachments,
                    VkPipelineColorBlendAttachmentState,
                    src->attachmentCount);
   dst->attachmentCount = src->attachmentCount;

   memcpy(&dst->blendConstants, &src->blendConstants, sizeof(float) * 4);

   return VK_SUCCESS;
}

static VkResult
deep_copy_dynamic_state(void *mem_ctx,
                        VkPipelineDynamicStateCreateInfo *dst,
                        const VkPipelineDynamicStateCreateInfo *src)
{
   dst->sType = src->sType;
   dst->pNext = NULL;
   dst->flags = src->flags;

   LVP_PIPELINE_DUP(dst->pDynamicStates,
                    src->pDynamicStates,
                    VkDynamicState,
                    src->dynamicStateCount);
   dst->dynamicStateCount = src->dynamicStateCount;
   return VK_SUCCESS;
}

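/* Editor's note: descriptive comment added. Clones an entire
 * VkGraphicsPipelineCreateInfo, skipping sub-states that do not apply: for
 * example viewport/multisample/depth-stencil/color-blend state when
 * rasterization is discarded, or tessellation state when both tessellation
 * stages are not present.
 */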
static VkResult
deep_copy_graphics_create_info(void *mem_ctx,
                               VkGraphicsPipelineCreateInfo *dst,
                               const VkGraphicsPipelineCreateInfo *src)
{
   int i;
   VkResult result;
   VkPipelineShaderStageCreateInfo *stages;
   VkPipelineVertexInputStateCreateInfo *vertex_input;
   LVP_FROM_HANDLE(lvp_render_pass, pass, src->renderPass);

   dst->sType = src->sType;
   dst->pNext = NULL;
   dst->flags = src->flags;
   dst->layout = src->layout;
   dst->renderPass = src->renderPass;
   dst->subpass = src->subpass;
   dst->basePipelineHandle = src->basePipelineHandle;
   dst->basePipelineIndex = src->basePipelineIndex;

   /* pStages */
   VkShaderStageFlags stages_present = 0;
   dst->stageCount = src->stageCount;
   stages = ralloc_array(mem_ctx, VkPipelineShaderStageCreateInfo, dst->stageCount);
   for (i = 0 ; i < dst->stageCount; i++) {
      result = deep_copy_shader_stage(mem_ctx, &stages[i], &src->pStages[i]);
      if (result != VK_SUCCESS)
         return result;
      stages_present |= src->pStages[i].stage;
   }
   dst->pStages = stages;

   /* pVertexInputState */
   if (!dynamic_state_contains(src->pDynamicState, VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)) {
      vertex_input = ralloc(mem_ctx, VkPipelineVertexInputStateCreateInfo);
      result = deep_copy_vertex_input_state(mem_ctx, vertex_input,
                                            src->pVertexInputState);
      if (result != VK_SUCCESS)
         return result;
      dst->pVertexInputState = vertex_input;
   } else
      dst->pVertexInputState = NULL;

   /* pInputAssemblyState */
   LVP_PIPELINE_DUP(dst->pInputAssemblyState,
                    src->pInputAssemblyState,
                    VkPipelineInputAssemblyStateCreateInfo,
                    1);

   /* pTessellationState */
   if (src->pTessellationState &&
       (stages_present & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) ==
       (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
      LVP_PIPELINE_DUP(dst->pTessellationState,
                       src->pTessellationState,
                       VkPipelineTessellationStateCreateInfo,
                       1);
   }

   /* pViewportState */
   bool rasterization_disabled = !dynamic_state_contains(src->pDynamicState, VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT) &&
                                 src->pRasterizationState->rasterizerDiscardEnable;
   if (src->pViewportState && !rasterization_disabled) {
      VkPipelineViewportStateCreateInfo *viewport_state;
      viewport_state = ralloc(mem_ctx, VkPipelineViewportStateCreateInfo);
      if (!viewport_state)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      deep_copy_viewport_state(mem_ctx, src->pDynamicState,
                               viewport_state, src->pViewportState);
      dst->pViewportState = viewport_state;
   } else
      dst->pViewportState = NULL;

   /* pRasterizationState */
   LVP_PIPELINE_DUP(dst->pRasterizationState,
                    src->pRasterizationState,
                    VkPipelineRasterizationStateCreateInfo,
                    1);

   /* pMultisampleState */
   if (src->pMultisampleState && !rasterization_disabled) {
      VkPipelineMultisampleStateCreateInfo* ms_state;
      ms_state = ralloc_size(mem_ctx, sizeof(VkPipelineMultisampleStateCreateInfo) + sizeof(VkSampleMask));
      if (!ms_state)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      /* does samplemask need deep copy? */
      memcpy(ms_state, src->pMultisampleState, sizeof(VkPipelineMultisampleStateCreateInfo));
      if (src->pMultisampleState->pSampleMask) {
         VkSampleMask *sample_mask = (VkSampleMask *)(ms_state + 1);
         sample_mask[0] = src->pMultisampleState->pSampleMask[0];
         ms_state->pSampleMask = sample_mask;
      }
      dst->pMultisampleState = ms_state;
   } else
      dst->pMultisampleState = NULL;

   /* pDepthStencilState */
   if (src->pDepthStencilState && !rasterization_disabled && pass->has_zs_attachment) {
      LVP_PIPELINE_DUP(dst->pDepthStencilState,
                       src->pDepthStencilState,
                       VkPipelineDepthStencilStateCreateInfo,
                       1);
   } else
      dst->pDepthStencilState = NULL;

   /* pColorBlendState */
   if (src->pColorBlendState && !rasterization_disabled && pass->has_color_attachment) {
      VkPipelineColorBlendStateCreateInfo* cb_state;

      cb_state = ralloc(mem_ctx, VkPipelineColorBlendStateCreateInfo);
      if (!cb_state)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      deep_copy_color_blend_state(mem_ctx, cb_state, src->pColorBlendState);
      dst->pColorBlendState = cb_state;
   } else
      dst->pColorBlendState = NULL;

   if (src->pDynamicState) {
      VkPipelineDynamicStateCreateInfo* dyn_state;

      /* pDynamicState */
      dyn_state = ralloc(mem_ctx, VkPipelineDynamicStateCreateInfo);
      if (!dyn_state)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      deep_copy_dynamic_state(mem_ctx, dyn_state, src->pDynamicState);
      dst->pDynamicState = dyn_state;
   } else
      dst->pDynamicState = NULL;

   return VK_SUCCESS;
}

static VkResult
deep_copy_compute_create_info(void *mem_ctx,
                              VkComputePipelineCreateInfo *dst,
                              const VkComputePipelineCreateInfo *src)
{
   VkResult result;
   dst->sType = src->sType;
   dst->pNext = NULL;
   dst->flags = src->flags;
   dst->layout = src->layout;
   dst->basePipelineHandle = src->basePipelineHandle;
   dst->basePipelineIndex = src->basePipelineIndex;

   result = deep_copy_shader_stage(mem_ctx, &dst->stage, &src->stage);
   if (result != VK_SUCCESS)
      return result;
   return VK_SUCCESS;
}

static inline unsigned
st_shader_stage_to_ptarget(gl_shader_stage stage)
{
   switch (stage) {
   case MESA_SHADER_VERTEX:
      return PIPE_SHADER_VERTEX;
   case MESA_SHADER_FRAGMENT:
      return PIPE_SHADER_FRAGMENT;
   case MESA_SHADER_GEOMETRY:
      return PIPE_SHADER_GEOMETRY;
   case MESA_SHADER_TESS_CTRL:
      return PIPE_SHADER_TESS_CTRL;
   case MESA_SHADER_TESS_EVAL:
      return PIPE_SHADER_TESS_EVAL;
   case MESA_SHADER_COMPUTE:
      return PIPE_SHADER_COMPUTE;
   default:
      break;
   }

   assert(!"should not be reached");
   return PIPE_SHADER_VERTEX;
}

static void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length,
   *align = comp_size;
}

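/* Editor's note: descriptive comment added. Translates one shader stage from
 * SPIR-V to NIR: resolves specialization constants, runs spirv_to_nir with
 * the capabilities configured below, then lowers and optimizes the result
 * until it is ready for the gallium backend. The finished NIR is stored in
 * pipeline->pipeline_nir[stage].
 */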
static void
lvp_shader_compile_to_ir(struct lvp_pipeline *pipeline,
                         struct vk_shader_module *module,
                         const char *entrypoint_name,
                         gl_shader_stage stage,
                         const VkSpecializationInfo *spec_info)
{
   nir_shader *nir;
   const nir_shader_compiler_options *drv_options = pipeline->device->pscreen->get_compiler_options(pipeline->device->pscreen, PIPE_SHADER_IR_NIR, st_shader_stage_to_ptarget(stage));
   bool progress;
   uint32_t *spirv = (uint32_t *) module->data;
   assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
   assert(module->size % 4 == 0);

   uint32_t num_spec_entries = 0;
   struct nir_spirv_specialization *spec_entries = NULL;
   if (spec_info && spec_info->mapEntryCount > 0) {
      num_spec_entries = spec_info->mapEntryCount;
      spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
      for (uint32_t i = 0; i < num_spec_entries; i++) {
         VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
         const void *data =
            (char *)spec_info->pData + entry.offset;
         assert((const char *)((char *)data + entry.size) <=
                (char *)spec_info->pData + spec_info->dataSize);

         spec_entries[i].id = entry.constantID;
         switch (entry.size) {
         case 8:
            spec_entries[i].value.u64 = *(const uint64_t *)data;
            break;
         case 4:
            spec_entries[i].value.u32 = *(const uint32_t *)data;
            break;
         case 2:
            spec_entries[i].value.u16 = *(const uint16_t *)data;
            break;
         case 1:
            spec_entries[i].value.u8 = *(const uint8_t *)data;
            break;
         default:
            assert(!"Invalid spec constant size");
            break;
         }
      }
   }
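   /* Editor's note: descriptive comment added. Capabilities and address
    * formats handed to spirv_to_nir; float64/int64 support is taken from the
    * underlying pipe_screen caps.
    */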
   struct lvp_device *pdevice = pipeline->device;
   const struct spirv_to_nir_options spirv_options = {
      .environment = NIR_SPIRV_VULKAN,
      .caps = {
         .float64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DOUBLES) == 1),
         .int16 = true,
         .int64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_INT64) == 1),
         .tessellation = true,
         .image_ms_array = true,
         .image_read_without_format = true,
         .image_write_without_format = true,
         .storage_image_ms = true,
         .geometry_streams = true,
         .storage_8bit = true,
         .storage_16bit = true,
         .variable_pointers = true,
         .stencil_export = true,
         .post_depth_coverage = true,
         .transform_feedback = true,
         .device_group = true,
         .draw_parameters = true,
         .shader_viewport_index_layer = true,
         .multiview = true,
         .physical_storage_buffer_address = true,
         .int64_atomics = true,
         .subgroup_arithmetic = true,
         .subgroup_basic = true,
         .subgroup_ballot = true,
         .subgroup_quad = true,
         .subgroup_vote = true,
      },
      .ubo_addr_format = nir_address_format_32bit_index_offset,
      .ssbo_addr_format = nir_address_format_32bit_index_offset,
      .phys_ssbo_addr_format = nir_address_format_64bit_global,
      .push_const_addr_format = nir_address_format_logical,
      .shared_addr_format = nir_address_format_32bit_offset,
      .frag_coord_is_sysval = false,
   };

   nir = spirv_to_nir(spirv, module->size / 4,
                      spec_entries, num_spec_entries,
                      stage, entrypoint_name, &spirv_options, drv_options);

   if (!nir) {
      free(spec_entries);
      return;
   }
   nir_validate_shader(nir, NULL);

   free(spec_entries);

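   /* Editor's note: descriptive comment added. Lowering and cleanup passes:
    * inline everything into the single entrypoint, lower I/O and memory
    * accesses to explicit address formats, then run the usual NIR
    * optimization loop until it stops making progress.
    */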
   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_copy_prop);
   NIR_PASS_V(nir, nir_opt_deref);

   /* Pick off the single entrypoint that we want */
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!func->is_entrypoint)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);

   NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value, NULL);

   if (stage == MESA_SHADER_FRAGMENT)
      lvp_lower_input_attachments(nir, false);
   NIR_PASS_V(nir, nir_lower_system_values);
   NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);

   NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_uniform, NULL);

   lvp_lower_pipeline_layout(pipeline->device, pipeline->layout, nir);

   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_global_vars_to_local);

   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_push_const,
              nir_address_format_32bit_offset);

   NIR_PASS_V(nir, nir_lower_explicit_io,
              nir_var_mem_ubo | nir_var_mem_ssbo,
              nir_address_format_32bit_index_offset);

   NIR_PASS_V(nir, nir_lower_explicit_io,
              nir_var_mem_global,
              nir_address_format_64bit_global);

   if (nir->info.stage == MESA_SHADER_COMPUTE) {
      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared, shared_var_info);
      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared, nir_address_format_32bit_offset);
   }

   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_shader_temp, NULL);

   if (nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_GEOMETRY) {
      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
   }

   do {
      progress = false;

      NIR_PASS(progress, nir, nir_lower_flrp, 32|64, true);
      NIR_PASS(progress, nir, nir_split_array_vars, nir_var_function_temp);
      NIR_PASS(progress, nir, nir_shrink_vec_array_vars, nir_var_function_temp);
      NIR_PASS(progress, nir, nir_opt_deref);
      NIR_PASS(progress, nir, nir_lower_vars_to_ssa);

      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_dce);
      NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);

      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);

      NIR_PASS(progress, nir, nir_opt_remove_phis);
      bool trivial_continues = false;
      NIR_PASS(trivial_continues, nir, nir_opt_trivial_continues);
      progress |= trivial_continues;
      if (trivial_continues) {
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         NIR_PASS(progress, nir, nir_copy_prop);
         NIR_PASS(progress, nir, nir_opt_dce);
         NIR_PASS(progress, nir, nir_opt_remove_phis);
      }
      NIR_PASS(progress, nir, nir_opt_if, true);
      NIR_PASS(progress, nir, nir_opt_dead_cf);
      NIR_PASS(progress, nir, nir_opt_conditional_discard);
      NIR_PASS(progress, nir, nir_opt_remove_phis);
      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_undef);

      NIR_PASS(progress, nir, nir_opt_deref);
      NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
   } while (progress);

   NIR_PASS_V(nir, nir_lower_var_copies);
   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
   NIR_PASS_V(nir, nir_opt_dce);
   nir_sweep(nir);

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   if (nir->info.stage != MESA_SHADER_VERTEX)
      nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs, nir->info.stage);
   else {
      nir->num_inputs = util_last_bit64(nir->info.inputs_read);
      nir_foreach_shader_in_variable(var, nir) {
         var->data.driver_location = var->data.location - VERT_ATTRIB_GENERIC0;
      }
   }
   nir_assign_io_var_locations(nir, nir_var_shader_out, &nir->num_outputs,
                               nir->info.stage);
   pipeline->pipeline_nir[stage] = nir;
}

static void fill_shader_prog(struct pipe_shader_state *state, gl_shader_stage stage, struct lvp_pipeline *pipeline)
{
   state->type = PIPE_SHADER_IR_NIR;
   state->ir.nir = pipeline->pipeline_nir[stage];
}

static void
merge_tess_info(struct shader_info *tes_info,
                const struct shader_info *tcs_info)
{
   /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
    *
    *    "PointMode. Controls generation of points rather than triangles
    *     or lines. This functionality defaults to disabled, and is
    *     enabled if either shader stage includes the execution mode.
    *
    * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
    * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
    * and OutputVertices, it says:
    *
    *    "One mode must be set in at least one of the tessellation
    *     shader stages."
    *
    * So, the fields can be set in either the TCS or TES, but they must
    * agree if set in both. Our backend looks at TES, so bitwise-or in
    * the values from the TCS.
    */
   assert(tcs_info->tess.tcs_vertices_out == 0 ||
          tes_info->tess.tcs_vertices_out == 0 ||
          tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
   tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;

   assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
          tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
          tcs_info->tess.spacing == tes_info->tess.spacing);
   tes_info->tess.spacing |= tcs_info->tess.spacing;

   assert(tcs_info->tess.primitive_mode == 0 ||
          tes_info->tess.primitive_mode == 0 ||
          tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
   tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
   tes_info->tess.ccw |= tcs_info->tess.ccw;
   tes_info->tess.point_mode |= tcs_info->tess.point_mode;
}

static gl_shader_stage
lvp_shader_stage(VkShaderStageFlagBits stage)
{
   switch (stage) {
   case VK_SHADER_STAGE_VERTEX_BIT:
      return MESA_SHADER_VERTEX;
   case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      return MESA_SHADER_TESS_CTRL;
   case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
      return MESA_SHADER_TESS_EVAL;
   case VK_SHADER_STAGE_GEOMETRY_BIT:
      return MESA_SHADER_GEOMETRY;
   case VK_SHADER_STAGE_FRAGMENT_BIT:
      return MESA_SHADER_FRAGMENT;
   case VK_SHADER_STAGE_COMPUTE_BIT:
      return MESA_SHADER_COMPUTE;
   default:
      unreachable("invalid VkShaderStageFlagBits");
      return MESA_SHADER_NONE;
   }
}

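/* Editor's note: descriptive comment added. Turns the stage's NIR into a
 * gallium CSO via the device's pipe_context, filling in stream-output
 * (transform feedback) info for VS/GS/TES first.
 */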
static VkResult
lvp_pipeline_compile(struct lvp_pipeline *pipeline,
                     gl_shader_stage stage)
{
   struct lvp_device *device = pipeline->device;
   device->physical_device->pscreen->finalize_nir(device->physical_device->pscreen, pipeline->pipeline_nir[stage], true);
   if (stage == MESA_SHADER_COMPUTE) {
      struct pipe_compute_state shstate = {0};
      shstate.prog = (void *)pipeline->pipeline_nir[MESA_SHADER_COMPUTE];
      shstate.ir_type = PIPE_SHADER_IR_NIR;
      shstate.req_local_mem = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.shared_size;
      pipeline->shader_cso[PIPE_SHADER_COMPUTE] = device->queue.ctx->create_compute_state(device->queue.ctx, &shstate);
   } else {
      struct pipe_shader_state shstate = {0};
      fill_shader_prog(&shstate, stage, pipeline);

      if (stage == MESA_SHADER_VERTEX ||
          stage == MESA_SHADER_GEOMETRY ||
          stage == MESA_SHADER_TESS_EVAL) {
         nir_xfb_info *xfb_info = nir_gather_xfb_info(pipeline->pipeline_nir[stage], NULL);
         if (xfb_info) {
            uint8_t output_mapping[VARYING_SLOT_TESS_MAX];
            memset(output_mapping, 0, sizeof(output_mapping));

            nir_foreach_shader_out_variable(var, pipeline->pipeline_nir[stage]) {
               unsigned slots = var->data.compact ? DIV_ROUND_UP(glsl_get_length(var->type), 4)
                                                  : glsl_count_attribute_slots(var->type, false);
               for (unsigned i = 0; i < slots; i++)
                  output_mapping[var->data.location + i] = var->data.driver_location + i;
            }

            shstate.stream_output.num_outputs = xfb_info->output_count;
            for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
               if (xfb_info->buffers_written & (1 << i)) {
                  shstate.stream_output.stride[i] = xfb_info->buffers[i].stride / 4;
               }
            }
            for (unsigned i = 0; i < xfb_info->output_count; i++) {
               shstate.stream_output.output[i].output_buffer = xfb_info->outputs[i].buffer;
               shstate.stream_output.output[i].dst_offset = xfb_info->outputs[i].offset / 4;
               shstate.stream_output.output[i].register_index = output_mapping[xfb_info->outputs[i].location];
               shstate.stream_output.output[i].num_components = util_bitcount(xfb_info->outputs[i].component_mask);
               shstate.stream_output.output[i].start_component = ffs(xfb_info->outputs[i].component_mask) - 1;
               shstate.stream_output.output[i].stream = xfb_info->buffer_to_stream[xfb_info->outputs[i].buffer];
            }

            ralloc_free(xfb_info);
         }
      }

      switch (stage) {
      case MESA_SHADER_FRAGMENT:
         pipeline->shader_cso[PIPE_SHADER_FRAGMENT] = device->queue.ctx->create_fs_state(device->queue.ctx, &shstate);
         break;
      case MESA_SHADER_VERTEX:
         pipeline->shader_cso[PIPE_SHADER_VERTEX] = device->queue.ctx->create_vs_state(device->queue.ctx, &shstate);
         break;
      case MESA_SHADER_GEOMETRY:
         pipeline->shader_cso[PIPE_SHADER_GEOMETRY] = device->queue.ctx->create_gs_state(device->queue.ctx, &shstate);
         break;
      case MESA_SHADER_TESS_CTRL:
         pipeline->shader_cso[PIPE_SHADER_TESS_CTRL] = device->queue.ctx->create_tcs_state(device->queue.ctx, &shstate);
         break;
      case MESA_SHADER_TESS_EVAL:
         pipeline->shader_cso[PIPE_SHADER_TESS_EVAL] = device->queue.ctx->create_tes_state(device->queue.ctx, &shstate);
         break;
      default:
         unreachable("illegal shader");
         break;
      }
   }
   return VK_SUCCESS;
}

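/* Editor's note: descriptive comment added. Graphics pipeline construction:
 * deep-copies the create info, records provoking-vertex and
 * line-rasterization settings, compiles every stage to NIR and then to a
 * CSO, and adds a dummy fragment shader if none was supplied.
 */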
static VkResult
lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
                           struct lvp_device *device,
                           struct lvp_pipeline_cache *cache,
                           const VkGraphicsPipelineCreateInfo *pCreateInfo,
                           const VkAllocationCallbacks *alloc)
{
   if (alloc == NULL)
      alloc = &device->vk.alloc;
   pipeline->device = device;
   pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
   pipeline->force_min_sample = false;

   pipeline->mem_ctx = ralloc_context(NULL);
   /* recreate createinfo */
   deep_copy_graphics_create_info(pipeline->mem_ctx, &pipeline->graphics_create_info, pCreateInfo);
   pipeline->is_compute_pipeline = false;

   const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *pv_state =
      vk_find_struct_const(pCreateInfo->pRasterizationState,
                           PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT);
   pipeline->provoking_vertex_last = pv_state && pv_state->provokingVertexMode == VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT;

   const VkPipelineRasterizationLineStateCreateInfoEXT *line_state =
      vk_find_struct_const(pCreateInfo->pRasterizationState,
                           PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
   if (line_state) {
      /* always draw bresenham if !smooth */
      pipeline->line_stipple_enable = line_state->stippledLineEnable;
      pipeline->line_smooth = line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
      pipeline->disable_multisample = line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT ||
                                      line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
      pipeline->line_rectangular = line_state->lineRasterizationMode != VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT;
      if (!dynamic_state_contains(pipeline->graphics_create_info.pDynamicState, VK_DYNAMIC_STATE_LINE_STIPPLE_EXT)) {
         pipeline->line_stipple_factor = line_state->lineStippleFactor - 1;
         pipeline->line_stipple_pattern = line_state->lineStipplePattern;
      }
   } else
      pipeline->line_rectangular = true;

   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      VK_FROM_HANDLE(vk_shader_module, module,
                     pCreateInfo->pStages[i].module);
      gl_shader_stage stage = lvp_shader_stage(pCreateInfo->pStages[i].stage);
      lvp_shader_compile_to_ir(pipeline, module,
                               pCreateInfo->pStages[i].pName,
                               stage,
                               pCreateInfo->pStages[i].pSpecializationInfo);
      if (!pipeline->pipeline_nir[stage])
         return VK_ERROR_FEATURE_NOT_PRESENT;
   }

   if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]) {
      if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.fs.uses_sample_qualifier ||
          BITSET_TEST(pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID) ||
          BITSET_TEST(pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS))
         pipeline->force_min_sample = true;
   }
   if (pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]) {
      nir_lower_patch_vertices(pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL], pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info.tess.tcs_vertices_out, NULL);
      merge_tess_info(&pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info, &pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info);
      const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state =
         vk_find_struct_const(pCreateInfo->pTessellationState,
                              PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);
      if (!domain_origin_state || domain_origin_state->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT)
         pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw = !pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw;
   }

   pipeline->gs_output_lines = pipeline->pipeline_nir[MESA_SHADER_GEOMETRY] &&
                               pipeline->pipeline_nir[MESA_SHADER_GEOMETRY]->info.gs.output_primitive == GL_LINES;

   bool has_fragment_shader = false;
   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      gl_shader_stage stage = lvp_shader_stage(pCreateInfo->pStages[i].stage);
      lvp_pipeline_compile(pipeline, stage);
      if (stage == MESA_SHADER_FRAGMENT)
         has_fragment_shader = true;
   }

   if (has_fragment_shader == false) {
      /* create a dummy fragment shader for this pipeline. */
      nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT, NULL,
                                                     "dummy_frag");

      pipeline->pipeline_nir[MESA_SHADER_FRAGMENT] = b.shader;
      struct pipe_shader_state shstate = {0};
      shstate.type = PIPE_SHADER_IR_NIR;
      shstate.ir.nir = pipeline->pipeline_nir[MESA_SHADER_FRAGMENT];
      pipeline->shader_cso[PIPE_SHADER_FRAGMENT] = device->queue.ctx->create_fs_state(device->queue.ctx, &shstate);
   }
   return VK_SUCCESS;
}

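/* Editor's note: descriptive comment added. Allocation wrappers for the
 * public entry points: one pipeline object is created per create-info, and a
 * failed element leaves VK_NULL_HANDLE in the output array while the loop
 * continues with the remaining entries.
 */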
static VkResult
lvp_graphics_pipeline_create(
   VkDevice _device,
   VkPipelineCache _cache,
   const VkGraphicsPipelineCreateInfo *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkPipeline *pPipeline)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
   struct lvp_pipeline *pipeline;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pipeline == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &pipeline->base,
                       VK_OBJECT_TYPE_PIPELINE);
   result = lvp_graphics_pipeline_init(pipeline, device, cache, pCreateInfo,
                                       pAllocator);
   if (result != VK_SUCCESS) {
      vk_free2(&device->vk.alloc, pAllocator, pipeline);
      return result;
   }

   *pPipeline = lvp_pipeline_to_handle(pipeline);

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateGraphicsPipelines(
   VkDevice _device,
   VkPipelineCache pipelineCache,
   uint32_t count,
   const VkGraphicsPipelineCreateInfo* pCreateInfos,
   const VkAllocationCallbacks* pAllocator,
   VkPipeline* pPipelines)
{
   VkResult result = VK_SUCCESS;
   unsigned i = 0;

   for (; i < count; i++) {
      VkResult r;
      r = lvp_graphics_pipeline_create(_device,
                                       pipelineCache,
                                       &pCreateInfos[i],
                                       pAllocator, &pPipelines[i]);
      if (r != VK_SUCCESS) {
         result = r;
         pPipelines[i] = VK_NULL_HANDLE;
      }
   }

   return result;
}

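/* Editor's note: descriptive comment added. Compute pipelines follow the same
 * pattern: copy the create info, compile the single compute stage to NIR,
 * then create the gallium compute state.
 */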
static VkResult
lvp_compute_pipeline_init(struct lvp_pipeline *pipeline,
                          struct lvp_device *device,
                          struct lvp_pipeline_cache *cache,
                          const VkComputePipelineCreateInfo *pCreateInfo,
                          const VkAllocationCallbacks *alloc)
{
   VK_FROM_HANDLE(vk_shader_module, module,
                  pCreateInfo->stage.module);
   if (alloc == NULL)
      alloc = &device->vk.alloc;
   pipeline->device = device;
   pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
   pipeline->force_min_sample = false;

   pipeline->mem_ctx = ralloc_context(NULL);
   deep_copy_compute_create_info(pipeline->mem_ctx,
                                 &pipeline->compute_create_info, pCreateInfo);
   pipeline->is_compute_pipeline = true;

   lvp_shader_compile_to_ir(pipeline, module,
                            pCreateInfo->stage.pName,
                            MESA_SHADER_COMPUTE,
                            pCreateInfo->stage.pSpecializationInfo);
   if (!pipeline->pipeline_nir[MESA_SHADER_COMPUTE])
      return VK_ERROR_FEATURE_NOT_PRESENT;
   lvp_pipeline_compile(pipeline, MESA_SHADER_COMPUTE);
   return VK_SUCCESS;
}

static VkResult
lvp_compute_pipeline_create(
   VkDevice _device,
   VkPipelineCache _cache,
   const VkComputePipelineCreateInfo *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkPipeline *pPipeline)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
   struct lvp_pipeline *pipeline;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);

   pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pipeline == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &pipeline->base,
                       VK_OBJECT_TYPE_PIPELINE);
   result = lvp_compute_pipeline_init(pipeline, device, cache, pCreateInfo,
                                      pAllocator);
   if (result != VK_SUCCESS) {
      vk_free2(&device->vk.alloc, pAllocator, pipeline);
      return result;
   }

   *pPipeline = lvp_pipeline_to_handle(pipeline);

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateComputePipelines(
   VkDevice _device,
   VkPipelineCache pipelineCache,
   uint32_t count,
   const VkComputePipelineCreateInfo* pCreateInfos,
   const VkAllocationCallbacks* pAllocator,
   VkPipeline* pPipelines)
{
   VkResult result = VK_SUCCESS;
   unsigned i = 0;

   for (; i < count; i++) {
      VkResult r;
      r = lvp_compute_pipeline_create(_device,
                                      pipelineCache,
                                      &pCreateInfos[i],
                                      pAllocator, &pPipelines[i]);
      if (r != VK_SUCCESS) {
         result = r;
         pPipelines[i] = VK_NULL_HANDLE;
      }
   }

   return result;
}