GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/frontends/lavapipe/lvp_cmd_buffer.c
/*
 * Copyright © 2019 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "lvp_private.h"
#include "pipe/p_context.h"
#include "vk_util.h"

static VkResult lvp_create_cmd_buffer(
   struct lvp_device *                         device,
   struct lvp_cmd_pool *                       pool,
   VkCommandBufferLevel                        level,
   VkCommandBuffer*                            pCommandBuffer)
{
   struct lvp_cmd_buffer *cmd_buffer;

   cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &cmd_buffer->base,
                       VK_OBJECT_TYPE_COMMAND_BUFFER);
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   list_inithead(&cmd_buffer->cmds);
   cmd_buffer->last_emit = &cmd_buffer->cmds;
   cmd_buffer->status = LVP_CMD_BUFFER_STATUS_INITIAL;
   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }
   *pCommandBuffer = lvp_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;
}

static void
lvp_cmd_buffer_free_all_cmds(struct lvp_cmd_buffer *cmd_buffer)
{
   struct lvp_cmd_buffer_entry *tmp, *cmd;
   LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &cmd_buffer->cmds, cmd_link) {
      list_del(&cmd->cmd_link);
      vk_free(&cmd_buffer->pool->alloc, cmd);
   }
}

static VkResult lvp_reset_cmd_buffer(struct lvp_cmd_buffer *cmd_buffer)
{
   lvp_cmd_buffer_free_all_cmds(cmd_buffer);
   list_inithead(&cmd_buffer->cmds);
   cmd_buffer->last_emit = &cmd_buffer->cmds;
   cmd_buffer->status = LVP_CMD_BUFFER_STATUS_INITIAL;
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL lvp_AllocateCommandBuffers(
   VkDevice                                    _device,
   const VkCommandBufferAllocateInfo*          pAllocateInfo,
   VkCommandBuffer*                            pCommandBuffers)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct lvp_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct lvp_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = lvp_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->level = pAllocateInfo->level;
         vk_object_base_reset(&cmd_buffer->base);

         pCommandBuffers[i] = lvp_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = lvp_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                        &pCommandBuffers[i]);
         if (result != VK_SUCCESS)
            break;
      }
   }

   if (result != VK_SUCCESS) {
      lvp_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}
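
/*
 * Illustrative sketch (editorial, not part of this file): the recycling above
 * is what an ordinary Vulkan allocate/free cycle exercises. Buffers freed
 * back to a pool land on free_cmd_buffers and are reused, after a reset, by
 * the next allocation instead of triggering a fresh vk_alloc. The handles
 * below ("device", "pool") are assumed to already exist:
 *
 *    VkCommandBufferAllocateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
 *       .commandPool = pool,
 *       .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
 *       .commandBufferCount = 1,
 *    };
 *    VkCommandBuffer cb;
 *    vkAllocateCommandBuffers(device, &info, &cb); // may reuse a freed buffer
 *    vkFreeCommandBuffers(device, pool, 1, &cb);   // parks it on free_cmd_buffers
 */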

static void
lvp_cmd_buffer_destroy(struct lvp_cmd_buffer *cmd_buffer)
{
   lvp_cmd_buffer_free_all_cmds(cmd_buffer);
   list_del(&cmd_buffer->pool_link);
   vk_object_base_finish(&cmd_buffer->base);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

VKAPI_ATTR void VKAPI_CALL lvp_FreeCommandBuffers(
   VkDevice                                    device,
   VkCommandPool                               commandPool,
   uint32_t                                    commandBufferCount,
   const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
         } else
            lvp_cmd_buffer_destroy(cmd_buffer);
      }
   }
}

VKAPI_ATTR VkResult VKAPI_CALL lvp_ResetCommandBuffer(
   VkCommandBuffer                             commandBuffer,
   VkCommandBufferResetFlags                   flags)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   return lvp_reset_cmd_buffer(cmd_buffer);
}

VKAPI_ATTR VkResult VKAPI_CALL lvp_BeginCommandBuffer(
   VkCommandBuffer                             commandBuffer,
   const VkCommandBufferBeginInfo*             pBeginInfo)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result;
   if (cmd_buffer->status != LVP_CMD_BUFFER_STATUS_INITIAL) {
      result = lvp_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }
   cmd_buffer->status = LVP_CMD_BUFFER_STATUS_RECORDING;
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL lvp_EndCommandBuffer(
   VkCommandBuffer                             commandBuffer)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   cmd_buffer->status = LVP_CMD_BUFFER_STATUS_EXECUTABLE;
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateCommandPool(
   VkDevice                                    _device,
   const VkCommandPoolCreateInfo*              pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkCommandPool*                              pCmdPool)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   struct lvp_cmd_pool *pool;

   pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &pool->base,
                       VK_OBJECT_TYPE_COMMAND_POOL);
   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->vk.alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   *pCmdPool = lvp_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL lvp_DestroyCommandPool(
   VkDevice                                    _device,
   VkCommandPool                               commandPool,
   const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      lvp_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link) {
      lvp_cmd_buffer_destroy(cmd_buffer);
   }

   vk_object_base_finish(&pool->base);
   vk_free2(&device->vk.alloc, pAllocator, pool);
}

VKAPI_ATTR VkResult VKAPI_CALL lvp_ResetCommandPool(
   VkDevice                                    device,
   VkCommandPool                               commandPool,
   VkCommandPoolResetFlags                     flags)
{
   LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct lvp_cmd_buffer, cmd_buffer,
                       &pool->cmd_buffers, pool_link) {
      result = lvp_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }
   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL lvp_TrimCommandPool(
   VkDevice                                    device,
   VkCommandPool                               commandPool,
   VkCommandPoolTrimFlags                      flags)
{
   LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link) {
      lvp_cmd_buffer_destroy(cmd_buffer);
   }
}

static struct lvp_cmd_buffer_entry *cmd_buf_entry_alloc_size(struct lvp_cmd_buffer *cmd_buffer,
                                                             uint32_t extra_size,
                                                             enum lvp_cmds type)
{
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = sizeof(*cmd) + extra_size;
   cmd = vk_alloc(&cmd_buffer->pool->alloc,
                  cmd_size,
                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd)
      return NULL;

   cmd->cmd_type = type;
   return cmd;
}
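
/*
 * Sketch of the trailing-payload convention used throughout this file
 * (editorial; the variable names below are illustrative only): one vk_alloc
 * covers the entry header plus any variable-length data, and callers carve
 * the payload out of the memory directly after the header.
 *
 *    uint32_t n = 4;                            // e.g. four scissor rects
 *    struct lvp_cmd_buffer_entry *cmd =
 *       cmd_buf_entry_alloc_size(cmd_buffer, n * sizeof(VkRect2D), type);
 *    VkRect2D *payload = (VkRect2D *)(cmd + 1); // first byte past the header
 *    // payload[0..n-1] now aliases the extra_size region of the allocation
 */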

static struct lvp_cmd_buffer_entry *cmd_buf_entry_alloc(struct lvp_cmd_buffer *cmd_buffer,
                                                        enum lvp_cmds type)
{
   return cmd_buf_entry_alloc_size(cmd_buffer, 0, type);
}

static void cmd_buf_queue(struct lvp_cmd_buffer *cmd_buffer,
                          struct lvp_cmd_buffer_entry *cmd)
{
   switch (cmd->cmd_type) {
   case LVP_CMD_BIND_DESCRIPTOR_SETS:
   case LVP_CMD_PUSH_DESCRIPTOR_SET:
      list_add(&cmd->cmd_link, cmd_buffer->last_emit);
      cmd_buffer->last_emit = &cmd->cmd_link;
      break;
   case LVP_CMD_NEXT_SUBPASS:
   case LVP_CMD_DRAW:
   case LVP_CMD_DRAW_INDEXED:
   case LVP_CMD_DRAW_INDIRECT:
   case LVP_CMD_DRAW_INDEXED_INDIRECT:
   case LVP_CMD_DISPATCH:
   case LVP_CMD_DISPATCH_INDIRECT:
      cmd_buffer->last_emit = &cmd->cmd_link;
      FALLTHROUGH;
   default:
      list_addtail(&cmd->cmd_link, &cmd_buffer->cmds);
   }
}
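
/*
 * Reading note (editorial, not driver code): cmd_buf_queue keeps last_emit
 * pointing at the most recent draw/dispatch/subpass link, so descriptor
 * binds recorded afterwards are spliced in right after that anchor rather
 * than appended at the list tail. With list_add inserting after its second
 * argument, the list evolves like this:
 *
 *    cmds: ... -> DRAW                    last_emit = &DRAW
 *    record BIND_DESCRIPTOR_SETS:
 *    cmds: ... -> DRAW -> BIND            last_emit = &BIND
 *
 * Consecutive binds therefore stay in recording order while still landing
 * ahead of anything queued at the tail after the draw.
 */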

static void
state_setup_attachments(struct lvp_attachment_state *attachments,
                        struct lvp_render_pass *pass,
                        const VkClearValue *clear_values)
{
   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      struct lvp_render_pass_attachment *att = &pass->attachments[i];
      VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
      VkImageAspectFlags clear_aspects = 0;
      if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
         /* color attachment */
         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
         }
      } else {
         /* depthstencil attachment */
         if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
             att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
            if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
                att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
               clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
         }
         if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
             att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
         }
      }
      attachments[i].pending_clear_aspects = clear_aspects;
      if (clear_aspects)
         attachments[i].clear_value = clear_values[i];
   }
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdBeginRenderPass2(
   VkCommandBuffer                             commandBuffer,
   const VkRenderPassBeginInfo*                pRenderPassBeginInfo,
   const VkSubpassBeginInfo*                   pSubpassBeginInfo)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_render_pass, pass, pRenderPassBeginInfo->renderPass);
   LVP_FROM_HANDLE(lvp_framebuffer, framebuffer, pRenderPassBeginInfo->framebuffer);
   const struct VkRenderPassAttachmentBeginInfo *attachment_info =
      vk_find_struct_const(pRenderPassBeginInfo->pNext,
                           RENDER_PASS_ATTACHMENT_BEGIN_INFO);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = pass->attachment_count * sizeof(struct lvp_attachment_state);

   if (attachment_info)
      cmd_size += attachment_info->attachmentCount * sizeof(struct lvp_image_view *);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_BEGIN_RENDER_PASS);
   if (!cmd)
      return;

   cmd->u.begin_render_pass.render_pass = pass;
   cmd->u.begin_render_pass.framebuffer = framebuffer;
   cmd->u.begin_render_pass.render_area = pRenderPassBeginInfo->renderArea;

   cmd->u.begin_render_pass.attachments = (struct lvp_attachment_state *)(cmd + 1);
   cmd->u.begin_render_pass.imageless_views = NULL;
   if (attachment_info) {
      cmd->u.begin_render_pass.imageless_views = (struct lvp_image_view **)(cmd->u.begin_render_pass.attachments + pass->attachment_count);
      for (unsigned i = 0; i < attachment_info->attachmentCount; i++)
         cmd->u.begin_render_pass.imageless_views[i] = lvp_image_view_from_handle(attachment_info->pAttachments[i]);
   }

   state_setup_attachments(cmd->u.begin_render_pass.attachments, pass, pRenderPassBeginInfo->pClearValues);

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdNextSubpass2(
   VkCommandBuffer                             commandBuffer,
   const VkSubpassBeginInfo*                   pSubpassBeginInfo,
   const VkSubpassEndInfo*                     pSubpassEndInfo)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_NEXT_SUBPASS);
   if (!cmd)
      return;

   cmd->u.next_subpass.contents = pSubpassBeginInfo->contents;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdBindVertexBuffers(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    firstBinding,
   uint32_t                                    bindingCount,
   const VkBuffer*                             pBuffers,
   const VkDeviceSize*                         pOffsets)
{
   lvp_CmdBindVertexBuffers2EXT(commandBuffer, firstBinding,
                                bindingCount, pBuffers, pOffsets, NULL, NULL);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdBindPipeline(
   VkCommandBuffer                             commandBuffer,
   VkPipelineBindPoint                         pipelineBindPoint,
   VkPipeline                                  _pipeline)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_pipeline, pipeline, _pipeline);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_BIND_PIPELINE);
   if (!cmd)
      return;

   cmd->u.pipeline.bind_point = pipelineBindPoint;
   cmd->u.pipeline.pipeline = pipeline;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdBindDescriptorSets(
   VkCommandBuffer                             commandBuffer,
   VkPipelineBindPoint                         pipelineBindPoint,
   VkPipelineLayout                            _layout,
   uint32_t                                    firstSet,
   uint32_t                                    descriptorSetCount,
   const VkDescriptorSet*                      pDescriptorSets,
   uint32_t                                    dynamicOffsetCount,
   const uint32_t*                             pDynamicOffsets)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_pipeline_layout, layout, _layout);
   struct lvp_cmd_buffer_entry *cmd;
   struct lvp_descriptor_set **sets;
   uint32_t *offsets;
   int i;
   uint32_t cmd_size = descriptorSetCount * sizeof(struct lvp_descriptor_set *) + dynamicOffsetCount * sizeof(uint32_t);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_BIND_DESCRIPTOR_SETS);
   if (!cmd)
      return;

   cmd->u.descriptor_sets.bind_point = pipelineBindPoint;
   cmd->u.descriptor_sets.first = firstSet;
   cmd->u.descriptor_sets.count = descriptorSetCount;

   for (i = 0; i < layout->num_sets; i++)
      cmd->u.descriptor_sets.set_layout[i] = layout->set[i].layout;
   sets = (struct lvp_descriptor_set **)(cmd + 1);
   for (i = 0; i < descriptorSetCount; i++) {
      sets[i] = lvp_descriptor_set_from_handle(pDescriptorSets[i]);
   }
   cmd->u.descriptor_sets.sets = sets;

   cmd->u.descriptor_sets.dynamic_offset_count = dynamicOffsetCount;
   offsets = (uint32_t *)(sets + descriptorSetCount);
   for (i = 0; i < dynamicOffsetCount; i++)
      offsets[i] = pDynamicOffsets[i];
   cmd->u.descriptor_sets.dynamic_offsets = offsets;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdDraw(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    vertexCount,
   uint32_t                                    instanceCount,
   uint32_t                                    firstVertex,
   uint32_t                                    firstInstance)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   uint32_t cmd_size = sizeof(struct pipe_draw_start_count_bias);
   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_DRAW);
   if (!cmd)
      return;

   cmd->u.draw.instance_count = instanceCount;
   cmd->u.draw.first_instance = firstInstance;
   cmd->u.draw.draw_count = 1;
   cmd->u.draw.draws[0].start = firstVertex;
   cmd->u.draw.draws[0].count = vertexCount;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdDrawMultiEXT(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    drawCount,
   const VkMultiDrawInfoEXT                   *pVertexInfo,
   uint32_t                                    instanceCount,
   uint32_t                                    firstInstance,
   uint32_t                                    stride)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   uint32_t cmd_size = drawCount * sizeof(struct pipe_draw_start_count_bias);
   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_DRAW);
   if (!cmd)
      return;

   cmd->u.draw.instance_count = instanceCount;
   cmd->u.draw.first_instance = firstInstance;
   cmd->u.draw.draw_count = drawCount;
   if (stride == sizeof(struct pipe_draw_start_count_bias))
      memcpy(cmd->u.draw.draws, pVertexInfo, cmd_size);
   else {
      unsigned i = 0;
      vk_foreach_multi_draw(draw, i, pVertexInfo, drawCount, stride)
         memcpy(&cmd->u.draw.draws[i], draw, sizeof(struct VkMultiDrawInfoEXT));
   }

   cmd_buf_queue(cmd_buffer, cmd);
}
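
/*
 * Reading note (editorial): the stride check above is a bulk-copy fast path.
 * VkMultiDrawInfoEXT is two uint32_t fields (firstVertex, vertexCount) that
 * line up with the leading start/count fields of pipe_draw_start_count_bias,
 * and index_bias is not consumed for non-indexed draws. So when the caller's
 * stride happens to equal the destination element size, the whole array can
 * be copied with one memcpy; otherwise each element is walked at the
 * caller's stride:
 *
 *    // stride == sizeof(pipe_draw_start_count_bias): one bulk memcpy
 *    // any other stride: per-element copy via vk_foreach_multi_draw
 */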

VKAPI_ATTR void VKAPI_CALL lvp_CmdEndRenderPass2(
   VkCommandBuffer                             commandBuffer,
   const VkSubpassEndInfo*                     pSubpassEndInfo)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_END_RENDER_PASS);
   if (!cmd)
      return;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetViewport(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    firstViewport,
   uint32_t                                    viewportCount,
   const VkViewport*                           pViewports)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   int i;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_VIEWPORT);
   if (!cmd)
      return;

   cmd->u.set_viewport.first_viewport = firstViewport;
   cmd->u.set_viewport.viewport_count = viewportCount;
   for (i = 0; i < viewportCount; i++)
      cmd->u.set_viewport.viewports[i] = pViewports[i];

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetScissor(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    firstScissor,
   uint32_t                                    scissorCount,
   const VkRect2D*                             pScissors)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   int i;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_SCISSOR);
   if (!cmd)
      return;

   cmd->u.set_scissor.first_scissor = firstScissor;
   cmd->u.set_scissor.scissor_count = scissorCount;
   for (i = 0; i < scissorCount; i++)
      cmd->u.set_scissor.scissors[i] = pScissors[i];

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetLineWidth(
   VkCommandBuffer                             commandBuffer,
   float                                       lineWidth)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_LINE_WIDTH);
   if (!cmd)
      return;

   cmd->u.set_line_width.line_width = lineWidth;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetDepthBias(
   VkCommandBuffer                             commandBuffer,
   float                                       depthBiasConstantFactor,
   float                                       depthBiasClamp,
   float                                       depthBiasSlopeFactor)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_DEPTH_BIAS);
   if (!cmd)
      return;

   cmd->u.set_depth_bias.constant_factor = depthBiasConstantFactor;
   cmd->u.set_depth_bias.clamp = depthBiasClamp;
   cmd->u.set_depth_bias.slope_factor = depthBiasSlopeFactor;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetBlendConstants(
   VkCommandBuffer                             commandBuffer,
   const float                                 blendConstants[4])
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_BLEND_CONSTANTS);
   if (!cmd)
      return;

   memcpy(cmd->u.set_blend_constants.blend_constants, blendConstants, 4 * sizeof(float));

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetDepthBounds(
   VkCommandBuffer                             commandBuffer,
   float                                       minDepthBounds,
   float                                       maxDepthBounds)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_DEPTH_BOUNDS);
   if (!cmd)
      return;

   cmd->u.set_depth_bounds.min_depth = minDepthBounds;
   cmd->u.set_depth_bounds.max_depth = maxDepthBounds;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetStencilCompareMask(
   VkCommandBuffer                             commandBuffer,
   VkStencilFaceFlags                          faceMask,
   uint32_t                                    compareMask)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_STENCIL_COMPARE_MASK);
   if (!cmd)
      return;

   cmd->u.stencil_vals.face_mask = faceMask;
   cmd->u.stencil_vals.value = compareMask;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetStencilWriteMask(
   VkCommandBuffer                             commandBuffer,
   VkStencilFaceFlags                          faceMask,
   uint32_t                                    writeMask)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_STENCIL_WRITE_MASK);
   if (!cmd)
      return;

   cmd->u.stencil_vals.face_mask = faceMask;
   cmd->u.stencil_vals.value = writeMask;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetStencilReference(
   VkCommandBuffer                             commandBuffer,
   VkStencilFaceFlags                          faceMask,
   uint32_t                                    reference)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_STENCIL_REFERENCE);
   if (!cmd)
      return;

   cmd->u.stencil_vals.face_mask = faceMask;
   cmd->u.stencil_vals.value = reference;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdPushConstants(
   VkCommandBuffer                             commandBuffer,
   VkPipelineLayout                            layout,
   VkShaderStageFlags                          stageFlags,
   uint32_t                                    offset,
   uint32_t                                    size,
   const void*                                 pValues)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   /* the entry struct itself carries the first 4 bytes of the payload */
   cmd = cmd_buf_entry_alloc_size(cmd_buffer, (size - 4), LVP_CMD_PUSH_CONSTANTS);
   if (!cmd)
      return;

   cmd->u.push_constants.stage = stageFlags;
   cmd->u.push_constants.offset = offset;
   cmd->u.push_constants.size = size;
   memcpy(cmd->u.push_constants.val, pValues, size);

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdBindIndexBuffer(
   VkCommandBuffer                             commandBuffer,
   VkBuffer                                    _buffer,
   VkDeviceSize                                offset,
   VkIndexType                                 indexType)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, buffer, _buffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_BIND_INDEX_BUFFER);
   if (!cmd)
      return;

   cmd->u.index_buffer.buffer = buffer;
   cmd->u.index_buffer.offset = offset;
   cmd->u.index_buffer.index_type = indexType;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdDrawIndexed(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    indexCount,
   uint32_t                                    instanceCount,
   uint32_t                                    firstIndex,
   int32_t                                     vertexOffset,
   uint32_t                                    firstInstance)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   uint32_t cmd_size = sizeof(struct pipe_draw_start_count_bias);
   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_DRAW_INDEXED);
   if (!cmd)
      return;

   cmd->u.draw_indexed.instance_count = instanceCount;
   cmd->u.draw_indexed.first_instance = firstInstance;
   cmd->u.draw_indexed.draw_count = 1;
   cmd->u.draw_indexed.draws[0].start = firstIndex;
   cmd->u.draw_indexed.draws[0].count = indexCount;
   cmd->u.draw_indexed.draws[0].index_bias = vertexOffset;
   cmd->u.draw_indexed.calc_start = true;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdDrawMultiIndexedEXT(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    drawCount,
   const VkMultiDrawIndexedInfoEXT            *pIndexInfo,
   uint32_t                                    instanceCount,
   uint32_t                                    firstInstance,
   uint32_t                                    stride,
   const int32_t                              *pVertexOffset)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   uint32_t cmd_size = drawCount * sizeof(struct pipe_draw_start_count_bias);
   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_DRAW_INDEXED);
   if (!cmd)
      return;

   cmd->u.draw_indexed.instance_count = instanceCount;
   cmd->u.draw_indexed.first_instance = firstInstance;
   cmd->u.draw_indexed.draw_count = drawCount;
   cmd->u.draw_indexed.vertex_offset_changes = !pVertexOffset;
   if (stride == sizeof(struct pipe_draw_start_count_bias))
      memcpy(cmd->u.draw_indexed.draws, pIndexInfo, cmd_size);
   else {
      unsigned i = 0;
      vk_foreach_multi_draw_indexed(draw, i, pIndexInfo, drawCount, stride)
         memcpy(&cmd->u.draw_indexed.draws[i], draw, sizeof(struct pipe_draw_start_count_bias));
   }
   /* when a single vertexOffset is provided, only draws[0].index_bias is read */
   if (pVertexOffset)
      cmd->u.draw_indexed.draws[0].index_bias = *pVertexOffset;
   cmd->u.draw_indexed.calc_start = true;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdDrawIndirect(
   VkCommandBuffer                             commandBuffer,
   VkBuffer                                    _buffer,
   VkDeviceSize                                offset,
   uint32_t                                    drawCount,
   uint32_t                                    stride)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, buf, _buffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DRAW_INDIRECT);
   if (!cmd)
      return;

   cmd->u.draw_indirect.offset = offset;
   cmd->u.draw_indirect.buffer = buf;
   cmd->u.draw_indirect.draw_count = drawCount;
   cmd->u.draw_indirect.stride = stride;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdDrawIndexedIndirect(
   VkCommandBuffer                             commandBuffer,
   VkBuffer                                    _buffer,
   VkDeviceSize                                offset,
   uint32_t                                    drawCount,
   uint32_t                                    stride)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, buf, _buffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DRAW_INDEXED_INDIRECT);
   if (!cmd)
      return;

   cmd->u.draw_indirect.offset = offset;
   cmd->u.draw_indirect.buffer = buf;
   cmd->u.draw_indirect.draw_count = drawCount;
   cmd->u.draw_indirect.stride = stride;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdDispatch(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    x,
   uint32_t                                    y,
   uint32_t                                    z)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DISPATCH);
   if (!cmd)
      return;

   cmd->u.dispatch.x = x;
   cmd->u.dispatch.y = y;
   cmd->u.dispatch.z = z;
   cmd->u.dispatch.base_x = 0;
   cmd->u.dispatch.base_y = 0;
   cmd->u.dispatch.base_z = 0;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdDispatchIndirect(
   VkCommandBuffer                             commandBuffer,
   VkBuffer                                    _buffer,
   VkDeviceSize                                offset)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DISPATCH_INDIRECT);
   if (!cmd)
      return;

   cmd->u.dispatch_indirect.buffer = lvp_buffer_from_handle(_buffer);
   cmd->u.dispatch_indirect.offset = offset;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdExecuteCommands(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    commandBufferCount,
   const VkCommandBuffer*                      pCmdBuffers)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = commandBufferCount * sizeof(struct lvp_cmd_buffer *);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_EXECUTE_COMMANDS);
   if (!cmd)
      return;

   cmd->u.execute_commands.command_buffer_count = commandBufferCount;
   for (unsigned i = 0; i < commandBufferCount; i++)
      cmd->u.execute_commands.cmd_buffers[i] = lvp_cmd_buffer_from_handle(pCmdBuffers[i]);

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetEvent(VkCommandBuffer commandBuffer,
                                           VkEvent _event,
                                           VkPipelineStageFlags stageMask)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_event, event, _event);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_EVENT);
   if (!cmd)
      return;

   cmd->u.event_set.event = event;
   cmd->u.event_set.value = true;
   cmd->u.event_set.flush = !!(stageMask == VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdResetEvent(VkCommandBuffer commandBuffer,
                                             VkEvent _event,
                                             VkPipelineStageFlags stageMask)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_event, event, _event);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_EVENT);
   if (!cmd)
      return;

   cmd->u.event_set.event = event;
   cmd->u.event_set.value = false;
   cmd->u.event_set.flush = !!(stageMask == VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdWaitEvents(VkCommandBuffer commandBuffer,
                                             uint32_t eventCount,
                                             const VkEvent* pEvents,
                                             VkPipelineStageFlags srcStageMask,
                                             VkPipelineStageFlags dstStageMask,
                                             uint32_t memoryBarrierCount,
                                             const VkMemoryBarrier* pMemoryBarriers,
                                             uint32_t bufferMemoryBarrierCount,
                                             const VkBufferMemoryBarrier* pBufferMemoryBarriers,
                                             uint32_t imageMemoryBarrierCount,
                                             const VkImageMemoryBarrier* pImageMemoryBarriers)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = 0;

   cmd_size += eventCount * sizeof(struct lvp_event *);
   cmd_size += memoryBarrierCount * sizeof(VkMemoryBarrier);
   cmd_size += bufferMemoryBarrierCount * sizeof(VkBufferMemoryBarrier);
   cmd_size += imageMemoryBarrierCount * sizeof(VkImageMemoryBarrier);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_WAIT_EVENTS);
   if (!cmd)
      return;

   cmd->u.wait_events.src_stage_mask = srcStageMask;
   cmd->u.wait_events.dst_stage_mask = dstStageMask;
   cmd->u.wait_events.event_count = eventCount;
   cmd->u.wait_events.events = (struct lvp_event **)(cmd + 1);
   for (unsigned i = 0; i < eventCount; i++)
      cmd->u.wait_events.events[i] = lvp_event_from_handle(pEvents[i]);
   cmd->u.wait_events.memory_barrier_count = memoryBarrierCount;
   cmd->u.wait_events.buffer_memory_barrier_count = bufferMemoryBarrierCount;
   cmd->u.wait_events.image_memory_barrier_count = imageMemoryBarrierCount;

   /* TODO: the barrier arrays are sized into the allocation above but not
    * yet copied into the trailing payload */
   cmd_buf_queue(cmd_buffer, cmd);
}

/* copy a 2KHR struct to the base struct */
static inline void
copy_2_struct_to_base(void *base, const void *struct2, size_t struct_size)
{
   size_t offset = align(sizeof(VkStructureType) + sizeof(void*), 8);
   memcpy(base, ((uint8_t*)struct2) + offset, struct_size);
}

/* copy an array of 2KHR structs to an array of base structs */
#define COPY_STRUCT2_ARRAY(count, base, struct2, struct_type) \
   do { \
      for (unsigned _i = 0; _i < (count); _i++) \
         copy_2_struct_to_base(&base[_i], &struct2[_i], sizeof(struct_type)); \
   } while (0)
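
/*
 * Reading note (editorial) on what copy_2_struct_to_base relies on: every
 * Vulkan "2KHR" struct begins with an sType/pNext header, and its remaining
 * members match the layout of the corresponding base struct, so skipping the
 * aligned header yields the base struct's bytes. Illustrative layout, with
 * offsets assuming a 64-bit build where align(4 + 8, 8) == 16:
 *
 *    typedef struct VkBufferCopy2KHR {
 *       VkStructureType sType;     // +0  -- header, skipped
 *       const void     *pNext;     // +8  -- header, skipped
 *       VkDeviceSize    srcOffset; // +16 -- matches VkBufferCopy.srcOffset
 *       VkDeviceSize    dstOffset; //     -- matches VkBufferCopy.dstOffset
 *       VkDeviceSize    size;      //     -- matches VkBufferCopy.size
 *    } VkBufferCopy2KHR;
 */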

VKAPI_ATTR void VKAPI_CALL lvp_CmdCopyBufferToImage2KHR(
   VkCommandBuffer                             commandBuffer,
   const VkCopyBufferToImageInfo2KHR          *info)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, src_buffer, info->srcBuffer);
   LVP_FROM_HANDLE(lvp_image, dst_image, info->dstImage);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = info->regionCount * sizeof(VkBufferImageCopy);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_COPY_BUFFER_TO_IMAGE);
   if (!cmd)
      return;

   cmd->u.buffer_to_img.src = src_buffer;
   cmd->u.buffer_to_img.dst = dst_image;
   cmd->u.buffer_to_img.dst_layout = info->dstImageLayout;
   cmd->u.buffer_to_img.region_count = info->regionCount;

   {
      VkBufferImageCopy *regions;

      regions = (VkBufferImageCopy *)(cmd + 1);
      COPY_STRUCT2_ARRAY(info->regionCount, regions, info->pRegions, VkBufferImageCopy);
      cmd->u.buffer_to_img.regions = regions;
   }

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdCopyImageToBuffer2KHR(
   VkCommandBuffer                             commandBuffer,
   const VkCopyImageToBufferInfo2KHR          *info)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_image, src_image, info->srcImage);
   LVP_FROM_HANDLE(lvp_buffer, dst_buffer, info->dstBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = info->regionCount * sizeof(VkBufferImageCopy);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_COPY_IMAGE_TO_BUFFER);
   if (!cmd)
      return;

   cmd->u.img_to_buffer.src = src_image;
   cmd->u.img_to_buffer.dst = dst_buffer;
   cmd->u.img_to_buffer.src_layout = info->srcImageLayout;
   cmd->u.img_to_buffer.region_count = info->regionCount;

   {
      VkBufferImageCopy *regions;

      regions = (VkBufferImageCopy *)(cmd + 1);
      COPY_STRUCT2_ARRAY(info->regionCount, regions, info->pRegions, VkBufferImageCopy);
      cmd->u.img_to_buffer.regions = regions;
   }

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdCopyImage2KHR(
   VkCommandBuffer                             commandBuffer,
   const VkCopyImageInfo2KHR                  *info)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_image, src_image, info->srcImage);
   LVP_FROM_HANDLE(lvp_image, dest_image, info->dstImage);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = info->regionCount * sizeof(VkImageCopy);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_COPY_IMAGE);
   if (!cmd)
      return;

   cmd->u.copy_image.src = src_image;
   cmd->u.copy_image.dst = dest_image;
   cmd->u.copy_image.src_layout = info->srcImageLayout;
   cmd->u.copy_image.dst_layout = info->dstImageLayout;
   cmd->u.copy_image.region_count = info->regionCount;

   {
      VkImageCopy *regions;

      regions = (VkImageCopy *)(cmd + 1);
      COPY_STRUCT2_ARRAY(info->regionCount, regions, info->pRegions, VkImageCopy);
      cmd->u.copy_image.regions = regions;
   }

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdCopyBuffer2KHR(
   VkCommandBuffer                             commandBuffer,
   const VkCopyBufferInfo2KHR                 *info)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, src_buffer, info->srcBuffer);
   LVP_FROM_HANDLE(lvp_buffer, dest_buffer, info->dstBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = info->regionCount * sizeof(VkBufferCopy);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_COPY_BUFFER);
   if (!cmd)
      return;

   cmd->u.copy_buffer.src = src_buffer;
   cmd->u.copy_buffer.dst = dest_buffer;
   cmd->u.copy_buffer.region_count = info->regionCount;

   {
      VkBufferCopy *regions;

      regions = (VkBufferCopy *)(cmd + 1);
      COPY_STRUCT2_ARRAY(info->regionCount, regions, info->pRegions, VkBufferCopy);
      cmd->u.copy_buffer.regions = regions;
   }

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdBlitImage2KHR(
   VkCommandBuffer                             commandBuffer,
   const VkBlitImageInfo2KHR                  *info)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_image, src_image, info->srcImage);
   LVP_FROM_HANDLE(lvp_image, dest_image, info->dstImage);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = info->regionCount * sizeof(VkImageBlit);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_BLIT_IMAGE);
   if (!cmd)
      return;

   cmd->u.blit_image.src = src_image;
   cmd->u.blit_image.dst = dest_image;
   cmd->u.blit_image.src_layout = info->srcImageLayout;
   cmd->u.blit_image.dst_layout = info->dstImageLayout;
   cmd->u.blit_image.filter = info->filter;
   cmd->u.blit_image.region_count = info->regionCount;

   {
      VkImageBlit *regions;

      regions = (VkImageBlit *)(cmd + 1);
      COPY_STRUCT2_ARRAY(info->regionCount, regions, info->pRegions, VkImageBlit);
      cmd->u.blit_image.regions = regions;
   }

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdClearAttachments(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    attachmentCount,
   const VkClearAttachment*                    pAttachments,
   uint32_t                                    rectCount,
   const VkClearRect*                          pRects)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = attachmentCount * sizeof(VkClearAttachment) + rectCount * sizeof(VkClearRect);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_CLEAR_ATTACHMENTS);
   if (!cmd)
      return;

   cmd->u.clear_attachments.attachment_count = attachmentCount;
   cmd->u.clear_attachments.attachments = (VkClearAttachment *)(cmd + 1);
   for (unsigned i = 0; i < attachmentCount; i++)
      cmd->u.clear_attachments.attachments[i] = pAttachments[i];
   cmd->u.clear_attachments.rect_count = rectCount;
   cmd->u.clear_attachments.rects = (VkClearRect *)(cmd->u.clear_attachments.attachments + attachmentCount);
   for (unsigned i = 0; i < rectCount; i++)
      cmd->u.clear_attachments.rects[i] = pRects[i];

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdFillBuffer(
   VkCommandBuffer                             commandBuffer,
   VkBuffer                                    dstBuffer,
   VkDeviceSize                                dstOffset,
   VkDeviceSize                                fillSize,
   uint32_t                                    data)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, dst_buffer, dstBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_FILL_BUFFER);
   if (!cmd)
      return;

   cmd->u.fill_buffer.buffer = dst_buffer;
   cmd->u.fill_buffer.offset = dstOffset;
   cmd->u.fill_buffer.fill_size = fillSize;
   cmd->u.fill_buffer.data = data;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdUpdateBuffer(
   VkCommandBuffer                             commandBuffer,
   VkBuffer                                    dstBuffer,
   VkDeviceSize                                dstOffset,
   VkDeviceSize                                dataSize,
   const void*                                 pData)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, dst_buffer, dstBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, dataSize, LVP_CMD_UPDATE_BUFFER);
   if (!cmd)
      return;

   cmd->u.update_buffer.buffer = dst_buffer;
   cmd->u.update_buffer.offset = dstOffset;
   cmd->u.update_buffer.data_size = dataSize;
   memcpy(cmd->u.update_buffer.data, pData, dataSize);

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdClearColorImage(
   VkCommandBuffer                             commandBuffer,
   VkImage                                     image_h,
   VkImageLayout                               imageLayout,
   const VkClearColorValue*                    pColor,
   uint32_t                                    rangeCount,
   const VkImageSubresourceRange*              pRanges)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_image, image, image_h);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = rangeCount * sizeof(VkImageSubresourceRange);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_CLEAR_COLOR_IMAGE);
   if (!cmd)
      return;

   cmd->u.clear_color_image.image = image;
   cmd->u.clear_color_image.layout = imageLayout;
   cmd->u.clear_color_image.clear_val = *pColor;
   cmd->u.clear_color_image.range_count = rangeCount;
   cmd->u.clear_color_image.ranges = (VkImageSubresourceRange *)(cmd + 1);
   for (unsigned i = 0; i < rangeCount; i++)
      cmd->u.clear_color_image.ranges[i] = pRanges[i];

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdClearDepthStencilImage(
   VkCommandBuffer                             commandBuffer,
   VkImage                                     image_h,
   VkImageLayout                               imageLayout,
   const VkClearDepthStencilValue*             pDepthStencil,
   uint32_t                                    rangeCount,
   const VkImageSubresourceRange*              pRanges)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_image, image, image_h);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = rangeCount * sizeof(VkImageSubresourceRange);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_CLEAR_DEPTH_STENCIL_IMAGE);
   if (!cmd)
      return;

   cmd->u.clear_ds_image.image = image;
   cmd->u.clear_ds_image.layout = imageLayout;
   cmd->u.clear_ds_image.clear_val = *pDepthStencil;
   cmd->u.clear_ds_image.range_count = rangeCount;
   cmd->u.clear_ds_image.ranges = (VkImageSubresourceRange *)(cmd + 1);
   for (unsigned i = 0; i < rangeCount; i++)
      cmd->u.clear_ds_image.ranges[i] = pRanges[i];

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdResolveImage2KHR(
   VkCommandBuffer                             commandBuffer,
   const VkResolveImageInfo2KHR               *info)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_image, src_image, info->srcImage);
   LVP_FROM_HANDLE(lvp_image, dst_image, info->dstImage);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = info->regionCount * sizeof(VkImageResolve);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_RESOLVE_IMAGE);
   if (!cmd)
      return;

   cmd->u.resolve_image.src = src_image;
   cmd->u.resolve_image.dst = dst_image;
   cmd->u.resolve_image.src_layout = info->srcImageLayout;
   cmd->u.resolve_image.dst_layout = info->dstImageLayout;
   cmd->u.resolve_image.region_count = info->regionCount;
   cmd->u.resolve_image.regions = (VkImageResolve *)(cmd + 1);
   COPY_STRUCT2_ARRAY(info->regionCount, cmd->u.resolve_image.regions, info->pRegions, VkImageResolve);

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdResetQueryPool(
   VkCommandBuffer                             commandBuffer,
   VkQueryPool                                 queryPool,
   uint32_t                                    firstQuery,
   uint32_t                                    queryCount)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_RESET_QUERY_POOL);
   if (!cmd)
      return;

   cmd->u.query.pool = query_pool;
   cmd->u.query.query = firstQuery;
   cmd->u.query.index = queryCount;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdBeginQueryIndexedEXT(
   VkCommandBuffer                             commandBuffer,
   VkQueryPool                                 queryPool,
   uint32_t                                    query,
   VkQueryControlFlags                         flags,
   uint32_t                                    index)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_BEGIN_QUERY);
   if (!cmd)
      return;

   cmd->u.query.pool = query_pool;
   cmd->u.query.query = query;
   cmd->u.query.index = index;
   cmd->u.query.precise = true;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdBeginQuery(
   VkCommandBuffer                             commandBuffer,
   VkQueryPool                                 queryPool,
   uint32_t                                    query,
   VkQueryControlFlags                         flags)
{
   lvp_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdEndQueryIndexedEXT(
   VkCommandBuffer                             commandBuffer,
   VkQueryPool                                 queryPool,
   uint32_t                                    query,
   uint32_t                                    index)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_END_QUERY);
   if (!cmd)
      return;

   cmd->u.query.pool = query_pool;
   cmd->u.query.query = query;
   cmd->u.query.index = index;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdEndQuery(
   VkCommandBuffer                             commandBuffer,
   VkQueryPool                                 queryPool,
   uint32_t                                    query)
{
   lvp_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdWriteTimestamp(
   VkCommandBuffer                             commandBuffer,
   VkPipelineStageFlagBits                     pipelineStage,
   VkQueryPool                                 queryPool,
   uint32_t                                    query)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_WRITE_TIMESTAMP);
   if (!cmd)
      return;

   cmd->u.query.pool = query_pool;
   cmd->u.query.query = query;
   cmd->u.query.flush = !(pipelineStage == VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdCopyQueryPoolResults(
   VkCommandBuffer                             commandBuffer,
   VkQueryPool                                 queryPool,
   uint32_t                                    firstQuery,
   uint32_t                                    queryCount,
   VkBuffer                                    dstBuffer,
   VkDeviceSize                                dstOffset,
   VkDeviceSize                                stride,
   VkQueryResultFlags                          flags)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);
   LVP_FROM_HANDLE(lvp_buffer, buffer, dstBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_COPY_QUERY_POOL_RESULTS);
   if (!cmd)
      return;

   cmd->u.copy_query_pool_results.pool = query_pool;
   cmd->u.copy_query_pool_results.first_query = firstQuery;
   cmd->u.copy_query_pool_results.query_count = queryCount;
   cmd->u.copy_query_pool_results.dst = buffer;
   cmd->u.copy_query_pool_results.dst_offset = dstOffset;
   cmd->u.copy_query_pool_results.stride = stride;
   cmd->u.copy_query_pool_results.flags = flags;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdPipelineBarrier(
   VkCommandBuffer                             commandBuffer,
   VkPipelineStageFlags                        srcStageMask,
   VkPipelineStageFlags                        destStageMask,
   VkBool32                                    byRegion,
   uint32_t                                    memoryBarrierCount,
   const VkMemoryBarrier*                      pMemoryBarriers,
   uint32_t                                    bufferMemoryBarrierCount,
   const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
   uint32_t                                    imageMemoryBarrierCount,
   const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = 0;

   cmd_size += memoryBarrierCount * sizeof(VkMemoryBarrier);
   cmd_size += bufferMemoryBarrierCount * sizeof(VkBufferMemoryBarrier);
   cmd_size += imageMemoryBarrierCount * sizeof(VkImageMemoryBarrier);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_PIPELINE_BARRIER);
   if (!cmd)
      return;

   cmd->u.pipeline_barrier.src_stage_mask = srcStageMask;
   cmd->u.pipeline_barrier.dst_stage_mask = destStageMask;
   cmd->u.pipeline_barrier.by_region = byRegion;
   cmd->u.pipeline_barrier.memory_barrier_count = memoryBarrierCount;
   cmd->u.pipeline_barrier.buffer_memory_barrier_count = bufferMemoryBarrierCount;
   cmd->u.pipeline_barrier.image_memory_barrier_count = imageMemoryBarrierCount;

   /* TODO: the barrier arrays are sized into the allocation above but not
    * yet copied into the trailing payload */
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdDrawIndirectCount(
   VkCommandBuffer                             commandBuffer,
   VkBuffer                                    buffer,
   VkDeviceSize                                offset,
   VkBuffer                                    countBuffer,
   VkDeviceSize                                countBufferOffset,
   uint32_t                                    maxDrawCount,
   uint32_t                                    stride)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, buf, buffer);
   LVP_FROM_HANDLE(lvp_buffer, count_buf, countBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DRAW_INDIRECT_COUNT);
   if (!cmd)
      return;

   cmd->u.draw_indirect_count.offset = offset;
   cmd->u.draw_indirect_count.buffer = buf;
   cmd->u.draw_indirect_count.count_buffer_offset = countBufferOffset;
   cmd->u.draw_indirect_count.count_buffer = count_buf;
   cmd->u.draw_indirect_count.max_draw_count = maxDrawCount;
   cmd->u.draw_indirect_count.stride = stride;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdDrawIndexedIndirectCount(
   VkCommandBuffer                             commandBuffer,
   VkBuffer                                    buffer,
   VkDeviceSize                                offset,
   VkBuffer                                    countBuffer,
   VkDeviceSize                                countBufferOffset,
   uint32_t                                    maxDrawCount,
   uint32_t                                    stride)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, buf, buffer);
   LVP_FROM_HANDLE(lvp_buffer, count_buf, countBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DRAW_INDEXED_INDIRECT_COUNT);
   if (!cmd)
      return;

   cmd->u.draw_indirect_count.offset = offset;
   cmd->u.draw_indirect_count.buffer = buf;
   cmd->u.draw_indirect_count.count_buffer_offset = countBufferOffset;
   cmd->u.draw_indirect_count.count_buffer = count_buf;
   cmd->u.draw_indirect_count.max_draw_count = maxDrawCount;
   cmd->u.draw_indirect_count.stride = stride;

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdPushDescriptorSetKHR(
   VkCommandBuffer                             commandBuffer,
   VkPipelineBindPoint                         pipelineBindPoint,
   VkPipelineLayout                            _layout,
   uint32_t                                    set,
   uint32_t                                    descriptorWriteCount,
   const VkWriteDescriptorSet*                 pDescriptorWrites)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_pipeline_layout, layout, _layout);
   struct lvp_cmd_buffer_entry *cmd;
   int cmd_size = 0;

   cmd_size += descriptorWriteCount * sizeof(struct lvp_write_descriptor);

   int count_descriptors = 0;

   for (unsigned i = 0; i < descriptorWriteCount; i++) {
      count_descriptors += pDescriptorWrites[i].descriptorCount;
   }
   cmd_size += count_descriptors * sizeof(union lvp_descriptor_info);
   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_PUSH_DESCRIPTOR_SET);
   if (!cmd)
      return;

   cmd->u.push_descriptor_set.bind_point = pipelineBindPoint;
   cmd->u.push_descriptor_set.layout = layout;
   cmd->u.push_descriptor_set.set = set;
   cmd->u.push_descriptor_set.descriptor_write_count = descriptorWriteCount;
   cmd->u.push_descriptor_set.descriptors = (struct lvp_write_descriptor *)(cmd + 1);
   cmd->u.push_descriptor_set.infos = (union lvp_descriptor_info *)(cmd->u.push_descriptor_set.descriptors + descriptorWriteCount);

   unsigned descriptor_index = 0;

   for (unsigned i = 0; i < descriptorWriteCount; i++) {
      struct lvp_write_descriptor *desc = &cmd->u.push_descriptor_set.descriptors[i];

      /* dstSet is ignored */
      desc->dst_binding = pDescriptorWrites[i].dstBinding;
      desc->dst_array_element = pDescriptorWrites[i].dstArrayElement;
      desc->descriptor_count = pDescriptorWrites[i].descriptorCount;
      desc->descriptor_type = pDescriptorWrites[i].descriptorType;

      for (unsigned j = 0; j < desc->descriptor_count; j++) {
         union lvp_descriptor_info *info = &cmd->u.push_descriptor_set.infos[descriptor_index + j];
         switch (desc->descriptor_type) {
         case VK_DESCRIPTOR_TYPE_SAMPLER:
            info->sampler = lvp_sampler_from_handle(pDescriptorWrites[i].pImageInfo[j].sampler);
            break;
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            info->sampler = lvp_sampler_from_handle(pDescriptorWrites[i].pImageInfo[j].sampler);
            info->iview = lvp_image_view_from_handle(pDescriptorWrites[i].pImageInfo[j].imageView);
            info->image_layout = pDescriptorWrites[i].pImageInfo[j].imageLayout;
            break;
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            info->iview = lvp_image_view_from_handle(pDescriptorWrites[i].pImageInfo[j].imageView);
            info->image_layout = pDescriptorWrites[i].pImageInfo[j].imageLayout;
            break;
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            info->buffer_view = lvp_buffer_view_from_handle(pDescriptorWrites[i].pTexelBufferView[j]);
            break;
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         default:
            info->buffer = lvp_buffer_from_handle(pDescriptorWrites[i].pBufferInfo[j].buffer);
            info->offset = pDescriptorWrites[i].pBufferInfo[j].offset;
            info->range = pDescriptorWrites[i].pBufferInfo[j].range;
            break;
         }
      }
      descriptor_index += desc->descriptor_count;
   }
   cmd_buf_queue(cmd_buffer, cmd);
}
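
/*
 * Editorial sketch of the payload layout built above (names from this file):
 * one allocation holds the entry header, then the flattened write array, then
 * a single run of descriptor infos indexed through descriptor_index:
 *
 *    [struct lvp_cmd_buffer_entry]
 *    [struct lvp_write_descriptor  x descriptorWriteCount]
 *    [union  lvp_descriptor_info   x sum of all descriptorCount]
 *
 * Write i's descriptors occupy infos[descriptor_index .. +descriptorCount-1],
 * so variable-length writes share one contiguous info array.
 */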
1633
1634
VKAPI_ATTR void VKAPI_CALL lvp_CmdPushDescriptorSetWithTemplateKHR(
   VkCommandBuffer commandBuffer,
   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
   VkPipelineLayout _layout,
   uint32_t set,
   const void* pData)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_descriptor_update_template, templ, descriptorUpdateTemplate);
   int cmd_size = 0;
   struct lvp_cmd_buffer_entry *cmd;

   cmd_size += templ->entry_count * sizeof(struct lvp_write_descriptor);

   int count_descriptors = 0;
   for (unsigned i = 0; i < templ->entry_count; i++) {
      VkDescriptorUpdateTemplateEntry *entry = &templ->entry[i];
      count_descriptors += entry->descriptorCount;
   }
   cmd_size += count_descriptors * sizeof(union lvp_descriptor_info);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_PUSH_DESCRIPTOR_SET);
   if (!cmd)
      return;

   cmd->u.push_descriptor_set.bind_point = templ->bind_point;
   cmd->u.push_descriptor_set.layout = templ->pipeline_layout;
   cmd->u.push_descriptor_set.set = templ->set;
   cmd->u.push_descriptor_set.descriptor_write_count = templ->entry_count;
   cmd->u.push_descriptor_set.descriptors = (struct lvp_write_descriptor *)(cmd + 1);
   cmd->u.push_descriptor_set.infos = (union lvp_descriptor_info *)(cmd->u.push_descriptor_set.descriptors + templ->entry_count);

   unsigned descriptor_index = 0;

   for (unsigned i = 0; i < templ->entry_count; i++) {
      struct lvp_write_descriptor *desc = &cmd->u.push_descriptor_set.descriptors[i];
      struct VkDescriptorUpdateTemplateEntry *entry = &templ->entry[i];
      const uint8_t *pSrc = ((const uint8_t *) pData) + entry->offset;

      /* dstSet is ignored */
      desc->dst_binding = entry->dstBinding;
      desc->dst_array_element = entry->dstArrayElement;
      desc->descriptor_count = entry->descriptorCount;
      desc->descriptor_type = entry->descriptorType;

      for (unsigned j = 0; j < desc->descriptor_count; j++) {
         union lvp_descriptor_info *info = &cmd->u.push_descriptor_set.infos[descriptor_index + j];
         switch (desc->descriptor_type) {
         case VK_DESCRIPTOR_TYPE_SAMPLER:
            info->sampler = lvp_sampler_from_handle(*(VkSampler *)pSrc);
            break;
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
            VkDescriptorImageInfo *image_info = (VkDescriptorImageInfo *)pSrc;
            info->sampler = lvp_sampler_from_handle(image_info->sampler);
            info->iview = lvp_image_view_from_handle(image_info->imageView);
            info->image_layout = image_info->imageLayout;
            break;
         }
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
            VkDescriptorImageInfo *image_info = (VkDescriptorImageInfo *)pSrc;
            info->iview = lvp_image_view_from_handle(image_info->imageView);
            info->image_layout = image_info->imageLayout;
            break;
         }
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            info->buffer_view = lvp_buffer_view_from_handle(*(VkBufferView *)pSrc);
            break;
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         default: {
            VkDescriptorBufferInfo *buffer_info = (VkDescriptorBufferInfo *)pSrc;
            info->buffer = lvp_buffer_from_handle(buffer_info->buffer);
            info->offset = buffer_info->offset;
            info->range = buffer_info->range;
            break;
         }
         }
         pSrc += entry->stride;
      }
      descriptor_index += desc->descriptor_count;
   }
   cmd_buf_queue(cmd_buffer, cmd);
}

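/* The transform feedback binding command below packs its variable-length
 * payload into one allocation directly after the command entry:
 *
 *    [lvp_cmd_buffer_entry | buffers[n] | offsets[n] | sizes[n]]
 *
 * A missing pSizes array, or a VK_WHOLE_SIZE entry, is resolved at record
 * time to the bytes remaining in the buffer past its offset.
 */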
VKAPI_ATTR void VKAPI_CALL lvp_CmdBindTransformFeedbackBuffersEXT(
   VkCommandBuffer commandBuffer,
   uint32_t firstBinding,
   uint32_t bindingCount,
   const VkBuffer* pBuffers,
   const VkDeviceSize* pOffsets,
   const VkDeviceSize* pSizes)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = 0;

   cmd_size += bindingCount * (sizeof(struct lvp_buffer *) + sizeof(VkDeviceSize) * 2);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_BIND_TRANSFORM_FEEDBACK_BUFFERS);
   if (!cmd)
      return;

   cmd->u.bind_transform_feedback_buffers.first_binding = firstBinding;
   cmd->u.bind_transform_feedback_buffers.binding_count = bindingCount;
   cmd->u.bind_transform_feedback_buffers.buffers = (struct lvp_buffer **)(cmd + 1);
   cmd->u.bind_transform_feedback_buffers.offsets = (VkDeviceSize *)(cmd->u.bind_transform_feedback_buffers.buffers + bindingCount);
   cmd->u.bind_transform_feedback_buffers.sizes = (VkDeviceSize *)(cmd->u.bind_transform_feedback_buffers.offsets + bindingCount);

   for (unsigned i = 0; i < bindingCount; i++) {
      cmd->u.bind_transform_feedback_buffers.buffers[i] = lvp_buffer_from_handle(pBuffers[i]);
      cmd->u.bind_transform_feedback_buffers.offsets[i] = pOffsets[i];
      if (pSizes && pSizes[i] != VK_WHOLE_SIZE)
         cmd->u.bind_transform_feedback_buffers.sizes[i] = pSizes[i];
      else
         cmd->u.bind_transform_feedback_buffers.sizes[i] = cmd->u.bind_transform_feedback_buffers.buffers[i]->size - pOffsets[i];
   }
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdBeginTransformFeedbackEXT(
   VkCommandBuffer commandBuffer,
   uint32_t firstCounterBuffer,
   uint32_t counterBufferCount,
   const VkBuffer* pCounterBuffers,
   const VkDeviceSize* pCounterBufferOffsets)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = 0;

   cmd_size += counterBufferCount * (sizeof(struct lvp_buffer *) + sizeof(VkDeviceSize));

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_BEGIN_TRANSFORM_FEEDBACK);
   if (!cmd)
      return;

   cmd->u.begin_transform_feedback.first_counter_buffer = firstCounterBuffer;
   cmd->u.begin_transform_feedback.counter_buffer_count = counterBufferCount;
   cmd->u.begin_transform_feedback.counter_buffers = (struct lvp_buffer **)(cmd + 1);
   cmd->u.begin_transform_feedback.counter_buffer_offsets = (VkDeviceSize *)(cmd->u.begin_transform_feedback.counter_buffers + counterBufferCount);

   for (unsigned i = 0; i < counterBufferCount; i++) {
      if (pCounterBuffers)
         cmd->u.begin_transform_feedback.counter_buffers[i] = lvp_buffer_from_handle(pCounterBuffers[i]);
      else
         cmd->u.begin_transform_feedback.counter_buffers[i] = NULL;
      if (pCounterBufferOffsets)
         cmd->u.begin_transform_feedback.counter_buffer_offsets[i] = pCounterBufferOffsets[i];
      else
         cmd->u.begin_transform_feedback.counter_buffer_offsets[i] = 0;
   }
   cmd_buf_queue(cmd_buffer, cmd);
}

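/* Note: lvp_CmdEndTransformFeedbackEXT below writes through the
 * begin_transform_feedback union member; begin and end carry an identical
 * counter-buffer payload, so only the command type distinguishes them.
 */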
VKAPI_ATTR void VKAPI_CALL lvp_CmdEndTransformFeedbackEXT(
   VkCommandBuffer commandBuffer,
   uint32_t firstCounterBuffer,
   uint32_t counterBufferCount,
   const VkBuffer* pCounterBuffers,
   const VkDeviceSize* pCounterBufferOffsets)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = 0;

   cmd_size += counterBufferCount * (sizeof(struct lvp_buffer *) + sizeof(VkDeviceSize));

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_END_TRANSFORM_FEEDBACK);
   if (!cmd)
      return;

   cmd->u.begin_transform_feedback.first_counter_buffer = firstCounterBuffer;
   cmd->u.begin_transform_feedback.counter_buffer_count = counterBufferCount;
   cmd->u.begin_transform_feedback.counter_buffers = (struct lvp_buffer **)(cmd + 1);
   cmd->u.begin_transform_feedback.counter_buffer_offsets = (VkDeviceSize *)(cmd->u.begin_transform_feedback.counter_buffers + counterBufferCount);

   for (unsigned i = 0; i < counterBufferCount; i++) {
      if (pCounterBuffers)
         cmd->u.begin_transform_feedback.counter_buffers[i] = lvp_buffer_from_handle(pCounterBuffers[i]);
      else
         cmd->u.begin_transform_feedback.counter_buffers[i] = NULL;
      if (pCounterBufferOffsets)
         cmd->u.begin_transform_feedback.counter_buffer_offsets[i] = pCounterBufferOffsets[i];
      else
         cmd->u.begin_transform_feedback.counter_buffer_offsets[i] = 0;
   }
   cmd_buf_queue(cmd_buffer, cmd);
}

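/* Records a draw whose vertex count is derived at execute time from a
 * transform feedback counter buffer, per the VK_EXT_transform_feedback
 * spec: (counter value - counterOffset) / vertexStride. Illustrative
 * application-side usage (hypothetical handles):
 *
 *    vkCmdDrawIndirectByteCountEXT(cmdbuf, 1, 0, counter_buf, 0, 0, stride);
 */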
VKAPI_ATTR void VKAPI_CALL lvp_CmdDrawIndirectByteCountEXT(
   VkCommandBuffer commandBuffer,
   uint32_t instanceCount,
   uint32_t firstInstance,
   VkBuffer counterBuffer,
   VkDeviceSize counterBufferOffset,
   uint32_t counterOffset,
   uint32_t vertexStride)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DRAW_INDIRECT_BYTE_COUNT);
   if (!cmd)
      return;

   cmd->u.draw_indirect_byte_count.instance_count = instanceCount;
   cmd->u.draw_indirect_byte_count.first_instance = firstInstance;
   cmd->u.draw_indirect_byte_count.counter_buffer = lvp_buffer_from_handle(counterBuffer);
   cmd->u.draw_indirect_byte_count.counter_buffer_offset = counterBufferOffset;
   cmd->u.draw_indirect_byte_count.counter_offset = counterOffset;
   cmd->u.draw_indirect_byte_count.vertex_stride = vertexStride;

   cmd_buf_queue(cmd_buffer, cmd);
}

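/* Device masks select devices within a device group; lavapipe device
 * groups contain a single device, so there is nothing to record.
 */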
VKAPI_ATTR void VKAPI_CALL lvp_CmdSetDeviceMask(
   VkCommandBuffer commandBuffer,
   uint32_t deviceMask)
{
   /* No-op */
}

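/* vkCmdDispatchBase records the same LVP_CMD_DISPATCH entry as plain
 * vkCmdDispatch, which presumably leaves the base workgroup coordinates
 * at zero; here they come from the caller.
 */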
VKAPI_ATTR void VKAPI_CALL lvp_CmdDispatchBase(
   VkCommandBuffer commandBuffer,
   uint32_t base_x,
   uint32_t base_y,
   uint32_t base_z,
   uint32_t x,
   uint32_t y,
   uint32_t z)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DISPATCH);
   if (!cmd)
      return;

   cmd->u.dispatch.x = x;
   cmd->u.dispatch.y = y;
   cmd->u.dispatch.z = z;
   cmd->u.dispatch.base_x = base_x;
   cmd->u.dispatch.base_y = base_y;
   cmd->u.dispatch.base_z = base_z;
   cmd_buf_queue(cmd_buffer, cmd);
}

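/* Conditional rendering: the predicate is a 32-bit value read from the
 * given buffer/offset at execute time, and
 * VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT flips the test; the recorded
 * "inverted" flag below captures that bit.
 */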
VKAPI_ATTR void VKAPI_CALL lvp_CmdBeginConditionalRenderingEXT(
   VkCommandBuffer commandBuffer,
   const VkConditionalRenderingBeginInfoEXT *pConditionalRenderingBegin)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_BEGIN_CONDITIONAL_RENDERING);
   if (!cmd)
      return;

   cmd->u.begin_conditional_rendering.buffer = lvp_buffer_from_handle(pConditionalRenderingBegin->buffer);
   cmd->u.begin_conditional_rendering.offset = pConditionalRenderingBegin->offset;
   cmd->u.begin_conditional_rendering.inverted = pConditionalRenderingBegin->flags & VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdEndConditionalRenderingEXT(
   VkCommandBuffer commandBuffer)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_END_CONDITIONAL_RENDERING);
   if (!cmd)
      return;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetCullModeEXT(
   VkCommandBuffer commandBuffer,
   VkCullModeFlags cullMode)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_CULL_MODE);
   if (!cmd)
      return;

   cmd->u.set_cull_mode.cull_mode = cullMode;
   cmd_buf_queue(cmd_buffer, cmd);
}

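/* The vertex input state command stores both description arrays back to
 * back in its trailing data[] blob: bindings first, then attributes at
 * byte offset binding_size.
 */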
VKAPI_ATTR void VKAPI_CALL lvp_CmdSetVertexInputEXT(
   VkCommandBuffer commandBuffer,
   uint32_t vertexBindingDescriptionCount,
   const VkVertexInputBindingDescription2EXT* pVertexBindingDescriptions,
   uint32_t vertexAttributeDescriptionCount,
   const VkVertexInputAttributeDescription2EXT* pVertexAttributeDescriptions)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   size_t binding_size = vertexBindingDescriptionCount * sizeof(VkVertexInputBindingDescription2EXT);
   size_t attr_size = vertexAttributeDescriptionCount * sizeof(VkVertexInputAttributeDescription2EXT);
   cmd = cmd_buf_entry_alloc_size(cmd_buffer, binding_size + attr_size, LVP_CMD_SET_VERTEX_INPUT);
   if (!cmd)
      return;

   cmd->u.set_vertex_input.binding_count = vertexBindingDescriptionCount;
   cmd->u.set_vertex_input.attr_count = vertexAttributeDescriptionCount;
   memcpy(cmd->u.set_vertex_input.data, pVertexBindingDescriptions, binding_size);
   memcpy(cmd->u.set_vertex_input.data + binding_size, pVertexAttributeDescriptions, attr_size);
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetFrontFaceEXT(
   VkCommandBuffer commandBuffer,
   VkFrontFace frontFace)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_FRONT_FACE);
   if (!cmd)
      return;

   cmd->u.set_front_face.front_face = frontFace;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetLineStippleEXT(
   VkCommandBuffer commandBuffer,
   uint32_t lineStippleFactor,
   uint16_t lineStipplePattern)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_LINE_STIPPLE);
   if (!cmd)
      return;

   cmd->u.set_line_stipple.line_stipple_factor = lineStippleFactor;
   cmd->u.set_line_stipple.line_stipple_pattern = lineStipplePattern;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetPrimitiveTopologyEXT(
   VkCommandBuffer commandBuffer,
   VkPrimitiveTopology primitiveTopology)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_PRIMITIVE_TOPOLOGY);
   if (!cmd)
      return;

   cmd->u.set_primitive_topology.prim = primitiveTopology;
   cmd_buf_queue(cmd_buffer, cmd);
}

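/* The "with count" viewport/scissor variants reuse LVP_CMD_SET_VIEWPORT
 * and LVP_CMD_SET_SCISSOR; first_viewport/first_scissor set to UINT32_MAX
 * acts as a sentinel so the execute side can tell a with-count update
 * apart from a firstViewport/firstScissor-style partial update.
 */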
VKAPI_ATTR void VKAPI_CALL lvp_CmdSetViewportWithCountEXT(
   VkCommandBuffer commandBuffer,
   uint32_t viewportCount,
   const VkViewport* pViewports)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   int i;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_VIEWPORT);
   if (!cmd)
      return;

   cmd->u.set_viewport.first_viewport = UINT32_MAX;
   cmd->u.set_viewport.viewport_count = viewportCount;
   for (i = 0; i < viewportCount; i++)
      cmd->u.set_viewport.viewports[i] = pViewports[i];

   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetScissorWithCountEXT(
   VkCommandBuffer commandBuffer,
   uint32_t scissorCount,
   const VkRect2D* pScissors)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   int i;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_SCISSOR);
   if (!cmd)
      return;

   cmd->u.set_scissor.first_scissor = UINT32_MAX;
   cmd->u.set_scissor.scissor_count = scissorCount;
   for (i = 0; i < scissorCount; i++)
      cmd->u.set_scissor.scissors[i] = pScissors[i];

   cmd_buf_queue(cmd_buffer, cmd);
}

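/* vkCmdBindVertexBuffers2EXT: pSizes and pStrides are optional, so the
 * allocation reserves two or three VkDeviceSize arrays per binding. When
 * pStrides is NULL the strides pointer is computed but never written or
 * dereferenced, and the recorded strides field is left NULL.
 */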
VKAPI_ATTR void VKAPI_CALL lvp_CmdBindVertexBuffers2EXT(
   VkCommandBuffer commandBuffer,
   uint32_t firstBinding,
   uint32_t bindingCount,
   const VkBuffer* pBuffers,
   const VkDeviceSize* pOffsets,
   const VkDeviceSize* pSizes,
   const VkDeviceSize* pStrides)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   struct lvp_buffer **buffers;
   VkDeviceSize *offsets;
   VkDeviceSize *sizes;
   VkDeviceSize *strides;
   int i;
   uint32_t array_count = pStrides ? 3 : 2;
   uint32_t cmd_size = bindingCount * sizeof(struct lvp_buffer *) + bindingCount * array_count * sizeof(VkDeviceSize);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_BIND_VERTEX_BUFFERS);
   if (!cmd)
      return;

   cmd->u.vertex_buffers.first = firstBinding;
   cmd->u.vertex_buffers.binding_count = bindingCount;

   buffers = (struct lvp_buffer **)(cmd + 1);
   offsets = (VkDeviceSize *)(buffers + bindingCount);
   sizes = (VkDeviceSize *)(offsets + bindingCount);
   strides = (VkDeviceSize *)(sizes + bindingCount);
   for (i = 0; i < bindingCount; i++) {
      buffers[i] = lvp_buffer_from_handle(pBuffers[i]);
      offsets[i] = pOffsets[i];
      if (pSizes)
         sizes[i] = pSizes[i];
      else
         sizes[i] = 0;

      if (pStrides)
         strides[i] = pStrides[i];
   }
   cmd->u.vertex_buffers.buffers = buffers;
   cmd->u.vertex_buffers.offsets = offsets;
   cmd->u.vertex_buffers.sizes = sizes;
   cmd->u.vertex_buffers.strides = pStrides ? strides : NULL;

   cmd_buf_queue(cmd_buffer, cmd);
}

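/* The remaining VK_EXT_extended_dynamic_state / extended_dynamic_state2
 * entry points all follow the same shape: allocate a fixed-size command
 * entry, stash the single piece of state, and queue it for replay.
 */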
VKAPI_ATTR void VKAPI_CALL lvp_CmdSetDepthTestEnableEXT(
   VkCommandBuffer commandBuffer,
   VkBool32 depthTestEnable)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_DEPTH_TEST_ENABLE);
   if (!cmd)
      return;

   cmd->u.set_depth_test_enable.depth_test_enable = depthTestEnable;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetDepthWriteEnableEXT(
   VkCommandBuffer commandBuffer,
   VkBool32 depthWriteEnable)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_DEPTH_WRITE_ENABLE);
   if (!cmd)
      return;

   cmd->u.set_depth_write_enable.depth_write_enable = depthWriteEnable;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetDepthCompareOpEXT(
   VkCommandBuffer commandBuffer,
   VkCompareOp depthCompareOp)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_DEPTH_COMPARE_OP);
   if (!cmd)
      return;

   cmd->u.set_depth_compare_op.depth_op = depthCompareOp;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetDepthBoundsTestEnableEXT(
   VkCommandBuffer commandBuffer,
   VkBool32 depthBoundsTestEnable)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_DEPTH_BOUNDS_TEST_ENABLE);
   if (!cmd)
      return;

   cmd->u.set_depth_bounds_test_enable.depth_bounds_test_enable = depthBoundsTestEnable;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetStencilTestEnableEXT(
   VkCommandBuffer commandBuffer,
   VkBool32 stencilTestEnable)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_STENCIL_TEST_ENABLE);
   if (!cmd)
      return;

   cmd->u.set_stencil_test_enable.stencil_test_enable = stencilTestEnable;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetStencilOpEXT(
   VkCommandBuffer commandBuffer,
   VkStencilFaceFlags faceMask,
   VkStencilOp failOp,
   VkStencilOp passOp,
   VkStencilOp depthFailOp,
   VkCompareOp compareOp)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_STENCIL_OP);
   if (!cmd)
      return;

   cmd->u.set_stencil_op.face_mask = faceMask;
   cmd->u.set_stencil_op.fail_op = failOp;
   cmd->u.set_stencil_op.pass_op = passOp;
   cmd->u.set_stencil_op.depth_fail_op = depthFailOp;
   cmd->u.set_stencil_op.compare_op = compareOp;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetDepthBiasEnableEXT(
   VkCommandBuffer commandBuffer,
   VkBool32 depthBiasEnable)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_DEPTH_BIAS_ENABLE);
   if (!cmd)
      return;

   cmd->u.set_depth_bias_enable.enable = depthBiasEnable == VK_TRUE;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetLogicOpEXT(
   VkCommandBuffer commandBuffer,
   VkLogicOp logicOp)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_LOGIC_OP);
   if (!cmd)
      return;

   cmd->u.set_logic_op.op = logicOp;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetPatchControlPointsEXT(
   VkCommandBuffer commandBuffer,
   uint32_t patchControlPoints)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_PATCH_CONTROL_POINTS);
   if (!cmd)
      return;

   cmd->u.set_patch_control_points.vertices_per_patch = patchControlPoints;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetPrimitiveRestartEnableEXT(
   VkCommandBuffer commandBuffer,
   VkBool32 primitiveRestartEnable)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_PRIMITIVE_RESTART_ENABLE);
   if (!cmd)
      return;

   cmd->u.set_primitive_restart_enable.enable = primitiveRestartEnable == VK_TRUE;
   cmd_buf_queue(cmd_buffer, cmd);
}

VKAPI_ATTR void VKAPI_CALL lvp_CmdSetRasterizerDiscardEnableEXT(
   VkCommandBuffer commandBuffer,
   VkBool32 rasterizerDiscardEnable)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_RASTERIZER_DISCARD_ENABLE);
   if (!cmd)
      return;

   cmd->u.set_rasterizer_discard_enable.enable = rasterizerDiscardEnable == VK_TRUE;
   cmd_buf_queue(cmd_buffer, cmd);
}