Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/amd/vulkan/layers/radv_sqtt_layer.c
7229 views
1
/*
2
* Copyright © 2020 Valve Corporation
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*/
23
24
#include "radv_private.h"
25
#include "radv_shader.h"
26
27
#include "ac_rgp.h"
28
#include "ac_sqtt.h"
29
30
static void
31
radv_write_begin_general_api_marker(struct radv_cmd_buffer *cmd_buffer,
32
enum rgp_sqtt_marker_general_api_type api_type)
33
{
34
struct rgp_sqtt_marker_general_api marker = {0};
35
struct radeon_cmdbuf *cs = cmd_buffer->cs;
36
37
marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_GENERAL_API;
38
marker.api_type = api_type;
39
40
radv_emit_thread_trace_userdata(cmd_buffer->device, cs, &marker, sizeof(marker) / 4);
41
}
42
43
static void
44
radv_write_end_general_api_marker(struct radv_cmd_buffer *cmd_buffer,
45
enum rgp_sqtt_marker_general_api_type api_type)
46
{
47
struct rgp_sqtt_marker_general_api marker = {0};
48
struct radeon_cmdbuf *cs = cmd_buffer->cs;
49
50
marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_GENERAL_API;
51
marker.api_type = api_type;
52
marker.is_end = 1;
53
54
radv_emit_thread_trace_userdata(cmd_buffer->device, cs, &marker, sizeof(marker) / 4);
55
}
56
57
static void
58
radv_write_event_marker(struct radv_cmd_buffer *cmd_buffer,
59
enum rgp_sqtt_marker_event_type api_type, uint32_t vertex_offset_user_data,
60
uint32_t instance_offset_user_data, uint32_t draw_index_user_data)
61
{
62
struct rgp_sqtt_marker_event marker = {0};
63
struct radeon_cmdbuf *cs = cmd_buffer->cs;
64
65
marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_EVENT;
66
marker.api_type = api_type;
67
marker.cmd_id = cmd_buffer->state.num_events++;
68
marker.cb_id = 0;
69
70
if (vertex_offset_user_data == UINT_MAX || instance_offset_user_data == UINT_MAX) {
71
vertex_offset_user_data = 0;
72
instance_offset_user_data = 0;
73
}
74
75
if (draw_index_user_data == UINT_MAX)
76
draw_index_user_data = vertex_offset_user_data;
77
78
marker.vertex_offset_reg_idx = vertex_offset_user_data;
79
marker.instance_offset_reg_idx = instance_offset_user_data;
80
marker.draw_index_reg_idx = draw_index_user_data;
81
82
radv_emit_thread_trace_userdata(cmd_buffer->device, cs, &marker, sizeof(marker) / 4);
83
}
84
85
static void
86
radv_write_event_with_dims_marker(struct radv_cmd_buffer *cmd_buffer,
87
enum rgp_sqtt_marker_event_type api_type, uint32_t x, uint32_t y,
88
uint32_t z)
89
{
90
struct rgp_sqtt_marker_event_with_dims marker = {0};
91
struct radeon_cmdbuf *cs = cmd_buffer->cs;
92
93
marker.event.identifier = RGP_SQTT_MARKER_IDENTIFIER_EVENT;
94
marker.event.api_type = api_type;
95
marker.event.cmd_id = cmd_buffer->state.num_events++;
96
marker.event.cb_id = 0;
97
marker.event.has_thread_dims = 1;
98
99
marker.thread_x = x;
100
marker.thread_y = y;
101
marker.thread_z = z;
102
103
radv_emit_thread_trace_userdata(cmd_buffer->device, cs, &marker, sizeof(marker) / 4);
104
}
105
106
/* Emit an RGP user-event marker (VK_EXT_debug_marker push/pop/insert).
 *
 * Pop markers carry no string; push/trigger markers append the label string
 * after the fixed header, zero-padded up to the next dword boundary.
 */
static void
radv_write_user_event_marker(struct radv_cmd_buffer *cmd_buffer,
                             enum rgp_sqtt_marker_user_event_type type, const char *str)
{
   struct radeon_cmdbuf *cs = cmd_buffer->cs;

   if (type == UserEventPop) {
      /* A pop has no payload: emit just the fixed-size header. */
      assert(str == NULL);
      struct rgp_sqtt_marker_user_event marker = {0};
      marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_USER_EVENT;
      marker.data_type = type;

      radv_emit_thread_trace_userdata(cmd_buffer->device, cs, &marker, sizeof(marker) / 4);
   } else {
      assert(str != NULL);
      unsigned len = strlen(str);
      struct rgp_sqtt_marker_user_event_with_length marker = {0};
      marker.user_event.identifier = RGP_SQTT_MARKER_IDENTIFIER_USER_EVENT;
      marker.user_event.data_type = type;
      /* The payload length must be dword-aligned since the marker is
       * emitted as a sequence of dwords. */
      marker.length = align(len, 4);

      /* Build header + padded string contiguously on the stack; the memset
       * zeroes the alignment padding past the string. */
      uint8_t *buffer = alloca(sizeof(marker) + marker.length);
      memset(buffer, 0, sizeof(marker) + marker.length);
      memcpy(buffer, &marker, sizeof(marker));
      memcpy(buffer + sizeof(marker), str, len);

      radv_emit_thread_trace_userdata(cmd_buffer->device, cs, buffer,
                                      sizeof(marker) / 4 + marker.length / 4);
   }
}
136
137
void
138
radv_describe_begin_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
139
{
140
uint64_t device_id = (uintptr_t)cmd_buffer->device;
141
struct rgp_sqtt_marker_cb_start marker = {0};
142
struct radeon_cmdbuf *cs = cmd_buffer->cs;
143
144
if (likely(!cmd_buffer->device->thread_trace.bo))
145
return;
146
147
marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_CB_START;
148
marker.cb_id = 0;
149
marker.device_id_low = device_id;
150
marker.device_id_high = device_id >> 32;
151
marker.queue = cmd_buffer->queue_family_index;
152
marker.queue_flags = VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT | VK_QUEUE_SPARSE_BINDING_BIT;
153
154
if (cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL)
155
marker.queue_flags |= VK_QUEUE_GRAPHICS_BIT;
156
157
radv_emit_thread_trace_userdata(cmd_buffer->device, cs, &marker, sizeof(marker) / 4);
158
}
159
160
void
161
radv_describe_end_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
162
{
163
uint64_t device_id = (uintptr_t)cmd_buffer->device;
164
struct rgp_sqtt_marker_cb_end marker = {0};
165
struct radeon_cmdbuf *cs = cmd_buffer->cs;
166
167
if (likely(!cmd_buffer->device->thread_trace.bo))
168
return;
169
170
marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_CB_END;
171
marker.cb_id = 0;
172
marker.device_id_low = device_id;
173
marker.device_id_high = device_id >> 32;
174
175
radv_emit_thread_trace_userdata(cmd_buffer->device, cs, &marker, sizeof(marker) / 4);
176
}
177
178
/* Record an RGP event for the current draw. The event type was set by the
 * surrounding EVENT_MARKER wrapper (or a render-pass meta operation);
 * UINT_MAX means the user-SGPR indices are unknown/unassigned.
 */
void
radv_describe_draw(struct radv_cmd_buffer *cmd_buffer)
{
   if (likely(!cmd_buffer->device->thread_trace.bo))
      return;

   radv_write_event_marker(cmd_buffer, cmd_buffer->state.current_event_type, UINT_MAX, UINT_MAX,
                           UINT_MAX);
}
187
188
/* Record an RGP event for the current dispatch, including its thread-group
 * dimensions. No-op unless a thread trace is being captured.
 */
void
radv_describe_dispatch(struct radv_cmd_buffer *cmd_buffer, int x, int y, int z)
{
   if (likely(!cmd_buffer->device->thread_trace.bo))
      return;

   radv_write_event_with_dims_marker(cmd_buffer, cmd_buffer->state.current_event_type, x, y, z);
}
196
197
void
198
radv_describe_begin_render_pass_clear(struct radv_cmd_buffer *cmd_buffer,
199
VkImageAspectFlagBits aspects)
200
{
201
cmd_buffer->state.current_event_type = (aspects & VK_IMAGE_ASPECT_COLOR_BIT)
202
? EventRenderPassColorClear
203
: EventRenderPassDepthStencilClear;
204
}
205
206
/* Reset the event tag once the render-pass clear meta operation is done. */
void
radv_describe_end_render_pass_clear(struct radv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->state.current_event_type = EventInternalUnknown;
}
211
212
/* Tag subsequent meta draws as render-pass resolves. */
void
radv_describe_begin_render_pass_resolve(struct radv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->state.current_event_type = EventRenderPassResolve;
}
217
218
/* Reset the event tag once the render-pass resolve meta operation is done. */
void
radv_describe_end_render_pass_resolve(struct radv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->state.current_event_type = EventInternalUnknown;
}
223
224
void
225
radv_describe_barrier_end_delayed(struct radv_cmd_buffer *cmd_buffer)
226
{
227
struct rgp_sqtt_marker_barrier_end marker = {0};
228
struct radeon_cmdbuf *cs = cmd_buffer->cs;
229
230
if (likely(!cmd_buffer->device->thread_trace.bo) || !cmd_buffer->state.pending_sqtt_barrier_end)
231
return;
232
233
cmd_buffer->state.pending_sqtt_barrier_end = false;
234
235
marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_BARRIER_END;
236
marker.cb_id = 0;
237
238
marker.num_layout_transitions = cmd_buffer->state.num_layout_transitions;
239
240
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_WAIT_ON_EOP_TS)
241
marker.wait_on_eop_ts = true;
242
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_VS_PARTIAL_FLUSH)
243
marker.vs_partial_flush = true;
244
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_PS_PARTIAL_FLUSH)
245
marker.ps_partial_flush = true;
246
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_CS_PARTIAL_FLUSH)
247
marker.cs_partial_flush = true;
248
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_PFP_SYNC_ME)
249
marker.pfp_sync_me = true;
250
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_SYNC_CP_DMA)
251
marker.sync_cp_dma = true;
252
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_INVAL_VMEM_L0)
253
marker.inval_tcp = true;
254
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_INVAL_ICACHE)
255
marker.inval_sqI = true;
256
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_INVAL_SMEM_L0)
257
marker.inval_sqK = true;
258
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_FLUSH_L2)
259
marker.flush_tcc = true;
260
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_INVAL_L2)
261
marker.inval_tcc = true;
262
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_FLUSH_CB)
263
marker.flush_cb = true;
264
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_INVAL_CB)
265
marker.inval_cb = true;
266
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_FLUSH_DB)
267
marker.flush_db = true;
268
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_INVAL_DB)
269
marker.inval_db = true;
270
if (cmd_buffer->state.sqtt_flush_bits & RGP_FLUSH_INVAL_L1)
271
marker.inval_gl1 = true;
272
273
radv_emit_thread_trace_userdata(cmd_buffer->device, cs, &marker, sizeof(marker) / 4);
274
275
cmd_buffer->state.num_layout_transitions = 0;
276
}
277
278
void
279
radv_describe_barrier_start(struct radv_cmd_buffer *cmd_buffer, enum rgp_barrier_reason reason)
280
{
281
struct rgp_sqtt_marker_barrier_start marker = {0};
282
struct radeon_cmdbuf *cs = cmd_buffer->cs;
283
284
if (likely(!cmd_buffer->device->thread_trace.bo))
285
return;
286
287
radv_describe_barrier_end_delayed(cmd_buffer);
288
cmd_buffer->state.sqtt_flush_bits = 0;
289
290
marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_BARRIER_START;
291
marker.cb_id = 0;
292
marker.dword02 = reason;
293
294
radv_emit_thread_trace_userdata(cmd_buffer->device, cs, &marker, sizeof(marker) / 4);
295
}
296
297
/* Defer the barrier-end marker: it is emitted later by
 * radv_describe_barrier_end_delayed() once the flush bits for the barrier
 * have been accumulated.
 */
void
radv_describe_barrier_end(struct radv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->state.pending_sqtt_barrier_end = true;
}
302
303
/* Emit an RGP "layout transition" marker describing which decompress/expand
 * meta operations the transition performed, and count it so the next
 * barrier-end marker can report the total. No-op while no trace is active.
 */
void
radv_describe_layout_transition(struct radv_cmd_buffer *cmd_buffer,
                                const struct radv_barrier_data *barrier)
{
   struct rgp_sqtt_marker_layout_transition marker = {0};
   struct radeon_cmdbuf *cs = cmd_buffer->cs;

   if (likely(!cmd_buffer->device->thread_trace.bo))
      return;

   marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_LAYOUT_TRANSITION;
   marker.depth_stencil_expand = barrier->layout_transitions.depth_stencil_expand;
   marker.htile_hiz_range_expand = barrier->layout_transitions.htile_hiz_range_expand;
   marker.depth_stencil_resummarize = barrier->layout_transitions.depth_stencil_resummarize;
   marker.dcc_decompress = barrier->layout_transitions.dcc_decompress;
   marker.fmask_decompress = barrier->layout_transitions.fmask_decompress;
   marker.fast_clear_eliminate = barrier->layout_transitions.fast_clear_eliminate;
   marker.fmask_color_expand = barrier->layout_transitions.fmask_color_expand;
   marker.init_mask_ram = barrier->layout_transitions.init_mask_ram;

   radv_emit_thread_trace_userdata(cmd_buffer->device, cs, &marker, sizeof(marker) / 4);

   /* Reported (and reset) by the matching barrier-end marker. */
   cmd_buffer->state.num_layout_transitions++;
}
327
328
static void
329
radv_describe_pipeline_bind(struct radv_cmd_buffer *cmd_buffer,
330
VkPipelineBindPoint pipelineBindPoint, struct radv_pipeline *pipeline)
331
{
332
struct rgp_sqtt_marker_pipeline_bind marker = {0};
333
struct radeon_cmdbuf *cs = cmd_buffer->cs;
334
335
if (likely(!cmd_buffer->device->thread_trace.bo))
336
return;
337
338
marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_BIND_PIPELINE;
339
marker.cb_id = 0;
340
marker.bind_point = pipelineBindPoint;
341
marker.api_pso_hash[0] = pipeline->pipeline_hash;
342
marker.api_pso_hash[1] = pipeline->pipeline_hash >> 32;
343
344
radv_emit_thread_trace_userdata(cmd_buffer->device, cs, &marker, sizeof(marker) / 4);
345
}
346
347
/* TODO: Improve the way to trigger capture (overlay, etc). */
348
/* Per-present SQTT capture state machine: stop and dump an in-flight capture,
 * then decide whether to start a new one (frame-number trigger, trigger file,
 * or automatic retry after a too-small capture buffer).
 */
static void
radv_handle_thread_trace(VkQueue _queue)
{
   RADV_FROM_HANDLE(radv_queue, queue, _queue);
   /* Capture state persists across frames in function-local statics.
    * NOTE(review): this assumes presents are serialized on one queue;
    * concurrent presenting queues would race on these — confirm. */
   static bool thread_trace_enabled = false;
   static uint64_t num_frames = 0;
   bool resize_trigger = false;

   if (thread_trace_enabled) {
      struct ac_thread_trace thread_trace = {0};

      radv_end_thread_trace(queue);
      thread_trace_enabled = false;

      /* TODO: Do something better than this whole sync. */
      radv_QueueWaitIdle(_queue);

      if (radv_get_thread_trace(queue, &thread_trace)) {
         ac_dump_rgp_capture(&queue->device->physical_device->rad_info, &thread_trace);
      } else {
         /* Trigger a new capture if the driver failed to get
          * the trace because the buffer was too small.
          */
         resize_trigger = true;
      }
   }

   if (!thread_trace_enabled) {
      bool frame_trigger = num_frames == queue->device->thread_trace.start_frame;
      bool file_trigger = false;
#ifndef _WIN32
      /* Trigger-file mechanism: the user creates the file to request a
       * capture; it is unlinked here so each creation yields one capture. */
      if (queue->device->thread_trace.trigger_file &&
          access(queue->device->thread_trace.trigger_file, W_OK) == 0) {
         if (unlink(queue->device->thread_trace.trigger_file) == 0) {
            file_trigger = true;
         } else {
            /* Do not enable tracing if we cannot remove the file,
             * because by then we'll trace every frame ... */
            fprintf(stderr, "RADV: could not remove thread trace trigger file, ignoring\n");
         }
      }
#endif

      if (frame_trigger || file_trigger || resize_trigger) {
         /* FIXME: SQTT on compute hangs. */
         if (queue->queue_family_index == RADV_QUEUE_COMPUTE) {
            fprintf(stderr, "RADV: Capturing a SQTT trace on the compute "
                            "queue is currently broken and might hang! "
                            "Please, disable presenting on compute if "
                            "you can.\n");
            return;
         }

         radv_begin_thread_trace(queue);
         assert(!thread_trace_enabled);
         thread_trace_enabled = true;
      }
   }
   num_frames++;
}
408
409
VkResult
410
sqtt_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
411
{
412
VkResult result;
413
414
result = radv_QueuePresentKHR(_queue, pPresentInfo);
415
if (result != VK_SUCCESS)
416
return result;
417
418
radv_handle_thread_trace(_queue);
419
420
return VK_SUCCESS;
421
}
422
423
/* Wrap a radv_Cmd* entry point with begin/end general-API markers and tag
 * state.current_event_type so draws/dispatches recorded inside are attributed
 * to the API-level event (EventCmd##api_name). The _ALIAS form lets the RGP
 * event name differ from the driver entry point name (e.g. the *2KHR copy
 * commands map onto the core event names).
 */
#define EVENT_MARKER_ALIAS(cmd_name, api_name, ...)                                                \
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);                                   \
   radv_write_begin_general_api_marker(cmd_buffer, ApiCmd##api_name);                              \
   cmd_buffer->state.current_event_type = EventCmd##api_name;                                      \
   radv_Cmd##cmd_name(__VA_ARGS__);                                                                \
   cmd_buffer->state.current_event_type = EventInternalUnknown;                                    \
   radv_write_end_general_api_marker(cmd_buffer, ApiCmd##api_name);

#define EVENT_MARKER(cmd_name, ...) EVENT_MARKER_ALIAS(cmd_name, cmd_name, __VA_ARGS__);
432
433
/* Draw/dispatch entry points: forward to the driver wrapped in EVENT_MARKER
 * so each command is bracketed by API markers and tagged as an RGP event.
 */
void
sqtt_CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
             uint32_t firstVertex, uint32_t firstInstance)
{
   EVENT_MARKER(Draw, commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
}

void
sqtt_CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                    uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance)
{
   EVENT_MARKER(DrawIndexed, commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
                firstInstance);
}

void
sqtt_CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                     uint32_t drawCount, uint32_t stride)
{
   EVENT_MARKER(DrawIndirect, commandBuffer, buffer, offset, drawCount, stride);
}

void
sqtt_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                            uint32_t drawCount, uint32_t stride)
{
   EVENT_MARKER(DrawIndexedIndirect, commandBuffer, buffer, offset, drawCount, stride);
}

void
sqtt_CmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                          VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                          uint32_t maxDrawCount, uint32_t stride)
{
   EVENT_MARKER(DrawIndirectCount, commandBuffer, buffer, offset, countBuffer, countBufferOffset,
                maxDrawCount, stride);
}

void
sqtt_CmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer,
                                 VkDeviceSize offset, VkBuffer countBuffer,
                                 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                 uint32_t stride)
{
   EVENT_MARKER(DrawIndexedIndirectCount, commandBuffer, buffer, offset, countBuffer,
                countBufferOffset, maxDrawCount, stride);
}

void
sqtt_CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z)
{
   EVENT_MARKER(Dispatch, commandBuffer, x, y, z);
}

void
sqtt_CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset)
{
   EVENT_MARKER(DispatchIndirect, commandBuffer, buffer, offset);
}
492
493
/* Transfer entry points: the *2KHR variants are aliased to the core event
 * names so RGP shows them as the canonical copy/blit events.
 */
void
sqtt_CmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfo)
{
   EVENT_MARKER_ALIAS(CopyBuffer2KHR, CopyBuffer, commandBuffer, pCopyBufferInfo);
}

void
sqtt_CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                   VkDeviceSize fillSize, uint32_t data)
{
   EVENT_MARKER(FillBuffer, commandBuffer, dstBuffer, dstOffset, fillSize, data);
}

void
sqtt_CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                     VkDeviceSize dataSize, const void *pData)
{
   EVENT_MARKER(UpdateBuffer, commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}

void
sqtt_CmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo)
{
   EVENT_MARKER_ALIAS(CopyImage2KHR, CopyImage, commandBuffer, pCopyImageInfo);
}

void
sqtt_CmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
                              const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo)
{
   EVENT_MARKER_ALIAS(CopyBufferToImage2KHR, CopyBufferToImage, commandBuffer,
                      pCopyBufferToImageInfo);
}

void
sqtt_CmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
                              const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo)
{
   EVENT_MARKER_ALIAS(CopyImageToBuffer2KHR, CopyImageToBuffer, commandBuffer,
                      pCopyImageToBufferInfo);
}

void
sqtt_CmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo)
{
   EVENT_MARKER_ALIAS(BlitImage2KHR, BlitImage, commandBuffer, pBlitImageInfo);
}
540
541
/* Clear/resolve entry points, wrapped as RGP events. */
void
sqtt_CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image_h, VkImageLayout imageLayout,
                        const VkClearColorValue *pColor, uint32_t rangeCount,
                        const VkImageSubresourceRange *pRanges)
{
   EVENT_MARKER(ClearColorImage, commandBuffer, image_h, imageLayout, pColor, rangeCount, pRanges);
}

void
sqtt_CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image_h,
                               VkImageLayout imageLayout,
                               const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                               const VkImageSubresourceRange *pRanges)
{
   EVENT_MARKER(ClearDepthStencilImage, commandBuffer, image_h, imageLayout, pDepthStencil,
                rangeCount, pRanges);
}

void
sqtt_CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                         const VkClearAttachment *pAttachments, uint32_t rectCount,
                         const VkClearRect *pRects)
{
   EVENT_MARKER(ClearAttachments, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}

void
sqtt_CmdResolveImage2KHR(VkCommandBuffer commandBuffer,
                         const VkResolveImageInfo2KHR *pResolveImageInfo)
{
   EVENT_MARKER_ALIAS(ResolveImage2KHR, ResolveImage, commandBuffer, pResolveImageInfo);
}
573
574
/* Synchronization and query entry points, wrapped as RGP events. */
void
sqtt_CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                   VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                   uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                   uint32_t bufferMemoryBarrierCount,
                   const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                   uint32_t imageMemoryBarrierCount,
                   const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   EVENT_MARKER(WaitEvents, commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask,
                memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

void
sqtt_CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                        VkPipelineStageFlags destStageMask, VkBool32 byRegion,
                        uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                        uint32_t bufferMemoryBarrierCount,
                        const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                        uint32_t imageMemoryBarrierCount,
                        const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   EVENT_MARKER(PipelineBarrier, commandBuffer, srcStageMask, destStageMask, byRegion,
                memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

void
sqtt_CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                       uint32_t queryCount)
{
   EVENT_MARKER(ResetQueryPool, commandBuffer, queryPool, firstQuery, queryCount);
}

void
sqtt_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
                             uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
                             VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags)
{
   EVENT_MARKER(CopyQueryPoolResults, commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
                dstOffset, stride, flags);
}
617
618
#undef EVENT_MARKER

/* Like EVENT_MARKER but without touching current_event_type: used for state
 * commands that emit general API markers but are not RGP events themselves.
 */
#define API_MARKER_ALIAS(cmd_name, api_name, ...)                                                  \
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);                                   \
   radv_write_begin_general_api_marker(cmd_buffer, ApiCmd##api_name);                              \
   radv_Cmd##cmd_name(__VA_ARGS__);                                                               \
   radv_write_end_general_api_marker(cmd_buffer, ApiCmd##api_name);

#define API_MARKER(cmd_name, ...) API_MARKER_ALIAS(cmd_name, cmd_name, __VA_ARGS__);
626
627
/* Whether pipeline/shader code objects should be recorded into the SQTT
 * capture, controlled by the RADV_THREAD_TRACE_PIPELINE environment variable
 * (set to any value, including empty, to enable).
 *
 * Fix: use a (void) prototype — an empty parameter list in C (pre-C23)
 * declares an old-style unprototyped function — and make the pointer-to-bool
 * conversion explicit.
 */
static bool
radv_sqtt_dump_pipeline(void)
{
   return getenv("RADV_THREAD_TRACE_PIPELINE") != NULL;
}
632
633
/* Bind a pipeline, emitting API markers; additionally emit an RGP pipeline
 * bind marker when pipeline dumping is enabled via the environment.
 */
void
sqtt_CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                     VkPipeline _pipeline)
{
   RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

   API_MARKER(BindPipeline, commandBuffer, pipelineBindPoint, _pipeline);

   if (radv_sqtt_dump_pipeline())
      radv_describe_pipeline_bind(cmd_buffer, pipelineBindPoint, pipeline);
}
644
645
/* Binding/query/push-constant entry points: forwarded with API markers only
 * (no RGP event is generated for these).
 */
void
sqtt_CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                           VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount,
                           const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                           const uint32_t *pDynamicOffsets)
{
   API_MARKER(BindDescriptorSets, commandBuffer, pipelineBindPoint, layout, firstSet,
              descriptorSetCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}

void
sqtt_CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                        VkIndexType indexType)
{
   API_MARKER(BindIndexBuffer, commandBuffer, buffer, offset, indexType);
}

void
sqtt_CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
                          uint32_t bindingCount, const VkBuffer *pBuffers,
                          const VkDeviceSize *pOffsets)
{
   API_MARKER(BindVertexBuffers, commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}

void
sqtt_CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                   VkQueryControlFlags flags)
{
   API_MARKER(BeginQuery, commandBuffer, queryPool, query, flags);
}

void
sqtt_CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query)
{
   API_MARKER(EndQuery, commandBuffer, queryPool, query);
}

void
sqtt_CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                       VkQueryPool queryPool, uint32_t flags)
{
   API_MARKER(WriteTimestamp, commandBuffer, pipelineStage, queryPool, flags);
}

void
sqtt_CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                      VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                      const void *pValues)
{
   API_MARKER(PushConstants, commandBuffer, layout, stageFlags, offset, size, pValues);
}
697
698
/* Render-pass entry points: the *2 variants alias to the core marker names. */
void
sqtt_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
                         const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                         const VkSubpassBeginInfo *pSubpassBeginInfo)
{
   API_MARKER_ALIAS(BeginRenderPass2, BeginRenderPass, commandBuffer, pRenderPassBeginInfo,
                    pSubpassBeginInfo);
}

void
sqtt_CmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                     const VkSubpassEndInfo *pSubpassEndInfo)
{
   API_MARKER_ALIAS(NextSubpass2, NextSubpass, commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
}

void
sqtt_CmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo)
{
   API_MARKER_ALIAS(EndRenderPass2, EndRenderPass, commandBuffer, pSubpassEndInfo);
}

void
sqtt_CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount,
                        const VkCommandBuffer *pCmdBuffers)
{
   API_MARKER(ExecuteCommands, commandBuffer, commandBufferCount, pCmdBuffers);
}
726
727
/* Dynamic-state setters: forwarded with API markers only. */
void
sqtt_CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                    const VkViewport *pViewports)
{
   API_MARKER(SetViewport, commandBuffer, firstViewport, viewportCount, pViewports);
}

void
sqtt_CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
                   const VkRect2D *pScissors)
{
   API_MARKER(SetScissor, commandBuffer, firstScissor, scissorCount, pScissors);
}

void
sqtt_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
   API_MARKER(SetLineWidth, commandBuffer, lineWidth);
}

void
sqtt_CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor,
                     float depthBiasClamp, float depthBiasSlopeFactor)
{
   API_MARKER(SetDepthBias, commandBuffer, depthBiasConstantFactor, depthBiasClamp,
              depthBiasSlopeFactor);
}

void
sqtt_CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4])
{
   API_MARKER(SetBlendConstants, commandBuffer, blendConstants);
}

void
sqtt_CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds)
{
   API_MARKER(SetDepthBounds, commandBuffer, minDepthBounds, maxDepthBounds);
}

void
sqtt_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                              uint32_t compareMask)
{
   API_MARKER(SetStencilCompareMask, commandBuffer, faceMask, compareMask);
}

void
sqtt_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                            uint32_t writeMask)
{
   API_MARKER(SetStencilWriteMask, commandBuffer, faceMask, writeMask);
}

void
sqtt_CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                            uint32_t reference)
{
   API_MARKER(SetStencilReference, commandBuffer, faceMask, reference);
}
787
788
/* VK_EXT_debug_marker */

/* Begin a user debug region: emitted as an RGP user-event push marker
 * carrying the region label.
 */
void
sqtt_CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer,
                            const VkDebugMarkerMarkerInfoEXT *pMarkerInfo)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   radv_write_user_event_marker(cmd_buffer, UserEventPush, pMarkerInfo->pMarkerName);
}
796
797
/* End the innermost user debug region: emitted as an RGP user-event pop
 * marker (no label).
 */
void
sqtt_CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   radv_write_user_event_marker(cmd_buffer, UserEventPop, NULL);
}
803
804
/* Insert a standalone user label: emitted as an RGP user-event trigger
 * marker carrying the label.
 */
void
sqtt_CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer,
                             const VkDebugMarkerMarkerInfoEXT *pMarkerInfo)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   radv_write_user_event_marker(cmd_buffer, UserEventTrigger, pMarkerInfo->pMarkerName);
}
811
812
/* Object naming/tagging is not recorded in SQTT captures; accept and ignore. */
VkResult
sqtt_DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo)
{
   /* no-op */
   return VK_SUCCESS;
}

VkResult
sqtt_DebugMarkerSetObjectTagEXT(VkDevice device, const VkDebugMarkerObjectTagInfoEXT *pTagInfo)
{
   /* no-op */
   return VK_SUCCESS;
}
825
826
/* Pipelines */
827
/* Map a Mesa API shader stage to the AMD hardware stage RGP expects.
 *
 * VS and TES can run on different hardware stages depending on how the
 * pipeline was compiled (merged LS/HS or ES/GS, or NGG), so the compiled
 * shader's info flags decide the mapping.
 */
static enum rgp_hardware_stages
radv_mesa_to_rgp_shader_stage(struct radv_pipeline *pipeline, gl_shader_stage stage)
{
   struct radv_shader_variant *shader = pipeline->shaders[stage];

   switch (stage) {
   case MESA_SHADER_VERTEX:
      if (shader->info.vs.as_ls)
         return RGP_HW_STAGE_LS;
      else if (shader->info.vs.as_es)
         return RGP_HW_STAGE_ES;
      else if (shader->info.is_ngg)
         return RGP_HW_STAGE_GS;
      else
         return RGP_HW_STAGE_VS;
   case MESA_SHADER_TESS_CTRL:
      return RGP_HW_STAGE_HS;
   case MESA_SHADER_TESS_EVAL:
      if (shader->info.tes.as_es)
         return RGP_HW_STAGE_ES;
      else if (shader->info.is_ngg)
         return RGP_HW_STAGE_GS;
      else
         return RGP_HW_STAGE_VS;
   case MESA_SHADER_GEOMETRY:
      return RGP_HW_STAGE_GS;
   case MESA_SHADER_FRAGMENT:
      return RGP_HW_STAGE_PS;
   case MESA_SHADER_COMPUTE:
      return RGP_HW_STAGE_CS;
   default:
      unreachable("invalid mesa shader stage");
   }
}
861
862
static VkResult
863
radv_add_code_object(struct radv_device *device, struct radv_pipeline *pipeline)
864
{
865
struct ac_thread_trace_data *thread_trace_data = &device->thread_trace;
866
struct rgp_code_object *code_object = &thread_trace_data->rgp_code_object;
867
struct rgp_code_object_record *record;
868
869
record = malloc(sizeof(struct rgp_code_object_record));
870
if (!record)
871
return VK_ERROR_OUT_OF_HOST_MEMORY;
872
873
record->shader_stages_mask = 0;
874
record->num_shaders_combined = 0;
875
record->pipeline_hash[0] = pipeline->pipeline_hash;
876
record->pipeline_hash[1] = pipeline->pipeline_hash;
877
878
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
879
struct radv_shader_variant *shader = pipeline->shaders[i];
880
uint8_t *code;
881
uint64_t va;
882
883
if (!shader)
884
continue;
885
886
code = malloc(shader->code_size);
887
if (!code) {
888
free(record);
889
return VK_ERROR_OUT_OF_HOST_MEMORY;
890
}
891
memcpy(code, shader->code_ptr, shader->code_size);
892
893
va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
894
895
record->shader_data[i].hash[0] = (uint64_t)(uintptr_t)shader;
896
record->shader_data[i].hash[1] = (uint64_t)(uintptr_t)shader >> 32;
897
record->shader_data[i].code_size = shader->code_size;
898
record->shader_data[i].code = code;
899
record->shader_data[i].vgpr_count = shader->config.num_vgprs;
900
record->shader_data[i].sgpr_count = shader->config.num_sgprs;
901
record->shader_data[i].base_address = va & 0xffffffffffff;
902
record->shader_data[i].elf_symbol_offset = 0;
903
record->shader_data[i].hw_stage = radv_mesa_to_rgp_shader_stage(pipeline, i);
904
record->shader_data[i].is_combined = false;
905
906
record->shader_stages_mask |= (1 << i);
907
record->num_shaders_combined++;
908
}
909
910
simple_mtx_lock(&code_object->lock);
911
list_addtail(&record->list, &code_object->record);
912
code_object->record_count++;
913
simple_mtx_unlock(&code_object->lock);
914
915
return VK_SUCCESS;
916
}
917
918
static VkResult
919
radv_register_pipeline(struct radv_device *device, struct radv_pipeline *pipeline)
920
{
921
bool result;
922
uint64_t base_va = ~0;
923
924
result = ac_sqtt_add_pso_correlation(&device->thread_trace, pipeline->pipeline_hash);
925
if (!result)
926
return VK_ERROR_OUT_OF_HOST_MEMORY;
927
928
/* Find the lowest shader BO VA. */
929
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
930
struct radv_shader_variant *shader = pipeline->shaders[i];
931
uint64_t va;
932
933
if (!shader)
934
continue;
935
936
va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
937
base_va = MIN2(base_va, va);
938
}
939
940
result =
941
ac_sqtt_add_code_object_loader_event(&device->thread_trace, pipeline->pipeline_hash, base_va);
942
if (!result)
943
return VK_ERROR_OUT_OF_HOST_MEMORY;
944
945
result = radv_add_code_object(device, pipeline);
946
if (result != VK_SUCCESS)
947
return result;
948
949
return VK_SUCCESS;
950
}
951
952
/* Remove all RGP records created for this pipeline when it was registered:
 * the PSO correlation entry, the code object loader event and the code
 * object record (including the copied shader code).
 *
 * Records are matched by pipeline hash; only the first match in each list
 * is removed, mirroring the single entry added by radv_register_pipeline().
 */
static void
radv_unregister_pipeline(struct radv_device *device, struct radv_pipeline *pipeline)
{
   struct ac_thread_trace_data *thread_trace_data = &device->thread_trace;
   struct rgp_pso_correlation *pso_correlation = &thread_trace_data->rgp_pso_correlation;
   struct rgp_loader_events *loader_events = &thread_trace_data->rgp_loader_events;
   struct rgp_code_object *code_object = &thread_trace_data->rgp_code_object;

   /* Destroy the PSO correlation record. */
   simple_mtx_lock(&pso_correlation->lock);
   list_for_each_entry_safe(struct rgp_pso_correlation_record, record, &pso_correlation->record,
                            list)
   {
      if (record->pipeline_hash[0] == pipeline->pipeline_hash) {
         pso_correlation->record_count--;
         list_del(&record->list);
         free(record);
         break;
      }
   }
   simple_mtx_unlock(&pso_correlation->lock);

   /* Destroy the code object loader record. */
   simple_mtx_lock(&loader_events->lock);
   list_for_each_entry_safe(struct rgp_loader_events_record, record, &loader_events->record, list)
   {
      if (record->code_object_hash[0] == pipeline->pipeline_hash) {
         loader_events->record_count--;
         list_del(&record->list);
         free(record);
         break;
      }
   }
   simple_mtx_unlock(&loader_events->lock);

   /* Destroy the code object record. */
   simple_mtx_lock(&code_object->lock);
   list_for_each_entry_safe(struct rgp_code_object_record, record, &code_object->record, list)
   {
      if (record->pipeline_hash[0] == pipeline->pipeline_hash) {
         uint32_t mask = record->shader_stages_mask;
         int i;

         /* Free the disassembly (one code copy per registered stage). */
         while (mask) {
            i = u_bit_scan(&mask);
            free(record->shader_data[i].code);
         }

         code_object->record_count--;
         list_del(&record->list);
         free(record);
         break;
      }
   }
   simple_mtx_unlock(&code_object->lock);
}
1009
1010
VkResult
1011
sqtt_CreateGraphicsPipelines(VkDevice _device, VkPipelineCache pipelineCache, uint32_t count,
1012
const VkGraphicsPipelineCreateInfo *pCreateInfos,
1013
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines)
1014
{
1015
RADV_FROM_HANDLE(radv_device, device, _device);
1016
VkResult result;
1017
1018
result = radv_CreateGraphicsPipelines(_device, pipelineCache, count, pCreateInfos, pAllocator,
1019
pPipelines);
1020
if (result != VK_SUCCESS)
1021
return result;
1022
1023
if (radv_sqtt_dump_pipeline()) {
1024
for (unsigned i = 0; i < count; i++) {
1025
RADV_FROM_HANDLE(radv_pipeline, pipeline, pPipelines[i]);
1026
1027
if (!pipeline)
1028
continue;
1029
1030
result = radv_register_pipeline(device, pipeline);
1031
if (result != VK_SUCCESS)
1032
goto fail;
1033
}
1034
}
1035
1036
return VK_SUCCESS;
1037
1038
fail:
1039
for (unsigned i = 0; i < count; i++) {
1040
sqtt_DestroyPipeline(_device, pPipelines[i], pAllocator);
1041
pPipelines[i] = VK_NULL_HANDLE;
1042
}
1043
return result;
1044
}
1045
1046
VkResult
1047
sqtt_CreateComputePipelines(VkDevice _device, VkPipelineCache pipelineCache, uint32_t count,
1048
const VkComputePipelineCreateInfo *pCreateInfos,
1049
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines)
1050
{
1051
RADV_FROM_HANDLE(radv_device, device, _device);
1052
VkResult result;
1053
1054
result = radv_CreateComputePipelines(_device, pipelineCache, count, pCreateInfos, pAllocator,
1055
pPipelines);
1056
if (result != VK_SUCCESS)
1057
return result;
1058
1059
if (radv_sqtt_dump_pipeline()) {
1060
for (unsigned i = 0; i < count; i++) {
1061
RADV_FROM_HANDLE(radv_pipeline, pipeline, pPipelines[i]);
1062
1063
if (!pipeline)
1064
continue;
1065
1066
result = radv_register_pipeline(device, pipeline);
1067
if (result != VK_SUCCESS)
1068
goto fail;
1069
}
1070
}
1071
1072
return VK_SUCCESS;
1073
1074
fail:
1075
for (unsigned i = 0; i < count; i++) {
1076
sqtt_DestroyPipeline(_device, pPipelines[i], pAllocator);
1077
pPipelines[i] = VK_NULL_HANDLE;
1078
}
1079
return result;
1080
}
1081
1082
void
1083
sqtt_DestroyPipeline(VkDevice _device, VkPipeline _pipeline,
1084
const VkAllocationCallbacks *pAllocator)
1085
{
1086
RADV_FROM_HANDLE(radv_device, device, _device);
1087
RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
1088
1089
if (!_pipeline)
1090
return;
1091
1092
if (radv_sqtt_dump_pipeline())
1093
radv_unregister_pipeline(device, pipeline);
1094
1095
radv_DestroyPipeline(_device, _pipeline, pAllocator);
1096
}
1097
1098
#undef API_MARKER
1099
1100