GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/intel/vulkan/anv_measure.c
/*
 * Copyright © 2020 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "anv_measure.h"

#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>

#include "common/intel_measure.h"
#include "util/debug.h"

struct anv_measure_batch {
   struct anv_bo *bo;
   struct intel_measure_batch base;
};
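/**
 * Per-physical-device setup for INTEL_MEASURE: pick the hardware-specific
 * timestamp-emission callback for this gfx version and, when measurement is
 * enabled, allocate the ring buffer that collects buffered results.
 */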
void
anv_measure_device_init(struct anv_physical_device *device)
{
   switch (device->info.verx10) {
   case 125:
      device->cmd_emit_timestamp = &gfx125_cmd_emit_timestamp;
      break;
   case 120:
      device->cmd_emit_timestamp = &gfx12_cmd_emit_timestamp;
      break;
   case 110:
      device->cmd_emit_timestamp = &gfx11_cmd_emit_timestamp;
      break;
   case 90:
      device->cmd_emit_timestamp = &gfx9_cmd_emit_timestamp;
      break;
   case 80:
      device->cmd_emit_timestamp = &gfx8_cmd_emit_timestamp;
      break;
   case 75:
      device->cmd_emit_timestamp = &gfx75_cmd_emit_timestamp;
      break;
   case 70:
      device->cmd_emit_timestamp = &gfx7_cmd_emit_timestamp;
      break;
   default:
      assert(false);
   }

   /* initialise list of measure structures that await rendering */
   struct intel_measure_device *measure_device = &device->measure_device;
   intel_measure_init(measure_device);
   struct intel_measure_config *config = measure_device->config;
   if (config == NULL)
      return;

   /* the final member of intel_measure_ringbuffer is a zero-length array of
    * intel_measure_buffered_result objects. Allocate additional space for
    * the buffered objects based on the run-time configurable buffer_size
    */
   const size_t rb_bytes = sizeof(struct intel_measure_ringbuffer) +
      config->buffer_size * sizeof(struct intel_measure_buffered_result);
   struct intel_measure_ringbuffer *rb =
      vk_zalloc(&device->instance->vk.alloc,
                rb_bytes, 8,
                VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   measure_device->ringbuffer = rb;
}

static struct intel_measure_config*
config_from_command_buffer(struct anv_cmd_buffer *cmd_buffer)
{
   return cmd_buffer->device->physical->measure_device.config;
}
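/**
 * Per-command-buffer setup: when INTEL_MEASURE is enabled, allocate the
 * anv_measure_batch (with room for config->batch_size snapshots) and a
 * mapped BO to receive the timestamp writes; otherwise leave
 * cmd_buffer->measure NULL so the measurement hooks become no-ops.
 */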
void
anv_measure_init(struct anv_cmd_buffer *cmd_buffer)
{
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
   struct anv_device *device = cmd_buffer->device;

   if (!config || !config->enabled) {
      cmd_buffer->measure = NULL;
      return;
   }

   /* the final member of anv_measure_batch ends in a zero-length array of
    * intel_measure_snapshot objects. Create additional space for the
    * snapshot objects based on the run-time configurable batch_size
    */
   const size_t batch_bytes = sizeof(struct anv_measure_batch) +
      config->batch_size * sizeof(struct intel_measure_snapshot);
   struct anv_measure_batch *measure =
      vk_alloc(&cmd_buffer->pool->alloc,
               batch_bytes, 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   memset(measure, 0, batch_bytes);
   ASSERTED VkResult result =
      anv_device_alloc_bo(device, "measure data",
                          config->batch_size * sizeof(uint64_t),
                          ANV_BO_ALLOC_MAPPED,
                          0,
                          (struct anv_bo**)&measure->bo);
   measure->base.timestamps = measure->bo->map;
   assert(result == VK_SUCCESS);

   cmd_buffer->measure = measure;
}
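/**
 * Open a snapshot interval: emit a timestamp write into the measure BO and
 * record the event name, counts, framebuffer, and shader pointers in the
 * next snapshot slot.
 */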
static void
anv_measure_start_snapshot(struct anv_cmd_buffer *cmd_buffer,
                           enum intel_measure_snapshot_type type,
                           const char *event_name,
                           uint32_t count)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_measure_batch *measure = cmd_buffer->measure;
   struct anv_physical_device *device = cmd_buffer->device->physical;
   struct intel_measure_device *measure_device = &device->measure_device;

   const unsigned device_frame = measure_device->frame;

   /* if the command buffer is not associated with a frame, associate it with
    * the most recent acquired frame
    */
   if (measure->base.frame == 0)
      measure->base.frame = device_frame;

   uintptr_t framebuffer = (uintptr_t)cmd_buffer->state.framebuffer;

   if (!measure->base.framebuffer &&
       cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
      /* secondary command buffer inherited the framebuffer from the primary */
      measure->base.framebuffer = framebuffer;

   /* verify framebuffer has been properly tracked */
   assert(type == INTEL_SNAPSHOT_END ||
          framebuffer == measure->base.framebuffer ||
          framebuffer == 0); /* compute has no framebuffer */

   unsigned index = measure->base.index++;

   (*device->cmd_emit_timestamp)(batch, measure->bo, index * sizeof(uint64_t));

   if (event_name == NULL)
      event_name = intel_measure_snapshot_string(type);

   struct intel_measure_snapshot *snapshot = &(measure->base.snapshots[index]);
   memset(snapshot, 0, sizeof(*snapshot));
   snapshot->type = type;
   snapshot->count = (unsigned) count;
   snapshot->event_count = measure->base.event_count;
   snapshot->event_name = event_name;
   snapshot->framebuffer = framebuffer;

   if (type == INTEL_SNAPSHOT_COMPUTE && cmd_buffer->state.compute.pipeline) {
      snapshot->cs = (uintptr_t) cmd_buffer->state.compute.pipeline->cs;
   } else if (cmd_buffer->state.gfx.pipeline) {
      const struct anv_graphics_pipeline *pipeline =
         cmd_buffer->state.gfx.pipeline;
      snapshot->vs = (uintptr_t) pipeline->shaders[MESA_SHADER_VERTEX];
      snapshot->tcs = (uintptr_t) pipeline->shaders[MESA_SHADER_TESS_CTRL];
      snapshot->tes = (uintptr_t) pipeline->shaders[MESA_SHADER_TESS_EVAL];
      snapshot->gs = (uintptr_t) pipeline->shaders[MESA_SHADER_GEOMETRY];
      snapshot->fs = (uintptr_t) pipeline->shaders[MESA_SHADER_FRAGMENT];
   }
}
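/**
 * Close the currently open interval: emit the ending timestamp write and
 * record an INTEL_SNAPSHOT_END entry. End snapshots always occupy odd
 * indices, paired with the start snapshot that precedes them.
 */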
static void
anv_measure_end_snapshot(struct anv_cmd_buffer *cmd_buffer,
                         uint32_t event_count)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_measure_batch *measure = cmd_buffer->measure;
   struct anv_physical_device *device = cmd_buffer->device->physical;

   unsigned index = measure->base.index++;
   assert(index % 2 == 1);

   (*device->cmd_emit_timestamp)(batch, measure->bo, index * sizeof(uint64_t));

   struct intel_measure_snapshot *snapshot = &(measure->base.snapshots[index]);
   memset(snapshot, 0, sizeof(*snapshot));
   snapshot->type = INTEL_SNAPSHOT_END;
   snapshot->event_count = event_count;
}
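/**
 * Report whether the shaders bound for the upcoming event differ from those
 * of the previous snapshot, i.e. whether a new interval should begin.
 * Simultaneous-use command buffers always return false, since timestamps
 * cannot be recorded for them.
 */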
static bool
state_changed(struct anv_cmd_buffer *cmd_buffer,
              enum intel_measure_snapshot_type type)
{
   uintptr_t vs=0, tcs=0, tes=0, gs=0, fs=0, cs=0;

   if (cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)
      /* can't record timestamps in this mode */
      return false;

   if (type == INTEL_SNAPSHOT_COMPUTE) {
      const struct anv_compute_pipeline *cs_pipe =
         cmd_buffer->state.compute.pipeline;
      assert(cs_pipe);
      cs = (uintptr_t)cs_pipe->cs;
   } else if (type == INTEL_SNAPSHOT_DRAW) {
      const struct anv_graphics_pipeline *gfx = cmd_buffer->state.gfx.pipeline;
      assert(gfx);
      vs = (uintptr_t) gfx->shaders[MESA_SHADER_VERTEX];
      tcs = (uintptr_t) gfx->shaders[MESA_SHADER_TESS_CTRL];
      tes = (uintptr_t) gfx->shaders[MESA_SHADER_TESS_EVAL];
      gs = (uintptr_t) gfx->shaders[MESA_SHADER_GEOMETRY];
      fs = (uintptr_t) gfx->shaders[MESA_SHADER_FRAGMENT];
   }
   /* else blorp, all programs NULL */

   return intel_measure_state_changed(&cmd_buffer->measure->base,
                                      vs, tcs, tes, gs, fs, cs);
}
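/**
 * Per-event hook (draws, dispatches, blorp operations): filter out events
 * whose shader state has not changed, close the previous interval when a new
 * one begins, and start a new snapshot unless the per-batch snapshot buffer
 * is full.
 */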
void
_anv_measure_snapshot(struct anv_cmd_buffer *cmd_buffer,
                      enum intel_measure_snapshot_type type,
                      const char *event_name,
                      uint32_t count)
{
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
   struct anv_measure_batch *measure = cmd_buffer->measure;

   assert(config);
   if (measure == NULL)
      return;

   assert(type != INTEL_SNAPSHOT_END);
   if (!state_changed(cmd_buffer, type)) {
      /* filter out this event */
      return;
   }

   /* increment event count */
   ++measure->base.event_count;
   if (measure->base.event_count == 1 ||
       measure->base.event_count == config->event_interval + 1) {
      /* the first event of an interval */

      if (measure->base.index % 2) {
         /* end the previous event */
         anv_measure_end_snapshot(cmd_buffer, measure->base.event_count - 1);
      }
      measure->base.event_count = 1;

      if (measure->base.index == config->batch_size) {
         /* Snapshot buffer is full. The batch must be flushed before
          * additional snapshots can be taken.
          */
         static bool warned = false;
         if (unlikely(!warned)) {
            fprintf(config->file,
                    "WARNING: batch size exceeds INTEL_MEASURE limit: %d. "
                    "Data has been dropped. "
                    "Increase setting with INTEL_MEASURE=batch_size={count}\n",
                    config->batch_size);
         }

         warned = true;
         return;
      }

      anv_measure_start_snapshot(cmd_buffer, type, event_name, count);
   }
}
/**
 * Called when a command buffer is reset. Re-initializes existing anv_measure
 * data structures.
 */
void
anv_measure_reset(struct anv_cmd_buffer *cmd_buffer)
{
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
   struct anv_device *device = cmd_buffer->device;
   struct anv_measure_batch *measure = cmd_buffer->measure;

   if (!config)
      return;

   if (!config->enabled) {
      cmd_buffer->measure = NULL;
      return;
   }

   if (!measure) {
      /* Capture has recently been enabled. Instead of resetting, a new data
       * structure must be allocated and initialized.
       */
      return anv_measure_init(cmd_buffer);
   }

   /* it is possible that the command buffer contains snapshots that have not
    * yet been processed
    */
   intel_measure_gather(&device->physical->measure_device,
                        &device->info);

   assert(cmd_buffer->device != NULL);

   measure->base.index = 0;
   measure->base.framebuffer = 0;
   measure->base.frame = 0;
   measure->base.event_count = 0;
   list_inithead(&measure->base.link);

   anv_device_release_bo(device, measure->bo);
   ASSERTED VkResult result =
      anv_device_alloc_bo(device, "measure data",
                          config->batch_size * sizeof(uint64_t),
                          ANV_BO_ALLOC_MAPPED,
                          0,
                          (struct anv_bo**)&measure->bo);
   measure->base.timestamps = measure->bo->map;
   assert(result == VK_SUCCESS);
}
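/**
 * Called when a command buffer is destroyed: gather any snapshots that have
 * not yet been processed, then release the timestamp BO and free the
 * anv_measure_batch allocation.
 */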
void
anv_measure_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
   struct anv_measure_batch *measure = cmd_buffer->measure;
   struct anv_device *device = cmd_buffer->device;
   struct anv_physical_device *physical = device->physical;

   if (!config)
      return;
   if (measure == NULL)
      return;

   /* it is possible that the command buffer contains snapshots that have not
    * yet been processed
    */
   intel_measure_gather(&physical->measure_device, &physical->info);

   anv_device_release_bo(device, measure->bo);
   vk_free(&cmd_buffer->pool->alloc, measure);
   cmd_buffer->measure = NULL;
}

static struct intel_measure_config*
config_from_device(struct anv_device *device)
{
   return device->physical->measure_device.config;
}
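/**
 * Per-physical-device teardown: free the result ring buffer allocated in
 * anv_measure_device_init.
 */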
void
anv_measure_device_destroy(struct anv_physical_device *device)
{
   struct intel_measure_device *measure_device = &device->measure_device;
   struct intel_measure_config *config = measure_device->config;

   if (!config)
      return;

   if (measure_device->ringbuffer != NULL) {
      vk_free(&device->instance->vk.alloc, measure_device->ringbuffer);
      measure_device->ringbuffer = NULL;
   }
}

/**
 * Hook for command buffer submission.
 */
void
_anv_measure_submit(struct anv_cmd_buffer *cmd_buffer)
{
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
   struct anv_measure_batch *measure = cmd_buffer->measure;
   struct intel_measure_device *measure_device = &cmd_buffer->device->physical->measure_device;

   if (!config)
      return;
   if (measure == NULL)
      return;

   if (measure->base.index == 0)
      /* no snapshots were started */
      return;

   /* finalize snapshots and enqueue them */
   static unsigned cmd_buffer_count = 0;
   measure->base.batch_count = p_atomic_inc_return(&cmd_buffer_count);

   if (measure->base.index % 2 == 1) {
      anv_measure_end_snapshot(cmd_buffer, measure->base.event_count);
      measure->base.event_count = 0;
   }

   /* add to the list of submitted snapshots */
   pthread_mutex_lock(&measure_device->mutex);
   list_addtail(&measure->base.link, &measure_device->queued_snapshots);
   pthread_mutex_unlock(&measure_device->mutex);
}

/**
 * Hook for the start of a frame.
 */
void
anv_measure_acquire(struct anv_device *device)
{
   struct intel_measure_config *config = config_from_device(device);
   struct intel_measure_device *measure_device = &device->physical->measure_device;

   if (!config)
      return;
   if (measure_device == NULL)
      return;

   intel_measure_frame_transition(p_atomic_inc_return(&measure_device->frame));

   /* iterate the queued snapshots and publish those that finished */
   intel_measure_gather(measure_device, &device->physical->info);
}
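/**
 * Hook for the end of command buffer recording: close the snapshot interval
 * that is still open, if any.
 */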
void
_anv_measure_endcommandbuffer(struct anv_cmd_buffer *cmd_buffer)
{
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
   struct anv_measure_batch *measure = cmd_buffer->measure;

   if (!config)
      return;
   if (measure == NULL)
      return;
   if (measure->base.index % 2 == 0)
      return;

   anv_measure_end_snapshot(cmd_buffer, measure->base.event_count);
   measure->base.event_count = 0;
}
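/**
 * Hook for the start of a render pass: if the framebuffer changed and
 * snapshots are being filtered by renderpass or shader, end the interval
 * belonging to the previous renderpass and track the new framebuffer.
 */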
void
_anv_measure_beginrenderpass(struct anv_cmd_buffer *cmd_buffer)
{
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
   struct anv_measure_batch *measure = cmd_buffer->measure;

   if (!config)
      return;
   if (measure == NULL)
      return;

   if (measure->base.framebuffer == (uintptr_t) cmd_buffer->state.framebuffer)
      /* no change */
      return;

   bool filtering = (config->flags & (INTEL_MEASURE_RENDERPASS |
                                      INTEL_MEASURE_SHADER));
   if (filtering && measure->base.index % 2 == 1) {
      /* snapshot for previous renderpass was not ended */
      anv_measure_end_snapshot(cmd_buffer,
                               measure->base.event_count);
      measure->base.event_count = 0;
   }

   measure->base.framebuffer = (uintptr_t) cmd_buffer->state.framebuffer;
}
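/**
 * Hook for executing a secondary command buffer within a primary: record a
 * snapshot that links the secondary's measurements into the primary, unless
 * timing is collected per batch/frame or the secondary was recorded with
 * VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT.
 */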
void
_anv_measure_add_secondary(struct anv_cmd_buffer *primary,
                           struct anv_cmd_buffer *secondary)
{
   struct intel_measure_config *config = config_from_command_buffer(primary);
   struct anv_measure_batch *measure = primary->measure;
   if (!config)
      return;
   if (measure == NULL)
      return;
   if (config->flags & (INTEL_MEASURE_BATCH | INTEL_MEASURE_FRAME))
      /* secondary timing will be contained within the primary */
      return;
   if (secondary->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
      static bool warned = false;
      if (unlikely(!warned)) {
         fprintf(config->file,
                 "WARNING: INTEL_MEASURE cannot capture timings of commands "
                 "in secondary command buffers with "
                 "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.\n");
      }
      return;
   }

   if (measure->base.index % 2 == 1)
      anv_measure_end_snapshot(primary, measure->base.event_count);

   struct intel_measure_snapshot *snapshot = &(measure->base.snapshots[measure->base.index]);
   _anv_measure_snapshot(primary, INTEL_SNAPSHOT_SECONDARY_BATCH, NULL, 0);

   snapshot->secondary = &secondary->measure->base;
}