GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/iris/iris_measure.c

/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_measure.c
 */

#include <stdio.h>
#include "util/debug.h"
#include "util/list.h"
#include "util/crc32.h"
#include "iris_context.h"
#include "iris_defines.h"
void
iris_init_screen_measure(struct iris_screen *screen)
{
   struct intel_measure_device *measure_device = &screen->measure;

   memset(measure_device, 0, sizeof(*measure_device));
   intel_measure_init(measure_device);
   struct intel_measure_config *config = measure_device->config;
   if (config == NULL)
      return;

   /* the final member of intel_measure_ringbuffer is a zero-length array of
    * intel_measure_buffered_result objects. Allocate additional space for
    * the buffered objects based on the run-time configurable buffer_size
    */
   const size_t rb_bytes = sizeof(struct intel_measure_ringbuffer) +
      config->buffer_size * sizeof(struct intel_measure_buffered_result);
   struct intel_measure_ringbuffer *rb = rzalloc_size(screen, rb_bytes);
   measure_device->ringbuffer = rb;
}

static struct intel_measure_config *
config_from_screen(struct iris_screen *screen)
{
   return screen->measure.config;
}

static struct intel_measure_config *
config_from_context(struct iris_context *ice)
{
   return ((struct iris_screen *) ice->ctx.screen)->measure.config;
}

void
iris_destroy_screen_measure(struct iris_screen *screen)
{
   if (!config_from_screen(screen))
      return;

   struct intel_measure_device *measure_device = &screen->measure;

   if (measure_device->config->file &&
       measure_device->config->file != stderr)
      fclose(screen->measure.config->file);

   ralloc_free(measure_device->ringbuffer);
   measure_device->ringbuffer = NULL;
}
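
/* Create the per-batch measurement state: an iris_measure_batch holding
 * snapshot metadata on the CPU side, plus a GPU buffer with one uint64_t
 * timestamp slot per possible snapshot.  The buffer is mapped for reading
 * up front, and the framebuffer state is hashed so the batch can be
 * attributed to a renderpass.
 */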
void
iris_init_batch_measure(struct iris_context *ice, struct iris_batch *batch)
{
   const struct intel_measure_config *config = config_from_context(ice);
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   if (!config)
      return;

   /* the final member of iris_measure_batch is a zero-length array of
    * intel_measure_snapshot objects. Create additional space for the
    * snapshot objects based on the run-time configurable batch_size
    */
   const size_t batch_bytes = sizeof(struct iris_measure_batch) +
      config->batch_size * sizeof(struct intel_measure_snapshot);
   assert(batch->measure == NULL);
   batch->measure = malloc(batch_bytes);
   memset(batch->measure, 0, batch_bytes);
   struct iris_measure_batch *measure = batch->measure;

   measure->bo = iris_bo_alloc(bufmgr, "measure",
                               config->batch_size * sizeof(uint64_t), 1,
                               IRIS_MEMZONE_OTHER, BO_ALLOC_ZEROED);
   measure->base.timestamps = iris_bo_map(NULL, measure->bo, MAP_READ);
   measure->base.framebuffer =
      (uintptr_t)util_hash_crc32(&ice->state.framebuffer,
                                 sizeof(ice->state.framebuffer));
}

void
iris_destroy_batch_measure(struct iris_measure_batch *batch)
{
   if (!batch)
      return;
   iris_bo_unmap(batch->bo);
   iris_bo_unreference(batch->bo);
   batch->bo = NULL;
   free(batch);
}
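
/* Snapshots are recorded in pairs: a start snapshot at an even index
 * carries the event metadata, and the matching end snapshot (see
 * measure_end_snapshot) follows at the next odd index.  Each snapshot
 * emits a PIPE_CONTROL that writes a 64-bit timestamp into its slot of
 * the measurement buffer; the duration of an event is the difference
 * between the two timestamps.
 */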
static void
measure_start_snapshot(struct iris_context *ice,
                       struct iris_batch *batch,
                       enum intel_measure_snapshot_type type,
                       const char *event_name,
                       uint32_t count)
{
   struct intel_measure_batch *measure_batch = &batch->measure->base;
   const struct intel_measure_config *config = config_from_context(ice);
   const struct iris_screen *screen = (void *) ice->ctx.screen;
   const unsigned screen_frame = screen->measure.frame;

   /* if the command buffer is not associated with a frame, associate it with
    * the most recent acquired frame
    */
   if (measure_batch->frame == 0)
      measure_batch->frame = screen_frame;

   uintptr_t framebuffer = measure_batch->framebuffer;

   if (measure_batch->index == config->batch_size) {
      /* Snapshot buffer is full. The batch must be flushed before additional
       * snapshots can be taken.
       */
      static bool warned = false;
      if (unlikely(!warned)) {
         fprintf(config->file,
                 "WARNING: batch size exceeds INTEL_MEASURE limit: %d. "
                 "Data has been dropped. "
                 "Increase setting with INTEL_MEASURE=batch_size={count}\n",
                 config->batch_size);
         warned = true;
      }
      return;
   }

   unsigned index = measure_batch->index++;
   assert(index < config->batch_size);
   iris_emit_pipe_control_write(batch, "measurement snapshot",
                                PIPE_CONTROL_WRITE_TIMESTAMP |
                                PIPE_CONTROL_CS_STALL,
                                batch->measure->bo, index * sizeof(uint64_t), 0ull);
   if (event_name == NULL)
      event_name = intel_measure_snapshot_string(type);

   struct intel_measure_snapshot *snapshot = &(measure_batch->snapshots[index]);
   memset(snapshot, 0, sizeof(*snapshot));
   snapshot->type = type;
   snapshot->count = (unsigned) count;
   snapshot->event_count = measure_batch->event_count;
   snapshot->event_name = event_name;
   snapshot->framebuffer = framebuffer;

   if (type == INTEL_SNAPSHOT_COMPUTE) {
      snapshot->cs = (uintptr_t) ice->shaders.prog[MESA_SHADER_COMPUTE];
   } else {
      snapshot->vs = (uintptr_t) ice->shaders.prog[MESA_SHADER_VERTEX];
      snapshot->tcs = (uintptr_t) ice->shaders.prog[MESA_SHADER_TESS_CTRL];
      snapshot->tes = (uintptr_t) ice->shaders.prog[MESA_SHADER_TESS_EVAL];
      snapshot->gs = (uintptr_t) ice->shaders.prog[MESA_SHADER_GEOMETRY];
      snapshot->fs = (uintptr_t) ice->shaders.prog[MESA_SHADER_FRAGMENT];
   }
}

static void
measure_end_snapshot(struct iris_batch *batch,
                     uint32_t event_count)
{
   struct intel_measure_batch *measure_batch = &batch->measure->base;

   unsigned index = measure_batch->index++;
   assert(index % 2 == 1);

   iris_emit_pipe_control_write(batch, "measurement snapshot",
                                PIPE_CONTROL_WRITE_TIMESTAMP |
                                PIPE_CONTROL_CS_STALL,
                                batch->measure->bo,
                                index * sizeof(uint64_t), 0ull);

   struct intel_measure_snapshot *snapshot = &(measure_batch->snapshots[index]);
   memset(snapshot, 0, sizeof(*snapshot));
   snapshot->type = INTEL_SNAPSHOT_END;
   snapshot->event_count = event_count;
}
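
/* Compare the shader programs currently bound on the context against
 * those recorded for the open snapshot.  When nothing has changed,
 * _iris_measure_snapshot() filters the event out rather than starting a
 * new snapshot pair.  Blorp operations bind none of these stages and
 * therefore pass all zeros.
 */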
static bool
state_changed(const struct iris_context *ice,
              const struct iris_batch *batch,
              enum intel_measure_snapshot_type type)
{
   uintptr_t vs=0, tcs=0, tes=0, gs=0, fs=0, cs=0;

   if (type == INTEL_SNAPSHOT_COMPUTE) {
      cs = (uintptr_t) ice->shaders.prog[MESA_SHADER_COMPUTE];
   } else if (type == INTEL_SNAPSHOT_DRAW) {
      vs = (uintptr_t) ice->shaders.prog[MESA_SHADER_VERTEX];
      tcs = (uintptr_t) ice->shaders.prog[MESA_SHADER_TESS_CTRL];
      tes = (uintptr_t) ice->shaders.prog[MESA_SHADER_TESS_EVAL];
      gs = (uintptr_t) ice->shaders.prog[MESA_SHADER_GEOMETRY];
      fs = (uintptr_t) ice->shaders.prog[MESA_SHADER_FRAGMENT];
   }
   /* else blorp, all programs NULL */

   return intel_measure_state_changed(&batch->measure->base,
                                      vs, tcs, tes, gs, fs, cs);
}
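
/* iris has no explicit renderpass boundary, so a CRC32 of the current
 * framebuffer state stands in as the renderpass identity.  When the hash
 * changes and renderpass filtering is enabled, any snapshot still open
 * for the previous renderpass is terminated.
 */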
static void
iris_measure_renderpass(struct iris_context *ice)
{
   const struct intel_measure_config *config = config_from_context(ice);
   struct intel_measure_batch *batch =
      &ice->batches[IRIS_BATCH_RENDER].measure->base;

   if (!config)
      return;
   uint32_t framebuffer_crc = util_hash_crc32(&ice->state.framebuffer,
                                              sizeof(ice->state.framebuffer));
   if (framebuffer_crc == batch->framebuffer)
      return;
   bool filtering = config->flags & INTEL_MEASURE_RENDERPASS;
   if (filtering && batch->index % 2 == 1) {
      /* snapshot for previous renderpass was not ended */
      measure_end_snapshot(&ice->batches[IRIS_BATCH_RENDER],
                           batch->event_count);
      batch->event_count = 0;
   }

   batch->framebuffer = framebuffer_crc;
}
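
/* Take a snapshot before a draw or dispatch event.  Events are grouped
 * into intervals of config->event_interval: only the first event of an
 * interval opens a new snapshot pair (closing the previous pair if it is
 * still open).  The snapshot is labeled with an event name derived from
 * the draw parameters and a vertex count scaled by the instance count.
 */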
void
_iris_measure_snapshot(struct iris_context *ice,
                       struct iris_batch *batch,
                       enum intel_measure_snapshot_type type,
                       const struct pipe_draw_info *draw,
                       const struct pipe_draw_indirect_info *indirect,
                       const struct pipe_draw_start_count_bias *sc)
{

   const struct intel_measure_config *config = config_from_context(ice);
   struct intel_measure_batch* measure_batch = &batch->measure->base;

   assert(config);
   if (!config->enabled)
      return;
   if (measure_batch == NULL)
      return;

   assert(type != INTEL_SNAPSHOT_END);
   iris_measure_renderpass(ice);

   if (!state_changed(ice, batch, type)) {
      /* filter out this event */
      return;
   }

   /* increment event count */
   ++measure_batch->event_count;
   if (measure_batch->event_count == 1 ||
       measure_batch->event_count == config->event_interval + 1) {
      /* the first event of an interval */
      if (measure_batch->index % 2) {
         /* end the previous event */
         measure_end_snapshot(batch, measure_batch->event_count - 1);
      }
      measure_batch->event_count = 1;

      const char *event_name = NULL;
      int count = 0;
      if (sc)
         count = sc->count;

      if (draw != NULL) {
         const struct shader_info *fs_info =
            iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
         if (fs_info && fs_info->name && strncmp(fs_info->name, "st/", 2) == 0) {
            event_name = fs_info->name;
         } else if (indirect) {
            event_name = "DrawIndirect";
            if (indirect->count_from_stream_output) {
               event_name = "DrawTransformFeedback";
            }
         }
         else if (draw->index_size)
            event_name = "DrawElements";
         else
            event_name = "DrawArrays";
         count = count * (draw->instance_count ? draw->instance_count : 1);
      }

      measure_start_snapshot(ice, batch, type, event_name, count);
      return;
   }
}

void
iris_destroy_ctx_measure(struct iris_context *ice)
{
   /* All outstanding snapshots must be collected before the context is
    * destroyed.
    */
   struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
   intel_measure_gather(&screen->measure, &screen->devinfo);
}
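
/* Called when a batch is submitted: close any snapshot pair left open,
 * queue the batch's snapshots for the gathering code under the device
 * mutex, and set up fresh measurement state for the next batch.  Results
 * are also gathered every tenth batch or so, so that long-running
 * workloads produce output before the frame ends.
 */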
void
iris_measure_batch_end(struct iris_context *ice, struct iris_batch *batch)
{
   const struct intel_measure_config *config = config_from_context(ice);
   struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
   struct iris_measure_batch *iris_measure_batch = batch->measure;
   struct intel_measure_batch *measure_batch = &iris_measure_batch->base;
   struct intel_measure_device *measure_device = &screen->measure;

   if (!config)
      return;
   if (!config->enabled)
      return;

   assert(measure_batch);
   assert(measure_device);

   static unsigned batch_count = 0;
   measure_batch->batch_count = p_atomic_inc_return(&batch_count);

   if (measure_batch->index % 2) {
      /* We hit the end of the batch, but never terminated our section of
       * drawing with the same render target or shaders. End it now.
       */
      measure_end_snapshot(batch, measure_batch->event_count);
   }

   if (measure_batch->index == 0)
      return;

   /* enqueue snapshot for gathering */
   pthread_mutex_lock(&measure_device->mutex);
   list_addtail(&iris_measure_batch->base.link, &measure_device->queued_snapshots);
   batch->measure = NULL;
   pthread_mutex_unlock(&measure_device->mutex);
   /* init new measure_batch */
   iris_init_batch_measure(ice, batch);

   static int interval = 0;
   if (++interval > 10) {
      intel_measure_gather(measure_device, &screen->devinfo);
      interval = 0;
   }
}
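
/* Called at a frame boundary: advance the device-wide frame counter and
 * collect any snapshots whose batches have completed.
 */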
void
iris_measure_frame_end(struct iris_context *ice)
{
   struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
   struct intel_measure_device *measure_device = &screen->measure;
   const struct intel_measure_config *config = measure_device->config;

   if (!config)
      return;

   /* increment frame counter */
   intel_measure_frame_transition(p_atomic_inc_return(&measure_device->frame));

   intel_measure_gather(measure_device, &screen->devinfo);
}