GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/auxiliary/util/u_debug_flush.c

/**************************************************************************
 *
 * Copyright 2012 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * u_debug_flush.c Debug flush and map-related issues:
 * - Flush while synchronously mapped.
 * - Command stream reference while synchronously mapped.
 * - Synchronous map while referenced on command stream.
 * - Recursive maps.
 * - Unmap while not mapped.
 *
 * @author Thomas Hellstrom <[email protected]>
 */

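/*
 * Hypothetical usage sketch (not part of the original file): a driver
 * would wrap each winsys buffer in a debug_flush_buf and each context
 * in a debug_flush_ctx, then forward its entry points to the checkers
 * below, for example:
 *
 *    struct debug_flush_buf *fbuf = debug_flush_buf_create(FALSE, 8);
 *    struct debug_flush_ctx *fctx = debug_flush_ctx_create(TRUE, 8);
 *
 *    debug_flush_map(fbuf, flags);           In transfer_map().
 *    debug_flush_cb_reference(fctx, fbuf);   When adding the buffer to
 *                                            the command stream.
 *    debug_flush_might_flush(fctx);          Before a possible flush.
 *    debug_flush_flush(fctx);                In context flush.
 *    debug_flush_unmap(fbuf);                In transfer_unmap().
 */
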
#ifdef DEBUG
#include "pipe/p_compiler.h"
#include "util/u_debug_stack.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
#include "util/list.h"
#include "util/u_inlines.h"
#include "util/u_string.h"
#include "os/os_thread.h"
#include <stdio.h>

/* Future improvement: Use realloc instead? */
#define DEBUG_FLUSH_MAP_DEPTH 32

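/**
 * Per-map record: the captured backtrace of the map call and whether
 * the mapping was considered persistent (safe to flush while mapped).
 */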
struct debug_map_item {
   struct debug_stack_frame *frame;
   boolean persistent;
};

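/**
 * Per-buffer tracking state. One of these is expected to shadow each
 * tracked buffer; it records up to DEBUG_FLUSH_MAP_DEPTH outstanding
 * maps together with the most recent synchronous (non-persistent) map.
 */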
struct debug_flush_buf {
   /* Atomic */
   struct pipe_reference reference; /* Must be the first member. */
   mtx_t mutex;
   /* Immutable */
   boolean supports_persistent;
   unsigned bt_depth;
   /* Protected by mutex */
   int map_count;
   boolean has_sync_map;
   int last_sync_map;
   struct debug_map_item maps[DEBUG_FLUSH_MAP_DEPTH];
};

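/**
 * Hash-table entry recording that a buffer is referenced by the command
 * stream currently being built, along with the first-reference backtrace.
 */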
struct debug_flush_item {
   struct debug_flush_buf *fbuf;
   unsigned bt_depth;
   struct debug_stack_frame *ref_frame;
};

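/**
 * Per-context tracking state. ref_hash maps debug_flush_buf pointers to
 * debug_flush_item entries for all buffers referenced by the command
 * stream under construction; head links the context into ctx_list.
 */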
struct debug_flush_ctx {
   /* Contexts are used by a single thread at a time */
   unsigned bt_depth;
   boolean catch_map_of_referenced;
   struct hash_table *ref_hash;
   struct list_head head;
};

static mtx_t list_mutex = _MTX_INITIALIZER_NP;
static struct list_head ctx_list = {&ctx_list, &ctx_list};

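/**
 * Capture a backtrace of the given depth, skipping @start frames.
 * Returns NULL on allocation failure; callers treat that as a missing
 * backtrace rather than an error.
 */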
static struct debug_stack_frame *
debug_flush_capture_frame(int start, int depth)
{
   struct debug_stack_frame *frames;

   frames = CALLOC(depth, sizeof(*frames));
   if (!frames)
      return NULL;

   debug_backtrace_capture(frames, start, depth);
   return frames;
}

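/**
 * Create the tracking state for one buffer. supports_persistent should
 * be TRUE if the buffer may legally remain mapped across a flush;
 * bt_depth is the number of stack frames to record per event.
 * Returns NULL (after printing a warning) on allocation failure.
 */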
struct debug_flush_buf *
debug_flush_buf_create(boolean supports_persistent, unsigned bt_depth)
{
   struct debug_flush_buf *fbuf = CALLOC_STRUCT(debug_flush_buf);

   if (!fbuf)
      goto out_no_buf;

   fbuf->supports_persistent = supports_persistent;
   fbuf->bt_depth = bt_depth;
   pipe_reference_init(&fbuf->reference, 1);
   (void) mtx_init(&fbuf->mutex, mtx_plain);

   return fbuf;
out_no_buf:
   debug_printf("Debug flush buffer creation failed.\n");
   debug_printf("Debug flush checking for this buffer will be incomplete.\n");
   return NULL;
}

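/**
 * Update a reference to a debug_flush_buf, destroying the old buffer's
 * tracking state (including any remaining map backtraces) when its
 * reference count drops to zero. Passing src == NULL only unreferences.
 */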
void
debug_flush_buf_reference(struct debug_flush_buf **dst,
                          struct debug_flush_buf *src)
{
   struct debug_flush_buf *fbuf = *dst;

   if (pipe_reference(&(*dst)->reference, &src->reference)) {
      int i;

      for (i = 0; i < fbuf->map_count; ++i) {
         FREE(fbuf->maps[i].frame);
      }
      FREE(fbuf);
   }

   *dst = src;
}

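/**
 * Destroy a command-stream reference record: drop its buffer reference
 * and free the captured reference backtrace.
 */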
static void
debug_flush_item_destroy(struct debug_flush_item *item)
{
   debug_flush_buf_reference(&item->fbuf, NULL);

   FREE(item->ref_frame);

   FREE(item);
}

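/**
 * Create per-context tracking state and add it to the global context
 * list. Note that catch_reference_of_mapped is currently unused: the
 * catch_map_of_referenced flag stays FALSE from CALLOC_STRUCT, so the
 * map-of-referenced check in debug_flush_map() never fires.
 * Returns NULL (after printing a warning) on failure.
 */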
struct debug_flush_ctx *
debug_flush_ctx_create(UNUSED boolean catch_reference_of_mapped,
                       unsigned bt_depth)
{
   struct debug_flush_ctx *fctx = CALLOC_STRUCT(debug_flush_ctx);

   if (!fctx)
      goto out_no_ctx;

   fctx->ref_hash = util_hash_table_create_ptr_keys();

   if (!fctx->ref_hash)
      goto out_no_ref_hash;

   fctx->bt_depth = bt_depth;
   mtx_lock(&list_mutex);
   list_addtail(&fctx->head, &ctx_list);
   mtx_unlock(&list_mutex);

   return fctx;

out_no_ref_hash:
   FREE(fctx);
out_no_ctx:
   debug_printf("Debug flush context creation failed.\n");
   debug_printf("Debug flush checking for this context will be incomplete.\n");
   return NULL;
}

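/**
 * Print one alert message. If @capture is TRUE a fresh backtrace is
 * captured (and freed) here; otherwise the caller-supplied @frame is
 * dumped. @continued selects between a separator and an end-of-message
 * banner so multi-part alerts read as one report.
 */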
static void
debug_flush_alert(const char *s, const char *op,
                  unsigned start, unsigned depth,
                  boolean continued,
                  boolean capture,
                  const struct debug_stack_frame *frame)
{
   if (capture)
      frame = debug_flush_capture_frame(start, depth);

   if (s)
      debug_printf("%s ", s);
   if (frame) {
      debug_printf("%s backtrace follows:\n", op);
      debug_backtrace_dump(frame, depth);
   } else
      debug_printf("No %s backtrace was captured.\n", op);

   if (continued)
      debug_printf("**********************************\n");
   else
      debug_printf("*********END OF MESSAGE***********\n\n\n");

   if (capture)
      FREE((void *)frame);
}


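/**
 * Record a map of the buffer. A map is considered synchronous unless
 * PIPE_MAP_UNSYNCHRONIZED is set, and persistent if it is unsynchronized,
 * if the buffer supports persistent maps, or if PIPE_MAP_PERSISTENT is
 * set. Alerts on recursive synchronous maps and, when enabled, on maps
 * of buffers already referenced by a command stream.
 */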
void
debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
{
   boolean map_sync, persistent;

   if (!fbuf)
      return;

   mtx_lock(&fbuf->mutex);
   map_sync = !(flags & PIPE_MAP_UNSYNCHRONIZED);
   persistent = !map_sync || fbuf->supports_persistent ||
      !!(flags & PIPE_MAP_PERSISTENT);

   /* Recursive maps are allowed if previous maps are persistent,
    * or if the current map is unsync. In other cases we might flush
    * with unpersistent maps.
    */
   if (fbuf->has_sync_map && !map_sync) {
      debug_flush_alert("Recursive sync map detected.", "Map",
                        2, fbuf->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Previous map", 0, fbuf->bt_depth, FALSE,
                        FALSE, fbuf->maps[fbuf->last_sync_map].frame);
   }

   fbuf->maps[fbuf->map_count].frame =
      debug_flush_capture_frame(1, fbuf->bt_depth);
   fbuf->maps[fbuf->map_count].persistent = persistent;
   if (!persistent) {
      fbuf->has_sync_map = TRUE;
      fbuf->last_sync_map = fbuf->map_count;
   }

   fbuf->map_count++;
   assert(fbuf->map_count < DEBUG_FLUSH_MAP_DEPTH);

   mtx_unlock(&fbuf->mutex);

   if (!persistent) {
      struct debug_flush_ctx *fctx;

      mtx_lock(&list_mutex);
      LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
         struct debug_flush_item *item =
            util_hash_table_get(fctx->ref_hash, fbuf);

         if (item && fctx->catch_map_of_referenced) {
            debug_flush_alert("Already referenced map detected.",
                              "Map", 2, fbuf->bt_depth, TRUE, TRUE, NULL);
            debug_flush_alert(NULL, "Reference", 0, item->bt_depth,
                              FALSE, FALSE, item->ref_frame);
         }
      }
      mtx_unlock(&list_mutex);
   }
}

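/**
 * Record an unmap of the buffer. Alerts if the buffer was not mapped.
 * If the unmapped entry was the most recent synchronous map, rescan the
 * remaining entries to find the new last synchronous map, then free the
 * stored backtrace.
 */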
void
debug_flush_unmap(struct debug_flush_buf *fbuf)
{
   if (!fbuf)
      return;

   mtx_lock(&fbuf->mutex);
   if (--fbuf->map_count < 0) {
      debug_flush_alert("Unmap not previously mapped detected.", "Map",
                        2, fbuf->bt_depth, FALSE, TRUE, NULL);
   } else {
      if (fbuf->has_sync_map && fbuf->last_sync_map == fbuf->map_count) {
         int i = fbuf->map_count;

         fbuf->has_sync_map = FALSE;
         while (i-- && !fbuf->has_sync_map) {
            if (!fbuf->maps[i].persistent) {
               fbuf->has_sync_map = TRUE;
               fbuf->last_sync_map = i;
            }
         }
         FREE(fbuf->maps[fbuf->map_count].frame);
         fbuf->maps[fbuf->map_count].frame = NULL;
      }
   }
   mtx_unlock(&fbuf->mutex);
}


/**
 * Add the given buffer to the list of active buffers. Active buffers
 * are those which are referenced by the command buffer currently being
 * constructed.
 */
void
debug_flush_cb_reference(struct debug_flush_ctx *fctx,
                         struct debug_flush_buf *fbuf)
{
   struct debug_flush_item *item;

   if (!fctx || !fbuf)
      return;

   item = util_hash_table_get(fctx->ref_hash, fbuf);

   mtx_lock(&fbuf->mutex);
   if (fbuf->map_count && fbuf->has_sync_map) {
      debug_flush_alert("Reference of mapped buffer detected.", "Reference",
                        2, fctx->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, FALSE,
                        FALSE, fbuf->maps[fbuf->last_sync_map].frame);
   }
   mtx_unlock(&fbuf->mutex);

   if (!item) {
      item = CALLOC_STRUCT(debug_flush_item);
      if (item) {
         debug_flush_buf_reference(&item->fbuf, fbuf);
         item->bt_depth = fctx->bt_depth;
         item->ref_frame = debug_flush_capture_frame(2, item->bt_depth);
         _mesa_hash_table_insert(fctx->ref_hash, fbuf, item);
         return;
      }
      goto out_no_item;
   }
   return;

out_no_item:
   debug_printf("Debug flush command buffer reference creation failed.\n");
   debug_printf("Debug flush checking will be incomplete "
                "for this command batch.\n");
}

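/**
 * Per-buffer callback for the flush checks: alert if the referenced
 * buffer is currently in a synchronous map, dumping the triggering
 * event, the map backtrace and the first-reference backtrace.
 */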
static enum pipe_error
debug_flush_might_flush_cb(UNUSED void *key, void *value, void *data)
{
   struct debug_flush_item *item =
      (struct debug_flush_item *) value;
   struct debug_flush_buf *fbuf = item->fbuf;

   mtx_lock(&fbuf->mutex);
   if (fbuf->map_count && fbuf->has_sync_map) {
      const char *reason = (const char *) data;
      char message[80];

      snprintf(message, sizeof(message),
               "%s referenced mapped buffer detected.", reason);

      debug_flush_alert(message, reason, 3, item->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, TRUE, FALSE,
                        fbuf->maps[fbuf->last_sync_map].frame);
      debug_flush_alert(NULL, "First reference", 0, item->bt_depth, FALSE,
                        FALSE, item->ref_frame);
   }
   mtx_unlock(&fbuf->mutex);

   return PIPE_OK;
}

/**
 * Called when we're about to possibly flush a command buffer.
 * We check if any active buffers are in a mapped state. If so, print an alert.
 */
void
debug_flush_might_flush(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_might_flush_cb,
                           "Might flush");
}

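/**
 * Per-buffer callback used when a command buffer is flushed or a
 * context is destroyed: destroys the reference record.
 */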
static enum pipe_error
debug_flush_flush_cb(UNUSED void *key, void *value, UNUSED void *data)
{
   struct debug_flush_item *item =
      (struct debug_flush_item *) value;

   debug_flush_item_destroy(item);

   return PIPE_OK;
}


/**
 * Called when we flush a command buffer. Two things are done:
 * 1. Check if any of the active buffers are currently mapped (alert if so).
 * 2. Discard/unreference all the active buffers.
 */
void
debug_flush_flush(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_might_flush_cb,
                           "Flush");
   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_flush_cb,
                           NULL);
   _mesa_hash_table_clear(fctx->ref_hash, NULL);
}

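/**
 * Destroy per-context tracking state: unlink the context from the
 * global list, destroy all outstanding reference records and free the
 * hash table and the context itself.
 */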
void
debug_flush_ctx_destroy(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   list_del(&fctx->head);
   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_flush_cb,
                           NULL);
   _mesa_hash_table_clear(fctx->ref_hash, NULL);
   _mesa_hash_table_destroy(fctx->ref_hash, NULL);
   FREE(fctx);
}
#endif