/* Path: blob/21.2-virgl/src/gallium/auxiliary/util/u_debug_flush.c */
/**************************************************************************
 *
 * Copyright 2012 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * u_debug_flush.c Debug flush and map-related issues:
 * - Flush while synchronously mapped.
 * - Command stream reference while synchronously mapped.
 * - Synchronous map while referenced on command stream.
 * - Recursive maps.
 * - Unmap while not mapped.
 *
 * @author Thomas Hellstrom <[email protected]>
 */

#ifdef DEBUG
#include "pipe/p_compiler.h"
#include "util/u_debug_stack.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
#include "util/list.h"
#include "util/u_inlines.h"
#include "util/u_string.h"
#include "os/os_thread.h"
#include <stdio.h>

/* Future improvement: Use realloc instead? */
#define DEBUG_FLUSH_MAP_DEPTH 32

/* One outstanding map: the captured backtrace and whether the map was
 * persistent (persistent maps may legally overlap flushes). */
struct debug_map_item {
   struct debug_stack_frame *frame;
   boolean persistent;
};

/* Per-buffer debug state, shared between contexts via refcounting. */
struct debug_flush_buf {
   /* Atomic */
   struct pipe_reference reference; /* Must be the first member. */
   mtx_t mutex;
   /* Immutable */
   boolean supports_persistent;
   unsigned bt_depth;
   /* Protected by mutex */
   int map_count;
   boolean has_sync_map;
   int last_sync_map;
   struct debug_map_item maps[DEBUG_FLUSH_MAP_DEPTH];
};

/* One command-stream reference of a buffer, stored in a context's hash. */
struct debug_flush_item {
   struct debug_flush_buf *fbuf;
   unsigned bt_depth;
   struct debug_stack_frame *ref_frame;
};

struct debug_flush_ctx {
   /* Contexts are used by a single thread at a time */
   unsigned bt_depth;
   boolean catch_map_of_referenced;
   struct hash_table *ref_hash;
   struct list_head head;
};

/* Protects ctx_list; per-buffer state is protected by fbuf->mutex. */
static mtx_t list_mutex = _MTX_INITIALIZER_NP;
static struct list_head ctx_list = {&ctx_list, &ctx_list};

/**
 * Capture a backtrace of @depth frames, skipping the innermost @start.
 * Returns NULL on allocation failure; callers treat that as "no trace".
 */
static struct debug_stack_frame *
debug_flush_capture_frame(int start, int depth)
{
   struct debug_stack_frame *frames;

   frames = CALLOC(depth, sizeof(*frames));
   if (!frames)
      return NULL;

   debug_backtrace_capture(frames, start, depth);
   return frames;
}

/**
 * Create the per-buffer debug-flush bookkeeping object.
 * Returns NULL on failure (checking for the buffer is then incomplete).
 */
struct debug_flush_buf *
debug_flush_buf_create(boolean supports_persistent, unsigned bt_depth)
{
   struct debug_flush_buf *fbuf = CALLOC_STRUCT(debug_flush_buf);

   if (!fbuf)
      goto out_no_buf;

   fbuf->supports_persistent = supports_persistent;
   fbuf->bt_depth = bt_depth;
   pipe_reference_init(&fbuf->reference, 1);
   (void) mtx_init(&fbuf->mutex, mtx_plain);

   return fbuf;
out_no_buf:
   debug_printf("Debug flush buffer creation failed.\n");
   debug_printf("Debug flush checking for this buffer will be incomplete.\n");
   return NULL;
}

/**
 * Update *dst to reference src, destroying *dst when its refcount drops
 * to zero. Works when *dst is NULL because 'reference' is the first
 * struct member.
 */
void
debug_flush_buf_reference(struct debug_flush_buf **dst,
                          struct debug_flush_buf *src)
{
   struct debug_flush_buf *fbuf = *dst;

   if (pipe_reference(&(*dst)->reference, &src->reference)) {
      int i;

      /* Last reference gone: free any leftover map backtraces. */
      for (i = 0; i < fbuf->map_count; ++i) {
         FREE(fbuf->maps[i].frame);
      }
      FREE(fbuf);
   }

   *dst = src;
}

/* Drop a command-stream reference item and its captured backtrace. */
static void
debug_flush_item_destroy(struct debug_flush_item *item)
{
   debug_flush_buf_reference(&item->fbuf, NULL);

   FREE(item->ref_frame);

   FREE(item);
}

/**
 * Create a debug-flush context and register it on the global context list.
 *
 * @param catch_reference_of_mapped  Also warn when a buffer that is already
 *                                   referenced by a command stream gets a
 *                                   synchronous map.
 * @param bt_depth  Number of backtrace frames to capture for alerts.
 */
struct debug_flush_ctx *
debug_flush_ctx_create(boolean catch_reference_of_mapped,
                       unsigned bt_depth)
{
   struct debug_flush_ctx *fctx = CALLOC_STRUCT(debug_flush_ctx);

   if (!fctx)
      goto out_no_ctx;

   fctx->ref_hash = util_hash_table_create_ptr_keys();

   if (!fctx->ref_hash)
      goto out_no_ref_hash;

   fctx->bt_depth = bt_depth;
   /* Bug fix: this flag was previously never copied from the argument,
    * leaving catch_map_of_referenced permanently FALSE and turning the
    * "already referenced map" check in debug_flush_map() into dead code. */
   fctx->catch_map_of_referenced = catch_reference_of_mapped;

   mtx_lock(&list_mutex);
   list_addtail(&fctx->head, &ctx_list);
   mtx_unlock(&list_mutex);

   return fctx;

out_no_ref_hash:
   FREE(fctx);
out_no_ctx:
   debug_printf("Debug flush context creation failed.\n");
   debug_printf("Debug flush checking for this context will be incomplete.\n");
   return NULL;
}

/**
 * Print one alert message with an optional backtrace.
 *
 * @param capture  If TRUE, capture a fresh backtrace here (and free it);
 *                 otherwise dump the caller-supplied @frame.
 * @param continued  If TRUE, more related alerts follow this one.
 */
static void
debug_flush_alert(const char *s, const char *op,
                  unsigned start, unsigned depth,
                  boolean continued,
                  boolean capture,
                  const struct debug_stack_frame *frame)
{
   if (capture)
      frame = debug_flush_capture_frame(start, depth);

   if (s)
      debug_printf("%s ", s);
   if (frame) {
      debug_printf("%s backtrace follows:\n", op);
      debug_backtrace_dump(frame, depth);
   } else
      debug_printf("No %s backtrace was captured.\n", op);

   if (continued)
      debug_printf("**********************************\n");
   else
      debug_printf("*********END OF MESSAGE***********\n\n\n");

   if (capture)
      FREE((void *)frame);
}


/**
 * Record a map of @fbuf and alert on recursive sync maps and on sync maps
 * of buffers already referenced by a command stream.
 */
void
debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
{
   boolean map_sync, persistent;

   if (!fbuf)
      return;

   mtx_lock(&fbuf->mutex);
   map_sync = !(flags & PIPE_MAP_UNSYNCHRONIZED);
   persistent = !map_sync || fbuf->supports_persistent ||
      !!(flags & PIPE_MAP_PERSISTENT);

   /* Recursive maps are allowed if previous maps are persistent,
    * or if the current map is unsync. In other cases we might flush
    * with unpersistent maps.
    */
   if (fbuf->has_sync_map && !map_sync) {
      debug_flush_alert("Recursive sync map detected.", "Map",
                        2, fbuf->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Previous map", 0, fbuf->bt_depth, FALSE,
                        FALSE, fbuf->maps[fbuf->last_sync_map].frame);
   }

   /* Bug fix: check the bound BEFORE writing maps[map_count]; previously
    * the assert ran after the (potentially out-of-bounds) store. */
   assert(fbuf->map_count < DEBUG_FLUSH_MAP_DEPTH);
   fbuf->maps[fbuf->map_count].frame =
      debug_flush_capture_frame(1, fbuf->bt_depth);
   fbuf->maps[fbuf->map_count].persistent = persistent;
   if (!persistent) {
      fbuf->has_sync_map = TRUE;
      fbuf->last_sync_map = fbuf->map_count;
   }

   fbuf->map_count++;

   mtx_unlock(&fbuf->mutex);

   if (!persistent) {
      struct debug_flush_ctx *fctx;

      /* Warn if any context already references this buffer on its
       * command stream. */
      mtx_lock(&list_mutex);
      LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
         struct debug_flush_item *item =
            util_hash_table_get(fctx->ref_hash, fbuf);

         if (item && fctx->catch_map_of_referenced) {
            debug_flush_alert("Already referenced map detected.",
                              "Map", 2, fbuf->bt_depth, TRUE, TRUE, NULL);
            debug_flush_alert(NULL, "Reference", 0, item->bt_depth,
                              FALSE, FALSE, item->ref_frame);
         }
      }
      mtx_unlock(&list_mutex);
   }
}

/**
 * Record an unmap of @fbuf, alerting on unbalanced unmaps and recomputing
 * which remaining map (if any) is the most recent synchronous one.
 */
void
debug_flush_unmap(struct debug_flush_buf *fbuf)
{
   if (!fbuf)
      return;

   mtx_lock(&fbuf->mutex);
   if (--fbuf->map_count < 0) {
      debug_flush_alert("Unmap not previously mapped detected.", "Map",
                        2, fbuf->bt_depth, FALSE, TRUE, NULL);
   } else {
      if (fbuf->has_sync_map && fbuf->last_sync_map == fbuf->map_count) {
         int i = fbuf->map_count;

         /* Scan remaining maps for the newest non-persistent one. */
         fbuf->has_sync_map = FALSE;
         while (i-- && !fbuf->has_sync_map) {
            if (!fbuf->maps[i].persistent) {
               fbuf->has_sync_map = TRUE;
               fbuf->last_sync_map = i;
            }
         }
         FREE(fbuf->maps[fbuf->map_count].frame);
         fbuf->maps[fbuf->map_count].frame = NULL;
      }
   }
   mtx_unlock(&fbuf->mutex);
}


/**
 * Add the given buffer to the list of active buffers. Active buffers
 * are those which are referenced by the command buffer currently being
 * constructed.
 */
void
debug_flush_cb_reference(struct debug_flush_ctx *fctx,
                         struct debug_flush_buf *fbuf)
{
   struct debug_flush_item *item;

   if (!fctx || !fbuf)
      return;

   item = util_hash_table_get(fctx->ref_hash, fbuf);

   mtx_lock(&fbuf->mutex);
   if (fbuf->map_count && fbuf->has_sync_map) {
      debug_flush_alert("Reference of mapped buffer detected.", "Reference",
                        2, fctx->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, FALSE,
                        FALSE, fbuf->maps[fbuf->last_sync_map].frame);
   }
   mtx_unlock(&fbuf->mutex);

   if (!item) {
      item = CALLOC_STRUCT(debug_flush_item);
      if (item) {
         debug_flush_buf_reference(&item->fbuf, fbuf);
         item->bt_depth = fctx->bt_depth;
         item->ref_frame = debug_flush_capture_frame(2, item->bt_depth);
         _mesa_hash_table_insert(fctx->ref_hash, fbuf, item);
         return;
      }
      goto out_no_item;
   }
   return;

out_no_item:
   debug_printf("Debug flush command buffer reference creation failed.\n");
   debug_printf("Debug flush checking will be incomplete "
                "for this command batch.\n");
}

/* Hash-table callback: alert if the referenced buffer is sync-mapped.
 * @data carries the reason string ("Might flush" / "Flush"). */
static enum pipe_error
debug_flush_might_flush_cb(UNUSED void *key, void *value, void *data)
{
   struct debug_flush_item *item =
      (struct debug_flush_item *) value;
   struct debug_flush_buf *fbuf = item->fbuf;

   mtx_lock(&fbuf->mutex);
   if (fbuf->map_count && fbuf->has_sync_map) {
      const char *reason = (const char *) data;
      char message[80];

      snprintf(message, sizeof(message),
               "%s referenced mapped buffer detected.", reason);

      debug_flush_alert(message, reason, 3, item->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, TRUE, FALSE,
                        fbuf->maps[fbuf->last_sync_map].frame);
      debug_flush_alert(NULL, "First reference", 0, item->bt_depth, FALSE,
                        FALSE, item->ref_frame);
   }
   mtx_unlock(&fbuf->mutex);

   return PIPE_OK;
}

/**
 * Called when we're about to possibly flush a command buffer.
 * We check if any active buffers are in a mapped state. If so, print an alert.
 */
void
debug_flush_might_flush(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_might_flush_cb,
                           "Might flush");
}

/* Hash-table callback: destroy one command-stream reference item. */
static enum pipe_error
debug_flush_flush_cb(UNUSED void *key, void *value, UNUSED void *data)
{
   struct debug_flush_item *item =
      (struct debug_flush_item *) value;

   debug_flush_item_destroy(item);

   return PIPE_OK;
}


/**
 * Called when we flush a command buffer. Two things are done:
 * 1. Check if any of the active buffers are currently mapped (alert if so).
 * 2. Discard/unreference all the active buffers.
 */
void
debug_flush_flush(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_might_flush_cb,
                           "Flush");
   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_flush_cb,
                           NULL);
   _mesa_hash_table_clear(fctx->ref_hash, NULL);
}

/**
 * Destroy a debug-flush context, releasing all tracked references.
 */
void
debug_flush_ctx_destroy(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   /* Bug fix: ctx_list is shared across threads; take list_mutex around
    * list_del() to match the locking done in debug_flush_ctx_create(). */
   mtx_lock(&list_mutex);
   list_del(&fctx->head);
   mtx_unlock(&list_mutex);

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_flush_cb,
                           NULL);
   _mesa_hash_table_clear(fctx->ref_hash, NULL);
   _mesa_hash_table_destroy(fctx->ref_hash, NULL);
   FREE(fctx);
}
#endif