Path: drivers/gpu/drm/i915/i915_gem_debug.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#if WATCH_LISTS
/*
 * Walk the GEM object lists and check that every object on them is still
 * alive, belongs to this device and is in a state consistent with the list
 * it sits on.  The result is cached in 'warned' so we only report once.
 */
int
i915_verify_lists(struct drm_device *dev)
{
	static int warned;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int err = 0;

	if (warned)
		return 0;

	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed render active %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
			DRM_ERROR("invalid render active %p (a %d r %x)\n",
				  obj,
				  obj->active,
				  obj->base.read_domains);
			err++;
		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
				  obj,
				  obj->base.write_domain,
				  !list_empty(&obj->gpu_write_list));
			err++;
		}
	}

	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed flushing %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
			   list_empty(&obj->gpu_write_list)) {
			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
				  obj,
				  obj->active,
				  obj->base.write_domain,
				  !list_empty(&obj->gpu_write_list));
			err++;
		}
	}

	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed gpu write %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
				  obj,
				  obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed inactive %p\n", obj);
			err++;
			break;
		} else if (obj->pin_count || obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
				  obj,
				  obj->pin_count, obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed pinned %p\n", obj);
			err++;
			break;
		} else if (!obj->pin_count || obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
			DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
				  obj,
				  obj->pin_count, obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	return warned = err;
}
#endif /* WATCH_LISTS */

#if WATCH_COHERENCY
/*
 * Compare the CPU view of an object's backing pages with the GPU view
 * through the GTT aperture and log the first few dwords that differ.
 */
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
	struct drm_device *dev = obj->base.dev;
	int page;
	uint32_t *gtt_mapping;
	uint32_t *backing_map = NULL;
	int bad_count = 0;

	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
		 __func__, obj, obj->gtt_offset, handle,
		 obj->base.size / 1024);

	gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
	if (gtt_mapping == NULL) {
		DRM_ERROR("failed to map GTT space\n");
		return;
	}

	for (page = 0; page < obj->base.size / PAGE_SIZE; page++) {
		int i;

		backing_map = kmap_atomic(obj->pages[page], KM_USER0);

		if (backing_map == NULL) {
			DRM_ERROR("failed to map backing page\n");
			goto out;
		}

		for (i = 0; i < PAGE_SIZE / 4; i++) {
			uint32_t cpuval = backing_map[i];
			uint32_t gttval = readl(gtt_mapping +
						page * 1024 + i);

			if (cpuval != gttval) {
				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
					 "0x%08x vs 0x%08x\n",
					 (int)(obj->gtt_offset +
					       page * PAGE_SIZE + i * 4),
					 cpuval, gttval);
				if (bad_count++ >= 8) {
					DRM_INFO("...\n");
					goto out;
				}
			}
		}
		kunmap_atomic(backing_map, KM_USER0);
		backing_map = NULL;
	}

 out:
	if (backing_map != NULL)
		kunmap_atomic(backing_map, KM_USER0);
	iounmap(gtt_mapping);

	/* give syslog time to catch up */
	msleep(1);

	/* Directly flush the object, since we just loaded values with the CPU
	 * from the backing pages and we don't want to disturb the cache
	 * management that we're trying to observe.
	 */
	i915_gem_clflush_object(obj);
}
#endif /* WATCH_COHERENCY */