/* drivers/gpu/drm/nouveau/nouveau_channel.c */
/*1* Copyright 2005-2006 Stephane Marchesin2* All Rights Reserved.3*4* Permission is hereby granted, free of charge, to any person obtaining a5* copy of this software and associated documentation files (the "Software"),6* to deal in the Software without restriction, including without limitation7* the rights to use, copy, modify, merge, publish, distribute, sublicense,8* and/or sell copies of the Software, and to permit persons to whom the9* Software is furnished to do so, subject to the following conditions:10*11* The above copyright notice and this permission notice (including the next12* paragraph) shall be included in all copies or substantial portions of the13* Software.14*15* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR16* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,17* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL18* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR19* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,20* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER21* DEALINGS IN THE SOFTWARE.22*/2324#include "drmP.h"25#include "drm.h"26#include "nouveau_drv.h"27#include "nouveau_drm.h"28#include "nouveau_dma.h"2930static int31nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)32{33struct drm_device *dev = chan->dev;34struct drm_nouveau_private *dev_priv = dev->dev_private;35struct nouveau_bo *pb = chan->pushbuf_bo;36struct nouveau_gpuobj *pushbuf = NULL;37int ret = 0;3839if (dev_priv->card_type >= NV_50) {40if (dev_priv->card_type < NV_C0) {41ret = nouveau_gpuobj_dma_new(chan,42NV_CLASS_DMA_IN_MEMORY, 0,43(1ULL << 40),44NV_MEM_ACCESS_RO,45NV_MEM_TARGET_VM,46&pushbuf);47}48chan->pushbuf_base = pb->bo.offset;49} else50if (pb->bo.mem.mem_type == TTM_PL_TT) {51ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,52dev_priv->gart_info.aper_size,53NV_MEM_ACCESS_RO,54NV_MEM_TARGET_GART, 
&pushbuf);55chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;56} else57if (dev_priv->card_type != NV_04) {58ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,59dev_priv->fb_available_size,60NV_MEM_ACCESS_RO,61NV_MEM_TARGET_VRAM, &pushbuf);62chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;63} else {64/* NV04 cmdbuf hack, from original ddx.. not sure of it's65* exact reason for existing :) PCI access to cmdbuf in66* VRAM.67*/68ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,69pci_resource_start(dev->pdev, 1),70dev_priv->fb_available_size,71NV_MEM_ACCESS_RO,72NV_MEM_TARGET_PCI, &pushbuf);73chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;74}7576nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);77nouveau_gpuobj_ref(NULL, &pushbuf);78return ret;79}8081static struct nouveau_bo *82nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)83{84struct nouveau_bo *pushbuf = NULL;85int location, ret;8687if (nouveau_vram_pushbuf)88location = TTM_PL_FLAG_VRAM;89else90location = TTM_PL_FLAG_TT;9192ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);93if (ret) {94NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);95return NULL;96}9798ret = nouveau_bo_pin(pushbuf, location);99if (ret) {100NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);101nouveau_bo_ref(NULL, &pushbuf);102return NULL;103}104105ret = nouveau_bo_map(pushbuf);106if (ret) {107nouveau_bo_unpin(pushbuf);108nouveau_bo_ref(NULL, &pushbuf);109return NULL;110}111112return pushbuf;113}114115/* allocates and initializes a fifo for user space consumption */116int117nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,118struct drm_file *file_priv,119uint32_t vram_handle, uint32_t gart_handle)120{121struct drm_nouveau_private *dev_priv = dev->dev_private;122struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;123struct nouveau_channel *chan;124unsigned long flags;125int ret;126127/* allocate and lock channel structure */128chan = 
kzalloc(sizeof(*chan), GFP_KERNEL);129if (!chan)130return -ENOMEM;131chan->dev = dev;132chan->file_priv = file_priv;133chan->vram_handle = vram_handle;134chan->gart_handle = gart_handle;135136kref_init(&chan->ref);137atomic_set(&chan->users, 1);138mutex_init(&chan->mutex);139mutex_lock(&chan->mutex);140141/* allocate hw channel id */142spin_lock_irqsave(&dev_priv->channels.lock, flags);143for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {144if (!dev_priv->channels.ptr[chan->id]) {145nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);146break;147}148}149spin_unlock_irqrestore(&dev_priv->channels.lock, flags);150151if (chan->id == pfifo->channels) {152mutex_unlock(&chan->mutex);153kfree(chan);154return -ENODEV;155}156157NV_DEBUG(dev, "initialising channel %d\n", chan->id);158INIT_LIST_HEAD(&chan->nvsw.vbl_wait);159INIT_LIST_HEAD(&chan->nvsw.flip);160INIT_LIST_HEAD(&chan->fence.pending);161162/* Allocate DMA push buffer */163chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);164if (!chan->pushbuf_bo) {165ret = -ENOMEM;166NV_ERROR(dev, "pushbuf %d\n", ret);167nouveau_channel_put(&chan);168return ret;169}170171nouveau_dma_pre_init(chan);172chan->user_put = 0x40;173chan->user_get = 0x44;174175/* Allocate space for per-channel fixed notifier memory */176ret = nouveau_notifier_init_channel(chan);177if (ret) {178NV_ERROR(dev, "ntfy %d\n", ret);179nouveau_channel_put(&chan);180return ret;181}182183/* Setup channel's default objects */184ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);185if (ret) {186NV_ERROR(dev, "gpuobj %d\n", ret);187nouveau_channel_put(&chan);188return ret;189}190191/* Create a dma object for the push buffer */192ret = nouveau_channel_pushbuf_ctxdma_init(chan);193if (ret) {194NV_ERROR(dev, "pbctxdma %d\n", ret);195nouveau_channel_put(&chan);196return ret;197}198199/* disable the fifo caches */200pfifo->reassign(dev, false);201202/* Construct initial RAMFC for new channel */203ret = 
pfifo->create_context(chan);204if (ret) {205nouveau_channel_put(&chan);206return ret;207}208209pfifo->reassign(dev, true);210211ret = nouveau_dma_init(chan);212if (!ret)213ret = nouveau_fence_channel_init(chan);214if (ret) {215nouveau_channel_put(&chan);216return ret;217}218219nouveau_debugfs_channel_init(chan);220221NV_DEBUG(dev, "channel %d initialised\n", chan->id);222*chan_ret = chan;223return 0;224}225226struct nouveau_channel *227nouveau_channel_get_unlocked(struct nouveau_channel *ref)228{229struct nouveau_channel *chan = NULL;230231if (likely(ref && atomic_inc_not_zero(&ref->users)))232nouveau_channel_ref(ref, &chan);233234return chan;235}236237struct nouveau_channel *238nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)239{240struct drm_nouveau_private *dev_priv = dev->dev_private;241struct nouveau_channel *chan;242unsigned long flags;243244if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))245return ERR_PTR(-EINVAL);246247spin_lock_irqsave(&dev_priv->channels.lock, flags);248chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);249spin_unlock_irqrestore(&dev_priv->channels.lock, flags);250251if (unlikely(!chan))252return ERR_PTR(-EINVAL);253254if (unlikely(file_priv && chan->file_priv != file_priv)) {255nouveau_channel_put_unlocked(&chan);256return ERR_PTR(-EINVAL);257}258259mutex_lock(&chan->mutex);260return chan;261}262263void264nouveau_channel_put_unlocked(struct nouveau_channel **pchan)265{266struct nouveau_channel *chan = *pchan;267struct drm_device *dev = chan->dev;268struct drm_nouveau_private *dev_priv = dev->dev_private;269struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;270unsigned long flags;271int i;272273/* decrement the refcount, and we're done if there's still refs */274if (likely(!atomic_dec_and_test(&chan->users))) {275nouveau_channel_ref(NULL, pchan);276return;277}278279/* no one wants the channel anymore */280NV_DEBUG(dev, "freeing channel %d\n", 
chan->id);281nouveau_debugfs_channel_fini(chan);282283/* give it chance to idle */284nouveau_channel_idle(chan);285286/* ensure all outstanding fences are signaled. they should be if the287* above attempts at idling were OK, but if we failed this'll tell TTM288* we're done with the buffers.289*/290nouveau_fence_channel_fini(chan);291292/* boot it off the hardware */293pfifo->reassign(dev, false);294295/* destroy the engine specific contexts */296pfifo->destroy_context(chan);297for (i = 0; i < NVOBJ_ENGINE_NR; i++) {298if (chan->engctx[i])299dev_priv->eng[i]->context_del(chan, i);300}301302pfifo->reassign(dev, true);303304/* aside from its resources, the channel should now be dead,305* remove it from the channel list306*/307spin_lock_irqsave(&dev_priv->channels.lock, flags);308nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);309spin_unlock_irqrestore(&dev_priv->channels.lock, flags);310311/* destroy any resources the channel owned */312nouveau_gpuobj_ref(NULL, &chan->pushbuf);313if (chan->pushbuf_bo) {314nouveau_bo_unmap(chan->pushbuf_bo);315nouveau_bo_unpin(chan->pushbuf_bo);316nouveau_bo_ref(NULL, &chan->pushbuf_bo);317}318nouveau_gpuobj_channel_takedown(chan);319nouveau_notifier_takedown_channel(chan);320321nouveau_channel_ref(NULL, pchan);322}323324void325nouveau_channel_put(struct nouveau_channel **pchan)326{327mutex_unlock(&(*pchan)->mutex);328nouveau_channel_put_unlocked(pchan);329}330331static void332nouveau_channel_del(struct kref *ref)333{334struct nouveau_channel *chan =335container_of(ref, struct nouveau_channel, ref);336337kfree(chan);338}339340void341nouveau_channel_ref(struct nouveau_channel *chan,342struct nouveau_channel **pchan)343{344if (chan)345kref_get(&chan->ref);346347if (*pchan)348kref_put(&(*pchan)->ref, nouveau_channel_del);349350*pchan = chan;351}352353void354nouveau_channel_idle(struct nouveau_channel *chan)355{356struct drm_device *dev = chan->dev;357struct nouveau_fence *fence = NULL;358int 
ret;359360nouveau_fence_update(chan);361362if (chan->fence.sequence != chan->fence.sequence_ack) {363ret = nouveau_fence_new(chan, &fence, true);364if (!ret) {365ret = nouveau_fence_wait(fence, false, false);366nouveau_fence_unref(&fence);367}368369if (ret)370NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);371}372}373374/* cleans up all the fifos from file_priv */375void376nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)377{378struct drm_nouveau_private *dev_priv = dev->dev_private;379struct nouveau_engine *engine = &dev_priv->engine;380struct nouveau_channel *chan;381int i;382383NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");384for (i = 0; i < engine->fifo.channels; i++) {385chan = nouveau_channel_get(dev, file_priv, i);386if (IS_ERR(chan))387continue;388389atomic_dec(&chan->users);390nouveau_channel_put(&chan);391}392}393394395/***********************************396* ioctls wrapping the functions397***********************************/398399static int400nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,401struct drm_file *file_priv)402{403struct drm_nouveau_private *dev_priv = dev->dev_private;404struct drm_nouveau_channel_alloc *init = data;405struct nouveau_channel *chan;406int ret;407408if (!dev_priv->eng[NVOBJ_ENGINE_GR])409return -ENODEV;410411if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)412return -EINVAL;413414ret = nouveau_channel_alloc(dev, &chan, file_priv,415init->fb_ctxdma_handle,416init->tt_ctxdma_handle);417if (ret)418return ret;419init->channel = chan->id;420421if (chan->dma.ib_max)422init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |423NOUVEAU_GEM_DOMAIN_GART;424else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)425init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;426else427init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;428429if (dev_priv->card_type < NV_C0) {430init->subchan[0].handle = NvM2MF;431if (dev_priv->card_type < NV_50)432init->subchan[0].grclass = 
0x0039;433else434init->subchan[0].grclass = 0x5039;435init->subchan[1].handle = NvSw;436init->subchan[1].grclass = NV_SW;437init->nr_subchan = 2;438} else {439init->subchan[0].handle = 0x9039;440init->subchan[0].grclass = 0x9039;441init->nr_subchan = 1;442}443444/* Named memory object area */445ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,446&init->notifier_handle);447448if (ret == 0)449atomic_inc(&chan->users); /* userspace reference */450nouveau_channel_put(&chan);451return ret;452}453454static int455nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,456struct drm_file *file_priv)457{458struct drm_nouveau_channel_free *req = data;459struct nouveau_channel *chan;460461chan = nouveau_channel_get(dev, file_priv, req->channel);462if (IS_ERR(chan))463return PTR_ERR(chan);464465atomic_dec(&chan->users);466nouveau_channel_put(&chan);467return 0;468}469470/***********************************471* finally, the ioctl table472***********************************/473474struct drm_ioctl_desc nouveau_ioctls[] = {475DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),476DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),477DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),478DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),479DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),480DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),481DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),482DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),483DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),484DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, 
DRM_UNLOCKED|DRM_AUTH),485DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),486DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),487};488489int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);490491492