/* Path: blob/master/drivers/gpu/drm/nouveau/nouveau_object.c */
/*1* Copyright (C) 2006 Ben Skeggs.2*3* All Rights Reserved.4*5* Permission is hereby granted, free of charge, to any person obtaining6* a copy of this software and associated documentation files (the7* "Software"), to deal in the Software without restriction, including8* without limitation the rights to use, copy, modify, merge, publish,9* distribute, sublicense, and/or sell copies of the Software, and to10* permit persons to whom the Software is furnished to do so, subject to11* the following conditions:12*13* The above copyright notice and this permission notice (including the14* next paragraph) shall be included in all copies or substantial15* portions of the Software.16*17* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,18* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF19* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.20* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE21* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION22* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION23* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.24*25*/2627/*28* Authors:29* Ben Skeggs <[email protected]>30*/3132#include "drmP.h"33#include "drm.h"34#include "nouveau_drv.h"35#include "nouveau_drm.h"36#include "nouveau_ramht.h"37#include "nouveau_vm.h"38#include "nv50_display.h"3940struct nouveau_gpuobj_method {41struct list_head head;42u32 mthd;43int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);44};4546struct nouveau_gpuobj_class {47struct list_head head;48struct list_head methods;49u32 id;50u32 engine;51};5253int54nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)55{56struct drm_nouveau_private *dev_priv = dev->dev_private;57struct nouveau_gpuobj_class *oc;5859oc = kzalloc(sizeof(*oc), GFP_KERNEL);60if (!oc)61return -ENOMEM;6263INIT_LIST_HEAD(&oc->methods);64oc->id = class;65oc->engine = 
engine;66list_add(&oc->head, &dev_priv->classes);67return 0;68}6970int71nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,72int (*exec)(struct nouveau_channel *, u32, u32, u32))73{74struct drm_nouveau_private *dev_priv = dev->dev_private;75struct nouveau_gpuobj_method *om;76struct nouveau_gpuobj_class *oc;7778list_for_each_entry(oc, &dev_priv->classes, head) {79if (oc->id == class)80goto found;81}8283return -EINVAL;8485found:86om = kzalloc(sizeof(*om), GFP_KERNEL);87if (!om)88return -ENOMEM;8990om->mthd = mthd;91om->exec = exec;92list_add(&om->head, &oc->methods);93return 0;94}9596int97nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,98u32 class, u32 mthd, u32 data)99{100struct drm_nouveau_private *dev_priv = chan->dev->dev_private;101struct nouveau_gpuobj_method *om;102struct nouveau_gpuobj_class *oc;103104list_for_each_entry(oc, &dev_priv->classes, head) {105if (oc->id != class)106continue;107108list_for_each_entry(om, &oc->methods, head) {109if (om->mthd == mthd)110return om->exec(chan, class, mthd, data);111}112}113114return -ENOENT;115}116117int118nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,119u32 class, u32 mthd, u32 data)120{121struct drm_nouveau_private *dev_priv = dev->dev_private;122struct nouveau_channel *chan = NULL;123unsigned long flags;124int ret = -EINVAL;125126spin_lock_irqsave(&dev_priv->channels.lock, flags);127if (chid > 0 && chid < dev_priv->engine.fifo.channels)128chan = dev_priv->channels.ptr[chid];129if (chan)130ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);131spin_unlock_irqrestore(&dev_priv->channels.lock, flags);132return ret;133}134135/* NVidia uses context objects to drive drawing operations.136137Context objects can be selected into 8 subchannels in the FIFO,138and then used via DMA command buffers.139140A context object is referenced by a user defined handle (CARD32). 
The HW141looks up graphics objects in a hash table in the instance RAM.142143An entry in the hash table consists of 2 CARD32. The first CARD32 contains144the handle, the second one a bitfield, that contains the address of the145object in instance RAM.146147The format of the second CARD32 seems to be:148149NV4 to NV30:15015115: 0 instance_addr >> 415217:16 engine (here uses 1 = graphics)15328:24 channel id (here uses 0)15431 valid (use 1)155156NV40:15715815: 0 instance_addr >> 4 (maybe 19-0)15921:20 engine (here uses 1 = graphics)160I'm unsure about the other bits, but using 0 seems to work.161162The key into the hash table depends on the object handle and channel id and163is given as:164*/165166int167nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,168uint32_t size, int align, uint32_t flags,169struct nouveau_gpuobj **gpuobj_ret)170{171struct drm_nouveau_private *dev_priv = dev->dev_private;172struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;173struct nouveau_gpuobj *gpuobj;174struct drm_mm_node *ramin = NULL;175int ret, i;176177NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",178chan ? 
chan->id : -1, size, align, flags);179180gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);181if (!gpuobj)182return -ENOMEM;183NV_DEBUG(dev, "gpuobj %p\n", gpuobj);184gpuobj->dev = dev;185gpuobj->flags = flags;186kref_init(&gpuobj->refcount);187gpuobj->size = size;188189spin_lock(&dev_priv->ramin_lock);190list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);191spin_unlock(&dev_priv->ramin_lock);192193if (chan) {194ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);195if (ramin)196ramin = drm_mm_get_block(ramin, size, align);197if (!ramin) {198nouveau_gpuobj_ref(NULL, &gpuobj);199return -ENOMEM;200}201202gpuobj->pinst = chan->ramin->pinst;203if (gpuobj->pinst != ~0)204gpuobj->pinst += ramin->start;205206gpuobj->cinst = ramin->start;207gpuobj->vinst = ramin->start + chan->ramin->vinst;208gpuobj->node = ramin;209} else {210ret = instmem->get(gpuobj, size, align);211if (ret) {212nouveau_gpuobj_ref(NULL, &gpuobj);213return ret;214}215216ret = -ENOSYS;217if (!(flags & NVOBJ_FLAG_DONT_MAP))218ret = instmem->map(gpuobj);219if (ret)220gpuobj->pinst = ~0;221222gpuobj->cinst = NVOBJ_CINST_GLOBAL;223}224225if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {226for (i = 0; i < gpuobj->size; i += 4)227nv_wo32(gpuobj, i, 0);228instmem->flush(dev);229}230231232*gpuobj_ret = gpuobj;233return 0;234}235236int237nouveau_gpuobj_init(struct drm_device *dev)238{239struct drm_nouveau_private *dev_priv = dev->dev_private;240241NV_DEBUG(dev, "\n");242243INIT_LIST_HEAD(&dev_priv->gpuobj_list);244INIT_LIST_HEAD(&dev_priv->classes);245spin_lock_init(&dev_priv->ramin_lock);246dev_priv->ramin_base = ~0;247248return 0;249}250251void252nouveau_gpuobj_takedown(struct drm_device *dev)253{254struct drm_nouveau_private *dev_priv = dev->dev_private;255struct nouveau_gpuobj_method *om, *tm;256struct nouveau_gpuobj_class *oc, *tc;257258NV_DEBUG(dev, "\n");259260list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {261list_for_each_entry_safe(om, tm, &oc->methods, head) 
{262list_del(&om->head);263kfree(om);264}265list_del(&oc->head);266kfree(oc);267}268269BUG_ON(!list_empty(&dev_priv->gpuobj_list));270}271272273static void274nouveau_gpuobj_del(struct kref *ref)275{276struct nouveau_gpuobj *gpuobj =277container_of(ref, struct nouveau_gpuobj, refcount);278struct drm_device *dev = gpuobj->dev;279struct drm_nouveau_private *dev_priv = dev->dev_private;280struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;281int i;282283NV_DEBUG(dev, "gpuobj %p\n", gpuobj);284285if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {286for (i = 0; i < gpuobj->size; i += 4)287nv_wo32(gpuobj, i, 0);288instmem->flush(dev);289}290291if (gpuobj->dtor)292gpuobj->dtor(dev, gpuobj);293294if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {295if (gpuobj->node) {296instmem->unmap(gpuobj);297instmem->put(gpuobj);298}299} else {300if (gpuobj->node) {301spin_lock(&dev_priv->ramin_lock);302drm_mm_put_block(gpuobj->node);303spin_unlock(&dev_priv->ramin_lock);304}305}306307spin_lock(&dev_priv->ramin_lock);308list_del(&gpuobj->list);309spin_unlock(&dev_priv->ramin_lock);310311kfree(gpuobj);312}313314void315nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)316{317if (ref)318kref_get(&ref->refcount);319320if (*ptr)321kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);322323*ptr = ref;324}325326int327nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,328u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)329{330struct drm_nouveau_private *dev_priv = dev->dev_private;331struct nouveau_gpuobj *gpuobj = NULL;332int i;333334NV_DEBUG(dev,335"pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",336pinst, vinst, size, flags);337338gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);339if (!gpuobj)340return -ENOMEM;341NV_DEBUG(dev, "gpuobj %p\n", gpuobj);342gpuobj->dev = dev;343gpuobj->flags = flags;344kref_init(&gpuobj->refcount);345gpuobj->size = size;346gpuobj->pinst = pinst;347gpuobj->cinst = 
NVOBJ_CINST_GLOBAL;348gpuobj->vinst = vinst;349350if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {351for (i = 0; i < gpuobj->size; i += 4)352nv_wo32(gpuobj, i, 0);353dev_priv->engine.instmem.flush(dev);354}355356spin_lock(&dev_priv->ramin_lock);357list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);358spin_unlock(&dev_priv->ramin_lock);359*pgpuobj = gpuobj;360return 0;361}362363/*364DMA objects are used to reference a piece of memory in the365framebuffer, PCI or AGP address space. Each object is 16 bytes big366and looks as follows:367368entry[0]36911:0 class (seems like I can always use 0 here)37012 page table present?37113 page entry linear?37215:14 access: 0 rw, 1 ro, 2 wo37317:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP37431:20 dma adjust (bits 0-11 of the address)375entry[1]376dma limit (size of transfer)377entry[X]3781 0 readonly, 1 readwrite37931:12 dma frame address of the page (bits 12-31 of the address)380entry[N]381page table terminator, same value as the first pte, as does nvidia382rivatv uses 0xffffffff383384Non linear page tables need a list of frame addresses afterwards,385the rivatv project has some info on this.386387The method below creates a DMA object in instance RAM and returns a handle388to it that can be used to set up context objects.389*/390391void392nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,393u64 base, u64 size, int target, int access,394u32 type, u32 comp)395{396struct drm_nouveau_private *dev_priv = obj->dev->dev_private;397struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;398u32 flags0;399400flags0 = (comp << 29) | (type << 22) | class;401flags0 |= 0x00100000;402403switch (access) {404case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;405case NV_MEM_ACCESS_RW:406case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;407default:408break;409}410411switch (target) {412case NV_MEM_TARGET_VRAM:413flags0 |= 0x00010000;414break;415case NV_MEM_TARGET_PCI:416flags0 |= 
0x00020000;417break;418case NV_MEM_TARGET_PCI_NOSNOOP:419flags0 |= 0x00030000;420break;421case NV_MEM_TARGET_GART:422base += dev_priv->gart_info.aper_base;423default:424flags0 &= ~0x00100000;425break;426}427428/* convert to base + limit */429size = (base + size) - 1;430431nv_wo32(obj, offset + 0x00, flags0);432nv_wo32(obj, offset + 0x04, lower_32_bits(size));433nv_wo32(obj, offset + 0x08, lower_32_bits(base));434nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |435upper_32_bits(base));436nv_wo32(obj, offset + 0x10, 0x00000000);437nv_wo32(obj, offset + 0x14, 0x00000000);438439pinstmem->flush(obj->dev);440}441442int443nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,444int target, int access, u32 type, u32 comp,445struct nouveau_gpuobj **pobj)446{447struct drm_device *dev = chan->dev;448int ret;449450ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);451if (ret)452return ret;453454nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,455access, type, comp);456return 0;457}458459int460nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,461u64 size, int access, int target,462struct nouveau_gpuobj **pobj)463{464struct drm_nouveau_private *dev_priv = chan->dev->dev_private;465struct drm_device *dev = chan->dev;466struct nouveau_gpuobj *obj;467u32 flags0, flags2;468int ret;469470if (dev_priv->card_type >= NV_50) {471u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;472u32 type = (target == NV_MEM_TARGET_VM) ? 
NV_MEM_TYPE_VM : 0;473474return nv50_gpuobj_dma_new(chan, class, base, size,475target, access, type, comp, pobj);476}477478if (target == NV_MEM_TARGET_GART) {479struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;480481if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {482if (base == 0) {483nouveau_gpuobj_ref(gart, pobj);484return 0;485}486487base = nouveau_sgdma_get_physical(dev, base);488target = NV_MEM_TARGET_PCI;489} else {490base += dev_priv->gart_info.aper_base;491if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)492target = NV_MEM_TARGET_PCI_NOSNOOP;493else494target = NV_MEM_TARGET_PCI;495}496}497498flags0 = class;499flags0 |= 0x00003000; /* PT present, PT linear */500flags2 = 0;501502switch (target) {503case NV_MEM_TARGET_PCI:504flags0 |= 0x00020000;505break;506case NV_MEM_TARGET_PCI_NOSNOOP:507flags0 |= 0x00030000;508break;509default:510break;511}512513switch (access) {514case NV_MEM_ACCESS_RO:515flags0 |= 0x00004000;516break;517case NV_MEM_ACCESS_WO:518flags0 |= 0x00008000;519default:520flags2 |= 0x00000002;521break;522}523524flags0 |= (base & 0x00000fff) << 20;525flags2 |= (base & 0xfffff000);526527ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);528if (ret)529return ret;530531nv_wo32(obj, 0x00, flags0);532nv_wo32(obj, 0x04, size - 1);533nv_wo32(obj, 0x08, flags2);534nv_wo32(obj, 0x0c, flags2);535536obj->engine = NVOBJ_ENGINE_SW;537obj->class = class;538*pobj = obj;539return 0;540}541542/* Context objects in the instance RAM have the following structure.543* On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.544545NV4 - NV30:546547entry[0]54811:0 class54912 chroma key enable55013 user clip enable55114 swizzle enable55217:15 patch config:553scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre55418 synchronize enable55519 endian: 1 big, 0 little55621:20 dither mode55723 single step enable55824 patch status: 0 invalid, 1 valid55925 context_surface 0: 1 valid56026 context surface 1: 1 valid56127 context 
pattern: 1 valid56228 context rop: 1 valid56329,30 context beta, beta4564entry[1]5657:0 mono format56615:8 color format56731:16 notify instance address568entry[2]56915:0 dma 0 instance address57031:16 dma 1 instance address571entry[3]572dma method traps573574NV40:575No idea what the exact format is. Here's what can be deducted:576577entry[0]:57811:0 class (maybe uses more bits here?)57917 user clip enable58021:19 patch config58125 patch status valid ?582entry[1]:58315:0 DMA notifier (maybe 20:0)584entry[2]:58515:0 DMA 0 instance (maybe 20:0)58624 big endian587entry[3]:58815:0 DMA 1 instance (maybe 20:0)589entry[4]:590entry[5]:591set to 0?592*/593static int594nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)595{596struct drm_nouveau_private *dev_priv = chan->dev->dev_private;597struct nouveau_gpuobj *gpuobj;598int ret;599600gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);601if (!gpuobj)602return -ENOMEM;603gpuobj->dev = chan->dev;604gpuobj->engine = NVOBJ_ENGINE_SW;605gpuobj->class = class;606kref_init(&gpuobj->refcount);607gpuobj->cinst = 0x40;608609spin_lock(&dev_priv->ramin_lock);610list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);611spin_unlock(&dev_priv->ramin_lock);612613ret = nouveau_ramht_insert(chan, handle, gpuobj);614nouveau_gpuobj_ref(NULL, &gpuobj);615return ret;616}617618int619nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)620{621struct drm_nouveau_private *dev_priv = chan->dev->dev_private;622struct drm_device *dev = chan->dev;623struct nouveau_gpuobj_class *oc;624int ret;625626NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);627628list_for_each_entry(oc, &dev_priv->classes, head) {629struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];630631if (oc->id != class)632continue;633634if (oc->engine == NVOBJ_ENGINE_SW)635return nouveau_gpuobj_sw_new(chan, handle, class);636637if (!chan->engctx[oc->engine]) {638ret = eng->context_new(chan, oc->engine);639if (ret)640return ret;641}642643return 
eng->object_new(chan, oc->engine, handle, class);644}645646NV_ERROR(dev, "illegal object class: 0x%x\n", class);647return -EINVAL;648}649650static int651nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)652{653struct drm_device *dev = chan->dev;654struct drm_nouveau_private *dev_priv = dev->dev_private;655uint32_t size;656uint32_t base;657int ret;658659NV_DEBUG(dev, "ch%d\n", chan->id);660661/* Base amount for object storage (4KiB enough?) */662size = 0x2000;663base = 0;664665if (dev_priv->card_type == NV_50) {666/* Various fixed table thingos */667size += 0x1400; /* mostly unknown stuff */668size += 0x4000; /* vm pd */669base = 0x6000;670/* RAMHT, not sure about setting size yet, 32KiB to be safe */671size += 0x8000;672/* RAMFC */673size += 0x1000;674}675676ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);677if (ret) {678NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);679return ret;680}681682ret = drm_mm_init(&chan->ramin_heap, base, size);683if (ret) {684NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);685nouveau_gpuobj_ref(NULL, &chan->ramin);686return ret;687}688689return 0;690}691692int693nouveau_gpuobj_channel_init(struct nouveau_channel *chan,694uint32_t vram_h, uint32_t tt_h)695{696struct drm_device *dev = chan->dev;697struct drm_nouveau_private *dev_priv = dev->dev_private;698struct nouveau_gpuobj *vram = NULL, *tt = NULL;699int ret, i;700701NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);702703if (dev_priv->card_type == NV_C0) {704struct nouveau_vm *vm = dev_priv->chan_vm;705struct nouveau_vm_pgd *vpgd;706707ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,708&chan->ramin);709if (ret)710return ret;711712nouveau_vm_ref(vm, &chan->vm, NULL);713714vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);715nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));716nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));717nv_wo32(chan->ramin, 0x0208, 
0xffffffff);718nv_wo32(chan->ramin, 0x020c, 0x000000ff);719return 0;720}721722/* Allocate a chunk of memory for per-channel object storage */723ret = nouveau_gpuobj_channel_init_pramin(chan);724if (ret) {725NV_ERROR(dev, "init pramin\n");726return ret;727}728729/* NV50 VM730* - Allocate per-channel page-directory731* - Link with shared channel VM732*/733if (dev_priv->chan_vm) {734u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;735u64 vm_vinst = chan->ramin->vinst + pgd_offs;736u32 vm_pinst = chan->ramin->pinst;737738if (vm_pinst != ~0)739vm_pinst += pgd_offs;740741ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,7420, &chan->vm_pd);743if (ret)744return ret;745746nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);747}748749/* RAMHT */750if (dev_priv->card_type < NV_50) {751nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);752} else {753struct nouveau_gpuobj *ramht = NULL;754755ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,756NVOBJ_FLAG_ZERO_ALLOC, &ramht);757if (ret)758return ret;759760ret = nouveau_ramht_new(dev, ramht, &chan->ramht);761nouveau_gpuobj_ref(NULL, &ramht);762if (ret)763return ret;764765/* dma objects for display sync channel semaphore blocks */766for (i = 0; i < 2; i++) {767struct nouveau_gpuobj *sem = NULL;768struct nv50_display_crtc *dispc =769&nv50_display(dev)->crtc[i];770u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;771772ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,773NV_MEM_ACCESS_RW,774NV_MEM_TARGET_VRAM, &sem);775if (ret)776return ret;777778ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);779nouveau_gpuobj_ref(NULL, &sem);780if (ret)781return ret;782}783}784785/* VRAM ctxdma */786if (dev_priv->card_type >= NV_50) {787ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,7880, (1ULL << 40), NV_MEM_ACCESS_RW,789NV_MEM_TARGET_VM, &vram);790if (ret) {791NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);792return ret;793}794} else {795ret = nouveau_gpuobj_dma_new(chan, 
NV_CLASS_DMA_IN_MEMORY,7960, dev_priv->fb_available_size,797NV_MEM_ACCESS_RW,798NV_MEM_TARGET_VRAM, &vram);799if (ret) {800NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);801return ret;802}803}804805ret = nouveau_ramht_insert(chan, vram_h, vram);806nouveau_gpuobj_ref(NULL, &vram);807if (ret) {808NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);809return ret;810}811812/* TT memory ctxdma */813if (dev_priv->card_type >= NV_50) {814ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,8150, (1ULL << 40), NV_MEM_ACCESS_RW,816NV_MEM_TARGET_VM, &tt);817} else {818ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,8190, dev_priv->gart_info.aper_size,820NV_MEM_ACCESS_RW,821NV_MEM_TARGET_GART, &tt);822}823824if (ret) {825NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);826return ret;827}828829ret = nouveau_ramht_insert(chan, tt_h, tt);830nouveau_gpuobj_ref(NULL, &tt);831if (ret) {832NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);833return ret;834}835836return 0;837}838839void840nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)841{842struct drm_device *dev = chan->dev;843844NV_DEBUG(dev, "ch%d\n", chan->id);845846nouveau_ramht_ref(NULL, &chan->ramht, chan);847848nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);849nouveau_gpuobj_ref(NULL, &chan->vm_pd);850851if (drm_mm_initialized(&chan->ramin_heap))852drm_mm_takedown(&chan->ramin_heap);853nouveau_gpuobj_ref(NULL, &chan->ramin);854}855856int857nouveau_gpuobj_suspend(struct drm_device *dev)858{859struct drm_nouveau_private *dev_priv = dev->dev_private;860struct nouveau_gpuobj *gpuobj;861int i;862863list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {864if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)865continue;866867gpuobj->suspend = vmalloc(gpuobj->size);868if (!gpuobj->suspend) {869nouveau_gpuobj_resume(dev);870return -ENOMEM;871}872873for (i = 0; i < gpuobj->size; i += 4)874gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);875}876877return 
0;878}879880void881nouveau_gpuobj_resume(struct drm_device *dev)882{883struct drm_nouveau_private *dev_priv = dev->dev_private;884struct nouveau_gpuobj *gpuobj;885int i;886887list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {888if (!gpuobj->suspend)889continue;890891for (i = 0; i < gpuobj->size; i += 4)892nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);893894vfree(gpuobj->suspend);895gpuobj->suspend = NULL;896}897898dev_priv->engine.instmem.flush(dev);899}900901int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,902struct drm_file *file_priv)903{904struct drm_nouveau_grobj_alloc *init = data;905struct nouveau_channel *chan;906int ret;907908if (init->handle == ~0)909return -EINVAL;910911chan = nouveau_channel_get(dev, file_priv, init->channel);912if (IS_ERR(chan))913return PTR_ERR(chan);914915if (nouveau_ramht_find(chan, init->handle)) {916ret = -EEXIST;917goto out;918}919920ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);921if (ret) {922NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",923ret, init->channel, init->handle);924}925926out:927nouveau_channel_put(&chan);928return ret;929}930931int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,932struct drm_file *file_priv)933{934struct drm_nouveau_gpuobj_free *objfree = data;935struct nouveau_channel *chan;936int ret;937938chan = nouveau_channel_get(dev, file_priv, objfree->channel);939if (IS_ERR(chan))940return PTR_ERR(chan);941942/* Synchronize with the user channel */943nouveau_channel_idle(chan);944945ret = nouveau_ramht_remove(chan, objfree->handle);946nouveau_channel_put(&chan);947return ret;948}949950u32951nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)952{953struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;954struct drm_device *dev = gpuobj->dev;955unsigned long flags;956957if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {958u64 ptr = gpuobj->vinst + offset;959u32 base = ptr >> 16;960u32 val;961962spin_lock_irqsave(&dev_priv->vm_lock, 
flags);963if (dev_priv->ramin_base != base) {964dev_priv->ramin_base = base;965nv_wr32(dev, 0x001700, dev_priv->ramin_base);966}967val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));968spin_unlock_irqrestore(&dev_priv->vm_lock, flags);969return val;970}971972return nv_ri32(dev, gpuobj->pinst + offset);973}974975void976nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)977{978struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;979struct drm_device *dev = gpuobj->dev;980unsigned long flags;981982if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {983u64 ptr = gpuobj->vinst + offset;984u32 base = ptr >> 16;985986spin_lock_irqsave(&dev_priv->vm_lock, flags);987if (dev_priv->ramin_base != base) {988dev_priv->ramin_base = base;989nv_wr32(dev, 0x001700, dev_priv->ramin_base);990}991nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);992spin_unlock_irqrestore(&dev_priv->vm_lock, flags);993return;994}995996nv_wi32(dev, gpuobj->pinst + offset, val);997}9989991000