GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/gpu/drm/nouveau/nouveau_channel.c

/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

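/* Create the DMA context object through which the hardware fetches the
 * channel's push buffer.  The DMA target depends on the card generation
 * and on where the buffer object was placed: a 40-bit VM window on NV50
 * generation cards (NVC0+ needs no ctxdma at all), GART or VRAM on
 * earlier cards, and a PCI BAR1 mapping for the NV04 special case below.
 */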
static int
nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *pb = chan->pushbuf_bo;
        struct nouveau_gpuobj *pushbuf = NULL;
        int ret = 0;

        if (dev_priv->card_type >= NV_50) {
                if (dev_priv->card_type < NV_C0) {
                        ret = nouveau_gpuobj_dma_new(chan,
                                                     NV_CLASS_DMA_IN_MEMORY, 0,
                                                     (1ULL << 40),
                                                     NV_MEM_ACCESS_RO,
                                                     NV_MEM_TARGET_VM,
                                                     &pushbuf);
                }
                chan->pushbuf_base = pb->bo.offset;
        } else
        if (pb->bo.mem.mem_type == TTM_PL_TT) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
                                             dev_priv->gart_info.aper_size,
                                             NV_MEM_ACCESS_RO,
                                             NV_MEM_TARGET_GART, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        } else
        if (dev_priv->card_type != NV_04) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
                                             dev_priv->fb_available_size,
                                             NV_MEM_ACCESS_RO,
                                             NV_MEM_TARGET_VRAM, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        } else {
                /* NV04 cmdbuf hack, from original ddx.. not sure of its
                 * exact reason for existing :)  PCI access to cmdbuf in
                 * VRAM.
                 */
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             pci_resource_start(dev->pdev, 1),
                                             dev_priv->fb_available_size,
                                             NV_MEM_ACCESS_RO,
                                             NV_MEM_TARGET_PCI, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        }

        nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
        nouveau_gpuobj_ref(NULL, &pushbuf);
        return ret;
}

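/* Allocate, pin and CPU-map the 64KiB buffer object that will back the
 * channel's push buffer, placed in VRAM or GART depending on the
 * nouveau_vram_pushbuf module option.  Returns NULL on failure.
 */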
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
        struct nouveau_bo *pushbuf = NULL;
        int location, ret;

        if (nouveau_vram_pushbuf)
                location = TTM_PL_FLAG_VRAM;
        else
                location = TTM_PL_FLAG_TT;

        ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
        if (ret) {
                NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
                return NULL;
        }

        ret = nouveau_bo_pin(pushbuf, location);
        if (ret) {
                NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
                nouveau_bo_ref(NULL, &pushbuf);
                return NULL;
        }

        ret = nouveau_bo_map(pushbuf);
        if (ret) {
                nouveau_bo_unpin(pushbuf);
                nouveau_bo_ref(NULL, &pushbuf);
                return NULL;
        }

        return pushbuf;
}

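/* Channel lifetime uses two counters: chan->users tracks user-visible
 * references (the owner plus temporary holders from nouveau_channel_get());
 * when it drops to zero the channel is torn down.  chan->ref is the
 * underlying kref that keeps the structure itself alive until the last
 * pointer taken via nouveau_channel_ref() is released.
 */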
/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
                      struct drm_file *file_priv,
                      uint32_t vram_handle, uint32_t gart_handle)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_channel *chan;
        unsigned long flags;
        int ret;

        /* allocate and lock channel structure */
        chan = kzalloc(sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return -ENOMEM;
        chan->dev = dev;
        chan->file_priv = file_priv;
        chan->vram_handle = vram_handle;
        chan->gart_handle = gart_handle;

        kref_init(&chan->ref);
        atomic_set(&chan->users, 1);
        mutex_init(&chan->mutex);
        mutex_lock(&chan->mutex);

        /* allocate hw channel id */
        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
                if (!dev_priv->channels.ptr[chan->id]) {
                        nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
                        break;
                }
        }
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

        if (chan->id == pfifo->channels) {
                mutex_unlock(&chan->mutex);
                kfree(chan);
                return -ENODEV;
        }

        NV_DEBUG(dev, "initialising channel %d\n", chan->id);
        INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
        INIT_LIST_HEAD(&chan->nvsw.flip);
        INIT_LIST_HEAD(&chan->fence.pending);

        /* Allocate DMA push buffer */
        chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
        if (!chan->pushbuf_bo) {
                ret = -ENOMEM;
                NV_ERROR(dev, "pushbuf %d\n", ret);
                nouveau_channel_put(&chan);
                return ret;
        }

        nouveau_dma_pre_init(chan);
        chan->user_put = 0x40;
        chan->user_get = 0x44;

        /* Allocate space for per-channel fixed notifier memory */
        ret = nouveau_notifier_init_channel(chan);
        if (ret) {
                NV_ERROR(dev, "ntfy %d\n", ret);
                nouveau_channel_put(&chan);
                return ret;
        }

        /* Setup channel's default objects */
        ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
        if (ret) {
                NV_ERROR(dev, "gpuobj %d\n", ret);
                nouveau_channel_put(&chan);
                return ret;
        }

        /* Create a dma object for the push buffer */
        ret = nouveau_channel_pushbuf_ctxdma_init(chan);
        if (ret) {
                NV_ERROR(dev, "pbctxdma %d\n", ret);
                nouveau_channel_put(&chan);
                return ret;
        }

        /* disable the fifo caches */
        pfifo->reassign(dev, false);

        /* Construct initial RAMFC for new channel */
        ret = pfifo->create_context(chan);
        if (ret) {
                nouveau_channel_put(&chan);
                return ret;
        }

        pfifo->reassign(dev, true);

        ret = nouveau_dma_init(chan);
        if (!ret)
                ret = nouveau_fence_channel_init(chan);
        if (ret) {
                nouveau_channel_put(&chan);
                return ret;
        }

        nouveau_debugfs_channel_init(chan);

        NV_DEBUG(dev, "channel %d initialised\n", chan->id);
        *chan_ret = chan;
        return 0;
}

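/* Take a user reference on a channel without holding any locks.  Fails
 * (returns NULL) if the channel is already being torn down, which is
 * what the atomic_inc_not_zero() on chan->users detects.
 */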
struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *ref)
{
        struct nouveau_channel *chan = NULL;

        if (likely(ref && atomic_inc_not_zero(&ref->users)))
                nouveau_channel_ref(ref, &chan);

        return chan;
}

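/* Look up a channel by id under the channel-list lock, take a user
 * reference, verify the caller owns it (when file_priv is given), and
 * return it with chan->mutex held.  Errors come back as ERR_PTR().
 */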
struct nouveau_channel *
nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan;
        unsigned long flags;

        if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

        if (unlikely(!chan))
                return ERR_PTR(-EINVAL);

        if (unlikely(file_priv && chan->file_priv != file_priv)) {
                nouveau_channel_put_unlocked(&chan);
                return ERR_PTR(-EINVAL);
        }

        mutex_lock(&chan->mutex);
        return chan;
}

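/* Drop a user reference taken without chan->mutex held.  The final
 * reference idles the channel, boots it off the hardware and releases
 * everything it owned; the structure itself lives on until the last
 * kref goes away.
 */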
void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
        struct nouveau_channel *chan = *pchan;
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        unsigned long flags;
        int i;

        /* decrement the refcount, and we're done if there are still refs */
        if (likely(!atomic_dec_and_test(&chan->users))) {
                nouveau_channel_ref(NULL, pchan);
                return;
        }

        /* no one wants the channel anymore */
        NV_DEBUG(dev, "freeing channel %d\n", chan->id);
        nouveau_debugfs_channel_fini(chan);

        /* give it a chance to idle */
        nouveau_channel_idle(chan);

        /* ensure all outstanding fences are signaled.  they should be if the
         * above attempts at idling were OK, but if we failed this'll tell TTM
         * we're done with the buffers.
         */
        nouveau_fence_channel_fini(chan);

        /* boot it off the hardware */
        pfifo->reassign(dev, false);

        /* destroy the engine specific contexts */
        pfifo->destroy_context(chan);
        for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
                if (chan->engctx[i])
                        dev_priv->eng[i]->context_del(chan, i);
        }

        pfifo->reassign(dev, true);

        /* aside from its resources, the channel should now be dead,
         * remove it from the channel list
         */
        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

        /* destroy any resources the channel owned */
        nouveau_gpuobj_ref(NULL, &chan->pushbuf);
        if (chan->pushbuf_bo) {
                nouveau_bo_unmap(chan->pushbuf_bo);
                nouveau_bo_unpin(chan->pushbuf_bo);
                nouveau_bo_ref(NULL, &chan->pushbuf_bo);
        }
        nouveau_gpuobj_channel_takedown(chan);
        nouveau_notifier_takedown_channel(chan);

        nouveau_channel_ref(NULL, pchan);
}

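/* Unlock the channel and drop the caller's user reference. */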
void
nouveau_channel_put(struct nouveau_channel **pchan)
{
        mutex_unlock(&(*pchan)->mutex);
        nouveau_channel_put_unlocked(pchan);
}

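/* kref release callback: every reference is gone, free the structure. */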
static void
nouveau_channel_del(struct kref *ref)
{
        struct nouveau_channel *chan =
                container_of(ref, struct nouveau_channel, ref);

        kfree(chan);
}

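/* kref-based pointer assignment: reference the new channel (if any),
 * unreference whatever *pchan pointed at, then store the new pointer.
 * Passing chan == NULL simply drops the old reference.
 */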
void
nouveau_channel_ref(struct nouveau_channel *chan,
                    struct nouveau_channel **pchan)
{
        if (chan)
                kref_get(&chan->ref);

        if (*pchan)
                kref_put(&(*pchan)->ref, nouveau_channel_del);

        *pchan = chan;
}

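/* Wait for the channel to go idle: if work is outstanding, emit one
 * more fence and block until it signals.
 */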
void
nouveau_channel_idle(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct nouveau_fence *fence = NULL;
        int ret;

        nouveau_fence_update(chan);

        if (chan->fence.sequence != chan->fence.sequence_ack) {
                ret = nouveau_fence_new(chan, &fence, true);
                if (!ret) {
                        ret = nouveau_fence_wait(fence, false, false);
                        nouveau_fence_unref(&fence);
                }

                if (ret)
                        NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
        }
}

/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->engine;
        struct nouveau_channel *chan;
        int i;

        NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
        for (i = 0; i < engine->fifo.channels; i++) {
                chan = nouveau_channel_get(dev, file_priv, i);
                if (IS_ERR(chan))
                        continue;

                atomic_dec(&chan->users);
                nouveau_channel_put(&chan);
        }
}


/***********************************
 * ioctls wrapping the functions
 ***********************************/

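/* For reference, a userspace caller reaches nouveau_ioctl_fifo_alloc()
 * through libdrm roughly like this (illustrative sketch only, not part
 * of the kernel build; error handling omitted):
 *
 *	struct drm_nouveau_channel_alloc req = {
 *		.fb_ctxdma_handle = vram_handle,
 *		.tt_ctxdma_handle = gart_handle,
 *	};
 *	int ret = drmCommandWriteRead(fd, DRM_NOUVEAU_CHANNEL_ALLOC,
 *				      &req, sizeof(req));
 *	// on success the kernel fills in req.channel,
 *	// req.pushbuf_domains, req.subchan[] and req.notifier_handle
 */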
static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_channel_alloc *init = data;
        struct nouveau_channel *chan;
        int ret;

        if (!dev_priv->eng[NVOBJ_ENGINE_GR])
                return -ENODEV;

        if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
                return -EINVAL;

        ret = nouveau_channel_alloc(dev, &chan, file_priv,
                                    init->fb_ctxdma_handle,
                                    init->tt_ctxdma_handle);
        if (ret)
                return ret;
        init->channel = chan->id;

        if (chan->dma.ib_max)
                init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                                        NOUVEAU_GEM_DOMAIN_GART;
        else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
                init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
        else
                init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

        if (dev_priv->card_type < NV_C0) {
                init->subchan[0].handle = NvM2MF;
                if (dev_priv->card_type < NV_50)
                        init->subchan[0].grclass = 0x0039;
                else
                        init->subchan[0].grclass = 0x5039;
                init->subchan[1].handle = NvSw;
                init->subchan[1].grclass = NV_SW;
                init->nr_subchan = 2;
        } else {
                init->subchan[0].handle = 0x9039;
                init->subchan[0].grclass = 0x9039;
                init->nr_subchan = 1;
        }

        /* Named memory object area */
        ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
                                    &init->notifier_handle);

        if (ret == 0)
                atomic_inc(&chan->users); /* userspace reference */
        nouveau_channel_put(&chan);
        return ret;
}

static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_nouveau_channel_free *req = data;
        struct nouveau_channel *chan;

        chan = nouveau_channel_get(dev, file_priv, req->channel);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        atomic_dec(&chan->users);
        nouveau_channel_put(&chan);
        return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
        DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);