Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/gpu/drm/nouveau/nouveau_fence.c
15112 views
1
/*
2
* Copyright (C) 2007 Ben Skeggs.
3
* All Rights Reserved.
4
*
5
* Permission is hereby granted, free of charge, to any person obtaining
6
* a copy of this software and associated documentation files (the
7
* "Software"), to deal in the Software without restriction, including
8
* without limitation the rights to use, copy, modify, merge, publish,
9
* distribute, sublicense, and/or sell copies of the Software, and to
10
* permit persons to whom the Software is furnished to do so, subject to
11
* the following conditions:
12
*
13
* The above copyright notice and this permission notice (including the
14
* next paragraph) shall be included in all copies or substantial
15
* portions of the Software.
16
*
17
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24
*
25
*/
26
27
#include "drmP.h"
28
#include "drm.h"
29
30
#include <linux/ktime.h>
31
#include <linux/hrtimer.h>
32
33
#include "nouveau_drv.h"
34
#include "nouveau_ramht.h"
35
#include "nouveau_dma.h"
36
37
/* NV10+ exposes a readable fence sequence register (read via nvchan_rd32
 * below); older chips fall back to the software last_sequence_irq counter. */
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
/* NV17+ can use hardware semaphores for cross-channel synchronisation. */
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
39
40
struct nouveau_fence {
	struct nouveau_channel *channel;	/* channel the fence belongs to (refcounted) */
	struct kref refcount;			/* released via nouveau_fence_del() */
	struct list_head entry;			/* link in channel->fence.pending */

	uint32_t sequence;	/* sequence number written to the ring at emit time */
	bool signalled;		/* true once the GPU has retired this sequence */

	/* Optional one-shot completion callback; called with signalled=false
	 * if the channel is torn down before the fence completes. */
	void (*work)(void *priv, bool signalled);
	void *priv;
};
51
52
/* A small slot carved out of the shared VRAM fence heap, used to
 * synchronise two channels.  Refcounted so it stays alive until the
 * work callbacks of both the acquire and release fences have run. */
struct nouveau_semaphore {
	struct kref ref;		/* released via semaphore_free() */
	struct drm_device *dev;
	struct drm_mm_node *mem;	/* slot inside dev_priv->fence.heap */
};
57
58
/* TTM passes sync objects around as void pointers; recover the real type. */
static inline struct nouveau_fence *
nouveau_fence(void *sync_obj)
{
	struct nouveau_fence *fence = sync_obj;

	return fence;
}
63
64
static void
65
nouveau_fence_del(struct kref *ref)
66
{
67
struct nouveau_fence *fence =
68
container_of(ref, struct nouveau_fence, refcount);
69
70
nouveau_channel_ref(NULL, &fence->channel);
71
kfree(fence);
72
}
73
74
/*
 * Poll @chan for completed fences: read back the newest retired sequence
 * number and signal every pending fence up to and including it.
 * Takes chan->fence.lock internally.
 */
void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *tmp, *fence;
	uint32_t sequence;

	spin_lock(&chan->fence.lock);

	/* Fetch the last sequence if the channel is still up and running */
	if (likely(!list_empty(&chan->fence.pending))) {
		if (USE_REFCNT(dev))
			sequence = nvchan_rd32(chan, 0x48);
		else
			sequence = atomic_read(&chan->fence.last_sequence_irq);

		/* Nothing new has retired since the last poll. */
		if (chan->fence.sequence_ack == sequence)
			goto out;
		chan->fence.sequence_ack = sequence;
	}

	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
		sequence = fence->sequence;
		fence->signalled = true;
		list_del(&fence->entry);

		/* Run the completion callback before dropping the
		 * pending list's reference. */
		if (unlikely(fence->work))
			fence->work(fence->priv, true);

		kref_put(&fence->refcount, nouveau_fence_del);

		/* Stop once we reach the last acknowledged sequence;
		 * anything later is still in flight. */
		if (sequence == chan->fence.sequence_ack)
			break;
	}
out:
	spin_unlock(&chan->fence.lock);
}
111
112
int
113
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
114
bool emit)
115
{
116
struct nouveau_fence *fence;
117
int ret = 0;
118
119
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
120
if (!fence)
121
return -ENOMEM;
122
kref_init(&fence->refcount);
123
nouveau_channel_ref(chan, &fence->channel);
124
125
if (emit)
126
ret = nouveau_fence_emit(fence);
127
128
if (ret)
129
nouveau_fence_unref(&fence);
130
*pfence = fence;
131
return ret;
132
}
133
134
struct nouveau_channel *
135
nouveau_fence_channel(struct nouveau_fence *fence)
136
{
137
return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
138
}
139
140
/*
 * Assign @fence the next sequence number on its channel, queue it on the
 * pending list and push a command into the ring that makes the GPU write
 * the sequence back when reached.
 */
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	/* The 32-bit sequence counter is about to lap the ack counter:
	 * force a poll so pending fences retire first.  If it would still
	 * wrap after the update, something is badly wrong. */
	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
		nouveau_fence_update(chan);

		BUG_ON(chan->fence.sequence ==
		       chan->fence.sequence_ack - 1);
	}

	fence->sequence = ++chan->fence.sequence;

	/* The pending list holds its own reference until the fence signals. */
	kref_get(&fence->refcount);
	spin_lock(&chan->fence.lock);
	list_add_tail(&fence->entry, &chan->fence.pending);
	spin_unlock(&chan->fence.lock);

	/* Emit the sequence write: method 0x0050 on chips with a readable
	 * fence register (NVC0+ uses the NVC0-style header), method 0x0150
	 * on older chips that signal via software IRQ. */
	if (USE_REFCNT(dev)) {
		if (dev_priv->card_type < NV_C0)
			BEGIN_RING(chan, NvSubSw, 0x0050, 1);
		else
			BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
	} else {
		BEGIN_RING(chan, NvSubSw, 0x0150, 1);
	}
	OUT_RING (chan, fence->sequence);
	FIRE_RING(chan);

	return 0;
}
179
180
void
181
nouveau_fence_work(struct nouveau_fence *fence,
182
void (*work)(void *priv, bool signalled),
183
void *priv)
184
{
185
BUG_ON(fence->work);
186
187
spin_lock(&fence->channel->fence.lock);
188
189
if (fence->signalled) {
190
work(priv, true);
191
} else {
192
fence->work = work;
193
fence->priv = priv;
194
}
195
196
spin_unlock(&fence->channel->fence.lock);
197
}
198
199
void
200
__nouveau_fence_unref(void **sync_obj)
201
{
202
struct nouveau_fence *fence = nouveau_fence(*sync_obj);
203
204
if (fence)
205
kref_put(&fence->refcount, nouveau_fence_del);
206
*sync_obj = NULL;
207
}
208
209
void *
210
__nouveau_fence_ref(void *sync_obj)
211
{
212
struct nouveau_fence *fence = nouveau_fence(sync_obj);
213
214
kref_get(&fence->refcount);
215
return sync_obj;
216
}
217
218
bool
219
__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
220
{
221
struct nouveau_fence *fence = nouveau_fence(sync_obj);
222
struct nouveau_channel *chan = fence->channel;
223
224
if (fence->signalled)
225
return true;
226
227
nouveau_fence_update(chan);
228
return fence->signalled;
229
}
230
231
/*
 * TTM sync_obj hook: wait for the fence to signal.
 *
 * @lazy: sleep between polls (exponential backoff from 1us up to 1ms)
 *        instead of busy-waiting.
 * @intr: allow the wait to be interrupted by a signal (-ERESTARTSYS).
 *
 * Returns 0 on success, -EBUSY after a 3-second timeout.
 */
int
__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
	unsigned long timeout = jiffies + (3 * DRM_HZ);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;	/* start at 1us */
	ktime_t t;
	int ret = 0;

	while (1) {
		if (__nouveau_fence_signalled(sync_obj, sync_arg))
			break;

		if (time_after_eq(jiffies, timeout)) {
			ret = -EBUSY;
			break;
		}

		/* Set the task state before the signal check below so a
		 * wakeup between the two isn't lost. */
		__set_current_state(intr ? TASK_INTERRUPTIBLE
			: TASK_UNINTERRUPTIBLE);
		if (lazy) {
			t = ktime_set(0, sleep_time);
			schedule_hrtimeout(&t, HRTIMER_MODE_REL);
			sleep_time *= 2;
			if (sleep_time > NSEC_PER_MSEC)
				sleep_time = NSEC_PER_MSEC;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);

	return ret;
}
268
269
/*
 * Allocate a semaphore slot from the shared VRAM fence heap and zero it.
 * Returns NULL if the chipset has no semaphore support or on allocation
 * failure (callers fall back to software sync).
 *
 * Slot size is 4 bytes pre-NV84, 16 bytes afterwards.
 */
static struct nouveau_semaphore *
semaphore_alloc(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_semaphore *sema;
	int size = (dev_priv->chipset < 0x84) ? 4 : 16;
	int ret, i;

	if (!USE_SEMA(dev))
		return NULL;

	sema = kmalloc(sizeof(*sema), GFP_KERNEL);
	if (!sema)
		goto fail;

	/* Pre-populate drm_mm's node cache so the search/get below can run
	 * atomically under the spinlock. */
	ret = drm_mm_pre_get(&dev_priv->fence.heap);
	if (ret)
		goto fail;

	spin_lock(&dev_priv->fence.lock);
	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
	if (sema->mem)
		sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
	spin_unlock(&dev_priv->fence.lock);

	if (!sema->mem)
		goto fail;

	kref_init(&sema->ref);
	sema->dev = dev;
	/* Clear the slot so an acquire doesn't see a stale release value. */
	for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
		nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);

	return sema;
fail:
	/* kfree(NULL) is a no-op, so this is safe on all failure paths. */
	kfree(sema);
	return NULL;
}
307
308
static void
309
semaphore_free(struct kref *ref)
310
{
311
struct nouveau_semaphore *sema =
312
container_of(ref, struct nouveau_semaphore, ref);
313
struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
314
315
spin_lock(&dev_priv->fence.lock);
316
drm_mm_put_block(sema->mem);
317
spin_unlock(&dev_priv->fence.lock);
318
319
kfree(sema);
320
}
321
322
static void
323
semaphore_work(void *priv, bool signalled)
324
{
325
struct nouveau_semaphore *sema = priv;
326
struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
327
328
if (unlikely(!signalled))
329
nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
330
331
kref_put(&sema->ref, semaphore_free);
332
}
333
334
/*
 * Emit commands on @chan that make it wait until @sema is released by
 * another channel, then fence the commands so the semaphore outlives
 * their execution.  The command stream differs per chipset generation.
 */
static int
semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *fence = NULL;
	int ret;

	if (dev_priv->chipset < 0x84) {
		/* Pre-NV84: legacy semaphore via the NvSema DMA object. */
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
		OUT_RING (chan, NvSema);
		OUT_RING (chan, sema->mem->start);
		OUT_RING (chan, 1);
	} else
	if (dev_priv->chipset < 0xc0) {
		/* NV84..NVA8: 64-bit VM address via the channel's VRAM DMA
		 * object. */
		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
		u64 offset = vma->offset + sema->mem->start;

		ret = RING_SPACE(chan, 7);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
		OUT_RING (chan, chan->vram_handle);
		BEGIN_RING(chan, NvSubSw, 0x0010, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 1); /* ACQUIRE_EQ */
	} else {
		/* NVC0+: no DMA object needed; different trigger encoding. */
		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
		u64 offset = vma->offset + sema->mem->start;

		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
	}

	/* Delay semaphore destruction until its work is done */
	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	kref_get(&sema->ref);
	nouveau_fence_work(fence, semaphore_work, sema);
	nouveau_fence_unref(&fence);
	return 0;
}
391
392
/*
 * Emit commands on @chan that release @sema, waking up whichever channel
 * is blocked in semaphore_acquire() on it, then fence the commands so
 * the semaphore outlives their execution.  Mirrors semaphore_acquire()'s
 * per-chipset command streams.
 */
static int
semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *fence = NULL;
	int ret;

	if (dev_priv->chipset < 0x84) {
		/* Pre-NV84: legacy release via the NvSema DMA object. */
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
		OUT_RING (chan, NvSema);
		OUT_RING (chan, sema->mem->start);
		BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
		OUT_RING (chan, 1);
	} else
	if (dev_priv->chipset < 0xc0) {
		/* NV84..NVA8: 64-bit VM address via the channel's VRAM DMA
		 * object. */
		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
		u64 offset = vma->offset + sema->mem->start;

		ret = RING_SPACE(chan, 7);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
		OUT_RING (chan, chan->vram_handle);
		BEGIN_RING(chan, NvSubSw, 0x0010, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 2); /* RELEASE */
	} else {
		/* NVC0+: no DMA object needed; different trigger encoding. */
		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
		u64 offset = vma->offset + sema->mem->start;

		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 0x1002); /* RELEASE */
	}

	/* Delay semaphore destruction until its work is done */
	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	kref_get(&sema->ref);
	nouveau_fence_work(fence, semaphore_work, sema);
	nouveau_fence_unref(&fence);
	return 0;
}
450
451
/*
 * Make @wchan wait until @fence (emitted on some other channel) has
 * signalled, preferring a hardware semaphore handshake between the two
 * channels and falling back to a blocking software wait when semaphores
 * are unavailable or the emitting channel's mutex cannot be taken.
 */
int
nouveau_fence_sync(struct nouveau_fence *fence,
		   struct nouveau_channel *wchan)
{
	struct nouveau_channel *chan = nouveau_fence_channel(fence);
	struct drm_device *dev = wchan->dev;
	struct nouveau_semaphore *sema;
	int ret = 0;

	/* Nothing to do if there is no emitting channel, we'd be syncing a
	 * channel with itself, or the fence already signalled. */
	if (likely(!chan || chan == wchan ||
		   nouveau_fence_signalled(fence)))
		goto out;

	sema = semaphore_alloc(dev);
	if (!sema) {
		/* Early card or broken userspace, fall back to
		 * software sync. */
		ret = nouveau_fence_wait(fence, true, false);
		goto out;
	}

	/* try to take chan's mutex, if we can't take it right away
	 * we have to fallback to software sync to prevent locking
	 * order issues
	 */
	if (!mutex_trylock(&chan->mutex)) {
		ret = nouveau_fence_wait(fence, true, false);
		goto out_unref;
	}

	/* Make wchan wait until it gets signalled */
	ret = semaphore_acquire(wchan, sema);
	if (ret)
		goto out_unlock;

	/* Signal the semaphore from chan */
	ret = semaphore_release(chan, sema);

out_unlock:
	mutex_unlock(&chan->mutex);
out_unref:
	kref_put(&sema->ref, semaphore_free);
out:
	if (chan)
		nouveau_channel_put_unlocked(&chan);
	return ret;
}
498
499
/* TTM sync_obj flush hook: intentionally a no-op — fence commands are
 * fired to the ring at emit time, so there is nothing left to flush. */
int
__nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}
504
505
/*
 * Per-channel fence setup: bind the software object used for fence
 * methods (pre-NVC0), expose the shared semaphore buffer through the
 * channel's RAMHT on legacy-semaphore chips, and initialise the pending
 * list, lock and software sequence counter.
 */
int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	if (dev_priv->card_type < NV_C0) {
		/* Create an NV_SW object for various sync purposes */
		ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
		if (ret)
			return ret;

		/* Bind the software object to its subchannel. */
		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, 0, 1);
		OUT_RING (chan, NvSw);
		FIRE_RING (chan);
	}

	/* Setup area of memory shared between all channels for x-chan sync */
	if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;

		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     mem->start << PAGE_SHIFT,
					     mem->size, NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &obj);
		if (ret)
			return ret;

		/* RAMHT keeps its own reference once inserted (the local
		 * ref is dropped unconditionally). */
		ret = nouveau_ramht_insert(chan, NvSema, obj);
		nouveau_gpuobj_ref(NULL, &obj);
		if (ret)
			return ret;
	}

	INIT_LIST_HEAD(&chan->fence.pending);
	spin_lock_init(&chan->fence.lock);
	atomic_set(&chan->fence.last_sequence_irq, 0);
	return 0;
}
550
551
/*
 * Per-channel fence teardown: mark every still-pending fence signalled
 * and drop the pending list's references.  Work callbacks are invoked
 * with signalled == false so they can tell the channel died before the
 * fence completed.
 */
void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
	struct nouveau_fence *tmp, *fence;

	spin_lock(&chan->fence.lock);

	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
		fence->signalled = true;
		list_del(&fence->entry);

		if (unlikely(fence->work))
			fence->work(fence->priv, false);

		kref_put(&fence->refcount, nouveau_fence_del);
	}

	spin_unlock(&chan->fence.lock);
}
570
571
int
572
nouveau_fence_init(struct drm_device *dev)
573
{
574
struct drm_nouveau_private *dev_priv = dev->dev_private;
575
int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
576
int ret;
577
578
/* Create a shared VRAM heap for cross-channel sync. */
579
if (USE_SEMA(dev)) {
580
ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
581
0, 0, &dev_priv->fence.bo);
582
if (ret)
583
return ret;
584
585
ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
586
if (ret)
587
goto fail;
588
589
ret = nouveau_bo_map(dev_priv->fence.bo);
590
if (ret)
591
goto fail;
592
593
ret = drm_mm_init(&dev_priv->fence.heap, 0,
594
dev_priv->fence.bo->bo.mem.size);
595
if (ret)
596
goto fail;
597
598
spin_lock_init(&dev_priv->fence.lock);
599
}
600
601
return 0;
602
fail:
603
nouveau_bo_unmap(dev_priv->fence.bo);
604
nouveau_bo_ref(NULL, &dev_priv->fence.bo);
605
return ret;
606
}
607
608
/*
 * Device-wide fence teardown: tear down the semaphore heap and release
 * the shared VRAM buffer object set up by nouveau_fence_init().  The
 * order (heap takedown, unmap, unpin, unref) reverses initialisation.
 */
void
nouveau_fence_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (USE_SEMA(dev)) {
		drm_mm_takedown(&dev_priv->fence.heap);
		nouveau_bo_unmap(dev_priv->fence.bo);
		nouveau_bo_unpin(dev_priv->fence.bo);
		nouveau_bo_ref(NULL, &dev_priv->fence.bo);
	}
}
620
621