GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/gpu/drm/nouveau/nouveau_sgdma.c
#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

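/*
 * Per-object state for the scatter/gather GART backend: the embedded
 * ttm_backend, the DMA addresses of the currently populated pages, and
 * whether those pages are presently bound into the GART page table.
 */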
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	bool *ttm_alloced;
	unsigned nr_pages;

	u64 offset;
	bool bound;
};

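/*
 * populate(): DMA-map the object's pages through the PCI layer and
 * record the resulting bus addresses for a later bind().
 */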
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page,
		       dma_addr_t *dma_addrs)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
	if (!nvbe->ttm_alloced)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		/* this code path isn't called and is incorrect anyways */
		if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/
			nvbe->pages[nvbe->nr_pages] =
					dma_addrs[nvbe->nr_pages];
			nvbe->ttm_alloced[nvbe->nr_pages] = true;
		} else {
			nvbe->pages[nvbe->nr_pages] =
				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(dev->pdev,
						  nvbe->pages[nvbe->nr_pages])) {
				be->func->clear(be);
				return -EFAULT;
			}
			nvbe->ttm_alloced[nvbe->nr_pages] = false;
		}

		nvbe->nr_pages++;
	}

	return 0;
}

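/* clear(): undo populate() - unbind if still bound, then unmap and free. */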
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			if (!nvbe->ttm_alloced[nvbe->nr_pages])
				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		kfree(nvbe->ttm_alloced);
		nvbe->pages = NULL;
		nvbe->ttm_alloced = NULL;
		nvbe->nr_pages = 0;
	}
}

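/* destroy(): release the backend object itself, clearing it first if needed. */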
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (be) {
		NV_DEBUG(nvbe->dev, "\n");

		if (nvbe) {
			if (nvbe->pages)
				be->func->clear(be);
			kfree(nvbe);
		}
	}
}

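/*
 * NV04-NV40 PCI GART: one 32-bit PTE per 4KiB page in the ctxdma
 * object; the "+ 2" skips the two-word DMA object header written in
 * nouveau_sgdma_init().
 */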
static int
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	nvbe->offset = mem->start << PAGE_SHIFT;
	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}

	nvbe->bound = true;
	return 0;
}

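/* NV04-NV40: zero the PTEs written by nv04_sgdma_bind(). */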
static int
nv04_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
	}

	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nv04_sgdma_bind,
	.unbind = nv04_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

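/*
 * NV41-style GART TLB flush: poke 0x100810 and wait for the hardware to
 * report completion before clearing the trigger.
 */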
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100810, 0x00000022);
	if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
		NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100810));
	nv_wr32(dev, 0x100810, 0x00000000);
}

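/*
 * NV41-style bind: one 32-bit PTE per page, written as the bus address
 * shifted right by 7 with bit 0 apparently serving as the valid flag.
 */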
static int
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2;
	u32 cnt = nvbe->nr_pages;

	nvbe->offset = mem->start << PAGE_SHIFT;

	while (cnt--) {
		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

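/* NV41-style unbind: zero the PTE range, then flush the TLB. */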
static int
nv41_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	while (cnt--) {
		nv_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nv41_sgdma_bind,
	.unbind = nv41_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

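/*
 * NV44 GART flush: write the page count and the offset (with a trigger
 * bit) and poll 0x100808 until the flush completes.
 */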
static void
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100808));
	nv_wr32(dev, 0x100808, 0x00000000);
}

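/*
 * NV44 packs four page-table entries (bus address >> 12, 27 bits each)
 * into every 16-byte group, so partial groups must be read back,
 * merged and rewritten.  A NULL list points the entries at the dummy
 * page instead, since NV44 has no present bit to mark them invalid.
 */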
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
	dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
	u32 pte, tmp[4];

	pte = base >> 2;
	base &= ~0x0000000f;

	tmp[0] = nv_ro32(pgt, base + 0x0);
	tmp[1] = nv_ro32(pgt, base + 0x4);
	tmp[2] = nv_ro32(pgt, base + 0x8);
	tmp[3] = nv_ro32(pgt, base + 0xc);
	while (cnt--) {
		u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
		switch (pte++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	tmp[3] |= 0x40000000;

	nv_wo32(pgt, base + 0x0, tmp[0]);
	nv_wo32(pgt, base + 0x4, tmp[1]);
	nv_wo32(pgt, base + 0x8, tmp[2]);
	nv_wo32(pgt, base + 0xc, tmp[3]);
}

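/*
 * NV44 bind: fill any unaligned head via nv44_sgdma_fill(), write whole
 * four-entry groups directly, fill the tail, then flush.
 */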
static int
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2, tmp[4];
	u32 cnt = nvbe->nr_pages;
	int i;

	nvbe->offset = mem->start << PAGE_SHIFT;

	if (pte & 0x0000000c) {
		u32 max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, list, pte, part);
		pte += (part << 2);
		list += part;
		cnt -= part;
	}

	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nv_wo32(pgt, pte + 0x0, tmp[0] >>  0 | tmp[1] << 27);
		nv_wo32(pgt, pte + 0x4, tmp[1] >>  5 | tmp[2] << 22);
		nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
		nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
		pte += 0x10;
		cnt -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, list, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

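/*
 * NV44 unbind: repoint partial groups at the dummy page, zero full
 * groups, then flush.
 */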
static int
nv44_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	if (pte & 0x0000000c) {
		u32 max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, NULL, pte, part);
		pte += (part << 2);
		cnt -= part;
	}

	while (cnt >= 4) {
		nv_wo32(pgt, pte + 0x0, 0x00000000);
		nv_wo32(pgt, pte + 0x4, 0x00000000);
		nv_wo32(pgt, pte + 0x8, 0x00000000);
		nv_wo32(pgt, pte + 0xc, 0x00000000);
		pte += 0x10;
		cnt -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, NULL, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nv44_sgdma_bind,
	.unbind = nv44_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

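/*
 * NV50+: page tables are maintained by the VM code from move_notify(),
 * so bind()/unbind() only stash the page list in the memory node and
 * track the bound state.
 */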
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = mem->mm_node;
	/* noop: bound in move_notify() */
	node->pages = nvbe->pages;
	nvbe->pages = (dma_addr_t *)node;
	nvbe->bound = true;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
	/* noop: unbound in move_notify() */
	nvbe->pages = node->pages;
	node->pages = NULL;
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nv50_sgdma_bind,
	.unbind = nv50_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

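/*
 * Allocate a backend instance for a new TTM object, wired to the
 * function table chosen for this chipset in nouveau_sgdma_init().
 */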
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func = dev_priv->gart_info.func;
	return &nvbe->backend;
}

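/*
 * One-time setup: pick the aperture size, map a dummy scratch page for
 * unbound NV44 PTEs, select the per-chipset backend and, on pre-NV50
 * cards, create the page table / ctxdma object.  Note the NV41/NV44
 * branch is currently disabled by the "0 &&" in its condition.
 */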
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	u32 aper_size, align;
	int ret;

	if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
		aper_size = 512 * 1024 * 1024;
	else
		aper_size = 64 * 1024 * 1024;

	/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
	 * christmas.  The cards before it have them, the cards after
	 * it have them, why is NV44 so unloved?
	 */
	dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
	if (!dev_priv->gart_info.dummy.page)
		return -ENOMEM;

	dev_priv->gart_info.dummy.addr =
		pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
			     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
		NV_ERROR(dev, "error mapping dummy page\n");
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
		return -ENOMEM;
	}

	if (dev_priv->card_type >= NV_50) {
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
		dev_priv->gart_info.func = &nv50_sgdma_backend;
	} else
	if (0 && drm_pci_device_is_pcie(dev) &&
	    dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
		if (nv44_graph_class(dev)) {
			dev_priv->gart_info.func = &nv44_sgdma_backend;
			align = 512 * 1024;
		} else {
			dev_priv->gart_info.func = &nv41_sgdma_backend;
			align = 16;
		}

		ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
	} else {
		ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (0 << 14) /* RW */ |
				   (2 << 16) /* PCI */);
		nv_wo32(gpuobj, 4, aper_size - 1);

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
		dev_priv->gart_info.func = &nv04_sgdma_backend;
	}

	return 0;
}

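/* Teardown: drop the ctxdma reference and release the dummy page. */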
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

	if (dev_priv->gart_info.dummy.page) {
		pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
	}
}

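/*
 * Look up the bus address behind a GART offset by reading the NV04-style
 * ctxdma PTE; not usable on NV50+, which manage the GART via the VM code.
 */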
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

	BUG_ON(dev_priv->card_type >= NV_50);

	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
		(offset & NV_CTXDMA_PAGE_MASK);
}