GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/gpu/drm/nouveau/nouveau_vm.c
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

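/* Map the memory regions backing @node into the GPU virtual address
 * range of @vma, starting @delta bytes into the VMA.  Each region is
 * written one page table at a time, advancing to the next PDE whenever
 * the current page table fills, then the VM's TLBs are flushed.
 */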
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_mm_node *r;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vm->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	vm->flush(vm);
}

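/* Convenience wrapper: map @node at the very start of @vma. */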
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	nouveau_vm_map_at(vma, 0, node);
}

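/* Map a scatterlist of DMA addresses (@list) covering @length bytes
 * into @vma at offset @delta, filling one page table at a time and
 * flushing the VM's TLBs when done.
 */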
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  struct nouveau_mem *mem, dma_addr_t *list)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->map_sg(vma, pgt, mem, pte, len, list);

		num  -= len;
		pte  += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

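/* Unmap @length bytes at offset @delta within @vma, walking each
 * affected page table in turn, then flush the VM's TLBs.
 */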
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

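/* Unmap the entire range covered by @vma. */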
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

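/* Drop a reference on each page table in the PDE range [@fpde, @lpde].
 * A page table whose refcount reaches zero is cleared from every PGD
 * attached to this VM and released; the mm mutex (held by the caller)
 * is dropped and re-taken around the gpuobj release.
 */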
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		mutex_unlock(&vm->mm->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm->mutex);
	}
}

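/* Allocate a page table for @pde, sized to hold PTEs for pages of
 * (1 << @type) bytes, and write it into every PGD attached to this VM.
 * Called with the mm mutex held; the mutex is dropped around the
 * allocation, so a racing caller may fill the PDE first, in which case
 * the fresh allocation is released and the existing table is reused.
 */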
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vm->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&vm->mm->mutex);
	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&vm->mm->mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&vm->mm->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm->mutex);
		return 0;
	}

	vpgt->obj[big] = pgt;
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}

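/* Allocate @size bytes of GPU virtual address space for @vma, using
 * (1 << @page_shift)-byte pages and @access rights.  Page tables
 * covering the range are created on demand (or their refcounts bumped
 * if they already exist); on failure, any references taken so far are
 * rolled back and the address-space node is returned to the allocator.
 */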
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mm->mutex);
	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mm->mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_put(vm->mm, vma->node);
			mutex_unlock(&vm->mm->mutex);
			vma->node = NULL;
			return ret;
		}
	}
	mutex_unlock(&vm->mm->mutex);

	vma->vm = vm;
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

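/* Release the address space held by @vma, dropping the reference on
 * each page table that covered it.
 */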
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

	mutex_lock(&vm->mm->mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
	nouveau_mm_put(vm->mm, vma->node);
	vma->node = NULL;
	mutex_unlock(&vm->mm->mutex);
}

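/* Create a VM covering @length bytes of GPU virtual address space at
 * @offset, with allocations handed out from @mm_offset upward.  Only
 * the NV50 and NVC0 card types are handled (anything else returns
 * -ENOSYS); each supplies its own map/unmap/flush backends, small and
 * large page shifts, and the address range covered per PDE.
 */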
int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
	       struct nouveau_vm **pvm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	u32 block, pgt_bits;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	if (dev_priv->card_type == NV_50) {
		vm->map_pgt = nv50_vm_map_pgt;
		vm->map = nv50_vm_map;
		vm->map_sg = nv50_vm_map_sg;
		vm->unmap = nv50_vm_unmap;
		vm->flush = nv50_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 16;

		pgt_bits = 29;
		block = (1 << pgt_bits);
		if (length < block)
			block = length;

	} else
	if (dev_priv->card_type == NV_C0) {
		vm->map_pgt = nvc0_vm_map_pgt;
		vm->map = nvc0_vm_map;
		vm->map_sg = nvc0_vm_map_sg;
		vm->unmap = nvc0_vm_unmap;
		vm->flush = nvc0_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 17;
		pgt_bits = 27;
		block = 4096;
	} else {
		kfree(vm);
		return -ENOSYS;
	}

	vm->fpde = offset >> pgt_bits;
	vm->lpde = (offset + length - 1) >> pgt_bits;
	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->dev = dev;
	vm->refcount = 1;
	vm->pgt_bits = pgt_bits - 12;

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		kfree(vm);
		return ret;
	}

	*pvm = vm;
	return 0;
}

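/* Attach @pgd to this VM: write every existing page table into the new
 * PGD and add it to pgd_list so that later page-table changes are
 * propagated to it as well.
 */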
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&vm->mm->mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mm->mutex);
	return 0;
}

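/* Detach @pgd from this VM, removing it from pgd_list and dropping the
 * reference taken at link time.
 */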
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	if (!pgd)
		return;

	mutex_lock(&vm->mm->mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj != pgd)
			continue;

		list_del(&vpgd->head);
		nouveau_gpuobj_ref(NULL, &vpgd->obj);
		kfree(vpgd);
	}
	mutex_unlock(&vm->mm->mutex);
}

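/* Destroy a VM once its last reference is gone: unlink any remaining
 * PGDs, tear down the address-space allocator and free the page-table
 * array.
 */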
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}
	WARN_ON(nouveau_mm_fini(&vm->mm) != 0);

	kfree(vm->pgt);
	kfree(vm);
}

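/* Update *@ptr to reference @ref (either may be NULL), linking or
 * unlinking @pgd as appropriate and adjusting the VM refcounts; the
 * previously referenced VM is destroyed when its count drops to zero.
 */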
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *vm;
	int ret;

	vm = ref;
	if (vm) {
		ret = nouveau_vm_link(vm, pgd);
		if (ret)
			return ret;

		vm->refcount++;
	}

	vm = *ptr;
	*ptr = ref;

	if (vm) {
		nouveau_vm_unlink(vm, pgd);

		if (--vm->refcount == 0)
			nouveau_vm_del(vm);
	}

	return 0;
}