GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/gpu/drm/drm_bufs.c
/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <[email protected]>
 * \author Gareth Hughes <[email protected]>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by [email protected]
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <asm/shmparam.h>
#include "drmP.h"

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap(). Otherwise we run into cache aliasing problems
		 * on some platforms. On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}

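/*
 * Illustrative note, not part of the original source: on a platform with a
 * 4 KiB PAGE_SIZE and a 16 KiB SHMLBA, drm_map_handle() above computes
 * bits = ilog2(16K >> 12) + 1 = 3, so the low three page-offset bits of the
 * kernel virtual address are carried into the hashed handle.  The resulting
 * vma->vm_pgoff then shares those bits with the original mapping, which is
 * what the cache-aliasing comment above requires.
 */
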
/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size-1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it. Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS) {
			map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable. So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_GEM:
		DRM_ERROR("tried to addmap GEM object\n");
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}

int drm_addmap(struct drm_device *dev, resource_size_t offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}

EXPORT_SYMBOL(drm_addmap);

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 */
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit; this cast isn't very nice, but the API is already set, so it's too late to change */
	map->handle = (void *)(unsigned long)maplist->user_token;
	return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still being used, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	case _DRM_GEM:
		DRM_ERROR("tried to rmmap GEM object\n");
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);

int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);

/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly. Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about. This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif	/* __OS_HAS_AGP */

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
				sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
			    = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;

}
EXPORT_SYMBOL(drm_addbufs_pci);

static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

static int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}


/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if __OS_HAS_AGP
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, request);
	else
		ret = drm_addbufs_pci(dev, request);

	return ret;
}

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or by a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request->low_mark;
	entry->freelist.high_mark = request->high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (request->count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

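/*
 * Illustrative examples for drm_order() below (added for clarity, not part
 * of the original source): drm_order(4096) returns 12 because 1 << 12 ==
 * 4096, while drm_order(4097) returns 13, the smallest n with
 * (1UL << n) >= size.  The addbufs helpers above reject any result outside
 * the DRM_MIN_ORDER..DRM_MAX_ORDER range.
 */
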
/**
 * Compute size order. Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);