GitHub Repository: awilliam/linux-vfio
Path: blob/master/mm/dmapool.c
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <[email protected]>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
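
/*
 * Illustrative sketch (not part of this file): typical driver-side use of
 * the pool API declared in <linux/dmapool.h>.  The device pointer, pool
 * name, block size, alignment and boundary values are hypothetical.
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	pool = dma_pool_create("example", dev, 64, 8, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!cpu_addr) {
 *		dma_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *
 *	(hand 'dma' to the device, use 'cpu_addr' from the CPU)
 *
 *	dma_pool_free(pool, cpu_addr, dma);
 *	dma_pool_destroy(pool);
 */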

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
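
/*
 * Example of what a read of the resulting "pools" attribute might return
 * (the pool name and all numbers below are hypothetical):
 *
 *	poolinfo - 0.1
 *	buffer-64          12   64   64  1
 *
 * i.e. pool name, blocks in use, total blocks, block size, and pages,
 * following the format string above.
 */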

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note: not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
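
/*
 * Worked example of the chain built above, for a hypothetical pool with
 * size = 64, allocation = PAGE_SIZE = 4096 and boundary = 4096: the first
 * word of every free block holds the offset of the next free block, so the
 * page starts out as the chain 0 -> 64 -> 128 -> ...  When the block that
 * would come next would reach or cross 'boundary', the chain jumps straight
 * to that boundary instead, so no returned block straddles it.  A stored
 * offset equal to 'allocation' terminates the chain; dma_pool_alloc()
 * treats a page whose 'offset' has reached 'allocation' as full.
 */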

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
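
/*
 * Illustrative note: allocation simply pops the head of the page's free
 * chain.  For instance (hypothetical values), if page->offset is 128 and
 * the int stored at page->vaddr + 128 is 192, dma_pool_alloc() returns
 * page->vaddr + 128, writes page->dma + 128 to *handle, and leaves
 * page->offset at 192 for the next caller.
 */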

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
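
/*
 * Illustrative sketch (not part of this file): a driver probe() routine
 * using the managed variant, so devres releases the pool automatically
 * when the driver detaches.  The names and sizes are hypothetical.
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("example", dev, 64, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *
 *		(dma_pool_alloc()/dma_pool_free() as usual; no explicit
 *		 destroy call is needed on the normal unbind path)
 *
 *		return 0;
 *	}
 */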

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);