GitHub Repository: awilliam/linux-vfio
Path: arch/arm/include/asm/dma-mapping.h (branch: master)
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change. Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
		enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
			enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
		enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
			enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
		size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
			size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
		size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
			size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	if (mask < ISA_DMA_THRESHOLD)
		return 0;
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_DMABOUNCE
	if (dev->archdata.dmabounce) {
		if (dma_mask >= ISA_DMA_THRESHOLD)
			return 0;
		else
			return -EIO;
	}
#endif
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
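
/*
 * Usage sketch (illustrative only): a driver negotiating its DMA mask at
 * probe time.  The probe function name and the 32-bit mask are assumptions,
 * not anything defined in this header.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *		return 0;
 *	}
 *
 * A failing dma_set_mask() means the device cannot be supported on this
 * platform, and the probe should bail out.
 */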

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
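
/*
 * Usage sketch (illustrative only): allocating and freeing a small coherent
 * buffer, e.g. a descriptor ring.  "ring", "ring_dma" and the PAGE_SIZE
 * length are assumptions for the example.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device; access ring from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */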

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
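
/*
 * Usage sketch (illustrative only): forwarding a driver's mmap handler to
 * dma_mmap_coherent().  The foo_dev structure and its buf/buf_dma/buf_size
 * fields are assumptions for the example.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->buf,
 *					 foo->buf_dma, foo->buf_size);
 *	}
 */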


/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
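
/*
 * Usage sketch (illustrative only): a writecombining allocation as a frame
 * buffer driver might make.  The fb structure and fb_size are assumptions.
 *
 *	fb->screen_base = dma_alloc_writecombine(dev, fb_size,
 *						 &fb->screen_dma, GFP_KERNEL);
 *	if (!fb->screen_base)
 *		return -ENOMEM;
 *	...
 *	dma_free_writecombine(dev, fb_size, fb->screen_base, fb->screen_dma);
 */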


#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);
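
/*
 * Usage sketch (illustrative only): platform code registering a device with
 * dmabounce.  The 2048/4096 byte pool sizes are assumptions; real platforms
 * pick sizes that match their typical transfer sizes.
 *
 *	if (dmabounce_register_dev(dev, 2048, 4096))
 *		dev_err(dev, "failed to register with dmabounce\n");
 */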

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
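
/*
 * Usage sketch (illustrative only): one possible dma_needs_bounce()
 * implementation for a hypothetical platform whose DMA window covers only
 * the first 64MB of RAM.  The window limit is an assumption.
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > (PHYS_OFFSET + SZ_64M);
 *	}
 */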

/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_cpu_to_dev(cpu_addr, size, dir);
	return virt_to_dma(dev, cpu_addr);
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
			handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	addr = __dma_map_single(dev, cpu_addr, size, dir);
	debug_dma_map_page(dev, virt_to_page(cpu_addr),
			(unsigned long)cpu_addr & ~PAGE_MASK, size,
			dir, addr, true);

	return addr;
}
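
/*
 * Usage sketch (illustrative only): a streaming mapping for a CPU-to-device
 * transfer, including the dma_mapping_error() check.  "buf" and "len" are
 * assumptions for the example.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... hand "dma" to the device and start the transfer ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */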

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}
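
/*
 * Usage sketch (illustrative only): mapping part of a page, as a network
 * driver might do for a receive fragment.  "page", "offset" and "frag_len"
 * are assumptions for the example.
 *
 *	dma_addr_t dma = dma_map_page(dev, page, offset, frag_len,
 *				      DMA_FROM_DEVICE);
 *	... the device DMAs into the fragment; once it has finished ...
 *	dma_unmap_page(dev, dma, frag_len, DMA_FROM_DEVICE);
 */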

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, true);
	__dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, false);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA
 * mapping, you must call this function before doing so. At the
 * point where you give the DMA address back to the device, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
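
/*
 * Usage sketch (illustrative only): letting the CPU peek at a mapped buffer
 * between device transfers without unmapping it.  "handle" and "len" are
 * assumptions for the example.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer contents ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device owns the buffer again and may DMA into it ...
 */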

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
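
/*
 * Usage sketch (illustrative only): the scatterlist variants, assuming "sg"
 * describes "nents" preallocated entries.  dma_map_sg() may merge entries
 * and return a smaller count; use the returned count for I/O and the
 * original nents when unmapping.
 *
 *	int mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *
 *	if (!mapped)
 *		return -ENOMEM;
 *	... program the device with the first "mapped" entries of sg ...
 *	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */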

#endif /* __KERNEL__ */
#endif