Path: blob/master/arch/avr32/include/asm/dma-mapping.h
#ifndef __ASM_AVR32_DMA_MAPPING_H
#define __ASM_AVR32_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	int direction);

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	/* Fix when needed. I really don't know of any limitations */
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}

/*
 * dma_map_single can't fail as it is implemented now.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
{
	return 0;
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void dma_free_coherent(struct device *dev, size_t size,
			      void *cpu_addr, dma_addr_t handle);

/**
 * dma_alloc_writecombine - allocate write-combining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *dev, size_t size,
				    dma_addr_t *handle, gfp_t gfp);

/**
 * dma_free_writecombine - free memory allocated by dma_alloc_writecombine
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_writecombine
 * @cpu_addr: CPU-view address returned from dma_alloc_writecombine
 * @handle: device-view address returned from dma_alloc_writecombine
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_writecombine().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void dma_free_writecombine(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t handle);

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	dma_cache_sync(dev, cpu_addr, size, direction);
	return virt_to_bus(cpu_addr);
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{

}

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	return dma_map_single(dev, page_address(page) + offset,
			      size, direction);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * dma_map_single() interface above. Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length. They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements
 * (for example via virtual mapping capabilities).
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single() are
 * the same here.
 */
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++) {
		char *virt;

		sg[i].dma_address = page_to_bus(sg_page(&sg[i])) + sg[i].offset;
		virt = sg_virt(&sg[i]);
		dma_cache_sync(dev, virt, sg[i].length, direction);
	}

	return nents;
}

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Unmap a set of streaming mode DMA translations.
 * Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{

}

/**
 * dma_sync_single_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the DMA mapping,
 * you must call this function before doing so. At the next point you
 * give the DMA address back to the card, you must first perform a
 * dma_sync_single_for_device(), and then the device again owns the
 * buffer.
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction direction)
{
	/*
	 * No need to do anything since the CPU isn't supposed to
	 * touch this memory after we flushed it at mapping- or
	 * sync-for-device time.
	 */
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	/* just sync everything, that's all the pci API can do */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	/* just sync everything, that's all the pci API can do */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as dma_sync_single_for_* but for a scatter-gather list,
 * same rules and usage.
 */
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nents, enum dma_data_direction direction)
{
	/*
	 * No need to do anything since the CPU isn't supposed to
	 * touch this memory after we flushed it at mapping- or
	 * sync-for-device time.
	 */
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nents, enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++) {
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, direction);
	}
}

/* Now for the API extensions over the pci_ one */

#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)

#endif /* __ASM_AVR32_DMA_MAPPING_H */