Path: blob/master/arch/cris/include/asm/dma-mapping.h
/* DMA mapping. Nothing tricky here, just virt_to_phys */

#ifndef _ASM_CRIS_DMA_MAPPING_H
#define _ASM_CRIS_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/kernel.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#ifdef CONFIG_PCI
#include <asm-generic/dma-coherent.h>

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
#else
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	BUG();
	return NULL;
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
{
	BUG();
}
#endif

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	printk("Map sg\n");
	return nents;
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
}

static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
}

#endif
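
For reference, a minimal, hypothetical usage sketch of this API from a driver's point of view follows; it is not part of the header above. On CRIS, dma_map_single() simply returns virt_to_phys() of the buffer, so the returned handle is the buffer's physical address and no bounce buffering or cache maintenance happens. The function name example_map_buffer and its parameters are illustrative assumptions, not anything defined by this file.

/*
 * Hypothetical usage sketch: map a kmalloc'd buffer for device-to-memory
 * DMA, let the device fill it, then unmap and free it. 'dev' and 'len'
 * are illustrative; the device-programming step is elided.
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int example_map_buffer(struct device *dev, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return -ENOMEM;

	/* On this architecture the handle is just virt_to_phys(buf). */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(buf);
		return -EIO;
	}

	/* ... program the device with 'handle' and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	kfree(buf);
	return 0;
}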