/* Source path: arch/xtensa/include/asm/dma-mapping.h (Linux kernel) */
/*
 * include/asm-xtensa/dma-mapping.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_DMA_MAPPING_H
#define _XTENSA_DMA_MAPPING_H

#include <asm/cache.h>
#include <asm/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * DMA-consistent mapping functions.
 *
 * consistent_alloc/free manage DMA-coherent buffers; consistent_sync
 * performs the CPU-cache maintenance for a virtual address range.
 * Their definitions live elsewhere in the arch code (extern below).
 */

extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
extern void consistent_free(void*, size_t, dma_addr_t);
extern void consistent_sync(void*, size_t, int);

/* Non-coherent allocations are simply aliased to the coherent ones here. */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);

/*
 * Map a kernel-virtual buffer for streaming DMA: flush/invalidate the
 * CPU caches for the range, then hand back the physical address as the
 * bus address.  DMA_NONE is an API misuse and traps via BUG_ON.
 */
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
consistent_sync(ptr, size, direction);
return virt_to_phys(ptr);
}

/*
 * Unmapping is a no-op apart from the direction sanity check — the cache
 * maintenance was already done at map time.  NOTE(review): no sync is
 * performed here for DMA_FROM_DEVICE; presumably callers rely on the
 * dma_sync_* helpers below — confirm against the arch's cache model.
 */
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
}

/*
 * Map a scatterlist: for each entry, record its physical address as the
 * bus address and sync the CPU caches over its length.  Returns the number
 * of entries mapped (always nents — no coalescing, no failure path).
 */
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
int i;

BUG_ON(direction == DMA_NONE);

for (i = 0; i < nents; i++, sg++ ) {
BUG_ON(!sg_page(sg));

sg->dma_address = sg_phys(sg);
consistent_sync(sg_virt(sg), sg->length, direction);
}

return nents;
}

/*
 * Map a struct page + offset: the bus address is computed directly from
 * the page frame number.  Unlike dma_map_single() above, no cache sync is
 * done here — NOTE(review): verify callers sync explicitly, as highmem
 * pages have no permanent kernel mapping to sync through.
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}

/* No-op beyond the direction sanity check, mirroring dma_unmap_single(). */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
}


/* No-op beyond the direction sanity check, mirroring dma_unmap_single(). */
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
}

/*
 * The four dma_sync_single* helpers below all translate the bus address
 * back to a kernel virtual address (bus_to_virt) and perform the cache
 * maintenance over the requested range.  for_cpu and for_device are
 * identical here: the same sync primitive serves both directions.
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{

consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{

consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
}
/* Sync every scatterlist entry's cache lines; for_cpu == for_device here. */
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
int i;
for (i = 0; i < nelems; i++, sg++)
consistent_sync(sg_virt(sg), sg->length, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
int i;
for (i = 0; i < nelems; i++, sg++)
consistent_sync(sg_virt(sg), sg->length, dir);
}
/* Mapping on this arch cannot fail, so there is never an error to report. */
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}

/* Any DMA mask is accepted. */
static inline int
dma_supported(struct device *dev, u64 mask)
{
return 1;
}

/*
 * Set the device's DMA mask.  Fails with -EIO if the device has no mask
 * pointer or the mask is not supported (never the case here, per above).
 */
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
if(!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;

*dev->dma_mask = mask;

return 0;
}

/* Cache maintenance for a dma_alloc_noncoherent() buffer. */
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
consistent_sync(vaddr, size, direction);
}

#endif /* _XTENSA_DMA_MAPPING_H */