arch/parisc/include/asm/dma-mapping.h
#ifndef _PARISC_DMA_MAPPING_H
#define _PARISC_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>

/* See Documentation/PCI/PCI-DMA-mapping.txt */
struct hppa_dma_ops {
	int (*dma_supported)(struct device *dev, u64 mask);
	void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
	void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
	void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
	dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
	void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction);
	void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
	void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
	void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
	void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
};

/*
** We could live without the hppa_dma_ops indirection if we didn't want
** to support 4 different coherent dma models with one binary (they will
** someday be loadable modules):
**
**     I/O MMU        consistent method          dma_sync behavior
**  =============   =========================   =======================
**  a) PA-7x00LC    uncacheable host memory     flush/purge
**  b) U2/Uturn     cacheable host memory       NOP
**  c) Ike/Astro    cacheable host memory       NOP
**  d) EPIC/SAGA    memory on EPIC/SAGA         flush/reset DMA channel
**
** PA-7[13]00LC processors have a GSC bus interface and no I/O MMU.
**
** Systems (e.g. PCX-T workstations) that don't fall into the above
** categories will need to modify the needed drivers to perform
** flush/purge and allocate "regular" cacheable pages for everything.
*/

#ifdef CONFIG_PA11
extern struct hppa_dma_ops pcxl_dma_ops;
extern struct hppa_dma_ops pcx_dma_ops;
#endif

extern struct hppa_dma_ops *hppa_dma_ops;

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
}

static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		      gfp_t flag)
{
	return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
}

static inline void
dma_free_coherent(struct device *dev, size_t size,
		  void *vaddr, dma_addr_t dma_handle)
{
	hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
}

static inline void
dma_free_noncoherent(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle)
{
	/* There is no separate noncoherent free op: free_consistent handles both. */
	hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	return hppa_dma_ops->map_single(dev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	hppa_dma_ops->unmap_single(dev, dma_addr, size, direction);
}
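/*
 * Illustrative sketch, not part of the original header: the streaming
 * DMA pattern the wrappers above implement.  "dev", "buf" and "len"
 * are hypothetical, and error handling is elided since
 * dma_mapping_error() below is hard-wired to 0 on parisc.  On PA11
 * parts the map/unmap calls are where the flush/purge listed in the
 * table above actually happens.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	// ... program the device with "handle", run the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * For long-lived buffers that both CPU and device touch (e.g.
 * descriptor rings), prefer dma_alloc_coherent()/dma_free_coherent()
 * above and skip the per-transfer map/unmap.
 */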
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	return hppa_dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	return dma_map_single(dev, (page_address(page) + (offset)), size, direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	if (hppa_dma_ops->dma_sync_single_for_cpu)
		hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, 0, size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	if (hppa_dma_ops->dma_sync_single_for_device)
		hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, 0, size, direction);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	if (hppa_dma_ops->dma_sync_single_for_cpu)
		hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, offset, size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	if (hppa_dma_ops->dma_sync_single_for_device)
		hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, offset, size, direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	if (hppa_dma_ops->dma_sync_sg_for_cpu)
		hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	if (hppa_dma_ops->dma_sync_sg_for_device)
		hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction);
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	return hppa_dma_ops->dma_supported(dev, mask);
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	if (hppa_dma_ops->dma_sync_single_for_cpu)
		flush_kernel_dcache_range((unsigned long)vaddr, size);
}

static inline void *
parisc_walk_tree(struct device *dev)
{
	struct device *otherdev;

	if (likely(dev->platform_data != NULL))
		return dev->platform_data;

	/* OK, just traverse the bus to find it */
	for (otherdev = dev->parent; otherdev;
	     otherdev = otherdev->parent) {
		if (otherdev->platform_data) {
			dev->platform_data = otherdev->platform_data;
			break;
		}
	}
	BUG_ON(!dev->platform_data);
	return dev->platform_data;
}
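/*
 * Illustrative sketch, not part of the original header: how an I/O MMU
 * driver typically reaches its controller state through the walk above.
 * GET_IOC(), defined just below, wraps exactly this lookup; the "ioc"
 * variable and its use here are hypothetical.
 *
 *	struct ioc *ioc = GET_IOC(dev);	// IOMMU sitting above "dev"
 *
 * parisc_walk_tree() climbs dev->parent until it finds an ancestor
 * (the HBA) with platform_data set, then caches the pointer in
 * dev->platform_data so repeat lookups are constant time.
 */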
#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)

#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
struct ioc;
void *ccio_get_iommu(const struct parisc_device *dev);
int ccio_request_resource(const struct parisc_device *dev,
		struct resource *res);
int ccio_allocate_resource(const struct parisc_device *dev,
		struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align);
#else /* !CONFIG_IOMMU_CCIO */
#define ccio_get_iommu(dev) NULL
#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res)
#define ccio_allocate_resource(dev, res, size, min, max, align) \
		allocate_resource(&iomem_resource, res, size, min, max, \
				align, NULL, NULL)
#endif /* !CONFIG_IOMMU_CCIO */

#ifdef CONFIG_IOMMU_SBA
struct parisc_device;
void *sba_get_iommu(struct parisc_device *dev);
#endif

/* At the moment, we panic on error for IOMMU resource exhaustion */
#define dma_mapping_error(dev, x)	0

#endif /* _PARISC_DMA_MAPPING_H */
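/*
 * Illustrative sketch, not part of the original header: the hppa_dma_ops
 * pointer declared above is bound at boot by platform setup code,
 * roughly like this (the CCIO/SBA IOMMU drivers install their own
 * struct hppa_dma_ops the same way; the case labels here are an
 * approximation of the real selection logic):
 *
 *	switch (boot_cpu_data.cpu_type) {
 *	case pcxl2:
 *	case pcxl:
 *		hppa_dma_ops = &pcxl_dma_ops;	// flush/purge model (a)
 *		break;
 *	default:
 *		hppa_dma_ops = &pcx_dma_ops;	// no I/O MMU
 *	}
 */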