/* include/asm-generic/dma-mapping-common.h */
#ifndef _ASM_GENERIC_DMA_MAPPING_H1#define _ASM_GENERIC_DMA_MAPPING_H23#include <linux/kmemcheck.h>4#include <linux/scatterlist.h>5#include <linux/dma-debug.h>6#include <linux/dma-attrs.h>78static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,9size_t size,10enum dma_data_direction dir,11struct dma_attrs *attrs)12{13struct dma_map_ops *ops = get_dma_ops(dev);14dma_addr_t addr;1516kmemcheck_mark_initialized(ptr, size);17BUG_ON(!valid_dma_direction(dir));18addr = ops->map_page(dev, virt_to_page(ptr),19(unsigned long)ptr & ~PAGE_MASK, size,20dir, attrs);21debug_dma_map_page(dev, virt_to_page(ptr),22(unsigned long)ptr & ~PAGE_MASK, size,23dir, addr, true);24return addr;25}2627static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,28size_t size,29enum dma_data_direction dir,30struct dma_attrs *attrs)31{32struct dma_map_ops *ops = get_dma_ops(dev);3334BUG_ON(!valid_dma_direction(dir));35if (ops->unmap_page)36ops->unmap_page(dev, addr, size, dir, attrs);37debug_dma_unmap_page(dev, addr, size, dir, true);38}3940static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,41int nents, enum dma_data_direction dir,42struct dma_attrs *attrs)43{44struct dma_map_ops *ops = get_dma_ops(dev);45int i, ents;46struct scatterlist *s;4748for_each_sg(sg, s, nents, i)49kmemcheck_mark_initialized(sg_virt(s), s->length);50BUG_ON(!valid_dma_direction(dir));51ents = ops->map_sg(dev, sg, nents, dir, attrs);52debug_dma_map_sg(dev, sg, nents, ents, dir);5354return ents;55}5657static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,58int nents, enum dma_data_direction dir,59struct dma_attrs *attrs)60{61struct dma_map_ops *ops = get_dma_ops(dev);6263BUG_ON(!valid_dma_direction(dir));64debug_dma_unmap_sg(dev, sg, nents, dir);65if (ops->unmap_sg)66ops->unmap_sg(dev, sg, nents, dir, attrs);67}6869static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,70size_t offset, size_t size,71enum 
dma_data_direction dir)72{73struct dma_map_ops *ops = get_dma_ops(dev);74dma_addr_t addr;7576kmemcheck_mark_initialized(page_address(page) + offset, size);77BUG_ON(!valid_dma_direction(dir));78addr = ops->map_page(dev, page, offset, size, dir, NULL);79debug_dma_map_page(dev, page, offset, size, dir, addr, false);8081return addr;82}8384static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,85size_t size, enum dma_data_direction dir)86{87struct dma_map_ops *ops = get_dma_ops(dev);8889BUG_ON(!valid_dma_direction(dir));90if (ops->unmap_page)91ops->unmap_page(dev, addr, size, dir, NULL);92debug_dma_unmap_page(dev, addr, size, dir, false);93}9495static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,96size_t size,97enum dma_data_direction dir)98{99struct dma_map_ops *ops = get_dma_ops(dev);100101BUG_ON(!valid_dma_direction(dir));102if (ops->sync_single_for_cpu)103ops->sync_single_for_cpu(dev, addr, size, dir);104debug_dma_sync_single_for_cpu(dev, addr, size, dir);105}106107static inline void dma_sync_single_for_device(struct device *dev,108dma_addr_t addr, size_t size,109enum dma_data_direction dir)110{111struct dma_map_ops *ops = get_dma_ops(dev);112113BUG_ON(!valid_dma_direction(dir));114if (ops->sync_single_for_device)115ops->sync_single_for_device(dev, addr, size, dir);116debug_dma_sync_single_for_device(dev, addr, size, dir);117}118119static inline void dma_sync_single_range_for_cpu(struct device *dev,120dma_addr_t addr,121unsigned long offset,122size_t size,123enum dma_data_direction dir)124{125dma_sync_single_for_cpu(dev, addr + offset, size, dir);126}127128static inline void dma_sync_single_range_for_device(struct device *dev,129dma_addr_t addr,130unsigned long offset,131size_t size,132enum dma_data_direction dir)133{134dma_sync_single_for_device(dev, addr + offset, size, dir);135}136137static inline void138dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,139int nelems, enum dma_data_direction dir)140{141struct 
dma_map_ops *ops = get_dma_ops(dev);142143BUG_ON(!valid_dma_direction(dir));144if (ops->sync_sg_for_cpu)145ops->sync_sg_for_cpu(dev, sg, nelems, dir);146debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);147}148149static inline void150dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,151int nelems, enum dma_data_direction dir)152{153struct dma_map_ops *ops = get_dma_ops(dev);154155BUG_ON(!valid_dma_direction(dir));156if (ops->sync_sg_for_device)157ops->sync_sg_for_device(dev, sg, nelems, dir);158debug_dma_sync_sg_for_device(dev, sg, nelems, dir);159160}161162#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)163#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)164#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)165#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)166167#endif168169170