Path: blob/master/arch/blackfin/kernel/dma-mapping.c
/*
 * Dynamic DMA mapping support
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Bitmap allocator state for the uncached DMA region at the top of RAM */
static spinlock_t dma_page_lock;
static unsigned long *dma_page;
static unsigned int dma_pages;
static unsigned long dma_base;
static unsigned long dma_size;
static unsigned int dma_initialized;

static void dma_alloc_init(unsigned long start, unsigned long end)
{
	spin_lock_init(&dma_page_lock);
	dma_initialized = 0;

	dma_page = (unsigned long *)__get_free_page(GFP_KERNEL);
	memset(dma_page, 0, PAGE_SIZE);
	dma_base = PAGE_ALIGN(start);
	dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start);
	dma_pages = dma_size >> PAGE_SHIFT;
	memset((void *)dma_base, 0, DMA_UNCACHED_REGION);
	dma_initialized = 1;

	printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n", __func__,
	       dma_page, dma_pages, dma_base);
}

/* Number of pages needed to cover "size" bytes */
static inline unsigned int get_pages(size_t size)
{
	return ((size - 1) >> PAGE_SHIFT) + 1;
}

static unsigned long __alloc_dma_pages(unsigned int pages)
{
	unsigned long ret = 0, flags;
	int i, count = 0;

	if (dma_initialized == 0)
		dma_alloc_init(_ramend - DMA_UNCACHED_REGION, _ramend);

	spin_lock_irqsave(&dma_page_lock, flags);

	/* First-fit scan for a contiguous run of "pages" free bits */
	for (i = 0; i < dma_pages;) {
		if (test_bit(i++, dma_page) == 0) {
			if (++count == pages) {
				while (count--)
					__set_bit(--i, dma_page);

				ret = dma_base + (i << PAGE_SHIFT);
				break;
			}
		} else
			count = 0;
	}
	spin_unlock_irqrestore(&dma_page_lock, flags);
	return ret;
}

static void __free_dma_pages(unsigned long addr, unsigned int pages)
{
	unsigned long page = (addr - dma_base) >> PAGE_SHIFT;
	unsigned long flags;
	int i;

	if ((page + pages) > dma_pages) {
		printk(KERN_ERR "%s: freeing outside range.\n", __func__);
		BUG();
	}

	spin_lock_irqsave(&dma_page_lock, flags);
	for (i = page; i < page + pages; i++)
		__clear_bit(i, dma_page);

	spin_unlock_irqrestore(&dma_page_lock, flags);
}

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	ret = (void *)__alloc_dma_pages(get_pages(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
	__free_dma_pages((unsigned long)vaddr, get_pages(size));
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Streaming DMA mappings
 */
void __dma_sync(dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	__dma_sync_inline(addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync);

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		sg->dma_address = (dma_addr_t) sg_virt(sg);
		__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
	}

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nelems; i++, sg++) {
		sg->dma_address = (dma_addr_t) sg_virt(sg);
		__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
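
For context, a minimal usage sketch of the coherent API defined above, assuming a driver that has a struct device in hand. The names my_drv_setup()/my_drv_teardown() and the one-page buffer length are hypothetical, not part of this file; note that dma_alloc_coherent() above zeroes the buffer and returns NULL when the bitmap holds no large-enough free run.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static void *buf;		/* CPU (virtual) address */
static dma_addr_t buf_dma;	/* bus address handed to the peripheral */

/* Hypothetical helper: grab one page from the uncached region */
static int my_drv_setup(struct device *dev)
{
	buf = dma_alloc_coherent(dev, PAGE_SIZE, &buf_dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* program buf_dma into the DMA engine; the CPU may read/write
	 * buf at any time since the backing region is uncached */
	return 0;
}

/* Hypothetical helper: return the page to the bitmap allocator */
static void my_drv_teardown(struct device *dev)
{
	dma_free_coherent(dev, PAGE_SIZE, buf, buf_dma);
}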
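
And a similarly hedged sketch of the streaming path. As the code above shows, dma_map_sg() does no address translation (it stores each entry's virtual address as the bus address, which works because Blackfin has no MMU) and only pushes each range through __dma_sync(), so it always returns nents. The helper name and the single-entry table below are illustrative.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: map one buffer for a memory-to-device transfer */
static void my_drv_start_tx(struct device *dev, void *data, unsigned int len)
{
	struct scatterlist sg;

	sg_init_one(&sg, data, len);
	dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE);	/* syncs the cache range */

	/* start the transfer using sg_dma_address(&sg) / sg_dma_len(&sg);
	 * if the CPU touches "data" again before reusing the mapping,
	 * re-sync with dma_sync_sg_for_device() as defined above */
}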