Path: blob/master/arch/powerpc/mm/dma-noncoherent.c
/*
 * PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek ([email protected])
 *
 * Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 * -- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>

#include <asm/tlbflush.h>

#include "mmu_decl.h"

/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(IOREMAP_TOP)
#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)

/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct ppc_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct ppc_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct ppc_vm_region *c, *new;

	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
	struct ppc_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}
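/*
 * Example (editor's illustration, not part of the original file): a
 * minimal standalone userspace sketch of the first-fit scan that
 * ppc_vm_region_alloc() performs over its sorted, non-overlapping busy
 * list. The struct name, the demo regions and all addresses below are
 * invented for the example.
 */
#include <stdio.h>

struct region { unsigned long start, end; };

/* Return the first gap of 'size' bytes between 'base' and 'limit',
 * given an ascending array of 'n' busy regions, or 0 on failure. */
static unsigned long first_fit(const struct region *busy, int n,
			       unsigned long base, unsigned long limit,
			       unsigned long size)
{
	unsigned long addr = base;
	int i;

	for (i = 0; i < n; i++) {
		if (addr + size <= busy[i].start)
			return addr;		/* gap before this region */
		addr = busy[i].end;		/* skip past the busy region */
		if (addr + size > limit)
			return 0;		/* ran out of address space */
	}
	return addr + size <= limit ? addr : 0;	/* tail gap */
}

int main(void)
{
	const struct region busy[] = {
		{ 0x1000, 0x3000 },	/* two live allocations */
		{ 0x5000, 0x6000 },
	};

	/* A 0x2000-byte request lands in the hole at 0x3000. */
	printf("%#lx\n", first_fit(busy, 2, 0x1000, 0x10000, 0x2000));
	return 0;
}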
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	struct ppc_vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	/* Might be useful if we ever have a real legacy DMA zone... */
	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = ppc_vm_region_alloc(&consistent_head, size,
				gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		struct page *end = page + (1 << order);

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_phys(page);

		do {
			SetPageReserved(page);
			map_page(vaddr, page_to_phys(page),
				 pgprot_noncached(PAGE_KERNEL));
			page++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);

/*
 * free a page as defined by the above mapping.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct ppc_vm_region *c;
	unsigned long flags, addr;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	addr = c->vm_start;
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
							       addr),
						    addr),
					 addr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, addr, ptep);
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				ClearPageReserved(page);
				__free_page(page);
			}
		}
		addr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);
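/*
 * Example (editor's illustration, not part of the original file): how a
 * driver consumes this backend through the generic DMA API. The helper
 * name and "dev" are hypothetical; on CONFIG_NOT_COHERENT_CACHE
 * platforms dma_alloc_coherent() is backed by __dma_alloc_coherent()
 * above.
 */
static void *example_alloc_dma_page(struct device *dev, dma_addr_t *bus)
{
	/* Returns an uncached mapping carved out of
	 * [CONSISTENT_BASE, CONSISTENT_END); *bus holds the physical
	 * address to program into the device. The buffer is later
	 * released with dma_free_coherent(dev, PAGE_SIZE, cpu, *bus). */
	return dma_alloc_coherent(dev, PAGE_SIZE, bus, GFP_KERNEL);
}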
/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page makes memory consistent. identical to __dma_sync, but
 * takes a struct page instead of a virtual address
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
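/*
 * Example (editor's illustration, not part of the original file): the
 * streaming DMA API is what funnels into __dma_sync()/__dma_sync_page()
 * on these platforms. The helper name, "buf" and "len" are hypothetical;
 * a transmit-side mapping might look like this:
 */
static dma_addr_t example_map_tx_buffer(struct device *dev,
					void *buf, size_t len)
{
	/* DMA_TO_DEVICE is writeback only: __dma_sync() runs
	 * clean_dcache_range() so the device reads current data. The
	 * mapping is later torn down with
	 * dma_unmap_single(dev, handle, len, DMA_TO_DEVICE). */
	return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}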
This must349* not sleep so kmap_atomic()/kunmap_atomic() are used.350*351* Note: yes, it is possible and correct to have a buffer extend352* beyond the first page.353*/354static inline void __dma_sync_page_highmem(struct page *page,355unsigned long offset, size_t size, int direction)356{357size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);358size_t cur_size = seg_size;359unsigned long flags, start, seg_offset = offset;360int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;361int seg_nr = 0;362363local_irq_save(flags);364365do {366start = (unsigned long)kmap_atomic(page + seg_nr,367KM_PPC_SYNC_PAGE) + seg_offset;368369/* Sync this buffer segment */370__dma_sync((void *)start, seg_size, direction);371kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);372seg_nr++;373374/* Calculate next buffer segment size */375seg_size = min((size_t)PAGE_SIZE, size - cur_size);376377/* Add the segment size to our running total */378cur_size += seg_size;379seg_offset = 0;380} while (seg_nr < nr_segs);381382local_irq_restore(flags);383}384#endif /* CONFIG_HIGHMEM */385386/*387* __dma_sync_page makes memory consistent. identical to __dma_sync, but388* takes a struct page instead of a virtual address389*/390void __dma_sync_page(struct page *page, unsigned long offset,391size_t size, int direction)392{393#ifdef CONFIG_HIGHMEM394__dma_sync_page_highmem(page, offset, size, direction);395#else396unsigned long start = (unsigned long)page_address(page) + offset;397__dma_sync((void *)start, size, direction);398#endif399}400EXPORT_SYMBOL(__dma_sync_page);401402/*403* Return the PFN for a given cpu virtual address returned by404* __dma_alloc_coherent. This is used by dma_mmap_coherent()405*/406unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)407{408/* This should always be populated, so we don't test every409* level. If that fails, we'll have a nice crash which410* will be as good as a BUG_ON()411*/412pgd_t *pgd = pgd_offset_k(cpu_addr);413pud_t *pud = pud_offset(pgd, cpu_addr);414pmd_t *pmd = pmd_offset(pud, cpu_addr);415pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);416417if (pte_none(*ptep) || !pte_present(*ptep))418return 0;419return pte_pfn(*ptep);420}421422423