Path: include/asm-generic/cacheflush.h
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/*
 * The cache doesn't need to be flushed when TLB entries change when
 * the cache is mapped to physical memory, not virtual memory
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)
#define flush_icache_page(vma,pg)		do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __ASM_CACHEFLUSH_H */
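
These no-op defaults are only safe for physically indexed, physically tagged (PIPT) caches, where a page's contents look the same through every mapping. An architecture with a virtually-indexed data cache must provide real implementations in its own asm/cacheflush.h instead. Below is a minimal sketch of what such an override could look like; it is not from any kernel tree, and arch_flush_dcache_line() is a hypothetical stand-in for the port's real per-line cache-maintenance instruction:

#include <linux/mm.h>
#include <linux/cache.h>

/* Hypothetical per-line writeback+invalidate primitive (an assumption
 * for this sketch, not a real kernel function); a real port would
 * implement it with the CPU's cache-maintenance instructions. */
extern void arch_flush_dcache_line(unsigned long addr);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1

static inline void flush_dcache_page(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	unsigned long end = addr + PAGE_SIZE;

	/* Walk the page one cache line at a time, pushing dirty lines
	 * out to memory so aliasing mappings see consistent data. */
	for (; addr < end; addr += L1_CACHE_BYTES)
		arch_flush_dcache_line(addr);
}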
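
copy_to_user_page() pairs the memcpy() with an icache flush because the target page may back another process's executable mapping, as happens when a debugger plants a breakpoint via ptrace. The sketch below is modeled loosely on the kernel's access_process_vm() / __access_remote_vm() path (mm/memory.c); poke_remote() is an invented name, and locking and error handling are omitted:

#include <linux/mm.h>
#include <linux/highmem.h>

/* poke_remote() is hypothetical; it shows the intended call pattern
 * for copy_to_user_page(), not a real kernel entry point. */
static void poke_remote(struct vm_area_struct *vma, struct page *page,
			unsigned long vaddr, const void *buf, int len)
{
	void *kaddr = kmap(page);	/* temporary kernel mapping */

	/* Copy into the page and keep the instruction cache coherent;
	 * with the generic no-ops above this is just the memcpy(). */
	copy_to_user_page(vma, page, vaddr,
			  kaddr + offset_in_page(vaddr), buf, len);

	set_page_dirty_lock(page);
	kunmap(page);
}

With the asm-generic definitions, copy_to_user_page() compiles down to a plain memcpy(); an architecture with an incoherent icache gets the flush for free at every such call site, which is why the flush lives in the macro rather than in each caller.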