Path: arch/xtensa/include/asm/cacheflush.h
/*
 * include/asm-xtensa/cacheflush.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * specials for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);


#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p, s)		do { } while (0)
# define __flush_dcache_page(p)			do { } while (0)
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p, s)	__invalidate_dcache_range(p, s)
#endif

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif
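/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * how the low-level primitives above are typically paired around a DMA
 * transfer on a write-back D-cache. The example_* names are hypothetical;
 * real drivers would go through the generic DMA-mapping API rather than
 * calling these primitives directly.
 */
static inline void example_dma_sync_to_device(unsigned long start,
					      unsigned long size)
{
	/* Write dirty lines back so the device reads up-to-date data. */
	__flush_dcache_range(start, size);
}

static inline void example_dma_sync_from_device(unsigned long start,
						unsigned long size)
{
	/* Drop stale cached copies before the CPU reads device-written data. */
	__invalidate_dcache_range(start, size);
}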
/*
 * We have physically tagged caches - nothing to do here -
 * unless we have cache aliasing.
 *
 * Pages can get remapped. Because this might change the 'color' of that page,
 * we have to flush the cache before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

#define flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
extern void flush_dcache_page(struct page*);
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
extern void flush_cache_page(struct vm_area_struct*,
			     unsigned long, unsigned long);

#else

#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)

#define flush_cache_vmap(start,end)			do { } while (0)
#define flush_cache_vunmap(start,end)			do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
#define flush_dcache_page(page)				do { } while (0)

#define flush_cache_page(vma,addr,pfn)			do { } while (0)
#define flush_cache_range(vma,start,end)		do { } while (0)

#endif

/* Ensure consistency between data and instruction cache. */
#define flush_icache_range(start,end)					\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start, (end) - (start));	\
	} while (0)

/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page)			do { } while (0)

#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
			      unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
				unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	memcpy(dst, src, len)

#endif

#define XTENSA_CACHEBLK_LOG2	29
#define XTENSA_CACHEBLK_SIZE	(1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK	(7 << XTENSA_CACHEBLK_LOG2)

#if XCHAL_HAVE_CACHEATTR
static inline u32 xtensa_get_cacheattr(void)
{
	u32 r;
	asm volatile("	rsr %0, CACHEATTR" : "=a"(r));
	return r;
}

static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r = addr & XTENSA_CACHEBLK_MASK;
	return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2 - 2)))
			& 0xF);
}
#else
static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r;
	asm volatile("	rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
	asm volatile("	dsync");
	return r;
}

static inline u32 xtensa_get_cacheattr(void)
{
	u32 r = 0;
	u32 a = 0;
	do {
		a -= XTENSA_CACHEBLK_SIZE;
		r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
	} while (a);
	return r;
}
#endif

static inline int xtensa_need_flush_dma_source(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}

static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}
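/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the predicates above decode the cache-attribute bits of a mapping so that
 * per-buffer maintenance can be skipped when it is not needed, e.g. for
 * uncached mappings. example_dma_prepare() is a hypothetical caller.
 */
static inline void example_dma_prepare(u32 src, u32 dst, u32 size)
{
	/* Write the source back only if its mapping is cacheable. */
	if (xtensa_need_flush_dma_source(src))
		__flush_dcache_range(src, size);

	/* Invalidate the destination unless it is mapped uncached. */
	if (xtensa_need_invalidate_dma_destination(dst))
		__invalidate_dcache_range(dst, size);
}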
/*
 * Helpers for maintaining arbitrarily aligned buffers: the line count is
 * rounded up so that the partial cache lines at both ends of the range
 * are covered as well.
 */
static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwb %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}

static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
	int cnt;
	if (size) {
		/* Write back and invalidate the first, possibly partial, line. */
		asm volatile("	dhwbi %0, 0;" : : "a"(addr));
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		/* Interior lines can be dropped without a writeback. */
		while (cnt-- > 0) {
			asm volatile("	dhi %0, %1" : : "a"(addr),
					"n"(XCHAL_DCACHE_LINESIZE));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		/* Write back and invalidate the last, possibly partial, line. */
		asm volatile("	dhwbi %0, %1" : : "a"(addr),
				"n"(XCHAL_DCACHE_LINESIZE));
		asm volatile("	dsync");
	}
}

static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwbi %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}

#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */
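/*
 * Usage sketch (editor's illustration, not from the original file): after
 * writing instructions to memory, for instance when installing a breakpoint
 * or copying code into place, a caller keeps the I-cache coherent with the
 * D-cache like this:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long) dst, (unsigned long) dst + len);
 *
 * As defined above, this writes the affected D-cache lines back and then
 * invalidates the corresponding I-cache range.
 */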