Path: arch/m68k/include/asm/cacheflush_mm.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_CACHEFLUSH_H
#define _M68K_CACHEFLUSH_H

#include <linux/mm.h>
#ifdef CONFIG_COLDFIRE
#include <asm/mcfsim.h>
#endif

/* cache code */
#define FLUSH_I_AND_D	(0x00000808)
#define FLUSH_I		(0x00000008)

#ifndef ICACHE_MAX_ADDR
#define ICACHE_MAX_ADDR	0
#define ICACHE_SET_MASK	0
#define DCACHE_MAX_ADDR	0
#define DCACHE_SETMASK	0
#endif
#ifndef CACHE_MODE
#define CACHE_MODE	0
#define CACR_ICINVA	0
#define CACR_DCINVA	0
#define CACR_BCINVA	0
#endif

/*
 * ColdFire architecture has no way to clear individual cache lines, so we
 * are stuck invalidating all the cache entries when we want a clear operation.
 */
static inline void clear_cf_icache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec %0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA));
}

static inline void clear_cf_dcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec %0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_DCINVA));
}

static inline void clear_cf_bcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec %0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA | CACR_DCINVA));
}

/*
 * Use the ColdFire cpushl instruction to push (and invalidate) cache lines.
 * The start and end addresses are cache line numbers not memory addresses.
 */
static inline void flush_cf_icache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_dcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_bcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

/*
 * Cache handling functions
 */

static inline void flush_icache(void)
{
	if (CPU_IS_COLDFIRE) {
		flush_cf_icache(0, ICACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		asm volatile (	"nop\n"
			"	.chip	68040\n"
			"	cpusha	%bc\n"
			"	.chip	68k");
	} else {
		unsigned long tmp;
		asm volatile (	"movec	%%cacr,%0\n"
			"	or.w	%1,%0\n"
			"	movec	%0,%%cacr"
			: "=&d" (tmp)
			: "id" (FLUSH_I));
	}
}

/*
 * invalidate the cache for the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_clear(unsigned long paddr, int len);
/*
 * push any dirty cache in the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * push and invalidate pages in the specified user virtual
 * memory range.
 */
extern void cache_push_v(unsigned long vaddr, int len);
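/*
 * Illustrative only: the comments above suggest the usual pairing of
 * these helpers around a device DMA transfer, sketched here for a
 * hypothetical buffer at physical address paddr of len bytes. Real
 * drivers should normally go through the DMA mapping API rather than
 * call these directly.
 *
 *	cache_push(paddr, len);		write back dirty lines so the
 *					device reads current data
 *	(device fills the buffer)
 *	cache_clear(paddr, len);	invalidate stale lines before the
 *					CPU reads what the device wrote
 */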
/* This is needed whenever the virtual mapping of the current
   process changes.  */
#define __flush_cache_all()					\
({								\
	if (CPU_IS_COLDFIRE) {					\
		flush_cf_dcache(0, DCACHE_MAX_ADDR);		\
	} else if (CPU_IS_040_OR_060) {				\
		__asm__ __volatile__("nop\n\t"			\
				     ".chip 68040\n\t"		\
				     "cpusha %dc\n\t"		\
				     ".chip 68k");		\
	} else {						\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define __flush_cache_030()					\
({								\
	if (CPU_IS_020_OR_030) {				\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define flush_cache_all() __flush_cache_all()

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		flush_cache_all()

static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_cache_030();
}

#define flush_cache_dup_mm(mm)			flush_cache_mm(mm)

/* flush_cache_range/flush_cache_page once had to be macros to avoid
   a dependency on linux/mm.h, which includes this file... */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}


/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
static inline void __flush_pages_to_ram(void *vaddr, unsigned int nr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long addr, start, end;
		addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
		start = addr & ICACHE_SET_MASK;
		end = (addr + nr * PAGE_SIZE - 1) & ICACHE_SET_MASK;
		if (start > end) {
			flush_cf_bcache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_bcache(start, end);
	} else if (CPU_IS_040_OR_060) {
		unsigned long paddr = __pa(vaddr);

		do {
			__asm__ __volatile__("nop\n\t"
					     ".chip 68040\n\t"
					     "cpushp %%bc,(%0)\n\t"
					     ".chip 68k"
					     : : "a" (paddr));
			paddr += PAGE_SIZE;
		} while (--nr);
	} else {
		unsigned long _tmp;
		__asm__ __volatile__("movec %%cacr,%0\n\t"
				     "orw %1,%0\n\t"
				     "movec %0,%%cacr"
				     : "=&d" (_tmp)
				     : "di" (FLUSH_I));
	}
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
#define flush_dcache_page(page)	__flush_pages_to_ram(page_address(page), 1)
#define flush_dcache_folio(folio)		\
	__flush_pages_to_ram(folio_address(folio), folio_nr_pages(folio))
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_pages(vma, page, nr)	\
	__flush_pages_to_ram(page_address(page), nr)

extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
				   unsigned long addr, int len);
extern void flush_icache_range(unsigned long address, unsigned long endaddr);
extern void flush_icache_user_range(unsigned long address,
				    unsigned long endaddr);

static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_page(vma, page, vaddr, len);
}
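
/*
 * Note the asymmetry with copy_from_user_page() below: writing into a
 * user page (for example ptrace planting a breakpoint in another
 * process's text) may modify instructions, so the instruction cache
 * must be flushed after the memcpy(); reading back out only needs the
 * flush_cache_page() beforehand.
 */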
static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}

#endif /* _M68K_CACHEFLUSH_H */