/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)     ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *      MM Cache Management
 *      ===================
 *
 *      The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *      implement these methods.
 *
 *      Start addresses are inclusive and end addresses are exclusive;
 *      start addresses should be rounded down, end addresses up.
 *
 *      See Documentation/core-api/cachetlb.rst for more information.
 *      Please note that the implementation of these, and the required
 *      effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *      flush_icache_all()
 *
 *              Unconditionally clean and invalidate the entire icache.
 *              Currently only needed for cache-v6.S and cache-v7.S, see
 *              __flush_icache_all for the generic implementation.
 *
 *      flush_kern_all()
 *
 *              Unconditionally clean and invalidate the entire cache.
 *
 *      flush_kern_louis()
 *
 *              Flush data cache levels up to the level of unification
 *              inner shareable and invalidate the I-cache.
 *              Only needed from v7 onwards, falls back to flush_cache_all()
 *              for all other processor versions.
 *
 *      flush_user_all()
 *
 *              Clean and invalidate all user space cache entries
 *              before a change of page tables.
 *
 *      flush_user_range(start, end, flags)
 *
 *              Clean and invalidate a range of cache entries in the
 *              specified address space before a change of page tables.
 *              - start - user start address (inclusive, page aligned)
 *              - end   - user end address   (exclusive, page aligned)
 *              - flags - vma->vm_flags field
 *
 *      coherent_kern_range(start, end)
 *
 *              Ensure coherency between the Icache and the Dcache in the
 *              region described by start, end.  If you have non-snooping
 *              Harvard caches, you need to implement this function.
 *              - start - virtual start address
 *              - end   - virtual end address
 *
 *      coherent_user_range(start, end)
 *
 *              Ensure coherency between the Icache and the Dcache in the
 *              region described by start, end.  If you have non-snooping
 *              Harvard caches, you need to implement this function.
 *              - start - virtual start address
 *              - end   - virtual end address
 *
 *      flush_kern_dcache_area(kaddr, size)
 *
 *              Ensure that the data held in the page is written back.
 *              - kaddr - page address
 *              - size  - region size
 *
 *      DMA Cache Coherency
 *      ===================
 *
 *      dma_flush_range(start, end)
 *
 *              Clean and invalidate the specified virtual address range.
 *              - start - virtual start address
 *              - end   - virtual end address
 */

struct cpu_cache_fns {
        void (*flush_icache_all)(void);
        void (*flush_kern_all)(void);
        void (*flush_kern_louis)(void);
        void (*flush_user_all)(void);
        void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

        void (*coherent_kern_range)(unsigned long, unsigned long);
        int  (*coherent_user_range)(unsigned long, unsigned long);
        void (*flush_kern_dcache_area)(void *, size_t);

        void (*dma_map_area)(const void *, size_t, int);
        void (*dma_unmap_area)(const void *, size_t, int);

        void (*dma_flush_range)(const void *, const void *);
} __no_randomize_layout;

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all         cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all           cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis         cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all           cpu_cache.flush_user_all
#define __cpuc_flush_user_range         cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range      cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range      cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area        cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_flush_range                cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_flush_range(const void *, const void *);

#endif
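
/*
 * Example (illustrative sketch only, not part of this API): whichever way
 * the __cpuc_* names are resolved above, a caller uses them identically,
 * e.g. to write back and invalidate a kernel buffer that a non-coherent
 * observer is about to read:
 *
 *      memcpy(buf, data, len);                 // buf/data/len are placeholders
 *      __cpuc_flush_dcache_area(buf, len);
 *
 * Most code should use the higher-level helpers below or the DMA-mapping
 * API rather than calling the __cpuc_* operations directly.
 */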

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
        unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        do {                                                    \
                memcpy(dst, src, len);                          \
        } while (0)

/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()                            \
        asm("mcr        p15, 0, %0, c7, c5, 0"                  \
            : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()                             \
        asm("mcr        p15, 0, %0, c7, c1, 0"                  \
            : : "r" (0));

/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) &&                                  \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) ||    \
        defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred        __cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred        __flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred        __cpuc_flush_icache_all
#else
#define __flush_icache_preferred        __flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
        __flush_icache_preferred();
        dsb(ishst);
}

/*
 * Flush caches up to Level of Unification Inner Shareable
 */
#define flush_cache_louis()             __cpuc_flush_kern_louis()

#define flush_cache_all()               __cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                __cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                                        vma->vm_flags);
}

static inline void vivt_flush_cache_pages(struct vm_area_struct *vma,
                unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
        struct mm_struct *mm = vma->vm_mm;

        if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
                unsigned long addr = user_addr & PAGE_MASK;
                __cpuc_flush_user_range(addr, addr + nr * PAGE_SIZE,
                                        vma->vm_flags);
        }
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
                vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
                vivt_flush_cache_range(vma,start,end)
#define flush_cache_pages(vma, addr, pfn, nr) \
                vivt_flush_cache_pages(vma, addr, pfn, nr)
#else
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr,
                unsigned long pfn, unsigned int nr);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
#define flush_cache_page(vma, addr, pfn) flush_cache_pages(vma, addr, pfn, 1)

/*
 * flush_icache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_icache_user_range(s,e)    __cpuc_coherent_user_range(s,e)

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)         __cpuc_coherent_kern_range(s,e)
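
/*
 * Example (illustrative sketch only, not part of this API): after writing
 * instructions into kernel memory, the modified range must be made coherent
 * before it is executed, roughly:
 *
 *      memcpy(dst, insns, len);                // dst/insns/len are placeholders
 *      flush_icache_range((unsigned long)dst,
 *                         (unsigned long)dst + len);
 */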

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)   cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, folio_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *);
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
        if ((cache_is_vivt() || cache_is_vipt_aliasing()))
                __cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
        if ((cache_is_vivt() || cache_is_vipt_aliasing()))
                __cpuc_flush_dcache_area(addr, (size_t)size);
}
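
/*
 * Example (illustrative sketch only, not part of this API): for a buffer
 * written through a vmap()/vmalloc() alias and then handed to a device via
 * its underlying physical pages (see Documentation/core-api/cachetlb.rst),
 * the usual pattern is:
 *
 *      flush_kernel_vmap_range(vaddr, size);       // before the device reads it
 *      ... perform the I/O ...
 *      invalidate_kernel_vmap_range(vaddr, size);  // before the CPU reads what
 *                                                  // the device wrote
 *
 * vaddr/size above are placeholders for the vmap alias actually used.
 */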

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
                         struct page *page, unsigned long vmaddr)
{
        extern void __flush_anon_page(struct vm_area_struct *vma,
                                struct page *, unsigned long);
        if (PageAnon(page))
                __flush_anon_page(vma, page, vmaddr);
}

#define flush_dcache_mmap_lock(mapping)         xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)       xa_unlock_irq(&mapping->i_pages)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
        if (!cache_is_vipt_nonaliasing())
                flush_cache_all();
        else
                /*
                 * set_pte_at() called from vmap_pte_range() does not
                 * have a DSB after cleaning the cache line.
                 */
                dsb(ishst);
}

#define flush_cache_vmap_early(start, end)      do { } while (0)

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
        if (!cache_is_vipt_nonaliasing())
                flush_cache_all();
}

/*
 * Memory synchronization helpers for mixed cached vs non cached accesses.
 *
 * Some synchronization algorithms have to set states in memory with the
 * cache enabled or disabled depending on the code path.  It is crucial
 * to always ensure proper cache maintenance to update main memory right
 * away in that case.
 *
 * Any cached write must be followed by a cache clean operation.
 * Any cached read must be preceded by a cache invalidate operation.
 * Yet, in the read case, a cache flush, i.e. an atomic clean+invalidate
 * operation, is needed to avoid discarding possible concurrent writes to the
 * accessed memory.
 *
 * Also, in order to prevent a cached writer from interfering with an
 * adjacent non-cached writer, each state variable must be located in
 * a separate cache line.
 */

/*
 * This needs to be >= the max cache writeback size of all
 * supported platforms included in the current kernel configuration.
 * This is used to align state variables to their own cache lines.
 */
#define __CACHE_WRITEBACK_ORDER 6       /* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)

/*
 * There is no __cpuc_clean_dcache_area but we use it anyway for
 * code intent clarity, and alias it to __cpuc_flush_dcache_area.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area

/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other CPUs:
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
        char *_p = (char *)p;

        __cpuc_clean_dcache_area(_p, size);
        outer_clean_range(__pa(_p), __pa(_p + size));
}

/*
 * Ensure preceding writes to *p by other CPUs are visible to
 * subsequent reads by this CPU.  We must be careful not to
 * discard data simultaneously written by another CPU, hence the
 * usage of flush rather than invalidate operations.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
        char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
        if (outer_cache.flush_range) {
                /*
                 * Ensure dirty data migrated from other CPUs into our cache
                 * are cleaned out safely before the outer cache is cleaned:
                 */
                __cpuc_clean_dcache_area(_p, size);

                /* Clean and invalidate stale data for *p from outer ... */
                outer_flush_range(__pa(_p), __pa(_p + size));
        }
#endif

        /* ... and inner cache: */
        __cpuc_flush_dcache_area(_p, size);
}

#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
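
/*
 * Example (illustrative sketch only, not part of this API): a state variable
 * shared between cached and non-cached observers could be laid out and
 * accessed like this (shared_state/new_value are placeholders):
 *
 *      static int shared_state __aligned(__CACHE_WRITEBACK_GRANULE);
 *
 *      // cached writer: publish a value to main memory
 *      shared_state = new_value;
 *      sync_cache_w(&shared_state);
 *
 *      // cached reader: observe a value possibly written while uncached
 *      sync_cache_r(&shared_state);
 *      new_value = shared_state;
 */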

/*
 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
 * To do so we must:
 *
 * - Clear the SCTLR.C bit to prevent further cache allocations
 * - Flush the desired level of cache
 * - Clear the ACTLR "SMP" bit to disable local coherency
 *
 * ... and to do so without any intervening memory access between those
 * steps, not even to the stack.
 *
 * WARNING -- After this has been called:
 *
 * - No ldrex/strex (and similar) instructions may be used.
 * - The CPU is obviously no longer coherent with the other CPUs.
 * - This is unlikely to work as expected if Linux is running non-secure.
 *
 * Note:
 *
 * - This is known to apply to several ARMv7 processor implementations,
 *   however some exceptions may exist.  Caveat emptor.
 *
 * - The clobber list is dictated by the call to v7_flush_dcache_*.
 */
#define v7_exit_coherency_flush(level) \
        asm volatile( \
        ".arch  armv7-a \n\t" \
        "mrc    p15, 0, r0, c1, c0, 0   @ get SCTLR \n\t" \
        "bic    r0, r0, #"__stringify(CR_C)" \n\t" \
        "mcr    p15, 0, r0, c1, c0, 0   @ set SCTLR \n\t" \
        "isb    \n\t" \
        "bl     v7_flush_dcache_"__stringify(level)" \n\t" \
        "mrc    p15, 0, r0, c1, c0, 1   @ get ACTLR \n\t" \
        "bic    r0, r0, #(1 << 6)       @ disable local coherency \n\t" \
        "mcr    p15, 0, r0, c1, c0, 1   @ set ACTLR \n\t" \
        "isb    \n\t" \
        "dsb" \
        : : : "r0","r1","r2","r3","r4","r5","r6", \
              "r9","r10","ip","lr","memory" )

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
                             void *kaddr, unsigned long len);

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid);
#else
static inline void check_cpu_icache_size(int cpuid) { }
#endif

#endif