Path: blob/master/arch/openrisc/include/asm/cacheflush.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * OpenRISC implementation:
 * Copyright (C) Jan Henrik Weinstock <[email protected]>
 * et al.
 */

#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/mm.h>

/*
 * Helper functions for flushing or invalidating entire pages from data
 * and instruction caches. SMP needs a little extra work, since we need
 * to flush the pages on all cpus.
 */
extern void local_dcache_page_flush(struct page *page);
extern void local_icache_page_inv(struct page *page);
extern void local_dcache_range_flush(unsigned long start, unsigned long end);
extern void local_dcache_range_inv(unsigned long start, unsigned long end);
extern void local_icache_range_inv(unsigned long start, unsigned long end);

/*
 * Data cache flushing always happens on the local cpu. Instruction cache
 * invalidations need to be broadcast to all other cpus in the system in
 * case of SMP configurations.
 */
#ifndef CONFIG_SMP
#define dcache_page_flush(page)		local_dcache_page_flush(page)
#define icache_page_inv(page)		local_icache_page_inv(page)
#else  /* CONFIG_SMP */
#define dcache_page_flush(page)		local_dcache_page_flush(page)
#define icache_page_inv(page)		smp_icache_page_inv(page)
extern void smp_icache_page_inv(struct page *page);
#endif /* CONFIG_SMP */

/*
 * Even if the actual block size is larger than L1_CACHE_BYTES, paddr
 * can be incremented by L1_CACHE_BYTES. When paddr is written to the
 * invalidate register, the entire cache line encompassing this address
 * is invalidated. Each subsequent reference to the same cache line will
 * not affect the invalidation process.
 */
#define local_dcache_block_flush(addr) \
	local_dcache_range_flush(addr, addr + L1_CACHE_BYTES)
#define local_dcache_block_inv(addr) \
	local_dcache_range_inv(addr, addr + L1_CACHE_BYTES)
#define local_icache_block_inv(addr) \
	local_icache_range_inv(addr, addr + L1_CACHE_BYTES)

/*
 * Synchronizes caches. Whenever a cpu writes executable code to memory, this
 * should be called to make sure the processor sees the newly written code.
 */
static inline void sync_icache_dcache(struct page *page)
{
	if (!IS_ENABLED(CONFIG_DCACHE_WRITETHROUGH))
		dcache_page_flush(page);
	icache_page_inv(page);
}

/*
 * Pages with this bit set need not be flushed/invalidated, since
 * they have not changed since the last flush. New pages start with
 * PG_arch_1 not set and are therefore dirty by default.
 */
#define PG_dc_clean	PG_arch_1

static inline void flush_dcache_folio(struct folio *folio)
{
	clear_bit(PG_dc_clean, &folio->flags);
}
#define flush_dcache_folio flush_dcache_folio

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}

#define flush_icache_user_page(vma, page, addr, len)	\
do {							\
	if (vma->vm_flags & VM_EXEC)			\
		sync_icache_dcache(page);		\
} while (0)

#include <asm-generic/cacheflush.h>

#endif /* __ASM_CACHEFLUSH_H */
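
/*
 * Example (not part of this header): the per-line macros above step through
 * memory in L1_CACHE_BYTES increments, matching the comment about the
 * invalidate register operating on whole cache lines. The loop below is a
 * minimal sketch of that stepping pattern, shown only for illustration; the
 * function name example_icache_inv_by_line() is hypothetical and this is not
 * the architecture's actual range implementation.
 */
static inline void example_icache_inv_by_line(unsigned long start,
					      unsigned long end)
{
	unsigned long addr;

	/* Align down to a line boundary, then invalidate one line at a time. */
	for (addr = start & ~(L1_CACHE_BYTES - 1); addr < end;
	     addr += L1_CACHE_BYTES)
		local_icache_block_inv(addr);
}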
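
/*
 * Example (not part of this header): a minimal sketch of how a caller might
 * use sync_icache_dcache() after writing executable code into a page, which
 * is the same pattern flush_icache_user_page() wraps for VM_EXEC mappings.
 * The helper name write_insn_to_page() is hypothetical, and it assumes the
 * page is mapped in lowmem so page_address() is valid.
 */
static void write_insn_to_page(struct page *page, const void *insns,
			       size_t len)
{
	/* Copy the new instructions through the kernel mapping of the page. */
	memcpy(page_address(page), insns, len);

	/*
	 * Push dirty data cache lines to memory (unless the D-cache is
	 * write-through) and invalidate the stale I-cache lines so the cpu
	 * does not execute old instructions from this page.
	 */
	sync_icache_dcache(page);
}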