Path: blob/master/arch/hexagon/include/asm/cacheflush.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache flush operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/mm_types.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(pg) flushes (wback & invalidates) a page for dcache
 *  - flush_icache_pages(vma, pg, nr) flushes (invalidates) nr pages for icache
 *
 *  Need to double-check which one is really needed for ptrace stuff to work.
 */
#define LINESIZE	32
#define LINEBITS	5

/*
 * Flush Dcache range through current map.
 */
extern void flush_dcache_range(unsigned long start, unsigned long end);
#define flush_dcache_range flush_dcache_range

/*
 * Flush Icache range through current map.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_range flush_icache_range

/*
 * Memory-management related flushes are there to ensure in non-physically
 * indexed cache schemes that stale lines belonging to a given ASID aren't
 * in the cache to confuse things.  The prototype Hexagon Virtual Machine
 * only uses a single ASID for all user-mode maps, which should
 * mean that they aren't necessary.  A brute-force, flush-everything
 * implementation, with the name xxxxx_hexagon(), is present in
 * arch/hexagon/mm/cache.c, but let's not wire it up until we know
 * it is needed.
 */
extern void flush_cache_all_hexagon(void);

/*
 * This may or may not ever have to be non-null, depending on the
 * virtual machine MMU.  For a native kernel, it's definitely a no-op.
 *
 * This is also the place where deferred cache coherency stuff seems
 * to happen, classically...  but instead we do it like ia64 and
 * clean the cache when the PTE is set.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	/* generic_ptrace_pokedata doesn't wind up here, does it? */
}

#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, void *src, int len);
#define copy_to_user_page copy_to_user_page

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);

#include <asm-generic/cacheflush.h>

#endif
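
A typical caller pairs a store to instruction memory with flush_icache_range() before executing the new code: the store lands in the D-cache, and the flush writes the dirty line back and invalidates the matching I-cache line. The following is only a sketch, not part of this header; patch_insn() and its arguments are hypothetical, and a real caller would live in .c code.

#include <linux/types.h>
#include <asm/cacheflush.h>

/* Hypothetical illustration: store one new instruction word, then make
 * it visible to instruction fetch through the current map.
 */
static void patch_insn(u32 *addr, u32 new_insn)
{
	*addr = new_insn;	/* store goes through the D-cache */
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + sizeof(new_insn));
	/* the core now fetches the updated encoding */
}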
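
The clean/invalidate split exposed at the bottom of the header follows the usual DMA direction rules. Below is a hedged sketch of that pairing, assuming a streaming-DMA-style caller; sync_for_device() and its parameters are hypothetical, not an API defined by this header.

#include <linux/dma-direction.h>
#include <asm/cacheflush.h>

/* Hypothetical illustration of the direction rules:
 *  - CPU -> device: clean (write back) so the device sees CPU writes
 *  - device -> CPU: invalidate so later CPU reads refetch from memory
 *  - both ways:     write back and invalidate
 */
static void sync_for_device(unsigned long start, unsigned long size,
			    enum dma_data_direction dir)
{
	unsigned long end = start + size;

	switch (dir) {
	case DMA_TO_DEVICE:
		hexagon_clean_dcache_range(start, end);
		break;
	case DMA_FROM_DEVICE:
		hexagon_inv_dcache_range(start, end);
		break;
	default:
		flush_dcache_range(start, end);
		break;
	}
}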