Path: blob/master/arch/alpha/include/asm/cacheflush.h
#ifndef _ALPHA_CACHEFLUSH_H
#define _ALPHA_CACHEFLUSH_H

#include <linux/mm.h>

/* Caches aren't brain-dead on the Alpha. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

/* Note that the following two definitions are _highly_ dependent
   on the contexts in which they are used in the kernel.  I personally
   think it is criminal how loosely defined these macros are.  */

/* We need to flush the kernel's icache after loading modules.  The
   only other use of this macro is in load_aout_interp which is not
   used on Alpha.

   Note that this definition should *not* be used for userspace
   icache flushing.  While functional, it is _way_ overkill.  The
   icache is tagged with ASNs and it suffices to allocate a new ASN
   for the process.  */
#ifndef CONFIG_SMP
#define flush_icache_range(start, end)		imb()
#else
#define flush_icache_range(start, end)		smp_imb()
extern void smp_imb(void);
#endif

/* We need to flush the userspace icache after setting breakpoints in
   ptrace.

   Instead of indiscriminately using imb, take advantage of the fact
   that icache entries are tagged with the ASN and load a new mm context.  */
/* ??? Ought to use this in arch/alpha/kernel/signal.c too.  */

#ifndef CONFIG_SMP
#include <linux/sched.h>

extern void __load_new_mm_context(struct mm_struct *);
static inline void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	if (vma->vm_flags & VM_EXEC) {
		struct mm_struct *mm = vma->vm_mm;
		if (current->active_mm == mm)
			__load_new_mm_context(mm);
		else
			mm->context[smp_processor_id()] = 0;
	}
}
#else
extern void flush_icache_user_range(struct vm_area_struct *vma,
		struct page *page, unsigned long addr, int len);
#endif

/* This is used only in __do_fault and do_swap_page. */
#define flush_icache_page(vma, page) \
	flush_icache_user_range((vma), (page), 0, 0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { memcpy(dst, src, len); \
     flush_icache_user_range(vma, page, vaddr, len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* _ALPHA_CACHEFLUSH_H */
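
For context, a minimal sketch of how a caller is expected to pair the memcpy with the icache flush that copy_to_user_page() bundles together. The function patch_user_insn() and its parameters are hypothetical illustrations only; they are not part of this header or of the kernel's actual ptrace path.

/* Hypothetical illustration -- not part of the header above.  A caller
   that writes an instruction into a user page (as ptrace does when it
   plants a breakpoint) must follow the store with an icache flush for
   the user mapping, which is what copy_to_user_page() wraps.  */

#include <linux/mm.h>
#include <asm/cacheflush.h>

static void patch_user_insn(struct vm_area_struct *vma, struct page *page,
			    unsigned long vaddr, void *kaddr,
			    const u32 *new_insn)
{
	/* Write the instruction through the kernel mapping of the page,
	   then invalidate stale icache entries for the user mapping.
	   On Alpha this reaches flush_icache_user_range(), which loads a
	   new mm context (fresh ASN) instead of issuing a blanket imb.  */
	copy_to_user_page(vma, page, vaddr, kaddr, new_insn,
			  sizeof(*new_insn));
}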