Path: blob/master/arch/x86/include/asm/cacheflush.h
#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags WC and Uncached together to keep track of
 * the memory type of pages that have a backing page struct. X86 PAT supports
 * 3 different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
 * _PAGE_CACHE_UC_MINUS, and a fourth state where the page's memory type has
 * not been changed from its default (a value of -1 is used to denote this).
 * Note we do not support _PAGE_CACHE_UC here.
 */

#define _PGMT_DEFAULT           0
#define _PGMT_WC                (1UL << PG_arch_1)
#define _PGMT_UC_MINUS          (1UL << PG_uncached)
#define _PGMT_WB                (1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK              (1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK        (~_PGMT_MASK)

static inline unsigned long get_page_memtype(struct page *pg)
{
        unsigned long pg_flags = pg->flags & _PGMT_MASK;

        if (pg_flags == _PGMT_DEFAULT)
                return -1;
        else if (pg_flags == _PGMT_WC)
                return _PAGE_CACHE_WC;
        else if (pg_flags == _PGMT_UC_MINUS)
                return _PAGE_CACHE_UC_MINUS;
        else
                return _PAGE_CACHE_WB;
}

static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
        unsigned long memtype_flags = _PGMT_DEFAULT;
        unsigned long old_flags;
        unsigned long new_flags;

        switch (memtype) {
        case _PAGE_CACHE_WC:
                memtype_flags = _PGMT_WC;
                break;
        case _PAGE_CACHE_UC_MINUS:
                memtype_flags = _PGMT_UC_MINUS;
                break;
        case _PAGE_CACHE_WB:
                memtype_flags = _PGMT_WB;
                break;
        }

        do {
                old_flags = pg->flags;
                new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
        } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
#endif

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are in any
 *   particular state, other than that they do not violate the rules for
 *   the CPU you have. Do not depend on any effects on other mappings;
 *   CPUs other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);

int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
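/*
 * Illustrative sketch only, not part of the original header: one plausible
 * way a driver might use the set_memory_* API declared above.  The helper
 * name, the buffer and the device-programming step are hypothetical; the
 * sketch assumes a page-aligned buffer in the kernel's linear mapping,
 * e.g. one obtained from __get_free_pages().
 */
static int example_map_buffer_uncached(unsigned long buf, int numpages)
{
        int ret;

        /* Switch the kernel mapping of the buffer to uncached (UC). */
        ret = set_memory_uc(buf, numpages);
        if (ret)
                return ret;

        /* ... hand the buffer's physical address to the device here ... */

        /* Restore the default write-back (WB) attribute when done. */
        return set_memory_wb(buf, numpages);
}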
/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* functions. See the description of the
 * set_memory_* API for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is the implicit operation on the 1:1 mapping only,
 * which makes this not a generally useful API.
 *
 * Specifically, many users of the old APIs had a virtual address and
 * called virt_to_page() or vmalloc_to_page() on that address to
 * get the struct page * that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address; do not use these functions.
 */

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);


void clflush_cache_range(void *addr, unsigned int size);

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
        return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */
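/*
 * Illustrative conversion sketch only, not part of the original header,
 * following the note in the legacy set_pages_* comment above.  Hypothetical
 * old-style code converted a virtual address to a struct page just to call
 * the page-based API, which only ever touches the 1:1 kernel mapping; the
 * preferred form calls set_memory_*() on the original virtual address.
 */
static int example_legacy_way(void *addr, int numpages)
{
        /* Deprecated: affects only the direct (1:1) mapping of the page. */
        return set_pages_uc(virt_to_page(addr), numpages);
}

static int example_preferred_way(void *addr, int numpages)
{
        /* Preferred: operates on the mapping the caller actually passed in. */
        return set_memory_uc((unsigned long)addr, numpages);
}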