/* Source: arch/powerpc/include/asm/book3s/64/tlbflush.h (26519 views) */
/* SPDX-License-Identifier: GPL-2.0 */1#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H2#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H34#define MMU_NO_CONTEXT ~0UL56#include <linux/mm_types.h>7#include <linux/mmu_notifier.h>8#include <asm/book3s/64/tlbflush-hash.h>9#include <asm/book3s/64/tlbflush-radix.h>1011/* TLB flush actions. Used as argument to tlbiel_all() */12enum {13TLB_INVAL_SCOPE_GLOBAL = 0, /* invalidate all TLBs */14TLB_INVAL_SCOPE_LPID = 1, /* invalidate TLBs for current LPID */15};1617static inline void tlbiel_all(void)18{19/*20* This is used for host machine check and bootup.21*22* This uses early_radix_enabled and implementations use23* early_cpu_has_feature etc because that works early in boot24* and this is the machine check path which is not performance25* critical.26*/27if (early_radix_enabled())28radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);29else30hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);31}3233static inline void tlbiel_all_lpid(bool radix)34{35/*36* This is used for guest machine check.37*/38if (radix)39radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);40else41hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);42}434445#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE46static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,47unsigned long start, unsigned long end)48{49if (radix_enabled())50radix__flush_pmd_tlb_range(vma, start, end);51}5253#define __HAVE_ARCH_FLUSH_PUD_TLB_RANGE54static inline void flush_pud_tlb_range(struct vm_area_struct *vma,55unsigned long start, unsigned long end)56{57if (radix_enabled())58radix__flush_pud_tlb_range(vma, start, end);59}6061#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE62static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,63unsigned long start,64unsigned long end)65{66if (radix_enabled())67radix__flush_hugetlb_tlb_range(vma, start, end);68}6970static inline void flush_tlb_range(struct vm_area_struct *vma,71unsigned long start, unsigned long end)72{73if (radix_enabled())74radix__flush_tlb_range(vma, start, end);75}7677static 
inline void flush_tlb_kernel_range(unsigned long start,78unsigned long end)79{80if (radix_enabled())81radix__flush_tlb_kernel_range(start, end);82}8384static inline void local_flush_tlb_mm(struct mm_struct *mm)85{86if (radix_enabled())87radix__local_flush_tlb_mm(mm);88}8990static inline void local_flush_tlb_page(struct vm_area_struct *vma,91unsigned long vmaddr)92{93if (radix_enabled())94radix__local_flush_tlb_page(vma, vmaddr);95}9697static inline void local_flush_tlb_page_psize(struct mm_struct *mm,98unsigned long vmaddr, int psize)99{100if (radix_enabled())101radix__local_flush_tlb_page_psize(mm, vmaddr, psize);102}103104static inline void tlb_flush(struct mmu_gather *tlb)105{106if (radix_enabled())107radix__tlb_flush(tlb);108else109hash__tlb_flush(tlb);110}111112#ifdef CONFIG_SMP113static inline void flush_tlb_mm(struct mm_struct *mm)114{115if (radix_enabled())116radix__flush_tlb_mm(mm);117}118119static inline void flush_tlb_page(struct vm_area_struct *vma,120unsigned long vmaddr)121{122if (radix_enabled())123radix__flush_tlb_page(vma, vmaddr);124}125#else126#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)127#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)128#endif /* CONFIG_SMP */129130#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault131static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,132unsigned long address,133pte_t *ptep)134{135/*136* Book3S 64 does not require spurious fault flushes because the PTE137* must be re-fetched in case of an access permission problem. 
So the138* only reason for a spurious fault should be concurrent modification139* to the PTE, in which case the PTE will eventually be re-fetched by140* the MMU when it attempts the access again.141*142* See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table143* Entry, Setting a Reference or Change Bit or Upgrading Access144* Authority (PTE Subject to Atomic Hardware Updates):145*146* "If the only change being made to a valid PTE that is subject to147* atomic hardware updates is to set the Reference or Change bit to148* 1 or to upgrade access authority, a simpler sequence suffices149* because the translation hardware will refetch the PTE if an150* access is attempted for which the only problems were reference151* and/or change bits needing to be set or insufficient access152* authority."153*154* The nest MMU in POWER9 does not perform this PTE re-fetch, but155* it avoids the spurious fault problem by flushing the TLB before156* upgrading PTE permissions, see radix__ptep_set_access_flags.157*/158}159160static inline bool __pte_flags_need_flush(unsigned long oldval,161unsigned long newval)162{163unsigned long delta = oldval ^ newval;164165/*166* The return value of this function doesn't matter for hash,167* ptep_modify_prot_start() does a pte_update() which does or schedules168* any necessary hash table update and flush.169*/170if (!radix_enabled())171return true;172173/*174* We do not expect kernel mappings or non-PTEs or not-present PTEs.175*/176VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);177VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);178VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));179VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));180VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));181VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));182183/*184* Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.185*186* In theory, some changed software bits could be tolerated, in187* practice those should rarely if ever matter.188*/189190if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | 
_PAGE_ACCESSED))191return true;192193/*194* If any of the above was present in old but cleared in new, flush.195* With the exception of _PAGE_ACCESSED, don't worry about flushing196* if that was cleared (see the comment in ptep_clear_flush_young()).197*/198if ((delta & ~_PAGE_ACCESSED) & oldval)199return true;200201return false;202}203204static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)205{206return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));207}208#define pte_needs_flush pte_needs_flush209210static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)211{212return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));213}214#define huge_pmd_needs_flush huge_pmd_needs_flush215216extern bool tlbie_capable;217extern bool tlbie_enabled;218219static inline bool cputlb_use_tlbie(void)220{221return tlbie_enabled;222}223224#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */225226227