GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/include/asm/book3s/64/tlbflush.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

/* TLB flush actions. Used as argument to tlbiel_all() */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};

static inline void tlbiel_all(void)
{
	/*
	 * This is used for host machine check and bootup.
	 *
	 * This uses early_radix_enabled(), and implementations use
	 * early_cpu_has_feature() etc., because those work early in
	 * boot, and this is the machine check path, which is not
	 * performance critical.
	 */
	if (early_radix_enabled())
		radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}
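
/*
 * Editor's sketch (hypothetical, not kernel code): a minimal machine-check
 * style caller of tlbiel_all(). The function name and surrounding logic
 * are illustrative assumptions; only tlbiel_all() comes from this header.
 * Because cached translations cannot be trusted after a machine check,
 * the handler flushes everything local with all-TLB scope.
 */
#if 0
static void example_machine_check_recover(void)
{
	/* Cached translations may be corrupt; flush all local TLB entries. */
	tlbiel_all();
}
#endif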

static inline void tlbiel_all_lpid(bool radix)
{
	/*
	 * This is used for guest machine check.
	 */
	if (radix)
		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}


#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_pmd_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_PUD_TLB_RANGE
static inline void flush_pud_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_pud_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		radix__flush_hugetlb_tlb_range(vma, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_kernel_range(start, end);
}
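
/*
 * Editor's sketch (hypothetical, not kernel code): how a generic caller
 * uses flush_tlb_range() after modifying PTEs in [start, end). Note that
 * on hash MMUs the bodies above are intentionally empty: hash flushes
 * are issued when the hash page table entry itself is updated, so only
 * the radix path needs an explicit range flush here.
 */
#if 0
static void example_after_prot_change(struct vm_area_struct *vma,
				      unsigned long start, unsigned long end)
{
	/* ... PTEs in [start, end) were just write-protected ... */
	flush_tlb_range(vma, start, end);  /* no-op on hash, flush on radix */
}
#endif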

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		radix__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_tlb_page_psize(struct mm_struct *mm,
					      unsigned long vmaddr, int psize)
{
	if (radix_enabled())
		radix__local_flush_tlb_page_psize(mm, vmaddr, psize);
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		radix__tlb_flush(tlb);
	else
		hash__tlb_flush(tlb);
}
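
/*
 * Editor's sketch (hypothetical): tlb_flush() is not normally called
 * directly; the generic mmu_gather machinery invokes it when a gather
 * is finished. Roughly, under the assumption of the modern API (the
 * tlb_gather_mmu()/tlb_finish_mmu() signatures vary across kernel
 * versions):
 */
#if 0
void example_teardown(struct mm_struct *mm)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);	/* begin collecting pages to free */
	/* ... zap/unmap paths accumulate pages into &tlb ... */
	tlb_finish_mmu(&tlb);		/* eventually calls tlb_flush(&tlb) */
}
#endif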

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		radix__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */

#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep)
{
	/*
	 * Book3S 64 does not require spurious fault flushes because the PTE
	 * must be re-fetched in case of an access permission problem. So the
	 * only reason for a spurious fault should be concurrent modification
	 * to the PTE, in which case the PTE will eventually be re-fetched by
	 * the MMU when it attempts the access again.
	 *
	 * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
	 * Entry, Setting a Reference or Change Bit or Upgrading Access
	 * Authority (PTE Subject to Atomic Hardware Updates):
	 *
	 * "If the only change being made to a valid PTE that is subject to
	 *  atomic hardware updates is to set the Reference or Change bit to
	 *  1 or to upgrade access authority, a simpler sequence suffices
	 *  because the translation hardware will refetch the PTE if an
	 *  access is attempted for which the only problems were reference
	 *  and/or change bits needing to be set or insufficient access
	 *  authority."
	 *
	 * The nest MMU in POWER9 does not perform this PTE re-fetch, but
	 * it avoids the spurious fault problem by flushing the TLB before
	 * upgrading PTE permissions, see radix__ptep_set_access_flags.
	 */
}
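
/*
 * Editor's sketch (hypothetical): on an architecture without the PTE
 * re-fetch guarantee quoted above, this hook could not be empty; the
 * generic fallback instead evicts the stale translation so the MMU
 * reloads the updated PTE, roughly:
 */
#if 0
static inline void example_generic_fix_spurious_fault(struct vm_area_struct *vma,
						      unsigned long address,
						      pte_t *ptep)
{
	/* Drop the stale TLB entry; the next access refetches the PTE. */
	flush_tlb_page(vma, address);
}
#endif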

static inline bool __pte_flags_need_flush(unsigned long oldval,
					  unsigned long newval)
{
	unsigned long delta = oldval ^ newval;

	/*
	 * The return value of this function doesn't matter for hash:
	 * ptep_modify_prot_start() does a pte_update(), which does or
	 * schedules any necessary hash table update and flush.
	 */
	if (!radix_enabled())
		return true;

	/*
	 * We do not expect kernel mappings, non-PTE entries, or
	 * not-present PTEs.
	 */
	VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));

	/*
	 * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
	 *
	 * In theory, some changed software bits could be tolerated; in
	 * practice, those should rarely if ever matter.
	 */
	if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
		return true;

	/*
	 * If any of the above bits was set in old but cleared in new (a
	 * downgrade), flush. The exception is _PAGE_ACCESSED: no flush is
	 * needed when it is cleared (see the comment in
	 * ptep_clear_flush_young()).
	 */
	if ((delta & ~_PAGE_ACCESSED) & oldval)
		return true;

	return false;
}
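
/*
 * Editor's worked example (hypothetical values, not kernel code):
 * write-protecting a PTE clears _PAGE_WRITE, so delta contains
 * _PAGE_WRITE and that bit was set in oldval, meaning the downgrade
 * test fires and a flush is required. Merely setting _PAGE_ACCESSED
 * changes only a bit that was clear in oldval (and ACCESSED is exempt
 * anyway), so no flush is needed.
 */
#if 0
static void example_pte_flush_decisions(void)
{
	/* Hypothetical user PTE value with READ, PRESENT and PTE bits set. */
	unsigned long pte = _PAGE_READ | _PAGE_PRESENT | _PAGE_PTE;

	/* Downgrade RW -> RO: _PAGE_WRITE set in old, cleared in new. */
	bool a = __pte_flags_need_flush(pte | _PAGE_WRITE, pte);	/* true */

	/* Setting _PAGE_ACCESSED only: bit not set in old, no downgrade. */
	bool b = __pte_flags_need_flush(pte, pte | _PAGE_ACCESSED);	/* false */
}
#endif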

static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
}
#define pte_needs_flush pte_needs_flush

static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
}
#define huge_pmd_needs_flush huge_pmd_needs_flush

extern bool tlbie_capable;
extern bool tlbie_enabled;

static inline bool cputlb_use_tlbie(void)
{
	return tlbie_enabled;
}
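
/*
 * Editor's sketch (hypothetical): cputlb_use_tlbie() gates whether flush
 * implementations use the broadcast tlbie instruction or fall back to
 * per-CPU tlbiel. The helper below is illustrative only; the real
 * selection logic lives in the radix/hash flush implementations.
 */
#if 0
static void example_flush_global(void)
{
	if (cputlb_use_tlbie()) {
		/* One broadcast invalidation, visible to all CPUs. */
	} else {
		/* Local tlbiel here; other CPUs must be told to do the same. */
	}
}
#endif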

#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */