GitHub Repository: torvalds/linux
Path: blob/master/arch/sparc/mm/tlb.c

// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>
#include <linux/pagemap.h>

#include <kunit/visibility.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code. */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

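/* Drain this CPU's pending TLB batch: update the TSB for the batch's mm,
 * then flush the queued virtual addresses from the TLB (cross-calling the
 * other CPUs on SMP) and reset the batch.
 */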
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}
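
/* Lazy MMU mode: TLB flushes are only batched between
 * arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode(); preemption is
 * disabled for the duration so the per-CPU batch stays consistent.
 */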
void arch_enter_lazy_mmu_mode(void)
{
	preempt_disable();
}
/* For lazy_mmu_mode KUnit tests */
EXPORT_SYMBOL_IF_KUNIT(arch_enter_lazy_mmu_mode);

void arch_flush_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
}
EXPORT_SYMBOL_IF_KUNIT(arch_flush_lazy_mmu_mode);

void arch_leave_lazy_mmu_mode(void)
{
	arch_flush_lazy_mmu_mode();
	preempt_enable();
}
EXPORT_SYMBOL_IF_KUNIT(arch_leave_lazy_mmu_mode);
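
/* Queue one user virtual address for a deferred TLB/TSB flush.  Outside of
 * lazy MMU mode the flush happens immediately; a change of mm or of the
 * hugepage shift, or a full batch, forces the pending entries out first.
 */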
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!is_lazy_mmu_mode_active()) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}
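
/* Called when a user pte is replaced.  For a dirty page-cache page that may
 * have a D-cache alias (the user virtual address and the kernel mapping
 * differ in bit 13), flush the D-cache for the folio first, then queue the
 * address for a TLB flush unless the whole mm is being torn down.
 */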
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;
		struct folio *folio;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		folio = page_folio(page);
		mapping = folio_flush_mapping(folio);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_folio_all(mm, folio);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
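/* The pmd being replaced pointed to a table of base pages: walk those ptes
 * and queue a TLB flush for every valid entry.
 */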
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	if (!pte)
		return;
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}
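
/* Keep mm->context.thp_pte_count and hugetlb_pte_count in sync when a pmd
 * changes between huge and non-huge, and queue TLB flushes for whatever the
 * old pmd mapped.
 */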
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
			   pmd_t orig, pmd_t pmd)
{
	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_pmd(pmd))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_pmd(orig))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}
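
/* Store the new pmd value, then do the accounting and flushing for the one
 * it replaced.
 */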
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;
	__set_pmd_acct(mm, addr, orig, pmd);
}
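
/* Atomically replace *pmdp with the new value via a cmpxchg loop, returning
 * the old pmd, with the same accounting as set_pmd_at().
 */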
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
	__set_pmd_acct(vma->vm_mm, address, old, pmd);

	return old;
}

/*
 * This routine is only called when splitting a THP
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old, entry;

	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
	old = pmdp_establish(vma, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_pmd(entry))
		(vma->vm_mm)->context.thp_pte_count--;

	return old;
}
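
/* Stash the preallocated pte page table behind the pmd on the mm's deposit
 * list so that a later THP split can withdraw and reuse it.
 */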
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}
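
/* Take a previously deposited pte page table back off the list in FIFO
 * order and clear its first two entries before handing it back.
 */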
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */