GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/mm/book3s64/mmu_context.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/cpu.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "internal.h"

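/*
 * A single IDA backs both ID spaces: hash MMU context IDs and radix
 * PIDs are all drawn from mmu_context_ida via alloc_context_id().
 * Only one MMU mode is active on a given system, so the two users
 * never collide at runtime.
 */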
static DEFINE_IDA(mmu_context_ida);

static int alloc_context_id(int min_id, int max_id)
{
	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}

#ifdef CONFIG_PPC_64S_HASH_MMU
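/*
 * Pin a specific context ID at boot. The allocation must return
 * exactly @id; anything else means the ID was already taken, which
 * the WARN below reports.
 */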
void __init hash__reserve_context_id(int id)
{
	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

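/*
 * Allocate a fresh user context ID. The upper bound depends on
 * whether the CPU supports 68-bit virtual addresses; without
 * MMU_FTR_68_BIT_VA the usable context space is smaller.
 */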
int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);
#endif

#ifdef CONFIG_PPC_64S_HASH_MMU
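/*
 * Allocate a fresh set of context IDs for an mm, reusing the slot
 * layout already recorded in ctx->extended_id. Returns the new
 * ctx->id on success or a negative errno on failure.
 */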
static int realloc_context_ids(mm_context_t *ctx)
{
	int i, id;

	/*
	 * id 0 (aka ctx->id) is special: we always allocate a new one, even
	 * if there wasn't one allocated previously (which happens in the
	 * exec case, where ctx is newly allocated).
	 *
	 * We have to be a bit careful here. We must keep the existing ids in
	 * the array, so that we can test if they're non-zero to decide if we
	 * need to allocate a new one. However, in case of error we must free
	 * the ids we've allocated but *not* any of the existing ones (or risk
	 * a use-after-free). That's why we decrement i at the start of the
	 * error handling loop: to skip the id that we just tested but
	 * couldn't reallocate.
	 */
	for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
		if (i == 0 || ctx->extended_id[i]) {
			id = hash__alloc_context_id();
			if (id < 0)
				goto error;

			ctx->extended_id[i] = id;
		}
	}

	/* The caller expects us to return id */
	return ctx->id;

error:
	for (i--; i >= 0; i--) {
		if (ctx->extended_id[i])
			ida_free(&mmu_context_ida, ctx->extended_id[i]);
	}

	return id;
}

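/*
 * Set up the hash-MMU side of a new mm. A fresh mm (exec, id == 0)
 * gets zeroed state and new slice defaults; a forked mm inherits the
 * hash context details from current->mm.
 */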
static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
					   GFP_KERNEL);
	if (!mm->context.hash_context)
		return -ENOMEM;

	/*
	 * The old code would re-promote on fork; we don't do that when using
	 * slices, as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which
	 * will have id == 0) and don't alter context slices inherited via
	 * fork (which will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0) {
		memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
		slice_init_new_context_exec(mm);
	} else {
		/* This is fork. Copy hash_context details from current->mm */
		memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
		/* Inherit subpage prot details if the parent has them. */
		if (current->mm->context.hash_context->spt) {
			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
								GFP_KERNEL);
			if (!mm->context.hash_context->spt) {
				kfree(mm->context.hash_context);
				return -ENOMEM;
			}
		}
#endif
	}

	index = realloc_context_ids(&mm->context);
	if (index < 0) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		kfree(mm->context.hash_context->spt);
#endif
		kfree(mm->context.hash_context);
		return index;
	}

	pkey_mm_init(mm);
	return index;
}

void hash__setup_new_exec(void)
{
	slice_setup_new_exec();
}
#else
static inline int hash__init_new_context(struct mm_struct *mm)
{
	BUILD_BUG();
	return 0;
}
#endif

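/*
 * Allocate a radix PID for the mm and install its process table
 * entry. The PID space is bounded by mmu_pid_bits; allocation starts
 * at mmu_base_pid, leaving lower PIDs reserved.
 */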
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with the subsequent update of the PID
	 * register (at which point HW can start loading/caching the
	 * entry) and the corresponding load by the MMU from the L2
	 * cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

#ifdef CONFIG_PPC_64S_HASH_MMU
	mm->context.hash_context = NULL;
#endif

	return index;
}

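/*
 * Arch hook called for each new mm: pick the radix or hash path for
 * ID allocation, then initialize the bookkeeping common to both.
 */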
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

	mm->context.pte_frag = NULL;
	mm->context.pmd_frag = NULL;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);
	atomic_set(&mm->context.copros, 0);

	return 0;
}

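/* Return a single context ID to the allocator. */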
void __destroy_context(int context_id)
{
	ida_free(&mmu_context_ida, context_id);
}
EXPORT_SYMBOL_GPL(__destroy_context);

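/*
 * Free every ID owned by a context: just ctx->id under radix, or the
 * whole extended_id array plus the hash_context itself under hash.
 */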
static void destroy_contexts(mm_context_t *ctx)
{
	if (radix_enabled()) {
		ida_free(&mmu_context_ida, ctx->id);
	} else {
#ifdef CONFIG_PPC_64S_HASH_MMU
		int index, context_id;

		for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
			context_id = ctx->extended_id[index];
			if (context_id)
				ida_free(&mmu_context_ida, context_id);
		}
		kfree(ctx->hash_context);
#else
		BUILD_BUG(); // radix_enabled() should be constant true
#endif
	}
}

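/*
 * Tear down the mm's cached PMD fragment page. The offset encoded in
 * the low bits of @pmd_frag tells how many fragments were handed
 * out; drop the references held for the unused remainder, and free
 * the page if that was the last reference.
 */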
static void pmd_frag_destroy(void *pmd_frag)
{
	int count;
	struct ptdesc *ptdesc;

	ptdesc = virt_to_ptdesc(pmd_frag);
	/* drop all the pending references */
	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
	/* We allow PMD_FRAG_NR fragments from a PMD page */
	if (atomic_sub_and_test(PMD_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
		pagetable_dtor(ptdesc);
		pagetable_free(ptdesc);
	}
}

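/* Drop the mm's cached PTE and PMD fragment pages, if any. */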
static void destroy_pagetable_cache(struct mm_struct *mm)
{
	void *frag;

	frag = mm->context.pte_frag;
	if (frag)
		pte_frag_destroy(frag);

	frag = mm->context.pmd_frag;
	if (frag)
		pmd_frag_destroy(frag);
}

void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	/*
	 * For tasks which were successfully initialized we end up calling
	 * arch_exit_mmap(), which clears the process table entry. And
	 * arch_exit_mmap() is called before the required fullmm TLB flush,
	 * which does a RIC=2 flush. Hence for an initialized task, any
	 * cached process table entries are cleared as well.
	 *
	 * The condition below handles the error case during task init. We
	 * have set the process table entry early, and if we fail task
	 * initialization we need to ensure the process table entry is
	 * zeroed. We need not worry about process table entry caches,
	 * because the task never ran with this PID value.
	 */
	if (radix_enabled())
		process_tb[mm->context.id].prtb0 = 0;
	else
		subpage_prot_free(mm);
	destroy_contexts(&mm->context);
	mm->context.id = MMU_NO_CONTEXT;
}

void arch_exit_mmap(struct mm_struct *mm)
{
	destroy_pagetable_cache(mm);

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However, we know that at least the P9
		 * implementation will avoid caching an entry with an
		 * invalid RTS field, and 0 is invalid. So this will do.
		 *
		 * This runs before the "fullmm" tlb flush in exit_mmap,
		 * which does a RIC=2 tlbie to clear the process table
		 * entry. See the "fullmm" comments in tlb-radix.c.
		 *
		 * No barrier is required here after the store, because
		 * this process will do the invalidate, which starts with
		 * ptesync.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}

#ifdef CONFIG_PPC_RADIX_MMU
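/*
 * Switching context under radix is just a PID register update; the
 * isync ensures the new PID is in effect before any subsequent
 * instructions execute.
 */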
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	mtspr(SPRN_PID, next->context.id);
	isync();
}
#endif

/**
 * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
 *
 * This clears the CPU from mm_cpumask for all processes, and then flushes the
 * local TLB to ensure TLB coherency in case the CPU is onlined again.
 *
 * KVM guest translations are not necessarily flushed here. If KVM started
 * using mm_cpumask or the Linux APIs which do, this would have to be resolved.
 */
#ifdef CONFIG_HOTPLUG_CPU
void cleanup_cpu_mmu_context(void)
{
	int cpu = smp_processor_id();

	clear_tasks_mm_cpumask(cpu);
	tlbiel_all();
}
#endif