GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/mm/kasan_init_64.c
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kasan: " fmt

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
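/*
 * Allocate naturally aligned memory from memblock above MAX_DMA_ADDRESS on
 * the requested node. Panic on failure only when the caller asks for it.
 */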
static __init void *early_alloc(size_t size, int nid, bool should_panic)
{
        void *ptr = memblock_alloc_try_nid(size, size,
                        __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);

        if (!ptr && should_panic)
                panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
                      (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));

        return ptr;
}
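/*
 * Populate shadow memory for [addr, end) one PTE at a time. When PSE is
 * available and the range covers exactly one aligned PMD, try a 2M huge
 * mapping first and fall back to 4K pages if that fails.
 */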
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
                                      unsigned long end, int nid)
{
        pte_t *pte;

        if (pmd_none(*pmd)) {
                void *p;

                if (boot_cpu_has(X86_FEATURE_PSE) &&
                    ((end - addr) == PMD_SIZE) &&
                    IS_ALIGNED(addr, PMD_SIZE)) {
                        p = early_alloc(PMD_SIZE, nid, false);
                        if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
                                return;
                        memblock_free(p, PMD_SIZE);
                }

                p = early_alloc(PAGE_SIZE, nid, true);
                pmd_populate_kernel(&init_mm, pmd, p);
        }

        pte = pte_offset_kernel(pmd, addr);
        do {
                pte_t entry;
                void *p;

                if (!pte_none(*pte))
                        continue;

                p = early_alloc(PAGE_SIZE, nid, true);
                entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
                set_pte_at(&init_mm, addr, pte, entry);
        } while (pte++, addr += PAGE_SIZE, addr != end);
}
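/*
 * Same as kasan_populate_pmd(), one level up: try a 1G mapping when gbpages
 * are supported and the range covers exactly one aligned PUD, otherwise
 * descend into the PMDs.
 */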
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
                                      unsigned long end, int nid)
{
        pmd_t *pmd;
        unsigned long next;

        if (pud_none(*pud)) {
                void *p;

                if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
                    ((end - addr) == PUD_SIZE) &&
                    IS_ALIGNED(addr, PUD_SIZE)) {
                        p = early_alloc(PUD_SIZE, nid, false);
                        if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
                                return;
                        memblock_free(p, PUD_SIZE);
                }

                p = early_alloc(PAGE_SIZE, nid, true);
                pud_populate(&init_mm, pud, p);
        }

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (!pmd_leaf(*pmd))
                        kasan_populate_pmd(pmd, addr, next, nid);
        } while (pmd++, addr = next, addr != end);
}
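/*
 * The p4d and pgd levels have no huge-page shortcut; just allocate the
 * next-level table when the entry is empty and descend.
 */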
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
                                      unsigned long end, int nid)
{
        pud_t *pud;
        unsigned long next;

        if (p4d_none(*p4d)) {
                void *p = early_alloc(PAGE_SIZE, nid, true);

                p4d_populate(&init_mm, p4d, p);
        }

        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (!pud_leaf(*pud))
                        kasan_populate_pud(pud, addr, next, nid);
        } while (pud++, addr = next, addr != end);
}

static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
                                      unsigned long end, int nid)
{
        void *p;
        p4d_t *p4d;
        unsigned long next;

        if (pgd_none(*pgd)) {
                p = early_alloc(PAGE_SIZE, nid, true);
                pgd_populate(&init_mm, pgd, p);
        }

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                kasan_populate_p4d(p4d, addr, next, nid);
        } while (p4d++, addr = next, addr != end);
}
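/*
 * Populate real shadow memory for the page-aligned range [addr, end) by
 * walking the kernel page tables from the PGD down.
 */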
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
                                         int nid)
{
        pgd_t *pgd;
        unsigned long next;

        addr = addr & PAGE_MASK;
        end = round_up(end, PAGE_SIZE);
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                kasan_populate_pgd(pgd, addr, next, nid);
        } while (pgd++, addr = next, addr != end);
}
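/*
 * Populate the shadow for one physically mapped pfn range from pfn_mapped[].
 */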
static void __init map_range(struct range *range)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
        end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

        kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}
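/*
 * Remove the early shadow mappings for [start, end). The last PGD entry is
 * shared with other mappings (see comment in kasan_init()), so its tail is
 * cleared at P4D granularity rather than wholesale.
 */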
static void __init clear_pgds(unsigned long start,
                              unsigned long end)
{
        pgd_t *pgd;
        /* See comment in kasan_init() */
        unsigned long pgd_end = end & PGDIR_MASK;

        for (; start < pgd_end; start += PGDIR_SIZE) {
                pgd = pgd_offset_k(start);
                /*
                 * With folded p4d, pgd_clear() is nop, use p4d_clear()
                 * instead.
                 */
                if (pgtable_l5_enabled())
                        pgd_clear(pgd);
                else
                        p4d_clear(p4d_offset(pgd, start));
        }

        pgd = pgd_offset_k(start);
        for (; start < end; start += P4D_SIZE)
                p4d_clear(p4d_offset(pgd, start));
}
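/*
 * Early variant of p4d_offset(): with 4-level paging the p4d is folded into
 * the pgd, and with 5-level paging the p4d table is reached through the
 * kernel image mapping (__START_KERNEL_map) rather than through __va().
 */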
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
        unsigned long p4d;

        if (!pgtable_l5_enabled())
                return (p4d_t *)pgd;

        p4d = pgd_val(*pgd) & PTE_PFN_MASK;
        p4d += __START_KERNEL_map - phys_base;
        return (p4d_t *)p4d + p4d_index(addr);
}
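/*
 * Install the early shadow p4d table into an empty pgd, then point every
 * empty p4d entry in [addr, end) at the early shadow pud table so the whole
 * range reads as the zero shadow page.
 */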
static void __init kasan_early_p4d_populate(pgd_t *pgd,
                                            unsigned long addr,
                                            unsigned long end)
{
        pgd_t pgd_entry;
        p4d_t *p4d, p4d_entry;
        unsigned long next;

        if (pgd_none(*pgd)) {
                pgd_entry = __pgd(_KERNPG_TABLE |
                                  __pa_nodebug(kasan_early_shadow_p4d));
                set_pgd(pgd, pgd_entry);
        }

        p4d = early_p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);

                if (!p4d_none(*p4d))
                        continue;

                p4d_entry = __p4d(_KERNPG_TABLE |
                                  __pa_nodebug(kasan_early_shadow_pud));
                set_p4d(p4d, p4d_entry);
        } while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}
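/*
 * Map the whole KASAN shadow region to the early (zero) shadow in the given
 * top-level page table.
 */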
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
        /* See comment in kasan_init() */
        unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
        unsigned long end = KASAN_SHADOW_END;
        unsigned long next;

        pgd += pgd_index(addr);
        do {
                next = pgd_addr_end(addr, end);
                kasan_early_p4d_populate(pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
}
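/*
 * For CONFIG_KASAN_VMALLOC: allocate only the pgd/p4d levels of the vmalloc
 * shadow so the entries get synced into every page table; the lower levels
 * are populated on demand as vmalloc memory is used.
 */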
static void __init kasan_shallow_populate_p4ds(pgd_t *pgd,
                                               unsigned long addr,
                                               unsigned long end)
{
        p4d_t *p4d;
        unsigned long next;
        void *p;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);

                if (p4d_none(*p4d)) {
                        p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
                        p4d_populate(&init_mm, p4d, p);
                }
        } while (p4d++, addr = next, addr != end);
}

static void __init kasan_shallow_populate_pgds(void *start, void *end)
{
        unsigned long addr, next;
        pgd_t *pgd;
        void *p;

        addr = (unsigned long)start;
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, (unsigned long)end);

                if (pgd_none(*pgd)) {
                        p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
                        pgd_populate(&init_mm, pgd, p);
                }

                /*
                 * We need to populate p4ds to be synced when running in
                 * four-level mode - see sync_global_pgds_l4().
                 */
                kasan_shallow_populate_p4ds(pgd, addr, next);
        } while (pgd++, addr = next, addr != (unsigned long)end);
}
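/*
 * Early boot: wire each level of the early shadow page tables to the level
 * below (ending at kasan_early_shadow_page) and map the shadow region in
 * both early_top_pgt and init_top_pgt.
 */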
void __init kasan_early_init(void)
{
        int i;
        pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
                                __PAGE_KERNEL | _PAGE_ENC;
        pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
        pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
        p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;

        /* Mask out unsupported __PAGE_KERNEL bits: */
        pte_val &= __default_kernel_pte_mask;
        pmd_val &= __default_kernel_pte_mask;
        pud_val &= __default_kernel_pte_mask;
        p4d_val &= __default_kernel_pte_mask;

        for (i = 0; i < PTRS_PER_PTE; i++)
                kasan_early_shadow_pte[i] = __pte(pte_val);

        for (i = 0; i < PTRS_PER_PMD; i++)
                kasan_early_shadow_pmd[i] = __pmd(pmd_val);

        for (i = 0; i < PTRS_PER_PUD; i++)
                kasan_early_shadow_pud[i] = __pud(pud_val);

        for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
                kasan_early_shadow_p4d[i] = __p4d(p4d_val);

        kasan_map_early_shadow(early_top_pgt);
        kasan_map_early_shadow(init_top_pgt);
}
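/* Shadow address of @va, rounded down/up to a page boundary. */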
static unsigned long kasan_mem_to_shadow_align_down(unsigned long va)
{
        unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va);

        return round_down(shadow, PAGE_SIZE);
}

static unsigned long kasan_mem_to_shadow_align_up(unsigned long va)
{
        unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va);

        return round_up(shadow, PAGE_SIZE);
}
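/*
 * Populate real shadow memory covering the virtual range [va, va + size).
 */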
void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid)
{
        unsigned long shadow_start, shadow_end;

        shadow_start = kasan_mem_to_shadow_align_down((unsigned long)va);
        shadow_end = kasan_mem_to_shadow_align_up((unsigned long)va + size);
        kasan_populate_shadow(shadow_start, shadow_end, nid);
}
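/*
 * Main KASAN setup: switch to early_top_pgt while the shadow mappings are
 * rebuilt, populate real shadow for physical memory, the CPU entry area and
 * the kernel image, map everything else to the zero shadow, then switch to
 * init_top_pgt and write-protect the zero shadow page.
 */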
void __init kasan_init(void)
{
        unsigned long shadow_cea_begin, shadow_cea_per_cpu_begin, shadow_cea_end;
        int i;

        memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

        /*
         * We use the same shadow offset for 4- and 5-level paging to
         * facilitate boot-time switching between paging modes.
         * As a result, in 5-level paging mode KASAN_SHADOW_START and
         * KASAN_SHADOW_END are not aligned to a PGD boundary.
         *
         * KASAN_SHADOW_START doesn't share a PGD with anything else.
         * We claim the whole PGD entry to make things easier.
         *
         * KASAN_SHADOW_END lands in the last PGD entry and it collides with
         * a bunch of things like kernel code, modules, EFI mapping, etc.
         * We need to take extra steps to not overwrite them.
         */
        if (pgtable_l5_enabled()) {
                void *ptr;

                ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
                memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
                set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
                        __pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
        }

        load_cr3(early_top_pgt);
        __flush_tlb_all();

        clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

        kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));

        for (i = 0; i < E820_MAX_ENTRIES; i++) {
                if (pfn_mapped[i].end == 0)
                        break;

                map_range(&pfn_mapped[i]);
        }

        shadow_cea_begin = kasan_mem_to_shadow_align_down(CPU_ENTRY_AREA_BASE);
        shadow_cea_per_cpu_begin = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_PER_CPU);
        shadow_cea_end = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_BASE +
                                                      CPU_ENTRY_AREA_MAP_SIZE);

        kasan_populate_early_shadow(
                kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                kasan_mem_to_shadow((void *)VMALLOC_START));

        /*
         * If we're in full vmalloc mode, don't back vmalloc space with early
         * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
         * the global table and we can populate the lower levels on demand.
         */
        if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
                kasan_shallow_populate_pgds(
                        kasan_mem_to_shadow((void *)VMALLOC_START),
                        kasan_mem_to_shadow((void *)VMALLOC_END));
        else
                kasan_populate_early_shadow(
                        kasan_mem_to_shadow((void *)VMALLOC_START),
                        kasan_mem_to_shadow((void *)VMALLOC_END));

        kasan_populate_early_shadow(
                kasan_mem_to_shadow((void *)VMALLOC_END + 1),
                (void *)shadow_cea_begin);

        /*
         * Populate the shadow for the shared portion of the CPU entry area.
         * Shadows for the per-CPU areas are mapped on-demand, as each CPU's
         * area is randomly placed somewhere in the 512GiB range and mapping
         * the entire 512GiB range is prohibitively expensive.
         */
        kasan_populate_shadow(shadow_cea_begin,
                              shadow_cea_per_cpu_begin, 0);

        kasan_populate_early_shadow((void *)shadow_cea_end,
                        kasan_mem_to_shadow((void *)__START_KERNEL_map));

        kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
                              (unsigned long)kasan_mem_to_shadow(_end),
                              early_pfn_to_nid(__pa(_stext)));

        kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);

        load_cr3(init_top_pgt);
        __flush_tlb_all();

        /*
         * kasan_early_shadow_page has been used as early shadow memory, thus
         * it may contain some garbage. Now we can clear and write protect it,
         * since after the TLB flush no one should write to it.
         */
        memset(kasan_early_shadow_page, 0, PAGE_SIZE);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte_t pte;
                pgprot_t prot;

                prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
                pgprot_val(prot) &= __default_kernel_pte_mask;

                pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
                set_pte(&kasan_early_shadow_pte[i], pte);
        }
        /* Flush TLBs again to be sure that write protection applied. */
        __flush_tlb_all();

        init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized\n");
}