GitHub Repository: torvalds/linux
Path: blob/master/mm/kasan/init.c
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN shadow initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <[email protected]>
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/pgalloc.h>

#include <asm/page.h>

#include "kasan.h"

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory. The entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as the zero shadow, to cover large ranges of
 *     memory that are allowed to be accessed but are not handled by
 *     KASAN (vmalloc/vmemmap ...).
 */
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;

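/*
 * Note on scaling: each shadow byte covers 1 << KASAN_SHADOW_SCALE_SHIFT
 * bytes of memory (e.g. 8 for generic KASAN), hence the
 * "size >> KASAN_SHADOW_SCALE_SHIFT" conversions further down.
 *
 * The kasan_*_table() helpers below recognize page-table entries that
 * point at the shared early shadow tables, so the removal code knows
 * which tables are shared and must never be freed.
 */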
#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
	return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
	return false;
}
#endif
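/*
 * The extra PTE_HWTABLE_PTRS entries leave room for architectures (such
 * as 32-bit Arm, where the constant is non-zero) that keep a hardware
 * copy of the PTE table alongside the Linux one; elsewhere it is 0.
 */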
pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]
	__page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}

static inline bool kasan_early_shadow_page_entry(pte_t pte)
{
	return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
}

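/*
 * Allocate page-table memory before the slab allocator is up: memblock
 * is the only allocator available that early. The allocation is aligned
 * to its own size and panics on failure, since KASAN cannot continue
 * without shadow page tables.
 */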
static __init void *early_alloc(size_t size, int node)
{
	void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					MEMBLOCK_ALLOC_ACCESSIBLE, node);

	if (!ptr)
		panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
			__func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));

	return ptr;
}

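/*
 * Point every PTE covering [addr, end) at the shared early shadow page.
 * The mapping is created write-protected: the zero shadow is shared by
 * all such ranges, so writes (i.e. poisoning) must fault rather than
 * corrupt it.
 */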
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)),
				PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}

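/*
 * Populate the PMD level. A range that covers a whole PMD-aligned block
 * can link directly to the shared early shadow PTE table; a partial
 * block needs its own PTE table, taken from slab once available or from
 * memblock during early boot, which is then filled with zero-shadow
 * PTEs.
 */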
static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pmd_none(*pmd)) {
			pte_t *p;

			if (slab_is_available())
				p = pte_alloc_one_kernel(&init_mm);
			else {
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
				kernel_pte_init(p);
			}
			if (!p)
				return -ENOMEM;

			pmd_populate_kernel(&init_mm, pmd, p);
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

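/*
 * Same pattern one level up: whole PUD-aligned blocks share the early
 * shadow PMD table (which in turn points at the shared PTE table), and
 * partial blocks get a freshly allocated PMD table to recurse into.
 */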
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pmd_t *p;

			if (slab_is_available()) {
				p = pmd_alloc(&init_mm, pud, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
				pmd_init(p);
				pud_populate(&init_mm, pud, p);
			}
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return 0;
}

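/*
 * And one more level up, for 5-level page tables: whole P4D-aligned
 * blocks are wired to the shared early shadow PUD/PMD/PTE tables, while
 * partial blocks allocate a real PUD table and recurse.
 */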
static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			p4d_populate_kernel(addr, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (p4d_none(*p4d)) {
			pud_t *p;

			if (slab_is_available()) {
				p = pud_alloc(&init_mm, p4d, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
				pud_init(p);
				p4d_populate_kernel(addr, p4d, p);
			}
		}
		zero_pud_populate(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	return 0;
}

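/*
 * A sketch of the intended use (illustrative, not a verbatim caller):
 * during early boot an architecture can cover its entire shadow region
 * with the zero page, assuming it defines KASAN_SHADOW_START and
 * KASAN_SHADOW_END markers for that region:
 *
 *	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
 *				    (void *)KASAN_SHADOW_END);
 *
 * Real shadow memory for ranges that need poisoning is populated later,
 * replacing these zero-shadow mappings.
 */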
/**
 * kasan_populate_early_shadow - populate shadow memory region with
 *                               kasan_early_shadow_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 */
int __ref kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_early_shadow_pud should already be populated
			 * with pmds at this point. The [pud,pmd]_populate*()
			 * calls below are needed only for 3- and 2-level page
			 * tables, where we don't have puds/pmds; there,
			 * pgd_populate() and pud_populate() are no-ops.
			 */
			pgd_populate_kernel(addr, pgd,
					lm_alias(kasan_early_shadow_p4d));
			p4d = p4d_offset(pgd, addr);
			p4d_populate_kernel(addr, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pgd_none(*pgd)) {
			p4d_t *p;

			if (slab_is_available()) {
				p = p4d_alloc(&init_mm, pgd, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pgd_populate_kernel(addr, pgd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);

	return 0;
}

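/*
 * The kasan_free_*() helpers below tear down one level of page table:
 * each scans the given table and, only if every entry in it is empty,
 * frees the table's page and clears the entry in the level above that
 * pointed to it.
 */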
static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(ptep_get(pte)))
			return;
	}

	pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
	pmd_clear(pmd);
}

static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
	pud_clear(pud);
}

static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
	p4d_clear(p4d);
}

static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
	p4d_t *p4d;
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		p4d = p4d_start + i;
		if (!p4d_none(*p4d))
			return;
	}

	p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
	pgd_clear(pgd);
}

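/*
 * Leaf level of the removal walk: clear PTEs that map the shared zero
 * shadow page. Anything else mapped here means the caller left real
 * shadow memory behind, which is a bug; warn and keep the entry in
 * place.
 */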
static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
				unsigned long end)
{
	unsigned long next;
	pte_t ptent;

	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		ptent = ptep_get(pte);

		if (!pte_present(ptent))
			continue;

		if (WARN_ON(!kasan_early_shadow_page_entry(ptent)))
			continue;
		pte_clear(&init_mm, addr, pte);
	}
}

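/*
 * Upper levels of the removal walk: an entry that points at a shared
 * early shadow table can simply be cleared when the range spans it
 * entirely; otherwise descend, clear the covered zero-shadow entries,
 * and free any table that ends up completely empty.
 */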
static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pmd++) {
		pte_t *pte;

		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (kasan_pte_table(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				pmd_clear(pmd);
				continue;
			}
		}
		pte = pte_offset_kernel(pmd, addr);
		kasan_remove_pte_table(pte, addr, next);
		kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
	}
}

static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pud++) {
		pmd_t *pmd, *pmd_base;

		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (kasan_pmd_table(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				pud_clear(pud);
				continue;
			}
		}
		pmd = pmd_offset(pud, addr);
		pmd_base = pmd_offset(pud, 0);
		kasan_remove_pmd_table(pmd, addr, next);
		kasan_free_pmd(pmd_base, pud);
	}
}

static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, p4d++) {
		pud_t *pud;

		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		if (kasan_pud_table(*p4d)) {
			if (IS_ALIGNED(addr, P4D_SIZE) &&
			    IS_ALIGNED(next, P4D_SIZE)) {
				p4d_clear(p4d);
				continue;
			}
		}
		pud = pud_offset(p4d, addr);
		kasan_remove_pud_table(pud, addr, next);
		kasan_free_pud(pud_offset(p4d, 0), p4d);
	}
}

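/*
 * kasan_remove_zero_shadow - undo kasan_add_zero_shadow() for the given
 * range. Both start and size must be aligned to
 * KASAN_MEMORY_PER_SHADOW_PAGE (the amount of memory covered by one
 * shadow page), so that the affected shadow is always whole pages.
 */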
void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	unsigned long addr, end, next;
	pgd_t *pgd;

	addr = (unsigned long)kasan_mem_to_shadow(start);
	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
		return;

	for (; addr < end; addr = next) {
		p4d_t *p4d;

		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (kasan_p4d_table(*pgd)) {
			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
			    IS_ALIGNED(next, PGDIR_SIZE)) {
				pgd_clear(pgd);
				continue;
			}
		}

		p4d = p4d_offset(pgd, addr);
		kasan_remove_p4d_table(p4d, addr, next);
		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
	}
}

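/*
 * kasan_add_zero_shadow - back the shadow for [start, start + size) with
 * the shared zero page, so the range can be accessed before (or without)
 * real shadow memory; used, for example, when memory is hot-added or
 * memremapped. On failure, any partially populated shadow is rolled
 * back.
 */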
int kasan_add_zero_shadow(void *start, unsigned long size)
{
	int ret;
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
		return -EINVAL;

	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
	if (ret)
		kasan_remove_zero_shadow(start, size);
	return ret;
}