GitHub Repository: torvalds/linux
Path: blob/master/mm/kmsan/shadow.c
// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN shadow implementation.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <asm/kmsan.h>
#include <asm/tlbflush.h>
#include <linux/cacheflush.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/stddef.h>

#include "../internal.h"
#include "kmsan.h"

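/*
 * KMSAN metadata model (a summary, not upstream wording): each data page has
 * a shadow page and an origin page. A set shadow bit means the corresponding
 * bit of the data is uninitialized; origin pages hold depot_stack_handle_t
 * values, one per KMSAN_ORIGIN_SIZE (4-byte) granule, identifying where the
 * uninitialized value was created. For ordinary pages the metadata hangs off
 * struct page via the fields accessed by the two macros below; vmalloc and
 * module areas use dedicated address ranges instead (see vmalloc_meta()).
 */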
#define shadow_page_for(page) ((page)->kmsan_shadow)

#define origin_page_for(page) ((page)->kmsan_origin)

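/*
 * Helpers returning the kernel virtual addresses of a page's shadow/origin
 * pages, and for checking or clearing the metadata links themselves.
 */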
static void *shadow_ptr_for(struct page *page)
{
	return page_address(shadow_page_for(page));
}

static void *origin_ptr_for(struct page *page)
{
	return page_address(origin_page_for(page));
}

static bool page_has_metadata(struct page *page)
{
	return shadow_page_for(page) && origin_page_for(page);
}

static void set_no_shadow_origin_page(struct page *page)
{
	shadow_page_for(page) = NULL;
	origin_page_for(page) = NULL;
}

/*
 * Dummy load and store pages to be used when the real metadata is unavailable.
 * There are separate pages for loads and stores, so that every load returns a
 * zero, and every store doesn't affect other loads.
 */
static char dummy_load_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static char dummy_store_page[PAGE_SIZE] __aligned(PAGE_SIZE);

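/*
 * vmalloc and module metadata is not attached to struct page; it lives in
 * dedicated address ranges at a fixed offset from the data. For example, the
 * shadow of VMALLOC_START + 0x1000 is at KMSAN_VMALLOC_SHADOW_START + 0x1000.
 * Returns 0 for addresses outside both ranges.
 */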
static unsigned long vmalloc_meta(void *addr, bool is_origin)
{
	unsigned long addr64 = (unsigned long)addr, off;

	KMSAN_WARN_ON(is_origin && !IS_ALIGNED(addr64, KMSAN_ORIGIN_SIZE));
	if (kmsan_internal_is_vmalloc_addr(addr)) {
		off = addr64 - VMALLOC_START;
		return off + (is_origin ? KMSAN_VMALLOC_ORIGIN_START :
					  KMSAN_VMALLOC_SHADOW_START);
	}
	if (kmsan_internal_is_module_addr(addr)) {
		off = addr64 - MODULES_VADDR;
		return off + (is_origin ? KMSAN_MODULES_ORIGIN_START :
					  KMSAN_MODULES_SHADOW_START);
	}
	return 0;
}

static struct page *virt_to_page_or_null(void *vaddr)
{
	if (kmsan_virt_addr_valid(vaddr))
		return virt_to_page(vaddr);
	else
		return NULL;
}

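/*
 * Return pointers to the shadow and origin for an @size-byte access at
 * @address. This is the hot path invoked by instrumented code on every memory
 * access (e.g. via the __msan_metadata_ptr_for_* hooks in
 * mm/kmsan/instrumentation.c): it never returns NULL, falling back to the
 * dummy pages above when KMSAN is disabled or no metadata exists, so callers
 * can dereference the result unconditionally.
 */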
struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *address, u64 size,
						     bool store)
{
	struct shadow_origin_ptr ret;
	void *shadow;

	/*
	 * Even if we redirect this memory access to the dummy page, it will
	 * go out of bounds.
	 */
	KMSAN_WARN_ON(size > PAGE_SIZE);

	if (!kmsan_enabled)
		goto return_dummy;

	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(address, size));
	shadow = kmsan_get_metadata(address, KMSAN_META_SHADOW);
	if (!shadow)
		goto return_dummy;

	ret.shadow = shadow;
	ret.origin = kmsan_get_metadata(address, KMSAN_META_ORIGIN);
	return ret;

return_dummy:
	if (store) {
		/* Ignore this store. */
		ret.shadow = dummy_store_page;
		ret.origin = dummy_store_page;
	} else {
		/* This load will return zero. */
		ret.shadow = dummy_load_page;
		ret.origin = dummy_load_page;
	}
	return ret;
}

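/*
 * Lookup order in kmsan_get_metadata() below: the fixed vmalloc/module
 * metadata ranges first, then an optional arch-provided mapping, and finally
 * the shadow/origin pages attached to struct page.
 */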
/*
 * Obtain the shadow or origin pointer for the given address, or NULL if there's
 * none. The caller must check the return value for being non-NULL if needed.
 * The return value of this function should not depend on whether we're in the
 * runtime or not.
 */
void *kmsan_get_metadata(void *address, bool is_origin)
{
	u64 addr = (u64)address, off;
	struct page *page;
	void *ret;

	if (is_origin)
		addr = ALIGN_DOWN(addr, KMSAN_ORIGIN_SIZE);
	address = (void *)addr;
	if (kmsan_internal_is_vmalloc_addr(address) ||
	    kmsan_internal_is_module_addr(address))
		return (void *)vmalloc_meta(address, is_origin);

	ret = arch_kmsan_get_meta_or_null(address, is_origin);
	if (ret)
		return ret;

	page = virt_to_page_or_null(address);
	if (!page)
		return NULL;
	if (!page_has_metadata(page))
		return NULL;
	off = offset_in_page(addr);

	return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;
}

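/*
 * Propagate metadata when a page is copied (e.g. on copy-on-write): if @src
 * has metadata, copy its shadow and origin into @dst; otherwise conservatively
 * mark @dst as initialized so that no false positives are reported.
 */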
void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	if (!dst || !page_has_metadata(dst))
		return;
	if (!src || !page_has_metadata(src)) {
		kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,
					       /*checked*/ false);
		return;
	}

	kmsan_enter_runtime();
	__memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);
	__memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_copy_page_meta);

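/*
 * Called when the page allocator hands out a fresh order-@order block.
 * __GFP_ZERO allocations get zeroed (initialized) metadata; everything else
 * gets all-ones shadow (fully uninitialized) and origin slots filled with a
 * single stack depot handle recording the allocation stack.
 */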
void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags)
{
	bool initialized = (flags & __GFP_ZERO) || !kmsan_enabled;
	struct page *shadow, *origin;
	depot_stack_handle_t handle;
	int pages = 1 << order;

	if (!page)
		return;

	shadow = shadow_page_for(page);
	origin = origin_page_for(page);

	if (initialized) {
		__memset(page_address(shadow), 0, PAGE_SIZE * pages);
		__memset(page_address(origin), 0, PAGE_SIZE * pages);
		return;
	}

	/* Zero pages allocated by the runtime should also be initialized. */
	if (kmsan_in_runtime())
		return;

	__memset(page_address(shadow), -1, PAGE_SIZE * pages);
	kmsan_enter_runtime();
	handle = kmsan_save_stack_with_flags(flags, /*extra_bits*/ 0);
	kmsan_leave_runtime();
	/*
	 * Addresses are page-aligned, pages are contiguous, so it's ok
	 * to just fill the origin pages with @handle.
	 */
	for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)
		((depot_stack_handle_t *)page_address(origin))[i] = handle;
}

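/*
 * Re-poison pages on free, so that subsequent uses of values from the freed
 * memory are reported as uninitialized, with the origin pointing at this
 * free (KMSAN_POISON_FREE).
 */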
void kmsan_free_page(struct page *page, unsigned int order)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(page_address(page), page_size(page),
				     GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

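/*
 * Mirror a vmalloc mapping in the metadata regions: when @pages are mapped at
 * [start, end), map their shadow and origin pages at the corresponding
 * offsets in the vmalloc shadow/origin ranges. Returns 0 on success or when
 * KMSAN is disabled, a negative error otherwise.
 */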
int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages,
				   unsigned int page_shift)
{
	unsigned long shadow_start, origin_start, shadow_end, origin_end;
	struct page **s_pages, **o_pages;
	int nr, mapped, err = 0;

	if (!kmsan_enabled)
		return 0;

	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
	if (!shadow_start)
		return 0;

	nr = (end - start) / PAGE_SIZE;
	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
	if (!s_pages || !o_pages) {
		err = -ENOMEM;
		goto ret;
	}
	for (int i = 0; i < nr; i++) {
		s_pages[i] = shadow_page_for(pages[i]);
		o_pages[i] = origin_page_for(pages[i]);
	}
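	/*
	 * Note: the incoming @prot is deliberately overridden below; metadata
	 * pages are always mapped PAGE_KERNEL (writable), presumably so the
	 * runtime can update shadow and origin regardless of how the data
	 * pages themselves are protected.
	 */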
	prot = PAGE_KERNEL;

	origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
	origin_end = vmalloc_meta((void *)end, KMSAN_META_ORIGIN);
	kmsan_enter_runtime();
	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
					    s_pages, page_shift);
	kmsan_leave_runtime();
	if (mapped) {
		err = mapped;
		goto ret;
	}
	kmsan_enter_runtime();
	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
					    o_pages, page_shift);
	kmsan_leave_runtime();
	if (mapped) {
		err = mapped;
		goto ret;
	}
	flush_tlb_kernel_range(shadow_start, shadow_end);
	flush_tlb_kernel_range(origin_start, origin_end);
	flush_cache_vmap(shadow_start, shadow_end);
	flush_cache_vmap(origin_start, origin_end);

ret:
	kfree(s_pages);
	kfree(o_pages);
	return err;
}

/*
 * Allocate metadata for pages allocated at boot time, before the page
 * allocator is up: shadow and origin are carved out of memblock and attached
 * to the corresponding struct pages, while the metadata pages' own links are
 * cleared so that they are never tracked themselves.
 */
void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
{
	struct page *shadow_p, *origin_p;
	void *shadow, *origin;
	struct page *page;
	u64 size;

	start = (void *)PAGE_ALIGN_DOWN((u64)start);
	size = PAGE_ALIGN((u64)end - (u64)start);
	shadow = memblock_alloc_or_panic(size, PAGE_SIZE);
	origin = memblock_alloc_or_panic(size, PAGE_SIZE);

	for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
		page = virt_to_page_or_null((char *)start + addr);
		shadow_p = virt_to_page((char *)shadow + addr);
		set_no_shadow_origin_page(shadow_p);
		shadow_page_for(page) = shadow_p;
		origin_p = virt_to_page((char *)origin + addr);
		set_no_shadow_origin_page(origin_p);
		origin_page_for(page) = origin_p;
	}
}

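/*
 * Attach pre-allocated shadow and origin pages to each of the 1 << @order
 * data pages, clearing the metadata pages' own links first; used during
 * KMSAN initialization (see mm/kmsan/init.c).
 */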
void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order)
{
	for (int i = 0; i < (1 << order); i++) {
		set_no_shadow_origin_page(&shadow[i]);
		set_no_shadow_origin_page(&origin[i]);
		shadow_page_for(&page[i]) = &shadow[i];
		origin_page_for(&page[i]) = &origin[i];
	}
}