GitHub repository: torvalds/linux
Path: mm/kmsan/hooks.c

// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN hooks for kernel subsystems.
 *
 * These functions handle creation of KMSAN metadata for memory allocations.
 *
 * Copyright (C) 2018-2022 Google LLC
 * Author: Alexander Potapenko <[email protected]>
 *
 */

#include <linux/cacheflush.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/usb.h>

#include "../internal.h"
#include "../slab.h"
#include "kmsan.h"

/*
 * Instrumented functions shouldn't be called under
 * kmsan_enter_runtime()/kmsan_leave_runtime(), because this will lead to
 * skipping effects of functions like memset() inside instrumented code.
 */

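/*
 * Task lifecycle hooks: kmsan_task_create() initializes the per-task KMSAN
 * context of a new task, kmsan_task_exit() switches KMSAN off for a task
 * that is going away. (The call sites are outside this file, presumably in
 * the generic fork/exit paths.)
 */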
void kmsan_task_create(struct task_struct *task)
{
	kmsan_enter_runtime();
	kmsan_internal_task_create(task);
	kmsan_leave_runtime();
}

void kmsan_task_exit(struct task_struct *task)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	kmsan_disable_current();
}

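/*
 * Mark a freshly allocated slab object as initialized (__GFP_ZERO) or
 * uninitialized (everything else), so that reads of never-written fields are
 * reported. Caches with constructors and SLAB_TYPESAFE_BY_RCU caches are
 * skipped, see below. The call sites live in the slab allocator, not here.
 */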
void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
{
	if (unlikely(object == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * There's a ctor or this is an RCU cache - do nothing. The memory
	 * status hasn't changed since last use.
	 */
	if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
		return;

	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory(object, s->object_size,
					       KMSAN_POISON_CHECK);
	else
		kmsan_internal_poison_memory(object, s->object_size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}

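/*
 * Mark a freed slab object as uninitialized again, so that a later read
 * through a dangling pointer shows up as a use of uninitialized memory
 * (unless the cache has a constructor or is SLAB_TYPESAFE_BY_RCU, see below).
 */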
void kmsan_slab_free(struct kmem_cache *s, void *object)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
		return;
	/*
	 * If there's a constructor, freed memory must remain in the same state
	 * until the next allocation. We cannot save its state to detect
	 * use-after-free bugs, instead we just keep it unpoisoned.
	 */
	if (s->ctor)
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

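/*
 * kmsan_kmalloc_large() and kmsan_kfree_large() are the counterparts of the
 * slab hooks above for allocations that bypass the slab caches and are
 * served directly from whole pages.
 */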
void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	if (unlikely(ptr == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory((void *)ptr, size,
					       /*checked*/ true);
	else
		kmsan_internal_poison_memory((void *)ptr, size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}

void kmsan_kfree_large(const void *ptr)
{
	struct page *page;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	page = virt_to_head_page((void *)ptr);
	KMSAN_WARN_ON(ptr != page_address(page));
	kmsan_internal_poison_memory((void *)ptr, page_size(page), GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

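/*
 * vmalloc_shadow()/vmalloc_origin() return the addresses of the KMSAN shadow
 * and origin metadata backing a vmalloc address, cast to unsigned long for
 * the vmap/vunmap helpers used below.
 */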
static unsigned long vmalloc_shadow(unsigned long addr)
{
	return (unsigned long)kmsan_get_metadata((void *)addr,
						 KMSAN_META_SHADOW);
}

static unsigned long vmalloc_origin(unsigned long addr)
{
	return (unsigned long)kmsan_get_metadata((void *)addr,
						 KMSAN_META_ORIGIN);
}

void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
{
	__vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end));
	__vunmap_range_noflush(vmalloc_origin(start), vmalloc_origin(end));
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
}

/*
 * This function creates new shadow/origin pages for the physical pages mapped
 * into the virtual memory. If those physical pages already had shadow/origin,
 * those are ignored.
 */
int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
			     phys_addr_t phys_addr, pgprot_t prot,
			     unsigned int page_shift)
{
	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
	struct page *shadow, *origin;
	unsigned long off = 0;
	int nr, err = 0, clean = 0, mapped;

	if (!kmsan_enabled || kmsan_in_runtime())
		return 0;

	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
	for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
		shadow = alloc_pages(gfp_mask, 1);
		origin = alloc_pages(gfp_mask, 1);
		if (!shadow || !origin) {
			err = -ENOMEM;
			goto ret;
		}
		mapped = __vmap_pages_range_noflush(
			vmalloc_shadow(start + off),
			vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
			PAGE_SHIFT);
		if (mapped) {
			err = mapped;
			goto ret;
		}
		shadow = NULL;
		mapped = __vmap_pages_range_noflush(
			vmalloc_origin(start + off),
			vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
			PAGE_SHIFT);
		if (mapped) {
			__vunmap_range_noflush(
				vmalloc_shadow(start + off),
				vmalloc_shadow(start + off + PAGE_SIZE));
			err = mapped;
			goto ret;
		}
		origin = NULL;
	}
	/* Page mapping loop finished normally, nothing to clean up. */
	clean = 0;

ret:
	if (clean > 0) {
		/*
		 * Something went wrong. Clean up shadow/origin pages allocated
		 * on the last loop iteration, then delete mappings created
		 * during the previous iterations.
		 */
		if (shadow)
			__free_pages(shadow, 1);
		if (origin)
			__free_pages(origin, 1);
		__vunmap_range_noflush(
			vmalloc_shadow(start),
			vmalloc_shadow(start + clean * PAGE_SIZE));
		__vunmap_range_noflush(
			vmalloc_origin(start),
			vmalloc_origin(start + clean * PAGE_SIZE));
	}
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
	kmsan_leave_runtime();
	return err;
}

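/*
 * Counterpart of kmsan_ioremap_page_range(): unmap the shadow/origin ranges
 * and free the metadata pages that were allocated for them.
 */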
void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
{
	unsigned long v_shadow, v_origin;
	struct page *shadow, *origin;
	int nr;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
	v_shadow = (unsigned long)vmalloc_shadow(start);
	v_origin = (unsigned long)vmalloc_origin(start);
	for (int i = 0; i < nr;
	     i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
		shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);
		origin = kmsan_vmalloc_to_page_or_null((void *)v_origin);
		__vunmap_range_noflush(v_shadow, vmalloc_shadow(end));
		__vunmap_range_noflush(v_origin, vmalloc_origin(end));
		if (shadow)
			__free_pages(shadow, 1);
		if (origin)
			__free_pages(origin, 1);
	}
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
	kmsan_leave_runtime();
}

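/*
 * Called after data has already been copied to userspace, e.g. from the
 * instrumentation wrappers around copy_to_user() (the call sites are outside
 * this file). @to_copy is the number of bytes the caller tried to copy,
 * @left is the number of bytes that could not be copied.
 */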
void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
			size_t left)
{
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * At this point we've copied the memory already. It's hard to check it
	 * before copying, as the size of the actually copied buffer is unknown.
	 */

	/* copy_to_user() may copy zero bytes. No need to check. */
	if (!to_copy)
		return;
	/* Or maybe copy_to_user() failed to copy anything. */
	if (to_copy <= left)
		return;

	ua_flags = user_access_save();
	if (!IS_ENABLED(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE) ||
	    (u64)to < TASK_SIZE) {
		/* This is a user memory access, check it. */
		kmsan_internal_check_memory((void *)from, to_copy - left, to,
					    REASON_COPY_TO_USER);
	} else {
		/*
		 * Otherwise this is a kernel memory access. This happens when
		 * a compat syscall passes an argument allocated on the kernel
		 * stack to a real syscall.
		 * Don't check anything, just copy the shadow of the copied
		 * bytes.
		 */
		kmsan_enter_runtime();
		kmsan_internal_memmove_metadata((void *)to, (void *)from,
						to_copy - left);
		kmsan_leave_runtime();
	}
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_copy_to_user);

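/*
 * Propagate KMSAN metadata for a kernel-to-kernel copy performed by
 * non-instrumented code, so that the destination inherits the initialization
 * state of the source.
 */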
void kmsan_memmove(void *to, const void *from, size_t size)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	kmsan_enter_runtime();
	kmsan_internal_memmove_metadata(to, (void *)from, size);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_memmove);

/* Helper function to check an URB. */
void kmsan_handle_urb(const struct urb *urb, bool is_out)
{
	if (!urb)
		return;
	if (is_out)
		kmsan_internal_check_memory(urb->transfer_buffer,
					    urb->transfer_buffer_length,
					    /*user_addr*/ NULL,
					    REASON_SUBMIT_URB);
	else
		kmsan_internal_unpoison_memory(urb->transfer_buffer,
					       urb->transfer_buffer_length,
					       /*checked*/ false);
}
EXPORT_SYMBOL_GPL(kmsan_handle_urb);

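/*
 * Handle a single-page chunk of a DMA transfer: data going to the device
 * must be initialized, so it is checked; data coming from the device is
 * treated as initialized by the hardware, so it is unpoisoned.
 */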
static void kmsan_handle_dma_page(const void *addr, size_t size,
				  enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		kmsan_internal_check_memory((void *)addr, size,
					    /*user_addr*/ NULL, REASON_ANY);
		kmsan_internal_unpoison_memory((void *)addr, size,
					       /*checked*/ false);
		break;
	case DMA_TO_DEVICE:
		kmsan_internal_check_memory((void *)addr, size,
					    /*user_addr*/ NULL, REASON_ANY);
		break;
	case DMA_FROM_DEVICE:
		kmsan_internal_unpoison_memory((void *)addr, size,
					       /*checked*/ false);
		break;
	case DMA_NONE:
		break;
	}
}

/* Helper function to handle DMA data transfers. */
void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
		      enum dma_data_direction dir)
{
	u64 page_offset, to_go, addr;

	if (PageHighMem(page))
		return;
	addr = (u64)page_address(page) + offset;
	/*
	 * The kernel may occasionally give us adjacent DMA pages not belonging
	 * to the same allocation. Process them separately to avoid triggering
	 * internal KMSAN checks.
	 */
	while (size > 0) {
		page_offset = offset_in_page(addr);
		to_go = min(PAGE_SIZE - page_offset, (u64)size);
		kmsan_handle_dma_page((void *)addr, to_go, dir);
		addr += to_go;
		size -= to_go;
	}
}
EXPORT_SYMBOL_GPL(kmsan_handle_dma);

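/*
 * Scatter-gather counterpart of kmsan_handle_dma(): apply the same handling
 * to every entry of the scatterlist.
 */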
void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
			 enum dma_data_direction dir)
{
	struct scatterlist *item;
	int i;

	for_each_sg(sg, item, nents, i)
		kmsan_handle_dma(sg_page(item), item->offset, item->length,
				 dir);
}

/* Functions from kmsan-checks.h follow. */

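/*
 * These are the manual annotation helpers declared in
 * include/linux/kmsan-checks.h. A purely illustrative use (not code from this
 * file; the names are made up): a driver that lets a device fill @buf could
 * call
 *
 *	kmsan_unpoison_memory(buf, len);
 *
 * once the device signals completion, and
 *
 *	kmsan_check_memory(cmd, cmd_len);
 *
 * before handing a command buffer to the hardware, so that any uninitialized
 * bytes are reported at the point of submission.
 */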
/*
 * To create an origin, kmsan_poison_memory() unwinds the stack and stores it
 * into the stack depot. This may cause deadlocks if done from within the
 * KMSAN runtime, therefore we bail out if kmsan_in_runtime().
 */
void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	/* The users may want to poison/unpoison random memory. */
	kmsan_internal_poison_memory((void *)address, size, flags,
				     KMSAN_POISON_NOCHECK);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_poison_memory);

/*
 * Unlike kmsan_poison_memory(), this function can be used from within KMSAN
 * runtime, because it does not trigger allocations or call instrumented code.
 */
void kmsan_unpoison_memory(const void *address, size_t size)
{
	unsigned long ua_flags;

	if (!kmsan_enabled)
		return;

	ua_flags = user_access_save();
	/* The users may want to poison/unpoison random memory. */
	kmsan_internal_unpoison_memory((void *)address, size,
				       KMSAN_POISON_NOCHECK);
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_unpoison_memory);

/*
 * Version of kmsan_unpoison_memory() called from IRQ entry functions.
 */
void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
	kmsan_unpoison_memory((void *)regs, sizeof(*regs));
}

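/*
 * Report an error if any byte in the given range is not provably
 * initialized.
 */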
void kmsan_check_memory(const void *addr, size_t size)
{
	if (!kmsan_enabled)
		return;
	return kmsan_internal_check_memory((void *)addr, size,
					   /*user_addr*/ NULL, REASON_ANY);
}
EXPORT_SYMBOL(kmsan_check_memory);

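/*
 * kmsan_enable_current()/kmsan_disable_current() maintain a per-task nesting
 * counter (kmsan_ctx.depth) that keeps KMSAN disabled for the current task
 * while it is nonzero. Calls must be balanced; the warnings catch counter
 * underflow.
 */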
void kmsan_enable_current(void)
{
	KMSAN_WARN_ON(current->kmsan_ctx.depth == 0);
	current->kmsan_ctx.depth--;
}
EXPORT_SYMBOL(kmsan_enable_current);

void kmsan_disable_current(void)
{
	current->kmsan_ctx.depth++;
	KMSAN_WARN_ON(current->kmsan_ctx.depth == 0);
}
EXPORT_SYMBOL(kmsan_disable_current);