GitHub Repository: torvalds/linux
Path: blob/master/mm/kasan/kasan_test_c.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <[email protected]>
 */

#define pr_fmt(fmt) "kasan: test: " fmt

#include <kunit/test.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <trace/events/printk.h>

#include <asm/page.h>

#include "kasan.h"

#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");

static bool multishot;

/* Fields set based on lines observed in the console. */
static struct {
        bool report_found;
        bool async_fault;
} test_status;

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
static void *volatile kasan_ptr_result;
static volatile int kasan_int_result;

/* Probe for console output: obtains test_status lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
        if (strnstr(buf, "BUG: KASAN: ", len))
                WRITE_ONCE(test_status.report_found, true);
        else if (strnstr(buf, "Asynchronous fault: ", len))
                WRITE_ONCE(test_status.async_fault, true);
}

static int kasan_suite_init(struct kunit_suite *suite)
{
        if (!kasan_enabled()) {
                pr_err("Can't run KASAN tests with KASAN disabled");
                return -1;
        }

        /* Stop failing KUnit tests on KASAN reports. */
        kasan_kunit_test_suite_start();

        /*
         * Temporarily enable multi-shot mode. Otherwise, KASAN would only
         * report the first detected bug and panic the kernel if panic_on_warn
         * is enabled.
         */
        multishot = kasan_save_enable_multi_shot();

        register_trace_console(probe_console, NULL);
        return 0;
}

static void kasan_suite_exit(struct kunit_suite *suite)
{
        kasan_kunit_test_suite_end();
        kasan_restore_multi_shot(multishot);
        unregister_trace_console(probe_console, NULL);
        tracepoint_synchronize_unregister();
}

static void kasan_test_exit(struct kunit *test)
{
        KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));
}

/**
 * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
 * KASAN report; causes a KUnit test failure otherwise.
 *
 * @test: Currently executing KUnit test.
 * @expression: Expression that must produce a KASAN report.
 *
 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the test_status
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !test_status.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
        if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
            kasan_sync_fault_possible()) \
                migrate_disable(); \
        KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found)); \
        barrier(); \
        expression; \
        barrier(); \
        if (kasan_async_fault_possible()) \
                kasan_force_async_fault(); \
        if (!READ_ONCE(test_status.report_found)) { \
                KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure " \
                                "expected in \"" #expression \
                                "\", but none occurred"); \
        } \
        if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
            kasan_sync_fault_possible()) { \
                if (READ_ONCE(test_status.report_found) && \
                    !READ_ONCE(test_status.async_fault)) \
                        kasan_enable_hw_tags(); \
                migrate_enable(); \
        } \
        WRITE_ONCE(test_status.report_found, false); \
        WRITE_ONCE(test_status.async_fault, false); \
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
        if (!IS_ENABLED(config)) \
                kunit_skip((test), "Test requires " #config "=y"); \
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
        if (IS_ENABLED(config)) \
                kunit_skip((test), "Test requires " #config "=n"); \
} while (0)

#define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do { \
        if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \
                break; /* No compiler instrumentation. */ \
        if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX)) \
                break; /* Should always be instrumented! */ \
        if (IS_ENABLED(CONFIG_GENERIC_ENTRY)) \
                kunit_skip((test), "Test requires checked mem*()"); \
} while (0)

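/*
 * A minimal sketch of how the helpers above are combined by the tests below
 * (illustrative only, not an additional test): allocate an object, wrap the
 * single bad access in KUNIT_EXPECT_KASAN_FAIL(), and clean up afterwards:
 *
 *      ptr = kmalloc(size, GFP_KERNEL);
 *      KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 *      KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
 *      kfree(ptr);
 *
 * The KASAN_TEST_NEEDS_* macros are used first when a test only makes sense
 * for a particular KASAN mode or configuration.
 */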
static void kmalloc_oob_right(struct kunit *test)
164
{
165
char *ptr;
166
size_t size = 128 - KASAN_GRANULE_SIZE - 5;
167
168
ptr = kmalloc(size, GFP_KERNEL);
169
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
170
171
OPTIMIZER_HIDE_VAR(ptr);
172
/*
173
* An unaligned access past the requested kmalloc size.
174
* Only generic KASAN can precisely detect these.
175
*/
176
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
177
KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
178
179
/*
180
* An aligned access into the first out-of-bounds granule that falls
181
* within the aligned kmalloc object.
182
*/
183
KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');
184
185
/* Out-of-bounds access past the aligned kmalloc object. */
186
KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
187
ptr[size + KASAN_GRANULE_SIZE + 5]);
188
189
kfree(ptr);
190
}
191
192
static void kmalloc_oob_left(struct kunit *test)
193
{
194
char *ptr;
195
size_t size = 15;
196
197
ptr = kmalloc(size, GFP_KERNEL);
198
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
199
200
OPTIMIZER_HIDE_VAR(ptr);
201
KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
202
kfree(ptr);
203
}
204
205
static void kmalloc_node_oob_right(struct kunit *test)
206
{
207
char *ptr;
208
size_t size = 4096;
209
210
ptr = kmalloc_node(size, GFP_KERNEL, 0);
211
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
212
213
OPTIMIZER_HIDE_VAR(ptr);
214
KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
215
kfree(ptr);
216
}
217
218
static void kmalloc_track_caller_oob_right(struct kunit *test)
219
{
220
char *ptr;
221
size_t size = 128 - KASAN_GRANULE_SIZE;
222
223
/*
224
* Check that KASAN detects out-of-bounds access for object allocated via
225
* kmalloc_track_caller().
226
*/
227
ptr = kmalloc_track_caller(size, GFP_KERNEL);
228
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
229
230
OPTIMIZER_HIDE_VAR(ptr);
231
KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');
232
233
kfree(ptr);
234
235
/*
236
* Check that KASAN detects out-of-bounds access for object allocated via
237
* kmalloc_node_track_caller().
238
*/
239
ptr = kmalloc_node_track_caller(size, GFP_KERNEL, 0);
240
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
241
242
OPTIMIZER_HIDE_VAR(ptr);
243
KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');
244
245
kfree(ptr);
246
}
247
248
/*
249
* Check that KASAN detects an out-of-bounds access for a big object allocated
250
* via kmalloc(). But not as big as to trigger the page_alloc fallback.
251
*/
252
static void kmalloc_big_oob_right(struct kunit *test)
253
{
254
char *ptr;
255
size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
256
257
ptr = kmalloc(size, GFP_KERNEL);
258
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
259
260
OPTIMIZER_HIDE_VAR(ptr);
261
KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
262
kfree(ptr);
263
}
264
265
/*
266
* The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
267
* that does not fit into the largest slab cache and therefore is allocated via
268
* the page_alloc fallback.
269
*/
270
271
static void kmalloc_large_oob_right(struct kunit *test)
272
{
273
char *ptr;
274
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
275
276
ptr = kmalloc(size, GFP_KERNEL);
277
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
278
279
OPTIMIZER_HIDE_VAR(ptr);
280
KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
281
282
kfree(ptr);
283
}
284
285
static void kmalloc_large_uaf(struct kunit *test)
286
{
287
char *ptr;
288
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
289
290
ptr = kmalloc(size, GFP_KERNEL);
291
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
292
kfree(ptr);
293
294
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
295
}
296
297
static void kmalloc_large_invalid_free(struct kunit *test)
298
{
299
char *ptr;
300
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
301
302
ptr = kmalloc(size, GFP_KERNEL);
303
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
304
305
KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
306
}
307
308
static void page_alloc_oob_right(struct kunit *test)
309
{
310
char *ptr;
311
struct page *pages;
312
size_t order = 4;
313
size_t size = (1UL << (PAGE_SHIFT + order));
314
315
/*
316
* With generic KASAN page allocations have no redzones, thus
317
* out-of-bounds detection is not guaranteed.
318
* See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
319
*/
320
KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
321
322
pages = alloc_pages(GFP_KERNEL, order);
323
ptr = page_address(pages);
324
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
325
326
KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
327
free_pages((unsigned long)ptr, order);
328
}
329
330
static void page_alloc_uaf(struct kunit *test)
331
{
332
char *ptr;
333
struct page *pages;
334
size_t order = 4;
335
336
pages = alloc_pages(GFP_KERNEL, order);
337
ptr = page_address(pages);
338
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
339
free_pages((unsigned long)ptr, order);
340
341
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
342
}
343
344
static void krealloc_more_oob_helper(struct kunit *test,
345
size_t size1, size_t size2)
346
{
347
char *ptr1, *ptr2;
348
size_t middle;
349
350
KUNIT_ASSERT_LT(test, size1, size2);
351
middle = size1 + (size2 - size1) / 2;
352
353
ptr1 = kmalloc(size1, GFP_KERNEL);
354
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
355
356
ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
357
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
358
359
/* Suppress -Warray-bounds warnings. */
360
OPTIMIZER_HIDE_VAR(ptr2);
361
362
/* All offsets up to size2 must be accessible. */
363
ptr2[size1 - 1] = 'x';
364
ptr2[size1] = 'x';
365
ptr2[middle] = 'x';
366
ptr2[size2 - 1] = 'x';
367
368
/* Generic mode is precise, so unaligned size2 must be inaccessible. */
369
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
370
KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
371
372
/* For all modes first aligned offset after size2 must be inaccessible. */
373
KUNIT_EXPECT_KASAN_FAIL(test,
374
ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
375
376
kfree(ptr2);
377
}
378
379
static void krealloc_less_oob_helper(struct kunit *test,
380
size_t size1, size_t size2)
381
{
382
char *ptr1, *ptr2;
383
size_t middle;
384
385
KUNIT_ASSERT_LT(test, size2, size1);
386
middle = size2 + (size1 - size2) / 2;
387
388
ptr1 = kmalloc(size1, GFP_KERNEL);
389
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
390
391
ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
392
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
393
394
/* Suppress -Warray-bounds warnings. */
395
OPTIMIZER_HIDE_VAR(ptr2);
396
397
/* Must be accessible for all modes. */
398
ptr2[size2 - 1] = 'x';
399
400
/* Generic mode is precise, so unaligned size2 must be inaccessible. */
401
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
402
KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
403
404
/* For all modes first aligned offset after size2 must be inaccessible. */
405
KUNIT_EXPECT_KASAN_FAIL(test,
406
ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
407
408
/*
409
* For all modes all size2, middle, and size1 should land in separate
410
* granules and thus the latter two offsets should be inaccessible.
411
*/
412
KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
413
round_down(middle, KASAN_GRANULE_SIZE));
414
KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
415
round_down(size1, KASAN_GRANULE_SIZE));
416
KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
417
KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
418
KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');
419
420
kfree(ptr2);
421
}
422
423
static void krealloc_more_oob(struct kunit *test)
424
{
425
krealloc_more_oob_helper(test, 201, 235);
426
}
427
428
static void krealloc_less_oob(struct kunit *test)
429
{
430
krealloc_less_oob_helper(test, 235, 201);
431
}
432
433
static void krealloc_large_more_oob(struct kunit *test)
434
{
435
krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
436
KMALLOC_MAX_CACHE_SIZE + 235);
437
}
438
439
static void krealloc_large_less_oob(struct kunit *test)
440
{
441
krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
442
KMALLOC_MAX_CACHE_SIZE + 201);
443
}
444
445
/*
446
* Check that krealloc() detects a use-after-free, returns NULL,
447
* and doesn't unpoison the freed object.
448
*/
449
static void krealloc_uaf(struct kunit *test)
450
{
451
char *ptr1, *ptr2;
452
int size1 = 201;
453
int size2 = 235;
454
455
ptr1 = kmalloc(size1, GFP_KERNEL);
456
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
457
kfree(ptr1);
458
459
KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
460
KUNIT_ASSERT_NULL(test, ptr2);
461
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
462
}
463
464
static void kmalloc_oob_16(struct kunit *test)
465
{
466
struct {
467
u64 words[2];
468
} *ptr1, *ptr2;
469
470
KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
471
472
/* This test is specifically crafted for the generic mode. */
473
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
474
475
/* RELOC_HIDE to prevent gcc from warning about short alloc */
476
ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
477
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
478
479
ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
480
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
481
482
OPTIMIZER_HIDE_VAR(ptr1);
483
OPTIMIZER_HIDE_VAR(ptr2);
484
KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
485
kfree(ptr1);
486
kfree(ptr2);
487
}
488
489
static void kmalloc_uaf_16(struct kunit *test)
490
{
491
struct {
492
u64 words[2];
493
} *ptr1, *ptr2;
494
495
KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
496
497
ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
498
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
499
500
ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
501
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
502
kfree(ptr2);
503
504
KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
505
kfree(ptr1);
506
}
507
508
/*
509
* Note: in the memset tests below, the written range touches both valid and
510
* invalid memory. This makes sure that the instrumentation does not only check
511
* the starting address but the whole range.
512
*/
513
514
static void kmalloc_oob_memset_2(struct kunit *test)
515
{
516
char *ptr;
517
size_t size = 128 - KASAN_GRANULE_SIZE;
518
size_t memset_size = 2;
519
520
KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
521
522
ptr = kmalloc(size, GFP_KERNEL);
523
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
524
525
OPTIMIZER_HIDE_VAR(ptr);
526
OPTIMIZER_HIDE_VAR(size);
527
OPTIMIZER_HIDE_VAR(memset_size);
528
KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size));
529
kfree(ptr);
530
}
531
532
static void kmalloc_oob_memset_4(struct kunit *test)
533
{
534
char *ptr;
535
size_t size = 128 - KASAN_GRANULE_SIZE;
536
size_t memset_size = 4;
537
538
KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
539
540
ptr = kmalloc(size, GFP_KERNEL);
541
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
542
543
OPTIMIZER_HIDE_VAR(ptr);
544
OPTIMIZER_HIDE_VAR(size);
545
OPTIMIZER_HIDE_VAR(memset_size);
546
KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size));
547
kfree(ptr);
548
}
549
550
static void kmalloc_oob_memset_8(struct kunit *test)
551
{
552
char *ptr;
553
size_t size = 128 - KASAN_GRANULE_SIZE;
554
size_t memset_size = 8;
555
556
KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
557
558
ptr = kmalloc(size, GFP_KERNEL);
559
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
560
561
OPTIMIZER_HIDE_VAR(ptr);
562
OPTIMIZER_HIDE_VAR(size);
563
OPTIMIZER_HIDE_VAR(memset_size);
564
KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size));
565
kfree(ptr);
566
}
567
568
static void kmalloc_oob_memset_16(struct kunit *test)
569
{
570
char *ptr;
571
size_t size = 128 - KASAN_GRANULE_SIZE;
572
size_t memset_size = 16;
573
574
KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
575
576
ptr = kmalloc(size, GFP_KERNEL);
577
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
578
579
OPTIMIZER_HIDE_VAR(ptr);
580
OPTIMIZER_HIDE_VAR(size);
581
OPTIMIZER_HIDE_VAR(memset_size);
582
KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size));
583
kfree(ptr);
584
}
585
586
static void kmalloc_oob_in_memset(struct kunit *test)
587
{
588
char *ptr;
589
size_t size = 128 - KASAN_GRANULE_SIZE;
590
591
KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
592
593
ptr = kmalloc(size, GFP_KERNEL);
594
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
595
596
OPTIMIZER_HIDE_VAR(ptr);
597
OPTIMIZER_HIDE_VAR(size);
598
KUNIT_EXPECT_KASAN_FAIL(test,
599
memset(ptr, 0, size + KASAN_GRANULE_SIZE));
600
kfree(ptr);
601
}
602
603
static void kmalloc_memmove_negative_size(struct kunit *test)
604
{
605
char *ptr;
606
size_t size = 64;
607
size_t invalid_size = -2;
608
609
KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
610
611
/*
612
* Hardware tag-based mode doesn't check memmove for negative size.
613
* As a result, this test introduces a side-effect memory corruption,
614
* which can result in a crash.
615
*/
616
KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
617
618
ptr = kmalloc(size, GFP_KERNEL);
619
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
620
621
memset((char *)ptr, 0, 64);
622
OPTIMIZER_HIDE_VAR(ptr);
623
OPTIMIZER_HIDE_VAR(invalid_size);
624
KUNIT_EXPECT_KASAN_FAIL(test,
625
memmove((char *)ptr, (char *)ptr + 4, invalid_size));
626
kfree(ptr);
627
}
628
629
static void kmalloc_memmove_invalid_size(struct kunit *test)
630
{
631
char *ptr;
632
size_t size = 64;
633
size_t invalid_size = size;
634
635
KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
636
637
ptr = kmalloc(size, GFP_KERNEL);
638
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
639
640
memset((char *)ptr, 0, 64);
641
OPTIMIZER_HIDE_VAR(ptr);
642
OPTIMIZER_HIDE_VAR(invalid_size);
643
KUNIT_EXPECT_KASAN_FAIL(test,
644
memmove((char *)ptr, (char *)ptr + 4, invalid_size));
645
kfree(ptr);
646
}
647
648
static void kmalloc_uaf(struct kunit *test)
649
{
650
char *ptr;
651
size_t size = 10;
652
653
ptr = kmalloc(size, GFP_KERNEL);
654
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
655
656
kfree(ptr);
657
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
658
}
659
660
static void kmalloc_uaf_memset(struct kunit *test)
661
{
662
char *ptr;
663
size_t size = 33;
664
665
KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
666
667
/*
668
* Only generic KASAN uses quarantine, which is required to avoid a
669
* kernel memory corruption this test causes.
670
*/
671
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
672
673
ptr = kmalloc(size, GFP_KERNEL);
674
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
675
676
kfree(ptr);
677
KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
678
}
679
680
static void kmalloc_uaf2(struct kunit *test)
681
{
682
char *ptr1, *ptr2;
683
size_t size = 43;
684
int counter = 0;
685
686
again:
687
ptr1 = kmalloc(size, GFP_KERNEL);
688
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
689
690
kfree(ptr1);
691
692
ptr2 = kmalloc(size, GFP_KERNEL);
693
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
694
695
/*
696
* For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
697
* Allow up to 16 attempts at generating different tags.
698
*/
699
if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
700
kfree(ptr2);
701
goto again;
702
}
703
704
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
705
KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
706
707
kfree(ptr2);
708
}
709
710
/*
711
* Check that KASAN detects use-after-free when another object was allocated in
712
* the same slot. Relevant for the tag-based modes, which do not use quarantine.
713
*/
714
static void kmalloc_uaf3(struct kunit *test)
715
{
716
char *ptr1, *ptr2;
717
size_t size = 100;
718
719
/* This test is specifically crafted for tag-based modes. */
720
KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
721
722
ptr1 = kmalloc(size, GFP_KERNEL);
723
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
724
kfree(ptr1);
725
726
ptr2 = kmalloc(size, GFP_KERNEL);
727
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
728
kfree(ptr2);
729
730
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
731
}
732
733
static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
734
{
735
int *i_unsafe = unsafe;
736
737
KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
738
KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
739
KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
740
KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));
741
742
KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
743
KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
744
KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
745
KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
746
KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
747
KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
748
KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
749
KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
750
KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
751
KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
752
KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
753
KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
754
KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
755
KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
756
KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
757
KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
758
KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
759
KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
760
KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
761
KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
762
KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
763
KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
764
KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));
765
766
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
767
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
768
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
769
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
770
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
771
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
772
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
773
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
774
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
775
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
776
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
777
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
778
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
779
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
780
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
781
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
782
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
783
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
784
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
785
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
786
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
787
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
788
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
789
}
790
791
static void kasan_atomics(struct kunit *test)
792
{
793
void *a1, *a2;
794
795
/*
796
* Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
797
* that the following 16 bytes will make up the redzone.
798
*/
799
a1 = kzalloc(48, GFP_KERNEL);
800
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
801
a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
802
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);
803
804
/* Use atomics to access the redzone. */
805
kasan_atomics_helper(test, a1 + 48, a2);
806
807
kfree(a1);
808
kfree(a2);
809
}
810
811
static void kmalloc_double_kzfree(struct kunit *test)
812
{
813
char *ptr;
814
size_t size = 16;
815
816
ptr = kmalloc(size, GFP_KERNEL);
817
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
818
819
kfree_sensitive(ptr);
820
KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
821
}
822
823
/* Check that ksize() does NOT unpoison whole object. */
824
static void ksize_unpoisons_memory(struct kunit *test)
825
{
826
char *ptr;
827
size_t size = 128 - KASAN_GRANULE_SIZE - 5;
828
size_t real_size;
829
830
ptr = kmalloc(size, GFP_KERNEL);
831
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
832
833
real_size = ksize(ptr);
834
KUNIT_EXPECT_GT(test, real_size, size);
835
836
OPTIMIZER_HIDE_VAR(ptr);
837
838
/* These accesses shouldn't trigger a KASAN report. */
839
ptr[0] = 'x';
840
ptr[size - 1] = 'x';
841
842
/* These must trigger a KASAN report. */
843
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
844
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
845
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
846
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
847
848
kfree(ptr);
849
}
850
851
/*
852
* Check that a use-after-free is detected by ksize() and via normal accesses
853
* after it.
854
*/
855
static void ksize_uaf(struct kunit *test)
856
{
857
char *ptr;
858
int size = 128 - KASAN_GRANULE_SIZE;
859
860
ptr = kmalloc(size, GFP_KERNEL);
861
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
862
kfree(ptr);
863
864
OPTIMIZER_HIDE_VAR(ptr);
865
KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
866
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
867
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
868
}
869
870
/*
871
* The two tests below check that Generic KASAN prints auxiliary stack traces
872
* for RCU callbacks and workqueues. The reports need to be inspected manually.
873
*
874
* These tests are still enabled for other KASAN modes to make sure that all
875
* modes report bad accesses in tested scenarios.
876
*/
877
878
static struct kasan_rcu_info {
879
int i;
880
struct rcu_head rcu;
881
} *global_rcu_ptr;
882
883
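/*
 * The RCU callback below frees the object and then reads it. For Generic
 * KASAN, the resulting use-after-free report should also show the call_rcu()
 * call site as an auxiliary stack trace.
 */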
static void rcu_uaf_reclaim(struct rcu_head *rp)
884
{
885
struct kasan_rcu_info *fp =
886
container_of(rp, struct kasan_rcu_info, rcu);
887
888
kfree(fp);
889
((volatile struct kasan_rcu_info *)fp)->i;
890
}
891
892
static void rcu_uaf(struct kunit *test)
893
{
894
struct kasan_rcu_info *ptr;
895
896
ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
897
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
898
899
global_rcu_ptr = rcu_dereference_protected(
900
(struct kasan_rcu_info __rcu *)ptr, NULL);
901
902
KUNIT_EXPECT_KASAN_FAIL(test,
903
call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
904
rcu_barrier());
905
}
906
907
static void workqueue_uaf_work(struct work_struct *work)
908
{
909
kfree(work);
910
}
911
912
static void workqueue_uaf(struct kunit *test)
913
{
914
struct workqueue_struct *workqueue;
915
struct work_struct *work;
916
917
workqueue = create_workqueue("kasan_workqueue_test");
918
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);
919
920
work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
921
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);
922
923
INIT_WORK(work, workqueue_uaf_work);
924
queue_work(workqueue, work);
925
destroy_workqueue(workqueue);
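/*
 * destroy_workqueue() flushes the queued work, so workqueue_uaf_work() has
 * already freed the work item by this point; the access below is a
 * use-after-free.
 */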
926
927
KUNIT_EXPECT_KASAN_FAIL(test,
928
((volatile struct work_struct *)work)->data);
929
}
930
931
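/*
 * The two tests below free an object through an address recomputed from its
 * struct page or physical address. No KASAN report is expected here: the
 * per-test exit hook (kasan_test_exit()) fails the test if one shows up.
 */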
static void kfree_via_page(struct kunit *test)
932
{
933
char *ptr;
934
size_t size = 8;
935
struct page *page;
936
unsigned long offset;
937
938
ptr = kmalloc(size, GFP_KERNEL);
939
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
940
941
page = virt_to_page(ptr);
942
offset = offset_in_page(ptr);
943
kfree(page_address(page) + offset);
944
}
945
946
static void kfree_via_phys(struct kunit *test)
947
{
948
char *ptr;
949
size_t size = 8;
950
phys_addr_t phys;
951
952
ptr = kmalloc(size, GFP_KERNEL);
953
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
954
955
phys = virt_to_phys(ptr);
956
kfree(phys_to_virt(phys));
957
}
958
959
static void kmem_cache_oob(struct kunit *test)
960
{
961
char *p;
962
size_t size = 200;
963
struct kmem_cache *cache;
964
965
cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
966
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
967
968
p = kmem_cache_alloc(cache, GFP_KERNEL);
969
if (!p) {
970
kunit_err(test, "Allocation failed: %s\n", __func__);
971
kmem_cache_destroy(cache);
972
return;
973
}
974
975
KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
976
977
kmem_cache_free(cache, p);
978
kmem_cache_destroy(cache);
979
}
980
981
static void kmem_cache_double_free(struct kunit *test)
982
{
983
char *p;
984
size_t size = 200;
985
struct kmem_cache *cache;
986
987
cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
988
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
989
990
p = kmem_cache_alloc(cache, GFP_KERNEL);
991
if (!p) {
992
kunit_err(test, "Allocation failed: %s\n", __func__);
993
kmem_cache_destroy(cache);
994
return;
995
}
996
997
kmem_cache_free(cache, p);
998
KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
999
kmem_cache_destroy(cache);
1000
}
1001
1002
static void kmem_cache_invalid_free(struct kunit *test)
1003
{
1004
char *p;
1005
size_t size = 200;
1006
struct kmem_cache *cache;
1007
1008
cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
1009
NULL);
1010
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1011
1012
p = kmem_cache_alloc(cache, GFP_KERNEL);
1013
if (!p) {
1014
kunit_err(test, "Allocation failed: %s\n", __func__);
1015
kmem_cache_destroy(cache);
1016
return;
1017
}
1018
1019
/* Trigger invalid free, the object doesn't get freed. */
1020
KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
1021
1022
/*
1023
* Properly free the object to prevent the "Objects remaining in
1024
* test_cache on __kmem_cache_shutdown" BUG failure.
1025
*/
1026
kmem_cache_free(cache, p);
1027
1028
kmem_cache_destroy(cache);
1029
}
1030
1031
static void kmem_cache_rcu_uaf(struct kunit *test)
1032
{
1033
char *p;
1034
size_t size = 200;
1035
struct kmem_cache *cache;
1036
1037
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
1038
1039
cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
1040
NULL);
1041
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1042
1043
p = kmem_cache_alloc(cache, GFP_KERNEL);
1044
if (!p) {
1045
kunit_err(test, "Allocation failed: %s\n", __func__);
1046
kmem_cache_destroy(cache);
1047
return;
1048
}
1049
*p = 1;
1050
1051
rcu_read_lock();
1052
1053
/* Free the object - this will internally schedule an RCU callback. */
1054
kmem_cache_free(cache, p);
1055
1056
/*
1057
* We should still be allowed to access the object at this point because
1058
* the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
1059
* critical section since before the kmem_cache_free().
1060
*/
1061
READ_ONCE(*p);
1062
1063
rcu_read_unlock();
1064
1065
/*
1066
* Wait for the RCU callback to execute; after this, the object should
1067
* have actually been freed from KASAN's perspective.
1068
*/
1069
rcu_barrier();
1070
1071
KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
1072
1073
kmem_cache_destroy(cache);
1074
}
1075
1076
static void kmem_cache_double_destroy(struct kunit *test)
1077
{
1078
struct kmem_cache *cache;
1079
1080
cache = kmem_cache_create("test_cache", 200, 0, SLAB_NO_MERGE, NULL);
1081
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1082
kmem_cache_destroy(cache);
1083
KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
1084
}
1085
1086
static void kmem_cache_accounted(struct kunit *test)
1087
{
1088
int i;
1089
char *p;
1090
size_t size = 200;
1091
struct kmem_cache *cache;
1092
1093
cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
1094
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1095
1096
/*
1097
* Several allocations with a delay to allow for lazy per memcg kmem
1098
* cache creation.
1099
*/
1100
for (i = 0; i < 5; i++) {
1101
p = kmem_cache_alloc(cache, GFP_KERNEL);
1102
if (!p)
1103
goto free_cache;
1104
1105
kmem_cache_free(cache, p);
1106
msleep(100);
1107
}
1108
1109
free_cache:
1110
kmem_cache_destroy(cache);
1111
}
1112
1113
static void kmem_cache_bulk(struct kunit *test)
1114
{
1115
struct kmem_cache *cache;
1116
size_t size = 200;
1117
char *p[10];
1118
bool ret;
1119
int i;
1120
1121
cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1122
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1123
1124
ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
1125
if (!ret) {
1126
kunit_err(test, "Allocation failed: %s\n", __func__);
1127
kmem_cache_destroy(cache);
1128
return;
1129
}
1130
1131
for (i = 0; i < ARRAY_SIZE(p); i++)
1132
p[i][0] = p[i][size - 1] = 42;
1133
1134
kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
1135
kmem_cache_destroy(cache);
1136
}
1137
1138
static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size)
1139
{
1140
int pool_size = 4;
1141
int ret;
1142
void *elem;
1143
1144
memset(pool, 0, sizeof(*pool));
1145
ret = mempool_init_kmalloc_pool(pool, pool_size, size);
1146
KUNIT_ASSERT_EQ(test, ret, 0);
1147
1148
/*
1149
* Allocate one element to prevent mempool from freeing elements to the
1150
* underlying allocator and instead make it add them to the element
1151
* list when the tests trigger double-free and invalid-free bugs.
1152
* This allows testing KASAN annotations in add_element().
1153
*/
1154
elem = mempool_alloc_preallocated(pool);
1155
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1156
1157
return elem;
1158
}
1159
1160
static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size)
1161
{
1162
struct kmem_cache *cache;
1163
int pool_size = 4;
1164
int ret;
1165
1166
cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1167
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1168
1169
memset(pool, 0, sizeof(*pool));
1170
ret = mempool_init_slab_pool(pool, pool_size, cache);
1171
KUNIT_ASSERT_EQ(test, ret, 0);
1172
1173
/*
1174
* Do not allocate one preallocated element, as we skip the double-free
1175
* and invalid-free tests for slab mempool for simplicity.
1176
*/
1177
1178
return cache;
1179
}
1180
1181
static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order)
1182
{
1183
int pool_size = 4;
1184
int ret;
1185
void *elem;
1186
1187
memset(pool, 0, sizeof(*pool));
1188
ret = mempool_init_page_pool(pool, pool_size, order);
1189
KUNIT_ASSERT_EQ(test, ret, 0);
1190
1191
elem = mempool_alloc_preallocated(pool);
1192
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1193
1194
return elem;
1195
}
1196
1197
static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size)
1198
{
1199
char *elem;
1200
1201
elem = mempool_alloc_preallocated(pool);
1202
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1203
1204
OPTIMIZER_HIDE_VAR(elem);
1205
1206
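/*
 * Generic KASAN redzones the exact object size, so the first byte past size
 * must already fault. The tag-based modes only change tags at
 * KASAN_GRANULE_SIZE granularity, so probe the first granule-aligned offset
 * past the object instead.
 */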
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1207
KUNIT_EXPECT_KASAN_FAIL(test,
1208
((volatile char *)&elem[size])[0]);
1209
else
1210
KUNIT_EXPECT_KASAN_FAIL(test,
1211
((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);
1212
1213
mempool_free(elem, pool);
1214
}
1215
1216
static void mempool_kmalloc_oob_right(struct kunit *test)
1217
{
1218
mempool_t pool;
1219
size_t size = 128 - KASAN_GRANULE_SIZE - 5;
1220
void *extra_elem;
1221
1222
extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1223
1224
mempool_oob_right_helper(test, &pool, size);
1225
1226
mempool_free(extra_elem, &pool);
1227
mempool_exit(&pool);
1228
}
1229
1230
static void mempool_kmalloc_large_oob_right(struct kunit *test)
1231
{
1232
mempool_t pool;
1233
size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1234
void *extra_elem;
1235
1236
extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1237
1238
mempool_oob_right_helper(test, &pool, size);
1239
1240
mempool_free(extra_elem, &pool);
1241
mempool_exit(&pool);
1242
}
1243
1244
static void mempool_slab_oob_right(struct kunit *test)
1245
{
1246
mempool_t pool;
1247
size_t size = 123;
1248
struct kmem_cache *cache;
1249
1250
cache = mempool_prepare_slab(test, &pool, size);
1251
1252
mempool_oob_right_helper(test, &pool, size);
1253
1254
mempool_exit(&pool);
1255
kmem_cache_destroy(cache);
1256
}
1257
1258
/*
1259
* Skip the out-of-bounds test for page mempool. With Generic KASAN, page
1260
* allocations have no redzones, and thus the out-of-bounds detection is not
1261
* guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With
1262
* the tag-based KASAN modes, the neighboring allocation might have the same
1263
* tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505.
1264
*/
1265
1266
static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
1267
{
1268
char *elem, *ptr;
1269
1270
elem = mempool_alloc_preallocated(pool);
1271
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1272
1273
mempool_free(elem, pool);
1274
1275
ptr = page ? page_address((struct page *)elem) : elem;
1276
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
1277
}
1278
1279
static void mempool_kmalloc_uaf(struct kunit *test)
1280
{
1281
mempool_t pool;
1282
size_t size = 128;
1283
void *extra_elem;
1284
1285
extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1286
1287
mempool_uaf_helper(test, &pool, false);
1288
1289
mempool_free(extra_elem, &pool);
1290
mempool_exit(&pool);
1291
}
1292
1293
static void mempool_kmalloc_large_uaf(struct kunit *test)
1294
{
1295
mempool_t pool;
1296
size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1297
void *extra_elem;
1298
1299
extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1300
1301
mempool_uaf_helper(test, &pool, false);
1302
1303
mempool_free(extra_elem, &pool);
1304
mempool_exit(&pool);
1305
}
1306
1307
static void mempool_slab_uaf(struct kunit *test)
1308
{
1309
mempool_t pool;
1310
size_t size = 123;
1311
struct kmem_cache *cache;
1312
1313
cache = mempool_prepare_slab(test, &pool, size);
1314
1315
mempool_uaf_helper(test, &pool, false);
1316
1317
mempool_exit(&pool);
1318
kmem_cache_destroy(cache);
1319
}
1320
1321
static void mempool_page_alloc_uaf(struct kunit *test)
1322
{
1323
mempool_t pool;
1324
int order = 2;
1325
void *extra_elem;
1326
1327
extra_elem = mempool_prepare_page(test, &pool, order);
1328
1329
mempool_uaf_helper(test, &pool, true);
1330
1331
mempool_free(extra_elem, &pool);
1332
mempool_exit(&pool);
1333
}
1334
1335
static void mempool_double_free_helper(struct kunit *test, mempool_t *pool)
1336
{
1337
char *elem;
1338
1339
elem = mempool_alloc_preallocated(pool);
1340
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1341
1342
mempool_free(elem, pool);
1343
1344
KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool));
1345
}
1346
1347
static void mempool_kmalloc_double_free(struct kunit *test)
1348
{
1349
mempool_t pool;
1350
size_t size = 128;
1351
char *extra_elem;
1352
1353
extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1354
1355
mempool_double_free_helper(test, &pool);
1356
1357
mempool_free(extra_elem, &pool);
1358
mempool_exit(&pool);
1359
}
1360
1361
static void mempool_kmalloc_large_double_free(struct kunit *test)
1362
{
1363
mempool_t pool;
1364
size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1365
char *extra_elem;
1366
1367
extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1368
1369
mempool_double_free_helper(test, &pool);
1370
1371
mempool_free(extra_elem, &pool);
1372
mempool_exit(&pool);
1373
}
1374
1375
static void mempool_page_alloc_double_free(struct kunit *test)
1376
{
1377
mempool_t pool;
1378
int order = 2;
1379
char *extra_elem;
1380
1381
extra_elem = mempool_prepare_page(test, &pool, order);
1382
1383
mempool_double_free_helper(test, &pool);
1384
1385
mempool_free(extra_elem, &pool);
1386
mempool_exit(&pool);
1387
}
1388
1389
static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool)
1390
{
1391
char *elem;
1392
1393
elem = mempool_alloc_preallocated(pool);
1394
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1395
1396
KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool));
1397
1398
mempool_free(elem, pool);
1399
}
1400
1401
static void mempool_kmalloc_invalid_free(struct kunit *test)
1402
{
1403
mempool_t pool;
1404
size_t size = 128;
1405
char *extra_elem;
1406
1407
extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1408
1409
mempool_kmalloc_invalid_free_helper(test, &pool);
1410
1411
mempool_free(extra_elem, &pool);
1412
mempool_exit(&pool);
1413
}
1414
1415
static void mempool_kmalloc_large_invalid_free(struct kunit *test)
1416
{
1417
mempool_t pool;
1418
size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1419
char *extra_elem;
1420
1421
extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1422
1423
mempool_kmalloc_invalid_free_helper(test, &pool);
1424
1425
mempool_free(extra_elem, &pool);
1426
mempool_exit(&pool);
1427
}
1428
1429
/*
1430
* Skip the invalid-free test for page mempool. The invalid-free detection only
1431
* works for compound pages and mempool preallocates all page elements without
1432
* the __GFP_COMP flag.
1433
*/
1434
1435
static char global_array[10];
1436
1437
static void kasan_global_oob_right(struct kunit *test)
1438
{
1439
/*
1440
* Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
1441
* from failing here and panicking the kernel, access the array via a
1442
* volatile pointer, which will prevent the compiler from being able to
1443
* determine the array bounds.
1444
*
1445
* This access uses a volatile pointer to char (char *volatile) rather
1446
* than the more conventional pointer to volatile char (volatile char *)
1447
* because we want to prevent the compiler from making inferences about
1448
* the pointer itself (i.e. its array bounds), not the data that it
1449
* refers to.
1450
*/
1451
char *volatile array = global_array;
1452
char *p = &array[ARRAY_SIZE(global_array) + 3];
1453
1454
/* Only generic mode instruments globals. */
1455
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1456
1457
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1458
}
1459
1460
static void kasan_global_oob_left(struct kunit *test)
1461
{
1462
char *volatile array = global_array;
1463
char *p = array - 3;
1464
1465
/*
1466
* GCC is known to fail this test, skip it.
1467
* See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
1468
*/
1469
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
1470
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1471
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1472
}
1473
1474
static void kasan_stack_oob(struct kunit *test)
1475
{
1476
char stack_array[10];
1477
/* See comment in kasan_global_oob_right. */
1478
char *volatile array = stack_array;
1479
char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
1480
1481
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1482
1483
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1484
}
1485
1486
static void kasan_alloca_oob_left(struct kunit *test)
1487
{
1488
volatile int i = 10;
1489
char alloca_array[i];
1490
/* See comment in kasan_global_oob_right. */
1491
char *volatile array = alloca_array;
1492
char *p = array - 1;
1493
1494
/* Only generic mode instruments dynamic allocas. */
1495
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1496
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1497
1498
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1499
}
1500
1501
static void kasan_alloca_oob_right(struct kunit *test)
1502
{
1503
volatile int i = 10;
1504
char alloca_array[i];
1505
/* See comment in kasan_global_oob_right. */
1506
char *volatile array = alloca_array;
1507
char *p = array + i;
1508
1509
/* Only generic mode instruments dynamic allocas. */
1510
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1511
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1512
1513
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1514
}
1515
1516
static void kasan_memchr(struct kunit *test)
1517
{
1518
char *ptr;
1519
size_t size = 24;
1520
1521
/*
1522
* str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1523
* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1524
*/
1525
KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1526
1527
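/*
 * For the tag-based modes (OOB_TAG_OFF == KASAN_GRANULE_SIZE), round the size
 * up to a granule boundary so that reading one byte past it lands in the
 * next, differently tagged granule and is guaranteed to be detected.
 */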
if (OOB_TAG_OFF)
1528
size = round_up(size, OOB_TAG_OFF);
1529
1530
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1531
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1532
1533
OPTIMIZER_HIDE_VAR(ptr);
1534
OPTIMIZER_HIDE_VAR(size);
1535
KUNIT_EXPECT_KASAN_FAIL(test,
1536
kasan_ptr_result = memchr(ptr, '1', size + 1));
1537
1538
kfree(ptr);
1539
}
1540
1541
static void kasan_memcmp(struct kunit *test)
1542
{
1543
char *ptr;
1544
size_t size = 24;
1545
int arr[9];
1546
1547
/*
1548
* str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1549
* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1550
*/
1551
KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1552
1553
if (OOB_TAG_OFF)
1554
size = round_up(size, OOB_TAG_OFF);
1555
1556
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1557
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1558
memset(arr, 0, sizeof(arr));
1559
1560
OPTIMIZER_HIDE_VAR(ptr);
1561
OPTIMIZER_HIDE_VAR(size);
1562
KUNIT_EXPECT_KASAN_FAIL(test,
1563
kasan_int_result = memcmp(ptr, arr, size+1));
1564
kfree(ptr);
1565
}
1566
1567
static void kasan_strings(struct kunit *test)
1568
{
1569
char *ptr;
1570
char *src;
1571
size_t size = 24;
1572
1573
/*
1574
* str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1575
* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1576
*/
1577
KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1578
1579
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1580
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1581
OPTIMIZER_HIDE_VAR(ptr);
1582
1583
src = kmalloc(KASAN_GRANULE_SIZE, GFP_KERNEL | __GFP_ZERO);
1584
strscpy(src, "f0cacc1a0000000", KASAN_GRANULE_SIZE);
1585
OPTIMIZER_HIDE_VAR(src);
1586
1587
/*
1588
* Make sure that strscpy() does not trigger KASAN if it overreads into
1589
* poisoned memory.
1590
*
1591
* The expected size does not include the terminator '\0'
1592
* so it is (KASAN_GRANULE_SIZE - 2) ==
1593
* KASAN_GRANULE_SIZE - ("initial removed character" + "\0").
1594
*/
1595
KUNIT_EXPECT_EQ(test, KASAN_GRANULE_SIZE - 2,
1596
strscpy(ptr, src + 1, KASAN_GRANULE_SIZE));
1597
1598
/* strscpy should fail if the first byte is unreadable. */
1599
KUNIT_EXPECT_KASAN_FAIL(test, strscpy(ptr, src + KASAN_GRANULE_SIZE,
1600
KASAN_GRANULE_SIZE));
1601
1602
kfree(src);
1603
kfree(ptr);
1604
1605
/*
1606
* Try to cause only 1 invalid access (less spam in dmesg).
1607
* For that we need ptr to point to zeroed byte.
1608
* Skip metadata that could be stored in freed object so ptr
1609
* will likely point to zeroed byte.
1610
*/
1611
ptr += 16;
1612
KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
1613
1614
KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
1615
1616
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
1617
1618
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
1619
1620
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
1621
1622
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
1623
}
1624
1625
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
1626
{
1627
KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
1628
KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
1629
KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
1630
KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
1631
KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
1632
KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
1633
KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
1634
KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
1635
}
1636
1637
static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
1638
{
1639
KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
1640
KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
1641
KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
1642
KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
1643
KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
1644
KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
1645
KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
1646
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
1647
if (nr < 7)
1648
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
1649
xor_unlock_is_negative_byte(1 << nr, addr));
1650
}
1651
1652
static void kasan_bitops_generic(struct kunit *test)
1653
{
1654
long *bits;
1655
1656
/* This test is specifically crafted for the generic mode. */
1657
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1658
1659
/*
1660
* Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
1661
* this way we do not actually corrupt other memory.
1662
*/
1663
bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
1664
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
1665
1666
/*
1667
* Below calls try to access bit within allocated memory; however, the
1668
* below accesses are still out-of-bounds, since bitops are defined to
1669
* operate on the whole long the bit is in.
1670
*/
1671
kasan_bitops_modify(test, BITS_PER_LONG, bits);
1672
1673
/*
1674
* Below calls try to access bit beyond allocated memory.
1675
*/
1676
kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
1677
1678
kfree(bits);
1679
}
1680
1681
static void kasan_bitops_tags(struct kunit *test)
1682
{
1683
long *bits;
1684
1685
/* This test is specifically crafted for tag-based modes. */
1686
KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1687
1688
/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
1689
bits = kzalloc(48, GFP_KERNEL);
1690
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
1691
1692
/* Do the accesses past the 48 allocated bytes, but within the redzone. */
1693
kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
1694
kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);
1695
1696
kfree(bits);
1697
}
1698
1699
static void vmalloc_helpers_tags(struct kunit *test)
1700
{
1701
void *ptr;
1702
1703
/* This test is intended for tag-based modes. */
1704
KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1705
1706
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1707
1708
if (!kasan_vmalloc_enabled())
1709
kunit_skip(test, "Test requires kasan.vmalloc=on");
1710
1711
ptr = vmalloc(PAGE_SIZE);
1712
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1713
1714
/* Check that the returned pointer is tagged. */
1715
KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1716
KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1717
1718
/* Make sure exported vmalloc helpers handle tagged pointers. */
1719
KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
1720
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));
1721
1722
#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
1723
{
1724
int rv;
1725
1726
/* Make sure vmalloc'ed memory permissions can be changed. */
1727
rv = set_memory_ro((unsigned long)ptr, 1);
1728
KUNIT_ASSERT_GE(test, rv, 0);
1729
rv = set_memory_rw((unsigned long)ptr, 1);
1730
KUNIT_ASSERT_GE(test, rv, 0);
1731
}
1732
#endif
1733
1734
vfree(ptr);
1735
}
1736
1737
static void vmalloc_oob(struct kunit *test)
1738
{
1739
char *v_ptr, *p_ptr;
1740
struct page *page;
1741
size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;
1742
1743
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1744
1745
if (!kasan_vmalloc_enabled())
1746
kunit_skip(test, "Test requires kasan.vmalloc=on");
1747
1748
v_ptr = vmalloc(size);
1749
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1750
1751
OPTIMIZER_HIDE_VAR(v_ptr);
1752
1753
/*
1754
* We have to be careful not to hit the guard page in vmalloc tests.
1755
* The MMU will catch that and crash us.
1756
*/
1757
1758
/* Make sure in-bounds accesses are valid. */
1759
v_ptr[0] = 0;
1760
v_ptr[size - 1] = 0;
1761
1762
/*
1763
* An unaligned access past the requested vmalloc size.
1764
* Only generic KASAN can precisely detect these.
1765
*/
1766
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1767
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);
1768
1769
/* An aligned access into the first out-of-bounds granule. */
1770
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);
1771
1772
/* Check that in-bounds accesses to the physical page are valid. */
1773
page = vmalloc_to_page(v_ptr);
1774
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
1775
p_ptr = page_address(page);
1776
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1777
p_ptr[0] = 0;
1778
1779
vfree(v_ptr);
1780
1781
/*
1782
* We can't check for use-after-unmap bugs in this nor in the following
1783
* vmalloc tests, as the page might be fully unmapped and accessing it
1784
* will crash the kernel.
1785
*/
1786
}
1787
1788
static void vmap_tags(struct kunit *test)
1789
{
1790
char *p_ptr, *v_ptr;
1791
struct page *p_page, *v_page;
1792
1793
/*
1794
* This test is specifically crafted for the software tag-based mode,
1795
* the only tag-based mode that poisons vmap mappings.
1796
*/
1797
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1798
1799
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1800
1801
if (!kasan_vmalloc_enabled())
1802
kunit_skip(test, "Test requires kasan.vmalloc=on");
1803
1804
p_page = alloc_pages(GFP_KERNEL, 1);
1805
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
1806
p_ptr = page_address(p_page);
1807
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1808
1809
v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
1810
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1811
1812
/*
1813
* We can't check for out-of-bounds bugs in this nor in the following
1814
* vmalloc tests, as allocations have page granularity and accessing
1815
* the guard page will crash the kernel.
1816
*/
1817
1818
KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
1819
KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
1820
1821
/* Make sure that in-bounds accesses through both pointers work. */
1822
*p_ptr = 0;
1823
*v_ptr = 0;
1824
1825
/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
1826
v_page = vmalloc_to_page(v_ptr);
1827
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
1828
KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);
1829
1830
vunmap(v_ptr);
1831
free_pages((unsigned long)p_ptr, 1);
1832
}
1833
1834
static void vm_map_ram_tags(struct kunit *test)
1835
{
1836
char *p_ptr, *v_ptr;
1837
struct page *page;
1838
1839
/*
1840
* This test is specifically crafted for the software tag-based mode,
1841
* the only tag-based mode that poisons vm_map_ram mappings.
1842
*/
1843
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1844
1845
page = alloc_pages(GFP_KERNEL, 1);
1846
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
1847
p_ptr = page_address(page);
1848
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1849
1850
v_ptr = vm_map_ram(&page, 1, -1);
1851
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1852
1853
KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
1854
KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
1855
1856
/* Make sure that in-bounds accesses through both pointers work. */
1857
*p_ptr = 0;
1858
*v_ptr = 0;
1859
1860
vm_unmap_ram(v_ptr, 1);
1861
free_pages((unsigned long)p_ptr, 1);
1862
}
1863
1864
/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = get_random_u32_inclusive(1, 4);
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}

	if (!kasan_vmalloc_enabled())
		return;

	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = vmalloc(size);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		vfree(ptr);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

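	/*
	 * Note that 'tag' is an int rather than a u8: a u8 counter could
	 * never exceed KASAN_TAG_KERNEL (0xff), so the loop below would not
	 * terminate.
	 */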
	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		/*
		 * For Software Tag-Based KASAN, skip the majority of tag
		 * values to avoid the test printing too many reports.
		 */
		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
		    tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
			continue;

		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

/*
 * Check that Rust performing a use-after-free using `unsafe` is detected.
 * This is a smoke test to make sure that Rust is being sanitized properly.
 */
static void rust_uaf(struct kunit *test)
{
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST);
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
}

/*
 * copy_to_kernel_nofault() is an internal helper that is not exported to
 * loadable modules, so this test can only run when kasan_test is built-in.
 */
#ifndef MODULE
static void copy_to_kernel_nofault_oob(struct kunit *test)
{
	char *ptr;
	char buf[128];
	size_t size = sizeof(buf);

	/*
	 * This test currently fails with the HW_TAGS mode. The reason is
	 * unknown and needs to be investigated.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

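	/*
	 * Allocate one KASAN granule less than the copy size so that copying
	 * 'size' bytes runs past the object: the first copy below reads past
	 * it, the second writes past it. OPTIMIZER_HIDE_VAR() keeps the
	 * compiler from seeing the real allocation size at compile time.
	 */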
	ptr = kmalloc(size - KASAN_GRANULE_SIZE, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	OPTIMIZER_HIDE_VAR(ptr);

	/*
	 * We test copy_to_kernel_nofault() to detect corrupted memory that is
	 * being written into the kernel. In contrast,
	 * copy_from_kernel_nofault() is primarily used in kernel helper
	 * functions where the source address might be random or uninitialized.
	 * Applying KASAN instrumentation to copy_from_kernel_nofault() could
	 * lead to false positives. By focusing KASAN checks only on
	 * copy_to_kernel_nofault(), we ensure that only valid memory is
	 * written to the kernel, minimizing the risk of kernel corruption
	 * while avoiding false positives in the reverse case.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test,
			copy_to_kernel_nofault(&buf[0], ptr, size));
	KUNIT_EXPECT_KASAN_FAIL(test,
			copy_to_kernel_nofault(ptr, &buf[0], size));

	kfree(ptr);
}
#endif /* !MODULE */

static void copy_user_test_oob(struct kunit *test)
{
	char *kmem;
	char __user *usermem;
	unsigned long useraddr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	int __maybe_unused unused;

	kmem = kunit_kmalloc(test, size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, kmem);

	useraddr = kunit_vm_mmap(test, NULL, 0, PAGE_SIZE,
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_ANONYMOUS | MAP_PRIVATE, 0);
	KUNIT_ASSERT_NE_MSG(test, useraddr, 0,
			"Could not create userspace mm");
	KUNIT_ASSERT_LT_MSG(test, useraddr, (unsigned long)TASK_SIZE,
			"Failed to allocate user memory");

	OPTIMIZER_HIDE_VAR(size);
	usermem = (char __user *)useraddr;

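	/*
	 * kmem is 'size' bytes, so copying 'size + 1' bytes in either
	 * direction overruns it by one byte into the KASAN redzone.
	 * OPTIMIZER_HIDE_VAR(size) above hides the length from the compiler
	 * so the overflow is not diagnosed (or elided) at compile time.
	 */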
	KUNIT_EXPECT_KASAN_FAIL(test,
			unused = copy_from_user(kmem, usermem, size + 1));
	KUNIT_EXPECT_KASAN_FAIL(test,
			unused = copy_to_user(usermem, kmem, size + 1));
	KUNIT_EXPECT_KASAN_FAIL(test,
			unused = __copy_from_user(kmem, usermem, size + 1));
	KUNIT_EXPECT_KASAN_FAIL(test,
			unused = __copy_to_user(usermem, kmem, size + 1));
	KUNIT_EXPECT_KASAN_FAIL(test,
			unused = __copy_from_user_inatomic(kmem, usermem, size + 1));
	KUNIT_EXPECT_KASAN_FAIL(test,
			unused = __copy_to_user_inatomic(usermem, kmem, size + 1));

	/*
	 * Prepare a long string in usermem to avoid the strncpy_from_user test
	 * bailing out on '\0' before it reaches out-of-bounds.
	 */
	memset(kmem, 'a', size);
	KUNIT_EXPECT_EQ(test, copy_to_user(usermem, kmem, size), 0);

	KUNIT_EXPECT_KASAN_FAIL(test,
			unused = strncpy_from_user(kmem, usermem, size + 1));
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_track_caller_oob_right),
	KUNIT_CASE(kmalloc_big_oob_right),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(kmalloc_large_uaf),
	KUNIT_CASE(kmalloc_large_invalid_free),
	KUNIT_CASE(page_alloc_oob_right),
	KUNIT_CASE(page_alloc_uaf),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_large_more_oob),
	KUNIT_CASE(krealloc_large_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kmalloc_uaf3),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(rcu_uaf),
	KUNIT_CASE(workqueue_uaf),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kmem_cache_rcu_uaf),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(mempool_kmalloc_oob_right),
	KUNIT_CASE(mempool_kmalloc_large_oob_right),
	KUNIT_CASE(mempool_slab_oob_right),
	KUNIT_CASE(mempool_kmalloc_uaf),
	KUNIT_CASE(mempool_kmalloc_large_uaf),
	KUNIT_CASE(mempool_slab_uaf),
	KUNIT_CASE(mempool_page_alloc_uaf),
	KUNIT_CASE(mempool_kmalloc_double_free),
	KUNIT_CASE(mempool_kmalloc_large_double_free),
	KUNIT_CASE(mempool_page_alloc_double_free),
	KUNIT_CASE(mempool_kmalloc_invalid_free),
	KUNIT_CASE(mempool_kmalloc_large_invalid_free),
	KUNIT_CASE(kasan_global_oob_right),
	KUNIT_CASE(kasan_global_oob_left),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE_SLOW(kasan_atomics),
	KUNIT_CASE(vmalloc_helpers_tags),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(vmap_tags),
	KUNIT_CASE(vm_map_ram_tags),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
#ifndef MODULE
	KUNIT_CASE(copy_to_kernel_nofault_oob),
#endif
	KUNIT_CASE(rust_uaf),
	KUNIT_CASE(copy_user_test_oob),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
	.suite_init = kasan_suite_init,
	.suite_exit = kasan_suite_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_DESCRIPTION("KUnit tests for checking KASAN bug-detection capabilities");
MODULE_LICENSE("GPL");