GitHub Repository: torvalds/linux
Path: blob/master/mm/execmem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2002 Richard Henderson
 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
 * Copyright (C) 2023 Luis Chamberlain <[email protected]>
 * Copyright (C) 2024 Mike Rapoport IBM.
 */

#define pr_fmt(fmt) "execmem: " fmt

#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/execmem.h>
#include <linux/maple_tree.h>
#include <linux/set_memory.h>
#include <linux/moduleloader.h>
#include <linux/text-patching.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct execmem_info *execmem_info __ro_after_init;
static struct execmem_info default_execmem_info __ro_after_init;

#ifdef CONFIG_MMU
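/*
 * Allocate @size bytes of vmalloc space within the interval described by
 * @range, retrying in the fallback interval when the primary one is
 * exhausted.  Huge-page backed mappings are aligned to PMD_SIZE, and ranges
 * flagged with EXECMEM_KASAN_SHADOW also get a KASAN module shadow.
 */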
static void *execmem_vmalloc(struct execmem_range *range, size_t size,
                             pgprot_t pgprot, unsigned long vm_flags)
{
        bool kasan = range->flags & EXECMEM_KASAN_SHADOW;
        gfp_t gfp_flags = GFP_KERNEL | __GFP_NOWARN;
        unsigned int align = range->alignment;
        unsigned long start = range->start;
        unsigned long end = range->end;
        void *p;

        if (kasan)
                vm_flags |= VM_DEFER_KMEMLEAK;

        if (vm_flags & VM_ALLOW_HUGE_VMAP)
                align = PMD_SIZE;

        p = __vmalloc_node_range(size, align, start, end, gfp_flags,
                                 pgprot, vm_flags, NUMA_NO_NODE,
                                 __builtin_return_address(0));
        if (!p && range->fallback_start) {
                start = range->fallback_start;
                end = range->fallback_end;
                p = __vmalloc_node_range(size, align, start, end, gfp_flags,
                                         pgprot, vm_flags, NUMA_NO_NODE,
                                         __builtin_return_address(0));
        }

        if (!p) {
                pr_warn_ratelimited("unable to allocate memory\n");
                return NULL;
        }

        if (kasan && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
                vfree(p);
                return NULL;
        }

        return p;
}

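/*
 * Reserve a virtually contiguous area in the EXECMEM_MODULE_DATA range
 * without populating it, falling back to the range's fallback interval if
 * the primary one has no room.
 */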
struct vm_struct *execmem_vmap(size_t size)
{
        struct execmem_range *range = &execmem_info->ranges[EXECMEM_MODULE_DATA];
        struct vm_struct *area;

        area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
                                  range->start, range->end, NUMA_NO_NODE,
                                  GFP_KERNEL, __builtin_return_address(0));
        if (!area && range->fallback_start)
                area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
                                          range->fallback_start, range->fallback_end,
                                          NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0));

        return area;
}
#else
static void *execmem_vmalloc(struct execmem_range *range, size_t size,
                             pgprot_t pgprot, unsigned long vm_flags)
{
        return vmalloc(size);
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
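/*
 * Cache of large read-only-executable (ROX) mappings that allocations are
 * carved out of.  @free_areas tracks ranges available for allocation and
 * @busy_areas tracks ranges handed out to callers; both maple trees are
 * keyed by address and protected by @mutex.
 */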
struct execmem_cache {
        struct mutex mutex;
        struct maple_tree busy_areas;
        struct maple_tree free_areas;
        unsigned int pending_free_cnt;  /* protected by mutex */
};

/* delay to schedule asynchronous free if fast path free fails */
#define FREE_DELAY      (msecs_to_jiffies(10))

/* mark entries in busy_areas that should be freed asynchronously */
#define PENDING_FREE_MASK       (1 << (PAGE_SHIFT - 1))

static struct execmem_cache execmem_cache = {
        .mutex = __MUTEX_INITIALIZER(execmem_cache.mutex),
        .busy_areas = MTREE_INIT_EXT(busy_areas, MT_FLAGS_LOCK_EXTERN,
                                     execmem_cache.mutex),
        .free_areas = MTREE_INIT_EXT(free_areas, MT_FLAGS_LOCK_EXTERN,
                                     execmem_cache.mutex),
};

static inline unsigned long mas_range_len(struct ma_state *mas)
{
        return mas->last - mas->index + 1;
}

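/*
 * Make the direct map entries for the pages backing @vm valid or invalid in
 * page-order sized batches.  On failure the already updated entries are
 * restored to their previous state.
 */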
static int execmem_set_direct_map_valid(struct vm_struct *vm, bool valid)
{
        unsigned int nr = (1 << get_vm_area_page_order(vm));
        unsigned int updated = 0;
        int err = 0;

        for (int i = 0; i < vm->nr_pages; i += nr) {
                err = set_direct_map_valid_noflush(vm->pages[i], nr, valid);
                if (err)
                        goto err_restore;
                updated += nr;
        }

        return 0;

err_restore:
        for (int i = 0; i < updated; i += nr)
                set_direct_map_valid_noflush(vm->pages[i], nr, !valid);

        return err;
}

static int execmem_force_rw(void *ptr, size_t size)
{
        unsigned int nr = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long addr = (unsigned long)ptr;
        int ret;

        ret = set_memory_nx(addr, nr);
        if (ret)
                return ret;

        return set_memory_rw(addr, nr);
}

int execmem_restore_rox(void *ptr, size_t size)
{
        unsigned int nr = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long addr = (unsigned long)ptr;

        return set_memory_rox(addr, nr);
}

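/*
 * Worker that gives fully PMD-aligned free areas back to vmalloc: the
 * direct map entries are made valid again and the area is vfree()d.
 */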
static void execmem_cache_clean(struct work_struct *work)
{
        struct maple_tree *free_areas = &execmem_cache.free_areas;
        struct mutex *mutex = &execmem_cache.mutex;
        MA_STATE(mas, free_areas, 0, ULONG_MAX);
        void *area;

        mutex_lock(mutex);
        mas_for_each(&mas, area, ULONG_MAX) {
                size_t size = mas_range_len(&mas);

                if (IS_ALIGNED(size, PMD_SIZE) &&
                    IS_ALIGNED(mas.index, PMD_SIZE)) {
                        struct vm_struct *vm = find_vm_area(area);

                        execmem_set_direct_map_valid(vm, true);
                        mas_store_gfp(&mas, NULL, GFP_KERNEL);
                        vfree(area);
                }
        }
        mutex_unlock(mutex);
}

static DECLARE_WORK(execmem_cache_clean_work, execmem_cache_clean);

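/*
 * Insert [ptr, ptr + size) into free_areas, merging it with free areas that
 * are immediately adjacent below or above.  The caller must hold the cache
 * mutex.
 */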
static int execmem_cache_add_locked(void *ptr, size_t size, gfp_t gfp_mask)
{
        struct maple_tree *free_areas = &execmem_cache.free_areas;
        unsigned long addr = (unsigned long)ptr;
        MA_STATE(mas, free_areas, addr - 1, addr + 1);
        unsigned long lower, upper;
        void *area = NULL;

        lower = addr;
        upper = addr + size - 1;

        area = mas_walk(&mas);
        if (area && mas.last == addr - 1)
                lower = mas.index;

        area = mas_next(&mas, ULONG_MAX);
        if (area && mas.index == addr + size)
                upper = mas.last;

        mas_set_range(&mas, lower, upper);
        return mas_store_gfp(&mas, (void *)lower, gfp_mask);
}

static int execmem_cache_add(void *ptr, size_t size, gfp_t gfp_mask)
{
        guard(mutex)(&execmem_cache.mutex);

        return execmem_cache_add_locked(ptr, size, gfp_mask);
}

static bool within_range(struct execmem_range *range, struct ma_state *mas,
                         size_t size)
{
        unsigned long addr = mas->index;

        if (addr >= range->start && addr + size < range->end)
                return true;

        if (range->fallback_start &&
            addr >= range->fallback_start && addr + size < range->fallback_end)
                return true;

        return false;
}

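/*
 * Take the first free area that is large enough and lies within @range,
 * move its leading @size bytes to busy_areas and return the remainder, if
 * any, to free_areas.  Returns NULL when no suitable area is cached.
 */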
static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
{
        struct maple_tree *free_areas = &execmem_cache.free_areas;
        struct maple_tree *busy_areas = &execmem_cache.busy_areas;
        MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
        MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
        struct mutex *mutex = &execmem_cache.mutex;
        unsigned long addr, last, area_size = 0;
        void *area, *ptr = NULL;
        int err;

        mutex_lock(mutex);
        mas_for_each(&mas_free, area, ULONG_MAX) {
                area_size = mas_range_len(&mas_free);

                if (area_size >= size && within_range(range, &mas_free, size))
                        break;
        }

        if (area_size < size)
                goto out_unlock;

        addr = mas_free.index;
        last = mas_free.last;

        /* insert the allocated range [addr, addr + size) into busy_areas */
        mas_set_range(&mas_busy, addr, addr + size - 1);
        err = mas_store_gfp(&mas_busy, (void *)addr, GFP_KERNEL);
        if (err)
                goto out_unlock;

        mas_store_gfp(&mas_free, NULL, GFP_KERNEL);
        if (area_size > size) {
                void *ptr = (void *)(addr + size);

                /*
                 * re-insert the remaining free range [addr + size, last]
                 * into free_areas
                 */
                mas_set_range(&mas_free, addr + size, last);
                err = mas_store_gfp(&mas_free, ptr, GFP_KERNEL);
                if (err) {
                        mas_store_gfp(&mas_busy, NULL, GFP_KERNEL);
                        goto out_unlock;
                }
        }
        ptr = (void *)addr;

out_unlock:
        mutex_unlock(mutex);
        return ptr;
}

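/*
 * Grow the cache: allocate a new chunk (PMD-aligned when possible so it can
 * be mapped with huge pages), fill it with trapping instructions, switch it
 * to read-only-executable and add it to free_areas.
 */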
static int execmem_cache_populate(struct execmem_range *range, size_t size)
{
        unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
        struct vm_struct *vm;
        size_t alloc_size;
        int err = -ENOMEM;
        void *p;

        alloc_size = round_up(size, PMD_SIZE);
        p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags);
        if (!p) {
                alloc_size = size;
                p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags);
        }

        if (!p)
                return err;

        vm = find_vm_area(p);
        if (!vm)
                goto err_free_mem;

        /* fill memory with instructions that will trap */
        execmem_fill_trapping_insns(p, alloc_size);

        err = set_memory_rox((unsigned long)p, vm->nr_pages);
        if (err)
                goto err_free_mem;

        err = execmem_cache_add(p, alloc_size, GFP_KERNEL);
        if (err)
                goto err_reset_direct_map;

        return 0;

err_reset_direct_map:
        execmem_set_direct_map_valid(vm, true);
err_free_mem:
        vfree(p);
        return err;
}

static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
{
        void *p;
        int err;

        p = __execmem_cache_alloc(range, size);
        if (p)
                return p;

        err = execmem_cache_populate(range, size);
        if (err)
                return NULL;

        return __execmem_cache_alloc(range, size);
}

static inline bool is_pending_free(void *ptr)
{
        return ((unsigned long)ptr & PENDING_FREE_MASK);
}

static inline void *pending_free_set(void *ptr)
{
        return (void *)((unsigned long)ptr | PENDING_FREE_MASK);
}

static inline void *pending_free_clear(void *ptr)
{
        return (void *)((unsigned long)ptr & ~PENDING_FREE_MASK);
}

static int __execmem_cache_free(struct ma_state *mas, void *ptr, gfp_t gfp_mask)
{
        size_t size = mas_range_len(mas);
        int err;

        err = execmem_force_rw(ptr, size);
        if (err)
                return err;

        execmem_fill_trapping_insns(ptr, size);
        execmem_restore_rox(ptr, size);

        err = execmem_cache_add_locked(ptr, size, gfp_mask);
        if (err)
                return err;

        mas_store_gfp(mas, NULL, gfp_mask);
        return 0;
}

static void execmem_cache_free_slow(struct work_struct *work);
static DECLARE_DELAYED_WORK(execmem_cache_free_work, execmem_cache_free_slow);

static void execmem_cache_free_slow(struct work_struct *work)
{
        struct maple_tree *busy_areas = &execmem_cache.busy_areas;
        MA_STATE(mas, busy_areas, 0, ULONG_MAX);
        void *area;

        guard(mutex)(&execmem_cache.mutex);

        if (!execmem_cache.pending_free_cnt)
                return;

        mas_for_each(&mas, area, ULONG_MAX) {
                if (!is_pending_free(area))
                        continue;

                area = pending_free_clear(area);
                if (__execmem_cache_free(&mas, area, GFP_KERNEL))
                        continue;

                execmem_cache.pending_free_cnt--;
        }

        if (execmem_cache.pending_free_cnt)
                schedule_delayed_work(&execmem_cache_free_work, FREE_DELAY);
        else
                schedule_work(&execmem_cache_clean_work);
}

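/*
 * Return an allocation to the cache.  On the fast path the area is poisoned
 * with trapping instructions and moved back to free_areas; if that fails,
 * the busy entry is tagged with PENDING_FREE_MASK and handed to the delayed
 * worker.  Returns false when @ptr is not managed by the cache.
 */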
static bool execmem_cache_free(void *ptr)
{
        struct maple_tree *busy_areas = &execmem_cache.busy_areas;
        unsigned long addr = (unsigned long)ptr;
        MA_STATE(mas, busy_areas, addr, addr);
        void *area;
        int err;

        guard(mutex)(&execmem_cache.mutex);

        area = mas_walk(&mas);
        if (!area)
                return false;

        err = __execmem_cache_free(&mas, area, GFP_KERNEL | __GFP_NORETRY);
        if (err) {
                /*
                 * mas points to the exact slot we got the area from, and
                 * nothing else can modify the tree because of the mutex, so
                 * mas_store_gfp() won't allocate and will just change the
                 * pointer.
                 */
                area = pending_free_set(area);
                mas_store_gfp(&mas, area, GFP_KERNEL);
                execmem_cache.pending_free_cnt++;
                schedule_delayed_work(&execmem_cache_free_work, FREE_DELAY);
                return true;
        }

        schedule_work(&execmem_cache_clean_work);

        return true;
}

#else /* CONFIG_ARCH_HAS_EXECMEM_ROX */
/*
 * When the ROX cache is not used, the permissions defined by architectures
 * for execmem ranges that are updated before use (e.g. EXECMEM_MODULE_TEXT)
 * must be writable anyway.
 */
static inline int execmem_force_rw(void *ptr, size_t size)
{
        return 0;
}

static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
{
        return NULL;
}

static bool execmem_cache_free(void *ptr)
{
        return false;
}
#endif /* CONFIG_ARCH_HAS_EXECMEM_ROX */

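/*
 * Allocate executable memory for @type.  Ranges flagged with
 * EXECMEM_ROX_CACHE are served from the read-only-executable cache and must
 * not be written to directly; use execmem_alloc_rw() when writable memory
 * is needed.  Other ranges fall back to a plain vmalloc with the range's
 * pgprot.
 */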
void *execmem_alloc(enum execmem_type type, size_t size)
{
        struct execmem_range *range = &execmem_info->ranges[type];
        bool use_cache = range->flags & EXECMEM_ROX_CACHE;
        unsigned long vm_flags = VM_FLUSH_RESET_PERMS;
        pgprot_t pgprot = range->pgprot;
        void *p = NULL;

        size = PAGE_ALIGN(size);

        if (use_cache)
                p = execmem_cache_alloc(range, size);
        else
                p = execmem_vmalloc(range, size, pgprot, vm_flags);

        return kasan_reset_tag(p);
}

void *execmem_alloc_rw(enum execmem_type type, size_t size)
{
        void *p __free(execmem) = execmem_alloc(type, size);
        int err;

        if (!p)
                return NULL;

        err = execmem_force_rw(p, size);
        if (err)
                return NULL;

        return no_free_ptr(p);
}

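/*
 * Free memory obtained from execmem_alloc() or execmem_alloc_rw().  Cached
 * ROX allocations are returned to the cache; everything else is vfree()d.
 * Must not be called from interrupt context.
 */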
void execmem_free(void *ptr)
{
        /*
         * This memory may be RO, and freeing RO memory in an interrupt is not
         * supported by vmalloc.
         */
        WARN_ON(in_interrupt());

        if (!execmem_cache_free(ptr))
                vfree(ptr);
}

bool execmem_is_rox(enum execmem_type type)
{
        return !!(execmem_info->ranges[type].flags & EXECMEM_ROX_CACHE);
}

static bool execmem_validate(struct execmem_info *info)
{
        struct execmem_range *r = &info->ranges[EXECMEM_DEFAULT];

        if (!r->alignment || !r->start || !r->end || !pgprot_val(r->pgprot)) {
                pr_crit("Invalid parameters for execmem allocator, module loading will fail");
                return false;
        }

        if (!IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX)) {
                for (int i = EXECMEM_DEFAULT; i < EXECMEM_TYPE_MAX; i++) {
                        r = &info->ranges[i];

                        if (r->flags & EXECMEM_ROX_CACHE) {
                                pr_warn_once("ROX cache is not supported\n");
                                r->flags &= ~EXECMEM_ROX_CACHE;
                        }
                }
        }

        return true;
}

static void execmem_init_missing(struct execmem_info *info)
{
        struct execmem_range *default_range = &info->ranges[EXECMEM_DEFAULT];

        for (int i = EXECMEM_DEFAULT + 1; i < EXECMEM_TYPE_MAX; i++) {
                struct execmem_range *r = &info->ranges[i];

                if (!r->start) {
                        if (i == EXECMEM_MODULE_DATA)
                                r->pgprot = PAGE_KERNEL;
                        else
                                r->pgprot = default_range->pgprot;
                        r->alignment = default_range->alignment;
                        r->start = default_range->start;
                        r->end = default_range->end;
                        r->flags = default_range->flags;
                        r->fallback_start = default_range->fallback_start;
                        r->fallback_end = default_range->fallback_end;
                }
        }
}

struct execmem_info * __weak execmem_arch_setup(void)
{
        return NULL;
}

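/*
 * Set up execmem_info from the architecture callback, falling back to a
 * default that covers the whole vmalloc space with PAGE_KERNEL_EXEC, then
 * validate it and fill any unspecified ranges from EXECMEM_DEFAULT.
 */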
static void __init __execmem_init(void)
{
        struct execmem_info *info = execmem_arch_setup();

        if (!info) {
                info = execmem_info = &default_execmem_info;
                info->ranges[EXECMEM_DEFAULT].start = VMALLOC_START;
                info->ranges[EXECMEM_DEFAULT].end = VMALLOC_END;
                info->ranges[EXECMEM_DEFAULT].pgprot = PAGE_KERNEL_EXEC;
                info->ranges[EXECMEM_DEFAULT].alignment = 1;
        }

        if (!execmem_validate(info))
                return;

        execmem_init_missing(info);

        execmem_info = info;
}

#ifdef CONFIG_ARCH_WANTS_EXECMEM_LATE
static int __init execmem_late_init(void)
{
        __execmem_init();
        return 0;
}
core_initcall(execmem_late_init);
#else
void __init execmem_init(void)
{
        __execmem_init();
}
#endif