GitHub Repository: torvalds/linux
Path: blob/master/mm/hugetlb.c
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Generic hugetlb support.
4
* (C) Nadia Yvette Chambers, April 2004
5
*/
6
#include <linux/list.h>
7
#include <linux/init.h>
8
#include <linux/mm.h>
9
#include <linux/seq_file.h>
10
#include <linux/highmem.h>
11
#include <linux/mmu_notifier.h>
12
#include <linux/nodemask.h>
13
#include <linux/pagemap.h>
14
#include <linux/mempolicy.h>
15
#include <linux/compiler.h>
16
#include <linux/cpumask.h>
17
#include <linux/cpuset.h>
18
#include <linux/mutex.h>
19
#include <linux/memblock.h>
20
#include <linux/minmax.h>
21
#include <linux/slab.h>
22
#include <linux/sched/mm.h>
23
#include <linux/mmdebug.h>
24
#include <linux/sched/signal.h>
25
#include <linux/rmap.h>
26
#include <linux/string_choices.h>
27
#include <linux/string_helpers.h>
28
#include <linux/swap.h>
29
#include <linux/leafops.h>
30
#include <linux/jhash.h>
31
#include <linux/numa.h>
32
#include <linux/llist.h>
33
#include <linux/cma.h>
34
#include <linux/migrate.h>
35
#include <linux/nospec.h>
36
#include <linux/delayacct.h>
37
#include <linux/memory.h>
38
#include <linux/mm_inline.h>
39
#include <linux/padata.h>
40
#include <linux/pgalloc.h>
41
42
#include <asm/page.h>
43
#include <asm/tlb.h>
44
#include <asm/setup.h>
45
46
#include <linux/io.h>
47
#include <linux/node.h>
48
#include <linux/page_owner.h>
49
#include "internal.h"
50
#include "hugetlb_vmemmap.h"
51
#include "hugetlb_cma.h"
52
#include "hugetlb_internal.h"
53
#include <linux/page-isolation.h>
54
55
int hugetlb_max_hstate __read_mostly;
56
unsigned int default_hstate_idx;
57
struct hstate hstates[HUGE_MAX_HSTATE];
58
59
__initdata nodemask_t hugetlb_bootmem_nodes;
60
__initdata struct list_head huge_boot_pages[MAX_NUMNODES];
61
static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata;
62
63
/*
64
* Due to ordering constraints across the init code for various
65
* architectures, hugetlb hstate cmdline parameters can't simply
66
* be early_param. early_param might call the setup function
67
* before valid hugetlb page sizes are determined, leading to
68
* incorrect rejection of valid hugepagesz= options.
69
*
70
* So, record the parameters early and consume them whenever the
71
* init code is ready for them, by calling hugetlb_parse_params().
72
*/
73
74
/* one (hugepagesz=,hugepages=) pair per hstate, one default_hugepagesz */
75
#define HUGE_MAX_CMDLINE_ARGS (2 * HUGE_MAX_HSTATE + 1)
76
struct hugetlb_cmdline {
77
char *val;
78
int (*setup)(char *val);
79
};
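/*
 * Illustrative sketch (editorial, not part of the kernel source): with a boot
 * command line such as "hugepagesz=1G hugepages=16", each early_param hit is
 * recorded as one hugetlb_cmdline entry (the value string plus its setup
 * callback) rather than being parsed immediately. The entries are consumed
 * later, once the architecture has reported its supported huge page sizes,
 * by hugetlb_parse_params() invoking each saved setup(val) in order.
 */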
80
81
/* for command line parsing */
82
static struct hstate * __initdata parsed_hstate;
83
static unsigned long __initdata default_hstate_max_huge_pages;
84
static bool __initdata parsed_valid_hugepagesz = true;
85
static bool __initdata parsed_default_hugepagesz;
86
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
87
static unsigned long hugepage_allocation_threads __initdata;
88
89
static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata;
90
static int hstate_cmdline_index __initdata;
91
static struct hugetlb_cmdline hugetlb_params[HUGE_MAX_CMDLINE_ARGS] __initdata;
92
static int hugetlb_param_index __initdata;
93
static __init int hugetlb_add_param(char *s, int (*setup)(char *val));
94
static __init void hugetlb_parse_params(void);
95
96
#define hugetlb_early_param(str, func) \
97
static __init int func##args(char *s) \
98
{ \
99
return hugetlb_add_param(s, func); \
100
} \
101
early_param(str, func##args)
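/*
 * Expansion sketch (editorial): a use such as
 *
 *	hugetlb_early_param("hugepagesz", hugepagesz_setup);
 *
 * expands to a tiny wrapper, hugepagesz_setupargs(), that merely records the
 * argument string via hugetlb_add_param(s, hugepagesz_setup) and registers
 * the wrapper with early_param("hugepagesz", ...). The real parsing happens
 * later, when hugetlb_parse_params() replays the recorded setup callbacks.
 */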
102
103
/*
104
* Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
105
* free_huge_pages, and surplus_huge_pages.
106
*/
107
__cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);
108
109
/*
110
* Serializes faults on the same logical page. This is used to
111
* prevent spurious OOMs when the hugepage pool is fully utilized.
112
*/
113
static int num_fault_mutexes __ro_after_init;
114
struct mutex *hugetlb_fault_mutex_table __ro_after_init;
115
116
/* Forward declaration */
117
static int hugetlb_acct_memory(struct hstate *h, long delta);
118
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
119
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
120
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
121
unsigned long start, unsigned long end, bool take_locks);
122
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
123
124
static void hugetlb_free_folio(struct folio *folio)
125
{
126
if (folio_test_hugetlb_cma(folio)) {
127
hugetlb_cma_free_folio(folio);
128
return;
129
}
130
131
folio_put(folio);
132
}
133
134
static inline bool subpool_is_free(struct hugepage_subpool *spool)
135
{
136
if (spool->count)
137
return false;
138
if (spool->max_hpages != -1)
139
return spool->used_hpages == 0;
140
if (spool->min_hpages != -1)
141
return spool->rsv_hpages == spool->min_hpages;
142
143
return true;
144
}
145
146
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
147
unsigned long irq_flags)
148
{
149
spin_unlock_irqrestore(&spool->lock, irq_flags);
150
151
/* If no pages are used, and no other handles to the subpool
152
* remain, give up any reservations based on minimum size and
153
* free the subpool */
154
if (subpool_is_free(spool)) {
155
if (spool->min_hpages != -1)
156
hugetlb_acct_memory(spool->hstate,
157
-spool->min_hpages);
158
kfree(spool);
159
}
160
}
161
162
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
163
long min_hpages)
164
{
165
struct hugepage_subpool *spool;
166
167
spool = kzalloc(sizeof(*spool), GFP_KERNEL);
168
if (!spool)
169
return NULL;
170
171
spin_lock_init(&spool->lock);
172
spool->count = 1;
173
spool->max_hpages = max_hpages;
174
spool->hstate = h;
175
spool->min_hpages = min_hpages;
176
177
if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
178
kfree(spool);
179
return NULL;
180
}
181
spool->rsv_hpages = min_hpages;
182
183
return spool;
184
}
185
186
void hugepage_put_subpool(struct hugepage_subpool *spool)
187
{
188
unsigned long flags;
189
190
spin_lock_irqsave(&spool->lock, flags);
191
BUG_ON(!spool->count);
192
spool->count--;
193
unlock_or_release_subpool(spool, flags);
194
}
195
196
/*
197
* Subpool accounting for allocating and reserving pages.
198
* Return -ENOMEM if there are not enough resources to satisfy the
199
* request. Otherwise, return the number of pages by which the
200
* global pools must be adjusted (upward). The returned value may
201
* only be different than the passed value (delta) in the case where
202
* a subpool minimum size must be maintained.
203
*/
204
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
205
long delta)
206
{
207
long ret = delta;
208
209
if (!spool)
210
return ret;
211
212
spin_lock_irq(&spool->lock);
213
214
if (spool->max_hpages != -1) { /* maximum size accounting */
215
if ((spool->used_hpages + delta) <= spool->max_hpages)
216
spool->used_hpages += delta;
217
else {
218
ret = -ENOMEM;
219
goto unlock_ret;
220
}
221
}
222
223
/* minimum size accounting */
224
if (spool->min_hpages != -1 && spool->rsv_hpages) {
225
if (delta > spool->rsv_hpages) {
226
/*
227
* Asking for more reserves than those already taken on
228
* behalf of subpool. Return difference.
229
*/
230
ret = delta - spool->rsv_hpages;
231
spool->rsv_hpages = 0;
232
} else {
233
ret = 0; /* reserves already accounted for */
234
spool->rsv_hpages -= delta;
235
}
236
}
237
238
unlock_ret:
239
spin_unlock_irq(&spool->lock);
240
return ret;
241
}
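/*
 * Worked example (editorial sketch) of the minimum size accounting above,
 * assuming the maximum size check passes: with min_hpages = 8 and
 * rsv_hpages = 8 (all minimum pages still reserved), a request of delta = 10
 * returns 10 - 8 = 2, because only 2 pages must come out of the global pool;
 * rsv_hpages drops to 0. A later request of delta = 3 then returns 3, since
 * no subpool reserves remain.
 */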
242
243
/*
244
* Subpool accounting for freeing and unreserving pages.
245
* Return the number of global page reservations that must be dropped.
246
* The return value may only be different than the passed value (delta)
247
* in the case where a subpool minimum size must be maintained.
248
*/
249
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
250
long delta)
251
{
252
long ret = delta;
253
unsigned long flags;
254
255
if (!spool)
256
return delta;
257
258
spin_lock_irqsave(&spool->lock, flags);
259
260
if (spool->max_hpages != -1) /* maximum size accounting */
261
spool->used_hpages -= delta;
262
263
/* minimum size accounting */
264
if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
265
if (spool->rsv_hpages + delta <= spool->min_hpages)
266
ret = 0;
267
else
268
ret = spool->rsv_hpages + delta - spool->min_hpages;
269
270
spool->rsv_hpages += delta;
271
if (spool->rsv_hpages > spool->min_hpages)
272
spool->rsv_hpages = spool->min_hpages;
273
}
274
275
/*
276
* If hugetlbfs_put_super couldn't free spool due to an outstanding
277
* quota reference, free it now.
278
*/
279
unlock_or_release_subpool(spool, flags);
280
281
return ret;
282
}
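/*
 * Worked example (editorial sketch) continuing the one above: with
 * min_hpages = 8, a max_hpages limit in place so used_hpages is tracked and
 * drops from 10 to 7, and rsv_hpages = 0, a put of delta = 3 gives
 * rsv_hpages + delta = 3 <= min_hpages, so the routine returns 0 (no global
 * reservations dropped) and rebuilds the subpool reserve to rsv_hpages = 3.
 */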
283
284
static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
285
{
286
return subpool_inode(file_inode(vma->vm_file));
287
}
288
289
/*
290
* hugetlb vma_lock helper routines
291
*/
292
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
293
{
294
if (__vma_shareable_lock(vma)) {
295
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
296
297
down_read(&vma_lock->rw_sema);
298
} else if (__vma_private_lock(vma)) {
299
struct resv_map *resv_map = vma_resv_map(vma);
300
301
down_read(&resv_map->rw_sema);
302
}
303
}
304
305
void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
306
{
307
if (__vma_shareable_lock(vma)) {
308
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
309
310
up_read(&vma_lock->rw_sema);
311
} else if (__vma_private_lock(vma)) {
312
struct resv_map *resv_map = vma_resv_map(vma);
313
314
up_read(&resv_map->rw_sema);
315
}
316
}
317
318
void hugetlb_vma_lock_write(struct vm_area_struct *vma)
319
{
320
if (__vma_shareable_lock(vma)) {
321
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
322
323
down_write(&vma_lock->rw_sema);
324
} else if (__vma_private_lock(vma)) {
325
struct resv_map *resv_map = vma_resv_map(vma);
326
327
down_write(&resv_map->rw_sema);
328
}
329
}
330
331
void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
332
{
333
if (__vma_shareable_lock(vma)) {
334
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
335
336
up_write(&vma_lock->rw_sema);
337
} else if (__vma_private_lock(vma)) {
338
struct resv_map *resv_map = vma_resv_map(vma);
339
340
up_write(&resv_map->rw_sema);
341
}
342
}
343
344
int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
345
{
346
347
if (__vma_shareable_lock(vma)) {
348
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
349
350
return down_write_trylock(&vma_lock->rw_sema);
351
} else if (__vma_private_lock(vma)) {
352
struct resv_map *resv_map = vma_resv_map(vma);
353
354
return down_write_trylock(&resv_map->rw_sema);
355
}
356
357
return 1;
358
}
359
360
void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
361
{
362
if (__vma_shareable_lock(vma)) {
363
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
364
365
lockdep_assert_held(&vma_lock->rw_sema);
366
} else if (__vma_private_lock(vma)) {
367
struct resv_map *resv_map = vma_resv_map(vma);
368
369
lockdep_assert_held(&resv_map->rw_sema);
370
}
371
}
372
373
void hugetlb_vma_lock_release(struct kref *kref)
374
{
375
struct hugetlb_vma_lock *vma_lock = container_of(kref,
376
struct hugetlb_vma_lock, refs);
377
378
kfree(vma_lock);
379
}
380
381
static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
382
{
383
struct vm_area_struct *vma = vma_lock->vma;
384
385
/*
386
* vma_lock structure may or may not be released as a result of the put,
387
* but it certainly will no longer be attached to the vma, so clear the pointer.
388
* Semaphore synchronizes access to vma_lock->vma field.
389
*/
390
vma_lock->vma = NULL;
391
vma->vm_private_data = NULL;
392
up_write(&vma_lock->rw_sema);
393
kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
394
}
395
396
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
397
{
398
if (__vma_shareable_lock(vma)) {
399
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
400
401
__hugetlb_vma_unlock_write_put(vma_lock);
402
} else if (__vma_private_lock(vma)) {
403
struct resv_map *resv_map = vma_resv_map(vma);
404
405
/* no free for anon vmas, but still need to unlock */
406
up_write(&resv_map->rw_sema);
407
}
408
}
409
410
static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
411
{
412
/*
413
* Only present in sharable vmas.
414
*/
415
if (!vma || !__vma_shareable_lock(vma))
416
return;
417
418
if (vma->vm_private_data) {
419
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
420
421
down_write(&vma_lock->rw_sema);
422
__hugetlb_vma_unlock_write_put(vma_lock);
423
}
424
}
425
426
/*
427
* vma specific semaphore used for pmd sharing and fault/truncation
428
* synchronization
429
*/
430
int hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
431
{
432
struct hugetlb_vma_lock *vma_lock;
433
434
/* Only establish in sharable (VM_MAYSHARE) vmas */
435
if (!vma || !(vma->vm_flags & VM_MAYSHARE))
436
return 0;
437
438
/* Should never get here with non-NULL vm_private_data */
439
if (vma->vm_private_data)
440
return -EINVAL;
441
442
vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
443
if (!vma_lock) {
444
/*
445
* If we cannot allocate the structure, then the vma cannot
446
* participate in pmd sharing. That is only a possible
447
* performance enhancement and memory saving.
448
* However, the lock is also used to synchronize page
449
* faults with truncation. If the lock is not present,
450
* unlikely races could leave pages in a file past i_size
451
* until the file is removed. Warn in the unlikely case of
452
* allocation failure.
453
*/
454
pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
455
return -EINVAL;
456
}
457
458
kref_init(&vma_lock->refs);
459
init_rwsem(&vma_lock->rw_sema);
460
vma_lock->vma = vma;
461
vma->vm_private_data = vma_lock;
462
463
return 0;
464
}
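/*
 * Usage sketch (editorial, not part of the kernel source): callers that walk
 * or modify a shared hugetlb mapping bracket the work with the helpers above,
 * e.g.
 *
 *	hugetlb_vma_lock_read(vma);
 *	... look up or fault in huge PTEs, possibly via a shared PMD ...
 *	hugetlb_vma_unlock_read(vma);
 *
 * while truncation and PMD-unsharing paths take the write side. For VMAs
 * without VM_MAYSHARE the same calls fall back to the resv_map rw_sema, and
 * for VMAs with neither structure they are no-ops.
 */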
465
466
/* Helper that removes a struct file_region from the resv_map cache and returns
467
* it for use.
468
*/
469
static struct file_region *
470
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
471
{
472
struct file_region *nrg;
473
474
VM_BUG_ON(resv->region_cache_count <= 0);
475
476
resv->region_cache_count--;
477
nrg = list_first_entry(&resv->region_cache, struct file_region, link);
478
list_del(&nrg->link);
479
480
nrg->from = from;
481
nrg->to = to;
482
483
return nrg;
484
}
485
486
static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
487
struct file_region *rg)
488
{
489
#ifdef CONFIG_CGROUP_HUGETLB
490
nrg->reservation_counter = rg->reservation_counter;
491
nrg->css = rg->css;
492
if (rg->css)
493
css_get(rg->css);
494
#endif
495
}
496
497
/* Helper that records hugetlb_cgroup uncharge info. */
498
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
499
struct hstate *h,
500
struct resv_map *resv,
501
struct file_region *nrg)
502
{
503
#ifdef CONFIG_CGROUP_HUGETLB
504
if (h_cg) {
505
nrg->reservation_counter =
506
&h_cg->rsvd_hugepage[hstate_index(h)];
507
nrg->css = &h_cg->css;
508
/*
509
* The caller will hold exactly one h_cg->css reference for the
510
* whole contiguous reservation region. But this area might be
511
* scattered when there are already some file_regions reside in
512
* it. As a result, many file_regions may share only one css
513
* reference. In order to ensure that one file_region must hold
514
* exactly one h_cg->css reference, we should do css_get for
515
* each file_region and leave the reference held by caller
516
* untouched.
517
*/
518
css_get(&h_cg->css);
519
if (!resv->pages_per_hpage)
520
resv->pages_per_hpage = pages_per_huge_page(h);
521
/* pages_per_hpage should be the same for all entries in
522
* a resv_map.
523
*/
524
VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
525
} else {
526
nrg->reservation_counter = NULL;
527
nrg->css = NULL;
528
}
529
#endif
530
}
531
532
static void put_uncharge_info(struct file_region *rg)
533
{
534
#ifdef CONFIG_CGROUP_HUGETLB
535
if (rg->css)
536
css_put(rg->css);
537
#endif
538
}
539
540
static bool has_same_uncharge_info(struct file_region *rg,
541
struct file_region *org)
542
{
543
#ifdef CONFIG_CGROUP_HUGETLB
544
return rg->reservation_counter == org->reservation_counter &&
545
rg->css == org->css;
546
547
#else
548
return true;
549
#endif
550
}
551
552
static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
553
{
554
struct file_region *nrg, *prg;
555
556
prg = list_prev_entry(rg, link);
557
if (&prg->link != &resv->regions && prg->to == rg->from &&
558
has_same_uncharge_info(prg, rg)) {
559
prg->to = rg->to;
560
561
list_del(&rg->link);
562
put_uncharge_info(rg);
563
kfree(rg);
564
565
rg = prg;
566
}
567
568
nrg = list_next_entry(rg, link);
569
if (&nrg->link != &resv->regions && nrg->from == rg->to &&
570
has_same_uncharge_info(nrg, rg)) {
571
nrg->from = rg->from;
572
573
list_del(&rg->link);
574
put_uncharge_info(rg);
575
kfree(rg);
576
}
577
}
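/*
 * Worked example (editorial sketch): if the map already holds [0, 3) and
 * [5, 8) with identical uncharge info, and a new region [3, 5) is inserted
 * between them, the first pass above merges [0, 3) with [3, 5) into [0, 5),
 * and the second pass then merges [5, 8) into it, leaving a single [0, 8)
 * entry; the absorbed entries are unlinked, their css references dropped,
 * and their memory freed.
 */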
578
579
static inline long
580
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
581
long to, struct hstate *h, struct hugetlb_cgroup *cg,
582
long *regions_needed)
583
{
584
struct file_region *nrg;
585
586
if (!regions_needed) {
587
nrg = get_file_region_entry_from_cache(map, from, to);
588
record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
589
list_add(&nrg->link, rg);
590
coalesce_file_region(map, nrg);
591
} else
592
*regions_needed += 1;
593
594
return to - from;
595
}
596
597
/*
598
* Must be called with resv->lock held.
599
*
600
* Calling this with regions_needed != NULL will count the number of pages
601
* to be added but will not modify the linked list. And regions_needed will
602
* indicate the number of file_regions needed in the cache to carry out to add
603
* the regions for this range.
604
*/
605
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
606
struct hugetlb_cgroup *h_cg,
607
struct hstate *h, long *regions_needed)
608
{
609
long add = 0;
610
struct list_head *head = &resv->regions;
611
long last_accounted_offset = f;
612
struct file_region *iter, *trg = NULL;
613
struct list_head *rg = NULL;
614
615
if (regions_needed)
616
*regions_needed = 0;
617
618
/* In this loop, we essentially handle an entry for the range
619
* [last_accounted_offset, iter->from), at every iteration, with some
620
* bounds checking.
621
*/
622
list_for_each_entry_safe(iter, trg, head, link) {
623
/* Skip irrelevant regions that start before our range. */
624
if (iter->from < f) {
625
/* If this region ends after the last accounted offset,
626
* then we need to update last_accounted_offset.
627
*/
628
if (iter->to > last_accounted_offset)
629
last_accounted_offset = iter->to;
630
continue;
631
}
632
633
/* When we find a region that starts beyond our range, we've
634
* finished.
635
*/
636
if (iter->from >= t) {
637
rg = iter->link.prev;
638
break;
639
}
640
641
/* Add an entry for last_accounted_offset -> iter->from, and
642
* update last_accounted_offset.
643
*/
644
if (iter->from > last_accounted_offset)
645
add += hugetlb_resv_map_add(resv, iter->link.prev,
646
last_accounted_offset,
647
iter->from, h, h_cg,
648
regions_needed);
649
650
last_accounted_offset = iter->to;
651
}
652
653
/* Handle the case where our range extends beyond
654
* last_accounted_offset.
655
*/
656
if (!rg)
657
rg = head->prev;
658
if (last_accounted_offset < t)
659
add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
660
t, h, h_cg, regions_needed);
661
662
return add;
663
}
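/*
 * Worked example (editorial sketch): with existing regions [2, 4) and
 * [7, 9), a call for the range [f, t) = [3, 10) walks the list and adds the
 * uncovered gaps [4, 7) and [9, 10), returning 3 + 1 = 4 pages. Called with
 * regions_needed != NULL instead, it leaves the list untouched and reports
 * *regions_needed = 2.
 */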
664
665
/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
666
*/
667
static int allocate_file_region_entries(struct resv_map *resv,
668
int regions_needed)
669
__must_hold(&resv->lock)
670
{
671
LIST_HEAD(allocated_regions);
672
int to_allocate = 0, i = 0;
673
struct file_region *trg = NULL, *rg = NULL;
674
675
VM_BUG_ON(regions_needed < 0);
676
677
/*
678
* Check for sufficient descriptors in the cache to accommodate
679
* the number of in progress add operations plus regions_needed.
680
*
681
* This is a while loop because when we drop the lock, some other call
682
* to region_add or region_del may have consumed some region_entries,
683
* so we keep looping here until we finally have enough entries for
684
* (adds_in_progress + regions_needed).
685
*/
686
while (resv->region_cache_count <
687
(resv->adds_in_progress + regions_needed)) {
688
to_allocate = resv->adds_in_progress + regions_needed -
689
resv->region_cache_count;
690
691
/* At this point, we should have enough entries in the cache
692
* for all the existing adds_in_progress. We should only be
693
* needing to allocate for regions_needed.
694
*/
695
VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
696
697
spin_unlock(&resv->lock);
698
for (i = 0; i < to_allocate; i++) {
699
trg = kmalloc(sizeof(*trg), GFP_KERNEL);
700
if (!trg)
701
goto out_of_memory;
702
list_add(&trg->link, &allocated_regions);
703
}
704
705
spin_lock(&resv->lock);
706
707
list_splice(&allocated_regions, &resv->region_cache);
708
resv->region_cache_count += to_allocate;
709
}
710
711
return 0;
712
713
out_of_memory:
714
list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
715
list_del(&rg->link);
716
kfree(rg);
717
}
718
return -ENOMEM;
719
}
720
721
/*
722
* Add the huge page range represented by [f, t) to the reserve
723
* map. Regions will be taken from the cache to fill in this range.
724
* Sufficient regions should exist in the cache due to the previous
725
* call to region_chg with the same range, but in some cases the cache will not
726
* have sufficient entries due to races with other code doing region_add or
727
* region_del. The extra needed entries will be allocated.
728
*
729
* regions_needed is the out value provided by a previous call to region_chg.
730
*
731
* Return the number of new huge pages added to the map. This number is greater
732
* than or equal to zero. If file_region entries needed to be allocated for
733
* this operation and we were not able to allocate, it returns -ENOMEM.
734
* region_add of regions of length 1 never allocates file_regions and cannot
735
* fail; region_chg will always allocate at least 1 entry and a region_add for
736
* 1 page will only require at most 1 entry.
737
*/
738
static long region_add(struct resv_map *resv, long f, long t,
739
long in_regions_needed, struct hstate *h,
740
struct hugetlb_cgroup *h_cg)
741
{
742
long add = 0, actual_regions_needed = 0;
743
744
spin_lock(&resv->lock);
745
retry:
746
747
/* Count how many regions are actually needed to execute this add. */
748
add_reservation_in_range(resv, f, t, NULL, NULL,
749
&actual_regions_needed);
750
751
/*
752
* Check for sufficient descriptors in the cache to accommodate
753
* this add operation. Note that actual_regions_needed may be greater
754
* than in_regions_needed, as the resv_map may have been modified since
755
* the region_chg call. In this case, we need to make sure that we
756
* allocate extra entries, such that we have enough for all the
757
* existing adds_in_progress, plus the excess needed for this
758
* operation.
759
*/
760
if (actual_regions_needed > in_regions_needed &&
761
resv->region_cache_count <
762
resv->adds_in_progress +
763
(actual_regions_needed - in_regions_needed)) {
764
/* region_add operation of range 1 should never need to
765
* allocate file_region entries.
766
*/
767
VM_BUG_ON(t - f <= 1);
768
769
if (allocate_file_region_entries(
770
resv, actual_regions_needed - in_regions_needed)) {
771
return -ENOMEM;
772
}
773
774
goto retry;
775
}
776
777
add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
778
779
resv->adds_in_progress -= in_regions_needed;
780
781
spin_unlock(&resv->lock);
782
return add;
783
}
784
785
/*
786
* Examine the existing reserve map and determine how many
787
* huge pages in the specified range [f, t) are NOT currently
788
* represented. This routine is called before a subsequent
789
* call to region_add that will actually modify the reserve
790
* map to add the specified range [f, t). region_chg does
791
* not change the number of huge pages represented by the
792
* map. A number of new file_region structures is added to the cache as a
793
* placeholder, for the subsequent region_add call to use. At least 1
794
* file_region structure is added.
795
*
796
* out_regions_needed is the number of regions added to the
797
* resv->adds_in_progress. This value needs to be provided to a follow up call
798
* to region_add or region_abort for proper accounting.
799
*
800
* Returns the number of huge pages that need to be added to the existing
801
* reservation map for the range [f, t). This number is greater than or equal to
802
* zero. -ENOMEM is returned if a new file_region structure or cache entry
803
* is needed and can not be allocated.
804
*/
805
static long region_chg(struct resv_map *resv, long f, long t,
806
long *out_regions_needed)
807
{
808
long chg = 0;
809
810
spin_lock(&resv->lock);
811
812
/* Count how many hugepages in this range are NOT represented. */
813
chg = add_reservation_in_range(resv, f, t, NULL, NULL,
814
out_regions_needed);
815
816
if (*out_regions_needed == 0)
817
*out_regions_needed = 1;
818
819
if (allocate_file_region_entries(resv, *out_regions_needed))
820
return -ENOMEM;
821
822
resv->adds_in_progress += *out_regions_needed;
823
824
spin_unlock(&resv->lock);
825
return chg;
826
}
827
828
/*
829
* Abort the in progress add operation. The adds_in_progress field
830
* of the resv_map keeps track of the operations in progress between
831
* calls to region_chg and region_add. Operations are sometimes
832
* aborted after the call to region_chg. In such cases, region_abort
833
* is called to decrement the adds_in_progress counter. regions_needed
834
* is the value returned by the region_chg call, it is used to decrement
835
* the adds_in_progress counter.
836
*
837
* NOTE: The range arguments [f, t) are not needed or used in this
838
* routine. They are kept to make reading the calling code easier as
839
* arguments will match the associated region_chg call.
840
*/
841
static void region_abort(struct resv_map *resv, long f, long t,
842
long regions_needed)
843
{
844
spin_lock(&resv->lock);
845
VM_BUG_ON(!resv->region_cache_count);
846
resv->adds_in_progress -= regions_needed;
847
spin_unlock(&resv->lock);
848
}
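/*
 * Protocol sketch (editorial, not part of the kernel source): the three
 * routines above form a prepare/commit/cancel sequence, roughly
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		return -ENOMEM;
 *	... charge cgroups, take pages from the global pool ...
 *	if (that failed)
 *		region_abort(resv, f, t, regions_needed);
 *	else
 *		region_add(resv, f, t, regions_needed, h, h_cg);
 *
 * region_chg pre-allocates cache entries and bumps adds_in_progress, so the
 * later region_add (or region_abort) must always be paired with it.
 */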
849
850
/*
851
* Delete the specified range [f, t) from the reserve map. If the
852
* t parameter is LONG_MAX, this indicates that ALL regions after f
853
* should be deleted. Locate the regions which intersect [f, t)
854
* and either trim, delete or split the existing regions.
855
*
856
* Returns the number of huge pages deleted from the reserve map.
857
* In the normal case, the return value is zero or more. In the
858
* case where a region must be split, a new region descriptor must
859
* be allocated. If the allocation fails, -ENOMEM will be returned.
860
* NOTE: If the parameter t == LONG_MAX, then we will never split
861
* a region and possibly return -ENOMEM. Callers specifying
862
* t == LONG_MAX do not need to check for -ENOMEM error.
863
*/
864
static long region_del(struct resv_map *resv, long f, long t)
865
{
866
struct list_head *head = &resv->regions;
867
struct file_region *rg, *trg;
868
struct file_region *nrg = NULL;
869
long del = 0;
870
871
retry:
872
spin_lock(&resv->lock);
873
list_for_each_entry_safe(rg, trg, head, link) {
874
/*
875
* Skip regions before the range to be deleted. file_region
876
* ranges are normally of the form [from, to). However, there
877
* may be a "placeholder" entry in the map which is of the form
878
* (from, to) with from == to. Check for placeholder entries
879
* at the beginning of the range to be deleted.
880
*/
881
if (rg->to <= f && (rg->to != rg->from || rg->to != f))
882
continue;
883
884
if (rg->from >= t)
885
break;
886
887
if (f > rg->from && t < rg->to) { /* Must split region */
888
/*
889
* Check for an entry in the cache before dropping
890
* lock and attempting allocation.
891
*/
892
if (!nrg &&
893
resv->region_cache_count > resv->adds_in_progress) {
894
nrg = list_first_entry(&resv->region_cache,
895
struct file_region,
896
link);
897
list_del(&nrg->link);
898
resv->region_cache_count--;
899
}
900
901
if (!nrg) {
902
spin_unlock(&resv->lock);
903
nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
904
if (!nrg)
905
return -ENOMEM;
906
goto retry;
907
}
908
909
del += t - f;
910
hugetlb_cgroup_uncharge_file_region(
911
resv, rg, t - f, false);
912
913
/* New entry for end of split region */
914
nrg->from = t;
915
nrg->to = rg->to;
916
917
copy_hugetlb_cgroup_uncharge_info(nrg, rg);
918
919
INIT_LIST_HEAD(&nrg->link);
920
921
/* Original entry is trimmed */
922
rg->to = f;
923
924
list_add(&nrg->link, &rg->link);
925
nrg = NULL;
926
break;
927
}
928
929
if (f <= rg->from && t >= rg->to) { /* Remove entire region */
930
del += rg->to - rg->from;
931
hugetlb_cgroup_uncharge_file_region(resv, rg,
932
rg->to - rg->from, true);
933
list_del(&rg->link);
934
kfree(rg);
935
continue;
936
}
937
938
if (f <= rg->from) { /* Trim beginning of region */
939
hugetlb_cgroup_uncharge_file_region(resv, rg,
940
t - rg->from, false);
941
942
del += t - rg->from;
943
rg->from = t;
944
} else { /* Trim end of region */
945
hugetlb_cgroup_uncharge_file_region(resv, rg,
946
rg->to - f, false);
947
948
del += rg->to - f;
949
rg->to = f;
950
}
951
}
952
953
spin_unlock(&resv->lock);
954
kfree(nrg);
955
return del;
956
}
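/*
 * Worked example (editorial sketch) of the split case above: deleting
 * [f, t) = [4, 6) from an existing region [2, 8) trims the original entry to
 * [2, 4) and inserts a new descriptor for [6, 8), returning del = 2. When
 * called with t == LONG_MAX (as resv_map_release does), a region is only
 * ever trimmed or removed, never split, so that path cannot fail with
 * -ENOMEM.
 */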
957
958
/*
959
* A rare out of memory error was encountered which prevented removal of
960
* the reserve map region for a page. The huge page itself was freed
961
* and removed from the page cache. This routine will adjust the subpool
962
* usage count, and the global reserve count if needed. By incrementing
963
* these counts, the reserve map entry which could not be deleted will
964
* appear as a "reserved" entry instead of simply dangling with incorrect
965
* counts.
966
*/
967
void hugetlb_fix_reserve_counts(struct inode *inode)
968
{
969
struct hugepage_subpool *spool = subpool_inode(inode);
970
long rsv_adjust;
971
bool reserved = false;
972
973
rsv_adjust = hugepage_subpool_get_pages(spool, 1);
974
if (rsv_adjust > 0) {
975
struct hstate *h = hstate_inode(inode);
976
977
if (!hugetlb_acct_memory(h, 1))
978
reserved = true;
979
} else if (!rsv_adjust) {
980
reserved = true;
981
}
982
983
if (!reserved)
984
pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
985
}
986
987
/*
988
* Count and return the number of huge pages in the reserve map
989
* that intersect with the range [f, t).
990
*/
991
static long region_count(struct resv_map *resv, long f, long t)
992
{
993
struct list_head *head = &resv->regions;
994
struct file_region *rg;
995
long chg = 0;
996
997
spin_lock(&resv->lock);
998
/* Locate each segment we overlap with, and count that overlap. */
999
list_for_each_entry(rg, head, link) {
1000
long seg_from;
1001
long seg_to;
1002
1003
if (rg->to <= f)
1004
continue;
1005
if (rg->from >= t)
1006
break;
1007
1008
seg_from = max(rg->from, f);
1009
seg_to = min(rg->to, t);
1010
1011
chg += seg_to - seg_from;
1012
}
1013
spin_unlock(&resv->lock);
1014
1015
return chg;
1016
}
1017
1018
/*
1019
* Convert the address within this vma to the page offset within
1020
* the mapping, huge page units here.
1021
*/
1022
static pgoff_t vma_hugecache_offset(struct hstate *h,
1023
struct vm_area_struct *vma, unsigned long address)
1024
{
1025
return ((address - vma->vm_start) >> huge_page_shift(h)) +
1026
(vma->vm_pgoff >> huge_page_order(h));
1027
}
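/*
 * Worked example (editorial sketch): for a 2 MiB hstate (huge_page_shift =
 * 21, huge_page_order = 9 with 4 KiB base pages), a fault at
 * vma->vm_start + 4 MiB in a mapping with vm_pgoff = 512 yields
 * (4 MiB >> 21) + (512 >> 9) = 2 + 1 = 3 huge-page-sized units into the
 * underlying file.
 */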
1028
1029
/**
1030
* vma_kernel_pagesize - Page size granularity for this VMA.
1031
* @vma: The user mapping.
1032
*
1033
* Folios in this VMA will be aligned to, and at least the size of the
1034
* number of bytes returned by this function.
1035
*
1036
* Return: The default size of the folios allocated when backing a VMA.
1037
*/
1038
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1039
{
1040
if (vma->vm_ops && vma->vm_ops->pagesize)
1041
return vma->vm_ops->pagesize(vma);
1042
return PAGE_SIZE;
1043
}
1044
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
1045
1046
/*
1047
* Return the page size being used by the MMU to back a VMA. In the majority
1048
* of cases, the page size used by the kernel matches the MMU size. On
1049
* architectures where it differs, an architecture-specific 'strong'
1050
* version of this symbol is required.
1051
*/
1052
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
1053
{
1054
return vma_kernel_pagesize(vma);
1055
}
1056
1057
/*
1058
* Flags for MAP_PRIVATE reservations. These are stored in the bottom
1059
* bits of the reservation map pointer, which are always clear due to
1060
* alignment.
1061
*/
1062
#define HPAGE_RESV_OWNER (1UL << 0)
1063
#define HPAGE_RESV_UNMAPPED (1UL << 1)
1064
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
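/*
 * Packing sketch (editorial): because a kmalloc()ed resv_map is at least
 * word aligned, its low two bits are free, so a MAP_PRIVATE vma can carry
 * both the pointer and the flags in vm_private_data, e.g.
 *
 *	map   = (struct resv_map *)(get_vma_private_data(vma) & ~HPAGE_RESV_MASK);
 *	owner = get_vma_private_data(vma) & HPAGE_RESV_OWNER;
 *
 * which is exactly how vma_resv_map() and is_vma_resv_set() below decode it.
 */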
1065
1066
/*
1067
* These helpers are used to track how many pages are reserved for
1068
* faults in a MAP_PRIVATE mapping. Only the process that called mmap()
1069
* is guaranteed to have their future faults succeed.
1070
*
1071
* With the exception of hugetlb_dup_vma_private() which is called at fork(),
1072
* the reserve counters are updated with the hugetlb_lock held. It is safe
1073
* to reset the VMA at fork() time as it is not in use yet and there is no
1074
* chance of the global counters getting corrupted as a result of the values.
1075
*
1076
* The private mapping reservation is represented in a subtly different
1077
* manner to a shared mapping. A shared mapping has a region map associated
1078
* with the underlying file, this region map represents the backing file
1079
* pages which have ever had a reservation assigned; this persists even
1080
* after the page is instantiated. A private mapping has a region map
1081
* associated with the original mmap which is attached to all VMAs which
1082
* reference it, this region map represents those offsets which have consumed
1083
* reservation, i.e. where pages have been instantiated.
1084
*/
1085
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
1086
{
1087
return (unsigned long)vma->vm_private_data;
1088
}
1089
1090
static void set_vma_private_data(struct vm_area_struct *vma,
1091
unsigned long value)
1092
{
1093
vma->vm_private_data = (void *)value;
1094
}
1095
1096
static void
1097
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
1098
struct hugetlb_cgroup *h_cg,
1099
struct hstate *h)
1100
{
1101
#ifdef CONFIG_CGROUP_HUGETLB
1102
if (!h_cg || !h) {
1103
resv_map->reservation_counter = NULL;
1104
resv_map->pages_per_hpage = 0;
1105
resv_map->css = NULL;
1106
} else {
1107
resv_map->reservation_counter =
1108
&h_cg->rsvd_hugepage[hstate_index(h)];
1109
resv_map->pages_per_hpage = pages_per_huge_page(h);
1110
resv_map->css = &h_cg->css;
1111
}
1112
#endif
1113
}
1114
1115
struct resv_map *resv_map_alloc(void)
1116
{
1117
struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
1118
struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
1119
1120
if (!resv_map || !rg) {
1121
kfree(resv_map);
1122
kfree(rg);
1123
return NULL;
1124
}
1125
1126
kref_init(&resv_map->refs);
1127
spin_lock_init(&resv_map->lock);
1128
INIT_LIST_HEAD(&resv_map->regions);
1129
init_rwsem(&resv_map->rw_sema);
1130
1131
resv_map->adds_in_progress = 0;
1132
/*
1133
* Initialize these to 0. On shared mappings, 0's here indicate these
1134
* fields don't do cgroup accounting. On private mappings, these will be
1135
* re-initialized to the proper values, to indicate that hugetlb cgroup
1136
* reservations are to be un-charged from here.
1137
*/
1138
resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
1139
1140
INIT_LIST_HEAD(&resv_map->region_cache);
1141
list_add(&rg->link, &resv_map->region_cache);
1142
resv_map->region_cache_count = 1;
1143
1144
return resv_map;
1145
}
1146
1147
void resv_map_release(struct kref *ref)
1148
{
1149
struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
1150
struct list_head *head = &resv_map->region_cache;
1151
struct file_region *rg, *trg;
1152
1153
/* Clear out any active regions before we release the map. */
1154
region_del(resv_map, 0, LONG_MAX);
1155
1156
/* ... and any entries left in the cache */
1157
list_for_each_entry_safe(rg, trg, head, link) {
1158
list_del(&rg->link);
1159
kfree(rg);
1160
}
1161
1162
VM_BUG_ON(resv_map->adds_in_progress);
1163
1164
kfree(resv_map);
1165
}
1166
1167
static inline struct resv_map *inode_resv_map(struct inode *inode)
1168
{
1169
/*
1170
* At inode evict time, i_mapping may not point to the original
1171
* address space within the inode. This original address space
1172
* contains the pointer to the resv_map. So, always use the
1173
* address space embedded within the inode.
1174
* The VERY common case is inode->mapping == &inode->i_data but,
1175
* this may not be true for device special inodes.
1176
*/
1177
return (struct resv_map *)(&inode->i_data)->i_private_data;
1178
}
1179
1180
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
1181
{
1182
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1183
if (vma->vm_flags & VM_MAYSHARE) {
1184
struct address_space *mapping = vma->vm_file->f_mapping;
1185
struct inode *inode = mapping->host;
1186
1187
return inode_resv_map(inode);
1188
1189
} else {
1190
return (struct resv_map *)(get_vma_private_data(vma) &
1191
~HPAGE_RESV_MASK);
1192
}
1193
}
1194
1195
static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
1196
{
1197
VM_WARN_ON_ONCE_VMA(!is_vm_hugetlb_page(vma), vma);
1198
VM_WARN_ON_ONCE_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1199
1200
set_vma_private_data(vma, get_vma_private_data(vma) | flags);
1201
}
1202
1203
static void set_vma_desc_resv_map(struct vm_area_desc *desc, struct resv_map *map)
1204
{
1205
VM_WARN_ON_ONCE(!is_vm_hugetlb_flags(desc->vm_flags));
1206
VM_WARN_ON_ONCE(desc->vm_flags & VM_MAYSHARE);
1207
1208
desc->private_data = map;
1209
}
1210
1211
static void set_vma_desc_resv_flags(struct vm_area_desc *desc, unsigned long flags)
1212
{
1213
VM_WARN_ON_ONCE(!is_vm_hugetlb_flags(desc->vm_flags));
1214
VM_WARN_ON_ONCE(desc->vm_flags & VM_MAYSHARE);
1215
1216
desc->private_data = (void *)((unsigned long)desc->private_data | flags);
1217
}
1218
1219
static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
1220
{
1221
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1222
1223
return (get_vma_private_data(vma) & flag) != 0;
1224
}
1225
1226
static bool is_vma_desc_resv_set(struct vm_area_desc *desc, unsigned long flag)
1227
{
1228
VM_WARN_ON_ONCE(!is_vm_hugetlb_flags(desc->vm_flags));
1229
1230
return ((unsigned long)desc->private_data) & flag;
1231
}
1232
1233
bool __vma_private_lock(struct vm_area_struct *vma)
1234
{
1235
return !(vma->vm_flags & VM_MAYSHARE) &&
1236
get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
1237
is_vma_resv_set(vma, HPAGE_RESV_OWNER);
1238
}
1239
1240
void hugetlb_dup_vma_private(struct vm_area_struct *vma)
1241
{
1242
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1243
/*
1244
* Clear vm_private_data
1245
* - For shared mappings this is a per-vma semaphore that may be
1246
* allocated in a subsequent call to hugetlb_vm_op_open.
1247
* Before clearing, make sure pointer is not associated with vma
1248
* as this will leak the structure. This is the case when called
1249
* via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
1250
* been called to allocate a new structure.
1251
* - For MAP_PRIVATE mappings, this is the reserve map which does
1252
* not apply to children. Faults generated by the children are
1253
* not guaranteed to succeed, even if read-only.
1254
*/
1255
if (vma->vm_flags & VM_MAYSHARE) {
1256
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1257
1258
if (vma_lock && vma_lock->vma != vma)
1259
vma->vm_private_data = NULL;
1260
} else
1261
vma->vm_private_data = NULL;
1262
}
1263
1264
/*
1265
* Reset and decrement one ref on hugepage private reservation.
1266
* Called with mm->mmap_lock writer semaphore held.
1267
* This function should be only used by mremap and operate on
1268
* same sized vma. It should never come here with last ref on the
1269
* reservation.
1270
*/
1271
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1272
{
1273
/*
1274
* Clear the old hugetlb private page reservation.
1275
* It has already been transferred to new_vma.
1276
*
1277
* During a mremap() operation of a hugetlb vma we call move_vma()
1278
* which copies vma into new_vma and unmaps vma. After the copy
1279
* operation both new_vma and vma share a reference to the resv_map
1280
* struct, and at that point vma is about to be unmapped. We don't
1281
* want to return the reservation to the pool at unmap of vma because
1282
* the reservation still lives on in new_vma, so simply decrement the
1283
* ref here and remove the resv_map reference from this vma.
1284
*/
1285
struct resv_map *reservations = vma_resv_map(vma);
1286
1287
if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1288
resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1289
kref_put(&reservations->refs, resv_map_release);
1290
}
1291
1292
hugetlb_dup_vma_private(vma);
1293
}
1294
1295
static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
1296
{
1297
int nid = folio_nid(folio);
1298
1299
lockdep_assert_held(&hugetlb_lock);
1300
VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1301
1302
list_move(&folio->lru, &h->hugepage_freelists[nid]);
1303
h->free_huge_pages++;
1304
h->free_huge_pages_node[nid]++;
1305
folio_set_hugetlb_freed(folio);
1306
}
1307
1308
static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
1309
int nid)
1310
{
1311
struct folio *folio;
1312
bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1313
1314
lockdep_assert_held(&hugetlb_lock);
1315
list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
1316
if (pin && !folio_is_longterm_pinnable(folio))
1317
continue;
1318
1319
if (folio_test_hwpoison(folio))
1320
continue;
1321
1322
if (is_migrate_isolate_page(&folio->page))
1323
continue;
1324
1325
list_move(&folio->lru, &h->hugepage_activelist);
1326
folio_ref_unfreeze(folio, 1);
1327
folio_clear_hugetlb_freed(folio);
1328
h->free_huge_pages--;
1329
h->free_huge_pages_node[nid]--;
1330
return folio;
1331
}
1332
1333
return NULL;
1334
}
1335
1336
static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
1337
int nid, nodemask_t *nmask)
1338
{
1339
unsigned int cpuset_mems_cookie;
1340
struct zonelist *zonelist;
1341
struct zone *zone;
1342
struct zoneref *z;
1343
int node = NUMA_NO_NODE;
1344
1345
/* 'nid' should not be NUMA_NO_NODE. Try to catch any misuse of it and rectify. */
1346
if (nid == NUMA_NO_NODE)
1347
nid = numa_node_id();
1348
1349
zonelist = node_zonelist(nid, gfp_mask);
1350
1351
retry_cpuset:
1352
cpuset_mems_cookie = read_mems_allowed_begin();
1353
for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1354
struct folio *folio;
1355
1356
if (!cpuset_zone_allowed(zone, gfp_mask))
1357
continue;
1358
/*
1359
* no need to ask again on the same node. Pool is node rather than
1360
* zone aware
1361
*/
1362
if (zone_to_nid(zone) == node)
1363
continue;
1364
node = zone_to_nid(zone);
1365
1366
folio = dequeue_hugetlb_folio_node_exact(h, node);
1367
if (folio)
1368
return folio;
1369
}
1370
if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1371
goto retry_cpuset;
1372
1373
return NULL;
1374
}
1375
1376
static unsigned long available_huge_pages(struct hstate *h)
1377
{
1378
return h->free_huge_pages - h->resv_huge_pages;
1379
}
1380
1381
static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
1382
struct vm_area_struct *vma,
1383
unsigned long address, long gbl_chg)
1384
{
1385
struct folio *folio = NULL;
1386
struct mempolicy *mpol;
1387
gfp_t gfp_mask;
1388
nodemask_t *nodemask;
1389
int nid;
1390
1391
/*
1392
* gbl_chg==1 means the allocation requires a new page that was not
1393
* reserved before, so make sure there is at least one free page available.
1394
*/
1395
if (gbl_chg && !available_huge_pages(h))
1396
goto err;
1397
1398
gfp_mask = htlb_alloc_mask(h);
1399
nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1400
1401
if (mpol_is_preferred_many(mpol)) {
1402
folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1403
nid, nodemask);
1404
1405
/* Fall back to all nodes if no folio was found */
1406
nodemask = NULL;
1407
}
1408
1409
if (!folio)
1410
folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1411
nid, nodemask);
1412
1413
mpol_cond_put(mpol);
1414
return folio;
1415
1416
err:
1417
return NULL;
1418
}
1419
1420
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1421
#ifdef CONFIG_CONTIG_ALLOC
1422
static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask,
1423
int nid, nodemask_t *nodemask)
1424
{
1425
struct folio *folio;
1426
bool retried = false;
1427
1428
retry:
1429
folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask);
1430
if (!folio) {
1431
if (hugetlb_cma_exclusive_alloc())
1432
return NULL;
1433
1434
folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
1435
if (!folio)
1436
return NULL;
1437
}
1438
1439
if (folio_ref_freeze(folio, 1))
1440
return folio;
1441
1442
pr_warn("HugeTLB: unexpected refcount on PFN %lu\n", folio_pfn(folio));
1443
hugetlb_free_folio(folio);
1444
if (!retried) {
1445
retried = true;
1446
goto retry;
1447
}
1448
return NULL;
1449
}
1450
1451
#else /* !CONFIG_CONTIG_ALLOC */
1452
static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
1453
nodemask_t *nodemask)
1454
{
1455
return NULL;
1456
}
1457
#endif /* CONFIG_CONTIG_ALLOC */
1458
1459
#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1460
static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
1461
nodemask_t *nodemask)
1462
{
1463
return NULL;
1464
}
1465
#endif
1466
1467
/*
1468
* Remove hugetlb folio from lists.
1469
* If vmemmap exists for the folio, clear the hugetlb flag so that the
1470
* folio appears as just a compound page. Otherwise, wait until after
1471
* allocating vmemmap to clear the flag.
1472
*
1473
* Must be called with hugetlb lock held.
1474
*/
1475
void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
1476
bool adjust_surplus)
1477
{
1478
int nid = folio_nid(folio);
1479
1480
VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
1481
VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
1482
1483
lockdep_assert_held(&hugetlb_lock);
1484
if (hstate_is_gigantic_no_runtime(h))
1485
return;
1486
1487
list_del(&folio->lru);
1488
1489
if (folio_test_hugetlb_freed(folio)) {
1490
folio_clear_hugetlb_freed(folio);
1491
h->free_huge_pages--;
1492
h->free_huge_pages_node[nid]--;
1493
}
1494
if (adjust_surplus) {
1495
h->surplus_huge_pages--;
1496
h->surplus_huge_pages_node[nid]--;
1497
}
1498
1499
/*
1500
* We can only clear the hugetlb flag after allocating vmemmap
1501
* pages. Otherwise, someone (memory error handling) may try to write
1502
* to tail struct pages.
1503
*/
1504
if (!folio_test_hugetlb_vmemmap_optimized(folio))
1505
__folio_clear_hugetlb(folio);
1506
1507
h->nr_huge_pages--;
1508
h->nr_huge_pages_node[nid]--;
1509
}
1510
1511
void add_hugetlb_folio(struct hstate *h, struct folio *folio,
1512
bool adjust_surplus)
1513
{
1514
int nid = folio_nid(folio);
1515
1516
VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);
1517
1518
lockdep_assert_held(&hugetlb_lock);
1519
1520
INIT_LIST_HEAD(&folio->lru);
1521
h->nr_huge_pages++;
1522
h->nr_huge_pages_node[nid]++;
1523
1524
if (adjust_surplus) {
1525
h->surplus_huge_pages++;
1526
h->surplus_huge_pages_node[nid]++;
1527
}
1528
1529
__folio_set_hugetlb(folio);
1530
folio_change_private(folio, NULL);
1531
/*
1532
* We have to set hugetlb_vmemmap_optimized again as above
1533
* folio_change_private(folio, NULL) cleared it.
1534
*/
1535
folio_set_hugetlb_vmemmap_optimized(folio);
1536
1537
arch_clear_hugetlb_flags(folio);
1538
enqueue_hugetlb_folio(h, folio);
1539
}
1540
1541
static void __update_and_free_hugetlb_folio(struct hstate *h,
1542
struct folio *folio)
1543
{
1544
bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio);
1545
1546
if (hstate_is_gigantic_no_runtime(h))
1547
return;
1548
1549
/*
1550
* If we don't know which subpages are hwpoisoned, we can't free
1551
* the hugepage, so it's leaked intentionally.
1552
*/
1553
if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1554
return;
1555
1556
/*
1557
* If folio is not vmemmap optimized (!clear_flag), then the folio
1558
* is no longer identified as a hugetlb page. hugetlb_vmemmap_restore_folio
1559
* can only be passed hugetlb pages and will BUG otherwise.
1560
*/
1561
if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) {
1562
spin_lock_irq(&hugetlb_lock);
1563
/*
1564
* If we cannot allocate vmemmap pages, just refuse to free the
1565
* page and put the page back on the hugetlb free list and treat
1566
* as a surplus page.
1567
*/
1568
add_hugetlb_folio(h, folio, true);
1569
spin_unlock_irq(&hugetlb_lock);
1570
return;
1571
}
1572
1573
/*
1574
* If vmemmap pages were allocated above, then we need to clear the
1575
* hugetlb flag under the hugetlb lock.
1576
*/
1577
if (folio_test_hugetlb(folio)) {
1578
spin_lock_irq(&hugetlb_lock);
1579
__folio_clear_hugetlb(folio);
1580
spin_unlock_irq(&hugetlb_lock);
1581
}
1582
1583
/*
1584
* Move PageHWPoison flag from head page to the raw error pages,
1585
* which makes any healthy subpages reusable.
1586
*/
1587
if (unlikely(folio_test_hwpoison(folio)))
1588
folio_clear_hugetlb_hwpoison(folio);
1589
1590
folio_ref_unfreeze(folio, 1);
1591
1592
hugetlb_free_folio(folio);
1593
}
1594
1595
/*
1596
* Since update_and_free_hugetlb_folio() can be called from any context, we cannot
1597
* use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1598
* actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate
1599
* the vmemmap pages.
1600
*
1601
* free_hpage_workfn() locklessly retrieves the linked list of pages to be
1602
* freed and frees them one-by-one. As the page->mapping pointer is going
1603
* to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1604
* structure of a lockless linked list of huge pages to be freed.
1605
*/
1606
static LLIST_HEAD(hpage_freelist);
1607
1608
static void free_hpage_workfn(struct work_struct *work)
1609
{
1610
struct llist_node *node;
1611
1612
node = llist_del_all(&hpage_freelist);
1613
1614
while (node) {
1615
struct folio *folio;
1616
struct hstate *h;
1617
1618
folio = container_of((struct address_space **)node,
1619
struct folio, mapping);
1620
node = node->next;
1621
folio->mapping = NULL;
1622
/*
1623
* The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
1624
* folio_hstate() is going to trigger because a previous call to
1625
* remove_hugetlb_folio() will clear the hugetlb bit, so do
1626
* not use folio_hstate() directly.
1627
*/
1628
h = size_to_hstate(folio_size(folio));
1629
1630
__update_and_free_hugetlb_folio(h, folio);
1631
1632
cond_resched();
1633
}
1634
}
1635
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1636
1637
static inline void flush_free_hpage_work(struct hstate *h)
1638
{
1639
if (hugetlb_vmemmap_optimizable(h))
1640
flush_work(&free_hpage_work);
1641
}
1642
1643
static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
1644
bool atomic)
1645
{
1646
if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
1647
__update_and_free_hugetlb_folio(h, folio);
1648
return;
1649
}
1650
1651
/*
1652
* Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1653
*
1654
* Only call schedule_work() if hpage_freelist is previously
1655
* empty. Otherwise, schedule_work() had been called but the workfn
1656
* hasn't retrieved the list yet.
1657
*/
1658
if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
1659
schedule_work(&free_hpage_work);
1660
}
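/*
 * Deferral sketch (editorial, not part of the kernel source): in atomic
 * context a vmemmap-optimized folio is not freed directly; instead its
 * ->mapping field is reused as an llist_node and pushed onto hpage_freelist:
 *
 *	llist_add((struct llist_node *)&folio->mapping, &hpage_freelist);
 *
 * llist_add() returns true only when the list was previously empty, which is
 * why schedule_work() is called just once per batch; free_hpage_workfn()
 * later drains the list and frees each folio with GFP_KERNEL allocations
 * available for restoring vmemmap.
 */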
1661
1662
static void bulk_vmemmap_restore_error(struct hstate *h,
1663
struct list_head *folio_list,
1664
struct list_head *non_hvo_folios)
1665
{
1666
struct folio *folio, *t_folio;
1667
1668
if (!list_empty(non_hvo_folios)) {
1669
/*
1670
* Free any restored hugetlb pages so that restore of the
1671
* entire list can be retried.
1672
* The idea is that in the common case of ENOMEM errors freeing
1673
* hugetlb pages with vmemmap we will free up memory so that we
1674
* can allocate vmemmap for more hugetlb pages.
1675
*/
1676
list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
1677
list_del(&folio->lru);
1678
spin_lock_irq(&hugetlb_lock);
1679
__folio_clear_hugetlb(folio);
1680
spin_unlock_irq(&hugetlb_lock);
1681
update_and_free_hugetlb_folio(h, folio, false);
1682
cond_resched();
1683
}
1684
} else {
1685
/*
1686
* In the case where there are no folios which can be
1687
* immediately freed, we loop through the list trying to restore
1688
* vmemmap individually in the hope that someone elsewhere may
1689
* have done something to cause success (such as freeing some
1690
* memory). If unable to restore a hugetlb page, the hugetlb
1691
* page is made a surplus page and removed from the list.
1692
* If we are able to restore vmemmap and free one hugetlb page, we
1693
* quit processing the list to retry the bulk operation.
1694
*/
1695
list_for_each_entry_safe(folio, t_folio, folio_list, lru)
1696
if (hugetlb_vmemmap_restore_folio(h, folio)) {
1697
list_del(&folio->lru);
1698
spin_lock_irq(&hugetlb_lock);
1699
add_hugetlb_folio(h, folio, true);
1700
spin_unlock_irq(&hugetlb_lock);
1701
} else {
1702
list_del(&folio->lru);
1703
spin_lock_irq(&hugetlb_lock);
1704
__folio_clear_hugetlb(folio);
1705
spin_unlock_irq(&hugetlb_lock);
1706
update_and_free_hugetlb_folio(h, folio, false);
1707
cond_resched();
1708
break;
1709
}
1710
}
1711
}
1712
1713
static void update_and_free_pages_bulk(struct hstate *h,
1714
struct list_head *folio_list)
1715
{
1716
long ret;
1717
struct folio *folio, *t_folio;
1718
LIST_HEAD(non_hvo_folios);
1719
1720
/*
1721
* First allocate the required vmemmap (if necessary) for all folios.
1722
* Carefully handle errors and free up any available hugetlb pages
1723
* in an effort to make forward progress.
1724
*/
1725
retry:
1726
ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
1727
if (ret < 0) {
1728
bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
1729
goto retry;
1730
}
1731
1732
/*
1733
* At this point, list should be empty, ret should be >= 0 and there
1734
* should only be pages on the non_hvo_folios list.
1735
* Do note that the non_hvo_folios list could be empty.
1736
* Without HVO enabled, ret will be 0 and there is no need to call
1737
* __folio_clear_hugetlb as this was done previously.
1738
*/
1739
VM_WARN_ON(!list_empty(folio_list));
1740
VM_WARN_ON(ret < 0);
1741
if (!list_empty(&non_hvo_folios) && ret) {
1742
spin_lock_irq(&hugetlb_lock);
1743
list_for_each_entry(folio, &non_hvo_folios, lru)
1744
__folio_clear_hugetlb(folio);
1745
spin_unlock_irq(&hugetlb_lock);
1746
}
1747
1748
list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
1749
update_and_free_hugetlb_folio(h, folio, false);
1750
cond_resched();
1751
}
1752
}
1753
1754
struct hstate *size_to_hstate(unsigned long size)
1755
{
1756
struct hstate *h;
1757
1758
for_each_hstate(h) {
1759
if (huge_page_size(h) == size)
1760
return h;
1761
}
1762
return NULL;
1763
}
1764
1765
void free_huge_folio(struct folio *folio)
1766
{
1767
/*
1768
* Can't pass hstate in here because it is called from the
1769
* generic mm code.
1770
*/
1771
struct hstate *h = folio_hstate(folio);
1772
int nid = folio_nid(folio);
1773
struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
1774
bool restore_reserve;
1775
unsigned long flags;
1776
1777
VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1778
VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
1779
1780
hugetlb_set_folio_subpool(folio, NULL);
1781
if (folio_test_anon(folio))
1782
__ClearPageAnonExclusive(&folio->page);
1783
folio->mapping = NULL;
1784
restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1785
folio_clear_hugetlb_restore_reserve(folio);
1786
1787
/*
1788
* If HPageRestoreReserve was set on page, page allocation consumed a
1789
* reservation. If the page was associated with a subpool, there
1790
* would have been a page reserved in the subpool before allocation
1791
* via hugepage_subpool_get_pages(). Since we are 'restoring' the
1792
* reservation, do not call hugepage_subpool_put_pages() as this will
1793
* remove the reserved page from the subpool.
1794
*/
1795
if (!restore_reserve) {
1796
/*
1797
* A return code of zero implies that the subpool will be
1798
* under its minimum size if the reservation is not restored
1799
* after the page is freed. Therefore, force the restore_reserve
1800
* operation.
1801
*/
1802
if (hugepage_subpool_put_pages(spool, 1) == 0)
1803
restore_reserve = true;
1804
}
1805
1806
spin_lock_irqsave(&hugetlb_lock, flags);
1807
folio_clear_hugetlb_migratable(folio);
1808
hugetlb_cgroup_uncharge_folio(hstate_index(h),
1809
pages_per_huge_page(h), folio);
1810
hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
1811
pages_per_huge_page(h), folio);
1812
lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h));
1813
mem_cgroup_uncharge(folio);
1814
if (restore_reserve)
1815
h->resv_huge_pages++;
1816
1817
if (folio_test_hugetlb_temporary(folio)) {
1818
remove_hugetlb_folio(h, folio, false);
1819
spin_unlock_irqrestore(&hugetlb_lock, flags);
1820
update_and_free_hugetlb_folio(h, folio, true);
1821
} else if (h->surplus_huge_pages_node[nid]) {
1822
/* remove the page from active list */
1823
remove_hugetlb_folio(h, folio, true);
1824
spin_unlock_irqrestore(&hugetlb_lock, flags);
1825
update_and_free_hugetlb_folio(h, folio, true);
1826
} else {
1827
arch_clear_hugetlb_flags(folio);
1828
enqueue_hugetlb_folio(h, folio);
1829
spin_unlock_irqrestore(&hugetlb_lock, flags);
1830
}
1831
}
1832
1833
/*
1834
* Must be called with the hugetlb lock held
1835
*/
1836
static void account_new_hugetlb_folio(struct hstate *h, struct folio *folio)
1837
{
1838
lockdep_assert_held(&hugetlb_lock);
1839
h->nr_huge_pages++;
1840
h->nr_huge_pages_node[folio_nid(folio)]++;
1841
}
1842
1843
void init_new_hugetlb_folio(struct folio *folio)
1844
{
1845
__folio_set_hugetlb(folio);
1846
INIT_LIST_HEAD(&folio->lru);
1847
hugetlb_set_folio_subpool(folio, NULL);
1848
set_hugetlb_cgroup(folio, NULL);
1849
set_hugetlb_cgroup_rsvd(folio, NULL);
1850
}
1851
1852
/*
1853
* Find and lock address space (mapping) in write mode.
1854
*
1855
* Upon entry, the folio is locked which means that folio_mapping() is
1856
* stable. Due to locking order, we can only trylock_write. If we can
1857
* not get the lock, simply return NULL to caller.
1858
*/
1859
struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
1860
{
1861
struct address_space *mapping = folio_mapping(folio);
1862
1863
if (!mapping)
1864
return mapping;
1865
1866
if (i_mmap_trylock_write(mapping))
1867
return mapping;
1868
1869
return NULL;
1870
}
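/*
 * Editor's illustrative sketch (not kernel code): pairing
 * hugetlb_folio_mapping_lock_write() with i_mmap_unlock_write().  A NULL
 * return means the folio has no mapping or the trylock failed, and the
 * caller must handle that (typically by bailing out or retrying).
 */
#if 0
static void example_walk_i_mmap(struct folio *folio)
{
        struct address_space *mapping;

        mapping = hugetlb_folio_mapping_lock_write(folio);
        if (!mapping)
                return;         /* no mapping, or lock contention */

        /* ... walk mapping->i_mmap under the write lock ... */

        i_mmap_unlock_write(mapping);
}
#endif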
1871
1872
static struct folio *alloc_buddy_hugetlb_folio(int order, gfp_t gfp_mask,
1873
int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)
1874
{
1875
struct folio *folio;
1876
bool alloc_try_hard = true;
1877
1878
/*
1879
* By default we always try hard to allocate the folio with
1880
* __GFP_RETRY_MAYFAIL flag. However, if we are allocating folios in
1881
* a loop (to adjust global huge page counts) and previous allocation
1882
* failed, do not continue to try hard on the same node. Use the
1883
* node_alloc_noretry bitmap to manage this state information.
1884
*/
1885
if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1886
alloc_try_hard = false;
1887
if (alloc_try_hard)
1888
gfp_mask |= __GFP_RETRY_MAYFAIL;
1889
1890
folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask);
1891
1892
/*
1893
* If we did not specify __GFP_RETRY_MAYFAIL, but still got a
1894
* folio, this indicates an overall state change. Clear the bit so
1895
* that we resume normal 'try hard' allocations.
1896
*/
1897
if (node_alloc_noretry && folio && !alloc_try_hard)
1898
node_clear(nid, *node_alloc_noretry);
1899
1900
/*
1901
* If we tried hard to get a folio but failed, set bit so that
1902
* subsequent attempts will not try as hard until there is an
1903
* overall state change.
1904
*/
1905
if (node_alloc_noretry && !folio && alloc_try_hard)
1906
node_set(nid, *node_alloc_noretry);
1907
1908
if (!folio) {
1909
__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1910
return NULL;
1911
}
1912
1913
__count_vm_event(HTLB_BUDDY_PGALLOC);
1914
return folio;
1915
}
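/*
 * Editor's illustrative sketch (not kernel code): the node_alloc_noretry
 * nodemask is owned by the caller and carried across the whole adjustment
 * loop, so a node that just failed a "try hard" allocation is only probed
 * gently until the overall state changes.  This mirrors what
 * hugetlb_pages_alloc_boot_node() does later in this file; the function
 * name example_grow_pool is hypothetical.
 */
#if 0
static void example_grow_pool(struct hstate *h, unsigned long count)
{
        nodemask_t node_alloc_noretry;
        int next_node = first_online_node;

        /* Start with "try hard" allowed on every node. */
        nodes_clear(node_alloc_noretry);

        while (count--) {
                struct folio *folio;

                folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
                                              &node_alloc_noretry, &next_node);
                if (!folio)
                        break;
                /* ... queue folio for prep_and_add_allocated_folios() ... */
        }
}
#endif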
1916
1917
static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
1918
gfp_t gfp_mask, int nid, nodemask_t *nmask,
1919
nodemask_t *node_alloc_noretry)
1920
{
1921
struct folio *folio;
1922
int order = huge_page_order(h);
1923
1924
if (nid == NUMA_NO_NODE)
1925
nid = numa_mem_id();
1926
1927
if (order_is_gigantic(order))
1928
folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
1929
else
1930
folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
1931
node_alloc_noretry);
1932
if (folio)
1933
init_new_hugetlb_folio(folio);
1934
return folio;
1935
}
1936
1937
/*
1938
* Common helper to allocate a fresh hugetlb folio. All specific allocators
1939
* should use this function to get a new hugetlb folio.
1940
*
1941
* Note that returned folio is 'frozen': ref count of head page and all tail
1942
* pages is zero, and the accounting must be done in the caller.
1943
*/
1944
static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
1945
gfp_t gfp_mask, int nid, nodemask_t *nmask)
1946
{
1947
struct folio *folio;
1948
1949
folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
1950
if (folio)
1951
hugetlb_vmemmap_optimize_folio(h, folio);
1952
return folio;
1953
}
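/*
 * Editor's illustrative sketch (not kernel code): a fresh folio comes back
 * frozen (refcount 0) and unaccounted, so a caller that wants to hand it
 * out directly must account it under hugetlb_lock and then unfreeze the
 * reference, as alloc_migrate_hugetlb_folio() below does.
 */
#if 0
static struct folio *example_alloc_and_publish(struct hstate *h, gfp_t gfp,
                                               int nid)
{
        struct folio *folio;

        folio = alloc_fresh_hugetlb_folio(h, gfp, nid, NULL);
        if (!folio)
                return NULL;

        spin_lock_irq(&hugetlb_lock);
        account_new_hugetlb_folio(h, folio);    /* nr_huge_pages++ */
        spin_unlock_irq(&hugetlb_lock);

        folio_ref_unfreeze(folio, 1);           /* make the folio usable */
        return folio;
}
#endif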
1954
1955
void prep_and_add_allocated_folios(struct hstate *h,
1956
struct list_head *folio_list)
1957
{
1958
unsigned long flags;
1959
struct folio *folio, *tmp_f;
1960
1961
/* Send list for bulk vmemmap optimization processing */
1962
hugetlb_vmemmap_optimize_folios(h, folio_list);
1963
1964
/* Add all new pool pages to free lists in one lock cycle */
1965
spin_lock_irqsave(&hugetlb_lock, flags);
1966
list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
1967
account_new_hugetlb_folio(h, folio);
1968
enqueue_hugetlb_folio(h, folio);
1969
}
1970
spin_unlock_irqrestore(&hugetlb_lock, flags);
1971
}
1972
1973
/*
1974
* Allocates a fresh hugetlb page in a node interleaved manner. The page
1975
* will later be added to the appropriate hugetlb pool.
1976
*/
1977
static struct folio *alloc_pool_huge_folio(struct hstate *h,
1978
nodemask_t *nodes_allowed,
1979
nodemask_t *node_alloc_noretry,
1980
int *next_node)
1981
{
1982
gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1983
int nr_nodes, node;
1984
1985
for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) {
1986
struct folio *folio;
1987
1988
folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
1989
nodes_allowed, node_alloc_noretry);
1990
if (folio)
1991
return folio;
1992
}
1993
1994
return NULL;
1995
}
1996
1997
/*
1998
* Remove a huge page from the pool, starting with the next node to free. Attempt to keep
1999
* persistent huge pages more or less balanced over allowed nodes.
2000
* This routine only 'removes' the hugetlb page. The caller must make
2001
* an additional call to free the page to low level allocators.
2002
* Called with hugetlb_lock locked.
2003
*/
2004
static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
2005
nodemask_t *nodes_allowed, bool acct_surplus)
2006
{
2007
int nr_nodes, node;
2008
struct folio *folio = NULL;
2009
2010
lockdep_assert_held(&hugetlb_lock);
2011
for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2012
/*
2013
* If we're returning unused surplus pages, only examine
2014
* nodes with surplus pages.
2015
*/
2016
if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2017
!list_empty(&h->hugepage_freelists[node])) {
2018
folio = list_entry(h->hugepage_freelists[node].next,
2019
struct folio, lru);
2020
remove_hugetlb_folio(h, folio, acct_surplus);
2021
break;
2022
}
2023
}
2024
2025
return folio;
2026
}
2027
2028
/*
2029
* Dissolve a given free hugetlb folio into free buddy pages. This function
2030
* does nothing for in-use hugetlb folios and non-hugetlb folios.
2031
* This function returns values like below:
2032
*
2033
* -ENOMEM: failed to allocate the vmemmap pages needed to free the hugepage
2034
* when the system is under memory pressure and the feature of
2035
* freeing unused vmemmap pages associated with each hugetlb page
2036
* is enabled.
2037
* -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
2038
* (allocated or reserved).
2039
* 0: successfully dissolved free hugepages or the page is not a
2040
* hugepage (considered as already dissolved)
2041
*/
2042
int dissolve_free_hugetlb_folio(struct folio *folio)
2043
{
2044
int rc = -EBUSY;
2045
2046
retry:
2047
/* Not to disrupt normal path by vainly holding hugetlb_lock */
2048
if (!folio_test_hugetlb(folio))
2049
return 0;
2050
2051
spin_lock_irq(&hugetlb_lock);
2052
if (!folio_test_hugetlb(folio)) {
2053
rc = 0;
2054
goto out;
2055
}
2056
2057
if (!folio_ref_count(folio)) {
2058
struct hstate *h = folio_hstate(folio);
2059
bool adjust_surplus = false;
2060
2061
if (!available_huge_pages(h))
2062
goto out;
2063
2064
/*
2065
* We should make sure that the page is already on the free list
2066
* when it is dissolved.
2067
*/
2068
if (unlikely(!folio_test_hugetlb_freed(folio))) {
2069
spin_unlock_irq(&hugetlb_lock);
2070
cond_resched();
2071
2072
/*
2073
* Theoretically, we should return -EBUSY when we
2074
* encounter this race. In fact, we have a chance
2075
* to successfully dissolve the page if we retry,
2076
* because the race window is quite small.
2077
* Seizing this opportunity is an optimization that
2078
* increases the success rate of dissolving the page.
2079
*/
2080
goto retry;
2081
}
2082
2083
if (h->surplus_huge_pages_node[folio_nid(folio)])
2084
adjust_surplus = true;
2085
remove_hugetlb_folio(h, folio, adjust_surplus);
2086
h->max_huge_pages--;
2087
spin_unlock_irq(&hugetlb_lock);
2088
2089
/*
2090
* Normally update_and_free_hugetlb_folio will allocate the required vmemmap
2090
* before freeing the page. update_and_free_hugetlb_folio will fail to
2091
* free the page if it cannot allocate the required vmemmap. We
2092
* need to adjust max_huge_pages if the page is not freed.
2093
* Attempt to allocate the vmemmap here so that we can take
2094
* appropriate action on failure.
2096
*
2097
* The folio_test_hugetlb check here is because
2098
* remove_hugetlb_folio will clear hugetlb folio flag for
2099
* non-vmemmap optimized hugetlb folios.
2100
*/
2101
if (folio_test_hugetlb(folio)) {
2102
rc = hugetlb_vmemmap_restore_folio(h, folio);
2103
if (rc) {
2104
spin_lock_irq(&hugetlb_lock);
2105
add_hugetlb_folio(h, folio, adjust_surplus);
2106
h->max_huge_pages++;
2107
goto out;
2108
}
2109
} else
2110
rc = 0;
2111
2112
update_and_free_hugetlb_folio(h, folio, false);
2113
return rc;
2114
}
2115
out:
2116
spin_unlock_irq(&hugetlb_lock);
2117
return rc;
2118
}
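/*
 * Editor's illustrative sketch (not kernel code): handling the return
 * codes documented above.  -EBUSY usually means the page is still in use
 * (allocated or reserved) and may succeed on a later retry; -ENOMEM means
 * the vmemmap could not be restored and an immediate retry is unlikely to
 * help.  example_dissolve_retry is a hypothetical helper.
 */
#if 0
static int example_dissolve_retry(struct folio *folio)
{
        int retries = 5;
        int rc;

        do {
                rc = dissolve_free_hugetlb_folio(folio);
                if (rc != -EBUSY)
                        break;          /* dissolved, or hard failure */
                cond_resched();
        } while (--retries);

        return rc;
}
#endif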
2119
2120
/*
2121
* Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2122
* make specified memory blocks removable from the system.
2123
* Note that this will dissolve a free gigantic hugepage completely, if any
2124
* part of it lies within the given range.
2125
* Also note that if dissolve_free_hugetlb_folio() returns with an error, all
2126
* free hugetlb folios that were dissolved before that error are lost.
2127
*/
2128
int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn)
2129
{
2130
unsigned long pfn;
2131
struct folio *folio;
2132
int rc = 0;
2133
unsigned int order;
2134
struct hstate *h;
2135
2136
if (!hugepages_supported())
2137
return rc;
2138
2139
order = huge_page_order(&default_hstate);
2140
for_each_hstate(h)
2141
order = min(order, huge_page_order(h));
2142
2143
for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2144
folio = pfn_folio(pfn);
2145
rc = dissolve_free_hugetlb_folio(folio);
2146
if (rc)
2147
break;
2148
}
2149
2150
return rc;
2151
}
2152
2153
/*
2154
* Allocates a fresh surplus page from the page allocator.
2155
*/
2156
static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2157
gfp_t gfp_mask, int nid, nodemask_t *nmask)
2158
{
2159
struct folio *folio = NULL;
2160
2161
if (hstate_is_gigantic_no_runtime(h))
2162
return NULL;
2163
2164
spin_lock_irq(&hugetlb_lock);
2165
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2166
goto out_unlock;
2167
spin_unlock_irq(&hugetlb_lock);
2168
2169
folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
2170
if (!folio)
2171
return NULL;
2172
2173
spin_lock_irq(&hugetlb_lock);
2174
/*
2175
* nr_huge_pages needs to be adjusted within the same lock cycle
2176
* as surplus_pages, otherwise it might confuse
2177
* persistent_huge_pages() momentarily.
2178
*/
2179
account_new_hugetlb_folio(h, folio);
2180
2181
/*
2182
* We could have raced with the pool size change.
2183
* Double check that and simply deallocate the new page
2184
* if we would end up overcommitting the surpluses. Abuse the
2185
* temporary page flag to work around the nasty free_huge_folio
2186
* code flow.
2187
*/
2188
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2189
folio_set_hugetlb_temporary(folio);
2190
spin_unlock_irq(&hugetlb_lock);
2191
free_huge_folio(folio);
2192
return NULL;
2193
}
2194
2195
h->surplus_huge_pages++;
2196
h->surplus_huge_pages_node[folio_nid(folio)]++;
2197
2198
out_unlock:
2199
spin_unlock_irq(&hugetlb_lock);
2200
2201
return folio;
2202
}
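/*
 * Editor's note (illustrative numbers): nr_overcommit_huge_pages is the
 * administrator-set ceiling checked above, exposed via
 * /proc/sys/vm/nr_overcommit_hugepages.  For example, with 2 MiB huge
 * pages and the limit set to 64, at most 64 surplus huge pages (128 MiB)
 * can exist beyond the persistent pool before this function returns NULL.
 */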
2203
2204
static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
2205
int nid, nodemask_t *nmask)
2206
{
2207
struct folio *folio;
2208
2209
if (hstate_is_gigantic(h))
2210
return NULL;
2211
2212
folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
2213
if (!folio)
2214
return NULL;
2215
2216
spin_lock_irq(&hugetlb_lock);
2217
account_new_hugetlb_folio(h, folio);
2218
spin_unlock_irq(&hugetlb_lock);
2219
2220
/* fresh huge pages are frozen */
2221
folio_ref_unfreeze(folio, 1);
2222
/*
2223
* We do not account these pages as surplus because they are only
2224
* temporary and will be released properly on the last reference
2225
*/
2226
folio_set_hugetlb_temporary(folio);
2227
2228
return folio;
2229
}
2230
2231
/*
2232
* Use the VMA's mpolicy to allocate a huge page from the buddy.
2233
*/
2234
static
2235
struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
2236
struct vm_area_struct *vma, unsigned long addr)
2237
{
2238
struct folio *folio = NULL;
2239
struct mempolicy *mpol;
2240
gfp_t gfp_mask = htlb_alloc_mask(h);
2241
int nid;
2242
nodemask_t *nodemask;
2243
2244
nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2245
if (mpol_is_preferred_many(mpol)) {
2246
gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2247
2248
folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
2249
2250
/* Fallback to all nodes if page==NULL */
2251
nodemask = NULL;
2252
}
2253
2254
if (!folio)
2255
folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
2256
mpol_cond_put(mpol);
2257
return folio;
2258
}
2259
2260
struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
2261
nodemask_t *nmask, gfp_t gfp_mask)
2262
{
2263
struct folio *folio;
2264
2265
spin_lock_irq(&hugetlb_lock);
2266
if (!h->resv_huge_pages) {
2267
spin_unlock_irq(&hugetlb_lock);
2268
return NULL;
2269
}
2270
2271
folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid,
2272
nmask);
2273
if (folio)
2274
h->resv_huge_pages--;
2275
2276
spin_unlock_irq(&hugetlb_lock);
2277
return folio;
2278
}
2279
2280
/* folio migration callback function */
2281
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
2282
nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
2283
{
2284
spin_lock_irq(&hugetlb_lock);
2285
if (available_huge_pages(h)) {
2286
struct folio *folio;
2287
2288
folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2289
preferred_nid, nmask);
2290
if (folio) {
2291
spin_unlock_irq(&hugetlb_lock);
2292
return folio;
2293
}
2294
}
2295
spin_unlock_irq(&hugetlb_lock);
2296
2297
/* We cannot fallback to other nodes, as we could break the per-node pool. */
2298
if (!allow_alloc_fallback)
2299
gfp_mask |= __GFP_THISNODE;
2300
2301
return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
2302
}
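/*
 * Editor's illustrative sketch (not kernel code): how a migration path
 * might use this callback - prefer the node of the source folio and allow
 * pool fallback.  Real callers derive the gfp mask and fallback policy
 * from the migration context; example_migration_target is hypothetical.
 */
#if 0
static struct folio *example_migration_target(struct folio *src)
{
        struct hstate *h = folio_hstate(src);
        gfp_t gfp = htlb_alloc_mask(h);
        int nid = folio_nid(src);

        return alloc_hugetlb_folio_nodemask(h, nid, NULL, gfp, true);
}
#endif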
2303
2304
static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
2305
{
2306
#ifdef CONFIG_NUMA
2307
struct mempolicy *mpol = get_task_policy(current);
2308
2309
/*
2310
* Only enforce MPOL_BIND policy which overlaps with cpuset policy
2311
* (from policy_nodemask) specifically for hugetlb case
2312
*/
2313
if (mpol->mode == MPOL_BIND &&
2314
(apply_policy_zone(mpol, gfp_zone(gfp)) &&
2315
cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
2316
return &mpol->nodes;
2317
#endif
2318
return NULL;
2319
}
2320
2321
/*
2322
* Increase the hugetlb pool such that it can accommodate a reservation
2323
* of size 'delta'.
2324
*/
2325
static int gather_surplus_pages(struct hstate *h, long delta)
2326
__must_hold(&hugetlb_lock)
2327
{
2328
LIST_HEAD(surplus_list);
2329
struct folio *folio, *tmp;
2330
int ret;
2331
long i;
2332
long needed, allocated;
2333
bool alloc_ok = true;
2334
nodemask_t *mbind_nodemask, alloc_nodemask;
2335
2336
mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h));
2337
if (mbind_nodemask)
2338
nodes_and(alloc_nodemask, *mbind_nodemask, cpuset_current_mems_allowed);
2339
else
2340
alloc_nodemask = cpuset_current_mems_allowed;
2341
2342
lockdep_assert_held(&hugetlb_lock);
2343
needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2344
if (needed <= 0) {
2345
h->resv_huge_pages += delta;
2346
return 0;
2347
}
2348
2349
allocated = 0;
2350
2351
ret = -ENOMEM;
2352
retry:
2353
spin_unlock_irq(&hugetlb_lock);
2354
for (i = 0; i < needed; i++) {
2355
folio = NULL;
2356
2357
/*
2358
* It is okay to use NUMA_NO_NODE because we use numa_mem_id()
2359
* down the road to pick the current node if that is the case.
2360
*/
2361
folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
2362
NUMA_NO_NODE, &alloc_nodemask);
2363
if (!folio) {
2364
alloc_ok = false;
2365
break;
2366
}
2367
list_add(&folio->lru, &surplus_list);
2368
cond_resched();
2369
}
2370
allocated += i;
2371
2372
/*
2373
* After retaking hugetlb_lock, we need to recalculate 'needed'
2374
* because either resv_huge_pages or free_huge_pages may have changed.
2375
*/
2376
spin_lock_irq(&hugetlb_lock);
2377
needed = (h->resv_huge_pages + delta) -
2378
(h->free_huge_pages + allocated);
2379
if (needed > 0) {
2380
if (alloc_ok)
2381
goto retry;
2382
/*
2383
* We were not able to allocate enough pages to
2384
* satisfy the entire reservation so we free what
2385
* we've allocated so far.
2386
*/
2387
goto free;
2388
}
2389
/*
2390
* The surplus_list now contains _at_least_ the number of extra pages
2391
* needed to accommodate the reservation. Add the appropriate number
2392
* of pages to the hugetlb pool and free the extras back to the buddy
2393
* allocator. Commit the entire reservation here to prevent another
2394
* process from stealing the pages as they are added to the pool but
2395
* before they are reserved.
2396
*/
2397
needed += allocated;
2398
h->resv_huge_pages += delta;
2399
ret = 0;
2400
2401
/* Free the needed pages to the hugetlb pool */
2402
list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
2403
if ((--needed) < 0)
2404
break;
2405
/* Add the page to the hugetlb allocator */
2406
enqueue_hugetlb_folio(h, folio);
2407
}
2408
free:
2409
spin_unlock_irq(&hugetlb_lock);
2410
2411
/*
2412
* Free unnecessary surplus pages to the buddy allocator.
2413
* Pages have no ref count, call free_huge_folio directly.
2414
*/
2415
list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
2416
free_huge_folio(folio);
2417
spin_lock_irq(&hugetlb_lock);
2418
2419
return ret;
2420
}
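/*
 * Editor's worked example for the arithmetic above (illustrative numbers):
 * with resv_huge_pages = 10, free_huge_pages = 8 and delta = 4,
 *
 *      needed = (10 + 4) - 8 = 6
 *
 * so six surplus folios are allocated with the lock dropped.  If another
 * task frees two pages in the meantime (free_huge_pages = 10,
 * allocated = 6), the recheck gives
 *
 *      needed = (10 + 4) - (10 + 6) = -2
 *
 * which is <= 0, so there is no retry.  After "needed += allocated",
 * four of the six new folios are enqueued into the pool and the two
 * leftover folios are returned to the buddy allocator via the free: path.
 */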
2421
2422
/*
2423
* This routine has two main purposes:
2424
* 1) Decrement the reservation count (resv_huge_pages) by the value passed
2425
* in unused_resv_pages. This corresponds to the prior adjustments made
2426
* to the associated reservation map.
2427
* 2) Free any unused surplus pages that may have been allocated to satisfy
2428
* the reservation. As many as unused_resv_pages may be freed.
2429
*/
2430
static void return_unused_surplus_pages(struct hstate *h,
2431
unsigned long unused_resv_pages)
2432
{
2433
unsigned long nr_pages;
2434
LIST_HEAD(page_list);
2435
2436
lockdep_assert_held(&hugetlb_lock);
2437
/* Uncommit the reservation */
2438
h->resv_huge_pages -= unused_resv_pages;
2439
2440
if (hstate_is_gigantic_no_runtime(h))
2441
goto out;
2442
2443
/*
2444
* Part (or even all) of the reservation could have been backed
2445
* by pre-allocated pages. Only free surplus pages.
2446
*/
2447
nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2448
2449
/*
2450
* We want to release as many surplus pages as possible, spread
2451
* evenly across all nodes with memory. Iterate across these nodes
2452
* until we can no longer free unreserved surplus pages. This occurs
2453
* when the nodes with surplus pages have no free pages.
2454
* remove_pool_hugetlb_folio() will balance the freed pages across the
2455
* on-line nodes with memory and will handle the hstate accounting.
2456
*/
2457
while (nr_pages--) {
2458
struct folio *folio;
2459
2460
folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
2461
if (!folio)
2462
goto out;
2463
2464
list_add(&folio->lru, &page_list);
2465
}
2466
2467
out:
2468
spin_unlock_irq(&hugetlb_lock);
2469
update_and_free_pages_bulk(h, &page_list);
2470
spin_lock_irq(&hugetlb_lock);
2471
}
2472
2473
2474
/*
2475
* vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2476
* are used by the huge page allocation routines to manage reservations.
2477
*
2478
* vma_needs_reservation is called to determine if the huge page at addr
2479
* within the vma has an associated reservation. If a reservation is
2480
* needed, the value 1 is returned. The caller is then responsible for
2481
* managing the global reservation and subpool usage counts. After
2482
* the huge page has been allocated, vma_commit_reservation is called
2483
* to add the page to the reservation map. If the page allocation fails,
2484
* the reservation must be ended instead of committed. vma_end_reservation
2485
* is called in such cases.
2486
*
2487
* In the normal case, vma_commit_reservation returns the same value
2488
* as the preceding vma_needs_reservation call. The only time this
2489
* is not the case is if a reserve map was changed between calls. It
2490
* is the responsibility of the caller to notice the difference and
2491
* take appropriate action.
2492
*
2493
* vma_add_reservation is used in error paths where a reservation must
2494
* be restored when a newly allocated huge page must be freed. It is
2495
* to be called after calling vma_needs_reservation to determine if a
2496
* reservation exists.
2497
*
2498
* vma_del_reservation is used in error paths where an entry in the reserve
2499
* map was created during huge page allocation and must be removed. It is to
2500
* be called after calling vma_needs_reservation to determine if a reservation
2501
* exists.
2502
*/
2503
enum vma_resv_mode {
2504
VMA_NEEDS_RESV,
2505
VMA_COMMIT_RESV,
2506
VMA_END_RESV,
2507
VMA_ADD_RESV,
2508
VMA_DEL_RESV,
2509
};
2510
static long __vma_reservation_common(struct hstate *h,
2511
struct vm_area_struct *vma, unsigned long addr,
2512
enum vma_resv_mode mode)
2513
{
2514
struct resv_map *resv;
2515
pgoff_t idx;
2516
long ret;
2517
long dummy_out_regions_needed;
2518
2519
resv = vma_resv_map(vma);
2520
if (!resv)
2521
return 1;
2522
2523
idx = vma_hugecache_offset(h, vma, addr);
2524
switch (mode) {
2525
case VMA_NEEDS_RESV:
2526
ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2527
/* We assume that vma_reservation_* routines always operate on
2528
* 1 page, and that adding to resv map a 1 page entry can only
2529
* ever require 1 region.
2530
*/
2531
VM_BUG_ON(dummy_out_regions_needed != 1);
2532
break;
2533
case VMA_COMMIT_RESV:
2534
ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2535
/* region_add calls of range 1 should never fail. */
2536
VM_BUG_ON(ret < 0);
2537
break;
2538
case VMA_END_RESV:
2539
region_abort(resv, idx, idx + 1, 1);
2540
ret = 0;
2541
break;
2542
case VMA_ADD_RESV:
2543
if (vma->vm_flags & VM_MAYSHARE) {
2544
ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2545
/* region_add calls of range 1 should never fail. */
2546
VM_BUG_ON(ret < 0);
2547
} else {
2548
region_abort(resv, idx, idx + 1, 1);
2549
ret = region_del(resv, idx, idx + 1);
2550
}
2551
break;
2552
case VMA_DEL_RESV:
2553
if (vma->vm_flags & VM_MAYSHARE) {
2554
region_abort(resv, idx, idx + 1, 1);
2555
ret = region_del(resv, idx, idx + 1);
2556
} else {
2557
ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2558
/* region_add calls of range 1 should never fail. */
2559
VM_BUG_ON(ret < 0);
2560
}
2561
break;
2562
default:
2563
BUG();
2564
}
2565
2566
if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2567
return ret;
2568
/*
2569
* We know private mapping must have HPAGE_RESV_OWNER set.
2570
*
2571
* In most cases, reserves always exist for private mappings.
2572
* However, a file associated with mapping could have been
2573
* hole punched or truncated after reserves were consumed;
2574
* a subsequent fault on such a range will not use reserves.
2575
* Subtle - The reserve map for private mappings has the
2576
* opposite meaning than that of shared mappings. If NO
2577
* entry is in the reserve map, it means a reservation exists.
2578
* If an entry exists in the reserve map, it means the
2579
* reservation has already been consumed. As a result, the
2580
* return value of this routine is the opposite of the
2581
* value returned from reserve map manipulation routines above.
2582
*/
2583
if (ret > 0)
2584
return 0;
2585
if (ret == 0)
2586
return 1;
2587
return ret;
2588
}
2589
2590
static long vma_needs_reservation(struct hstate *h,
2591
struct vm_area_struct *vma, unsigned long addr)
2592
{
2593
return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2594
}
2595
2596
static long vma_commit_reservation(struct hstate *h,
2597
struct vm_area_struct *vma, unsigned long addr)
2598
{
2599
return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2600
}
2601
2602
static void vma_end_reservation(struct hstate *h,
2603
struct vm_area_struct *vma, unsigned long addr)
2604
{
2605
(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2606
}
2607
2608
static long vma_add_reservation(struct hstate *h,
2609
struct vm_area_struct *vma, unsigned long addr)
2610
{
2611
return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2612
}
2613
2614
static long vma_del_reservation(struct hstate *h,
2615
struct vm_area_struct *vma, unsigned long addr)
2616
{
2617
return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2618
}
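/*
 * Editor's illustrative sketch (not kernel code): the basic
 * needs/commit/end protocol described above.  Real callers (see
 * alloc_hugetlb_folio() below) add subpool, cgroup and surplus handling on
 * top of this; example_reserved_alloc is hypothetical and locking is
 * omitted.
 */
#if 0
static struct folio *example_reserved_alloc(struct hstate *h,
                                            struct vm_area_struct *vma,
                                            unsigned long addr)
{
        struct folio *folio;
        long chg;

        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
                return NULL;    /* reserve map allocation failure */

        folio = alloc_fresh_hugetlb_folio(h, htlb_alloc_mask(h),
                                          NUMA_NO_NODE, NULL);
        if (!folio) {
                vma_end_reservation(h, vma, addr);      /* abort the change */
                return NULL;
        }

        vma_commit_reservation(h, vma, addr);   /* record the consumption */
        return folio;
}
#endif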
2619
2620
/*
2621
* This routine is called to restore reservation information on error paths.
2622
* It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
2623
* and the hugetlb mutex should remain held when calling this routine.
2624
*
2625
* It handles two specific cases:
2626
* 1) A reservation was in place and the folio consumed the reservation.
2627
* hugetlb_restore_reserve is set in the folio.
2628
* 2) No reservation was in place for the page, so hugetlb_restore_reserve is
2629
* not set. However, alloc_hugetlb_folio always updates the reserve map.
2630
*
2631
* In case 1, free_huge_folio later in the error path will increment the
2632
* global reserve count. But, free_huge_folio does not have enough context
2633
* to adjust the reservation map. This case deals primarily with private
2634
* mappings. Adjust the reserve map here to be consistent with global
2635
* reserve count adjustments to be made by free_huge_folio. Make sure the
2636
* reserve map indicates there is a reservation present.
2637
*
2638
* In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
2639
*/
2640
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2641
unsigned long address, struct folio *folio)
2642
{
2643
long rc = vma_needs_reservation(h, vma, address);
2644
2645
if (folio_test_hugetlb_restore_reserve(folio)) {
2646
if (unlikely(rc < 0))
2647
/*
2648
* Rare out of memory condition in reserve map
2649
* manipulation. Clear hugetlb_restore_reserve so
2650
* that global reserve count will not be incremented
2651
* by free_huge_folio. This will make it appear
2652
* as though the reservation for this folio was
2653
* consumed. This may prevent the task from
2654
* faulting in the folio at a later time. This
2655
* is better than inconsistent global huge page
2656
* accounting of reserve counts.
2657
*/
2658
folio_clear_hugetlb_restore_reserve(folio);
2659
else if (rc)
2660
(void)vma_add_reservation(h, vma, address);
2661
else
2662
vma_end_reservation(h, vma, address);
2663
} else {
2664
if (!rc) {
2665
/*
2666
* This indicates there is an entry in the reserve map
2667
* not added by alloc_hugetlb_folio. We know it was added
2668
* before the alloc_hugetlb_folio call, otherwise
2669
* hugetlb_restore_reserve would be set on the folio.
2670
* Remove the entry so that a subsequent allocation
2671
* does not consume a reservation.
2672
*/
2673
rc = vma_del_reservation(h, vma, address);
2674
if (rc < 0)
2675
/*
2676
* VERY rare out of memory condition. Since
2677
* we can not delete the entry, set
2678
* hugetlb_restore_reserve so that the reserve
2679
* count will be incremented when the folio
2680
* is freed. This reserve will be consumed
2681
* on a subsequent allocation.
2682
*/
2683
folio_set_hugetlb_restore_reserve(folio);
2684
} else if (rc < 0) {
2685
/*
2686
* Rare out of memory condition from
2687
* vma_needs_reservation call. Memory allocation is
2688
* only attempted if a new entry is needed. Therefore,
2689
* this implies there is not an entry in the
2690
* reserve map.
2691
*
2692
* For shared mappings, no entry in the map indicates
2693
* no reservation. We are done.
2694
*/
2695
if (!(vma->vm_flags & VM_MAYSHARE))
2696
/*
2697
* For private mappings, no entry indicates
2698
* a reservation is present. Since we can
2699
* not add an entry, set hugetlb_restore_reserve
2700
* on the folio so reserve count will be
2701
* incremented when freed. This reserve will
2702
* be consumed on a subsequent allocation.
2703
*/
2704
folio_set_hugetlb_restore_reserve(folio);
2705
} else
2706
/*
2707
* No reservation present, do nothing
2708
*/
2709
vma_end_reservation(h, vma, address);
2710
}
2711
}
2712
2713
/*
2714
* alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
2715
* the old one
2716
* @old_folio: Old folio to dissolve
2717
* @list: List to isolate the page in case we need to
2718
* Returns 0 on success, otherwise negated error.
2719
*/
2720
static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
2721
struct list_head *list)
2722
{
2723
gfp_t gfp_mask;
2724
struct hstate *h;
2725
int nid = folio_nid(old_folio);
2726
struct folio *new_folio = NULL;
2727
int ret = 0;
2728
2729
retry:
2730
/*
2731
* The old_folio might have been dissolved from under our feet, so make sure
2732
* to carefully check the state under the lock.
2733
*/
2734
spin_lock_irq(&hugetlb_lock);
2735
if (!folio_test_hugetlb(old_folio)) {
2736
/*
2737
* Freed from under us. Drop new_folio too.
2738
*/
2739
goto free_new;
2740
} else if (folio_ref_count(old_folio)) {
2741
bool isolated;
2742
2743
/*
2744
* Someone has grabbed the folio, try to isolate it here.
2745
* Fail with -EBUSY if not possible.
2746
*/
2747
spin_unlock_irq(&hugetlb_lock);
2748
isolated = folio_isolate_hugetlb(old_folio, list);
2749
ret = isolated ? 0 : -EBUSY;
2750
spin_lock_irq(&hugetlb_lock);
2751
goto free_new;
2752
} else if (!folio_test_hugetlb_freed(old_folio)) {
2753
/*
2754
* Folio's refcount is 0 but it has not been enqueued in the
2755
* freelist yet. Race window is small, so we can succeed here if
2756
* we retry.
2757
*/
2758
spin_unlock_irq(&hugetlb_lock);
2759
cond_resched();
2760
goto retry;
2761
} else {
2762
h = folio_hstate(old_folio);
2763
if (!new_folio) {
2764
spin_unlock_irq(&hugetlb_lock);
2765
gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2766
new_folio = alloc_fresh_hugetlb_folio(h, gfp_mask,
2767
nid, NULL);
2768
if (!new_folio)
2769
return -ENOMEM;
2770
goto retry;
2771
}
2772
2773
/*
2774
* Ok, old_folio is still a genuine free hugepage. Remove it from
2775
* the freelist and decrease the counters. These will be
2776
* incremented again when calling account_new_hugetlb_folio()
2777
* and enqueue_hugetlb_folio() for new_folio. The counters will
2778
* remain stable since this happens under the lock.
2779
*/
2780
remove_hugetlb_folio(h, old_folio, false);
2781
2782
/*
2783
* Ref count on new_folio is already zero as it was dropped
2784
* earlier. It can be directly added to the pool free list.
2785
*/
2786
account_new_hugetlb_folio(h, new_folio);
2787
enqueue_hugetlb_folio(h, new_folio);
2788
2789
/*
2790
* Folio has been replaced, we can safely free the old one.
2791
*/
2792
spin_unlock_irq(&hugetlb_lock);
2793
update_and_free_hugetlb_folio(h, old_folio, false);
2794
}
2795
2796
return ret;
2797
2798
free_new:
2799
spin_unlock_irq(&hugetlb_lock);
2800
if (new_folio)
2801
update_and_free_hugetlb_folio(h, new_folio, false);
2802
2803
return ret;
2804
}
2805
2806
int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
2807
{
2808
int ret = -EBUSY;
2809
2810
/* Not to disrupt normal path by vainly holding hugetlb_lock */
2811
if (!folio_test_hugetlb(folio))
2812
return 0;
2813
2814
/*
2815
* Fence off gigantic pages as there is a cyclic dependency between
2816
* alloc_contig_range and them. Return -ENOMEM as this has the effect
2817
* of bailing out right away without further retrying.
2818
*/
2819
if (order_is_gigantic(folio_order(folio)))
2820
return -ENOMEM;
2821
2822
if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
2823
ret = 0;
2824
else if (!folio_ref_count(folio))
2825
ret = alloc_and_dissolve_hugetlb_folio(folio, list);
2826
2827
return ret;
2828
}
2829
2830
/*
2831
* replace_free_hugepage_folios - Replace free hugepage folios in a given pfn
2832
* range with new folios.
2833
* @start_pfn: start pfn of the given pfn range
2834
* @end_pfn: end pfn of the given pfn range
2835
* Returns 0 on success, otherwise negated error.
2836
*/
2837
int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
2838
{
2839
struct folio *folio;
2840
int ret = 0;
2841
2842
LIST_HEAD(isolate_list);
2843
2844
while (start_pfn < end_pfn) {
2845
folio = pfn_folio(start_pfn);
2846
2847
/* Not to disrupt normal path by vainly holding hugetlb_lock */
2848
if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
2849
ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list);
2850
if (ret)
2851
break;
2852
2853
putback_movable_pages(&isolate_list);
2854
}
2855
start_pfn++;
2856
}
2857
2858
return ret;
2859
}
2860
2861
void wait_for_freed_hugetlb_folios(void)
2862
{
2863
if (llist_empty(&hpage_freelist))
2864
return;
2865
2866
flush_work(&free_hpage_work);
2867
}
2868
2869
typedef enum {
2870
/*
2871
* For either 0/1: we checked the per-vma resv map, and one resv
2872
* count either can be reused (0), or an extra needed (1).
2873
*/
2874
MAP_CHG_REUSE = 0,
2875
MAP_CHG_NEEDED = 1,
2876
/*
2877
* The per-vma resv count cannot be used, hence a new resv
2878
* count is enforced.
2879
*
2880
* NOTE: This is mostly identical to MAP_CHG_NEEDED, except
2881
* that currently vma_needs_reservation() has an unwanted side
2882
* effect to either use end() or commit() to complete the
2883
* transaction. Hence it needs to differentiate from NEEDED.
2884
*/
2885
MAP_CHG_ENFORCED = 2,
2886
} map_chg_state;
2887
2888
/*
2889
* NOTE! "cow_from_owner" represents a very hacky usage only used in CoW
2890
* faults of hugetlb private mappings on top of a non-page-cache folio (in
2891
* which case even if there's a private vma resv map it won't cover such
2892
* allocation). New call sites should (probably) never set it to true!!
2893
* When it's set, the allocation will bypass all vma level reservations.
2894
*/
2895
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
2896
unsigned long addr, bool cow_from_owner)
2897
{
2898
struct hugepage_subpool *spool = subpool_vma(vma);
2899
struct hstate *h = hstate_vma(vma);
2900
struct folio *folio;
2901
long retval, gbl_chg, gbl_reserve;
2902
map_chg_state map_chg;
2903
int ret, idx;
2904
struct hugetlb_cgroup *h_cg = NULL;
2905
gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
2906
2907
idx = hstate_index(h);
2908
2909
/* Do we need a separate per-vma reservation? */
2910
if (cow_from_owner) {
2911
/*
2912
* Special case! Since it's a CoW on top of a reserved
2913
* page, the private resv map doesn't count. So it cannot
2914
* consume the per-vma resv map even if it's reserved.
2915
*/
2916
map_chg = MAP_CHG_ENFORCED;
2917
} else {
2918
/*
2919
* Examine the region/reserve map to determine if the process
2920
* has a reservation for the page to be allocated. A return
2921
* code of zero indicates a reservation exists (no change).
2922
*/
2923
retval = vma_needs_reservation(h, vma, addr);
2924
if (retval < 0)
2925
return ERR_PTR(-ENOMEM);
2926
map_chg = retval ? MAP_CHG_NEEDED : MAP_CHG_REUSE;
2927
}
2928
2929
/*
2930
* Do we need a separate global reservation?
2931
*
2932
* Processes that did not create the mapping will have no
2933
* reserves as indicated by the region/reserve map. Check
2934
* that the allocation will not exceed the subpool limit.
2935
* Or if it can get one from the pool reservation directly.
2936
*/
2937
if (map_chg) {
2938
gbl_chg = hugepage_subpool_get_pages(spool, 1);
2939
if (gbl_chg < 0)
2940
goto out_end_reservation;
2941
} else {
2942
/*
2943
* If we have the vma reservation ready, no need for extra
2944
* global reservation.
2945
*/
2946
gbl_chg = 0;
2947
}
2948
2949
/*
2950
* If this allocation is not consuming a per-vma reservation,
2951
* charge the hugetlb cgroup now.
2952
*/
2953
if (map_chg) {
2954
ret = hugetlb_cgroup_charge_cgroup_rsvd(
2955
idx, pages_per_huge_page(h), &h_cg);
2956
if (ret)
2957
goto out_subpool_put;
2958
}
2959
2960
ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2961
if (ret)
2962
goto out_uncharge_cgroup_reservation;
2963
2964
spin_lock_irq(&hugetlb_lock);
2965
/*
2966
* gbl_chg is passed to indicate whether or not a page must be taken
2967
* from the global free pool (global change). gbl_chg == 0 indicates
2968
* a reservation exists for the allocation.
2969
*/
2970
folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
2971
if (!folio) {
2972
spin_unlock_irq(&hugetlb_lock);
2973
folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
2974
if (!folio)
2975
goto out_uncharge_cgroup;
2976
spin_lock_irq(&hugetlb_lock);
2977
list_add(&folio->lru, &h->hugepage_activelist);
2978
folio_ref_unfreeze(folio, 1);
2979
/* Fall through */
2980
}
2981
2982
/*
2983
* Either dequeued or buddy-allocated folio needs to add special
2984
* mark to the folio when it consumes a global reservation.
2985
*/
2986
if (!gbl_chg) {
2987
folio_set_hugetlb_restore_reserve(folio);
2988
h->resv_huge_pages--;
2989
}
2990
2991
hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
2992
/* If allocation is not consuming a reservation, also store the
2993
* hugetlb_cgroup pointer on the page.
2994
*/
2995
if (map_chg) {
2996
hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2997
h_cg, folio);
2998
}
2999
3000
spin_unlock_irq(&hugetlb_lock);
3001
3002
hugetlb_set_folio_subpool(folio, spool);
3003
3004
if (map_chg != MAP_CHG_ENFORCED) {
3005
/* commit() is only needed if the map_chg is not enforced */
3006
retval = vma_commit_reservation(h, vma, addr);
3007
/*
3008
* Check for possible race conditions. When such a race happens,
3009
* the page was added to the reservation map between
3010
* vma_needs_reservation and vma_commit_reservation.
3011
* This indicates a race with hugetlb_reserve_pages.
3012
* Adjust for the subpool count incremented above AND
3013
* in hugetlb_reserve_pages for the same page. Also,
3014
* the reservation count added in hugetlb_reserve_pages
3015
* no longer applies.
3016
*/
3017
if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) {
3018
long rsv_adjust;
3019
3020
rsv_adjust = hugepage_subpool_put_pages(spool, 1);
3021
hugetlb_acct_memory(h, -rsv_adjust);
3022
if (map_chg) {
3023
spin_lock_irq(&hugetlb_lock);
3024
hugetlb_cgroup_uncharge_folio_rsvd(
3025
hstate_index(h), pages_per_huge_page(h),
3026
folio);
3027
spin_unlock_irq(&hugetlb_lock);
3028
}
3029
}
3030
}
3031
3032
ret = mem_cgroup_charge_hugetlb(folio, gfp);
3033
/*
3034
* Unconditionally increment NR_HUGETLB here. If it turns out that
3035
* mem_cgroup_charge_hugetlb failed, then immediately free the page and
3036
* decrement NR_HUGETLB.
3037
*/
3038
lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h));
3039
3040
if (ret == -ENOMEM) {
3041
free_huge_folio(folio);
3042
return ERR_PTR(-ENOMEM);
3043
}
3044
3045
return folio;
3046
3047
out_uncharge_cgroup:
3048
hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
3049
out_uncharge_cgroup_reservation:
3050
if (map_chg)
3051
hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
3052
h_cg);
3053
out_subpool_put:
3054
/*
3055
* put page to subpool iff the quota of subpool's rsv_hpages is used
3056
* during hugepage_subpool_get_pages.
3057
*/
3058
if (map_chg && !gbl_chg) {
3059
gbl_reserve = hugepage_subpool_put_pages(spool, 1);
3060
hugetlb_acct_memory(h, -gbl_reserve);
3061
}
3062
3063
3064
out_end_reservation:
3065
if (map_chg != MAP_CHG_ENFORCED)
3066
vma_end_reservation(h, vma, addr);
3067
return ERR_PTR(-ENOSPC);
3068
}
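/*
 * Editor's illustrative sketch (not kernel code): a caller in the style of
 * the fault path.  alloc_hugetlb_folio() returns an ERR_PTR on failure,
 * and if a later step fails before the folio is instantiated, the caller
 * must pair the free with restore_reserve_on_error() so the reserve map
 * stays consistent.  example_map_folio() is a hypothetical stand-in for
 * the real mapping step, and fault-mutex/locking details are omitted.
 */
#if 0
static vm_fault_t example_fault_alloc(struct vm_area_struct *vma,
                                      unsigned long address)
{
        struct hstate *h = hstate_vma(vma);
        struct folio *folio;

        folio = alloc_hugetlb_folio(vma, address, false);
        if (IS_ERR(folio))
                return vmf_error(PTR_ERR(folio));

        if (example_map_folio(vma, address, folio)) {
                restore_reserve_on_error(h, vma, address, folio);
                folio_put(folio);
                return VM_FAULT_OOM;
        }

        return 0;
}
#endif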
3069
3070
static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
3071
{
3072
struct huge_bootmem_page *m;
3073
int listnode = nid;
3074
3075
if (hugetlb_early_cma(h))
3076
m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact);
3077
else {
3078
if (node_exact)
3079
m = memblock_alloc_exact_nid_raw(huge_page_size(h),
3080
huge_page_size(h), 0,
3081
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3082
else {
3083
m = memblock_alloc_try_nid_raw(huge_page_size(h),
3084
huge_page_size(h), 0,
3085
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3086
/*
3087
* For pre-HVO to work correctly, pages need to be on
3088
* the list for the node they were actually allocated
3089
* from. That node may be different in the case of
3090
* fallback by memblock_alloc_try_nid_raw. So,
3091
* extract the actual node first.
3092
*/
3093
if (m)
3094
listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m)));
3095
}
3096
3097
if (m) {
3098
m->flags = 0;
3099
m->cma = NULL;
3100
}
3101
}
3102
3103
if (m) {
3104
/*
3105
* Use the beginning of the huge page to store the
3106
* huge_bootmem_page struct (until gather_bootmem
3107
* puts them into the mem_map).
3108
*
3109
* Put them into a private list first because mem_map
3110
* is not up yet.
3111
*/
3112
INIT_LIST_HEAD(&m->list);
3113
list_add(&m->list, &huge_boot_pages[listnode]);
3114
m->hstate = h;
3115
}
3116
3117
return m;
3118
}
3119
3120
int alloc_bootmem_huge_page(struct hstate *h, int nid)
3121
__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
3122
int __alloc_bootmem_huge_page(struct hstate *h, int nid)
3123
{
3124
struct huge_bootmem_page *m = NULL; /* initialize for clang */
3125
int nr_nodes, node = nid;
3126
3127
/* do node specific alloc */
3128
if (nid != NUMA_NO_NODE) {
3129
m = alloc_bootmem(h, node, true);
3130
if (!m)
3131
return 0;
3132
goto found;
3133
}
3134
3135
/* allocate from next node when distributing huge pages */
3136
for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node,
3137
&hugetlb_bootmem_nodes) {
3138
m = alloc_bootmem(h, node, false);
3139
if (!m)
3140
return 0;
3141
goto found;
3142
}
3143
3144
found:
3145
3146
/*
3147
* Only initialize the head struct page in memmap_init_reserved_pages,
3148
* the rest of the struct pages will be initialized by the HugeTLB
3149
* subsystem itself.
3150
* The head struct page is used to get folio information by the HugeTLB
3151
* subsystem like zone id and node id.
3152
*/
3153
memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
3154
huge_page_size(h) - PAGE_SIZE);
3155
3156
return 1;
3157
}
3158
3159
/* Initialize [start_page:end_page_number] tail struct pages of a hugepage */
3160
static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
3161
unsigned long start_page_number,
3162
unsigned long end_page_number)
3163
{
3164
enum zone_type zone = folio_zonenum(folio);
3165
int nid = folio_nid(folio);
3166
struct page *page = folio_page(folio, start_page_number);
3167
unsigned long head_pfn = folio_pfn(folio);
3168
unsigned long pfn, end_pfn = head_pfn + end_page_number;
3169
3170
/*
3171
* As we marked all tail pages with memblock_reserved_mark_noinit(),
3172
* we must initialize them ourselves here.
3173
*/
3174
for (pfn = head_pfn + start_page_number; pfn < end_pfn; page++, pfn++) {
3175
__init_single_page(page, pfn, zone, nid);
3176
prep_compound_tail((struct page *)folio, pfn - head_pfn);
3177
set_page_count(page, 0);
3178
}
3179
}
3180
3181
static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
3182
struct hstate *h,
3183
unsigned long nr_pages)
3184
{
3185
int ret;
3186
3187
/*
3188
* This is an open-coded prep_compound_page() whereby we avoid
3189
* walking pages twice by initializing/preparing+freezing them in the
3190
* same go.
3191
*/
3192
__folio_clear_reserved(folio);
3193
__folio_set_head(folio);
3194
ret = folio_ref_freeze(folio, 1);
3195
VM_BUG_ON(!ret);
3196
hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages);
3197
prep_compound_head(&folio->page, huge_page_order(h));
3198
}
3199
3200
static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m)
3201
{
3202
return m->flags & HUGE_BOOTMEM_HVO;
3203
}
3204
3205
static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m)
3206
{
3207
return m->flags & HUGE_BOOTMEM_CMA;
3208
}
3209
3210
/*
3211
* memblock-allocated pageblocks might not have the migrate type set
3212
* if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE)
3213
* here, or MIGRATE_CMA if this was a page allocated through an early CMA
3214
* reservation.
3215
*
3216
* In case of vmemmap optimized folios, the tail vmemmap pages are mapped
3217
* read-only, but that's ok - for sparse vmemmap this does not write to
3218
* the page structure.
3219
*/
3220
static void __init hugetlb_bootmem_init_migratetype(struct folio *folio,
3221
struct hstate *h)
3222
{
3223
unsigned long nr_pages = pages_per_huge_page(h), i;
3224
3225
WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio)));
3226
3227
for (i = 0; i < nr_pages; i += pageblock_nr_pages) {
3228
if (folio_test_hugetlb_cma(folio))
3229
init_cma_pageblock(folio_page(folio, i));
3230
else
3231
init_pageblock_migratetype(folio_page(folio, i),
3232
MIGRATE_MOVABLE, false);
3233
}
3234
}
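/*
 * Editor's worked example (typical x86-64 configuration, illustrative):
 * a 1 GiB gigantic folio spans 262144 base pages, and with
 * pageblock_nr_pages = 512 (2 MiB pageblocks) the loop above sets the
 * migrate type on 262144 / 512 = 512 pageblocks.
 */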
3235
3236
static void __init prep_and_add_bootmem_folios(struct hstate *h,
3237
struct list_head *folio_list)
3238
{
3239
unsigned long flags;
3240
struct folio *folio, *tmp_f;
3241
3242
/* Send list for bulk vmemmap optimization processing */
3243
hugetlb_vmemmap_optimize_bootmem_folios(h, folio_list);
3244
3245
list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
3246
if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
3247
/*
3248
* If HVO fails, initialize all tail struct pages.
3249
* We do not worry about potential long lock hold
3250
* time as this is early in boot and there should
3251
* be no contention.
3252
*/
3253
hugetlb_folio_init_tail_vmemmap(folio,
3254
HUGETLB_VMEMMAP_RESERVE_PAGES,
3255
pages_per_huge_page(h));
3256
}
3257
hugetlb_bootmem_init_migratetype(folio, h);
3258
/* Subdivide locks to achieve better parallel performance */
3259
spin_lock_irqsave(&hugetlb_lock, flags);
3260
account_new_hugetlb_folio(h, folio);
3261
enqueue_hugetlb_folio(h, folio);
3262
spin_unlock_irqrestore(&hugetlb_lock, flags);
3263
}
3264
}
3265
3266
bool __init hugetlb_bootmem_page_zones_valid(int nid,
3267
struct huge_bootmem_page *m)
3268
{
3269
unsigned long start_pfn;
3270
bool valid;
3271
3272
if (m->flags & HUGE_BOOTMEM_ZONES_VALID) {
3273
/*
3274
* Already validated, skip check.
3275
*/
3276
return true;
3277
}
3278
3279
if (hugetlb_bootmem_page_earlycma(m)) {
3280
valid = cma_validate_zones(m->cma);
3281
goto out;
3282
}
3283
3284
start_pfn = virt_to_phys(m) >> PAGE_SHIFT;
3285
3286
valid = !pfn_range_intersects_zones(nid, start_pfn,
3287
pages_per_huge_page(m->hstate));
3288
out:
3289
if (!valid)
3290
hstate_boot_nrinvalid[hstate_index(m->hstate)]++;
3291
3292
return valid;
3293
}
3294
3295
/*
3296
* Free a bootmem page that was found to be invalid (intersecting with
3297
* multiple zones).
3298
*
3299
* Since it intersects with multiple zones, we can't just do a free
3300
* operation on all pages at once, but instead have to walk all
3301
* pages, freeing them one by one.
3302
*/
3303
static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page,
3304
struct hstate *h)
3305
{
3306
unsigned long npages = pages_per_huge_page(h);
3307
unsigned long pfn;
3308
3309
while (npages--) {
3310
pfn = page_to_pfn(page);
3311
__init_page_from_nid(pfn, nid);
3312
free_reserved_page(page);
3313
page++;
3314
}
3315
}
3316
3317
/*
3318
* Put bootmem huge pages into the standard lists after mem_map is up.
3319
* Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
3320
*/
3321
static void __init gather_bootmem_prealloc_node(unsigned long nid)
3322
{
3323
LIST_HEAD(folio_list);
3324
struct huge_bootmem_page *m, *tm;
3325
struct hstate *h = NULL, *prev_h = NULL;
3326
3327
list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) {
3328
struct page *page = virt_to_page(m);
3329
struct folio *folio = (void *)page;
3330
3331
h = m->hstate;
3332
if (!hugetlb_bootmem_page_zones_valid(nid, m)) {
3333
/*
3334
* Can't use this page. Initialize the
3335
* page structures if that hasn't already
3336
* been done, and give them to the page
3337
* allocator.
3338
*/
3339
hugetlb_bootmem_free_invalid_page(nid, page, h);
3340
continue;
3341
}
3342
3343
/*
3344
* It is possible to have multiple huge page sizes (hstates)
3345
* in this list. If so, process each size separately.
3346
*/
3347
if (h != prev_h && prev_h != NULL)
3348
prep_and_add_bootmem_folios(prev_h, &folio_list);
3349
prev_h = h;
3350
3351
VM_BUG_ON(!hstate_is_gigantic(h));
3352
WARN_ON(folio_ref_count(folio) != 1);
3353
3354
hugetlb_folio_init_vmemmap(folio, h,
3355
HUGETLB_VMEMMAP_RESERVE_PAGES);
3356
init_new_hugetlb_folio(folio);
3357
3358
if (hugetlb_bootmem_page_prehvo(m))
3359
/*
3360
* If pre-HVO was done, just set the
3361
* flag, the HVO code will then skip
3362
* this folio.
3363
*/
3364
folio_set_hugetlb_vmemmap_optimized(folio);
3365
3366
if (hugetlb_bootmem_page_earlycma(m))
3367
folio_set_hugetlb_cma(folio);
3368
3369
list_add(&folio->lru, &folio_list);
3370
3371
/*
3372
* We need to restore the 'stolen' pages to totalram_pages
3373
* in order to fix confusing memory reports from free(1) and
3374
* other side-effects, like CommitLimit going negative.
3375
*
3376
* For CMA pages, this is done in init_cma_pageblock
3377
* (via hugetlb_bootmem_init_migratetype), so skip it here.
3378
*/
3379
if (!folio_test_hugetlb_cma(folio))
3380
adjust_managed_page_count(page, pages_per_huge_page(h));
3381
cond_resched();
3382
}
3383
3384
prep_and_add_bootmem_folios(h, &folio_list);
3385
}
3386
3387
static void __init gather_bootmem_prealloc_parallel(unsigned long start,
3388
unsigned long end, void *arg)
3389
{
3390
int nid;
3391
3392
for (nid = start; nid < end; nid++)
3393
gather_bootmem_prealloc_node(nid);
3394
}
3395
3396
static void __init gather_bootmem_prealloc(void)
3397
{
3398
struct padata_mt_job job = {
3399
.thread_fn = gather_bootmem_prealloc_parallel,
3400
.fn_arg = NULL,
3401
.start = 0,
3402
.size = nr_node_ids,
3403
.align = 1,
3404
.min_chunk = 1,
3405
.max_threads = num_node_state(N_MEMORY),
3406
.numa_aware = true,
3407
};
3408
3409
padata_do_multithreaded(&job);
3410
}
3411
3412
static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3413
{
3414
unsigned long i;
3415
char buf[32];
3416
LIST_HEAD(folio_list);
3417
3418
for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3419
if (hstate_is_gigantic(h)) {
3420
if (!alloc_bootmem_huge_page(h, nid))
3421
break;
3422
} else {
3423
struct folio *folio;
3424
gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3425
3426
folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3427
&node_states[N_MEMORY], NULL);
3428
if (!folio)
3429
break;
3430
list_add(&folio->lru, &folio_list);
3431
}
3432
cond_resched();
3433
}
3434
3435
if (!list_empty(&folio_list))
3436
prep_and_add_allocated_folios(h, &folio_list);
3437
3438
if (i == h->max_huge_pages_node[nid])
3439
return;
3440
3441
string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3442
pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n",
3443
h->max_huge_pages_node[nid], buf, nid, i);
3444
h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3445
h->max_huge_pages_node[nid] = i;
3446
}
3447
3448
static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h)
3449
{
3450
int i;
3451
bool node_specific_alloc = false;
3452
3453
for_each_online_node(i) {
3454
if (h->max_huge_pages_node[i] > 0) {
3455
hugetlb_hstate_alloc_pages_onenode(h, i);
3456
node_specific_alloc = true;
3457
}
3458
}
3459
3460
return node_specific_alloc;
3461
}
3462
3463
static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h)
3464
{
3465
if (allocated < h->max_huge_pages) {
3466
char buf[32];
3467
3468
string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3469
pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
3470
h->max_huge_pages, buf, allocated);
3471
h->max_huge_pages = allocated;
3472
}
3473
}
3474
3475
static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg)
3476
{
3477
struct hstate *h = (struct hstate *)arg;
3478
int i, num = end - start;
3479
nodemask_t node_alloc_noretry;
3480
LIST_HEAD(folio_list);
3481
int next_node = first_online_node;
3482
3483
/* Bit mask controlling how hard we retry per-node allocations.*/
3484
nodes_clear(node_alloc_noretry);
3485
3486
for (i = 0; i < num; ++i) {
3487
struct folio *folio;
3488
3489
if (hugetlb_vmemmap_optimizable_size(h) &&
3490
(si_mem_available() == 0) && !list_empty(&folio_list)) {
3491
prep_and_add_allocated_folios(h, &folio_list);
3492
INIT_LIST_HEAD(&folio_list);
3493
}
3494
folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
3495
&node_alloc_noretry, &next_node);
3496
if (!folio)
3497
break;
3498
3499
list_move(&folio->lru, &folio_list);
3500
cond_resched();
3501
}
3502
3503
prep_and_add_allocated_folios(h, &folio_list);
3504
}
3505
3506
static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
3507
{
3508
unsigned long i;
3509
3510
for (i = 0; i < h->max_huge_pages; ++i) {
3511
if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3512
break;
3513
cond_resched();
3514
}
3515
3516
return i;
3517
}
3518
3519
static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
3520
{
3521
struct padata_mt_job job = {
3522
.fn_arg = h,
3523
.align = 1,
3524
.numa_aware = true
3525
};
3526
3527
unsigned long jiffies_start;
3528
unsigned long jiffies_end;
3529
unsigned long remaining;
3530
3531
job.thread_fn = hugetlb_pages_alloc_boot_node;
3532
3533
/*
3534
* job.max_threads is 25% of the available cpu threads by default.
3535
*
3536
* On large servers with terabytes of memory, huge page allocation
3537
* can consume a considerable amount of time.
3538
*
3539
* The tests below show how long it takes to allocate 1 TiB of memory with
3540
* 2MiB huge pages. Using more threads can significantly improve allocation time.
3541
*
3542
* +-----------------------+-------+-------+-------+-------+-------+
3543
* | threads | 8 | 16 | 32 | 64 | 128 |
3544
* +-----------------------+-------+-------+-------+-------+-------+
3545
* | skylake 144 cpus | 44s | 22s | 16s | 19s | 20s |
3546
* | cascade lake 192 cpus | 39s | 20s | 11s | 10s | 9s |
3547
* +-----------------------+-------+-------+-------+-------+-------+
3548
*/
3549
if (hugepage_allocation_threads == 0) {
3550
hugepage_allocation_threads = num_online_cpus() / 4;
3551
hugepage_allocation_threads = max(hugepage_allocation_threads, 1);
3552
}
3553
3554
job.max_threads = hugepage_allocation_threads;
3555
3556
jiffies_start = jiffies;
3557
do {
3558
remaining = h->max_huge_pages - h->nr_huge_pages;
3559
3560
job.start = h->nr_huge_pages;
3561
job.size = remaining;
3562
job.min_chunk = remaining / hugepage_allocation_threads;
3563
padata_do_multithreaded(&job);
3564
3565
if (h->nr_huge_pages == h->max_huge_pages)
3566
break;
3567
3568
/*
3569
* Retry only if the vmemmap optimization might have been able to free
3570
* some memory back to the system.
3571
*/
3572
if (!hugetlb_vmemmap_optimizable(h))
3573
break;
3574
3575
/* Continue if progress was made in last iteration */
3576
} while (remaining != (h->max_huge_pages - h->nr_huge_pages));
3577
3578
jiffies_end = jiffies;
3579
3580
pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n",
3581
jiffies_to_msecs(jiffies_end - jiffies_start),
3582
hugepage_allocation_threads);
3583
3584
return h->nr_huge_pages;
3585
}
3586
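/*
* Worked example of the boot-time allocation above (numbers are an
* illustrative assumption, not a measured boot log): booting a 192-CPU
* machine with "hugepages=524288" (1 TiB of 2MiB pages) and no explicit
* hugepage_alloc_threads= gives num_online_cpus() / 4 = 48 padata
* threads, which lands between the 32- and 64-thread columns of the
* timing table above, i.e. on the order of ten seconds.
*/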
3587
/*
3588
* NOTE: this routine is called in different contexts for gigantic and
3589
* non-gigantic pages.
3590
* - For gigantic pages, this is called early in the boot process and
3591
* pages are allocated from memblock or something similar.
3592
* Gigantic pages are actually added to pools later with the routine
3593
* gather_bootmem_prealloc.
3594
* - For non-gigantic pages, this is called later in the boot process after
3595
* all of mm is up and functional. Pages are allocated from buddy and
3596
* then added to hugetlb pools.
3597
*/
3598
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
3599
{
3600
unsigned long allocated;
3601
3602
/*
3603
* Skip gigantic hugepages allocation if early CMA
3604
* reservations are not available.
3605
*/
3606
if (hstate_is_gigantic(h) && hugetlb_cma_total_size() &&
3607
!hugetlb_early_cma(h)) {
3608
pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3609
return;
3610
}
3611
3612
if (!h->max_huge_pages)
3613
return;
3614
3615
/* do node specific alloc */
3616
if (hugetlb_hstate_alloc_pages_specific_nodes(h))
3617
return;
3618
3619
/* below will do all node balanced alloc */
3620
if (hstate_is_gigantic(h))
3621
allocated = hugetlb_gigantic_pages_alloc_boot(h);
3622
else
3623
allocated = hugetlb_pages_alloc_boot(h);
3624
3625
hugetlb_hstate_alloc_pages_errcheck(allocated, h);
3626
}
3627
3628
static void __init hugetlb_init_hstates(void)
3629
{
3630
struct hstate *h, *h2;
3631
3632
for_each_hstate(h) {
3633
/*
3634
* Always reset to first_memory_node here, even if
3635
* next_nid_to_alloc was set before - we can't
3636
* reference hugetlb_bootmem_nodes after init, and
3637
* first_memory_node is right for all further allocations.
3638
*/
3639
h->next_nid_to_alloc = first_memory_node;
3640
h->next_nid_to_free = first_memory_node;
3641
3642
/* oversize hugepages were init'ed in early boot */
3643
if (!hstate_is_gigantic(h))
3644
hugetlb_hstate_alloc_pages(h);
3645
3646
/*
3647
* Set demote order for each hstate. Note that
3648
* h->demote_order is initially 0.
3649
* - We can not demote gigantic pages if runtime freeing
3650
* is not supported, so skip this.
3651
* - If CMA allocation is possible, we can not demote
3652
* HUGETLB_PAGE_ORDER or smaller size pages.
3653
*/
3654
if (hstate_is_gigantic_no_runtime(h))
3655
continue;
3656
if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER)
3657
continue;
3658
for_each_hstate(h2) {
3659
if (h2 == h)
3660
continue;
3661
if (h2->order < h->order &&
3662
h2->order > h->demote_order)
3663
h->demote_order = h2->order;
3664
}
3665
}
3666
}
3667
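/*
* Worked example of the demote_order selection above (assuming x86-64
* with 4K base pages): with 2MiB (order 9) and 1GiB (order 18) hstates
* registered, the 1GiB hstate ends up with demote_order = 9, while the
* 2MiB hstate keeps demote_order = 0 since there is nothing smaller to
* demote to.
*/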
3668
static void __init report_hugepages(void)
3669
{
3670
struct hstate *h;
3671
unsigned long nrinvalid;
3672
3673
for_each_hstate(h) {
3674
char buf[32];
3675
3676
nrinvalid = hstate_boot_nrinvalid[hstate_index(h)];
3677
h->max_huge_pages -= nrinvalid;
3678
3679
string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3680
pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
3681
buf, h->nr_huge_pages);
3682
if (nrinvalid)
3683
pr_info("HugeTLB: %s page size: %lu invalid page%s discarded\n",
3684
buf, nrinvalid, str_plural(nrinvalid));
3685
pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
3686
hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3687
}
3688
}
3689
3690
#ifdef CONFIG_HIGHMEM
3691
static void try_to_free_low(struct hstate *h, unsigned long count,
3692
nodemask_t *nodes_allowed)
3693
{
3694
int i;
3695
LIST_HEAD(page_list);
3696
3697
lockdep_assert_held(&hugetlb_lock);
3698
if (hstate_is_gigantic(h))
3699
return;
3700
3701
/*
3702
* Collect pages to be freed on a list, and free after dropping lock
3703
*/
3704
for_each_node_mask(i, *nodes_allowed) {
3705
struct folio *folio, *next;
3706
struct list_head *freel = &h->hugepage_freelists[i];
3707
list_for_each_entry_safe(folio, next, freel, lru) {
3708
if (count >= h->nr_huge_pages)
3709
goto out;
3710
if (folio_test_highmem(folio))
3711
continue;
3712
remove_hugetlb_folio(h, folio, false);
3713
list_add(&folio->lru, &page_list);
3714
}
3715
}
3716
3717
out:
3718
spin_unlock_irq(&hugetlb_lock);
3719
update_and_free_pages_bulk(h, &page_list);
3720
spin_lock_irq(&hugetlb_lock);
3721
}
3722
#else
3723
static inline void try_to_free_low(struct hstate *h, unsigned long count,
3724
nodemask_t *nodes_allowed)
3725
{
3726
}
3727
#endif
3728
3729
/*
3730
* Increment or decrement surplus_huge_pages. Keep node-specific counters
3731
* balanced by operating on them in a round-robin fashion.
3732
* Returns 1 if an adjustment was made.
3733
*/
3734
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3735
int delta)
3736
{
3737
int nr_nodes, node;
3738
3739
lockdep_assert_held(&hugetlb_lock);
3740
VM_BUG_ON(delta != -1 && delta != 1);
3741
3742
if (delta < 0) {
3743
for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) {
3744
if (h->surplus_huge_pages_node[node])
3745
goto found;
3746
}
3747
} else {
3748
for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3749
if (h->surplus_huge_pages_node[node] <
3750
h->nr_huge_pages_node[node])
3751
goto found;
3752
}
3753
}
3754
return 0;
3755
3756
found:
3757
h->surplus_huge_pages += delta;
3758
h->surplus_huge_pages_node[node] += delta;
3759
return 1;
3760
}
3761
3762
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
3763
static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3764
nodemask_t *nodes_allowed)
3765
{
3766
unsigned long persistent_free_count;
3767
unsigned long min_count;
3768
unsigned long allocated;
3769
struct folio *folio;
3770
LIST_HEAD(page_list);
3771
NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3772
3773
/*
3774
* Bit mask controlling how hard we retry per-node allocations.
3775
* If we can not allocate the bit mask, do not attempt to allocate
3776
* the requested huge pages.
3777
*/
3778
if (node_alloc_noretry)
3779
nodes_clear(*node_alloc_noretry);
3780
else
3781
return -ENOMEM;
3782
3783
/*
3784
* resize_lock mutex prevents concurrent adjustments to number of
3785
* pages in hstate via the proc/sysfs interfaces.
3786
*/
3787
mutex_lock(&h->resize_lock);
3788
flush_free_hpage_work(h);
3789
spin_lock_irq(&hugetlb_lock);
3790
3791
/*
3792
* Check for a node specific request.
3793
* Changing node specific huge page count may require a corresponding
3794
* change to the global count. In any case, the passed node mask
3795
* (nodes_allowed) will restrict alloc/free to the specified node.
3796
*/
3797
if (nid != NUMA_NO_NODE) {
3798
unsigned long old_count = count;
3799
3800
count += persistent_huge_pages(h) -
3801
(h->nr_huge_pages_node[nid] -
3802
h->surplus_huge_pages_node[nid]);
3803
/*
3804
* User may have specified a large count value which caused the
3805
* above calculation to overflow. In this case, they wanted
3806
* to allocate as many huge pages as possible. Set count to
3807
* largest possible value to align with their intention.
3808
*/
3809
if (count < old_count)
3810
count = ULONG_MAX;
3811
}
3812
3813
/*
3814
* Gigantic pages runtime allocation depends on the capability for large
3815
* page range allocation.
3816
* If the system does not provide this feature, return an error when
3817
* the user tries to allocate gigantic pages but let the user free the
3818
* boottime allocated gigantic pages.
3819
*/
3820
if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3821
if (count > persistent_huge_pages(h)) {
3822
spin_unlock_irq(&hugetlb_lock);
3823
mutex_unlock(&h->resize_lock);
3824
NODEMASK_FREE(node_alloc_noretry);
3825
return -EINVAL;
3826
}
3827
/* Fall through to decrease pool */
3828
}
3829
3830
/*
3831
* Increase the pool size
3832
* First take pages out of surplus state. Then make up the
3833
* remaining difference by allocating fresh huge pages.
3834
*
3835
* We might race with alloc_surplus_hugetlb_folio() here and be unable
3836
* to convert a surplus huge page to a normal huge page. That is
3837
* not critical, though, it just means the overall size of the
3838
* pool might be one hugepage larger than it needs to be, but
3839
* within all the constraints specified by the sysctls.
3840
*/
3841
while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3842
if (!adjust_pool_surplus(h, nodes_allowed, -1))
3843
break;
3844
}
3845
3846
allocated = 0;
3847
while (count > (persistent_huge_pages(h) + allocated)) {
3848
/*
3849
* If this allocation races such that we no longer need the
3850
* page, free_huge_folio will handle it by freeing the page
3851
* and reducing the surplus.
3852
*/
3853
spin_unlock_irq(&hugetlb_lock);
3854
3855
/* yield cpu to avoid soft lockup */
3856
cond_resched();
3857
3858
folio = alloc_pool_huge_folio(h, nodes_allowed,
3859
node_alloc_noretry,
3860
&h->next_nid_to_alloc);
3861
if (!folio) {
3862
prep_and_add_allocated_folios(h, &page_list);
3863
spin_lock_irq(&hugetlb_lock);
3864
goto out;
3865
}
3866
3867
list_add(&folio->lru, &page_list);
3868
allocated++;
3869
3870
/* Bail for signals. Probably ctrl-c from user */
3871
if (signal_pending(current)) {
3872
prep_and_add_allocated_folios(h, &page_list);
3873
spin_lock_irq(&hugetlb_lock);
3874
goto out;
3875
}
3876
3877
spin_lock_irq(&hugetlb_lock);
3878
}
3879
3880
/* Add allocated pages to the pool */
3881
if (!list_empty(&page_list)) {
3882
spin_unlock_irq(&hugetlb_lock);
3883
prep_and_add_allocated_folios(h, &page_list);
3884
spin_lock_irq(&hugetlb_lock);
3885
}
3886
3887
/*
3888
* Decrease the pool size
3889
* First return free pages to the buddy allocator (being careful
3890
* to keep enough around to satisfy reservations). Then place
3891
* pages into surplus state as needed so the pool will shrink
3892
* to the desired size as pages become free.
3893
*
3894
* By placing pages into the surplus state independent of the
3895
* overcommit value, we are allowing the surplus pool size to
3896
* exceed overcommit. There are few sane options here. Since
3897
* alloc_surplus_hugetlb_folio() is checking the global counter,
3898
* though, we'll note that we're not allowed to exceed surplus
3899
* and won't grow the pool anywhere else. Not until one of the
3900
* sysctls is changed, or the surplus pages go out of use.
3901
*
3902
* min_count is the expected number of persistent pages; we
3903
* shouldn't calculate min_count by using
3904
* resv_huge_pages + persistent_huge_pages() - free_huge_pages,
3905
* because there may exist free surplus huge pages, and this will
3906
* lead to subtracting twice. Free surplus huge pages come from HVO
3907
* failing to restore vmemmap, see comments in the callers of
3908
* hugetlb_vmemmap_restore_folio(). Thus, we should calculate
3909
* persistent free count first.
3910
*/
3911
persistent_free_count = h->free_huge_pages;
3912
if (h->free_huge_pages > persistent_huge_pages(h)) {
3913
if (h->free_huge_pages > h->surplus_huge_pages)
3914
persistent_free_count -= h->surplus_huge_pages;
3915
else
3916
persistent_free_count = 0;
3917
}
3918
min_count = h->resv_huge_pages + persistent_huge_pages(h) - persistent_free_count;
3919
min_count = max(count, min_count);
3920
try_to_free_low(h, min_count, nodes_allowed);
3921
3922
/*
3923
* Collect pages to be removed on list without dropping lock
3924
*/
3925
while (min_count < persistent_huge_pages(h)) {
3926
folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
3927
if (!folio)
3928
break;
3929
3930
list_add(&folio->lru, &page_list);
3931
}
3932
/* free the pages after dropping lock */
3933
spin_unlock_irq(&hugetlb_lock);
3934
update_and_free_pages_bulk(h, &page_list);
3935
flush_free_hpage_work(h);
3936
spin_lock_irq(&hugetlb_lock);
3937
3938
while (count < persistent_huge_pages(h)) {
3939
if (!adjust_pool_surplus(h, nodes_allowed, 1))
3940
break;
3941
}
3942
out:
3943
h->max_huge_pages = persistent_huge_pages(h);
3944
spin_unlock_irq(&hugetlb_lock);
3945
mutex_unlock(&h->resize_lock);
3946
3947
NODEMASK_FREE(node_alloc_noretry);
3948
3949
return 0;
3950
}
3951
3952
static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst,
3953
struct list_head *src_list)
3954
{
3955
long rc;
3956
struct folio *folio, *next;
3957
LIST_HEAD(dst_list);
3958
LIST_HEAD(ret_list);
3959
3960
rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list);
3961
list_splice_init(&ret_list, src_list);
3962
3963
/*
3964
* Taking target hstate mutex synchronizes with set_max_huge_pages.
3965
* Without the mutex, pages added to target hstate could be marked
3966
* as surplus.
3967
*
3968
* Note that we already hold src->resize_lock. To prevent deadlock,
3969
* use the convention of always taking larger size hstate mutex first.
3970
*/
3971
mutex_lock(&dst->resize_lock);
3972
3973
list_for_each_entry_safe(folio, next, src_list, lru) {
3974
int i;
3975
bool cma;
3976
3977
if (folio_test_hugetlb_vmemmap_optimized(folio))
3978
continue;
3979
3980
cma = folio_test_hugetlb_cma(folio);
3981
3982
list_del(&folio->lru);
3983
3984
split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst));
3985
pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst));
3986
3987
for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) {
3988
struct page *page = folio_page(folio, i);
3989
/* Careful: see __split_huge_page_tail() */
3990
struct folio *new_folio = (struct folio *)page;
3991
3992
clear_compound_head(page);
3993
prep_compound_page(page, dst->order);
3994
3995
new_folio->mapping = NULL;
3996
init_new_hugetlb_folio(new_folio);
3997
/* Copy the CMA flag so that it is freed correctly */
3998
if (cma)
3999
folio_set_hugetlb_cma(new_folio);
4000
list_add(&new_folio->lru, &dst_list);
4001
}
4002
}
4003
4004
prep_and_add_allocated_folios(dst, &dst_list);
4005
4006
mutex_unlock(&dst->resize_lock);
4007
4008
return rc;
4009
}
4010
4011
long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed,
4012
unsigned long nr_to_demote)
4013
__must_hold(&hugetlb_lock)
4014
{
4015
int nr_nodes, node;
4016
struct hstate *dst;
4017
long rc = 0;
4018
long nr_demoted = 0;
4019
4020
lockdep_assert_held(&hugetlb_lock);
4021
4022
/* We should never get here if no demote order */
4023
if (!src->demote_order) {
4024
pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
4025
return -EINVAL; /* internal error */
4026
}
4027
dst = size_to_hstate(PAGE_SIZE << src->demote_order);
4028
4029
for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) {
4030
LIST_HEAD(list);
4031
struct folio *folio, *next;
4032
4033
list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) {
4034
if (folio_test_hwpoison(folio))
4035
continue;
4036
4037
remove_hugetlb_folio(src, folio, false);
4038
list_add(&folio->lru, &list);
4039
4040
if (++nr_demoted == nr_to_demote)
4041
break;
4042
}
4043
4044
spin_unlock_irq(&hugetlb_lock);
4045
4046
rc = demote_free_hugetlb_folios(src, dst, &list);
4047
4048
spin_lock_irq(&hugetlb_lock);
4049
4050
list_for_each_entry_safe(folio, next, &list, lru) {
4051
list_del(&folio->lru);
4052
add_hugetlb_folio(src, folio, false);
4053
4054
nr_demoted--;
4055
}
4056
4057
if (rc < 0 || nr_demoted == nr_to_demote)
4058
break;
4059
}
4060
4061
/*
4062
* Not absolutely necessary, but for consistency update max_huge_pages
4063
* based on pool changes for the demoted page.
4064
*/
4065
src->max_huge_pages -= nr_demoted;
4066
dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst));
4067
4068
if (rc < 0)
4069
return rc;
4070
4071
if (nr_demoted)
4072
return nr_demoted;
4073
/*
4074
* Only way to get here is if all pages on free lists are poisoned.
4075
* Return -EBUSY so that caller will not retry.
4076
*/
4077
return -EBUSY;
4078
}
4079
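/*
* Illustrative sysfs sequence that reaches demote_pool_huge_page()
* (interface described in Documentation/admin-guide/mm/hugetlbpage.rst;
* the sizes and counts below are examples only):
*   echo 2M > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote_size
*   echo 4 > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote
*/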
4080
ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
4081
struct hstate *h, int nid,
4082
unsigned long count, size_t len)
4083
{
4084
int err;
4085
nodemask_t nodes_allowed, *n_mask;
4086
4087
if (hstate_is_gigantic_no_runtime(h))
4088
return -EINVAL;
4089
4090
if (nid == NUMA_NO_NODE) {
4091
/*
4092
* global hstate attribute
4093
*/
4094
if (!(obey_mempolicy &&
4095
init_nodemask_of_mempolicy(&nodes_allowed)))
4096
n_mask = &node_states[N_MEMORY];
4097
else
4098
n_mask = &nodes_allowed;
4099
} else {
4100
/*
4101
* Node specific request. count adjustment happens in
4102
* set_max_huge_pages() after acquiring hugetlb_lock.
4103
*/
4104
init_nodemask_of_node(&nodes_allowed, nid);
4105
n_mask = &nodes_allowed;
4106
}
4107
4108
err = set_max_huge_pages(h, count, nid, n_mask);
4109
4110
return err ? err : len;
4111
}
4112
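/*
* Illustrative runtime interfaces that funnel into the helper above
* (standard sysctl/sysfs paths, values chosen only as an example):
*   echo 2048 > /proc/sys/vm/nr_hugepages
*   echo 64 > /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages
*/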
4113
static int __init hugetlb_init(void)
4114
{
4115
int i;
4116
4117
BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4118
__NR_HPAGEFLAGS);
4119
BUILD_BUG_ON_INVALID(HUGETLB_PAGE_ORDER > MAX_FOLIO_ORDER);
4120
4121
if (!hugepages_supported()) {
4122
if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4123
pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
4124
return 0;
4125
}
4126
4127
/*
4128
* Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some
4129
* architectures depend on setup being done here.
4130
*/
4131
hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4132
if (!parsed_default_hugepagesz) {
4133
/*
4134
* If we did not parse a default huge page size, set
4135
* default_hstate_idx to HPAGE_SIZE hstate. And, if the
4136
* number of huge pages for this default size was implicitly
4137
* specified, set that here as well.
4138
* Note that the implicit setting will overwrite an explicit
4139
* setting. A warning will be printed in this case.
4140
*/
4141
default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4142
if (default_hstate_max_huge_pages) {
4143
if (default_hstate.max_huge_pages) {
4144
char buf[32];
4145
4146
string_get_size(huge_page_size(&default_hstate),
4147
1, STRING_UNITS_2, buf, 32);
4148
pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4149
default_hstate.max_huge_pages, buf);
4150
pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4151
default_hstate_max_huge_pages);
4152
}
4153
default_hstate.max_huge_pages =
4154
default_hstate_max_huge_pages;
4155
4156
for_each_online_node(i)
4157
default_hstate.max_huge_pages_node[i] =
4158
default_hugepages_in_node[i];
4159
}
4160
}
4161
4162
hugetlb_cma_check();
4163
hugetlb_init_hstates();
4164
gather_bootmem_prealloc();
4165
report_hugepages();
4166
4167
hugetlb_sysfs_init();
4168
hugetlb_cgroup_file_init();
4169
hugetlb_sysctl_init();
4170
4171
#ifdef CONFIG_SMP
4172
num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4173
#else
4174
num_fault_mutexes = 1;
4175
#endif
4176
hugetlb_fault_mutex_table =
4177
kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4178
GFP_KERNEL);
4179
BUG_ON(!hugetlb_fault_mutex_table);
4180
4181
for (i = 0; i < num_fault_mutexes; i++)
4182
mutex_init(&hugetlb_fault_mutex_table[i]);
4183
return 0;
4184
}
4185
subsys_initcall(hugetlb_init);
4186
4187
/* Overwritten by architectures with more huge page sizes */
4188
bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
4189
{
4190
return size == HPAGE_SIZE;
4191
}
4192
4193
void __init hugetlb_add_hstate(unsigned int order)
4194
{
4195
struct hstate *h;
4196
unsigned long i;
4197
4198
if (size_to_hstate(PAGE_SIZE << order)) {
4199
return;
4200
}
4201
BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4202
BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
4203
WARN_ON(order > MAX_FOLIO_ORDER);
4204
h = &hstates[hugetlb_max_hstate++];
4205
__mutex_init(&h->resize_lock, "resize mutex", &h->resize_key);
4206
h->order = order;
4207
h->mask = ~(huge_page_size(h) - 1);
4208
for (i = 0; i < MAX_NUMNODES; ++i)
4209
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4210
INIT_LIST_HEAD(&h->hugepage_activelist);
4211
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4212
huge_page_size(h)/SZ_1K);
4213
4214
parsed_hstate = h;
4215
}
4216
4217
bool __init __weak hugetlb_node_alloc_supported(void)
4218
{
4219
return true;
4220
}
4221
4222
static void __init hugepages_clear_pages_in_node(void)
4223
{
4224
if (!hugetlb_max_hstate) {
4225
default_hstate_max_huge_pages = 0;
4226
memset(default_hugepages_in_node, 0,
4227
sizeof(default_hugepages_in_node));
4228
} else {
4229
parsed_hstate->max_huge_pages = 0;
4230
memset(parsed_hstate->max_huge_pages_node, 0,
4231
sizeof(parsed_hstate->max_huge_pages_node));
4232
}
4233
}
4234
4235
static __init int hugetlb_add_param(char *s, int (*setup)(char *))
4236
{
4237
size_t len;
4238
char *p;
4239
4240
if (hugetlb_param_index >= HUGE_MAX_CMDLINE_ARGS)
4241
return -EINVAL;
4242
4243
len = strlen(s) + 1;
4244
if (len + hstate_cmdline_index > sizeof(hstate_cmdline_buf))
4245
return -EINVAL;
4246
4247
p = &hstate_cmdline_buf[hstate_cmdline_index];
4248
memcpy(p, s, len);
4249
hstate_cmdline_index += len;
4250
4251
hugetlb_params[hugetlb_param_index].val = p;
4252
hugetlb_params[hugetlb_param_index].setup = setup;
4253
4254
hugetlb_param_index++;
4255
4256
return 0;
4257
}
4258
4259
static __init void hugetlb_parse_params(void)
4260
{
4261
int i;
4262
struct hugetlb_cmdline *hcp;
4263
4264
for (i = 0; i < hugetlb_param_index; i++) {
4265
hcp = &hugetlb_params[i];
4266
4267
hcp->setup(hcp->val);
4268
}
4269
4270
hugetlb_cma_validate_params();
4271
}
4272
4273
/*
4274
* hugepages command line processing
4275
* hugepages normally follows a valid hugepagesz or default_hugepagesz
4276
* specification. If not, ignore the hugepages value. hugepages can also
4277
* be the first huge page command line option in which case it implicitly
4278
* specifies the number of huge pages for the default size.
4279
*/
4280
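/*
* Illustrative command lines accepted by the parser below (counts are
* example values):
*   hugepages=1024                  1024 default-sized huge pages
*   hugepagesz=1G hugepages=16      16 pages of an explicit 1G size
*   hugepages=0:512,1:512           node-specific "node:count" format
*/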
static int __init hugepages_setup(char *s)
4281
{
4282
unsigned long *mhp;
4283
static unsigned long *last_mhp;
4284
int node = NUMA_NO_NODE;
4285
int count;
4286
unsigned long tmp;
4287
char *p = s;
4288
4289
if (!hugepages_supported()) {
4290
pr_warn("HugeTLB: hugepages unsupported, ignoring hugepages=%s cmdline\n", s);
4291
return 0;
4292
}
4293
4294
if (!parsed_valid_hugepagesz) {
4295
pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
4296
parsed_valid_hugepagesz = true;
4297
return -EINVAL;
4298
}
4299
4300
/*
4301
* !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4302
* yet, so this hugepages= parameter goes to the "default hstate".
4303
* Otherwise, it goes with the previously parsed hugepagesz or
4304
* default_hugepagesz.
4305
*/
4306
else if (!hugetlb_max_hstate)
4307
mhp = &default_hstate_max_huge_pages;
4308
else
4309
mhp = &parsed_hstate->max_huge_pages;
4310
4311
if (mhp == last_mhp) {
4312
pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4313
return 1;
4314
}
4315
4316
while (*p) {
4317
count = 0;
4318
if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4319
goto invalid;
4320
/* Parameter is node format */
4321
if (p[count] == ':') {
4322
if (!hugetlb_node_alloc_supported()) {
4323
pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4324
return 1;
4325
}
4326
if (tmp >= MAX_NUMNODES || !node_online(tmp))
4327
goto invalid;
4328
node = array_index_nospec(tmp, MAX_NUMNODES);
4329
p += count + 1;
4330
/* Parse hugepages */
4331
if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4332
goto invalid;
4333
if (!hugetlb_max_hstate)
4334
default_hugepages_in_node[node] = tmp;
4335
else
4336
parsed_hstate->max_huge_pages_node[node] = tmp;
4337
*mhp += tmp;
4338
/* Go to parse next node */
4339
if (p[count] == ',')
4340
p += count + 1;
4341
else
4342
break;
4343
} else {
4344
if (p != s)
4345
goto invalid;
4346
*mhp = tmp;
4347
break;
4348
}
4349
}
4350
4351
last_mhp = mhp;
4352
4353
return 0;
4354
4355
invalid:
4356
pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4357
hugepages_clear_pages_in_node();
4358
return -EINVAL;
4359
}
4360
hugetlb_early_param("hugepages", hugepages_setup);
4361
4362
/*
4363
* hugepagesz command line processing
4364
* A specific huge page size can only be specified once with hugepagesz.
4365
* hugepagesz is followed by hugepages on the command line. The global
4366
* variable 'parsed_valid_hugepagesz' is used to determine if prior
4367
* hugepagesz argument was valid.
4368
*/
4369
static int __init hugepagesz_setup(char *s)
4370
{
4371
unsigned long size;
4372
struct hstate *h;
4373
4374
if (!hugepages_supported()) {
4375
pr_warn("HugeTLB: hugepages unsupported, ignoring hugepagesz=%s cmdline\n", s);
4376
return 0;
4377
}
4378
4379
parsed_valid_hugepagesz = false;
4380
size = (unsigned long)memparse(s, NULL);
4381
4382
if (!arch_hugetlb_valid_size(size)) {
4383
pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4384
return -EINVAL;
4385
}
4386
4387
h = size_to_hstate(size);
4388
if (h) {
4389
/*
4390
* hstate for this size already exists. This is normally
4391
* an error, but is allowed if the existing hstate is the
4392
* default hstate. More specifically, it is only allowed if
4393
* the number of huge pages for the default hstate was not
4394
* previously specified.
4395
*/
4396
if (!parsed_default_hugepagesz || h != &default_hstate ||
4397
default_hstate.max_huge_pages) {
4398
pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4399
return -EINVAL;
4400
}
4401
4402
/*
4403
* No need to call hugetlb_add_hstate() as hstate already
4404
* exists. But, do set parsed_hstate so that a following
4405
* hugepages= parameter will be applied to this hstate.
4406
*/
4407
parsed_hstate = h;
4408
parsed_valid_hugepagesz = true;
4409
return 0;
4410
}
4411
4412
hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4413
parsed_valid_hugepagesz = true;
4414
return 0;
4415
}
4416
hugetlb_early_param("hugepagesz", hugepagesz_setup);
4417
4418
/*
4419
* default_hugepagesz command line input
4420
* Only one instance of default_hugepagesz allowed on command line.
4421
*/
4422
static int __init default_hugepagesz_setup(char *s)
4423
{
4424
unsigned long size;
4425
int i;
4426
4427
if (!hugepages_supported()) {
4428
pr_warn("HugeTLB: hugepages unsupported, ignoring default_hugepagesz=%s cmdline\n",
4429
s);
4430
return 0;
4431
}
4432
4433
parsed_valid_hugepagesz = false;
4434
if (parsed_default_hugepagesz) {
4435
pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4436
return -EINVAL;
4437
}
4438
4439
size = (unsigned long)memparse(s, NULL);
4440
4441
if (!arch_hugetlb_valid_size(size)) {
4442
pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4443
return -EINVAL;
4444
}
4445
4446
hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4447
parsed_valid_hugepagesz = true;
4448
parsed_default_hugepagesz = true;
4449
default_hstate_idx = hstate_index(size_to_hstate(size));
4450
4451
/*
4452
* The number of default huge pages (for this size) could have been
4453
* specified as the first hugetlb parameter: hugepages=X. If so,
4454
* then default_hstate_max_huge_pages is set. If the default huge
4455
* page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
4456
* allocated here from the bootmem allocator.
4457
*/
4458
if (default_hstate_max_huge_pages) {
4459
default_hstate.max_huge_pages = default_hstate_max_huge_pages;
4460
/*
4461
* Since this is an early parameter, we can't check
4462
* NUMA node state yet, so loop through MAX_NUMNODES.
4463
*/
4464
for (i = 0; i < MAX_NUMNODES; i++) {
4465
if (default_hugepages_in_node[i] != 0)
4466
default_hstate.max_huge_pages_node[i] =
4467
default_hugepages_in_node[i];
4468
}
4469
default_hstate_max_huge_pages = 0;
4470
}
4471
4472
return 0;
4473
}
4474
hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup);
4475
4476
void __init hugetlb_bootmem_set_nodes(void)
4477
{
4478
int i, nid;
4479
unsigned long start_pfn, end_pfn;
4480
4481
if (!nodes_empty(hugetlb_bootmem_nodes))
4482
return;
4483
4484
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4485
if (end_pfn > start_pfn)
4486
node_set(nid, hugetlb_bootmem_nodes);
4487
}
4488
}
4489
4490
static bool __hugetlb_bootmem_allocated __initdata;
4491
4492
bool __init hugetlb_bootmem_allocated(void)
4493
{
4494
return __hugetlb_bootmem_allocated;
4495
}
4496
4497
void __init hugetlb_bootmem_alloc(void)
4498
{
4499
struct hstate *h;
4500
int i;
4501
4502
if (__hugetlb_bootmem_allocated)
4503
return;
4504
4505
hugetlb_bootmem_set_nodes();
4506
4507
for (i = 0; i < MAX_NUMNODES; i++)
4508
INIT_LIST_HEAD(&huge_boot_pages[i]);
4509
4510
hugetlb_parse_params();
4511
4512
for_each_hstate(h) {
4513
h->next_nid_to_alloc = first_online_node;
4514
4515
if (hstate_is_gigantic(h))
4516
hugetlb_hstate_alloc_pages(h);
4517
}
4518
4519
__hugetlb_bootmem_allocated = true;
4520
}
4521
4522
/*
4523
* hugepage_alloc_threads command line parsing.
4524
*
4525
* When set, use this specific number of threads for the boot
4526
* allocation of hugepages.
4527
*/
4528
static int __init hugepage_alloc_threads_setup(char *s)
4529
{
4530
unsigned long allocation_threads;
4531
4532
if (kstrtoul(s, 0, &allocation_threads) != 0)
4533
return 1;
4534
4535
if (allocation_threads == 0)
4536
return 1;
4537
4538
hugepage_allocation_threads = allocation_threads;
4539
4540
return 1;
4541
}
4542
__setup("hugepage_alloc_threads=", hugepage_alloc_threads_setup);
4543
4544
static unsigned int allowed_mems_nr(struct hstate *h)
4545
{
4546
int node;
4547
unsigned int nr = 0;
4548
nodemask_t *mbind_nodemask;
4549
unsigned int *array = h->free_huge_pages_node;
4550
gfp_t gfp_mask = htlb_alloc_mask(h);
4551
4552
mbind_nodemask = policy_mbind_nodemask(gfp_mask);
4553
for_each_node_mask(node, cpuset_current_mems_allowed) {
4554
if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
4555
nr += array[node];
4556
}
4557
4558
return nr;
4559
}
4560
4561
void hugetlb_report_meminfo(struct seq_file *m)
4562
{
4563
struct hstate *h;
4564
unsigned long total = 0;
4565
4566
if (!hugepages_supported())
4567
return;
4568
4569
for_each_hstate(h) {
4570
unsigned long count = h->nr_huge_pages;
4571
4572
total += huge_page_size(h) * count;
4573
4574
if (h == &default_hstate)
4575
seq_printf(m,
4576
"HugePages_Total: %5lu\n"
4577
"HugePages_Free: %5lu\n"
4578
"HugePages_Rsvd: %5lu\n"
4579
"HugePages_Surp: %5lu\n"
4580
"Hugepagesize: %8lu kB\n",
4581
count,
4582
h->free_huge_pages,
4583
h->resv_huge_pages,
4584
h->surplus_huge_pages,
4585
huge_page_size(h) / SZ_1K);
4586
}
4587
4588
seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K);
4589
}
4590
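/*
* Example of the /proc/meminfo block emitted above (values are made up
* for illustration):
*   HugePages_Total:      64
*   HugePages_Free:       60
*   HugePages_Rsvd:        4
*   HugePages_Surp:        0
*   Hugepagesize:       2048 kB
*   Hugetlb:          131072 kB
*/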
4591
int hugetlb_report_node_meminfo(char *buf, int len, int nid)
4592
{
4593
struct hstate *h = &default_hstate;
4594
4595
if (!hugepages_supported())
4596
return 0;
4597
4598
return sysfs_emit_at(buf, len,
4599
"Node %d HugePages_Total: %5u\n"
4600
"Node %d HugePages_Free: %5u\n"
4601
"Node %d HugePages_Surp: %5u\n",
4602
nid, h->nr_huge_pages_node[nid],
4603
nid, h->free_huge_pages_node[nid],
4604
nid, h->surplus_huge_pages_node[nid]);
4605
}
4606
4607
void hugetlb_show_meminfo_node(int nid)
4608
{
4609
struct hstate *h;
4610
4611
if (!hugepages_supported())
4612
return;
4613
4614
for_each_hstate(h)
4615
printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4616
nid,
4617
h->nr_huge_pages_node[nid],
4618
h->free_huge_pages_node[nid],
4619
h->surplus_huge_pages_node[nid],
4620
huge_page_size(h) / SZ_1K);
4621
}
4622
4623
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
4624
{
4625
seq_printf(m, "HugetlbPages:\t%8lu kB\n",
4626
K(atomic_long_read(&mm->hugetlb_usage)));
4627
}
4628
4629
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
4630
unsigned long hugetlb_total_pages(void)
4631
{
4632
struct hstate *h;
4633
unsigned long nr_total_pages = 0;
4634
4635
for_each_hstate(h)
4636
nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4637
return nr_total_pages;
4638
}
4639
4640
static int hugetlb_acct_memory(struct hstate *h, long delta)
4641
{
4642
int ret = -ENOMEM;
4643
4644
if (!delta)
4645
return 0;
4646
4647
spin_lock_irq(&hugetlb_lock);
4648
/*
4649
* When cpuset is configured, it breaks the strict hugetlb page
4650
* reservation as the accounting is done on a global variable. Such
4651
* reservation is completely rubbish in the presence of cpuset because
4652
* the reservation is not checked against page availability for the
4653
* current cpuset. Applications can still potentially be OOM-killed by the
* kernel due to a lack of free hugetlb pages in the cpuset the task is in.
* Attempting to enforce strict accounting with cpuset is almost
* impossible (or too ugly) because cpusets are so fluid that a
* task or memory node can be dynamically moved between cpusets.
4658
*
4659
* The change of semantics for shared hugetlb mapping with cpuset is
4660
* undesirable. However, in order to preserve some of the semantics,
4661
* we fall back to check against current free page availability as
4662
* a best attempt and hopefully to minimize the impact of changing
4663
* semantics that cpuset has.
4664
*
4665
* Apart from cpuset, we also have memory policy mechanism that
4666
* also determines from which node the kernel will allocate memory
4667
* in a NUMA system. So similar to cpuset, we also should consider
4668
* the memory policy of the current task. Similar to the description
4669
* above.
4670
*/
4671
if (delta > 0) {
4672
if (gather_surplus_pages(h, delta) < 0)
4673
goto out;
4674
4675
if (delta > allowed_mems_nr(h)) {
4676
return_unused_surplus_pages(h, delta);
4677
goto out;
4678
}
4679
}
4680
4681
ret = 0;
4682
if (delta < 0)
4683
return_unused_surplus_pages(h, (unsigned long) -delta);
4684
4685
out:
4686
spin_unlock_irq(&hugetlb_lock);
4687
return ret;
4688
}
4689
4690
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
4691
{
4692
struct resv_map *resv = vma_resv_map(vma);
4693
4694
/*
4695
* HPAGE_RESV_OWNER indicates a private mapping.
4696
* This new VMA should share its sibling's reservation map if present.
4697
* The VMA will only ever have a valid reservation map pointer where
4698
* it is being copied for another still existing VMA. As that VMA
4699
* has a reference to the reservation map it cannot disappear until
4700
* after this open call completes. It is therefore safe to take a
4701
* new reference here without additional locking.
4702
*/
4703
if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
4704
resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4705
kref_get(&resv->refs);
4706
}
4707
4708
/*
4709
* vma_lock structure for sharable mappings is vma specific.
4710
* Clear old pointer (if copied via vm_area_dup) and allocate
4711
* new structure. Before clearing, make sure vma_lock is not
4712
* for this vma.
4713
*/
4714
if (vma->vm_flags & VM_MAYSHARE) {
4715
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
4716
4717
if (vma_lock) {
4718
if (vma_lock->vma != vma) {
4719
vma->vm_private_data = NULL;
4720
hugetlb_vma_lock_alloc(vma);
4721
} else
4722
pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
4723
} else
4724
hugetlb_vma_lock_alloc(vma);
4725
}
4726
}
4727
4728
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4729
{
4730
struct hstate *h = hstate_vma(vma);
4731
struct resv_map *resv;
4732
struct hugepage_subpool *spool = subpool_vma(vma);
4733
unsigned long reserve, start, end;
4734
long gbl_reserve;
4735
4736
hugetlb_vma_lock_free(vma);
4737
4738
resv = vma_resv_map(vma);
4739
if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4740
return;
4741
4742
start = vma_hugecache_offset(h, vma, vma->vm_start);
4743
end = vma_hugecache_offset(h, vma, vma->vm_end);
4744
4745
reserve = (end - start) - region_count(resv, start, end);
4746
hugetlb_cgroup_uncharge_counter(resv, start, end);
4747
if (reserve) {
4748
/*
4749
* Decrement reserve counts. The global reserve count may be
4750
* adjusted if the subpool has a minimum size.
4751
*/
4752
gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
4753
hugetlb_acct_memory(h, -gbl_reserve);
4754
}
4755
4756
kref_put(&resv->refs, resv_map_release);
4757
}
4758
4759
static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
4760
{
4761
if (addr & ~(huge_page_mask(hstate_vma(vma))))
4762
return -EINVAL;
4763
return 0;
4764
}
4765
4766
void hugetlb_split(struct vm_area_struct *vma, unsigned long addr)
4767
{
4768
/*
4769
* PMD sharing is only possible for PUD_SIZE-aligned address ranges
4770
* in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
4771
* split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
4772
* This function is called in the middle of a VMA split operation, with
4773
* MM, VMA and rmap all write-locked to prevent concurrent page table
4774
* walks (except hardware and gup_fast()).
4775
*/
4776
vma_assert_write_locked(vma);
4777
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
4778
4779
if (addr & ~PUD_MASK) {
4780
unsigned long floor = addr & PUD_MASK;
4781
unsigned long ceil = floor + PUD_SIZE;
4782
4783
if (floor >= vma->vm_start && ceil <= vma->vm_end) {
4784
/*
4785
* Locking:
4786
* Use take_locks=false here.
4787
* The file rmap lock is already held.
4788
* The hugetlb VMA lock can't be taken when we already
4789
* hold the file rmap lock, and we don't need it because
4790
* its purpose is to synchronize against concurrent page
4791
* table walks, which are not possible thanks to the
4792
* locks held by our caller.
4793
*/
4794
hugetlb_unshare_pmds(vma, floor, ceil, /* take_locks = */ false);
4795
}
4796
}
4797
}
4798
4799
static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
4800
{
4801
return huge_page_size(hstate_vma(vma));
4802
}
4803
4804
/*
4805
* We cannot handle pagefaults against hugetlb pages at all. They cause
4806
* handle_mm_fault() to try to instantiate regular-sized pages in the
4807
* hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
4808
* this far.
4809
*/
4810
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
4811
{
4812
BUG();
4813
return 0;
4814
}
4815
4816
/*
4817
* When a new function is introduced to vm_operations_struct and added
4818
* to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4819
* This is because under System V memory model, mappings created via
4820
* shmget/shmat with "huge page" specified are backed by hugetlbfs files,
* and their original vm_ops are overwritten with shm_vm_ops.
4822
*/
4823
const struct vm_operations_struct hugetlb_vm_ops = {
4824
.fault = hugetlb_vm_op_fault,
4825
.open = hugetlb_vm_op_open,
4826
.close = hugetlb_vm_op_close,
4827
.may_split = hugetlb_vm_op_split,
4828
.pagesize = hugetlb_vm_op_pagesize,
4829
};
4830
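/*
* Userspace sketch (an illustrative assumption, not kernel code): any
* hugetlbfs-backed mapping, e.g. one created with MAP_HUGETLB, has its
* vm_ops set to the hugetlb_vm_ops above.
*
*   void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
*                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
*/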
4831
static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
4832
bool try_mkwrite)
4833
{
4834
pte_t entry = folio_mk_pte(folio, vma->vm_page_prot);
4835
unsigned int shift = huge_page_shift(hstate_vma(vma));
4836
4837
if (try_mkwrite && (vma->vm_flags & VM_WRITE)) {
4838
entry = pte_mkwrite_novma(pte_mkdirty(entry));
4839
} else {
4840
entry = pte_wrprotect(entry);
4841
}
4842
entry = pte_mkyoung(entry);
4843
entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
4844
4845
return entry;
4846
}
4847
4848
static void set_huge_ptep_writable(struct vm_area_struct *vma,
4849
unsigned long address, pte_t *ptep)
4850
{
4851
pte_t entry;
4852
4853
entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep)));
4854
if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
4855
update_mmu_cache(vma, address, ptep);
4856
}
4857
4858
static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma,
4859
unsigned long address, pte_t *ptep)
4860
{
4861
if (vma->vm_flags & VM_WRITE)
4862
set_huge_ptep_writable(vma, address, ptep);
4863
}
4864
4865
static void
4866
hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
4867
struct folio *new_folio, pte_t old, unsigned long sz)
4868
{
4869
pte_t newpte = make_huge_pte(vma, new_folio, true);
4870
4871
__folio_mark_uptodate(new_folio);
4872
hugetlb_add_new_anon_rmap(new_folio, vma, addr);
4873
if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
4874
newpte = huge_pte_mkuffd_wp(newpte);
4875
set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
4876
hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
4877
folio_set_hugetlb_migratable(new_folio);
4878
}
4879
4880
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
4881
struct vm_area_struct *dst_vma,
4882
struct vm_area_struct *src_vma)
4883
{
4884
pte_t *src_pte, *dst_pte, entry;
4885
struct folio *pte_folio;
4886
unsigned long addr;
4887
bool cow = is_cow_mapping(src_vma->vm_flags);
4888
struct hstate *h = hstate_vma(src_vma);
4889
unsigned long sz = huge_page_size(h);
4890
unsigned long npages = pages_per_huge_page(h);
4891
struct mmu_notifier_range range;
4892
unsigned long last_addr_mask;
4893
softleaf_t softleaf;
4894
int ret = 0;
4895
4896
if (cow) {
4897
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
4898
src_vma->vm_start,
4899
src_vma->vm_end);
4900
mmu_notifier_invalidate_range_start(&range);
4901
vma_assert_write_locked(src_vma);
4902
raw_write_seqcount_begin(&src->write_protect_seq);
4903
} else {
4904
/*
4905
* For shared mappings the vma lock must be held before
4906
* calling hugetlb_walk() in the src vma. Otherwise, the
4907
* returned ptep could go away if part of a shared pmd and
4908
* another thread calls huge_pmd_unshare.
4909
*/
4910
hugetlb_vma_lock_read(src_vma);
4911
}
4912
4913
last_addr_mask = hugetlb_mask_last_page(h);
4914
for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
4915
spinlock_t *src_ptl, *dst_ptl;
4916
src_pte = hugetlb_walk(src_vma, addr, sz);
4917
if (!src_pte) {
4918
addr |= last_addr_mask;
4919
continue;
4920
}
4921
dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
4922
if (!dst_pte) {
4923
ret = -ENOMEM;
4924
break;
4925
}
4926
4927
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
4928
/* If the pagetables are shared, there is nothing to do */
4929
if (ptdesc_pmd_is_shared(virt_to_ptdesc(dst_pte))) {
4930
addr |= last_addr_mask;
4931
continue;
4932
}
4933
#endif
4934
4935
dst_ptl = huge_pte_lock(h, dst, dst_pte);
4936
src_ptl = huge_pte_lockptr(h, src, src_pte);
4937
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4938
entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
4939
again:
4940
if (huge_pte_none(entry)) {
4941
/* Skip if src entry none. */
4942
goto next;
4943
}
4944
4945
softleaf = softleaf_from_pte(entry);
4946
if (unlikely(softleaf_is_hwpoison(softleaf))) {
4947
if (!userfaultfd_wp(dst_vma))
4948
entry = huge_pte_clear_uffd_wp(entry);
4949
set_huge_pte_at(dst, addr, dst_pte, entry, sz);
4950
} else if (unlikely(softleaf_is_migration(softleaf))) {
4951
bool uffd_wp = pte_swp_uffd_wp(entry);
4952
4953
if (!softleaf_is_migration_read(softleaf) && cow) {
4954
/*
4955
* COW mappings require pages in both
4956
* parent and child to be set to read.
4957
*/
4958
softleaf = make_readable_migration_entry(
4959
swp_offset(softleaf));
4960
entry = swp_entry_to_pte(softleaf);
4961
if (userfaultfd_wp(src_vma) && uffd_wp)
4962
entry = pte_swp_mkuffd_wp(entry);
4963
set_huge_pte_at(src, addr, src_pte, entry, sz);
4964
}
4965
if (!userfaultfd_wp(dst_vma))
4966
entry = huge_pte_clear_uffd_wp(entry);
4967
set_huge_pte_at(dst, addr, dst_pte, entry, sz);
4968
} else if (unlikely(pte_is_marker(entry))) {
4969
const pte_marker marker = copy_pte_marker(softleaf, dst_vma);
4970
4971
if (marker)
4972
set_huge_pte_at(dst, addr, dst_pte,
4973
make_pte_marker(marker), sz);
4974
} else {
4975
entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
4976
pte_folio = page_folio(pte_page(entry));
4977
folio_get(pte_folio);
4978
4979
/*
4980
* Failing to duplicate the anon rmap is a rare case
4981
* where we see pinned hugetlb pages while they're
4982
* prone to COW. We need to do the COW earlier during
4983
* fork.
4984
*
4985
* When pre-allocating the page or copying data, we
4986
* need to be without the pgtable locks since we could
4987
* sleep during the process.
4988
*/
4989
if (!folio_test_anon(pte_folio)) {
4990
hugetlb_add_file_rmap(pte_folio);
4991
} else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
4992
pte_t src_pte_old = entry;
4993
struct folio *new_folio;
4994
4995
spin_unlock(src_ptl);
4996
spin_unlock(dst_ptl);
4997
/* Do not use reserve as it's privately owned */
4998
new_folio = alloc_hugetlb_folio(dst_vma, addr, false);
4999
if (IS_ERR(new_folio)) {
5000
folio_put(pte_folio);
5001
ret = PTR_ERR(new_folio);
5002
break;
5003
}
5004
ret = copy_user_large_folio(new_folio, pte_folio,
5005
addr, dst_vma);
5006
folio_put(pte_folio);
5007
if (ret) {
5008
folio_put(new_folio);
5009
break;
5010
}
5011
5012
/* Install the new hugetlb folio if src pte stable */
5013
dst_ptl = huge_pte_lock(h, dst, dst_pte);
5014
src_ptl = huge_pte_lockptr(h, src, src_pte);
5015
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5016
entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
5017
if (!pte_same(src_pte_old, entry)) {
5018
restore_reserve_on_error(h, dst_vma, addr,
5019
new_folio);
5020
folio_put(new_folio);
5021
/* huge_ptep of dst_pte won't change as in child */
5022
goto again;
5023
}
5024
hugetlb_install_folio(dst_vma, dst_pte, addr,
5025
new_folio, src_pte_old, sz);
5026
goto next;
5027
}
5028
5029
if (cow) {
5030
/*
5031
* No need to notify as we are downgrading page
5032
* table protection not changing it to point
5033
* to a new page.
5034
*
5035
* See Documentation/mm/mmu_notifier.rst
5036
*/
5037
huge_ptep_set_wrprotect(src, addr, src_pte);
5038
entry = huge_pte_wrprotect(entry);
5039
}
5040
5041
if (!userfaultfd_wp(dst_vma))
5042
entry = huge_pte_clear_uffd_wp(entry);
5043
5044
set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5045
hugetlb_count_add(npages, dst);
5046
}
5047
5048
next:
5049
spin_unlock(src_ptl);
5050
spin_unlock(dst_ptl);
5051
}
5052
5053
if (cow) {
5054
raw_write_seqcount_end(&src->write_protect_seq);
5055
mmu_notifier_invalidate_range_end(&range);
5056
} else {
5057
hugetlb_vma_unlock_read(src_vma);
5058
}
5059
5060
return ret;
5061
}
5062
5063
static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
5064
unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
5065
unsigned long sz)
5066
{
5067
bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
5068
struct hstate *h = hstate_vma(vma);
5069
struct mm_struct *mm = vma->vm_mm;
5070
spinlock_t *src_ptl, *dst_ptl;
5071
pte_t pte;
5072
5073
dst_ptl = huge_pte_lock(h, mm, dst_pte);
5074
src_ptl = huge_pte_lockptr(h, mm, src_pte);
5075
5076
/*
5077
* We don't have to worry about the ordering of src and dst ptlocks
5078
* because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
5079
*/
5080
if (src_ptl != dst_ptl)
5081
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5082
5083
pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz);
5084
5085
if (need_clear_uffd_wp && pte_is_uffd_wp_marker(pte)) {
5086
huge_pte_clear(mm, new_addr, dst_pte, sz);
5087
} else {
5088
if (need_clear_uffd_wp) {
5089
if (pte_present(pte))
5090
pte = huge_pte_clear_uffd_wp(pte);
5091
else
5092
pte = pte_swp_clear_uffd_wp(pte);
5093
}
5094
set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
5095
}
5096
5097
if (src_ptl != dst_ptl)
5098
spin_unlock(src_ptl);
5099
spin_unlock(dst_ptl);
5100
}
5101
5102
int move_hugetlb_page_tables(struct vm_area_struct *vma,
5103
struct vm_area_struct *new_vma,
5104
unsigned long old_addr, unsigned long new_addr,
5105
unsigned long len)
5106
{
5107
struct hstate *h = hstate_vma(vma);
5108
struct address_space *mapping = vma->vm_file->f_mapping;
5109
unsigned long sz = huge_page_size(h);
5110
struct mm_struct *mm = vma->vm_mm;
5111
unsigned long old_end = old_addr + len;
5112
unsigned long last_addr_mask;
5113
pte_t *src_pte, *dst_pte;
5114
struct mmu_notifier_range range;
5115
struct mmu_gather tlb;
5116
5117
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
5118
old_end);
5119
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5120
/*
5121
* In case of shared PMDs, we should cover the maximum possible
5122
* range.
5123
*/
5124
flush_cache_range(vma, range.start, range.end);
5125
tlb_gather_mmu_vma(&tlb, vma);
5126
5127
mmu_notifier_invalidate_range_start(&range);
5128
last_addr_mask = hugetlb_mask_last_page(h);
5129
/* Prevent race with file truncation */
5130
hugetlb_vma_lock_write(vma);
5131
i_mmap_lock_write(mapping);
5132
for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
5133
src_pte = hugetlb_walk(vma, old_addr, sz);
5134
if (!src_pte) {
5135
old_addr |= last_addr_mask;
5136
new_addr |= last_addr_mask;
5137
continue;
5138
}
5139
if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte)))
5140
continue;
5141
5142
if (huge_pmd_unshare(&tlb, vma, old_addr, src_pte)) {
5143
old_addr |= last_addr_mask;
5144
new_addr |= last_addr_mask;
5145
continue;
5146
}
5147
5148
dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5149
if (!dst_pte)
5150
break;
5151
5152
move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
5153
tlb_remove_huge_tlb_entry(h, &tlb, src_pte, old_addr);
5154
}
5155
5156
tlb_flush_mmu_tlbonly(&tlb);
5157
huge_pmd_unshare_flush(&tlb, vma);
5158
5159
mmu_notifier_invalidate_range_end(&range);
5160
i_mmap_unlock_write(mapping);
5161
hugetlb_vma_unlock_write(vma);
5162
tlb_finish_mmu(&tlb);
5163
5164
return len + old_addr - old_end;
5165
}
5166
5167
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5168
unsigned long start, unsigned long end,
5169
struct folio *folio, zap_flags_t zap_flags)
5170
{
5171
struct mm_struct *mm = vma->vm_mm;
5172
const bool folio_provided = !!folio;
5173
unsigned long address;
5174
pte_t *ptep;
5175
pte_t pte;
5176
spinlock_t *ptl;
5177
struct hstate *h = hstate_vma(vma);
5178
unsigned long sz = huge_page_size(h);
5179
bool adjust_reservation;
5180
unsigned long last_addr_mask;
5181
5182
WARN_ON(!is_vm_hugetlb_page(vma));
5183
BUG_ON(start & ~huge_page_mask(h));
5184
BUG_ON(end & ~huge_page_mask(h));
5185
5186
/*
5187
* This is a hugetlb vma, all the pte entries should point
5188
* to huge page.
5189
*/
5190
tlb_change_page_size(tlb, sz);
5191
tlb_start_vma(tlb, vma);
5192
5193
last_addr_mask = hugetlb_mask_last_page(h);
5194
address = start;
5195
for (; address < end; address += sz) {
5196
ptep = hugetlb_walk(vma, address, sz);
5197
if (!ptep) {
5198
address |= last_addr_mask;
5199
continue;
5200
}
5201
5202
ptl = huge_pte_lock(h, mm, ptep);
5203
if (huge_pmd_unshare(tlb, vma, address, ptep)) {
5204
spin_unlock(ptl);
5205
address |= last_addr_mask;
5206
continue;
5207
}
5208
5209
pte = huge_ptep_get(mm, address, ptep);
5210
if (huge_pte_none(pte)) {
5211
spin_unlock(ptl);
5212
continue;
5213
}
5214
5215
/*
5216
* Migrating hugepage or HWPoisoned hugepage is already
5217
* unmapped and its refcount is dropped, so just clear pte here.
5218
*/
5219
if (unlikely(!pte_present(pte))) {
5220
/*
5221
* If the pte was wr-protected by uffd-wp in any of the
5222
* swap forms, meanwhile the caller does not want to
5223
* drop the uffd-wp bit in this zap, then replace the
5224
* pte with a marker.
5225
*/
5226
if (pte_swp_uffd_wp_any(pte) &&
5227
!(zap_flags & ZAP_FLAG_DROP_MARKER))
5228
set_huge_pte_at(mm, address, ptep,
5229
make_pte_marker(PTE_MARKER_UFFD_WP),
5230
sz);
5231
else
5232
huge_pte_clear(mm, address, ptep, sz);
5233
spin_unlock(ptl);
5234
continue;
5235
}
5236
5237
/*
5238
* If a folio is supplied, it is because a specific
5239
* folio is being unmapped, not a range. Ensure the folio we
5240
* are about to unmap is the actual folio of interest.
5241
*/
5242
if (folio_provided) {
5243
if (folio != page_folio(pte_page(pte))) {
5244
spin_unlock(ptl);
5245
continue;
5246
}
5247
/*
5248
* Mark the VMA as having unmapped its page so that
5249
* future faults in this VMA will fail rather than
5250
* looking like data was lost
5251
*/
5252
set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5253
} else {
5254
folio = page_folio(pte_page(pte));
5255
}
5256
5257
pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
5258
tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5259
if (huge_pte_dirty(pte))
5260
folio_mark_dirty(folio);
5261
/* Leave a uffd-wp pte marker if needed */
5262
if (huge_pte_uffd_wp(pte) &&
5263
!(zap_flags & ZAP_FLAG_DROP_MARKER))
5264
set_huge_pte_at(mm, address, ptep,
5265
make_pte_marker(PTE_MARKER_UFFD_WP),
5266
sz);
5267
hugetlb_count_sub(pages_per_huge_page(h), mm);
5268
hugetlb_remove_rmap(folio);
5269
spin_unlock(ptl);
5270
5271
/*
5272
* Restore the reservation for an anonymous page, otherwise the
* backing page could be stolen by someone.
* If we are freeing a surplus page, do not set the restore
* reservation bit.
5276
*/
5277
adjust_reservation = false;
5278
5279
spin_lock_irq(&hugetlb_lock);
5280
if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
5281
folio_test_anon(folio)) {
5282
folio_set_hugetlb_restore_reserve(folio);
5283
/* Reservation to be adjusted after the spin lock */
5284
adjust_reservation = true;
5285
}
5286
spin_unlock_irq(&hugetlb_lock);
5287
5288
/*
5289
* Adjust the reservation for the region that will have the
5290
* reserve restored. Keep in mind that vma_needs_reservation() changes
5291
* resv->adds_in_progress if it succeeds. If this is not done,
5292
* do_exit() will not see it, and will keep the reservation
5293
* forever.
5294
*/
5295
if (adjust_reservation) {
5296
int rc = vma_needs_reservation(h, vma, address);
5297
5298
if (rc < 0)
5299
/* Presumably allocate_file_region_entries failed
5300
* to allocate a file_region struct. Clear
5301
* hugetlb_restore_reserve so that global reserve
5302
* count will not be incremented by free_huge_folio.
5303
* Act as if we consumed the reservation.
5304
*/
5305
folio_clear_hugetlb_restore_reserve(folio);
5306
else if (rc)
5307
vma_add_reservation(h, vma, address);
5308
}
5309
5310
tlb_remove_page_size(tlb, folio_page(folio, 0),
5311
folio_size(folio));
5312
/*
5313
* If we were instructed to unmap a specific folio, we're done.
5314
*/
5315
if (folio_provided)
5316
break;
5317
}
5318
tlb_end_vma(tlb, vma);
5319
5320
huge_pmd_unshare_flush(tlb, vma);
5321
}
5322
5323
void __hugetlb_zap_begin(struct vm_area_struct *vma,
5324
unsigned long *start, unsigned long *end)
5325
{
5326
if (!vma->vm_file) /* hugetlbfs_file_mmap error */
5327
return;
5328
5329
adjust_range_if_pmd_sharing_possible(vma, start, end);
5330
hugetlb_vma_lock_write(vma);
5331
if (vma->vm_file)
5332
i_mmap_lock_write(vma->vm_file->f_mapping);
5333
}
5334
5335
void __hugetlb_zap_end(struct vm_area_struct *vma,
5336
struct zap_details *details)
5337
{
5338
zap_flags_t zap_flags = details ? details->zap_flags : 0;
5339
5340
if (!vma->vm_file) /* hugetlbfs_file_mmap error */
5341
return;
5342
5343
if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
5344
/*
5345
* Unlock and free the vma lock before releasing i_mmap_rwsem.
5346
* When the vma_lock is freed, this makes the vma ineligible
5347
* for pmd sharing. And, i_mmap_rwsem is required to set up
5348
* pmd sharing. This is important as page tables for this
5349
* unmapped range will be asynchronously deleted. If the page
5350
* tables are shared, there will be issues when accessed by
5351
* someone else.
5352
*/
5353
__hugetlb_vma_unlock_write_free(vma);
5354
} else {
5355
hugetlb_vma_unlock_write(vma);
5356
}
5357
5358
if (vma->vm_file)
5359
i_mmap_unlock_write(vma->vm_file->f_mapping);
5360
}
5361
5362
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
5363
unsigned long end, struct folio *folio,
5364
zap_flags_t zap_flags)
5365
{
5366
struct mmu_notifier_range range;
5367
struct mmu_gather tlb;
5368
5369
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
5370
start, end);
5371
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5372
mmu_notifier_invalidate_range_start(&range);
5373
tlb_gather_mmu(&tlb, vma->vm_mm);
5374
5375
__unmap_hugepage_range(&tlb, vma, start, end,
5376
folio, zap_flags);
5377
5378
mmu_notifier_invalidate_range_end(&range);
5379
tlb_finish_mmu(&tlb);
5380
}
5381
5382
/*
5383
* This is called when the original mapper is failing to COW a MAP_PRIVATE
5384
* mapping it owns the reserve page for. The intention is to unmap the page
5385
* from other VMAs and let the children be SIGKILLed if they are faulting the
5386
* same region.
5387
*/
5388
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
5389
struct folio *folio, unsigned long address)
5390
{
5391
struct hstate *h = hstate_vma(vma);
5392
struct vm_area_struct *iter_vma;
5393
struct address_space *mapping;
5394
pgoff_t pgoff;
5395
5396
/*
5397
* vm_pgoff is in PAGE_SIZE units, hence the different calculation
5398
* from page cache lookup which is in HPAGE_SIZE units.
5399
*/
5400
address = address & huge_page_mask(h);
5401
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
5402
vma->vm_pgoff;
5403
mapping = vma->vm_file->f_mapping;
5404
5405
/*
5406
* Take the mapping lock for the duration of the table walk. As
5407
* this mapping should be shared between all the VMAs,
5408
* __unmap_hugepage_range() is called as the lock is already held
5409
*/
5410
i_mmap_lock_write(mapping);
5411
vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
5412
/* Do not unmap the current VMA */
5413
if (iter_vma == vma)
5414
continue;
5415
5416
/*
5417
* Shared VMAs have their own reserves and do not affect
5418
* MAP_PRIVATE accounting but it is possible that a shared
5419
* VMA is using the same page so check and skip such VMAs.
5420
*/
5421
if (iter_vma->vm_flags & VM_MAYSHARE)
5422
continue;
5423
5424
/*
5425
* Unmap the page from other VMAs without their own reserves.
5426
* They get marked to be SIGKILLed if they fault in these
5427
* areas. This is because a future no-page fault on this VMA
5428
* could insert a zeroed page instead of the data existing
5429
* from the time of fork. This would look like data corruption.
5430
*/
5431
if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
5432
unmap_hugepage_range(iter_vma, address,
5433
address + huge_page_size(h),
5434
folio, 0);
5435
}
5436
i_mmap_unlock_write(mapping);
5437
}
5438
5439
/*
5440
* hugetlb_wp() should be called with page lock of the original hugepage held.
5441
* Called with hugetlb_fault_mutex_table held and pte_page locked so we
5442
* cannot race with other handlers or page migration.
5443
* Keep the pte_same checks anyway to make transition from the mutex easier.
5444
*/
5445
static vm_fault_t hugetlb_wp(struct vm_fault *vmf)
5446
{
5447
struct vm_area_struct *vma = vmf->vma;
5448
struct mm_struct *mm = vma->vm_mm;
5449
const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
5450
pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte);
5451
struct hstate *h = hstate_vma(vma);
5452
struct folio *old_folio;
5453
struct folio *new_folio;
5454
bool cow_from_owner = false;
5455
vm_fault_t ret = 0;
5456
struct mmu_notifier_range range;
5457
5458
/*
5459
* Never handle CoW for uffd-wp protected pages. It should be only
5460
* handled when the uffd-wp protection is removed.
5461
*
5462
* Note that only the CoW optimization path (in hugetlb_no_page())
5463
* can trigger this, because hugetlb_fault() will always resolve
5464
* uffd-wp bit first.
5465
*/
5466
if (!unshare && huge_pte_uffd_wp(pte))
5467
return 0;
5468
5469
/* Let's take out MAP_SHARED mappings first. */
5470
if (vma->vm_flags & VM_MAYSHARE) {
5471
set_huge_ptep_writable(vma, vmf->address, vmf->pte);
5472
return 0;
5473
}
5474
5475
old_folio = page_folio(pte_page(pte));
5476
5477
delayacct_wpcopy_start();
5478
5479
retry_avoidcopy:
5480
/*
5481
* If no-one else is actually using this page, we're the exclusive
5482
* owner and can reuse this page.
5483
*
5484
* Note that we don't rely on the (safer) folio refcount here, because
5485
* copying the hugetlb folio when there are unexpected (temporary)
5486
* folio references could harm simple fork()+exit() users when
5487
* we run out of free hugetlb folios: we would have to kill processes
5488
* in scenarios that used to work. As a side effect, there can still
5489
* be leaks between processes, for example, with FOLL_GET users.
5490
*/
5491
if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
5492
if (!PageAnonExclusive(&old_folio->page)) {
5493
folio_move_anon_rmap(old_folio, vma);
5494
SetPageAnonExclusive(&old_folio->page);
5495
}
5496
if (likely(!unshare))
5497
set_huge_ptep_maybe_writable(vma, vmf->address,
5498
vmf->pte);
5499
5500
delayacct_wpcopy_end();
5501
return 0;
5502
}
5503
VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
5504
PageAnonExclusive(&old_folio->page), &old_folio->page);
5505
5506
/*
5507
* If the process that created a MAP_PRIVATE mapping is about to perform
5508
* a COW due to a shared page count, attempt to satisfy the allocation
5509
* without using the existing reserves.
5510
* In order to determine whether this is a COW on a MAP_PRIVATE mapping it
5511
* is enough to check whether the old_folio is anonymous. This means that
5512
* the reserve for this address was consumed. If reserves were used, a
5513
* partially faulted mapping at the time of fork() could consume its reserves
5514
* on COW instead of the full address range.
5515
*/
5516
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
5517
folio_test_anon(old_folio))
5518
cow_from_owner = true;
5519
5520
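/* Hold a reference on the old folio: the page table lock is dropped below and the folio is still needed for the copy. */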
folio_get(old_folio);
5521
5522
/*
5523
* Drop page table lock as buddy allocator may be called. It will
5524
* be acquired again before returning to the caller, as expected.
5525
*/
5526
spin_unlock(vmf->ptl);
5527
new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner);
5528
5529
if (IS_ERR(new_folio)) {
5530
/*
5531
* If a process owning a MAP_PRIVATE mapping fails to COW,
5532
* it is due to references held by a child and an insufficient
5533
* huge page pool. To guarantee the original mapper's
5534
* reliability, unmap the page from child processes. The child
5535
* may get SIGKILLed if it later faults.
5536
*/
5537
if (cow_from_owner) {
5538
struct address_space *mapping = vma->vm_file->f_mapping;
5539
pgoff_t idx;
5540
u32 hash;
5541
5542
folio_put(old_folio);
5543
/*
5544
* Drop hugetlb_fault_mutex and vma_lock before
5545
* unmapping. unmapping needs to hold vma_lock
5546
* in write mode. Dropping vma_lock in read mode
5547
* here is OK as COW mappings do not interact with
5548
* PMD sharing.
5549
*
5550
* Reacquire both after unmap operation.
5551
*/
5552
idx = vma_hugecache_offset(h, vma, vmf->address);
5553
hash = hugetlb_fault_mutex_hash(mapping, idx);
5554
hugetlb_vma_unlock_read(vma);
5555
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5556
5557
unmap_ref_private(mm, vma, old_folio, vmf->address);
5558
5559
mutex_lock(&hugetlb_fault_mutex_table[hash]);
5560
hugetlb_vma_lock_read(vma);
5561
spin_lock(vmf->ptl);
5562
vmf->pte = hugetlb_walk(vma, vmf->address,
5563
huge_page_size(h));
5564
if (likely(vmf->pte &&
5565
pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte)))
5566
goto retry_avoidcopy;
5567
/*
5568
* A race occurred while re-acquiring the page table
5569
* lock, and our job is done.
5570
*/
5571
delayacct_wpcopy_end();
5572
return 0;
5573
}
5574
5575
ret = vmf_error(PTR_ERR(new_folio));
5576
goto out_release_old;
5577
}
5578
5579
/*
5580
* When the original hugepage is a shared one, it does not have
5581
* anon_vma prepared.
5582
*/
5583
ret = __vmf_anon_prepare(vmf);
5584
if (unlikely(ret))
5585
goto out_release_all;
5586
5587
if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
5588
ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h));
5589
goto out_release_all;
5590
}
5591
__folio_mark_uptodate(new_folio);
5592
5593
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
5594
vmf->address + huge_page_size(h));
5595
mmu_notifier_invalidate_range_start(&range);
5596
5597
/*
5598
* Retake the page table lock to check for racing updates
5599
* before the page tables are altered
5600
*/
5601
spin_lock(vmf->ptl);
5602
vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
5603
if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
5604
pte_t newpte = make_huge_pte(vma, new_folio, !unshare);
5605
5606
/* Break COW or unshare */
5607
huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
5608
hugetlb_remove_rmap(old_folio);
5609
hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
5610
if (huge_pte_uffd_wp(pte))
5611
newpte = huge_pte_mkuffd_wp(newpte);
5612
set_huge_pte_at(mm, vmf->address, vmf->pte, newpte,
5613
huge_page_size(h));
5614
folio_set_hugetlb_migratable(new_folio);
5615
/* Make the old page be freed below */
5616
new_folio = old_folio;
5617
}
5618
spin_unlock(vmf->ptl);
5619
mmu_notifier_invalidate_range_end(&range);
5620
out_release_all:
5621
/*
5622
* No restore in case of successful pagetable update (Break COW or
5623
* unshare)
5624
*/
5625
if (new_folio != old_folio)
5626
restore_reserve_on_error(h, vma, vmf->address, new_folio);
5627
folio_put(new_folio);
5628
out_release_old:
5629
folio_put(old_folio);
5630
5631
spin_lock(vmf->ptl); /* Caller expects lock to be held */
5632
5633
delayacct_wpcopy_end();
5634
return ret;
5635
}
5636
5637
/*
5638
* Return whether there is a pagecache page to back given address within VMA.
5639
*/
5640
bool hugetlbfs_pagecache_present(struct hstate *h,
5641
struct vm_area_struct *vma, unsigned long address)
5642
{
5643
struct address_space *mapping = vma->vm_file->f_mapping;
5644
pgoff_t idx = linear_page_index(vma, address);
5645
struct folio *folio;
5646
5647
folio = filemap_get_folio(mapping, idx);
5648
if (IS_ERR(folio))
5649
return false;
5650
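/* Only presence matters here; drop the reference taken by filemap_get_folio(). */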
folio_put(folio);
5651
return true;
5652
}
5653
5654
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
5655
pgoff_t idx)
5656
{
5657
struct inode *inode = mapping->host;
5658
struct hstate *h = hstate_inode(inode);
5659
int err;
5660
5661
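/* Convert the hugetlb page index into a base (PAGE_SIZE) index for the page cache. */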
idx <<= huge_page_order(h);
5662
__folio_set_locked(folio);
5663
err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
5664
5665
if (unlikely(err)) {
5666
__folio_clear_locked(folio);
5667
return err;
5668
}
5669
folio_clear_hugetlb_restore_reserve(folio);
5670
5671
/*
5672
* mark folio dirty so that it will not be removed from cache/file
5673
* by non-hugetlbfs specific code paths.
5674
*/
5675
folio_mark_dirty(folio);
5676
5677
spin_lock(&inode->i_lock);
5678
inode->i_blocks += blocks_per_huge_page(h);
5679
spin_unlock(&inode->i_lock);
5680
return 0;
5681
}
5682
5683
static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
5684
struct address_space *mapping,
5685
unsigned long reason)
5686
{
5687
u32 hash;
5688
5689
/*
5690
* vma_lock and hugetlb_fault_mutex must be dropped before handling
5691
* userfault. Also mmap_lock could be dropped due to handling
5692
* userfault, so any vma operation should be careful from here.
5693
*/
5694
hugetlb_vma_unlock_read(vmf->vma);
5695
hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
5696
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5697
return handle_userfault(vmf, reason);
5698
}
5699
5700
/*
5701
* Recheck pte with pgtable lock. Returns true if pte didn't change, or
5702
* false if pte changed or is changing.
5703
*/
5704
static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr,
5705
pte_t *ptep, pte_t old_pte)
5706
{
5707
spinlock_t *ptl;
5708
bool same;
5709
5710
ptl = huge_pte_lock(h, mm, ptep);
5711
same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte);
5712
spin_unlock(ptl);
5713
5714
return same;
5715
}
5716
5717
static vm_fault_t hugetlb_no_page(struct address_space *mapping,
5718
struct vm_fault *vmf)
5719
{
5720
u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
5721
bool new_folio, new_anon_folio = false;
5722
struct vm_area_struct *vma = vmf->vma;
5723
struct mm_struct *mm = vma->vm_mm;
5724
struct hstate *h = hstate_vma(vma);
5725
vm_fault_t ret = VM_FAULT_SIGBUS;
5726
bool folio_locked = true;
5727
struct folio *folio;
5728
unsigned long size;
5729
pte_t new_pte;
5730
5731
/*
5732
* Currently, we are forced to kill the process in the event the
5733
* original mapper has unmapped pages from the child due to a failed
5734
* COW/unsharing. Warn that such a situation has occurred as it may not
5735
* be obvious.
5736
*/
5737
if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5738
pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
5739
current->pid);
5740
goto out;
5741
}
5742
5743
/*
5744
* Use page lock to guard against racing truncation
5745
* before we get page_table_lock.
5746
*/
5747
new_folio = false;
5748
folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff);
5749
if (IS_ERR(folio)) {
5750
size = i_size_read(mapping->host) >> huge_page_shift(h);
5751
if (vmf->pgoff >= size)
5752
goto out;
5753
/* Check for page in userfault range */
5754
if (userfaultfd_missing(vma)) {
5755
/*
5756
* Since hugetlb_no_page() was examining pte
5757
* without pgtable lock, we need to re-test under
5758
* lock because the pte may not be stable and could
5759
* have changed from under us. Try to detect
5760
* either changed or during-changing ptes and retry
5761
* properly when needed.
5762
*
5763
* Note that userfaultfd is actually fine with
5764
* false positives (e.g. caused by a changed pte),
5765
* but not wrong logical events (e.g. caused by
5766
* reading a pte while it is changing). The latter can
5767
* confuse userspace, so the strictness is very
5768
* much preferred. E.g., a MISSING event should
5769
* never happen on the page after UFFDIO_COPY has
5770
* correctly installed the page and returned.
5771
*/
5772
if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
5773
ret = 0;
5774
goto out;
5775
}
5776
5777
return hugetlb_handle_userfault(vmf, mapping,
5778
VM_UFFD_MISSING);
5779
}
5780
5781
if (!(vma->vm_flags & VM_MAYSHARE)) {
5782
ret = __vmf_anon_prepare(vmf);
5783
if (unlikely(ret))
5784
goto out;
5785
}
5786
5787
folio = alloc_hugetlb_folio(vma, vmf->address, false);
5788
if (IS_ERR(folio)) {
5789
/*
5790
* Returning error will result in faulting task being
5791
* sent SIGBUS. The hugetlb fault mutex prevents two
5792
* tasks from racing to fault in the same page which
5793
* could result in false unable to allocate errors.
5794
* Page migration does not take the fault mutex, but
5795
* does a clear then write of pte's under page table
5796
* lock. Page fault code could race with migration,
5797
* notice the clear pte and try to allocate a page
5798
* here. Before returning error, get ptl and make
5799
* sure there really is no pte entry.
5800
*/
5801
if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte))
5802
ret = vmf_error(PTR_ERR(folio));
5803
else
5804
ret = 0;
5805
goto out;
5806
}
5807
folio_zero_user(folio, vmf->real_address);
5808
__folio_mark_uptodate(folio);
5809
new_folio = true;
5810
5811
if (vma->vm_flags & VM_MAYSHARE) {
5812
int err = hugetlb_add_to_page_cache(folio, mapping,
5813
vmf->pgoff);
5814
if (err) {
5815
/*
5816
* err can't be -EEXIST which implies someone
5817
* else consumed the reservation since hugetlb
5818
* fault mutex is held when adding a hugetlb page
5819
* to the page cache. So it's safe to call
5820
* restore_reserve_on_error() here.
5821
*/
5822
restore_reserve_on_error(h, vma, vmf->address,
5823
folio);
5824
folio_put(folio);
5825
ret = VM_FAULT_SIGBUS;
5826
goto out;
5827
}
5828
} else {
5829
new_anon_folio = true;
5830
folio_lock(folio);
5831
}
5832
} else {
5833
/*
5834
* If memory error occurs between mmap() and fault, some process
5835
* don't have hwpoisoned swap entry for errored virtual address.
5836
* So we need to block hugepage fault by PG_hwpoison bit check.
5837
*/
5838
if (unlikely(folio_test_hwpoison(folio))) {
5839
ret = VM_FAULT_HWPOISON_LARGE |
5840
VM_FAULT_SET_HINDEX(hstate_index(h));
5841
goto backout_unlocked;
5842
}
5843
5844
/* Check for page in userfault range. */
5845
if (userfaultfd_minor(vma)) {
5846
folio_unlock(folio);
5847
folio_put(folio);
5848
/* See comment in userfaultfd_missing() block above */
5849
if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
5850
ret = 0;
5851
goto out;
5852
}
5853
return hugetlb_handle_userfault(vmf, mapping,
5854
VM_UFFD_MINOR);
5855
}
5856
}
5857
5858
/*
5859
* If we are going to COW a private mapping later, we examine the
5860
* pending reservations for this page now. This will ensure that
5861
* any allocations necessary to record that reservation occur outside
5862
* the spinlock.
5863
*/
5864
if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5865
if (vma_needs_reservation(h, vma, vmf->address) < 0) {
5866
ret = VM_FAULT_OOM;
5867
goto backout_unlocked;
5868
}
5869
/* Just decrements count, does not deallocate */
5870
vma_end_reservation(h, vma, vmf->address);
5871
}
5872
5873
vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
5874
ret = 0;
5875
/* If pte changed from under us, retry */
5876
if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte))
5877
goto backout;
5878
5879
if (new_anon_folio)
5880
hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
5881
else
5882
hugetlb_add_file_rmap(folio);
5883
new_pte = make_huge_pte(vma, folio, vma->vm_flags & VM_SHARED);
5884
/*
5885
* If this pte was previously wr-protected, keep it wr-protected even
5886
* if populated.
5887
*/
5888
if (unlikely(pte_is_uffd_wp_marker(vmf->orig_pte)))
5889
new_pte = huge_pte_mkuffd_wp(new_pte);
5890
set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h));
5891
5892
hugetlb_count_add(pages_per_huge_page(h), mm);
5893
if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5894
/*
5895
* No need to keep file folios locked. See comment in
5896
* hugetlb_fault().
5897
*/
5898
if (!new_anon_folio) {
5899
folio_locked = false;
5900
folio_unlock(folio);
5901
}
5902
/* Optimization, do the COW without a second fault */
5903
ret = hugetlb_wp(vmf);
5904
}
5905
5906
spin_unlock(vmf->ptl);
5907
5908
/*
5909
* Only set hugetlb_migratable in newly allocated pages. Existing pages
5910
* found in the pagecache may not have hugetlb_migratable if they have
5911
* been isolated for migration.
5912
*/
5913
if (new_folio)
5914
folio_set_hugetlb_migratable(folio);
5915
5916
if (folio_locked)
5917
folio_unlock(folio);
5918
out:
5919
hugetlb_vma_unlock_read(vma);
5920
5921
/*
5922
* We must check whether to release the per-VMA lock. __vmf_anon_prepare() is
5923
* the only way ret can be set to VM_FAULT_RETRY.
5924
*/
5925
if (unlikely(ret & VM_FAULT_RETRY))
5926
vma_end_read(vma);
5927
5928
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5929
return ret;
5930
5931
backout:
5932
spin_unlock(vmf->ptl);
5933
backout_unlocked:
5934
/* We only need to restore reservations for private mappings */
5935
if (new_anon_folio)
5936
restore_reserve_on_error(h, vma, vmf->address, folio);
5937
5938
folio_unlock(folio);
5939
folio_put(folio);
5940
goto out;
5941
}
5942
5943
#ifdef CONFIG_SMP
5944
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5945
{
5946
unsigned long key[2];
5947
u32 hash;
5948
5949
key[0] = (unsigned long) mapping;
5950
key[1] = idx;
5951
5952
hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
5953
5954
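/* num_fault_mutexes is sized as a power of two, so masking yields a valid index. */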
return hash & (num_fault_mutexes - 1);
5955
}
5956
#else
5957
/*
5958
* For uniprocessor systems we always use a single mutex, so just
5959
* return 0 and avoid the hashing overhead.
5960
*/
5961
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5962
{
5963
return 0;
5964
}
5965
#endif
5966
5967
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5968
unsigned long address, unsigned int flags)
5969
{
5970
vm_fault_t ret;
5971
u32 hash;
5972
struct folio *folio = NULL;
5973
struct hstate *h = hstate_vma(vma);
5974
struct address_space *mapping;
5975
bool need_wait_lock = false;
5976
struct vm_fault vmf = {
5977
.vma = vma,
5978
.address = address & huge_page_mask(h),
5979
.real_address = address,
5980
.flags = flags,
5981
.pgoff = vma_hugecache_offset(h, vma,
5982
address & huge_page_mask(h)),
5983
/* TODO: Track hugetlb faults using vm_fault */
5984
5985
/*
5986
* Some fields may not be initialized; be careful, as it may
5987
* be hard to debug if called functions make assumptions
5988
*/
5989
};
5990
5991
/*
5992
* Serialize hugepage allocation and instantiation, so that we don't
5993
* get spurious allocation failures if two CPUs race to instantiate
5994
* the same page in the page cache.
5995
*/
5996
mapping = vma->vm_file->f_mapping;
5997
hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
5998
mutex_lock(&hugetlb_fault_mutex_table[hash]);
5999
6000
/*
6001
* Acquire vma lock before calling huge_pte_alloc and hold
6002
* until finished with vmf.pte. This prevents huge_pmd_unshare from
6003
* being called elsewhere and making the vmf.pte no longer valid.
6004
*/
6005
hugetlb_vma_lock_read(vma);
6006
vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h));
6007
if (!vmf.pte) {
6008
hugetlb_vma_unlock_read(vma);
6009
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6010
return VM_FAULT_OOM;
6011
}
6012
6013
vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte);
6014
if (huge_pte_none(vmf.orig_pte))
6015
/*
6016
* hugetlb_no_page will drop vma lock and hugetlb fault
6017
* mutex internally, which makes us return immediately.
6018
*/
6019
return hugetlb_no_page(mapping, &vmf);
6020
6021
if (pte_is_marker(vmf.orig_pte)) {
6022
const pte_marker marker =
6023
softleaf_to_marker(softleaf_from_pte(vmf.orig_pte));
6024
6025
if (marker & PTE_MARKER_POISONED) {
6026
ret = VM_FAULT_HWPOISON_LARGE |
6027
VM_FAULT_SET_HINDEX(hstate_index(h));
6028
goto out_mutex;
6029
} else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) {
6030
/* This isn't supported in hugetlb. */
6031
ret = VM_FAULT_SIGSEGV;
6032
goto out_mutex;
6033
}
6034
6035
return hugetlb_no_page(mapping, &vmf);
6036
}
6037
6038
ret = 0;
6039
6040
/* Not present, either a migration or a hwpoisoned entry */
6041
if (!pte_present(vmf.orig_pte) && !huge_pte_none(vmf.orig_pte)) {
6042
const softleaf_t softleaf = softleaf_from_pte(vmf.orig_pte);
6043
6044
if (softleaf_is_migration(softleaf)) {
6045
/*
6046
* Release the hugetlb fault lock now, but retain
6047
* the vma lock, because it is needed to guard the
6048
* huge_pte_lockptr() later in
6049
* migration_entry_wait_huge(). The vma lock will
6050
* be released there.
6051
*/
6052
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6053
migration_entry_wait_huge(vma, vmf.address, vmf.pte);
6054
return 0;
6055
}
6056
if (softleaf_is_hwpoison(softleaf)) {
6057
ret = VM_FAULT_HWPOISON_LARGE |
6058
VM_FAULT_SET_HINDEX(hstate_index(h));
6059
}
6060
6061
goto out_mutex;
6062
}
6063
6064
/*
6065
* If we are going to COW/unshare the mapping later, we examine the
6066
* pending reservations for this page now. This will ensure that any
6067
* allocations necessary to record that reservation occur outside the
6068
* spinlock.
6069
*/
6070
if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6071
!(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) {
6072
if (vma_needs_reservation(h, vma, vmf.address) < 0) {
6073
ret = VM_FAULT_OOM;
6074
goto out_mutex;
6075
}
6076
/* Just decrements count, does not deallocate */
6077
vma_end_reservation(h, vma, vmf.address);
6078
}
6079
6080
vmf.ptl = huge_pte_lock(h, mm, vmf.pte);
6081
6082
/* Check for a racing update before calling hugetlb_wp() */
6083
if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte))))
6084
goto out_ptl;
6085
6086
/* Handle userfault-wp first, before trying to lock more pages */
6087
if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) &&
6088
(flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
6089
if (!userfaultfd_wp_async(vma)) {
6090
spin_unlock(vmf.ptl);
6091
hugetlb_vma_unlock_read(vma);
6092
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6093
return handle_userfault(&vmf, VM_UFFD_WP);
6094
}
6095
6096
vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte);
6097
set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte,
6098
huge_page_size(hstate_vma(vma)));
6099
/* Fallthrough to CoW */
6100
}
6101
6102
if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
6103
if (!huge_pte_write(vmf.orig_pte)) {
6104
/*
6105
* Anonymous folios need to be locked since hugetlb_wp()
6106
* checks whether we can re-use the folio exclusively
6107
* for us in case we are the only user of it.
6108
*/
6109
folio = page_folio(pte_page(vmf.orig_pte));
6110
if (folio_test_anon(folio) && !folio_trylock(folio)) {
6111
need_wait_lock = true;
6112
goto out_ptl;
6113
}
6114
folio_get(folio);
6115
ret = hugetlb_wp(&vmf);
6116
if (folio_test_anon(folio))
6117
folio_unlock(folio);
6118
folio_put(folio);
6119
goto out_ptl;
6120
} else if (likely(flags & FAULT_FLAG_WRITE)) {
6121
vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
6122
}
6123
}
6124
vmf.orig_pte = pte_mkyoung(vmf.orig_pte);
6125
if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte,
6126
flags & FAULT_FLAG_WRITE))
6127
update_mmu_cache(vma, vmf.address, vmf.pte);
6128
out_ptl:
6129
spin_unlock(vmf.ptl);
6130
out_mutex:
6131
hugetlb_vma_unlock_read(vma);
6132
6133
/*
6134
* We must check whether to release the per-VMA lock. __vmf_anon_prepare() in
6135
* hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
6136
*/
6137
if (unlikely(ret & VM_FAULT_RETRY))
6138
vma_end_read(vma);
6139
6140
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6141
/*
6142
* hugetlb_wp drops all the locks, but the folio lock, before trying to
6143
* unmap the folio from other processes. During that window, if another
6144
* process mapping that folio faults in, it will take the mutex and then
6145
* it will wait on folio_lock, causing an ABBA deadlock.
6146
* Use trylock instead and bail out if we fail.
6147
*
6148
* Ideally, we should hold a refcount on the folio we wait for, but we do
6149
* not want to use the folio after it becomes unlocked, but rather just
6150
* wait for it to become unlocked, so hopefully next fault successes on
6151
* the trylock.
6152
*/
6153
if (need_wait_lock)
6154
folio_wait_locked(folio);
6155
return ret;
6156
}
6157
6158
#ifdef CONFIG_USERFAULTFD
6159
/*
6160
* Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
6161
*/
6162
static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
6163
struct vm_area_struct *vma, unsigned long address)
6164
{
6165
struct mempolicy *mpol;
6166
nodemask_t *nodemask;
6167
struct folio *folio;
6168
gfp_t gfp_mask;
6169
int node;
6170
6171
gfp_mask = htlb_alloc_mask(h);
6172
node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
6173
/*
6174
* This is used to allocate a temporary hugetlb to hold the copied
6175
* content, which will then be copied again to the final hugetlb
6176
* consuming a reservation. Set the alloc_fallback to false to indicate
6177
* that breaking the per-node hugetlb pool is not allowed in this case.
6178
*/
6179
folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false);
6180
mpol_cond_put(mpol);
6181
6182
return folio;
6183
}
6184
6185
/*
6186
* Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
6187
* with modifications for hugetlb pages.
6188
*/
6189
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
6190
struct vm_area_struct *dst_vma,
6191
unsigned long dst_addr,
6192
unsigned long src_addr,
6193
uffd_flags_t flags,
6194
struct folio **foliop)
6195
{
6196
struct mm_struct *dst_mm = dst_vma->vm_mm;
6197
bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
6198
bool wp_enabled = (flags & MFILL_ATOMIC_WP);
6199
struct hstate *h = hstate_vma(dst_vma);
6200
struct address_space *mapping = dst_vma->vm_file->f_mapping;
6201
pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
6202
unsigned long size = huge_page_size(h);
6203
int vm_shared = dst_vma->vm_flags & VM_SHARED;
6204
pte_t _dst_pte;
6205
spinlock_t *ptl;
6206
int ret = -ENOMEM;
6207
struct folio *folio;
6208
bool folio_in_pagecache = false;
6209
pte_t dst_ptep;
6210
6211
if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
6212
ptl = huge_pte_lock(h, dst_mm, dst_pte);
6213
6214
/* Don't overwrite any existing PTEs (even markers) */
6215
if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
6216
spin_unlock(ptl);
6217
return -EEXIST;
6218
}
6219
6220
_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
6221
set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
6222
6223
/* No need to invalidate - it was non-present before */
6224
update_mmu_cache(dst_vma, dst_addr, dst_pte);
6225
6226
spin_unlock(ptl);
6227
return 0;
6228
}
6229
6230
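/* UFFDIO_CONTINUE: the folio must already exist in the page cache. */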
if (is_continue) {
6231
ret = -EFAULT;
6232
folio = filemap_lock_hugetlb_folio(h, mapping, idx);
6233
if (IS_ERR(folio))
6234
goto out;
6235
folio_in_pagecache = true;
6236
} else if (!*foliop) {
6237
/* If a folio already exists, then it's UFFDIO_COPY for
6238
* a non-missing case. Return -EEXIST.
6239
*/
6240
if (vm_shared &&
6241
hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6242
ret = -EEXIST;
6243
goto out;
6244
}
6245
6246
folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
6247
if (IS_ERR(folio)) {
6248
pte_t *actual_pte = hugetlb_walk(dst_vma, dst_addr, PMD_SIZE);
6249
if (actual_pte) {
6250
ret = -EEXIST;
6251
goto out;
6252
}
6253
ret = -ENOMEM;
6254
goto out;
6255
}
6256
6257
ret = copy_folio_from_user(folio, (const void __user *) src_addr,
6258
false);
6259
6260
/* fallback to copy_from_user outside mmap_lock */
6261
if (unlikely(ret)) {
6262
ret = -ENOENT;
6263
/* Free the allocated folio which may have
6264
* consumed a reservation.
6265
*/
6266
restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6267
folio_put(folio);
6268
6269
/* Allocate a temporary folio to hold the copied
6270
* contents.
6271
*/
6272
folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
6273
if (!folio) {
6274
ret = -ENOMEM;
6275
goto out;
6276
}
6277
*foliop = folio;
6278
/* Set the outparam foliop and return to the caller to
6279
* copy the contents outside the lock. Don't free the
6280
* folio.
6281
*/
6282
goto out;
6283
}
6284
} else {
6285
if (vm_shared &&
6286
hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6287
folio_put(*foliop);
6288
ret = -EEXIST;
6289
*foliop = NULL;
6290
goto out;
6291
}
6292
6293
folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
6294
if (IS_ERR(folio)) {
6295
folio_put(*foliop);
6296
ret = -ENOMEM;
6297
*foliop = NULL;
6298
goto out;
6299
}
6300
ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
6301
folio_put(*foliop);
6302
*foliop = NULL;
6303
if (ret) {
6304
folio_put(folio);
6305
goto out;
6306
}
6307
}
6308
6309
/*
6310
* If we just allocated a new page, we need a memory barrier to ensure
6311
* that preceding stores to the page become visible before the
6312
* set_pte_at() write. The memory barrier inside __folio_mark_uptodate
6313
* is what we need.
6314
*
6315
* In the case where we have not allocated a new page (is_continue),
6316
* the page must already be uptodate. UFFDIO_CONTINUE already includes
6317
* an earlier smp_wmb() to ensure that prior stores will be visible
6318
* before the set_pte_at() write.
6319
*/
6320
if (!is_continue)
6321
__folio_mark_uptodate(folio);
6322
else
6323
WARN_ON_ONCE(!folio_test_uptodate(folio));
6324
6325
/* Add shared, newly allocated pages to the page cache. */
6326
if (vm_shared && !is_continue) {
6327
ret = -EFAULT;
6328
if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
6329
goto out_release_nounlock;
6330
6331
/*
6332
* Serialization between remove_inode_hugepages() and
6333
* hugetlb_add_to_page_cache() below happens through the
6334
* hugetlb_fault_mutex_table that here must be hold by
6335
* the caller.
6336
*/
6337
ret = hugetlb_add_to_page_cache(folio, mapping, idx);
6338
if (ret)
6339
goto out_release_nounlock;
6340
folio_in_pagecache = true;
6341
}
6342
6343
ptl = huge_pte_lock(h, dst_mm, dst_pte);
6344
6345
ret = -EIO;
6346
if (folio_test_hwpoison(folio))
6347
goto out_release_unlock;
6348
6349
ret = -EEXIST;
6350
6351
dst_ptep = huge_ptep_get(dst_mm, dst_addr, dst_pte);
6352
/*
6353
* See comment about UFFD marker overwriting in
6354
* mfill_atomic_install_pte().
6355
*/
6356
if (!huge_pte_none(dst_ptep) && !pte_is_uffd_marker(dst_ptep))
6357
goto out_release_unlock;
6358
6359
if (folio_in_pagecache)
6360
hugetlb_add_file_rmap(folio);
6361
else
6362
hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);
6363
6364
/*
6365
* For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
6366
* with wp flag set, don't set pte write bit.
6367
*/
6368
_dst_pte = make_huge_pte(dst_vma, folio,
6369
!wp_enabled && !(is_continue && !vm_shared));
6370
/*
6371
* Always mark UFFDIO_COPY page dirty; note that this may not be
6372
* extremely important for hugetlbfs for now since swapping is not
6373
* supported, but we should still be clear that this page cannot be
6374
* thrown away at will, even if the write bit is not set.
6375
*/
6376
_dst_pte = huge_pte_mkdirty(_dst_pte);
6377
_dst_pte = pte_mkyoung(_dst_pte);
6378
6379
if (wp_enabled)
6380
_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
6381
6382
set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
6383
6384
hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6385
6386
/* No need to invalidate - it was non-present before */
6387
update_mmu_cache(dst_vma, dst_addr, dst_pte);
6388
6389
spin_unlock(ptl);
6390
if (!is_continue)
6391
folio_set_hugetlb_migratable(folio);
6392
if (vm_shared || is_continue)
6393
folio_unlock(folio);
6394
ret = 0;
6395
out:
6396
return ret;
6397
out_release_unlock:
6398
spin_unlock(ptl);
6399
if (vm_shared || is_continue)
6400
folio_unlock(folio);
6401
out_release_nounlock:
6402
if (!folio_in_pagecache)
6403
restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6404
folio_put(folio);
6405
goto out;
6406
}
6407
#endif /* CONFIG_USERFAULTFD */
6408
6409
long hugetlb_change_protection(struct vm_area_struct *vma,
6410
unsigned long address, unsigned long end,
6411
pgprot_t newprot, unsigned long cp_flags)
6412
{
6413
struct mm_struct *mm = vma->vm_mm;
6414
unsigned long start = address;
6415
pte_t *ptep;
6416
pte_t pte;
6417
struct hstate *h = hstate_vma(vma);
6418
long pages = 0, psize = huge_page_size(h);
6419
struct mmu_notifier_range range;
6420
unsigned long last_addr_mask;
6421
bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
6422
bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6423
struct mmu_gather tlb;
6424
6425
/*
6426
* In the case of shared PMDs, the area to flush could be beyond
6427
* start/end. Set range.start/range.end to cover the maximum possible
6428
* range if PMD sharing is possible.
6429
*/
6430
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
6431
0, mm, start, end);
6432
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
6433
6434
BUG_ON(address >= end);
6435
flush_cache_range(vma, range.start, range.end);
6436
tlb_gather_mmu_vma(&tlb, vma);
6437
6438
mmu_notifier_invalidate_range_start(&range);
6439
hugetlb_vma_lock_write(vma);
6440
i_mmap_lock_write(vma->vm_file->f_mapping);
6441
last_addr_mask = hugetlb_mask_last_page(h);
6442
for (; address < end; address += psize) {
6443
softleaf_t entry;
6444
spinlock_t *ptl;
6445
6446
ptep = hugetlb_walk(vma, address, psize);
6447
if (!ptep) {
6448
if (!uffd_wp) {
6449
address |= last_addr_mask;
6450
continue;
6451
}
6452
/*
6453
* Userfaultfd wr-protect requires pgtable
6454
* pre-allocations to install pte markers.
6455
*/
6456
ptep = huge_pte_alloc(mm, vma, address, psize);
6457
if (!ptep) {
6458
pages = -ENOMEM;
6459
break;
6460
}
6461
}
6462
ptl = huge_pte_lock(h, mm, ptep);
6463
if (huge_pmd_unshare(&tlb, vma, address, ptep)) {
6464
/*
6465
* When uffd-wp is enabled on the vma, unshare
6466
* shouldn't happen at all. Warn about it if it
6467
* happened due to some reason.
6468
*/
6469
WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
6470
pages++;
6471
spin_unlock(ptl);
6472
address |= last_addr_mask;
6473
continue;
6474
}
6475
pte = huge_ptep_get(mm, address, ptep);
6476
if (huge_pte_none(pte)) {
6477
if (unlikely(uffd_wp))
6478
/* Safe to modify directly (none->non-present). */
6479
set_huge_pte_at(mm, address, ptep,
6480
make_pte_marker(PTE_MARKER_UFFD_WP),
6481
psize);
6482
goto next;
6483
}
6484
6485
entry = softleaf_from_pte(pte);
6486
if (unlikely(softleaf_is_hwpoison(entry))) {
6487
/* Nothing to do. */
6488
} else if (unlikely(softleaf_is_migration(entry))) {
6489
struct folio *folio = softleaf_to_folio(entry);
6490
pte_t newpte = pte;
6491
6492
if (softleaf_is_migration_write(entry)) {
6493
if (folio_test_anon(folio))
6494
entry = make_readable_exclusive_migration_entry(
6495
swp_offset(entry));
6496
else
6497
entry = make_readable_migration_entry(
6498
swp_offset(entry));
6499
newpte = swp_entry_to_pte(entry);
6500
pages++;
6501
}
6502
6503
if (uffd_wp)
6504
newpte = pte_swp_mkuffd_wp(newpte);
6505
else if (uffd_wp_resolve)
6506
newpte = pte_swp_clear_uffd_wp(newpte);
6507
if (!pte_same(pte, newpte))
6508
set_huge_pte_at(mm, address, ptep, newpte, psize);
6509
} else if (unlikely(pte_is_marker(pte))) {
6510
/*
6511
* Do nothing on a poison marker; page is
6512
* corrupted, permissions do not apply. Here
6513
* pte_marker_uffd_wp()==true implies !poison
6514
* because they're mutually exclusive.
6515
*/
6516
if (pte_is_uffd_wp_marker(pte) && uffd_wp_resolve)
6517
/* Safe to modify directly (non-present->none). */
6518
huge_pte_clear(mm, address, ptep, psize);
6519
} else {
6520
pte_t old_pte;
6521
unsigned int shift = huge_page_shift(hstate_vma(vma));
6522
6523
old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
6524
pte = huge_pte_modify(old_pte, newprot);
6525
pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
6526
if (uffd_wp)
6527
pte = huge_pte_mkuffd_wp(pte);
6528
else if (uffd_wp_resolve)
6529
pte = huge_pte_clear_uffd_wp(pte);
6530
huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
6531
pages++;
6532
tlb_remove_huge_tlb_entry(h, &tlb, ptep, address);
6533
}
6534
6535
next:
6536
spin_unlock(ptl);
6537
cond_resched();
6538
}
6539
6540
tlb_flush_mmu_tlbonly(&tlb);
6541
huge_pmd_unshare_flush(&tlb, vma);
6542
/*
6543
* No need to call mmu_notifier_arch_invalidate_secondary_tlbs() as we are
6544
* downgrading page table protection, not changing it to point to a new
6545
* page.
6546
*
6547
* See Documentation/mm/mmu_notifier.rst
6548
*/
6549
i_mmap_unlock_write(vma->vm_file->f_mapping);
6550
hugetlb_vma_unlock_write(vma);
6551
mmu_notifier_invalidate_range_end(&range);
6552
tlb_finish_mmu(&tlb);
6553
6554
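/* Convert the count of changed huge pages to base pages; negative values are error codes and are passed through. */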
return pages > 0 ? (pages << h->order) : pages;
6555
}
6556
6557
/*
6558
* Update the reservation map for the range [from, to].
6559
*
6560
* Returns the number of entries that would be added to the reservation map
6561
* associated with the range [from, to]. This number is greater than or equal to
6562
* zero. -EINVAL or -ENOMEM is returned in case of any errors.
6563
*/
6564
6565
long hugetlb_reserve_pages(struct inode *inode,
6566
long from, long to,
6567
struct vm_area_desc *desc,
6568
vm_flags_t vm_flags)
6569
{
6570
long chg = -1, add = -1, spool_resv, gbl_resv;
6571
struct hstate *h = hstate_inode(inode);
6572
struct hugepage_subpool *spool = subpool_inode(inode);
6573
struct resv_map *resv_map;
6574
struct hugetlb_cgroup *h_cg = NULL;
6575
long gbl_reserve, regions_needed = 0;
6576
int err;
6577
6578
/* This should never happen */
6579
if (from > to) {
6580
VM_WARN(1, "%s called with a negative range\n", __func__);
6581
return -EINVAL;
6582
}
6583
6584
/*
6585
* Only apply hugepage reservation if asked. At fault time, an
6586
* attempt will be made for VM_NORESERVE to allocate a page
6587
* without using reserves
6588
*/
6589
if (vm_flags & VM_NORESERVE)
6590
return 0;
6591
6592
/*
6593
* Shared mappings base their reservation on the number of pages that
6594
* are already allocated on behalf of the file. Private mappings need
6595
* to reserve the full area even if read-only as mprotect() may be
6596
* called to make the mapping read-write. Assume !desc is a shm mapping
6597
*/
6598
if (!desc || desc->vm_flags & VM_MAYSHARE) {
6599
/*
6600
* resv_map can not be NULL as hugetlb_reserve_pages is only
6601
* called for inodes for which resv_maps were created (see
6602
* hugetlbfs_get_inode).
6603
*/
6604
resv_map = inode_resv_map(inode);
6605
6606
chg = region_chg(resv_map, from, to, &regions_needed);
6607
} else {
6608
/* Private mapping. */
6609
resv_map = resv_map_alloc();
6610
if (!resv_map) {
6611
err = -ENOMEM;
6612
goto out_err;
6613
}
6614
6615
chg = to - from;
6616
6617
set_vma_desc_resv_map(desc, resv_map);
6618
set_vma_desc_resv_flags(desc, HPAGE_RESV_OWNER);
6619
}
6620
6621
if (chg < 0) {
6622
/* region_chg() above can return -ENOMEM */
6623
err = (chg == -ENOMEM) ? -ENOMEM : -EINVAL;
6624
goto out_err;
6625
}
6626
6627
err = hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
6628
chg * pages_per_huge_page(h), &h_cg);
6629
if (err < 0)
6630
goto out_err;
6631
6632
if (desc && !(desc->vm_flags & VM_MAYSHARE) && h_cg) {
6633
/* For private mappings, the hugetlb_cgroup uncharge info hangs
6634
* off the resv_map.
6635
*/
6636
resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6637
}
6638
6639
/*
6640
* There must be enough pages in the subpool for the mapping. If
6641
* the subpool has a minimum size, there may be some global
6642
* reservations already in place (gbl_reserve).
6643
*/
6644
gbl_reserve = hugepage_subpool_get_pages(spool, chg);
6645
if (gbl_reserve < 0) {
6646
err = gbl_reserve;
6647
goto out_uncharge_cgroup;
6648
}
6649
6650
/*
6651
* Check enough hugepages are available for the reservation.
6652
* Hand the pages back to the subpool if there are not
6653
*/
6654
err = hugetlb_acct_memory(h, gbl_reserve);
6655
if (err < 0)
6656
goto out_put_pages;
6657
6658
/*
6659
* Account for the reservations made. Shared mappings record regions
6660
* that have reservations as they are shared by multiple VMAs.
6661
* When the last VMA disappears, the region map says how much
6662
* the reservation was and the page cache tells how much of
6663
* the reservation was consumed. Private mappings are per-VMA and
6664
* only the consumed reservations are tracked. When the VMA
6665
* disappears, the original reservation is the VMA size and the
6666
* consumed reservations are stored in the map. Hence, nothing
6667
* else has to be done for private mappings here
6668
*/
6669
if (!desc || desc->vm_flags & VM_MAYSHARE) {
6670
add = region_add(resv_map, from, to, regions_needed, h, h_cg);
6671
6672
if (unlikely(add < 0)) {
6673
hugetlb_acct_memory(h, -gbl_reserve);
6674
err = add;
6675
goto out_put_pages;
6676
} else if (unlikely(chg > add)) {
6677
/*
6678
* pages in this range were added to the reserve
6679
* map between region_chg and region_add. This
6680
* indicates a race with alloc_hugetlb_folio. Adjust
6681
* the subpool and reserve counts modified above
6682
* based on the difference.
6683
*/
6684
long rsv_adjust;
6685
6686
/*
6687
* hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6688
* reference to h_cg->css. See comment below for detail.
6689
*/
6690
hugetlb_cgroup_uncharge_cgroup_rsvd(
6691
hstate_index(h),
6692
(chg - add) * pages_per_huge_page(h), h_cg);
6693
6694
rsv_adjust = hugepage_subpool_put_pages(spool,
6695
chg - add);
6696
hugetlb_acct_memory(h, -rsv_adjust);
6697
} else if (h_cg) {
6698
/*
6699
* The file_regions will hold their own reference to
6700
* h_cg->css. So we should release the reference held
6701
* via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6702
* done.
6703
*/
6704
hugetlb_cgroup_put_rsvd_cgroup(h_cg);
6705
}
6706
}
6707
return chg;
6708
6709
out_put_pages:
6710
spool_resv = chg - gbl_reserve;
6711
if (spool_resv) {
6712
/* Put the subpool's reservation back: chg - gbl_reserve */
6713
gbl_resv = hugepage_subpool_put_pages(spool, spool_resv);
6714
/*
6715
* The subpool's reserved pages cannot be put back due to a race;
6716
* return them to the hstate.
6717
*/
6718
hugetlb_acct_memory(h, -gbl_resv);
6719
}
6720
out_uncharge_cgroup:
6721
hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6722
chg * pages_per_huge_page(h), h_cg);
6723
out_err:
6724
if (!desc || desc->vm_flags & VM_MAYSHARE)
6725
/* Only call region_abort if the region_chg succeeded but the
6726
* region_add failed or didn't run.
6727
*/
6728
if (chg >= 0 && add < 0)
6729
region_abort(resv_map, from, to, regions_needed);
6730
if (desc && is_vma_desc_resv_set(desc, HPAGE_RESV_OWNER)) {
6731
kref_put(&resv_map->refs, resv_map_release);
6732
set_vma_desc_resv_map(desc, NULL);
6733
}
6734
return err;
6735
}
6736
6737
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
6738
long freed)
6739
{
6740
struct hstate *h = hstate_inode(inode);
6741
struct resv_map *resv_map = inode_resv_map(inode);
6742
long chg = 0;
6743
struct hugepage_subpool *spool = subpool_inode(inode);
6744
long gbl_reserve;
6745
6746
/*
6747
* Since this routine can be called in the evict inode path for all
6748
* hugetlbfs inodes, resv_map could be NULL.
6749
*/
6750
if (resv_map) {
6751
chg = region_del(resv_map, start, end);
6752
/*
6753
* region_del() can fail in the rare case where a region
6754
* must be split and another region descriptor can not be
6755
* allocated. If end == LONG_MAX, it will not fail.
6756
*/
6757
if (chg < 0)
6758
return chg;
6759
}
6760
6761
spin_lock(&inode->i_lock);
6762
inode->i_blocks -= (blocks_per_huge_page(h) * freed);
6763
spin_unlock(&inode->i_lock);
6764
6765
/*
6766
* If the subpool has a minimum size, the number of global
6767
* reservations to be released may be adjusted.
6768
*
6769
* Note that !resv_map implies freed == 0. So (chg - freed)
6770
* won't go negative.
6771
*/
6772
gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
6773
hugetlb_acct_memory(h, -gbl_reserve);
6774
6775
return 0;
6776
}
6777
6778
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
6779
static unsigned long page_table_shareable(struct vm_area_struct *svma,
6780
struct vm_area_struct *vma,
6781
unsigned long addr, pgoff_t idx)
6782
{
6783
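/* Translate the shared file offset @idx back into an address within @svma. */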
unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
6784
svma->vm_start;
6785
unsigned long sbase = saddr & PUD_MASK;
6786
unsigned long s_end = sbase + PUD_SIZE;
6787
6788
/* Allow segments to share if only one is marked locked */
6789
vm_flags_t vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
6790
vm_flags_t svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;
6791
6792
/*
6793
* Match the virtual addresses, permissions and the alignment of the
6794
* page table page.
6795
*
6796
* Also, vma_lock (vm_private_data) is required for sharing.
6797
*/
6798
if (pmd_index(addr) != pmd_index(saddr) ||
6799
vm_flags != svm_flags ||
6800
!range_in_vma(svma, sbase, s_end) ||
6801
!svma->vm_private_data)
6802
return 0;
6803
6804
return saddr;
6805
}
6806
6807
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6808
{
6809
unsigned long start = addr & PUD_MASK;
6810
unsigned long end = start + PUD_SIZE;
6811
6812
#ifdef CONFIG_USERFAULTFD
6813
if (uffd_disable_huge_pmd_share(vma))
6814
return false;
6815
#endif
6816
/*
6817
* check on proper vm_flags and page table alignment
6818
*/
6819
if (!(vma->vm_flags & VM_MAYSHARE))
6820
return false;
6821
if (!vma->vm_private_data) /* vma lock required for sharing */
6822
return false;
6823
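/* The PUD-aligned range around @addr must lie entirely within the vma. */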
if (!range_in_vma(vma, start, end))
6824
return false;
6825
return true;
6826
}
6827
6828
/*
6829
* Determine if start,end range within vma could be mapped by shared pmd.
6830
* If yes, adjust start and end to cover range associated with possible
6831
* shared pmd mappings.
6832
*/
6833
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6834
unsigned long *start, unsigned long *end)
6835
{
6836
unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6837
v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6838
6839
/*
6840
* vma needs to span at least one aligned PUD size, and the range
6841
* must be at least partially within in.
6842
*/
6843
if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6844
(*end <= v_start) || (*start >= v_end))
6845
return;
6846
6847
/* Extend the range to be PUD aligned for a worst case scenario */
6848
if (*start > v_start)
6849
*start = ALIGN_DOWN(*start, PUD_SIZE);
6850
6851
if (*end < v_end)
6852
*end = ALIGN(*end, PUD_SIZE);
6853
}
6854
6855
/*
6856
* Search for a shareable pmd page for hugetlb. In any case, it calls pmd_alloc()
6857
* and returns the corresponding pte. While this is not necessary for the
6858
* !shared pmd case because we can allocate the pmd later as well, it makes the
6859
* code much cleaner. pmd allocation is essential for the shared case because
6860
* pud has to be populated inside the same i_mmap_rwsem section - otherwise
6861
* racing tasks could either miss the sharing (see huge_pte_offset) or select a
6862
* bad pmd for sharing.
6863
*/
6864
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6865
unsigned long addr, pud_t *pud)
6866
{
6867
struct address_space *mapping = vma->vm_file->f_mapping;
6868
pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
6869
vma->vm_pgoff;
6870
struct vm_area_struct *svma;
6871
unsigned long saddr;
6872
pte_t *spte = NULL;
6873
pte_t *pte;
6874
6875
i_mmap_lock_read(mapping);
6876
vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
6877
if (svma == vma)
6878
continue;
6879
6880
saddr = page_table_shareable(svma, vma, addr, idx);
6881
if (saddr) {
6882
spte = hugetlb_walk(svma, saddr,
6883
vma_mmu_pagesize(svma));
6884
if (spte) {
6885
ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
6886
break;
6887
}
6888
}
6889
}
6890
6891
if (!spte)
6892
goto out;
6893
6894
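/*
* Install the shared PMD table under page_table_lock. If another task
* populated the pud in the meantime, drop the reference taken above.
*/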
spin_lock(&mm->page_table_lock);
6895
if (pud_none(*pud)) {
6896
pud_populate(mm, pud,
6897
(pmd_t *)((unsigned long)spte & PAGE_MASK));
6898
mm_inc_nr_pmds(mm);
6899
} else {
6900
ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
6901
}
6902
spin_unlock(&mm->page_table_lock);
6903
out:
6904
pte = (pte_t *)pmd_alloc(mm, pud, addr);
6905
i_mmap_unlock_read(mapping);
6906
return pte;
6907
}
6908
6909
/**
6910
* huge_pmd_unshare - Unmap a pmd table if it is shared by multiple users
6911
* @tlb: the current mmu_gather.
6912
* @vma: the vma covering the pmd table.
6913
* @addr: the address we are trying to unshare.
6914
* @ptep: pointer into the (pmd) page table.
6915
*
6916
* Called with the page table lock held, the i_mmap_rwsem held in write mode
6917
* and the hugetlb vma lock held in write mode.
6918
*
6919
* Note: The caller must call huge_pmd_unshare_flush() before dropping the
6920
* i_mmap_rwsem.
6921
*
6922
* Returns: 1 if it was a shared PMD table and it got unmapped, or 0 if it
6923
* was not a shared PMD table.
6924
*/
6925
int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
6926
unsigned long addr, pte_t *ptep)
6927
{
6928
unsigned long sz = huge_page_size(hstate_vma(vma));
6929
struct mm_struct *mm = vma->vm_mm;
6930
pgd_t *pgd = pgd_offset(mm, addr);
6931
p4d_t *p4d = p4d_offset(pgd, addr);
6932
pud_t *pud = pud_offset(p4d, addr);
6933
6934
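/* Only PMD-sized hugetlb mappings can be backed by a shared PMD table. */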
if (sz != PMD_SIZE)
6935
return 0;
6936
if (!ptdesc_pmd_is_shared(virt_to_ptdesc(ptep)))
6937
return 0;
6938
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
6939
hugetlb_vma_assert_locked(vma);
6940
pud_clear(pud);
6941
6942
tlb_unshare_pmd_ptdesc(tlb, virt_to_ptdesc(ptep), addr);
6943
6944
mm_dec_nr_pmds(mm);
6945
return 1;
6946
}
6947
6948
/*
6949
* huge_pmd_unshare_flush - Complete a sequence of huge_pmd_unshare() calls
6950
* @tlb: the current mmu_gather.
6951
* @vma: the vma covering the pmd table.
6952
*
6953
* Perform necessary TLB flushes or IPI broadcasts to synchronize PMD table
6954
* unsharing with concurrent page table walkers.
6955
*
6956
* This function must be called after a sequence of huge_pmd_unshare()
6957
* calls while still holding the i_mmap_rwsem.
6958
*/
6959
void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
6960
{
6961
/*
6962
* We must synchronize page table unsharing such that nobody will
6963
* try reusing a previously-shared page table while it might still
6964
* be in use by previous sharers (TLB, GUP_fast).
6965
*/
6966
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
6967
6968
tlb_flush_unshared_tables(tlb);
6969
}
6970
6971
#else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
6972
6973
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6974
unsigned long addr, pud_t *pud)
6975
{
6976
return NULL;
6977
}
6978
6979
int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
6980
unsigned long addr, pte_t *ptep)
6981
{
6982
return 0;
6983
}
6984
6985
void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
6986
{
6987
}
6988
6989
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6990
unsigned long *start, unsigned long *end)
6991
{
6992
}
6993
6994
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6995
{
6996
return false;
6997
}
6998
#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
6999
7000
#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
7001
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
7002
unsigned long addr, unsigned long sz)
7003
{
7004
pgd_t *pgd;
7005
p4d_t *p4d;
7006
pud_t *pud;
7007
pte_t *pte = NULL;
7008
7009
pgd = pgd_offset(mm, addr);
7010
p4d = p4d_alloc(mm, pgd, addr);
7011
if (!p4d)
7012
return NULL;
7013
pud = pud_alloc(mm, p4d, addr);
7014
if (pud) {
7015
if (sz == PUD_SIZE) {
7016
pte = (pte_t *)pud;
7017
} else {
7018
BUG_ON(sz != PMD_SIZE);
7019
if (want_pmd_share(vma, addr) && pud_none(*pud))
7020
pte = huge_pmd_share(mm, vma, addr, pud);
7021
else
7022
pte = (pte_t *)pmd_alloc(mm, pud, addr);
7023
}
7024
}
7025
7026
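/* Sanity check: a present entry at this level must be a huge (leaf) entry. */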
if (pte) {
7027
pte_t pteval = ptep_get_lockless(pte);
7028
7029
BUG_ON(pte_present(pteval) && !pte_huge(pteval));
7030
}
7031
7032
return pte;
7033
}
7034
7035
/*
7036
* huge_pte_offset() - Walk the page table to resolve the hugepage
7037
* entry at address @addr
7038
*
7039
* Return: Pointer to page table entry (PUD or PMD) for
7040
* address @addr, or NULL if a !p*d_present() entry is encountered and the
7041
* size @sz doesn't match the hugepage size at this level of the page
7042
* table.
7043
*/
7044
pte_t *huge_pte_offset(struct mm_struct *mm,
7045
unsigned long addr, unsigned long sz)
7046
{
7047
pgd_t *pgd;
7048
p4d_t *p4d;
7049
pud_t *pud;
7050
pmd_t *pmd;
7051
7052
pgd = pgd_offset(mm, addr);
7053
if (!pgd_present(*pgd))
7054
return NULL;
7055
p4d = p4d_offset(pgd, addr);
7056
if (!p4d_present(*p4d))
7057
return NULL;
7058
7059
pud = pud_offset(p4d, addr);
7060
if (sz == PUD_SIZE)
7061
/* must be pud huge, non-present or none */
7062
return (pte_t *)pud;
7063
if (!pud_present(*pud))
7064
return NULL;
7065
/* must have a valid entry and size to go further */
7066
7067
pmd = pmd_offset(pud, addr);
7068
/* must be pmd huge, non-present or none */
7069
return (pte_t *)pmd;
7070
}
7071
7072
/*
7073
* Return a mask that can be used to update an address to the last huge
7074
* page in a page table page mapping size. Used to skip non-present
7075
* page table entries when linearly scanning address ranges. Architectures
7076
* with unique huge page to page table relationships can define their own
7077
* version of this routine.
7078
*/
7079
unsigned long hugetlb_mask_last_page(struct hstate *h)
7080
{
7081
unsigned long hp_size = huge_page_size(h);
7082
7083
if (hp_size == PUD_SIZE)
7084
return P4D_SIZE - PUD_SIZE;
7085
else if (hp_size == PMD_SIZE)
7086
return PUD_SIZE - PMD_SIZE;
7087
else
7088
return 0UL;
7089
}
7090
7091
#else
7092
7093
/* See description above. Architectures can provide their own version. */
7094
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
7095
{
7096
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
7097
if (huge_page_size(h) == PMD_SIZE)
7098
return PUD_SIZE - PMD_SIZE;
7099
#endif
7100
return 0UL;
7101
}
7102
7103
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7104
7105
/**
7106
* folio_isolate_hugetlb - try to isolate an allocated hugetlb folio
7107
* @folio: the folio to isolate
7108
* @list: the list to add the folio to on success
7109
*
7110
* Isolate an allocated (refcount > 0) hugetlb folio, marking it as
7111
* isolated/non-migratable, and moving it from the active list to the
7112
* given list.
7113
*
7114
* Isolation will fail if @folio is not an allocated hugetlb folio, or if
7115
* it is already isolated/non-migratable.
7116
*
7117
* On success, an additional folio reference is taken that must be dropped
7118
* using folio_putback_hugetlb() to undo the isolation.
7119
*
7120
* Return: True if isolation worked, otherwise False.
7121
*/
7122
bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
7123
{
7124
bool ret = true;
7125
7126
spin_lock_irq(&hugetlb_lock);
7127
if (!folio_test_hugetlb(folio) ||
7128
!folio_test_hugetlb_migratable(folio) ||
7129
!folio_try_get(folio)) {
7130
ret = false;
7131
goto unlock;
7132
}
7133
folio_clear_hugetlb_migratable(folio);
7134
list_move_tail(&folio->lru, list);
7135
unlock:
7136
spin_unlock_irq(&hugetlb_lock);
7137
return ret;
7138
}
7139
7140
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	int ret = 0;

	*hugetlb = false;
	spin_lock_irq(&hugetlb_lock);
	if (folio_test_hugetlb(folio)) {
		*hugetlb = true;
		if (folio_test_hugetlb_freed(folio))
			ret = 0;
		else if (folio_test_hugetlb_migratable(folio) || unpoison)
			ret = folio_try_get(folio);
		else
			ret = -EBUSY;
	}
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

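/*
 * Called by the memory-failure code: take hugetlb_lock and let
 * __get_huge_page_for_hwpoison() examine the hugetlb folio backing @pfn.
 */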
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared)
{
	int ret;

	spin_lock_irq(&hugetlb_lock);
	ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

/**
 * folio_putback_hugetlb - unisolate a hugetlb folio
 * @folio: the isolated hugetlb folio
 *
 * Putback/un-isolate the hugetlb folio that was previously isolated using
 * folio_isolate_hugetlb(): marking it non-isolated/migratable and putting it
 * back onto the active list.
 *
 * Will drop the additional folio reference obtained through
 * folio_isolate_hugetlb().
 */
void folio_putback_hugetlb(struct folio *folio)
{
	spin_lock_irq(&hugetlb_lock);
	folio_set_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	folio_put(folio);
}

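/*
 * Rough sketch of how the helpers above fit together during hugetlb folio
 * migration (for orientation only, not a literal code path):
 *
 *	if (folio_isolate_hugetlb(folio, &list)) {
 *		... migrate the folios on "list" ...
 *		on success:  move_hugetlb_state(old_folio, new_folio, reason);
 *		on failure:  folio_putback_hugetlb(folio);
 *	}
 */

/*
 * Transfer hugetlb-specific state from @old_folio to @new_folio after a
 * successful migration: hugetlb cgroup charges, the page_owner migrate
 * reason, the "temporary" flag (including per-node surplus accounting when
 * the folio changed node), and finally mark the new folio migratable again
 * and put it on the active list.
 */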
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
{
	struct hstate *h = folio_hstate(old_folio);

	hugetlb_cgroup_migrate(old_folio, new_folio);
	folio_set_owner_migrate_reason(new_folio, reason);

	/*
	 * Transfer the temporary state of the new hugetlb folio. This is
	 * the reverse of other transitions because the new folio is going
	 * to be final while the old one will be freed, so the old folio
	 * takes over the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well, otherwise the global surplus count will not match
	 * the per-node counts.
	 */
	if (folio_test_hugetlb_temporary(new_folio)) {
		int old_nid = folio_nid(old_folio);
		int new_nid = folio_nid(new_folio);

		folio_set_hugetlb_temporary(old_folio);
		folio_clear_hugetlb_temporary(new_folio);

		/*
		 * There is no need to transfer the per-node surplus state
		 * when we do not cross the node.
		 */
		if (new_nid == old_nid)
			return;
		spin_lock_irq(&hugetlb_lock);
		if (h->surplus_huge_pages_node[old_nid]) {
			h->surplus_huge_pages_node[old_nid]--;
			h->surplus_huge_pages_node[new_nid]++;
		}
		spin_unlock_irq(&hugetlb_lock);
	}

	/*
	 * Our old folio is isolated and has "migratable" cleared until it
	 * is put back. As migration succeeded, set the new folio "migratable"
	 * and add it to the active list.
	 */
	spin_lock_irq(&hugetlb_lock);
	folio_set_hugetlb_migratable(new_folio);
	list_move_tail(&new_folio->lru, &(folio_hstate(new_folio))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
}

/*
 * If @take_locks is false, the caller must ensure that no concurrent page table
 * access can happen (except for gup_fast() and hardware page walks).
 * If @take_locks is true, we take the hugetlb VMA lock (to lock out things like
 * concurrent page fault handling) and the file rmap lock.
 */
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
				 unsigned long start,
				 unsigned long end,
				 bool take_locks)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;
	unsigned long address;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & VM_MAYSHARE))
		return;

	if (start >= end)
		return;

	flush_cache_range(vma, start, end);
	tlb_gather_mmu_vma(&tlb, vma);

	/*
	 * No need to call adjust_range_if_pmd_sharing_possible(), because
	 * we have already done the PUD_SIZE alignment.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);
	if (take_locks) {
		hugetlb_vma_lock_write(vma);
		i_mmap_lock_write(vma->vm_file->f_mapping);
	} else {
		i_mmap_assert_write_locked(vma->vm_file->f_mapping);
	}
	for (address = start; address < end; address += PUD_SIZE) {
		ptep = hugetlb_walk(vma, address, sz);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		huge_pmd_unshare(&tlb, vma, address, ptep);
		spin_unlock(ptl);
	}
	huge_pmd_unshare_flush(&tlb, vma);
	if (take_locks) {
		i_mmap_unlock_write(vma->vm_file->f_mapping);
		hugetlb_vma_unlock_write(vma);
	}
	/*
	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
	 * Documentation/mm/mmu_notifier.rst.
	 */
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);
}

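/*
 * Note that hugetlb_unshare_pmds() walks in PUD_SIZE steps and relies on
 * the caller passing PUD_SIZE-aligned @start and @end, as done below.
 */
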
/*
 * This function will unconditionally remove all the shared pmd pgtable entries
 * within the specific vma for a hugetlbfs memory range.
 */
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
			ALIGN_DOWN(vma->vm_end, PUD_SIZE),
			/* take_locks = */ true);
}

/*
 * For hugetlb, mremap() is an odd edge case - while the VMA copying is
 * performed, we permit both the old and new VMAs to reference the same
 * reservation.
 *
 * We fix this up after the operation succeeds, or if a newly allocated VMA
 * is closed as a result of a failure to allocate memory.
 */
void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
	if (is_vm_hugetlb_page(vma))
		clear_vma_resv_huge_pages(vma);
}
