GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/mm/book3s64/slice.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/spu.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label,
			(int)SLICE_NUM_LOW, &mask->low_slices);
	pr_devel("%s high_slice: %*pbl\n", label,
			(int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif

static inline notrace bool slice_addr_is_low(unsigned long addr)
{
	u64 tmp = (u64)addr;

	return tmp < SLICE_LOW_TOP;
}

static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}
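
#if 0	/* Editor's illustration, not part of the kernel source: a
	 * standalone sketch of the low-slice bit arithmetic above,
	 * assuming 256MB low slices (SLICE_LOW_SHIFT == 28, as on
	 * book3s64 hash). The EX_ names are hypothetical stand-ins. */
#include <stdio.h>

#define EX_SLICE_LOW_SHIFT		28
#define EX_GET_LOW_SLICE_INDEX(addr)	((addr) >> EX_SLICE_LOW_SHIFT)

int main(void)
{
	unsigned long start = 0x30000000UL, len = 0x20000000UL;
	unsigned long end = start + len - 1;		/* 0x4fffffff */
	unsigned int low_slices;

	/* Subtracting powers of two yields a contiguous run of set bits. */
	low_slices = (1u << (EX_GET_LOW_SLICE_INDEX(end) + 1))
		   - (1u << EX_GET_LOW_SLICE_INDEX(start));

	printf("low_slices = %#x\n", low_slices);	/* 0x18: slices 3 and 4 */
	return 0;
}
#endif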

static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = (unsigned long)SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}
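
/*
 * Editor's note (illustrative): on book3s64 hash, SLICE_HIGH_SHIFT is 40,
 * so high slices are 1TB each. After the adjustment above, high slice 0
 * is checked over [4GB, 1TB) rather than [0, 1TB), leaving everything
 * below 4GB to the sixteen 256MB low slices.
 */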

static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned long i;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (slice_addr_is_low(high_limit - 1))
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}

static bool slice_check_range_fits(struct mm_struct *mm,
			   const struct slice_mask *available,
			   unsigned long start, unsigned long len)
{
	unsigned long end = start + len - 1;
	u64 low_slices = 0;

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
				- (1u << GET_LOW_SLICE_INDEX(start));
	}
	if ((low_slices & available->low_slices) != low_slices)
		return false;

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
		unsigned long i;

		for (i = start_index; i < start_index + count; i++) {
			if (!test_bit(i, available->high_slices))
				return false;
		}
	}

	return true;
}

static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_restore_bolted();
	local_irq_restore(flags);
#endif
}

static void slice_convert(struct mm_struct *mm,
				const struct slice_mask *mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *psize_mask, *old_mask;
	unsigned long i, flags;
	int old_psize;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	psize_mask = slice_mask_for_size(&mm->context, psize);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm_ctx_low_slices(&mm->context);
	for (i = 0; i < SLICE_NUM_LOW; i++) {
		if (!(mask->low_slices & (1u << i)))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		old_mask->low_slices &= ~(1u << i);
		psize_mask->low_slices |= 1u << i;

		/* Update the sizes array */
		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	hpsizes = mm_ctx_high_slices(&mm->context);
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
		if (!test_bit(i, mask->high_slices))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		__clear_bit(i, old_mask->high_slices);
		__set_bit(i, psize_mask->high_slices);

		/* Update the sizes array */
		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm_ctx_low_slices(&mm->context),
		  (unsigned long)mm_ctx_high_slices(&mm->context));

	spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}
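
#if 0	/* Editor's illustration, not part of the kernel source: how one
	 * 4-bit psize nibble is rewritten, mirroring the lpsizes update
	 * in slice_convert() above. Slice 5 and psize 4 are hypothetical
	 * example values. */
#include <stdio.h>

int main(void)
{
	unsigned char lpsizes[8] = { 0 };
	unsigned long i = 5, psize = 4;
	int mask_index = i & 0x1;	/* odd slice -> high nibble */
	int index = i >> 1;		/* two slices share each byte */

	/* Clear the old nibble, then OR in the new psize. */
	lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
			 (((unsigned long)psize) << (mask_index * 4));

	printf("lpsizes[%d] = %#x\n", index, lpsizes[index]);	/* 0x40 */
	return 0;
}
#endif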

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return boolean indicating if the slice is marked as available in the
 * 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 const struct slice_mask *available,
				 int end, unsigned long *boundary_addr)
{
	unsigned long slice;
	if (slice_addr_is_low(addr)) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available->low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available->high_slices);
	}
}
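
/*
 * Editor's example (illustrative, again assuming 256MB low slices): for
 * an addr inside low slice 2, *boundary_addr becomes 2 << SLICE_LOW_SHIFT
 * (the slice's start) when end == 0, and 3 << SLICE_LOW_SHIFT (the next
 * boundary) when end == 1. The high-slice branch special-cases a zero
 * result so the first high boundary is SLICE_LOW_TOP (4GB) rather than 0.
 */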

static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long addr, unsigned long len,
					      const struct slice_mask *available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long found, next_end;
	struct vm_unmapped_area_info info = {
		.length = len,
		.align_mask = PAGE_MASK & ((1ul << pshift) - 1),
	};
	/*
	 * Check up to the maximum address allowed for this mmap request.
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}
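
/*
 * Editor's note (illustrative): vm_unmapped_area() returns either a
 * page-aligned address or a negative errno cast to unsigned long, so
 * "!(found & ~PAGE_MASK)" is true only for an aligned success value;
 * error values have low bits set, and the scan continues.
 */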

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long addr, unsigned long len,
					     const struct slice_mask *available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long found, prev;
	struct vm_unmapped_area_info info = {
		.flags = VM_UNMAPPED_AREA_TOPDOWN,
		.length = len,
		.align_mask = PAGE_MASK & ((1ul << pshift) - 1),
	};
	unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);

	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to mmap_base. This should only be
	 * applied to requests whose high_limit is above
	 * DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;

	while (addr > min_addr) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < min_addr)
			addr = min_addr;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, TASK_UNMAPPED_BASE, len, available, psize, high_limit);
}

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     const struct slice_mask *mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, mm->mmap_base, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, mm->mmap_base, len, mask, psize, high_limit);
}

static inline void slice_copy_mask(struct slice_mask *dst,
					const struct slice_mask *src)
{
	dst->low_slices = src->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices | src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices & ~src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	const struct slice_mask *maskp;
	const struct slice_mask *compat_maskp = NULL;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
		/*
		 * Increasing the slb_addr_limit does not require
		 * slice mask cache to be recalculated because it should
		 * be already initialised beyond the old address limit.
		 */
		mm_ctx_set_slb_addr_limit(&mm->context, high_limit);

		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = ALIGN(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len || addr < mmap_min_addr ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	maskp = slice_mask_for_size(&mm->context, psize);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

	/*
	 * If we support combo pages, we can allow 64k pages in 4k slices.
	 * The mask copies could be avoided in most cases here if we had
	 * a pointer to good mask for the next code to use.
	 */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, maskp, compat_maskp);
		else
			slice_copy_mask(&good_mask, maskp);
	} else {
		slice_copy_mask(&good_mask, maskp);
	}

	slice_print_mask(" good_mask", &good_mask);
	if (compat_maskp)
		slice_print_mask(" compat_mask", compat_maskp);

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
			slice_dbg(" fits good !\n");
			newaddr = addr;
			goto return_addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}
	/*
	 * We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
	slice_print_mask(" potential", &potential_mask);

	if (addr != 0 || fixed) {
		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
			slice_dbg(" fits potential !\n");
			newaddr = addr;
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	newaddr = slice_find_area(mm, len, &potential_mask,
				  psize, topdown, high_limit);

	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
	    psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
		newaddr = slice_find_area(mm, len, &potential_mask,
					  psize, topdown, high_limit);
	}

	if (newaddr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(newaddr, len, &potential_mask);
	slice_dbg(" found potential area at 0x%lx\n", newaddr);
	slice_print_mask(" mask", &potential_mask);

convert:
	/*
	 * Try to allocate the context before we do slice convert
	 * so that we handle the context allocation failure gracefully.
	 */
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}

	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
	if (compat_maskp && !fixed)
		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
	if (potential_mask.low_slices ||
		(SLICE_NUM_HIGH &&
		 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, &potential_mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return newaddr;

return_addr:
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}
	return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

#ifdef CONFIG_HUGETLB_PAGE
static int file_to_psize(struct file *file)
{
	struct hstate *hstate = hstate_file(file);

	return shift_to_mmu_psize(huge_page_shift(hstate));
}
#else
static int file_to_psize(struct file *file)
{
	return 0;
}
#endif

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags,
				     vm_flags_t vm_flags)
{
	unsigned int psize;

	if (radix_enabled())
		return generic_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);

	if (filp && is_file_hugepages(filp))
		psize = file_to_psize(filp);
	else
		psize = mm_ctx_user_psize(&current->mm->context);

	return slice_get_unmapped_area(addr, len, flags, psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags,
					     vm_flags_t vm_flags)
{
	unsigned int psize;

	if (radix_enabled())
		return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags, vm_flags);

	if (filp && is_file_hugepages(filp))
		psize = file_to_psize(filp);
	else
		psize = mm_ctx_user_psize(&current->mm->context);

	return slice_get_unmapped_area(addr0, len, flags, psize, 1);
}

unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *psizes;
	int index, mask_index;

	VM_BUG_ON(radix_enabled());

	if (slice_addr_is_low(addr)) {
		psizes = mm_ctx_low_slices(&mm->context);
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm_ctx_high_slices(&mm->context);
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	mask_index = index & 0x1;
	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);
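
#if 0	/* Editor's illustration, not part of the kernel source: decoding
	 * the nibble written in the slice_convert() example above
	 * (hypothetical slice 5 holding psize 4). */
#include <stdio.h>

int main(void)
{
	unsigned char psizes[8] = { [2] = 0x40 };	/* byte shared by slices 4 and 5 */
	int index = 5, mask_index = index & 0x1;

	/* High nibble because index is odd. */
	printf("psize = %d\n", (psizes[index >> 1] >> (mask_index * 4)) & 0xf);
	return 0;	/* prints "psize = 4" */
}
#endif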

void slice_init_new_context_exec(struct mm_struct *mm)
{
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *mask;
	unsigned int psize = mmu_virtual_psize;

	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

	/*
	 * In the case of exec, use the default limit. In the
	 * case of fork it is just inherited from the mm being
	 * duplicated.
	 */
	mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
	mm_ctx_set_user_psize(&mm->context, psize);

	/*
	 * Set all slice psizes to the default.
	 */
	lpsizes = mm_ctx_low_slices(&mm->context);
	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

	hpsizes = mm_ctx_high_slices(&mm->context);
	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

	/*
	 * Slice mask cache starts zeroed, fill the default size cache.
	 */
	mask = slice_mask_for_size(&mm->context, psize);
	mask->low_slices = ~0UL;
	if (SLICE_NUM_HIGH)
		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}
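
/*
 * Editor's note (illustrative): memset()ing with (psize << 4) | psize
 * replicates the 4-bit psize into both nibbles of every byte, so each
 * pair of slices sharing a byte starts at the default size; a psize
 * index of 3, for example, fills the arrays with 0x33.
 */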

void slice_setup_new_exec(void)
{
	struct mm_struct *mm = current->mm;

	slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);

	if (!is_32bit_task())
		return;

	mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
}

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, &mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * because of that generic code limitation, MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
				 unsigned long len)
{
	const struct slice_mask *maskp;
	unsigned int psize = mm_ctx_user_psize(&mm->context);

	VM_BUG_ON(radix_enabled());

	maskp = slice_mask_for_size(&mm->context, psize);

	/* We need to account for 4k slices too */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		const struct slice_mask *compat_maskp;
		struct slice_mask available;

		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		slice_or_mask(&available, maskp, compat_maskp);
		return !slice_check_range_fits(mm, &available, addr, len);
	}

	return !slice_check_range_fits(mm, maskp, addr, len);
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	/* With radix we don't use slices, so derive it from the vma */
	if (radix_enabled())
		return vma_kernel_pagesize(vma);

	return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
}
#endif