GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/compat/linuxkpi/common/src/linux_page.c
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy ([email protected])
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/memrange.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

#ifdef __i386__
DEFINE_IDR(mtrr_idr);
static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
extern int pat_works;
#endif

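/*
 * Fill in a Linux-style struct sysinfo from FreeBSD's global memory
 * counters; values are reported in units of PAGE_SIZE.
 */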
void
si_meminfo(struct sysinfo *si)
{
	si->totalram = physmem;
	si->freeram = vm_free_count();
	si->totalhigh = 0;
	si->freehigh = 0;
	si->mem_unit = PAGE_SIZE;
}

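/*
 * Equivalent of Linux page_address(): return the kernel virtual address
 * backing a page.  Pages outside kernel_object are resolved through the
 * direct map when one exists; pages belonging to kernel_object have
 * their address derived from their index in the kernel map.
 */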
void *
linux_page_address(const struct page *page)
{

	if (page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}

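/*
 * Back-end for the Linux alloc_pages() family: allocate 2^order wired
 * pages.  With a direct map the pages come straight from the VM page
 * allocator, honoring M_ZERO, GFP_DMA32 and __GFP_NORETRY and retrying
 * contiguous requests through vm_page_reclaim_contig(); without a
 * direct map the allocation is backed by linux_alloc_kmem().
 */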
struct page *
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	struct page *page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = VM_ALLOC_WIRED;

		if ((flags & M_ZERO) != 0)
			req |= VM_ALLOC_ZERO;

		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc_noobj(req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;

			if ((flags & __GFP_NORETRY) != 0)
				req |= VM_ALLOC_NORECLAIM;

		retry:
			page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
			    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
			if (page == NULL) {
				if ((flags & (M_WAITOK | __GFP_NORETRY)) ==
				    M_WAITOK) {
					int err = vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0);
					if (err == ENOMEM)
						vm_wait(NULL);
					else if (err != 0)
						return (NULL);
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = virt_to_page((void *)vaddr);

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}

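/* Release a kernel virtual allocation of 2^order pages. */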
static void
_linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free((void *)addr, size);
}

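/*
 * Release pages obtained through the Linux page allocation interfaces.
 * With a direct map each page is unwired (and freed when unmanaged);
 * the comment in the loop explains why managed, e.g. shmem-backed,
 * pages are treated differently.  Without a direct map the backing
 * kmem allocation is freed instead.
 */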
void
linux_free_pages(struct page *page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			/*
			 * The "free page" function is used in several
			 * contexts.
			 *
			 * Some pages are allocated by `linux_alloc_pages()`
			 * above, but not all of them are.  For instance in the
			 * DRM drivers, some pages come from
			 * `shmem_read_mapping_page_gfp()`.
			 *
			 * That's why we need to check if the page is managed
			 * or not here.
			 */
			if ((pgo->oflags & VPO_UNMANAGED) == 0) {
				vm_page_unwire(pgo, PQ_ACTIVE);
			} else {
				if (vm_page_unwire_noq(pgo))
					vm_page_free(pgo);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		_linux_free_kmem(vaddr, order);
	}
}

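/*
 * Linux release_pages(): drop the reference on every page in the array.
 * A folio's first member is its page, so both forms of the argument are
 * handled by __free_page().
 */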
void
linux_release_pages(release_pages_arg arg, int nr)
{
	int i;

	CTASSERT(offsetof(struct folio, page) == 0);

	for (i = 0; i < nr; i++)
		__free_page(arg.pages[i]);
}

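/*
 * Allocate a physically contiguous kernel virtual mapping of 2^order
 * pages, constrained below 4GB when GFP_DMA32 is set.  Returns 0 on
 * failure.
 */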
vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	void *addr;

	addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
	    ((flags & GFP_DMA32) == 0) ? -1UL : BUS_SPACE_MAXADDR_32BIT,
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

	return ((vm_offset_t)addr);
}

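/*
 * Free memory obtained from linux_alloc_kmem() or linux_alloc_pages():
 * addresses within the kernel map are returned to kmem, while direct
 * map addresses are converted back to their vm_page and released via
 * linux_free_pages().
 */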
void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	KASSERT((addr & ~PAGE_MASK) == 0,
	    ("%s: addr %p is not page aligned", __func__, (void *)addr));

	if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) {
		_linux_free_kmem(addr, order);
	} else {
		vm_page_t page;

		page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr));
		linux_free_pages(page, order);
	}
}

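/*
 * Common helper for the get_user_pages() variants below: fault in and
 * hold nr_pages pages of the given map starting at "start", storing
 * them in "pages".  Returns nr_pages on success or -EFAULT if the range
 * could not be held.
 */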
static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ptoa((vm_offset_t)nr_pages);
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages,
	    nr_pages);
	return (count == -1 ? -EFAULT : nr_pages);
}

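/*
 * Fast-path get_user_pages(): translate already-resident user pages via
 * pmap_extract_and_hold() without taking faults.  Stops at the first
 * page that is not mapped with the required protection and returns the
 * number of pages held.
 */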
int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + ptoa((vm_offset_t)nr_pages);
	if (!vm_map_range_valid(map, start, end))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}

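/*
 * get_user_pages() against another task's map.  The mm and vmas
 * arguments are accepted for Linux compatibility but not used.
 */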
long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, unsigned int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

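/* get_user_pages() for the current process, honoring FOLL_WRITE. */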
long
lkpi_get_user_pages(unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

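/*
 * An address is treated as a vmalloc address here if it belongs to a
 * UMA slab, i.e. it came from the kernel malloc/UMA layer.
 */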
int
is_vmalloc_addr(const void *addr)
{
	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

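/*
 * Insert the page backing physical frame "pfn" into the VMA's VM object
 * at the offset corresponding to "addr"; the object must be
 * write-locked by the caller.  A page that still belongs to another
 * object (e.g. shmem) is busied, removed from that object and the
 * insertion retried; if it is still mapped it cannot be renamed and the
 * insertion is skipped.
 */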
vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
	struct pctrie_iter pages;
	vm_object_t vm_obj = vma->vm_obj;
	vm_object_t tmp_obj;
	vm_page_t page;
	vm_pindex_t pindex;

	VM_OBJECT_ASSERT_WLOCKED(vm_obj);
	vm_page_iter_init(&pages, vm_obj);
	pindex = OFF_TO_IDX(addr - vma->vm_start);
	if (vma->vm_pfn_count == 0)
		vma->vm_pfn_first = pindex;
	MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

retry:
	page = vm_page_grab_iter(vm_obj, pindex, VM_ALLOC_NOCREAT, &pages);
	if (page == NULL) {
		page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
			pctrie_iter_reset(&pages);
			goto retry;
		}
		if (page->object != NULL) {
			tmp_obj = page->object;
			vm_page_xunbusy(page);
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_OBJECT_WLOCK(tmp_obj);
			if (page->object == tmp_obj &&
			    vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
				KASSERT(page->object == tmp_obj,
				    ("page has changed identity"));
				KASSERT((page->oflags & VPO_UNMANAGED) == 0,
				    ("page does not belong to shmem"));
				vm_pager_page_unswapped(page);
				if (pmap_page_is_mapped(page)) {
					vm_page_xunbusy(page);
					VM_OBJECT_WUNLOCK(tmp_obj);
					printf("%s: page rename failed: page "
					    "is mapped\n", __func__);
					VM_OBJECT_WLOCK(vm_obj);
					return (VM_FAULT_NOPAGE);
				}
				vm_page_remove(page);
			}
			VM_OBJECT_WUNLOCK(tmp_obj);
			pctrie_iter_reset(&pages);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}
		if (vm_page_iter_insert(page, vm_obj, pindex, &pages) != 0) {
			vm_page_xunbusy(page);
			return (VM_FAULT_OOM);
		}
		vm_page_valid(page);
	}
	pmap_page_set_memattr(page, pgprot2cachemode(prot));
	vma->vm_pfn_count++;

	return (VM_FAULT_NOPAGE);
}

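/*
 * Back the range [start_addr, start_addr + size) of the VMA with the
 * physical frames starting at start_pfn, retrying individual
 * insertions on VM_FAULT_OOM.  On error the partially established
 * mappings are torn down with zap_vma_ptes().
 */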
int
lkpi_remap_pfn_range(struct vm_area_struct *vma, unsigned long start_addr,
    unsigned long start_pfn, unsigned long size, pgprot_t prot)
{
	vm_object_t vm_obj;
	unsigned long addr, pfn;
	int err = 0;

	vm_obj = vma->vm_obj;

	VM_OBJECT_WLOCK(vm_obj);
	for (addr = start_addr, pfn = start_pfn;
	    addr < start_addr + size;
	    addr += PAGE_SIZE) {
		vm_fault_t ret;
retry:
		ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);

		if ((ret & VM_FAULT_OOM) != 0) {
			VM_OBJECT_WUNLOCK(vm_obj);
			vm_wait(NULL);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}

		if ((ret & VM_FAULT_ERROR) != 0) {
			err = -EFAULT;
			break;
		}

		pfn++;
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	if (unlikely(err)) {
		zap_vma_ptes(vma, start_addr,
		    (pfn - start_pfn) << PAGE_SHIFT);
		return (err);
	}

	return (0);
}

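/*
 * Map "size" bytes of an io_mapping into the user VMA at "addr", using
 * the cache attribute recorded in the io_mapping.
 */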
int
lkpi_io_mapping_map_user(struct io_mapping *iomap,
    struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size)
{
	pgprot_t prot;
	int ret;

	prot = cachemode2protval(iomap->attr);
	ret = lkpi_remap_pfn_range(vma, addr, pfn, size, prot);

	return (ret);
}

/*
 * Although the FreeBSD version of unmap_mapping_range has semantics and
 * parameter types compatible with the Linux version, the values passed in
 * are different:
 * @obj should match the vm_private_data field of the vm_area_struct returned
 * by the mmap file operation handler; see linux_file_mmap_single().
 * @holelen should match the size of the area to be unmapped.
 */
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen __unused, int even_cows __unused)
{
	vm_object_t devobj;

	devobj = cdev_pager_lookup(obj);
	if (devobj != NULL) {
		cdev_mgtdev_pager_free_pages(devobj);
		vm_object_deallocate(devobj);
	}
}

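/*
 * Linux arch_phys_wc_add(): request write-combining for a physical
 * range.  Only i386 without working PAT needs an MTRR entry, which is
 * tracked in mtrr_idr so that lkpi_arch_phys_wc_del() can remove it
 * later; elsewhere this is a no-op.
 */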
int
lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int error, id, act;

	/* If PAT is available, do nothing */
	if (pat_works)
		return (0);

	mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
	mrdesc->mr_base = base;
	mrdesc->mr_len = size;
	mrdesc->mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
	act = MEMRANGE_SET_UPDATE;
	error = mem_range_attr_set(mrdesc, &act);
	if (error == 0) {
		error = idr_get_new(&mtrr_idr, mrdesc, &id);
		MPASS(idr_find(&mtrr_idr, id) == mrdesc);
		if (error != 0) {
			act = MEMRANGE_SET_REMOVE;
			mem_range_attr_set(mrdesc, &act);
		}
	}
	if (error != 0) {
		free(mrdesc, M_LKMTRR);
		pr_warn(
		    "Failed to add WC MTRR for [%p-%p]: %d; "
		    "performance may suffer\n",
		    (void *)base, (void *)(base + size - 1), error);
	} else
		pr_warn("Successfully added WC MTRR for [%p-%p]\n",
		    (void *)base, (void *)(base + size - 1));

	return (error != 0 ? -error : id + __MTRR_ID_BASE);
#else
	return (0);
#endif
}

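/*
 * Undo lkpi_arch_phys_wc_add(): look up the MTRR descriptor by its idr
 * handle, remove the memory range attribute and free the descriptor.
 */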
void
lkpi_arch_phys_wc_del(int reg)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int act;

	/* Check if arch_phys_wc_add() failed. */
	if (reg < __MTRR_ID_BASE)
		return;

	mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
	MPASS(mrdesc != NULL);
	idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
	act = MEMRANGE_SET_REMOVE;
	mem_range_attr_set(mrdesc, &act);
	free(mrdesc, M_LKMTRR);
#endif
}

/*
 * This is a highly simplified version of the Linux page_frag_cache.
 * We only support fragments up to a single page in size and we will
 * always return a full page.  This may be wasteful on small objects
 * but the only known consumer (mt76) is either asking for a half-page
 * or a full page.  If this were to become a problem we can implement
 * a more elaborate version.
 */
void *
linuxkpi_page_frag_alloc(struct page_frag_cache *pfc,
    size_t fragsz, gfp_t gfp)
{
	vm_page_t pages;

	if (fragsz == 0)
		return (NULL);

	KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet "
	    "supported", __func__, fragsz));

	pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1));
	if (pages == NULL)
		return (NULL);
	pfc->va = linux_page_address(pages);

	/* Passed in as "count" to __page_frag_cache_drain(). Unused by us. */
	pfc->pagecnt_bias = 0;

	return (pfc->va);
}

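/* Free a fragment previously returned by linuxkpi_page_frag_alloc(). */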
void
linuxkpi_page_frag_free(void *addr)
{
	vm_page_t page;

	page = virt_to_page(addr);
	linux_free_pages(page, 0);
}

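/* Release the cached page; the count argument is unused here. */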
void
linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
{

	linux_free_pages(page, 0);
}