GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/compat/linuxkpi/common/src/linux_page.c
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy ([email protected])
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/memrange.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

#ifdef __i386__
DEFINE_IDR(mtrr_idr);
static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
extern int pat_works;
#endif

void
si_meminfo(struct sysinfo *si)
{
	si->totalram = physmem;
	si->freeram = vm_free_count();
	si->totalhigh = 0;
	si->freehigh = 0;
	si->mem_unit = PAGE_SIZE;
}

void *
linux_page_address(const struct page *page)
{

	if (page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}
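
/*
 * Editorial usage sketch (not part of the upstream file): how a caller might
 * round-trip between a struct page and its kernel virtual address using
 * page_address() and virt_to_page().  The helper name below is hypothetical;
 * alloc_pages()/__free_page() resolve to linux_alloc_pages()/
 * linux_free_pages() in this file, and the gfp flag mapping comes from
 * <linux/gfp.h>.
 */
#if 0	/* illustrative only, not compiled */
static void
page_address_example(void)
{
	struct page *page;
	void *va;

	/* One zeroed page; see linux_alloc_pages() below. */
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
	if (page == NULL)
		return;

	/*
	 * On PMAP_HAS_DMAP systems this resolves through the direct map;
	 * virt_to_page() reverses the translation.
	 */
	va = page_address(page);
	KASSERT(virt_to_page(va) == page, ("page/VA round trip failed"));

	__free_page(page);
}
#endif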

struct page *
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	struct page *page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = VM_ALLOC_WIRED;

		if ((flags & M_ZERO) != 0)
			req |= VM_ALLOC_ZERO;

		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc_noobj(req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;

			if ((flags & __GFP_NORETRY) != 0)
				req |= VM_ALLOC_NORECLAIM;

		retry:
			page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
			    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
			if (page == NULL) {
				if ((flags & (M_WAITOK | __GFP_NORETRY)) ==
				    M_WAITOK) {
					int err = vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0);
					if (err == ENOMEM)
						vm_wait(NULL);
					else if (err != 0)
						return (NULL);
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = virt_to_page((void *)vaddr);

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}
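
/*
 * Editorial usage sketch (not part of the upstream file): requesting a
 * physically contiguous, 32-bit-addressable buffer.  An order greater than
 * zero or GFP_DMA32 takes the contiguous path in linux_alloc_pages() above,
 * and __GFP_NORETRY maps to VM_ALLOC_NORECLAIM so the allocation fails fast
 * instead of reclaiming.  The helper names are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static void *
dma32_buffer_alloc_example(struct page **pagep)
{
	struct page *page;

	/* Four contiguous pages (order 2) below 4 GiB. */
	page = alloc_pages(GFP_KERNEL | GFP_DMA32 | __GFP_NORETRY, 2);
	if (page == NULL)
		return (NULL);

	*pagep = page;
	return (page_address(page));
}

static void
dma32_buffer_free_example(struct page *page)
{
	/* Free with the same order that was used for the allocation. */
	linux_free_pages(page, 2);
}
#endif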

static void
_linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free((void *)addr, size);
}

void
linux_free_pages(struct page *page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			/*
			 * The "free page" function is used in several
			 * contexts.
			 *
			 * Some pages are allocated by `linux_alloc_pages()`
			 * above, but not all of them are.  For instance in the
			 * DRM drivers, some pages come from
			 * `shmem_read_mapping_page_gfp()`.
			 *
			 * That's why we need to check if the page is managed
			 * or not here.
			 */
			if ((pgo->oflags & VPO_UNMANAGED) == 0) {
				vm_page_unwire(pgo, PQ_ACTIVE);
			} else {
				if (vm_page_unwire_noq(pgo))
					vm_page_free(pgo);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		_linux_free_kmem(vaddr, order);
	}
}

void
linux_release_pages(release_pages_arg arg, int nr)
{
	int i;

	CTASSERT(offsetof(struct folio, page) == 0);

	for (i = 0; i < nr; i++)
		__free_page(arg.pages[i]);
}

vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	void *addr;

	addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
	    ((flags & GFP_DMA32) == 0) ? -1UL : BUS_SPACE_MAXADDR_32BIT,
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

	return ((vm_offset_t)addr);
}

void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	KASSERT((addr & PAGE_MASK) == 0,
	    ("%s: addr %p is not page aligned", __func__, (void *)addr));

	if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) {
		_linux_free_kmem(addr, order);
	} else {
		vm_page_t page;

		page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr));
		linux_free_pages(page, order);
	}
}
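
/*
 * Editorial usage sketch (not part of the upstream file): an order-based
 * kernel-memory allocation round trip through linux_alloc_kmem() and
 * linux_free_kmem().  The helper name is hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static void
kmem_order_example(void)
{
	vm_offset_t vaddr;

	/*
	 * Order 1 means two physically contiguous pages, restricted below
	 * 4 GiB here via GFP_DMA32.
	 */
	vaddr = linux_alloc_kmem(GFP_KERNEL | GFP_DMA32, 1);
	if (vaddr == 0)
		return;

	memset((void *)vaddr, 0, 2 * PAGE_SIZE);

	/* linux_free_kmem() handles both kernel-map and DMAP addresses. */
	linux_free_kmem(vaddr, 1);
}
#endif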

static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ptoa((vm_offset_t)nr_pages);
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages,
	    nr_pages);
	return (count == -1 ? -EFAULT : nr_pages);
}

int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + ptoa((vm_offset_t)nr_pages);
	if (!vm_map_range_valid(map, start, end))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}
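
/*
 * Editorial usage sketch (not part of the upstream file): pinning a user
 * buffer with __get_user_pages_fast() and dropping the wirings afterwards
 * the same way linux_release_pages() above does, via __free_page().  The
 * helper name is hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static int
pin_user_buffer_example(unsigned long uaddr, int npages)
{
	struct page **pages;
	int i, got;

	pages = kmalloc(npages * sizeof(*pages), GFP_KERNEL);
	if (pages == NULL)
		return (-ENOMEM);

	/* Wire the resident user pages; may return fewer than requested. */
	got = __get_user_pages_fast(uaddr, npages, 1 /* write */, pages);
	if (got <= 0) {
		kfree(pages);
		return (got < 0 ? got : -EFAULT);
	}

	/* ... program DMA against the wired pages here ... */

	/* Drop the wirings again. */
	for (i = 0; i < got; i++)
		__free_page(pages[i]);
	kfree(pages);
	return (0);
}
#endif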

long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, unsigned int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

long
lkpi_get_user_pages(unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void 			*vm_addr;
	unsigned long		vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

int
is_vmalloc_addr(const void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (addr == vmmap->vm_addr)
			break;
	mtx_unlock(&vmmaplock);
	if (vmmap != NULL)
		return (1);

	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
	pmap_unmapdev(addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}
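
/*
 * Editorial usage sketch (not part of the upstream file): pairing
 * _ioremap_attr() with iounmap().  The vmmap hash above records the mapping
 * size, so iounmap() only needs the address.  The helper name and register
 * range are hypothetical, and VM_MEMATTR_WRITE_COMBINING is just an example
 * attribute; drivers normally go through the <linux/io.h> ioremap wrappers
 * rather than calling _ioremap_attr() directly.
 */
#if 0	/* illustrative only, not compiled */
static void
ioremap_example(vm_paddr_t bar_phys, unsigned long bar_size)
{
	void *regs;

	regs = _ioremap_attr(bar_phys, bar_size, VM_MEMATTR_WRITE_COMBINING);
	if (regs == NULL)
		return;

	/* ... access the mapped device registers ... */

	/* The size is looked up in the vmmap hash. */
	iounmap(regs);
}
#endif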

void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
	vm_offset_t off;
	size_t size;

	size = count * PAGE_SIZE;
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);
	pmap_qenter(off, pages, count);

	return ((void *)off);
}

#define	VMAP_MAX_CHUNK_SIZE	(65536U / sizeof(struct vm_page))	/* KMEM_ZMAX */

void *
linuxkpi_vmap_pfn(unsigned long *pfns, unsigned int count, int prot)
{
	vm_page_t m, *ma, fma;
	vm_offset_t off, coff;
	vm_paddr_t pa;
	vm_memattr_t attr;
	size_t size;
	unsigned int i, c, chunk;

	size = ptoa(count);
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);

	chunk = MIN(count, VMAP_MAX_CHUNK_SIZE);
	attr = pgprot2cachemode(prot);
	ma = malloc(chunk * sizeof(vm_page_t), M_TEMP, M_WAITOK | M_ZERO);
	fma = NULL;
	c = 0;
	coff = off;
	for (i = 0; i < count; i++) {
		pa = IDX_TO_OFF(pfns[i]);
		m = PHYS_TO_VM_PAGE(pa);
		if (m == NULL) {
			if (fma == NULL)
				fma = malloc(chunk * sizeof(struct vm_page),
				    M_TEMP, M_WAITOK | M_ZERO);
			m = fma + c;
			vm_page_initfake(m, pa, attr);
		} else {
			pmap_page_set_memattr(m, attr);
		}
		ma[c] = m;
		c++;
		if (c == chunk || i == count - 1) {
			pmap_qenter(coff, ma, c);
			if (i == count - 1)
				break;
			coff += ptoa(c);
			c = 0;
			memset(ma, 0, chunk * sizeof(vm_page_t));
			if (fma != NULL)
				memset(fma, 0, chunk * sizeof(struct vm_page));
		}
	}
	free(fma, M_TEMP);
	free(ma, M_TEMP);

	return ((void *)off);
}

void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
	kva_free((vm_offset_t)addr, vmmap->vm_size);
	kfree(vmmap);
}
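
/*
 * Editorial usage sketch (not part of the upstream file): mapping a set of
 * scattered pages into one linear KVA range with vmap() and tearing it down
 * with vunmap().  This vmap() only performs kva_alloc() + pmap_qenter(), so
 * the flags and prot arguments are ignored and 0 is passed for both.  The
 * helper name is hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static uint32_t
vmap_sum_example(struct page **pages, unsigned int npages)
{
	uint8_t *va;
	uint32_t sum;
	size_t i;

	va = vmap(pages, npages, 0, 0);
	if (va == NULL)
		return (0);

	/* Touch the pages through the temporary linear mapping. */
	for (sum = 0, i = 0; i < (size_t)npages * PAGE_SIZE; i++)
		sum += va[i];

	/* vunmap() recovers the mapping size from the vmmap hash. */
	vunmap(va);
	return (sum);
}
#endif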

vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
	struct pctrie_iter pages;
	vm_object_t vm_obj = vma->vm_obj;
	vm_object_t tmp_obj;
	vm_page_t page;
	vm_pindex_t pindex;

	VM_OBJECT_ASSERT_WLOCKED(vm_obj);
	vm_page_iter_init(&pages, vm_obj);
	pindex = OFF_TO_IDX(addr - vma->vm_start);
	if (vma->vm_pfn_count == 0)
		vma->vm_pfn_first = pindex;
	MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

retry:
	page = vm_page_grab_iter(vm_obj, pindex, VM_ALLOC_NOCREAT, &pages);
	if (page == NULL) {
		page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
		if (page == NULL)
			return (VM_FAULT_SIGBUS);
		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
			pctrie_iter_reset(&pages);
			goto retry;
		}
		if (page->object != NULL) {
			tmp_obj = page->object;
			vm_page_xunbusy(page);
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_OBJECT_WLOCK(tmp_obj);
			if (page->object == tmp_obj &&
			    vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
				KASSERT(page->object == tmp_obj,
				    ("page has changed identity"));
				KASSERT((page->oflags & VPO_UNMANAGED) == 0,
				    ("page does not belong to shmem"));
				vm_pager_page_unswapped(page);
				if (pmap_page_is_mapped(page)) {
					vm_page_xunbusy(page);
					VM_OBJECT_WUNLOCK(tmp_obj);
					printf("%s: page rename failed: page "
					    "is mapped\n", __func__);
					VM_OBJECT_WLOCK(vm_obj);
					return (VM_FAULT_NOPAGE);
				}
				vm_page_remove(page);
			}
			VM_OBJECT_WUNLOCK(tmp_obj);
			pctrie_iter_reset(&pages);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}
		if (vm_page_iter_insert(page, vm_obj, pindex, &pages) != 0) {
			vm_page_xunbusy(page);
			return (VM_FAULT_OOM);
		}
		vm_page_valid(page);
	}
	pmap_page_set_memattr(page, pgprot2cachemode(prot));
	vma->vm_pfn_count++;

	return (VM_FAULT_NOPAGE);
}

int
lkpi_remap_pfn_range(struct vm_area_struct *vma, unsigned long start_addr,
    unsigned long start_pfn, unsigned long size, pgprot_t prot)
{
	vm_object_t vm_obj;
	unsigned long addr, pfn;
	int err = 0;

	vm_obj = vma->vm_obj;

	VM_OBJECT_WLOCK(vm_obj);
	for (addr = start_addr, pfn = start_pfn;
	    addr < start_addr + size;
	    addr += PAGE_SIZE) {
		vm_fault_t ret;
retry:
		ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);

		if ((ret & VM_FAULT_OOM) != 0) {
			VM_OBJECT_WUNLOCK(vm_obj);
			vm_wait(NULL);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}

		if ((ret & VM_FAULT_ERROR) != 0) {
			err = -EFAULT;
			break;
		}

		pfn++;
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	if (unlikely(err)) {
		zap_vma_ptes(vma, start_addr,
		    (pfn - start_pfn) << PAGE_SHIFT);
		return (err);
	}

	return (0);
}

int
lkpi_io_mapping_map_user(struct io_mapping *iomap,
    struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size)
{
	pgprot_t prot;
	int ret;

	prot = cachemode2protval(iomap->attr);
	ret = lkpi_remap_pfn_range(vma, addr, pfn, size, prot);

	return (ret);
}

/*
 * Although the FreeBSD version of unmap_mapping_range() has parameter
 * semantics and types compatible with the Linux version, the values passed
 * in differ:
 * @obj must match the vm_private_data field of the vm_area_struct returned
 *	by the mmap file operation handler; see linux_file_mmap_single().
 * @holelen must match the size of the area to be unmapped.
 */
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen __unused, int even_cows __unused)
{
	vm_object_t devobj;

	devobj = cdev_pager_lookup(obj);
	if (devobj != NULL) {
		cdev_mgtdev_pager_free_pages(devobj);
		vm_object_deallocate(devobj);
	}
}

int
lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int error, id, act;

	/* If PAT is available, do nothing */
	if (pat_works)
		return (0);

	mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
	mrdesc->mr_base = base;
	mrdesc->mr_len = size;
	mrdesc->mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
	act = MEMRANGE_SET_UPDATE;
	error = mem_range_attr_set(mrdesc, &act);
	if (error == 0) {
		error = idr_get_new(&mtrr_idr, mrdesc, &id);
		MPASS(idr_find(&mtrr_idr, id) == mrdesc);
		if (error != 0) {
			act = MEMRANGE_SET_REMOVE;
			mem_range_attr_set(mrdesc, &act);
		}
	}
	if (error != 0) {
		free(mrdesc, M_LKMTRR);
		pr_warn(
		    "Failed to add WC MTRR for [%p-%p]: %d; "
		    "performance may suffer\n",
		    (void *)base, (void *)(base + size - 1), error);
	} else
		pr_warn("Successfully added WC MTRR for [%p-%p]\n",
		    (void *)base, (void *)(base + size - 1));

	return (error != 0 ? -error : id + __MTRR_ID_BASE);
#else
	return (0);
#endif
}

void
lkpi_arch_phys_wc_del(int reg)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int act;

	/* Check if arch_phys_wc_add() failed. */
	if (reg < __MTRR_ID_BASE)
		return;

	mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
	MPASS(mrdesc != NULL);
	idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
	act = MEMRANGE_SET_REMOVE;
	mem_range_attr_set(mrdesc, &act);
	free(mrdesc, M_LKMTRR);
#endif
}
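
/*
 * Editorial usage sketch (not part of the upstream file): pairing
 * lkpi_arch_phys_wc_add() with lkpi_arch_phys_wc_del().  The returned cookie
 * must be kept; 0 (PAT available or non-i386 build) and negative values
 * (MTRR update failed) are both below __MTRR_ID_BASE, so the delete path
 * above simply ignores them.  The helper names and state are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static int wc_cookie;	/* hypothetical driver state */

static void
wc_framebuffer_attach_example(unsigned long fb_base, unsigned long fb_size)
{
	wc_cookie = lkpi_arch_phys_wc_add(fb_base, fb_size);
}

static void
wc_framebuffer_detach_example(void)
{
	lkpi_arch_phys_wc_del(wc_cookie);
}
#endif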

/*
 * This is a highly simplified version of the Linux page_frag_cache.
 * We only support fragment sizes up to a single page and we always
 * return a full page.  This may be wasteful for small objects, but the
 * only known consumer (mt76) asks for either a half-page or a full
 * page.  If this were to become a problem we could implement a more
 * elaborate version.
 */
void *
linuxkpi_page_frag_alloc(struct page_frag_cache *pfc,
    size_t fragsz, gfp_t gfp)
{
	vm_page_t pages;

	if (fragsz == 0)
		return (NULL);

	KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet "
	    "supported", __func__, fragsz));

	pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1));
	if (pages == NULL)
		return (NULL);
	pfc->va = linux_page_address(pages);

	/* Passed in as "count" to __page_frag_cache_drain().  Unused by us. */
	pfc->pagecnt_bias = 0;

	return (pfc->va);
}

void
linuxkpi_page_frag_free(void *addr)
{
	vm_page_t page;

	page = virt_to_page(addr);
	linux_free_pages(page, 0);
}

void
linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
{

	linux_free_pages(page, 0);
}
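
/*
 * Editorial usage sketch (not part of the upstream file): the allocate/free
 * cycle of the simplified page_frag_cache above.  The helper name is
 * hypothetical; only the va and pagecnt_bias members of the cache are
 * touched by this implementation.
 */
#if 0	/* illustrative only, not compiled */
static void
page_frag_example(void)
{
	struct page_frag_cache pfc;
	void *frag;

	/*
	 * Even a half-page request returns a full page, per the comment
	 * above linuxkpi_page_frag_alloc().
	 */
	frag = linuxkpi_page_frag_alloc(&pfc, PAGE_SIZE / 2, GFP_KERNEL);
	if (frag == NULL)
		return;

	/* ... fill the fragment and hand it to the driver ... */

	/* Frees the whole backing page. */
	linuxkpi_page_frag_free(frag);
}
#endif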

static void
lkpi_page_init(void *arg)
{
	int i;

	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
}
SYSINIT(lkpi_page, SI_SUB_DRIVERS, SI_ORDER_SECOND, lkpi_page_init, NULL);

static void
lkpi_page_uninit(void *arg)
{
	mtx_destroy(&vmmaplock);
}
SYSUNINIT(lkpi_page, SI_SUB_DRIVERS, SI_ORDER_SECOND, lkpi_page_uninit, NULL);