GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/vm/vm_pager.c
/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */
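/*
 * Each vm_object is backed by a pager: a struct pagerops whose methods
 * create the object (pgo_alloc), move its pages to and from backing
 * store (pgo_getpages/pgo_putpages), query residency (pgo_haspage), and
 * tear the object down (pgo_dealloc).  The table and helpers below
 * dispatch those methods by object type.
 */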
#include <sys/cdefs.h>
#include "opt_param.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
uma_zone_t pbuf_zone;
static int pbuf_init(void *, int, int);
static int pbuf_ctor(void *, int, void *, int);
static void pbuf_dtor(void *, int, void *);

static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);
static void dead_pager_getvp(vm_object_t, struct vnode **, bool *);
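/*
 * The dead pager is installed for objects whose backing store is gone
 * (OBJT_DEAD).  Its methods are deliberate stubs: reads fail, writes are
 * told to try again, and no page is ever reported resident, so lingering
 * references to a dying object fail gracefully instead of touching freed
 * state.
 */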
static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int *rbehind,
    int *rahead)
{

        return (VM_PAGER_FAIL);
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off, struct ucred *cred)
{

        return (NULL);
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    int flags, int *rtvals)
{
        int i;

        for (i = 0; i < count; i++)
                rtvals[i] = VM_PAGER_AGAIN;
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev, int *next)
{

        if (prev != NULL)
                *prev = 0;
        if (next != NULL)
                *next = 0;
        return (FALSE);
}

static void
dead_pager_dealloc(vm_object_t object)
{

}

static void
dead_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
{
        /*
         * For OBJT_DEAD objects, v_writecount was handled in
         * vnode_pager_dealloc().
         */
}
static const struct pagerops deadpagerops = {
        .pgo_kvme_type = KVME_TYPE_DEAD,
        .pgo_alloc = dead_pager_alloc,
        .pgo_dealloc = dead_pager_dealloc,
        .pgo_getpages = dead_pager_getpages,
        .pgo_putpages = dead_pager_putpages,
        .pgo_haspage = dead_pager_haspage,
        .pgo_getvp = dead_pager_getvp,
};

const struct pagerops *pagertab[16] __read_mostly = {
        [OBJT_SWAP] = &swappagerops,
        [OBJT_VNODE] = &vnodepagerops,
        [OBJT_DEVICE] = &devicepagerops,
        [OBJT_PHYS] = &physpagerops,
        [OBJT_DEAD] = &deadpagerops,
        [OBJT_SG] = &sgpagerops,
        [OBJT_MGTDEVICE] = &mgtdevicepagerops,
};
static struct mtx pagertab_lock;
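/*
 * Slots below OBJT_FIRST_DYN hold the statically known pagers listed
 * above; the remaining slots are handed out at runtime by
 * vm_pager_alloc_dyn_type() under pagertab_lock.
 */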

void
vm_pager_init(void)
{
        const struct pagerops **pgops;
        int i;

        mtx_init(&pagertab_lock, "dynpag", NULL, MTX_DEF);

        /*
         * Initialize known pagers
         */
        for (i = 0; i < OBJT_FIRST_DYN; i++) {
                pgops = &pagertab[i];
                if (*pgops != NULL && (*pgops)->pgo_init != NULL)
                        (*(*pgops)->pgo_init)();
        }
}

static int nswbuf_max;

void
vm_pager_bufferinit(void)
{

        /* Main zone for paging bufs. */
        pbuf_zone = uma_zcreate("pbuf",
            sizeof(struct buf) + PBUF_PAGES * sizeof(vm_page_t),
            pbuf_ctor, pbuf_dtor, pbuf_init, NULL, UMA_ALIGN_CACHE,
            UMA_ZONE_NOFREE);
        /* A few systems may still use this zone directly, so it needs a limit. */
        nswbuf_max += uma_zone_set_max(pbuf_zone, NSWBUF_MIN);
}

uma_zone_t
pbuf_zsecond_create(const char *name, int max)
{
        uma_zone_t zone;

        zone = uma_zsecond_create(name, pbuf_ctor, pbuf_dtor, NULL, NULL,
            pbuf_zone);

#ifdef KMSAN
        /*
         * Shrink the size of the pbuf pools if KMSAN is enabled, otherwise the
         * shadows of the large KVA allocations eat up too much memory.
         */
        max /= 3;
#endif

        /*
         * uma_prealloc() rounds up to items per slab.  If we preallocated
         * immediately on every pbuf_zsecond_create(), we could accumulate
         * too large a difference between the hard limit and the preallocated
         * items, which means wasted memory.
         */
        if (nswbuf_max > 0)
                nswbuf_max += uma_zone_set_max(zone, max);
        else
                uma_prealloc(pbuf_zone, uma_zone_set_max(zone, max));

        return (zone);
}
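/*
 * Illustrative sketch (hypothetical names, not from this file): a pager
 * that wants its own accounted pool of pbufs creates a secondary zone
 * once at boot and then allocates from it like any other UMA zone:
 *
 *	static uma_zone_t mypager_pbuf_zone;
 *
 *	mypager_pbuf_zone = pbuf_zsecond_create("mypbuf", nswbuf / 4);
 *	...
 *	bp = uma_zalloc(mypager_pbuf_zone, M_WAITOK);
 *	... issue I/O using bp ...
 *	uma_zfree(mypager_pbuf_zone, bp);
 */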

static void
pbuf_prealloc(void *arg __unused)
{

        uma_prealloc(pbuf_zone, nswbuf_max);
        nswbuf_max = -1;
}

SYSINIT(pbuf, SI_SUB_KTHREAD_BUF, SI_ORDER_ANY, pbuf_prealloc, NULL);

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
        vm_object_t object;

        MPASS(type < nitems(pagertab));

        object = (*pagertab[type]->pgo_alloc)(handle, size, prot, off, cred);
        if (object != NULL)
                object->type = type;
        return (object);
}
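/*
 * Illustrative sketch (hypothetical caller): the caller picks the object
 * type and lets the pager build the object, e.g. when mapping a device:
 *
 *	object = vm_pager_allocate(OBJT_DEVICE, cdev, size,
 *	    VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
 *	if (object == NULL)
 *		return (EINVAL);
 */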

/*
 * The object must be locked.
 */
void
vm_pager_deallocate(vm_object_t object)
{

        VM_OBJECT_ASSERT_WLOCKED(object);
        MPASS(object->type < nitems(pagertab));
        (*pagertab[object->type]->pgo_dealloc)(object);
}

static void
vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
{
#ifdef INVARIANTS

        /*
         * All pages must be consecutive, busied, not mapped, not fully valid,
         * not dirty and belong to the proper object.  Some pages may be the
         * bogus page, but the first and last pages must be real ones.
         */

        VM_OBJECT_ASSERT_UNLOCKED(object);
        VM_OBJECT_ASSERT_PAGING(object);
        KASSERT(count > 0, ("%s: 0 count", __func__));
        for (int i = 0; i < count; i++) {
                if (m[i] == bogus_page) {
                        KASSERT(i != 0 && i != count - 1,
                            ("%s: page %d is the bogus page", __func__, i));
                        continue;
                }
                vm_page_assert_xbusied(m[i]);
                KASSERT(!pmap_page_is_mapped(m[i]),
                    ("%s: page %p is mapped", __func__, m[i]));
                KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
                    ("%s: request for a valid page %p", __func__, m[i]));
                KASSERT(m[i]->dirty == 0,
                    ("%s: page %p is dirty", __func__, m[i]));
                KASSERT(m[i]->object == object,
                    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
                KASSERT(m[i]->pindex == m[0]->pindex + i,
                    ("%s: page %p isn't consecutive", __func__, m[i]));
        }
#endif
}

/*
 * Page in the pages for the object using its associated pager.
 * The requested page must be fully valid on successful return.
 */
int
vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
#ifdef INVARIANTS
        vm_pindex_t pindex = m[0]->pindex;
#endif
        int r;

        MPASS(object->type < nitems(pagertab));
        vm_pager_assert_in(object, m, count);

        r = (*pagertab[object->type]->pgo_getpages)(object, m, count, rbehind,
            rahead);
        if (r != VM_PAGER_OK)
                return (r);

        for (int i = 0; i < count; i++) {
                /*
                 * If the pager has replaced a page, assert that it updated
                 * the array.
                 */
#ifdef INVARIANTS
                KASSERT(m[i] == vm_page_relookup(object, pindex++),
                    ("%s: mismatch page %p pindex %ju", __func__,
                    m[i], (uintmax_t)pindex - 1));
#endif

                /*
                 * Zero out partially filled data.
                 */
                if (m[i]->valid != VM_PAGE_BITS_ALL)
                        vm_page_zero_invalid(m[i], TRUE);
        }
        return (VM_PAGER_OK);
}
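/*
 * Illustrative sketch of a caller (hypothetical, with busying and
 * paging-in-progress accounting elided): pages are handed to the pager
 * invalid and exclusively busied, and the requested pages come back
 * fully valid when VM_PAGER_OK is returned:
 *
 *	rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
 *	if (rv != VM_PAGER_OK) {
 *		vm_page_free(m);
 *		return (EIO);
 *	}
 */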

int
vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
{

        MPASS(object->type < nitems(pagertab));
        vm_pager_assert_in(object, m, count);

        return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
            count, rbehind, rahead, iodone, arg));
}

/*
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 */

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
        vm_object_t object;

        TAILQ_FOREACH(object, pg_list, pager_object_list) {
                if (object->handle == handle) {
                        VM_OBJECT_WLOCK(object);
                        if ((object->flags & OBJ_DEAD) == 0) {
                                vm_object_reference_locked(object);
                                VM_OBJECT_WUNLOCK(object);
                                break;
                        }
                        VM_OBJECT_WUNLOCK(object);
                }
        }
        return (object);
}

int
vm_pager_alloc_dyn_type(struct pagerops *ops, int base_type)
{
        int res;

        mtx_lock(&pagertab_lock);
        MPASS(base_type == -1 ||
            (base_type >= OBJT_SWAP && base_type < nitems(pagertab)));
        for (res = OBJT_FIRST_DYN; res < nitems(pagertab); res++) {
                if (pagertab[res] == NULL)
                        break;
        }
        if (res == nitems(pagertab)) {
                mtx_unlock(&pagertab_lock);
                return (-1);
        }
        if (base_type != -1) {
                MPASS(pagertab[base_type] != NULL);
#define	FIX(n)								\
                if (ops->pgo_##n == NULL)				\
                        ops->pgo_##n = pagertab[base_type]->pgo_##n
                FIX(init);
                FIX(alloc);
                FIX(dealloc);
                FIX(getpages);
                FIX(getpages_async);
                FIX(putpages);
                FIX(haspage);
                FIX(populate);
                FIX(pageunswapped);
                FIX(update_writecount);
                FIX(release_writecount);
                FIX(set_writeable_dirty);
                FIX(mightbedirty);
                FIX(getvp);
                FIX(freespace);
                FIX(page_inserted);
                FIX(page_removed);
                FIX(can_alloc_page);
#undef	FIX
        }
        pagertab[res] = ops;	/* XXXKIB should be rel, but acq is too much */
        mtx_unlock(&pagertab_lock);
        return (res);
}
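/*
 * Illustrative sketch (hypothetical driver names): a kernel module
 * registers its own pager type at load time, inheriting any methods it
 * leaves NULL from an existing base type, and releases the slot on
 * unload:
 *
 *	static struct pagerops mydrv_pagerops = {
 *		.pgo_kvme_type = KVME_TYPE_MGTDEVICE,
 *		.pgo_getpages = mydrv_getpages,
 *	};
 *	static int mydrv_objtype;
 *
 *	mydrv_objtype = vm_pager_alloc_dyn_type(&mydrv_pagerops,
 *	    OBJT_MGTDEVICE);
 *	if (mydrv_objtype == -1)
 *		return (ENOMEM);
 *	...
 *	vm_pager_free_dyn_type(mydrv_objtype);
 */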

void
vm_pager_free_dyn_type(objtype_t type)
{
        MPASS(type >= OBJT_FIRST_DYN && type < nitems(pagertab));

        mtx_lock(&pagertab_lock);
        MPASS(pagertab[type] != NULL);
        pagertab[type] = NULL;
        mtx_unlock(&pagertab_lock);
}

static int
pbuf_ctor(void *mem, int size, void *arg, int flags)
{
        struct buf *bp = mem;

        bp->b_vp = NULL;
        bp->b_bufobj = NULL;

        /* copied from initpbuf() */
        bp->b_rcred = NOCRED;
        bp->b_wcred = NOCRED;
        bp->b_qindex = 0;	/* On no queue (QUEUE_NONE) */
        bp->b_data = bp->b_kvabase;
        bp->b_xflags = 0;
        bp->b_flags = B_MAXPHYS;
        bp->b_ioflags = 0;
        bp->b_iodone = NULL;
        bp->b_error = 0;
        BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWITNESS, NULL);

        return (0);
}

static void
pbuf_dtor(void *mem, int size, void *arg)
{
        struct buf *bp = mem;

        if (bp->b_rcred != NOCRED) {
                crfree(bp->b_rcred);
                bp->b_rcred = NOCRED;
        }
        if (bp->b_wcred != NOCRED) {
                crfree(bp->b_wcred);
                bp->b_wcred = NOCRED;
        }

        BUF_UNLOCK(bp);
}

static const char pbuf_wmesg[] = "pbufwait";

static int
pbuf_init(void *mem, int size, int flags)
{
        struct buf *bp = mem;

        TSENTER();

        bp->b_kvabase = (void *)kva_alloc(ptoa(PBUF_PAGES));
        if (bp->b_kvabase == NULL)
                return (ENOMEM);
        bp->b_kvasize = ptoa(PBUF_PAGES);
        BUF_LOCKINIT(bp, pbuf_wmesg);
        LIST_INIT(&bp->b_dep);
        bp->b_rcred = bp->b_wcred = NOCRED;
        bp->b_xflags = 0;

        TSEXIT();

        return (0);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the vnode or
 * ref-counted.
 */
void
pbgetvp(struct vnode *vp, struct buf *bp)
{

        KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
        KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));

        bp->b_vp = vp;
        bp->b_flags |= B_PAGING;
        bp->b_bufobj = &vp->v_bufobj;
}

/*
 * Associate a p-buffer with a bufobj.
 *
 * Also sets B_PAGING flag to indicate that the bufobj is not fully
 * associated with the buffer, i.e. the bp has not been linked into the
 * bufobj or ref-counted.
 */
void
pbgetbo(struct bufobj *bo, struct buf *bp)
{

        KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
        KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));

        bp->b_flags |= B_PAGING;
        bp->b_bufobj = bo;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(struct buf *bp)
{

        KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
        KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
        KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
            ("pbrelvp: pager buf on vnode list."));

        bp->b_vp = NULL;
        bp->b_bufobj = NULL;
        bp->b_flags &= ~B_PAGING;
}

/*
 * Disassociate a p-buffer from a bufobj.
 */
void
pbrelbo(struct buf *bp)
{

        KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
        KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
        KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
            ("pbrelbo: pager buf on vnode list."));

        bp->b_bufobj = NULL;
        bp->b_flags &= ~B_PAGING;
}
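/*
 * Illustrative sketch of the p-buffer lifecycle (hypothetical caller).
 * pbuf_ctor() returns the buf locked and pbuf_dtor() unlocks it, so the
 * caller only allocates, associates, does I/O, and frees:
 *
 *	bp = uma_zalloc(pbuf_zone, M_WAITOK);
 *	pbgetbo(bo, bp);
 *	... set up b_data / b_pages and issue the I/O ...
 *	pbrelbo(bp);
 *	uma_zfree(pbuf_zone, bp);
 */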

void
vm_object_set_writeable_dirty(vm_object_t object)
{
        pgo_set_writeable_dirty_t *method;

        MPASS(object->type < nitems(pagertab));

        method = pagertab[object->type]->pgo_set_writeable_dirty;
        if (method != NULL)
                method(object);
}

bool
vm_object_mightbedirty(vm_object_t object)
{
        pgo_mightbedirty_t *method;

        MPASS(object->type < nitems(pagertab));

        method = pagertab[object->type]->pgo_mightbedirty;
        if (method == NULL)
                return (false);
        return (method(object));
}

/*
 * Return the kvme type of the given object.
 * If vpp is not NULL, set *vpp to the vnode associated with the object,
 * or to NULL if the object has none.
 */
int
vm_object_kvme_type(vm_object_t object, struct vnode **vpp)
{
        VM_OBJECT_ASSERT_LOCKED(object);
        MPASS(object->type < nitems(pagertab));

        if (vpp != NULL)
                *vpp = vm_object_vnode(object);
        return (pagertab[object->type]->pgo_kvme_type);
}