GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/vm/vm_pagequeue.h
/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *    Software Distribution Coordinator or [email protected]
 *    School of Computer Science
 *    Carnegie Mellon University
 *    Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _VM_PAGEQUEUE_
#define _VM_PAGEQUEUE_

#ifdef _KERNEL
struct vm_pagequeue {
        struct mtx pq_mutex;
        struct pglist pq_pl;
        int pq_cnt;
        const char * const pq_name;
        uint64_t pq_pdpages;
} __aligned(CACHE_LINE_SIZE);

#if __SIZEOF_LONG__ == 8
#define VM_BATCHQUEUE_SIZE 63
#else
#define VM_BATCHQUEUE_SIZE 15
#endif

struct vm_batchqueue {
        vm_page_t bq_pa[VM_BATCHQUEUE_SIZE];
        int bq_cnt;
} __aligned(CACHE_LINE_SIZE);

#include <vm/uma.h>
#include <sys/_blockcount.h>
#include <sys/pidctrl.h>
struct sysctl_oid;

/*
 * One vm_domain per NUMA domain. Contains pagequeues, free page structures,
 * and accounting.
 *
 * Lock Key:
 *  f   vmd_free_mtx
 *  p   vmd_pageout_mtx
 *  d   vm_domainset_lock
 *  a   atomic
 *  c   const after boot
 *  q   page queue lock
 *
 * A unique page daemon thread manages each vm_domain structure and is
 * responsible for ensuring that some free memory is available by freeing
 * inactive pages and aging active pages. To decide how many pages to process,
 * it uses thresholds derived from the number of pages in the domain:
 *
 *  vmd_page_count
 *  ---
 *   |
 *   |-> vmd_inactive_target (~3%)
 *   |     - The active queue scan target is given by
 *   |       (vmd_inactive_target + vmd_free_target - vmd_free_count).
 *   |
 *   |
 *   |-> vmd_free_target (~2%)
 *   |     - Target for page reclamation.
 *   |
 *   |-> vmd_pageout_wakeup_thresh (~1.8%)
 *   |     - Threshold for waking up the page daemon.
 *   |
 *   |
 *   |-> vmd_free_min (~0.5%)
 *   |     - First low memory threshold.
 *   |     - Causes per-CPU caching to be lazily disabled in UMA.
 *   |     - vm_wait() sleeps below this threshold.
 *   |
 *   |-> vmd_free_severe (~0.25%)
 *   |     - Second low memory threshold.
 *   |     - Triggers aggressive UMA reclamation, disables delayed buffer
 *   |       writes.
 *   |
 *   |-> vmd_free_reserved (~0.13%)
 *   |     - Minimum for VM_ALLOC_NORMAL page allocations.
 *   |-> vmd_pageout_free_min (32 + 2 pages)
 *   |     - Minimum for waking a page daemon thread sleeping in vm_wait().
 *   |-> vmd_interrupt_free_min (2 pages)
 *   |     - Minimum for VM_ALLOC_SYSTEM page allocations.
 *  ---
 *
 *--
 * Free page count regulation:
 *
 * The page daemon attempts to ensure that the free page count is above the free
 * target. It wakes up periodically (every 100ms) to input the current free
 * page shortage (free_target - free_count) to a PID controller, which in
 * response outputs the number of pages to attempt to reclaim. The shortage's
 * current magnitude, rate of change, and cumulative value are together used to
 * determine the controller's output. The page daemon target thus adapts
 * dynamically to the system's demand for free pages, resulting in less
 * burstiness than a simple hysteresis loop.
 *
 * When the free page count drops below the wakeup threshold,
 * vm_domain_allocate() proactively wakes up the page daemon. This helps ensure
 * that the system responds promptly to a large instantaneous free page
 * shortage.
 *
 * The page daemon also attempts to ensure that some fraction of the system's
 * memory is present in the inactive (I) and laundry (L) page queues, so that it
 * can respond promptly to a sudden free page shortage. In particular, the page
 * daemon thread aggressively scans active pages so long as the following
 * condition holds:
 *
 *      len(I) + len(L) + free_target - free_count < inactive_target
 *
 * Otherwise, when the inactive target is met, the page daemon periodically
 * scans a small portion of the active queue in order to maintain up-to-date
 * per-page access history. Unreferenced pages in the active queue thus
 * eventually migrate to the inactive queue.
 *
 * The per-domain laundry thread periodically launders dirty pages based on the
 * number of clean pages freed by the page daemon since the last laundering. If
 * the page daemon fails to meet its scan target (i.e., the PID controller
 * output) because of a shortage of clean inactive pages, the laundry thread
 * attempts to launder enough pages to meet the free page target.
 *
 *--
 * Page allocation priorities:
 *
 * The system defines three page allocation priorities: VM_ALLOC_NORMAL,
 * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT. An interrupt-priority allocation can
 * claim any free page. This priority is used in the pmap layer when attempting
 * to allocate a page for the kernel page tables; in such cases an allocation
 * failure will usually result in a kernel panic. The system priority is used
 * for most other kernel memory allocations, for instance by UMA's slab
 * allocator or the buffer cache. Such allocations will fail if the free count
 * is below interrupt_free_min. All other allocations occur at the normal
 * priority, which is typically used for allocation of user pages, for instance
 * in the page fault handler or when allocating page table pages or pv_entry
 * structures for user pmaps. Such allocations fail if the free count is below
 * the free_reserved threshold.
 *
 *--
 * Free memory shortages:
 *
 * The system uses the free_min and free_severe thresholds to apply
 * back-pressure and give the page daemon a chance to recover. When a page
 * allocation fails due to a shortage and the allocating thread cannot handle
 * failure, it may call vm_wait() to sleep until free pages are available.
 * vm_domain_freecnt_inc() wakes sleeping threads once the free page count rises
 * above the free_min threshold; the page daemon and laundry threads are given
 * priority and will wake up once free_count reaches the (much smaller)
 * pageout_free_min threshold.
 *
 * On NUMA systems, the domainset iterators always prefer NUMA domains where the
 * free page count is above the free_min threshold. This means that given the
 * choice between two NUMA domains, one above the free_min threshold and one
 * below, the former will be used to satisfy the allocation request regardless
 * of the domain selection policy.
 *
 * In addition to reclaiming memory from the page queues, the vm_lowmem event
 * fires every ten seconds so long as the system is under memory pressure (i.e.,
 * vmd_free_count < vmd_free_target). This allows kernel subsystems to register
 * for notifications of free page shortages, upon which they may shrink their
 * caches. Following a vm_lowmem event, UMA's caches are pruned to ensure that
 * they do not contain an excess of unused memory. When a domain is below the
 * free_min threshold, UMA limits the population of per-CPU caches. When a
 * domain falls below the free_severe threshold, UMA's caches are completely
 * drained.
 *
 * If the system encounters a global memory shortage, it may resort to the
 * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL in a
 * last-ditch attempt to free up some pages. Either of the two following
 * conditions will activate the OOM killer:
 *
 *  1. The page daemons collectively fail to reclaim any pages during their
 *     inactive queue scans. After vm_pageout_oom_seq consecutive scans fail,
 *     the page daemon thread votes for an OOM kill, and an OOM kill is
 *     triggered when all page daemons have voted. This heuristic is strict and
 *     may fail to trigger even when the system is effectively deadlocked.
 *
 *  2. Threads in the user fault handler are repeatedly unable to make progress
 *     while allocating a page to satisfy the fault. After
 *     vm_pfault_oom_attempts page allocation failures with intervening
 *     vm_wait() calls, the faulting thread will trigger an OOM kill.
 */
struct vm_domain {
        struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
        struct mtx_padalign vmd_free_mtx;
        struct mtx_padalign vmd_pageout_mtx;
        struct vm_pgcache {
                int domain;
                int pool;
                uma_zone_t zone;
        } vmd_pgcache[VM_NFREEPOOL];
        struct vmem *vmd_kernel_arena;          /* (c) per-domain kva R/W arena. */
        struct vmem *vmd_kernel_rwx_arena;      /* (c) per-domain kva R/W/X arena. */
        struct vmem *vmd_kernel_nofree_arena;   /* (c) per-domain kva NOFREE arena. */
        u_int vmd_domain;                       /* (c) Domain number. */
        u_int vmd_page_count;                   /* (c) Total page count. */
        long vmd_segs;                          /* (c) bitmask of the segments */
        struct pglist vmd_nofreeq;              /* (f) NOFREE page bump allocator. */
        u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
        u_int vmd_pageout_deficit;              /* (a) Estimated number of pages deficit */
        uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];

        /* Paging control variables, used within single threaded page daemon. */
        struct pidctrl vmd_pid;                 /* Pageout controller. */
        bool vmd_oom;                           /* An OOM kill was requested. */
        bool vmd_helper_threads_enabled;        /* Use multiple threads to scan. */
        u_int vmd_inactive_threads;             /* Number of extra helper threads. */
        u_int vmd_inactive_shortage;            /* Per-thread shortage. */
        blockcount_t vmd_inactive_running;      /* Number of inactive threads. */
        blockcount_t vmd_inactive_starting;     /* Number of threads started. */
        u_int vmd_addl_shortage;                /* (a) Shortage accumulator. */
        u_int vmd_inactive_freed;               /* (a) Successful inactive frees. */
        u_int vmd_inactive_us;                  /* (a) Microseconds for above. */
        u_int vmd_inactive_pps;                 /* Exponential decay frees/second. */
        int vmd_oom_seq;
        int vmd_last_active_scan;
        struct vm_page vmd_markers[PQ_COUNT];   /* (q) markers for queue scans */
        struct vm_page vmd_inacthead;           /* marker for LRU-defeating insertions */
        struct vm_page vmd_clock[2];            /* markers for active queue scan */

        int vmd_pageout_wanted;                 /* (a, p) pageout daemon wait channel */
        int vmd_pageout_pages_needed;           /* (d) page daemon waiting for pages? */
        bool vmd_minset;                        /* (d) Are we in vm_min_domains? */
        bool vmd_severeset;                     /* (d) Are we in vm_severe_domains? */
        enum {
                VM_LAUNDRY_IDLE = 0,
                VM_LAUNDRY_BACKGROUND,
                VM_LAUNDRY_SHORTFALL
        } vmd_laundry_request;

        /* Paging thresholds and targets. */
        u_int vmd_clean_pages_freed;            /* (q) accumulator for laundry thread */
        u_int vmd_background_launder_target;    /* (c) */
        u_int vmd_free_reserved;                /* (c) pages reserved for deadlock */
        u_int vmd_free_target;                  /* (c) pages desired free */
        u_int vmd_free_min;                     /* (c) pages desired free */
        u_int vmd_inactive_target;              /* (c) pages desired inactive */
        u_int vmd_pageout_free_min;             /* (c) min pages reserved for kernel */
        u_int vmd_pageout_wakeup_thresh;        /* (c) min pages to wake pagedaemon */
        u_int vmd_interrupt_free_min;           /* (c) reserved pages for int code */
        u_int vmd_free_severe;                  /* (c) severe page depletion point */

        /* Name for sysctl etc. */
        struct sysctl_oid *vmd_oid;
        char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
} __aligned(CACHE_LINE_SIZE);
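
/*
 * Illustrative example (editorial sketch, not part of the upstream header):
 * applying the approximate percentages from the threshold diagram above to a
 * hypothetical domain of 1,048,576 pages (4 GB of 4 KB pages) gives roughly:
 *
 *      vmd_inactive_target        ~3%     ~31,000 pages
 *      vmd_free_target            ~2%     ~21,000 pages
 *      vmd_pageout_wakeup_thresh  ~1.8%   ~19,000 pages
 *      vmd_free_min               ~0.5%    ~5,200 pages
 *      vmd_free_severe            ~0.25%   ~2,600 pages
 *      vmd_free_reserved          ~0.13%   ~1,400 pages
 *
 * The actual values are computed at boot and are marked (c), i.e., constant
 * after boot; the figures above only work through the approximate ratios
 * quoted in the diagram and are not the precise formulas.
 */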

extern struct vm_domain vm_dom[MAXMEMDOM];

#define VM_DOMAIN(n)            (&vm_dom[(n)])
#define VM_DOMAIN_EMPTY(n)      (vm_dom[(n)].vmd_page_count == 0)

#define vm_pagequeue_assert_locked(pq)  mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define vm_pagequeue_lock(pq)           mtx_lock(&(pq)->pq_mutex)
#define vm_pagequeue_lockptr(pq)        (&(pq)->pq_mutex)
#define vm_pagequeue_trylock(pq)        mtx_trylock(&(pq)->pq_mutex)
#define vm_pagequeue_unlock(pq)         mtx_unlock(&(pq)->pq_mutex)

#define vm_domain_free_assert_locked(n) \
        mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
#define vm_domain_free_assert_unlocked(n) \
        mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
#define vm_domain_free_lock(d) \
        mtx_lock(vm_domain_free_lockptr((d)))
#define vm_domain_free_lockptr(d) \
        (&(d)->vmd_free_mtx)
#define vm_domain_free_trylock(d) \
        mtx_trylock(vm_domain_free_lockptr((d)))
#define vm_domain_free_unlock(d) \
        mtx_unlock(vm_domain_free_lockptr((d)))

#define vm_domain_pageout_lockptr(d) \
        (&(d)->vmd_pageout_mtx)
#define vm_domain_pageout_assert_locked(n) \
        mtx_assert(vm_domain_pageout_lockptr((n)), MA_OWNED)
#define vm_domain_pageout_assert_unlocked(n) \
        mtx_assert(vm_domain_pageout_lockptr((n)), MA_NOTOWNED)
#define vm_domain_pageout_lock(d) \
        mtx_lock(vm_domain_pageout_lockptr((d)))
#define vm_domain_pageout_unlock(d) \
        mtx_unlock(vm_domain_pageout_lockptr((d)))

static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

        vm_pagequeue_assert_locked(pq);
        pq->pq_cnt += addend;
}
#define vm_pagequeue_cnt_inc(pq)        vm_pagequeue_cnt_add((pq), 1)
#define vm_pagequeue_cnt_dec(pq)        vm_pagequeue_cnt_add((pq), -1)

static inline void
vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
{

        TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
        vm_pagequeue_cnt_dec(pq);
}
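
/*
 * Usage sketch (editorial illustration, not part of the upstream header):
 * callers are expected to hold the page queue lock around queue manipulation,
 * e.g.:
 *
 *      vm_pagequeue_lock(pq);
 *      vm_pagequeue_remove(pq, m);
 *      vm_pagequeue_unlock(pq);
 *
 * The lock requirement is enforced indirectly: vm_pagequeue_remove() uses
 * vm_pagequeue_cnt_dec(), which expands to vm_pagequeue_cnt_add() and asserts
 * via vm_pagequeue_assert_locked() that pq_mutex is owned.
 */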

static inline void
vm_batchqueue_init(struct vm_batchqueue *bq)
{

        bq->bq_cnt = 0;
}

static inline bool
vm_batchqueue_empty(const struct vm_batchqueue *bq)
{
        return (bq->bq_cnt == 0);
}

static inline int
vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
{
        int slots_free;

        slots_free = nitems(bq->bq_pa) - bq->bq_cnt;
        if (slots_free > 0) {
                bq->bq_pa[bq->bq_cnt++] = m;
                return (slots_free);
        }
        return (slots_free);
}

static inline vm_page_t
vm_batchqueue_pop(struct vm_batchqueue *bq)
{

        if (bq->bq_cnt == 0)
                return (NULL);
        return (bq->bq_pa[--bq->bq_cnt]);
}
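
/*
 * Usage sketch (editorial illustration, not part of the upstream header): a
 * vm_batchqueue is a small fixed-size LIFO used to stage pages so that they
 * can later be processed as a batch, e.g.:
 *
 *      struct vm_batchqueue bq;
 *      vm_page_t m;
 *
 *      vm_batchqueue_init(&bq);
 *      (for each candidate page m:)
 *              if (vm_batchqueue_insert(&bq, m) == 0)
 *                      (the queue is full and m was NOT inserted)
 *      ...
 *      while ((m = vm_batchqueue_pop(&bq)) != NULL)
 *              (process m; pages come back in reverse insertion order)
 *
 * vm_batchqueue_insert() returns the number of slots that were free before
 * the call, so a return value of 0 means the page was rejected and a return
 * value of 1 means the page was stored into the last free slot.
 */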

void vm_domain_set(struct vm_domain *vmd);
void vm_domain_clear(struct vm_domain *vmd);
int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);
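
/*
 * Usage sketch (editorial illustration, not part of the upstream header):
 * vm_domain_allocate() attempts to reserve npages from the domain's free
 * count for an allocation of the given priority, returning nonzero on
 * success and zero when the relevant threshold would be violated.  A caller
 * that cannot tolerate failure might apply back-pressure along these lines;
 * vm_wait_domain() is assumed here to be the domain-specific variant of
 * vm_wait() declared outside this header:
 *
 *      while (vm_domain_allocate(vmd, VM_ALLOC_NORMAL, 1) == 0)
 *              vm_wait_domain(vmd->vmd_domain);
 *      (then take the reserved page from the domain's free lists)
 */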

/*
 * vm_pagequeue_domain:
 *
 *	Return the memory domain the page belongs to.
 */
static inline struct vm_domain *
vm_pagequeue_domain(vm_page_t m)
{

        return (VM_DOMAIN(vm_page_domain(m)));
}

/*
 * Return the number of pages we need to free up or cache.
 * A positive number indicates that we do not have enough free pages.
 */
static inline int
vm_paging_target(struct vm_domain *vmd)
{

        return (vmd->vmd_free_target - vmd->vmd_free_count);
}
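
/*
 * Illustrative example (editorial, not part of the upstream header): with
 * vmd_free_target == 20000 and vmd_free_count == 15000, vm_paging_target()
 * returns 5000, meaning the domain is about 5000 pages short of its free
 * target; a zero or negative result means no reclamation is currently needed
 * to reach the target.
 */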

/*
 * Returns TRUE if the pagedaemon needs to be woken up.
 */
static inline int
vm_paging_needed(struct vm_domain *vmd, u_int free_count)
{

        return (free_count < vmd->vmd_pageout_wakeup_thresh);
}

/*
 * Returns TRUE if the domain is below the min paging target.
 */
static inline int
vm_paging_min(struct vm_domain *vmd)
{

        return (vmd->vmd_free_min > vmd->vmd_free_count);
}

/*
 * Returns TRUE if the domain is below the severe paging target.
 */
static inline int
vm_paging_severe(struct vm_domain *vmd)
{

        return (vmd->vmd_free_severe > vmd->vmd_free_count);
}

/*
 * Return the number of pages we need to launder.
 * A positive number indicates that we have a shortfall of clean pages.
 */
static inline int
vm_laundry_target(struct vm_domain *vmd)
{

        return (vm_paging_target(vmd));
}

void pagedaemon_wakeup(int domain);

static inline void
vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
{
        u_int old, new;

        old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
        new = old + adj;
        /*
         * Only update bitsets on transitions. Notice we short-circuit the
         * rest of the checks if we're above min already.
         */
        if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
            (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
            (old < vmd->vmd_pageout_free_min &&
            new >= vmd->vmd_pageout_free_min)))
                vm_domain_clear(vmd);
}
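
/*
 * Illustrative example (editorial, not part of the upstream header): with
 * vmd_free_min == 5000, a call with adj == 8 while old == 4996 yields
 * new == 5004, crossing the free_min threshold, so vm_domain_clear() is
 * called to take the domain out of the low-memory domain sets so that
 * threads sleeping in vm_wait() can be woken.  If old were already at or
 * above free_min, the leading comparison fails and no further checks are
 * performed.
 */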

#endif /* _KERNEL */
#endif /* !_VM_PAGEQUEUE_ */