GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
1
// SPDX-License-Identifier: CDDL-1.0
2
/*
3
* CDDL HEADER START
4
*
5
* The contents of this file are subject to the terms of the
6
* Common Development and Distribution License (the "License").
7
* You may not use this file except in compliance with the License.
8
*
9
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10
* or https://opensource.org/licenses/CDDL-1.0.
11
* See the License for the specific language governing permissions
12
* and limitations under the License.
13
*
14
* When distributing Covered Code, include this CDDL HEADER in each
15
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16
* If applicable, add the following below this CDDL HEADER, with the
17
* fields enclosed by brackets "[]" replaced with your own identifying
18
* information: Portions Copyright [yyyy] [name of copyright owner]
19
*
20
* CDDL HEADER END
21
*/
22
/*
23
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
24
* Copyright (c) 2019 by Delphix. All rights reserved.
25
* Copyright (c) 2023, 2024, Klara Inc.
26
* Copyright (c) 2025, Rob Norris <[email protected]>
27
*/
28
29
/*
30
* See abd.c for a general overview of the ARC buffered data (ABD).
31
*
32
* Linear buffers act exactly like normal buffers and are always mapped into the
33
* kernel's virtual memory space, while scattered ABD data chunks are allocated
34
* as physical pages and then mapped in only while they are actually being
35
* accessed through one of the abd_* library functions. Using scattered ABDs
36
* provides several benefits:
37
*
38
* (1) They avoid use of kmem_*, preventing performance problems where running
39
* kmem_reap on very large memory systems never finishes and causes
40
* constant TLB shootdowns.
41
*
42
* (2) Fragmentation is less of an issue because, when we are at the limit of
43
* allocatable space, we won't have to search around for a long free
44
* hole in the VA space for large ARC allocations. Each chunk is mapped in
45
* individually, so even if we are using HIGHMEM (see next point) we
46
* wouldn't need to worry about finding a contiguous address range.
47
*
48
* (3) If we are not using HIGHMEM, then all physical memory is always
49
* mapped into the kernel's address space, so we also avoid the map /
50
* unmap costs on each ABD access.
51
*
52
* If we are not using HIGHMEM, scattered buffers which have only one chunk
53
* can be treated as linear buffers, because they are contiguous in the
54
* kernel's virtual address space. See abd_alloc_chunks() for details.
55
*/
56
57
#include <sys/abd_impl.h>
58
#include <sys/param.h>
59
#include <sys/zio.h>
60
#include <sys/arc.h>
61
#include <sys/zfs_context.h>
62
#include <sys/zfs_znode.h>
63
#include <linux/kmap_compat.h>
64
#include <linux/mm_compat.h>
65
#include <linux/scatterlist.h>
66
#include <linux/version.h>
67
68
#if defined(MAX_ORDER)
69
#define ABD_MAX_ORDER (MAX_ORDER)
70
#elif defined(MAX_PAGE_ORDER)
71
#define ABD_MAX_ORDER (MAX_PAGE_ORDER)
72
#endif
73
74
typedef struct abd_stats {
75
kstat_named_t abdstat_struct_size;
76
kstat_named_t abdstat_linear_cnt;
77
kstat_named_t abdstat_linear_data_size;
78
kstat_named_t abdstat_scatter_cnt;
79
kstat_named_t abdstat_scatter_data_size;
80
kstat_named_t abdstat_scatter_chunk_waste;
81
kstat_named_t abdstat_scatter_orders[ABD_MAX_ORDER];
82
kstat_named_t abdstat_scatter_page_multi_chunk;
83
kstat_named_t abdstat_scatter_page_multi_zone;
84
kstat_named_t abdstat_scatter_page_alloc_retry;
85
kstat_named_t abdstat_scatter_sg_table_retry;
86
} abd_stats_t;
87
88
static abd_stats_t abd_stats = {
89
/* Amount of memory occupied by all of the abd_t struct allocations */
90
{ "struct_size", KSTAT_DATA_UINT64 },
91
/*
92
* The number of linear ABDs which are currently allocated, excluding
93
* ABDs which don't own their data (for instance the ones which were
94
* allocated through abd_get_offset() and abd_get_from_buf()). If an
95
* ABD takes ownership of its buf then it will become tracked.
96
*/
97
{ "linear_cnt", KSTAT_DATA_UINT64 },
98
/* Amount of data stored in all linear ABDs tracked by linear_cnt */
99
{ "linear_data_size", KSTAT_DATA_UINT64 },
100
/*
101
* The number of scatter ABDs which are currently allocated, excluding
102
* ABDs which don't own their data (for instance the ones which were
103
* allocated through abd_get_offset()).
104
*/
105
{ "scatter_cnt", KSTAT_DATA_UINT64 },
106
/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
107
{ "scatter_data_size", KSTAT_DATA_UINT64 },
108
/*
109
* The amount of space wasted at the end of the last chunk across all
110
* scatter ABDs tracked by scatter_cnt.
111
*/
112
{ "scatter_chunk_waste", KSTAT_DATA_UINT64 },
113
/*
114
* The number of compound allocations of a given order. These
115
* allocations are spread over all currently allocated ABDs, and
116
* act as a measure of memory fragmentation.
117
*/
118
{ { "scatter_order_N", KSTAT_DATA_UINT64 } },
119
/*
120
* The number of scatter ABDs which contain multiple chunks.
121
* ABDs are preferentially allocated from the minimum number of
122
* contiguous multi-page chunks; a single chunk is optimal.
123
*/
124
{ "scatter_page_multi_chunk", KSTAT_DATA_UINT64 },
125
/*
126
* The number of scatter ABDs which are split across memory zones.
127
* ABDs are preferentially allocated using pages from a single zone.
128
*/
129
{ "scatter_page_multi_zone", KSTAT_DATA_UINT64 },
130
/*
131
* The total number of retries encountered when attempting to
132
* allocate the pages to populate the scatter ABD.
133
*/
134
{ "scatter_page_alloc_retry", KSTAT_DATA_UINT64 },
135
/*
136
* The total number of retries encountered when attempting to
137
* allocate the sg table for an ABD.
138
*/
139
{ "scatter_sg_table_retry", KSTAT_DATA_UINT64 },
140
};
141
142
static struct {
143
wmsum_t abdstat_struct_size;
144
wmsum_t abdstat_linear_cnt;
145
wmsum_t abdstat_linear_data_size;
146
wmsum_t abdstat_scatter_cnt;
147
wmsum_t abdstat_scatter_data_size;
148
wmsum_t abdstat_scatter_chunk_waste;
149
wmsum_t abdstat_scatter_orders[ABD_MAX_ORDER];
150
wmsum_t abdstat_scatter_page_multi_chunk;
151
wmsum_t abdstat_scatter_page_multi_zone;
152
wmsum_t abdstat_scatter_page_alloc_retry;
153
wmsum_t abdstat_scatter_sg_table_retry;
154
} abd_sums;
155
156
#define abd_for_each_sg(abd, sg, n, i) \
157
for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)
158
159
/*
160
* zfs_abd_scatter_min_size is the minimum allocation size to use scatter
161
* ABD's. Smaller allocations will use linear ABD's, which use
162
* zio_[data_]buf_alloc().
163
*
164
* Scatter ABD's use at least one page each, so sub-page allocations waste
165
* some space when allocated as scatter (e.g. 2KB scatter allocation wastes
166
* half of each page). Using linear ABD's for small allocations means that
167
* they will be put on slabs which contain many allocations. This can
168
* improve memory efficiency, but it also makes it much harder for ARC
169
* evictions to actually free pages, because all the buffers on one slab need
170
* to be freed in order for the slab (and underlying pages) to be freed.
171
* Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
172
* possible for them to actually waste more memory than scatter (one page per
173
* buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th).
174
*
175
* Spill blocks are typically 512B and are heavily used on systems running
176
* selinux with the default dnode size and the `xattr=sa` property set.
177
*
178
* By default we use linear allocations for 512B and 1KB, and scatter
179
* allocations for larger (1.5KB and up).
180
*/
181
static int zfs_abd_scatter_min_size = 512 * 3;
182
183
/*
184
* We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
185
* just a single zero'd page. This allows us to conserve memory by
186
* only using a single zero page for the scatterlist.
187
*/
188
abd_t *abd_zero_scatter = NULL;
189
190
struct page;
191
192
/*
193
* abd_zero_page is assigned to each of the pages of abd_zero_scatter. It will
194
* point to ZERO_PAGE if it is available or it will be an allocated zero'd
195
* PAGESIZE buffer.
196
*/
197
static struct page *abd_zero_page = NULL;
198
199
static kmem_cache_t *abd_cache = NULL;
200
static kstat_t *abd_ksp;
201
202
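/*
* The number of PAGESIZE chunks needed to hold size bytes (i.e. size
* rounded up to a whole number of pages).
*/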
static uint_t
203
abd_chunkcnt_for_bytes(size_t size)
204
{
205
return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
206
}
207
208
abd_t *
209
abd_alloc_struct_impl(size_t size)
210
{
211
/*
212
* In Linux we do not use the size passed in during ABD
213
* allocation, so we just ignore it.
214
*/
215
(void) size;
216
abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
217
ASSERT3P(abd, !=, NULL);
218
ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));
219
220
return (abd);
221
}
222
223
void
224
abd_free_struct_impl(abd_t *abd)
225
{
226
kmem_cache_free(abd_cache, abd);
227
ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
228
}
229
230
static unsigned zfs_abd_scatter_max_order = ABD_MAX_ORDER - 1;
231
232
/*
233
* Mark zfs data pages so they can be excluded from kernel crash dumps
234
*/
235
#ifdef _LP64
236
#define ABD_FILE_CACHE_PAGE 0x2F5ABDF11ECAC4E
237
238
static inline void
239
abd_mark_zfs_page(struct page *page)
240
{
241
get_page(page);
242
SetPagePrivate(page);
243
set_page_private(page, ABD_FILE_CACHE_PAGE);
244
}
245
246
static inline void
247
abd_unmark_zfs_page(struct page *page)
248
{
249
set_page_private(page, 0UL);
250
ClearPagePrivate(page);
251
put_page(page);
252
}
253
#else
254
#define abd_mark_zfs_page(page)
255
#define abd_unmark_zfs_page(page)
256
#endif /* _LP64 */
257
258
#ifndef CONFIG_HIGHMEM
259
260
/*
261
* The goal is to minimize fragmentation by preferentially populating ABDs
262
* with higher order compound pages from a single zone. Allocation size is
263
* progressively decreased until it can be satisfied without performing
264
* reclaim or compaction. When necessary this function will degenerate to
265
* allocating individual pages and allowing reclaim to satisfy allocations.
266
*/
267
void
268
abd_alloc_chunks(abd_t *abd, size_t size)
269
{
270
struct list_head pages;
271
struct sg_table table;
272
struct scatterlist *sg;
273
struct page *page, *tmp_page = NULL;
274
gfp_t gfp = __GFP_RECLAIMABLE | __GFP_NOWARN | GFP_NOIO;
275
gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
276
unsigned int max_order = MIN(zfs_abd_scatter_max_order,
277
ABD_MAX_ORDER - 1);
278
unsigned int nr_pages = abd_chunkcnt_for_bytes(size);
279
unsigned int chunks = 0, zones = 0;
280
size_t remaining_size;
281
int nid = NUMA_NO_NODE;
282
unsigned int alloc_pages = 0;
283
284
INIT_LIST_HEAD(&pages);
285
286
ASSERT3U(alloc_pages, <, nr_pages);
287
288
while (alloc_pages < nr_pages) {
289
unsigned int chunk_pages;
290
unsigned int order;
291
292
order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
293
chunk_pages = (1U << order);
294
295
page = alloc_pages_node(nid, order ? gfp_comp : gfp, order);
296
if (page == NULL) {
297
if (order == 0) {
298
ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
299
schedule_timeout_interruptible(1);
300
} else {
301
max_order = MAX(0, order - 1);
302
}
303
continue;
304
}
305
306
list_add_tail(&page->lru, &pages);
307
308
if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
309
zones++;
310
311
nid = page_to_nid(page);
312
ABDSTAT_BUMP(abdstat_scatter_orders[order]);
313
chunks++;
314
alloc_pages += chunk_pages;
315
}
316
317
ASSERT3S(alloc_pages, ==, nr_pages);
318
319
while (sg_alloc_table(&table, chunks, gfp)) {
320
ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
321
schedule_timeout_interruptible(1);
322
}
323
324
sg = table.sgl;
325
remaining_size = size;
326
list_for_each_entry_safe(page, tmp_page, &pages, lru) {
327
size_t sg_size = MIN(PAGESIZE << compound_order(page),
328
remaining_size);
329
sg_set_page(sg, page, sg_size, 0);
330
abd_mark_zfs_page(page);
331
remaining_size -= sg_size;
332
333
sg = sg_next(sg);
334
list_del(&page->lru);
335
}
336
337
/*
338
* These conditions ensure that a possible transformation to a linear
339
* ABD would be valid.
340
*/
341
ASSERT(!PageHighMem(sg_page(table.sgl)));
342
ASSERT0(ABD_SCATTER(abd).abd_offset);
343
344
if (table.nents == 1) {
345
/*
346
* Since there is only one entry, this ABD can be represented
347
* as a linear buffer. All single-page (4K) ABD's can be
348
* represented this way. Some multi-page ABD's can also be
349
* represented this way, if we were able to allocate a single
350
* "chunk" (higher-order "page" which represents a power-of-2
351
* series of physically-contiguous pages). This is often the
352
* case for 2-page (8K) ABD's.
353
*
354
* Representing a single-entry scatter ABD as a linear ABD
355
* has the performance advantage of avoiding the copy (and
356
* allocation) in abd_borrow_buf_copy / abd_return_buf_copy.
357
* A performance increase of around 5% has been observed for
358
* ARC-cached reads (of small blocks which can take advantage
359
* of this).
360
*
361
* Note that this optimization is only possible because the
362
* pages are always mapped into the kernel's address space.
363
* This is not the case for highmem pages, so the
364
* optimization can not be made there.
365
*/
366
abd->abd_flags |= ABD_FLAG_LINEAR;
367
abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
368
abd->abd_u.abd_linear.abd_sgl = table.sgl;
369
ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
370
} else if (table.nents > 1) {
371
ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
372
abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
373
374
if (zones) {
375
ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
376
abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
377
}
378
379
ABD_SCATTER(abd).abd_sgl = table.sgl;
380
ABD_SCATTER(abd).abd_nents = table.nents;
381
}
382
}
383
#else
384
385
/*
386
* Allocate N individual pages to construct a scatter ABD. This function
387
* makes no attempt to request contiguous pages and requires the minimal
388
* number of kernel interfaces. It's designed for maximum compatibility.
389
*/
390
void
391
abd_alloc_chunks(abd_t *abd, size_t size)
392
{
393
struct scatterlist *sg = NULL;
394
struct sg_table table;
395
struct page *page;
396
gfp_t gfp = __GFP_RECLAIMABLE | __GFP_NOWARN | GFP_NOIO;
397
int nr_pages = abd_chunkcnt_for_bytes(size);
398
int i = 0;
399
400
while (sg_alloc_table(&table, nr_pages, gfp)) {
401
ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
402
schedule_timeout_interruptible(1);
403
}
404
405
ASSERT3U(table.nents, ==, nr_pages);
406
ABD_SCATTER(abd).abd_sgl = table.sgl;
407
ABD_SCATTER(abd).abd_nents = nr_pages;
408
409
abd_for_each_sg(abd, sg, nr_pages, i) {
410
while ((page = __page_cache_alloc(gfp)) == NULL) {
411
ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
412
schedule_timeout_interruptible(1);
413
}
414
415
ABDSTAT_BUMP(abdstat_scatter_orders[0]);
416
sg_set_page(sg, page, PAGESIZE, 0);
417
abd_mark_zfs_page(page);
418
}
419
420
if (nr_pages > 1) {
421
ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
422
abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
423
}
424
}
425
#endif /* !CONFIG_HIGHMEM */
426
427
/*
428
* This must be called if any of the sg_table allocation functions
429
* are called.
430
*/
431
static void
432
abd_free_sg_table(abd_t *abd)
433
{
434
struct sg_table table;
435
436
table.sgl = ABD_SCATTER(abd).abd_sgl;
437
table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
438
sg_free_table(&table);
439
}
440
441
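/*
* Unmark and free the pages backing a scatter ABD, then release its
* sg_table. Pages that were supplied through abd_alloc_from_pages() are
* not freed here.
*/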
void
442
abd_free_chunks(abd_t *abd)
443
{
444
struct scatterlist *sg = NULL;
445
struct page *page;
446
int nr_pages = ABD_SCATTER(abd).abd_nents;
447
int order, i = 0;
448
449
if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
450
ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);
451
452
if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
453
ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
454
455
/*
456
* Scatter ABDs may be constructed by abd_alloc_from_pages() from
457
* an array of pages, in which case they should not be freed.
458
*/
459
if (!abd_is_from_pages(abd)) {
460
abd_for_each_sg(abd, sg, nr_pages, i) {
461
page = sg_page(sg);
462
abd_unmark_zfs_page(page);
463
order = compound_order(page);
464
__free_pages(page, order);
465
ASSERT3U(sg->length, <=, PAGE_SIZE << order);
466
ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
467
}
468
}
469
470
abd_free_sg_table(abd);
471
}
472
473
/*
474
* Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
475
* the scatterlist will be set to the zero'd out buffer abd_zero_page.
476
*/
477
static void
478
abd_alloc_zero_scatter(void)
479
{
480
struct scatterlist *sg = NULL;
481
struct sg_table table;
482
gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
483
int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
484
int i = 0;
485
486
#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
487
gfp_t gfp_zero_page = gfp | __GFP_ZERO;
488
while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
489
ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
490
schedule_timeout_interruptible(1);
491
}
492
abd_mark_zfs_page(abd_zero_page);
493
#else
494
abd_zero_page = ZERO_PAGE(0);
495
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */
496
497
while (sg_alloc_table(&table, nr_pages, gfp)) {
498
ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
499
schedule_timeout_interruptible(1);
500
}
501
ASSERT3U(table.nents, ==, nr_pages);
502
503
abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
504
abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
505
ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
506
ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
507
ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
508
abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
509
abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK;
510
511
abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
512
sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
513
}
514
515
ABDSTAT_BUMP(abdstat_scatter_cnt);
516
ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
517
ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
518
}
519
520
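/*
* Return B_TRUE when an allocation of this size should be satisfied with a
* linear ABD rather than a scatter ABD (scatter disabled, or size below
* zfs_abd_scatter_min_size).
*/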
boolean_t
521
abd_size_alloc_linear(size_t size)
522
{
523
return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
524
}
525
526
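/*
* Update the scatter ABD kstats (count, data size, chunk waste) when a
* scatter ABD is allocated (ABDSTAT_INCR) or freed (ABDSTAT_DECR), and
* charge the wasted tail of the last page against the ARC.
*/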
void
527
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
528
{
529
ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
530
int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
531
if (op == ABDSTAT_INCR) {
532
ABDSTAT_BUMP(abdstat_scatter_cnt);
533
ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
534
ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
535
arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
536
} else {
537
ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
538
ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
539
ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
540
arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
541
}
542
}
543
544
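/* As above, but for the linear ABD count and data size kstats. */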
void
545
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
546
{
547
ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
548
if (op == ABDSTAT_INCR) {
549
ABDSTAT_BUMP(abdstat_linear_cnt);
550
ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
551
} else {
552
ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
553
ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
554
}
555
}
556
557
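/*
* Sanity checks for a scatter ABD: it must have at least one sg entry, its
* offset must fall within the first entry, and (in debug builds) every
* entry must have a backing page.
*/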
void
558
abd_verify_scatter(abd_t *abd)
559
{
560
ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
561
ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
562
ABD_SCATTER(abd).abd_sgl->length);
563
564
#ifdef ZFS_DEBUG
565
struct scatterlist *sg = NULL;
566
size_t n = ABD_SCATTER(abd).abd_nents;
567
int i = 0;
568
569
abd_for_each_sg(abd, sg, n, i) {
570
ASSERT3P(sg_page(sg), !=, NULL);
571
}
572
#endif
573
}
574
575
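/*
* Tear down abd_zero_scatter and, if we allocated a private zero page
* (HAVE_ZERO_PAGE_GPL_ONLY), release that page as well.
*/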
static void
576
abd_free_zero_scatter(void)
577
{
578
ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
579
ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
580
ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
581
582
abd_free_sg_table(abd_zero_scatter);
583
abd_free_struct(abd_zero_scatter);
584
abd_zero_scatter = NULL;
585
ASSERT3P(abd_zero_page, !=, NULL);
586
#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
587
abd_unmark_zfs_page(abd_zero_page);
588
__free_page(abd_zero_page);
589
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */
590
}
591
592
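/*
* kstat update callback: fold the wmsum counters into the named kstat
* values. The kstat is read-only.
*/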
static int
593
abd_kstats_update(kstat_t *ksp, int rw)
594
{
595
abd_stats_t *as = ksp->ks_data;
596
597
if (rw == KSTAT_WRITE)
598
return (EACCES);
599
as->abdstat_struct_size.value.ui64 =
600
wmsum_value(&abd_sums.abdstat_struct_size);
601
as->abdstat_linear_cnt.value.ui64 =
602
wmsum_value(&abd_sums.abdstat_linear_cnt);
603
as->abdstat_linear_data_size.value.ui64 =
604
wmsum_value(&abd_sums.abdstat_linear_data_size);
605
as->abdstat_scatter_cnt.value.ui64 =
606
wmsum_value(&abd_sums.abdstat_scatter_cnt);
607
as->abdstat_scatter_data_size.value.ui64 =
608
wmsum_value(&abd_sums.abdstat_scatter_data_size);
609
as->abdstat_scatter_chunk_waste.value.ui64 =
610
wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
611
for (int i = 0; i < ABD_MAX_ORDER; i++) {
612
as->abdstat_scatter_orders[i].value.ui64 =
613
wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
614
}
615
as->abdstat_scatter_page_multi_chunk.value.ui64 =
616
wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
617
as->abdstat_scatter_page_multi_zone.value.ui64 =
618
wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
619
as->abdstat_scatter_page_alloc_retry.value.ui64 =
620
wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
621
as->abdstat_scatter_sg_table_retry.value.ui64 =
622
wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
623
return (0);
624
}
625
626
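/*
* Set up the abd_t kmem cache, the wmsum counters and the abdstats kstat,
* and the shared abd_zero_scatter ABD.
*/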
void
627
abd_init(void)
628
{
629
int i;
630
631
abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
632
0, NULL, NULL, NULL, NULL, NULL, KMC_RECLAIMABLE);
633
634
wmsum_init(&abd_sums.abdstat_struct_size, 0);
635
wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
636
wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
637
wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
638
wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
639
wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
640
for (i = 0; i < ABD_MAX_ORDER; i++)
641
wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
642
wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
643
wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
644
wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
645
wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);
646
647
abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
648
sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
649
if (abd_ksp != NULL) {
650
for (i = 0; i < ABD_MAX_ORDER; i++) {
651
snprintf(abd_stats.abdstat_scatter_orders[i].name,
652
KSTAT_STRLEN, "scatter_order_%d", i);
653
abd_stats.abdstat_scatter_orders[i].data_type =
654
KSTAT_DATA_UINT64;
655
}
656
abd_ksp->ks_data = &abd_stats;
657
abd_ksp->ks_update = abd_kstats_update;
658
kstat_install(abd_ksp);
659
}
660
661
abd_alloc_zero_scatter();
662
}
663
664
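/*
* Tear down everything set up by abd_init(): the zero-filled scatter ABD,
* the kstat, the wmsum counters, and the abd_t kmem cache.
*/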
void
665
abd_fini(void)
666
{
667
abd_free_zero_scatter();
668
669
if (abd_ksp != NULL) {
670
kstat_delete(abd_ksp);
671
abd_ksp = NULL;
672
}
673
674
wmsum_fini(&abd_sums.abdstat_struct_size);
675
wmsum_fini(&abd_sums.abdstat_linear_cnt);
676
wmsum_fini(&abd_sums.abdstat_linear_data_size);
677
wmsum_fini(&abd_sums.abdstat_scatter_cnt);
678
wmsum_fini(&abd_sums.abdstat_scatter_data_size);
679
wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
680
for (int i = 0; i < ABD_MAX_ORDER; i++)
681
wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
682
wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
683
wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
684
wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
685
wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);
686
687
if (abd_cache) {
688
kmem_cache_destroy(abd_cache);
689
abd_cache = NULL;
690
}
691
}
692
693
void
694
abd_free_linear_page(abd_t *abd)
695
{
696
/* Transform it back into a scatter ABD for freeing */
697
struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
698
699
/* When backed by user page unmap it */
700
if (abd_is_from_pages(abd))
701
zfs_kunmap(sg_page(sg));
702
else
703
abd_update_scatter_stats(abd, ABDSTAT_DECR);
704
705
abd->abd_flags &= ~ABD_FLAG_LINEAR;
706
abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
707
ABD_SCATTER(abd).abd_nents = 1;
708
ABD_SCATTER(abd).abd_offset = 0;
709
ABD_SCATTER(abd).abd_sgl = sg;
710
abd_free_chunks(abd);
711
}
712
713
/*
714
* Allocate a scatter ABD structure from user pages. The pages must be
715
* pinned with get_user_pages, or similar, but need not be mapped via
716
* the kmap interfaces.
717
*/
718
abd_t *
719
abd_alloc_from_pages(struct page **pages, unsigned long offset, uint64_t size)
720
{
721
uint_t npages = DIV_ROUND_UP(size, PAGE_SIZE);
722
struct sg_table table;
723
724
VERIFY3U(size, <=, DMU_MAX_ACCESS);
725
ASSERT3U(offset, <, PAGE_SIZE);
726
ASSERT3P(pages, !=, NULL);
727
728
/*
729
* Even if this buf is filesystem metadata, we only track that if we
730
* own the underlying data buffer, which is not true in this case.
731
* Therefore, we don't ever use ABD_FLAG_META here.
732
*/
733
abd_t *abd = abd_alloc_struct(0);
734
abd->abd_flags |= ABD_FLAG_FROM_PAGES | ABD_FLAG_OWNER;
735
abd->abd_size = size;
736
737
while (sg_alloc_table_from_pages(&table, pages, npages, offset,
738
size, __GFP_NOWARN | GFP_NOIO) != 0) {
739
ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
740
schedule_timeout_interruptible(1);
741
}
742
743
if ((offset + size) <= PAGE_SIZE) {
744
/*
745
* Since there is only one entry, this ABD can be represented
746
* as a linear buffer. All single-page (4K) ABD's constructed
747
* from a user page can be represented this way as long as the
748
* page is mapped to a virtual address. This allows us to
749
* apply an offset into the mapped page.
750
*
751
* Note that kmap() must be used, not kmap_atomic(), because
752
* the mapping needs to be set up on all CPUs. Using kmap()
753
* also enables the use of highmem pages when required.
754
*/
755
abd->abd_flags |= ABD_FLAG_LINEAR | ABD_FLAG_LINEAR_PAGE;
756
abd->abd_u.abd_linear.abd_sgl = table.sgl;
757
zfs_kmap(sg_page(table.sgl));
758
ABD_LINEAR_BUF(abd) = sg_virt(table.sgl);
759
} else {
760
ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
761
abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
762
763
ABD_SCATTER(abd).abd_offset = offset;
764
ABD_SCATTER(abd).abd_sgl = table.sgl;
765
ABD_SCATTER(abd).abd_nents = table.nents;
766
767
ASSERT0(ABD_SCATTER(abd).abd_offset);
768
}
769
770
return (abd);
771
}
772
773
/*
774
* If we're going to use this ABD for doing I/O using the block layer, the
775
* consumer of the ABD data doesn't care if it's scattered or not, and we don't
776
* plan to store this ABD in memory for a long period of time, we should
777
* allocate the ABD type that requires the least data copying to do the I/O.
778
*
779
* On Linux the optimal thing to do would be to use abd_get_offset() and
780
* construct a new ABD which shares the original pages thereby eliminating
781
* the copy. But for the moment a new linear ABD is allocated until this
782
* performance optimization can be implemented.
783
*/
784
abd_t *
785
abd_alloc_for_io(size_t size, boolean_t is_metadata)
786
{
787
return (abd_alloc(size, is_metadata));
788
}
789
790
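/*
* Create (or fill in) an ABD that shares sabd's pages, starting off bytes
* into it. The new ABD points at the sg entry containing that offset and
* does not take ownership of the pages.
*/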
abd_t *
791
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
792
size_t size)
793
{
794
(void) size;
795
int i = 0;
796
struct scatterlist *sg = NULL;
797
798
abd_verify(sabd);
799
ASSERT3U(off, <=, sabd->abd_size);
800
801
size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
802
803
if (abd == NULL)
804
abd = abd_alloc_struct(0);
805
806
/*
807
* Even if this buf is filesystem metadata, we only track that
808
* if we own the underlying data buffer, which is not true in
809
* this case. Therefore, we don't ever use ABD_FLAG_META here.
810
*/
811
812
abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
813
if (new_offset < sg->length)
814
break;
815
new_offset -= sg->length;
816
}
817
818
ABD_SCATTER(abd).abd_sgl = sg;
819
ABD_SCATTER(abd).abd_offset = new_offset;
820
ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;
821
822
if (abd_is_from_pages(sabd))
823
abd->abd_flags |= ABD_FLAG_FROM_PAGES;
824
825
return (abd);
826
}
827
828
/*
829
* Initialize the abd_iter.
830
*/
831
void
832
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
833
{
834
ASSERT(!abd_is_gang(abd));
835
abd_verify(abd);
836
memset(aiter, 0, sizeof (struct abd_iter));
837
aiter->iter_abd = abd;
838
if (!abd_is_linear(abd)) {
839
aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
840
aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
841
}
842
}
843
844
/*
845
* This is just a helper function to see if we have exhausted the
846
* abd_iter and reached the end.
847
*/
848
boolean_t
849
abd_iter_at_end(struct abd_iter *aiter)
850
{
851
ASSERT3U(aiter->iter_pos, <=, aiter->iter_abd->abd_size);
852
return (aiter->iter_pos == aiter->iter_abd->abd_size);
853
}
854
855
/*
856
* Advance the iterator by a certain amount. Cannot be called when a chunk is
857
* in use. This can be safely called when the aiter has already been exhausted,
858
* which case this does nothing.
859
*/
860
void
861
abd_iter_advance(struct abd_iter *aiter, size_t amount)
862
{
863
/*
864
* Ensure that last chunk is not in use. abd_iterate_*() must clear
865
* this state (directly or abd_iter_unmap()) before advancing.
866
*/
867
ASSERT0P(aiter->iter_mapaddr);
868
ASSERT0(aiter->iter_mapsize);
869
ASSERT0P(aiter->iter_page);
870
ASSERT0(aiter->iter_page_doff);
871
ASSERT0(aiter->iter_page_dsize);
872
873
/* There's nothing left to advance to, so do nothing */
874
if (abd_iter_at_end(aiter))
875
return;
876
877
aiter->iter_pos += amount;
878
aiter->iter_offset += amount;
879
if (!abd_is_linear(aiter->iter_abd)) {
880
while (aiter->iter_offset >= aiter->iter_sg->length) {
881
aiter->iter_offset -= aiter->iter_sg->length;
882
aiter->iter_sg = sg_next(aiter->iter_sg);
883
if (aiter->iter_sg == NULL) {
884
ASSERT0(aiter->iter_offset);
885
break;
886
}
887
}
888
}
889
}
890
891
/*
892
* Map the current chunk into aiter. This can be safely called when the aiter
893
* has already exhausted, in which case this does nothing.
894
*/
895
void
896
abd_iter_map(struct abd_iter *aiter)
897
{
898
void *paddr;
899
size_t offset = 0;
900
901
ASSERT0P(aiter->iter_mapaddr);
902
ASSERT0(aiter->iter_mapsize);
903
904
/* There's nothing left to iterate over, so do nothing */
905
if (abd_iter_at_end(aiter))
906
return;
907
908
if (abd_is_linear(aiter->iter_abd)) {
909
ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
910
offset = aiter->iter_offset;
911
aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
912
paddr = ABD_LINEAR_BUF(aiter->iter_abd);
913
} else {
914
offset = aiter->iter_offset;
915
aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
916
aiter->iter_abd->abd_size - aiter->iter_pos);
917
918
paddr = zfs_kmap_local(sg_page(aiter->iter_sg));
919
}
920
921
aiter->iter_mapaddr = (char *)paddr + offset;
922
}
923
924
/*
925
* Unmap the current chunk from aiter. This can be safely called when the aiter
926
* has already exhausted, in which case this does nothing.
927
*/
928
void
929
abd_iter_unmap(struct abd_iter *aiter)
930
{
931
/* There's nothing left to unmap, so do nothing */
932
if (abd_iter_at_end(aiter))
933
return;
934
935
if (!abd_is_linear(aiter->iter_abd)) {
936
/* LINTED E_FUNC_SET_NOT_USED */
937
zfs_kunmap_local(aiter->iter_mapaddr - aiter->iter_offset);
938
}
939
940
ASSERT3P(aiter->iter_mapaddr, !=, NULL);
941
ASSERT3U(aiter->iter_mapsize, >, 0);
942
943
aiter->iter_mapaddr = NULL;
944
aiter->iter_mapsize = 0;
945
}
946
947
void
948
abd_cache_reap_now(void)
949
{
950
}
951
952
/*
953
* Borrow a raw buffer from an ABD without copying the contents of the ABD
954
* into the buffer. If the ABD is scattered, this will allocate a raw buffer
955
* whose contents are undefined. To copy over the existing data in the ABD, use
956
* abd_borrow_buf_copy() instead.
957
*/
958
void *
959
abd_borrow_buf(abd_t *abd, size_t n)
960
{
961
void *buf;
962
abd_verify(abd);
963
ASSERT3U(abd->abd_size, >=, 0);
964
/*
965
* In the event the ABD is composed of a single user page from Direct
966
* I/O we can not directly return the raw buffer. This is a consequence
967
* of not being able to write protect the page and the contents of the
968
* page can be changed at any time by the user.
969
*/
970
if (abd_is_from_pages(abd)) {
971
buf = zio_buf_alloc(n);
972
} else if (abd_is_linear(abd)) {
973
buf = abd_to_buf(abd);
974
} else {
975
buf = zio_buf_alloc(n);
976
}
977
978
#ifdef ZFS_DEBUG
979
(void) zfs_refcount_add_many(&abd->abd_children, n, buf);
980
#endif
981
return (buf);
982
}
983
984
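/*
* Like abd_borrow_buf(), but also copies the existing ABD contents into the
* borrowed buffer when that buffer is a separate allocation (scatter ABDs
* and ABDs backed by user pages).
*/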
void *
985
abd_borrow_buf_copy(abd_t *abd, size_t n)
986
{
987
void *buf = abd_borrow_buf(abd, n);
988
989
/*
990
* In the event the ABD is composed of a single user page from Direct
991
* I/O we must make sure to copy the data over into the newly allocated
992
* buffer. This is a consequence of the fact that we can not write
993
* protect the user page and there is a risk the contents of the page
994
* could be changed by the user at any moment.
995
*/
996
if (!abd_is_linear(abd) || abd_is_from_pages(abd)) {
997
abd_copy_to_buf(buf, abd, n);
998
}
999
return (buf);
1000
}
1001
1002
/*
1003
* Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
1004
* not change the contents of the ABD. If you want any changes you made to
1005
* buf to be copied back to abd, use abd_return_buf_copy() instead. If the
1006
* ABD is not constructed from user pages for Direct I/O then an ASSERT
1007
* checks to make sure the contents of buffer have not changed since it was
1008
* borrowed. We can not ASSERT that the contents of the buffer have not changed
1009
* if it is composed of user pages because the pages can not be placed under
1010
* write protection and the user could have possibly changed the contents in
1011
* the pages at any time. This is also an issue for Direct I/O reads. Checksum
1012
* verifications in the ZIO pipeline check for this issue and handle it by
1013
* returning an error on checksum verification failure.
1014
*/
1015
void
1016
abd_return_buf(abd_t *abd, void *buf, size_t n)
1017
{
1018
abd_verify(abd);
1019
ASSERT3U(abd->abd_size, >=, n);
1020
#ifdef ZFS_DEBUG
1021
(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
1022
#endif
1023
if (abd_is_from_pages(abd)) {
1024
zio_buf_free(buf, n);
1025
} else if (abd_is_linear(abd)) {
1026
ASSERT3P(buf, ==, abd_to_buf(abd));
1027
} else if (abd_is_gang(abd)) {
1028
#ifdef ZFS_DEBUG
1029
/*
1030
* We have to be careful with gang ABD's that we do not ASSERT0
1031
* for any ABD's that contain user pages from Direct I/O. In
1032
* order to handle this, we just iterate through the gang ABD
1033
* and only verify ABDs that are not from user pages.
1034
*/
1035
void *cmp_buf = buf;
1036
1037
for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
1038
cabd != NULL;
1039
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
1040
if (!abd_is_from_pages(cabd)) {
1041
ASSERT0(abd_cmp_buf(cabd, cmp_buf,
1042
cabd->abd_size));
1043
}
1044
cmp_buf = (char *)cmp_buf + cabd->abd_size;
1045
}
1046
#endif
1047
zio_buf_free(buf, n);
1048
} else {
1049
ASSERT0(abd_cmp_buf(abd, buf, n));
1050
zio_buf_free(buf, n);
1051
}
1052
}
1053
1054
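/*
* Like abd_return_buf(), but first copies any changes made in the borrowed
* buffer back into the ABD when the buffer was a separate allocation
* (scatter ABDs and ABDs backed by user pages).
*/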
void
1055
abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
1056
{
1057
if (!abd_is_linear(abd) || abd_is_from_pages(abd)) {
1058
abd_copy_from_buf(abd, buf, n);
1059
}
1060
abd_return_buf(abd, buf, n);
1061
}
1062
1063
/*
1064
* This is abd_iter_page(), the function underneath abd_iterate_page_func().
1065
* It yields the next page struct and data offset and size within it, without
1066
* mapping it into the address space.
1067
*/
1068
1069
/*
1070
* "Compound pages" are a group of pages that can be referenced from a single
1071
* struct page *. It's organised as a "head" page, followed by a series of
1072
* "tail" pages.
1073
*
1074
* In OpenZFS, compound pages are allocated using the __GFP_COMP flag, which we
1075
* get from scatter ABDs and SPL vmalloc slabs (i.e. >16K allocations). So a
1076
* great many of the IO buffers we get are going to be of this type.
1077
*
1078
* The tail pages are just regular PAGESIZE pages, and can be safely used
1079
* as-is. However, the head page has length covering itself and all the tail
1080
* pages. If the ABD chunk spans multiple pages, then we can use the head page
1081
* and a >PAGESIZE length, which is far more efficient.
1082
*
1083
* Before kernel 4.5 however, compound page heads were refcounted separately
1084
* from tail pages, such that moving back to the head page would require us to
1085
* take a reference to it and release it once we're completely finished with
1086
* it. In practice, that meant waiting until our caller was done with the ABD, which we
1087
* have no insight into from here. Rather than contort this API to track head
1088
* page references on such ancient kernels, we disabled this special compound
1089
* page handling on kernels before 4.5, instead just treating each page
1090
* within it as a regular PAGESIZE page (which it is). This is slightly less
1091
* efficient, but makes everything far simpler.
1092
*
1093
* We no longer support kernels before 4.5, so in theory none of this is
1094
* necessary. However, this code is still relatively new in the grand scheme of
1095
* things, so I'm leaving the ability to compile this out for the moment.
1096
*
1097
* Setting/clearing ABD_ITER_COMPOUND_PAGES below enables/disables the special
1098
* handling, by defining the ABD_ITER_PAGE_SIZE(page) macro to understand
1099
* compound pages, or not, and compiling in/out the support to detect compound
1100
* tail pages and move back to the start.
1101
*/
1102
1103
/* On by default */
1104
#define ABD_ITER_COMPOUND_PAGES
1105
1106
#ifdef ABD_ITER_COMPOUND_PAGES
1107
#define ABD_ITER_PAGE_SIZE(page) \
1108
(PageCompound(page) ? page_size(page) : PAGESIZE)
1109
#else
1110
#define ABD_ITER_PAGE_SIZE(page) (PAGESIZE)
1111
#endif
1112
1113
#ifndef nth_page
1114
/*
1115
* Since 6.18 nth_page() no longer exists, and is no longer required to iterate
1116
* within a single SG entry, so we replace it with a simple addition.
1117
*/
1118
#define nth_page(p, n) ((p)+(n))
1119
#endif
1120
1121
void
1122
abd_iter_page(struct abd_iter *aiter)
1123
{
1124
if (abd_iter_at_end(aiter)) {
1125
aiter->iter_page = NULL;
1126
aiter->iter_page_doff = 0;
1127
aiter->iter_page_dsize = 0;
1128
return;
1129
}
1130
1131
struct page *page;
1132
size_t doff, dsize;
1133
1134
/*
1135
* Find the page, and the start of the data within it. This is computed
1136
* differently for linear and scatter ABDs; linear is referenced by
1137
* virtual memory location, while scatter is referenced by page
1138
* pointer.
1139
*/
1140
if (abd_is_linear(aiter->iter_abd)) {
1141
ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
1142
1143
/* memory address at iter_pos */
1144
void *paddr = ABD_LINEAR_BUF(aiter->iter_abd) + aiter->iter_pos;
1145
1146
/* struct page for address */
1147
page = is_vmalloc_addr(paddr) ?
1148
vmalloc_to_page(paddr) : virt_to_page(paddr);
1149
1150
/* offset of address within the page */
1151
doff = offset_in_page(paddr);
1152
} else {
1153
ASSERT(!abd_is_gang(aiter->iter_abd));
1154
1155
/* current scatter page */
1156
page = nth_page(sg_page(aiter->iter_sg),
1157
aiter->iter_offset >> PAGE_SHIFT);
1158
1159
/* position within page */
1160
doff = aiter->iter_offset & (PAGESIZE - 1);
1161
}
1162
1163
#ifdef ABD_ITER_COMPOUND_PAGES
1164
if (PageTail(page)) {
1165
/*
1166
* If this is a compound tail page, move back to the head, and
1167
* adjust the offset to match. This may let us yield a much
1168
* larger amount of data from a single logical page, and so
1169
* leave our caller with fewer pages to process.
1170
*/
1171
struct page *head = compound_head(page);
1172
doff += ((page - head) * PAGESIZE);
1173
page = head;
1174
}
1175
#endif
1176
1177
ASSERT(page);
1178
1179
/*
1180
* Compute the maximum amount of data we can take from this page. This
1181
* is the smaller of:
1182
* - the remaining space in the page
1183
* - the remaining space in this scatterlist entry (which may not cover
1184
* the entire page)
1185
* - the remaining space in the abd (which may not cover the entire
1186
* scatterlist entry)
1187
*/
1188
dsize = MIN(ABD_ITER_PAGE_SIZE(page) - doff,
1189
aiter->iter_abd->abd_size - aiter->iter_pos);
1190
if (!abd_is_linear(aiter->iter_abd))
1191
dsize = MIN(dsize, aiter->iter_sg->length - aiter->iter_offset);
1192
ASSERT3U(dsize, >, 0);
1193
1194
/* final iterator outputs */
1195
aiter->iter_page = page;
1196
aiter->iter_page_doff = doff;
1197
aiter->iter_page_dsize = dsize;
1198
}
1199
1200
/*
1201
* Note: ABD BIO functions are only needed to support vdev_classic. See comments in
1202
* vdev_disk.c.
1203
*/
1204
1205
/*
1206
* bio_nr_pages for ABD.
1207
* @off is the offset in @abd
1208
*/
1209
unsigned long
1210
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
1211
{
1212
unsigned long pos;
1213
1214
if (abd_is_gang(abd)) {
1215
unsigned long count = 0;
1216
1217
for (abd_t *cabd = abd_gang_get_offset(abd, &off);
1218
cabd != NULL && size != 0;
1219
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
1220
ASSERT3U(off, <, cabd->abd_size);
1221
int mysize = MIN(size, cabd->abd_size - off);
1222
count += abd_nr_pages_off(cabd, mysize, off);
1223
size -= mysize;
1224
off = 0;
1225
}
1226
return (count);
1227
}
1228
1229
if (abd_is_linear(abd))
1230
pos = (unsigned long)abd_to_buf(abd) + off;
1231
else
1232
pos = ABD_SCATTER(abd).abd_offset + off;
1233
1234
return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
1235
(pos >> PAGE_SHIFT));
1236
}
1237
1238
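/*
* Add up to bio_size bytes of a linear (possibly vmalloc'd) buffer to a bio,
* one page at a time. Returns the number of bytes that could not be added.
*/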
static unsigned int
1239
bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
1240
{
1241
unsigned int offset, size, i;
1242
struct page *page;
1243
1244
offset = offset_in_page(buf_ptr);
1245
for (i = 0; i < bio->bi_max_vecs; i++) {
1246
size = PAGE_SIZE - offset;
1247
1248
if (bio_size <= 0)
1249
break;
1250
1251
if (size > bio_size)
1252
size = bio_size;
1253
1254
if (is_vmalloc_addr(buf_ptr))
1255
page = vmalloc_to_page(buf_ptr);
1256
else
1257
page = virt_to_page(buf_ptr);
1258
1259
/*
1260
* Some network related block device uses tcp_sendpage, which
1261
* doesn't behave well when using 0-count page, this is a
1262
* safety net to catch them.
1263
*/
1264
ASSERT3S(page_count(page), >, 0);
1265
1266
if (bio_add_page(bio, page, size, offset) != size)
1267
break;
1268
1269
buf_ptr += size;
1270
bio_size -= size;
1271
offset = 0;
1272
}
1273
1274
return (bio_size);
1275
}
1276
1277
/*
1278
* bio_map for gang ABD.
1279
*/
1280
static unsigned int
1281
abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
1282
unsigned int io_size, size_t off)
1283
{
1284
ASSERT(abd_is_gang(abd));
1285
1286
for (abd_t *cabd = abd_gang_get_offset(abd, &off);
1287
cabd != NULL;
1288
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
1289
ASSERT3U(off, <, cabd->abd_size);
1290
int size = MIN(io_size, cabd->abd_size - off);
1291
int remainder = abd_bio_map_off(bio, cabd, size, off);
1292
io_size -= (size - remainder);
1293
if (io_size == 0 || remainder > 0)
1294
return (io_size);
1295
off = 0;
1296
}
1297
ASSERT0(io_size);
1298
return (io_size);
1299
}
1300
1301
/*
1302
* bio_map for ABD.
1303
* @off is the offset in @abd
1304
* Remaining IO size is returned
1305
*/
1306
unsigned int
1307
abd_bio_map_off(struct bio *bio, abd_t *abd,
1308
unsigned int io_size, size_t off)
1309
{
1310
struct abd_iter aiter;
1311
1312
ASSERT3U(io_size, <=, abd->abd_size - off);
1313
if (abd_is_linear(abd))
1314
return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));
1315
1316
ASSERT(!abd_is_linear(abd));
1317
if (abd_is_gang(abd))
1318
return (abd_gang_bio_map_off(bio, abd, io_size, off));
1319
1320
abd_iter_init(&aiter, abd);
1321
abd_iter_advance(&aiter, off);
1322
1323
for (int i = 0; i < bio->bi_max_vecs; i++) {
1324
struct page *pg;
1325
size_t len, sgoff, pgoff;
1326
struct scatterlist *sg;
1327
1328
if (io_size <= 0)
1329
break;
1330
1331
sg = aiter.iter_sg;
1332
sgoff = aiter.iter_offset;
1333
pgoff = sgoff & (PAGESIZE - 1);
1334
len = MIN(io_size, PAGESIZE - pgoff);
1335
ASSERT(len > 0);
1336
1337
pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
1338
if (bio_add_page(bio, pg, len, pgoff) != len)
1339
break;
1340
1341
io_size -= len;
1342
abd_iter_advance(&aiter, len);
1343
}
1344
1345
return (io_size);
1346
}
1347
1348
EXPORT_SYMBOL(abd_alloc_from_pages);
1349
1350
/* Tunable Parameters */
1351
module_param(zfs_abd_scatter_enabled, int, 0644);
1352
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
1353
"Toggle whether ABD allocations must be linear.");
1354
module_param(zfs_abd_scatter_min_size, int, 0644);
1355
MODULE_PARM_DESC(zfs_abd_scatter_min_size,
1356
"Minimum size of scatter allocations.");
1357
module_param(zfs_abd_scatter_max_order, uint, 0644);
1358
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
1359
"Maximum order allocation used for a scatter ABD.");
1360
1361