GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/geom/geom_io.c

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/stack.h>
#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>
#include <machine/atomic.h>
#include <machine/stack.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#define KTR_GEOM_ENABLED \
	((KTR_COMPILE & KTR_GEOM) != 0 && (ktr_mask & KTR_GEOM) != 0)

static int g_io_transient_map_bio(struct bio *bp);

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;

static u_long nomem_count;
static u_long pause_count;

/*
 * Pace is a hint that we've had some trouble recently allocating
 * bios, so we should back off trying to send I/O down the stack
 * a bit to let the problem resolve.  When pacing, we also turn
 * off direct dispatch to also reduce memory pressure from I/Os
 * there, at the expense of some added latency while the memory
 * pressures exist.  See g_io_schedule_down() for more details
 * and limitations.
 */
static volatile u_int __read_mostly pace;

static uma_zone_t __read_mostly biozone;

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

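/*
 * Dequeue and return the first bio on the queue, or NULL if the queue
 * is empty.  The caller is expected to hold the queue lock.
 */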
static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

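/*
 * Allocate a zeroed bio without sleeping (M_NOWAIT).  This can return
 * NULL under memory pressure, so callers must check the result.
 */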
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if (KTR_GEOM_ENABLED) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp);
}

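/*
 * Allocate a zeroed bio, sleeping until memory is available (M_WAITOK);
 * this variant never returns NULL.
 */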
struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if (KTR_GEOM_ENABLED) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp);
}

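/* Return a bio to the UMA zone it was allocated from. */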
void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if (KTR_GEOM_ENABLED) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	uma_zfree(biozone, bp);
}

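/*
 * Create a child bio that mirrors the parent's command, range, and data
 * pointers.  Allocated with M_NOWAIT, so it can return NULL; on success
 * the parent's bio_children count is incremented.
 */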
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		/*
		 * BIO_ORDERED flag may be used by disk drivers to enforce
		 * ordering restrictions, so this flag needs to be cloned.
		 * BIO_UNMAPPED, BIO_VLIST, and BIO_SWAP should be inherited,
		 * to properly indicate which way the buffer is passed.
		 * Other bio flags are not suitable for cloning.
		 */
		bp2->bio_flags = bp->bio_flags &
		    (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST | BIO_SWAP);
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_ma = bp->bio_ma;
		bp2->bio_ma_n = bp->bio_ma_n;
		bp2->bio_ma_offset = bp->bio_ma_offset;
		bp2->bio_attribute = bp->bio_attribute;
		if (bp->bio_cmd == BIO_ZONE)
			bcopy(&bp->bio_zone, &bp2->bio_zone,
			    sizeof(bp->bio_zone));
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		bp2->bio_track_bp = bp->bio_track_bp;
#endif
		bp->bio_children++;
	}
#ifdef KTR
	if (KTR_GEOM_ENABLED) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp2);
}

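/*
 * Blocking variant of g_clone_bio(): allocated with M_WAITOK, so it
 * cannot fail.  Note that, unlike g_clone_bio(), BIO_ORDERED is not
 * inherited here.
 */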
struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST | BIO_SWAP);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_ma = bp->bio_ma;
	bp2->bio_ma_n = bp->bio_ma_n;
	bp2->bio_ma_offset = bp->bio_ma_offset;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if (KTR_GEOM_ENABLED) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp2);
}

void
g_reset_bio(struct bio *bp)
{

	bzero(bp, sizeof(*bp));
}

void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	biozone = uma_zcreate("g_bio", sizeof(struct bio), NULL, NULL,
	    NULL, NULL, 0, 0);
}

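/*
 * Synchronously issue a BIO_GETATTR request and wait for completion;
 * *len is updated to the number of bytes actually transferred.
 */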
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

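/*
 * Synchronously issue the BIO_ZONE command described by zone_args and
 * copy the results back into zone_args on completion.
 */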
int
g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_ZONE;
	bp->bio_done = NULL;
	/*
	 * XXX KDM need to handle report zone data.
	 */
	bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args));
	if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
		bp->bio_length =
		    zone_args->zone_params.report.entries_allocated *
		    sizeof(struct disk_zone_rep_entry);
	else
		bp->bio_length = 0;

	g_io_request(bp, cp);
	error = biowait(bp, "gzone");
	bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args));
	g_destroy_bio(bp);
	return (error);
}

/*
 * Send a BIO_SPEEDUP down the stack.  This is used to tell the lower layers
 * that the upper layers have detected a resource shortage.  The lower layers
 * are advised to stop delaying I/O that they might be holding for performance
 * reasons and to schedule it (non-trims) or complete it successfully (trims)
 * as quickly as they can.  bio_length is the amount of the shortage.  This
 * call should be non-blocking.  bio_resid is used to communicate back if the
 * lower layers couldn't find bio_length worth of I/O to schedule or discard.
 * A length of 0 means to do as much as you can (schedule the h/w queues full,
 * discard all trims).  flags are a hint from the upper layers to the lower
 * layers as to what operation should be done.
 */
int
g_io_speedup(off_t shortage, u_int flags, size_t *resid, struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	KASSERT((flags & (BIO_SPEEDUP_TRIM | BIO_SPEEDUP_WRITE)) != 0,
	    ("Invalid flags passed to g_io_speedup: %#x", flags));
	g_trace(G_T_BIO, "bio_speedup(%s, %jd, %#x)", cp->provider->name,
	    (intmax_t)shortage, flags);
	bp = g_new_bio();
	if (bp == NULL)
		return (ENOMEM);
	bp->bio_cmd = BIO_SPEEDUP;
	bp->bio_length = shortage;
	bp->bio_done = NULL;
	bp->bio_flags |= flags;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	*resid = bp->bio_resid;
	g_destroy_bio(bp);
	return (error);
}

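/*
 * Send an ordered BIO_FLUSH to the provider and wait for it to
 * complete, returning the resulting error status.
 */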
int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

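/*
 * Validate a bio against the consumer's access counts and the
 * provider's state and geometry.  Returns a positive errno to fail the
 * bio, 0 to complete it immediately (zero-length transfers), or
 * EJUSTRETURN to let it proceed to the provider.
 */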
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t excess;
	int error;

	biotrack(bp, __func__);

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters don't allow the operation. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_SPEEDUP:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	case BIO_ZONE:
		if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) ||
		    (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) {
			if (cp->acr == 0)
				return (EPERM);
		} else if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked for error, don't disturb it. */
	if (pp->error)
		return (pp->error);
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize is probably a lack of media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary. */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not an integral number of sectors long. */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);

		/* Truncate requests to the end of the provider's media. */
		excess = bp->bio_offset + bp->bio_length;
		if (excess > bp->bio_to->mediasize) {
			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
			    round_page(bp->bio_ma_offset +
			    bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
			    ("excess bio %p too short", bp));
			excess -= bp->bio_to->mediasize;
			bp->bio_length -= excess;
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				bp->bio_ma_n = round_page(bp->bio_ma_offset +
				    bp->bio_length) / PAGE_SIZE;
			}
			if (excess > 0)
				CTR3(KTR_GEOM, "g_down truncated bio "
				    "%p provider %s by %d", bp,
				    bp->bio_to->name, excess);
		}

		/* Deliver zero-length transfers right here. */
		if (bp->bio_length == 0) {
			CTR2(KTR_GEOM, "g_down terminated 0-length "
			    "bp %p provider %s", bp, bp->bio_to->name);
			return (0);
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
		    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
		    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
			if ((error = g_io_transient_map_bio(bp)) >= 0)
				return (error);
		}
		break;
	default:
		break;
	}
	return (EJUSTRETURN);
}

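/*
 * Entry point for I/O submission from a consumer to its provider.  The
 * bio is either dispatched directly into the provider's start routine
 * or queued for the g_down thread, depending on the direct-dispatch
 * flags, the remaining stack space, and memory pressure (pace).
 */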
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	int direct, error, first;
	uint8_t cmd;

	biotrack(bp, __func__);

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember consumer's private fields, so we can detect if they were
	 * modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	cmd = bp->bio_cmd;
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd));
	}
	if (cmd == BIO_DELETE || cmd == BIO_FLUSH || cmd == BIO_SPEEDUP) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hu)",
		    bp->bio_cmd));
	}
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_start_transaction_bio_t0(cp->stat, bp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_start_transaction_bio_t0(pp->stat, bp);
#ifdef INVARIANTS
	atomic_add_int(&cp->nstart, 1);
#endif

	direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
	    (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
	    curthread != g_down_td &&
	    ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
	    (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
	    pace == 0;
	if (direct) {
		/* Block direct execution if less than half of the stack is left. */
		size_t st, su;
		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}

	if (direct) {
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
			    "provider %s returned %d", bp, bp->bio_to->name,
			    error);
			g_io_deliver(bp, error);
			return;
		}
		bp->bio_to->geom->start(bp);
	} else {
		g_bioq_lock(&g_bio_run_down);
		first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_down.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_down);
		/* Pass it on down. */
		if (first)
			wakeup(&g_wait_down);
	}
}

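/*
 * Complete a bio back toward the consumer.  I/O statistics are
 * finalized here; ENOMEM completions are not delivered but instead
 * resubmitted via g_io_request() with pacing enabled.
 */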
void
g_io_deliver(struct bio *bp, int error)
{
	struct bintime now;
	struct g_consumer *cp;
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, first;

	biotrack(bp, __func__);

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes - GJournal in particular - can modify bio's
	 * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO
	 * flag means it's an expected behaviour for that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
	    "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: The next two assignments don't belong here.
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	direct = (pp->flags & G_PF_DIRECT_SEND) &&
	    (cp->flags & G_CF_DIRECT_RECEIVE) &&
	    curthread != g_up_td;
	if (direct) {
		/* Block direct execution if less than half of the stack is left. */
		size_t st, su;
		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}

	/*
	 * The statistics collection is lockless, as such, but we
	 * cannot update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&now);
	mtxp = mtx_pool_find(mtxpool_sleep, pp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_end_transaction_bio_bt(pp->stat, bp, &now);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_end_transaction_bio_bt(cp->stat, bp, &now);
#ifdef INVARIANTS
	cp->nend++;
#endif
	mtx_unlock(mtxp);

	if (error != ENOMEM) {
		bp->bio_error = error;
		if (direct) {
			biodone(bp);
		} else {
			g_bioq_lock(&g_bio_run_up);
			first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
			TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
			bp->bio_flags |= BIO_ONQUEUE;
			g_bio_run_up.bio_queue_length++;
			g_bioq_unlock(&g_bio_run_up);
			if (first)
				wakeup(&g_wait_up);
		}
		return;
	}

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	atomic_add_long(&nomem_count, 1);	/* Rare event, but no locks held. */
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	bp->bio_driver1 = NULL;
	bp->bio_driver2 = NULL;
	bp->bio_pflags = 0;
	g_io_request(bp, cp);
	pace = 1;
	return;
}

SYSCTL_DECL(_kern_geom);

static long transient_maps;
SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
    &transient_maps, 0,
    "Total count of the transient mapping requests");
u_int transient_map_retries = 10;
SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
    &transient_map_retries, 0,
    "Max count of retries used before giving up on creating transient map");
int transient_map_hard_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
    &transient_map_hard_failures, 0,
    "Failures to establish the transient mapping due to retry attempts "
    "exhausted");
int transient_map_soft_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
    &transient_map_soft_failures, 0,
    "Count of retried failures to establish the transient mapping");
int inflight_transient_maps;
SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
    &inflight_transient_maps, 0,
    "Current count of the active transient maps");
SYSCTL_ULONG(_kern_geom, OID_AUTO, nomem_count, CTLFLAG_RD,
    &nomem_count, 0,
    "Total count of requests completed with status of ENOMEM");
SYSCTL_ULONG(_kern_geom, OID_AUTO, pause_count, CTLFLAG_RD,
    &pause_count, 0,
    "Total count of requests stalled due to low memory in g_down");

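/*
 * Map an unmapped bio into the transient KVA arena so that providers
 * which cannot handle unmapped I/O still see a valid bio_data pointer.
 * Returns EJUSTRETURN if the bio may proceed, or an errno once the
 * retry limit is exhausted.
 */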
static int
g_io_transient_map_bio(struct bio *bp)
{
	vm_offset_t addr;
	long size;
	u_int retried;

	KASSERT(unmapped_buf_allowed, ("unmapped disabled"));

	size = round_page(bp->bio_ma_offset + bp->bio_length);
	KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
	addr = 0;
	retried = 0;
	atomic_add_long(&transient_maps, 1);
retry:
	if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
		if (transient_map_retries != 0 &&
		    retried >= transient_map_retries) {
			CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
			    bp, bp->bio_to->name);
			atomic_add_int(&transient_map_hard_failures, 1);
			return (EDEADLK/* XXXKIB */);
		} else {
			/*
			 * Naive attempt to quiesce the I/O to get more
			 * in-flight requests completed and defragment
			 * the transient_arena.
			 */
			CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
			    bp, bp->bio_to->name, retried);
			pause("g_d_tra", hz / 10);
			retried++;
			atomic_add_int(&transient_map_soft_failures, 1);
			goto retry;
		}
	}
	atomic_add_int(&inflight_transient_maps, 1);
	pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
	bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
	bp->bio_flags |= BIO_TRANSIENT_MAPPING;
	bp->bio_flags &= ~BIO_UNMAPPED;
	return (EJUSTRETURN);
}

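/*
 * Main loop of the g_down thread: dequeue bios from the down queue,
 * validate them with g_io_check(), and hand them to the provider's
 * start routine, pacing briefly after memory allocation failures.
 */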
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	int error;

	for (;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		biotrack(bp, __func__);
		if (pace != 0) {
			/*
			 * There has been at least one memory allocation
			 * failure since the last I/O completed.  Pause 1ms to
			 * give the system a chance to free up memory.  Pause
			 * time is not scaled to the number of I/O failures
			 * since they tend to cluster and the number is not
			 * predictive of how long a pause is needed.
			 *
			 * Older versions had a longer pause, which limited
			 * the IOPS to 10 and prolonged memory shortages that
			 * could have been alleviated by completing I/O, since
			 * it eliminated direct dispatch as well.
			 *
			 * XXX This pacing is really lame.  It needs to be
			 * solved by other methods.  This is OK only because
			 * the worst case scenario is so rare.  In the worst
			 * case scenario all memory is tied up waiting for I/O
			 * to complete, which can never happen since we can't
			 * allocate bios for that I/O.
			 */
			CTR0(KTR_GEOM, "g_down pacing self");
			pause_count++;		/* g_down has only one thread */
			pause_sbt("g_down", SBT_1MS, 0, 0);
			pace = 0;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

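/*
 * Main loop of the g_up thread: dequeue completed bios from the up
 * queue and run biodone() on them.
 */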
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for (;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_up);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_up going to sleep");
			msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		g_bioq_unlock(&g_bio_run_up);
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
		    "%jd len %ld", bp, bp->bio_to->name,
		    bp->bio_offset, bp->bio_length);
		biodone(bp);
		THREAD_SLEEPING_OK();
	}
}

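/*
 * Synchronous convenience read: allocate a buffer, issue BIO_READ, and
 * wait.  Returns the buffer on success (to be released with g_free())
 * or NULL on error, with *error set when the caller supplies it.
 */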
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= maxphys, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (errorc == 0 && bp->bio_completed != length)
		errorc = EIO;
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

/*
 * A read function for use by ffs_sbget when used by GEOM-layer routines.
 */
int
g_use_g_read_data(void *devfd, off_t loc, void **bufp, int size)
{
	struct g_consumer *cp;

	KASSERT(*bufp == NULL,
	    ("g_use_g_read_data: non-NULL *bufp %p\n", *bufp));

	cp = (struct g_consumer *)devfd;
	/*
	 * Take care not to issue an invalid I/O request.  The offset of
	 * the superblock candidate must be a multiple of the provider's
	 * sector size, otherwise an FFS can't exist on the provider
	 * anyway.
	 */
	if (loc % cp->provider->sectorsize != 0)
		return (ENOENT);
	*bufp = g_read_data(cp, loc, size, NULL);
	if (*bufp == NULL)
		return (ENOENT);
	return (0);
}

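/*
 * Synchronous convenience write: issue BIO_WRITE from the caller's
 * buffer and wait; a short write is reported as EIO.
 */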
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= maxphys, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	if (error == 0 && bp->bio_completed != length)
		error = EIO;
	g_destroy_bio(bp);
	return (error);
}

/*
 * A write function for use by ffs_sbput when used by GEOM-layer routines.
 */
int
g_use_g_write_data(void *devfd, off_t loc, void *buf, int size)
{

	return (g_write_data((struct g_consumer *)devfd, loc, buf, size));
}

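/*
 * Synchronous convenience delete (trim): issue BIO_DELETE over the
 * given range and wait; a short completion is reported as EIO.
 */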
int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	if (error == 0 && bp->bio_completed != length)
		error = EIO;
	g_destroy_bio(bp);
	return (error);
}

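/*
 * Format a one-line description of a bio, bracketed by the caller's
 * prefix string and printf-style suffix, and emit it through the sbuf
 * printf drain.
 */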
void
g_print_bio(const char *prefix, const struct bio *bp, const char *fmtsuffix,
    ...)
{
#ifndef PRINTF_BUFR_SIZE
#define PRINTF_BUFR_SIZE	64
#endif
	char bufr[PRINTF_BUFR_SIZE];
	struct sbuf sb, *sbp __unused;
	va_list ap;

	sbp = sbuf_new(&sb, bufr, sizeof(bufr), SBUF_FIXEDLEN);
	KASSERT(sbp != NULL, ("sbuf_new misused?"));

	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);

	sbuf_cat(&sb, prefix);
	g_format_bio(&sb, bp);

	va_start(ap, fmtsuffix);
	sbuf_vprintf(&sb, fmtsuffix, ap);
	va_end(ap);

	sbuf_nl_terminate(&sb);

	sbuf_finish(&sb);
	sbuf_delete(&sb);
}

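/*
 * Append a human-readable summary of a bio's command and range to the
 * supplied sbuf.
 */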
void
g_format_bio(struct sbuf *sb, const struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else if (bp->bio_parent != NULL && bp->bio_parent->bio_to != NULL)
		pname = bp->bio_parent->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		sbuf_printf(sb, "%s[%s(attr=%s)]", pname, cmd,
		    bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		sbuf_printf(sb, "%s[%s]", pname, cmd);
		return;
	case BIO_ZONE: {
		char *subcmd = NULL;
		cmd = "ZONE";
		switch (bp->bio_zone.zone_cmd) {
		case DISK_ZONE_OPEN:
			subcmd = "OPEN";
			break;
		case DISK_ZONE_CLOSE:
			subcmd = "CLOSE";
			break;
		case DISK_ZONE_FINISH:
			subcmd = "FINISH";
			break;
		case DISK_ZONE_RWP:
			subcmd = "RWP";
			break;
		case DISK_ZONE_REPORT_ZONES:
			subcmd = "REPORT ZONES";
			break;
		case DISK_ZONE_GET_PARAMS:
			subcmd = "GET PARAMS";
			break;
		default:
			subcmd = "UNKNOWN";
			break;
		}
		sbuf_printf(sb, "%s[%s,%s]", pname, cmd, subcmd);
		return;
	}
	case BIO_READ:
		cmd = "READ";
		break;
	case BIO_WRITE:
		cmd = "WRITE";
		break;
	case BIO_DELETE:
		cmd = "DELETE";
		break;
	default:
		cmd = "UNKNOWN";
		sbuf_printf(sb, "%s[%s()]", pname, cmd);
		return;
	}
	sbuf_printf(sb, "%s[%s(offset=%jd, length=%jd)]", pname, cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}