GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/fs/fuse/fuse_io.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007-2009 Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (C) 2005 Csaba Henk.
 * All rights reserved.
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * Portions of this software were developed by BFF Storage Systems, LLC under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/filedesc.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

#include "fuse.h"
#include "fuse_file.h"
#include "fuse_node.h"
#include "fuse_internal.h"
#include "fuse_ipc.h"
#include "fuse_io.h"

/*
 * Set in a struct buf to indicate that the write came from the buffer cache
 * and the originating cred and pid are no longer known.
 */
#define B_FUSEFS_WRITE_CACHE B_FS_FLAG1

SDT_PROVIDER_DECLARE(fusefs);
/*
 * Fuse trace probe:
 * arg0: verbosity.  Higher numbers give more verbose messages
 * arg1: Textual message
 */
SDT_PROBE_DEFINE2(fusefs, , io, trace, "int", "char*");

SDT_PROBE_DEFINE4(fusefs, , io, read_bio_backend_start, "int", "int", "int", "int");
SDT_PROBE_DEFINE2(fusefs, , io, read_bio_backend_feed, "int", "struct buf*");
SDT_PROBE_DEFINE4(fusefs, , io, read_bio_backend_end, "int", "ssize_t", "int",
    "struct buf*");
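/*
 * Read through the buffer cache: map the uio onto logical blocks of the
 * file, bring each block in with bread()/cluster_read()/breadn() (using
 * readahead when the sequential heuristic and the daemon's
 * max_readahead_blocks allow it), and copy the cached data out to the
 * caller's uio.  A short read from the backend is treated as EOF.
 */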
int
fuse_read_biobackend(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *cred, struct fuse_filehandle *fufh, pid_t pid)
{
	struct buf *bp;
	struct mount *mp;
	struct fuse_data *data;
	daddr_t lbn, nextlbn;
	int bcount, nextsize;
	int err, n = 0, on = 0, seqcount;
	off_t filesize;

	const int biosize = fuse_iosize(vp);
	mp = vnode_mount(vp);
	data = fuse_get_mpdata(mp);

	if (uio->uio_offset < 0)
		return (EINVAL);

	seqcount = ioflag >> IO_SEQSHIFT;

	err = fuse_vnode_size(vp, &filesize, cred, curthread);
	if (err)
		return err;

	for (err = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if (fuse_isdeadfs(vp)) {
			err = ENXIO;
			break;
		}
		if (filesize - uio->uio_offset <= 0)
			break;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);

		if ((off_t)lbn * biosize >= filesize) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > filesize) {
			bcount = filesize - (off_t)lbn * biosize;
		} else {
			bcount = biosize;
		}
		nextlbn = lbn + 1;
		nextsize = MIN(biosize, filesize - nextlbn * biosize);

		SDT_PROBE4(fusefs, , io, read_bio_backend_start,
		    biosize, (int)lbn, on, bcount);

		if (bcount < biosize) {
			/* If near EOF, don't do readahead */
			err = bread(vp, lbn, bcount, NOCRED, &bp);
		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			/* Try clustered read */
			long totread = uio->uio_resid + on;
			seqcount = MIN(seqcount,
			    data->max_readahead_blocks + 1);
			err = cluster_read(vp, filesize, lbn, bcount, NOCRED,
			    totread, seqcount, 0, &bp);
		} else if (seqcount > 1 && data->max_readahead_blocks >= 1) {
			/* Try non-clustered readahead */
			err = breadn(vp, lbn, bcount, &nextlbn, &nextsize, 1,
			    NOCRED, &bp);
		} else {
			/* Just read what was requested */
			err = bread(vp, lbn, bcount, NOCRED, &bp);
		}

		if (err) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount - bp->b_resid)
			n = MIN((unsigned)(bcount - bp->b_resid - on),
			    uio->uio_resid);
		if (n > 0) {
			SDT_PROBE2(fusefs, , io, read_bio_backend_feed, n, bp);
			err = uiomove(bp->b_data + on, n, uio);
		}
		vfs_bio_brelse(bp, ioflag);
		SDT_PROBE4(fusefs, , io, read_bio_backend_end, err,
		    uio->uio_resid, n, bp);
		if (bp->b_resid > 0) {
			/* Short read indicates EOF */
			break;
		}
	}

	return (err);
}

SDT_PROBE_DEFINE1(fusefs, , io, read_directbackend_start,
    "struct fuse_read_in*");
SDT_PROBE_DEFINE3(fusefs, , io, read_directbackend_complete,
    "struct fuse_dispatcher*", "struct fuse_read_in*", "struct uio*");

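/*
 * Read by sending FUSE_READ requests directly to the daemon, bypassing the
 * buffer cache.  The uio is satisfied one chunk at a time, each chunk at
 * most max_read bytes; a reply shorter than requested ends the loop (EOF
 * or direct I/O).
 */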
int
fuse_read_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh)
{
	struct fuse_data *data;
	struct fuse_dispatcher fdi;
	struct fuse_read_in *fri;
	int err = 0;

	data = fuse_get_mpdata(vp->v_mount);

	if (uio->uio_resid == 0)
		return (0);

	fdisp_init(&fdi, 0);

	/*
	 * XXX In the "normal" case we use an intermediate kernel buffer for
	 * transmitting data from the daemon's context to ours.  Eventually,
	 * we should get rid of this.  Anyway, if the target uio lives in
	 * sysspace (we are called from pageops), and the input data doesn't
	 * need kernel-side processing (we are not called from readdir), we
	 * could already invoke an optimized, "peer-to-peer" I/O routine.
	 */
	while (uio->uio_resid > 0) {
		fdi.iosize = sizeof(*fri);
		fdisp_make_vp(&fdi, FUSE_READ, vp, uio->uio_td, cred);
		fri = fdi.indata;
		fri->fh = fufh->fh_id;
		fri->offset = uio->uio_offset;
		fri->size = MIN(uio->uio_resid,
		    fuse_get_mpdata(vp->v_mount)->max_read);
		if (fuse_libabi_geq(data, 7, 9)) {
			/* See comment regarding FUSE_WRITE_LOCKOWNER */
			fri->read_flags = 0;
			fri->flags = fufh_type_2_fflags(fufh->fufh_type);
		}

		SDT_PROBE1(fusefs, , io, read_directbackend_start, fri);

		if ((err = fdisp_wait_answ(&fdi)))
			goto out;

		SDT_PROBE3(fusefs, , io, read_directbackend_complete,
		    &fdi, fri, uio);

		if ((err = uiomove(fdi.answ, MIN(fri->size, fdi.iosize), uio)))
			break;
		if (fdi.iosize < fri->size) {
			/*
			 * Short read.  Should only happen at EOF or with
			 * direct io.
			 */
			break;
		}
	}

out:
	fdisp_destroy(&fdi);
	return (err);
}

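/*
 * Write by sending FUSE_WRITE requests directly to the daemon.  The uio is
 * consumed one chunk at a time, each chunk at most max_write bytes.  Short
 * writes are resent (or returned early for direct I/O), and the cached
 * file size is updated when the write extends the file.
 */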
int
fuse_write_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, off_t filesize,
    int ioflag, bool pages)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_data *data;
	struct fuse_write_in *fwi;
	struct fuse_write_out *fwo;
	struct fuse_dispatcher fdi;
	size_t chunksize;
	ssize_t r;
	void *fwi_data;
	off_t as_written_offset;
	int diff;
	int err = 0;
	bool direct_io = fufh->fuse_open_flags & FOPEN_DIRECT_IO;
	bool wrote_anything = false;
	uint32_t write_flags;

	data = fuse_get_mpdata(vp->v_mount);

	/*
	 * Don't set FUSE_WRITE_LOCKOWNER in write_flags.  It can't be set
	 * accurately when using POSIX AIO, libfuse doesn't use it, and I'm not
	 * aware of any file systems that do.  It was an attempt to add
	 * Linux-style mandatory locking to the FUSE protocol, but mandatory
	 * locking is deprecated even on Linux.  See Linux commit
	 * f33321141b273d60cbb3a8f56a5489baad82ba5e .
	 */
	/*
	 * Set FUSE_WRITE_CACHE whenever we don't know the uid, gid, and/or pid
	 * that originated a write.  For example when writing from the
	 * writeback cache.  I don't know of a single file system that cares,
	 * but the protocol says we're supposed to do this.
	 */
	write_flags = !pages && (
	    (ioflag & IO_DIRECT) ||
	    !fsess_opt_datacache(vnode_mount(vp)) ||
	    !fsess_opt_writeback(vnode_mount(vp))) ? 0 : FUSE_WRITE_CACHE;

	if (uio->uio_resid == 0)
		return (0);

	if (ioflag & IO_APPEND)
		uio_setoffset(uio, filesize);

	err = vn_rlimit_fsizex(vp, uio, 0, &r, uio->uio_td);
	if (err != 0) {
		vn_rlimit_fsizex_res(uio, r);
		return (err);
	}

	fdisp_init(&fdi, 0);

	while (uio->uio_resid > 0) {
		size_t sizeof_fwi;

		if (fuse_libabi_geq(data, 7, 9)) {
			sizeof_fwi = sizeof(*fwi);
		} else {
			sizeof_fwi = FUSE_COMPAT_WRITE_IN_SIZE;
		}

		chunksize = MIN(uio->uio_resid, data->max_write);

		fdi.iosize = sizeof_fwi + chunksize;
		fdisp_make_vp(&fdi, FUSE_WRITE, vp, uio->uio_td, cred);

		fwi = fdi.indata;
		fwi->fh = fufh->fh_id;
		fwi->offset = uio->uio_offset;
		fwi->size = chunksize;
		fwi->write_flags = write_flags;
		if (fuse_libabi_geq(data, 7, 9)) {
			fwi->flags = fufh_type_2_fflags(fufh->fufh_type);
		}
		fwi_data = (char *)fdi.indata + sizeof_fwi;

		if ((err = uiomove(fwi_data, chunksize, uio)))
			break;

retry:
		err = fdisp_wait_answ(&fdi);
		if (err == ERESTART || err == EINTR || err == EWOULDBLOCK) {
			/*
			 * Rewind the uio so dofilewrite will know it's
			 * incomplete
			 */
			uio->uio_resid += fwi->size;
			uio->uio_offset -= fwi->size;
			/*
			 * Change ERESTART into EINTR because we can't rewind
			 * uio->uio_iov.  Basically, once uiomove(9) has been
			 * called, it's impossible to restart a syscall.
			 */
			if (err == ERESTART)
				err = EINTR;
			break;
		} else if (err) {
			break;
		} else {
			wrote_anything = true;
		}

		fwo = ((struct fuse_write_out *)fdi.answ);

		if (fwo->size > fwi->size) {
			fuse_warn(data, FSESS_WARN_WROTE_LONG,
			    "wrote more data than we provided it.");
			/* This is bonkers.  Clear attr cache. */
			fvdat->flag &= ~FN_SIZECHANGE;
			fuse_vnode_clear_attr_cache(vp);
			err = EINVAL;
			break;
		}

		/* Adjust the uio in the case of short writes */
		diff = fwi->size - fwo->size;

		as_written_offset = uio->uio_offset - diff;

		if (as_written_offset - diff > filesize) {
			fuse_vnode_setsize(vp, as_written_offset, false);
			getnanouptime(&fvdat->last_local_modify);
		}
		if (as_written_offset - diff >= filesize)
			fvdat->flag &= ~FN_SIZECHANGE;

		if (diff > 0) {
			/* Short write */
			if (!direct_io) {
				fuse_warn(data, FSESS_WARN_SHORT_WRITE,
				    "short writes are only allowed with "
				    "direct_io.");
			}
			if (ioflag & IO_DIRECT) {
				/* Return early */
				uio->uio_resid += diff;
				uio->uio_offset -= diff;
				break;
			} else {
				/* Resend the unwritten portion of data */
				fdi.iosize = sizeof_fwi + diff;
				/* Refresh fdi without clearing data buffer */
				fdisp_refresh_vp(&fdi, FUSE_WRITE, vp,
				    uio->uio_td, cred);
				fwi = fdi.indata;
				MPASS2(fwi == fdi.indata, "FUSE dispatcher "
				    "reallocated despite no increase in "
				    "size?");
				void *src = (char *)fwi_data + fwo->size;
				memmove(fwi_data, src, diff);
				fwi->fh = fufh->fh_id;
				fwi->offset = as_written_offset;
				fwi->size = diff;
				fwi->write_flags = write_flags;
				goto retry;
			}
		}
	}

	fdisp_destroy(&fdi);

	if (wrote_anything)
		fuse_vnode_undirty_cached_timestamps(vp, false);

	vn_rlimit_fsizex_res(uio, r);
	return (err);
}

SDT_PROBE_DEFINE6(fusefs, , io, write_biobackend_start, "int64_t", "int", "int",
    "struct uio*", "int", "bool");
SDT_PROBE_DEFINE2(fusefs, , io, write_biobackend_append_race, "long", "int");
SDT_PROBE_DEFINE2(fusefs, , io, write_biobackend_issue, "int", "struct buf*");

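/*
 * Write through the buffer cache: get or create a buffer for each logical
 * block, perform a read-modify-write when the write does not cover the
 * whole block, track the dirty region, and then issue the buffer
 * synchronously, asynchronously, clustered, or delayed depending on ioflag
 * and memory/buffer pressure.
 */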
int
fuse_write_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, int ioflag, pid_t pid)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct buf *bp;
	daddr_t lbn;
	off_t filesize;
	ssize_t r;
	int bcount;
	int n, on, seqcount, err = 0;

	const int biosize = fuse_iosize(vp);

	seqcount = ioflag >> IO_SEQSHIFT;

	KASSERT(uio->uio_rw == UIO_WRITE, ("fuse_write_biobackend mode"));
	if (vp->v_type != VREG)
		return (EIO);
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);

	err = fuse_vnode_size(vp, &filesize, cred, curthread);
	if (err)
		return err;

	if (ioflag & IO_APPEND)
		uio_setoffset(uio, filesize);

	err = vn_rlimit_fsizex(vp, uio, 0, &r, uio->uio_td);
	if (err != 0) {
		vn_rlimit_fsizex_res(uio, r);
		return (err);
	}

	do {
		bool direct_append, extending;

		if (fuse_isdeadfs(vp)) {
			err = ENXIO;
			break;
		}
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);

again:
		/* Get or create a buffer for the write */
		direct_append = uio->uio_offset == filesize && n;
		if (uio->uio_offset + n < filesize) {
			extending = false;
			if ((off_t)(lbn + 1) * biosize < filesize) {
				/* Not the file's last block */
				bcount = biosize;
			} else {
				/* The file's last block */
				bcount = filesize - (off_t)lbn * biosize;
			}
		} else {
			extending = true;
			bcount = on + n;
		}
		if (direct_append) {
			/*
			 * Take care to preserve the buffer's B_CACHE state so
			 * as not to cause an unnecessary read.
			 */
			bp = getblk(vp, lbn, on, PCATCH, 0, 0);
			if (bp != NULL) {
				uint32_t save = bp->b_flags & B_CACHE;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
		}
		if (!bp) {
			err = EINTR;
			break;
		}
		if (extending) {
			/*
			 * Extend file _after_ locking buffer so we won't race
			 * with other readers
			 */
			err = fuse_vnode_setsize(vp, uio->uio_offset + n, false);
			filesize = uio->uio_offset + n;
			getnanouptime(&fvdat->last_local_modify);
			fvdat->flag |= FN_SIZECHANGE;
			if (err) {
				brelse(bp);
				break;
			}
		}

		SDT_PROBE6(fusefs, , io, write_biobackend_start,
		    lbn, on, n, uio, bcount, direct_append);
		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			fuse_io_strategy(vp, bp);
			if ((err = bp->b_error)) {
				brelse(bp);
				break;
			}
			if (bp->b_resid > 0) {
				/*
				 * Short read indicates EOF.  Update file size
				 * from the server and try again.
				 */
				SDT_PROBE2(fusefs, , io, trace, 1,
				    "Short read during a RMW");
				brelse(bp);
				err = fuse_vnode_size(vp, &filesize, cred,
				    curthread);
				if (err)
					break;
				else
					goto again;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */
		if (bp->b_dirtyend > bcount) {
			SDT_PROBE2(fusefs, , io, write_biobackend_append_race,
			    (long)bp->b_blkno * biosize,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}
		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer (and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			/*
			 * Yes, we mean it.  Write out everything to "storage"
			 * immediately, without hesitation.  (Apart from other
			 * reasons: the only way to know if a write is valid
			 * is if it's actually written out.)
			 */
			SDT_PROBE2(fusefs, , io, write_biobackend_issue, 0, bp);
			bwrite(bp);
			if (bp->b_error == EINTR) {
				err = EINTR;
				break;
			}
			goto again;
		}
		err = uiomove((char *)bp->b_data + on, n, uio);

		if (err) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = err;
			brelse(bp);
			break;
			/* TODO: vfs_bio_clrbuf like ffs_write does? */
		}
		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = MIN(on, bp->b_dirtyoff);
				bp->b_dirtyend = MAX((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}

		vfs_bio_set_flags(bp, ioflag);

		bp->b_flags |= B_FUSEFS_WRITE_CACHE;
		if (ioflag & IO_SYNC) {
			SDT_PROBE2(fusefs, , io, write_biobackend_issue, 2, bp);
			if (!(ioflag & IO_VMIO))
				bp->b_flags &= ~B_FUSEFS_WRITE_CACHE;
			err = bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			SDT_PROBE2(fusefs, , io, write_biobackend_issue, 3, bp);
			bawrite(bp);
		} else if (on == 0 && n == bcount) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				SDT_PROBE2(fusefs, , io, write_biobackend_issue,
				    4, bp);
				cluster_write(vp, &fvdat->clusterw, bp,
				    filesize, seqcount, 0);
			} else {
				SDT_PROBE2(fusefs, , io, write_biobackend_issue,
				    5, bp);
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			SDT_PROBE2(fusefs, , io, write_biobackend_issue, 6, bp);
			bawrite(bp);
		} else {
			bp->b_flags &= ~B_CLUSTEROK;
			SDT_PROBE2(fusefs, , io, write_biobackend_issue, 7, bp);
			bdwrite(bp);
		}
		if (err)
			break;
	} while (uio->uio_resid > 0 && n > 0);

	vn_rlimit_fsizex_res(uio, r);
	return (err);
}

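/*
 * Perform the I/O described by a buf: translate the buffer into a uio and
 * hand it to fuse_read_directbackend() or fuse_write_directbackend(),
 * then record the result in the buffer's error and resid fields before
 * completing it with bufdone().
 */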
int
fuse_io_strategy(struct vnode *vp, struct buf *bp)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_filehandle *fufh;
	struct ucred *cred;
	struct uio *uiop;
	struct uio uio;
	struct iovec io;
	off_t filesize;
	int error = 0;
	int fflag;
	/* We don't know the true pid when we're dealing with the cache */
	pid_t pid = 0;

	const int biosize = fuse_iosize(vp);

	MPASS(vp->v_type == VREG || vp->v_type == VDIR);
	MPASS(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE);

	fflag = bp->b_iocmd == BIO_READ ? FREAD : FWRITE;
	cred = bp->b_iocmd == BIO_READ ? bp->b_rcred : bp->b_wcred;
	error = fuse_filehandle_getrw(vp, fflag, &fufh, cred, pid);
	if (bp->b_iocmd == BIO_READ && error == EBADF) {
		/*
		 * This may be a read-modify-write operation on a cached file
		 * opened O_WRONLY.  The FUSE protocol allows this.
		 */
		error = fuse_filehandle_get(vp, FWRITE, &fufh, cred, pid);
	}
	if (error) {
		printf("FUSE: strategy: filehandles are closed\n");
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
		bufdone(bp);
		return (error);
	}

	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = curthread;

	/*
	 * Clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE),
	    ("fuse_io_strategy: bp %p already marked done", bp));
	if (bp->b_iocmd == BIO_READ) {
		ssize_t left;

		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		uiop->uio_offset = ((off_t)bp->b_lblkno) * biosize;
		error = fuse_read_directbackend(vp, uiop, cred, fufh);
		/*
		 * Store the amount we failed to read in the buffer's private
		 * field, so callers can truncate the file if necessary.
		 */

		if (!error && uiop->uio_resid) {
			int nread = bp->b_bcount - uiop->uio_resid;
			left = uiop->uio_resid;
			bzero((char *)bp->b_data + nread, left);

			if ((fvdat->flag & FN_SIZECHANGE) == 0) {
				/*
				 * A short read with no error, when not using
				 * direct io, and when no writes are cached,
				 * indicates EOF caused by a server-side
				 * truncation.  Clear the attr cache so we'll
				 * pick up the new file size and timestamps.
				 *
				 * We must still bzero the remaining buffer so
				 * uninitialized data doesn't get exposed by a
				 * future truncate that extends the file.
				 *
				 * To prevent lock order problems, we must
				 * truncate the file upstack, not here.
				 */
				SDT_PROBE2(fusefs, , io, trace, 1,
				    "Short read of a clean file");
				fuse_vnode_clear_attr_cache(vp);
			} else {
				/*
				 * If dirty writes _are_ cached beyond EOF,
				 * that indicates a newly created hole that the
				 * server doesn't know about.  Those don't pose
				 * any problem.
				 * XXX: we don't currently track whether dirty
				 * writes are cached beyond EOF, before EOF, or
				 * both.
				 */
				SDT_PROBE2(fusefs, , io, trace, 1,
				    "Short read of a dirty file");
				uiop->uio_resid = 0;
			}
		}
		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = error;
		}
	} else {
		/*
		 * Setup for actual write
		 */
		/*
		 * If the file's size is cached, use that value, even if the
		 * cache is expired.  At this point we're already committed to
		 * writing something.  If the FUSE server has changed the
		 * file's size behind our back, it's too late for us to do
		 * anything about it.  In particular, we can't invalidate any
		 * part of the file's buffers because VOP_STRATEGY is called
		 * with them already locked.
		 */
		filesize = fvdat->cached_attrs.va_size;
		/* filesize must've been cached by fuse_vnop_open.  */
		KASSERT(filesize != VNOVAL, ("filesize should've been cached"));

		if ((off_t)bp->b_lblkno * biosize + bp->b_dirtyend > filesize)
			bp->b_dirtyend = filesize -
			    (off_t)bp->b_lblkno * biosize;

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
			    - bp->b_dirtyoff;
			uiop->uio_offset = (off_t)bp->b_lblkno * biosize
			    + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;

			bool pages = bp->b_flags & B_FUSEFS_WRITE_CACHE;
			error = fuse_write_directbackend(vp, uiop, cred, fufh,
			    filesize, 0, pages);

			if (error == EINTR || error == ETIMEDOUT) {
				bp->b_flags &= ~(B_INVAL | B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if ((error == EINTR || error == ETIMEDOUT) &&
				    (bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags |= B_INVAL;
					bp->b_error = error;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	return (error);
}

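/* Write back the vnode's dirty buffers via vn_fsync_buf(). */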
int
fuse_io_flushbuf(struct vnode *vp, int waitfor, struct thread *td)
{

	return (vn_fsync_buf(vp, waitfor));
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
fuse_io_invalbuf(struct vnode *vp, struct thread *td)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	int error = 0;

	if (VN_IS_DOOMED(vp))
		return 0;

	ASSERT_VOP_ELOCKED(vp, "fuse_io_invalbuf");

	while (fvdat->flag & FN_FLUSHINPROG) {
		struct proc *p = td->td_proc;

		if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF)
			return EIO;
		fvdat->flag |= FN_FLUSHWANT;
		tsleep(&fvdat->flag, PRIBIO, "fusevinv", 2 * hz);
		error = 0;
		if (p != NULL) {
			PROC_LOCK(p);
			if (SIGNOTEMPTY(p->p_siglist) ||
			    SIGNOTEMPTY(td->td_siglist))
				error = EINTR;
			PROC_UNLOCK(p);
		}
		if (error == EINTR)
			return EINTR;
	}
	fvdat->flag |= FN_FLUSHINPROG;

	vnode_pager_clean_sync(vp);
	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			fvdat->flag &= ~FN_FLUSHINPROG;
			if (fvdat->flag & FN_FLUSHWANT) {
				fvdat->flag &= ~FN_FLUSHWANT;
				wakeup(&fvdat->flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	fvdat->flag &= ~FN_FLUSHINPROG;
	if (fvdat->flag & FN_FLUSHWANT) {
		fvdat->flag &= ~FN_FLUSHWANT;
		wakeup(&fvdat->flag);
	}
	return (error);
}
