GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/fs/smbfs/smbfs_io.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern uma_zone_t smbfs_pbuf_zone;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");

#define DE_SIZE (sizeof(struct dirent))

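/*
 * Fill a directory read: offsets 0 and 1 are the synthetic "." and ".."
 * entries, the remaining entries are streamed from the server through
 * the findopen/findnext search context cached in the smbnode.
 */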
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred, int *eofp)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred *scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error/*, *eofflag = ap->a_eofflag*/;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	scred = smbfs_malloc_scred();
	smb_makescred(scred, uio->uio_td, cred);
	if (eofp != NULL)
		*eofp = 0;
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}
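	/*
	 * Synthesize the "." and ".." entries at directory offsets 0 and
	 * 1; the server-side search below starts at offset 2.
	 */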
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? np->n_parentino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_off = offset + 1;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_type = DT_DIR;
		dirent_terminate(&de);
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			goto out;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0) {
		error = 0;
		goto out;
	}
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    scred, &ctx);
		if (error) {
			SMBVDEBUG("cannot open search, error = %d", error);
			goto out;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
			goto out1;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_off = offset + 1;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		dirent_terminate(&de);
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	uio->uio_offset = offset * DE_SIZE;
out1:
	if (error == ENOENT) {
		if (eofp != NULL)
			*eofp = 1;
		error = 0;
	}
out:
	smbfs_free_scred(scred);
	return error;
}

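/*
 * Read from a vnode.  Directories are handed to smbfs_readvdir(); for
 * regular files the attribute cache is revalidated first, so that a
 * server-side mtime change invalidates locally cached buffers before
 * the read proceeds.
 */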
int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int *eofp)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred *scred;
	int error, lks;

	/*
	 * Protect against a method that is not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred, eofp);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	return (error);
}

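/*
 * Write to a regular file.  IO_APPEND and IO_SYNC flush any cached
 * dirty data first; a successful write that extends the file updates
 * both np->n_size and the VM object size.
 */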
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred *scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG are unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client.
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error)
				return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	error = vn_rlimit_fsize(vp, uiop, td);
	if (error != 0)
		return (error);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	SMBVDEBUG("after: ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio *uiop;
	struct iovec io;
	struct smb_cred *scred;
	int error = 0;

	uiop = malloc(sizeof(struct uio), M_SMBFSDATA, M_WAITOK);
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cr);

	if (bp->b_iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
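			/* b_blkno counts DEV_BSIZE (512-byte) blocks. */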
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
			if (error)
				break;
			if (uiop->uio_resid) {
				int left = uiop->uio_resid;
				int nread = bp->b_bcount - left;
				if (left > 0)
					bzero((char *)bp->b_data + nread, left);
			}
			break;
		default:
			printf("smbfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_error = error;
			bp->b_ioflags |= BIO_ERROR;
		}
	} else { /* write */
		if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
			bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			error = smb_write(smp->sm_share, np->n_fid, uiop, scred);

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a no-op.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused.  This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 */
			if (error == EINTR
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_error = error;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			free(uiop, M_SMBFSDATA);
			smbfs_free_scred(scred);
			return 0;
		}
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	free(uiop, M_SMBFSDATA);
	smbfs_free_scred(scred);
	return error;
}

/*
 * Vnode op for VM getpages.
 * It would be nice to get rid of the multiple I/O routines.
 */
int
smbfs_getpages(struct vop_getpages_args *ap)
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_object_t object;
	vm_page_t *pages;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	npages = ap->a_count;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 *
	 * XXXGL: is that true for SMB filesystem?
	 */
	VM_OBJECT_WLOCK(object);
	if (!vm_page_none_valid(pages[npages - 1]) && --npages == 0)
		goto out;
	VM_OBJECT_WUNLOCK(object);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);

	bp = uma_zalloc(smbfs_pbuf_zone, M_WAITOK);

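	/*
	 * Map the pages into the pbuf's kernel virtual address range so
	 * smb_read() can fill them through a UIO_SYSSPACE uio.
	 */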
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodein);
	VM_CNT_ADD(v_vnodepgsin, npages);

	count = npages << PAGE_SHIFT;
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
	pmap_qremove(kva, npages);

	uma_zfree(smbfs_pbuf_zone, bp);

	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page.
			 */
			vm_page_valid(m);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			vm_page_invalid(m);
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
	if (ap->a_rbehind)
		*ap->a_rbehind = 0;
	if (ap->a_rahead)
		*ap->a_rahead = 0;
	return (VM_PAGER_OK);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(struct vop_putpages_args *ap)
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}

	bp = uma_zalloc(smbfs_pbuf_zone, M_WAITOK);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodeout);
	VM_CNT_ADD(v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uio.uio_offset,
	    uio.uio_resid);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	uma_zfree(smbfs_pbuf_zone, bp);

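	/*
	 * On success, let the pager mark the pages that were actually
	 * written clean and fill in rtvals accordingly.
	 */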
	if (error == 0) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid,
		    npages * PAGE_SIZE, npages * PAGE_SIZE);
	}
	return (rtvals[0]);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (VN_IS_DOOMED(vp))
		return 0;

	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		/* Wake up periodically; only a signal should abort the wait. */
		(void)tsleep(&np->n_flag, PRIBIO, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

	vnode_pager_clean_sync(vp);
	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}