GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/ufs/ffs/ffs_vfsops.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/gsb_crc32.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
VFS_SMR_DECLARE;

static int ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int ffs_sync_lazy(struct mount *mp);
static int ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
static int ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
        .vfs_extattrctl = ffs_extattrctl,
        .vfs_fhtovp = ffs_fhtovp,
        .vfs_init = ffs_init,
        .vfs_mount = ffs_mount,
        .vfs_cmount = ffs_cmount,
        .vfs_quotactl = ufs_quotactl,
        .vfs_root = vfs_cache_root,
        .vfs_cachedroot = ufs_root,
        .vfs_statfs = ffs_statfs,
        .vfs_sync = ffs_sync,
        .vfs_uninit = ffs_uninit,
        .vfs_unmount = ffs_unmount,
        .vfs_vget = ffs_vget,
        .vfs_susp_clean = process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, VFCF_FILEREVINC);
MODULE_VERSION(ufs, 1);

static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
        .bop_name = "FFS",
        .bop_write = ffs_bufwrite,
        .bop_strategy = ffs_geom_strategy,
        .bop_sync = bufsync,
#ifdef NO_FFS_SNAPSHOT
        .bop_bdflush = bufbdflush,
#else
        .bop_bdflush = ffs_bdflush,
#endif
};

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };

static int ffs_enxio_enable = 1;
SYSCTL_DECL(_vfs_ffs);
SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN,
    &ffs_enxio_enable, 0,
    "enable mapping of other disk I/O errors to ENXIO");

/*
 * Return buffer with the contents of block "offset" from the beginning of
 * directory "ip". If "res" is non-zero, fill it in with a pointer to the
 * remaining space in the directory.
 */
static int
ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
{
        struct inode *ip;
        struct fs *fs;
        struct buf *bp;
        ufs_lbn_t lbn;
        int bsize, error;

        ip = VTOI(vp);
        fs = ITOFS(ip);
        lbn = lblkno(fs, offset);
        bsize = blksize(fs, ip, lbn);

        *bpp = NULL;
        error = bread(vp, lbn, bsize, NOCRED, &bp);
        if (error) {
                return (error);
        }
        if (res)
                *res = (char *)bp->b_data + blkoff(fs, offset);
        *bpp = bp;
        return (0);
}
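
/*
 * Note that ffs_blkatoff() is not called directly by the VFS; it is
 * installed as ump->um_blkatoff in ffs_mountfs() below and reached from
 * the UFS layer through the UFS_BLKATOFF() indirection (see
 * ufs/ufsmount.h).
 */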

/*
 * Load up the contents of an inode and copy the appropriate pieces
 * to the incore copy.
 */
static int
ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino)
{
        struct ufs1_dinode *dip1;
        struct ufs2_dinode *dip2;
        int error;

        if (I_IS_UFS1(ip)) {
                dip1 = ip->i_din1;
                *dip1 =
                    *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
                ip->i_mode = dip1->di_mode;
                ip->i_nlink = dip1->di_nlink;
                ip->i_effnlink = dip1->di_nlink;
                ip->i_size = dip1->di_size;
                ip->i_flags = dip1->di_flags;
                ip->i_gen = dip1->di_gen;
                ip->i_uid = dip1->di_uid;
                ip->i_gid = dip1->di_gid;
                if (ffs_oldfscompat_inode_read(fs, ip->i_dp, time_second) &&
                    fs->fs_ronly == 0)
                        UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
                return (0);
        }
        dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
        if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 &&
            !ffs_fsfail_cleanup(ITOUMP(ip), error)) {
                printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt,
                    (intmax_t)ino);
                return (error);
        }
        *ip->i_din2 = *dip2;
        dip2 = ip->i_din2;
        ip->i_mode = dip2->di_mode;
        ip->i_nlink = dip2->di_nlink;
        ip->i_effnlink = dip2->di_nlink;
        ip->i_size = dip2->di_size;
        ip->i_flags = dip2->di_flags;
        ip->i_gen = dip2->di_gen;
        ip->i_uid = dip2->di_uid;
        ip->i_gid = dip2->di_gid;
        if (ffs_oldfscompat_inode_read(fs, ip->i_dp, time_second) &&
            fs->fs_ronly == 0)
                UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
        return (0);
}

/*
 * Verify that a filesystem block number is a valid data block.
 * This routine is only called on untrusted filesystems.
 */
static int
ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize)
{
        struct fs *fs;
        struct ufsmount *ump;
        ufs2_daddr_t end_daddr;
        int cg, havemtx;

        KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0,
            ("ffs_check_blkno called on a trusted file system"));
        ump = VFSTOUFS(mp);
        fs = ump->um_fs;
        cg = dtog(fs, daddr);
        end_daddr = daddr + numfrags(fs, blksize);
        /*
         * Verify that the block number is a valid data block. Also check
         * that it does not point to an inode block or a superblock. Accept
         * blocks that are unallocated (0) or part of snapshot metadata
         * (BLK_NOCOPY or BLK_SNAP).
         *
         * Thus, the block must be in a valid range for the filesystem and
         * either in the space before a backup superblock (except the first
         * cylinder group where that space is used by the bootstrap code) or
         * after the inode blocks and before the end of the cylinder group.
         */
        if ((uint64_t)daddr <= BLK_SNAP ||
            ((uint64_t)end_daddr <= fs->fs_size &&
            ((cg > 0 && end_daddr <= cgsblock(fs, cg)) ||
            (daddr >= cgdmin(fs, cg) &&
            end_daddr <= cgbase(fs, cg) + fs->fs_fpg))))
                return (0);
        if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0)
                UFS_LOCK(ump);
        if (ppsratecheck(&ump->um_last_integritymsg,
            &ump->um_secs_integritymsg, 1)) {
                UFS_UNLOCK(ump);
                uprintf("\n%s: inode %jd, out-of-range indirect block "
                    "number %jd\n", mp->mnt_stat.f_mntonname, inum, daddr);
                if (havemtx)
                        UFS_LOCK(ump);
        } else if (!havemtx)
                UFS_UNLOCK(ump);
        return (EINTEGRITY);
}

/*
 * On first ENXIO error, initiate an asynchronous forcible unmount.
 * Used to unmount filesystems whose underlying media has gone away.
 *
 * Return true if a cleanup is in progress.
 */
int
ffs_fsfail_cleanup(struct ufsmount *ump, int error)
{
        int retval;

        UFS_LOCK(ump);
        retval = ffs_fsfail_cleanup_locked(ump, error);
        UFS_UNLOCK(ump);
        return (retval);
}

int
ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error)
{
        mtx_assert(UFS_MTX(ump), MA_OWNED);
        if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) {
                ump->um_flags |= UM_FSFAIL_CLEANUP;
                if (ump->um_mountp == rootvnode->v_mount)
                        panic("UFS: root fs would be forcibly unmounted");

                /*
                 * Queue an async forced unmount.
                 */
                vfs_ref(ump->um_mountp);
                dounmount(ump->um_mountp,
                    MNT_FORCE | MNT_RECURSE | MNT_DEFERRED, curthread);
                printf("UFS: forcibly unmounting %s from %s\n",
                    ump->um_mountp->mnt_stat.f_mntfromname,
                    ump->um_mountp->mnt_stat.f_mntonname);
        }
        return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0);
}
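
/*
 * The usual caller pattern (see ffs_unmount() below) treats an error as
 * already handled once a forcible unmount is underway:
 *
 *	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
 *		goto fail;
 */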

/*
 * Wrapper used during ENXIO cleanup to allocate empty buffers when
 * the kernel is unable to read the real one. They are needed so that
 * the soft updates code can use them to unwind its dependencies.
 */
int
ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno,
    daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt,
    struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *),
    struct buf **bpp)
{
        int error;

        flags |= GB_CVTENXIO;
        error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt,
            cred, flags, ckhashfunc, bpp);
        if (error != 0 && ffs_fsfail_cleanup(ump, error)) {
                error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp);
                KASSERT(error == 0, ("getblkx failed"));
                vfs_bio_bzero_buf(*bpp, 0, size);
        }
        return (error);
}
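
/*
 * GB_CVTENXIO asks the buffer layer to convert I/O errors from a departed
 * device into ENXIO, the trigger that ffs_fsfail_cleanup() recognizes; the
 * zero-filled replacement buffer lets soft updates unwind its dependencies
 * even though no data could be read.
 */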

static int
ffs_mount(struct mount *mp)
{
        struct vnode *devvp, *odevvp;
        struct thread *td;
        struct ufsmount *ump = NULL;
        struct fs *fs;
        int error, flags;
        int error1 __diagused;
        uint64_t mntorflags, saved_mnt_flag;
        accmode_t accmode;
        struct nameidata ndp;
        char *fspec;
        bool mounted_softdep;

        td = curthread;
        if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
                return (EINVAL);
        if (uma_inode == NULL) {
                uma_inode = uma_zcreate("FFS inode",
                    sizeof(struct inode), NULL, NULL, NULL, NULL,
                    UMA_ALIGN_PTR, 0);
                uma_ufs1 = uma_zcreate("FFS1 dinode",
                    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
                    UMA_ALIGN_PTR, 0);
                uma_ufs2 = uma_zcreate("FFS2 dinode",
                    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
                    UMA_ALIGN_PTR, 0);
                VFS_SMR_ZONE_SET(uma_inode);
        }

        vfs_deleteopt(mp->mnt_optnew, "groupquota");
        vfs_deleteopt(mp->mnt_optnew, "userquota");

        fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
        if (error)
                return (error);

        mntorflags = 0;
        if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
                mntorflags |= MNT_UNTRUSTED;

        if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
                mntorflags |= MNT_ACLS;

        if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
                mntorflags |= MNT_SNAPSHOT;
                /*
                 * Once we have set the MNT_SNAPSHOT flag, do not
                 * persist "snapshot" in the options list.
                 */
                vfs_deleteopt(mp->mnt_optnew, "snapshot");
                vfs_deleteopt(mp->mnt_opt, "snapshot");
        }

        if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
                if (mntorflags & MNT_ACLS) {
                        vfs_mount_error(mp,
                            "\"acls\" and \"nfsv4acls\" options "
                            "are mutually exclusive");
                        return (EINVAL);
                }
                mntorflags |= MNT_NFS4ACLS;
        }

        MNT_ILOCK(mp);
        mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
        mp->mnt_flag |= mntorflags;
        MNT_IUNLOCK(mp);

        /*
         * If this is a snapshot request, take the snapshot.
         */
        if (mp->mnt_flag & MNT_SNAPSHOT) {
                if ((mp->mnt_flag & MNT_UPDATE) == 0)
                        return (EINVAL);
                return (ffs_snapshot(mp, fspec));
        }

        /*
         * Must not call namei() while owning busy ref.
         */
        if (mp->mnt_flag & MNT_UPDATE)
                vfs_unbusy(mp);

        /*
         * Not an update, or updating the name: look up the name
         * and verify that it refers to a sensible disk device.
         */
        NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec);
        error = namei(&ndp);
        if ((mp->mnt_flag & MNT_UPDATE) != 0) {
                /*
                 * Unmount does not start if MNT_UPDATE is set. Mount
                 * update busies mp before setting MNT_UPDATE. We
                 * must be able to retain our busy ref successfully,
                 * without sleep.
                 */
                error1 = vfs_busy(mp, MBF_NOWAIT);
                MPASS(error1 == 0);
        }
        if (error != 0)
                return (error);
        NDFREE_PNBUF(&ndp);
        if (!vn_isdisk_error(ndp.ni_vp, &error)) {
                vput(ndp.ni_vp);
                return (error);
        }

        /*
         * If mount by non-root, then verify that user has necessary
         * permissions on the device.
         */
        accmode = VREAD;
        if ((mp->mnt_flag & MNT_RDONLY) == 0)
                accmode |= VWRITE;
        error = VOP_ACCESS(ndp.ni_vp, accmode, td->td_ucred, td);
        if (error)
                error = priv_check(td, PRIV_VFS_MOUNT_PERM);
        if (error) {
                vput(ndp.ni_vp);
                return (error);
        }

        /*
         * New mount
         *
         * We need the name for the mount point (also used for
         * "last mounted on") copied in. If an error occurs,
         * the mount point is discarded by the upper level code.
         * Note that vfs_mount_alloc() populates f_mntonname for us.
         */
        if ((mp->mnt_flag & MNT_UPDATE) == 0) {
                if ((error = ffs_mountfs(ndp.ni_vp, mp, td)) != 0) {
                        vrele(ndp.ni_vp);
                        return (error);
                }
        } else {
                /*
                 * When updating, check whether changing from read-only to
                 * read/write; if there is no device name, that's all we do.
                 */
                ump = VFSTOUFS(mp);
                fs = ump->um_fs;
                odevvp = ump->um_odevvp;
                devvp = ump->um_devvp;

                /*
                 * If it's not the same vnode, or at least the same device
                 * then it's not correct.
                 */
                if (ndp.ni_vp->v_rdev != ump->um_odevvp->v_rdev)
                        error = EINVAL; /* needs translation */
                vput(ndp.ni_vp);
                if (error)
                        return (error);
                if (fs->fs_ronly == 0 &&
                    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
                        /*
                         * Flush any dirty data and suspend filesystem.
                         */
                        if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
                                return (error);
                        error = vfs_write_suspend_umnt(mp);
                        if (error != 0)
                                return (error);

                        fs->fs_ronly = 1;
                        if (MOUNTEDSOFTDEP(mp)) {
                                MNT_ILOCK(mp);
                                mp->mnt_flag &= ~MNT_SOFTDEP;
                                MNT_IUNLOCK(mp);
                                mounted_softdep = true;
                        } else
                                mounted_softdep = false;

                        /*
                         * Check for and optionally get rid of files open
                         * for writing.
                         */
                        flags = WRITECLOSE;
                        if (mp->mnt_flag & MNT_FORCE)
                                flags |= FORCECLOSE;
                        if (mounted_softdep) {
                                error = softdep_flushfiles(mp, flags, td);
                        } else {
                                error = ffs_flushfiles(mp, flags, td);
                        }
                        if (error) {
                                fs->fs_ronly = 0;
                                if (mounted_softdep) {
                                        MNT_ILOCK(mp);
                                        mp->mnt_flag |= MNT_SOFTDEP;
                                        MNT_IUNLOCK(mp);
                                }
                                vfs_write_resume(mp, 0);
                                return (error);
                        }

                        if (fs->fs_pendingblocks != 0 ||
                            fs->fs_pendinginodes != 0) {
                                printf("WARNING: %s Update error: blocks %jd "
                                    "files %d\n", fs->fs_fsmnt,
                                    (intmax_t)fs->fs_pendingblocks,
                                    fs->fs_pendinginodes);
                                fs->fs_pendingblocks = 0;
                                fs->fs_pendinginodes = 0;
                        }
                        if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
                                fs->fs_clean = 1;
                        if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
                                fs->fs_ronly = 0;
                                fs->fs_clean = 0;
                                if (mounted_softdep) {
                                        MNT_ILOCK(mp);
                                        mp->mnt_flag |= MNT_SOFTDEP;
                                        MNT_IUNLOCK(mp);
                                }
                                vfs_write_resume(mp, 0);
                                return (error);
                        }
                        if (mounted_softdep)
                                softdep_unmount(mp);
                        g_topology_lock();
                        /*
                         * Drop our write and exclusive access.
                         */
                        g_access(ump->um_cp, 0, -1, -1);
                        g_topology_unlock();
                        MNT_ILOCK(mp);
                        mp->mnt_flag |= MNT_RDONLY;
                        MNT_IUNLOCK(mp);
                        /*
                         * Allow the writers to note that filesystem
                         * is ro now.
                         */
                        vfs_write_resume(mp, 0);
                }
                if ((mp->mnt_flag & MNT_RELOAD) &&
                    (error = ffs_reload(mp, 0)) != 0) {
                        return (error);
                } else {
                        /* ffs_reload replaces the superblock structure */
                        fs = ump->um_fs;
                }
                if (fs->fs_ronly &&
                    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
                        /*
                         * If upgrade to read-write by non-root, then verify
                         * that user has necessary permissions on the device.
                         */
                        vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY);
                        error = VOP_ACCESS(odevvp, VREAD | VWRITE,
                            td->td_ucred, td);
                        if (error)
                                error = priv_check(td, PRIV_VFS_MOUNT_PERM);
                        VOP_UNLOCK(odevvp);
                        if (error) {
                                return (error);
                        }
                        fs->fs_flags &= ~FS_UNCLEAN;
                        if (fs->fs_clean == 0) {
                                fs->fs_flags |= FS_UNCLEAN;
                                if ((mp->mnt_flag & MNT_FORCE) ||
                                    ((fs->fs_flags &
                                    (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
                                    (fs->fs_flags & FS_DOSOFTDEP))) {
                                        printf("WARNING: %s was not properly "
                                            "dismounted\n",
                                            mp->mnt_stat.f_mntonname);
                                } else {
                                        vfs_mount_error(mp,
                                            "R/W mount of %s denied. %s.%s",
                                            mp->mnt_stat.f_mntonname,
                                            "Filesystem is not clean - run fsck",
                                            (fs->fs_flags & FS_SUJ) == 0 ? "" :
                                            " Forced mount will invalidate"
                                            " journal contents");
                                        return (EPERM);
                                }
                        }
                        g_topology_lock();
                        /*
                         * Request exclusive write access.
                         */
                        error = g_access(ump->um_cp, 0, 1, 1);
                        g_topology_unlock();
                        if (error)
                                return (error);
                        if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
                                return (error);
                        error = vfs_write_suspend_umnt(mp);
                        if (error != 0)
                                return (error);
                        fs->fs_ronly = 0;
                        MNT_ILOCK(mp);
                        saved_mnt_flag = MNT_RDONLY;
                        if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
                            MNT_ASYNC) != 0)
                                saved_mnt_flag |= MNT_ASYNC;
                        mp->mnt_flag &= ~saved_mnt_flag;
                        MNT_IUNLOCK(mp);
                        fs->fs_mtime = time_second;
                        /* check to see if we need to start softdep */
                        if ((fs->fs_flags & FS_DOSOFTDEP) &&
                            (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
                                fs->fs_ronly = 1;
                                MNT_ILOCK(mp);
                                mp->mnt_flag |= saved_mnt_flag;
                                MNT_IUNLOCK(mp);
                                vfs_write_resume(mp, 0);
                                return (error);
                        }
                        fs->fs_clean = 0;
                        if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
                                fs->fs_ronly = 1;
                                if ((fs->fs_flags & FS_DOSOFTDEP) != 0)
                                        softdep_unmount(mp);
                                MNT_ILOCK(mp);
                                mp->mnt_flag |= saved_mnt_flag;
                                MNT_IUNLOCK(mp);
                                vfs_write_resume(mp, 0);
                                return (error);
                        }
                        if (fs->fs_snapinum[0] != 0)
                                ffs_snapshot_mount(mp);
                        vfs_write_resume(mp, 0);
                }
                /*
                 * Soft updates is incompatible with "async",
                 * so if we are doing softupdates stop the user
                 * from setting the async flag in an update.
                 * Softdep_mount() clears it in an initial mount
                 * or ro->rw remount.
                 */
                if (MOUNTEDSOFTDEP(mp)) {
                        /* XXX: Reset too late ? */
                        MNT_ILOCK(mp);
                        mp->mnt_flag &= ~MNT_ASYNC;
                        MNT_IUNLOCK(mp);
                }
                /*
                 * Keep MNT_ACLS flag if it is stored in superblock.
                 */
                if ((fs->fs_flags & FS_ACLS) != 0) {
                        /* XXX: Set too late ? */
                        MNT_ILOCK(mp);
                        mp->mnt_flag |= MNT_ACLS;
                        MNT_IUNLOCK(mp);
                }

                if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
                        /* XXX: Set too late ? */
                        MNT_ILOCK(mp);
                        mp->mnt_flag |= MNT_NFS4ACLS;
                        MNT_IUNLOCK(mp);
                }

        }

        MNT_ILOCK(mp);
        /*
         * This is racy versus lookup, see ufs_fplookup_vexec for details.
         */
        if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0)
                panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp);
        if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0)
                mp->mnt_kern_flag |= MNTK_FPLOOKUP;
        MNT_IUNLOCK(mp);

        vfs_mountedfrom(mp, fspec);
        return (0);
}

/*
 * Compatibility with old mount system call.
 */

static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
        struct ufs_args args;
        int error;

        if (data == NULL)
                return (EINVAL);
        error = copyin(data, &args, sizeof args);
        if (error)
                return (error);

        ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
        ma = mount_arg(ma, "export", &args.export, sizeof(args.export));
        error = kernel_mount(ma, flags);

        return (error);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) If requested, clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags
 *	   to allow secondary writers.
 *	4) invalidate all cached file data.
 *	5) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, int flags)
{
        struct vnode *vp, *mvp, *devvp;
        struct inode *ip;
        struct buf *bp;
        struct fs *fs, *newfs;
        struct ufsmount *ump;
        int error;

        ump = VFSTOUFS(mp);

        MNT_ILOCK(mp);
        if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
                MNT_IUNLOCK(mp);
                return (EINVAL);
        }
        MNT_IUNLOCK(mp);

        /*
         * Step 1: invalidate all cached meta-data.
         */
        devvp = VFSTOUFS(mp)->um_devvp;
        vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
        if (vinvalbuf(devvp, 0, 0, 0) != 0)
                panic("ffs_reload: dirty1");
        VOP_UNLOCK(devvp);

        /*
         * Step 2: re-read superblock from disk.
         */
        if ((error = ffs_sbget(devvp, &newfs, UFS_STDSB, 0, M_UFSMNT,
            ffs_use_bread)) != 0)
                return (error);
        /*
         * Replace our superblock with the new superblock. Preserve
         * our read-only status.
         */
        fs = VFSTOUFS(mp)->um_fs;
        newfs->fs_ronly = fs->fs_ronly;
        free(fs->fs_csp, M_UFSMNT);
        free(fs->fs_si, M_UFSMNT);
        free(fs, M_UFSMNT);
        fs = VFSTOUFS(mp)->um_fs = newfs;
        ump->um_bsize = fs->fs_bsize;
        ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
        UFS_LOCK(ump);
        if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
                printf("WARNING: %s: reload pending error: blocks %jd "
                    "files %d\n", mp->mnt_stat.f_mntonname,
                    (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
                fs->fs_pendingblocks = 0;
                fs->fs_pendinginodes = 0;
        }
        UFS_UNLOCK(ump);
        /*
         * Step 3: If requested, clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags
         * to allow secondary writers.
         */
        if ((flags & FFSR_UNSUSPEND) != 0) {
                MNT_ILOCK(mp);
                mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
                wakeup(&mp->mnt_flag);
                MNT_IUNLOCK(mp);
        }

loop:
        MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
                /*
                 * Skip syncer vnode.
                 */
                if (vp->v_type == VNON) {
                        VI_UNLOCK(vp);
                        continue;
                }
                /*
                 * Step 4: invalidate all cached file data.
                 */
                if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
                        MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
                        goto loop;
                }
                if (vinvalbuf(vp, 0, 0, 0))
                        panic("ffs_reload: dirty2");
                /*
                 * Step 5: re-read inode data for all active vnodes.
                 */
                ip = VTOI(vp);
                error =
                    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
                    (int)fs->fs_bsize, NOCRED, &bp);
                if (error) {
                        vput(vp);
                        MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
                        return (error);
                }
                if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
                        brelse(bp);
                        vput(vp);
                        MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
                        return (error);
                }
                ip->i_effnlink = ip->i_nlink;
                brelse(bp);
                vput(vp);
        }
        return (0);
}

/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(struct vnode *odevvp, struct mount *mp, struct thread *td)
{
        struct ufsmount *ump;
        struct fs *fs;
        struct cdev *dev;
        int error, i, len, ronly;
        struct ucred *cred;
        struct g_consumer *cp;
        struct mount *nmp;
        struct vnode *devvp;
        int candelete, canspeedup;

        fs = NULL;
        ump = NULL;
        cred = td ? td->td_ucred : NOCRED;
        ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

        devvp = mntfs_allocvp(mp, odevvp);
        KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
        dev = devvp->v_rdev;
        KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data"));
        if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
            (uintptr_t)mp) == 0) {
                mntfs_freevp(devvp);
                return (EBUSY);
        }
        g_topology_lock();
        error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
        g_topology_unlock();
        if (error != 0) {
                atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
                mntfs_freevp(devvp);
                return (error);
        }
        dev_ref(dev);
        devvp->v_bufobj.bo_ops = &ffs_ops;
        BO_LOCK(&odevvp->v_bufobj);
        odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
        BO_UNLOCK(&odevvp->v_bufobj);
        VOP_UNLOCK(devvp);
        if (dev->si_iosize_max != 0)
                mp->mnt_iosize_max = dev->si_iosize_max;
        if (mp->mnt_iosize_max > maxphys)
                mp->mnt_iosize_max = maxphys;
        if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
                error = EINVAL;
                vfs_mount_error(mp,
                    "Invalid sectorsize %d for superblock size %d",
                    cp->provider->sectorsize, SBLOCKSIZE);
                goto out;
        }
        /* fetch the superblock and summary information */
        if ((mp->mnt_flag & (MNT_ROOTFS | MNT_FORCE)) != 0)
                error = ffs_sbsearch(devvp, &fs, 0, M_UFSMNT, ffs_use_bread);
        else
                error = ffs_sbget(devvp, &fs, UFS_STDSB, 0, M_UFSMNT,
                    ffs_use_bread);
        if (error != 0)
                goto out;
        fs->fs_flags &= ~FS_UNCLEAN;
        if (fs->fs_clean == 0) {
                fs->fs_flags |= FS_UNCLEAN;
                if (ronly || (mp->mnt_flag & MNT_FORCE) ||
                    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
                    (fs->fs_flags & FS_DOSOFTDEP))) {
                        printf("WARNING: %s was not properly dismounted\n",
                            mp->mnt_stat.f_mntonname);
                } else {
                        vfs_mount_error(mp, "R/W mount on %s denied. "
                            "Filesystem is not clean - run fsck.%s",
                            mp->mnt_stat.f_mntonname,
                            (fs->fs_flags & FS_SUJ) == 0 ? "" :
                            " Forced mount will invalidate journal contents");
                        error = EPERM;
                        goto out;
                }
                if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
                    (mp->mnt_flag & MNT_FORCE)) {
                        printf("WARNING: %s: lost blocks %jd files %d\n",
                            mp->mnt_stat.f_mntonname,
                            (intmax_t)fs->fs_pendingblocks,
                            fs->fs_pendinginodes);
                        fs->fs_pendingblocks = 0;
                        fs->fs_pendinginodes = 0;
                }
        }
        if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
                printf("WARNING: %s: mount pending error: blocks %jd "
                    "files %d\n", mp->mnt_stat.f_mntonname,
                    (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
                fs->fs_pendingblocks = 0;
                fs->fs_pendinginodes = 0;
        }
        if ((fs->fs_flags & FS_GJOURNAL) != 0) {
#ifdef UFS_GJOURNAL
                /*
                 * Get journal provider name.
                 */
                len = 1024;
                mp->mnt_gjprovider = malloc((uint64_t)len, M_UFSMNT, M_WAITOK);
                if (g_io_getattr("GJOURNAL::provider", cp, &len,
                    mp->mnt_gjprovider) == 0) {
                        mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len,
                            M_UFSMNT, M_WAITOK);
                        MNT_ILOCK(mp);
                        mp->mnt_flag |= MNT_GJOURNAL;
                        MNT_IUNLOCK(mp);
                } else {
                        if ((mp->mnt_flag & MNT_RDONLY) == 0)
                                printf("WARNING: %s: GJOURNAL flag on fs "
                                    "but no gjournal provider below\n",
                                    mp->mnt_stat.f_mntonname);
                        free(mp->mnt_gjprovider, M_UFSMNT);
                        mp->mnt_gjprovider = NULL;
                }
#else
                printf("WARNING: %s: GJOURNAL flag on fs but no "
                    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
#endif
        } else {
                mp->mnt_gjprovider = NULL;
        }
        ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
        ump->um_cp = cp;
        ump->um_bo = &devvp->v_bufobj;
        ump->um_fs = fs;
        if (fs->fs_magic == FS_UFS1_MAGIC) {
                ump->um_fstype = UFS1;
                ump->um_balloc = ffs_balloc_ufs1;
        } else {
                ump->um_fstype = UFS2;
                ump->um_balloc = ffs_balloc_ufs2;
        }
        ump->um_blkatoff = ffs_blkatoff;
        ump->um_truncate = ffs_truncate;
        ump->um_update = ffs_update;
        ump->um_valloc = ffs_valloc;
        ump->um_vfree = ffs_vfree;
        ump->um_ifree = ffs_ifree;
        ump->um_rdonly = ffs_rdonly;
        ump->um_snapgone = ffs_snapgone;
        if ((mp->mnt_flag & MNT_UNTRUSTED) != 0)
                ump->um_check_blkno = ffs_check_blkno;
        else
                ump->um_check_blkno = NULL;
        mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
        fs->fs_ronly = ronly;
        fs->fs_active = NULL;
        mp->mnt_data = ump;
        mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
        mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
        nmp = NULL;
        if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
            (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
                if (nmp)
                        vfs_rel(nmp);
                vfs_getnewfsid(mp);
        }
        ump->um_bsize = fs->fs_bsize;
        ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
        MNT_ILOCK(mp);
        mp->mnt_flag |= MNT_LOCAL;
        MNT_IUNLOCK(mp);
        if ((fs->fs_flags & FS_MULTILABEL) != 0) {
#ifdef MAC
                MNT_ILOCK(mp);
                mp->mnt_flag |= MNT_MULTILABEL;
                MNT_IUNLOCK(mp);
#else
                printf("WARNING: %s: multilabel flag on fs but "
                    "no MAC support\n", mp->mnt_stat.f_mntonname);
#endif
        }
        if ((fs->fs_flags & FS_ACLS) != 0) {
#ifdef UFS_ACL
                MNT_ILOCK(mp);

                if (mp->mnt_flag & MNT_NFS4ACLS)
                        printf("WARNING: %s: ACLs flag on fs conflicts with "
                            "\"nfsv4acls\" mount option; option ignored\n",
                            mp->mnt_stat.f_mntonname);
                mp->mnt_flag &= ~MNT_NFS4ACLS;
                mp->mnt_flag |= MNT_ACLS;

                MNT_IUNLOCK(mp);
#else
                printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
                    mp->mnt_stat.f_mntonname);
#endif
        }
        if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
#ifdef UFS_ACL
                MNT_ILOCK(mp);

                if (mp->mnt_flag & MNT_ACLS)
                        printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
                            "with \"acls\" mount option; option ignored\n",
                            mp->mnt_stat.f_mntonname);
                mp->mnt_flag &= ~MNT_ACLS;
                mp->mnt_flag |= MNT_NFS4ACLS;

                MNT_IUNLOCK(mp);
#else
                printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
                    "ACLs support\n", mp->mnt_stat.f_mntonname);
#endif
        }
        if ((fs->fs_flags & FS_TRIM) != 0) {
                len = sizeof(int);
                if (g_io_getattr("GEOM::candelete", cp, &len,
                    &candelete) == 0) {
                        if (candelete)
                                ump->um_flags |= UM_CANDELETE;
                        else
                                printf("WARNING: %s: TRIM flag on fs but disk "
                                    "does not support TRIM\n",
                                    mp->mnt_stat.f_mntonname);
                } else {
                        printf("WARNING: %s: TRIM flag on fs but disk does "
                            "not confirm that it supports TRIM\n",
                            mp->mnt_stat.f_mntonname);
                }
                if (((ump->um_flags) & UM_CANDELETE) != 0) {
                        ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
                            taskqueue_thread_enqueue, &ump->um_trim_tq);
                        taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
                            "%s trim", mp->mnt_stat.f_mntonname);
                        ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM,
                            &ump->um_trimlisthashsize);
                }
        }

        len = sizeof(int);
        if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) {
                if (canspeedup)
                        ump->um_flags |= UM_CANSPEEDUP;
        }

        ump->um_mountp = mp;
        ump->um_dev = dev;
        ump->um_devvp = devvp;
        ump->um_odevvp = odevvp;
        ump->um_nindir = fs->fs_nindir;
        ump->um_bptrtodb = fs->fs_fsbtodb;
        ump->um_seqinc = fs->fs_frag;
        for (i = 0; i < MAXQUOTAS; i++)
                ump->um_quotas[i] = NULL;
#ifdef UFS_EXTATTR
        ufs_extattr_uepm_init(&ump->um_extattr);
#endif
        /*
         * Set FS local "last mounted on" information (NULL pad)
         */
        bzero(fs->fs_fsmnt, MAXMNTLEN);
        strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
        mp->mnt_stat.f_iosize = fs->fs_bsize;

        if (mp->mnt_flag & MNT_ROOTFS) {
                /*
                 * Root mount; update timestamp in mount structure.
                 * this will be used by the common root mount code
                 * to update the system clock.
                 */
                mp->mnt_time = fs->fs_time;
        }

        if (ronly == 0) {
                fs->fs_mtime = time_second;
                if ((fs->fs_flags & FS_DOSOFTDEP) &&
                    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
                        ffs_flushfiles(mp, FORCECLOSE, td);
                        goto out;
                }
                if (fs->fs_snapinum[0] != 0)
                        ffs_snapshot_mount(mp);
                fs->fs_fmod = 1;
                fs->fs_clean = 0;
                (void) ffs_sbupdate(ump, MNT_WAIT, 0);
        }
        /*
         * Initialize filesystem state information in mount struct.
         */
        MNT_ILOCK(mp);
        mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
            MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE;
        MNT_IUNLOCK(mp);
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
        /*
         *
         * Auto-starting does the following:
         *	- check for /.attribute in the fs, and extattr_start if so
         *	- for each file in .attribute, enable that file with
         *	  an attribute of the same name.
         * Not clear how to report errors -- probably eat them.
         * This would all happen while the filesystem was busy/not
         * available, so would effectively be "atomic".
         */
        (void) ufs_extattr_autostart(mp, td);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
        return (0);
out:
        if (fs != NULL) {
                free(fs->fs_csp, M_UFSMNT);
                free(fs->fs_si, M_UFSMNT);
                free(fs, M_UFSMNT);
        }
        if (cp != NULL) {
                g_topology_lock();
                g_vfs_close(cp);
                g_topology_unlock();
        }
        if (ump != NULL) {
                mtx_destroy(UFS_MTX(ump));
                if (mp->mnt_gjprovider != NULL) {
                        free(mp->mnt_gjprovider, M_UFSMNT);
                        mp->mnt_gjprovider = NULL;
                }
                MPASS(ump->um_softdep == NULL);
                free(ump, M_UFSMNT);
                mp->mnt_data = NULL;
        }
        BO_LOCK(&odevvp->v_bufobj);
        odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
        BO_UNLOCK(&odevvp->v_bufobj);
        atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
        vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
        mntfs_freevp(devvp);
        dev_rel(dev);
        return (error);
}

/*
 * A read function for use by filesystem-layer routines.
 */
static int
ffs_use_bread(void *devfd, off_t loc, void **bufp, int size)
{
        struct buf *bp;
        int error;

        KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp));
        *bufp = malloc(size, M_UFSMNT, M_WAITOK);
        if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED,
            &bp)) != 0)
                return (error);
        bcopy(bp->b_data, *bufp, size);
        bp->b_flags |= B_INVAL | B_NOCACHE;
        brelse(bp);
        return (0);
}
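
/*
 * ffs_use_bread() follows the read-function contract expected by
 * ffs_sbget(): *bufp comes in NULL, the routine allocates the buffer with
 * M_UFSMNT, and ownership passes to the caller, which is why ffs_reload()
 * and ffs_unmount() free fs_csp, fs_si, and fs with M_UFSMNT.
 */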
1221
1222
/*
1223
* unmount system call
1224
*/
1225
static int
1226
ffs_unmount(struct mount *mp, int mntflags)
1227
{
1228
struct thread *td;
1229
struct ufsmount *ump = VFSTOUFS(mp);
1230
struct fs *fs;
1231
int error, flags, susp;
1232
#ifdef UFS_EXTATTR
1233
int e_restart;
1234
#endif
1235
1236
flags = 0;
1237
td = curthread;
1238
fs = ump->um_fs;
1239
if (mntflags & MNT_FORCE)
1240
flags |= FORCECLOSE;
1241
susp = fs->fs_ronly == 0;
1242
#ifdef UFS_EXTATTR
1243
if ((error = ufs_extattr_stop(mp, td))) {
1244
if (error != EOPNOTSUPP)
1245
printf("WARNING: unmount %s: ufs_extattr_stop "
1246
"returned errno %d\n", mp->mnt_stat.f_mntonname,
1247
error);
1248
e_restart = 0;
1249
} else {
1250
ufs_extattr_uepm_destroy(&ump->um_extattr);
1251
e_restart = 1;
1252
}
1253
#endif
1254
if (susp) {
1255
error = vfs_write_suspend_umnt(mp);
1256
if (error != 0)
1257
goto fail1;
1258
}
1259
if (MOUNTEDSOFTDEP(mp))
1260
error = softdep_flushfiles(mp, flags, td);
1261
else
1262
error = ffs_flushfiles(mp, flags, td);
1263
if (error != 0 && !ffs_fsfail_cleanup(ump, error))
1264
goto fail;
1265
1266
UFS_LOCK(ump);
1267
if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1268
printf("WARNING: unmount %s: pending error: blocks %jd "
1269
"files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
1270
fs->fs_pendinginodes);
1271
fs->fs_pendingblocks = 0;
1272
fs->fs_pendinginodes = 0;
1273
}
1274
UFS_UNLOCK(ump);
1275
if (MOUNTEDSOFTDEP(mp))
1276
softdep_unmount(mp);
1277
MPASS(ump->um_softdep == NULL);
1278
if (fs->fs_ronly == 0) {
1279
fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
1280
error = ffs_sbupdate(ump, MNT_WAIT, 0);
1281
if (ffs_fsfail_cleanup(ump, error))
1282
error = 0;
1283
if (error != 0 && !ffs_fsfail_cleanup(ump, error)) {
1284
fs->fs_clean = 0;
1285
goto fail;
1286
}
1287
}
1288
if (susp)
1289
vfs_write_resume(mp, VR_START_WRITE);
1290
if (ump->um_trim_tq != NULL) {
1291
MPASS(ump->um_trim_inflight == 0);
1292
taskqueue_free(ump->um_trim_tq);
1293
free (ump->um_trimhash, M_TRIM);
1294
}
1295
vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1296
g_topology_lock();
1297
g_vfs_close(ump->um_cp);
1298
g_topology_unlock();
1299
BO_LOCK(&ump->um_odevvp->v_bufobj);
1300
ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
1301
BO_UNLOCK(&ump->um_odevvp->v_bufobj);
1302
atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
1303
mntfs_freevp(ump->um_devvp);
1304
vrele(ump->um_odevvp);
1305
dev_rel(ump->um_dev);
1306
mtx_destroy(UFS_MTX(ump));
1307
if (mp->mnt_gjprovider != NULL) {
1308
free(mp->mnt_gjprovider, M_UFSMNT);
1309
mp->mnt_gjprovider = NULL;
1310
}
1311
free(fs->fs_csp, M_UFSMNT);
1312
free(fs->fs_si, M_UFSMNT);
1313
free(fs, M_UFSMNT);
1314
free(ump, M_UFSMNT);
1315
mp->mnt_data = NULL;
1316
if (td->td_su == mp) {
1317
td->td_su = NULL;
1318
vfs_rel(mp);
1319
}
1320
return (error);
1321
1322
fail:
1323
if (susp)
1324
vfs_write_resume(mp, VR_START_WRITE);
1325
fail1:
1326
#ifdef UFS_EXTATTR
1327
if (e_restart) {
1328
ufs_extattr_uepm_init(&ump->um_extattr);
1329
#ifdef UFS_EXTATTR_AUTOSTART
1330
(void) ufs_extattr_autostart(mp, td);
1331
#endif
1332
}
1333
#endif
1334
1335
return (error);
1336
}
1337
1338
/*
1339
* Flush out all the files in a filesystem.
1340
*/
1341
int
1342
ffs_flushfiles(struct mount *mp, int flags, struct thread *td)
1343
{
1344
struct ufsmount *ump;
1345
int qerror, error;
1346
1347
ump = VFSTOUFS(mp);
1348
qerror = 0;
1349
#ifdef QUOTA
1350
if (mp->mnt_flag & MNT_QUOTA) {
1351
int i;
1352
error = vflush(mp, 0, SKIPSYSTEM|flags, td);
1353
if (error)
1354
return (error);
1355
for (i = 0; i < MAXQUOTAS; i++) {
1356
error = quotaoff(td, mp, i);
1357
if (error != 0) {
1358
if ((flags & EARLYFLUSH) == 0)
1359
return (error);
1360
else
1361
qerror = error;
1362
}
1363
}
1364
1365
/*
1366
* Here we fall through to vflush again to ensure that
1367
* we have gotten rid of all the system vnodes, unless
1368
* quotas must not be closed.
1369
*/
1370
}
1371
#endif
1372
/* devvp is not locked there */
1373
if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
1374
if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
1375
return (error);
1376
ffs_snapshot_unmount(mp);
1377
flags |= FORCECLOSE;
1378
/*
1379
* Here we fall through to vflush again to ensure
1380
* that we have gotten rid of all the system vnodes.
1381
*/
1382
}
1383
1384
/*
1385
* Do not close system files if quotas were not closed, to be
1386
* able to sync the remaining dquots. The freeblks softupdate
1387
* workitems might hold a reference on a dquot, preventing
1388
* quotaoff() from completing. Next round of
1389
* softdep_flushworklist() iteration should process the
1390
* blockers, allowing the next run of quotaoff() to finally
1391
* flush held dquots.
1392
*
1393
* Otherwise, flush all the files.
1394
*/
1395
if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
1396
return (error);
1397
1398
/*
1399
* If this is a forcible unmount and there were any files that
1400
* were unlinked but still open, then vflush() will have
1401
* truncated and freed those files, which might have started
1402
* some trim work. Wait here for any trims to complete
1403
* and process the blkfrees which follow the trims.
1404
* This may create more dirty devvp buffers and softdep deps.
1405
*/
1406
if (ump->um_trim_tq != NULL) {
1407
while (ump->um_trim_inflight != 0)
1408
pause("ufsutr", hz);
1409
taskqueue_drain_all(ump->um_trim_tq);
1410
}
1411
1412
/*
1413
* Flush filesystem metadata.
1414
*/
1415
vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1416
error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
1417
VOP_UNLOCK(ump->um_devvp);
1418
return (error);
1419
}
1420
1421
/*
1422
* Get filesystem statistics.
1423
*/
1424
static int
1425
ffs_statfs(struct mount *mp, struct statfs *sbp)
1426
{
1427
struct ufsmount *ump;
1428
struct fs *fs;
1429
1430
ump = VFSTOUFS(mp);
1431
fs = ump->um_fs;
1432
if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
1433
panic("ffs_statfs");
1434
sbp->f_version = STATFS_VERSION;
1435
sbp->f_bsize = fs->fs_fsize;
1436
sbp->f_iosize = fs->fs_bsize;
1437
sbp->f_blocks = fs->fs_dsize;
1438
UFS_LOCK(ump);
1439
sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
1440
fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
1441
sbp->f_bavail = freespace(fs, fs->fs_minfree) +
1442
dbtofsb(fs, fs->fs_pendingblocks);
1443
sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
1444
sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1445
UFS_UNLOCK(ump);
1446
sbp->f_namemax = UFS_MAXNAMLEN;
1447
return (0);
1448
}
1449
1450
static bool
1451
sync_doupdate(struct inode *ip)
1452
{
1453
1454
return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
1455
IN_UPDATE)) != 0);
1456
}
1457
1458
static int
1459
ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused)
1460
{
1461
struct inode *ip;
1462
1463
/*
1464
* Flags are safe to access because ->v_data invalidation
1465
* is held off by listmtx.
1466
*/
1467
if (vp->v_type == VNON)
1468
return (false);
1469
ip = VTOI(vp);
1470
if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0)
1471
return (false);
1472
return (true);
1473
}
1474
1475
/*
1476
* For a lazy sync, we only care about access times, quotas and the
1477
* superblock. Other filesystem changes are already converted to
1478
* cylinder group blocks or inode blocks updates and are written to
1479
* disk by syncer.
1480
*/
1481
static int
1482
ffs_sync_lazy(struct mount *mp)
1483
{
1484
struct vnode *mvp, *vp;
1485
struct inode *ip;
1486
int allerror, error;
1487
1488
allerror = 0;
1489
if ((mp->mnt_flag & MNT_NOATIME) != 0) {
1490
#ifdef QUOTA
1491
qsync(mp);
1492
#endif
1493
goto sbupdate;
1494
}
1495
MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) {
1496
if (vp->v_type == VNON) {
1497
VI_UNLOCK(vp);
1498
continue;
1499
}
1500
ip = VTOI(vp);
1501
1502
/*
1503
* The IN_ACCESS flag is converted to IN_MODIFIED by
1504
* ufs_close() and ufs_getattr() by the calls to
1505
* ufs_itimes_locked(), without subsequent UFS_UPDATE().
1506
* Test also all the other timestamp flags too, to pick up
1507
* any other cases that could be missed.
1508
*/
1509
if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
1510
VI_UNLOCK(vp);
1511
continue;
1512
}
1513
if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK)) != 0)
1514
continue;
1515
#ifdef QUOTA
1516
qsyncvp(vp);
1517
#endif
1518
if (sync_doupdate(ip))
1519
error = ffs_update(vp, 0);
1520
if (error != 0)
1521
allerror = error;
1522
vput(vp);
1523
}
1524
sbupdate:
1525
if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
1526
(error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
1527
allerror = error;
1528
return (allerror);
1529
}
1530
1531
/*
1532
* Go through the disk queues to initiate sandbagged IO;
1533
* go through the inodes to write those that have been modified;
1534
* initiate the writing of the super block if it has been modified.
1535
*
1536
* Note: we are always called with the filesystem marked busy using
1537
* vfs_busy().
1538
*/
1539
static int
1540
ffs_sync(struct mount *mp, int waitfor)
1541
{
1542
struct vnode *mvp, *vp, *devvp;
1543
struct thread *td;
1544
struct inode *ip;
1545
struct ufsmount *ump = VFSTOUFS(mp);
1546
struct fs *fs;
1547
int error, count, lockreq, allerror = 0;
1548
int suspend;
1549
int suspended;
1550
int secondary_writes;
1551
int secondary_accwrites;
1552
int softdep_deps;
1553
int softdep_accdeps;
1554
struct bufobj *bo;
1555
1556
suspend = 0;
1557
suspended = 0;
1558
td = curthread;
1559
fs = ump->um_fs;
1560
if (fs->fs_fmod != 0 && fs->fs_ronly != 0)
1561
panic("%s: ffs_sync: modification on read-only filesystem",
1562
fs->fs_fsmnt);
1563
if (waitfor == MNT_LAZY) {
1564
if (!rebooting)
1565
return (ffs_sync_lazy(mp));
1566
waitfor = MNT_NOWAIT;
1567
}
1568
1569
/*
1570
* Write back each (modified) inode.
1571
*/
1572
lockreq = LK_EXCLUSIVE | LK_NOWAIT;
1573
if (waitfor == MNT_SUSPEND) {
1574
suspend = 1;
1575
waitfor = MNT_WAIT;
1576
}
1577
if (waitfor == MNT_WAIT)
1578
lockreq = LK_EXCLUSIVE;
1579
lockreq |= LK_INTERLOCK;
1580
loop:
1581
/* Grab snapshot of secondary write counts */
1582
MNT_ILOCK(mp);
1583
secondary_writes = mp->mnt_secondary_writes;
1584
secondary_accwrites = mp->mnt_secondary_accwrites;
1585
MNT_IUNLOCK(mp);
1586
1587
/* Grab snapshot of softdep dependency counts */
1588
softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
1589
1590
MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1591
/*
1592
* Depend on the vnode interlock to keep things stable enough
1593
* for a quick test. Since there might be hundreds of
1594
* thousands of vnodes, we cannot afford even a subroutine
1595
* call unless there's a good chance that we have work to do.
1596
*/
1597
if (vp->v_type == VNON) {
1598
VI_UNLOCK(vp);
1599
continue;
1600
}
1601
ip = VTOI(vp);
1602
if ((ip->i_flag &
1603
(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
1604
vp->v_bufobj.bo_dirty.bv_cnt == 0) {
1605
VI_UNLOCK(vp);
1606
continue;
1607
}
1608
if ((error = vget(vp, lockreq)) != 0) {
1609
if (error == ENOENT) {
1610
MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1611
goto loop;
1612
}
1613
continue;
1614
}
1615
#ifdef QUOTA
1616
qsyncvp(vp);
1617
#endif
1618
for (;;) {
1619
error = ffs_syncvnode(vp, waitfor, 0);
1620
if (error == ERELOOKUP)
1621
continue;
1622
if (error != 0)
1623
allerror = error;
1624
break;
1625
}
1626
vput(vp);
1627
}
1628
/*
1629
* Force stale filesystem control information to be flushed.
1630
*/
1631
if (waitfor == MNT_WAIT || rebooting) {
1632
if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
1633
allerror = error;
1634
if (ffs_fsfail_cleanup(ump, allerror))
1635
allerror = 0;
1636
/* Flushed work items may create new vnodes to clean */
1637
if (allerror == 0 && count)
1638
goto loop;
1639
}
1640
1641
devvp = ump->um_devvp;
1642
bo = &devvp->v_bufobj;
1643
BO_LOCK(bo);
1644
if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
1645
BO_UNLOCK(bo);
1646
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1647
error = VOP_FSYNC(devvp, waitfor, td);
1648
VOP_UNLOCK(devvp);
1649
if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
1650
error = ffs_sbupdate(ump, waitfor, 0);
1651
if (error != 0)
1652
allerror = error;
1653
if (ffs_fsfail_cleanup(ump, allerror))
1654
allerror = 0;
1655
if (allerror == 0 && waitfor == MNT_WAIT)
1656
goto loop;
1657
} else if (suspend != 0) {
1658
if (softdep_check_suspend(mp,
1659
devvp,
1660
softdep_deps,
1661
softdep_accdeps,
1662
secondary_writes,
1663
secondary_accwrites) != 0) {
1664
MNT_IUNLOCK(mp);
1665
goto loop; /* More work needed */
1666
}
1667
mtx_assert(MNT_MTX(mp), MA_OWNED);
1668
mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
1669
MNT_IUNLOCK(mp);
1670
suspended = 1;
1671
} else
1672
BO_UNLOCK(bo);
1673
/*
1674
* Write back modified superblock.
1675
*/
1676
if (fs->fs_fmod != 0 &&
1677
(error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
1678
allerror = error;
1679
if (ffs_fsfail_cleanup(ump, allerror))
1680
allerror = 0;
1681
return (allerror);
1682
}
1683
1684
int
1685
ffs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
1686
{
1687
return (ffs_vgetf(mp, ino, flags, vpp, 0));
1688
}
1689
1690
int
1691
ffs_vgetf(struct mount *mp,
1692
ino_t ino,
1693
int flags,
1694
struct vnode **vpp,
1695
int ffs_flags)
1696
{
1697
struct fs *fs;
1698
struct inode *ip;
1699
struct ufsmount *ump;
1700
struct buf *bp;
1701
struct vnode *vp;
1702
daddr_t dbn;
1703
int error;
1704
1705
MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 ||
1706
(flags & LK_EXCLUSIVE) != 0);
1707
1708
error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
1709
if (error != 0)
1710
return (error);
1711
if (*vpp != NULL) {
1712
if ((ffs_flags & FFSV_REPLACE) == 0 ||
1713
((ffs_flags & FFSV_REPLACE_DOOMED) == 0 ||
1714
!VN_IS_DOOMED(*vpp)))
1715
return (0);
1716
vgone(*vpp);
1717
vput(*vpp);
1718
}
1719
1720
/*
1721
* We must promote to an exclusive lock for vnode creation. This
1722
* can happen if lookup is passed LOCKSHARED.
1723
*/
1724
if ((flags & LK_TYPE_MASK) == LK_SHARED) {
1725
flags &= ~LK_TYPE_MASK;
1726
flags |= LK_EXCLUSIVE;
1727
}
1728
1729
/*
1730
* We do not lock vnode creation as it is believed to be too
1731
* expensive for such rare case as simultaneous creation of vnode
1732
* for same ino by different processes. We just allow them to race
1733
* and check later to decide who wins. Let the race begin!
1734
*/
1735
1736
ump = VFSTOUFS(mp);
1737
fs = ump->um_fs;
1738
ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO);
1739
1740
/* Allocate a new vnode/inode. */
1741
error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ?
1742
&ffs_vnodeops1 : &ffs_vnodeops2, &vp);
1743
if (error) {
1744
*vpp = NULL;
1745
uma_zfree_smr(uma_inode, ip);
1746
return (error);
1747
}
1748
/*
1749
* FFS supports recursive locking.
1750
*/
1751
lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
1752
VN_LOCK_AREC(vp);
1753
vp->v_data = ip;
1754
vp->v_bufobj.bo_bsize = fs->fs_bsize;
1755
ip->i_vnode = vp;
1756
ip->i_ump = ump;
1757
ip->i_number = ino;
1758
ip->i_ea_refs = 0;
1759
ip->i_nextclustercg = -1;
1760
ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2;
1761
ip->i_mode = 0; /* ensure error cases below throw away vnode */
1762
cluster_init_vn(&ip->i_clusterw);
1763
#ifdef DIAGNOSTIC
1764
ufs_init_trackers(ip);
1765
#endif
1766
#ifdef QUOTA
1767
{
1768
int i;
1769
for (i = 0; i < MAXQUOTAS; i++)
1770
ip->i_dquot[i] = NODQUOT;
1771
}
1772
#endif
1773
1774
if (ffs_flags & FFSV_FORCEINSMQ)
1775
vp->v_vflag |= VV_FORCEINSMQ;
1776
error = insmntque(vp, mp);
1777
if (error != 0) {
1778
uma_zfree_smr(uma_inode, ip);
1779
*vpp = NULL;
1780
return (error);
1781
}
1782
vp->v_vflag &= ~VV_FORCEINSMQ;
1783
error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
1784
if (error != 0)
1785
return (error);
1786
if (*vpp != NULL) {
1787
/*
1788
* Calls from ffs_valloc() (i.e. FFSV_REPLACE set)
1789
* operate on empty inode, which must not be found by
1790
* other threads until fully filled. Vnode for empty
1791
* inode must be not re-inserted on the hash by other
1792
* thread, after removal by us at the beginning.
1793
*/
1794
MPASS((ffs_flags & FFSV_REPLACE) == 0);
1795
return (0);
1796
}
1797
if (I_IS_UFS1(ip))
1798
ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
1799
else
1800
ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
1801
1802
if ((ffs_flags & FFSV_NEWINODE) != 0) {
1803
/* New inode, just zero out its contents. */
1804
if (I_IS_UFS1(ip))
1805
memset(ip->i_din1, 0, sizeof(struct ufs1_dinode));
1806
else
1807
memset(ip->i_din2, 0, sizeof(struct ufs2_dinode));
1808
} else {
1809
/* Read the disk contents for the inode, copy into the inode. */
1810
dbn = fsbtodb(fs, ino_to_fsba(fs, ino));
1811
error = ffs_breadz(ump, ump->um_devvp, dbn, dbn,
1812
(int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL, &bp);
1813
if (error != 0) {
1814
/*
1815
* The inode does not contain anything useful, so it
1816
* would be misleading to leave it on its hash chain.
1817
* With mode still zero, it will be unlinked and
1818
* returned to the free list by vput().
1819
*/
1820
vgone(vp);
1821
vput(vp);
1822
*vpp = NULL;
1823
return (error);
1824
}
1825
if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) {
1826
bqrelse(bp);
1827
vgone(vp);
1828
vput(vp);
1829
*vpp = NULL;
1830
return (error);
1831
}
1832
bqrelse(bp);
1833
}
1834
if (DOINGSOFTDEP(vp) && (!fs->fs_ronly ||
1835
(ffs_flags & FFSV_FORCEINODEDEP) != 0))
1836
softdep_load_inodeblock(ip);
1837
else
1838
ip->i_effnlink = ip->i_nlink;
1839
1840
/*
1841
* Initialize the vnode from the inode, check for aliases.
1842
* Note that the underlying vnode may have changed.
1843
*/
1844
error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
1845
&vp);
1846
if (error) {
1847
vgone(vp);
1848
vput(vp);
1849
*vpp = NULL;
1850
return (error);
1851
}
1852
1853
/*
1854
* Finish inode initialization.
1855
*/
1856
if (vp->v_type != VFIFO) {
1857
/* FFS supports shared locking for all files except fifos. */
1858
VN_LOCK_ASHARE(vp);
1859
}
1860
1861
/*
1862
* Set up a generation number for this inode if it does not
1863
* already have one. This should only happen on old filesystems.
1864
*/
1865
if (ip->i_gen == 0) {
1866
while (ip->i_gen == 0)
1867
ip->i_gen = arc4random();
1868
if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
1869
UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
1870
DIP_SET(ip, i_gen, ip->i_gen);
1871
}
1872
}
1873
#ifdef MAC
1874
if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
1875
/*
1876
* If this vnode is already allocated, and we're running
1877
* multi-label, attempt to perform a label association
1878
* from the extended attributes on the inode.
1879
*/
1880
error = mac_vnode_associate_extattr(mp, vp);
1881
if (error) {
1882
/* ufs_inactive will release ip->i_devvp ref. */
1883
vgone(vp);
1884
vput(vp);
1885
*vpp = NULL;
1886
return (error);
1887
}
1888
}
1889
#endif
1890
1891
vn_set_state(vp, VSTATE_CONSTRUCTED);
1892
*vpp = vp;
1893
return (0);
1894
}
1895
1896
/*
1897
* File handle to vnode
1898
*
1899
* Have to be really careful about stale file handles:
1900
* - check that the inode number is valid
1901
* - for UFS2 check that the inode number is initialized
1902
* - call ffs_vget() to get the locked inode
1903
* - check for an unallocated inode (i_mode == 0)
1904
* - check that the given client host has export rights and return
1905
* those rights via. exflagsp and credanonp
1906
*/
1907
static int
1908
ffs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
1909
{
1910
struct ufid *ufhp;
1911
1912
ufhp = (struct ufid *)fhp;
1913
return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags,
1914
vpp, 0));
1915
}
1916
1917
/*
1918
* Return a vnode from a mounted filesystem for inode with specified
1919
* generation number. Return ESTALE if the inode with given generation
1920
* number no longer exists on that filesystem.
1921
*/
1922
int
1923
ffs_inotovp(struct mount *mp,
1924
ino_t ino,
1925
uint64_t gen,
1926
int lflags,
1927
struct vnode **vpp,
1928
int ffs_flags)
1929
{
1930
struct ufsmount *ump;
1931
struct vnode *nvp;
1932
struct inode *ip;
1933
struct fs *fs;
1934
struct cg *cgp;
1935
struct buf *bp;
1936
uint64_t cg;
1937
1938
ump = VFSTOUFS(mp);
1939
fs = ump->um_fs;
1940
*vpp = NULL;
1941
1942
if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
1943
return (ESTALE);
1944
1945
/*
1946
* Need to check if inode is initialized because UFS2 does lazy
1947
* initialization and nfs_fhtovp can offer arbitrary inode numbers.
1948
*/
1949
if (fs->fs_magic == FS_UFS2_MAGIC) {
1950
cg = ino_to_cg(fs, ino);
1951
if (ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp) != 0)
1952
return (ESTALE);
1953
if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
1954
brelse(bp);
1955
return (ESTALE);
1956
}
1957
brelse(bp);
1958
}
1959
1960
if (ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags) != 0)
1961
return (ESTALE);
1962
1963
ip = VTOI(nvp);
1964
if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) {
1965
if (ip->i_mode == 0)
1966
vgone(nvp);
1967
vput(nvp);
1968
return (ESTALE);
1969
}
1970
1971
vnode_create_vobject(nvp, DIP(ip, i_size), curthread);
1972
*vpp = nvp;
1973
return (0);
1974
}
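
/*
 * Example (a minimal, hypothetical caller; the LK_EXCLUSIVE lock flag
 * below is illustrative): a file-handle consumer decodes its handle
 * into an inode number and generation and lets ffs_inotovp() perform
 * the staleness checks listed above:
 *
 *	ufhp = (struct ufid *)fhp;
 *	error = ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen,
 *	    LK_EXCLUSIVE, &vp, 0);
 *	if (error == ESTALE)
 *		(the handle names a freed or recycled inode)
 *
 * ffs_fhtovp() above is exactly this thin wrapper.
 */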

/*
 * Initialize the filesystem.
 */
static int
ffs_init(struct vfsconf *vfsp)
{

	ffs_susp_initialize();
	softdep_initialize();
	return (ufs_init(vfsp));
}

/*
 * Undo the work of ffs_init().
 */
static int
ffs_uninit(struct vfsconf *vfsp)
{
	int ret;

	ret = ufs_uninit(vfsp);
	softdep_uninitialize();
	ffs_susp_uninitialize();
	taskqueue_drain_all(taskqueue_thread);
	return (ret);
}

/*
 * Structure used to pass information from ffs_sbupdate to its
 * helper routine ffs_use_bwrite.
 */
struct devfd {
	struct ufsmount *ump;
	struct buf *sbbp;
	int waitfor;
	int suspended;
	int error;
};

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(struct ufsmount *ump, int waitfor, int suspended)
{
	struct fs *fs;
	struct buf *sbbp;
	struct devfd devfd;

	fs = ump->um_fs;
	if (fs->fs_ronly == 1 &&
	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
	    (MNT_RDONLY | MNT_UPDATE))
		panic("ffs_sbupdate: write read-only filesystem");
	/*
	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
	 * Copy superblock to this buffer and have it written out.
	 */
	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
	    (int)fs->fs_sbsize, 0, 0, 0);
	UFS_LOCK(ump);
	fs->fs_fmod = 0;
	bcopy((caddr_t)fs, sbbp->b_data, (uint64_t)fs->fs_sbsize);
	UFS_UNLOCK(ump);
	fs = (struct fs *)sbbp->b_data;
	/*
	 * Initialize info needed for write function.
	 */
	devfd.ump = ump;
	devfd.sbbp = sbbp;
	devfd.waitfor = waitfor;
	devfd.suspended = suspended;
	devfd.error = 0;
	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
}
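
/*
 * A sketch of the calling convention, as implied by ffs_use_bwrite()
 * below (the exact iteration is internal to ffs_sbput()): the write
 * function supplied here is invoked once for each block of summary
 * information and finally for the superblock itself, with the target
 * device offset passed in "loc":
 *
 *	(*writefunc)(devfd, summary_block_loc, summary_buf, size);
 *	...
 *	(*writefunc)(devfd, fs->fs_sblockloc, fs, fs->fs_sbsize);
 *
 * ffs_use_bwrite() therefore distinguishes the two cases by comparing
 * loc with fs_sblockloc.
 */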

/*
 * Write function for use by filesystem-layer routines.
 */
static int
ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
{
	struct devfd *devfdp;
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	int error;

	devfdp = devfd;
	ump = devfdp->ump;
	bp = devfdp->sbbp;
	fs = (struct fs *)bp->b_data;
	/*
	 * Writing the superblock summary information.
	 */
	if (loc != fs->fs_sblockloc) {
		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
		bcopy(buf, bp->b_data, (uint64_t)size);
		if (devfdp->suspended)
			bp->b_flags |= B_VALIDSUSPWRT;
		if (devfdp->waitfor != MNT_WAIT)
			bawrite(bp);
		else if ((error = bwrite(bp)) != 0)
			devfdp->error = error;
		return (0);
	}
	/*
	 * Writing the superblock itself. We need to do special checks for it.
	 * A negative error code is returned to indicate that a copy of the
	 * superblock has been made and that the copy is discarded when the
	 * I/O is done. So the caller should not attempt to restore the
	 * fs_si field after the write is done. The caller will convert the
	 * error code back to its usual positive value when returning it.
	 */
	if (ffs_fsfail_cleanup(ump, devfdp->error))
		devfdp->error = 0;
	if (devfdp->error != 0) {
		brelse(bp);
		return (-devfdp->error - 1);
	}
	if (MOUNTEDSOFTDEP(ump->um_mountp))
		softdep_setup_sbupdate(ump, fs, bp);
	if (devfdp->suspended)
		bp->b_flags |= B_VALIDSUSPWRT;
	if (devfdp->waitfor != MNT_WAIT)
		bawrite(bp);
	else if ((error = bwrite(bp)) != 0)
		devfdp->error = error;
	return (-devfdp->error - 1);
}
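
/*
 * A minimal sketch of the caller-side convention described in the
 * comment above: a negative return tells ffs_sbput() that the
 * superblock copy has been consumed, and the caller recovers the
 * usual positive error code as
 *
 *	ret = (*writefunc)(devfd, loc, buf, size);
 *	if (ret < 0)
 *		error = -ret - 1;
 *
 * so a return of -1 decodes to 0 (success) and -(EIO + 1) to EIO.
 */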

static int
ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

#ifdef UFS_EXTATTR
	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#else
	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#endif
}

static void
ffs_ifree(struct ufsmount *ump, struct inode *ip)
{

	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
		uma_zfree(uma_ufs1, ip->i_din1);
	else if (ip->i_din2 != NULL)
		uma_zfree(uma_ufs2, ip->i_din2);
	uma_zfree_smr(uma_inode, ip);
}

static int dobkgrdwrite = 1;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BV_BKGRDWRITE flag)?");

/*
 * Complete a background write started from bwrite.
 */
static void
ffs_backgroundwritedone(struct buf *bp)
{
	struct bufobj *bufobj;
	struct buf *origbp;

#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
		softdep_handle_error(bp);
#endif

	/*
	 * Find the original buffer that we are writing.
	 */
	bufobj = bp->b_bufobj;
	BO_LOCK(bufobj);
	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
		panic("backgroundwritedone: lost buffer");

	/*
	 * Mark the cylinder group buffer origbp as dirty so that the
	 * failed write is not lost.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		origbp->b_vflags |= BV_BKGRDERR;
	BO_UNLOCK(bufobj);
	/*
	 * Process dependencies then return any unfinished ones.
	 */
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
		buf_complete(bp);
#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep))
		softdep_move_dependencies(bp, origbp);
#endif
	/*
	 * This buffer is marked B_NOCACHE so when it is released
	 * by biodone it will be tossed. Clear B_IOSTARTED in case of error.
	 */
	bp->b_flags |= B_NOCACHE;
	bp->b_flags &= ~(B_CACHE | B_IOSTARTED);
	pbrelvp(bp);

	/*
	 * Prevent brelse() from trying to keep and re-dirtying bp on
	 * errors. It causes b_bufobj dereference in
	 * bdirty()/reassignbuf(), and b_bufobj was cleared in
	 * pbrelvp() above.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		bp->b_flags |= B_INVAL;
	bufdone(bp);
	BO_LOCK(bufobj);
	/*
	 * Clear the BV_BKGRDINPROG flag in the original buffer
	 * and awaken it if it is waiting for the write to complete.
	 * If BV_BKGRDINPROG is not set in the original buffer it must
	 * have been released and re-instantiated - which is not legal.
	 */
	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
	    ("backgroundwritedone: lost buffer2"));
	origbp->b_vflags &= ~BV_BKGRDINPROG;
	if (origbp->b_vflags & BV_BKGRDWAIT) {
		origbp->b_vflags &= ~BV_BKGRDWAIT;
		wakeup(&origbp->b_xflags);
	}
	BO_UNLOCK(bufobj);
}

/*
 * Write, release buffer on completion. (Done by iodone
 * if async). Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that buffer is
 * fully valid and thus cacheable. This is true even of NFS
 * now so we set it generally. This could be set either here
 * or in biodone() since the I/O is synchronous. We put it
 * here.
 */
static int
ffs_bufwrite(struct buf *bp)
{
	struct buf *newbp;
	struct cg *cgp;

	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	if (!BUF_ISLOCKED(bp))
		panic("bufwrite: buffer is not busy???");
	/*
	 * If a background write is already in progress, delay
	 * writing this block if it is asynchronous. Otherwise
	 * wait for the background write to complete.
	 */
	BO_LOCK(bp->b_bufobj);
	if (bp->b_vflags & BV_BKGRDINPROG) {
		if (bp->b_flags & B_ASYNC) {
			BO_UNLOCK(bp->b_bufobj);
			bdwrite(bp);
			return (0);
		}
		bp->b_vflags |= BV_BKGRDWAIT;
		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
		    "bwrbg", 0);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("bufwrite: still writing");
	}
	bp->b_vflags &= ~BV_BKGRDERR;
	BO_UNLOCK(bp->b_bufobj);

	/*
	 * If this buffer is marked for background writing and we
	 * do not have to wait for it, make a copy and write the
	 * copy so as to leave this buffer ready for further use.
	 *
	 * This optimization eats a lot of memory. If we have a page
	 * or buffer shortfall we can't do it.
	 */
	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
	    (bp->b_flags & B_ASYNC) &&
	    !vm_page_count_severe() &&
	    !buf_dirty_count_severe()) {
		KASSERT(bp->b_iodone == NULL,
		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));

		/* get a new block */
		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
		if (newbp == NULL)
			goto normal_write;

		KASSERT(buf_mapped(bp), ("Unmapped cg"));
		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
		BO_LOCK(bp->b_bufobj);
		bp->b_vflags |= BV_BKGRDINPROG;
		BO_UNLOCK(bp->b_bufobj);
		newbp->b_xflags |=
		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
		newbp->b_lblkno = bp->b_lblkno;
		newbp->b_blkno = bp->b_blkno;
		newbp->b_offset = bp->b_offset;
		newbp->b_iodone = ffs_backgroundwritedone;
		newbp->b_flags |= B_ASYNC;
		newbp->b_flags &= ~B_INVAL;
		pbgetvp(bp->b_vp, newbp);

#ifdef SOFTUPDATES
		/*
		 * Move over the dependencies. If there are rollbacks,
		 * leave the parent buffer dirtied as it will need to
		 * be written again.
		 */
		if (LIST_EMPTY(&bp->b_dep) ||
		    softdep_move_dependencies(bp, newbp) == 0)
			bundirty(bp);
#else
		bundirty(bp);
#endif

		/*
		 * Initiate write on the copy, release the original. The
		 * BKGRDINPROG flag prevents it from going away until
		 * the background write completes. We have to recalculate
		 * its check hash in case the buffer gets freed and then
		 * reconstituted from the buffer cache during a later read.
		 */
		if ((bp->b_xflags & BX_CYLGRP) != 0) {
			cgp = (struct cg *)bp->b_data;
			cgp->cg_ckhash = 0;
			cgp->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
		}
		bqrelse(bp);
		bp = newbp;
	} else
		/* Mark the buffer clean */
		bundirty(bp);

	/* Let the normal bufwrite do the rest for us */
normal_write:
	/*
	 * If we are writing a cylinder group, update its time.
	 */
	if ((bp->b_xflags & BX_CYLGRP) != 0) {
		cgp = (struct cg *)bp->b_data;
		cgp->cg_old_time = cgp->cg_time = time_second;
	}
	return (bufwrite(bp));
}
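
/*
 * Summary of the background-write handshake implemented by the two
 * routines above (a descriptive sketch, not additional code):
 *
 *	ffs_bufwrite(bp):
 *		copy bp into newbp (geteblk)
 *		bp->b_vflags |= BV_BKGRDINPROG
 *		newbp->b_iodone = ffs_backgroundwritedone
 *		bufwrite(newbp)			(asynchronous)
 *	ffs_backgroundwritedone(newbp), on I/O completion:
 *		move any unfinished dependencies back to the original
 *		buffer, clear BV_BKGRDINPROG, and wake any sleeper
 *		waiting with BV_BKGRDWAIT set
 *
 * While BV_BKGRDINPROG is set, a subsequent ffs_bufwrite() of the same
 * buffer either delays it (async) or sleeps on b_xflags (sync).
 */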

static void
ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
{
	struct vnode *vp;
	struct buf *tbp;
	int error, nocopy;

	/*
	 * This is the bufobj strategy for the private VCHR vnodes
	 * used by FFS to access the underlying storage device.
	 * We override the default bufobj strategy and thus bypass
	 * VOP_STRATEGY() for these vnodes.
	 */
	vp = bo2vnode(bo);
	KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
	    bp->b_vp->v_rdev == NULL ||
	    bp->b_vp->v_rdev->si_mountpt == NULL ||
	    VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
	    vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
	    ("ffs_geom_strategy() with wrong vp"));
	if (bp->b_iocmd == BIO_WRITE) {
		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			panic("ffs_geom_strategy: bad I/O");
		nocopy = bp->b_flags & B_NOCOPY;
		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
		    vp->v_rdev->si_snapdata != NULL) {
			if ((bp->b_flags & B_CLUSTER) != 0) {
				runningbufwakeup(bp);
				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
				    b_cluster.cluster_entry) {
					error = ffs_copyonwrite(vp, tbp);
					if (error != 0 &&
					    error != EOPNOTSUPP) {
						bp->b_error = error;
						bp->b_ioflags |= BIO_ERROR;
						bp->b_flags &= ~B_BARRIER;
						bufdone(bp);
						return;
					}
				}
				(void)runningbufclaim(bp, bp->b_bufsize);
			} else {
				error = ffs_copyonwrite(vp, bp);
				if (error != 0 && error != EOPNOTSUPP) {
					bp->b_error = error;
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags &= ~B_BARRIER;
					bufdone(bp);
					return;
				}
			}
		}
#ifdef SOFTUPDATES
		if ((bp->b_flags & B_CLUSTER) != 0) {
			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
			    b_cluster.cluster_entry) {
				if (!LIST_EMPTY(&tbp->b_dep))
					buf_start(tbp);
			}
		} else {
			if (!LIST_EMPTY(&bp->b_dep))
				buf_start(bp);
		}

#endif
		/*
		 * Check for metadata that needs check-hashes and update them.
		 */
		switch (bp->b_xflags & BX_FSPRIV) {
		case BX_CYLGRP:
			((struct cg *)bp->b_data)->cg_ckhash = 0;
			((struct cg *)bp->b_data)->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
			break;

		case BX_SUPERBLOCK:
		case BX_INODE:
		case BX_INDIR:
		case BX_DIR:
			printf("Check-hash write is unimplemented!!!\n");
			break;

		case 0:
			break;

		default:
			printf("multiple buffer types 0x%b\n",
			    (bp->b_xflags & BX_FSPRIV), PRINT_UFS_BUF_XFLAGS);
			break;
		}
	}
	if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
		bp->b_xflags |= BX_CVTENXIO;
	g_vfs_strategy(bo, bp);
}
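
/*
 * The BX_CYLGRP case above is the write side of the cylinder-group
 * check hash: the stored hash is zeroed, the CRC32C is computed over
 * the whole buffer, and the result is written back into cg_ckhash.
 * A reader performs the inverse (a minimal sketch of the scheme, not
 * a literal quote of the read path):
 *
 *	ckhash = cgp->cg_ckhash;
 *	cgp->cg_ckhash = 0;
 *	if (calculate_crc32c(~0L, bp->b_data, bp->b_bcount) != ckhash)
 *		(treat the cylinder group as corrupted)
 */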

int
ffs_own_mount(const struct mount *mp)
{

	if (mp->mnt_op == &ufs_vfsops)
		return (1);
	return (0);
}

#ifdef DDB
#ifdef SOFTUPDATES

/* defined in ffs_softdep.c */
extern void db_print_ffs(struct ufsmount *ump);

DB_SHOW_COMMAND(ffs, db_show_ffs)
{
	struct mount *mp;
	struct ufsmount *ump;

	if (have_addr) {
		ump = VFSTOUFS((struct mount *)addr);
		db_print_ffs(ump);
		return;
	}

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
			db_print_ffs(VFSTOUFS(mp));
	}
}
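
/*
 * Usage from the in-kernel debugger prompt: "show ffs" walks the
 * mount list and prints every mounted FFS filesystem, while
 * "show ffs <struct mount address>" prints just that mount.
 */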

#endif /* SOFTUPDATES */
#endif /* DDB */