GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/fs/unionfs/union_vfsops.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1994, 1995 The Regents of the University of California.
 * Copyright (c) 1994, 1995 Jan-Simon Pendry.
 * Copyright (c) 2005, 2006, 2012 Masanori Ozawa <[email protected]>, ONGS Inc.
 * Copyright (c) 2006, 2012 Daichi Goto <[email protected]>
 * All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/stat.h>

#include <fs/unionfs/union.h>

static MALLOC_DEFINE(M_UNIONFSMNT, "UNIONFS mount", "UNIONFS mount structure");

static vfs_fhtovp_t unionfs_fhtovp;
static vfs_checkexp_t unionfs_checkexp;
static vfs_mount_t unionfs_domount;
static vfs_quotactl_t unionfs_quotactl;
static vfs_root_t unionfs_root;
static vfs_sync_t unionfs_sync;
static vfs_statfs_t unionfs_statfs;
static vfs_unmount_t unionfs_unmount;
static vfs_vget_t unionfs_vget;
static vfs_extattrctl_t unionfs_extattrctl;

static struct vfsops unionfs_vfsops;

/*
 * Mount unionfs layer.
 */
static int
unionfs_domount(struct mount *mp)
{
        struct vnode *lowerrootvp;
        struct vnode *upperrootvp;
        struct vnode *lvp1;
        struct vnode *lvp2;
        struct unionfs_mount *ump;
        char *target;
        char *tmp;
        char *ep;
        struct nameidata nd, *ndp;
        struct vattr va;
        unionfs_copymode copymode;
        unionfs_whitemode whitemode;
        int below;
        int error;
        int len;
        uid_t uid;
        gid_t gid;
        u_short udir;
        u_short ufile;

        UNIONFSDEBUG("unionfs_mount(mp = %p)\n", mp);

        error = 0;
        below = 0;
        uid = 0;
        gid = 0;
        udir = 0;
        ufile = 0;
        copymode = UNIONFS_TRANSPARENT; /* default */
        whitemode = UNIONFS_WHITE_ALWAYS;
        ndp = &nd;

        if (mp->mnt_flag & MNT_ROOTFS) {
                vfs_mount_error(mp, "Cannot union mount root filesystem");
                return (EOPNOTSUPP);
        }

        /*
         * Mount updates (MNT_UPDATE) are not supported.
         */
        if (mp->mnt_flag & MNT_UPDATE) {
                vfs_mount_error(mp, "unionfs does not support mount update");
                return (EOPNOTSUPP);
        }

        /*
         * Get argument
         */
        error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len);
        if (error)
                error = vfs_getopt(mp->mnt_optnew, "from", (void **)&target,
                    &len);
        if (error || target[len - 1] != '\0') {
                vfs_mount_error(mp, "Invalid target");
                return (EINVAL);
        }
        if (vfs_getopt(mp->mnt_optnew, "below", NULL, NULL) == 0)
                below = 1;
        if (vfs_getopt(mp->mnt_optnew, "udir", (void **)&tmp, NULL) == 0) {
                if (tmp != NULL)
                        udir = (mode_t)strtol(tmp, &ep, 8);
                if (tmp == NULL || *ep) {
                        vfs_mount_error(mp, "Invalid udir");
                        return (EINVAL);
                }
                udir &= S_IRWXU | S_IRWXG | S_IRWXO;
        }
        if (vfs_getopt(mp->mnt_optnew, "ufile", (void **)&tmp, NULL) == 0) {
                if (tmp != NULL)
                        ufile = (mode_t)strtol(tmp, &ep, 8);
                if (tmp == NULL || *ep) {
                        vfs_mount_error(mp, "Invalid ufile");
                        return (EINVAL);
                }
                ufile &= S_IRWXU | S_IRWXG | S_IRWXO;
        }
        /* check umask, uid and gid */
        if (udir == 0 && ufile != 0)
                udir = ufile;
        if (ufile == 0 && udir != 0)
                ufile = udir;

        vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
        error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
        if (!error) {
                if (udir == 0)
                        udir = va.va_mode;
                if (ufile == 0)
                        ufile = va.va_mode;
                uid = va.va_uid;
                gid = va.va_gid;
        }
        VOP_UNLOCK(mp->mnt_vnodecovered);
        if (error)
                return (error);

        if (mp->mnt_cred->cr_ruid == 0) { /* root only */
                if (vfs_getopt(mp->mnt_optnew, "uid", (void **)&tmp,
                    NULL) == 0) {
                        if (tmp != NULL)
                                uid = (uid_t)strtol(tmp, &ep, 10);
                        if (tmp == NULL || *ep) {
                                vfs_mount_error(mp, "Invalid uid");
                                return (EINVAL);
                        }
                }
                if (vfs_getopt(mp->mnt_optnew, "gid", (void **)&tmp,
                    NULL) == 0) {
                        if (tmp != NULL)
                                gid = (gid_t)strtol(tmp, &ep, 10);
                        if (tmp == NULL || *ep) {
                                vfs_mount_error(mp, "Invalid gid");
                                return (EINVAL);
                        }
                }
                if (vfs_getopt(mp->mnt_optnew, "copymode", (void **)&tmp,
                    NULL) == 0) {
                        if (tmp == NULL) {
                                vfs_mount_error(mp, "Invalid copymode");
                                return (EINVAL);
                        } else if (strcasecmp(tmp, "traditional") == 0)
                                copymode = UNIONFS_TRADITIONAL;
                        else if (strcasecmp(tmp, "transparent") == 0)
                                copymode = UNIONFS_TRANSPARENT;
                        else if (strcasecmp(tmp, "masquerade") == 0)
                                copymode = UNIONFS_MASQUERADE;
                        else {
                                vfs_mount_error(mp, "Invalid copymode");
                                return (EINVAL);
                        }
                }
                if (vfs_getopt(mp->mnt_optnew, "whiteout", (void **)&tmp,
                    NULL) == 0) {
                        if (tmp == NULL) {
                                vfs_mount_error(mp, "Invalid whiteout mode");
                                return (EINVAL);
                        } else if (strcasecmp(tmp, "always") == 0)
                                whitemode = UNIONFS_WHITE_ALWAYS;
                        else if (strcasecmp(tmp, "whenneeded") == 0)
                                whitemode = UNIONFS_WHITE_WHENNEEDED;
                        else {
                                vfs_mount_error(mp, "Invalid whiteout mode");
                                return (EINVAL);
                        }
                }
        }
        /* If copymode is UNIONFS_TRADITIONAL, uid/gid are the mounting user's. */
        if (copymode == UNIONFS_TRADITIONAL) {
                uid = mp->mnt_cred->cr_ruid;
                gid = mp->mnt_cred->cr_rgid;
        }

        UNIONFSDEBUG("unionfs_mount: uid=%d, gid=%d\n", uid, gid);
        UNIONFSDEBUG("unionfs_mount: udir=0%03o, ufile=0%03o\n", udir, ufile);
        UNIONFSDEBUG("unionfs_mount: copymode=%d\n", copymode);

        /*
         * Find upper node
         */
        NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, target);
        if ((error = namei(ndp)))
                return (error);

        NDFREE_PNBUF(ndp);

        /* get root vnodes */
        lowerrootvp = mp->mnt_vnodecovered;
        upperrootvp = ndp->ni_vp;
        KASSERT(lowerrootvp != NULL, ("%s: NULL lower root vp", __func__));
        KASSERT(upperrootvp != NULL, ("%s: NULL upper root vp", __func__));

        /* create unionfs_mount */
        ump = malloc(sizeof(struct unionfs_mount), M_UNIONFSMNT,
            M_WAITOK | M_ZERO);

        /*
         * Save reference
         */
        if (below) {
                VOP_UNLOCK(upperrootvp);
                vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY);
                ump->um_lowervp = upperrootvp;
                ump->um_uppervp = lowerrootvp;
        } else {
                ump->um_lowervp = lowerrootvp;
                ump->um_uppervp = upperrootvp;
        }
        ump->um_rootvp = NULL;
        ump->um_uid = uid;
        ump->um_gid = gid;
        ump->um_udir = udir;
        ump->um_ufile = ufile;
        ump->um_copymode = copymode;
        ump->um_whitemode = whitemode;

        mp->mnt_data = ump;

        /*
         * Copy upper layer's RDONLY flag.
         */
        mp->mnt_flag |= ump->um_uppervp->v_mount->mnt_flag & MNT_RDONLY;

        /*
         * Unlock the node
         */
        VOP_UNLOCK(ump->um_uppervp);

        /*
         * Detect common cases in which constructing a unionfs hierarchy
         * would produce deadlock (or failed locking assertions) upon
         * use of the resulting unionfs vnodes. This typically happens
         * when the requested upper and lower filesystems (which themselves
         * may be unionfs instances and/or nullfs aliases) end up resolving
         * to the same base-layer files. Note that this is not meant to be
         * an exhaustive check of all possible deadlock-producing scenarios.
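         * For instance, an upper and a lower layer that are both nullfs
         * mounts backed by the same underlying directory would trip this
         * check.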
         */
        lvp1 = lvp2 = NULL;
        VOP_GETLOWVNODE(ump->um_lowervp, &lvp1, FREAD);
        VOP_GETLOWVNODE(ump->um_uppervp, &lvp2, FREAD);
        if (lvp1 != NULL && lvp1 == lvp2)
                error = EDEADLK;
        if (lvp1 != NULL)
                vrele(lvp1);
        if (lvp2 != NULL)
                vrele(lvp2);

        /*
         * Get the unionfs root vnode.
         */
        if (error == 0) {
                error = unionfs_nodeget(mp, ump->um_uppervp, ump->um_lowervp,
                    NULL, &(ump->um_rootvp), NULL);
        }
        if (error != 0) {
                vrele(upperrootvp);
                free(ump, M_UNIONFSMNT);
                mp->mnt_data = NULL;
                return (error);
        }
        KASSERT(ump->um_rootvp != NULL, ("rootvp cannot be NULL"));
        KASSERT((ump->um_rootvp->v_vflag & VV_ROOT) != 0,
            ("%s: rootvp without VV_ROOT", __func__));

        /*
         * Do not release the namei() reference on upperrootvp until after
         * we attempt to register the upper mounts. A concurrent unmount
         * of the upper or lower FS may have caused unionfs_nodeget() to
         * create a unionfs node with a NULL upper or lower vp and with
         * no reference held on upperrootvp or lowerrootvp.
         * vfs_register_upper() should subsequently fail, which is what
         * we want, but we must ensure neither underlying vnode can be
         * reused until that happens. We assume the caller holds a reference
         * to lowerrootvp as it is the mount's covered vnode.
         */
        ump->um_lowermp = vfs_register_upper_from_vp(ump->um_lowervp, mp,
            &ump->um_lower_link);
        ump->um_uppermp = vfs_register_upper_from_vp(ump->um_uppervp, mp,
            &ump->um_upper_link);

        vrele(upperrootvp);

        if (ump->um_lowermp == NULL || ump->um_uppermp == NULL) {
                if (ump->um_lowermp != NULL)
                        vfs_unregister_upper(ump->um_lowermp, &ump->um_lower_link);
                if (ump->um_uppermp != NULL)
                        vfs_unregister_upper(ump->um_uppermp, &ump->um_upper_link);
                vflush(mp, 1, FORCECLOSE, curthread);
                free(ump, M_UNIONFSMNT);
                mp->mnt_data = NULL;
                return (ENOENT);
        }

        /*
         * Specify that the covered vnode lock should remain held while
         * lookup() performs the cross-mount walk. This prevents a lock-order
         * reversal between the covered vnode lock (which is also locked by
         * unionfs_lock()) and the mountpoint's busy count. Without this,
         * unmount will lock the covered vnode lock (directly through the
         * covered vnode) and wait for the busy count to drain, while a
         * concurrent lookup will increment the busy count and then may lock
         * the covered vnode lock (indirectly through unionfs_lock()).
         *
         * Note that this is only needed for the 'below' case in which the
         * upper vnode is also the covered vnode, because unionfs_lock()
         * only locks the upper vnode as long as both lower and upper vnodes
         * are present (which they will always be for the unionfs mount root).
         */
        if (below) {
                vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
                mp->mnt_vnodecovered->v_vflag |= VV_CROSSLOCK;
                VOP_UNLOCK(mp->mnt_vnodecovered);
        }

        MNT_ILOCK(mp);
        if ((ump->um_lowermp->mnt_flag & MNT_LOCAL) != 0 &&
            (ump->um_uppermp->mnt_flag & MNT_LOCAL) != 0)
                mp->mnt_flag |= MNT_LOCAL;
        mp->mnt_kern_flag |= MNTK_NOMSYNC | MNTK_UNIONFS |
            (ump->um_uppermp->mnt_kern_flag & MNTK_SHARED_WRITES);
        MNT_IUNLOCK(mp);

        /*
         * Get new fsid
         */
        vfs_getnewfsid(mp);

        snprintf(mp->mnt_stat.f_mntfromname, MNAMELEN, "<%s>:%s",
            below ? "below" : "above", target);

        UNIONFSDEBUG("unionfs_mount: from %s, on %s\n",
            mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);

        return (0);
}
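
/*
 * Example (illustrative only): the options handled above typically come
 * from mount_unionfs(8) via nmount(2), e.g. something like
 *
 *     mount -t unionfs -o below -o copymode=transparent /upper /mnt/lower
 *
 * where the "/upper" path is passed as the "target"/"from" option and
 * /mnt/lower is the covered vnode; with "below" the roles of the two
 * layers are swapped, as done in unionfs_domount() above.
 */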

/*
 * Free reference to unionfs layer
 */
static int
unionfs_unmount(struct mount *mp, int mntflags)
{
        struct unionfs_mount *ump;
        int error;
        int num;
        int freeing;
        int flags;

        UNIONFSDEBUG("unionfs_unmount: mp = %p\n", mp);

        ump = MOUNTTOUNIONFSMOUNT(mp);
        flags = 0;

        if (mntflags & MNT_FORCE)
                flags |= FORCECLOSE;

        /* vflush (no need to call vrele) */
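        /*
         * Retry vflush() for as long as each pass still reduces the number
         * of vnodes on the mount; give up once mnt_nvnodelistsize stops
         * changing between passes.
         */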
        for (freeing = 0; (error = vflush(mp, 1, flags, curthread)) != 0;) {
                num = mp->mnt_nvnodelistsize;
                if (num == freeing)
                        break;
                freeing = num;
        }

        if (error)
                return (error);

        vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
        mp->mnt_vnodecovered->v_vflag &= ~VV_CROSSLOCK;
        VOP_UNLOCK(mp->mnt_vnodecovered);
        vfs_unregister_upper(ump->um_lowermp, &ump->um_lower_link);
        vfs_unregister_upper(ump->um_uppermp, &ump->um_upper_link);
        free(ump, M_UNIONFSMNT);
        mp->mnt_data = NULL;

        return (0);
}

static int
unionfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
        struct unionfs_mount *ump;
        struct vnode *vp;

        ump = MOUNTTOUNIONFSMOUNT(mp);
        vp = ump->um_rootvp;

        UNIONFSDEBUG("unionfs_root: rootvp=%p locked=%x\n",
            vp, VOP_ISLOCKED(vp));

        vref(vp);
        if (flags & LK_TYPE_MASK)
                vn_lock(vp, flags);

        *vpp = vp;

        return (0);
}

static int
unionfs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg,
    bool *mp_busy)
{
        struct mount *uppermp;
        struct unionfs_mount *ump;
        int error;
        bool unbusy;

        ump = MOUNTTOUNIONFSMOUNT(mp);
        /*
         * Issue a volatile load of um_uppermp here, as the mount may be
         * torn down after we call vfs_unbusy().
         */
        uppermp = atomic_load_ptr(&ump->um_uppermp);
        KASSERT(*mp_busy == true, ("upper mount not busy"));
        /*
         * See comment in sys_quotactl() for an explanation of why the
         * lower mount needs to be busied by the caller of VFS_QUOTACTL()
         * but may be unbusied by the implementation. We must unbusy
         * the upper mount for the same reason; otherwise a namei lookup
         * issued by the VFS_QUOTACTL() implementation could traverse the
         * upper mount and deadlock.
         */
        vfs_unbusy(mp);
        *mp_busy = false;
        unbusy = true;
        error = vfs_busy(uppermp, 0);
        /*
         * Writing is always performed to upper vnode.
         */
        if (error == 0)
                error = VFS_QUOTACTL(uppermp, cmd, uid, arg, &unbusy);
        if (unbusy)
                vfs_unbusy(uppermp);

        return (error);
}

static int
unionfs_statfs(struct mount *mp, struct statfs *sbp)
{
        struct unionfs_mount *ump;
        struct statfs *mstat;
        uint64_t lbsize;
        int error;

        ump = MOUNTTOUNIONFSMOUNT(mp);

        UNIONFSDEBUG("unionfs_statfs(mp = %p, lvp = %p, uvp = %p)\n",
            mp, ump->um_lowervp, ump->um_uppervp);

        mstat = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK | M_ZERO);

        error = VFS_STATFS(ump->um_lowermp, mstat);
        if (error) {
                free(mstat, M_STATFS);
                return (error);
        }

        /* now copy across the "interesting" information and fake the rest */
        sbp->f_blocks = mstat->f_blocks;
        sbp->f_files = mstat->f_files;

        lbsize = mstat->f_bsize;

        error = VFS_STATFS(ump->um_uppermp, mstat);
        if (error) {
                free(mstat, M_STATFS);
                return (error);
        }

        /*
         * The FS type, flags, and block sizes are taken from the upper vfs
         * (the writable layer takes priority).
         */
        sbp->f_type = mstat->f_type;
        sbp->f_flags = mstat->f_flags;
        sbp->f_bsize = mstat->f_bsize;
        sbp->f_iosize = mstat->f_iosize;

        if (mstat->f_bsize != lbsize)
                sbp->f_blocks = ((off_t)sbp->f_blocks * lbsize) /
                    mstat->f_bsize;
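        /*
         * The statement above rescales the lower layer's block count to the
         * upper layer's block size before the two counts are added below;
         * e.g. 8192 512-byte lower blocks become 1024 blocks when the upper
         * f_bsize is 4096.
         */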

        sbp->f_blocks += mstat->f_blocks;
        sbp->f_bfree = mstat->f_bfree;
        sbp->f_bavail = mstat->f_bavail;
        sbp->f_files += mstat->f_files;
        sbp->f_ffree = mstat->f_ffree;

        free(mstat, M_STATFS);
        return (0);
}

static int
unionfs_sync(struct mount *mp, int waitfor)
{
        /* nothing to do */
        return (0);
}

static int
unionfs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
        return (EOPNOTSUPP);
}

static int
unionfs_fhtovp(struct mount *mp, struct fid *fidp, int flags,
    struct vnode **vpp)
{
        return (EOPNOTSUPP);
}

static int
unionfs_checkexp(struct mount *mp, struct sockaddr *nam, uint64_t *extflagsp,
    struct ucred **credanonp, int *numsecflavors, int *secflavors)
{
        return (EOPNOTSUPP);
}

static int
unionfs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int namespace, const char *attrname)
{
        struct unionfs_mount *ump;
        struct unionfs_node *unp;

        ump = MOUNTTOUNIONFSMOUNT(mp);
        unp = VTOUNIONFS(filename_vp);

        if (unp->un_uppervp != NULL) {
                return (VFS_EXTATTRCTL(ump->um_uppermp, cmd,
                    unp->un_uppervp, namespace, attrname));
        } else {
                return (VFS_EXTATTRCTL(ump->um_lowermp, cmd,
                    unp->un_lowervp, namespace, attrname));
        }
}

static struct vfsops unionfs_vfsops = {
        .vfs_checkexp = unionfs_checkexp,
        .vfs_extattrctl = unionfs_extattrctl,
        .vfs_fhtovp = unionfs_fhtovp,
        .vfs_init = unionfs_init,
        .vfs_mount = unionfs_domount,
        .vfs_quotactl = unionfs_quotactl,
        .vfs_root = unionfs_root,
        .vfs_statfs = unionfs_statfs,
        .vfs_sync = unionfs_sync,
        .vfs_uninit = unionfs_uninit,
        .vfs_unmount = unionfs_unmount,
        .vfs_vget = unionfs_vget,
};

VFS_SET(unionfs_vfsops, unionfs, VFCF_LOOPBACK);