GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/ufs/ffs/ffs_snapshot.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2000 Marshall Kirk McKusick. All Rights Reserved.
 *
 * Further information about snapshots can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		[email protected]
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_quota.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/gsb_crc32.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/sched.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#define KERNCRED thread0.td_ucred
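/*
 * KERNCRED is the kernel's own credential (thread0's ucred); it is
 * used for most of the snapshot block allocation and I/O below rather
 * than the credential of the process creating the snapshot.
 */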

#include "opt_ffs.h"

#ifdef NO_FFS_SNAPSHOT
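/*
 * Stubs used when the kernel is built without snapshot support
 * (options NO_FFS_SNAPSHOT): operations that would create or use a
 * snapshot simply fail with EINVAL.
 */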
int
ffs_snapshot(struct mount *mp, char *snapfile)
{
	return (EINVAL);
}

int
ffs_snapblkfree(struct fs *fs,
	struct vnode *devvp,
	ufs2_daddr_t bno,
	long size,
	ino_t inum,
	__enum_uint8(vtype) vtype,
	struct workhead *wkhd)
{
	return (EINVAL);
}

void
ffs_snapremove(struct vnode *vp)
{
}

void
ffs_snapshot_mount(struct mount *mp)
{
}

void
ffs_snapshot_unmount(struct mount *mp)
{
}

void
ffs_snapgone(struct inode *ip)
{
}

int
ffs_copyonwrite(struct vnode *devvp, struct buf *bp)
{
	return (EINVAL);
}

void
ffs_sync_snap(struct mount *mp, int waitfor)
{
}

#else
FEATURE(ffs_snapshot, "FFS snapshot support");
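
/*
 * Global list of free snapdata structures, protected by snapfree_lock.
 */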
LIST_HEAD(, snapdata) snapfree;
static struct mtx snapfree_lock;
MTX_SYSINIT(ffs_snapfree, &snapfree_lock, "snapdata free list", MTX_DEF);

static int cgaccount(int, struct vnode *, struct buf *, int);
static int expunge_ufs1(struct vnode *, struct inode *, struct fs *,
    int (*)(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *, struct fs *,
    ufs_lbn_t, int), int, int);
static int indiracct_ufs1(struct vnode *, struct vnode *, int,
    ufs1_daddr_t, ufs_lbn_t, ufs_lbn_t, ufs_lbn_t, ufs_lbn_t, struct fs *,
    int (*)(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *, struct fs *,
    ufs_lbn_t, int), int);
static int fullacct_ufs1(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *,
    struct fs *, ufs_lbn_t, int);
static int snapacct_ufs1(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *,
    struct fs *, ufs_lbn_t, int);
static int mapacct_ufs1(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *,
    struct fs *, ufs_lbn_t, int);
static int expunge_ufs2(struct vnode *, struct inode *, struct fs *,
    int (*)(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *, struct fs *,
    ufs_lbn_t, int), int, int);
static int indiracct_ufs2(struct vnode *, struct vnode *, int,
    ufs2_daddr_t, ufs_lbn_t, ufs_lbn_t, ufs_lbn_t, ufs_lbn_t, struct fs *,
    int (*)(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *, struct fs *,
    ufs_lbn_t, int), int);
static int fullacct_ufs2(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *,
    struct fs *, ufs_lbn_t, int);
static int snapacct_ufs2(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *,
    struct fs *, ufs_lbn_t, int);
static int mapacct_ufs2(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *,
    struct fs *, ufs_lbn_t, int);
static int readblock(struct vnode *vp, struct buf *, ufs2_daddr_t);
static void try_free_snapdata(struct vnode *devvp);
static void revert_snaplock(struct vnode *, struct vnode *, struct snapdata *);
static struct snapdata *ffs_snapdata_acquire(struct vnode *devvp);
static int ffs_bp_snapblk(struct vnode *, struct buf *);

/*
 * To ensure the consistency of snapshots across crashes, we must
 * synchronously write out copied blocks before allowing the
 * originals to be modified. Because of the rather severe speed
 * penalty that this imposes, the code normally only ensures
 * persistence for the filesystem metadata contained within a
 * snapshot. Setting the following flag allows this crash
 * persistence to be enabled for file contents.
 */
int dopersistence = 0;

#ifdef DIAGNOSTIC
#include <sys/sysctl.h>
SYSCTL_INT(_debug, OID_AUTO, dopersistence, CTLFLAG_RW, &dopersistence, 0, "");
static int snapdebug = 0;
SYSCTL_INT(_debug, OID_AUTO, snapdebug, CTLFLAG_RW, &snapdebug, 0, "");
int collectsnapstats = 0;
SYSCTL_INT(_debug, OID_AUTO, collectsnapstats, CTLFLAG_RW, &collectsnapstats,
    0, "");
#endif /* DIAGNOSTIC */

/*
 * Create a snapshot file and initialize it for the filesystem.
 */
int
ffs_snapshot(struct mount *mp, char *snapfile)
{
	ufs2_daddr_t numblks, blkno, *blkp, *snapblklist;
	int error, cg, snaploc;
	int i, size, len, loc;
	ufs2_daddr_t blockno;
	uint64_t flag;
	char saved_nice = 0;
#ifdef DIAGNOSTIC
	long redo = 0;
#endif
	long snaplistsize = 0;
	int32_t *lp;
	void *space;
	struct fs *copy_fs = NULL, *fs, *bpfs;
	struct thread *td = curthread;
	struct inode *ip, *xp;
	struct buf *bp, *nbp, *ibp;
	struct nameidata nd;
	struct mount *wrtmp;
	struct vattr vat;
	struct vnode *vp, *xvp, *mvp, *devvp;
	struct uio auio;
	struct iovec aiov;
	struct snapdata *sn;
	struct ufsmount *ump;
#ifdef DIAGNOSTIC
	struct timespec starttime = {0, 0}, endtime;
#endif

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	sn = NULL;
	/*
	 * At the moment, filesystems using gjournal cannot support
	 * taking snapshots.
	 */
	if ((mp->mnt_flag & MNT_GJOURNAL) != 0) {
		vfs_mount_error(mp, "%s: Snapshots are not yet supported when "
		    "using gjournal", fs->fs_fsmnt);
		return (EOPNOTSUPP);
	}
	MNT_ILOCK(mp);
	flag = mp->mnt_flag;
	MNT_IUNLOCK(mp);
	/*
	 * Need to serialize access to snapshot code per filesystem.
	 */
	/*
	 * Assign a snapshot slot in the superblock.
	 */
	UFS_LOCK(ump);
	for (snaploc = 0; snaploc < FSMAXSNAP; snaploc++)
		if (fs->fs_snapinum[snaploc] == 0)
			break;
	UFS_UNLOCK(ump);
	if (snaploc == FSMAXSNAP)
		return (ENOSPC);
	/*
	 * Create the snapshot file.
	 */
restart:
	NDINIT(&nd, CREATE, LOCKPARENT | LOCKLEAF | NOCACHE, UIO_SYSSPACE,
	    snapfile);
	if ((error = namei(&nd)) != 0)
		return (error);
	if (nd.ni_vp != NULL) {
		vput(nd.ni_vp);
		error = EEXIST;
	}
	if (nd.ni_dvp->v_mount != mp)
		error = EXDEV;
	if (error) {
		NDFREE_PNBUF(&nd);
		if (nd.ni_dvp == nd.ni_vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		return (error);
	}
	VATTR_NULL(&vat);
	vat.va_type = VREG;
	vat.va_mode = S_IRUSR;
	vat.va_vaflags |= VA_EXCLUSIVE;
	if (VOP_GETWRITEMOUNT(nd.ni_dvp, &wrtmp))
		wrtmp = NULL;
	if (wrtmp != mp)
		panic("ffs_snapshot: mount mismatch");
	vfs_rel(wrtmp);
	if (vn_start_write(NULL, &wrtmp, V_NOWAIT) != 0) {
		NDFREE_PNBUF(&nd);
		vput(nd.ni_dvp);
		if ((error = vn_start_write(NULL, &wrtmp,
		    V_XSLEEP | PCATCH)) != 0)
			return (error);
		goto restart;
	}
	error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vat);
	if (error) {
		VOP_VPUT_PAIR(nd.ni_dvp, NULL, true);
		NDFREE_PNBUF(&nd);
		vn_finished_write(wrtmp);
		if (error == ERELOOKUP)
			goto restart;
		return (error);
	}
	vp = nd.ni_vp;
	vref(nd.ni_dvp);
	VOP_VPUT_PAIR(nd.ni_dvp, &vp, false);
	if (VN_IS_DOOMED(vp)) {
		error = EBADF;
		goto out;
	}
	vnode_create_vobject(nd.ni_vp, fs->fs_size, td);
	vp->v_vflag |= VV_SYSTEM;
	ip = VTOI(vp);
	devvp = ITODEVVP(ip);
	/*
	 * Calculate the size of the filesystem then allocate the block
	 * immediately following the last block of the filesystem that
	 * will contain the snapshot list. This operation allows us to
	 * set the size of the snapshot.
	 */
	numblks = howmany(fs->fs_size, fs->fs_frag);
	error = UFS_BALLOC(vp, lblktosize(fs, (off_t)numblks),
	    fs->fs_bsize, KERNCRED, BA_CLRBUF, &bp);
	if (error)
		goto out;
	bawrite(bp);
	ip->i_size = lblktosize(fs, (off_t)(numblks + 1));
	vnode_pager_setsize(vp, ip->i_size);
	DIP_SET(ip, i_size, ip->i_size);
	UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE | IN_UPDATE);
	/*
	 * Preallocate critical data structures so that we can copy
	 * them in without further allocation after we suspend all
	 * operations on the filesystem. We would like to just release
	 * the allocated buffers without writing them since they will
	 * be filled in below once we are ready to go, but this upsets
	 * the soft update code, so we go ahead and write the new buffers.
	 *
	 * Allocate all indirect blocks and mark all of them as not
	 * needing to be copied.
	 */
	for (blkno = UFS_NDADDR; blkno < numblks; blkno += NINDIR(fs)) {
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)blkno),
		    fs->fs_bsize, td->td_ucred, BA_METAONLY, &ibp);
		if (error)
			goto out;
		bawrite(ibp);
	}
	/*
	 * Allocate copies for the superblock and its summary information.
	 */
	error = UFS_BALLOC(vp, fs->fs_sblockloc, fs->fs_sbsize, KERNCRED,
	    0, &nbp);
	if (error)
		goto out;
	bawrite(nbp);
	blkno = fragstoblks(fs, fs->fs_csaddr);
	len = howmany(fs->fs_cssize, fs->fs_bsize);
	for (loc = 0; loc < len; loc++) {
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)(blkno + loc)),
		    fs->fs_bsize, KERNCRED, 0, &nbp);
		if (error)
			goto out;
		bawrite(nbp);
	}
	/*
	 * Allocate all cylinder group blocks.
	 */
	for (cg = 0; cg < fs->fs_ncg; cg++) {
		error = UFS_BALLOC(vp, lfragtosize(fs, cgtod(fs, cg)),
		    fs->fs_bsize, KERNCRED, 0, &nbp);
		if (error)
			goto out;
		bawrite(nbp);
		if (cg % 10 == 0) {
			error = ffs_syncvnode(vp, MNT_WAIT, 0);
			/* vp possibly reclaimed if unlocked */
			if (error != 0)
				goto out;
		}
	}
	/*
	 * Change inode to snapshot type file. Before setting its block
	 * pointers to BLK_SNAP and BLK_NOCOPY in cgaccount, we have to
	 * set its type to SF_SNAPSHOT so that VOP_REMOVE will know that
	 * they need to be rolled back before attempting deletion.
	 */
	ip->i_flags |= SF_SNAPSHOT;
	DIP_SET(ip, i_flags, ip->i_flags);
	UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
	/*
	 * Copy all the cylinder group maps. Although the
	 * filesystem is still active, we hope that only a few
	 * cylinder groups will change between now and when we
	 * suspend operations. Thus, we will be able to quickly
	 * touch up the few cylinder groups that changed during
	 * the suspension period.
	 */
	len = roundup2(howmany(fs->fs_ncg, NBBY), sizeof(uint64_t));
	space = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	UFS_LOCK(ump);
	fs->fs_active = space;
	UFS_UNLOCK(ump);
	for (cg = 0; cg < fs->fs_ncg; cg++) {
		error = UFS_BALLOC(vp, lfragtosize(fs, cgtod(fs, cg)),
		    fs->fs_bsize, KERNCRED, 0, &nbp);
		if (error)
			goto out;
		error = cgaccount(cg, vp, nbp, 1);
		bawrite(nbp);
		if (cg % 10 == 0 && error == 0)
			error = ffs_syncvnode(vp, MNT_WAIT, 0);
		if (error)
			goto out;
	}
	/*
	 * Ensure that the snapshot is completely on disk.
	 * Since we have marked it as a snapshot it is safe to
	 * unlock it as no process will be allowed to write to it.
	 */
	if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
		goto out;
	VOP_UNLOCK(vp);
	/*
	 * All allocations are done, so we can now snapshot the system.
	 *
	 * Rescind nice scheduling while running with the filesystem suspended.
422
*/
423
if (td->td_proc->p_nice > 0) {
424
struct proc *p;
425
426
p = td->td_proc;
427
PROC_LOCK(p);
428
saved_nice = p->p_nice;
429
sched_nice(p, 0);
430
PROC_UNLOCK(p);
431
}
432
/*
433
* Suspend operation on filesystem.
434
*/
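	/*
	 * Suspension waits for active writers to drain, so our own
	 * write reference must be released before each attempt and
	 * reacquired if the attempt fails or must be retried.
	 */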
	for (;;) {
		vn_finished_write(wrtmp);
		if ((error = vfs_write_suspend(vp->v_mount, 0)) != 0) {
			vn_start_write(NULL, &wrtmp, V_WAIT);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			goto out;
		}
		if (mp->mnt_kern_flag & MNTK_SUSPENDED)
			break;
		vn_start_write(NULL, &wrtmp, V_WAIT);
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (ip->i_effnlink == 0) {
		error = ENOENT;		/* Snapshot file unlinked */
		goto resumefs;
	}
#ifdef DIAGNOSTIC
	if (collectsnapstats)
		nanotime(&starttime);
#endif

	/*
	 * First, copy all the cylinder group maps that have changed.
	 */
	for (cg = 0; cg < fs->fs_ncg; cg++) {
		if ((ACTIVECGNUM(fs, cg) & ACTIVECGOFF(cg)) != 0)
			continue;
#ifdef DIAGNOSTIC
		redo++;
#endif
		error = UFS_BALLOC(vp, lfragtosize(fs, cgtod(fs, cg)),
		    fs->fs_bsize, KERNCRED, 0, &nbp);
		if (error)
			goto resumefs;
		error = cgaccount(cg, vp, nbp, 2);
		bawrite(nbp);
		if (error)
			goto resumefs;
	}
	/*
	 * Grab a copy of the superblock and its summary information.
	 * We delay writing it until the suspension is released below.
	 */
	copy_fs = malloc((uint64_t)fs->fs_bsize, M_UFSMNT, M_WAITOK);
	bcopy(fs, copy_fs, fs->fs_sbsize);
	copy_fs->fs_si = malloc(sizeof(struct fs_summary_info), M_UFSMNT,
	    M_ZERO | M_WAITOK);
	if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
		copy_fs->fs_clean = 1;
	size = fs->fs_bsize < SBLOCKSIZE ? fs->fs_bsize : SBLOCKSIZE;
	if (fs->fs_sbsize < size)
		bzero(&((char *)copy_fs)[fs->fs_sbsize],
		    size - fs->fs_sbsize);
	size = blkroundup(fs, fs->fs_cssize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	space = malloc((uint64_t)size, M_UFSMNT, M_WAITOK);
	copy_fs->fs_csp = space;
	bcopy(fs->fs_csp, copy_fs->fs_csp, fs->fs_cssize);
	space = (char *)space + fs->fs_cssize;
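	/*
	 * The summary area need not end on a block boundary, so pick
	 * up the trailing fragments of its last block directly from
	 * the device.
	 */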
	loc = howmany(fs->fs_cssize, fs->fs_fsize);
	i = fs->fs_frag - loc % fs->fs_frag;
	len = (i == fs->fs_frag) ? 0 : i * fs->fs_fsize;
	if (len > 0) {
		if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + loc),
		    len, KERNCRED, &bp)) != 0) {
			brelse(bp);
			goto resumefs;
		}
		bcopy(bp->b_data, space, (uint64_t)len);
		space = (char *)space + len;
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}
	if (fs->fs_contigsumsize > 0) {
		copy_fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	/*
	 * We must check for active files that have been unlinked
	 * (e.g., with a zero link count). We have to expunge all
	 * trace of these files from the snapshot so that they are
	 * not reclaimed prematurely by fsck or unnecessarily dumped.
	 * We turn off the MNTK_SUSPENDED flag to avoid a panic from
	 * spec_strategy about writing on a suspended filesystem.
	 * Note that we skip unlinked snapshot files as they will
	 * be handled separately below.
	 *
	 * We also calculate the size needed for the snapshot list.
	 * Initial number of entries is composed of:
	 * - one for each cylinder group map
	 * - one for each block used by superblock summary table
	 * - one for each snapshot inode block
	 * - one for the superblock
	 * - one for the snapshot list
	 * The direct block entries in the snapshot are always
	 * copied (see reason below). Note that the superblock and
	 * the first cylinder group will almost always be allocated
	 * in the direct blocks, but we add the slop for them in case
	 * they do not end up there. The snapshot list size may get
	 * expanded by one because of an update of an inode block for
	 * an unlinked but still open file when it is expunged.
	 *
	 * Because the direct block pointers are always copied, they
	 * are not added to the list. Instead ffs_copyonwrite()
	 * explicitly checks for them before checking the snapshot list.
	 */
	snaplistsize = fs->fs_ncg + howmany(fs->fs_cssize, fs->fs_bsize) +
	    FSMAXSNAP + /* superblock */ 1 + /* snaplist */ 1;
	MNT_ILOCK(mp);
	mp->mnt_kern_flag &= ~MNTK_SUSPENDED;
	MNT_IUNLOCK(mp);
loop:
	MNT_VNODE_FOREACH_ALL(xvp, mp, mvp) {
		if ((xvp->v_usecount == 0 &&
		     (xvp->v_iflag & (VI_OWEINACT | VI_DOINGINACT)) == 0) ||
		    xvp->v_type == VNON ||
		    IS_SNAPSHOT(VTOI(xvp))) {
			VI_UNLOCK(xvp);
			continue;
		}
		/*
		 * We can skip parent directory vnode because it must have
		 * this snapshot file in it.
		 */
		if (xvp == nd.ni_dvp) {
			VI_UNLOCK(xvp);
			continue;
		}
		vholdl(xvp);
		if (vn_lock(xvp, LK_EXCLUSIVE | LK_INTERLOCK) != 0) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			vdrop(xvp);
			goto loop;
		}
		VI_LOCK(xvp);
		if (xvp->v_usecount == 0 &&
		    (xvp->v_iflag & (VI_OWEINACT | VI_DOINGINACT)) == 0) {
			VI_UNLOCK(xvp);
			VOP_UNLOCK(xvp);
			vdrop(xvp);
			continue;
		}
		VI_UNLOCK(xvp);
#ifdef DIAGNOSTIC
		if (snapdebug)
			vn_printf(xvp, "ffs_snapshot: busy vnode ");
#endif
		if (VOP_GETATTR(xvp, &vat, td->td_ucred) == 0 &&
		    vat.va_nlink > 0) {
			VOP_UNLOCK(xvp);
			vdrop(xvp);
			continue;
		}
		xp = VTOI(xvp);
		if (ffs_checkfreefile(copy_fs, vp, xp->i_number)) {
			VOP_UNLOCK(xvp);
			vdrop(xvp);
			continue;
		}
		/*
		 * If there is a fragment, clear it here.
		 */
		blkno = 0;
		loc = howmany(xp->i_size, fs->fs_bsize) - 1;
		if (loc < UFS_NDADDR) {
			len = fragroundup(fs, blkoff(fs, xp->i_size));
			if (len != 0 && len < fs->fs_bsize) {
				ffs_blkfree(ump, copy_fs, vp,
				    DIP(xp, i_db[loc]), len, xp->i_number,
				    xvp->v_type, NULL, SINGLETON_KEY);
				blkno = DIP(xp, i_db[loc]);
				DIP_SET(xp, i_db[loc], 0);
			}
		}
		snaplistsize += 1;
		if (I_IS_UFS1(xp))
			error = expunge_ufs1(vp, xp, copy_fs, fullacct_ufs1,
			    BLK_NOCOPY, 1);
		else
			error = expunge_ufs2(vp, xp, copy_fs, fullacct_ufs2,
			    BLK_NOCOPY, 1);
		if (blkno)
			DIP_SET(xp, i_db[loc], blkno);
		if (!error)
			error = ffs_freefile(ump, copy_fs, vp, xp->i_number,
			    xp->i_mode, NULL);
		VOP_UNLOCK(xvp);
		vdrop(xvp);
		if (error) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto resumefs;
		}
	}
	/*
	 * Erase the journal file from the snapshot.
	 */
	if (fs->fs_flags & FS_SUJ) {
		error = softdep_journal_lookup(mp, &xvp);
		if (error)
			goto resumefs;
		xp = VTOI(xvp);
		if (I_IS_UFS1(xp))
			error = expunge_ufs1(vp, xp, copy_fs, fullacct_ufs1,
			    BLK_NOCOPY, 0);
		else
			error = expunge_ufs2(vp, xp, copy_fs, fullacct_ufs2,
			    BLK_NOCOPY, 0);
		vput(xvp);
	}
	/*
	 * Preallocate all the direct blocks in the snapshot inode so
	 * that we never have to write the inode itself to commit an
	 * update to the contents of the snapshot. Note that once
	 * created, the size of the snapshot will never change, so
	 * there will never be a need to write the inode except to
	 * update the non-integrity-critical time fields and
	 * allocated-block count.
	 */
	for (blockno = 0; blockno < UFS_NDADDR; blockno++) {
		if (DIP(ip, i_db[blockno]) != 0)
			continue;
		error = UFS_BALLOC(vp, lblktosize(fs, blockno),
		    fs->fs_bsize, KERNCRED, BA_CLRBUF, &bp);
		if (error)
			goto resumefs;
		error = readblock(vp, bp, blockno);
		bawrite(bp);
		if (error != 0)
			goto resumefs;
	}
	/*
	 * Acquire a lock on the snapdata structure, creating it if necessary.
	 */
	sn = ffs_snapdata_acquire(devvp);
	/*
	 * Change vnode to use shared snapshot lock instead of the original
	 * private lock.
	 */
	vp->v_vnlock = &sn->sn_lock;
	lockmgr(&vp->v_lock, LK_RELEASE, NULL);
	xp = TAILQ_FIRST(&sn->sn_head);
	/*
	 * If this is the first snapshot on this filesystem, then we need
	 * to allocate the space for the list of preallocated snapshot blocks.
	 * This list will be refined below, but this preliminary one will
	 * keep us out of deadlock until the full one is ready.
	 */
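	/*
	 * The preliminary list names the blocks that every snapshot
	 * must own: the superblock copy, the cylinder group maps, and
	 * the blocks of the summary area.
	 */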
	if (xp == NULL) {
		snapblklist = malloc(snaplistsize * sizeof(daddr_t),
		    M_UFSMNT, M_WAITOK);
		blkp = &snapblklist[1];
		*blkp++ = lblkno(fs, fs->fs_sblockloc);
		blkno = fragstoblks(fs, fs->fs_csaddr);
		for (cg = 0; cg < fs->fs_ncg; cg++) {
			if (fragstoblks(fs, cgtod(fs, cg)) > blkno)
				break;
			*blkp++ = fragstoblks(fs, cgtod(fs, cg));
		}
		len = howmany(fs->fs_cssize, fs->fs_bsize);
		for (loc = 0; loc < len; loc++)
			*blkp++ = blkno + loc;
		for (; cg < fs->fs_ncg; cg++)
			*blkp++ = fragstoblks(fs, cgtod(fs, cg));
		snapblklist[0] = blkp - snapblklist;
		VI_LOCK(devvp);
		if (sn->sn_blklist != NULL)
			panic("ffs_snapshot: non-empty list");
		sn->sn_blklist = snapblklist;
		sn->sn_listsize = blkp - snapblklist;
		VI_UNLOCK(devvp);
	}
	/*
	 * Record snapshot inode. Since this is the newest snapshot,
	 * it must be placed at the end of the list.
	 */
	VI_LOCK(devvp);
	fs->fs_snapinum[snaploc] = ip->i_number;
	if (ip->i_nextsnap.tqe_prev != 0)
		panic("ffs_snapshot: %ju already on list",
		    (uintmax_t)ip->i_number);
	TAILQ_INSERT_TAIL(&sn->sn_head, ip, i_nextsnap);
	devvp->v_vflag |= VV_COPYONWRITE;
	VI_UNLOCK(devvp);
resumefs:
	ASSERT_VOP_LOCKED(vp, "ffs_snapshot vp");
	if (error != 0 && copy_fs != NULL) {
		free(copy_fs->fs_csp, M_UFSMNT);
		free(copy_fs->fs_si, M_UFSMNT);
		free(copy_fs, M_UFSMNT);
		copy_fs = NULL;
	}
	KASSERT(error != 0 || (sn != NULL && copy_fs != NULL),
	    ("missing snapshot setup parameters"));
	/*
	 * Resume operation on filesystem.
	 */
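	/*
	 * VR_START_WRITE makes the resume double as vn_start_write(),
	 * restoring the write reference that was given up before the
	 * suspension.
	 */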
	vfs_write_resume(vp->v_mount, VR_START_WRITE | VR_NO_SUSPCLR);
#ifdef DIAGNOSTIC
	if (collectsnapstats && starttime.tv_sec > 0) {
		nanotime(&endtime);
		timespecsub(&endtime, &starttime, &endtime);
		printf("%s: suspended %ld.%03ld sec, redo %ld of %d\n",
		    vp->v_mount->mnt_stat.f_mntonname, (long)endtime.tv_sec,
		    endtime.tv_nsec / 1000000, redo, fs->fs_ncg);
	}
#endif
	if (copy_fs == NULL)
		goto out;
	/*
	 * Copy allocation information from all the snapshots in
	 * this snapshot and then expunge them from its view.
	 */
	TAILQ_FOREACH(xp, &sn->sn_head, i_nextsnap) {
		if (xp == ip)
			break;
		if (I_IS_UFS1(xp))
			error = expunge_ufs1(vp, xp, fs, snapacct_ufs1,
			    BLK_SNAP, 0);
		else
			error = expunge_ufs2(vp, xp, fs, snapacct_ufs2,
			    BLK_SNAP, 0);
		if (error == 0 && xp->i_effnlink == 0) {
			error = ffs_freefile(ump,
			    copy_fs,
			    vp,
			    xp->i_number,
			    xp->i_mode, NULL);
		}
		if (error) {
			fs->fs_snapinum[snaploc] = 0;
			goto done;
		}
	}
	/*
	 * Allocate space for the full list of preallocated snapshot blocks.
	 */
	snapblklist = malloc(snaplistsize * sizeof(daddr_t),
	    M_UFSMNT, M_WAITOK);
	ip->i_snapblklist = &snapblklist[1];
	/*
	 * Expunge the blocks used by the snapshots from the set of
	 * blocks marked as used in the snapshot bitmaps. Also, collect
	 * the list of allocated blocks in i_snapblklist.
	 */
	if (I_IS_UFS1(ip))
		error = expunge_ufs1(vp, ip, copy_fs, mapacct_ufs1,
		    BLK_SNAP, 0);
	else
		error = expunge_ufs2(vp, ip, copy_fs, mapacct_ufs2,
		    BLK_SNAP, 0);
	if (error) {
		fs->fs_snapinum[snaploc] = 0;
		free(snapblklist, M_UFSMNT);
		goto done;
	}
	if (snaplistsize < ip->i_snapblklist - snapblklist)
		panic("ffs_snapshot: list too small");
	snaplistsize = ip->i_snapblklist - snapblklist;
	snapblklist[0] = snaplistsize;
	ip->i_snapblklist = 0;
	/*
	 * Write out the list of allocated blocks to the end of the snapshot.
	 */
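	/*
	 * On disk, the first entry of the list holds its length in
	 * entries (counting that entry itself); the remainder are the
	 * logical block numbers of the snapshot's preallocated blocks.
	 * The list lives in the block just past the end of the
	 * filesystem image.
	 */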
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)snapblklist;
	aiov.iov_len = snaplistsize * sizeof(daddr_t);
	auio.uio_resid = aiov.iov_len;
	auio.uio_offset = lblktosize(fs, (off_t)numblks);
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;
	if ((error = VOP_WRITE(vp, &auio, IO_UNIT, td->td_ucred)) != 0) {
		fs->fs_snapinum[snaploc] = 0;
		free(snapblklist, M_UFSMNT);
		goto done;
	}
	/*
	 * Write the superblock and its summary information
	 * to the snapshot.
	 */
	blkno = fragstoblks(fs, fs->fs_csaddr);
	len = howmany(fs->fs_cssize, fs->fs_bsize);
	space = copy_fs->fs_csp;
	for (loc = 0; loc < len; loc++) {
		error = bread(vp, blkno + loc, fs->fs_bsize, KERNCRED, &nbp);
		if (error) {
			fs->fs_snapinum[snaploc] = 0;
			free(snapblklist, M_UFSMNT);
			goto done;
		}
		bcopy(space, nbp->b_data, fs->fs_bsize);
		space = (char *)space + fs->fs_bsize;
		bawrite(nbp);
	}
	error = bread(vp, lblkno(fs, fs->fs_sblockloc), fs->fs_bsize,
	    KERNCRED, &nbp);
	if (error) {
		brelse(nbp);
	} else {
		loc = blkoff(fs, fs->fs_sblockloc);
		copy_fs->fs_fmod = 0;
		bpfs = (struct fs *)&nbp->b_data[loc];
		bcopy((caddr_t)copy_fs, (caddr_t)bpfs, (uint64_t)fs->fs_sbsize);
		ffs_oldfscompat_write(bpfs);
		bpfs->fs_ckhash = ffs_calc_sbhash(bpfs);
		bawrite(nbp);
	}
	/*
	 * As this is the newest list, it is the most inclusive, so
	 * should replace the previous list.
	 */
	VI_LOCK(devvp);
	space = sn->sn_blklist;
	sn->sn_blklist = snapblklist;
	sn->sn_listsize = snaplistsize;
	VI_UNLOCK(devvp);
	if (space != NULL)
		free(space, M_UFSMNT);
done:
	free(copy_fs->fs_csp, M_UFSMNT);
	free(copy_fs->fs_si, M_UFSMNT);
	free(copy_fs, M_UFSMNT);
	copy_fs = NULL;
out:
	if (saved_nice > 0) {
		struct proc *p;

		p = td->td_proc;
		PROC_LOCK(p);
		sched_nice(td->td_proc, saved_nice);
		PROC_UNLOCK(td->td_proc);
	}
	UFS_LOCK(ump);
	if (fs->fs_active != 0) {
		free(fs->fs_active, M_DEVBUF);
		fs->fs_active = 0;
	}
	UFS_UNLOCK(ump);
	MNT_ILOCK(mp);
	mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA);
	MNT_IUNLOCK(mp);
	NDFREE_PNBUF(&nd);
	vrele(nd.ni_dvp);
	if (error == 0) {
		(void) ffs_syncvnode(vp, MNT_WAIT, 0);
		VOP_UNLOCK(vp);
	} else if (VN_IS_DOOMED(vp)) {
		vput(vp);
	} else {
		int rmerr;

		/* Remove snapshot as its creation has failed. */
		vput(vp);
		NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF, UIO_SYSSPACE,
		    snapfile);
		if ((rmerr = namei(&nd)) != 0 ||
		    (rmerr = VOP_REMOVE(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd)) != 0)
			printf("Delete of %s failed with error %d\n",
			    nd.ni_dirp, rmerr);
		NDFREE_PNBUF(&nd);
		if (nd.ni_dvp != NULL)
			vput(nd.ni_dvp);
		if (nd.ni_vp != NULL)
			vput(nd.ni_vp);
	}
	vn_finished_write(wrtmp);
	process_deferred_inactive(mp);
	return (error);
}

/*
 * Copy a cylinder group map. All the unallocated blocks are marked
 * BLK_NOCOPY so that the snapshot knows that it need not copy them
 * if they are later written. If passno is 1, then this is a first
 * pass, so only setting needs to be done. If passno is 2, then this
 * is a revision to a previous pass which must be undone as the
 * replacement pass is done.
 */
static int
cgaccount(int cg,
	struct vnode *vp,
	struct buf *nbp,
	int passno)
{
	struct buf *bp, *ibp;
	struct inode *ip;
	struct cg *cgp;
	struct fs *fs;
	ufs2_daddr_t base, numblks;
	int error, len, loc, indiroff;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	if ((error = ffs_getcg(fs, ITODEVVP(ip), cg, 0, &bp, &cgp)) != 0)
		return (error);
	UFS_LOCK(ITOUMP(ip));
	ACTIVESET(fs, cg);
	/*
	 * Recomputation of summary information might not have been performed
	 * at mount time. Sync up summary information for current cylinder
	 * group while data is in memory to ensure that result of background
	 * fsck is slightly more consistent.
	 */
	fs->fs_cs(fs, cg) = cgp->cg_cs;
	UFS_UNLOCK(ITOUMP(ip));
	bcopy(bp->b_data, nbp->b_data, fs->fs_cgsize);
	if (fs->fs_cgsize < fs->fs_bsize)
		bzero(&nbp->b_data[fs->fs_cgsize],
		    fs->fs_bsize - fs->fs_cgsize);
	cgp = (struct cg *)nbp->b_data;
	bqrelse(bp);
	if (passno == 2)
		nbp->b_flags |= B_VALIDSUSPWRT;
	numblks = howmany(fs->fs_size, fs->fs_frag);
	len = howmany(fs->fs_fpg, fs->fs_frag);
	base = cgbase(fs, cg) / fs->fs_frag;
	if (base + len >= numblks)
		len = numblks - base - 1;
	loc = 0;
	if (base < UFS_NDADDR) {
		for ( ; loc < UFS_NDADDR; loc++) {
			if (ffs_isblock(fs, cg_blksfree(cgp), loc))
				DIP_SET(ip, i_db[loc], BLK_NOCOPY);
			else if (passno == 2 && DIP(ip, i_db[loc]) == BLK_NOCOPY)
				DIP_SET(ip, i_db[loc], 0);
			else if (passno == 1 && DIP(ip, i_db[loc]) == BLK_NOCOPY)
				panic("ffs_snapshot: lost direct block");
		}
	}
	error = UFS_BALLOC(vp, lblktosize(fs, (off_t)(base + loc)),
	    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
	if (error) {
		goto out;
	}
	indiroff = (base + loc - UFS_NDADDR) % NINDIR(fs);
	for ( ; loc < len; loc++, indiroff++) {
		if (indiroff >= NINDIR(fs)) {
			if (passno == 2)
				ibp->b_flags |= B_VALIDSUSPWRT;
			bawrite(ibp);
			error = UFS_BALLOC(vp,
			    lblktosize(fs, (off_t)(base + loc)),
			    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
			if (error) {
				goto out;
			}
			indiroff = 0;
		}
		if (I_IS_UFS1(ip)) {
			if (ffs_isblock(fs, cg_blksfree(cgp), loc))
				((ufs1_daddr_t *)(ibp->b_data))[indiroff] =
				    BLK_NOCOPY;
			else if (passno == 2 && ((ufs1_daddr_t *)(ibp->b_data))
			    [indiroff] == BLK_NOCOPY)
				((ufs1_daddr_t *)(ibp->b_data))[indiroff] = 0;
			else if (passno == 1 && ((ufs1_daddr_t *)(ibp->b_data))
			    [indiroff] == BLK_NOCOPY)
				panic("ffs_snapshot: lost indirect block");
			continue;
		}
		if (ffs_isblock(fs, cg_blksfree(cgp), loc))
			((ufs2_daddr_t *)(ibp->b_data))[indiroff] = BLK_NOCOPY;
		else if (passno == 2 &&
		    ((ufs2_daddr_t *)(ibp->b_data))[indiroff] == BLK_NOCOPY)
			((ufs2_daddr_t *)(ibp->b_data))[indiroff] = 0;
		else if (passno == 1 &&
		    ((ufs2_daddr_t *)(ibp->b_data))[indiroff] == BLK_NOCOPY)
			panic("ffs_snapshot: lost indirect block");
	}
	if (passno == 2)
		ibp->b_flags |= B_VALIDSUSPWRT;
	bdwrite(ibp);
out:
	/*
	 * We have to calculate the crc32c here rather than just setting the
	 * BX_CYLGRP b_xflags because the allocation of the block for
	 * the cylinder group map will always be a full size block (fs_bsize)
	 * even though the cylinder group may be smaller (fs_cgsize). The
	 * crc32c must be computed only over fs_cgsize whereas the BX_CYLGRP
	 * flag causes it to be computed over the size of the buffer.
	 */
	if ((fs->fs_metackhash & CK_CYLGRP) != 0) {
		((struct cg *)nbp->b_data)->cg_ckhash = 0;
		((struct cg *)nbp->b_data)->cg_ckhash =
		    calculate_crc32c(~0L, nbp->b_data, fs->fs_cgsize);
	}
	return (error);
}

/*
 * Before expunging a snapshot inode, note all the
 * blocks that it claims with BLK_SNAP so that fsck will
 * be able to account for those blocks properly and so
 * that this snapshot knows that it need not copy them
 * if the other snapshot holding them is freed. This code
 * is reproduced once each for UFS1 and UFS2.
 */
static int
expunge_ufs1(struct vnode *snapvp,
	struct inode *cancelip,
	struct fs *fs,
	int (*acctfunc)(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *,
	    struct fs *, ufs_lbn_t, int),
	int expungetype,
	int clearmode)
{
	int i, error, indiroff;
	ufs_lbn_t lbn, rlbn;
	ufs2_daddr_t len, blkno, numblks, blksperindir;
	struct ufs1_dinode *dip;
	struct thread *td = curthread;
	struct buf *bp;

	/*
	 * Prepare to expunge the inode. If its inode block has not
	 * yet been copied, then allocate and fill the copy.
	 */
	lbn = fragstoblks(fs, ino_to_fsba(fs, cancelip->i_number));
	blkno = 0;
	if (lbn < UFS_NDADDR) {
		blkno = VTOI(snapvp)->i_din1->di_db[lbn];
	} else {
		if (DOINGSOFTDEP(snapvp))
			softdep_prealloc(snapvp, MNT_WAIT);
		td->td_pflags |= TDP_COWINPROGRESS;
		error = ffs_balloc_ufs1(snapvp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, KERNCRED, BA_METAONLY, &bp);
		td->td_pflags &= ~TDP_COWINPROGRESS;
		if (error)
			return (error);
		indiroff = (lbn - UFS_NDADDR) % NINDIR(fs);
		blkno = ((ufs1_daddr_t *)(bp->b_data))[indiroff];
		bqrelse(bp);
	}
	if (blkno != 0) {
		if ((error = bread(snapvp, lbn, fs->fs_bsize, KERNCRED, &bp)))
			return (error);
	} else {
		error = ffs_balloc_ufs1(snapvp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, KERNCRED, 0, &bp);
		if (error)
			return (error);
		if ((error = readblock(snapvp, bp, lbn)) != 0)
			return (error);
	}
	/*
	 * Set a snapshot inode to be a zero length file, regular files
	 * or unlinked snapshots to be completely unallocated.
	 */
	dip = (struct ufs1_dinode *)bp->b_data +
	    ino_to_fsbo(fs, cancelip->i_number);
	if (clearmode || cancelip->i_effnlink == 0)
		dip->di_mode = 0;
	dip->di_size = 0;
	dip->di_blocks = 0;
	dip->di_flags &= ~SF_SNAPSHOT;
	bzero(dip->di_db, UFS_NDADDR * sizeof(ufs1_daddr_t));
	bzero(dip->di_ib, UFS_NIADDR * sizeof(ufs1_daddr_t));
	bdwrite(bp);
	/*
	 * Now go through and expunge all the blocks in the file
	 * using the function requested.
	 */
	numblks = howmany(cancelip->i_size, fs->fs_bsize);
	if ((error = (*acctfunc)(snapvp, &cancelip->i_din1->di_db[0],
	    &cancelip->i_din1->di_db[UFS_NDADDR], fs, 0, expungetype)))
		return (error);
	if ((error = (*acctfunc)(snapvp, &cancelip->i_din1->di_ib[0],
	    &cancelip->i_din1->di_ib[UFS_NIADDR], fs, -1, expungetype)))
		return (error);
	blksperindir = 1;
	lbn = -UFS_NDADDR;
	len = numblks - UFS_NDADDR;
	rlbn = UFS_NDADDR;
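	/*
	 * Walk each level of indirection. Indirect blocks are
	 * addressed by negative logical block numbers (the
	 * ufs_getlbns() convention); rlbn tracks the first data block
	 * mapped at each level.
	 */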
	for (i = 0; len > 0 && i < UFS_NIADDR; i++) {
		error = indiracct_ufs1(snapvp, ITOV(cancelip), i,
		    cancelip->i_din1->di_ib[i], lbn, rlbn, len,
		    blksperindir, fs, acctfunc, expungetype);
		if (error)
			return (error);
		blksperindir *= NINDIR(fs);
		lbn -= blksperindir + 1;
		len -= blksperindir;
		rlbn += blksperindir;
	}
	return (0);
}

/*
 * Descend an indirect block chain for vnode cancelvp accounting for all
 * its indirect blocks in snapvp.
 */
static int
indiracct_ufs1(struct vnode *snapvp,
	struct vnode *cancelvp,
	int level,
	ufs1_daddr_t blkno,
	ufs_lbn_t lbn,
	ufs_lbn_t rlbn,
	ufs_lbn_t remblks,
	ufs_lbn_t blksperindir,
	struct fs *fs,
	int (*acctfunc)(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *,
	    struct fs *, ufs_lbn_t, int),
	int expungetype)
{
	int error, num, i;
	ufs_lbn_t subblksperindir;
	struct indir indirs[UFS_NIADDR + 2];
	ufs1_daddr_t last, *bap;
	struct buf *bp;

	if (blkno == 0) {
		if (expungetype == BLK_NOCOPY)
			return (0);
		panic("indiracct_ufs1: missing indir");
	}
	if ((error = ufs_getlbns(cancelvp, rlbn, indirs, &num)) != 0)
		return (error);
	if (lbn != indirs[num - 1 - level].in_lbn || num < 2)
		panic("indiracct_ufs1: botched params");
	/*
	 * We have to expand bread here since it will deadlock looking
	 * up the block number for any blocks that are not in the cache.
	 */
	bp = getblk(cancelvp, lbn, fs->fs_bsize, 0, 0, 0);
	bp->b_blkno = fsbtodb(fs, blkno);
	if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0 &&
	    (error = readblock(cancelvp, bp, fragstoblks(fs, blkno)))) {
		brelse(bp);
		return (error);
	}
	/*
	 * Account for the block pointers in this indirect block.
	 */
	last = howmany(remblks, blksperindir);
	if (last > NINDIR(fs))
		last = NINDIR(fs);
	bap = malloc(fs->fs_bsize, M_DEVBUF, M_WAITOK);
	bcopy(bp->b_data, (caddr_t)bap, fs->fs_bsize);
	bqrelse(bp);
	error = (*acctfunc)(snapvp, &bap[0], &bap[last], fs,
	    level == 0 ? rlbn : -1, expungetype);
	if (error || level == 0)
		goto out;
	/*
	 * Account for the block pointers in each of the indirect blocks
	 * in the levels below us.
	 */
	subblksperindir = blksperindir / NINDIR(fs);
	for (lbn++, level--, i = 0; i < last; i++) {
		error = indiracct_ufs1(snapvp, cancelvp, level, bap[i], lbn,
		    rlbn, remblks, subblksperindir, fs, acctfunc, expungetype);
		if (error)
			goto out;
		rlbn += blksperindir;
		lbn -= blksperindir;
		remblks -= blksperindir;
	}
out:
	free(bap, M_DEVBUF);
	return (error);
}

/*
 * Do both snap accounting and map accounting.
 */
static int
fullacct_ufs1(struct vnode *vp,
	ufs1_daddr_t *oldblkp,
	ufs1_daddr_t *lastblkp,
	struct fs *fs,
	ufs_lbn_t lblkno,
	int exptype)	/* BLK_SNAP or BLK_NOCOPY */
{
	int error;

	if ((error = snapacct_ufs1(vp, oldblkp, lastblkp, fs, lblkno, exptype)))
		return (error);
	return (mapacct_ufs1(vp, oldblkp, lastblkp, fs, lblkno, exptype));
}

/*
 * Identify a set of blocks allocated in a snapshot inode.
 */
static int
snapacct_ufs1(struct vnode *vp,
	ufs1_daddr_t *oldblkp,
	ufs1_daddr_t *lastblkp,
	struct fs *fs,
	ufs_lbn_t lblkno,
	int expungetype)	/* BLK_SNAP or BLK_NOCOPY */
{
	struct inode *ip = VTOI(vp);
	ufs1_daddr_t blkno, *blkp;
	ufs_lbn_t lbn;
	struct buf *ibp;
	int error;

	for ( ; oldblkp < lastblkp; oldblkp++) {
		blkno = *oldblkp;
		if (blkno == 0 || blkno == BLK_NOCOPY || blkno == BLK_SNAP)
			continue;
		lbn = fragstoblks(fs, blkno);
		if (lbn < UFS_NDADDR) {
			blkp = &ip->i_din1->di_db[lbn];
			UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
		} else {
			error = ffs_balloc_ufs1(vp, lblktosize(fs, (off_t)lbn),
			    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
			if (error)
				return (error);
			blkp = &((ufs1_daddr_t *)(ibp->b_data))
			    [(lbn - UFS_NDADDR) % NINDIR(fs)];
		}
		/*
		 * If we are expunging a snapshot vnode and we
		 * find a block marked BLK_NOCOPY, then it is
		 * one that has been allocated to this snapshot after
		 * we took our current snapshot and can be ignored.
		 */
		if (expungetype == BLK_SNAP && *blkp == BLK_NOCOPY) {
			if (lbn >= UFS_NDADDR)
				brelse(ibp);
		} else {
			if (*blkp != 0)
				panic("snapacct_ufs1: bad block");
			*blkp = expungetype;
			if (lbn >= UFS_NDADDR)
				bdwrite(ibp);
		}
	}
	return (0);
}

/*
 * Account for a set of blocks allocated in a snapshot inode.
 */
static int
mapacct_ufs1(struct vnode *vp,
	ufs1_daddr_t *oldblkp,
	ufs1_daddr_t *lastblkp,
	struct fs *fs,
	ufs_lbn_t lblkno,
	int expungetype)
{
	ufs1_daddr_t blkno;
	struct inode *ip;
	ino_t inum;
	int acctit;

	ip = VTOI(vp);
	inum = ip->i_number;
	if (lblkno == -1)
		acctit = 0;
	else
		acctit = 1;
	for ( ; oldblkp < lastblkp; oldblkp++, lblkno++) {
		blkno = *oldblkp;
		if (blkno == 0 || blkno == BLK_NOCOPY)
			continue;
		if (acctit && expungetype == BLK_SNAP && blkno != BLK_SNAP)
			*ip->i_snapblklist++ = lblkno;
		if (blkno == BLK_SNAP)
			blkno = blkstofrags(fs, lblkno);
		ffs_blkfree(ITOUMP(ip), fs, vp, blkno, fs->fs_bsize, inum,
		    vp->v_type, NULL, SINGLETON_KEY);
	}
	return (0);
}

/*
 * Before expunging a snapshot inode, note all the
 * blocks that it claims with BLK_SNAP so that fsck will
 * be able to account for those blocks properly and so
 * that this snapshot knows that it need not copy them
 * if the other snapshot holding them is freed. This code
 * is reproduced once each for UFS1 and UFS2.
 */
static int
expunge_ufs2(struct vnode *snapvp,
	struct inode *cancelip,
	struct fs *fs,
	int (*acctfunc)(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *,
	    struct fs *, ufs_lbn_t, int),
	int expungetype,
	int clearmode)
{
	int i, error, indiroff;
	ufs_lbn_t lbn, rlbn;
	ufs2_daddr_t len, blkno, numblks, blksperindir;
	struct ufs2_dinode *dip;
	struct thread *td = curthread;
	struct buf *bp;

	/*
	 * Prepare to expunge the inode. If its inode block has not
	 * yet been copied, then allocate and fill the copy.
	 */
	lbn = fragstoblks(fs, ino_to_fsba(fs, cancelip->i_number));
	blkno = 0;
	if (lbn < UFS_NDADDR) {
		blkno = VTOI(snapvp)->i_din2->di_db[lbn];
	} else {
		if (DOINGSOFTDEP(snapvp))
			softdep_prealloc(snapvp, MNT_WAIT);
		td->td_pflags |= TDP_COWINPROGRESS;
		error = ffs_balloc_ufs2(snapvp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, KERNCRED, BA_METAONLY, &bp);
		td->td_pflags &= ~TDP_COWINPROGRESS;
		if (error)
			return (error);
		indiroff = (lbn - UFS_NDADDR) % NINDIR(fs);
		blkno = ((ufs2_daddr_t *)(bp->b_data))[indiroff];
		bqrelse(bp);
	}
	if (blkno != 0) {
		if ((error = bread(snapvp, lbn, fs->fs_bsize, KERNCRED, &bp)))
			return (error);
	} else {
		error = ffs_balloc_ufs2(snapvp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, KERNCRED, 0, &bp);
		if (error)
			return (error);
		if ((error = readblock(snapvp, bp, lbn)) != 0)
			return (error);
	}
	/*
	 * Set a snapshot inode to be a zero length file, regular files
	 * to be completely unallocated.
	 */
	dip = (struct ufs2_dinode *)bp->b_data +
	    ino_to_fsbo(fs, cancelip->i_number);
	dip->di_size = 0;
	dip->di_blocks = 0;
	dip->di_flags &= ~SF_SNAPSHOT;
	bzero(dip->di_db, UFS_NDADDR * sizeof(ufs2_daddr_t));
	bzero(dip->di_ib, UFS_NIADDR * sizeof(ufs2_daddr_t));
	if (clearmode || cancelip->i_effnlink == 0)
		dip->di_mode = 0;
	else
		ffs_update_dinode_ckhash(fs, dip);
	bdwrite(bp);
	/*
	 * Now go through and expunge all the blocks in the file
	 * using the function requested.
	 */
	numblks = howmany(cancelip->i_size, fs->fs_bsize);
	if ((error = (*acctfunc)(snapvp, &cancelip->i_din2->di_db[0],
	    &cancelip->i_din2->di_db[UFS_NDADDR], fs, 0, expungetype)))
		return (error);
	if ((error = (*acctfunc)(snapvp, &cancelip->i_din2->di_ib[0],
	    &cancelip->i_din2->di_ib[UFS_NIADDR], fs, -1, expungetype)))
		return (error);
	blksperindir = 1;
	lbn = -UFS_NDADDR;
	len = numblks - UFS_NDADDR;
	rlbn = UFS_NDADDR;
	for (i = 0; len > 0 && i < UFS_NIADDR; i++) {
		error = indiracct_ufs2(snapvp, ITOV(cancelip), i,
		    cancelip->i_din2->di_ib[i], lbn, rlbn, len,
		    blksperindir, fs, acctfunc, expungetype);
		if (error)
			return (error);
		blksperindir *= NINDIR(fs);
		lbn -= blksperindir + 1;
		len -= blksperindir;
		rlbn += blksperindir;
	}
	return (0);
}

/*
 * Descend an indirect block chain for vnode cancelvp accounting for all
 * its indirect blocks in snapvp.
 */
static int
indiracct_ufs2(struct vnode *snapvp,
	struct vnode *cancelvp,
	int level,
	ufs2_daddr_t blkno,
	ufs_lbn_t lbn,
	ufs_lbn_t rlbn,
	ufs_lbn_t remblks,
	ufs_lbn_t blksperindir,
	struct fs *fs,
	int (*acctfunc)(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *,
	    struct fs *, ufs_lbn_t, int),
	int expungetype)
{
	int error, num, i;
	ufs_lbn_t subblksperindir;
	struct indir indirs[UFS_NIADDR + 2];
	ufs2_daddr_t last, *bap;
	struct buf *bp;

	if (blkno == 0) {
		if (expungetype == BLK_NOCOPY)
			return (0);
		panic("indiracct_ufs2: missing indir");
	}
	if ((error = ufs_getlbns(cancelvp, rlbn, indirs, &num)) != 0)
		return (error);
	if (lbn != indirs[num - 1 - level].in_lbn || num < 2)
		panic("indiracct_ufs2: botched params");
	/*
	 * We have to expand bread here since it will deadlock looking
	 * up the block number for any blocks that are not in the cache.
	 */
	bp = getblk(cancelvp, lbn, fs->fs_bsize, 0, 0, 0);
	bp->b_blkno = fsbtodb(fs, blkno);
	if ((bp->b_flags & B_CACHE) == 0 &&
	    (error = readblock(cancelvp, bp, fragstoblks(fs, blkno)))) {
		brelse(bp);
		return (error);
	}
	/*
	 * Account for the block pointers in this indirect block.
	 */
	last = howmany(remblks, blksperindir);
	if (last > NINDIR(fs))
		last = NINDIR(fs);
	bap = malloc(fs->fs_bsize, M_DEVBUF, M_WAITOK);
	bcopy(bp->b_data, (caddr_t)bap, fs->fs_bsize);
	bqrelse(bp);
	error = (*acctfunc)(snapvp, &bap[0], &bap[last], fs,
	    level == 0 ? rlbn : -1, expungetype);
	if (error || level == 0)
		goto out;
	/*
	 * Account for the block pointers in each of the indirect blocks
	 * in the levels below us.
	 */
	subblksperindir = blksperindir / NINDIR(fs);
	for (lbn++, level--, i = 0; i < last; i++) {
		error = indiracct_ufs2(snapvp, cancelvp, level, bap[i], lbn,
		    rlbn, remblks, subblksperindir, fs, acctfunc, expungetype);
		if (error)
			goto out;
		rlbn += blksperindir;
		lbn -= blksperindir;
		remblks -= blksperindir;
	}
out:
	free(bap, M_DEVBUF);
	return (error);
}

/*
 * Do both snap accounting and map accounting.
 */
static int
fullacct_ufs2(struct vnode *vp,
	ufs2_daddr_t *oldblkp,
	ufs2_daddr_t *lastblkp,
	struct fs *fs,
	ufs_lbn_t lblkno,
	int exptype)	/* BLK_SNAP or BLK_NOCOPY */
{
	int error;

	if ((error = snapacct_ufs2(vp, oldblkp, lastblkp, fs, lblkno, exptype)))
		return (error);
	return (mapacct_ufs2(vp, oldblkp, lastblkp, fs, lblkno, exptype));
}

/*
 * Identify a set of blocks allocated in a snapshot inode.
 */
static int
snapacct_ufs2(struct vnode *vp,
	ufs2_daddr_t *oldblkp,
	ufs2_daddr_t *lastblkp,
	struct fs *fs,
	ufs_lbn_t lblkno,
	int expungetype)	/* BLK_SNAP or BLK_NOCOPY */
{
	struct inode *ip = VTOI(vp);
	ufs2_daddr_t blkno, *blkp;
	ufs_lbn_t lbn;
	struct buf *ibp;
	int error;

	for ( ; oldblkp < lastblkp; oldblkp++) {
		blkno = *oldblkp;
		if (blkno == 0 || blkno == BLK_NOCOPY || blkno == BLK_SNAP)
			continue;
		lbn = fragstoblks(fs, blkno);
		if (lbn < UFS_NDADDR) {
			blkp = &ip->i_din2->di_db[lbn];
			UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
		} else {
			error = ffs_balloc_ufs2(vp, lblktosize(fs, (off_t)lbn),
			    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
			if (error)
				return (error);
			blkp = &((ufs2_daddr_t *)(ibp->b_data))
			    [(lbn - UFS_NDADDR) % NINDIR(fs)];
		}
		/*
		 * If we are expunging a snapshot vnode and we
		 * find a block marked BLK_NOCOPY, then it is
		 * one that has been allocated to this snapshot after
		 * we took our current snapshot and can be ignored.
		 */
		if (expungetype == BLK_SNAP && *blkp == BLK_NOCOPY) {
			if (lbn >= UFS_NDADDR)
				brelse(ibp);
		} else {
			if (*blkp != 0)
				panic("snapacct_ufs2: bad block");
			*blkp = expungetype;
			if (lbn >= UFS_NDADDR)
				bdwrite(ibp);
		}
	}
	return (0);
}

/*
 * Account for a set of blocks allocated in a snapshot inode.
 */
static int
mapacct_ufs2(struct vnode *vp,
	ufs2_daddr_t *oldblkp,
	ufs2_daddr_t *lastblkp,
	struct fs *fs,
	ufs_lbn_t lblkno,
	int expungetype)
{
	ufs2_daddr_t blkno;
	struct inode *ip;
	ino_t inum;
	int acctit;

	ip = VTOI(vp);
	inum = ip->i_number;
	if (lblkno == -1)
		acctit = 0;
	else
		acctit = 1;
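	/*
	 * Direct blocks are always copied rather than listed (see the
	 * snapshot list discussion in ffs_snapshot()), so only blocks
	 * mapped through indirect blocks are added to the list.
	 */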
	for ( ; oldblkp < lastblkp; oldblkp++, lblkno++) {
		blkno = *oldblkp;
		if (blkno == 0 || blkno == BLK_NOCOPY)
			continue;
		if (acctit && expungetype == BLK_SNAP && blkno != BLK_SNAP &&
		    lblkno >= UFS_NDADDR)
			*ip->i_snapblklist++ = lblkno;
		if (blkno == BLK_SNAP)
			blkno = blkstofrags(fs, lblkno);
		ffs_blkfree(ITOUMP(ip), fs, vp, blkno, fs->fs_bsize, inum,
		    vp->v_type, NULL, SINGLETON_KEY);
	}
	return (0);
}

/*
 * Decrement extra reference on snapshot when last name is removed.
 * It will not be freed until the last open reference goes away.
 */
void
ffs_snapgone(struct inode *ip)
{
	struct inode *xp;
	struct fs *fs;
	int snaploc;
	struct snapdata *sn;
	struct ufsmount *ump;

	/*
	 * Find snapshot in incore list.
	 */
	xp = NULL;
	sn = ITODEVVP(ip)->v_rdev->si_snapdata;
	if (sn != NULL)
		TAILQ_FOREACH(xp, &sn->sn_head, i_nextsnap)
			if (xp == ip)
				break;
	if (xp != NULL)
		vrele(ITOV(ip));
#ifdef DIAGNOSTIC
	else if (snapdebug)
		printf("ffs_snapgone: lost snapshot vnode %ju\n",
		    (uintmax_t)ip->i_number);
#endif
	/*
	 * Delete snapshot inode from superblock. Keep list dense.
	 */
	ump = ITOUMP(ip);
	fs = ump->um_fs;
	UFS_LOCK(ump);
	for (snaploc = 0; snaploc < FSMAXSNAP; snaploc++)
		if (fs->fs_snapinum[snaploc] == ip->i_number)
			break;
	if (snaploc < FSMAXSNAP) {
		for (snaploc++; snaploc < FSMAXSNAP; snaploc++) {
			if (fs->fs_snapinum[snaploc] == 0)
				break;
			fs->fs_snapinum[snaploc - 1] = fs->fs_snapinum[snaploc];
		}
		fs->fs_snapinum[snaploc - 1] = 0;
	}
	UFS_UNLOCK(ump);
}

/*
 * Prepare a snapshot file for being removed.
 */
void
ffs_snapremove(struct vnode *vp)
{
	struct inode *ip;
	struct vnode *devvp;
	struct buf *ibp;
	struct fs *fs;
	ufs2_daddr_t numblks, blkno, dblk;
	int error, last, loc;
	struct snapdata *sn;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	devvp = ITODEVVP(ip);
	/*
	 * If active, delete from incore list (this snapshot may
	 * already have been in the process of being deleted, so
	 * would not have been active).
	 *
	 * Clear copy-on-write flag if last snapshot.
	 */
	VI_LOCK(devvp);
	if (ip->i_nextsnap.tqe_prev != 0) {
		sn = devvp->v_rdev->si_snapdata;
		TAILQ_REMOVE(&sn->sn_head, ip, i_nextsnap);
		ip->i_nextsnap.tqe_prev = 0;
		revert_snaplock(vp, devvp, sn);
		try_free_snapdata(devvp);
	}
	VI_UNLOCK(devvp);
	/*
	 * Clear all BLK_NOCOPY fields. Pass any block claims to other
	 * snapshots that want them (see ffs_snapblkfree below).
	 */
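	/*
	 * A block still claimed by this snapshot is identifiable
	 * because it maps to itself: its physical address equals
	 * blkstofrags(fs, lbn) (see the ffs_snapblkfree() comment
	 * below).
	 */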
	for (blkno = 1; blkno < UFS_NDADDR; blkno++) {
		dblk = DIP(ip, i_db[blkno]);
		if (dblk == 0)
			continue;
		if (dblk == BLK_NOCOPY || dblk == BLK_SNAP)
			DIP_SET(ip, i_db[blkno], 0);
		else if ((dblk == blkstofrags(fs, blkno) &&
		    ffs_snapblkfree(fs, ITODEVVP(ip), dblk, fs->fs_bsize,
		    ip->i_number, vp->v_type, NULL))) {
			DIP_SET(ip, i_blocks, DIP(ip, i_blocks) -
			    btodb(fs->fs_bsize));
			DIP_SET(ip, i_db[blkno], 0);
		}
	}
	numblks = howmany(ip->i_size, fs->fs_bsize);
	for (blkno = UFS_NDADDR; blkno < numblks; blkno += NINDIR(fs)) {
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)blkno),
		    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
		if (error)
			continue;
		if (fs->fs_size - blkno > NINDIR(fs))
			last = NINDIR(fs);
		else
			last = fs->fs_size - blkno;
		for (loc = 0; loc < last; loc++) {
			if (I_IS_UFS1(ip)) {
				dblk = ((ufs1_daddr_t *)(ibp->b_data))[loc];
				if (dblk == 0)
					continue;
				if (dblk == BLK_NOCOPY || dblk == BLK_SNAP)
					((ufs1_daddr_t *)(ibp->b_data))[loc] = 0;
				else if ((dblk == blkstofrags(fs, blkno) &&
				    ffs_snapblkfree(fs, ITODEVVP(ip), dblk,
				    fs->fs_bsize, ip->i_number, vp->v_type,
				    NULL))) {
					ip->i_din1->di_blocks -=
					    btodb(fs->fs_bsize);
					((ufs1_daddr_t *)(ibp->b_data))[loc] = 0;
				}
				continue;
			}
			dblk = ((ufs2_daddr_t *)(ibp->b_data))[loc];
			if (dblk == 0)
				continue;
			if (dblk == BLK_NOCOPY || dblk == BLK_SNAP)
				((ufs2_daddr_t *)(ibp->b_data))[loc] = 0;
			else if ((dblk == blkstofrags(fs, blkno) &&
			    ffs_snapblkfree(fs, ITODEVVP(ip), dblk,
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL))) {
				ip->i_din2->di_blocks -= btodb(fs->fs_bsize);
				((ufs2_daddr_t *)(ibp->b_data))[loc] = 0;
			}
		}
		bawrite(ibp);
	}
	/*
	 * Clear snapshot flag and drop reference.
	 */
	ip->i_flags &= ~SF_SNAPSHOT;
	DIP_SET(ip, i_flags, ip->i_flags);
	UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
	/*
	 * The dirtied indirects must be written out before
	 * softdep_setup_freeblocks() is called. Otherwise indir_trunc()
	 * may find indirect pointers using the magic BLK_* values.
	 */
	if (DOINGSOFTDEP(vp))
		ffs_syncvnode(vp, MNT_WAIT, 0);
#ifdef QUOTA
	/*
	 * Reenable disk quotas for ex-snapshot file.
	 */
	if (!getinoquota(ip))
		(void) chkdq(ip, DIP(ip, i_blocks), KERNCRED, FORCE);
#endif
}

/*
 * Notification that a block is being freed. Return zero if the free
 * should be allowed to proceed. Return non-zero if the snapshot file
 * wants to claim the block. The block will be claimed if it is an
 * uncopied part of one of the snapshots. It will be freed if it is
 * either a BLK_NOCOPY or has already been copied in all of the snapshots.
 * If a fragment is being freed, then all snapshots that care about
 * it must make a copy since a snapshot file can only claim full sized
 * blocks. Note that if more than one snapshot file maps the block,
 * we can pick one at random to claim it. Since none of the snapshots
 * can change, we are assured that they will all see the same unmodified
1768
* image. When deleting a snapshot file (see ffs_snapremove above), we
1769
* must push any of these claimed blocks to one of the other snapshots
1770
* that maps it. These claimed blocks are easily identified as they will
1771
* have a block number equal to their logical block number within the
1772
* snapshot. A copied block can never have this property because they
1773
* must always have been allocated from a BLK_NOCOPY location.
1774
*/
1775
int
1776
ffs_snapblkfree(struct fs *fs,
1777
struct vnode *devvp,
1778
ufs2_daddr_t bno,
1779
long size,
1780
ino_t inum,
1781
__enum_uint8(vtype) vtype,
1782
struct workhead *wkhd)
1783
{
1784
struct buf *ibp, *cbp, *savedcbp = NULL;
1785
struct thread *td = curthread;
1786
struct inode *ip;
1787
struct vnode *vp = NULL;
1788
ufs_lbn_t lbn;
1789
ufs2_daddr_t blkno;
1790
int indiroff = 0, error = 0, claimedblk = 0;
1791
struct snapdata *sn;
1792
1793
lbn = fragstoblks(fs, bno);
1794
retry:
1795
VI_LOCK(devvp);
1796
sn = devvp->v_rdev->si_snapdata;
1797
if (sn == NULL) {
1798
VI_UNLOCK(devvp);
1799
return (0);
1800
}
1801
1802
/*
1803
* Use LK_SLEEPFAIL because sn might be freed under us while
1804
* both devvp interlock and snaplk are not owned.
1805
*/
	if (lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE | LK_SLEEPFAIL,
	    VI_MTX(devvp)) != 0)
		goto retry;

	TAILQ_FOREACH(ip, &sn->sn_head, i_nextsnap) {
		vp = ITOV(ip);
		if (DOINGSOFTDEP(vp))
			softdep_prealloc(vp, MNT_WAIT);
		/*
		 * Lookup block being written.
		 */
		if (lbn < UFS_NDADDR) {
			blkno = DIP(ip, i_db[lbn]);
		} else {
			td->td_pflags |= TDP_COWINPROGRESS;
			error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
			    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
			td->td_pflags &= ~TDP_COWINPROGRESS;
			if (error)
				break;
			indiroff = (lbn - UFS_NDADDR) % NINDIR(fs);
			if (I_IS_UFS1(ip))
				blkno=((ufs1_daddr_t *)(ibp->b_data))[indiroff];
			else
				blkno=((ufs2_daddr_t *)(ibp->b_data))[indiroff];
		}
		/*
		 * Check to see if block needs to be copied.
		 */
		if (blkno == 0) {
			/*
			 * A block that we map is being freed. If it has not
			 * been claimed yet, we will claim or copy it (below).
			 */
			claimedblk = 1;
		} else if (blkno == BLK_SNAP) {
			/*
			 * No previous snapshot claimed the block,
			 * so it will be freed and become a BLK_NOCOPY
			 * (don't care) for us.
			 */
			if (claimedblk)
				panic("snapblkfree: inconsistent block type");
			if (lbn < UFS_NDADDR) {
				DIP_SET(ip, i_db[lbn], BLK_NOCOPY);
				UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
			} else if (I_IS_UFS1(ip)) {
				((ufs1_daddr_t *)(ibp->b_data))[indiroff] =
				    BLK_NOCOPY;
				bdwrite(ibp);
			} else {
				((ufs2_daddr_t *)(ibp->b_data))[indiroff] =
				    BLK_NOCOPY;
				bdwrite(ibp);
			}
			continue;
		} else /* BLK_NOCOPY or default */ {
			/*
			 * If the snapshot has already copied the block
			 * (default), or does not care about the block,
			 * it is not needed.
			 */
			if (lbn >= UFS_NDADDR)
				bqrelse(ibp);
			continue;
		}
		/*
		 * If this is a full size block, we will just grab it
		 * and assign it to the snapshot inode. Otherwise we
		 * will proceed to copy it. See explanation for this
		 * routine as to why only a single snapshot needs to
		 * claim this block.
		 */
		if (size == fs->fs_bsize) {
#ifdef DIAGNOSTIC
			if (snapdebug)
				printf("%s %ju lbn %jd from inum %ju\n",
				    "Grabonremove: snapino",
				    (uintmax_t)ip->i_number,
				    (intmax_t)lbn, (uintmax_t)inum);
#endif
			/*
			 * If journaling is tracking this write we must add
			 * the work to the inode or indirect being written.
			 */
			if (wkhd != NULL) {
				if (lbn < UFS_NDADDR)
					softdep_inode_append(ip,
					    curthread->td_ucred, wkhd);
				else
					softdep_buf_append(ibp, wkhd);
			}
			if (lbn < UFS_NDADDR) {
				DIP_SET(ip, i_db[lbn], bno);
			} else if (I_IS_UFS1(ip)) {
				((ufs1_daddr_t *)(ibp->b_data))[indiroff] = bno;
				bdwrite(ibp);
			} else {
				((ufs2_daddr_t *)(ibp->b_data))[indiroff] = bno;
				bdwrite(ibp);
			}
			DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + btodb(size));
			UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
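			/*
			 * (Every snapshot vnode on this filesystem shares
			 * sn_lock as its v_vnlock, so releasing vp->v_vnlock
			 * here drops the snap lock acquired above.)
			 */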
			lockmgr(vp->v_vnlock, LK_RELEASE, NULL);
			return (1);
		}
		if (lbn >= UFS_NDADDR)
			bqrelse(ibp);
		/*
		 * Allocate the block into which to do the copy. Note that this
		 * allocation will never require any additional allocations for
		 * the snapshot inode.
		 */
		td->td_pflags |= TDP_COWINPROGRESS;
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, KERNCRED, 0, &cbp);
		td->td_pflags &= ~TDP_COWINPROGRESS;
		if (error)
			break;
#ifdef DIAGNOSTIC
		if (snapdebug)
			printf("%s%ju lbn %jd %s %ju size %ld to blkno %jd\n",
			    "Copyonremove: snapino ", (uintmax_t)ip->i_number,
			    (intmax_t)lbn, "for inum", (uintmax_t)inum, size,
			    (intmax_t)cbp->b_blkno);
#endif
		/*
		 * If we have already read the old block contents, then
		 * simply copy them to the new block. Note that we need
		 * to synchronously write snapshots that have not been
		 * unlinked, and hence will be visible after a crash,
		 * to ensure their integrity. At a minimum we ensure the
		 * integrity of the filesystem metadata, but use the
		 * dopersistence sysctl-settable flag to decide on the
		 * persistence needed for file content data.
		 */
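		/*
		 * (Only the first snapshot that needs a copy reads the old
		 * contents from disk; savedcbp lets every later snapshot in
		 * this loop reuse that single read.)
		 */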
1942
if (savedcbp != NULL) {
1943
bcopy(savedcbp->b_data, cbp->b_data, fs->fs_bsize);
1944
bawrite(cbp);
1945
if ((vtype == VDIR || dopersistence) &&
1946
ip->i_effnlink > 0)
1947
(void) ffs_syncvnode(vp, MNT_WAIT, NO_INO_UPDT);
1948
continue;
1949
}
1950
/*
1951
* Otherwise, read the old block contents into the buffer.
1952
*/
1953
if ((error = readblock(vp, cbp, lbn)) != 0) {
1954
bzero(cbp->b_data, fs->fs_bsize);
1955
bawrite(cbp);
1956
if ((vtype == VDIR || dopersistence) &&
1957
ip->i_effnlink > 0)
1958
(void) ffs_syncvnode(vp, MNT_WAIT, NO_INO_UPDT);
1959
break;
1960
}
1961
savedcbp = cbp;
1962
}
	/*
	 * Note that we need to synchronously write snapshots that
	 * have not been unlinked, and hence will be visible after
	 * a crash, to ensure their integrity. At a minimum we
	 * ensure the integrity of the filesystem metadata, but
	 * use the dopersistence sysctl-settable flag to decide on
	 * the persistence needed for file content data.
	 */
	if (savedcbp) {
		vp = savedcbp->b_vp;
		bawrite(savedcbp);
		if ((vtype == VDIR || dopersistence) &&
		    VTOI(vp)->i_effnlink > 0)
			(void) ffs_syncvnode(vp, MNT_WAIT, NO_INO_UPDT);
	}
	/*
	 * If we have been unable to allocate a block in which to do
	 * the copy, then return non-zero so that the fragment will
	 * not be freed. Although space will be lost, the snapshot
	 * will stay consistent.
	 */
	if (error != 0 && wkhd != NULL)
		softdep_freework(wkhd);
	lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
	return (error);
}

/*
 * Associate snapshot files when mounting.
 */
void
ffs_snapshot_mount(struct mount *mp)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *devvp = ump->um_devvp;
	struct fs *fs = ump->um_fs;
	struct thread *td = curthread;
	struct snapdata *sn;
	struct vnode *vp;
	struct vnode *lastvp;
	struct inode *ip;
	struct uio auio;
	struct iovec aiov;
	void *snapblklist;
	char *reason;
	daddr_t snaplistsize;
	int error, snaploc, loc;

	/*
	 * XXX The following needs to be set before ffs_truncate or
	 * VOP_READ can be called.
	 */
	mp->mnt_stat.f_iosize = fs->fs_bsize;
	/*
	 * Process each snapshot listed in the superblock.
	 */
	vp = NULL;
	lastvp = NULL;
	sn = NULL;
	for (snaploc = 0; snaploc < FSMAXSNAP; snaploc++) {
		if (fs->fs_snapinum[snaploc] == 0)
			break;
		if ((error = ffs_vget(mp, fs->fs_snapinum[snaploc],
		    LK_EXCLUSIVE, &vp)) != 0) {
			printf("ffs_snapshot_mount: vget failed %d\n", error);
			continue;
		}
		ip = VTOI(vp);
		if (vp->v_type != VREG) {
			reason = "non-file snapshot";
		} else if (!IS_SNAPSHOT(ip)) {
			reason = "non-snapshot";
		} else if (ip->i_size ==
		    lblktosize(fs, howmany(fs->fs_size, fs->fs_frag))) {
			reason = "old format snapshot";
			(void)ffs_truncate(vp, (off_t)0, 0, NOCRED);
			(void)ffs_syncvnode(vp, MNT_WAIT, 0);
		} else {
			reason = NULL;
		}
		if (reason != NULL) {
			printf("ffs_snapshot_mount: %s inode %d\n",
			    reason, fs->fs_snapinum[snaploc]);
			vput(vp);
			vp = NULL;
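			/*
			 * (Squeeze the rejected inode out of the superblock's
			 * snapshot list and retry this now-shifted slot.)
			 */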
			for (loc = snaploc + 1; loc < FSMAXSNAP; loc++) {
				if (fs->fs_snapinum[loc] == 0)
					break;
				fs->fs_snapinum[loc - 1] = fs->fs_snapinum[loc];
			}
			fs->fs_snapinum[loc - 1] = 0;
			snaploc--;
			continue;
		}
		/*
		 * Acquire a lock on the snapdata structure, creating it if
		 * necessary.
		 */
		sn = ffs_snapdata_acquire(devvp);
		/*
		 * Change vnode to use shared snapshot lock instead of the
		 * original private lock.
		 */
		vp->v_vnlock = &sn->sn_lock;
		lockmgr(&vp->v_lock, LK_RELEASE, NULL);
		/*
		 * Link it onto the active snapshot list.
		 */
		VI_LOCK(devvp);
		if (ip->i_nextsnap.tqe_prev != 0)
			panic("ffs_snapshot_mount: %ju already on list",
			    (uintmax_t)ip->i_number);
		else
			TAILQ_INSERT_TAIL(&sn->sn_head, ip, i_nextsnap);
		vp->v_vflag |= VV_SYSTEM;
		VI_UNLOCK(devvp);
		VOP_UNLOCK(vp);
		lastvp = vp;
	}
	vp = lastvp;
	/*
	 * No usable snapshots found.
	 */
	if (sn == NULL || vp == NULL)
		return;
	/*
	 * Allocate the space for the block hints list. We always want to
	 * use the list from the newest snapshot.
	 */
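	/*
	 * (The hint list is stored in the snapshot file just past the
	 * blocks that map the filesystem proper. Its first element holds
	 * the list length: that word is read here first, then read again
	 * below as element zero of the list itself, which is why the
	 * binary searches over sn_blklist start at index 1.)
	 */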
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)&snaplistsize;
	aiov.iov_len = sizeof(snaplistsize);
	auio.uio_resid = aiov.iov_len;
	auio.uio_offset =
	    lblktosize(fs, howmany(fs->fs_size, fs->fs_frag));
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((error = VOP_READ(vp, &auio, IO_UNIT, td->td_ucred)) != 0) {
		printf("ffs_snapshot_mount: read_1 failed %d\n", error);
		VOP_UNLOCK(vp);
		return;
	}
	snapblklist = malloc(snaplistsize * sizeof(daddr_t),
	    M_UFSMNT, M_WAITOK);
	auio.uio_iovcnt = 1;
	aiov.iov_base = snapblklist;
	aiov.iov_len = snaplistsize * sizeof(daddr_t);
	auio.uio_resid = aiov.iov_len;
	auio.uio_offset -= sizeof(snaplistsize);
	if ((error = VOP_READ(vp, &auio, IO_UNIT, td->td_ucred)) != 0) {
		printf("ffs_snapshot_mount: read_2 failed %d\n", error);
		VOP_UNLOCK(vp);
		free(snapblklist, M_UFSMNT);
		return;
	}
	VOP_UNLOCK(vp);
	VI_LOCK(devvp);
	sn->sn_listsize = snaplistsize;
	sn->sn_blklist = (daddr_t *)snapblklist;
	devvp->v_vflag |= VV_COPYONWRITE;
	VI_UNLOCK(devvp);
}

/*
 * Disassociate snapshot files when unmounting.
 */
void
ffs_snapshot_unmount(struct mount *mp)
{
	struct vnode *devvp = VFSTOUFS(mp)->um_devvp;
	struct snapdata *sn;
	struct inode *xp;
	struct vnode *vp;

	VI_LOCK(devvp);
	sn = devvp->v_rdev->si_snapdata;
	while (sn != NULL && (xp = TAILQ_FIRST(&sn->sn_head)) != NULL) {
		vp = ITOV(xp);
		TAILQ_REMOVE(&sn->sn_head, xp, i_nextsnap);
		xp->i_nextsnap.tqe_prev = 0;
		lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE,
		    VI_MTX(devvp));
		VI_LOCK(devvp);
		revert_snaplock(vp, devvp, sn);
		lockmgr(&vp->v_lock, LK_RELEASE, NULL);
		if (xp->i_effnlink > 0) {
			VI_UNLOCK(devvp);
			vrele(vp);
			VI_LOCK(devvp);
		}
		sn = devvp->v_rdev->si_snapdata;
	}
	try_free_snapdata(devvp);
	VI_UNLOCK(devvp);
}

/*
 * Check whether the buffer's block belongs to the set of device buffers
 * that must be locked after snaplk. devvp shall be locked on entry, and
 * will be left locked upon exit.
 */
static int
ffs_bp_snapblk(struct vnode *devvp, struct buf *bp)
{
	struct snapdata *sn;
	struct fs *fs;
	ufs2_daddr_t lbn, *snapblklist;
	int lower, upper, mid;

	ASSERT_VI_LOCKED(devvp, "ffs_bp_snapblk");
	KASSERT(devvp->v_type == VCHR, ("Not a device %p", devvp));
	sn = devvp->v_rdev->si_snapdata;
	if (sn == NULL || TAILQ_FIRST(&sn->sn_head) == NULL)
		return (0);
	fs = ITOFS(TAILQ_FIRST(&sn->sn_head));
	lbn = fragstoblks(fs, dbtofsb(fs, bp->b_blkno));
	snapblklist = sn->sn_blklist;
	upper = sn->sn_listsize - 1;
	lower = 1;
	while (lower <= upper) {
		mid = (lower + upper) / 2;
		if (snapblklist[mid] == lbn)
			break;
		if (snapblklist[mid] < lbn)
			lower = mid + 1;
		else
			upper = mid - 1;
	}
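	/*
	 * (The loop leaves lower <= upper only when it broke out on an
	 * exact match; an unsuccessful search ends with lower > upper.)
	 */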
	if (lower <= upper)
		return (1);
	return (0);
}

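/*
 * Flush some dirty buffers on behalf of bdwrite() when too many have
 * accumulated on the device vnode. If bp is on the snapshot hint list,
 * the buffers flushed here are restricted to ones that are also on the
 * list, so that no copy-on-write work (which would need snaplk) is
 * triggered while flushing.
 */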
void
ffs_bdflush(struct bufobj *bo, struct buf *bp)
{
	struct thread *td;
	struct vnode *vp, *devvp;
	struct buf *nbp;
	int bp_bdskip;

	if (bo->bo_dirty.bv_cnt <= dirtybufthresh)
		return;

	td = curthread;
	vp = bp->b_vp;
	devvp = bo2vnode(bo);
	KASSERT(vp == devvp, ("devvp != vp %p %p", bo, bp));

	VI_LOCK(devvp);
	bp_bdskip = ffs_bp_snapblk(devvp, bp);
	if (bp_bdskip)
		bdwriteskip++;
	VI_UNLOCK(devvp);
	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10 && !bp_bdskip) {
		(void) VOP_FSYNC(vp, MNT_NOWAIT, td);
		altbufferflushes++;
	} else {
		BO_LOCK(bo);
		/*
		 * Try to find a buffer to flush.
		 */
		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
			    BUF_LOCK(nbp,
			    LK_EXCLUSIVE | LK_NOWAIT, NULL))
				continue;
			if (bp == nbp)
				panic("bdwrite: found ourselves");
			BO_UNLOCK(bo);
			/*
			 * Don't call buf_countdeps() with the bo lock
			 * held.
			 */
			if (buf_countdeps(nbp, 0)) {
				BO_LOCK(bo);
				BUF_UNLOCK(nbp);
				continue;
			}
			if (bp_bdskip) {
				VI_LOCK(devvp);
				if (!ffs_bp_snapblk(vp, nbp)) {
					VI_UNLOCK(devvp);
					BO_LOCK(bo);
					BUF_UNLOCK(nbp);
					continue;
				}
				VI_UNLOCK(devvp);
			}
			if (nbp->b_flags & B_CLUSTEROK) {
				vfs_bio_awrite(nbp);
			} else {
				bremfree(nbp);
				bawrite(nbp);
			}
			dirtybufferflushes++;
			break;
		}
		if (nbp == NULL)
			BO_UNLOCK(bo);
	}
}

/*
 * Check for need to copy block that is about to be written,
 * copying the block if necessary.
 */
int
ffs_copyonwrite(struct vnode *devvp, struct buf *bp)
{
	struct snapdata *sn;
	struct buf *ibp, *cbp, *savedcbp = NULL;
	struct thread *td = curthread;
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp = NULL;
	ufs2_daddr_t lbn, blkno, *snapblklist;
	int lower, upper, mid, indiroff, error = 0;
	int launched_async_io, prev_norunningbuf;
	long saved_runningbufspace;

	if (devvp != bp->b_vp && IS_SNAPSHOT(VTOI(bp->b_vp)))
		return (0);		/* Update on a snapshot file */
	if (td->td_pflags & TDP_COWINPROGRESS)
		panic("ffs_copyonwrite: recursive call");
	/*
	 * First check to see if it is in the preallocated list.
	 * By doing this check we avoid several potential deadlocks.
	 */
	VI_LOCK(devvp);
	sn = devvp->v_rdev->si_snapdata;
	if (sn == NULL ||
	    TAILQ_EMPTY(&sn->sn_head)) {
		VI_UNLOCK(devvp);
		return (0);		/* No snapshot */
	}
	ip = TAILQ_FIRST(&sn->sn_head);
	fs = ITOFS(ip);
	lbn = fragstoblks(fs, dbtofsb(fs, bp->b_blkno));
	if (lbn < UFS_NDADDR) {
		VI_UNLOCK(devvp);
		return (0);		/* Direct blocks are always copied */
	}
	snapblklist = sn->sn_blklist;
	upper = sn->sn_listsize - 1;
	lower = 1;
	while (lower <= upper) {
		mid = (lower + upper) / 2;
		if (snapblklist[mid] == lbn)
			break;
		if (snapblklist[mid] < lbn)
			lower = mid + 1;
		else
			upper = mid - 1;
	}
	if (lower <= upper) {
		VI_UNLOCK(devvp);
		return (0);
	}
	launched_async_io = 0;
	prev_norunningbuf = td->td_pflags & TDP_NORUNNINGBUF;
	/*
	 * Since I/O on bp isn't yet in progress and it may be blocked
	 * for a long time waiting on snaplk, back it out of
	 * runningbufspace, possibly waking other threads waiting for space.
	 */
	saved_runningbufspace = bp->b_runningbufspace;
	if (saved_runningbufspace != 0)
		runningbufwakeup(bp);
	/*
	 * Not in the precomputed list, so check the snapshots.
	 */
	while (lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE | LK_SLEEPFAIL,
	    VI_MTX(devvp)) != 0) {
		VI_LOCK(devvp);
		sn = devvp->v_rdev->si_snapdata;
		if (sn == NULL ||
		    TAILQ_EMPTY(&sn->sn_head)) {
			VI_UNLOCK(devvp);
			if (saved_runningbufspace != 0) {
				(void)runningbufclaim(bp,
				    saved_runningbufspace);
			}
			return (0);		/* Snapshot gone */
		}
	}
	TAILQ_FOREACH(ip, &sn->sn_head, i_nextsnap) {
		vp = ITOV(ip);
		if (DOINGSOFTDEP(vp))
			softdep_prealloc(vp, MNT_WAIT);
		/*
		 * We ensure that everything of our own that needs to be
		 * copied will be done at the time that ffs_snapshot is
		 * called. Thus we can skip the check here which can
		 * deadlock in doing the lookup in UFS_BALLOC.
		 */
		if (bp->b_vp == vp)
			continue;
		/*
		 * Check to see if block needs to be copied. We do not have
		 * to hold the snapshot lock while doing this lookup as it
		 * will never require any additional allocations for the
		 * snapshot inode.
		 */
		if (lbn < UFS_NDADDR) {
			blkno = DIP(ip, i_db[lbn]);
		} else {
			td->td_pflags |= TDP_COWINPROGRESS | TDP_NORUNNINGBUF;
			error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
			    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
			td->td_pflags &= ~TDP_COWINPROGRESS;
			if (error)
				break;
			indiroff = (lbn - UFS_NDADDR) % NINDIR(fs);
			if (I_IS_UFS1(ip))
				blkno=((ufs1_daddr_t *)(ibp->b_data))[indiroff];
			else
				blkno=((ufs2_daddr_t *)(ibp->b_data))[indiroff];
			bqrelse(ibp);
		}
#ifdef INVARIANTS
		if (blkno == BLK_SNAP && bp->b_lblkno >= 0)
			panic("ffs_copyonwrite: bad copy block");
#endif
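		/*
		 * (A nonzero blkno means no copy is needed for this
		 * snapshot: the block was already copied, or is marked as
		 * not needing one.)
		 */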
		if (blkno != 0)
			continue;
		/*
		 * Allocate the block into which to do the copy. Since
		 * multiple processes may all try to copy the same block,
		 * we have to recheck our need to do a copy if we sleep
		 * waiting for the lock.
		 *
		 * Because all snapshots on a filesystem share a single
		 * lock, we ensure that we will never be in competition
		 * with another process to allocate a block.
		 */
		td->td_pflags |= TDP_COWINPROGRESS | TDP_NORUNNINGBUF;
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, KERNCRED, 0, &cbp);
		td->td_pflags &= ~TDP_COWINPROGRESS;
		if (error)
			break;
#ifdef DIAGNOSTIC
		if (snapdebug) {
			printf("Copyonwrite: snapino %ju lbn %jd for ",
			    (uintmax_t)ip->i_number, (intmax_t)lbn);
			if (bp->b_vp == devvp)
				printf("fs metadata");
			else
				printf("inum %ju",
				    (uintmax_t)VTOI(bp->b_vp)->i_number);
			printf(" lblkno %jd to blkno %jd\n",
			    (intmax_t)bp->b_lblkno, (intmax_t)cbp->b_blkno);
		}
#endif
		/*
		 * If we have already read the old block contents, then
		 * simply copy them to the new block. Note that we need
		 * to synchronously write snapshots that have not been
		 * unlinked, and hence will be visible after a crash,
		 * to ensure their integrity. At a minimum we ensure the
		 * integrity of the filesystem metadata, but use the
		 * dopersistence sysctl-settable flag to decide on the
		 * persistence needed for file content data.
		 */
		if (savedcbp != NULL) {
			bcopy(savedcbp->b_data, cbp->b_data, fs->fs_bsize);
			bawrite(cbp);
			if ((devvp == bp->b_vp || bp->b_vp->v_type == VDIR ||
			    dopersistence) && ip->i_effnlink > 0)
				(void) ffs_syncvnode(vp, MNT_WAIT, NO_INO_UPDT);
			else
				launched_async_io = 1;
			continue;
		}
		/*
		 * Otherwise, read the old block contents into the buffer.
		 */
		if ((error = readblock(vp, cbp, lbn)) != 0) {
			bzero(cbp->b_data, fs->fs_bsize);
			bawrite(cbp);
			if ((devvp == bp->b_vp || bp->b_vp->v_type == VDIR ||
			    dopersistence) && ip->i_effnlink > 0)
				(void) ffs_syncvnode(vp, MNT_WAIT, NO_INO_UPDT);
			else
				launched_async_io = 1;
			break;
		}
		savedcbp = cbp;
	}
	/*
	 * Note that we need to synchronously write snapshots that
	 * have not been unlinked, and hence will be visible after
	 * a crash, to ensure their integrity. At a minimum we
	 * ensure the integrity of the filesystem metadata, but
	 * use the dopersistence sysctl-settable flag to decide on
	 * the persistence needed for file content data.
	 */
	if (savedcbp) {
		vp = savedcbp->b_vp;
		bawrite(savedcbp);
		if ((devvp == bp->b_vp || bp->b_vp->v_type == VDIR ||
		    dopersistence) && VTOI(vp)->i_effnlink > 0)
			(void) ffs_syncvnode(vp, MNT_WAIT, NO_INO_UPDT);
		else
			launched_async_io = 1;
	}
	lockmgr(vp->v_vnlock, LK_RELEASE, NULL);
	td->td_pflags = (td->td_pflags & ~TDP_NORUNNINGBUF) |
	    prev_norunningbuf;
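	/*
	 * (If any snapshot copy above was launched asynchronously, wait
	 * for runningbufspace to drop back below its threshold before
	 * bp's own I/O is accounted and restarted below.)
	 */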
	if (launched_async_io && (td->td_pflags & TDP_NORUNNINGBUF) == 0)
		waitrunningbufspace();
	/*
	 * I/O on bp will now be started, so count it in runningbufspace.
	 */
	if (saved_runningbufspace != 0)
		(void)runningbufclaim(bp, saved_runningbufspace);
	return (error);
}

/*
 * Sync snapshots to force freework records waiting on snapshots to claim
 * blocks to free.
 */
void
ffs_sync_snap(struct mount *mp, int waitfor)
{
	struct snapdata *sn;
	struct vnode *devvp;
	struct vnode *vp;
	struct inode *ip;

	devvp = VFSTOUFS(mp)->um_devvp;
	if ((devvp->v_vflag & VV_COPYONWRITE) == 0)
		return;
	for (;;) {
		VI_LOCK(devvp);
		sn = devvp->v_rdev->si_snapdata;
		if (sn == NULL) {
			VI_UNLOCK(devvp);
			return;
		}
		if (lockmgr(&sn->sn_lock,
		    LK_INTERLOCK | LK_EXCLUSIVE | LK_SLEEPFAIL,
		    VI_MTX(devvp)) == 0)
			break;
	}
	TAILQ_FOREACH(ip, &sn->sn_head, i_nextsnap) {
		vp = ITOV(ip);
		ffs_syncvnode(vp, waitfor, NO_INO_UPDT);
	}
	lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
}

/*
 * Read the specified block into the given buffer.
 * Much of this boilerplate comes from bwrite().
 */
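/*
 * (The read is issued directly to the device's bufobj at the block's
 * physical offset: the buffer belongs to the snapshot vnode, but it must
 * be filled with the old contents of the underlying device block.)
 */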
static int
readblock(struct vnode *vp,
	struct buf *bp,
	ufs2_daddr_t lbn)
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(vp);
	fs = ITOFS(ip);

	bp->b_iocmd = BIO_READ;
	bp->b_iooffset = dbtob(fsbtodb(fs, blkstofrags(fs, lbn)));
	bp->b_iodone = bdone;
	g_vfs_strategy(&ITODEVVP(ip)->v_bufobj, bp);
	bufwait(bp);
	return (bp->b_error);
}

#endif

/*
 * Process file deletes that were deferred by ufs_inactive() due to
 * the file system being suspended. Transfer IN_LAZYACCESS into
 * IN_MODIFIED for vnodes that were accessed during suspension.
 */
void
process_deferred_inactive(struct mount *mp)
{
	struct vnode *vp, *mvp;
	struct inode *ip;
	int error;

	(void) vn_start_secondary_write(NULL, &mp, V_WAIT);
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * IN_LAZYACCESS is checked here without holding any
		 * vnode lock, but this flag is set only while holding
		 * vnode interlock.
		 */
		if (vp->v_type == VNON ||
		    ((VTOI(vp)->i_flag & IN_LAZYACCESS) == 0 &&
		    ((vp->v_iflag & VI_OWEINACT) == 0 || vp->v_usecount > 0))) {
			VI_UNLOCK(vp);
			continue;
		}
		vholdl(vp);
retry_vnode:
		error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
		if (error != 0) {
			vdrop(vp);
			if (error == ENOENT)
				continue;	/* vnode recycled */
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		ip = VTOI(vp);
		if ((ip->i_flag & IN_LAZYACCESS) != 0) {
			ip->i_flag &= ~IN_LAZYACCESS;
			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
		}
		VI_LOCK(vp);
		error = vinactive(vp);
		if (error == ERELOOKUP && vp->v_usecount == 0) {
			VI_UNLOCK(vp);
			VOP_UNLOCK(vp);
			goto retry_vnode;
		}
		VI_UNLOCK(vp);
		VOP_UNLOCK(vp);
		vdrop(vp);
	}
	vn_finished_secondary_write(mp);
}

#ifndef NO_FFS_SNAPSHOT

static struct snapdata *
ffs_snapdata_alloc(void)
{
	struct snapdata *sn;

	/*
	 * Fetch a snapdata from the free list if there is one available.
	 */
	mtx_lock(&snapfree_lock);
	sn = LIST_FIRST(&snapfree);
	if (sn != NULL)
		LIST_REMOVE(sn, sn_link);
	mtx_unlock(&snapfree_lock);
	if (sn != NULL)
		return (sn);
	/*
	 * If there were no free snapdatas, allocate one.
	 */
	sn = malloc(sizeof *sn, M_UFSMNT, M_WAITOK | M_ZERO);
	TAILQ_INIT(&sn->sn_head);
	lockinit(&sn->sn_lock, PVFS, "snaplk", VLKTIMEOUT,
	    LK_CANRECURSE | LK_NOSHARE);
	return (sn);
}

/*
 * The snapdata is never freed because we cannot be certain that
 * there are no threads sleeping on the snap lock. Persisting
 * them permanently avoids costly synchronization in ffs_lock().
 */
static void
ffs_snapdata_free(struct snapdata *sn)
{
	mtx_lock(&snapfree_lock);
	LIST_INSERT_HEAD(&snapfree, sn, sn_link);
	mtx_unlock(&snapfree_lock);
}

/* Try to free snapdata associated with devvp */
static void
try_free_snapdata(struct vnode *devvp)
{
	struct snapdata *sn;
	ufs2_daddr_t *snapblklist;

	ASSERT_VI_LOCKED(devvp, "try_free_snapdata");
	sn = devvp->v_rdev->si_snapdata;

	if (sn == NULL || TAILQ_FIRST(&sn->sn_head) != NULL ||
	    (devvp->v_vflag & VV_COPYONWRITE) == 0)
		return;

	devvp->v_rdev->si_snapdata = NULL;
	devvp->v_vflag &= ~VV_COPYONWRITE;
	lockmgr(&sn->sn_lock, LK_DRAIN | LK_INTERLOCK, VI_MTX(devvp));
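	/*
	 * (LK_DRAIN waits for all other holders of and waiters on the
	 * snap lock to finish before granting it, so the list can be
	 * torn down safely.)
	 */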
	snapblklist = sn->sn_blklist;
	sn->sn_blklist = NULL;
	sn->sn_listsize = 0;
	lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
	if (snapblklist != NULL)
		free(snapblklist, M_UFSMNT);
	ffs_snapdata_free(sn);
	VI_LOCK(devvp);
}

/*
 * Revert a vnode lock from using the snapshot lock back to its own lock.
 *
 * Acquire a lock on the vnode's own lock and release the lock on the
 * snapshot lock. If there are any recursions on the snapshot lock,
 * take the same number of recursions on the vnode's own lock.
 */
static void
revert_snaplock(struct vnode *vp,
	struct vnode *devvp,
	struct snapdata *sn)
{
	int i;

	ASSERT_VI_LOCKED(devvp, "revert_snaplock");
	/*
	 * Avoid LOR with snapshot lock. The LK_NOWAIT should
	 * never fail as the lock is currently unused. Rather than
	 * panic, we recover by doing the blocking lock.
	 */
	for (i = 0; i <= sn->sn_lock.lk_recurse; i++) {
		if (lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT |
		    LK_INTERLOCK, VI_MTX(devvp)) != 0) {
			printf("revert_snaplock: Unexpected LK_NOWAIT "
			    "failure\n");
			lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_INTERLOCK,
			    VI_MTX(devvp));
		}
		VI_LOCK(devvp);
	}
	KASSERT(vp->v_vnlock == &sn->sn_lock,
	    ("revert_snaplock: lost lock mutation"));
	vp->v_vnlock = &vp->v_lock;
	while (sn->sn_lock.lk_recurse > 0)
		lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
	lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
}

static struct snapdata *
ffs_snapdata_acquire(struct vnode *devvp)
{
	struct snapdata *nsn, *sn;
	int error;

	/*
	 * Allocate a free snapdata. This is done before acquiring the
	 * devvp lock to avoid allocation while the devvp interlock is
	 * held.
	 */
	nsn = ffs_snapdata_alloc();

	for (;;) {
		VI_LOCK(devvp);
		sn = devvp->v_rdev->si_snapdata;
		if (sn == NULL) {
			/*
			 * This is the first snapshot on this
			 * filesystem and we use our pre-allocated
			 * snapdata. Publish sn with the sn_lock
			 * owned by us, to avoid the race.
			 */
			error = lockmgr(&nsn->sn_lock, LK_EXCLUSIVE |
			    LK_NOWAIT, NULL);
			if (error != 0)
				panic("leaked sn, lockmgr error %d", error);
			sn = devvp->v_rdev->si_snapdata = nsn;
			VI_UNLOCK(devvp);
			nsn = NULL;
			break;
		}

		/*
		 * A snapshot already exists on this filesystem;
		 * grab a reference to the common lock.
		 */
		error = lockmgr(&sn->sn_lock, LK_INTERLOCK |
		    LK_EXCLUSIVE | LK_SLEEPFAIL, VI_MTX(devvp));
		if (error == 0)
			break;
	}

	/*
	 * Free any unused snapdata.
	 */
	if (nsn != NULL)
		ffs_snapdata_free(nsn);

	return (sn);
}

#endif