GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/openzfs/module/zfs/dsl_pool.c
1
// SPDX-License-Identifier: CDDL-1.0
2
/*
3
* CDDL HEADER START
4
*
5
* The contents of this file are subject to the terms of the
6
* Common Development and Distribution License (the "License").
7
* You may not use this file except in compliance with the License.
8
*
9
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10
* or https://opensource.org/licenses/CDDL-1.0.
11
* See the License for the specific language governing permissions
12
* and limitations under the License.
13
*
14
* When distributing Covered Code, include this CDDL HEADER in each
15
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16
* If applicable, add the following below this CDDL HEADER, with the
17
* fields enclosed by brackets "[]" replaced with your own identifying
18
* information: Portions Copyright [yyyy] [name of copyright owner]
19
*
20
* CDDL HEADER END
21
*/
22
/*
23
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
25
* Copyright (c) 2013 Steven Hartland. All rights reserved.
26
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
28
*/
29
30
#include <sys/dsl_pool.h>
31
#include <sys/dsl_dataset.h>
32
#include <sys/dsl_prop.h>
33
#include <sys/dsl_dir.h>
34
#include <sys/dsl_synctask.h>
35
#include <sys/dsl_scan.h>
36
#include <sys/dnode.h>
37
#include <sys/dmu_tx.h>
38
#include <sys/dmu_objset.h>
39
#include <sys/arc.h>
40
#include <sys/zap.h>
41
#include <sys/zio.h>
42
#include <sys/zfs_context.h>
43
#include <sys/fs/zfs.h>
44
#include <sys/zfs_znode.h>
45
#include <sys/spa_impl.h>
46
#include <sys/vdev_impl.h>
47
#include <sys/metaslab_impl.h>
48
#include <sys/bptree.h>
49
#include <sys/zfeature.h>
50
#include <sys/zil_impl.h>
51
#include <sys/dsl_userhold.h>
52
#include <sys/trace_zfs.h>
53
#include <sys/mmp.h>
54
55
/*
56
* ZFS Write Throttle
57
* ------------------
58
*
59
* ZFS must limit the rate of incoming writes to the rate at which it is able
60
* to sync data modifications to the backend storage. Throttling by too much
61
* creates an artificial limit; throttling by too little can only be sustained
62
* for short periods and would lead to highly lumpy performance. On a per-pool
63
* basis, ZFS tracks the amount of modified (dirty) data. As operations change
64
* data, the amount of dirty data increases; as ZFS syncs out data, the amount
65
* of dirty data decreases. When the amount of dirty data exceeds a
66
* predetermined threshold further modifications are blocked until the amount
67
* of dirty data decreases (as data is synced out).
68
*
69
* The limit on dirty data is tunable, and should be adjusted according to
70
* both the IO capacity and available memory of the system. The larger the
71
* window, the more ZFS is able to aggregate and amortize metadata (and data)
72
* changes. However, memory is a limited resource, and allowing for more dirty
73
* data comes at the cost of keeping other useful data in memory (for example
74
* ZFS data cached by the ARC).
75
*
76
* Implementation
77
*
78
* As buffers are modified dsl_pool_dirty_space() increments both the per-
79
* txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
80
* dirty space used; dsl_pool_undirty_space() decrements those values as data
81
* is synced out from dsl_pool_sync(). While only the poolwide value is
82
* relevant, the per-txg value is useful for debugging. The tunable
83
* zfs_dirty_data_max determines the dirty space limit. Once that value is
84
* exceeded, new writes are halted until space frees up.
85
*
86
* The zfs_dirty_data_sync_percent tunable dictates the threshold at which we
87
* ensure that there is a txg syncing (see the comment in txg.c for a full
88
* description of transaction group stages).
89
*
90
* The IO scheduler uses both the dirty space limit and current amount of
91
* dirty data as inputs. Those values affect the number of concurrent IOs ZFS
92
* issues. See the comment in vdev_queue.c for details of the IO scheduler.
93
*
94
* The delay is also calculated based on the amount of dirty data. See the
95
* comment above dmu_tx_delay() for details.
96
*/
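/*
 * Illustrative sketch, not part of this file: a worked example of the
 * thresholds that fall out of the default tunables declared below, assuming
 * a machine with 32 GiB of RAM.  The helper name is made up; the real
 * checks live in dsl_pool_need_dirty_sync() and dsl_pool_need_dirty_delay()
 * further down.
 */
#if 0
static void
example_write_throttle_thresholds(void)
{
	uint64_t ram = 32ULL << 30;
	/* ~3.2 GiB with the default 10%, subject to zfs_dirty_data_max_max. */
	uint64_t dirty_max = ram * zfs_dirty_data_max_percent / 100;
	/* A txg sync is ensured once ~20% of that (~655 MiB) is dirty. */
	uint64_t sync_min = dirty_max * zfs_dirty_data_sync_percent / 100;
	/* dmu_tx_delay() starts delaying writes at ~60% (~1.9 GiB). */
	uint64_t delay_min = dirty_max * zfs_delay_min_dirty_percent / 100;
	(void) sync_min;
	(void) delay_min;
}
#endif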
97
98
/*
99
* zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
100
* capped at zfs_dirty_data_max_max. It can also be overridden with a module
101
* parameter.
102
*/
103
uint64_t zfs_dirty_data_max = 0;
104
uint64_t zfs_dirty_data_max_max = 0;
105
uint_t zfs_dirty_data_max_percent = 10;
106
uint_t zfs_dirty_data_max_max_percent = 25;
107
108
/*
109
* The upper limit of TX_WRITE log data. Write operations are throttled
110
* when approaching the limit until log data is cleared out after txg sync.
111
* It only counts TX_WRITE log records with WR_COPIED or WR_NEED_COPY.
112
*/
113
uint64_t zfs_wrlog_data_max = 0;
114
115
/*
116
* If there's at least this much dirty data (as a percentage of
117
* zfs_dirty_data_max), push out a txg. This should be less than
118
* zfs_vdev_async_write_active_min_dirty_percent.
119
*/
120
static uint_t zfs_dirty_data_sync_percent = 20;
121
122
/*
123
* Once there is this amount of dirty data, dmu_tx_delay() will kick in
124
* and delay each transaction.
125
* This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
126
*/
127
uint_t zfs_delay_min_dirty_percent = 60;
128
129
/*
130
* This controls how quickly the delay approaches infinity.
131
* Larger values cause it to delay more for a given amount of dirty data.
132
* Therefore larger values will cause there to be less dirty data for a
133
* given throughput.
134
*
135
* For the smoothest delay, this value should be about 1 billion divided
136
* by the maximum number of operations per second. This will smoothly
137
* handle between 10x and 1/10th this number.
138
*
139
* Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
140
* multiply in dmu_tx_delay().
141
*/
142
uint64_t zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
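
/*
 * Illustrative sketch, not part of this file: a simplified restatement of
 * the delay curve described above dmu_tx_delay(), showing how
 * zfs_delay_scale shapes it.  The helper name is hypothetical, and the real
 * implementation also clamps "dirty" below the maximum before dividing.
 */
#if 0
static uint64_t
example_tx_delay_ns(uint64_t dirty)
{
	uint64_t delay_min = zfs_dirty_data_max *
	    zfs_delay_min_dirty_percent / 100;

	if (dirty <= delay_min)
		return (0);
	/* Grows smoothly and approaches infinity as dirty -> max. */
	return (zfs_delay_scale * (dirty - delay_min) /
	    (zfs_dirty_data_max - dirty));
}
#endif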
143
144
/*
145
* These tunables determine the behavior of how zil_itxg_clean() is
146
* called via zil_clean() in the context of spa_sync(). When an itxg
147
* list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
148
* If the dispatch fails, the call to zil_itxg_clean() will occur
149
* synchronously in the context of spa_sync(), which can negatively
150
* impact the performance of spa_sync() (e.g. in the case of the itxg
151
* list having a large number of itxs that needs to be cleaned).
152
*
153
* Thus, these tunables can be used to manipulate the behavior of the
154
* taskq used by zil_clean(); they determine the number of taskq entries
155
* that are pre-populated when the taskq is first created (via the
156
* "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
157
* taskq entries that are cached after an on-demand allocation (via the
158
* "zfs_zil_clean_taskq_maxalloc").
159
*
160
* The idea is that we want to try reasonably hard to ensure there will
161
* already be a taskq entry pre-allocated by the time that it is needed
162
* by zil_clean(). This way, we can avoid the possibility of an
163
* on-demand allocation of a new taskq entry from failing, which would
164
* result in zil_itxg_clean() being called synchronously from zil_clean()
165
* (which can adversely affect performance of spa_sync()).
166
*
167
* Additionally, the number of threads used by the taskq can be
168
* configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
169
*/
170
static int zfs_zil_clean_taskq_nthr_pct = 100;
171
static int zfs_zil_clean_taskq_minalloc = 1024;
172
static int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;
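
/*
 * Illustrative sketch, not part of this file: the dispatch pattern the
 * comment above describes.  The real logic lives in zil_clean(); the helper
 * and its arguments here are hypothetical.
 */
#if 0
static void
example_zil_clean_dispatch(dsl_pool_t *dp, task_func_t *func, void *arg)
{
	/* Prefer an asynchronous clean using a pre-populated taskq entry. */
	if (taskq_dispatch(dp->dp_zil_clean_taskq, func, arg,
	    TQ_NOSLEEP) == TASKQID_INVALID) {
		/* Dispatch failed: clean synchronously in spa_sync() context. */
		func(arg);
	}
}
#endif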
173
174
int
175
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
176
{
177
uint64_t obj;
178
int err;
179
180
err = zap_lookup(dp->dp_meta_objset,
181
dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
182
name, sizeof (obj), 1, &obj);
183
if (err)
184
return (err);
185
186
return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
187
}
188
189
static dsl_pool_t *
190
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
191
{
192
dsl_pool_t *dp;
193
blkptr_t *bp = spa_get_rootblkptr(spa);
194
195
dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
196
dp->dp_spa = spa;
197
dp->dp_meta_rootbp = *bp;
198
rrw_init(&dp->dp_config_rwlock, B_TRUE);
199
txg_init(dp, txg);
200
mmp_init(spa);
201
202
txg_list_create(&dp->dp_dirty_datasets, spa,
203
offsetof(dsl_dataset_t, ds_dirty_link));
204
txg_list_create(&dp->dp_dirty_zilogs, spa,
205
offsetof(zilog_t, zl_dirty_link));
206
txg_list_create(&dp->dp_dirty_dirs, spa,
207
offsetof(dsl_dir_t, dd_dirty_link));
208
txg_list_create(&dp->dp_sync_tasks, spa,
209
offsetof(dsl_sync_task_t, dst_node));
210
txg_list_create(&dp->dp_early_sync_tasks, spa,
211
offsetof(dsl_sync_task_t, dst_node));
212
213
dp->dp_sync_taskq = spa_sync_tq_create(spa, "dp_sync_taskq");
214
215
dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
216
zfs_zil_clean_taskq_nthr_pct, minclsyspri,
217
zfs_zil_clean_taskq_minalloc,
218
zfs_zil_clean_taskq_maxalloc,
219
TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
220
221
mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
222
cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);
223
224
aggsum_init(&dp->dp_wrlog_total, 0);
225
for (int i = 0; i < TXG_SIZE; i++) {
226
aggsum_init(&dp->dp_wrlog_pertxg[i], 0);
227
}
228
229
dp->dp_zrele_taskq = taskq_create("z_zrele", 100, defclsyspri,
230
boot_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC |
231
TASKQ_THREADS_CPU_PCT);
232
dp->dp_unlinked_drain_taskq = taskq_create("z_unlinked_drain",
233
100, defclsyspri, boot_ncpus, INT_MAX,
234
TASKQ_PREPOPULATE | TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
235
236
return (dp);
237
}
238
239
int
240
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
241
{
242
int err;
243
dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
244
245
/*
246
* Initialize the caller's dsl_pool_t structure before we actually open
247
* the meta objset. This is done because a self-healing write zio may
248
* be issued as part of dmu_objset_open_impl() and the spa needs its
249
* dsl_pool_t initialized in order to handle the write.
250
*/
251
*dpp = dp;
252
253
err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
254
&dp->dp_meta_objset);
255
if (err != 0) {
256
dsl_pool_close(dp);
257
*dpp = NULL;
258
}
259
260
return (err);
261
}
262
263
int
264
dsl_pool_open(dsl_pool_t *dp)
265
{
266
int err;
267
dsl_dir_t *dd;
268
dsl_dataset_t *ds;
269
uint64_t obj;
270
271
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
272
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
273
DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
274
&dp->dp_root_dir_obj);
275
if (err)
276
goto out;
277
278
err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
279
NULL, dp, &dp->dp_root_dir);
280
if (err)
281
goto out;
282
283
err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
284
if (err)
285
goto out;
286
287
if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
288
err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
289
if (err)
290
goto out;
291
err = dsl_dataset_hold_obj(dp,
292
dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
293
if (err == 0) {
294
err = dsl_dataset_hold_obj(dp,
295
dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
296
&dp->dp_origin_snap);
297
dsl_dataset_rele(ds, FTAG);
298
}
299
dsl_dir_rele(dd, dp);
300
if (err)
301
goto out;
302
}
303
304
if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
305
err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
306
&dp->dp_free_dir);
307
if (err)
308
goto out;
309
310
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
311
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
312
if (err)
313
goto out;
314
VERIFY0(bpobj_open(&dp->dp_free_bpobj,
315
dp->dp_meta_objset, obj));
316
}
317
318
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
319
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
320
DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
321
if (err == 0) {
322
VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
323
dp->dp_meta_objset, obj));
324
} else if (err == ENOENT) {
325
/*
326
* We might not have created the remap bpobj yet.
327
*/
328
} else {
329
goto out;
330
}
331
}
332
333
/*
334
* Note: errors ignored, because these special dirs, used for
335
* space accounting, are only created on demand.
336
*/
337
(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
338
&dp->dp_leak_dir);
339
340
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
341
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
342
DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
343
&dp->dp_bptree_obj);
344
if (err != 0)
345
goto out;
346
}
347
348
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
349
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
350
DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
351
&dp->dp_empty_bpobj);
352
if (err != 0)
353
goto out;
354
}
355
356
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
357
DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
358
&dp->dp_tmp_userrefs_obj);
359
if (err == ENOENT)
360
err = 0;
361
if (err)
362
goto out;
363
364
err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);
365
366
out:
367
rrw_exit(&dp->dp_config_rwlock, FTAG);
368
return (err);
369
}
370
371
void
372
dsl_pool_close(dsl_pool_t *dp)
373
{
374
/*
375
* Drop our references from dsl_pool_open().
376
*
377
* Since we held the origin_snap from "syncing" context (which
378
* includes pool-opening context), it actually only got a "ref"
379
* and not a hold, so just drop that here.
380
*/
381
if (dp->dp_origin_snap != NULL)
382
dsl_dataset_rele(dp->dp_origin_snap, dp);
383
if (dp->dp_mos_dir != NULL)
384
dsl_dir_rele(dp->dp_mos_dir, dp);
385
if (dp->dp_free_dir != NULL)
386
dsl_dir_rele(dp->dp_free_dir, dp);
387
if (dp->dp_leak_dir != NULL)
388
dsl_dir_rele(dp->dp_leak_dir, dp);
389
if (dp->dp_root_dir != NULL)
390
dsl_dir_rele(dp->dp_root_dir, dp);
391
392
bpobj_close(&dp->dp_free_bpobj);
393
bpobj_close(&dp->dp_obsolete_bpobj);
394
395
/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
396
if (dp->dp_meta_objset != NULL)
397
dmu_objset_evict(dp->dp_meta_objset);
398
399
txg_list_destroy(&dp->dp_dirty_datasets);
400
txg_list_destroy(&dp->dp_dirty_zilogs);
401
txg_list_destroy(&dp->dp_sync_tasks);
402
txg_list_destroy(&dp->dp_early_sync_tasks);
403
txg_list_destroy(&dp->dp_dirty_dirs);
404
405
taskq_destroy(dp->dp_zil_clean_taskq);
406
spa_sync_tq_destroy(dp->dp_spa);
407
408
if (dp->dp_spa->spa_state == POOL_STATE_EXPORTED ||
409
dp->dp_spa->spa_state == POOL_STATE_DESTROYED) {
410
/*
411
* On export/destroy perform the ARC flush asynchronously.
412
*/
413
arc_flush_async(dp->dp_spa);
414
} else {
415
/*
416
* We can't set retry to TRUE since we're explicitly specifying
417
* a spa to flush. This is good enough; any missed buffers for
418
* this spa won't cause trouble, and they'll eventually fall
419
* out of the ARC just like any other unused buffer.
420
*/
421
arc_flush(dp->dp_spa, FALSE);
422
}
423
424
mmp_fini(dp->dp_spa);
425
txg_fini(dp);
426
dsl_scan_fini(dp);
427
dmu_buf_user_evict_wait();
428
429
rrw_destroy(&dp->dp_config_rwlock);
430
mutex_destroy(&dp->dp_lock);
431
cv_destroy(&dp->dp_spaceavail_cv);
432
433
ASSERT0(aggsum_value(&dp->dp_wrlog_total));
434
aggsum_fini(&dp->dp_wrlog_total);
435
for (int i = 0; i < TXG_SIZE; i++) {
436
ASSERT0(aggsum_value(&dp->dp_wrlog_pertxg[i]));
437
aggsum_fini(&dp->dp_wrlog_pertxg[i]);
438
}
439
440
taskq_destroy(dp->dp_unlinked_drain_taskq);
441
taskq_destroy(dp->dp_zrele_taskq);
442
if (dp->dp_blkstats != NULL)
443
vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
444
kmem_free(dp, sizeof (dsl_pool_t));
445
}
446
447
void
448
dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
449
{
450
uint64_t obj;
451
/*
452
* Currently, we only create the obsolete_bpobj where there are
453
* indirect vdevs with referenced mappings.
454
*/
455
ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
456
/* create and open the obsolete_bpobj */
457
obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
458
VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
459
VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
460
DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
461
spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
462
}
463
464
void
465
dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
466
{
467
spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
468
VERIFY0(zap_remove(dp->dp_meta_objset,
469
DMU_POOL_DIRECTORY_OBJECT,
470
DMU_POOL_OBSOLETE_BPOBJ, tx));
471
bpobj_free(dp->dp_meta_objset,
472
dp->dp_obsolete_bpobj.bpo_object, tx);
473
bpobj_close(&dp->dp_obsolete_bpobj);
474
}
475
476
dsl_pool_t *
477
dsl_pool_create(spa_t *spa, nvlist_t *zplprops __attribute__((unused)),
478
dsl_crypto_params_t *dcp, uint64_t txg)
479
{
480
int err;
481
dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
482
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
483
#ifdef _KERNEL
484
objset_t *os;
485
#else
486
objset_t *os __attribute__((unused));
487
#endif
488
dsl_dataset_t *ds;
489
uint64_t obj;
490
491
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
492
493
/* create and open the MOS (meta-objset) */
494
dp->dp_meta_objset = dmu_objset_create_impl(spa,
495
NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
496
spa->spa_meta_objset = dp->dp_meta_objset;
497
498
/* create the pool directory */
499
err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
500
DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
501
ASSERT0(err);
502
503
/* Initialize scan structures */
504
VERIFY0(dsl_scan_init(dp, txg));
505
506
/* create and open the root dir */
507
dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
508
VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
509
NULL, dp, &dp->dp_root_dir));
510
511
/* create and open the meta-objset dir */
512
(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
513
VERIFY0(dsl_pool_open_special_dir(dp,
514
MOS_DIR_NAME, &dp->dp_mos_dir));
515
516
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
517
/* create and open the free dir */
518
(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
519
FREE_DIR_NAME, tx);
520
VERIFY0(dsl_pool_open_special_dir(dp,
521
FREE_DIR_NAME, &dp->dp_free_dir));
522
523
/* create and open the free_bplist */
524
obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
525
VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
526
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
527
VERIFY0(bpobj_open(&dp->dp_free_bpobj,
528
dp->dp_meta_objset, obj));
529
}
530
531
if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
532
dsl_pool_create_origin(dp, tx);
533
534
/*
535
* Some features may be needed when creating the root dataset, so we
536
* create the feature objects here.
537
*/
538
if (spa_version(spa) >= SPA_VERSION_FEATURES)
539
spa_feature_create_zap_objects(spa, tx);
540
541
if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
542
dcp->cp_crypt != ZIO_CRYPT_INHERIT)
543
spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);
544
545
/* create the root dataset */
546
obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);
547
548
/* create the root objset */
549
VERIFY0(dsl_dataset_hold_obj_flags(dp, obj,
550
DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
551
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
552
os = dmu_objset_create_impl(dp->dp_spa, ds,
553
dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
554
rrw_exit(&ds->ds_bp_rwlock, FTAG);
555
#ifdef _KERNEL
556
zfs_create_fs(os, kcred, zplprops, tx);
557
#endif
558
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
559
560
dmu_tx_commit(tx);
561
562
rrw_exit(&dp->dp_config_rwlock, FTAG);
563
564
return (dp);
565
}
566
567
/*
568
* Account for the meta-objset space in its placeholder dsl_dir.
569
*/
570
void
571
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
572
int64_t used, int64_t comp, int64_t uncomp)
573
{
574
ASSERT3U(comp, ==, uncomp); /* it's all metadata */
575
mutex_enter(&dp->dp_lock);
576
dp->dp_mos_used_delta += used;
577
dp->dp_mos_compressed_delta += comp;
578
dp->dp_mos_uncompressed_delta += uncomp;
579
mutex_exit(&dp->dp_lock);
580
}
581
582
static void
583
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
584
{
585
zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
586
dmu_objset_sync(dp->dp_meta_objset, zio, tx);
587
VERIFY0(zio_wait(zio));
588
dmu_objset_sync_done(dp->dp_meta_objset, tx);
589
taskq_wait(dp->dp_sync_taskq);
590
multilist_destroy(&dp->dp_meta_objset->os_synced_dnodes);
591
592
dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
593
spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
594
}
595
596
static void
597
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
598
{
599
ASSERT(MUTEX_HELD(&dp->dp_lock));
600
601
if (delta < 0)
602
ASSERT3U(-delta, <=, dp->dp_dirty_total);
603
604
dp->dp_dirty_total += delta;
605
606
/*
607
* Note: we signal even when increasing dp_dirty_total.
608
* This ensures forward progress -- each thread wakes the next waiter.
609
*/
610
if (dp->dp_dirty_total < zfs_dirty_data_max)
611
cv_signal(&dp->dp_spaceavail_cv);
612
}
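
/*
 * Illustrative sketch, not part of this file: the waiter side of the
 * hand-off above.  The real wait loop lives in dmu_tx_wait(); this is only
 * a simplified restatement of the pattern, with a made-up name.
 */
#if 0
static void
example_wait_for_dirty_space(dsl_pool_t *dp)
{
	mutex_enter(&dp->dp_lock);
	while (dp->dp_dirty_total >= zfs_dirty_data_max)
		cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
	mutex_exit(&dp->dp_lock);
}
#endif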
613
614
void
615
dsl_pool_wrlog_count(dsl_pool_t *dp, int64_t size, uint64_t txg)
616
{
617
ASSERT3S(size, >=, 0);
618
619
aggsum_add(&dp->dp_wrlog_pertxg[txg & TXG_MASK], size);
620
aggsum_add(&dp->dp_wrlog_total, size);
621
622
/* Choose a value slightly bigger than min dirty sync bytes */
623
uint64_t sync_min =
624
zfs_wrlog_data_max * (zfs_dirty_data_sync_percent + 10) / 200;
625
if (aggsum_compare(&dp->dp_wrlog_pertxg[txg & TXG_MASK], sync_min) > 0)
626
txg_kick(dp, txg);
627
}
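
/*
 * Illustrative worked example (not part of this file): with the default
 * zfs_dirty_data_sync_percent of 20, the threshold above works out to
 * zfs_wrlog_data_max * 30 / 200, i.e. ~15% of the wrlog cap per txg.  If
 * zfs_wrlog_data_max is left at its usual default of roughly twice
 * zfs_dirty_data_max (set at module load), that is about 30% of
 * zfs_dirty_data_max -- slightly above the 20% dirty-data sync threshold,
 * as the "slightly bigger" comment above intends.
 */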
628
629
boolean_t
630
dsl_pool_need_wrlog_delay(dsl_pool_t *dp)
631
{
632
uint64_t delay_min_bytes =
633
zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
634
635
return (aggsum_compare(&dp->dp_wrlog_total, delay_min_bytes) > 0);
636
}
637
638
static void
639
dsl_pool_wrlog_clear(dsl_pool_t *dp, uint64_t txg)
640
{
641
int64_t delta;
642
delta = -(int64_t)aggsum_value(&dp->dp_wrlog_pertxg[txg & TXG_MASK]);
643
aggsum_add(&dp->dp_wrlog_pertxg[txg & TXG_MASK], delta);
644
aggsum_add(&dp->dp_wrlog_total, delta);
645
/* Compact per-CPU sums after the big change. */
646
(void) aggsum_value(&dp->dp_wrlog_pertxg[txg & TXG_MASK]);
647
(void) aggsum_value(&dp->dp_wrlog_total);
648
}
649
650
#ifdef ZFS_DEBUG
651
static boolean_t
652
dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
653
{
654
spa_t *spa = dp->dp_spa;
655
vdev_t *rvd = spa->spa_root_vdev;
656
657
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
658
vdev_t *vd = rvd->vdev_child[c];
659
txg_list_t *tl = &vd->vdev_ms_list;
660
metaslab_t *ms;
661
662
for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
663
ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
664
VERIFY(zfs_range_tree_is_empty(ms->ms_freeing));
665
VERIFY(zfs_range_tree_is_empty(ms->ms_checkpointing));
666
}
667
}
668
669
return (B_TRUE);
670
}
671
#else
672
#define dsl_early_sync_task_verify(dp, txg) \
673
((void) sizeof (dp), (void) sizeof (txg), B_TRUE)
674
#endif
675
676
void
677
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
678
{
679
zio_t *rio; /* root zio for all dirty dataset syncs */
680
dmu_tx_t *tx;
681
dsl_dir_t *dd;
682
dsl_dataset_t *ds;
683
objset_t *mos = dp->dp_meta_objset;
684
list_t synced_datasets;
685
686
list_create(&synced_datasets, sizeof (dsl_dataset_t),
687
offsetof(dsl_dataset_t, ds_synced_link));
688
689
tx = dmu_tx_create_assigned(dp, txg);
690
691
/*
692
* Run all early sync tasks before writing out any dirty blocks.
693
* For more info on early sync tasks see block comment in
694
* dsl_early_sync_task().
695
*/
696
if (!txg_list_empty(&dp->dp_early_sync_tasks, txg)) {
697
dsl_sync_task_t *dst;
698
699
ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
700
while ((dst =
701
txg_list_remove(&dp->dp_early_sync_tasks, txg)) != NULL) {
702
ASSERT(dsl_early_sync_task_verify(dp, txg));
703
dsl_sync_task_sync(dst, tx);
704
}
705
ASSERT(dsl_early_sync_task_verify(dp, txg));
706
}
707
708
/*
709
* Write out all dirty blocks of dirty datasets. Note, this could
710
* create a very large (+10k) zio tree.
711
*/
712
rio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
713
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
714
/*
715
* We must not sync any non-MOS datasets twice, because
716
* we may have taken a snapshot of them. However, we
717
* may sync newly-created datasets on pass 2.
718
*/
719
ASSERT(!list_link_active(&ds->ds_synced_link));
720
list_insert_tail(&synced_datasets, ds);
721
dsl_dataset_sync(ds, rio, tx);
722
}
723
VERIFY0(zio_wait(rio));
724
725
/*
726
* Update the long range free counter after
727
* we're done syncing user data
728
*/
729
mutex_enter(&dp->dp_lock);
730
ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
731
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
732
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
733
mutex_exit(&dp->dp_lock);
734
735
/*
736
* After the data blocks have been written (ensured by the zio_wait()
737
* above), update the user/group/project space accounting. This happens
738
* in tasks dispatched to dp_sync_taskq, so wait for them before
739
* continuing.
740
*/
741
for (ds = list_head(&synced_datasets); ds != NULL;
742
ds = list_next(&synced_datasets, ds)) {
743
dmu_objset_sync_done(ds->ds_objset, tx);
744
}
745
taskq_wait(dp->dp_sync_taskq);
746
747
/*
748
* Sync the datasets again to push out the changes due to
749
* userspace updates. This must be done before we process the
750
* sync tasks, so that any snapshots will have the correct
751
* user accounting information (and we won't get confused
752
* about which blocks are part of the snapshot).
753
*/
754
rio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
755
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
756
objset_t *os = ds->ds_objset;
757
758
ASSERT(list_link_active(&ds->ds_synced_link));
759
dmu_buf_rele(ds->ds_dbuf, ds);
760
dsl_dataset_sync(ds, rio, tx);
761
762
/*
763
* Release any key mappings created by calls to
764
* dsl_dataset_dirty() from the userquota accounting
765
* code paths.
766
*/
767
if (os->os_encrypted && !os->os_raw_receive &&
768
!os->os_next_write_raw[txg & TXG_MASK]) {
769
ASSERT3P(ds->ds_key_mapping, !=, NULL);
770
key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
771
}
772
}
773
VERIFY0(zio_wait(rio));
774
775
/*
776
* Now that the datasets have been completely synced, we can
777
* clean up our in-memory structures accumulated while syncing:
778
*
779
* - move dead blocks from the pending deadlist and livelists
780
* to the on-disk versions
781
* - release hold from dsl_dataset_dirty()
782
* - release key mapping hold from dsl_dataset_dirty()
783
*/
784
while ((ds = list_remove_head(&synced_datasets)) != NULL) {
785
objset_t *os = ds->ds_objset;
786
787
if (os->os_encrypted && !os->os_raw_receive &&
788
!os->os_next_write_raw[txg & TXG_MASK]) {
789
ASSERT3P(ds->ds_key_mapping, !=, NULL);
790
key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
791
}
792
793
dsl_dataset_sync_done(ds, tx);
794
dmu_buf_rele(ds->ds_dbuf, ds);
795
}
796
797
while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
798
dsl_dir_sync(dd, tx);
799
}
800
801
/*
802
* The MOS's space is accounted for in the pool/$MOS
803
* (dp_mos_dir). We can't modify the mos while we're syncing
804
* it, so we remember the deltas and apply them here.
805
*/
806
if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
807
dp->dp_mos_uncompressed_delta != 0) {
808
dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
809
dp->dp_mos_used_delta,
810
dp->dp_mos_compressed_delta,
811
dp->dp_mos_uncompressed_delta, tx);
812
dp->dp_mos_used_delta = 0;
813
dp->dp_mos_compressed_delta = 0;
814
dp->dp_mos_uncompressed_delta = 0;
815
}
816
817
if (dmu_objset_is_dirty(mos, txg)) {
818
dsl_pool_sync_mos(dp, tx);
819
}
820
821
/*
822
* We have written all of the accounted dirty data, so our
823
* dp_space_towrite should now be zero. However, some seldom-used
824
* code paths do not adhere to this (e.g. dbuf_undirty()). Shore up
825
* the accounting of any dirtied space now.
826
*
827
* Note that, besides any dirty data from datasets, the amount of
828
* dirty data in the MOS is also accounted by the pool. Therefore,
829
* we want to do this cleanup after dsl_pool_sync_mos() so we don't
830
* attempt to update the accounting for the same dirty data twice.
831
* (i.e. at this point we only update the accounting for the space
832
* that we know that we "leaked").
833
*/
834
dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);
835
836
/*
837
* If we modify a dataset in the same txg that we want to destroy it,
838
* its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
839
* dsl_dir_destroy_check() will fail if there are unexpected holds.
840
* Therefore, we want to sync the MOS (thus syncing the dd_dbuf
841
* and clearing the hold on it) before we process the sync_tasks.
842
* The MOS data dirtied by the sync_tasks will be synced on the next
843
* pass.
844
*/
845
if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
846
dsl_sync_task_t *dst;
847
/*
848
* No more sync tasks should have been added while we
849
* were syncing.
850
*/
851
ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
852
while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
853
dsl_sync_task_sync(dst, tx);
854
}
855
856
dmu_tx_commit(tx);
857
858
DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
859
}
860
861
void
862
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
863
{
864
zilog_t *zilog;
865
866
while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
867
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
868
/*
869
* We don't remove the zilog from the dp_dirty_zilogs
870
* list until after we've cleaned it. This ensures that
871
* callers of zilog_is_dirty() receive an accurate
872
* answer when they are racing with the spa sync thread.
873
*/
874
zil_clean(zilog, txg);
875
(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
876
ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
877
dmu_buf_rele(ds->ds_dbuf, zilog);
878
}
879
880
dsl_pool_wrlog_clear(dp, txg);
881
882
ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
883
}
884
885
/*
886
* TRUE if the current thread is the tx_sync_thread or if we
887
* are being called from SPA context during pool initialization.
888
*/
889
int
890
dsl_pool_sync_context(dsl_pool_t *dp)
891
{
892
return (curthread == dp->dp_tx.tx_sync_thread ||
893
spa_is_initializing(dp->dp_spa) ||
894
taskq_member(dp->dp_sync_taskq, curthread));
895
}
896
897
/*
898
* This function returns the amount of allocatable space in the pool
899
* minus whatever space is currently reserved by ZFS for specific
900
* purposes. Specifically:
901
*
902
* 1] Any reserved SLOP space
903
* 2] Any space used by the checkpoint
904
* 3] Any space used for deferred frees
905
*
906
* The latter 2 are especially important because they are needed to
907
* rectify the SPA's and DMU's different understanding of how much space
908
* is used. Now the DMU is aware of that extra space tracked by the SPA
909
* without having to maintain a separate special dir (e.g. similar to
910
* $MOS, $FREEING, and $LEAKED).
911
*
912
* Note: By deferred frees here, we mean the frees that were deferred
913
* in spa_sync() after sync pass 1 (spa_deferred_bpobj), and not the
914
* segments placed in ms_defer trees during metaslab_sync_done().
915
*/
916
uint64_t
917
dsl_pool_adjustedsize(dsl_pool_t *dp, zfs_space_check_t slop_policy)
918
{
919
spa_t *spa = dp->dp_spa;
920
uint64_t space, resv, adjustedsize;
921
uint64_t spa_deferred_frees =
922
spa->spa_deferred_bpobj.bpo_phys->bpo_bytes;
923
924
space = spa_get_dspace(spa)
925
- spa_get_checkpoint_space(spa) - spa_deferred_frees;
926
resv = spa_get_slop_space(spa);
927
928
switch (slop_policy) {
929
case ZFS_SPACE_CHECK_NORMAL:
930
break;
931
case ZFS_SPACE_CHECK_RESERVED:
932
resv >>= 1;
933
break;
934
case ZFS_SPACE_CHECK_EXTRA_RESERVED:
935
resv >>= 2;
936
break;
937
case ZFS_SPACE_CHECK_NONE:
938
resv = 0;
939
break;
940
default:
941
panic("invalid slop policy value: %d", slop_policy);
942
break;
943
}
944
adjustedsize = (space >= resv) ? (space - resv) : 0;
945
946
return (adjustedsize);
947
}
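
/*
 * Illustrative worked example (not part of this file): if
 * spa_get_slop_space() returns 1 GiB, then ZFS_SPACE_CHECK_NORMAL keeps the
 * full 1 GiB in reserve, ZFS_SPACE_CHECK_RESERVED keeps 512 MiB,
 * ZFS_SPACE_CHECK_EXTRA_RESERVED keeps 256 MiB, and ZFS_SPACE_CHECK_NONE
 * keeps nothing, so progressively more critical operations are allowed to
 * dip further into the slop space.
 */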
948
949
uint64_t
950
dsl_pool_unreserved_space(dsl_pool_t *dp, zfs_space_check_t slop_policy)
951
{
952
uint64_t poolsize = dsl_pool_adjustedsize(dp, slop_policy);
953
uint64_t deferred =
954
metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
955
uint64_t quota = (poolsize >= deferred) ? (poolsize - deferred) : 0;
956
return (quota);
957
}
958
959
uint64_t
960
dsl_pool_deferred_space(dsl_pool_t *dp)
961
{
962
return (metaslab_class_get_deferred(spa_normal_class(dp->dp_spa)));
963
}
964
965
boolean_t
966
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
967
{
968
uint64_t delay_min_bytes =
969
zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
970
971
/*
972
* We are not taking the dp_lock here and in a few other places, since torn
973
* reads are unlikely: on 64-bit systems due to register size and on
974
* 32-bit due to memory constraints. Pool-wide locks in hot path may
975
* be too expensive, while we do not need a precise result here.
976
*/
977
return (dp->dp_dirty_total > delay_min_bytes);
978
}
979
980
static boolean_t
981
dsl_pool_need_dirty_sync(dsl_pool_t *dp, uint64_t txg)
982
{
983
uint64_t dirty_min_bytes =
984
zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
985
uint64_t dirty = dp->dp_dirty_pertxg[txg & TXG_MASK];
986
987
return (dirty > dirty_min_bytes);
988
}
989
990
void
991
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
992
{
993
if (space > 0) {
994
mutex_enter(&dp->dp_lock);
995
dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
996
dsl_pool_dirty_delta(dp, space);
997
boolean_t needsync = !dmu_tx_is_syncing(tx) &&
998
dsl_pool_need_dirty_sync(dp, tx->tx_txg);
999
mutex_exit(&dp->dp_lock);
1000
1001
if (needsync)
1002
txg_kick(dp, tx->tx_txg);
1003
}
1004
}
1005
1006
void
1007
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
1008
{
1009
ASSERT3S(space, >=, 0);
1010
if (space == 0)
1011
return;
1012
1013
mutex_enter(&dp->dp_lock);
1014
if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
1015
/* XXX writing something we didn't dirty? */
1016
space = dp->dp_dirty_pertxg[txg & TXG_MASK];
1017
}
1018
ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
1019
dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
1020
ASSERT3U(dp->dp_dirty_total, >=, space);
1021
dsl_pool_dirty_delta(dp, -space);
1022
mutex_exit(&dp->dp_lock);
1023
}
1024
1025
static int
1026
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
1027
{
1028
dmu_tx_t *tx = arg;
1029
dsl_dataset_t *ds, *prev = NULL;
1030
int err;
1031
1032
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
1033
if (err)
1034
return (err);
1035
1036
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
1037
err = dsl_dataset_hold_obj(dp,
1038
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
1039
if (err) {
1040
dsl_dataset_rele(ds, FTAG);
1041
return (err);
1042
}
1043
1044
if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
1045
break;
1046
dsl_dataset_rele(ds, FTAG);
1047
ds = prev;
1048
prev = NULL;
1049
}
1050
1051
if (prev == NULL) {
1052
prev = dp->dp_origin_snap;
1053
1054
/*
1055
* The $ORIGIN can't have any data, or the accounting
1056
* will be wrong.
1057
*/
1058
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
1059
ASSERT0(BP_GET_BIRTH(&dsl_dataset_phys(prev)->ds_bp));
1060
rrw_exit(&ds->ds_bp_rwlock, FTAG);
1061
1062
/* The origin doesn't get attached to itself */
1063
if (ds->ds_object == prev->ds_object) {
1064
dsl_dataset_rele(ds, FTAG);
1065
return (0);
1066
}
1067
1068
dmu_buf_will_dirty(ds->ds_dbuf, tx);
1069
dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
1070
dsl_dataset_phys(ds)->ds_prev_snap_txg =
1071
dsl_dataset_phys(prev)->ds_creation_txg;
1072
1073
dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1074
dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;
1075
1076
dmu_buf_will_dirty(prev->ds_dbuf, tx);
1077
dsl_dataset_phys(prev)->ds_num_children++;
1078
1079
if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
1080
ASSERT0P(ds->ds_prev);
1081
VERIFY0(dsl_dataset_hold_obj(dp,
1082
dsl_dataset_phys(ds)->ds_prev_snap_obj,
1083
ds, &ds->ds_prev));
1084
}
1085
}
1086
1087
ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
1088
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);
1089
1090
if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
1091
dmu_buf_will_dirty(prev->ds_dbuf, tx);
1092
dsl_dataset_phys(prev)->ds_next_clones_obj =
1093
zap_create(dp->dp_meta_objset,
1094
DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
1095
}
1096
VERIFY0(zap_add_int(dp->dp_meta_objset,
1097
dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));
1098
1099
dsl_dataset_rele(ds, FTAG);
1100
if (prev != dp->dp_origin_snap)
1101
dsl_dataset_rele(prev, FTAG);
1102
return (0);
1103
}
1104
1105
void
1106
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
1107
{
1108
ASSERT(dmu_tx_is_syncing(tx));
1109
ASSERT(dp->dp_origin_snap != NULL);
1110
1111
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
1112
tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
1113
}
1114
1115
static int
1116
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
1117
{
1118
dmu_tx_t *tx = arg;
1119
objset_t *mos = dp->dp_meta_objset;
1120
1121
if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
1122
dsl_dataset_t *origin;
1123
1124
VERIFY0(dsl_dataset_hold_obj(dp,
1125
dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));
1126
1127
if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
1128
dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
1129
dsl_dir_phys(origin->ds_dir)->dd_clones =
1130
zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
1131
0, tx);
1132
}
1133
1134
VERIFY0(zap_add_int(dp->dp_meta_objset,
1135
dsl_dir_phys(origin->ds_dir)->dd_clones,
1136
ds->ds_object, tx));
1137
1138
dsl_dataset_rele(origin, FTAG);
1139
}
1140
return (0);
1141
}
1142
1143
void
1144
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
1145
{
1146
uint64_t obj;
1147
1148
ASSERT(dmu_tx_is_syncing(tx));
1149
1150
(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
1151
VERIFY0(dsl_pool_open_special_dir(dp,
1152
FREE_DIR_NAME, &dp->dp_free_dir));
1153
1154
/*
1155
* We can't use bpobj_alloc(), because spa_version() still
1156
* returns the old version, and we need a new-version bpobj with
1157
* subobj support. So call dmu_object_alloc() directly.
1158
*/
1159
obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
1160
SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
1161
VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1162
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
1163
VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));
1164
1165
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
1166
upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
1167
}
1168
1169
void
1170
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
1171
{
1172
uint64_t dsobj;
1173
dsl_dataset_t *ds;
1174
1175
ASSERT(dmu_tx_is_syncing(tx));
1176
ASSERT0P(dp->dp_origin_snap);
1177
ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));
1178
1179
/* create the origin dir, ds, & snap-ds */
1180
dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
1181
NULL, 0, kcred, NULL, tx);
1182
VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
1183
dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
1184
VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
1185
dp, &dp->dp_origin_snap));
1186
dsl_dataset_rele(ds, FTAG);
1187
}
1188
1189
taskq_t *
1190
dsl_pool_zrele_taskq(dsl_pool_t *dp)
1191
{
1192
return (dp->dp_zrele_taskq);
1193
}
1194
1195
taskq_t *
1196
dsl_pool_unlinked_drain_taskq(dsl_pool_t *dp)
1197
{
1198
return (dp->dp_unlinked_drain_taskq);
1199
}
1200
1201
/*
1202
* Walk through the pool-wide zap object of temporary snapshot user holds
1203
* and release them.
1204
*/
1205
void
1206
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
1207
{
1208
zap_attribute_t *za;
1209
zap_cursor_t zc;
1210
objset_t *mos = dp->dp_meta_objset;
1211
uint64_t zapobj = dp->dp_tmp_userrefs_obj;
1212
nvlist_t *holds;
1213
1214
if (zapobj == 0)
1215
return;
1216
ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1217
1218
holds = fnvlist_alloc();
1219
1220
za = zap_attribute_alloc();
1221
for (zap_cursor_init(&zc, mos, zapobj);
1222
zap_cursor_retrieve(&zc, za) == 0;
1223
zap_cursor_advance(&zc)) {
1224
char *htag;
1225
nvlist_t *tags;
1226
1227
htag = strchr(za->za_name, '-');
1228
*htag = '\0';
1229
++htag;
1230
if (nvlist_lookup_nvlist(holds, za->za_name, &tags) != 0) {
1231
tags = fnvlist_alloc();
1232
fnvlist_add_boolean(tags, htag);
1233
fnvlist_add_nvlist(holds, za->za_name, tags);
1234
fnvlist_free(tags);
1235
} else {
1236
fnvlist_add_boolean(tags, htag);
1237
}
1238
}
1239
dsl_dataset_user_release_tmp(dp, holds);
1240
fnvlist_free(holds);
1241
zap_cursor_fini(&zc);
1242
zap_attribute_free(za);
1243
}
1244
1245
/*
1246
* Create the pool-wide zap object for storing temporary snapshot holds.
1247
*/
1248
static void
1249
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
1250
{
1251
objset_t *mos = dp->dp_meta_objset;
1252
1253
ASSERT0(dp->dp_tmp_userrefs_obj);
1254
ASSERT(dmu_tx_is_syncing(tx));
1255
1256
dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
1257
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
1258
}
1259
1260
static int
1261
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
1262
const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
1263
{
1264
objset_t *mos = dp->dp_meta_objset;
1265
uint64_t zapobj = dp->dp_tmp_userrefs_obj;
1266
char *name;
1267
int error;
1268
1269
ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1270
ASSERT(dmu_tx_is_syncing(tx));
1271
1272
/*
1273
* If the pool was created prior to SPA_VERSION_USERREFS, the
1274
* zap object for temporary holds might not exist yet.
1275
*/
1276
if (zapobj == 0) {
1277
if (holding) {
1278
dsl_pool_user_hold_create_obj(dp, tx);
1279
zapobj = dp->dp_tmp_userrefs_obj;
1280
} else {
1281
return (SET_ERROR(ENOENT));
1282
}
1283
}
1284
1285
name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
1286
if (holding)
1287
error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
1288
else
1289
error = zap_remove(mos, zapobj, name, tx);
1290
kmem_strfree(name);
1291
1292
return (error);
1293
}
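
/*
 * Illustrative sketch, not part of this file: the temporary-hold ZAP key
 * format built above, which is why dsl_pool_clean_tmp_userrefs() splits
 * each name at the first '-'.  The values here are made up.
 */
#if 0
static void
example_tmp_userref_key(void)
{
	/* Dataset object 0x1234 held with tag "recv" => key "1234-recv". */
	char *name = kmem_asprintf("%llx-%s", (u_longlong_t)0x1234, "recv");
	kmem_strfree(name);
}
#endif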
1294
1295
/*
1296
* Add a temporary hold for the given dataset object and tag.
1297
*/
1298
int
1299
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
1300
uint64_t now, dmu_tx_t *tx)
1301
{
1302
return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
1303
}
1304
1305
/*
1306
* Release a temporary hold for the given dataset object and tag.
1307
*/
1308
int
1309
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
1310
dmu_tx_t *tx)
1311
{
1312
return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
1313
tx, B_FALSE));
1314
}
1315
1316
/*
1317
* DSL Pool Configuration Lock
1318
*
1319
* The dp_config_rwlock protects against changes to DSL state (e.g. dataset
1320
* creation / destruction / rename / property setting). It must be held for
1321
* read to hold a dataset or dsl_dir. I.e. you must call
1322
* dsl_pool_config_enter() or dsl_pool_hold() before calling
1323
* dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
1324
* must be held continuously until all datasets and dsl_dirs are released.
1325
*
1326
* The only exception to this rule is that if a "long hold" is placed on
1327
* a dataset, then the dp_config_rwlock may be dropped while the dataset
1328
* is still held. The long hold will prevent the dataset from being
1329
* destroyed -- the destroy will fail with EBUSY. A long hold can be
1330
* obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
1331
* (by calling dsl_{dataset,objset}_{try}own{_obj}).
1332
*
1333
* Legitimate long-holders (including owners) should be long-running, cancelable
1334
* tasks that should cause "zfs destroy" to fail. This includes DMU
1335
* consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
1336
* "zfs send", and "zfs diff". There are several other long-holders whose
1337
* uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
1338
*
1339
* The usual formula for long-holding would be:
1340
* dsl_pool_hold()
1341
* dsl_dataset_hold()
1342
* ... perform checks ...
1343
* dsl_dataset_long_hold()
1344
* dsl_pool_rele()
1345
* ... perform long-running task ...
1346
* dsl_dataset_long_rele()
1347
* dsl_dataset_rele()
1348
*
1349
* Note that when the long hold is released, the dataset is still held but
1350
* the pool is not held. The dataset may change arbitrarily during this time
1351
* (e.g. it could be destroyed). Therefore you shouldn't do anything to the
1352
* dataset except release it.
1353
*
1354
* Operations generally fall somewhere into the following taxonomy:
1355
*
1356
*                          Read-Only             Modifying
1357
*
1358
*   Dataset Layer / MOS    zfs get               zfs destroy
1359
*
1360
*   Individual Dataset     read()                write()
1361
*
1362
*
1363
* Dataset Layer Operations
1364
*
1365
* Modifying operations should generally use dsl_sync_task(). The synctask
1366
* infrastructure enforces proper locking strategy with respect to the
1367
* dp_config_rwlock. See the comment above dsl_sync_task() for details.
1368
*
1369
* Read-only operations will manually hold the pool, then the dataset, obtain
1370
* information from the dataset, then release the pool and dataset.
1371
* dmu_objset_{hold,rele}() are convenience routines that also do the pool
1372
* hold/rele.
1373
*
1374
*
1375
* Operations On Individual Datasets
1376
*
1377
* Objects _within_ an objset should only be modified by the current 'owner'
1378
* of the objset to prevent incorrect concurrent modification. Thus, use
1379
* {dmu_objset,dsl_dataset}_own to mark some entity as the current owner,
1380
* and fail with EBUSY if there is already an owner. The owner can then
1381
* implement its own locking strategy, independent of the dataset layer's
1382
* locking infrastructure.
1383
* (E.g., the ZPL has its own set of locks to control concurrency. A regular
1384
* vnop will not reach into the dataset layer).
1385
*
1386
* Ideally, objects would also only be read by the objset's owner, so that we
1387
* don't observe state mid-modification.
1388
* (E.g. the ZPL is creating a new object and linking it into a directory; if
1389
* you don't coordinate with the ZPL to hold ZPL-level locks, you could see an
1390
* intermediate state. The ioctl level violates this but in pretty benign
1391
* ways, e.g. reading the zpl props object.)
1392
*/
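
/*
 * Illustrative sketch, not part of this file: the "usual formula for
 * long-holding" spelled out above, written as code.  Error handling is
 * minimal and the function name is hypothetical; the signatures are those
 * used elsewhere in the DSL (dsl_pool.h / dsl_dataset.h).
 */
#if 0
static int
example_long_hold(const char *dsname)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(dsname, FTAG, &dp);
	if (error != 0)
		return (error);
	error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
	if (error != 0) {
		dsl_pool_rele(dp, FTAG);
		return (error);
	}
	/* ... perform checks ... */
	dsl_dataset_long_hold(ds, FTAG);
	dsl_pool_rele(dp, FTAG);

	/* ... long-running task; "zfs destroy" now fails with EBUSY ... */

	dsl_dataset_long_rele(ds, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
#endif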
1393
1394
int
1395
dsl_pool_hold(const char *name, const void *tag, dsl_pool_t **dp)
1396
{
1397
spa_t *spa;
1398
int error;
1399
1400
error = spa_open(name, &spa, tag);
1401
if (error == 0) {
1402
*dp = spa_get_dsl(spa);
1403
dsl_pool_config_enter(*dp, tag);
1404
}
1405
return (error);
1406
}
1407
1408
void
1409
dsl_pool_rele(dsl_pool_t *dp, const void *tag)
1410
{
1411
dsl_pool_config_exit(dp, tag);
1412
spa_close(dp->dp_spa, tag);
1413
}
1414
1415
void
1416
dsl_pool_config_enter(dsl_pool_t *dp, const void *tag)
1417
{
1418
/*
1419
* We use a "reentrant" reader-writer lock, but not reentrantly.
1420
*
1421
* The rrwlock can (with the track_all flag) track all reading threads,
1422
* which is very useful for debugging which code path failed to release
1423
* the lock, and for verifying that the *current* thread does hold
1424
* the lock.
1425
*
1426
* (Unlike a rwlock, which knows that N threads hold it for
1427
* read, but not *which* threads, so rw_held(RW_READER) returns TRUE
1428
* if any thread holds it for read, even if this thread doesn't).
1429
*/
1430
ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
1431
rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
1432
}
1433
1434
void
1435
dsl_pool_config_enter_prio(dsl_pool_t *dp, const void *tag)
1436
{
1437
ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
1438
rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
1439
}
1440
1441
void
1442
dsl_pool_config_exit(dsl_pool_t *dp, const void *tag)
1443
{
1444
rrw_exit(&dp->dp_config_rwlock, tag);
1445
}
1446
1447
boolean_t
1448
dsl_pool_config_held(dsl_pool_t *dp)
1449
{
1450
return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
1451
}
1452
1453
boolean_t
1454
dsl_pool_config_held_writer(dsl_pool_t *dp)
1455
{
1456
return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
1457
}
1458
1459
EXPORT_SYMBOL(dsl_pool_config_enter);
1460
EXPORT_SYMBOL(dsl_pool_config_exit);
1461
1462
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
1463
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_percent, UINT, ZMOD_RD,
1464
"Max percent of RAM allowed to be dirty");
1465
1466
/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
1467
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max_percent, UINT, ZMOD_RD,
1468
"zfs_dirty_data_max upper bound as % of RAM");
1469
1470
ZFS_MODULE_PARAM(zfs, zfs_, delay_min_dirty_percent, UINT, ZMOD_RW,
1471
"Transaction delay threshold");
1472
1473
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max, U64, ZMOD_RW,
1474
"Determines the dirty space limit");
1475
1476
ZFS_MODULE_PARAM(zfs, zfs_, wrlog_data_max, U64, ZMOD_RW,
1477
"The size limit of write-transaction zil log data");
1478
1479
/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
1480
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max, U64, ZMOD_RD,
1481
"zfs_dirty_data_max upper bound in bytes");
1482
1483
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_sync_percent, UINT, ZMOD_RW,
1484
"Dirty data txg sync threshold as a percentage of zfs_dirty_data_max");
1485
1486
ZFS_MODULE_PARAM(zfs, zfs_, delay_scale, U64, ZMOD_RW,
1487
"How quickly delay approaches infinity");
1488
1489
ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_nthr_pct, INT, ZMOD_RW,
1490
"Max percent of CPUs that are used per dp_sync_taskq");
1491
1492
ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_minalloc, INT, ZMOD_RW,
1493
"Number of taskq entries that are pre-populated");
1494
1495
ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_maxalloc, INT, ZMOD_RW,
1496
"Max number of taskq entries that are cached");
1497
1498