/* Path: sys/contrib/openzfs/module/zfs/bptree.c */
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 */

#include <sys/arc.h>
#include <sys/bptree.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>

/*
 * A bptree is a queue of root block pointers from destroyed datasets. When a
 * dataset is destroyed its root block pointer is put on the end of the pool's
 * bptree queue so the dataset's blocks can be freed asynchronously by
 * dsl_scan_sync.
This allows the delete operation to finish without traversing43* all the dataset's blocks.44*45* Note that while bt_begin and bt_end are only ever incremented in this code,46* they are effectively reset to 0 every time the entire bptree is freed because47* the bptree's object is destroyed and re-created.48*/4950struct bptree_args {51bptree_phys_t *ba_phys; /* data in bonus buffer, dirtied if freeing */52boolean_t ba_free; /* true if freeing during traversal */5354bptree_itor_t *ba_func; /* function to call for each blockpointer */55void *ba_arg; /* caller supplied argument to ba_func */56dmu_tx_t *ba_tx; /* caller supplied tx, NULL if not freeing */57} bptree_args_t;5859uint64_t60bptree_alloc(objset_t *os, dmu_tx_t *tx)61{62uint64_t obj;63dmu_buf_t *db;64bptree_phys_t *bt;6566obj = dmu_object_alloc(os, DMU_OTN_UINT64_METADATA,67SPA_OLD_MAXBLOCKSIZE, DMU_OTN_UINT64_METADATA,68sizeof (bptree_phys_t), tx);6970/*71* Bonus buffer contents are already initialized to 0, but for72* readability we make it explicit.73*/74VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));75dmu_buf_will_dirty(db, tx);76bt = db->db_data;77bt->bt_begin = 0;78bt->bt_end = 0;79bt->bt_bytes = 0;80bt->bt_comp = 0;81bt->bt_uncomp = 0;82dmu_buf_rele(db, FTAG);8384return (obj);85}8687int88bptree_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)89{90dmu_buf_t *db;91bptree_phys_t *bt;9293VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));94bt = db->db_data;95ASSERT3U(bt->bt_begin, ==, bt->bt_end);96ASSERT0(bt->bt_bytes);97ASSERT0(bt->bt_comp);98ASSERT0(bt->bt_uncomp);99dmu_buf_rele(db, FTAG);100101return (dmu_object_free(os, obj, tx));102}103104boolean_t105bptree_is_empty(objset_t *os, uint64_t obj)106{107dmu_buf_t *db;108bptree_phys_t *bt;109boolean_t rv;110111VERIFY0(dmu_bonus_hold(os, obj, FTAG, &db));112bt = db->db_data;113rv = (bt->bt_begin == bt->bt_end);114dmu_buf_rele(db, FTAG);115return (rv);116}117118void119bptree_add(objset_t *os, uint64_t obj, blkptr_t *bp, uint64_t birth_txg,120uint64_t 
bytes, uint64_t comp, uint64_t uncomp, dmu_tx_t *tx)121{122dmu_buf_t *db;123bptree_phys_t *bt;124bptree_entry_phys_t *bte;125126/*127* bptree objects are in the pool mos, therefore they can only be128* modified in syncing context. Furthermore, this is only modified129* by the sync thread, so no locking is necessary.130*/131ASSERT(dmu_tx_is_syncing(tx));132133VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));134bt = db->db_data;135136bte = kmem_zalloc(sizeof (*bte), KM_SLEEP);137bte->be_birth_txg = birth_txg;138bte->be_bp = *bp;139dmu_write(os, obj, bt->bt_end * sizeof (*bte), sizeof (*bte), bte, tx,140DMU_READ_NO_PREFETCH);141kmem_free(bte, sizeof (*bte));142143dmu_buf_will_dirty(db, tx);144bt->bt_end++;145bt->bt_bytes += bytes;146bt->bt_comp += comp;147bt->bt_uncomp += uncomp;148dmu_buf_rele(db, FTAG);149}150151static int152bptree_visit_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,153const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)154{155(void) zilog, (void) dnp;156int err;157struct bptree_args *ba = arg;158159if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||160BP_IS_REDACTED(bp))161return (0);162163err = ba->ba_func(ba->ba_arg, bp, ba->ba_tx);164if (err == 0 && ba->ba_free) {165ba->ba_phys->bt_bytes -= bp_get_dsize_sync(spa, bp);166ba->ba_phys->bt_comp -= BP_GET_PSIZE(bp);167ba->ba_phys->bt_uncomp -= BP_GET_UCSIZE(bp);168}169return (err);170}171172/*173* If "free" is set:174* - It is assumed that "func" will be freeing the block pointers.175* - If "func" returns nonzero, the bookmark will be remembered and176* iteration will be restarted from this point on next invocation.177* - If an i/o error is encountered (e.g. 
"func" returns EIO or ECKSUM),178* bptree_iterate will remember the bookmark, continue traversing179* any additional entries, and return 0.180*181* If "free" is not set, traversal will stop and return an error if182* an i/o error is encountered.183*184* In either case, if zfs_free_leak_on_eio is set, i/o errors will be185* ignored and traversal will continue (i.e. TRAVERSE_HARD will be passed to186* traverse_dataset_destroyed()).187*/188int189bptree_iterate(objset_t *os, uint64_t obj, boolean_t free, bptree_itor_t func,190void *arg, dmu_tx_t *tx)191{192boolean_t ioerr = B_FALSE;193int err;194uint64_t i;195dmu_buf_t *db;196struct bptree_args ba;197198ASSERT(!free || dmu_tx_is_syncing(tx));199200err = dmu_bonus_hold(os, obj, FTAG, &db);201if (err != 0)202return (err);203204if (free)205dmu_buf_will_dirty(db, tx);206207ba.ba_phys = db->db_data;208ba.ba_free = free;209ba.ba_func = func;210ba.ba_arg = arg;211ba.ba_tx = tx;212213err = 0;214for (i = ba.ba_phys->bt_begin; i < ba.ba_phys->bt_end; i++) {215bptree_entry_phys_t bte;216int flags = TRAVERSE_PREFETCH_METADATA | TRAVERSE_POST |217TRAVERSE_NO_DECRYPT;218219err = dmu_read(os, obj, i * sizeof (bte), sizeof (bte),220&bte, DMU_READ_NO_PREFETCH);221if (err != 0)222break;223224if (zfs_free_leak_on_eio)225flags |= TRAVERSE_HARD;226zfs_dbgmsg("bptree index %lld: traversing from min_txg=%lld "227"bookmark %lld/%lld/%lld/%lld",228(longlong_t)i,229(longlong_t)bte.be_birth_txg,230(longlong_t)bte.be_zb.zb_objset,231(longlong_t)bte.be_zb.zb_object,232(longlong_t)bte.be_zb.zb_level,233(longlong_t)bte.be_zb.zb_blkid);234err = traverse_dataset_destroyed(os->os_spa, &bte.be_bp,235bte.be_birth_txg, &bte.be_zb, flags,236bptree_visit_cb, &ba);237if (free) {238/*239* The callback has freed the visited block pointers.240* Record our traversal progress on disk, either by241* updating this record's bookmark, or by logically242* removing this record by advancing bt_begin.243*/244if (err != 0) {245/* save bookmark for future resume 
*/246ASSERT3U(bte.be_zb.zb_objset, ==,247ZB_DESTROYED_OBJSET);248ASSERT0(bte.be_zb.zb_level);249dmu_write(os, obj, i * sizeof (bte),250sizeof (bte), &bte, tx,251DMU_READ_NO_PREFETCH);252if (err == EIO || err == ECKSUM ||253err == ENXIO) {254/*255* Skip the rest of this tree and256* continue on to the next entry.257*/258err = 0;259ioerr = B_TRUE;260} else {261break;262}263} else if (ioerr) {264/*265* This entry is finished, but there were266* i/o errors on previous entries, so we267* can't adjust bt_begin. Set this entry's268* be_birth_txg such that it will be269* treated as a no-op in future traversals.270*/271bte.be_birth_txg = UINT64_MAX;272dmu_write(os, obj, i * sizeof (bte),273sizeof (bte), &bte, tx,274DMU_READ_NO_PREFETCH);275}276277if (!ioerr) {278ba.ba_phys->bt_begin++;279(void) dmu_free_range(os, obj,280i * sizeof (bte), sizeof (bte), tx);281}282} else if (err != 0) {283break;284}285}286287ASSERT(!free || err != 0 || ioerr ||288ba.ba_phys->bt_begin == ba.ba_phys->bt_end);289290/* if all blocks are free there should be no used space */291if (ba.ba_phys->bt_begin == ba.ba_phys->bt_end) {292if (zfs_free_leak_on_eio) {293ba.ba_phys->bt_bytes = 0;294ba.ba_phys->bt_comp = 0;295ba.ba_phys->bt_uncomp = 0;296}297298ASSERT0(ba.ba_phys->bt_bytes);299ASSERT0(ba.ba_phys->bt_comp);300ASSERT0(ba.ba_phys->bt_uncomp);301}302303dmu_buf_rele(db, FTAG);304305return (err);306}307308309