Path: blob/main/sys/contrib/openzfs/module/zfs/btree.c
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2019 by Delphix. All rights reserved.
 */

#include <sys/btree.h>
#include <sys/bitops.h>
#include <sys/zfs_context.h>

kmem_cache_t *zfs_btree_leaf_cache;

/*
 * Control the extent of the verification that occurs when zfs_btree_verify is
 * called. Primarily used for debugging when extending the btree logic and
 * functionality. As the intensity is increased, new verification steps are
 * added. These steps are cumulative; intensity = 3 includes the intensity = 1
 * and intensity = 2 steps as well.
 *
 * Intensity 1: Verify that the tree's height is consistent throughout.
 * Intensity 2: Verify that a core node's children's parent pointers point
 * to the core node.
 * Intensity 3: Verify that the total number of elements in the tree matches the
 * sum of the number of elements in each node. Also verifies that each node's
 * count obeys the invariants (less than or equal to maximum value, greater than
 * or equal to half the maximum minus one).
 * Intensity 4: Verify that each element compares less than the element
 * immediately after it and greater than the one immediately before it using the
 * comparator function. For core nodes, also checks that each element is greater
 * than the last element in the first of the two nodes it separates, and less
 * than the first element in the second of the two nodes.
 * Intensity 5: Verifies, if ZFS_DEBUG is defined, that all unused memory inside
 * of each node is poisoned appropriately. Note that poisoning always occurs if
 * ZFS_DEBUG is set, so it is safe to set the intensity to 5 during normal
 * operation.
 *
 * Intensity 4 and 5 are particularly expensive to perform; the previous levels
 * are a few memory operations per node, while these levels require multiple
 * operations per element. In addition, when creating large btrees, these
 * operations are called at every step, resulting in extremely slow operation
 * (while the asymptotic complexity of the other steps is the same, the
 * importance of the constant factors cannot be denied).
 */
uint_t zfs_btree_verify_intensity = 0;

/*
 * Convenience functions to silence warnings from memcpy/memmove's
 * return values and change argument order to src, dest.
 */
static void
bcpy(const void *src, void *dest, size_t size)
{
	(void) memcpy(dest, src, size);
}

static void
bmov(const void *src, void *dest, size_t size)
{
	(void) memmove(dest, src, size);
}

static boolean_t
zfs_btree_is_core(struct zfs_btree_hdr *hdr)
{
	return (hdr->bth_first == -1);
}

#ifdef _ILP32
#define	BTREE_POISON 0xabadb10c
#else
#define	BTREE_POISON 0xabadb10cdeadbeef
#endif

static void
zfs_btree_poison_node(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
#ifdef ZFS_DEBUG
	size_t size = tree->bt_elem_size;
	if (zfs_btree_is_core(hdr)) {
		zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
		for (uint32_t i = hdr->bth_count + 1; i <= BTREE_CORE_ELEMS;
		    i++) {
			node->btc_children[i] =
			    (zfs_btree_hdr_t *)BTREE_POISON;
		}
		(void) memset(node->btc_elems + hdr->bth_count * size, 0x0f,
		    (BTREE_CORE_ELEMS - hdr->bth_count) * size);
	} else {
		zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
		(void) memset(leaf->btl_elems, 0x0f, hdr->bth_first * size);
		(void) memset(leaf->btl_elems +
		    (hdr->bth_first + hdr->bth_count) * size, 0x0f,
		    tree->bt_leaf_size - offsetof(zfs_btree_leaf_t, btl_elems) -
		    (hdr->bth_first + hdr->bth_count) * size);
	}
#endif
}

static inline void
zfs_btree_poison_node_at(zfs_btree_t *tree, zfs_btree_hdr_t *hdr,
    uint32_t idx, uint32_t count)
{
#ifdef ZFS_DEBUG
	size_t size = tree->bt_elem_size;
	if (zfs_btree_is_core(hdr)) {
		ASSERT3U(idx, >=, hdr->bth_count);
		ASSERT3U(idx, <=, BTREE_CORE_ELEMS);
		ASSERT3U(idx + count, <=, BTREE_CORE_ELEMS);
		zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
		for (uint32_t i = 1; i <= count; i++) {
			node->btc_children[idx + i] =
			    (zfs_btree_hdr_t *)BTREE_POISON;
		}
		(void) memset(node->btc_elems + idx * size, 0x0f, count * size);
	} else {
		ASSERT3U(idx, <=, tree->bt_leaf_cap);
		ASSERT3U(idx + count, <=, tree->bt_leaf_cap);
		zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
		(void) memset(leaf->btl_elems +
		    (hdr->bth_first + idx) * size, 0x0f, count * size);
	}
#endif
}

static inline void
zfs_btree_verify_poison_at(zfs_btree_t *tree, zfs_btree_hdr_t *hdr,
    uint32_t idx)
{
#ifdef ZFS_DEBUG
	size_t size = tree->bt_elem_size;
	if (zfs_btree_is_core(hdr)) {
		ASSERT3U(idx, <, BTREE_CORE_ELEMS);
		zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
		zfs_btree_hdr_t *cval = (zfs_btree_hdr_t *)BTREE_POISON;
		VERIFY3P(node->btc_children[idx + 1], ==, cval);
		for (size_t i = 0; i < size; i++)
			VERIFY3U(node->btc_elems[idx * size + i], ==, 0x0f);
	} else {
		ASSERT3U(idx, <, tree->bt_leaf_cap);
		zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
		if (idx >= tree->bt_leaf_cap - hdr->bth_first)
			return;
		for (size_t i = 0; i < size; i++) {
			VERIFY3U(leaf->btl_elems[(hdr->bth_first + idx)
			    * size + i], ==, 0x0f);
		}
	}
#endif
}

void
zfs_btree_init(void)
{
	zfs_btree_leaf_cache = kmem_cache_create("zfs_btree_leaf_cache",
	    BTREE_LEAF_SIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zfs_btree_fini(void)
{
	kmem_cache_destroy(zfs_btree_leaf_cache);
}

static void *
zfs_btree_leaf_alloc(zfs_btree_t *tree)
{
	if (tree->bt_leaf_size == BTREE_LEAF_SIZE)
		return (kmem_cache_alloc(zfs_btree_leaf_cache, KM_SLEEP));
	else
		return (kmem_alloc(tree->bt_leaf_size, KM_SLEEP));
}

static void
zfs_btree_leaf_free(zfs_btree_t *tree, void *ptr)
{
	if (tree->bt_leaf_size == BTREE_LEAF_SIZE)
		return (kmem_cache_free(zfs_btree_leaf_cache, ptr));
	else
		return (kmem_free(ptr, tree->bt_leaf_size));
}

void
zfs_btree_create(zfs_btree_t *tree, int (*compar) (const void *, const void *),
    bt_find_in_buf_f bt_find_in_buf, size_t size)
{
	zfs_btree_create_custom(tree, compar, bt_find_in_buf, size,
	    BTREE_LEAF_SIZE);
}

static void *
zfs_btree_find_in_buf(zfs_btree_t *tree, uint8_t *buf, uint32_t nelems,
    const void *value, zfs_btree_index_t *where);

void
zfs_btree_create_custom(zfs_btree_t *tree,
    int (*compar) (const void *, const void *),
    bt_find_in_buf_f bt_find_in_buf,
    size_t size, size_t lsize)
{
	size_t esize = lsize - offsetof(zfs_btree_leaf_t, btl_elems);

	ASSERT3U(size, <=, esize / 2);
	memset(tree, 0, sizeof (*tree));
	tree->bt_compar = compar;
	tree->bt_find_in_buf = (bt_find_in_buf == NULL) ?
	    zfs_btree_find_in_buf : bt_find_in_buf;
	tree->bt_elem_size = size;
	tree->bt_leaf_size = lsize;
	tree->bt_leaf_cap = P2ALIGN_TYPED(esize / size, 2, size_t);
	tree->bt_height = -1;
	tree->bt_bulk = NULL;
}

/*
 * Find value in the array of elements provided. Uses a simple binary search.
 */
static void *
zfs_btree_find_in_buf(zfs_btree_t *tree, uint8_t *buf, uint32_t nelems,
    const void *value, zfs_btree_index_t *where)
{
	uint32_t max = nelems;
	uint32_t min = 0;
	while (max > min) {
		uint32_t idx = (min + max) / 2;
		uint8_t *cur = buf + idx * tree->bt_elem_size;
		int comp = tree->bt_compar(cur, value);
		if (comp < 0) {
			min = idx + 1;
		} else if (comp > 0) {
			max = idx;
		} else {
			where->bti_offset = idx;
			where->bti_before = B_FALSE;
			return (cur);
		}
	}

	where->bti_offset = max;
	where->bti_before = B_TRUE;
	return (NULL);
}
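/*
 * Illustrative sketch (not part of the implementation): a consumer supplies a
 * comparator over its own element type and passes it to zfs_btree_create().
 * The element type and comparator below are hypothetical; passing NULL for
 * bt_find_in_buf selects the default binary search in zfs_btree_find_in_buf()
 * above.
 *
 *	typedef struct { uint64_t off; } ex_ent_t;
 *
 *	static int
 *	ex_ent_compare(const void *a, const void *b)
 *	{
 *		const ex_ent_t *l = a, *r = b;
 *		return (l->off < r->off ? -1 : l->off > r->off ? 1 : 0);
 *	}
 *
 *	zfs_btree_t bt;
 *	zfs_btree_create(&bt, ex_ent_compare, NULL, sizeof (ex_ent_t));
 */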
/*
 * Find the given value in the tree. where may be passed as null to use as a
 * membership test or if the btree is being used as a map.
 */
void *
zfs_btree_find(zfs_btree_t *tree, const void *value, zfs_btree_index_t *where)
{
	if (tree->bt_height == -1) {
		if (where != NULL) {
			where->bti_node = NULL;
			where->bti_offset = 0;
		}
		ASSERT0(tree->bt_num_elems);
		return (NULL);
	}

	/*
	 * If we're in bulk-insert mode, we check the last spot in the tree
	 * and the last leaf in the tree before doing the normal search,
	 * because for most workloads the vast majority of finds in
	 * bulk-insert mode are to insert new elements.
	 */
	zfs_btree_index_t idx;
	size_t size = tree->bt_elem_size;
	if (tree->bt_bulk != NULL) {
		zfs_btree_leaf_t *last_leaf = tree->bt_bulk;
		int comp = tree->bt_compar(last_leaf->btl_elems +
		    (last_leaf->btl_hdr.bth_first +
		    last_leaf->btl_hdr.bth_count - 1) * size, value);
		if (comp < 0) {
			/*
			 * If what they're looking for is after the last
			 * element, it's not in the tree.
			 */
			if (where != NULL) {
				where->bti_node = (zfs_btree_hdr_t *)last_leaf;
				where->bti_offset =
				    last_leaf->btl_hdr.bth_count;
				where->bti_before = B_TRUE;
			}
			return (NULL);
		} else if (comp == 0) {
			if (where != NULL) {
				where->bti_node = (zfs_btree_hdr_t *)last_leaf;
				where->bti_offset =
				    last_leaf->btl_hdr.bth_count - 1;
				where->bti_before = B_FALSE;
			}
			return (last_leaf->btl_elems +
			    (last_leaf->btl_hdr.bth_first +
			    last_leaf->btl_hdr.bth_count - 1) * size);
		}
		if (tree->bt_compar(last_leaf->btl_elems +
		    last_leaf->btl_hdr.bth_first * size, value) <= 0) {
			/*
			 * If what they're looking for is after the first
			 * element in the last leaf, it's in the last leaf or
			 * it's not in the tree.
			 */
			void *d = tree->bt_find_in_buf(tree,
			    last_leaf->btl_elems +
			    last_leaf->btl_hdr.bth_first * size,
			    last_leaf->btl_hdr.bth_count, value, &idx);

			if (where != NULL) {
				idx.bti_node = (zfs_btree_hdr_t *)last_leaf;
				*where = idx;
			}
			return (d);
		}
	}

	zfs_btree_core_t *node = NULL;
	uint32_t child = 0;
	uint32_t depth = 0;

	/*
	 * Iterate down the tree, finding which child the value should be in
	 * by comparing with the separators.
	 */
	for (node = (zfs_btree_core_t *)tree->bt_root; depth < tree->bt_height;
	    node = (zfs_btree_core_t *)node->btc_children[child], depth++) {
		ASSERT3P(node, !=, NULL);
		void *d = tree->bt_find_in_buf(tree, node->btc_elems,
		    node->btc_hdr.bth_count, value, &idx);
		EQUIV(d != NULL, !idx.bti_before);
		if (d != NULL) {
			if (where != NULL) {
				idx.bti_node = (zfs_btree_hdr_t *)node;
				*where = idx;
			}
			return (d);
		}
		ASSERT(idx.bti_before);
		child = idx.bti_offset;
	}

	/*
	 * The value is in this leaf, or it would be if it were in the
	 * tree. Find its proper location and return it.
	 */
	zfs_btree_leaf_t *leaf = (depth == 0 ?
	    (zfs_btree_leaf_t *)tree->bt_root : (zfs_btree_leaf_t *)node);
	void *d = tree->bt_find_in_buf(tree, leaf->btl_elems +
	    leaf->btl_hdr.bth_first * size,
	    leaf->btl_hdr.bth_count, value, &idx);

	if (where != NULL) {
		idx.bti_node = (zfs_btree_hdr_t *)leaf;
		*where = idx;
	}

	return (d);
}
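/*
 * Illustrative sketch (hypothetical caller): when zfs_btree_find() misses, the
 * returned index describes the insertion point, so a find-or-insert can reuse
 * it with zfs_btree_add_idx() (defined later in this file) without a second
 * search:
 *
 *	zfs_btree_index_t where;
 *	if (zfs_btree_find(&bt, &key, &where) == NULL)
 *		zfs_btree_add_idx(&bt, &key, &where);
 */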
/*
 * To explain the following functions, it is useful to understand the four
 * kinds of shifts used in btree operation. First, a shift is a movement of
 * elements within a node. It is used to create gaps for inserting new
 * elements and children, or cover gaps created when things are removed. A
 * shift has two fundamental properties, each of which can be one of two
 * values, making four types of shifts. There is the direction of the shift
 * (left or right) and the shape of the shift (parallelogram or isosceles
 * trapezoid (shortened to trapezoid hereafter)). The shape distinction only
 * applies to shifts of core nodes.
 *
 * The names derive from the following imagining of the layout of a node:
 *
 *  Elements:     *   *   *   *   *   *   *   ...   *   *   *
 *  Children:   *   *   *   *   *   *   *   *   ...   *   *   *
 *
 * This layout follows from the fact that the elements act as separators
 * between pairs of children, and that children root subtrees "below" the
 * current node. A left and right shift are fairly self-explanatory; a left
 * shift moves things to the left, while a right shift moves things to the
 * right. A parallelogram shift is a shift with the same number of elements
 * and children being moved, while a trapezoid shift is a shift that moves one
 * more child than elements. An example follows:
 *
 * A parallelogram shift could contain the following:
 *      _______________
 *      \*   *   *   * \   *   *   *   ...   *   *   *
 *       * \  *   *   * \ *   *   *   ...   *   *   *
 *      ---------------
 * A trapezoid shift could contain the following:
 *          ___________
 *       * / *   *   *  \  *   *   *   ...   *   *   *
 *       * /  *   *   *  \ *   *   *   ...   *   *   *
 *      ---------------
 *
 * Note that a parallelogram shift is always shaped like a "left-leaning"
 * parallelogram, where the starting index of the children being moved is
 * always one higher than the starting index of the elements being moved. No
 * "right-leaning" parallelogram shifts are needed (shifts where the starting
 * element index and starting child index being moved are the same) to achieve
 * any btree operations, so we ignore them.
 */

enum bt_shift_shape {
	BSS_TRAPEZOID,
	BSS_PARALLELOGRAM
};

enum bt_shift_direction {
	BSD_LEFT,
	BSD_RIGHT
};

/*
 * Shift elements and children in the provided core node by off spots. The
 * first element moved is idx, and count elements are moved. The shape of the
 * shift is determined by shape. The direction is determined by dir.
 */
static inline void
bt_shift_core(zfs_btree_t *tree, zfs_btree_core_t *node, uint32_t idx,
    uint32_t count, uint32_t off, enum bt_shift_shape shape,
    enum bt_shift_direction dir)
{
	size_t size = tree->bt_elem_size;
	ASSERT(zfs_btree_is_core(&node->btc_hdr));

	uint8_t *e_start = node->btc_elems + idx * size;
	uint8_t *e_out = (dir == BSD_LEFT ? e_start - off * size :
	    e_start + off * size);
	bmov(e_start, e_out, count * size);

	zfs_btree_hdr_t **c_start = node->btc_children + idx +
	    (shape == BSS_TRAPEZOID ? 0 : 1);
	zfs_btree_hdr_t **c_out = (dir == BSD_LEFT ? c_start - off :
	    c_start + off);
	uint32_t c_count = count + (shape == BSS_TRAPEZOID ? 1 : 0);
	bmov(c_start, c_out, c_count * sizeof (*c_start));
}

/*
 * Shift elements and children in the provided core node left by one spot.
 * The first element moved is idx, and count elements are moved. The shape of
 * the shift is determined by shape; a trapezoid shift moves one more child
 * than a parallelogram shift does.
 */
static inline void
bt_shift_core_left(zfs_btree_t *tree, zfs_btree_core_t *node, uint32_t idx,
    uint32_t count, enum bt_shift_shape shape)
{
	bt_shift_core(tree, node, idx, count, 1, shape, BSD_LEFT);
}

/*
 * Shift elements and children in the provided core node right by one spot.
 * Starts with elements[idx] and children[idx] and one more child than element.
 */
static inline void
bt_shift_core_right(zfs_btree_t *tree, zfs_btree_core_t *node, uint32_t idx,
    uint32_t count, enum bt_shift_shape shape)
{
	bt_shift_core(tree, node, idx, count, 1, shape, BSD_RIGHT);
}

/*
 * Shift elements and children in the provided leaf node by off spots.
 * The first element moved is idx, and count elements are moved. The direction
 * is determined by dir.
 */
static inline void
bt_shift_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *node, uint32_t idx,
    uint32_t count, uint32_t off, enum bt_shift_direction dir)
{
	size_t size = tree->bt_elem_size;
	zfs_btree_hdr_t *hdr = &node->btl_hdr;
	ASSERT(!zfs_btree_is_core(hdr));

	if (count == 0)
		return;
	uint8_t *start = node->btl_elems + (hdr->bth_first + idx) * size;
	uint8_t *out = (dir == BSD_LEFT ? start - off * size :
	    start + off * size);
	bmov(start, out, count * size);
}

/*
 * Grow leaf for n new elements before idx.
 */
static void
bt_grow_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *leaf, uint32_t idx,
    uint32_t n)
{
	zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
	ASSERT(!zfs_btree_is_core(hdr));
	ASSERT3U(idx, <=, hdr->bth_count);
	uint32_t capacity = tree->bt_leaf_cap;
	ASSERT3U(hdr->bth_count + n, <=, capacity);
	boolean_t cl = (hdr->bth_first >= n);
	boolean_t cr = (hdr->bth_first + hdr->bth_count + n <= capacity);

	if (cl && (!cr || idx <= hdr->bth_count / 2)) {
		/* Grow left. */
		hdr->bth_first -= n;
		bt_shift_leaf(tree, leaf, n, idx, n, BSD_LEFT);
	} else if (cr) {
		/* Grow right. */
		bt_shift_leaf(tree, leaf, idx, hdr->bth_count - idx, n,
		    BSD_RIGHT);
	} else {
		/* Grow both ways. */
		uint32_t fn = hdr->bth_first -
		    (capacity - (hdr->bth_count + n)) / 2;
		hdr->bth_first -= fn;
		bt_shift_leaf(tree, leaf, fn, idx, fn, BSD_LEFT);
		bt_shift_leaf(tree, leaf, fn + idx, hdr->bth_count - idx,
		    n - fn, BSD_RIGHT);
	}
	hdr->bth_count += n;
}

/*
 * Shrink leaf, removing n elements starting from idx.
 */
static void
bt_shrink_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *leaf, uint32_t idx,
    uint32_t n)
{
	zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
	ASSERT(!zfs_btree_is_core(hdr));
	ASSERT3U(idx, <=, hdr->bth_count);
	ASSERT3U(idx + n, <=, hdr->bth_count);

	if (idx <= (hdr->bth_count - n) / 2) {
		bt_shift_leaf(tree, leaf, 0, idx, n, BSD_RIGHT);
		zfs_btree_poison_node_at(tree, hdr, 0, n);
		hdr->bth_first += n;
	} else {
		bt_shift_leaf(tree, leaf, idx + n, hdr->bth_count - idx - n, n,
		    BSD_LEFT);
		zfs_btree_poison_node_at(tree, hdr, hdr->bth_count - n, n);
	}
	hdr->bth_count -= n;
}
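/*
 * A worked example of the growth logic in bt_grow_leaf() above (the numbers
 * are hypothetical): with bt_leaf_cap == 8, bth_first == 3, bth_count == 5,
 * and a request for n == 2 new slots at idx == 4, there is room on the left
 * (bth_first >= n) but not on the right (3 + 5 + 2 > 8). The leaf therefore
 * grows left: bth_first drops to 1 and the four elements before idx are
 * shifted down by two, leaving a two-element gap at the requested index.
 */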
The shape555* parameter behaves the same as it does in the shift logic.556*/557static inline void558bt_transfer_core(zfs_btree_t *tree, zfs_btree_core_t *source, uint32_t sidx,559uint32_t count, zfs_btree_core_t *dest, uint32_t didx,560enum bt_shift_shape shape)561{562size_t size = tree->bt_elem_size;563ASSERT(zfs_btree_is_core(&source->btc_hdr));564ASSERT(zfs_btree_is_core(&dest->btc_hdr));565566bcpy(source->btc_elems + sidx * size, dest->btc_elems + didx * size,567count * size);568569uint32_t c_count = count + (shape == BSS_TRAPEZOID ? 1 : 0);570bcpy(source->btc_children + sidx + (shape == BSS_TRAPEZOID ? 0 : 1),571dest->btc_children + didx + (shape == BSS_TRAPEZOID ? 0 : 1),572c_count * sizeof (*source->btc_children));573}574575static inline void576bt_transfer_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *source, uint32_t sidx,577uint32_t count, zfs_btree_leaf_t *dest, uint32_t didx)578{579size_t size = tree->bt_elem_size;580ASSERT(!zfs_btree_is_core(&source->btl_hdr));581ASSERT(!zfs_btree_is_core(&dest->btl_hdr));582583bcpy(source->btl_elems + (source->btl_hdr.bth_first + sidx) * size,584dest->btl_elems + (dest->btl_hdr.bth_first + didx) * size,585count * size);586}587588/*589* Find the first element in the subtree rooted at hdr, return its value and590* put its location in where if non-null.591*/592static void *593zfs_btree_first_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr,594zfs_btree_index_t *where)595{596zfs_btree_hdr_t *node;597598for (node = hdr; zfs_btree_is_core(node);599node = ((zfs_btree_core_t *)node)->btc_children[0])600;601602ASSERT(!zfs_btree_is_core(node));603zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)node;604if (where != NULL) {605where->bti_node = node;606where->bti_offset = 0;607where->bti_before = B_FALSE;608}609return (&leaf->btl_elems[node->bth_first * tree->bt_elem_size]);610}611612/* Insert an element and a child into a core node at the given offset. 
*/613static void614zfs_btree_insert_core_impl(zfs_btree_t *tree, zfs_btree_core_t *parent,615uint32_t offset, zfs_btree_hdr_t *new_node, void *buf)616{617size_t size = tree->bt_elem_size;618zfs_btree_hdr_t *par_hdr = &parent->btc_hdr;619ASSERT3P(par_hdr, ==, new_node->bth_parent);620ASSERT3U(par_hdr->bth_count, <, BTREE_CORE_ELEMS);621622if (zfs_btree_verify_intensity >= 5) {623zfs_btree_verify_poison_at(tree, par_hdr,624par_hdr->bth_count);625}626/* Shift existing elements and children */627uint32_t count = par_hdr->bth_count - offset;628bt_shift_core_right(tree, parent, offset, count,629BSS_PARALLELOGRAM);630631/* Insert new values */632parent->btc_children[offset + 1] = new_node;633bcpy(buf, parent->btc_elems + offset * size, size);634par_hdr->bth_count++;635}636637/*638* Insert new_node into the parent of old_node directly after old_node, with639* buf as the dividing element between the two.640*/641static void642zfs_btree_insert_into_parent(zfs_btree_t *tree, zfs_btree_hdr_t *old_node,643zfs_btree_hdr_t *new_node, void *buf)644{645ASSERT3P(old_node->bth_parent, ==, new_node->bth_parent);646size_t size = tree->bt_elem_size;647zfs_btree_core_t *parent = old_node->bth_parent;648649/*650* If this is the root node we were splitting, we create a new root651* and increase the height of the tree.652*/653if (parent == NULL) {654ASSERT3P(old_node, ==, tree->bt_root);655tree->bt_num_nodes++;656zfs_btree_core_t *new_root =657kmem_alloc(sizeof (zfs_btree_core_t) + BTREE_CORE_ELEMS *658size, KM_SLEEP);659zfs_btree_hdr_t *new_root_hdr = &new_root->btc_hdr;660new_root_hdr->bth_parent = NULL;661new_root_hdr->bth_first = -1;662new_root_hdr->bth_count = 1;663664old_node->bth_parent = new_node->bth_parent = new_root;665new_root->btc_children[0] = old_node;666new_root->btc_children[1] = new_node;667bcpy(buf, new_root->btc_elems, size);668669tree->bt_height++;670tree->bt_root = new_root_hdr;671zfs_btree_poison_node(tree, new_root_hdr);672return;673}674675/*676* Since we have the new separator, binary search for where to put677* new_node.678*/679zfs_btree_hdr_t *par_hdr = &parent->btc_hdr;680zfs_btree_index_t idx;681ASSERT(zfs_btree_is_core(par_hdr));682VERIFY3P(tree->bt_find_in_buf(tree, parent->btc_elems,683par_hdr->bth_count, buf, &idx), ==, NULL);684ASSERT(idx.bti_before);685uint32_t offset = idx.bti_offset;686ASSERT3U(offset, <=, par_hdr->bth_count);687ASSERT3P(parent->btc_children[offset], ==, old_node);688689/*690* If the parent isn't full, shift things to accommodate our insertions691* and return.692*/693if (par_hdr->bth_count != BTREE_CORE_ELEMS) {694zfs_btree_insert_core_impl(tree, parent, offset, new_node, buf);695return;696}697698/*699* We need to split this core node into two. Currently there are700* BTREE_CORE_ELEMS + 1 child nodes, and we are adding one for701* BTREE_CORE_ELEMS + 2. Some of the children will be part of the702* current node, and the others will be moved to the new core node.703* There are BTREE_CORE_ELEMS + 1 elements including the new one. One704* will be used as the new separator in our parent, and the others705* will be split among the two core nodes.706*707* Usually we will split the node in half evenly, with708* BTREE_CORE_ELEMS/2 elements in each node. If we're bulk loading, we709* instead move only about a quarter of the elements (and children) to710* the new node. 
Since the average state after a long time is a 3/4711* full node, shortcutting directly to that state improves efficiency.712*713* We do this in two stages: first we split into two nodes, and then we714* reuse our existing logic to insert the new element and child.715*/716uint32_t move_count = MAX((BTREE_CORE_ELEMS / (tree->bt_bulk == NULL ?7172 : 4)) - 1, 2);718uint32_t keep_count = BTREE_CORE_ELEMS - move_count - 1;719ASSERT3U(BTREE_CORE_ELEMS - move_count, >=, 2);720tree->bt_num_nodes++;721zfs_btree_core_t *new_parent = kmem_alloc(sizeof (zfs_btree_core_t) +722BTREE_CORE_ELEMS * size, KM_SLEEP);723zfs_btree_hdr_t *new_par_hdr = &new_parent->btc_hdr;724new_par_hdr->bth_parent = par_hdr->bth_parent;725new_par_hdr->bth_first = -1;726new_par_hdr->bth_count = move_count;727zfs_btree_poison_node(tree, new_par_hdr);728729par_hdr->bth_count = keep_count;730731bt_transfer_core(tree, parent, keep_count + 1, move_count, new_parent,7320, BSS_TRAPEZOID);733734/* Store the new separator in a buffer. */735uint8_t *tmp_buf = kmem_alloc(size, KM_SLEEP);736bcpy(parent->btc_elems + keep_count * size, tmp_buf,737size);738zfs_btree_poison_node(tree, par_hdr);739740if (offset < keep_count) {741/* Insert the new node into the left half */742zfs_btree_insert_core_impl(tree, parent, offset, new_node,743buf);744745/*746* Move the new separator to the existing buffer.747*/748bcpy(tmp_buf, buf, size);749} else if (offset > keep_count) {750/* Insert the new node into the right half */751new_node->bth_parent = new_parent;752zfs_btree_insert_core_impl(tree, new_parent,753offset - keep_count - 1, new_node, buf);754755/*756* Move the new separator to the existing buffer.757*/758bcpy(tmp_buf, buf, size);759} else {760/*761* Move the new separator into the right half, and replace it762* with buf. We also need to shift back the elements in the763* right half to accommodate new_node.764*/765bt_shift_core_right(tree, new_parent, 0, move_count,766BSS_TRAPEZOID);767new_parent->btc_children[0] = new_node;768bcpy(tmp_buf, new_parent->btc_elems, size);769new_par_hdr->bth_count++;770}771kmem_free(tmp_buf, size);772zfs_btree_poison_node(tree, par_hdr);773774for (uint32_t i = 0; i <= new_parent->btc_hdr.bth_count; i++)775new_parent->btc_children[i]->bth_parent = new_parent;776777for (uint32_t i = 0; i <= parent->btc_hdr.bth_count; i++)778ASSERT3P(parent->btc_children[i]->bth_parent, ==, parent);779780/*781* Now that the node is split, we need to insert the new node into its782* parent. This may cause further splitting.783*/784zfs_btree_insert_into_parent(tree, &parent->btc_hdr,785&new_parent->btc_hdr, buf);786}787788/* Insert an element into a leaf node at the given offset. */789static void790zfs_btree_insert_leaf_impl(zfs_btree_t *tree, zfs_btree_leaf_t *leaf,791uint32_t idx, const void *value)792{793size_t size = tree->bt_elem_size;794zfs_btree_hdr_t *hdr = &leaf->btl_hdr;795ASSERT3U(leaf->btl_hdr.bth_count, <, tree->bt_leaf_cap);796797if (zfs_btree_verify_intensity >= 5) {798zfs_btree_verify_poison_at(tree, &leaf->btl_hdr,799leaf->btl_hdr.bth_count);800}801802bt_grow_leaf(tree, leaf, idx, 1);803uint8_t *start = leaf->btl_elems + (hdr->bth_first + idx) * size;804bcpy(value, start, size);805}806807static void808zfs_btree_verify_order_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr);809810/* Helper function for inserting a new value into leaf at the given index. 
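/*
 * A worked example of the split sizing used by zfs_btree_insert_into_leaf()
 * below (the capacity value is hypothetical): with a leaf capacity of 8, a
 * normal split uses move_count = MAX(8 / 2, 1) - 1 = 3 and keep_count =
 * 8 - 3 - 1 = 4, while a bulk-insert split uses move_count = MAX(8 / 4, 1) -
 * 1 = 1 and keep_count = 6, leaving the old leaf mostly full and the new
 * tail leaf mostly empty (before the additional adjustment made when the
 * insert lands in the left half).
 */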
/* Helper function for inserting a new value into leaf at the given index. */
static void
zfs_btree_insert_into_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *leaf,
    const void *value, uint32_t idx)
{
	size_t size = tree->bt_elem_size;
	uint32_t capacity = tree->bt_leaf_cap;

	/*
	 * If the leaf isn't full, shift the elements after idx and insert
	 * value.
	 */
	if (leaf->btl_hdr.bth_count != capacity) {
		zfs_btree_insert_leaf_impl(tree, leaf, idx, value);
		return;
	}

	/*
	 * Otherwise, we split the leaf node into two nodes. If we're not bulk
	 * inserting, each is of size (capacity / 2). If we are bulk
	 * inserting, we move a quarter of the elements to the new node so
	 * inserts into the old node don't cause immediate splitting but the
	 * tree stays relatively dense. Since the average state after a long
	 * time is a 3/4 full node, shortcutting directly to that state
	 * improves efficiency. At the end of the bulk insertion process
	 * we'll need to go through and fix up any nodes (the last leaf and
	 * its ancestors, potentially) that are below the minimum.
	 *
	 * In either case, we're left with one extra element. The leftover
	 * element will become the new dividing element between the two nodes.
	 */
	uint32_t move_count = MAX(capacity / (tree->bt_bulk ? 4 : 2), 1) - 1;
	uint32_t keep_count = capacity - move_count - 1;
	ASSERT3U(keep_count, >=, 1);
	/* If we insert on the left, move one more to keep leaves balanced. */
	if (idx < keep_count) {
		keep_count--;
		move_count++;
	}
	tree->bt_num_nodes++;
	zfs_btree_leaf_t *new_leaf = zfs_btree_leaf_alloc(tree);
	zfs_btree_hdr_t *new_hdr = &new_leaf->btl_hdr;
	new_hdr->bth_parent = leaf->btl_hdr.bth_parent;
	new_hdr->bth_first = (tree->bt_bulk ? 0 : capacity / 4) +
	    (idx >= keep_count && idx <= keep_count + move_count / 2);
	new_hdr->bth_count = move_count;
	zfs_btree_poison_node(tree, new_hdr);

	if (tree->bt_bulk != NULL && leaf == tree->bt_bulk)
		tree->bt_bulk = new_leaf;

	/* Copy the back part to the new leaf. */
	bt_transfer_leaf(tree, leaf, keep_count + 1, move_count, new_leaf, 0);

	/* We store the new separator in a buffer we control for simplicity. */
	uint8_t *buf = kmem_alloc(size, KM_SLEEP);
	bcpy(leaf->btl_elems + (leaf->btl_hdr.bth_first + keep_count) * size,
	    buf, size);

	bt_shrink_leaf(tree, leaf, keep_count, 1 + move_count);

	if (idx < keep_count) {
		/* Insert into the existing leaf. */
		zfs_btree_insert_leaf_impl(tree, leaf, idx, value);
	} else if (idx > keep_count) {
		/* Insert into the new leaf. */
		zfs_btree_insert_leaf_impl(tree, new_leaf, idx - keep_count -
		    1, value);
	} else {
		/*
		 * Insert planned separator into the new leaf, and use
		 * the new value as the new separator.
		 */
		zfs_btree_insert_leaf_impl(tree, new_leaf, 0, buf);
		bcpy(value, buf, size);
	}

	/*
	 * Now that the node is split, we need to insert the new node into its
	 * parent. This may cause further splitting, but only of core nodes.
	 */
	zfs_btree_insert_into_parent(tree, &leaf->btl_hdr, &new_leaf->btl_hdr,
	    buf);
	kmem_free(buf, size);
}

static uint32_t
zfs_btree_find_parent_idx(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
	void *buf;
	if (zfs_btree_is_core(hdr)) {
		buf = ((zfs_btree_core_t *)hdr)->btc_elems;
	} else {
		buf = ((zfs_btree_leaf_t *)hdr)->btl_elems +
		    hdr->bth_first * tree->bt_elem_size;
	}
	zfs_btree_index_t idx;
	zfs_btree_core_t *parent = hdr->bth_parent;
	VERIFY3P(tree->bt_find_in_buf(tree, parent->btc_elems,
	    parent->btc_hdr.bth_count, buf, &idx), ==, NULL);
	ASSERT(idx.bti_before);
	ASSERT3U(idx.bti_offset, <=, parent->btc_hdr.bth_count);
	ASSERT3P(parent->btc_children[idx.bti_offset], ==, hdr);
	return (idx.bti_offset);
}

/*
 * Take the b-tree out of bulk insert mode. During bulk-insert mode, some
 * nodes may violate the invariant that non-root nodes must be at least half
 * full. All nodes violating this invariant should be the last node in their
 * particular level. To correct the invariant, we take values from their left
 * neighbor until they are half full. They must have a left neighbor at their
 * level because the last node at a level is not the first node unless it's
 * the root.
 */
static void
zfs_btree_bulk_finish(zfs_btree_t *tree)
{
	ASSERT3P(tree->bt_bulk, !=, NULL);
	ASSERT3P(tree->bt_root, !=, NULL);
	zfs_btree_leaf_t *leaf = tree->bt_bulk;
	zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
	zfs_btree_core_t *parent = hdr->bth_parent;
	size_t size = tree->bt_elem_size;
	uint32_t capacity = tree->bt_leaf_cap;

	/*
	 * The invariant doesn't apply to the root node, if that's the only
	 * node in the tree we're done.
	 */
	if (parent == NULL) {
		tree->bt_bulk = NULL;
		return;
	}

	/* First, take elements to rebalance the leaf node. */
	if (hdr->bth_count < capacity / 2) {
		/*
		 * First, find the left neighbor. The simplest way to do this
		 * is to call zfs_btree_prev twice; the first time finds some
		 * ancestor of this node, and the second time finds the left
		 * neighbor. The ancestor found is the lowest common ancestor
		 * of leaf and the neighbor.
		 */
		zfs_btree_index_t idx = {
			.bti_node = hdr,
			.bti_offset = 0
		};
		VERIFY3P(zfs_btree_prev(tree, &idx, &idx), !=, NULL);
		ASSERT(zfs_btree_is_core(idx.bti_node));
		zfs_btree_core_t *common = (zfs_btree_core_t *)idx.bti_node;
		uint32_t common_idx = idx.bti_offset;

		VERIFY3P(zfs_btree_prev(tree, &idx, &idx), !=, NULL);
		ASSERT(!zfs_btree_is_core(idx.bti_node));
		zfs_btree_leaf_t *l_neighbor = (zfs_btree_leaf_t *)idx.bti_node;
		zfs_btree_hdr_t *l_hdr = idx.bti_node;
		uint32_t move_count = (capacity / 2) - hdr->bth_count;
		ASSERT3U(l_neighbor->btl_hdr.bth_count - move_count, >=,
		    capacity / 2);

		if (zfs_btree_verify_intensity >= 5) {
			for (uint32_t i = 0; i < move_count; i++) {
				zfs_btree_verify_poison_at(tree, hdr,
				    leaf->btl_hdr.bth_count + i);
			}
		}

		/* First, shift elements in leaf back. */
		bt_grow_leaf(tree, leaf, 0, move_count);

		/* Next, move the separator from the common ancestor to leaf. */
		uint8_t *separator = common->btc_elems + common_idx * size;
		uint8_t *out = leaf->btl_elems +
		    (hdr->bth_first + move_count - 1) * size;
		bcpy(separator, out, size);

		/*
		 * Now we move elements from the tail of the left neighbor to
		 * fill the remaining spots in leaf.
		 */
		bt_transfer_leaf(tree, l_neighbor, l_hdr->bth_count -
		    (move_count - 1), move_count - 1, leaf, 0);

		/*
		 * Finally, move the new last element in the left neighbor to
		 * the separator.
		 */
		bcpy(l_neighbor->btl_elems + (l_hdr->bth_first +
		    l_hdr->bth_count - move_count) * size, separator, size);

		/* Adjust the node's counts, and we're done. */
		bt_shrink_leaf(tree, l_neighbor, l_hdr->bth_count - move_count,
		    move_count);

		ASSERT3U(l_hdr->bth_count, >=, capacity / 2);
		ASSERT3U(hdr->bth_count, >=, capacity / 2);
	}

	/*
	 * Now we have to rebalance any ancestors of leaf that may also
	 * violate the invariant.
	 */
	capacity = BTREE_CORE_ELEMS;
	while (parent->btc_hdr.bth_parent != NULL) {
		zfs_btree_core_t *cur = parent;
		zfs_btree_hdr_t *hdr = &cur->btc_hdr;
		parent = hdr->bth_parent;
		/*
		 * If the invariant isn't violated, move on to the next
		 * ancestor.
		 */
		if (hdr->bth_count >= capacity / 2)
			continue;

		/*
		 * Because the smallest number of nodes we can move when
		 * splitting is 2, we never need to worry about not having a
		 * left sibling (a sibling is a neighbor with the same parent).
		 */
		uint32_t parent_idx = zfs_btree_find_parent_idx(tree, hdr);
		ASSERT3U(parent_idx, >, 0);
		zfs_btree_core_t *l_neighbor =
		    (zfs_btree_core_t *)parent->btc_children[parent_idx - 1];
		uint32_t move_count = (capacity / 2) - hdr->bth_count;
		ASSERT3U(l_neighbor->btc_hdr.bth_count - move_count, >=,
		    capacity / 2);

		if (zfs_btree_verify_intensity >= 5) {
			for (uint32_t i = 0; i < move_count; i++) {
				zfs_btree_verify_poison_at(tree, hdr,
				    hdr->bth_count + i);
			}
		}
		/* First, shift things in the right node back. */
		bt_shift_core(tree, cur, 0, hdr->bth_count, move_count,
		    BSS_TRAPEZOID, BSD_RIGHT);

		/* Next, move the separator to the right node. */
		uint8_t *separator = parent->btc_elems + ((parent_idx - 1) *
		    size);
		uint8_t *e_out = cur->btc_elems + ((move_count - 1) * size);
		bcpy(separator, e_out, size);

		/*
		 * Now, move elements and children from the left node to the
		 * right. We move one more child than elements.
		 */
		move_count--;
		uint32_t move_idx = l_neighbor->btc_hdr.bth_count - move_count;
		bt_transfer_core(tree, l_neighbor, move_idx, move_count, cur, 0,
		    BSS_TRAPEZOID);

		/*
		 * Finally, move the last element in the left node to the
		 * separator's position.
		 */
		move_idx--;
		bcpy(l_neighbor->btc_elems + move_idx * size, separator, size);

		l_neighbor->btc_hdr.bth_count -= move_count + 1;
		hdr->bth_count += move_count + 1;

		ASSERT3U(l_neighbor->btc_hdr.bth_count, >=, capacity / 2);
		ASSERT3U(hdr->bth_count, >=, capacity / 2);

		zfs_btree_poison_node(tree, &l_neighbor->btc_hdr);

		for (uint32_t i = 0; i <= hdr->bth_count; i++)
			cur->btc_children[i]->bth_parent = cur;
	}

	tree->bt_bulk = NULL;
	zfs_btree_verify(tree);
}
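/*
 * Illustrative sketch (hypothetical caller): bulk-insert mode is entered
 * automatically when the first element is added to an empty tree and persists
 * as long as every insertion lands in the last leaf. Loading a tree from
 * already-sorted input therefore stays in bulk mode for the whole load:
 *
 *	for (size_t i = 0; i < n; i++)
 *		zfs_btree_add(&bt, &sorted[i]);
 *
 * An insertion outside the last leaf, or any removal, ends bulk-insert mode
 * via zfs_btree_bulk_finish() above, restoring the usual fullness invariants.
 */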
/*
 * Insert value into tree at the location specified by where.
 */
void
zfs_btree_add_idx(zfs_btree_t *tree, const void *value,
    const zfs_btree_index_t *where)
{
	zfs_btree_index_t idx = {0};

	/* If we're not inserting in the last leaf, end bulk insert mode. */
	if (tree->bt_bulk != NULL) {
		if (where->bti_node != &tree->bt_bulk->btl_hdr) {
			zfs_btree_bulk_finish(tree);
			VERIFY3P(zfs_btree_find(tree, value, &idx), ==, NULL);
			where = &idx;
		}
	}

	tree->bt_num_elems++;
	/*
	 * If this is the first element in the tree, create a leaf root node
	 * and add the value to it.
	 */
	if (where->bti_node == NULL) {
		ASSERT3U(tree->bt_num_elems, ==, 1);
		ASSERT3S(tree->bt_height, ==, -1);
		ASSERT0P(tree->bt_root);
		ASSERT0(where->bti_offset);

		tree->bt_num_nodes++;
		zfs_btree_leaf_t *leaf = zfs_btree_leaf_alloc(tree);
		tree->bt_root = &leaf->btl_hdr;
		tree->bt_height++;

		zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
		hdr->bth_parent = NULL;
		hdr->bth_first = 0;
		hdr->bth_count = 0;
		zfs_btree_poison_node(tree, hdr);

		zfs_btree_insert_into_leaf(tree, leaf, value, 0);
		tree->bt_bulk = leaf;
	} else if (!zfs_btree_is_core(where->bti_node)) {
		/*
		 * If we're inserting into a leaf, go directly to the helper
		 * function.
		 */
		zfs_btree_insert_into_leaf(tree,
		    (zfs_btree_leaf_t *)where->bti_node, value,
		    where->bti_offset);
	} else {
		/*
		 * If we're inserting into a core node, we can't just shift
		 * the existing element in that slot in the same node without
		 * breaking our ordering invariants. Instead we place the new
		 * value in the node at that spot and then insert the old
		 * separator into the first slot in the subtree to the right.
		 */
		zfs_btree_core_t *node = (zfs_btree_core_t *)where->bti_node;

		/*
		 * We can ignore bti_before, because either way the value
		 * should end up in bti_offset.
		 */
		uint32_t off = where->bti_offset;
		zfs_btree_hdr_t *subtree = node->btc_children[off + 1];
		size_t size = tree->bt_elem_size;
		uint8_t *buf = kmem_alloc(size, KM_SLEEP);
		bcpy(node->btc_elems + off * size, buf, size);
		bcpy(value, node->btc_elems + off * size, size);

		/*
		 * Find the first slot in the subtree to the right, insert
		 * there.
		 */
		zfs_btree_index_t new_idx;
		VERIFY3P(zfs_btree_first_helper(tree, subtree, &new_idx), !=,
		    NULL);
		ASSERT0(new_idx.bti_offset);
		ASSERT(!zfs_btree_is_core(new_idx.bti_node));
		zfs_btree_insert_into_leaf(tree,
		    (zfs_btree_leaf_t *)new_idx.bti_node, buf, 0);
		kmem_free(buf, size);
	}
	zfs_btree_verify(tree);
}

/*
 * Return the first element in the tree, and put its location in where if
 * non-null.
 */
void *
zfs_btree_first(zfs_btree_t *tree, zfs_btree_index_t *where)
{
	if (tree->bt_height == -1) {
		ASSERT0(tree->bt_num_elems);
		return (NULL);
	}
	return (zfs_btree_first_helper(tree, tree->bt_root, where));
}

/*
 * Find the last element in the subtree rooted at hdr, return its value and
 * put its location in where if non-null.
 */
static void *
zfs_btree_last_helper(zfs_btree_t *btree, zfs_btree_hdr_t *hdr,
    zfs_btree_index_t *where)
{
	zfs_btree_hdr_t *node;

	for (node = hdr; zfs_btree_is_core(node); node =
	    ((zfs_btree_core_t *)node)->btc_children[node->bth_count])
		;

	zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)node;
	if (where != NULL) {
		where->bti_node = node;
		where->bti_offset = node->bth_count - 1;
		where->bti_before = B_FALSE;
	}
	return (leaf->btl_elems + (node->bth_first + node->bth_count - 1) *
	    btree->bt_elem_size);
}

/*
 * Return the last element in the tree, and put its location in where if
 * non-null.
 */
void *
zfs_btree_last(zfs_btree_t *tree, zfs_btree_index_t *where)
{
	if (tree->bt_height == -1) {
		ASSERT0(tree->bt_num_elems);
		return (NULL);
	}
	return (zfs_btree_last_helper(tree, tree->bt_root, where));
}

/*
 * This function contains the logic to find the next node in the tree. A
 * helper function is used because there are multiple internal consumers of
 * this logic. The done_func is used by zfs_btree_destroy_nodes to clean up
 * each node after we've finished with it.
 */
static void *
zfs_btree_next_helper(zfs_btree_t *tree, const zfs_btree_index_t *idx,
    zfs_btree_index_t *out_idx,
    void (*done_func)(zfs_btree_t *, zfs_btree_hdr_t *))
{
	if (idx->bti_node == NULL) {
		ASSERT3S(tree->bt_height, ==, -1);
		return (NULL);
	}

	uint32_t offset = idx->bti_offset;
	if (!zfs_btree_is_core(idx->bti_node)) {
		/*
		 * When finding the next element of an element in a leaf,
		 * there are two cases. If the element isn't the last one in
		 * the leaf, we just return the next element in the leaf.
		 * Otherwise, we need to traverse up our parents until we find
		 * one where our ancestor isn't the last child of its parent.
		 * Once we do, the next element is the separator after our
		 * ancestor in its parent.
		 */
		zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)idx->bti_node;
		uint32_t new_off = offset + (idx->bti_before ? 0 : 1);
		if (leaf->btl_hdr.bth_count > new_off) {
			out_idx->bti_node = &leaf->btl_hdr;
			out_idx->bti_offset = new_off;
			out_idx->bti_before = B_FALSE;
			return (leaf->btl_elems + (leaf->btl_hdr.bth_first +
			    new_off) * tree->bt_elem_size);
		}

		zfs_btree_hdr_t *prev = &leaf->btl_hdr;
		for (zfs_btree_core_t *node = leaf->btl_hdr.bth_parent;
		    node != NULL; node = node->btc_hdr.bth_parent) {
			zfs_btree_hdr_t *hdr = &node->btc_hdr;
			ASSERT(zfs_btree_is_core(hdr));
			uint32_t i = zfs_btree_find_parent_idx(tree, prev);
			if (done_func != NULL)
				done_func(tree, prev);
			if (i == hdr->bth_count) {
				prev = hdr;
				continue;
			}
			out_idx->bti_node = hdr;
			out_idx->bti_offset = i;
			out_idx->bti_before = B_FALSE;
			return (node->btc_elems + i * tree->bt_elem_size);
		}
		if (done_func != NULL)
			done_func(tree, prev);
		/*
		 * We've traversed all the way up and been at the end of the
		 * node every time, so this was the last element in the tree.
		 */
		return (NULL);
	}

	/* If we were before an element in a core node, return that element. */
	ASSERT(zfs_btree_is_core(idx->bti_node));
	zfs_btree_core_t *node = (zfs_btree_core_t *)idx->bti_node;
	if (idx->bti_before) {
		out_idx->bti_before = B_FALSE;
		return (node->btc_elems + offset * tree->bt_elem_size);
	}

	/*
	 * The next element from one in a core node is the first element in
	 * the subtree just to the right of the separator.
	 */
	zfs_btree_hdr_t *child = node->btc_children[offset + 1];
	return (zfs_btree_first_helper(tree, child, out_idx));
}

/*
 * Return the next valued node in the tree. The same address can be safely
 * passed for idx and out_idx.
 */
void *
zfs_btree_next(zfs_btree_t *tree, const zfs_btree_index_t *idx,
    zfs_btree_index_t *out_idx)
{
	return (zfs_btree_next_helper(tree, idx, out_idx, NULL));
}
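/*
 * Illustrative sketch (hypothetical caller): zfs_btree_first() and
 * zfs_btree_next() combine into the usual in-order walk; the same index can
 * be reused for each step:
 *
 *	zfs_btree_index_t idx;
 *	for (ex_ent_t *e = zfs_btree_first(&bt, &idx); e != NULL;
 *	    e = zfs_btree_next(&bt, &idx, &idx))
 *		process(e);
 */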
/*
 * Return the previous valued node in the tree. The same address can be safely
 * passed for idx and out_idx.
 */
void *
zfs_btree_prev(zfs_btree_t *tree, const zfs_btree_index_t *idx,
    zfs_btree_index_t *out_idx)
{
	if (idx->bti_node == NULL) {
		ASSERT3S(tree->bt_height, ==, -1);
		return (NULL);
	}

	uint32_t offset = idx->bti_offset;
	if (!zfs_btree_is_core(idx->bti_node)) {
		/*
		 * When finding the previous element of an element in a leaf,
		 * there are two cases. If the element isn't the first one in
		 * the leaf, we just return the previous element in the leaf.
		 * Otherwise, we need to traverse up our parents until we find
		 * one where our previous ancestor isn't the first child. Once
		 * we do, the previous element is the separator after our
		 * previous ancestor.
		 */
		zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)idx->bti_node;
		if (offset != 0) {
			out_idx->bti_node = &leaf->btl_hdr;
			out_idx->bti_offset = offset - 1;
			out_idx->bti_before = B_FALSE;
			return (leaf->btl_elems + (leaf->btl_hdr.bth_first +
			    offset - 1) * tree->bt_elem_size);
		}
		zfs_btree_hdr_t *prev = &leaf->btl_hdr;
		for (zfs_btree_core_t *node = leaf->btl_hdr.bth_parent;
		    node != NULL; node = node->btc_hdr.bth_parent) {
			zfs_btree_hdr_t *hdr = &node->btc_hdr;
			ASSERT(zfs_btree_is_core(hdr));
			uint32_t i = zfs_btree_find_parent_idx(tree, prev);
			if (i == 0) {
				prev = hdr;
				continue;
			}
			out_idx->bti_node = hdr;
			out_idx->bti_offset = i - 1;
			out_idx->bti_before = B_FALSE;
			return (node->btc_elems + (i - 1) * tree->bt_elem_size);
		}
		/*
		 * We've traversed all the way up and been at the start of the
		 * node every time, so this was the first node in the tree.
		 */
		return (NULL);
	}

	/*
	 * The previous element from one in a core node is the last element in
	 * the subtree just to the left of the separator.
	 */
	ASSERT(zfs_btree_is_core(idx->bti_node));
	zfs_btree_core_t *node = (zfs_btree_core_t *)idx->bti_node;
	zfs_btree_hdr_t *child = node->btc_children[offset];
	return (zfs_btree_last_helper(tree, child, out_idx));
}

/*
 * Get the value at the provided index in the tree.
 *
 * Note that the value returned from this function can be mutated, but only
 * if it will not change the ordering of the element with respect to any other
 * elements that could be in the tree.
 */
void *
zfs_btree_get(zfs_btree_t *tree, zfs_btree_index_t *idx)
{
	ASSERT(!idx->bti_before);
	size_t size = tree->bt_elem_size;
	if (!zfs_btree_is_core(idx->bti_node)) {
		zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)idx->bti_node;
		return (leaf->btl_elems + (leaf->btl_hdr.bth_first +
		    idx->bti_offset) * size);
	}
	zfs_btree_core_t *node = (zfs_btree_core_t *)idx->bti_node;
	return (node->btc_elems + idx->bti_offset * size);
}

/* Add the given value to the tree. Must not already be in the tree. */
void
zfs_btree_add(zfs_btree_t *tree, const void *node)
{
	zfs_btree_index_t where = {0};
	VERIFY3P(zfs_btree_find(tree, node, &where), ==, NULL);
	zfs_btree_add_idx(tree, node, &where);
}
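/*
 * Illustrative sketch (hypothetical caller): a conditional removal can reuse
 * the index from zfs_btree_find() with zfs_btree_remove_idx() (defined below)
 * to avoid the second lookup that zfs_btree_remove() would otherwise perform:
 *
 *	zfs_btree_index_t where;
 *	if (zfs_btree_find(&bt, &key, &where) != NULL)
 *		zfs_btree_remove_idx(&bt, &where);
 */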
*/1410static void1411zfs_btree_node_destroy(zfs_btree_t *tree, zfs_btree_hdr_t *node)1412{1413tree->bt_num_nodes--;1414if (!zfs_btree_is_core(node)) {1415zfs_btree_leaf_free(tree, node);1416} else {1417kmem_free(node, sizeof (zfs_btree_core_t) +1418BTREE_CORE_ELEMS * tree->bt_elem_size);1419}1420}14211422/*1423* Remove the rm_hdr and the separator to its left from the parent node. The1424* buffer that rm_hdr was stored in may already be freed, so its contents1425* cannot be accessed.1426*/1427static void1428zfs_btree_remove_from_node(zfs_btree_t *tree, zfs_btree_core_t *node,1429zfs_btree_hdr_t *rm_hdr)1430{1431size_t size = tree->bt_elem_size;1432uint32_t min_count = (BTREE_CORE_ELEMS / 2) - 1;1433zfs_btree_hdr_t *hdr = &node->btc_hdr;1434/*1435* If the node is the root node and rm_hdr is one of two children,1436* promote the other child to the root.1437*/1438if (hdr->bth_parent == NULL && hdr->bth_count <= 1) {1439ASSERT3U(hdr->bth_count, ==, 1);1440ASSERT3P(tree->bt_root, ==, node);1441ASSERT3P(node->btc_children[1], ==, rm_hdr);1442tree->bt_root = node->btc_children[0];1443node->btc_children[0]->bth_parent = NULL;1444zfs_btree_node_destroy(tree, hdr);1445tree->bt_height--;1446return;1447}14481449uint32_t idx;1450for (idx = 0; idx <= hdr->bth_count; idx++) {1451if (node->btc_children[idx] == rm_hdr)1452break;1453}1454ASSERT3U(idx, <=, hdr->bth_count);14551456/*1457* If the node is the root or it has more than the minimum number of1458* children, just remove the child and separator, and return.1459*/1460if (hdr->bth_parent == NULL ||1461hdr->bth_count > min_count) {1462/*1463* Shift the element and children to the right of rm_hdr to1464* the left by one spot.1465*/1466bt_shift_core_left(tree, node, idx, hdr->bth_count - idx,1467BSS_PARALLELOGRAM);1468hdr->bth_count--;1469zfs_btree_poison_node_at(tree, hdr, hdr->bth_count, 1);1470return;1471}14721473ASSERT3U(hdr->bth_count, ==, min_count);14741475/*1476* Now we try to take a node from a neighbor. We check left, then1477* right. If the neighbor exists and has more than the minimum number1478* of elements, we move the separator between us and them to our1479* node, move their closest element (last for left, first for right)1480* to the separator, and move their closest child to our node. Along1481* the way we need to collapse the gap made by idx, and (for our right1482* neighbor) the gap made by removing their first element and child.1483*1484* Note: this logic currently doesn't support taking from a neighbor1485* that isn't a sibling (i.e. a neighbor with a different1486* parent). This isn't critical functionality, but may be worth1487* implementing in the future for completeness' sake.1488*/1489zfs_btree_core_t *parent = hdr->bth_parent;1490uint32_t parent_idx = zfs_btree_find_parent_idx(tree, hdr);14911492zfs_btree_hdr_t *l_hdr = (parent_idx == 0 ? NULL :1493parent->btc_children[parent_idx - 1]);1494if (l_hdr != NULL && l_hdr->bth_count > min_count) {1495/* We can take a node from the left neighbor. 
*/1496ASSERT(zfs_btree_is_core(l_hdr));1497zfs_btree_core_t *neighbor = (zfs_btree_core_t *)l_hdr;14981499/*1500* Start by shifting the elements and children in the current1501* node to the right by one spot.1502*/1503bt_shift_core_right(tree, node, 0, idx - 1, BSS_TRAPEZOID);15041505/*1506* Move the separator between node and neighbor to the first1507* element slot in the current node.1508*/1509uint8_t *separator = parent->btc_elems + (parent_idx - 1) *1510size;1511bcpy(separator, node->btc_elems, size);15121513/* Move the last child of neighbor to our first child slot. */1514node->btc_children[0] =1515neighbor->btc_children[l_hdr->bth_count];1516node->btc_children[0]->bth_parent = node;15171518/* Move the last element of neighbor to the separator spot. */1519uint8_t *take_elem = neighbor->btc_elems +1520(l_hdr->bth_count - 1) * size;1521bcpy(take_elem, separator, size);1522l_hdr->bth_count--;1523zfs_btree_poison_node_at(tree, l_hdr, l_hdr->bth_count, 1);1524return;1525}15261527zfs_btree_hdr_t *r_hdr = (parent_idx == parent->btc_hdr.bth_count ?1528NULL : parent->btc_children[parent_idx + 1]);1529if (r_hdr != NULL && r_hdr->bth_count > min_count) {1530/* We can take a node from the right neighbor. */1531ASSERT(zfs_btree_is_core(r_hdr));1532zfs_btree_core_t *neighbor = (zfs_btree_core_t *)r_hdr;15331534/*1535* Shift elements in node left by one spot to overwrite rm_hdr1536* and the separator before it.1537*/1538bt_shift_core_left(tree, node, idx, hdr->bth_count - idx,1539BSS_PARALLELOGRAM);15401541/*1542* Move the separator between node and neighbor to the last1543* element spot in node.1544*/1545uint8_t *separator = parent->btc_elems + parent_idx * size;1546bcpy(separator, node->btc_elems + (hdr->bth_count - 1) * size,1547size);15481549/*1550* Move the first child of neighbor to the last child spot in1551* node.1552*/1553node->btc_children[hdr->bth_count] = neighbor->btc_children[0];1554node->btc_children[hdr->bth_count]->bth_parent = node;15551556/* Move the first element of neighbor to the separator spot. */1557uint8_t *take_elem = neighbor->btc_elems;1558bcpy(take_elem, separator, size);1559r_hdr->bth_count--;15601561/*1562* Shift the elements and children of neighbor to cover the1563* stolen elements.1564*/1565bt_shift_core_left(tree, neighbor, 1, r_hdr->bth_count,1566BSS_TRAPEZOID);1567zfs_btree_poison_node_at(tree, r_hdr, r_hdr->bth_count, 1);1568return;1569}15701571/*1572* In this case, neither of our neighbors can spare an element, so we1573* need to merge with one of them. We prefer the left one,1574* arbitrarily. Move the separator into the leftmost merging node1575* (which may be us or the left neighbor), and then move the right1576* merging node's elements. Once that's done, we go back and delete1577* the element we're removing. Finally, go into the parent and delete1578* the right merging node and the separator. 
This may cause further1579* merging.1580*/1581zfs_btree_hdr_t *new_rm_hdr, *keep_hdr;1582uint32_t new_idx = idx;1583if (l_hdr != NULL) {1584keep_hdr = l_hdr;1585new_rm_hdr = hdr;1586new_idx += keep_hdr->bth_count + 1;1587} else {1588ASSERT3P(r_hdr, !=, NULL);1589keep_hdr = hdr;1590new_rm_hdr = r_hdr;1591parent_idx++;1592}15931594ASSERT(zfs_btree_is_core(keep_hdr));1595ASSERT(zfs_btree_is_core(new_rm_hdr));15961597zfs_btree_core_t *keep = (zfs_btree_core_t *)keep_hdr;1598zfs_btree_core_t *rm = (zfs_btree_core_t *)new_rm_hdr;15991600if (zfs_btree_verify_intensity >= 5) {1601for (uint32_t i = 0; i < new_rm_hdr->bth_count + 1; i++) {1602zfs_btree_verify_poison_at(tree, keep_hdr,1603keep_hdr->bth_count + i);1604}1605}16061607/* Move the separator into the left node. */1608uint8_t *e_out = keep->btc_elems + keep_hdr->bth_count * size;1609uint8_t *separator = parent->btc_elems + (parent_idx - 1) *1610size;1611bcpy(separator, e_out, size);1612keep_hdr->bth_count++;16131614/* Move all our elements and children into the left node. */1615bt_transfer_core(tree, rm, 0, new_rm_hdr->bth_count, keep,1616keep_hdr->bth_count, BSS_TRAPEZOID);16171618uint32_t old_count = keep_hdr->bth_count;16191620/* Update bookkeeping */1621keep_hdr->bth_count += new_rm_hdr->bth_count;1622ASSERT3U(keep_hdr->bth_count, ==, (min_count * 2) + 1);16231624/*1625* Shift the element and children to the right of rm_hdr to1626* the left by one spot.1627*/1628ASSERT3P(keep->btc_children[new_idx], ==, rm_hdr);1629bt_shift_core_left(tree, keep, new_idx, keep_hdr->bth_count - new_idx,1630BSS_PARALLELOGRAM);1631keep_hdr->bth_count--;16321633/* Reparent all our children to point to the left node. */1634zfs_btree_hdr_t **new_start = keep->btc_children +1635old_count - 1;1636for (uint32_t i = 0; i < new_rm_hdr->bth_count + 1; i++)1637new_start[i]->bth_parent = keep;1638for (uint32_t i = 0; i <= keep_hdr->bth_count; i++) {1639ASSERT3P(keep->btc_children[i]->bth_parent, ==, keep);1640ASSERT3P(keep->btc_children[i], !=, rm_hdr);1641}1642zfs_btree_poison_node_at(tree, keep_hdr, keep_hdr->bth_count, 1);16431644new_rm_hdr->bth_count = 0;1645zfs_btree_remove_from_node(tree, parent, new_rm_hdr);1646zfs_btree_node_destroy(tree, new_rm_hdr);1647}16481649/* Remove the element at the specific location. */1650void1651zfs_btree_remove_idx(zfs_btree_t *tree, zfs_btree_index_t *where)1652{1653size_t size = tree->bt_elem_size;1654zfs_btree_hdr_t *hdr = where->bti_node;1655uint32_t idx = where->bti_offset;16561657ASSERT(!where->bti_before);1658if (tree->bt_bulk != NULL) {1659/*1660* Leave bulk insert mode. Note that our index would be1661* invalid after we correct the tree, so we copy the value1662* we're planning to remove and find it again after1663* bulk_finish.1664*/1665uint8_t *value = zfs_btree_get(tree, where);1666uint8_t *tmp = kmem_alloc(size, KM_SLEEP);1667bcpy(value, tmp, size);1668zfs_btree_bulk_finish(tree);1669VERIFY3P(zfs_btree_find(tree, tmp, where), !=, NULL);1670kmem_free(tmp, size);1671hdr = where->bti_node;1672idx = where->bti_offset;1673}16741675tree->bt_num_elems--;1676/*1677* If the element happens to be in a core node, we move a leaf node's1678* element into its place and then remove the leaf node element. 
This1679* makes the rebalance logic not need to be recursive both upwards and1680* downwards.1681*/1682if (zfs_btree_is_core(hdr)) {1683zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;1684zfs_btree_hdr_t *left_subtree = node->btc_children[idx];1685void *new_value = zfs_btree_last_helper(tree, left_subtree,1686where);1687ASSERT3P(new_value, !=, NULL);16881689bcpy(new_value, node->btc_elems + idx * size, size);16901691hdr = where->bti_node;1692idx = where->bti_offset;1693ASSERT(!where->bti_before);1694}16951696/*1697* First, we'll update the leaf's metadata. Then, we shift any1698* elements after the idx to the left. After that, we rebalance if1699* needed.1700*/1701ASSERT(!zfs_btree_is_core(hdr));1702zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;1703ASSERT3U(hdr->bth_count, >, 0);17041705uint32_t min_count = (tree->bt_leaf_cap / 2) - 1;17061707/*1708* If we're over the minimum size or this is the root, just overwrite1709* the value and return.1710*/1711if (hdr->bth_count > min_count || hdr->bth_parent == NULL) {1712bt_shrink_leaf(tree, leaf, idx, 1);1713if (hdr->bth_parent == NULL) {1714ASSERT0(tree->bt_height);1715if (hdr->bth_count == 0) {1716tree->bt_root = NULL;1717tree->bt_height--;1718zfs_btree_node_destroy(tree, &leaf->btl_hdr);1719}1720}1721zfs_btree_verify(tree);1722return;1723}1724ASSERT3U(hdr->bth_count, ==, min_count);17251726/*1727* Now we try to take a node from a sibling. We check left, then1728* right. If they exist and have more than the minimum number of1729* elements, we move the separator between us and them to our node1730* and move their closest element (last for left, first for right) to1731* the separator. Along the way we need to collapse the gap made by1732* idx, and (for our right neighbor) the gap made by removing their1733* first element.1734*1735* Note: this logic currently doesn't support taking from a neighbor1736* that isn't a sibling. This isn't critical functionality, but may be1737* worth implementing in the future for completeness' sake.1738*/1739zfs_btree_core_t *parent = hdr->bth_parent;1740uint32_t parent_idx = zfs_btree_find_parent_idx(tree, hdr);17411742zfs_btree_hdr_t *l_hdr = (parent_idx == 0 ? NULL :1743parent->btc_children[parent_idx - 1]);1744if (l_hdr != NULL && l_hdr->bth_count > min_count) {1745/* We can take a node from the left neighbor. */1746ASSERT(!zfs_btree_is_core(l_hdr));1747zfs_btree_leaf_t *neighbor = (zfs_btree_leaf_t *)l_hdr;17481749/*1750* Move our elements back by one spot to make room for the1751* stolen element and overwrite the element being removed.1752*/1753bt_shift_leaf(tree, leaf, 0, idx, 1, BSD_RIGHT);17541755/* Move the separator to our first spot. */1756uint8_t *separator = parent->btc_elems + (parent_idx - 1) *1757size;1758bcpy(separator, leaf->btl_elems + hdr->bth_first * size, size);17591760/* Move our neighbor's last element to the separator. */1761uint8_t *take_elem = neighbor->btl_elems +1762(l_hdr->bth_first + l_hdr->bth_count - 1) * size;1763bcpy(take_elem, separator, size);17641765/* Delete our neighbor's last element. */1766bt_shrink_leaf(tree, neighbor, l_hdr->bth_count - 1, 1);1767zfs_btree_verify(tree);1768return;1769}17701771zfs_btree_hdr_t *r_hdr = (parent_idx == parent->btc_hdr.bth_count ?1772NULL : parent->btc_children[parent_idx + 1]);1773if (r_hdr != NULL && r_hdr->bth_count > min_count) {1774/* We can take a node from the right neighbor. 
                ASSERT(!zfs_btree_is_core(r_hdr));
                zfs_btree_leaf_t *neighbor = (zfs_btree_leaf_t *)r_hdr;

                /*
                 * Move our elements after the element being removed forwards
                 * by one spot to make room for the stolen element and
                 * overwrite the element being removed.
                 */
                bt_shift_leaf(tree, leaf, idx + 1, hdr->bth_count - idx - 1,
                    1, BSD_LEFT);

                /* Move the separator to our last spot. */
                uint8_t *separator = parent->btc_elems + parent_idx * size;
                bcpy(separator, leaf->btl_elems + (hdr->bth_first +
                    hdr->bth_count - 1) * size, size);

                /* Move our neighbor's first element to the separator. */
                uint8_t *take_elem = neighbor->btl_elems +
                    r_hdr->bth_first * size;
                bcpy(take_elem, separator, size);

                /* Delete our neighbor's first element. */
                bt_shrink_leaf(tree, neighbor, 0, 1);
                zfs_btree_verify(tree);
                return;
        }

        /*
         * In this case, neither of our neighbors can spare an element, so we
         * need to merge with one of them. We prefer the left one, arbitrarily.
         * After remove, we move the separator into the leftmost merging node
         * (which may be us or the left neighbor), and then move the right
         * merging node's elements. Once that's done, we go back and delete
         * the element we're removing. Finally, go into the parent and delete
         * the right merging node and the separator. This may cause further
         * merging.
         */
        zfs_btree_hdr_t *rm_hdr, *k_hdr;
        if (l_hdr != NULL) {
                k_hdr = l_hdr;
                rm_hdr = hdr;
        } else {
                ASSERT3P(r_hdr, !=, NULL);
                k_hdr = hdr;
                rm_hdr = r_hdr;
                parent_idx++;
        }
        ASSERT(!zfs_btree_is_core(k_hdr));
        ASSERT(!zfs_btree_is_core(rm_hdr));
        ASSERT3U(k_hdr->bth_count, ==, min_count);
        ASSERT3U(rm_hdr->bth_count, ==, min_count);
        zfs_btree_leaf_t *keep = (zfs_btree_leaf_t *)k_hdr;
        zfs_btree_leaf_t *rm = (zfs_btree_leaf_t *)rm_hdr;

        if (zfs_btree_verify_intensity >= 5) {
                for (uint32_t i = 0; i < rm_hdr->bth_count + 1; i++) {
                        zfs_btree_verify_poison_at(tree, k_hdr,
                            k_hdr->bth_count + i);
                }
        }

        /*
         * Remove the value from the node. It will go below the minimum,
         * but we'll fix it in no time.
         */
        bt_shrink_leaf(tree, leaf, idx, 1);

        /* Prepare space for elements to be moved from the right. */
        uint32_t k_count = k_hdr->bth_count;
        bt_grow_leaf(tree, keep, k_count, 1 + rm_hdr->bth_count);
        ASSERT3U(k_hdr->bth_count, ==, min_count * 2);

        /* Move the separator into the first open spot. */
        uint8_t *out = keep->btl_elems + (k_hdr->bth_first + k_count) * size;
        uint8_t *separator = parent->btc_elems + (parent_idx - 1) * size;
        bcpy(separator, out, size);

        /* Move our elements to the left neighbor. */
        bt_transfer_leaf(tree, rm, 0, rm_hdr->bth_count, keep, k_count + 1);

        /* Remove the emptied node from the parent. */
        zfs_btree_remove_from_node(tree, parent, rm_hdr);
        zfs_btree_node_destroy(tree, rm_hdr);
        zfs_btree_verify(tree);
}

/* Remove the given value from the tree. */
void
zfs_btree_remove(zfs_btree_t *tree, const void *value)
{
        zfs_btree_index_t where = {0};
        VERIFY3P(zfs_btree_find(tree, value, &where), !=, NULL);
        zfs_btree_remove_idx(tree, &where);
}
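
/*
 * Example (an illustrative sketch; my_data_t, key_of_interest, and
 * my_cleanup() are hypothetical): zfs_btree_remove() performs its own lookup
 * and VERIFYs that the value is present. A caller that also needs the element
 * itself can look it up once and remove by index instead:
 *
 *      my_data_t what = { .key = key_of_interest };
 *      zfs_btree_index_t where = {0};
 *      my_data_t *found = zfs_btree_find(tree, &what, &where);
 *
 *      if (found != NULL) {
 *              my_cleanup(found);
 *              zfs_btree_remove_idx(tree, &where);
 *      }
 */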

/* Return the number of elements in the tree. */
ulong_t
zfs_btree_numnodes(zfs_btree_t *tree)
{
        return (tree->bt_num_elems);
}

/*
 * This function is used to visit all the elements in the tree before
 * destroying the tree. This allows the calling code to perform any cleanup it
 * needs to do. This is more efficient than just removing the first element
 * over and over, because it avoids all the rebalancing. Once the
 * destroy_nodes() function has been called, no other btree operations are
 * valid until it returns NULL, at which point the only valid operation is
 * zfs_btree_destroy().
 *
 * example:
 *
 *      zfs_btree_index_t *cookie = NULL;
 *      my_data_t *node;
 *
 *      while ((node = zfs_btree_destroy_nodes(tree, &cookie)) != NULL)
 *              free(node->ptr);
 *      zfs_btree_destroy(tree);
 *
 */
void *
zfs_btree_destroy_nodes(zfs_btree_t *tree, zfs_btree_index_t **cookie)
{
        if (*cookie == NULL) {
                if (tree->bt_height == -1)
                        return (NULL);
                *cookie = kmem_alloc(sizeof (**cookie), KM_SLEEP);
                return (zfs_btree_first(tree, *cookie));
        }

        void *rval = zfs_btree_next_helper(tree, *cookie, *cookie,
            zfs_btree_node_destroy);
        if (rval == NULL) {
                tree->bt_root = NULL;
                tree->bt_height = -1;
                tree->bt_num_elems = 0;
                kmem_free(*cookie, sizeof (**cookie));
                tree->bt_bulk = NULL;
        }
        return (rval);
}

static void
zfs_btree_clear_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
        if (zfs_btree_is_core(hdr)) {
                zfs_btree_core_t *btc = (zfs_btree_core_t *)hdr;
                for (uint32_t i = 0; i <= hdr->bth_count; i++)
                        zfs_btree_clear_helper(tree, btc->btc_children[i]);
        }

        zfs_btree_node_destroy(tree, hdr);
}

void
zfs_btree_clear(zfs_btree_t *tree)
{
        if (tree->bt_root == NULL) {
                ASSERT0(tree->bt_num_elems);
                return;
        }

        zfs_btree_clear_helper(tree, tree->bt_root);
        tree->bt_num_elems = 0;
        tree->bt_root = NULL;
        tree->bt_num_nodes = 0;
        tree->bt_height = -1;
        tree->bt_bulk = NULL;
}

void
zfs_btree_destroy(zfs_btree_t *tree)
{
        ASSERT0(tree->bt_num_elems);
        ASSERT0P(tree->bt_root);
}
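
/*
 * Example (an illustrative sketch): a tree must be empty before
 * zfs_btree_destroy() is called. When the elements hold no external state,
 * zfs_btree_clear() is the simplest way to get there; otherwise, the
 * zfs_btree_destroy_nodes() loop shown above lets the caller release
 * per-element resources while the nodes are torn down.
 *
 *      zfs_btree_clear(tree);
 *      zfs_btree_destroy(tree);
 */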

/* Verify that every child of this node has the correct parent pointer. */
static void
zfs_btree_verify_pointers_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
        if (!zfs_btree_is_core(hdr))
                return;

        zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
        for (uint32_t i = 0; i <= hdr->bth_count; i++) {
                VERIFY3P(node->btc_children[i]->bth_parent, ==, hdr);
                zfs_btree_verify_pointers_helper(tree, node->btc_children[i]);
        }
}

/* Verify that every node has the correct parent pointer. */
static void
zfs_btree_verify_pointers(zfs_btree_t *tree)
{
        if (tree->bt_height == -1) {
                VERIFY0P(tree->bt_root);
                return;
        }
        VERIFY0P(tree->bt_root->bth_parent);
        zfs_btree_verify_pointers_helper(tree, tree->bt_root);
}

/*
 * Verify that the current node and all of its children satisfy the count
 * invariants, and return the total count of the subtree rooted at this node.
 */
static uint64_t
zfs_btree_verify_counts_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
        if (!zfs_btree_is_core(hdr)) {
                if (tree->bt_root != hdr && tree->bt_bulk &&
                    hdr != &tree->bt_bulk->btl_hdr) {
                        VERIFY3U(hdr->bth_count, >=, tree->bt_leaf_cap / 2 - 1);
                }

                return (hdr->bth_count);
        } else {
                zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
                uint64_t ret = hdr->bth_count;
                if (tree->bt_root != hdr && tree->bt_bulk == NULL)
                        VERIFY3U(hdr->bth_count, >=, BTREE_CORE_ELEMS / 2 - 1);
                for (uint32_t i = 0; i <= hdr->bth_count; i++) {
                        ret += zfs_btree_verify_counts_helper(tree,
                            node->btc_children[i]);
                }

                return (ret);
        }
}

/*
 * Verify that all nodes satisfy the invariants and that the total number of
 * elements is correct.
 */
static void
zfs_btree_verify_counts(zfs_btree_t *tree)
{
        EQUIV(tree->bt_num_elems == 0, tree->bt_height == -1);
        if (tree->bt_height == -1) {
                return;
        }
        VERIFY3U(zfs_btree_verify_counts_helper(tree, tree->bt_root), ==,
            tree->bt_num_elems);
}
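
/*
 * As a worked illustration of the minimum-fill invariant (the capacity here
 * is hypothetical): if bt_leaf_cap were 512, a non-root leaf would be
 * expected to hold at least 512 / 2 - 1 = 255 elements, matching the
 * min_count computed in zfs_btree_remove_idx(); non-root core nodes are held
 * to BTREE_CORE_ELEMS / 2 - 1 separators in the same way.
 */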

/*
 * Check that the subtree rooted at this node has a uniform height. Returns
 * the number of nodes under this node, to help verify bt_num_nodes.
 */
static uint64_t
zfs_btree_verify_height_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr,
    int32_t height)
{
        if (!zfs_btree_is_core(hdr)) {
                VERIFY0(height);
                return (1);
        }

        zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
        uint64_t ret = 1;
        for (uint32_t i = 0; i <= hdr->bth_count; i++) {
                ret += zfs_btree_verify_height_helper(tree,
                    node->btc_children[i], height - 1);
        }
        return (ret);
}

/*
 * Check that the tree rooted at this node has a uniform height, and that the
 * bt_height in the tree is correct.
 */
static void
zfs_btree_verify_height(zfs_btree_t *tree)
{
        EQUIV(tree->bt_height == -1, tree->bt_root == NULL);
        if (tree->bt_height == -1) {
                return;
        }

        VERIFY3U(zfs_btree_verify_height_helper(tree, tree->bt_root,
            tree->bt_height), ==, tree->bt_num_nodes);
}

/*
 * Check that the elements in this node are sorted, and that if this is a core
 * node, the separators are properly between the subtrees they separate and
 * that the children also satisfy this requirement.
 */
static void
zfs_btree_verify_order_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
        size_t size = tree->bt_elem_size;
        if (!zfs_btree_is_core(hdr)) {
                zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
                for (uint32_t i = 1; i < hdr->bth_count; i++) {
                        VERIFY3S(tree->bt_compar(leaf->btl_elems +
                            (hdr->bth_first + i - 1) * size,
                            leaf->btl_elems +
                            (hdr->bth_first + i) * size), ==, -1);
                }
                return;
        }

        zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
        for (uint32_t i = 1; i < hdr->bth_count; i++) {
                VERIFY3S(tree->bt_compar(node->btc_elems + (i - 1) * size,
                    node->btc_elems + i * size), ==, -1);
        }
        for (uint32_t i = 0; i < hdr->bth_count; i++) {
                uint8_t *left_child_last = NULL;
                zfs_btree_hdr_t *left_child_hdr = node->btc_children[i];
                if (zfs_btree_is_core(left_child_hdr)) {
                        zfs_btree_core_t *left_child =
                            (zfs_btree_core_t *)left_child_hdr;
                        left_child_last = left_child->btc_elems +
                            (left_child_hdr->bth_count - 1) * size;
                } else {
                        zfs_btree_leaf_t *left_child =
                            (zfs_btree_leaf_t *)left_child_hdr;
                        left_child_last = left_child->btl_elems +
                            (left_child_hdr->bth_first +
                            left_child_hdr->bth_count - 1) * size;
                }
                int comp = tree->bt_compar(node->btc_elems + i * size,
                    left_child_last);
                if (comp <= 0) {
                        panic("btree: compar returned %d (expected 1) at "
                            "%px %d: compar(%px, %px)", comp, node, i,
                            node->btc_elems + i * size, left_child_last);
                }

                uint8_t *right_child_first = NULL;
                zfs_btree_hdr_t *right_child_hdr = node->btc_children[i + 1];
                if (zfs_btree_is_core(right_child_hdr)) {
                        zfs_btree_core_t *right_child =
                            (zfs_btree_core_t *)right_child_hdr;
                        right_child_first = right_child->btc_elems;
                } else {
                        zfs_btree_leaf_t *right_child =
                            (zfs_btree_leaf_t *)right_child_hdr;
                        right_child_first = right_child->btl_elems +
                            right_child_hdr->bth_first * size;
                }
                comp = tree->bt_compar(node->btc_elems + i * size,
                    right_child_first);
                if (comp >= 0) {
                        panic("btree: compar returned %d (expected -1) at "
                            "%px %d: compar(%px, %px)", comp, node, i,
                            node->btc_elems + i * size, right_child_first);
                }
        }
        for (uint32_t i = 0; i <= hdr->bth_count; i++)
                zfs_btree_verify_order_helper(tree, node->btc_children[i]);
}

/* Check that all elements in the tree are in sorted order. */
static void
zfs_btree_verify_order(zfs_btree_t *tree)
{
        EQUIV(tree->bt_height == -1, tree->bt_root == NULL);
        if (tree->bt_height == -1) {
                return;
        }

        zfs_btree_verify_order_helper(tree, tree->bt_root);
}
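
/*
 * The order checks above assume that bt_compar() returns exactly -1, 0, or 1
 * rather than arbitrary negative or positive values. A minimal comparator
 * sketch that satisfies this contract (my_data_t and its key field are
 * hypothetical):
 *
 *      static int
 *      my_compar(const void *va, const void *vb)
 *      {
 *              const my_data_t *a = va, *b = vb;
 *
 *              if (a->key < b->key)
 *                      return (-1);
 *              if (a->key > b->key)
 *                      return (1);
 *              return (0);
 *      }
 */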

#ifdef ZFS_DEBUG
/* Check that all unused memory is poisoned correctly. */
static void
zfs_btree_verify_poison_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
        size_t size = tree->bt_elem_size;
        if (!zfs_btree_is_core(hdr)) {
                zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
                for (size_t i = 0; i < hdr->bth_first * size; i++)
                        VERIFY3U(leaf->btl_elems[i], ==, 0x0f);
                size_t esize = tree->bt_leaf_size -
                    offsetof(zfs_btree_leaf_t, btl_elems);
                for (size_t i = (hdr->bth_first + hdr->bth_count) * size;
                    i < esize; i++)
                        VERIFY3U(leaf->btl_elems[i], ==, 0x0f);
        } else {
                zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
                for (size_t i = hdr->bth_count * size;
                    i < BTREE_CORE_ELEMS * size; i++)
                        VERIFY3U(node->btc_elems[i], ==, 0x0f);

                for (uint32_t i = hdr->bth_count + 1; i <= BTREE_CORE_ELEMS;
                    i++) {
                        VERIFY3P(node->btc_children[i], ==,
                            (zfs_btree_hdr_t *)BTREE_POISON);
                }

                for (uint32_t i = 0; i <= hdr->bth_count; i++) {
                        zfs_btree_verify_poison_helper(tree,
                            node->btc_children[i]);
                }
        }
}
#endif

/* Check that unused memory in the tree is still poisoned. */
static void
zfs_btree_verify_poison(zfs_btree_t *tree)
{
#ifdef ZFS_DEBUG
        if (tree->bt_height == -1)
                return;
        zfs_btree_verify_poison_helper(tree, tree->bt_root);
#endif
}

void
zfs_btree_verify(zfs_btree_t *tree)
{
        if (zfs_btree_verify_intensity == 0)
                return;
        zfs_btree_verify_height(tree);
        if (zfs_btree_verify_intensity == 1)
                return;
        zfs_btree_verify_pointers(tree);
        if (zfs_btree_verify_intensity == 2)
                return;
        zfs_btree_verify_counts(tree);
        if (zfs_btree_verify_intensity == 3)
                return;
        zfs_btree_verify_order(tree);

        if (zfs_btree_verify_intensity == 4)
                return;
        zfs_btree_verify_poison(tree);
}

ZFS_MODULE_PARAM(zfs, zfs_, btree_verify_intensity, UINT, ZMOD_RW,
        "Enable btree verification. Levels above 4 require ZFS be built "
        "with debugging");
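
/*
 * Example (an illustrative sketch): zfs_btree_verify() is a no-op until
 * zfs_btree_verify_intensity is raised, normally via the read-write module
 * parameter declared above. Test or debug code can get the same effect
 * directly, where tree is any zfs_btree_t under test:
 *
 *      zfs_btree_verify_intensity = 3;
 *      zfs_btree_verify(tree);
 *
 * Intensity 5 only adds coverage when ZFS_DEBUG is defined, since the body
 * of zfs_btree_verify_poison() is compiled out otherwise.
 */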